47920
|
import os
import numpy
from pydub import AudioSegment
from scipy.fftpack import fft
class AudioSignal(object):
def __init__(self, sample_rate, signal=None, filename=None):
# Set sample rate
self._sample_rate = sample_rate
if signal is None:
# Get file name and file extension
file, file_extension = os.path.splitext(filename)
# Check if file extension is an audio format
if file_extension in ['.mp3', '.wav']:
# Read audio file
self._signal = self.read_audio_file(filename)
# Check if file extension is a video format
elif file_extension in ['.mp4', '.mkv', '.avi']:
# Extract audio from video
new_filename = self.extract_audio_from_video(filename)
# read audio file from extracted audio file
self._signal = self.read_audio_file(new_filename)
# Case file extension is not supported
else:
print("Error: file not found or file extension not supported.")
elif filename is None:
# Cast signal to array
self._signal = signal
else:
print("Error : argument missing in AudioSignal() constructor.")
'''
Function to extract audio from a video
'''
def extract_audio_from_video(self, filename):
# Get video file name and extension
file, file_extension = os.path.splitext(filename)
# Extract audio (.wav) from video
os.system('ffmpeg -i "' + filename + '" -ar ' + str(self._sample_rate) + ' "' + file + '.wav"')
print("Successfully converted {} into audio!".format(filename))
# Return audio file name created
return file + '.wav'
'''
Function to read audio file and to return audio samples of a specified WAV file
'''
def read_audio_file(self, filename):
# Get audio signal
audio_file = AudioSegment.from_file(filename)
# Resample audio signal
audio_file = audio_file.set_frame_rate(self._sample_rate)
# Cast to integer
if audio_file.sample_width == 2:
data = numpy.frombuffer(audio_file._data, numpy.int16)
elif audio_file.sample_width == 4:
data = numpy.frombuffer(audio_file._data, numpy.int32)
# Merge audio channels
audio_signal = []
for chn in list(range(audio_file.channels)):
audio_signal.append(data[chn::audio_file.channels])
audio_signal = numpy.array(audio_signal).T
# Flatten the signal if it has a single channel
if audio_signal.ndim == 2:
if audio_signal.shape[1] == 1:
audio_signal = audio_signal.flatten()
# Convert stereo to mono
audio_signal = self.stereo_to_mono(audio_signal)
# Return sample rate and audio signal
return audio_signal
'''
Function to convert an input signal from stereo to mono
'''
@staticmethod
def stereo_to_mono(audio_signal):
# Check if signal is stereo and convert to mono
if isinstance(audio_signal, int):
return -1
if audio_signal.ndim == 1:
return audio_signal
elif audio_signal.ndim == 2:
if audio_signal.shape[1] == 1:
return audio_signal.flatten()
else:
if audio_signal.shape[1] == 2:
return (audio_signal[:, 1] / 2) + (audio_signal[:, 0] / 2)
else:
return -1
'''
Function to split the input signal into windows of same size
'''
def framing(self, size, step, hamming=False):
# Rescale windows step and size
win_size = int(size * self._sample_rate)
win_step = int(step * self._sample_rate)
# Number of frames
nb_frames = 1 + int((len(self._signal) - win_size) / win_step)
# Build Hamming function
if hamming is True:
ham = numpy.hamming(win_size)
else:
ham = numpy.ones(win_size)
# Split signals (and multiply each windows signals by Hamming functions)
frames = []
for t in range(nb_frames):
sub_signal = AudioSignal(self._sample_rate, signal=self._signal[(t * win_step): (t * win_step + win_size)] * ham)
frames.append(sub_signal)
return frames
'''
Function to compute the magnitude of the Discrete Fourier Transform coefficient
'''
def dft(self, norm=False):
# Compute the magnitude of the spectrum (and normalize by the number of samples)
if norm is True:
dft = abs(fft(self._signal)) / len(self._signal)
else:
dft = abs(fft(self._signal))
return dft
'''
Function to apply pre-emphasis filter on signal
'''
def pre_emphasis(self, alpha=0.97):
# Emphasized signal
emphasized_signal = numpy.append(self._signal[0], self._signal[1:] - alpha * self._signal[:-1])
return emphasized_signal
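# Usage sketch (hedged): 'example.wav' is a hypothetical path; assumes pydub and
# ffmpeg can decode the file. Splits the signal into 25 ms frames with a 10 ms
# step and takes the normalized DFT magnitude of the first frame.
if __name__ == '__main__':
    audio = AudioSignal(16000, filename='example.wav')
    frames = audio.framing(size=0.025, step=0.010, hamming=True)
    spectrum = frames[0].dft(norm=True)
    print(len(frames), spectrum.shape)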
|
48117
|
import pytest
@pytest.mark.parametrize(('x', 'y', ), [(0, [1]), (0, [1]), (str(0), str([1]))])
def test_foo(x, y):
assert str([int(x) + 1]) == y
|
48130
|
from inliner import inline
class SillyGetterSetter(object):
def __init__(self, stuff):
self.stuff = stuff
@inline
def setStuff(self, obj):
self.stuff = obj
@inline
def getStuff(self):
return self.stuff
@inline
def add_stuff(x, y):
return x + y
def add_lots_of_numbers():
for i in range(10):
add_stuff(i, i+1)
import dis
dis.dis(add_lots_of_numbers)
|
48134
|
import ctypes
class CDLL_errno(ctypes.CDLL):
class _FuncPtr(ctypes._CFuncPtr):
_flags_ = ctypes._FUNCFLAG_CDECL | ctypes._FUNCFLAG_USE_ERRNO
_restype_ = ctypes.c_int
def __call__(self, *args):
ctypes.set_errno(0)
try:
return ctypes._CFuncPtr.__call__(self, *args)
finally:
errno = ctypes.get_errno()
if errno:
import os
raise IOError(errno, os.strerror(errno))
def __init__(self, *args, **kw):
ctypes.CDLL.__init__(self, *args, **kw)
del self._FuncPtr
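# Usage sketch (hedged): assumes a Linux system where libc is 'libc.so.6'.
# A failing call now raises IOError with the captured errno instead of
# silently returning -1.
if __name__ == '__main__':
    libc = CDLL_errno('libc.so.6')
    try:
        libc.chdir(b'/nonexistent-directory')
    except IOError as err:
        print(err)  # e.g. [Errno 2] No such file or directory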
|
48158
|
import FWCore.ParameterSet.Config as cms
allSuperClusterCandidates = cms.EDProducer("ConcreteEcalCandidateProducer",
src = cms.InputTag("hybridSuperClusters"),
particleType = cms.string('gamma')
)
|
48173
|
from __future__ import absolute_import
from pyti import catch_errors
from pyti.exponential_moving_average import (
exponential_moving_average as ema
)
def double_exponential_moving_average(data, period):
"""
Double Exponential Moving Average.
Formula:
DEMA = 2*EMA - EMA(EMA)
"""
catch_errors.check_for_period_error(data, period)
dema = (2 * ema(data, period)) - ema(ema(data, period), period)
return dema
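# Usage sketch (hedged): a short, made-up price series; pyti returns an array
# with NaNs over the warm-up region before the DEMA is defined.
if __name__ == '__main__':
    prices = [10.0, 10.5, 10.3, 10.8, 11.0, 10.9, 11.2, 11.5, 11.3, 11.6]
    print(double_exponential_moving_average(prices, period=4))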
|
48188
|
import importlib.util
import logging
import logging.config
import os
import typing as t
from pathlib import Path
import yaml
import xleapp.globals as g
from ..helpers.utils import generate_program_header
StrPath = t.Union[str, os.PathLike[str]]
class ProcessFileFilter(logging.Filter):
def filter(self, record):
return record.name == "xleapp.process" and record.levelno >= 20
class InfoLogFileFilter(logging.Filter):
def filter(self, record):
return record.name == "xleapp.logfile" and record.levelno >= 20
class DebugFileFilter(logging.Filter):
def filter(self, record):
return g.app.debug
class StreamHandler(logging.StreamHandler):
def emit(self, record: logging.LogRecord):
if record.msg.startswith("->"):
record.msg = f" {record.msg}"
logging.StreamHandler.emit(self, record)
class FileHandler(logging.FileHandler):
def __init__(
self,
filename: StrPath,
mode: str = "a",
encoding: t.Union[str, None] = None,
delay: bool = False,
errors: t.Union[str, None] = None,
) -> None:
super().__init__(
filename,
mode=mode,
encoding=encoding,
delay=delay,
errors=errors,
)
def emit(self, record: logging.LogRecord):
if record.msg.startswith("->"):
record.msg = f" {record.msg}"
logging.FileHandler.emit(self, record)
class FileHandlerWithHeader(logging.FileHandler):
def __init__(self, filename, header, mode="a", encoding=None, delay=0):
self.header = header
self.file_pre_exists = Path(filename).exists()
logging.FileHandler.__init__(self, filename, mode, encoding, delay)
if not delay and self.stream is not None:
self.stream.write("%s\n" % header)
def emit(self, record: logging.LogRecord):
if self.stream is None:
self.stream = self._open()
if not self.file_pre_exists:
self.stream.write("%s\n" % self.header)
message = record.msg
if message.startswith("->"):
message = f" {message}"
record.msg = message
logging.FileHandler.emit(self, record)
def init() -> None:
mod = importlib.util.find_spec(__name__)
if not mod:
raise FileNotFoundError("Missing package 'log_config.yaml' to configure logging!")
if mod.origin:
logConfig = Path(mod.origin).parent / "log_config.yaml"
with open(logConfig, "r") as file:
config = yaml.safe_load(file.read())
if not g.app.log_folder.exists():
g.app.log_folder.mkdir(parents=True, exist_ok=True)
info_log_file = config["handlers"]["info_file_handler"]["filename"]
config["handlers"]["info_file_handler"]["filename"] = (
g.app.log_folder / info_log_file
)
config["handlers"]["info_file_handler"]["header"] = generate_program_header(
project_version=f"{g.app.project} v{g.app.version}",
input_path=g.app.input_path,
output_path=g.app.output_path,
num_to_process=g.app.num_to_process,
num_of_categories=g.app.num_of_categories,
)
process_log_file = config["handlers"]["process_file_handler"]["filename"]
config["handlers"]["process_file_handler"]["filename"] = (
g.app.log_folder / process_log_file
)
debug_log_file = config["handlers"]["debug_file_handler"]["filename"]
config["handlers"]["debug_file_handler"]["filename"] = (
g.app.log_folder / debug_log_file
)
logging.config.dictConfig(config)
else:
raise FileNotFoundError(
"Package found! Missing 'log_config.yaml' to "
"configure logging! Reinstall package.",
)
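# Hedged sketch of the dictConfig structure that init() expects to find in
# 'log_config.yaml', expressed as the equivalent Python mapping. Handler and
# filter names mirror the code above; the file shipped with xleapp may differ.
EXAMPLE_LOG_CONFIG = {
    "version": 1,
    "filters": {
        "process": {"()": ProcessFileFilter},
        "info": {"()": InfoLogFileFilter},
        "debug": {"()": DebugFileFilter},
    },
    "handlers": {
        "info_file_handler": {
            "()": FileHandlerWithHeader,
            "filename": "xleapp.log",
            "header": "",
            "filters": ["info"],
        },
        "process_file_handler": {
            "()": FileHandler,
            "filename": "process.log",
            "filters": ["process"],
        },
        "debug_file_handler": {
            "()": FileHandler,
            "filename": "debug.log",
            "filters": ["debug"],
        },
    },
    "root": {"level": "DEBUG", "handlers": ["info_file_handler"]},
}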
|
48215
|
from django.db import models
LINEAGE_CHOICES = (
('startswith', 'startswith'),
('endswith', 'endswith'),
('contains', 'contains'),
('equals', 'equals')
)
class Lineage(models.Model):
parent = models.ForeignKey("self", blank=True, null=True, on_delete=models.CASCADE)
key = models.CharField(choices=LINEAGE_CHOICES, blank=False, null=False, max_length=255)
value = models.CharField(max_length=500, blank=False, null=False)
os = models.CharField(max_length=255, blank=False, null=False)
def __str__(self):
if self.parent:
return f"{self.os}: {self.parent.key}:{self.parent.value} > {self.key}:{self.value}"
else:
return f"{self.os}: {self.key}:{self.value}"
|
48230
|
from __future__ import absolute_import
from .duckpunch import DuckPunch
monkey_patch = DuckPunch()
|
48233
|
import numpy as np
from prml.nn.function import Function
class Product(Function):
def __init__(self, axis=None, keepdims=False):
if isinstance(axis, int):
axis = (axis,)
elif isinstance(axis, tuple):
axis = tuple(sorted(axis))
self.axis = axis
self.keepdims = keepdims
def _forward(self, x):
self.output = np.prod(x, axis=self.axis, keepdims=True)
if not self.keepdims:
return np.squeeze(self.output)
else:
return self.output
def backward(self, delta, x):
if not self.keepdims and self.axis is not None:
for ax in self.axis:
delta = np.expand_dims(delta, ax)
dx = delta * self.output / x
return dx
def prod(x, axis=None, keepdims=False):
"""
product of all element in the array
Parameters
----------
x : tensor_like
input array
axis : int, tuple of ints
axis or axes along which a product is performed
keepdims : bool
keep dimensionality or not
Returns
-------
product : tensor_like
product of all element
"""
return Product(axis=axis, keepdims=keepdims).forward(x)
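# Usage sketch (hedged): Function.forward is assumed to wrap _forward as in the
# other prml.nn functions, so prod(x, axis=1) should match plain numpy:
if __name__ == '__main__':
    x = np.array([[1.0, 2.0], [3.0, 4.0]])
    print(np.prod(x, axis=1))  # [ 2. 12.] -- the value prod(x, axis=1) should produce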
|
48256
|
import flow
import tensorflow as tf
import gin
import numpy as np
import numpy.testing as npt
def test_f():
mol = gin.i_o.from_smiles.to_mol('CC')
mol = gin.deterministic.hydrogen.add_hydrogen(mol)
atoms, adjacency_map = mol
chinese_postman_routes = tf.constant(
[
[0, 1, 4, 1, 5, 1, 2, 3, 2, 7, 2, 6],
[0, 1, 4, 1, 5, 1, 2, 3, 2, 6, 2, 7],
[0, 1, 4, 1, 5, 1, 2, 6, 2, 3, 2, 7],
[0, 1, 4, 1, 5, 1, 2, 6, 2, 7, 2, 3],
[0, 1, 4, 1, 5, 1, 2, 7, 2, 6, 2, 3],
[0, 1, 4, 1, 5, 1, 2, 7, 2, 3, 2, 6],
[0, 1, 5, 1, 4, 1, 2, 3, 2, 7, 2, 6],
[0, 1, 5, 1, 4, 1, 2, 3, 2, 6, 2, 7],
[0, 1, 5, 1, 4, 1, 2, 6, 2, 3, 2, 7],
[0, 1, 5, 1, 4, 1, 2, 6, 2, 7, 2, 3],
[0, 1, 5, 1, 4, 1, 2, 7, 2, 6, 2, 3],
[0, 1, 5, 1, 4, 1, 2, 7, 2, 3, 2, 6],
[5, 1, 0, 1, 4, 1, 2, 3, 2, 7, 2, 6],
[5, 1, 0, 1, 4, 1, 2, 3, 2, 6, 2, 7],
[5, 1, 0, 1, 4, 1, 2, 6, 2, 3, 2, 7],
[5, 1, 0, 1, 4, 1, 2, 6, 2, 7, 2, 3],
[5, 1, 0, 1, 4, 1, 2, 7, 2, 6, 2, 3],
[5, 1, 0, 1, 4, 1, 2, 7, 2, 3, 2, 6],
[5, 1, 4, 1, 0, 1, 2, 3, 2, 7, 2, 6],
[5, 1, 4, 1, 0, 1, 2, 3, 2, 6, 2, 7],
[5, 1, 4, 1, 0, 1, 2, 6, 2, 3, 2, 7],
[5, 1, 4, 1, 0, 1, 2, 6, 2, 7, 2, 3],
[5, 1, 4, 1, 0, 1, 2, 7, 2, 6, 2, 3],
[5, 1, 4, 1, 0, 1, 2, 7, 2, 3, 2, 6],
[4, 1, 5, 1, 0, 1, 2, 3, 2, 7, 2, 6],
[4, 1, 5, 1, 0, 1, 2, 3, 2, 6, 2, 7],
[4, 1, 5, 1, 0, 1, 2, 6, 2, 3, 2, 7],
[4, 1, 5, 1, 0, 1, 2, 6, 2, 7, 2, 3],
[4, 1, 5, 1, 0, 1, 2, 7, 2, 6, 2, 3],
[4, 1, 5, 1, 0, 1, 2, 7, 2, 3, 2, 6],
[4, 1, 0, 1, 5, 1, 2, 3, 2, 7, 2, 6],
[4, 1, 0, 1, 5, 1, 2, 3, 2, 6, 2, 7],
[4, 1, 0, 1, 5, 1, 2, 6, 2, 3, 2, 7],
[4, 1, 0, 1, 5, 1, 2, 6, 2, 7, 2, 3],
[4, 1, 0, 1, 5, 1, 2, 7, 2, 6, 2, 3],
[4, 1, 0, 1, 5, 1, 2, 7, 2, 3, 2, 6],
],
dtype=tf.int64)
graph_flow = flow.GraphFlow(whiten=False)
z = tf.random.normal(
shape = (36, 6, 3))
x, log_det_zx = graph_flow.f_zx(z, atoms, adjacency_map, chinese_postman_routes)
z_, log_det_xz = graph_flow.f_xz(x, atoms, adjacency_map, chinese_postman_routes)
npt.assert_almost_equal(z.numpy(), z_.numpy())
# x_, log_det_zx_ = graph_flow.f_zx(z_, atoms, adjacency_map, chinese_postman_routes)
# npt.assert_almost_equal(z_.numpy(), z.numpy())
# npt.assert_almost_equal(z.numpy(), z_.numpy())
# npt.assert_almost_equal(log_det_zx.numpy(), log_det_xz.numpy())
# npt.assert_almost_equal(x.numpy(), x_.numpy())
|
48257
|
import factory
from factory import fuzzy
from .. import models
class RegionFactory(factory.django.DjangoModelFactory):
class Meta:
model = models.Region
name = fuzzy.FuzzyChoice(models.RegionName)
|
48267
|
import datetime
from django.db import models
from django.utils import timezone
class Question( models.Model):
question_text = models.CharField( max_length=200 )
pub_date = models.DateTimeField( 'date published' )
def __str__( self ):
return self.question_text
def was_published_recently( self ):
    now = timezone.now()
    return now - datetime.timedelta( days=1 ) <= self.pub_date <= now
was_published_recently.admin_order_field = 'pub_date'
was_published_recently.boolean = True
was_published_recently.short_description = 'Published recently?'
class Choice(models.Model):
question = models.ForeignKey( Question, on_delete=models.CASCADE )
choice_text = models.CharField( max_length=200 )
votes = models.IntegerField( default=0 )
def __str__( self ):
return self.choice_text
|
48281
|
from rest_framework import viewsets
from .serializers import TeamSerializer
from .models import Team
from django.core.exceptions import PermissionDenied
class TeamViewSet(viewsets.ModelViewSet):
serializer_class = TeamSerializer
queryset = Team.objects.all()
def get_queryset(self):
teams = self.request.user.teams.all()
if not teams:
Team.objects.create(name='', org_number='', created_by=self.request.user)
return self.queryset.filter(created_by=self.request.user)
def perform_create(self, serializer):
serializer.save(created_by=self.request.user)
def perform_update(self, serializer):
obj = self.get_object()
if self.request.user != obj.created_by:
raise PermissionDenied('Wrong object owner')
serializer.save()
|
48311
|
from nonebot import on_command
from nonebot.typing import T_State
from nonebot.adapters.cqhttp import Bot, MessageEvent, MessageSegment, unescape
from .data_source import t2p, m2p
__des__ = '文本、Markdown转图片'
__cmd__ = '''
text2pic/t2p {text}
md2pic/m2p {text}
'''.strip()
__short_cmd__ = 't2p、m2p'
__example__ = '''
t2p test
m2p $test$ test `test`
'''.strip()
__usage__ = f'{__des__}\nUsage:\n{__cmd__}\nExample:\n{__example__}'
text2pic = on_command('text2pic', aliases={'t2p'}, priority=12)
md2pic = on_command('md2pic', aliases={'markdown', 'm2p'}, priority=12)
@text2pic.handle()
async def _(bot: Bot, event: MessageEvent, state: T_State):
msg = unescape(event.get_plaintext().strip())
if not msg:
await text2pic.finish()
img = await t2p(msg)
if img:
await text2pic.finish(MessageSegment.image(img))
@md2pic.handle()
async def _(bot: Bot, event: MessageEvent, state: T_State):
msg = unescape(event.get_plaintext().strip())
if not msg:
await md2pic.finish()
img = await m2p(msg)
if img:
await md2pic.finish(MessageSegment.image(img))
|
48313
|
import pytest
from helpers import create_request
import acurl
def test_to_curl():
r = create_request("GET", "http://foo.com")
assert r.to_curl() == "curl -X GET http://foo.com"
def test_to_curl_headers():
r = create_request(
"GET", "http://foo.com", headers=("Foo: bar", "My-Header: is-awesome")
)
assert (
r.to_curl()
== "curl -X GET -H 'Foo: bar' -H 'My-Header: is-awesome' http://foo.com"
)
def test_to_curl_cookies():
r = create_request(
"GET",
"http://foo.com",
cookies=(acurl._Cookie(False, "foo.com", True, "/", False, 0, "123", "456"),),
)
assert r.to_curl() == "curl -X GET --cookie 123=456 http://foo.com"
def test_to_curl_multiple_cookies():
r = create_request(
"GET",
"http://foo.com",
cookies=(
acurl._Cookie(False, "foo.com", True, "/", False, 0, "123", "456"),
acurl._Cookie(False, "foo.com", True, "/", False, 0, "789", "abc"),
),
)
assert r.to_curl() == "curl -X GET --cookie '123=456;789=abc' http://foo.com"
@pytest.mark.skip(reason="unimplemented")
def test_to_curl_cookies_wrong_domain():
# I'm not sure if this is a valid test case...Request objects should
# probably only be constructed via Session.request, which always creates
# cookies for the domain of the request. So the case this is exercising
# won't ever happen.
r = create_request(
"GET",
"http://foo.com",
cookies=(
acurl._Cookie(
False,
"bar.com", # The domain doesn't match, the cookie should not be passed
True,
"/",
False,
0,
"123",
"456",
),
),
)
assert r.to_curl() == "curl -X GET http://foo.com"
def test_to_curl_auth():
r = create_request("GET", "http://foo.com", auth=("user", "pass"))
assert r.to_curl() == "curl -X GET --user user:pass http://foo.com"
|
48385
|
import os
import unittest
this_dir = os.path.dirname(os.path.realpath(__file__))
class TestPageXML(unittest.TestCase):
def run_dataset_viewer(self, add_args):
from calamari_ocr.scripts.dataset_viewer import main
main(add_args + ["--no_plot"])
def test_cut_modes(self):
images = os.path.join(this_dir, "data", "avicanon_pagexml", "*.nrm.png")
self.run_dataset_viewer(["--gen", "PageXML", "--gen.images", images, "--gen.cut_mode", "BOX"])
self.run_dataset_viewer(["--gen", "PageXML", "--gen.images", images, "--gen.cut_mode", "MBR"])
|
48399
|
from __future__ import annotations
from argparse import ArgumentParser
from pathlib import Path
from .._colors import get_colors
from ..linter import TransformationType, Transformer
from ._base import Command
from ._common import get_paths
class DecorateCommand(Command):
"""Add decorators to your code.
```bash
python3 -m deal decorate project/
```
Options:
+ `--types`: types of decorators to apply. All are enabled by default.
+ `--double-quotes`: use double quotes. Single quotes are used by default.
+ `--nocolor`: do not use colors in the console output.
The exit code is always 0. If you want to test the code for missed decorators,
use the `lint` command instead.
"""
@staticmethod
def init_parser(parser: ArgumentParser) -> None:
parser.add_argument(
'--types',
nargs='*',
choices=[tt.value for tt in TransformationType],
default=sorted(tt.value for tt in TransformationType),
help='types of decorators to apply',
)
parser.add_argument(
'--double-quotes',
action='store_true',
help='use double quotes',
)
parser.add_argument('--nocolor', action='store_true', help='colorless output')
parser.add_argument('paths', nargs='*', default='.')
def __call__(self, args) -> int:
types = {TransformationType(t) for t in args.types}
colors = get_colors(args)
for arg in args.paths:
for path in get_paths(Path(arg)):
self.print('{magenta}{path}{end}'.format(path=path, **colors))
original_code = path.read_text(encoding='utf8')
tr = Transformer(
content=original_code,
path=path,
types=types,
)
if args.double_quotes:
tr = tr._replace(quote='"')
modified_code = tr.transform()
if original_code == modified_code:
self.print(' {blue}no changes{end}'.format(**colors))
else:
path.write_text(modified_code)
self.print(' {green}decorated{end}'.format(**colors))
return 0
|
48427
|
from .ranking import CreditRanking
from .interleaving_method import InterleavingMethod
import numpy as np
from scipy.optimize import linprog
class Optimized(InterleavingMethod):
'''
Optimized Interleaving
Args:
lists: lists of document IDs
max_length: the maximum length of resultant interleaving.
If this is None (default), it is set to the minimum length
of the given lists.
sample_num: If this is None (default), an interleaved ranking is
generated every time when `interleave` is called.
Otherwise, `sample_num` rankings are sampled in the
initialization, one of which is returned when `interleave`
is called.
credit_func: either 'inverse' (1/rank) or 'negative' (-rank)
'''
def __init__(self, lists, max_length=None, sample_num=None,
credit_func='inverse', secure_sampling=False):
'''
lists: lists of document IDs
max_length: the maximum length of resultant interleaving.
If this is None (default), it is set to the minimum length
of the given lists.
sample_num: If this is None (default), an interleaved ranking is
generated every time when `interleave` is called.
Otherwise, `sample_num` rankings are sampled in the
initialization, one of which is returned when `interleave`
is called.
credit_func: either 'inverse' (1/rank) or 'negative' (-rank)
'''
if sample_num is None:
raise ValueError('sample_num cannot be None, '
+ 'i.e. the initial sampling is necessary')
if credit_func == 'inverse':
self._credit_func = lambda x: 1.0 / x
elif credit_func == 'negative':
self._credit_func = lambda x: -x
else:
raise ValueError('credit_func should be either inverse or negative')
self._secure_sampling = secure_sampling
super(Optimized, self).__init__(lists,
max_length=max_length, sample_num=sample_num)
# self._rankings (sampled rankings) is obtained here
res = self._compute_probabilities(lists, self._rankings)
is_success, self._probabilities, _ = res
if not is_success:
    raise ValueError('Optimization failed')
self._probabilities /= np.sum(self._probabilities)
def _sample_rankings(self):
'''
Sample `sample_num` rankings
'''
distribution = {}
if self._secure_sampling:
rankings = set()
for _ in range(self.sample_num):
rankings.add(self._sample(self.max_length, self.lists))
for ranking in rankings:
distribution[ranking] = 1.0 / len(rankings)
else:
while len(distribution) < self.sample_num:
ranking = self._sample(self.max_length, self.lists)
distribution[ranking] = 1.0 / self.sample_num
self._rankings, self._probabilities = zip(*distribution.items())
def _sample(self, max_length, lists):
'''
Prefix constraint sampling
(Multileaved Comparisons for Fast Online Evaluation, CIKM'14)
max_length: the maximum length of resultant interleaving
lists: lists of document IDs
Return an instance of Ranking
'''
num_rankers = len(lists)
result = CreditRanking(num_rankers)
teams = set(range(num_rankers))
while len(result) < max_length:
if len(teams) == 0:
break
selected_team = np.random.choice(list(teams))
docs = [x for x in lists[selected_team] if x not in result]
if len(docs) > 0:
selected_doc = docs[0]
result.append(selected_doc)
else:
teams.remove(selected_team)
# assign credits
for docid in result:
for team in result.credits:
if docid in lists[team]:
rank = lists[team].index(docid) + 1
else:
rank = len(lists[team]) + 1
result.credits[team][docid] = self._credit_func(rank)
return result
def _compute_probabilities(self, lists, rankings):
'''
Solve the optimization problem in
(Multileaved Comparisons for Fast Online Evaluation, CIKM'14)
lists: lists of document IDs
rankings: a list of Ranking instances
Return a list of probabilities for input rankings
'''
# probability constraints
A_p_sum = np.array([1]*len(rankings))
# unbiasedness constraints
ub_cons = self._unbiasedness_constraints(lists, rankings)
# sensitivity
sensitivity = self._sensitivity(lists, rankings)
# constraints
A_eq = np.vstack((A_p_sum, ub_cons))
b_eq = np.array([1.0] + [0.0]*ub_cons.shape[0])
# solving the optimization problem
res = linprog(sensitivity, # objective function
A_eq=A_eq, b_eq=b_eq, # constraints
bounds=[(0, 1)]*len(rankings) # 0 <= p <= 1
)
return res.success, res.x, res.fun
def _unbiasedness_constraints(self, lists, rankings):
'''
for each k and team x, for a certain c_k:
sum_{L_i} {p_i} * sum^k_{j=1} ranking.credits[x][d_j] = c_k
In other words,
sum_{L_i} {p_i} * sum^k_{j=1}
(ranking.credits[x][d_j] - ranking.credits[x+1][d_j]) = 0
'''
result = []
credits = np.zeros((self.max_length, len(rankings), len(lists)))
for rid, ranking in enumerate(rankings):
for idx, docid in enumerate(ranking):
for team in ranking.credits:
credits[idx, rid, team] = ranking.credits[team][docid]
if idx > 0:
credits[idx, rid, team] += credits[idx-1, rid, team]
for i in range(len(lists) - 1):
result.append(credits[:, :, i] - credits[:, :, i+1])
result = np.vstack(result)
return result
def _sensitivity(self, lists, rankings):
'''
Expected variance
'''
# compute the mean of each ranking
mu = np.zeros(len(rankings))
for rid, ranking in enumerate(rankings):
for idx, docid in enumerate(ranking):
click_prob = 1.0 / (idx + 1)
credit = np.sum(
[ranking.credits[x][docid] for x in ranking.credits])
mu[rid] += click_prob * credit
mu /= len(lists)
# compute the variance
var = np.zeros(len(rankings))
for rid, ranking in enumerate(rankings):
for x in ranking.credits:
v = 0.0
for idx, docid in enumerate(ranking):
click_prob = 1.0 / (idx + 1)
if docid in ranking.credits[x]:
v += click_prob * ranking.credits[x][docid]
v -= mu[rid]
var[rid] += v ** 2
return var
@classmethod
def compute_scores(cls, ranking, clicks):
'''
ranking: an instance of Ranking
clicks: a list of indices clicked by a user
Return a list of scores of each ranker.
'''
return {i: sum([ranking.credits[i][ranking[c]] for c in clicks])
for i in ranking.credits}
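# Usage sketch (hedged, illustrative only): assumes the InterleavingMethod base
# class exposes interleave() and that the LP is feasible for the given lists.
#
#   method = Optimized([[0, 1, 2, 3], [3, 2, 1, 0]], sample_num=8)
#   ranking = method.interleave()
#   scores = Optimized.compute_scores(ranking, clicks=[0])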
|
48462
|
import os
import sys
sys.path.append('../')
import fire
import pickle
import json
def run_command(command):
if os.system(command) != 0:
raise RuntimeError(f'Command failed: {command}')
def work_with_one_model(cleared_corpus_path, ling_data, output_dir):
if not os.path.exists(output_dir):
os.mkdir(output_dir)
print('Extracting features.============================================')
run_command(f'python ./run_extract_features.py --cleared-corpus={cleared_corpus_path}' +
f' --ling-data={ling_data} --known-preds=true --output-dir={output_dir}')
print('Done.===========================================================')
print('Vectorize features.=============================================')
feature_path = os.path.join(output_dir, 'features.pckl')
run_command(f'python ./run_vectorize_features.py --feature_path={feature_path} --output_dir={output_dir}')
print('Done.===========================================================')
print('Generating embeddings.==========================================')
run_command(f'python ./run_generate_embeddings.py --feature_path={feature_path} --output_dir={output_dir}')
print('Done.===========================================================')
print('Training model.=================================================')
run_command(f'python ./run_train_model.py --input_dir={output_dir} --output_dir={output_dir}')
print('Done.===========================================================')
def extract_known_predicates(features_path, workdir):
with open(features_path, 'rb') as f:
dataframe = pickle.load(f)
known_preds = [e.split('_')[0] for e in dataframe.pred_lemma.tolist()]
with open(os.path.join(workdir, 'known_preds.json'), 'w') as f:
json.dump(known_preds, f)
def extract_known_predicated(cleared_corpus_path, workdir):
def make_pred_dict(data_chunk, pred_dict):
for sentence in data_chunk[1]:
for word in sentence:
pred_number = word.get('fillpred')
if pred_number:
if not pred_dict.get(sentence[pred_number]['lemma']):
pred_dict[sentence[pred_number]['lemma']] = {word.get('rolepred1'): 1}
else:
if not pred_dict.get(sentence[pred_number]['lemma']).get(word.get('rolepred1')):
pred_dict[sentence[pred_number]['lemma']][word.get('rolepred1')] = 1
else:
pred_dict[sentence[pred_number]['lemma']][word.get('rolepred1')] += 1
def filter_roles(pred_dictionary, threshold=5):
filtered_dict = {}
for predicate in pred_dictionary.keys():
new_pred = {}
for (key, value) in pred_dictionary[predicate].items():
if value > threshold:
new_pred[key] = value
filtered_dict[predicate] = new_pred
for predicate in filtered_dict.keys():
if not filtered_dict[predicate]:
top = sorted(pred_dictionary[predicate], key=pred_dictionary[predicate].get, reverse=True)[0]
filtered_dict[predicate][top] = pred_dictionary[predicate][top]
return filtered_dict
with open(cleared_corpus_path, 'r') as f:
data = json.load(f)
pred_dict = {}
for instance in data:
make_pred_dict(instance, pred_dict)
pred_dict = filter_roles(pred_dict)
known_preds = {key: list(value.keys()) for key, value in pred_dict.items()}
with open(os.path.join(workdir, 'known_preds.json'), 'w') as f:
json.dump(known_preds, f)
def main(data_dir, workdir):
cleared_corpus_path = os.path.join(data_dir, 'cleared_corpus.json')
ling_data = os.path.join(data_dir, 'ling_data.pckl')
print('Generating the model for known predicates**********************************')
output_dir = os.path.join(workdir, 'known_preds')
work_with_one_model(cleared_corpus_path, ling_data, output_dir)
extract_known_predicates(os.path.join(output_dir, 'features.pckl'), workdir)
print('Generating the model for unknown predicates********************************')
output_dir = os.path.join(workdir, 'unknown_preds')
work_with_one_model(cleared_corpus_path, ling_data, output_dir)
if __name__ == "__main__":
fire.Fire(main)
|
48499
|
import magma as m
from magma import DefineCircuit, EndCircuit, In, Out, Bit, Clock, wire
from magma.backend.verilog import compile
from mantle.xilinx.spartan6 import FDCE
def test_fdce():
main = DefineCircuit('main', 'I', In(Bit), "O", Out(Bit), "CLK", In(Clock))
dff = FDCE()
wire(m.enable(1), dff.CE)
wire(0, dff.CLR)
wire(main.I, dff.D)
wire(dff.Q, main.O)
EndCircuit()
print(compile(main)) # compile will wire up the CLK
print(repr(main))
|
48519
|
import io
import os
import time
import urllib.request
import zipfile
import numpy as np
from scipy.io.wavfile import read as wav_read
from tqdm import tqdm
class dclde:
"""
The high-frequency dataset consists of marked encounters with echolocation
clicks of species commonly found along the US Atlantic Coast, and in the
Gulf of Mexico:
Mesoplodon europaeus - Gervais' beaked whale
Ziphius cavirostris - Cuvier's beaked whale
Mesoplodon bidens - Sowerby's beaked whale
Lagenorhynchus acutus - Atlantic white-sided dolphin
Grampus griseus - Risso's dolphin
Globicephala macrorhynchus - Short-finned pilot whale
Stenella sp. - Stenellid dolphins
Delphinid type A
Delphinid type B
Unidentified delphinid - delphinid other than those described above
The goal for these datasets is to identify acoustic encounters by species
during times when animals were echolocating. Analysts examined data for
echolocation clicks and approximated the start and end times of acoustic
encounters. Any period that was separated from another one by five minutes
or more was marked as a separate encounter. Whistle activity was not
considered. Consequently, while the use of whistle information during
echolocation activity is appropriate, reporting a species based on whistles
in the absence of echolocation activity will be considered a false positive
for this classification task.
"""
@staticmethod
def download(path):
"""ToDo"""
# Load the dataset (download if necessary) and set
# the class attributes.
print("Loading DCLDE")
t = time.time()
if not os.path.isdir(path + "DCLDE"):
print("\tCreating Directory")
os.mkdir(path + "DCLDE")
if not os.path.exists(path + "DCLDE/DCLDE_LF_Dev.zip"):
url = "http://sabiod.univ-tln.fr/workspace/DCLDE2018/DCLDE_LF_Dev.zip"
# Show download progress via tqdm's urlretrieve reporthook.
with tqdm(unit="B", unit_scale=True, miniters=1, desc="Wav files") as pbar:
    urllib.request.urlretrieve(
        url,
        path + "DCLDE/DCLDE_LF_Dev.zip",
        reporthook=lambda b, bs, ts: pbar.update(b * bs - pbar.n),
    )
@staticmethod
def load(window_size=441000, path=None):
"""ToDo"""
if path is None:
path = os.environ["DATASET_path"]
dclde.download(path)
# Loading the files
f = zipfile.ZipFile(path + "DCLDE/DCLDE_LF_Dev.zip")
wavs = list()
# labels = list()
for zipf in tqdm(f.filelist, ascii=True):
if ".wav" in zipf.filename and ".d100." in zipf.filename:
wavfile = f.read(zipf)
byt = io.BytesIO(wavfile)
wav = wav_read(byt)[1].astype("float32")
for s in range(len(wav) // window_size):
wavs.append(wav[s * window_size : (s + 1) * window_size])
# labels.append(zipf.filename.split('/')[2])
# return wavs,labels
wavs = np.expand_dims(np.asarray(wavs), 1)
dataset.add_variable({"signals": {"train_set": wavs}})
print(
    "Dataset DCLDE loaded in", "{0:.2f}".format(time.time() - t), "s."
)
return dataset
|
48588
|
import socket
import xmlrpc.client
""" referemce: https://stackoverflow.com/a/14397619 """
class ServerProxy:
def __init__(self, url, timeout=10):
self.__url = url
self.__timeout = timeout
self.__prevDefaultTimeout = None
def __enter__(self):
try:
if self.__timeout:
self.__prevDefaultTimeout = socket.getdefaulttimeout()
socket.setdefaulttimeout(self.__timeout)
proxy = xmlrpc.client.ServerProxy(self.__url, allow_none=True)
except Exception as ex:
raise Exception("Unable create XMLRPC-proxy for url '%s': %s" % (self.__url, ex))
return proxy
def __exit__(self, type, value, traceback):
if self.__timeout:
    socket.setdefaulttimeout(self.__prevDefaultTimeout)
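# Usage sketch (hedged): URL and method are placeholders; system.listMethods is
# standard XML-RPC introspection and may not be enabled on every server.
if __name__ == '__main__':
    with ServerProxy('http://localhost:8000/RPC2', timeout=5) as proxy:
        print(proxy.system.listMethods())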
|
48660
|
import numpy as np
from amlearn.utils.basetest import AmLearnTest
from amlearn.utils.data import get_isometric_lists
class test_data(AmLearnTest):
def setUp(self):
pass
def test_get_isometric_lists(self):
test_lists = [[1, 2, 3], [4], [5, 6], [1, 2, 3]]
isometric_lists = \
get_isometric_lists(test_lists, limit_width=80, fill_value=0)
self.assertEqual(np.array(isometric_lists).shape, (4, 80))
test_arrays = np.array([np.array([1, 2, 3]), np.array([4]),
np.array([5, 6]), np.array([1, 2, 3])])
isometric_arrays = \
get_isometric_lists(test_arrays, limit_width=80, fill_value=0)
self.assertEqual(np.array(isometric_arrays).shape, (4, 80))
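# Hedged illustration of the behaviour these tests rely on: get_isometric_lists
# pads (or truncates) every row to limit_width with fill_value, e.g.
#   get_isometric_lists([[1, 2, 3], [4]], limit_width=5, fill_value=0)
#   -> [[1, 2, 3, 0, 0], [4, 0, 0, 0, 0]]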
|
48670
|
import json
import torch
import numpy as np
import os
#from pytorch_pretrained_bert import BertTokenizer
from transformers import BertTokenizer
class BertWordFormatter:
def __init__(self, config, mode):
self.max_question_len = config.getint("data", "max_question_len")
self.max_option_len = config.getint("data", "max_option_len")
self.tokenizer = BertTokenizer.from_pretrained(config.get("model", "bert_path"))
def convert_tokens_to_ids(self, tokens):
arr = []
for a in range(0, len(tokens)):
if tokens[a] in self.word2id:
arr.append(self.word2id[tokens[a]])
else:
arr.append(self.word2id["UNK"])
return arr
def convert(self, tokens, l, bk=False):
tokens = "".join(tokens)
# while len(tokens) < l:
# tokens.append("PAD")
# if bk:
# tokens = tokens[len(tokens) - l:]
# else:
# tokens = tokens[:l]
ids = self.tokenizer.tokenize(tokens)
return ids
def _convert_sentence_pair_to_bert_dataset(
self, context, max_len):
"""Convert sentence pairs to dataset for BERT model.
Args:
sc_list, bc_list: List[List[str]], list of word tokens list
label_list: train: List[int], list of labels
test: []
Returns:
Train:
torch.utils.data.TensorDataset
each record: (input_ids, input_mask, segment_ids, label)
Test:
torch.utils.data.TensorDataset
each record: (input_ids, input_mask, segment_ids)
"""
all_input_ids, all_input_mask, all_segment_ids = [], [], []
for i, _ in enumerate(context):
if len(context[i]) > max_len:
context[i] = context[i][-max_len:]
tokens = ['[CLS]'] + context[i] + ['[SEP]']
segment_ids = [i%2] * len(tokens)
if len(tokens) > max_len:
tokens = tokens[:max_len]
assert len(tokens) == max_len
segment_ids = segment_ids[:max_len]
input_ids = self.tokenizer.convert_tokens_to_ids(tokens)
input_mask = [1] * len(input_ids)
tokens_len = len(input_ids)
input_ids += [0] * (max_len - tokens_len)
segment_ids += [0] * (max_len - tokens_len)
input_mask += [0] * (max_len - tokens_len)
all_input_ids.append(input_ids)
all_input_mask.append(input_mask)
all_segment_ids.append(segment_ids)
all_input_ids = torch.tensor(all_input_ids, dtype=torch.long)
all_input_mask = torch.tensor(all_input_mask, dtype=torch.long)
all_segment_ids = torch.tensor(all_segment_ids, dtype=torch.long)
# test
return (
all_input_ids, all_input_mask, all_segment_ids)
def process(self, data, config, mode, *args, **params):
context = []
question = []
label = []
idx = []
for temp_data in data:
idx.append(temp_data["id"])
if mode != "test":
# label_x = []
# for opt in list("ABCD"):
# if opt in temp_data["answer"]:
# label_x.append(1)
# else:
# label_x.append(0)
label_x = -1
if "A" in temp_data["answer"]:
label_x += 1
if "B" in temp_data["answer"]:
label_x += 2
if "C" in temp_data["answer"]:
label_x += 4
if "D" in temp_data["answer"]:
label_x += 8
label.append(label_x)
temp_context = []
temp_question = []
temp_question.append(self.convert(temp_data["statement"], self.max_question_len, bk=True))
for option in ["A", "B", "C", "D"]:
temp_context.append(self.convert(temp_data["option_list"][option], self.max_option_len))
context.extend(temp_context)
question.extend(temp_question)
# question = torch.tensor(question, dtype=torch.long)
# context = torch.tensor(context, dtype=torch.long)
question = self._convert_sentence_pair_to_bert_dataset(question, self.max_question_len)
context = self._convert_sentence_pair_to_bert_dataset(context, self.max_option_len)
if mode != "test":
label = torch.LongTensor(np.array(label, dtype=np.int64))
return {"context": context, "question": question, 'label': label, "id": idx}
else:
return {"context": context, "question": question, "id": idx}
|
48677
|
import asyncio
import logging
import random
import warnings
from typing import AnyStr, ByteString, Callable, List, Optional, Union
from aiotfm.connection import Connection
from aiotfm.enums import Community, GameMode, TradeError
from aiotfm.errors import AiotfmException, AlreadyConnected, CommunityPlatformError, \
IncorrectPassword, InvalidEvent, LoginError, MaintenanceError, ServerUnreachable
from aiotfm.friend import Friend, FriendList
from aiotfm.inventory import Inventory, InventoryItem, Trade
from aiotfm.message import Channel, ChannelMessage, Message, Whisper
from aiotfm.packet import Packet
from aiotfm.player import Player, Profile
from aiotfm.room import Room, RoomList
from aiotfm.shop import Shop
from aiotfm.tribe import Tribe
from aiotfm.utils import Keys, Locale, get_ip, get_keys, shakikoo
logger = logging.getLogger('aiotfm')
class Client:
"""Represents a client that connects to Transformice.
Several arguments can be passed to the :class:`Client`.
.. _event loop: https://docs.python.org/3/library/asyncio-eventloops.html
Parameters
----------
community: Optional[:class:`int`]
Defines the community of the client. Defaults to 0 (EN community).
auto_restart: Optional[:class:`bool`]
Whether the client should automatically restart on error. Defaults to False.
bot_role: Optional[:class:`bool`]
Whether the client has the game's special bot role or not.
Avoids using the api endpoint and gives more stability.
loop: Optional[event loop]
The `event loop`_ to use for asynchronous operations. If ``None`` is passed (defaults),
the event loop used will be ``asyncio.get_event_loop()``.
max_retries: Optional[:class:`int`]
The maximum number of retries the client should attempt while connecting to the game.
Attributes
----------
username: Optional[:class:`str`]
The bot's username received from the server. Might be None if the bot didn't log in yet.
room: Optional[:class:`aiotfm.room.Room`]
The bot's room. Might be None if the bot didn't log in yet or couldn't join any room yet.
trade: Optional[:class:`aiotfm.inventory.Trade`]
The current trade that's going on (i.e: both traders accepted it).
trades: :class:`list`[:class:`aiotfm.inventory.Trade`]
All the trades the bot participates in. Most of them might be invitations only.
inventory: Optional[:class:`aiotfm.inventory.Inventory`]
The bot's inventory. Might be None if the bot didn't log in yet or it didn't receive
anything.
locale: :class:`aiotfm.locale.Locale`
The bot's locale (translations).
friends: Optional[:class:`aiotfm.friends.FriendList`]
The bot's friend list
"""
LOG_UNHANDLED_PACKETS = False
def __init__(
self,
community: Union[Community, int] = Community.en,
auto_restart: bool = False,
bot_role: bool = False,
loop: Optional[asyncio.AbstractEventLoop] = None,
max_retries: int = 6
):
self.loop: asyncio.AbstractEventLoop = loop or asyncio.get_event_loop()
self.main: Connection = Connection('main', self, self.loop)
self.bulle: Connection = None
self._waiters: dict = {}
self._close_event: asyncio.Future = None
self._sequenceId: int = 0
self._channels: List[Channel] = []
self._restarting: bool = False
self._closed: bool = False
self._logged: bool = False
self._max_retries: int = max_retries
self.room: Room = None
self.trade: Trade = None
self.trades: dict = {}
self.inventory: Inventory = None
self.username: str = None
self.locale: Locale = Locale()
self.community: Community = Community(community)
self.friends: FriendList = None
self.keys: Keys = None
self.authkey: int = 0
self.auto_restart: bool = auto_restart
self.api_tfmid: int = None
self.api_token: str = None
self.bot_role: bool = bot_role
@property
def restarting(self) -> bool:
return self._restarting
@property
def closed(self) -> bool:
return self._closed
def _backoff(self, n: int) -> float:
"""Returns the numbers of seconds to wait until the n-th connection attempt. Capped at 10 minutes."""
return random.uniform(20, 30 * 2 ** min(n, 5))
def data_received(self, data: bytes, connection: Connection):
"""|coro|
Dispatches the received data.
:param data: :class:`bytes` the received data.
:param connection: :class:`aiotfm.Connection` the connection that received
the data.
"""
# :desc: Called when a socket receives a packet. Does not interfere
# with :meth:`Client.handle_packet`.
# :param connection: :class:`aiotfm.Connection` the connection that received
# the packet.
# :param packet: :class:`aiotfm.Packet` a copy of the packet.
self.dispatch('raw_socket', connection, Packet(data))
self.loop.create_task(self.handle_packet(connection, Packet(data)))
async def handle_packet(self, connection: Connection, packet: Packet) -> bool:
"""|coro|
Handles the known packets and dispatches events.
Subclasses should handle only the unhandled packets from this method.
Example: ::
class Bot(aiotfm.Client):
async def handle_packet(self, conn, packet):
handled = await super().handle_packet(conn, packet.copy())
if not handled:
# Handle here the unhandled packets.
pass
:param connection: :class:`aiotfm.Connection` the connection that received
the packet.
:param packet: :class:`aiotfm.Packet` the packet.
:return: True if the packet got handled, False otherwise.
"""
CCC = packet.readCode()
if CCC == (1, 1): # Old packets
oldCCC, *data = packet.readString().split(b'\x01')
data = list(map(bytes.decode, data))
oldCCC = tuple(oldCCC[:2])
# :desc: Called when an old packet is received. Does not interfere
# with :meth:`Client.handle_old_packet`.
# :param connection: :class:`aiotfm.Connection` the connection that received
# the packet.
# :param oldCCC: :class:`tuple` the packet identifiers on the old protocol.
# :param data: :class:`list` the packet data.
self.dispatch('old_packet', connection, oldCCC, data)
return await self.handle_old_packet(connection, oldCCC, data)
if CCC == (5, 21): # Joined room
self.room = Room(official=packet.readBool(), name=packet.readUTF())
# :desc: Called when the client has joined a room.
# :param room: :class:`aiotfm.room.Room` the room the client has entered.
self.dispatch('joined_room', self.room)
elif CCC == (5, 39): # Password required for the room
# :desc: Called when a password is required to enter a room
# :param room: :class:`aiotfm.room.Room` the room the server is asking for a password.
self.dispatch('room_password', Room(packet.readUTF()))
elif CCC == (6, 6): # Room message
username = packet.readUTF()
message = packet.readUTF()
player = self.room.get_player(username=username)
if player is None:
player = Player(username)
# :desc: Called when the client receives a message from the room.
# :param message: :class:`aiotfm.message.Message` the message.
self.dispatch('room_message', Message(player, message, self))
elif CCC == (6, 20): # Server message
packet.readBool() # if False then the message will appear in the #Server channel
t_key = packet.readUTF()
t_args = [packet.readUTF() for i in range(packet.read8())]
# :desc: Called when the client receives a message from the server that needs to be translated.
# :param message: :class:`aiotfm.locale.Translation` the message translated with the
# current locale.
# :param *args: a list of string used as replacement inside the message.
self.dispatch('server_message', self.locale[t_key], *t_args)
elif CCC == (8, 1): # Play emote
player = self.room.get_player(pid=packet.read32())
emote = packet.read8()
flag = packet.readUTF() if emote == 10 else ''
# :desc: Called when a player plays an emote.
# :param player: :class:`aiotfm.Player` the player.
# :param emote: :class:`int` the emote's id.
# :param flag: :class:`str` the flag's id.
self.dispatch('emote', player, emote, flag)
elif CCC == (8, 5): # Show emoji
player = self.room.get_player(pid=packet.read32())
emoji = packet.read8()
# :desc: Called when a player is showing an emoji above its head.
# :param player: :class:`aiotfm.Player` the player.
# :param emoji: :class:`int` the emoji's id.
self.dispatch('emoji', player, emoji)
elif CCC == (8, 6): # Player won
packet.read8()
player = self.room.get_player(pid=packet.read32())
player.score = packet.read16()
order = packet.read8()
player_time = packet.read16() / 100
# :desc: Called when a player get the cheese to the hole.
# :param player: :class:`aiotfm.Player` the player.
# :param order: :class:`int` the order of the player in the hole.
# :param player_time: :class:`float` player's time in the hole in seconds.
self.dispatch('player_won', player, order, player_time)
elif CCC == (8, 16): # Profile
# :desc: Called when the client receives the result of a /profile command.
# :param profile: :class:`aiotfm.player.Profile` the profile.
self.dispatch('profile', Profile(packet))
elif CCC == (8, 20): # Shop
# :desc: Called when the client receives the content of the shop.
# :param shop: :class:`aiotfm.shop.Shop` the shop.
self.dispatch('shop', Shop(packet))
elif CCC == (8, 22): # Skills
skills = {}
for _ in range(packet.read8()):
key, value = packet.read8(), packet.read8()
skills[key] = value
# :desc: Called when the client receives its skill tree.
# :param skills: :class:`dict` the skills.
self.dispatch('skills', skills)
elif CCC == (16, 2): # Tribe invitation received
author = packet.readUTF()
tribe = packet.readUTF()
# :desc: Called when the client receives an invitation to a tribe. (/inv)
# :param author: :class:`str` the player that invited you.
# :param tribe: :class:`str` the tribe.
self.dispatch('tribe_inv', author, tribe)
elif CCC == (26, 2): # Logged in successfully
player_id = packet.read32()
self.username = username = packet.readUTF()
played_time = packet.read32()
community = Community(packet.read8())
pid = packet.read32()
# :desc: Called when the client successfully logged in.
# :param uid: :class:`int` the client's unique id.
# :param username: :class:`str` the client's username.
# :param played_time: :class:`int` the total number of minutes the client has played.
# :param community: :class:`aiotfm.enums.Community` the community the client has connected to.
# :param pid: :class:`int` the client's player id.
self.dispatch('logged', player_id, username, played_time, community, pid)
elif CCC == (26, 3): # Handshake OK
online_players = packet.read32()
language = packet.readUTF()
country = packet.readUTF()
self.authkey = packet.read32()
self._logged = False
os_info = Packet.new(28, 17).writeString('en').writeString('Linux')
os_info.writeString('LNX 29,0,0,140').write8(0)
await connection.send(os_info)
# :desc: Called when the client can login through the game.
# :param online_players: :class:`int` the number of player connected to the game.
# :param language: :class:`str` the language the server is suggesting.
# :param country: :class:`str` the country detected from your ip.
self.dispatch('login_ready', online_players, language, country)
elif CCC == (26, 12): # Login result
self._logged = False
# :desc: Called when the client failed logging.
# :param code: :class:`int` the error code.
# :param error1: :class:`str` error messages.
# :param error2: :class:`str` error messages.
self.dispatch('login_result', packet.read8(), packet.readUTF(), packet.readUTF())
elif CCC == (26, 25): # Ping
# :desc: Called when the client receives the ping response from the server.
self.dispatch('ping')
elif CCC == (26, 35): # Room list
roomlist = RoomList.from_packet(packet)
# :desc: Dispatched when the client receives the room list
self.dispatch('room_list', roomlist)
elif CCC == (28, 6): # Server ping
await connection.send(Packet.new(28, 6).write8(packet.read8()))
elif CCC == (29, 6): # Lua logs
# :desc: Called when the client receives lua logs from #Lua.
# :param log: :class:`str` a log message.
self.dispatch('lua_log', packet.readUTF())
elif CCC == (31, 1): # Inventory data
self.inventory = Inventory.from_packet(packet)
self.inventory.client = self
# :desc: Called when the client receives its inventory's content.
# :param inventory: :class:`aiotfm.inventory.Inventory` the client's inventory.
self.dispatch('inventory_update', self.inventory)
elif CCC == (31, 2): # Update inventory item
item_id = packet.read16()
quantity = packet.read8()
if item_id in self.inventory.items:
item = self.inventory.items[item_id]
previous = item.quantity
item.quantity = quantity
# :desc: Called when the quantity of an item has been updated.
# :param item: :class:`aiotfm.inventory.InventoryItem` the new item.
# :param previous: :class:`int` the previous quantity.
self.dispatch('item_update', item, previous)
else:
item = InventoryItem(item_id=item_id, quantity=quantity)
self.inventory.items[item.id] = item
# :desc: Called when the client receives a new item in its inventory.
# :param item: :class:`aiotfm.inventory.InventoryItem` the new item.
self.dispatch('new_item', item)
elif CCC == (31, 5): # Trade invite
pid = packet.read32()
self.trades[pid] = Trade(self, self.room.get_player(pid=pid))
# :desc: Called when received an invitation to trade.
# :param trade: :class:`aiotfm.inventory.Trade` the trade object.
self.dispatch('trade_invite', self.trades[pid])
elif CCC == (31, 6): # Trade error
name = packet.readUTF().lower()
error = packet.read8()
if name == self.username.lower():
trade = self.trade
else:
for t in self.trades.values():
if t.trader.lower() == name:
trade = t
break
# :desc: Called when an error occurred with a trade.
# :param trade: :class:`aiotfm.inventory.Trade` the trade that failed.
# :param error: :class:`aiotfm.enums.TradeError` the error.
self.dispatch('trade_error', trade, TradeError(error))
trade._close()
elif CCC == (31, 7): # Trade start
pid = packet.read32()
trade = self.trades.get(pid)
if trade is None:
raise AiotfmException(f'Cannot find the trade from pid {pid}.')
trade._start()
self.trade = trade
# :desc: Called when a trade starts. You can access the trade object with `Client.trade`.
self.dispatch('trade_start')
elif CCC == (31, 8): # Trade items
export = packet.readBool()
id_ = packet.read16()
quantity = (1 if packet.readBool() else -1) * packet.read8()
items = self.trade.exports if export else self.trade.imports
items.add(id_, quantity)
trader = self if export else self.trade.trader
self.trade.locked = [False, False]
# :desc: Called when an item has been added/removed from the current trade.
# :param trader: :class:`aiotfm.Player` the player that triggered the event.
# :param id: :class:`int` the item's id.
# :param quantity: :class:`int` the quantity added/removed. Can be negative.
# :param item: :class:`aiotfm.inventory.InventoryItem` the item after the change.
self.dispatch('trade_item_change', trader, id_, quantity, items.get(id_))
elif CCC == (31, 9): # Trade lock
index = packet.read8()
locked = packet.readBool()
if index > 1:
self.trade.locked = [locked, locked]
who = "both"
else:
self.trade.locked[index] = locked
who = self.trade.trader if index == 0 else self
# :desc: Called when the trade got (un)locked.
# :param who: :class:`aiotfm.Player` the player that triggered the event.
# :param locked: :class:`bool` either the trade got locked or unlocked.
self.dispatch('trade_lock', who, locked)
elif CCC == (31, 10): # Trade complete
trade = self.trade
self.trade._close(succeed=True)
elif CCC == (44, 1): # Bulle switching
timestamp = packet.read32()
uid = packet.read32()
pid = packet.read32()
bulle_ip = packet.readUTF()
ports = packet.readUTF().split('-')
if self.bulle is not None:
self.bulle.close()
self.bulle = Connection('bulle', self, self.loop)
await self.bulle.connect(bulle_ip, int(random.choice(ports)))
await self.bulle.send(Packet.new(44, 1).write32(timestamp).write32(uid).write32(pid))
elif CCC == (44, 22): # Fingerprint offset changed
connection.fingerprint = packet.read8()
elif CCC == (60, 3): # Community platform
TC = packet.read16()
# :desc: Called when the client receives a packet from the community platform.
# :param TC: :class:`int` the packet's code.
# :param packet: :class:`aiotfm.Packet` the packet.
self.dispatch('raw_cp', TC, packet.copy(copy_pos=True))
if TC == 3: # Connected to the community platform
await self.sendCP(28) # Request friend list
# :desc: Called when the client is successfully connected to the community platform.
self.dispatch('ready')
elif TC == 32: # Friend connected
if self.friends is None:
return True
friend = self.friends.get_friend(packet.readUTF())
friend.isConnected = True
# :desc: Called when a friend connects to the game (not entirely fetched)
# :param friend: :class:`aiotfm.friend.Friend` friend after this update
self.dispatch('friend_connected', friend)
elif TC == 33: # Friend disconnected
if self.friends is None:
return True
friend = self.friends.get_friend(packet.readUTF())
friend.isConnected = False
# :desc: Called when a friend disconnects from the game (not entirely fetched)
# :param friend: :class:`aiotfm.friend.Friend` friend after this update
self.dispatch('friend_disconnected', friend)
elif TC == 34: # Friend list loaded
self.friends = FriendList(self, packet)
# :desc: Called when the friend list is loaded.
# :param friends: :class:`aiotfm.friend.FriendList` the friend list
self.dispatch('friends_loaded', self.friends)
elif TC == 35 or TC == 36: # Friend update / addition
if self.friends is None:
return True
new = Friend(self.friends, packet)
old = self.friends.get_friend(new.name)
if old is not None:
if old.isSoulmate: # Not sent by the server, checked locally.
self.friends.soulmate = new
new.isSoulmate = True
self.friends.friends.remove(old)
self.friends.friends.append(new)
if old is None:
# :desc: Called when a friend is added
# :param friend: :class:`aiotfm.friend.Friend` the friend
self.dispatch('new_friend', new)
else:
# :desc: Called when a friend is updated
# :param before: :class:`aiotfm.friend.Friend` friend before this update
# :param after: :class:`aiotfm.friend.Friend` friend after this update
self.dispatch('friend_update', old, new)
elif TC == 37: # Remove friend
if self.friends is None:
return True
friend = self.friends.get_friend(packet.read32())
if friend is not None:
if friend == self.friends.soulmate:
self.friends.soulmate = None
self.friends.friends.remove(friend)
# :desc: Called when a friend is removed
# :param friend: :class:`aiotfm.friend.Friend` the friend
self.dispatch('friend_remove', friend)
elif TC == 55: # Channel join result
sequenceId = packet.read32()
result = packet.read8()
# :desc: Called when the client receives the result of joining a channel.
# :param sequenceId: :class:`int` identifier returned by :meth:`Client.sendCP`.
# :param result: :class:`int` result code.
self.dispatch('channel_joined_result', sequenceId, result)
elif TC == 57: # Channel leave result
sequenceId = packet.read32()
result = packet.read8()
# :desc: Called when the client receives the result of leaving a channel.
# :param sequenceId: :class:`int` identifier returned by :meth:`Client.sendCP`.
# :param result: :class:`int` result code.
self.dispatch('channel_left_result', sequenceId, result)
elif TC == 59: # Channel /who result
idSequence = packet.read32()
result = packet.read8()
players = [Player(packet.readUTF()) for _ in range(packet.read16())]
# :desc: Called when the client receives the result of the /who command in a channel.
# :param idSequence: :class:`int` the reference to the packet that performed the request.
# :param players: List[:class:`aiotfm.Player`] the list of players inside the channel.
self.dispatch('channel_who', idSequence, players)
elif TC == 62: # Joined a channel
name = packet.readUTF()
if name in self._channels:
channel = [c for c in self._channels if c == name][0]
else:
channel = Channel(name, self)
self._channels.append(channel)
# :desc: Called when the client joined a channel.
# :param channel: :class:`aiotfm.message.Channel` the channel.
self.dispatch('channel_joined', channel)
elif TC == 63: # Quit a channel
name = packet.readUTF()
if name in self._channels:
self._channels.remove(name)
# :desc: Called when the client leaves a channel.
# :param name: :class:`str` the channel's name.
self.dispatch('channel_closed', name)
elif TC == 64: # Channel message
username, community = packet.readUTF(), packet.read32()
channel_name, message = packet.readUTF(), packet.readUTF()
channel = self.get_channel(channel_name)
author = self.room.get_player(username=username)
if author is None:
author = Player(username)
if channel is None:
channel = Channel(channel_name, self)
self._channels.append(channel)
channel_message = ChannelMessage(author, community, message, channel)
# :desc: Called when the client receives a message from a channel.
# :param message: :class:`aiotfm.message.ChannelMessage` the message.
self.dispatch('channel_message', channel_message)
elif TC == 65: # Tribe message
author, message = packet.readUTF(), packet.readUTF()
# :desc: Called when the client receives a message from the tribe.
# :param author: :class:`str` the message's author.
# :param message: :class:`str` the message's content.
self.dispatch('tribe_message', author, message)
elif TC == 66: # Whisper
author = Player(packet.readUTF())
commu = packet.read32()
receiver = Player(packet.readUTF())
message = packet.readUTF()
author = self.room.get_player(name=author, default=author)
receiver = self.room.get_player(name=receiver, default=receiver)
# :desc: Called when the client receives a whisper.
# :param message: :class:`aiotfm.message.Whisper` the message.
self.dispatch('whisper', Whisper(author, commu, receiver, message, self))
elif TC == 88: # tribe member connected
# :desc: Called when a tribe member connected.
# :param name: :class:`str` the member's name.
self.dispatch('member_connected', packet.readUTF())
elif TC == 90: # tribe member disconnected
# :desc: Called when a tribe member disconnected.
# :param name: :class:`str` the member's name.
self.dispatch('member_disconnected', packet.readUTF())
else:
if self.LOG_UNHANDLED_PACKETS:
print(CCC, TC, bytes(packet.buffer)[4:])
return False
elif CCC == (144, 1): # Set player list
before = self.room.players
self.room.players = {}
for _ in range(packet.read16()):
player = Player.from_packet(packet)
self.room.players[player.pid] = player
# :desc: Called when the client receives an update of all players in the room.
# :param before: Dict[:class:`aiotfm.Player`] the list of players before the update.
# :param players: Dict[:class:`aiotfm.Player`] the updated list of players.
self.dispatch('bulk_player_update', before, self.room.players)
elif CCC == (144, 2): # Add a player
after = Player.from_packet(packet)
before = self.room.players.pop(after.pid, None)
self.room.players[after.pid] = after
if before is None:
# :desc: Called when a player joined the room.
# :param player: :class:`aiotfm.Player` the player.
self.dispatch('player_join', after)
else:
# :desc: Called when a player's data on the room has been updated.
# :param before: :class:`aiotfm.Player` the player before the update.
# :param player: :class:`aiotfm.Player` the player updated.
self.dispatch('player_update', before, after)
else:
if self.LOG_UNHANDLED_PACKETS:
print(CCC, bytes(packet.buffer)[2:])
return False
return True
async def handle_old_packet(self, connection: Connection, oldCCC: tuple, data: list) -> bool:
"""|coro|
Handles the known packets from the old protocol and dispatches events.
Subclasses should handle only the unhandled packets from this method.
Example: ::
class Bot(aiotfm.Client):
async def handle_old_packet(self, conn, oldCCC, data):
handled = await super().handle_old_packet(conn, oldCCC, data.copy())
if not handled:
# Handle here the unhandled packets.
pass
:param connection: :class:`aiotfm.Connection` the connection that received
the packet.
:param oldCCC: :class:`tuple` the packet identifiers on the old protocol.
:param data: :class:`list` the packet data.
:return: True if the packet got handled, False otherwise.
"""
if oldCCC == (8, 5): # Player died
player = self.room.get_player(pid=data[0])
if player is not None:
player.score = int(data[2])
# :desc: Called when a player dies.
# :param player: :class:`aiotfm.Player` the player.
self.dispatch('player_died', player)
elif oldCCC == (8, 7): # Remove a player
player = self.room.players.pop(int(data[0]), None)
if player is not None:
# :desc: Called when a player leaves the room.
# :param player: :class:`aiotfm.Player` the player.
self.dispatch('player_remove', player)
else:
if self.LOG_UNHANDLED_PACKETS:
print("[OLD]", oldCCC, data)
return False
return True
def get_channel(self, name: str) -> Optional[Channel]:
"""Returns a channel from it's name or None if not found.
:param name: :class:`str` the name of the channel.
:return: :class:`aiotfm.message.ChannelMessage` or None
"""
if name is None:
return None
for channel in self._channels:
if channel.name == name:
return channel
def get_trade(self, player: Union[str, Player]) -> Optional[Trade]:
"""Returns the pending/current trade with a player.
:param player: :class:`aiotfm.Player` or :class:`str` the player.
:return: :class:`aiotfm.inventory.Trade` the trade with the player, or ``None``.
"""
if not isinstance(player, (str, Player)):
raise TypeError(f"Expected Player or str types got {type(player)}")
if isinstance(player, Player):
return self.trades.get(player.pid)
player = player.lower()
for trade in self.trades.values():
if trade.trader.lower() == player:
return trade
def event(self, coro: Callable) -> Callable:
"""A decorator that registers an event.
More about events [here](Events.md).
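Example (overriding the default on_connection_error handler defined further down): ::
@client.event
async def on_connection_error(conn, error):
print(f'{conn.name} connection closed.')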
"""
name = coro.__name__
if not name.startswith('on_'):
raise InvalidEvent("'{}' isn't a correct event naming.".format(name))
if not asyncio.iscoroutinefunction(coro):
message = "Couldn't register a non-coroutine function for the event {}.".format(name)
raise InvalidEvent(message)
setattr(self, name, coro)
return coro
def wait_for(
self,
event: str,
condition: Optional[Callable] = None,
timeout: Optional[float] = None,
stopPropagation: bool = False
) -> asyncio.Future:
"""Wait for an event.
Example: ::
@client.event
async def on_room_message(author, message):
if message == 'id':
await client.sendCommand(f'profile {author.username}')
profile = await client.wait_for('on_profile', lambda p: p.username == author)
await client.sendRoomMessage('Your id: {}'.format(profile.id))
:param event: :class:`str` the event name.
:param condition: Optional[`function`] A predicate to check what to wait for.
The arguments must meet the parameters of the event being waited for.
:param timeout: Optional[:class:`float`] the number of seconds before
throwing asyncio.TimeoutError
:param stopPropagation: Optional[:class:`bool`] if True, the event is not dispatched
to the remaining listeners once the condition is met.
:return: [`asyncio.Future`](https://docs.python.org/3/library/asyncio-future.html#asyncio.Future)
a future that you must await.
"""
event = event.lower()
future = self.loop.create_future()
if condition is None:
def everything(*a):
return True
condition = everything
if event not in self._waiters:
self._waiters[event] = []
self._waiters[event].append((condition, future, stopPropagation))
return asyncio.wait_for(future, timeout)
async def _run_event(self, coro: Callable, event_name: str, *args, **kwargs):
"""|coro|
Runs an event and handle the error if any.
:param coro: a coroutine function.
:param event_name: :class:`str` the event's name.
:param args: arguments to pass to the coro.
:param kwargs: keyword arguments to pass to the coro.
:return: :class:`bool` whether the event ran successfully or not
"""
try:
await coro(*args, **kwargs)
return True
# except asyncio.CancelledError:
# raise
except Exception as e:
if hasattr(self, 'on_error'):
try:
await self.on_error(event_name, e, *args, **kwargs)
# except asyncio.CancelledError:
# raise
except Exception:
if self.auto_restart:
await self.restart(5)
else:
self.close()
return False
def dispatch(self, event: str, *args, **kwargs):
"""Dispatches events
:param event: :class:`str` event's name. (without 'on_')
:param args: arguments to pass to the coro.
:param kwargs: keyword arguments to pass to the coro.
:return: [`Task`](https://docs.python.org/3/library/asyncio-task.html#asyncio.Task)
the _run_event wrapper task
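Example: ::
# runs Client.on_friend_update(old, new) and wakes any matching wait_for() future
client.dispatch('friend_update', old, new)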
"""
method = 'on_' + event
if method in self._waiters:
to_remove = []
waiters = self._waiters[method]
for i, (cond, fut, stop) in enumerate(waiters):
if fut.cancelled():
to_remove.append(i)
continue
try:
result = bool(cond(*args))
except Exception as e:
fut.set_exception(e)
else:
if result:
fut.set_result(args[0] if len(args) == 1 else args if len(args) > 0 else None)
if stop:
del waiters[i]
return None
to_remove.append(i)
if len(to_remove) == len(waiters):
del self._waiters[method]
else:
for i in to_remove[::-1]:
del waiters[i]
coro = getattr(self, method, None)
if coro is not None:
dispatch = self._run_event(coro, method, *args, **kwargs)
return asyncio.ensure_future(dispatch, loop=self.loop)
async def on_error(self, event: str, err: Exception, *a, **kw):
"""Default on_error event handler. Prints the traceback of the error."""
logger.error('An error occurred while dispatching the event "%s":', event, exc_info=-3)
async def on_connection_error(self, conn: Connection, error: Exception):
"""Default on_connection_error event handler. Prints the error."""
logger.error('The %s connection has been closed.', conn.name, exc_info=error)
async def on_login_result(self, code: int, *args):
"""Default on_login_result handler. Raise an error and closes the connection."""
self.loop.call_later(3, self.close)
if code == 1:
raise AlreadyConnected()
if code == 2:
raise IncorrectPassword()
raise LoginError(code)
async def _connect(self):
"""|coro|
Creates a connection with the main server.
"""
if self._close_event is None:
raise AiotfmException(f'{self._connect.__name__} should not be called directly. Use start() instead.')
for port in random.sample(self.keys.server_ports, 4):
try:
await self.main.connect(self.keys.server_ip, port)
except Exception:
logger.debug(f'Unable to connect to the server "{self.keys.server_ip}:{port}".')
else:
break
else:
raise ServerUnreachable('Unable to connect to the server.')
while not self.main.open:
await asyncio.sleep(0)
async def sendHandshake(self):
"""|coro|
Sends the handshake packet so the server recognizes this socket as a player.
"""
packet = Packet.new(28, 1).write16(self.keys.version)
if not self.bot_role:
packet.writeString('en').writeString(self.keys.connection)
packet.writeString('Desktop').writeString('-').write32(0x1fbd).writeString('')
packet.writeString('74696720697320676f6e6e61206b696c6c206d7920626f742e20736f20736164')
packet.writeString(
"A=t&SA=t&SV=t&EV=t&MP3=t&AE=t&VE=t&ACC=t&PR=t&SP=f&SB=f&DEB=f&V=LNX 32,0,0,182&M=Adobe"
" Linux&R=1920x1080&COL=color&AR=1.0&OS=Linux&ARCH=x86&L=en&IME=t&PR32=t&PR64=t&LS=en-U"
"S&PT=Desktop&AVD=f&LFD=f&WD=f&TLS=t&ML=5.1&DP=72")
packet.write32(0).write32(0x6257).writeString('')
await self.main.send(packet)
async def start(
self,
api_tfmid: Optional[int] = None,
api_token: Optional[str] = None,
keys: Optional[Keys] = None,
**kwargs
):
"""|coro|
Starts the client.
:param api_tfmid: Optional[:class:`int`] your Transformice id.
:param api_token: Optional[:class:`str`] your token to access the API.
"""
if self.bot_role:
self.keys = await get_ip()
else:
if self.auto_restart and (api_tfmid is None or api_token is None):
warnings.warn("The api credentials were not provided. The Client won't be able to restart.")
self.auto_restart = False
self.keys = keys
if keys is None:
self.keys = await get_keys(api_tfmid, api_token)
if 'username' in kwargs and 'password' in kwargs:
# Monkey patch the on_login_ready event
if hasattr(self, 'on_login_ready'):
event = getattr(self, 'on_login_ready')
self.on_login_ready = lambda *a: asyncio.gather(self.login(**kwargs), event(*a))
else:
self.on_login_ready = lambda *a: self.login(**kwargs)
retries = 0
on_started = None
keep_alive = Packet.new(26, 26)
while True:
self._close_event = asyncio.Future()
try:
logger.info('Connecting to the game.')
await self._connect()
await self.sendHandshake()
await self.locale.load()
retries = 0 # Connection successful
self._restarting = False
except Exception as e:
logger.error('Connection to the server failed.', exc_info=e)
if on_started is not None:
on_started.set_exception(e)
elif retries > self._max_retries:
raise e
else:
retries += 1
backoff = self._backoff(retries)
logger.info('Attempt %d failed. Reconnecting in %.2fs', retries, backoff)
await asyncio.sleep(backoff)
continue
else:
if on_started is not None:
on_started.set_result(None)
while not self._close_event.done():
# Keep the connection(s) alive
await asyncio.gather(*[c.send(keep_alive) for c in (self.main, self.bulle) if c])
await asyncio.wait((self._close_event,), timeout=15)
reason, delay, on_started = self._close_event.result()
self._close_event = asyncio.Future()
logger.debug('[Close Event] Reason: %s, Delay: %d, Callback: %s', reason, delay, on_started)
logger.debug('Will restart: %s', reason != 'stop' and self.auto_restart)
# clean up
for conn in (self.main, self.bulle):
if conn is not None:
conn.close()
if reason == 'stop' or not self.auto_restart:
break
await asyncio.sleep(delay)
# If we don't recreate the connection, we won't be able to connect.
self.main = Connection('main', self, self.loop)
self.bulle = None
# Fetch some fresh keys
if not self.bot_role and (reason != 'restart' or self.keys is None):
for i in range(self._max_retries):
try:
self.keys = await get_keys(api_tfmid, api_token)
break
except MaintenanceError:
if i == 0:
logger.info('The game is under maintenance.')
await asyncio.sleep(30)
else:
raise MaintenanceError('The game is under heavy maintenance.')
async def restart_soon(self, delay: float = 5.0, **kwargs):
"""|coro|
Restarts the client after a delay.
:param delay: :class:`float` the delay before restarting. Default is 5 seconds.
:param kwargs: keyword arguments to pass to the :meth:`Client.restart` method."""
warnings.warn('`Client.restart_soon` is deprecated, use `Client.restart` instead.', DeprecationWarning)
await self.restart(delay, **kwargs)
async def restart(self, delay: float = 0, keys: Optional[Keys] = None):
"""|coro|
Restarts the client.
:param delay: the delay before restarting. By default, there is no delay.
:param keys: Optional[:class:`Keys`] the keys to use after the restart.
"""
if not self.auto_restart or self._close_event is None:
raise AiotfmException(
'Unable to restart the Client. Either `auto_restart` is set to '
'False or you have not started the Client using `Client.start`.'
)
if self._restarting or self._close_event.done():
return
self.keys = keys
self._restarting = True
# :desc: Notify when the client restarts.
self.dispatch("restart")
restarted = asyncio.Future()
self._close_event.set_result(('restart', delay, restarted))
await restarted
async def login(self, username: str, password: str, encrypted: bool = True, room: str = '*aiotfm'):
"""|coro|
Log in the game.
:param username: :class:`str` the client username.
:param password: :class:`str` the client password.
:param encrypted: Optional[:class:`bool`] whether the password is already encrypted or not.
:param room: Optional[:class:`str`] the room where the client will be logged in.
"""
if self._logged:
raise AiotfmException('You cannot log in twice.')
self._logged = True
if not encrypted:
password = <PASSWORD>(password)
packet = Packet.new(26, 8).writeString(username).writeString(password)
packet.writeString("app:/TransformiceAIR.swf/[[DYNAMIC]]/2/[[DYNAMIC]]/4").writeString(room)
if self.bot_role:
packet.write16(0).write8(0).writeString('')
else:
packet.write32(self.authkey ^ self.keys.auth)
packet.write8(0).writeString('')
packet.cipher(self.keys.identification)
await self.main.send(Packet.new(176, 1).writeUTF(self.community.name))
await self.main.send(packet.write8(0))
def run(self, api_tfmid: int, api_token: str, username: str, password: str, **kwargs):
"""A blocking call that does the event loop initialization for you.
Equivalent to: ::
@bot.event
async def on_login_ready(*a):
await bot.login(username, password)
loop = asyncio.get_event_loop()
loop.create_task(bot.start(api_tfmid, api_token))
loop.run_forever()
"""
try:
self.loop.run_until_complete(self.start(api_tfmid, api_token, username=username, password=password, **kwargs))
finally:
self.loop.run_until_complete(self.loop.shutdown_asyncgens())
self.loop.close()
def close(self):
"""Closes the sockets."""
if self._closed:
return
self._closed = True
self._close_event.set_result(('stop', 0, None))
async def sendCP(self, code: int, data: Union[Packet, ByteString] = b'') -> int:
"""|coro|
Send a packet to the community platform.
:param code: :class:`int` the community platform code.
:param data: :class:`aiotfm.Packet` or :class:`bytes` the data.
:return: :class:`int` returns the sequence id.
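Example: ::
# community platform code 50 sends a tribe message (see :meth:`Client.sendTribeMessage`)
sid = await client.sendCP(50, Packet().writeString('hello'))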
"""
self._sequenceId = sid = (self._sequenceId + 1) % 0xFFFFFFFF
packet = Packet.new(60, 3).write16(code)
packet.write32(self._sequenceId).writeBytes(data)
await self.main.send(packet, cipher=True)
return sid
async def sendRoomMessage(self, message: str):
"""|coro|
Send a message to the room.
:param message: :class:`str` the content of the message.
"""
packet = Packet.new(6, 6).writeString(message)
await self.bulle.send(packet, cipher=True)
async def sendTribeMessage(self, message: str):
"""|coro|
Send a message to the tribe.
:param message: :class:`str` the content of the message.
"""
await self.sendCP(50, Packet().writeString(message))
async def sendChannelMessage(self, channel: Union[Channel, str], message: str):
"""|coro|
Send a message to a public channel.
:param channel: :class:`str` the channel's name.
:param message: :class:`str` the content of the message.
"""
if isinstance(channel, Channel):
channel = channel.name
return await self.sendCP(48, Packet().writeString(channel).writeString(message))
async def whisper(self, username: Union[Player, str], message: AnyStr, overflow: bool = False):
"""|coro|
Whisper to a player.
:param username: :class:`str` the player to whisper.
:param message: :class:`str` the content of the whisper.
:param overflow: :class:`bool` if True, sends the complete message, split
into several messages if needed.
"""
if isinstance(username, Player):
username = username.username
async def send(msg):
await self.sendCP(52, Packet().writeString(username).writeString(msg))
if isinstance(message, str):
message = message.encode()
message = message.replace(b'<', b'&lt;').replace(b'>', b'&gt;')
await send(message[:255])
if overflow:
for i in range(255, len(message), 255):
await asyncio.sleep(1)
await send(message[i:i + 255])
async def getTribe(self, disconnected: bool = True) -> Optional[Tribe]:
"""|coro|
Gets the client's :class:`aiotfm.Tribe` and returns it.
:param disconnected: :class:`bool` if True, also retrieves the disconnected members.
:return: :class:`aiotfm.Tribe` or ``None``.
"""
sid = await self.sendCP(108, Packet().writeBool(disconnected))
def is_tribe(tc, packet):
return (tc == 109 and packet.read32() == sid) or tc == 130
tc, packet = await self.wait_for('on_raw_cp', is_tribe, timeout=5)
if tc == 109:
result = packet.read8()
if result == 1:
tc, packet = await self.wait_for('on_raw_cp', lambda tc, p: tc == 130, timeout=5)
elif result == 17:
return None
else:
raise CommunityPlatformError(118, result)
return Tribe(packet)
async def getRoomList(self, gamemode: Union[GameMode, int] = 0, timeout: float = 3) -> Optional[RoomList]:
"""|coro|
Get the room list
:param gamemode: Optional[:class:`aiotfm.enums.GameMode`] the room's gamemode.
:param timeout: Optional[:class:`float`] timeout in seconds. Defaults to 3 seconds.
:return: :class:`aiotfm.room.RoomList` the room list for the given gamemode or None
"""
await self.main.send(Packet.new(26, 35).write8(int(gamemode)))
def predicate(roomlist):
return gamemode == 0 or roomlist.gamemode == gamemode
try:
return await self.wait_for('on_room_list', predicate, timeout=timeout)
except asyncio.TimeoutError:
return None
async def playEmote(self, emote: int, flag: str = 'be'):
"""|coro|
Play an emote.
:param emote: :class:`int` the emote's id.
:param flag: Optional[:class:`str`] the flag for the emote id 10. Defaults to 'be'.
"""
packet = Packet.new(8, 1).write8(emote).write32(0)
if emote == 10:
packet.writeString(flag)
await self.bulle.send(packet)
async def sendSmiley(self, smiley: int):
"""|coro|
Makes the client show a smiley above its head.
:param smiley: :class:`int` the smiley's id. (from 0 to 9)
"""
if smiley < 0 or smiley > 9:
raise AiotfmException('Invalid smiley id')
packet = Packet.new(8, 5).write8(smiley)
await self.bulle.send(packet)
async def loadLua(self, lua_code: AnyStr):
"""|coro|
Load lua code into the room.
:param lua_code: :class:`str` or :class:`bytes` the lua code to send.
"""
if isinstance(lua_code, str):
lua_code = lua_code.encode()
await self.bulle.send(Packet.new(29, 1).write24(len(lua_code)).writeBytes(lua_code))
async def sendCommand(self, command: str):
"""|coro|
Send a command to the game.
:param command: :class:`str` the command to send.
"""
await self.main.send(Packet.new(6, 26).writeString(command[:255]), cipher=True)
async def enterTribe(self):
"""|coro|
Enter the tribe house
"""
await self.main.send(Packet.new(16, 1))
async def enterTribeHouse(self):
"""|coro|
Alias for :meth:`Client.enterTribe`
"""
await self.enterTribe()
async def enterInvTribeHouse(self, author: str):
"""|coro|
Join the tribe house of another player after receiving an /inv.
:param author: :class:`str` the author's username who sent the invitation.
"""
await self.main.send(Packet.new(16, 2).writeString(author))
async def recruit(self, username: Union[Player, str]):
"""|coro|
Send a recruit request to a player.
:param username: :class:`str` the player's username you want to recruit.
"""
if isinstance(username, Player):
username = username.username
await self.sendCP(78, Packet().writeString(username))
async def joinRoom(
self,
room_name: str,
password: Optional[str] = None,
community: Optional[int] = None,
auto: bool = False
):
"""|coro|
Join a room.
The event 'on_joined_room' is dispatched when the client has successfully joined the room.
:param password: :class:`str` if given the client will ignore `community` and `auto` parameters
and will connect to the room with the given password.
:param room_name: :class:`str` the room's name.
:param community: Optional[:class:`int`] the room's community.
:param auto: Optional[:class:`bool`] joins a random room (I think).
"""
if password is not None:
packet = Packet.new(5, 39).writeString(password).writeString(room_name)
else:
packet = Packet.new(5, 38).writeString(Community(community or self.community).name)
packet.writeString(room_name).writeBool(auto)
await self.main.send(packet)
async def joinChannel(self, name: str, permanent: bool = True):
"""|coro|
Join a #channel.
The event 'on_channel_joined' is dispatched when the client has successfully joined
a channel.
:param name: :class:`str` the channel's name
:param permanent: Optional[:class:`bool`] if True (default) the server will automatically
reconnect the user to this channel when logged in.
"""
await self.sendCP(54, Packet().writeString(name).writeBool(permanent))
async def leaveChannel(self, channel: Union[Channel, str]):
"""|coro|
Leaves a #channel.
:param channel: :class:`aiotfm.message.Channel` channel to leave.
"""
if isinstance(channel, Channel):
name = channel.name
else:
name = channel
await self.sendCP(56, Packet().writeString(name))
async def requestShopList(self):
"""|coro|
Send a request to the server to get the shop list."""
await self.main.send(Packet.new(8, 20))
async def startTrade(self, player: Union[Player, str]) -> Trade:
"""|coro|
Starts a trade with the given player.
:param player: :class:`aiotfm.Player` the player to trade with.
:return: :class:`aiotfm.inventory.Trade` the resulting trade"""
if isinstance(player, Player) and player.pid == -1:
player = player.username
if isinstance(player, str):
player = self.room.get_player(username=player)
if player is None:
raise AiotfmException("The player must be in your room to start a trade.")
trade = Trade(self, player)
self.trades[player.pid] = trade
await trade.accept()
return trade
async def requestInventory(self):
"""|coro|
Send a request to the server to get the bot's inventory."""
await self.main.send(Packet.new(31, 1))
|
48678
|
import urllib as U
__all__ = ('sequence', )
def _seq_from_xml(xml):
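# The DAS response embeds the sequence as e.g. '<DNA length="8">\ncaac ttag\n</DNA>';
# keep everything between the tags and strip the spaces and newlines.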
start = xml.find(">", xml.find("<DNA")) + 1
end = xml.rfind("</DNA>")
return xml[start:end].replace(' ', '').replace('\n', '').strip()
def sequence(db, chrom, start, end):
"""
Return the sequence for a region using the UCSC DAS
server. Note that the start is 1-based.
Each feature will have its own .sequence method, which sends
the correct start and end to this function.
>>> sequence('hg18', 'chr2', 2223, 2230)
'caacttag'
"""
url = "http://genome.ucsc.edu/cgi-bin/das/%s" % db
url += "/dna?segment=%s:%i,%i"
xml = U.urlopen(url % (chrom, start, end)).read()
return _seq_from_xml(xml)
if __name__ == "__main__":
import doctest
doctest.testmod()
|
48704
|
from drdown.users.models.model_health_team import HealthTeam
from ..models.model_static_data import StaticData
from ..models.model_medical_record import MedicalRecord
from drdown.users.models.model_user import User
from drdown.users.models.model_patient import Patient
from django.views.generic import CreateView, DeleteView, UpdateView, ListView
from django.urls import reverse_lazy
from django.contrib.auth.mixins import UserPassesTestMixin
from ..forms.static_data_forms import StaticDataForm
from ..views.views_base import BaseViewForm, BaseViewUrl, BaseViewPermissions
class StaticDataCreateView(BaseViewUrl, BaseViewForm, BaseViewPermissions,
CreateView):
model = StaticData
form_class = StaticDataForm
template_name = 'medicalrecords/medicalrecord_static_data_form.html'
class StaticDataUpdateView(BaseViewUrl, UpdateView):
model = StaticData
form_class = StaticDataForm
template_name = 'medicalrecords/medicalrecord_static_data_form.html'
slug_url_kwarg = 'username'
slug_field = 'patient__user__username'
|
48721
|
import unittest
from jit import jit
from jit import j_types as j
@jit
def test_print(x: j.i64):
return print(x)
class Test(unittest.TestCase):
def test_void_zero(self):
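# print(8) writes "8\n" (2 characters) and print(64) writes "64\n" (3 characters),
# so the jit-compiled print presumably returns the number of characters written, C-style.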
self.assertEqual(test_print(8), 2)
self.assertEqual(test_print(64), 3)
|
48759
|
from collections import namedtuple
Genotype = namedtuple('Genotype', 'backbone rpn')
OP_NAMES = [
'sep_conv_3x3',
'sep_conv_3x3_dil3',
'sep_conv_5x5_dil6',
'skip_connect',
'def_conv_3x3',
]
AGG_NAMES = [
'psum',
'cat'
]
HEAD_OP_NAMES = [
'conv1x1',
'conv3x3',
'sep_conv_3x3',
'sep_conv_3x3_dil3',
'skip_connect',
'def_conv_3x3',
]
HEAD_AGG_NAMES = [
'psum',
'cat'
]
|
48765
|
from setuptools import setup
setup(
name='NotebookScripter',
version='6.0.0',
packages=('NotebookScripter',),
url='https://github.com/breathe/NotebookScripter',
license='MIT',
author='<NAME>',
author_email='<EMAIL>',
install_requires=(
"ipython",
"nbformat"
),
tests_require=(
"nose",
"coverage",
"snapshottest",
"matplotlib"
),
description='Expose ipython jupyter notebooks as callable functions. More info here https://github.com/breathe/NotebookScripter',
long_description='Expose ipython jupyter notebooks as callable functions. More info here https://github.com/breathe/NotebookScripter',
classifiers=(
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: Implementation :: CPython')
)
|
48796
|
import vcr
from fast_arrow import Client
def gen_vcr():
return vcr.VCR(
cassette_library_dir='tests/fixtures_vcr',
record_mode='none',
match_on=['method', 'scheme', 'host', 'port', 'path', 'query'],
)
def gen_client():
auth_data = gen_auth_data()
client = Client(auth_data)
return client
def gen_auth_data():
auth_data = {
"account_id": 123,
"access_token": "<PASSWORD>",
"refresh_token": "<PASSWORD>",
"device_token": "<PASSWORD>",
}
return auth_data
|
48822
|
from models.attention import *
from models.rnn import *
from models.seq2seq import *
from models.loss import *
from models.beam import *
|
48872
|
import torch
from uninas.modules.mixed.mixedop import AbstractDependentMixedOp
from uninas.methods.strategies.manager import StrategyManager
from uninas.register import Register
@Register.network_mixed_op()
class SplitWeightsMixedOp(AbstractDependentMixedOp):
"""
All op choices on one path are computed in parallel;
the weight strategy decides which results to compute and combine.
In addition, different sets of weights are loaded for the operations,
depending on the architecture choices in previous layers.
Due to the saving/loading approach used, this operation will most likely malfunction in distributed settings.
"""
max_depth = 2
def __init__(self, submodules: list, name: str, strategy_name: str, depth=0):
"""
:param submodules: list or nn.ModuleList of choices
:param name: name of the architecture weight
:param strategy_name: name of the architecture strategy to use
:param depth: depth, how many previous architecture decisions to consider
"""
super().__init__(submodules, name, strategy_name)
# store previous names in case this mixed op is deepened later; no need to store its own name
self._add_to_kwargs(depth=depth)
self._all_prev_names = StrategyManager().ordered_names(unique=False)[-self.max_depth - 1:-1]
self._state_dicts = {}
self._last_state = 'w'
self.change_depth(new_depth=self.depth)
def change_depth(self, new_depth=1):
"""
called by a SplitWeightsMixedOpCallback,
increases the recursive depth of the op, copying the weights so that the copy in use depends on previous layer choices
"""
if new_depth > 0:
assert new_depth >= self.depth, "Can not reduce the depth"
assert new_depth <= self.max_depth, "Can not increase the depth beyond %d" % self.max_depth
assert StrategyManager().is_only_single_path()
while self.depth < min([new_depth, len(self._all_prev_names)]):
if len(self._state_dicts) == 0:
self._state_dicts[self._last_state] = self.submodules.state_dict()
# enlarge dict of stored state dicts by one layer
new_state_dicts = {'0.%s' % k: v for k, v in self._state_dicts.items()}
self._state_dicts = new_state_dicts
self._last_state = '0.%s' % self._last_state
self.depth += 1
def _get_current_state_name(self) -> str:
""" get a name for the current setting (e.g. "0.1.w") that depends on the previously chosen indices """
names = self._all_prev_names[-self.depth:]
return '.'.join([str(self.sm.get_finalized_indices(n, flat=True)) for n in names] + ['w'])
def _set_weight(self):
if self.depth > 0:
# get name of currently used local architecture
cur_state = self._get_current_state_name()
if self._last_state != cur_state:
# store current weights
self._state_dicts[self._last_state] = {k: v.detach().clone()
for k, v in self.submodules.state_dict().items()}
# load data of current weight into the parameter
self.submodules.load_state_dict(self._state_dicts.get(cur_state, self._state_dicts[self._last_state]))
self._last_state = cur_state
def _save_add_dict(self) -> dict:
""" additional info stored in the save_dict """
return dict(depth=self.depth, _last_state=self._last_state, _state_dicts=self._state_dicts)
def _load_add_dict(self, dct: dict):
""" additional info restored from the save_dict """
self.depth = dct.get('depth', self.depth)
self._last_state = dct.get('_last_state', self._last_state)
self._state_dicts = dct.get('_state_dicts', self._state_dicts)
def forward(self, x: torch.Tensor) -> torch.Tensor:
self._set_weight()
return self.ws.combine(self.name, x, self.submodules)
|
48890
|
import ray
from ray.experimental.workflow import storage
from ray.experimental.workflow import workflow_storage
def some_func(x):
return x + 1
def some_func2(x):
return x - 1
def test_raw_storage():
ray.init()
workflow_id = test_raw_storage.__name__
raw_storage = storage.get_global_storage()
step_id = "some_step"
input_metadata = {"2": "c"}
output_metadata = {"a": 1}
args = ([1, "2"], {"k": b"543"})
output = ["the_answer"]
object_resolved = 42
obj_ref = ray.put(object_resolved)
# test creating normal objects
raw_storage.save_step_input_metadata(workflow_id, step_id, input_metadata)
raw_storage.save_step_func_body(workflow_id, step_id, some_func)
raw_storage.save_step_args(workflow_id, step_id, args)
raw_storage.save_object_ref(workflow_id, obj_ref)
raw_storage.save_step_output_metadata(workflow_id, step_id,
output_metadata)
raw_storage.save_step_output(workflow_id, step_id, output)
step_status = raw_storage.get_step_status(workflow_id, step_id)
assert step_status.args_exists
assert step_status.output_object_exists
assert step_status.output_metadata_exists
assert step_status.input_metadata_exists
assert step_status.func_body_exists
assert raw_storage.load_step_input_metadata(workflow_id,
step_id) == input_metadata
assert raw_storage.load_step_func_body(workflow_id, step_id)(33) == 34
assert raw_storage.load_step_args(workflow_id, step_id) == args
assert ray.get(raw_storage.load_object_ref(
workflow_id, obj_ref.hex())) == object_resolved
assert raw_storage.load_step_output_metadata(workflow_id,
step_id) == output_metadata
assert raw_storage.load_step_output(workflow_id, step_id) == output
# test overwrite
input_metadata = [input_metadata, "overwrite"]
output_metadata = [output_metadata, "overwrite"]
args = (args, "overwrite")
output = (output, "overwrite")
object_resolved = (object_resolved, "overwrite")
obj_ref = ray.put(object_resolved)
raw_storage.save_step_input_metadata(workflow_id, step_id, input_metadata)
raw_storage.save_step_func_body(workflow_id, step_id, some_func2)
raw_storage.save_step_args(workflow_id, step_id, args)
raw_storage.save_object_ref(workflow_id, obj_ref)
raw_storage.save_step_output_metadata(workflow_id, step_id,
output_metadata)
raw_storage.save_step_output(workflow_id, step_id, output)
assert raw_storage.load_step_input_metadata(workflow_id,
step_id) == input_metadata
assert raw_storage.load_step_func_body(workflow_id, step_id)(33) == 32
assert raw_storage.load_step_args(workflow_id, step_id) == args
assert ray.get(raw_storage.load_object_ref(
workflow_id, obj_ref.hex())) == object_resolved
assert raw_storage.load_step_output_metadata(workflow_id,
step_id) == output_metadata
assert raw_storage.load_step_output(workflow_id, step_id) == output
ray.shutdown()
def test_workflow_storage():
ray.init()
workflow_id = test_workflow_storage.__name__
raw_storage = storage.get_global_storage()
step_id = "some_step"
input_metadata = {
"name": "test_basic_workflows.append1",
"object_refs": ["abc"],
"workflows": ["def"]
}
output_metadata = {
"output_step_id": "a12423",
"dynamic_output_step_id": "b1234"
}
args = ([1, "2"], {"k": b"543"})
output = ["the_answer"]
object_resolved = 42
obj_ref = ray.put(object_resolved)
# test basics
raw_storage.save_step_input_metadata(workflow_id, step_id, input_metadata)
raw_storage.save_step_func_body(workflow_id, step_id, some_func)
raw_storage.save_step_args(workflow_id, step_id, args)
raw_storage.save_object_ref(workflow_id, obj_ref)
raw_storage.save_step_output_metadata(workflow_id, step_id,
output_metadata)
raw_storage.save_step_output(workflow_id, step_id, output)
wf_storage = workflow_storage.WorkflowStorage(workflow_id)
assert wf_storage.load_step_output(step_id) == output
assert wf_storage.load_step_args(step_id, [], []) == args
assert wf_storage.load_step_func_body(step_id)(33) == 34
assert ray.get(wf_storage.load_object_ref(
obj_ref.hex())) == object_resolved
# test "inspect_step"
inspect_result = wf_storage.inspect_step(step_id)
assert inspect_result == workflow_storage.StepInspectResult(
output_object_valid=True)
assert inspect_result.is_recoverable()
step_id = "some_step2"
raw_storage.save_step_input_metadata(workflow_id, step_id, input_metadata)
raw_storage.save_step_func_body(workflow_id, step_id, some_func)
raw_storage.save_step_args(workflow_id, step_id, args)
raw_storage.save_step_output_metadata(workflow_id, step_id,
output_metadata)
inspect_result = wf_storage.inspect_step(step_id)
assert inspect_result == workflow_storage.StepInspectResult(
output_step_id=output_metadata["dynamic_output_step_id"])
assert inspect_result.is_recoverable()
step_id = "some_step3"
raw_storage.save_step_input_metadata(workflow_id, step_id, input_metadata)
raw_storage.save_step_func_body(workflow_id, step_id, some_func)
raw_storage.save_step_args(workflow_id, step_id, args)
inspect_result = wf_storage.inspect_step(step_id)
assert inspect_result == workflow_storage.StepInspectResult(
args_valid=True,
func_body_valid=True,
object_refs=input_metadata["object_refs"],
workflows=input_metadata["workflows"])
assert inspect_result.is_recoverable()
step_id = "some_step4"
raw_storage.save_step_input_metadata(workflow_id, step_id, input_metadata)
raw_storage.save_step_func_body(workflow_id, step_id, some_func)
inspect_result = wf_storage.inspect_step(step_id)
assert inspect_result == workflow_storage.StepInspectResult(
func_body_valid=True,
object_refs=input_metadata["object_refs"],
workflows=input_metadata["workflows"])
assert not inspect_result.is_recoverable()
step_id = "some_step5"
raw_storage.save_step_input_metadata(workflow_id, step_id, input_metadata)
inspect_result = wf_storage.inspect_step(step_id)
assert inspect_result == workflow_storage.StepInspectResult(
object_refs=input_metadata["object_refs"],
workflows=input_metadata["workflows"])
assert not inspect_result.is_recoverable()
step_id = "some_step6"
inspect_result = wf_storage.inspect_step(step_id)
assert inspect_result == workflow_storage.StepInspectResult()
assert not inspect_result.is_recoverable()
ray.shutdown()
|
48940
|
import math
import brownie
from brownie import chain
def test_only_distributor_allowed(alice, stream):
with brownie.reverts("dev: only distributor"):
stream.notify_reward_amount(10 ** 18, {"from": alice})
def test_retrieves_reward_token(bob, stream, reward_token):
stream.notify_reward_amount(10 ** 18, {"from": bob})
post_notify = reward_token.balanceOf(stream)
assert post_notify == 10 ** 18
def test_reward_rate_updates(bob, stream):
stream.notify_reward_amount(10 ** 18, {"from": bob})
post_notify = stream.reward_rate()
assert post_notify > 0
assert post_notify == 10 ** 18 / (86400 * 10)
def test_reward_rate_updates_mid_duration(bob, stream):
stream.notify_reward_amount(10 ** 18, {"from": bob})
chain.sleep(86400 * 5) # half of the duration
# top up the balance to be 10 ** 18 again
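# (after 5 of the 10 days, 10 ** 18 / 2 is still undistributed, so adding
# 10 ** 18 / 2 restores a full 10 ** 18 spread over the new reward period)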
stream.notify_reward_amount(10 ** 18 / 2, {"from": bob})
post_notify = stream.reward_rate()
# should be relatively close; a relative tolerance of 0.00001 seems like a good heuristic
assert math.isclose(post_notify, 10 ** 18 / (86400 * 10), rel_tol=0.00001)
def test_period_finish_updates(bob, stream):
tx = stream.notify_reward_amount(10 ** 18, {"from": bob})
assert stream.period_finish() == tx.timestamp + 86400 * 10
def test_update_last_update_time(bob, stream):
tx = stream.notify_reward_amount(10 ** 18, {"from": bob})
assert stream.last_update_time() == tx.timestamp
|
48942
|
import itertools
import logging
from typing import Any, Dict, Set, Tuple
from pycoin.coins.bitcoin import Tx as pycoin_tx
from electrum_gui.common.basic.functional.require import require
from electrum_gui.common.coin import data as coin_data
from electrum_gui.common.conf import settings
from electrum_gui.common.provider import data, interfaces
from electrum_gui.common.provider.chains.btc import hardware_mixin, message_mixin
from electrum_gui.common.provider.chains.btc.clients import blockbook
from electrum_gui.common.provider.chains.btc.sdk import network, transaction
from electrum_gui.common.secret import interfaces as secret_interfaces
logger = logging.getLogger("app.chain")
class BTCProvider(interfaces.ProviderInterface, hardware_mixin.BTCHardwareMixin, message_mixin.BTCMessageMixin):
def __init__(self, chain_info: coin_data.ChainInfo, *args, **kwargs):
super(BTCProvider, self).__init__(chain_info, *args, **kwargs)
self._network = None
self._tx_version = None
self._tx_op_return_size_limit = None
self._supported_encodings = None
@property
def network(self) -> Any:
if self._network is None:
self._network = network.get_network_by_chain_code(self.chain_info.chain_code)
return self._network
@property
def tx_version(self) -> int:
if self._tx_version is None:
self._tx_version = transaction.TX_VERSION
return self._tx_version
@property
def tx_op_return_size_limit(self) -> int:
if self._tx_op_return_size_limit is None:
self._tx_op_return_size_limit = transaction.TX_OP_RETURN_SIZE_LIMIT
return self._tx_op_return_size_limit
@property
def supported_encodings(self) -> Set[str]:
if self._supported_encodings is None:
self._supported_encodings = {
*self.chain_info.bip44_purpose_options.keys(),
self.chain_info.default_address_encoding,
}
return self._supported_encodings
@property
def client(self) -> blockbook.BlockBook:
return self.client_selector(instance_required=blockbook.BlockBook)
def verify_address(self, address: str) -> data.AddressValidation:
is_valid, encoding = False, None
try:
parsed_address = self.network.parse.address(address)
address_info = parsed_address.info() if parsed_address else {}
address_type = address_info.get("type")
if address_type == "p2pkh":
encoding = "P2PKH"
elif address_type == "p2pkh_wit":
encoding = "P2WPKH"
elif address_type == "p2sh":
encoding = "P2WPKH-P2SH" # Cannot distinguish between legacy P2SH and P2WPKH-P2SH
is_valid = encoding is not None and encoding in self.supported_encodings
encoding = encoding if is_valid else None
except Exception as e:
logger.exception(f"Illegal address: {address}, error: {e}")
address = address if is_valid else ""
return data.AddressValidation(
normalized_address=address,
display_address=address,
is_valid=is_valid,
encoding=encoding,
)
def pubkey_to_address(self, verifier: secret_interfaces.VerifierInterface, encoding: str = None) -> str:
require(encoding in self.supported_encodings, f"Invalid address encoding: {encoding}")
pubkey = verifier.get_pubkey(compressed=True)
pubkey_hash = self.network.keys.public(pubkey).hash160(is_compressed=True)
if encoding == "P2PKH": # Pay To Public Key Hash
address = self.network.address.for_p2pkh(pubkey_hash)
elif encoding == "P2WPKH": # Pay To Witness Public Key Hash
address = self.network.address.for_p2pkh_wit(pubkey_hash)
elif encoding == "P2WPKH-P2SH": # P2WPKH nested in BIP16 P2SH
witness_script = self.network.contract.for_p2pkh_wit(pubkey_hash)
address = self.network.address.for_p2s(witness_script)
else:
raise Exception("Should not be here")
return address
def fill_unsigned_tx(self, unsigned_tx: data.UnsignedTx) -> data.UnsignedTx:
fee_price_per_unit = unsigned_tx.fee_price_per_unit or int(
self.client.get_prices_per_unit_of_fee().normal.price
)
fee_limit = unsigned_tx.fee_limit or 0
if unsigned_tx.inputs and unsigned_tx.outputs:
input_validations = [self.verify_address(i.address) for i in unsigned_tx.inputs]
output_validations = [self.verify_address(i.address) for i in unsigned_tx.outputs]
if all(i.is_valid for i in itertools.chain(input_validations, output_validations)):
vsize = transaction.calculate_vsize(
input_encodings=[i.encoding for i in input_validations],
output_encodings=[i.encoding for i in output_validations],
op_return=unsigned_tx.payload.get("op_return"),
op_return_size_limit=self.tx_op_return_size_limit,
)
fee_limit = max(fee_limit, vsize)
fee_limit = fee_limit or transaction.PLACEHOLDER_VSIZE
return unsigned_tx.clone(
fee_limit=fee_limit,
fee_price_per_unit=fee_price_per_unit,
)
def sign_transaction(
self, unsigned_tx: data.UnsignedTx, signers: Dict[str, secret_interfaces.SignerInterface]
) -> data.SignedTx:
tx = transaction.create_pycoin_tx(
self.network,
unsigned_tx,
version=self.tx_version,
op_return_size_limit=self.tx_op_return_size_limit,
)
tx.check()
tx.sign(
hash160_lookup=transaction.build_hash160_lookup(self.network, signers.values()),
p2sh_lookup=transaction.build_p2sh_lookup(self.network, signers.values()),
)
self._check_tx_after_signed(tx)
return data.SignedTx(
txid=tx.id(),
raw_tx=tx.as_hex(),
)
def _check_tx_after_signed(self, tx: pycoin_tx.Tx):
unsigned_after = tx.bad_solution_count()
if unsigned_after > 0:
not_fully_signed_message = (
f"{unsigned_after} TxIn items still unsigned, tx: {tx.as_hex(include_unspents=True)}"
)
if settings.IS_DEV:
dump_message = transaction.debug_dump_tx(self.network, tx)
logger.error("\n".join((not_fully_signed_message, dump_message)))
raise Exception(not_fully_signed_message)
def get_token_info_by_address(self, token_address: str) -> Tuple[str, str, int]:
raise NotImplementedError()
|
48943
|
from __future__ import print_function
import torch
import torch.nn as nn
import torch.optim as optim
import torch.backends.cudnn as cudnn
import os, sys
from tensorboardX import SummaryWriter
import time
import numpy as np
import pprint
import socket
import pickle
from resnet import *
from kwng import *
from gaussian import *
from data_loader import *
class Trainer(object):
def __init__(self,args):
torch.manual_seed(args.seed)
self.args = args
self.device = assign_device(args.device)
self.log_dir = make_log_dir(args)
if args.log_in_file:
self.log_file = open(os.path.join(self.log_dir, 'log.txt'), 'w', buffering=1)
sys.stdout = self.log_file
sys.stderr = self.log_file
print("Process id: " + str(os.getpid()) + " | hostname: " + socket.gethostname())
pp = pprint.PrettyPrinter(indent=4)
pp.pprint(vars(args))
print('Creating writer')
self.writer = SummaryWriter(self.log_dir)
print('Loading data')
if not os.path.isdir(args.data_dir):
os.makedirs(args.data_dir, exist_ok=True)
self.data_loaders = get_data_loader(args)
self.total_epochs = self.args.total_epochs
print('==> Building model..')
self.build_model()
def build_model(self):
self.net = get_network(self.args)
self.net = self.net.to(self.device)
if self.args.dtype=='64':
self.net = self.net.double()
if self.device == 'cuda':
self.net = torch.nn.DataParallel(self.net)
cudnn.benchmark = True
self.init_train_values()
self.criterion = get_criterion(self.args)
self.optimizer = get_optimizer(self.args,self.net.parameters(),self.net)
self.scheduler = get_scheduler(self.args,self.optimizer)
self.wrapped_optimizer = get_wrapped_optimizer(self.args,self.optimizer,self.criterion,self.net, device=self.device)
def train(self):
print(' Starting training')
self.init_train_values()
for epoch in range(self.start_epoch, self.start_epoch+self.total_epochs):
train_acc = self.epoch_pass(epoch,'train')
val_acc = self.epoch_pass(epoch,'val')
if self.args.use_scheduler:
self.scheduler.step()
return train_acc,val_acc
def test(self):
print('Starting test')
test_acc = self.epoch_pass(0,'test')
return test_acc
def init_train_values(self):
if self.args.resume:
# Load checkpoint.
print('==> Resuming from checkpoint..')
assert os.path.isdir(self.log_dir+'/checkpoint'), 'Error: no checkpoint directory found!'
checkpoint = torch.load(self.log_dir+'/checkpoint/ckpt.t7')
self.net.load_state_dict(checkpoint['net'])
self.best_acc = checkpoint['acc']
self.best_loss = checkpoint['loss']
self.start_epoch = checkpoint['epoch']
self.total_iters = checkpoint['total_iters']
else:
self.best_acc = 0 # best test accuracy
self.start_epoch = 0 # start from epoch 0 or last checkpoint epoch
self.total_iters = 0
self.best_loss = torch.tensor(np.inf)
def epoch_pass(self,epoch,phase):
print('Epoch: '+ str(epoch) + ' | ' + phase + ' phase')
if phase == 'train':
self.net.train(True) # Set model to training mode
else:
self.net.train(False) # Set model to evaluate mode
loss = 0
correct = 0
total = 0
counts = 0
for batch_idx, (inputs, targets) in enumerate(self.data_loaders[phase]):
tic = time.time()
inputs, targets = inputs.to(self.device), targets.to(self.device)
if self.args.dtype=='64':
inputs=inputs.double()
if phase=="train":
self.total_iters+=1
loss_step, predicted = self.wrapped_optimizer.step(inputs,targets)
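# note: in the train phase step() above already computed a loss; eval() below
# presumably recomputes it so the reported metrics follow the same code path in every phase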
loss_step, predicted = self.wrapped_optimizer.eval(inputs,targets)
loss += loss_step
running_loss = loss/(batch_idx+1)
total += targets.size(0)
correct += predicted.eq(targets).sum().item()
acc= 100.*correct/total
if phase=="train":
self.writer.add_scalars('data/train_loss_step',{"loss_step":loss_step,"loss_averaged":running_loss},self.total_iters)
toc = time.time()
print(' Loss: ' + str(round(running_loss,3))+ ' | Acc: '+ str(acc) + ' ' +'('+str(correct) +'/'+str(total)+')' + ' time: ' + str(toc-tic) + ' iter: '+ str(batch_idx))
counts += 1
self.writer.add_scalars('data/total_stats_'+phase, {"loss":loss/(batch_idx+1), "correct":acc},epoch)
# Save checkpoint.
if phase == 'val':
avg_loss = loss/(batch_idx+1)
if avg_loss < self.best_loss:
save_checkpoint(self.writer.logdir,acc,avg_loss,epoch,self.total_iters,self.wrapped_optimizer.net)
self.best_loss = avg_loss
return acc
def save_checkpoint(checkpoint_dir,acc,loss,epoch,total_iters,net):
print('Saving..')
state = {
'net': net.state_dict(),
'acc': acc,
'loss':loss,
'epoch': epoch,
'total_iters':total_iters,
}
if not os.path.isdir(checkpoint_dir +'/checkpoint'):
os.mkdir(checkpoint_dir + '/checkpoint')
torch.save(state,checkpoint_dir +'/checkpoint/ckpt.t7')
def assign_device(device):
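"""Map an integer device id to a torch device string.
A non-negative id selects 'cuda:<id>' when CUDA is available (and 'cpu' otherwise);
-1 selects 'cuda', -2 selects 'cpu', and any other value is returned unchanged.
"""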
if device >-1:
device = 'cuda:'+str(device) if torch.cuda.is_available() and device>-1 else 'cpu'
elif device==-1:
device = 'cuda'
elif device==-2:
device = 'cpu'
return device
def make_log_dir(args):
if args.with_sacred:
log_dir = args.log_dir + '_' + args.log_name
else:
log_dir = os.path.join(args.log_dir,args.log_name)
if not os.path.isdir(log_dir):
os.mkdir(log_dir)
return log_dir
def get_dtype(args):
if args.dtype=='32':
return torch.float32
elif args.dtype=='64':
return torch.float64
def get_network(args):
if args.network=='ResNet18':
return ResNet18(num_classes = args.num_classes)
elif args.network=='ResNet18IllCond':
return ResNet18IllCond(num_classes = args.num_classes)
def get_kernel(args,device = 'cuda'):
dtype = get_dtype(args)
if args.kernel=='gaussian':
return Gaussian(1,args.log_bandwidth,dtype=dtype, device = device)
def get_wrapped_optimizer(args,optimizer,criterion,net,device = 'cuda'):
if args.estimator=='EuclideanGradient':
return OptimizerWrapper(optimizer,criterion,net,args.clip_grad)
elif args.estimator=='KWNG':
kernel = get_kernel(args, device=device)
estimator = KWNG(kernel,eps=args.epsilon, num_basis = args.num_basis,with_diag_mat = args.with_diag_mat)
return KWNGWrapper(optimizer,criterion,net,args.clip_grad,estimator,args.dumping_freq,args.reduction_coeff,args.min_red,args.max_red)
def get_data_loader(args):
if args.dataset=='cifar10':
args.num_classes = 10
return CIFARLoader(args.data_dir,args.b_size)
elif args.dataset=='cifar100':
args.num_classes = 100
return CIFAR100Loader(args.data_dir,args.b_size)
def get_optimizer(args,params,net):
if args.optimizer=='sgd':
return optim.SGD(params, lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
def get_scheduler(args,optimizer):
if args.scheduler=='MultiStepLR':
if args.milestone is None:
lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[int(args.total_epochs*0.5), int(args.total_epochs*0.75)], gamma=args.lr_decay)
else:
milestone = [int(_) for _ in args.milestone.split(',')]
lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=milestone, gamma=args.lr_decay)
return lr_scheduler
def get_criterion(args):
if args.criterion=='cross_entropy':
return nn.CrossEntropyLoss()
|
48950
|
from distutils.core import setup, Extension
cGeo = Extension("cGeo", sources = ["cGeo.c"])
setup (name = "cGeo",
version = "0.1",
author = "<NAME>",
description = "Fast geometric functionality.",
ext_modules = [cGeo])
|
48955
|
import json
from codegen import json_definitions as jd
from codegen import json_writer as jw
from codegen import fblas_routine
from codegen import fblas_types
import codegen.generator_definitions as gd
from codegen.fblas_helper import FBLASHelper
import logging
import os
import jinja2
from typing import List
class HostAPICodegen:
_output_path = ""
def __init__(self, output_path: str):
self._output_path = output_path
def generateRoutines(self, routines: List[fblas_routine.FBLASRoutine]):
"""
Generates the code for the given routines.
:param routines: the list of FBLAS routines to generate code for.
:return: None; writes the generated kernel files and a JSON description of the routines.
"""
routine_id = 0
json_routines = []
for r in routines:
print("Generating: " + r.user_name)
#dispatch
method_name = "_codegen_" + r.blas_name
method = getattr(self, method_name)
jr = method(r, routine_id)
routine_id = routine_id + 1
json_routines.append(jr)
#Output json for generated routines
json_content = {"routine": json_routines}
jw.write_to_file(self._output_path+"generated_routines.json", json_content)
def _write_file(self, path, content, append=False):
print("Generating file: "+path)
with open(path, "a" if append else "w") as f:
if append is True:
f.write("\n")
f.write(content)
def _read_template_file(self, path):
templates = os.path.join(os.path.dirname(__file__), "../../templates")
loader = jinja2.FileSystemLoader(searchpath=templates)
logging.basicConfig()
logger = logging.getLogger('logger')
logging_undefined = jinja2.make_logging_undefined(logger=logger, base=jinja2.Undefined)
env = jinja2.Environment(loader=loader, undefined=logging_undefined)
env.lstrip_blocks = True
env.trim_blocks = True
return env.get_template(path)
def _codegen_dot(self, routine: fblas_routine.FBLASRoutine, id: int):
template = self._read_template_file("1/dot.cl")
chan_in_x_name = gd.CHANNEL_IN_VECTOR_X_BASE_NAME+str(id)
chan_in_y_name = gd.CHANNEL_IN_VECTOR_Y_BASE_NAME+str(id)
chan_out = gd.CHANNEL_OUT_SCALAR_BASE_NAME+str(id)
channels_routine = {"channel_in_vector_x": chan_in_x_name, "channel_in_vector_y": chan_in_y_name, "channel_out_scalar": chan_out}
output_path = self._output_path + "/" + routine.user_name+".cl"
self._write_file(output_path, template.render(routine=routine, channels=channels_routine))
#add helpers
template = self._read_template_file("helpers/"+gd.TEMPLATE_READ_VECTOR_X)
channels_helper = {"channel_out_vector": chan_in_x_name}
helper_name_read_x = gd.HELPER_READ_VECTOR_X_BASE_NAME+str(id)
self._write_file(output_path, template.render(helper_name=helper_name_read_x, helper=routine, channels=channels_helper), append=True)
#Read y
template = self._read_template_file("helpers/" + gd.TEMPLATE_READ_VECTOR_Y)
channels_helper = {"channel_out_vector": chan_in_y_name}
helper_name_read_y = gd.HELPER_READ_VECTOR_Y_BASE_NAME + str(id)
self._write_file(output_path, template.render(helper_name=helper_name_read_y, helper=routine, channels=channels_helper),
append=True)
#Write scalar
template = self._read_template_file("helpers/" + gd.TEMPLATE_WRITE_SCALAR)
channels_helper = {"channel_in_scalar": chan_out}
helper_name_write_scalar = gd.HELPER_WRITE_SCALAR_BASE_NAME + str(id)
self._write_file(output_path, template.render(helper_name=helper_name_write_scalar, helper=routine, channels=channels_helper),
append=True)
#create the json entries
routine_json = {}  # avoid shadowing the json module imported above
jw.add_commons(routine_json, routine)
jw.add_incx(routine_json, routine)
jw.add_incy(routine_json, routine)
jw.add_item(routine_json, jd.GENERATED_READ_VECTOR_X, helper_name_read_x)
jw.add_item(routine_json, jd.GENERATED_READ_VECTOR_Y, helper_name_read_y)
jw.add_item(routine_json, jd.GENERATED_WRITE_SCALAR, helper_name_write_scalar)
return routine_json
def _codegen_axpy(self, routine: fblas_routine.FBLASRoutine, id: int):
template = self._read_template_file("1/axpy.cl")
chan_in_x_name = gd.CHANNEL_IN_VECTOR_X_BASE_NAME+str(id)
chan_in_y_name = gd.CHANNEL_IN_VECTOR_Y_BASE_NAME+str(id)
chan_out = gd.CHANNEL_OUT_VECTOR_BASE_NAME+str(id)
channels_routine = {"channel_in_vector_x": chan_in_x_name, "channel_in_vector_y": chan_in_y_name, "channel_out_vector": chan_out}
output_path = self._output_path + "/" + routine.user_name+".cl"
self._write_file(output_path, template.render(routine=routine, channels=channels_routine))
#add helpers
template = self._read_template_file("helpers/"+gd.TEMPLATE_READ_VECTOR_X)
channels_helper = {"channel_out_vector": chan_in_x_name}
helper_name_read_x = gd.HELPER_READ_VECTOR_X_BASE_NAME+str(id)
self._write_file(output_path, template.render(helper_name=helper_name_read_x, helper=routine, channels=channels_helper), append=True)
#Read y
template = self._read_template_file("helpers/" + gd.TEMPLATE_READ_VECTOR_Y)
channels_helper = {"channel_out_vector": chan_in_y_name}
helper_name_read_y = gd.HELPER_READ_VECTOR_Y_BASE_NAME + str(id)
self._write_file(output_path, template.render(helper_name=helper_name_read_y, helper=routine, channels=channels_helper),
append=True)
#Write vector
template = self._read_template_file("helpers/" + gd.TEMPLATE_WRITE_VECTOR)
channels_helper = {"channel_in_vector": chan_out}
helper_name_write_vector = gd.HELPER_WRITE_VECTOR_BASE_NAME + str(id)
self._write_file(output_path, template.render(helper_name=helper_name_write_vector, helper=routine, channels=channels_helper),
append=True)
#create the json entries
routine_json = {}  # avoid shadowing the json module imported above
jw.add_commons(routine_json, routine)
jw.add_incx(routine_json, routine)
jw.add_incy(routine_json, routine)
jw.add_item(routine_json, jd.GENERATED_READ_VECTOR_X, helper_name_read_x)
jw.add_item(routine_json, jd.GENERATED_READ_VECTOR_Y, helper_name_read_y)
jw.add_item(routine_json, jd.GENERATED_WRITE_VECTOR, helper_name_write_vector)
return routine_json
|
48977
|
from celery.decorators import task
@task
def make_pi(num_calcs):
"""
Simple pi approximation based on the Leibniz formula for pi.
http://en.wikipedia.org/wiki/Leibniz_formula_for_pi
:param num_calcs: defines the length of the sequence
:type num_calcs: positive int
:returns: an approximation of pi
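For example, make_pi(4) computes 4 * (1 - 1/3 + 1/5 - 1/7) ~= 2.8952.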
"""
print "Approximating pi with %s iterations" % num_calcs
pi = 0.0
for k in xrange(num_calcs):
pi += 4 * ((-1)**k / ((2.0 * k) + 1))
return pi
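# A quick smoke test (hedged): calling the task directly, without a broker,
# just executes the function body. The Leibniz series error shrinks roughly as 1/N.
if __name__ == "__main__":
    approx = make_pi(100000)
    print("pi ~= %s" % approx)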
|
49008
|
import urllib2
import json
# Module-level configuration; set these before calling the functions below.
# (A bare `global` statement at module scope has no effect.)
base_url = None
auth_key = None
ticket = None
def get_ticket():
url = base_url + "/alfresco/service/api/login"
headers = {"Content-Type": "application/json"}
data = auth_key
try:
response = make_post_request(url, data, headers)
return json.load(response).get("data").get("ticket")
except urllib2.HTTPError as err:
print("Failed to Login ")
print(err.code)
def make_request(url, headers):
request = urllib2.Request(url)
for key, value in headers.items():
request.add_header(key, value)
return urllib2.urlopen(request)
def make_post_request(url, data, headers):
request = urllib2.Request(url)
for key, value in headers.items():
request.add_header(key, value)
return urllib2.urlopen(request, json.dumps(data))
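if __name__ == "__main__":
    # Hedged demo: the URL and credentials are placeholders for a local Alfresco
    # instance; the payload shape follows Alfresco's login web service.
    base_url = "http://localhost:8080"
    auth_key = {"username": "admin", "password": "admin"}
    print(get_ticket())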
|
49017
|
from .. import testing
class LogicalFunctionsTest(testing.FunctionalTestCase):
filename = "logical.xlsx"
def test_IF_false_case(self):
self.assertEqual(
self.evaluator.evaluate('Sheet1!C2'),
self.evaluator.get_cell_value('Sheet1!C2')
)
def test_IF_true_case(self):
self.assertEqual(
self.evaluator.evaluate('Sheet1!C3'),
self.evaluator.get_cell_value('Sheet1!C3')
)
def test_AND_false_case(self):
self.assertEqual(
self.evaluator.evaluate('Sheet1!D2'),
self.evaluator.get_cell_value('Sheet1!D2')
)
def test_AND_true_case(self):
self.assertEqual(
self.evaluator.evaluate('Sheet1!D3'),
self.evaluator.get_cell_value('Sheet1!D3')
)
def test_OR_false_case(self):
self.assertEqual(
self.evaluator.evaluate('Sheet1!E2'),
self.evaluator.get_cell_value('Sheet1!E2')
)
def test_OR_true_case(self):
self.assertEqual(
self.evaluator.evaluate('Sheet1!E3'),
self.evaluator.get_cell_value('Sheet1!E3')
)
|
49084
|
del_items(0x800A0E8C)
SetType(0x800A0E8C, "void VID_OpenModule__Fv()")
del_items(0x800A0F4C)
SetType(0x800A0F4C, "void InitScreens__Fv()")
del_items(0x800A103C)
SetType(0x800A103C, "void MEM_SetupMem__Fv()")
del_items(0x800A1068)
SetType(0x800A1068, "void SetupWorkRam__Fv()")
del_items(0x800A10F8)
SetType(0x800A10F8, "void SYSI_Init__Fv()")
del_items(0x800A1204)
SetType(0x800A1204, "void GM_Open__Fv()")
del_items(0x800A1228)
SetType(0x800A1228, "void PA_Open__Fv()")
del_items(0x800A1260)
SetType(0x800A1260, "void PAD_Open__Fv()")
del_items(0x800A12A4)
SetType(0x800A12A4, "void OVR_Open__Fv()")
del_items(0x800A12C4)
SetType(0x800A12C4, "void SCR_Open__Fv()")
del_items(0x800A12F4)
SetType(0x800A12F4, "void DEC_Open__Fv()")
del_items(0x800A1568)
SetType(0x800A1568, "char *GetVersionString__FPc(char *VersionString2)")
del_items(0x800A163C)
SetType(0x800A163C, "char *GetWord__FPc(char *VStr)")
|
49152
|
from office365.runtime.client_value import ClientValue
class SpaApplication(ClientValue):
pass
|
49187
|
from flask import Flask
app = Flask(__name__)
posts = {
0: {
'title': 'Hello, world',
'content': 'This is my first blog post!'
}
}
@app.route('/')
def home():
return 'Hello, world!'
# This route matches URLs like /post/0 (for example).
# Flask then passes 0 as the post_id argument to the post() function.
@app.route('/post/<int:post_id>')
def post(post_id):
"""
This function runs when a user visits route such as:
- /post/0
- /post/2
- /post/99
But not:
- /post/a
- /post/something/else
- /posts/1
    The 0 arrives as a number (not a string!), so we can use it directly.
"""
    post = posts.get(post_id)  # Retrieve the post from our global posts dictionary by the ID passed in as argument.
    if post is None:  # Unknown integer IDs still match the route, so guard against them.
        return "Post not found", 404
    return f"Post {post['title']}, content:\n\n{post['content']}"  # Return the title and content formatted a bit nicer.
if __name__ == '__main__':
app.run(debug=True)
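# A quick manual check against the data above (hypothetical local run on Flask's default port):
#   $ curl http://127.0.0.1:5000/post/0
#   Post Hello, world, content:
#
#   This is my first blog post!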
|
49193
|
from spacenetutilities.labeltools import coreLabelTools
import json
import glob
import argparse
from datetime import datetime
import os
def modifyTimeField(geoJson, geoJsonNew, featureItemsToAdd=['ingest_tim', 'ingest_time', 'edit_date'], featureKeyListToRemove=[]):
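    # Note: 'ingest_tim' next to 'ingest_time' is likely intentional, not a typo;
    # shapefile/DBF attribute names are truncated to 10 characters, so both
    # spellings can appear in the source data.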
now = datetime.today()
with open(geoJson) as json_data:
d = json.load(json_data)
featureList = d['features']
newFeatureList = []
for feature in featureList:
tmpFeature = dict(feature)
for featureKey in featureKeyListToRemove:
if featureKey in tmpFeature['properties']:
del tmpFeature['properties'][featureKey]
for featureKey in featureItemsToAdd:
if not (featureKey in tmpFeature['properties']):
print('inserting missing field')
print(now.isoformat())
tmpFeature['properties'][featureKey] = now.isoformat()
else:
if not tmpFeature['properties'][featureKey]:
print('filling empty field')
tmpFeature['properties'][featureKey] = now.isoformat()
newFeatureList.append(tmpFeature)
d['features']=newFeatureList
if os.path.exists(geoJsonNew):
os.remove(geoJsonNew)
with open(geoJsonNew, 'w') as json_data:
json.dump(d, json_data)
def removeIdFieldFromJsonEntries(geoJson, geoJsonNew, featureKeyListToRemove=['Id', 'id'], featureItemsToAdd={}):
with open(geoJson) as json_data:
d = json.load(json_data)
featureList = d['features']
newFeatureList = []
for feature in featureList:
tmpFeature = dict(feature)
for featureKey in featureKeyListToRemove:
if featureKey in tmpFeature['properties']:
del tmpFeature['properties'][featureKey]
tmpFeature.update(featureItemsToAdd)
newFeatureList.append(tmpFeature)
d['features']=newFeatureList
if os.path.exists(geoJsonNew):
os.remove(geoJsonNew)
with open(geoJsonNew, 'w') as json_data:
json.dump(d, json_data)
def removeIdinGeoJSONFolder(folder, modifier='noid'):
geoJsonList = glob.glob(os.path.join(folder, '*.geojson'))
for geojsonName in geoJsonList:
removeIdFieldFromJsonEntries(geojsonName, geojsonName.replace('.geojson', '{}.geojson'.format(modifier)))
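if __name__ == "__main__":
    # Minimal CLI sketch consistent with the otherwise-unused argparse import above;
    # the argument names here are assumptions, not the original interface.
    parser = argparse.ArgumentParser(description="Strip Id fields from all GeoJSON files in a folder")
    parser.add_argument("folder", help="folder containing .geojson files")
    parser.add_argument("--modifier", default="noid", help="suffix appended to output file names")
    cli_args = parser.parse_args()
    removeIdinGeoJSONFolder(cli_args.folder, modifier=cli_args.modifier)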
|
49224
|
from hlo import ShardingSpec, ShardingSpecType
from cluster_env import ClusterEnvironment
from common import compute_bytes
def test_tile():
cluster_env = ClusterEnvironment([[0, 1, 2], [3, 4, 5]], [1,1], [1,1], None)
sharding = ShardingSpec.tile((12, 12), [0, 1], [0, 1], cluster_env)
assert sharding.tile_assignment_dimensions == (2, 3)
assert sharding.tile_assignment_devices == (0, 1, 2, 3, 4, 5)
assert sharding.replicate_on_last_tile_dim == False
sharding = ShardingSpec.tile((12, 12), [1, 0], [1, 0], cluster_env)
assert sharding.tile_assignment_dimensions == (2, 3)
assert sharding.tile_assignment_devices == (0, 1, 2, 3, 4, 5)
assert sharding.replicate_on_last_tile_dim == False
sharding = ShardingSpec.tile((12, 12), [0, 1], [1, 0], cluster_env)
assert sharding.tile_assignment_dimensions == (3, 2)
assert sharding.tile_assignment_devices == (0, 3, 1, 4, 2, 5)
assert sharding.replicate_on_last_tile_dim == False
sharding = ShardingSpec.tile((12, 12), [0], [0], cluster_env)
assert sharding.tile_assignment_dimensions == (2, 1, 3)
assert sharding.tile_assignment_devices == (0, 1, 2, 3, 4, 5)
assert sharding.replicate_on_last_tile_dim == True
sharding = ShardingSpec.tile((12, 12), [0], [1], cluster_env)
assert sharding.tile_assignment_dimensions == (3, 1, 2)
assert sharding.tile_assignment_devices == (0, 3, 1, 4, 2, 5)
assert sharding.replicate_on_last_tile_dim == True
sharding = ShardingSpec.tile((12, 12), [1], [1], cluster_env)
assert sharding.tile_assignment_dimensions == (1, 3, 2)
assert sharding.tile_assignment_devices == (0, 3, 1, 4, 2, 5)
assert sharding.replicate_on_last_tile_dim == True
sharding = ShardingSpec.tile((12, 12), [1], [0], cluster_env)
assert sharding.tile_assignment_dimensions == (1, 2, 3)
assert sharding.tile_assignment_devices == (0, 1, 2, 3, 4, 5)
assert sharding.replicate_on_last_tile_dim == True
sharding = ShardingSpec.tile((12, 12, 12), [0, 1], [0, 1], cluster_env)
assert sharding.tile_assignment_dimensions == (2, 3, 1)
assert sharding.tile_assignment_devices == (0, 1, 2, 3, 4, 5)
assert sharding.replicate_on_last_tile_dim == False
sharding = ShardingSpec.tile((12, 12, 12), [0, 1], [1, 0], cluster_env)
assert sharding.tile_assignment_dimensions == (3, 2, 1)
assert sharding.tile_assignment_devices == (0, 3, 1, 4, 2, 5)
assert sharding.replicate_on_last_tile_dim == False
sharding = ShardingSpec.tile((12, 12, 12), [1], [0], cluster_env)
assert sharding.tile_assignment_dimensions == (1, 2, 1, 3)
assert sharding.tile_assignment_devices == (0, 1, 2, 3, 4, 5)
assert sharding.replicate_on_last_tile_dim == True
def test_tile2():
cluster_env = ClusterEnvironment([[0, 1, 2, 3]], [1,1], [1,1], None)
sharding = ShardingSpec.tile((12, 12), [1], [1], cluster_env)
assert sharding.tile_assignment_dimensions == (1, 4)
assert sharding.tile_assignment_devices == (0, 1, 2, 3)
assert sharding.replicate_on_last_tile_dim == False
sharding = ShardingSpec.tile((12, 12), [1], [0], cluster_env)
assert sharding.type == ShardingSpecType.REPLICATED
cluster_env = ClusterEnvironment([[0], [1], [2], [3]], [1,1], [1,1], None)
sharding = ShardingSpec.tile((12, 12), [1], [0], cluster_env)
assert sharding.tile_assignment_dimensions == (1, 4)
assert sharding.tile_assignment_devices == (0, 1, 2, 3)
assert sharding.replicate_on_last_tile_dim == False
sharding = ShardingSpec.tile((12, 12), [1], [1], cluster_env)
assert sharding.type == ShardingSpecType.REPLICATED
def test_tile3():
cluster_env = ClusterEnvironment([[0, 1], [2, 3]], [1,1], [1,1], None)
shape = (12, 12)
src = ShardingSpec.split(shape, 1, cluster_env)
dst = ShardingSpec.tile(shape, [0], [0], cluster_env)
print(src)
print(dst)
cost = cluster_env.resharding_cost(shape, src, dst)
print(cost)
def assert_allclose(x, y):
assert abs((x - y) / (y + 1e-8)) < 0.01
def test_resharding_cost():
cluster_env = ClusterEnvironment([[0, 1, 2], [3, 4, 5]], [1, 1], [1, 1], None)
shape = (128, 128)
src = ShardingSpec.tile(shape, [0], [0], cluster_env)
dst = ShardingSpec.tile(shape, [0], [0], cluster_env)
cost = cluster_env.resharding_cost(shape, src, dst)
assert_allclose(cost, 0)
src = ShardingSpec.tile(shape, [0, 1], [0, 1], cluster_env)
dst = ShardingSpec.tile(shape, [1, 0], [1, 0], cluster_env)
cost = cluster_env.resharding_cost(shape, src, dst)
assert_allclose(cost, 0)
src = ShardingSpec.tile(shape, [0], [0], cluster_env)
dst = ShardingSpec.tile(shape, [0, 1], [0, 1], cluster_env)
cost = cluster_env.resharding_cost(shape, src, dst)
assert_allclose(cost, 0)
src = ShardingSpec.tile(shape, [0], [0], cluster_env)
dst = ShardingSpec.tile(shape, [0, 1], [0, 1], cluster_env)
cost = cluster_env.resharding_cost(shape, src, dst)
assert_allclose(cost, 0)
src = ShardingSpec.tile(shape, [0, 1], [0, 1], cluster_env)
dst = ShardingSpec.tile(shape, [0], [0], cluster_env)
cost = cluster_env.resharding_cost(shape, src, dst)
assert_allclose(cost, cluster_env.all_gather_cost(compute_bytes(shape), 1))
src = ShardingSpec.tile(shape, [0, 1], [0, 1], cluster_env)
dst = ShardingSpec.replicated(cluster_env)
cost = cluster_env.resharding_cost(shape, src, dst)
assert_allclose(cost, cluster_env.all_gather_cost(compute_bytes(shape), 0)
+ cluster_env.all_gather_cost(compute_bytes(shape), 1))
def test_resharding_cost2():
cluster_env = ClusterEnvironment([[0], [1], [2], [3]], [1,1], [1,1], None)
shape = (128, 128)
src = ShardingSpec.tile(shape, [0, 1], [0, 1], cluster_env)
dst = ShardingSpec.tile(shape, [0], [0], cluster_env)
cost = cluster_env.resharding_cost(shape, src, dst)
assert_allclose(cost, 0)
if __name__ == "__main__":
test_tile()
test_tile2()
#test_tile3()
test_resharding_cost()
test_resharding_cost2()
|
49274
|
import bpy
import numpy as np
from PIL import Image
class CarModelViewToImage():
# def __init__:
# self.camera_ = None
# self.image_folder_ = None
# self.car_width_ = 0
# self.car_length_ = 0
# self.viewport_width_ = 0
# self.viewport_height_ = 0
# self.stride_ = 0
# self.stride_radians_ = 0
# self.car_ = None
# self.scene_length_ = 0
# self.scene_height_ = 0
# self.light_ctr_ = None
def init(self, info):
"""
info: {
"car_width" : float,
"car_length": float,
"viewport_width" : float,
"viewport_height" : float,
"image_folder" : string
}
"""
# get base information
self.car_width_ = info["car_width"]
self.car_length_ = info["car_length"]
self.viewport_width_ = info["viewport_width"]
self.viewport_height_ = info["viewport_height"]
self.image_folder_ = info["image_folder"]
self.scene_length_ = self.car_length_ * 2
self.scene_height_ = self.car_length_
bpy.context.scene.render.resolution_x = self.viewport_width_
bpy.context.scene.render.resolution_y = self.viewport_height_
bpy.context.scene.render.filepath = self.image_folder_
# resize model and light
# save model dimensions and location
self.car_ = bpy.data.objects["car"]
# save light location
self.light_ctr_ = [bpy.data.objects["left_light"],
bpy.data.objects["right_light"], bpy.data.objects["top_light"]]
# move model and light
offset = self.car_.location.copy()
self.car_.location -= offset
for l in self.light_ctr_:
l.location -= offset
# calculate prop from length and resize
car_length_now = max(self.car_.dimensions)
scale_size = self.car_length_ / car_length_now
self.car_.scale *= scale_size
for l in self.light_ctr_:
l.location *= scale_size
l.scale *= scale_size
# set camera
bpy.ops.object.camera_add()
self.camera_ = bpy.data.objects["Camera"]
# set camera base info
self.camera_.data.lens_unit = "FOV"
self.camera_.data.angle = np.radians(90)
self.camera_.data.clip_start = 0.1
self.camera_.data.clip_end = self.scene_length_ * 2
# set camera constraint
bpy.ops.object.constraint_add(type="TRACK_TO")
bpy.context.object.constraints["Track To"].up_axis = 'UP_Y'
bpy.context.object.constraints["Track To"].track_axis = 'TRACK_NEGATIVE_Z'
bpy.context.object.constraints["Track To"].target = self.car_
bpy.context.object.constraints["Track To"].use_target_z = True
# set render Node
self.scene_ = bpy.context.scene
self.scene_.use_nodes = True
self.tree_ = self.scene_.node_tree
self.links_ = self.tree_.links
# clear default nodes
for n in self.tree_.nodes:
self.tree_.nodes.remove(n)
self.render_layer_ = self.tree_.nodes.new('CompositorNodeRLayers')
self.viewer_image_ = self.tree_.nodes.new('CompositorNodeViewer')
self.viewer_image_.use_alpha = False
    def set_camera_pos(self, x, y, z=None):
        # Convert the normalized coordinates to real scene coordinates
        real_x = np.clip(x, -1, 1) * self.scene_length_
        real_y = np.clip(y, -1, 1) * self.scene_length_
        self.camera_.location[0] = real_x
        self.camera_.location[1] = real_y
        if z is not None:
            real_z = np.clip(z, 0, 1) * self.scene_height_
            self.camera_.location[2] = real_z
def render_image(self, img_name, folder_path):
"""
        Render the color and depth images to files.
"""
filepath = folder_path + img_name
filepath_depth = folder_path + "z" + img_name
# color
self.links_.clear()
self.links_.new(self.render_layer_.outputs[0], self.viewer_image_.inputs[0])
bpy.ops.render.render()
bpy.data.images[0].save_render(filepath)
# depth
self.links_.clear()
# self.links_.new(self.render_layer_.outputs["Depth"], self.viewer_depth_.inputs[0])
self.links_.new(self.render_layer_.outputs["Depth"], self.viewer_image_.inputs[0])
bpy.ops.render.render()
pixels = bpy.data.images['Viewer Node'].pixels
pixels = np.array(pixels)[::4][::-1] # get the pixels
pixels[pixels < 10000000000.0] = 255
pixels[pixels >= 10000000000.0] = 0
pix = pixels.astype(dtype=np.uint8).reshape((self.viewport_height_, self.viewport_width_))
img = Image.fromarray(pix)
img = img.transpose(Image.FLIP_LEFT_RIGHT)
img.save(filepath_depth)
def get_single_image(self, x, y, z, img_name, folder_path=""):
"""
x,y,z:摄像头位置在场景的比例,其中x、y为-1~1,z为0~1
img_name : 文件名
folder_path : 文件夹路径
"""
# 设置摄像机
self.set_camera_pos(x,y,z)
# 渲染并保存图像
bpy.context.scene.camera = self.camera_
bpy.ops.render.render()
self.render_image(img_name, folder_path)
if(folder_path != ""):
self.render_image(img_name, folder_path)
else:
self.render_image(img_name, self.image_folder_)
def get_surround_image(self, xy, z, rotate_stride, folder_path = ""):
"""
        xy, z: camera position as a fraction of the scene; xy in -1..1, z in 0..1
        rotate_stride: rotation step in degrees
        folder_path: output folder path
"""
def set_camera_pos(angle, camera_to_origin_length):
self.camera_.location[0] = camera_to_origin_length * np.cos(np.radians(angle))
self.camera_.location[1] = camera_to_origin_length * np.sin(np.radians(angle))
        # Compute rotation-related parameters
bpy.context.scene.camera = self.camera_
self.stride_ = rotate_stride
self.stride_radians_ = np.radians(rotate_stride)
# set camera parameters
self.set_camera_pos(xy, 0, z)
real_xy = self.scene_length_ * np.clip(xy, -1, 1)
real_z = self.scene_height_ * np.clip(z, 0, 1)
camera_length = np.sqrt(real_xy**2 + real_z**2)
for i in range(0, 360, rotate_stride):
img_name = str(i) + ".jpg"
set_camera_pos(i, camera_length)
            # render_image() performs the renders; the scene camera was assigned before the loop
            if folder_path != "":
                self.render_image(img_name, folder_path)
            else:
                self.render_image(img_name, self.image_folder_)
if __name__ == '__main__':
info = {
"car_width" : 30,
"car_length": 50,
"viewport_width" : 1280,
"viewport_height" : 720,
"image_folder" : "E:/company/MyWork/Workspace/CPU_3D/resources/Huake8296/car_image/single/"
}
car_view = CarModelViewToImage()
car_view.init(info)
#car_view.get_single_image(0, 0, 1, "top_view.jpg")# have a bug
#car_view.get_surround_image(-0.6, 0.4, 90)
car_view.get_single_image(0, -0.6, 0.6, "view_front.jpg")
car_view.get_single_image(0, 0.6, 0.6, "view_back.jpg")
car_view.get_single_image(0.6, 0, 0.6, "view_left.jpg")
car_view.get_single_image(-0.6, 0, 0.6, "view_right.jpg")
car_view.get_single_image(0.6, -0.6, 0.6, "view_left_front.jpg")
car_view.get_single_image(0.6, 0.6, 0.6, "view_left_back.jpg")
car_view.get_single_image(-0.6, -0.6, 0.6, "view_right_front.jpg")
    car_view.get_single_image(-0.6, 0.6, 0.6, "view_right_back.jpg")
|
49276
|
import os
from os.path import exists
from os.path import join
from os.path import splitext
from subprocess import check_call
from typing import Dict
from typing import List
from typing import Mapping
from typing import Optional
from .compat import is_posix
from .exc import CommandError
def open_in_editor(
filename: str, environ: Optional[Dict[str, str]] = None
) -> None:
"""
Opens the given file in a text editor. If the environment variable
``EDITOR`` is set, this is taken as preference.
Otherwise, a list of commonly installed editors is tried.
If no editor matches, an :py:exc:`OSError` is raised.
:param filename: The filename to open. Will be passed verbatim to the
editor command.
:param environ: An optional drop-in replacement for ``os.environ``. Used
mainly for testing.
"""
env = os.environ if environ is None else environ
try:
editor = _find_editor(env)
check_call([editor, filename])
except Exception as exc:
raise CommandError("Error executing editor (%s)" % (exc,)) from exc
def _find_editor(environ: Mapping[str, str]) -> str:
candidates = _default_editors()
for i, var in enumerate(("EDITOR", "VISUAL")):
if var in environ:
user_choice = environ[var]
if exists(user_choice):
return user_choice
if os.sep not in user_choice:
candidates.insert(i, user_choice)
for candidate in candidates:
path = _find_executable(candidate, environ)
if path is not None:
return path
raise OSError(
"No suitable editor found. Please set the "
'"EDITOR" or "VISUAL" environment variables'
)
def _find_executable(
candidate: str, environ: Mapping[str, str]
) -> Optional[str]:
    # Assuming this is on the PATH, we need to determine its absolute
# location. Otherwise, ``check_call`` will fail
if not is_posix and splitext(candidate)[1] != ".exe":
candidate += ".exe"
for path in environ.get("PATH", "").split(os.pathsep):
value = join(path, candidate)
if exists(value):
return value
return None
def _default_editors() -> List[str]:
# Look for an editor. Prefer the user's choice by env-var, fall back to
# most commonly installed editor (nano/vim)
if is_posix:
return ["sensible-editor", "editor", "nano", "vim", "code"]
else:
return ["code.exe", "notepad++.exe", "notepad.exe"]
|
49287
|
import clr
clr.AddReferenceToFile('ObjectUtils.dll')
import ObjectUtils
print('[+] Added ObjectUtils')
|
49324
|
import json
import os
from datetime import datetime
import boto3
from aws_lambda_powertools.logging import Logger
logger = Logger()
@logger.inject_lambda_context
def main(event, context):
records = event.get("Records", [])
entries = []
stream_label = os.environ["STREAM_LABEL"]
logger.info(
{"record_count": len(records), "stream": stream_label,}
)
for record in records:
keys = record.get("dynamodb").get("Keys")
pk = keys["pk"]["S"]
sk = keys["sk"]["S"]
        # pk and sk are prefixed with <type>#; everything before the '#' is the attribute type
pk_type = pk[: pk.find("#")]
sk_type = sk[: sk.find("#")]
event_name = record["eventName"]
logger.info(
{
"pk": pk,
"pk_type": pk_type,
"sk": sk,
"sk_type": sk_type,
"event_name": event_name,
}
)
entry = {
"Source": f"{stream_label}",
"Resources": [],
"DetailType": event_name,
"Detail": json.dumps(
{"pk_type": pk_type, "sk_type": sk_type, "record": record}
),
"EventBusName": "default",
}
entries.append(entry)
client = boto3.client("events")
response = client.put_events(Entries=entries)
logger.debug(entries)
logger.info(
{"num_entries": len(records), "failed_entries": response["FailedEntryCount"],}
)
return
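# Shape of the event this handler expects (hedged sketch of a DynamoDB stream
# record; only the fields the code above actually reads are shown):
# {
#     "Records": [
#         {
#             "eventName": "INSERT",
#             "dynamodb": {"Keys": {"pk": {"S": "user#123"}, "sk": {"S": "order#456"}}}
#         }
#     ]
# }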
|
49341
|
import json
import os
import time
from copy import deepcopy
import TransportMaps.Distributions as dist
import TransportMaps.Likelihoods as like
from typing import List, Dict
from matplotlib import pyplot as plt
from factors.Factors import Factor, ExplicitPriorFactor, ImplicitPriorFactor, \
LikelihoodFactor, BinaryFactorMixture, KWayFactor
from sampler.NestedSampling import GlobalNestedSampler
from sampler.SimulationBasedSampler import SimulationBasedSampler
from slam.Variables import Variable, VariableType
from slam.FactorGraph import FactorGraph
from slam.BayesTree import BayesTree, BayesTreeNode
import numpy as np
from sampler.sampler_utils import JointFactor
from utils.Functions import sort_pair_lists
from utils.Visualization import plot_2d_samples
from utils.Functions import sample_dict_to_array, array_order_to_dict
class SolverArgs:
def __init__(self,
elimination_method: str = "natural",
posterior_sample_num: int = 500,
local_sample_num: int = 500,
store_clique_samples: bool = False,
local_sampling_method="direct",
adaptive_posterior_sampling=None,
*args, **kwargs
):
# graph-related and tree-related params
self.elimination_method = elimination_method
self.posterior_sample_num = posterior_sample_num
self.store_clique_samples = store_clique_samples
self.local_sampling_method = local_sampling_method
self.local_sample_num = local_sample_num
self.adaptive_posterior_sampling = adaptive_posterior_sampling
def jsonStr(self):
return json.dumps(self.__dict__)
class CliqueSeparatorFactor(ImplicitPriorFactor):
def sample(self, num_samples: int, **kwargs):
        raise NotImplementedError("implementation depends on density models")
class ConditionalSampler:
def conditional_sample_given_observation(self, conditional_dim,
obs_samples=None,
sample_number=None):
"""
This method returns samples with the dimension of conditional_dim.
        If sample_number is given, samples of the first conditional_dim variables are returned.
If obs_samples is given, samples of the first conditional_dim variables after
the dimension of obs_samples will be returned. obs_samples.shape = (sample num, dim)
Note that the dims here are of the vectorized point on manifolds not the dim of manifold.
"""
raise NotImplementedError("Implementation depends on density estimation method.")
class FactorGraphSolver:
"""
This is the abstract class of factor graph solvers.
It mainly works as:
1. the interface for users to define and solve factor graphs.
2. the maintainer of factor graphs and Bayes tree for incremental inference
3. fitting probabilistic models to the working part of factor graph and Bayes tree
4. inference (sampling) on the entire Bayes tree
    The derived class may rely on different probabilistic modeling approaches.
"""
def __init__(self, args: SolverArgs):
"""
Parameters
----------
elimination_method : string
option of heuristics for variable elimination ordering.
TODO: this can be a dynamic parameter when updating Bayes tree
"""
self._args = args
self._physical_graph = FactorGraph()
self._working_graph = FactorGraph()
self._physical_bayes_tree = None
self._working_bayes_tree = None
self._conditional_couplings = {} # map from Bayes tree clique to flows
self._implicit_factors = {} # map from Bayes tree clique to factor
self._samples = {} # map from variable to samples
self._new_nodes = []
self._new_factors = []
self._clique_samples = {} # map from Bayes tree clique to samples
self._clique_true_obs = {} # map from Bayes tree clique to observations which augments flow models
self._clique_density_model = {} # map from Bayes tree clique to flow model
# map from Bayes tree clique to variable pattern; (Separator,Frontal) in reverse elimination order
self._clique_variable_pattern = {}
self._elimination_ordering = []
self._reverse_ordering_map = {}
self._temp_training_loss = {}
def set_args(self, args: SolverArgs):
raise NotImplementedError("Implementation depends on probabilistic modeling approaches.")
@property
def elimination_method(self) -> str:
return self._args.elimination_method
@property
def elimination_ordering(self) -> List[Variable]:
return self._elimination_ordering
@property
def physical_vars(self) -> List[Variable]:
return self._physical_graph.vars
@property
def new_vars(self) -> List[Variable]:
return self._new_nodes
@property
def working_vars(self) -> List[Variable]:
return self._working_graph.vars
@property
def physical_factors(self) -> List[Factor]:
return self._physical_graph.factors
@property
def new_factors(self) -> List[Factor]:
return self._new_factors
@property
def working_factors(self) -> List[Factor]:
return self._working_graph.factors
@property
def working_factor_graph(self) -> FactorGraph:
return self._working_graph
@property
def physical_factor_graph(self) -> FactorGraph:
return self._physical_graph
@property
def working_bayes_tree(self) -> BayesTree:
return self._working_bayes_tree
@property
def physical_bayes_tree(self) -> BayesTree:
return self._physical_bayes_tree
def generate_natural_ordering(self) -> None:
"""
Generate the ordering by which nodes are added
"""
self._elimination_ordering = self._physical_graph.vars + self._new_nodes
def generate_pose_first_ordering(self) -> None:
"""
Generate the ordering by which nodes are added and lmk eliminated later
"""
natural_order = self._physical_graph.vars + self._new_nodes
pose_list = []
lmk_list = []
for node in natural_order:
if node._type == VariableType.Landmark:
lmk_list.append(node)
else:
pose_list.append(node)
self._elimination_ordering = pose_list + lmk_list
def generate_ccolamd_ordering(self) -> None:
"""
"""
physical_graph_ordering = [var for var in self._elimination_ordering if var not in self._working_graph.vars]
working_graph_ordering = self._working_graph.analyze_elimination_ordering(
method="ccolamd", last_vars=
[[var for var in self._working_graph.vars if
var.type == VariableType.Pose][-1]])
self._elimination_ordering = physical_graph_ordering + working_graph_ordering
def generate_ordering(self) -> None:
"""
Generate the ordering by which Bayes tree should be generated
"""
if self._args.elimination_method == "natural":
self.generate_natural_ordering()
elif self._args.elimination_method == "ccolamd":
self.generate_ccolamd_ordering()
elif self._args.elimination_method == "pose_first":
self.generate_pose_first_ordering()
self._reverse_ordering_map = {
var: index for index, var in
enumerate(self._elimination_ordering[::-1])}
# TODO: Add other ordering methods
def add_node(self, var: Variable = None, name: str = None,
dim: int = None) -> "FactorGraphSolver":
"""
Add a new node
The node has not been added to the physical or current factor graphs
:param var:
:param name: used only when variable is not specified
:param dim: used only when variable is not specified
:return: the current problem
"""
if var:
self._new_nodes.append(var)
else:
self._new_nodes.append(Variable(name, dim))
return self
def add_factor(self, factor: Factor) -> "FactorGraphSolver":
"""
Add a prior factor to specified nodes
The factor has not been added to physical or current factor graphs
:param factor
:return: the current problem
"""
self._new_factors.append(factor)
return self
def add_prior_factor(self, vars: List[Variable],
distribution: dist.Distribution) -> "FactorGraphSolver":
"""
Add a prior factor to specified nodes
The factor has not been added to physical or current factor graphs
:param vars
:param distribution
:return: the current problem
"""
self._new_factors.append(ExplicitPriorFactor(
vars=vars, distribution=distribution))
return self
def add_likelihood_factor(self, vars: List[Variable],
likelihood: like.LikelihoodBase) -> "FactorGraphSolver":
"""
Add a likelihood factor to specified nodes
The factor has not been added to physical or current factor graphs
:param vars
:param likelihood
:return: the current problem
"""
self._new_factors.append(LikelihoodFactor(
vars=vars, log_likelihood=likelihood))
return self
def update_physical_and_working_graphs(self, timer: List[float] = None, device: str = "cpu"
) -> "FactorGraphSolver":
"""
Add all new nodes and factors into the physical factor graph,
retrieve the working factor graph, update Bayes trees
:return: the current problem
"""
start = time.time()
# Determine the affected variables in the physical Bayes tree
old_nodes = set(self.physical_vars)
nodes_of_new_factors = set.union(*[set(factor.vars) for
factor in self._new_factors])
old_nodes_of_new_factors = set.intersection(old_nodes,
nodes_of_new_factors)
# Get the working factor graph
if self._physical_bayes_tree: # if not first step, get sub graph
affected_nodes, sub_bayes_trees = \
self._physical_bayes_tree. \
get_affected_vars_and_partial_bayes_trees(
vars=old_nodes_of_new_factors)
self._working_graph = self._physical_graph.get_sub_factor_graph_with_prior(
variables=affected_nodes,
sub_trees=sub_bayes_trees,
clique_prior_dict=self._implicit_factors)
else:
sub_bayes_trees = set()
for node in self._new_nodes:
self._working_graph.add_node(node)
for factor in self._new_factors:
self._working_graph.add_factor(factor)
        # Get the working Bayes tree
old_ordering = self._elimination_ordering
self.generate_ordering()
self._working_bayes_tree = self._working_graph.get_bayes_tree(
ordering=[var for var in self._elimination_ordering
if var in set(self.working_vars)])
# Update the physical factor graph
for node in self._new_nodes:
self._physical_graph.add_node(node)
for factor in self._new_factors:
self._physical_graph.add_factor(factor)
# Update the physical Bayesian tree
self._physical_bayes_tree = self._working_bayes_tree.__copy__()
self._physical_bayes_tree.append_child_bayes_trees(sub_bayes_trees)
# Delete legacy conditional samplers in the old tree and
# convert the density model w/o separator at leaves to density model w/ separator.
cliques_to_delete = set()
for old_clique in set(self._clique_density_model.keys()).difference(self._physical_bayes_tree.clique_nodes):
for new_clique in self._working_bayes_tree.clique_nodes:
if old_clique.vars == new_clique.vars and [var for var in old_ordering if var in old_clique.vars] == \
[var for var in self._elimination_ordering if var in new_clique.vars]:
# This clique was the root in the old tree but is leaf in the new tree.
# If the ordering of variables remains the same, its density model can be re-used.
# Update the clique to density model dict
self._clique_true_obs[new_clique] = self._clique_true_obs[old_clique]
if old_clique in self._clique_variable_pattern:
self._clique_variable_pattern[new_clique] = self._clique_variable_pattern[old_clique]
if old_clique in self._clique_samples:
self._clique_samples[new_clique] = self._clique_samples[old_clique]
self._clique_density_model[new_clique] = \
self.root_clique_density_model_to_leaf(old_clique, new_clique, device)
# since new clique will be skipped, related factors shall be eliminated beforehand.
# TODO: update _clique_density_model.keys() in which some clique parents change
# TODO: this currently has no impact on results
# TODO: if we store all models or clique-depend values on cliques, this issue will disappear
new_separator_factor = None
if new_clique.separator:
# extract new factor over separator
separator_var_list = sorted(new_clique.separator, key=lambda x: self._reverse_ordering_map[x])
new_separator_factor = \
self.clique_density_to_separator_factor(separator_var_list,
self._clique_density_model[new_clique],
self._clique_true_obs[old_clique])
self._implicit_factors[new_clique] = new_separator_factor
self._working_graph = self._working_graph.eliminate_clique_variables(clique=new_clique,
new_factor=new_separator_factor)
break
cliques_to_delete.add(old_clique)
for old_clique in cliques_to_delete:
del self._clique_density_model[old_clique]
del self._clique_true_obs[old_clique]
if old_clique in self._clique_variable_pattern:
del self._clique_variable_pattern[old_clique]
if old_clique in self._clique_samples:
del self._clique_samples[old_clique]
# Clear all newly added variables and factors
self._new_nodes = []
self._new_factors = []
end = time.time()
if timer is not None:
timer.append(end - start)
return self
def root_clique_density_model_to_leaf(self,
old_clique: BayesTreeNode,
new_clique: BayesTreeNode,
device) -> "ConditionalSampler":
"""
when old clique and new clique have same variables but different division of frontal and separator vars,
recycle the density model in the old clique and convert it to that in the new clique.
"""
raise NotImplementedError("Implementation depends on probabilistic modeling")
def clique_density_to_separator_factor(self,
separator_var_list: List[Variable],
density_model,
true_obs: np.ndarray) -> CliqueSeparatorFactor:
"""
extract marginal of separator variables from clique density as separator factor
"""
raise NotImplementedError("Implementation depends on probabilistic modeling")
def incremental_inference(self,
timer: List[float] = None,
clique_dim_timer: List[List[float]] = None,
*args, **kwargs
):
self.fit_tree_density_models(timer=timer,
clique_dim_timer=clique_dim_timer,
*args, **kwargs)
if self._args.adaptive_posterior_sampling is None:
self._samples = self.sample_posterior(timer=timer, *args, **kwargs)
else:
self._samples = self.adaptive_posterior(timer=timer, *args, **kwargs)
return self._samples
def fit_clique_density_model(self,
clique,
samples,
var_ordering,
timer,
*args, **kwargs) -> "ConditionalSampler":
raise NotImplementedError("Implementation depends on probabilistic modeling.")
def adaptive_posterior(self, timer: List[float] = None, *args, **kwargs
) -> Dict[Variable, np.ndarray]:
"""
Generate samples for all variables
"""
        raise NotImplementedError("implementation depends on density models.")
def fit_tree_density_models(self,
timer: List[float] = None,
clique_dim_timer: List[List[float]] = None,
*args, **kwargs):
"""
By the order of Bayes tree, perform local sampling and training
on all cliques
:return:
"""
self._temp_training_loss = {}
clique_ordering = self._working_bayes_tree.clique_ordering()
total_clique_num = len(clique_ordering)
clique_cnt = 1
before_clique_time = time.time()
while clique_ordering:
start_clique_time = time.time()
clique = clique_ordering.pop()
if clique in self._clique_density_model:
end_clique_time = time.time()
print(f"\tTime for clique {clique_cnt}/{total_clique_num}: " + str(
end_clique_time - start_clique_time) + " sec, "
"total time elapsed: " + str(
end_clique_time - before_clique_time) + " sec")
clique_cnt += 1
if (clique_dim_timer is not None):
clique_dim_timer.append([clique.dim, end_clique_time - before_clique_time])
continue
# local sampling
sampler_start = time.time()
local_samples, sample_var_ordering, true_obs = \
self.clique_training_sampler(clique,
num_samples=self._args.local_sample_num,
method=self._args.local_sampling_method)
sampler_end = time.time()
if timer is not None:
timer.append(sampler_end - sampler_start)
self._clique_true_obs[clique] = true_obs
if self._args.store_clique_samples:
self._clique_samples[clique] = local_samples
local_density_model = \
self.fit_clique_density_model(clique=clique,
samples=local_samples,
var_ordering=sample_var_ordering,
timer=timer)
self._clique_density_model[clique] = local_density_model
new_separator_factor = None
if clique.separator:
# extract new factor over separator
separator_list = sorted(clique.separator,
key=lambda x:
self._reverse_ordering_map[x])
new_separator_factor = self.clique_density_to_separator_factor(separator_list,
local_density_model,
true_obs)
self._implicit_factors[clique] = new_separator_factor
self._working_graph = self._working_graph.eliminate_clique_variables(clique=clique,
new_factor=new_separator_factor)
end_clique_time = time.time()
print(f"\tTime for clique {clique_cnt}/{total_clique_num}: " + str(
end_clique_time - start_clique_time) + " sec, "
"total time elapsed: " + str(
end_clique_time - before_clique_time) + " sec" + ", clique_dim is " + str(clique.dim))
if (clique_dim_timer is not None):
clique_dim_timer.append([clique.dim, end_clique_time - before_clique_time])
clique_cnt += 1
def clique_training_sampler(self, clique: BayesTreeNode, num_samples: int, method: str):
r""" This function returns training samples, simulated variables, and unused observations
"""
graph = self._working_graph.get_clique_factor_graph(clique)
variable_pattern = \
self._working_bayes_tree.clique_variable_pattern(clique)
if method == "direct":
sampler = SimulationBasedSampler(factors=graph.factors, vars=variable_pattern)
samples, var_list, unused_obs = sampler.sample(num_samples)
elif method == "nested" or method == "dynamic nested":
ns_sampler = GlobalNestedSampler(nodes=variable_pattern, factors=graph.factors)
samples = ns_sampler.sample(live_points=num_samples, sampling_method=method)
var_list = variable_pattern
unused_obs = np.array([])
else:
raise ValueError("Unknown sampling method.")
return samples, var_list, unused_obs
def sample_posterior(self, timer: List[float] = None, *args, **kwargs
) -> Dict[Variable, np.ndarray]:
"""
Generate samples for all variables
"""
num_samples = self._args.posterior_sample_num
start = time.time()
stack = [self._physical_bayes_tree.root]
samples = {}
while stack:
# Retrieve the working clique
clique = stack.pop()
# Local sampling
frontal_list = sorted(clique.frontal,
key=lambda x: self._reverse_ordering_map[x])
separator_list = sorted(clique.separator,
key=lambda x: self._reverse_ordering_map[x])
clique_density_model = self._clique_density_model[clique]
obs = self._clique_true_obs[clique]
aug_separator_samples = np.zeros(shape=(num_samples, 0))
if len(obs) != 0:
aug_separator_samples = np.tile(obs, (num_samples, 1))
for var in separator_list:
aug_separator_samples = np.hstack((aug_separator_samples,
samples[var]))
if aug_separator_samples.shape[1] != 0:
frontal_samples = clique_density_model. \
conditional_sample_given_observation(conditional_dim=clique.frontal_dim,
obs_samples=aug_separator_samples)
else: # the root clique
frontal_samples = clique_density_model. \
conditional_sample_given_observation(conditional_dim=clique.frontal_dim,
sample_number=num_samples)
# Dispatch samples
cur_index = 0
for var in frontal_list:
samples[var] = frontal_samples[:,
cur_index: cur_index + var.dim]
cur_index += var.dim
if clique.children:
for child in clique.children:
stack.append(child)
end = time.time()
if timer is not None:
timer.append(end - start)
return samples
def plot2d_posterior(self, title: str = None, xlim=None, ylim=None,
marker_size: float = 1, if_legend: bool = False):
# xlim and ylim are tuples
vars = self._elimination_ordering
# list(self._samples.keys())
len_var = len(vars)
for i in range(len_var):
cur_sample = self._samples[vars[i]]
plt.scatter(cur_sample[:, 0], cur_sample[:, 1], marker=".",
s=marker_size)
if xlim is not None:
plt.xlim(xlim)
if ylim is not None:
plt.ylim(ylim)
if if_legend:
plt.legend([var.name for var in vars])
plt.xlabel('x (m)')
plt.ylabel('y (m)')
if title is not None:
plt.title(title)
fig_handle = plt.gcf()
plt.show()
return fig_handle
def results(self):
return list(self._samples.values()), list(self._samples.keys())
def plot2d_mean_points(self, title: str = None, xlim=None, ylim=None,
if_legend: bool = False):
# xlim and ylim are tuples
vars = self._elimination_ordering
# list(self._samples.keys())
len_var = len(vars)
x_list = []
y_list = []
for i in range(len_var):
cur_sample = self._samples[vars[i]]
x = np.mean(cur_sample[:, 0])
y = np.mean(cur_sample[:, 1])
x_list.append(x)
y_list.append(y)
if xlim is not None:
plt.xlim(xlim)
if ylim is not None:
plt.ylim(ylim)
plt.plot(x_list, y_list)
if if_legend:
plt.legend([var.name for var in vars])
plt.xlabel('x (m)')
plt.ylabel('y (m)')
if title is not None:
plt.title(title)
fig_handle = plt.gcf()
plt.show()
return fig_handle
def plot2d_mean_rbt_only(self, title: str = None, xlim=None, ylim=None,
if_legend: bool = False, fname=None, front_size=None, show_plot=False, **kwargs):
# xlim and ylim are tuples
vars = self._elimination_ordering
# list(self._samples.keys())
len_var = len(vars)
x_list = []
y_list = []
lmk_list = []
for i in range(len_var):
if vars[i]._type == VariableType.Landmark:
lmk_list.append(vars[i])
else:
cur_sample = self._samples[vars[i]]
x = np.mean(cur_sample[:, 0])
y = np.mean(cur_sample[:, 1])
x_list.append(x)
y_list.append(y)
if xlim is not None:
plt.xlim(xlim)
if ylim is not None:
plt.ylim(ylim)
plt.plot(x_list, y_list)
for var in lmk_list:
cur_sample = self._samples[var]
plt.scatter(cur_sample[:, 0], cur_sample[:, 1], label=var.name)
if if_legend:
            if front_size is not None:
                plt.legend(fontsize=front_size)
            else:
                plt.legend()
if front_size is not None:
plt.xlabel('x (m)', fontsize=front_size)
plt.ylabel('y (m)', fontsize=front_size)
else:
plt.xlabel('x (m)')
plt.ylabel('y (m)')
if title is not None:
if front_size is not None:
plt.title(title, fontsize=front_size)
else:
plt.title(title)
fig_handle = plt.gcf()
if fname is not None:
plt.savefig(fname)
if show_plot:
plt.show()
return fig_handle
def plot2d_MAP_rbt_only(self, title: str = None, xlim=None, ylim=None,
if_legend: bool = False, fname=None, front_size=None):
# xlim and ylim are tuples
vars = self._elimination_ordering
jf = JointFactor(self.physical_factors, vars)
# list(self._samples.keys())
all_sample = sample_dict_to_array(self._samples, vars)
log_pdf = jf.log_pdf(all_sample)
max_idx = np.argmax(log_pdf)
map_sample = all_sample[max_idx:max_idx+1]
map_sample_dict = array_order_to_dict(map_sample, vars)
len_var = len(vars)
x_list = []
y_list = []
lmk_list = []
for i in range(len_var):
if vars[i]._type == VariableType.Landmark:
lmk_list.append(vars[i])
else:
cur_sample = map_sample_dict[vars[i]]
x = np.mean(cur_sample[:, 0])
y = np.mean(cur_sample[:, 1])
x_list.append(x)
y_list.append(y)
if xlim is not None:
plt.xlim(xlim)
if ylim is not None:
plt.ylim(ylim)
plt.plot(x_list, y_list)
for var in lmk_list:
cur_sample = map_sample_dict[var]
plt.scatter(cur_sample[:, 0], cur_sample[:, 1], label=var.name)
if if_legend:
            if front_size is not None:
                plt.legend(fontsize=front_size)
            else:
                plt.legend()
if front_size is not None:
plt.xlabel('x (m)', fontsize=front_size)
plt.ylabel('y (m)', fontsize=front_size)
else:
plt.xlabel('x (m)')
plt.ylabel('y (m)')
if title is not None:
if front_size is not None:
plt.title(title, fontsize=front_size)
else:
plt.title(title)
fig_handle = plt.gcf()
if fname is not None:
plt.savefig(fname)
plt.show()
return fig_handle
def plot2d_mean_poses(self, title: str = None, xlim=None, ylim=None,
width: float = 0.05, if_legend: bool = False):
# xlim and ylim are tuples
vars = self._elimination_ordering
# list(self._samples.keys())
len_var = len(vars)
x_list = []
y_list = []
for i in range(len_var):
cur_sample = self._samples[vars[i]]
x = np.mean(cur_sample[:, 0])
y = np.mean(cur_sample[:, 1])
x_list.append(x)
y_list.append(y)
# th_mean = circmean(cur_sample[:,2])
# dx, dy = np.cos(th_mean), np.sin(th_mean)
# plt.arrow(x-dx/2, y-dy/2, dx, dy,
# head_width=4*width,
# width=0.05)
if xlim is not None:
plt.xlim(xlim)
if ylim is not None:
plt.ylim(ylim)
plt.plot(x_list, y_list)
if if_legend:
plt.legend([var.name for var in vars])
plt.xlabel('x (m)')
plt.ylabel('y (m)')
if title is not None:
plt.title(title)
fig_handle = plt.gcf()
plt.show()
return fig_handle
def plot_factor_graph(self):
pass
def plot_bayes_tree(self):
pass
def run_incrementally(case_dir: str, solver: FactorGraphSolver, nodes_factors_by_step, truth=None, traj_plot=False,
plot_args=None, check_root_transform=False) -> None:
run_count = 1
while os.path.exists(f"{case_dir}/run{run_count}"):
run_count += 1
os.mkdir(f"{case_dir}/run{run_count}")
run_dir = f"{case_dir}/run{run_count}"
print("create run dir: " + run_dir)
file = open(f"{run_dir}/parameters", "w+")
params = solver._args.jsonStr()
print(params)
file.write(params)
file.close()
num_batches = len(nodes_factors_by_step)
observed_nodes = []
step_timer = []
step_list = []
posterior_sampling_timer = []
fitting_timer = []
mixture_factor2weights = {}
show_plot = True
if "show_plot" in plot_args and not plot_args["show_plot"]:
show_plot = False
for i in range(num_batches):
step_nodes, step_factors = nodes_factors_by_step[i]
for node in step_nodes:
solver.add_node(node)
for factor in step_factors:
solver.add_factor(factor)
if isinstance(factor, BinaryFactorMixture):
mixture_factor2weights[factor] = []
observed_nodes += step_nodes
step_list.append(i)
step_file_prefix = f"{run_dir}/step{i}"
detailed_timer = []
clique_dim_timer = []
start = time.time()
solver.update_physical_and_working_graphs(timer=detailed_timer)
cur_sample = solver.incremental_inference(timer=detailed_timer, clique_dim_timer=clique_dim_timer)
end = time.time()
step_timer.append(end - start)
print(f"step {i}/{num_batches} time: {step_timer[-1]} sec, "
f"total time: {sum(step_timer)}")
file = open(f"{step_file_prefix}_ordering", "w+")
file.write(" ".join([var.name for var in solver.elimination_ordering]))
file.close()
file = open(f"{step_file_prefix}_split_timing", "w+")
file.write(" ".join([str(t) for t in detailed_timer]))
file.close()
file = open(f"{step_file_prefix}_step_training_loss", "w+")
last_training_loss = json.dumps(solver._temp_training_loss)
file.write(last_training_loss)
file.close()
posterior_sampling_timer.append(detailed_timer[-1])
fitting_timer.append(sum(detailed_timer[1:-1]))
X = np.hstack([cur_sample[var] for var in solver.elimination_ordering])
np.savetxt(fname=step_file_prefix, X=X)
# check transformation
if check_root_transform:
root_clique = solver.physical_bayes_tree.root
root_clique_model = solver._clique_density_model[root_clique]
y = root_clique_model.prior.sample((3000,))
tx = deepcopy(y)
if hasattr(root_clique_model, "flows"):
for f in root_clique_model.flows[::-1]:
tx = f.inverse_given_separator(tx, None)
y = y.detach().numpy()
tx = tx.detach().numpy()
np.savetxt(fname=step_file_prefix + '_root_normal_data', X=y)
np.savetxt(fname=step_file_prefix + '_root_transformed', X=tx)
plt.figure()
x_sort, tx_sort = sort_pair_lists(tx[:,0], y[:,0])
plt.plot(x_sort, tx_sort)
plt.ylabel("T(x)")
plt.xlabel("x")
plt.savefig(f"{step_file_prefix}_transform.png", bbox_inches="tight")
if show_plot: plt.show()
plt.close()
# clique dim and timing
np.savetxt(fname=step_file_prefix + '_dim_time', X=np.array(clique_dim_timer))
if traj_plot:
plot_2d_samples(samples_mapping=cur_sample,
equal_axis=True,
truth={variable: pose for variable, pose in
truth.items() if variable in solver.physical_vars},
truth_factors={factor for factor in solver.physical_factors if
set(factor.vars).issubset(solver.physical_vars)},
title=f'Step {i}',
plot_all_meas=False,
plot_meas_give_pose=[var for var in step_nodes if var.type == VariableType.Pose],
rbt_traj_no_samples=True,
truth_R2=True,
truth_SE2=False,
truth_odometry_color='k',
truth_landmark_markersize=10,
truth_landmark_marker='x',
file_name=f"{step_file_prefix}.png",
**plot_args)
else:
plot_2d_samples(samples_mapping=cur_sample,
equal_axis=True,
truth={variable: pose for variable, pose in
truth.items() if variable in solver.physical_vars},
truth_factors={factor for factor in solver.physical_factors if
set(factor.vars).issubset(solver.physical_vars)},
file_name=f"{step_file_prefix}.png", title=f'Step {i}',
**plot_args)
solver.plot2d_mean_rbt_only(title=f"step {i} posterior", if_legend=False, fname=f"{step_file_prefix}.png", **plot_args)
# solver.plot2d_MAP_rbt_only(title=f"step {i} posterior", if_legend=False, fname=f"{step_file_prefix}.png")
file = open(f"{run_dir}/step_timing", "w+")
file.write(" ".join(str(t) for t in step_timer))
file.close()
file = open(f"{run_dir}/step_list", "w+")
file.write(" ".join(str(s) for s in step_list))
file.close()
file = open(f"{run_dir}/posterior_sampling_timer", "w+")
file.write(" ".join(str(t) for t in posterior_sampling_timer))
file.close()
file = open(f"{run_dir}/fitting_timer", "w+")
file.write(" ".join(str(t) for t in fitting_timer))
file.close()
plt.figure()
plt.plot(np.array(step_list)*5+5, step_timer, 'go-', label='Total')
plt.plot(np.array(step_list)*5+5, posterior_sampling_timer, 'ro-', label='Posterior sampling')
plt.plot(np.array(step_list)*5+5, fitting_timer, 'bd-', label='Learning NF')
plt.ylabel(f"Time (sec)")
plt.xlabel(f"Key poses")
plt.legend()
plt.savefig(f"{run_dir}/step_timing.png", bbox_inches="tight")
if show_plot: plt.show()
plt.close()
if mixture_factor2weights:
# write updated hypothesis weights
hypo_file = open(run_dir + f'/step{i}.hypoweights', 'w+')
plt.figure()
for factor, weights in mixture_factor2weights.items():
hypo_weights = factor.posterior_weights(cur_sample)
line = ' '.join([var.name for var in factor.vars]) + ' : ' + ','.join(
[str(w) for w in hypo_weights])
hypo_file.writelines(line + '\n')
weights.append(hypo_weights)
for i_w in range(len(hypo_weights)):
plt.plot(np.arange(i + 1 - len(weights), i + 1), np.array(weights)[:, i_w], '-o',
label=f"H{i_w}at{factor.observer_var.name}" if not isinstance(factor, KWayFactor) else
f"{factor.observer_var.name} to {factor.observed_vars[i_w].name}")
hypo_file.close()
plt.legend()
plt.xlabel('Step')
plt.ylabel('Hypothesis weights')
plt.savefig(run_dir + f'/step{i}_hypoweights.png', dpi=300)
if show_plot: plt.show()
plt.close()
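# Typical end-to-end usage sketch (hedged; FactorGraphSolver is abstract, so a
# concrete subclass implementing the density-model hooks is assumed):
#   solver = MyFlowSolver(SolverArgs(elimination_method="pose_first"))
#   solver.add_node(x0).add_node(x1)                 # x0, x1: hypothetical Variables
#   solver.add_factor(odom_factor)                   # hypothetical Factor instance
#   solver.update_physical_and_working_graphs()
#   samples = solver.incremental_inference()         # dict: Variable -> np.ndarray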
|
49349
|
from plugins.adversary.app.commands.command import CommandLine
from typing import Callable, Tuple
from plugins.adversary.app.commands import parsers
def files() -> Tuple[CommandLine, Callable[[str], None]]:
command = 'powershell -command "&{$filetype = @(\\"*.docx\\",\\"*.pdf\\",\\"*.xlsx\\"); $startdir = ' \
'\\"c:\\\\Users\\\\\\"; for($k=0;$k -lt $filetype.length; $k++){ $core = dir $startdir\($filetype[$k]) ' \
'-Recurse | Select @{Name=\\"Path\\";Expression={$_.Fullname -as [string]}}; foreach ($alpha in $core) ' \
'{$filename = $alpha.Path -as [string]; [Byte[]] $corrupt_file = [System.IO.File]::ReadAllBytes(' \
'$filename); [Byte[]] $key_file = [System.IO.File]::ReadAllBytes($(' \
'-join($filename, \\".old\\"))); for($i=0; $i -lt $key_file.Length; $i++) { $corrupt_file[$i] = ' \
'$key_file[$i];} [System.IO.File]::WriteAllBytes($(resolve-path $filename), $corrupt_file); ' \
'Remove-Item $(-join($filename,\\".old\\"))}}}"'
return CommandLine('cmd /c {}'.format(command)), parsers.footprint.recover_files
def password(user: str, password: str) -> Tuple[CommandLine, Callable[[str], None]]:
command = 'net user ' + user + ' ' + password
return CommandLine('cmd /c {}'.format(command)), parsers.footprint.password
|
49353
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class TripletLoss(nn.Module):
def __init__(self,margin = 0.2, sigma = 0.3):
super(TripletLoss,self).__init__()
self.margin = margin
self.sigma = sigma
def forward(self,f_anchor,f_positive, f_negative): # (-1,c)
        d_ap = torch.norm(f_anchor - f_positive, dim = 1) / self.sigma # shape (-1,)
d_an = torch.norm(f_anchor - f_negative, dim = 1) / self.sigma
return torch.clamp(torch.exp(d_ap) - torch.exp(d_an) + self.margin,0).sum()
class MetricSoftmaxLoss(nn.Module):
def __init__(self):
super(MetricSoftmaxLoss,self).__init__()
def forward(self,f_anchor,f_positive, f_negative):
d_ap = torch.norm(f_anchor - f_positive, dim = 1)
d_an = torch.norm(f_anchor - f_negative, dim = 1)
return -torch.log(torch.exp(d_an) / (torch.exp(d_an) + torch.exp(d_ap))).sum()
def hard_samples_mining(f_anchor,f_positive, f_negative, margin):
d_ap = torch.norm(f_anchor - f_positive, dim = 1)
d_an = torch.norm(f_anchor - f_negative, dim = 1)
idx = (d_ap - d_an) < margin
return idx
def renorm(x):
return x.renorm(2,0,1e-5).mul(1e5)
class MetricLoss(nn.Module):
def __init__(self,margin = 0.2, sigma = 0.3, l = 1.):
super(MetricLoss, self).__init__()
self.l = l
self.margin = margin
self.trip = TripletLoss(margin, sigma)
self.soft = MetricSoftmaxLoss()
def forward(self, f_anchor,f_positive, f_negative):
f_anchor, f_positive, f_negative = renorm(f_anchor), renorm(f_positive), renorm(f_negative)
with torch.no_grad():
idx = hard_samples_mining(f_anchor, f_positive, f_negative, self.margin)
#print(idx)
loss_trip = self.trip(f_anchor, f_positive, f_negative)
loss_soft = self.soft(f_anchor, f_positive, f_negative)
#print(loss_trip.item(), loss_soft.item())
return loss_trip + self.l * loss_soft
#return self.trip(f_anchor[idx], f_positive[idx], f_negative[idx]) + self.l * self.soft(f_anchor[idx], f_positive[idx], f_negative[idx])
if __name__ == "__main__":
x = torch.randn(3,17)
y = torch.randn(3,17)
z = torch.randn(3,17)
loss_fn = MetricLoss()
res = loss_fn(x,y,z)
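    # Print the scalar loss so the smoke test actually shows a result.
    print(res.item())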
|
49358
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import scripts.conll18_ud_eval as ud_eval
from scripts.reinsert_compounds import reinsert_compounds
def evaluate(gold_filename, sys_filename, metric):
""""""
reinsert_compounds(gold_filename, sys_filename)
gold_conllu_file = ud_eval.load_conllu_file(gold_filename)
sys_conllu_file = ud_eval.load_conllu_file(sys_filename)
evaluation = ud_eval.evaluate(gold_conllu_file, sys_conllu_file)
return evaluation[metric].f1
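# Usage sketch (hedged): the metric keys follow the CoNLL 2018 shared-task
# evaluator, e.g. "LAS" or "UAS":
#   las_f1 = evaluate("gold.conllu", "system.conllu", "LAS")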
|
49370
|
from injector import inject
from domain.connection.CheckDatabaseConnection.CheckDatabaseConnectionCommand import CheckDatabaseConnectionCommand
from domain.connection.CheckDatabaseConnection.CheckDatabaseConnectionRequest import CheckDatabaseConnectionRequest
from infrastructure.api.ResourceBase import ResourceBase
from infrastructure.api.decorators.Controller import controller
from infrastructure.cqrs.Dispatcher import Dispatcher
@controller()
class CheckConnectionDatabaseResource(ResourceBase):
@inject
def __init__(self,
dispatcher: Dispatcher,
*args, **kwargs):
super().__init__(*args, **kwargs)
self.dispatcher = dispatcher
def post(self, req: CheckDatabaseConnectionRequest):
"""
Check Database Connection
"""
command = CheckDatabaseConnectionCommand(request=req)
self.dispatcher.dispatch(command)
|
49434
|
from .anchor_head_multi import AnchorHeadMulti
from .anchor_head_single import AnchorHeadSingle
from .anchor_head_template import AnchorHeadTemplate
from .point_head_box import PointHeadBox
from .point_head_simple import PointHeadSimple
from .point_intra_part_head import PointIntraPartOffsetHead
from .anchor_head_seg import AnchorHeadSeg
from .center_head import CenterHead
from .mm_head import MMHead
from .e2e_head import E2EHead
from .fusion_head import FusionHead
from .attention_fusion_head import AttnFusionHead
from .e2e_fusion_head import E2EFusionHead
from .e2e_seqfuse_head import E2ESeqFusionHead
from .e2e_seq_head import E2ESeqHead
from .e2e_refine_head import E2ERefinementHead
from .e2e_seq_token_head import E2ESeqTokenHead
__all__ = {
'AnchorHeadTemplate': AnchorHeadTemplate,
'AnchorHeadSingle': AnchorHeadSingle,
'PointIntraPartOffsetHead': PointIntraPartOffsetHead,
'PointHeadSimple': PointHeadSimple,
'PointHeadBox': PointHeadBox,
'AnchorHeadMulti': AnchorHeadMulti,
'AnchorHeadSeg': AnchorHeadSeg,
'CenterHead': CenterHead,
'MMHead': MMHead,
'E2EHead': E2EHead,
'FusionHead': FusionHead,
'AttnFusionHead': AttnFusionHead,
'E2EFusionHead': E2EFusionHead,
'E2ESeqFusionHead': E2ESeqFusionHead,
'E2ESeqHead': E2ESeqHead,
'E2ESeqTokenHead': E2ESeqTokenHead,
'E2ERefinementHead': E2ERefinementHead
}
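# Typical lookup pattern for this registry (hedged sketch; model_cfg is a
# hypothetical config object whose NAME field selects the head class):
#   head_cls = __all__[model_cfg.NAME]
#   head = head_cls(model_cfg=model_cfg, ...)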
|
49440
|
BOT_NAME = 'p5_downloader_middleware_handson'
SPIDER_MODULES = ['p5_downloader_middleware_handson.spiders']
NEWSPIDER_MODULE = 'p5_downloader_middleware_handson.spiders'
ROBOTSTXT_OBEY = True
DOWNLOADER_MIDDLEWARES = {
'p5_downloader_middleware_handson.middlewares.SeleniumDownloaderMiddleware': 543,
}
SELENIUM_ENABLED = True
|
49467
|
import numpy as np
def load_lda(path):
rows = []
with open(path, 'r') as f:
for line in f:
line = line.strip(" []\n")
if line:
                # np.fromstring(text, sep=' ') is deprecated; split + array is equivalent
                rows.append(np.array(line.split(), dtype=np.float32))
matrix = np.array(rows).T
return matrix[:-1], matrix[-1]
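# Expected input (hedged): a Kaldi-style text matrix, one row per line, wrapped
# in brackets, e.g.
#   [ 0.1 0.2 0.3
#     0.4 0.5 0.6 ]
# After transposing, the last row is treated as the offset/bias term, so
# load_lda returns (linear_transform, offset).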
|
49475
|
import unittest
from nose.plugins.attrib import attr
from testing.utils import DumpResponse
import cloudsigma.resource as resource
@attr('acceptance_test')
class FirewallPolicyTest(unittest.TestCase):
def setUp(self):
unittest.TestCase.setUp(self)
self.client = resource.FirewallPolicy()
self.dump_response = DumpResponse(clients=[self.client])
self.base_policy = {
"name": "My awesome policy",
"rules": [
{
"dst_ip": "23",
"direction": "out",
"action": "drop",
"comment": "Drop traffic from the VM to IP address 172.16.17.32/32"
},
{
"src_ip": "172.16.31.10/24",
"ip_proto": "tcp",
"dst_port": "22",
"direction": "in",
"action": "accept",
"comment": "Allow SSH traffic to the VM from our office in Dubai"
},
{
"ip_proto": "tcp",
"dst_port": "22",
"direction": "in",
"action": "drop",
"comment": "Drop all other SSH traffic to the VM"
},
{
"src_ip": "!172.16.17.32",
"ip_proto": "udp",
"direction": "in",
"action": "drop",
"comment": "Drop all UDP traffic to the VM, not originating from 172.16.17.32"
},
{
"ip_proto": "tcp",
"dst_port": "!1:1024",
"direction": "in",
"action": "drop",
"comment": "Drop any traffic, to the VM with destination port not between 1-1024"
}
]
}
self._clean_policies()
def tearDown(self):
self._clean_policies()
def _clean_policies(self):
policies = self.client.list_detail()
server_client = resource.Server()
deleted_servers = []
for policy in policies:
for server in policy['servers']:
if server['uuid'] not in deleted_servers:
deleted_servers.append(server['uuid'])
server_client.delete(server['uuid'])
self.client.delete(policy['uuid'])
@attr('docs_snippets')
def test_get_schema(self):
with self.dump_response('fwpolicy_schema'):
self.client.get_schema()
@attr('docs_snippets')
def test_crud_policy(self):
base_policy = self.base_policy.copy()
with self.dump_response('fwpolicy_create_minimal'):
min_policy = self.client.create({})
self.assertDictContainsSubset({}, min_policy)
with self.dump_response('fwpolicy_create_full'):
full_policy = self.client.create(base_policy)
        # Check that the applied rules look like the ones returned from the
        # API. A plain dict-subset comparison will not work, because the API
        # alters/normalizes some of the data.
        for idx, rule in enumerate(base_policy['rules']):
            for key in rule:
                match_a = str(full_policy['rules'][idx][key])
                match_b = rule[key]
print(match_a, match_b)
self.assertTrue(match_a.startswith(match_b))
with self.dump_response('fwpolicy_list'):
self.client.list()
with self.dump_response('fwpolicy_list_detail'):
res = self.client.list_detail()
self.assertEqual(len(res), 2)
updated_policy = full_policy.copy()
updated_policy['rules'] = [updated_policy['rules'][0]]
with self.dump_response('fwpolicy_get'):
self.client.get(full_policy['uuid'])
with self.dump_response('fwpolicy_update'):
up_pol = self.client.update(full_policy['uuid'], updated_policy)
self.assertEqual(len(up_pol['rules']), 1)
with self.dump_response('fwpolicy_delete'):
self.client.delete(full_policy['uuid'])
self.client.delete(min_policy['uuid'])
res = self.client.list()
self.assertEqual(len(res), 0)
@attr('docs_snippets')
def test_server_fw_rules(self):
policy = self.client.create(self.base_policy)
server_def = {
'name': 'FirewalledServer',
'cpu': 1000,
'mem': 512 * 1024 ** 2,
'vnc_password': '<PASSWORD>',
"nics": [
{
"firewall_policy": policy['uuid'],
"ip_v4_conf": {
"ip": None,
"conf": "dhcp"
},
"model": "virtio",
}
],
}
server_client = resource.Server()
with DumpResponse(clients=[server_client])("fwpolicy_server_attach"):
server = server_client.create(server_def)
self.assertEqual(
server['nics'][0]['firewall_policy']['uuid'], policy['uuid']
)
self.client.delete(policy['uuid'])
server = server_client.get(server['uuid'])
self.assertIsNone(server['nics'][0]['firewall_policy'])
server_client.delete(server['uuid'])
|
49514
|
import os
import random
import datetime
import argparse
import numpy as np
from tqdm import tqdm
from model.unetdsbn import Unet2D
from utils.loss import dice_loss1
from datasets.dataset import Dataset, ToTensor, CreateOnehotLabel
import torch
import torchvision.transforms as tfs
from torch import optim
from torch.optim import Adam
from torch.backends import cudnn
from torch.nn import DataParallel
from torch.utils.data import DataLoader
parser = argparse.ArgumentParser('Dual Normalization U-Net Training')
parser.add_argument('--data_dir', type=str, default='./data/brats/npz_data')
parser.add_argument('--train_domain_list_1', nargs='+')
parser.add_argument('--train_domain_list_2', nargs='+')
parser.add_argument('--result_dir', type=str, default='./results/unet_dn')
parser.add_argument('--n_classes', type=int, default=2)
parser.add_argument('--batch_size', type=int, default=16)
parser.add_argument('--n_epochs', type=int, default=50)
parser.add_argument('--save_step', type=int, default=10)
parser.add_argument('--lr', type=float, default=0.001)
parser.add_argument('--seed', type=int, default=1337, help='random seed')
parser.add_argument('--gpu_ids', type=str, default='0')
parser.add_argument('--deterministic', dest='deterministic', action='store_true')
args = parser.parse_args()
def repeat_dataloader(iterable):
""" repeat dataloader """
while True:
for x in iterable:
yield x
def worker_init_fn(worker_id):
random.seed(args.seed+worker_id)
if __name__== '__main__':
start_time = datetime.datetime.now()
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_ids
base_dir = args.data_dir
batch_size = args.batch_size
save_step = args.save_step
lr = args.lr
train_domain_list_1 = args.train_domain_list_1
train_domain_list_2 = args.train_domain_list_2
max_epoch = args.n_epochs
result_dir = args.result_dir
n_classes = args.n_classes
log_dir = os.path.join(result_dir, 'log')
model_dir = os.path.join(result_dir, 'model')
if args.deterministic:
cudnn.benchmark = False
cudnn.deterministic = True
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
if not os.path.exists(model_dir):
os.makedirs(model_dir)
dataloader_train = []
model = Unet2D(num_classes=n_classes, norm='dsbn', num_domains=2)
params_num = sum(p.numel() for p in model.parameters())
print("\nModle's Params: %.3fM" % (params_num / 1e6))
model = DataParallel(model).cuda()
optimizer = Adam(params=model.parameters(), lr=lr, betas=(0.9, 0.999))
exp_lr = optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.99)
dataset_1 = Dataset(base_dir=base_dir, split='train', domain_list=train_domain_list_1,
transforms=tfs.Compose([
CreateOnehotLabel(num_classes=n_classes),
ToTensor()
]))
dataloader_1 = DataLoader(dataset_1, batch_size=batch_size, shuffle=True, num_workers=8, pin_memory=True, drop_last=True, worker_init_fn=worker_init_fn)
dataloader_train.append(dataloader_1)
dataset_2 = Dataset(base_dir=base_dir, split='train', domain_list=train_domain_list_2,
transforms=tfs.Compose([
CreateOnehotLabel(num_classes=n_classes),
ToTensor()
]))
dataloader_2 = DataLoader(dataset_2, batch_size=batch_size, shuffle=True, num_workers=8, pin_memory=True, drop_last=True, worker_init_fn=worker_init_fn)
dataloader_train.append(dataloader_2)
for epoch_num in range(max_epoch):
data_iter = [repeat_dataloader(dataloader_train[i]) for i in range(2)]
print('Epoch: {}, LR: {}'.format(epoch_num, round(exp_lr.get_last_lr()[0], 6)))
tbar = tqdm(dataloader_train[0], ncols=150)
model.train()
for i, batch in enumerate(tbar):
### get all domains' sample_batch ###
sample_batches = [batch]
other_sample_batches = [next(data_iter[i]) for i in range(1, 2)]
sample_batches += other_sample_batches
total_loss = 0
count = 0
for train_idx in range(2):
count += 1
sample_data, sample_label = sample_batches[train_idx]['image'].cuda(), sample_batches[train_idx]['onehot_label'].cuda()
outputs_soft = model(sample_data, domain_label=train_idx*torch.ones(sample_data.shape[0], dtype=torch.long))
loss = dice_loss1(outputs_soft, sample_label)
total_loss += loss.item()
optimizer.zero_grad()
loss.backward()
optimizer.step()
tbar.set_description('Total Loss: {}'.format(round((total_loss / count), 6)))
exp_lr.step()
if (epoch_num + 1) % save_step == 0:
model_save_model_path = os.path.join(model_dir, 'epoch_{}.pth'.format(epoch_num))
torch.save(model.module.state_dict(), model_save_model_path)
print('save model to {}'.format(model_save_model_path))
    model_save_model_path = os.path.join(model_dir, 'final_model.pth')
torch.save(model.module.state_dict(), model_save_model_path)
print('save model to {}'.format(model_save_model_path))
end_time = datetime.datetime.now()
    print('Finished running. Total time: {:.2f} hours'.format((end_time - start_time).total_seconds() / 3600))
|
49520
|
from aorist import (
Attribute,
NaturalNumber,
StringIdentifier,
DateString,
POSIXTimestamp,
PositiveFloat,
default_tabular_schema,
RowStruct,
StaticDataTable,
DataSchema,
StorageSetup,
RemoteStorageSetup,
Storage,
RemoteStorage,
RemoteLocation,
CSVEncoding,
Encoding,
DataSet,
DatumTemplate,
Asset,
WebLocation,
FileBasedStorageLayout,
CSVHeader,
FileHeader,
APIOrFileLayout,
SingleFileLayout,
FreeText,
Empty,
FIPSStateCode,
IntegerNumber,
)
attributes = [
Attribute(DateString("Date")),
Attribute(StringIdentifier("State")),
Attribute(NaturalNumber("Cases_Total")),
Attribute(NaturalNumber("Cases_White")),
Attribute(NaturalNumber("Cases_Black")),
Attribute(NaturalNumber("Cases_Latinx")),
Attribute(NaturalNumber("Cases_Asian")),
Attribute(NaturalNumber("Cases_AIAN")),
Attribute(NaturalNumber("Cases_NHPI")),
Attribute(NaturalNumber("Cases_Multiracial")),
Attribute(NaturalNumber("Cases_Other")),
Attribute(NaturalNumber("Cases_Unknown")),
Attribute(NaturalNumber("Cases_Ethnicity_Hispanic")),
Attribute(NaturalNumber("Cases_Ethnicity_NonHispanic")),
Attribute(NaturalNumber("Cases_Ethnicity_Unknown")),
Attribute(NaturalNumber("Deaths_Total")),
Attribute(NaturalNumber("Deaths_White")),
Attribute(NaturalNumber("Deaths_Black")),
Attribute(NaturalNumber("Deaths_Latinx")),
Attribute(NaturalNumber("Deaths_Asian")),
Attribute(NaturalNumber("Deaths_AIAN")),
Attribute(NaturalNumber("Deaths_NHPI")),
Attribute(NaturalNumber("Deaths_Multiracial")),
Attribute(NaturalNumber("Deaths_Other")),
Attribute(NaturalNumber("Deaths_Unknown")),
Attribute(NaturalNumber("Deaths_Ethnicity_Hispanic")),
Attribute(NaturalNumber("Deaths_Ethnicity_NonHispanic")),
Attribute(NaturalNumber("Deaths_Ethnicity_Unknown")),
Attribute(NaturalNumber("Hosp_Total")),
Attribute(NaturalNumber("Hosp_White")),
Attribute(NaturalNumber("Hosp_Black")),
Attribute(NaturalNumber("Hosp_Latinx")),
Attribute(NaturalNumber("Hosp_Asian")),
Attribute(NaturalNumber("Hosp_AIAN")),
Attribute(NaturalNumber("Hosp_NHPI")),
Attribute(NaturalNumber("Hosp_Multiracial")),
Attribute(NaturalNumber("Hosp_Other")),
Attribute(NaturalNumber("Hosp_Unknown")),
Attribute(NaturalNumber("Hosp_Ethnicity_Hispanic")),
Attribute(NaturalNumber("Hosp_Ethnicity_NonHispanic")),
Attribute(NaturalNumber("Hosp_Ethnicity_Unknown")),
Attribute(NaturalNumber("Tests_Total")),
Attribute(NaturalNumber("Tests_White")),
Attribute(NaturalNumber("Tests_Black")),
Attribute(NaturalNumber("Tests_Latinx")),
Attribute(NaturalNumber("Tests_Asian")),
Attribute(NaturalNumber("Tests_AIAN")),
Attribute(NaturalNumber("Tests_NHPI")),
Attribute(NaturalNumber("Tests_Multiracial")),
Attribute(NaturalNumber("Tests_Other")),
Attribute(NaturalNumber("Tests_Unknown")),
Attribute(NaturalNumber("Tests_Ethnicity_Hispanic")),
Attribute(NaturalNumber("Tests_Ethnicity_NonHispanic")),
Attribute(NaturalNumber("Tests_Ethnicity_Unknown")),
]
trcdt_datum = RowStruct(
name="the_racial_covid_data_tracker_datum",
attributes=attributes,
)
trcdt_schema = default_tabular_schema(
DatumTemplate(trcdt_datum), attributes
)
table = Asset(StaticDataTable(
name="the_racial_covid_data_tracker_table",
schema=DataSchema(trcdt_schema),
setup=StorageSetup(RemoteStorageSetup(
remote=Storage(RemoteStorage(
location=RemoteLocation(
WebLocation(
address=("https://docs.google.com/spreadsheets/d/e/2PACX-1vS8SzaERcKJOD"
"_EzrtCDK1dX1zkoMochlA9iHoHg_RSw3V8bkpfk1mpw4pfL5RdtSOyx_oScsUt"
"yXyk/pub?gid=43720681&single=true&output=csv"),
)
),
layout=APIOrFileLayout(
FileBasedStorageLayout(
SingleFileLayout()
),
),
encoding=Encoding(CSVEncoding(header=FileHeader(
CSVHeader(num_lines=1)
))),
)),
)),
tag="the_racial_covid_data_tracker",
))
trcdt_dataset = DataSet(
name="The-covid-racial-data-tracker",
description="""
The COVID Racial Data Tracker is a collaboration between the COVID
Tracking Project and the Boston University Center for Antiracist
Research. Together, they’re gathering the most complete and up-to
-date race and ethnicity data on COVID-19 in the United States.
""",
source_path=__file__,
datum_templates=[DatumTemplate(trcdt_datum)],
assets={
"The COVID Racial Data Tracker data": table,
},
access_policies=[]
)
|
49529
|
import argparse
import pickle
from utils.hit_rate_utils import NewHitRateEvaluator
from utils.constants import EVAL_SPLITS_DICT
from lib.refer import REFER
def threshold_with_confidence(exp_to_proposals, conf):
results = {}
for exp_id, proposals in exp_to_proposals.items():
assert len(proposals) >= 1
sorted_proposals = sorted(proposals, key=lambda p: p['score'], reverse=True)
thresh_proposals = [sorted_proposals[0]]
for prop in sorted_proposals[1:]:
if prop['score'] > conf:
thresh_proposals.append(prop)
else:
break
results[exp_id] = thresh_proposals
return results
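# Hedged toy example of the behavior above: with conf=0.5 and
#   {7: [{'score': 0.9}, {'score': 0.6}, {'score': 0.4}]}
# the top-scoring proposal is always kept, 0.6 survives the threshold, and
# the loop breaks at 0.4 (so anything ranked below it is dropped as well).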
def main(args):
dataset_splitby = '{}_{}'.format(args.dataset, args.split_by)
eval_splits = EVAL_SPLITS_DICT[dataset_splitby]
# Load proposals
proposal_path = 'cache/proposals_{}_{}_{}.pkl'.format(args.m, args.dataset, args.tid)
print('loading {} proposals from {}...'.format(args.m, proposal_path))
with open(proposal_path, 'rb') as f:
proposal_dict = pickle.load(f)
# Load refer
refer = REFER('data/refer', dataset=args.dataset, splitBy=args.split_by)
# Evaluate hit rate
print('Hit rate on {}\n'.format(dataset_splitby))
evaluator = NewHitRateEvaluator(refer, top_N=None, threshold=args.thresh)
print('conf: {:.3f}'.format(args.conf))
for split in eval_splits:
exp_to_proposals = proposal_dict[split]
exp_to_proposals = threshold_with_confidence(exp_to_proposals, args.conf)
proposal_per_ref, hit_rate = evaluator.eval_hit_rate(split, exp_to_proposals)
print('[{:5s}] hit rate: {:.2f} @ {:.2f}'.format(split, hit_rate*100, proposal_per_ref))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--m', type=str, required=True)
parser.add_argument('--dataset', default='refcoco')
parser.add_argument('--split-by', default='unc')
parser.add_argument('--tid', type=str, required=True)
parser.add_argument('--thresh', type=float, default=0.5)
parser.add_argument('--conf', type=float, required=True)
main(parser.parse_args())
|
49629
|
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^add/$', views.add_exploit, name='crits-exploits-views-add_exploit'),
url(r'^edit/cve/$', views.edit_exploit_cve, name='crits-exploits-views-edit_exploit_cve'),
url(r'^edit/name/(?P<id_>\S+)/$', views.edit_exploit_name, name='crits-exploits-views-edit_exploit_name'),
url(r'^details/(?P<id_>\S+)/$', views.exploit_detail, name='crits-exploits-views-exploit_detail'),
url(r'^remove/(?P<id_>\S+)/$', views.remove_exploit, name='crits-exploits-views-remove_exploit'),
url(r'^list/$', views.exploits_listing, name='crits-exploits-views-exploits_listing'),
url(r'^list/(?P<option>\S+)/$', views.exploits_listing, name='crits-exploits-views-exploits_listing'),
]
|
49694
|
import json
import random
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import Column, Integer, String, MetaData, Table
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.sql import select
import monitor_db
import monitor_logger
import monitor_util
Base = declarative_base()
url = 'mysql+mysqlconnector://hawkeye:Hawkeye#[email protected]:3306/wingx_hawkeye'
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = url
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
db = SQLAlchemy(app)
logger = monitor_logger.get_logger(__name__)
# http://flask-sqlalchemy.pocoo.org/2.3/
# http://docs.sqlalchemy.org/en/latest/
# SQLAlchemy orm
class Monitor(Base):
__tablename__ = 't_credit_monitor'
id = Column('id', Integer, primary_key=True)
credit_type = Column('credit_type', String(128))
query_type = Column('query_type', String(128))
credit_status = Column('credit_status', String(128))
monitor_time = Column('monitor_time', String(128))
elapsed_time = Column('elapsed_time', String(128))
create_time = Column('create_time', String(128))
def __init__(self, id, credit_type, query_type, credit_status, monitor_time, elapsed_time, create_time):
self.id = id
self.credit_type = credit_type
self.query_type = query_type
self.credit_status = credit_status
self.monitor_time = monitor_time
self.elapsed_time = elapsed_time
self.create_time = create_time
def __repr__(self):
return '<id is %s, creditType is %s, queryType is %s, creditStatus is %s, monitorTime is %s, elapsedTime is %s>' % (
self.id, self.credit_type, self.query_type, self.credit_status, self.monitor_time, self.elapsed_time)
# Flask-SQLAlchemy
class FlaskMonitor(db.Model):
__tablename__ = 't_credit_monitor'
id = Column('id', Integer, primary_key=True)
credit_type = Column('credit_type', String(128))
query_type = Column('query_type', String(128))
credit_status = Column('credit_status', String(128))
monitor_time = Column('monitor_time', String(128))
elapsed_time = Column('elapsed_time', String(128))
create_time = Column('create_time', String(128))
def __init__(self, id, credit_type, query_type, credit_status, monitor_time, elapsed_time, create_time):
self.id = id
self.credit_type = credit_type
self.query_type = query_type
self.credit_status = credit_status
self.monitor_time = monitor_time
self.elapsed_time = elapsed_time
self.create_time = create_time
def __repr__(self):
return '<id is %s, creditType is %s, queryType is %s, creditStatus is %s, monitorTime is %s, elapsedTime is %s>' % (
self.id, self.credit_type, self.query_type, self.credit_status, self.monitor_time, self.elapsed_time)
# SQLAlchemy core
metadata = MetaData()
T_Monitor = Table('t_credit_monitor', metadata, Column('id', Integer, primary_key=True)
, Column('credit_type', String(128))
, Column('query_type', String(128))
, Column('credit_status', String(128))
, Column('monitor_time', String(128))
, Column('elapsed_time', String(128))
, Column('create_time', String(128)))
# http://docs.sqlalchemy.org/en/latest/
# SQLAlchemy orm
def get_monitor_with_orm():
s = monitor_db.get_connection_session(url)
print(s.query(Monitor).limit(2).all())
print(s.query(Monitor).first())
print(type(s.query(Monitor)))
print(s.query(Monitor).count())
# SQLAlchemy core
def get_monitor_with_core():
conn = monitor_db.get_connection_with_url(url)
sql = select([T_Monitor])
result = conn.execute(sql)
print(result.rowcount)
print(type(result.fetchall()))
# using flask_sqlalchemy
def get_monitor_flask_sqlalchemy(page=1, limit=10):
try:
logger.debug('get_monitor_flask_sqlalchemy: page is %s, limit is %s' % (page, limit))
return FlaskMonitor.query.paginate(page, limit)
except Exception as e:
logger.debug("Exception in get_monitor_flask_sqlalchemy %s" % e)
return None
# add monitor
def add_monitor(d):
logger.debug('add monitor is %s' % d)
conn = monitor_db.get_connection_with_url(url)
d = json.loads(d)
# Content-Type: application/json
conn.execute(T_Monitor.insert(), [{
'credit_type': d['credit_type']
, 'query_type': d['query_type']
, 'credit_status': d['credit_status']
, 'elapsed_time': int(random.random() * 100)
}])
# # Content-Type: application/x-www-form-urlencoded; charset=UTF-8
# for key in d.keys():
# logger.debug("form data is %s" % json.loads(key))
# d_dict = json.loads(key)
# conn.execute(T_Monitor.insert(), [{
# 'credit_type': d_dict['credit_type']
# , 'query_type': d_dict['query_type']
# , 'credit_status': d_dict['credit_status']
# , 'elapsed_time': int(random.random() * 100)
# }])
if __name__ == '__main__':
print(get_monitor_flask_sqlalchemy(1, 2).items)
|
49709
|
import numpy as np
import kinpy as kp
arm = kp.build_serial_chain_from_urdf(
open("ur/ur.urdf").read(),
root_link_name="base_link",
end_link_name="ee_link",
)
fk_solution = arm.forward_kinematics(np.zeros(len(arm.get_joint_parameter_names())))
print(fk_solution)
|
49732
|
from pygears import gear, datagear, alternative, module
from pygears.typing.qround import get_out_type, get_cut_bits
from pygears.typing import Uint, code, Bool, Int, Fixp, Ufixp
@datagear
def qround(din,
*,
fract=0,
cut_bits=b'get_cut_bits(din, fract)',
signed=b'din.signed') -> b'get_out_type(din, fract)':
res = code(din, Int if signed else Uint) + (Bool(1) << (cut_bits - 1))
return code(res >> cut_bits, module().tout)
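# Hedged numeric illustration (ignoring the fixed-point typing): with
# cut_bits == 2 the computation adds the half-LSB constant (1 << 1 == 2) and
# shifts right, i.e. round-half-up on the bits being cut:
#     13 -> (13 + 2) >> 2 == 3        # 13/4 = 3.25 rounds down
#     14 -> (14 + 2) >> 2 == 4        # 14/4 = 3.5  rounds up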
# @datagear
# def qround_even(din,
# *,
# fract=0,
# cut_bits=b'get_cut_bits(din, fract)',
# signed=b'din.signed') -> b'get_out_type(din, fract)':
# val_coded = code(din, Int if signed else Uint)
# round_bit = val_coded[cut_bits]
# res = val_coded + Uint([round_bit] + [~round_bit] * (cut_bits - 1))
# return code(res[cut_bits:])
@gear
def truncate(din, *, nbits=2) -> b'din':
pass
@gear
def round_half_up(din, *, nbits=2) -> b'din':
pass
@gear
def round_to_zero(din, *, nbits=2) -> b'din':
pass
@gear
async def round_to_even(din, *, nbits=2) -> b'din':
async with din as d:
return round(float(d) / (2**nbits)) * (2**nbits)
|
49733
|
import unittest
from mhgan import *
class Test_Topology(unittest.TestCase):
def test_io_tensors(self):
gan = WGAN(Generator([100], [28, 28, 1]), Discriminator())
        self.assertEqual(type(gan.G), tf.Tensor)
        self.assertEqual(type(gan.D), tf.Tensor)
self.assertEqual(type(gan.x), tf.Tensor)
self.assertEqual(type(gan.z), tf.Tensor)
|
49761
|
import numpy as np
from numpy.testing import assert_allclose
import pytest
from mne._ola import _COLA, _Interp2, _Storer
def test_interp_2pt():
"""Test our two-point interpolator."""
n_pts = 200
assert n_pts % 50 == 0
feeds = [ # test a bunch of feeds to make sure they don't break things
[n_pts],
[50] * (n_pts // 50),
[10] * (n_pts // 10),
[5] * (n_pts // 5),
[2] * (n_pts // 2),
[1] * n_pts,
]
# ZOH
values = np.array([10, -10])
expected = np.full(n_pts, 10)
for feed in feeds:
expected[-1] = 10
interp = _Interp2([0, n_pts], values, 'zero')
out = np.concatenate([interp.feed(f)[0] for f in feed])
assert_allclose(out, expected)
interp = _Interp2([0, n_pts - 1], values, 'zero')
expected[-1] = -10
out = np.concatenate([interp.feed(f)[0] for f in feed])
assert_allclose(out, expected)
# linear and inputs of different sizes
values = [np.arange(2)[:, np.newaxis, np.newaxis], np.array([20, 10])]
expected = [
np.linspace(0, 1, n_pts, endpoint=False)[np.newaxis, np.newaxis, :],
np.linspace(20, 10, n_pts, endpoint=False)]
for feed in feeds:
interp = _Interp2([0, n_pts], values, 'linear')
outs = [interp.feed(f) for f in feed]
outs = [np.concatenate([o[0] for o in outs], axis=-1),
np.concatenate([o[1] for o in outs], axis=-1)]
assert_allclose(outs[0], expected[0], atol=1e-7)
assert_allclose(outs[1], expected[1], atol=1e-7)
# cos**2 and more interesting bounds
values = np.array([10, -10])
expected = np.full(n_pts, 10.)
expected[-5:] = -10
cos = np.cos(np.linspace(0, np.pi / 2., n_pts - 9,
endpoint=False))
expected[4:-5] = cos ** 2 * 20 - 10
for feed in feeds:
interp = _Interp2([4, n_pts - 5], values, 'cos2')
out = np.concatenate([interp.feed(f)[0] for f in feed])
assert_allclose(out, expected, atol=1e-7)
out = interp.feed(10)[0]
assert_allclose(out, [values[-1]] * 10, atol=1e-7)
# hann and broadcasting
n_hann = n_pts - 9
expected[4:-5] = np.hanning(2 * n_hann + 1)[n_hann:-1] * 20 - 10
expected = np.array([expected, expected[::-1] * 0.5])
values = np.array([values, values[::-1] * 0.5]).T
for feed in feeds:
interp = _Interp2([4, n_pts - 5], values, 'hann')
out = np.concatenate([interp.feed(f)[0] for f in feed], axis=-1)
assert_allclose(out, expected, atol=1e-7)
# one control point and None support
values = [np.array([10]), None]
for start in [0, 50, 99, 100, 1000]:
interp = _Interp2([start], values, 'zero')
out, none = interp.feed(n_pts)
assert none is None
expected = np.full(n_pts, 10.)
assert_allclose(out, expected)
@pytest.mark.parametrize('ndim', (1, 2, 3))
def test_cola(ndim):
"""Test COLA processing."""
sfreq = 1000.
rng = np.random.RandomState(0)
def processor(x):
return (x / 2.,) # halve the signal
for n_total in (999, 1000, 1001):
signal = rng.randn(n_total)
out = rng.randn(n_total) # shouldn't matter
for _ in range(ndim - 1):
signal = signal[np.newaxis]
out = out[np.newaxis]
for n_samples in (99, 100, 101, 102,
n_total - n_total // 2 + 1, n_total):
for window in ('hann', 'bartlett', 'boxcar', 'triang'):
# A few example COLA possibilities
n_overlaps = ()
if window in ('hann', 'bartlett') or n_samples % 2 == 0:
n_overlaps += ((n_samples + 1) // 2,)
if window == 'boxcar':
n_overlaps += (0,)
for n_overlap in n_overlaps:
# can pass callable or ndarray
for storer in (out, _Storer(out)):
cola = _COLA(processor, storer, n_total, n_samples,
n_overlap, sfreq, window)
n_input = 0
# feed data in an annoying way
while n_input < n_total:
next_len = min(rng.randint(1, 30),
n_total - n_input)
cola.feed(signal[..., n_input:n_input + next_len])
n_input += next_len
assert_allclose(out, signal / 2., atol=1e-7)
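# Hedged note on why the assertion holds: COLA (constant overlap-add) windows
# satisfy sum_k w[n - k*hop] == const, so splitting the signal into windowed
# chunks, processing each chunk, and overlap-adding reconstructs the processed
# signal exactly -- here `out` matches `signal / 2` however the input is fed.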
|
49791
|
from reaction import *
import rospy
class ReactionPublishRosOutNode(Reaction):
"""A reaction that is able to publish a message on rosout."""
def __init__(self, autonomy_level, message, loglevel):
super(ReactionPublishRosOutNode, self).__init__(None, autonomy_level)
#: The message to publish.
#: :type: string
self._message = message
if loglevel == "info":
log = rospy.loginfo
elif loglevel == "debug":
log = rospy.logdebug
elif loglevel == "err":
log = rospy.logerr
elif loglevel == "warn":
log = rospy.logwarn
elif loglevel == "fatal":
log = rospy.logfatal
else:
# loglevel does not make sense
rospy.logwarn(
"A reaction wants to log on loglevel"
+ " '%s', but that loglevel does not exist." % loglevel
+ " Setting loglevel to info.")
            log = rospy.loginfo
#: The logging function to use.
self.__log = log
def execute_reaction(self):
"""Log the reaction message at a specific loglevel."""
self.__log(self._message)
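# Hedged usage sketch (argument values are hypothetical):
#     reaction = ReactionPublishRosOutNode(autonomy_level=0,
#                                          message="watchdog fired",
#                                          loglevel="warn")
#     reaction.execute_reaction()  # logs the message via rospy.logwarn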
|
49808
|
import traceback
from alog import debug, info, error
from mykonos.core.core import Core
from mykonos.keywords.management_device import ManagementDevice
from mykonos.keywords.decorators import Parallel
class LocatorElement(Core):
def __init__(self):
self.device_mobile = self.device()
self.management_device = ManagementDevice()
@Parallel.device_check
def get_locator(self, device=None, *argument, **settings):
"""Access locator from device.
**selector support:**
* text, textContains, textMatches, textStartsWith
* className, classNameMatches
        * description, descriptionContains, descriptionMatches, descriptionStartsWith
* checkable, checked, clickable, longClickable
* scrollable, enabled,focusable, focused, selected
* packageName, packageNameMatches
* resourceId, resourceIdMatches
* index, instance
**Example:**
|| ${locator}= Get Locator | text=sample text
With Device:
|| @{emulator} = | 192.168.1.1 | 192.168.1.2
|| ${locator}= Get Locator | text=sample text | devices_parallel=@{emulator}
"""
if device is not None:
get_device = self.management_device.scan_current_device(device)
return get_device(*argument, **settings)
else:
return self.device_mobile(*argument, **settings)
def get_child(self, parent, *argument, **settings):
"""Access child locator from device.
**Example:**
|| ${locator}= Get Locator | text=sample text
|| ${child}= Get Child | parent=${locator} text=sample text
"""
return parent.child(*argument, **settings)
def get_sibling(self, parent, *argument, **settings):
"""Access sibling locator from device.
**Example:**
|| ${locator}= Get Locator | text=sample text
        || ${sibling}= Get Sibling | parent=${locator} text=sample text
"""
return parent.sibling(*argument, **settings)
def left_position(self, parent, *argument, **settings):
"""Access left position from device.
**Example:**
|| ${locator}= Get Locator | text=sample text
|| ${left}= Left Position | parent=${locator} text=sample text
"""
return parent.left(*argument, **settings)
def right_position(self, parent, *argument, **settings):
"""Access left position from device.
**Example:**
|| ${locator}= Get Locator | text=sample text
|| ${right}= Right Position | parent=${locator} text=sample text
"""
return parent.right(*argument, **settings)
def up_position(self, parent, *argument, **settings):
"""Access left position from device.
**Example:**
|| ${locator}= Get Locator | text=sample text
|| ${up}= Up Position | parent=${locator} text=sample text
"""
return parent.up(*argument, **settings)
def down_position(self, parent, *argument, **settings):
"""Access left position from device.
**Example:**
|| ${locator}= Get Locator | text=sample text
|| ${down}= Down Position | parent=${locator} text=sample text
"""
return parent.down(*argument, **settings)
@Parallel.device_check
def get_locator_by_index(self, device=None, *argument, **settings):
"""Get Element locator by index on device.
**Example:**
|| Get Locator By Index | text=sample_text | index=1
|| ${locator}= Get Locator | text=sample text
|| Get Locator By Index | locator=${locator} | index=1
"""
index = int(settings['index'])
del settings['index']
if 'locator' in settings:
locator = settings['locator']
del settings['locator']
return locator[index]
else:
if device is not None:
get_device = self.management_device.scan_current_device(device)
return get_device(*argument, **settings)[index]
else:
return self.device_mobile(*argument, **settings)[index]
def handlers(self, action, function):
"""Call customized function on device.
**Example:**
|| Handlers | action=on | function=sample_function
"""
if 'on' in action:
return self.device_mobile.handlers.on(function)
elif 'off' in action:
return self.device_mobile.handlers.off(function)
class WatcherElement(Core):
"""Class is used to perform some actions when selector cannot be found."""
def __init__(self):
self.device_mobile = self.device()
def __watcher_register(self, **settings):
name = settings['name']
del settings['name']
return self.device_mobile.watcher(name)
def watcher(self, **settings):
"""Watcher is registered when a selector cannot be find.
name=name of watcher
WHEN, className=sample_class
WHEN, packageName=sample_package
**Example:**
|| ${sample_watcher}=name=sample_watcher | className=sample_class
|| Click Element | watcher=${sample_watcher} | text=sample_text
"""
name_watcher = settings['name']
del settings['name']
self.__watcher = self.__watcher_register(name=name_watcher)
return self.__watcher.when(**settings)
def watcher_action(self, action, **settings):
"""Watcher Action is used to running the action on the watcher.
run=Force to run all watchers
remove=Remvoe watchers
reset=Reset all triggered watchers
list=List all watchers
triggered=Check if there is any watcher triggered
**Example:**
|| Watcher Action | action=run
|| Watcher Action | action=remove
|| Watcher Action | action=remove | name=sample_watcher
|| Watcher Action | action=reset
|| Watcher Action | action=list
|| Watcher Action | action=triggered
"""
if 'run' in action:
return self.device_mobile.watchers.run()
elif 'remove' in action:
if 'name' in settings:
name = settings['name']
del settings['name']
return self.device_mobile.watchers.remove(name)
else:
return self.device_mobile.watchers.remove()
elif 'list' in action:
return self.device_mobile.watchers
elif 'reset' in action:
return self.device_mobile.watchers.reset()
elif 'triggered' in action:
return self.device_mobile.watchers.triggered
|
49851
|
from confluent_kafka import TopicPartition as GroupPartitionMetadata
from confluent_kafka.admin import BrokerMetadata
from confluent_kafka.admin import GroupMember as GroupMemberMetadata
from confluent_kafka.admin import GroupMetadata, PartitionMetadata, TopicMetadata
from kaskade.kafka.models import (
Broker,
Group,
GroupMember,
GroupPartition,
Partition,
Topic,
)
def metadata_to_broker(metadata: BrokerMetadata) -> Broker:
return Broker(id=metadata.id, host=metadata.host, port=metadata.port)
def metadata_to_group(metadata: GroupMetadata) -> Group:
return Group(
id=metadata.id,
broker=metadata_to_broker(metadata.broker),
state=metadata.state,
members=[],
partitions=[],
)
def metadata_to_group_member(metadata: GroupMemberMetadata) -> GroupMember:
return GroupMember(
id=metadata.id,
group="",
client_id=metadata.client_id,
client_host=metadata.client_host,
)
def metadata_to_group_partition(metadata: GroupPartitionMetadata) -> GroupPartition:
return GroupPartition(
id=metadata.partition,
topic=metadata.topic,
offset=metadata.offset,
group="",
high=0,
low=0,
)
def metadata_to_partition(metadata: PartitionMetadata) -> Partition:
return Partition(
id=metadata.id,
leader=metadata.leader,
replicas=metadata.replicas,
isrs=metadata.isrs,
high=0,
low=0,
)
def metadata_to_topic(metadata: TopicMetadata) -> Topic:
name = metadata.topic
return Topic(
name=name,
groups=[],
partitions=[],
)
|
49873
|
import KratosMultiphysics.KratosUnittest as KratosUnittest
import KratosMultiphysics
import KratosMultiphysics.TrilinosApplication as KratosTrilinos
class TestTrilinosMatrix(KratosUnittest.TestCase):
def test_resize(self):
comm = KratosTrilinos.CreateEpetraCommunicator(KratosMultiphysics.DataCommunicator.GetDefault())
space = KratosTrilinos.TrilinosSparseSpace()
pb = space.CreateEmptyVectorPointer(comm)
space.ResizeVector(pb,2)
n = space.Size(pb.GetReference())
self.assertEqual(n,2)
if __name__ == '__main__':
KratosUnittest.main()
|
49925
|
if True:
import numpy as np
d = 3
K = 50
N = 10 ** 6
a = np.zeros(3)
b = np.ones(3)
orders = np.array([K for i in range(d)])
coeffs = np.random.random([k + 2 for k in orders])
points = np.random.random((N, d)) # each line is a vector
points_c = points.T.copy() # each column is a vector
vals = np.zeros(N)
    print(points.max())
    print(points.min())
import time
from alternative_implementations import *
from eval_cubic_splines_cython import vec_eval_cubic_spline_3 as rr
vec_eval_cubic_spline_3(a, b, orders, coeffs, points, vals) # warmup
vec_eval_cubic_spline_3_inlined(a, b, orders, coeffs, points, vals) # warmup
vec_eval_cubic_spline_3_inlined_columns(
a, b, orders, coeffs, points_c, vals
) # warmup
vec_eval_cubic_spline_3_kernel(a, b, orders, coeffs, points, vals) # warmup
vec_eval_cubic_spline_3_inlined_lesswork(orders, coeffs, points, vals, Ad, dAd)
# rr(a,b,orders,coeffs,points,vals,Ad,dAd)
rr(a, b, orders, coeffs, points, vals)
t1 = time.time()
vec_eval_cubic_spline_3(a, b, orders, coeffs, points, vals)
t2 = time.time()
vec_eval_cubic_spline_3_inlined(a, b, orders, coeffs, points, vals)
t3 = time.time()
vec_eval_cubic_spline_3_inlined_columns(a, b, orders, coeffs, points_c, vals)
t4 = time.time()
vec_eval_cubic_spline_3_kernel(a, b, orders, coeffs, points, vals)
t5 = time.time()
vec_eval_cubic_spline_3_inlined_lesswork(orders, coeffs, points, vals, Ad, dAd)
t6 = time.time()
# rr(a,b,orders,coeffs,points,vals,Ad,dAd)
rr(a, b, orders, coeffs, points, vals)
t7 = time.time()
print("one function call per point: {}".format(t2 - t1))
print("inlined (points in rows): {}".format(t3 - t2))
print("inlined (points in columns): {}".format(t4 - t3))
print("kernel: {}".format(t5 - t4))
print("less work: {}".format(t6 - t5))
print("cython: {}".format(t7 - t6))
    print(vals[:10])
|
49947
|
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import patches
def get_ellipse_params(rho,M):
"""
Returns ellipse params (excl center point)
"""
#eigenvalue decomposition to get the axes
w,v=np.linalg.eigh(M/rho)
    try:
        #let the smaller eigenvalue define the width (major axis*2!)
        width = 2/float(np.sqrt(w[0]))
        height = 2/float(np.sqrt(w[1]))
        #the angle of the ellipse is defined by the eigenvector assigned to the smallest eigenvalue (because this defines the major axis (width of the ellipse))
        angle = np.rad2deg(np.arctan2(v[:,0][1],v[:,0][0]))
    except ZeroDivisionError:
        raise ValueError("parameters do not represent an ellipse.")
    return width,height,angle
def get_ellipse_patch(px,py,rho,M,alpha_val=1,linec="red",facec="none",linest="solid"):
"""
return an ellipse patch
"""
w,h,a = get_ellipse_params(rho,M)
return patches.Ellipse((px,py), w, h, a, alpha=alpha_val,ec=linec,facecolor=facec,linestyle=linest)
def plot_ellipse(px,py,rho, M, save_to=None, show=True):
p=get_ellipse_patch(px,py,rho,M)
fig, ax = plt.subplots()
ax.add_patch(p)
l=np.max([p.width,p.height])
ax.set_xlim(px-l/2,px+l/2)
ax.set_ylim(py-l/2,py+l/2)
ax.grid(True)
if not (save_to is None):
plt.savefig(save_to)
if show:
plt.show()
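# A minimal sanity check, assuming M is the matrix of the quadratic form
# x^T (M/rho) x <= 1: for M = identity and rho = 4 this set is a disk of
# radius 2, so both axis lengths come out as 4.
if __name__ == '__main__':
    w, h, a = get_ellipse_params(4.0, np.eye(2))
    print(w, h, a)  # expected: 4.0 4.0 (angle depends on eigenvector sign)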
|
49971
|
import FWCore.ParameterSet.Config as cms
ctppsRPAlignmentCorrectionsDataESSourceXML = cms.ESSource("CTPPSRPAlignmentCorrectionsDataESSourceXML",
verbosity = cms.untracked.uint32(0),
MeasuredFiles = cms.vstring(),
RealFiles = cms.vstring(),
MisalignedFiles = cms.vstring()
)
|
49972
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from config import gamma, lr
def flat_grad(grads):
grad_flatten = []
for grad in grads:
grad_flatten.append(grad.view(-1))
grad_flatten = torch.cat(grad_flatten)
return grad_flatten
def flat_hessian(hessians):
hessians_flatten = []
for hessian in hessians:
hessians_flatten.append(hessian.contiguous().view(-1))
hessians_flatten = torch.cat(hessians_flatten).data
return hessians_flatten
def flat_params(model):
params = []
for param in model.parameters():
params.append(param.data.view(-1))
params_flatten = torch.cat(params)
return params_flatten
def update_model(model, new_params):
index = 0
for params in model.parameters():
params_length = len(params.view(-1))
new_param = new_params[index: index + params_length]
new_param = new_param.view(params.size())
params.data.copy_(new_param)
index += params_length
def kl_divergence(net, old_net, states):
policy = net(states)
old_policy = old_net(states).detach()
kl = old_policy * torch.log(old_policy / policy)
kl = kl.sum(1, keepdim=True)
return kl
def fisher_vector_product(net, states, p, cg_damp=0.1):
kl = kl_divergence(net, net, states)
kl = kl.mean()
kl_grad = torch.autograd.grad(kl, net.parameters(), create_graph=True) # create_graph is True if we need higher order derivative products
kl_grad = flat_grad(kl_grad)
kl_grad_p = (kl_grad * p.detach()).sum()
kl_hessian_p = torch.autograd.grad(kl_grad_p, net.parameters())
kl_hessian_p = flat_hessian(kl_hessian_p)
return kl_hessian_p + cg_damp * p.detach()
def conjugate_gradient(net, states, loss_grad, n_step=10, residual_tol=1e-10):
x = torch.zeros(loss_grad.size())
r = loss_grad.clone()
p = loss_grad.clone()
r_dot_r = torch.dot(r, r)
for i in range(n_step):
A_dot_p = fisher_vector_product(net, states, p)
alpha = r_dot_r / torch.dot(p, A_dot_p)
x += alpha * p
r -= alpha * A_dot_p
new_r_dot_r = torch.dot(r,r)
        beta = new_r_dot_r / r_dot_r
        p = r + beta * p
r_dot_r = new_r_dot_r
if r_dot_r < residual_tol:
break
return x
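# Hedged note: the loop above is plain conjugate gradient solving F x = g,
# where g is loss_grad and F is the (damped) Fisher matrix applied implicitly
# by fisher_vector_product (F v is obtained as the gradient of (grad KL) . v).
# The solution x = F^{-1} g is the natural-gradient direction used in
# train_model below (new_params = params + lr * step_dir).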
class TNPG(nn.Module):
def __init__(self, num_inputs, num_outputs):
super(TNPG, self).__init__()
self.t = 0
self.num_inputs = num_inputs
self.num_outputs = num_outputs
self.fc_1 = nn.Linear(num_inputs, 128)
self.fc_2 = nn.Linear(128, num_outputs)
for m in self.modules():
if isinstance(m, nn.Linear):
                nn.init.xavier_uniform_(m.weight)
def forward(self, input):
x = torch.tanh(self.fc_1(input))
        policy = F.softmax(self.fc_2(x), dim=-1)
return policy
@classmethod
def train_model(cls, net, transitions):
states, actions, rewards, masks = transitions.state, transitions.action, transitions.reward, transitions.mask
states = torch.stack(states)
actions = torch.stack(actions)
rewards = torch.Tensor(rewards)
masks = torch.Tensor(masks)
returns = torch.zeros_like(rewards)
running_return = 0
for t in reversed(range(len(rewards))):
running_return = rewards[t] + gamma * running_return * masks[t]
returns[t] = running_return
policies = net(states)
policies = policies.view(-1, net.num_outputs)
policy_actions = (policies * actions.detach()).sum(dim=1)
loss = (policy_actions * returns).mean()
loss_grad = torch.autograd.grad(loss, net.parameters())
loss_grad = flat_grad(loss_grad)
step_dir = conjugate_gradient(net, states, loss_grad.data)
params = flat_params(net)
new_params = params + lr * step_dir
update_model(net, new_params)
return -loss
def get_action(self, input):
policy = self.forward(input)
policy = policy[0].data.numpy()
action = np.random.choice(self.num_outputs, 1, p=policy)[0]
return action
|
49973
|
import json
import os
import sys
import jsonpatch
import unittest
import pytest
from deepdiff import DeepDiff
from mock import patch
from dump.helper import create_template_dict, sort_lists
from dump.plugins.port import Port
from dump.match_infra import MatchEngine, ConnectionPool
from swsscommon.swsscommon import SonicV2Connector
# Location for dedicated db's used for UT
module_tests_path = os.path.dirname(__file__)
dump_tests_path = os.path.join(module_tests_path, "../")
tests_path = os.path.join(dump_tests_path, "../")
dump_test_input = os.path.join(tests_path, "dump_input")
port_files_path = os.path.join(dump_test_input, "port")
# Define the mock files to read from
dedicated_dbs = {}
dedicated_dbs['CONFIG_DB'] = os.path.join(port_files_path, "config_db.json")
dedicated_dbs['APPL_DB'] = os.path.join(port_files_path, "appl_db.json")
dedicated_dbs['ASIC_DB'] = os.path.join(port_files_path, "asic_db.json")
dedicated_dbs['STATE_DB'] = os.path.join(port_files_path, "state_db.json")
def populate_mock(db, db_names):
for db_name in db_names:
db.connect(db_name)
# Delete any default data
db.delete_all_by_pattern(db_name, "*")
with open(dedicated_dbs[db_name]) as f:
mock_json = json.load(f)
for key in mock_json:
for field, value in mock_json[key].items():
db.set(db_name, key, field, value)
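# Hedged sketch of the JSON layout populate_mock consumes (hypothetical key
# and fields): a mapping of redis keys to field/value dicts, e.g.
#   {"PORT|Ethernet176": {"admin_status": "up", "speed": "100000"}}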
@pytest.fixture(scope="class", autouse=True)
def match_engine():
print("SETUP")
os.environ["VERBOSE"] = "1"
# Monkey Patch the SonicV2Connector Object
from ...mock_tables import dbconnector
db = SonicV2Connector()
    # populate the db with mock data
db_names = list(dedicated_dbs.keys())
try:
populate_mock(db, db_names)
except Exception as e:
assert False, "Mock initialization failed: " + str(e)
# Initialize connection pool
conn_pool = ConnectionPool()
DEF_NS = '' # Default Namespace
conn_pool.cache = {DEF_NS: {'conn': db,
'connected_to': set(db_names)}}
# Initialize match_engine
match_engine = MatchEngine(conn_pool)
yield match_engine
print("TEARDOWN")
os.environ["VERBOSE"] = "0"
@pytest.mark.usefixtures("match_engine")
class TestPortModule:
def test_working_state(self, match_engine):
"""
Scenario: When the config is properly applied and propagated
"""
params = {Port.ARG_NAME: "Ethernet176", "namespace": ""}
m_port = Port(match_engine)
returned = m_port.execute(params)
expect = create_template_dict(dbs=["CONFIG_DB", "APPL_DB", "ASIC_DB", "STATE_DB"])
expect["CONFIG_DB"]["keys"].append("PORT|Ethernet176")
expect["APPL_DB"]["keys"].append("PORT_TABLE:Ethernet176")
expect["STATE_DB"]["keys"].append("PORT_TABLE|Ethernet176")
expect["ASIC_DB"]["keys"].append("ASIC_STATE:SAI_OBJECT_TYPE_PORT:oid:0x100000000036a")
expect["ASIC_DB"]["keys"].append("ASIC_STATE:SAI_OBJECT_TYPE_HOSTIF:oid:0xd000000000a4d")
ddiff = DeepDiff(sort_lists(returned), sort_lists(expect), ignore_order=True)
assert not ddiff, ddiff
def test_missing_asic_port(self, match_engine):
"""
Scenario: When the config was applied and just the SAI_OBJECT_TYPE_PORT is missing
"""
params = {Port.ARG_NAME: "Ethernet160", "namespace": ""}
m_port = Port(match_engine)
returned = m_port.execute(params)
expect = create_template_dict(dbs=["CONFIG_DB", "APPL_DB", "ASIC_DB", "STATE_DB"])
expect["CONFIG_DB"]["keys"].append("PORT|Ethernet160")
expect["APPL_DB"]["keys"].append("PORT_TABLE:Ethernet160")
expect["STATE_DB"]["keys"].append("PORT_TABLE|Ethernet160")
expect["ASIC_DB"]["keys"].append("ASIC_STATE:SAI_OBJECT_TYPE_HOSTIF:oid:0xd000000000a49")
expect["ASIC_DB"]["tables_not_found"].append("ASIC_STATE:SAI_OBJECT_TYPE_PORT")
ddiff = DeepDiff(sort_lists(returned), sort_lists(expect), ignore_order=True)
assert not ddiff, ddiff
def test_missing_asic_hostif(self, match_engine):
"""
Scenario: When the config was applied and it did not propagate to ASIC DB
"""
params = {Port.ARG_NAME: "Ethernet164", "namespace": ""}
m_port = Port(match_engine)
returned = m_port.execute(params)
expect = create_template_dict(dbs=["CONFIG_DB", "APPL_DB", "ASIC_DB", "STATE_DB"])
expect["CONFIG_DB"]["keys"].append("PORT|Ethernet164")
expect["APPL_DB"]["keys"].append("PORT_TABLE:Ethernet164")
expect["STATE_DB"]["keys"].append("PORT_TABLE|Ethernet164")
expect["ASIC_DB"]["tables_not_found"].append("ASIC_STATE:SAI_OBJECT_TYPE_PORT")
expect["ASIC_DB"]["tables_not_found"].append("ASIC_STATE:SAI_OBJECT_TYPE_HOSTIF")
ddiff = DeepDiff(returned, expect, ignore_order=True)
assert not ddiff, ddiff
def test_missing_state_and_appl(self, match_engine):
"""
Scenario: When the config was applied and it did not propagate to other db's
"""
params = {Port.ARG_NAME: "Ethernet156", "namespace": ""}
m_port = Port(match_engine)
returned = m_port.execute(params)
expect = create_template_dict(dbs=["CONFIG_DB", "APPL_DB", "ASIC_DB", "STATE_DB"])
expect["CONFIG_DB"]["keys"].append("PORT|Ethernet156")
expect["APPL_DB"]["tables_not_found"].append("PORT_TABLE")
expect["STATE_DB"]["tables_not_found"].append("PORT_TABLE")
expect["ASIC_DB"]["tables_not_found"].append("ASIC_STATE:SAI_OBJECT_TYPE_PORT")
expect["ASIC_DB"]["tables_not_found"].append("ASIC_STATE:SAI_OBJECT_TYPE_HOSTIF")
ddiff = DeepDiff(returned, expect, ignore_order=True)
assert not ddiff, ddiff
def test_no_port(self, match_engine):
"""
Scenario: When no entry for the port is present in any of the db's
"""
params = {Port.ARG_NAME: "Ethernet152", "namespace": ""}
m_port = Port(match_engine)
returned = m_port.execute(params)
expect = create_template_dict(dbs=["CONFIG_DB", "APPL_DB", "ASIC_DB", "STATE_DB"])
expect["CONFIG_DB"]["tables_not_found"].append("PORT")
expect["APPL_DB"]["tables_not_found"].append("PORT_TABLE")
expect["STATE_DB"]["tables_not_found"].append("PORT_TABLE")
expect["ASIC_DB"]["tables_not_found"].append("ASIC_STATE:SAI_OBJECT_TYPE_PORT")
expect["ASIC_DB"]["tables_not_found"].append("ASIC_STATE:SAI_OBJECT_TYPE_HOSTIF")
ddiff = DeepDiff(returned, expect, ignore_order=True)
assert not ddiff, ddiff
def test_all_args(self, match_engine):
"""
Scenario: Verify Whether the get_all_args method is working as expected
"""
params = {}
m_port = Port(match_engine)
returned = m_port.get_all_args("")
expect = ["Ethernet156", "Ethernet160", "Ethernet164", "Ethernet176"]
ddiff = DeepDiff(expect, returned, ignore_order=True)
assert not ddiff, ddiff
|
49976
|
from neural_processes.generic_map import GenericMap
class BaseMap(GenericMap):
'''
Parametrizing the mapping used in Neural Processes
f = BaseMap(..., deterministic=True)
y = f(x,z)
-- or --
f = BaseMap(..., deterministic=False)
y_mean, y_std = f(x,z)
The PETS paper (data efficient model-based RL...)
gets best results when the models are not
deterministic
Assuming all inputs and outputs are flat
if deterministic, output layer has no activation function
if stochastic, outputs mean and **LOG** diag covariance for a Gaussian
'''
def __init__(
self,
z_dim,
input_dims,
output_dims,
siamese_input=True,
num_siamese_input_layers=1,
siamese_input_layer_dim=128,
num_hidden_layers=1,
hidden_dim=128,
siamese_output=True,
num_siamese_output_layers=1,
siamese_output_layer_dim=128,
act='relu',
deterministic=False,
use_bn=False
):
all_input_dims = [z_dim] + input_dims
super(BaseMap, self).__init__(
all_input_dims,
output_dims,
siamese_input=siamese_input,
num_siamese_input_layers=num_siamese_input_layers,
siamese_input_layer_dim=siamese_input_layer_dim,
num_hidden_layers=num_hidden_layers,
hidden_dim=hidden_dim,
siamese_output=siamese_output,
num_siamese_output_layers=num_siamese_output_layers,
siamese_output_layer_dim=siamese_output_layer_dim,
act=act,
deterministic=deterministic,
use_bn=use_bn
)
def forward(self, z, inputs):
'''
Output is:
deterministic: a list
not: a list of lists
'''
all_inputs = [z] + inputs
return super(BaseMap, self).forward(all_inputs)
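# Hedged usage sketch (GenericMap internals are not shown here; names and
# shapes are hypothetical). Per the docstring above, the deterministic map
# returns a list of outputs and the stochastic one a list of
# [mean, log diag covariance] pairs:
#     f = BaseMap(z_dim=16, input_dims=[4], output_dims=[2], deterministic=True)
#     outputs = f(z, [x])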
# class MazeConvBaseMap():
# '''
# Maps actions to (1, h, w) using a linear map and concats
# it to the input image which is then processed down with convolutions.
# with upconvolution brought back up to the output size
# '''
# def __init__(
# self,
# kernel_sizes,
# num_channels,
# strides,
# paddings,
# hidden_sizes,
# input_size,
# # output_size, outputs are same dims as input_size and a scalar for the reward
# action_dim,
# init_w=3e-3,
# hidden_activation=F.relu,
# output_activation=identity,
# hidden_init=ptu.fanin_init,
# b_init_value=0.1,
# ):
# self.save_init_params(locals())
# super().__init__()
# self.kernel_sizes = kernel_sizes
# self.num_channels = num_channels
# self.strides = strides
# self.paddings = paddings
# self.hidden_activation = hidden_activation
# self.output_activation = output_activation
# self.convs = []
# self.fcs = []
# in_c = input_size[0]
# in_h = input_size[1]
# for k, c, s, p in zip(kernel_sizes, num_channels, strides, paddings):
# conv = nn.Conv2d(in_c, c, k, stride=s, padding=p)
# hidden_init(conv.weight)
# conv.bias.data.fill_(b_init_value)
# self.convs.append(conv)
# out_h = int(math.floor(
# 1 + (in_h + 2*p - k)/s
# ))
# in_c = c
# in_h = out_h
# in_dim = in_c * in_h * in_h
# for h in hidden_sizes:
# fc = nn.Linear(in_dim, h)
# in_dim = h
# hidden_init(fc.weight)
# fc.bias.data.fill_(b_init_value)
# self.fcs.append(fc)
# self.last_fc = nn.Linear(in_dim, output_size)
# self.last_fc.weight.data.uniform_(-init_w, init_w)
# self.last_fc.bias.data.uniform_(-init_w, init_w)
# def forward(self, input, return_preactivations=False):
# h = input
# for conv in self.convs:
# h = conv(h)
# h = self.hidden_activation(h)
# h = h.view(h.size(0), -1)
# for i, fc in enumerate(self.fcs):
# h = fc(h)
# h = self.hidden_activation(h)
# preactivation = self.last_fc(h)
# output = self.output_activation(preactivation)
# if return_preactivations:
# return output, preactivation
# else:
# return output
|
50004
|
import os
import launch
import launch_ros.actions
from ament_index_python.packages import get_package_share_directory
def generate_launch_description():
graphbasedslam_param_dir = launch.substitutions.LaunchConfiguration(
'graphbasedslam_param_dir',
default=os.path.join(
get_package_share_directory('graph_based_slam'),
'param',
'graphbasedslam.yaml'))
graphbasedslam = launch_ros.actions.Node(
package='graph_based_slam',
executable='graph_based_slam_node',
parameters=[graphbasedslam_param_dir],
output='screen'
)
return launch.LaunchDescription([
launch.actions.DeclareLaunchArgument(
'graphbasedslam_param_dir',
default_value=graphbasedslam_param_dir,
description='Full path to graphbasedslam parameter file to load'),
graphbasedslam,
])
|
50024
|
from typing import Dict, Iterable, List, Optional, Tuple, Union
from ._meta import _Meta
from ._no_return import _NoReturn
from ._return_type import _ReturnType
class _Return(_ReturnType):
"""
Class that describes how to interpret the return value(s) of a Callable.
Positional arguments are treated as types of return value(s) and keyword
arguments are treated as metadata and their types.
"""
__slots__ = ('values', 'meta', '_types')
def __init__(self, values: Tuple, meta: Optional[_Meta]=None) -> None:
if not values:
if meta:
raise RuntimeError("'values' is empty, use _Meta instead.")
else:
raise RuntimeError("'values' and 'meta' are empty, use _NoReturn instead.")
if meta:
assert isinstance(meta, _Meta), f"meta '{meta}' is not an instance of _Meta"
super().__init__()
self.values = tuple(values)
self.meta = meta
self._types = list(values)
if meta:
self._types.append(meta)
def parse_result(self, result: Tuple[Tuple, Dict]) -> Tuple[Tuple, Dict]:
"""
Verify the return value of a Callable matches what this instance describes.
Parameters
----------
result : Tuple[Tuple, Dict]
Returns
-------
Tuple[Tuple, Dict]
return[0]: actual results or positional arguments for the next Callable
return[1]: updated metadata for the Callables to follow
Raises
------
RuntimeError
            when there is a mismatch in shape or type
"""
if len(self._types) > 1:
assert isinstance(result, tuple), f"Returned value '{result}' is not an instance of Tuple"
if len(result) != len(self._types):
raise RuntimeError("Expecting {} values, but only {} were returned."
.format(len(self._types), len(result)))
_result: Tuple
if len(self._types) == 1:
_result = (result,)
else:
_result = result
args: List = []
meta: Dict = {}
wrong_type_args = []
for item, _type in zip(_result, self._types):
if hasattr(_type, "parse_result"):
sub_args, sub_meta = _type.parse_result(item)
args += sub_args
meta.update(sub_meta)
elif hasattr(_type, '__origin__') and _type.__origin__:
if (
_type.__origin__ == Union and
isinstance(item, _type.__args__)
):
args.append(item)
elif isinstance(item, _type.__origin__):
args.append(item)
elif isinstance(item, _type):
args.append(item)
else:
wrong_type_args.append((item, _type))
if wrong_type_args:
msg = " and ".join("'{}' is not of type {}".format(item, _type)
for item, _type in wrong_type_args)
raise RuntimeError("Values error: {}.".format(msg))
return tuple(args), meta
def assert_is_superset(self, _type: Optional[_ReturnType]) -> None:
"""
Assert this instance is a superset of the given _type.
Parameters
----------
_type : None or an _Return, _Meta, _NoReturn instance
Returns
-------
None
Raises
------
RuntimeError
when this instance is not a superset of _type
"""
if not _type:
return
if isinstance(_type, _Meta):
if not self.meta:
raise RuntimeError('{} is not a superset of {}.'.format(self, _type))
self.meta.assert_is_superset(_type)
return
if isinstance(_type, _Return):
if self.values != _type.values:
raise RuntimeError("Return values are not the same '{}' vs '{}'."
.format(self.values, _type.values))
if self.meta:
self.meta.assert_is_superset(_type.meta)
def downcast_result(self, result: Tuple[Tuple, Dict]) -> Tuple[Tuple, Optional[Dict]]:
"""
Downcast the return value of a Callable to what this instance defines.
Parameters
----------
result : Tuple[Tuple, Dict]
Returns
-------
Tuple[Tuple, Dict]
return[0]: downcasted results or positional arguments for the next Callable
return[1]: downcasted metadata for the Callables to follow
Raises
------
RuntimeError
            when there is a mismatch in shape or type, or an ambiguous conversion
"""
assert result and isinstance(result, tuple), f"Value '{result}' is not an instance of Tuple"
values = result[0]
if self.values and len(self.values) != len(values):
raise RuntimeError("Cannot downcast {} to {}".format(values, self.values))
new_values = []
new_meta = None
for value, _type in zip(values, self.values):
if isinstance(value, _type):
new_values.append(value)
else:
raise RuntimeError("Cannot downcast {} to {}".format(values, self.values))
if self.meta:
_, new_meta = self.meta.downcast_result(result)
return tuple(new_values), new_meta
def __repr__(self):
return "({}, {})".format(self.values, self.meta)
def __eq__(self, other):
if isinstance(other, _Return):
return self.values == other.values and self.meta == other.meta
return False
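# Hedged sketch of parse_result on plain types: a _Return describing two
# positional ints round-trips a matching tuple into (args, meta):
#     r = _Return((int, int))
#     args, meta = r.parse_result((1, 2))   # -> ((1, 2), {})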
|
50029
|
import supriya.nonrealtime
def test_01():
session = supriya.nonrealtime.Session()
assert session.offsets == [float("-inf"), 0.0]
assert session.duration == 0.0
def test_02():
session = supriya.nonrealtime.Session()
with session.at(0):
session.add_group()
assert session.offsets == [float("-inf"), 0.0, float("inf")]
assert session.duration == 0.0
def test_03():
session = supriya.nonrealtime.Session()
with session.at(23.5):
session.add_group()
assert session.offsets == [float("-inf"), 0.0, 23.5, float("inf")]
assert session.duration == 23.5
def test_04():
session = supriya.nonrealtime.Session()
with session.at(23.5):
session.add_group(duration=1.0)
assert session.offsets == [float("-inf"), 0.0, 23.5, 24.5]
assert session.duration == 24.5
def test_05():
session = supriya.nonrealtime.Session()
with session.at(0):
session.add_group()
with session.at(23.5):
session.add_group(duration=1.0)
assert session.offsets == [float("-inf"), 0.0, 23.5, 24.5, float("inf")]
assert session.duration == 24.5
def test_06():
session = supriya.nonrealtime.Session(padding=11.0)
assert session.offsets == [float("-inf"), 0.0]
assert session.duration == 0.0
def test_07():
session = supriya.nonrealtime.Session(padding=11.0)
with session.at(0):
session.add_group()
assert session.offsets == [float("-inf"), 0.0, float("inf")]
assert session.duration == 0.0
def test_08():
session = supriya.nonrealtime.Session(padding=11.0)
with session.at(23.5):
session.add_group()
assert session.offsets == [float("-inf"), 0.0, 23.5, float("inf")]
assert session.duration == 34.5
def test_09():
session = supriya.nonrealtime.Session(padding=11.0)
with session.at(23.5):
session.add_group(duration=1.0)
assert session.offsets == [float("-inf"), 0.0, 23.5, 24.5]
assert session.duration == 35.5
def test_10():
session = supriya.nonrealtime.Session(padding=11.0)
with session.at(0):
session.add_group()
with session.at(23.5):
session.add_group(duration=1.0)
assert session.offsets == [float("-inf"), 0.0, 23.5, 24.5, float("inf")]
assert session.duration == 35.5
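# Hedged reading of the padding tests above: `padding` extends the session
# past the last event rather than shifting it, e.g. in test_08 the duration
# is 23.5 + 11.0 == 34.5 while the offsets themselves are unchanged.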
|
50045
|
from __future__ import print_function
import sys, random, json, os, tempfile
from collections import Counter
import numpy as np
INSIDE_BLENDER = True
try:
import bpy
from mathutils import Vector
except ImportError as e:
INSIDE_BLENDER = False
if INSIDE_BLENDER:
try:
import utils
except ImportError as e:
print("\nERROR")
print("Running render_images.py from Blender and cannot import utils.py.")
print("You may need to add a .pth file to the site-packages of Blender's")
print("bundled python with a command like this:\n")
print("echo $PWD >> $BLENDER/$VERSION/python/lib/python3.5/site-packages/clevr.pth")
print("\nWhere $BLENDER is the directory where Blender is installed, and")
print("$VERSION is your Blender version (such as 2.78).")
sys.exit(1)
def render_scene(args,
output_image='render.png',
output_scene='render_json',
output_blendfile=None,
objects=[],
**kwargs
):
# Load the main blendfile
bpy.ops.wm.open_mainfile(filepath=args.base_scene_blendfile)
# Load materials
utils.load_materials(args.material_dir)
# Set render arguments so we can get pixel coordinates later.
# We use functionality specific to the CYCLES renderer so BLENDER_RENDER
# cannot be used.
render_args = bpy.context.scene.render
render_args.engine = "CYCLES"
render_args.filepath = output_image
render_args.resolution_x = args.width
render_args.resolution_y = args.height
render_args.resolution_percentage = 100
render_args.tile_x = args.render_tile_size
render_args.tile_y = args.render_tile_size
if args.use_gpu == 1:
# Blender changed the API for enabling CUDA at some point
if bpy.app.version < (2, 78, 0):
bpy.context.user_preferences.system.compute_device_type = 'CUDA'
bpy.context.user_preferences.system.compute_device = 'CUDA_0'
else:
cycles_prefs = bpy.context.user_preferences.addons['cycles'].preferences
cycles_prefs.compute_device_type = 'CUDA'
# Some CYCLES-specific stuff
bpy.data.worlds['World'].cycles.sample_as_light = True
bpy.context.scene.cycles.blur_glossy = 2.0
bpy.context.scene.cycles.samples = args.render_num_samples
bpy.context.scene.cycles.transparent_min_bounces = args.render_min_bounces
bpy.context.scene.cycles.transparent_max_bounces = args.render_max_bounces
if args.use_gpu == 1:
bpy.context.scene.cycles.device = 'GPU'
# This will give ground-truth information about the scene and its objects
scene_struct = {
'image_filename': os.path.basename(output_image),
'objects': [],
'directions': {},
}
scene_struct.update(kwargs)
if bpy.app.version < (2, 80, 0):
bpy.ops.mesh.primitive_plane_add(radius=5)
else:
bpy.ops.mesh.primitive_plane_add(size=5)
plane = bpy.context.object
def rand(L):
return 2.0 * L * (random.random() - 0.5)
# Add random jitter to camera position
if args.camera_jitter > 0:
for i in range(3):
bpy.data.objects['Camera'].location[i] += rand(args.camera_jitter)
# Figure out the left, up, and behind directions along the plane and record
# them in the scene structure
camera = bpy.data.objects['Camera']
plane_normal = plane.data.vertices[0].normal
if bpy.app.version < (2, 80, 0):
cam_behind = camera.matrix_world.to_quaternion() * Vector((0, 0, -1))
cam_left = camera.matrix_world.to_quaternion() * Vector((-1, 0, 0))
cam_up = camera.matrix_world.to_quaternion() * Vector((0, 1, 0))
else:
cam_behind = camera.matrix_world.to_quaternion() @ Vector((0, 0, -1))
cam_left = camera.matrix_world.to_quaternion() @ Vector((-1, 0, 0))
cam_up = camera.matrix_world.to_quaternion() @ Vector((0, 1, 0))
plane_behind = (cam_behind - cam_behind.project(plane_normal)).normalized()
plane_left = (cam_left - cam_left.project(plane_normal)).normalized()
plane_up = cam_up.project(plane_normal).normalized()
# Delete the plane; we only used it for normals anyway. The base scene file
# contains the actual ground plane.
utils.delete_object(plane)
# Save all six axis-aligned directions in the scene struct
scene_struct['directions']['behind'] = tuple(plane_behind)
scene_struct['directions']['front'] = tuple(-plane_behind)
scene_struct['directions']['left'] = tuple(plane_left)
scene_struct['directions']['right'] = tuple(-plane_left)
scene_struct['directions']['above'] = tuple(plane_up)
scene_struct['directions']['below'] = tuple(-plane_up)
# Add random jitter to lamp positions
if args.key_light_jitter > 0:
for i in range(3):
bpy.data.objects['Lamp_Key'].location[i] += rand(args.key_light_jitter)
if args.back_light_jitter > 0:
for i in range(3):
bpy.data.objects['Lamp_Back'].location[i] += rand(args.back_light_jitter)
if args.fill_light_jitter > 0:
for i in range(3):
bpy.data.objects['Lamp_Fill'].location[i] += rand(args.fill_light_jitter)
# Now make some random objects
blender_objects = add_objects(args, scene_struct, camera, objects)
# Render the scene and dump the scene data structure
scene_struct['objects'] = objects
scene_struct['relationships'] = compute_all_relationships(scene_struct)
while True:
try:
bpy.ops.render.render(write_still=True)
break
except Exception as e:
print(e)
with open(output_scene, 'w') as f:
json.dump(scene_struct, f, indent=2)
if output_blendfile is not None:
bpy.ops.wm.save_as_mainfile(filepath=output_blendfile)
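# Invocation sketch (hypothetical values): render_scene expects an
# argparse-style namespace exposing the attributes read above, e.g.
#
#   from argparse import Namespace
#   args = Namespace(base_scene_blendfile='data/base_scene.blend',
#                    material_dir='data/materials', width=480, height=320,
#                    render_tile_size=256, use_gpu=0, render_num_samples=512,
#                    render_min_bounces=8, render_max_bounces=8,
#                    camera_jitter=0.5, key_light_jitter=1.0,
#                    back_light_jitter=1.0, fill_light_jitter=1.0,
#                    shape_dir='data/shapes')
#   render_scene(args, objects=[], output_image='img.png',
#                output_scene='scene.json')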
def add_objects(args, scene_struct, camera, objects):
"""
Add objects to the current blender scene
"""
blender_objects = []
for obj in objects:
# Actually add the object to the scene
utils.add_object(args.shape_dir,
obj["shape"],
obj["size"],
obj["location"],
theta=obj["rotation"])
bobj = bpy.context.object
blender_objects.append(bobj)
utils.add_material(obj["material"], Color=obj["color"])
obj["pixel_coords"] = utils.get_camera_coords(camera, bobj.location)
loc = np.array(bobj.location)
dim = np.array(bobj.dimensions)
half = dim / 2
corners = []
corners.append(loc + half * [1,1,1])
corners.append(loc + half * [1,1,-1])
corners.append(loc + half * [1,-1,1])
corners.append(loc + half * [1,-1,-1])
corners.append(loc + half * [-1,1,1])
corners.append(loc + half * [-1,1,-1])
corners.append(loc + half * [-1,-1,1])
corners.append(loc + half * [-1,-1,-1])
import mathutils
corners_camera_coords = np.array([ utils.get_camera_coords(camera, mathutils.Vector(tuple(corner)))
for corner in corners ])
xmax = np.amax(corners_camera_coords[:,0])
ymax = np.amax(corners_camera_coords[:,1])
xmin = np.amin(corners_camera_coords[:,0])
ymin = np.amin(corners_camera_coords[:,1])
obj["bbox"] = (float(xmin), float(ymin), float(xmax), float(ymax))
return blender_objects
def compute_all_relationships(scene_struct, eps=0.2):
"""
Computes relationships between all pairs of objects in the scene.
Returns a dictionary mapping string relationship names to lists of lists of
integers, where output[rel][i] gives a list of object indices that have the
relationship rel with object i. For example if j is in output['left'][i] then
object j is left of object i.
"""
all_relationships = {}
for name, direction_vec in scene_struct['directions'].items():
if name == 'above' or name == 'below': continue
all_relationships[name] = []
for i, obj1 in enumerate(scene_struct['objects']):
coords1 = obj1['location']
related = set()
for j, obj2 in enumerate(scene_struct['objects']):
if obj1 == obj2: continue
coords2 = obj2['location']
diff = [coords2[k] - coords1[k] for k in [0, 1, 2]]
dot = sum(diff[k] * direction_vec[k] for k in [0, 1, 2])
if dot > eps:
related.add(j)
all_relationships[name].append(sorted(list(related)))
return all_relationships
def check_visibility(blender_objects, min_pixels_per_object):
"""
Check whether all objects in the scene have some minimum number of visible
pixels; to accomplish this we assign random (but distinct) colors to all
objects, and render using no lighting or shading or antialiasing; this
ensures that each object is just a solid uniform color. We can then count
the number of pixels of each color in the output image to check the visibility
of each object.
Returns True if all objects are visible and False otherwise.
"""
f, path = tempfile.mkstemp(suffix='.png')
object_colors = render_shadeless(blender_objects, path=path)
img = bpy.data.images.load(path)
p = list(img.pixels)
color_count = Counter((p[i], p[i+1], p[i+2], p[i+3])
for i in range(0, len(p), 4))
os.remove(path)
if len(color_count) != len(blender_objects) + 1:
return False
for _, count in color_count.most_common():
if count < min_pixels_per_object:
return False
return True
def render_shadeless(blender_objects, path='flat.png'):
"""
Render a version of the scene with shading disabled and unique materials
assigned to all objects, and return a set of all colors that should be in the
rendered image. The image itself is written to path. This is used to ensure
that all objects will be visible in the final rendered scene.
"""
render_args = bpy.context.scene.render
# Cache the render args we are about to clobber
old_filepath = render_args.filepath
old_engine = render_args.engine
old_use_antialiasing = render_args.use_antialiasing
# Override some render settings to have flat shading
render_args.filepath = path
render_args.engine = 'BLENDER_RENDER'
render_args.use_antialiasing = False
# Move the lights and ground to layer 2 so they don't render
utils.set_layer(bpy.data.objects['Lamp_Key'], 2)
utils.set_layer(bpy.data.objects['Lamp_Fill'], 2)
utils.set_layer(bpy.data.objects['Lamp_Back'], 2)
utils.set_layer(bpy.data.objects['Ground'], 2)
# Add random shadeless materials to all objects
object_colors = set()
old_materials = []
for i, obj in enumerate(blender_objects):
old_materials.append(obj.data.materials[0])
bpy.ops.material.new()
mat = bpy.data.materials['Material']
mat.name = 'Material_%d' % i
while True:
r, g, b = [random.random() for _ in range(3)]
if (r, g, b) not in object_colors: break
object_colors.add((r, g, b))
mat.diffuse_color = [r, g, b]
mat.use_shadeless = True
obj.data.materials[0] = mat
# Render the scene
bpy.ops.render.render(write_still=True)
# Undo the above; first restore the materials to objects
for mat, obj in zip(old_materials, blender_objects):
obj.data.materials[0] = mat
# Move the lights and ground back to layer 0
utils.set_layer(bpy.data.objects['Lamp_Key'], 0)
utils.set_layer(bpy.data.objects['Lamp_Fill'], 0)
utils.set_layer(bpy.data.objects['Lamp_Back'], 0)
utils.set_layer(bpy.data.objects['Ground'], 0)
# Set the render settings back to what they were
render_args.filepath = old_filepath
render_args.engine = old_engine
render_args.use_antialiasing = old_use_antialiasing
return object_colors
|
50053
|
from django import forms
from .models import PollContent
class PollForm(forms.ModelForm):
    # ModelForm options belong on an inner Meta class rather than on the
    # form class itself
    class Meta:
        model = PollContent
        fields = "__all__"
|
50081
|
import tweepy
from application.twitter.listener.streaming import TwitterStreamingListener, TwitterUserStreamingListener
from application.twitter.interface import TwitterInterface
class TwitterListener(TwitterInterface):
def __init__(self, keywords, user, *args, **kwargs):
"""
Twitter Listener constructor. This class is used as middleware for the Twitter Listener
:param keywords: List of keywords
:param user: User to listen
"""
super(TwitterListener, self).__init__(*args, **kwargs)
self.user = None
self.keywords = None
if len(keywords[0]) > 0:
self.keywords = keywords
self.stream = tweepy.streaming.Stream(self.auth, TwitterStreamingListener(keywords))
if len(user) > 0:
try:
self.user = self.api.get_user(user)
self.user_stream = tweepy.streaming.Stream(self.auth, TwitterUserStreamingListener(user))
except Exception:
raise Exception("Error during the listener creation, does the user exists?")
def start(self, process_manager):
"""
Create new Twitter Listener Process
:param process_manager: Process Manager Instance
:return:
"""
if self.keywords:
try:
process_manager.create_process(target=lambda: self.stream.filter(track=self.keywords),
name='Twitter Keywords Listener: <%s>' % '-'.join(self.keywords),
ptype='twitter_listener')
except Exception:
raise Exception("Error creating new Keywords listener")
if self.user:
try:
process_manager.create_process(target=lambda: self.user_stream.filter(follow=[self.user.id_str]),
name='Twitter User Listener: <%s>' % self.user.screen_name,
ptype='twitter_listener')
except Exception:
raise Exception("Error creating new User tweet listener ")
def __str__(self):
"""
String representation
:return:
"""
return 'Twitter Listener'
|
50107
|
import json
import logging
from enum import Enum
from typing import Dict
import requests
from prometheus_client.parser import text_string_to_metric_families
from src.utils.exceptions import (NoMetricsGivenException,
MetricNotFoundException,
ReceivedUnexpectedDataException)
class RequestStatus(Enum):
SUCCESS = True
FAILED = False
def get_json(endpoint: str, logger: logging.Logger, params=None,
verify: bool = True):
if params is None:
params = {}
get_ret = requests.get(url=endpoint, params=params, timeout=10,
verify=verify, headers={'Connection': 'close'})
logger.debug("get_json: get_ret: %s", get_ret)
return json.loads(get_ret.content.decode('UTF-8'))
def get_prometheus(endpoint: str, logger: logging.Logger, verify: bool = True):
metrics = requests.get(endpoint, timeout=10, verify=verify, headers={
'Connection': 'close'}).content
logger.debug("Retrieved prometheus data from endpoint: " + endpoint)
return metrics.decode('utf-8')
def get_prometheus_metrics_data(endpoint: str,
requested_metrics: Dict[str, str],
logger: logging.Logger,
verify: bool = True) -> Dict:
"""
:param endpoint: The endpoint we are obtaining the data from
:param requested_metrics: A dict which is expected with the following
structure:
{
"metric": "optional" | any string
}
    Where if the metric is set as "optional" no exception is raised when that
    metric cannot be found; instead, this function sets the metric's value to
    None.
    If the metric is not set as optional and it cannot be found at the data
    source, a MetricNotFoundException is raised.
:param logger: Where logging should be sent
:param verify: Will verify the certificate if set to True
:return: The metrics with their values
"""
response = {}
if len(requested_metrics) == 0:
raise NoMetricsGivenException("No metrics given when requesting "
"prometheus data from " + endpoint)
metrics = get_prometheus(endpoint, logger, verify)
for family in text_string_to_metric_families(metrics):
for sample in family.samples:
if sample.name in requested_metrics:
if sample.name not in response:
if sample.labels != {}:
response[sample.name] = {}
response[sample.name][json.dumps(sample.labels)] = \
sample.value
else:
response[sample.name] = sample.value
else:
if sample.labels != {}:
response[sample.name][json.dumps(sample.labels)] = \
sample.value
else:
response[sample.name] = sample.value + \
response[sample.name]
missing_metrics = set(requested_metrics) - set(response)
for metric in missing_metrics:
if requested_metrics[metric].lower() == "optional":
response[metric] = None
else:
raise MetricNotFoundException(metric, endpoint)
return response
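# Usage sketch (illustrative only): the endpoint and metric names below are
# assumptions for demonstration, not part of this module.
#
#   requested = {"process_cpu_seconds_total": "mandatory",
#                "go_goroutines": "optional"}
#   data = get_prometheus_metrics_data("http://localhost:9100/metrics",
#                                      requested, logging.getLogger(__name__))
#   # data["go_goroutines"] is None if the exporter does not expose it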
def transformed_data_processing_helper(component_name: str, configuration: Dict,
transformed_data: Dict,
*other_args) -> None:
"""
    This function attempts to execute the appropriate processing function
    on the transformed data based on a configuration. If the transformed
    data is malformed, this function raises a ReceivedUnexpectedDataException.
:param configuration: A dict with the following schema:
{
'<source_name>': {
            '<data_index_key>': <related_processing_fn>
}
}
    :param transformed_data: The data received from the transformer
    :param component_name: The name of the component receiving the
        transformed data
    :return: None
    :raises ReceivedUnexpectedDataException: if the transformed_data is
        malformed
"""
processing_performed = False
for source, processing_details in configuration.items():
# If the required source is not in the transformed data, then the
# transformed data is malformed, therefore raise an exception.
if source not in transformed_data:
raise ReceivedUnexpectedDataException(component_name)
# If the source is enabled, process its transformed data.
if transformed_data[source]:
# Check which index_key was passed by the transformer and
# execute the appropriate function.
sub_processing_performed = False
for data_index_key, processing_fn in processing_details.items():
if data_index_key in transformed_data[source]:
processing_fn(transformed_data[source][data_index_key],
*other_args)
processing_performed = True
sub_processing_performed = True
break
# If this is false, it means that no processing fn could be
# applied to the source's data
if not sub_processing_performed:
raise ReceivedUnexpectedDataException(component_name)
# If no processing is performed, it means that the data was not
# properly formatted, therefore raise an error.
if not processing_performed:
raise ReceivedUnexpectedDataException(component_name)
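# Usage sketch for transformed_data_processing_helper; the source name
# 'prometheus' and index key 'result' are hypothetical, real callers pass
# whatever keys their transformer emits.
#
#   def process_result(data, *args):
#       print("processing", data)
#
#   configuration = {'prometheus': {'result': process_result}}
#   transformed_data = {'prometheus': {'result': {'went_down_at': None}}}
#   transformed_data_processing_helper('MyComponent', configuration,
#                                      transformed_data)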
|
50131
|
from omegaconf import OmegaConf
def default_detection_train_config():
# FIXME currently using args for train config, will revisit, perhaps move to Hydra
h = OmegaConf.create()
# dataset
h.skip_crowd_during_training = True
# augmentation
h.input_rand_hflip = True
h.train_scale_min = 0.1
h.train_scale_max = 2.0
h.autoaugment_policy = None
# optimization
h.momentum = 0.9
h.learning_rate = 0.08
h.lr_warmup_init = 0.008
h.lr_warmup_epoch = 1.0
h.first_lr_drop_epoch = 200.0
h.second_lr_drop_epoch = 250.0
h.clip_gradients_norm = 10.0
h.num_epochs = 300
# regularization l2 loss.
h.weight_decay = 4e-5
h.lr_decay_method = 'cosine'
h.moving_average_decay = 0.9998
h.ckpt_var_scope = None
return h
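# Usage sketch: the returned node is an ordinary OmegaConf config, so CLI or
# YAML overrides can be merged on top of it. The override values below are
# illustrative, not project defaults.
if __name__ == '__main__':
    cfg = default_detection_train_config()
    overrides = OmegaConf.from_dotlist(['learning_rate=0.1', 'num_epochs=50'])
    cfg = OmegaConf.merge(cfg, overrides)
    print(cfg.learning_rate, cfg.num_epochs)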
|
50162
|
import numpy as np
import math
from pyspark.sql import Row
"""
Implementation of Lorentz vector
"""
class LorentzVector(object):
def __init__(self, *args):
if len(args)>0:
self.x = args[0]
self.y = args[1]
self.z = args[2]
self.t = args[3]
def SetPtEtaPhiM(self, pt, eta, phi, mass):
pt = abs(pt)
self.SetXYZM(pt*math.cos(phi), pt*math.sin(phi), pt*math.sinh(eta), mass)
def SetXYZM(self, x, y, z, m):
        self.x = x
self.y = y
self.z = z
if (m>=0):
self.t = math.sqrt(x*x + y*y + z*z + m*m)
else:
self.t = math.sqrt(max(x*x + y*y + z*z - m*m, 0))
def E(self):
return self.t
def Px(self):
return self.x
def Py(self):
return self.y
def Pz(self):
return self.z
def Pt(self):
return math.sqrt(self.x*self.x + self.y*self.y)
    def Eta(self):
        cosTheta = self.CosTheta()
        if cosTheta*cosTheta < 1:
            return -0.5*math.log((1.0 - cosTheta)/(1.0 + cosTheta))
        if self.z == 0:
            return 0
        # mirror ROOT's TVector3::PseudoRapidity: return a large sentinel for
        # vectors along the beam axis instead of falling through to None
        return 10e10 if self.z > 0 else -10e10
def mag(self):
return math.sqrt(self.x*self.x + self.y*self.y + self.z*self.z)
def CosTheta(self):
return 1.0 if self.mag()==0.0 else self.z/self.mag()
def Phi(self):
return math.atan2(self.y, self.x)
def DeltaR(self, other):
deta = self.Eta() - other.Eta()
dphi = self.Phi() - other.Phi()
pi = math.pi
while dphi > pi: dphi -= 2*pi
while dphi < -pi: dphi += 2*pi
return math.sqrt(deta*deta + dphi*dphi)
"""
Functions used to return the Pt map of selected tracks, neutrals and photons
"""
def ChPtMapp(DR, event):
pTmap = []
for h in event.EFlowTrack:
if h.PT<= 0.5: continue
pTmap.append([h.Eta, h.Phi, h.PT])
return np.asarray(pTmap)
def NeuPtMapp(DR, event):
pTmap = []
for h in event.EFlowNeutralHadron:
if h.ET<= 1.0: continue
pTmap.append([h.Eta, h.Phi, h.ET])
return np.asarray(pTmap)
def PhotonPtMapp(DR, event):
pTmap = []
for h in event.EFlowPhoton:
if h.ET<= 1.0: continue
pTmap.append([h.Eta, h.Phi, h.ET])
return np.asarray(pTmap)
"""
Functions used to return the Pt map of selected tracks, neutrals and photons
Versions used for the optimized filtering with Spark SQL and HOF
"""
# get the selected tracks
def ChPtMapp2(Tracks):
#pTmap = []
pTmap = np.zeros((len(Tracks), 3))
for i, h in enumerate(Tracks):
pTmap[i] = [h["Eta"], h["Phi"], h["PT"]]
return pTmap
# get the selected neutrals
def NeuPtMapp2(NeutralHadrons):
pTmap = np.zeros((len(NeutralHadrons), 3))
for i, h in enumerate(NeutralHadrons):
pTmap[i] = [h["Eta"], h["Phi"], h["ET"]]
return pTmap
# get the selected photons
def PhotonPtMapp2(Photons):
pTmap = np.zeros((len(Photons), 3))
for i, h in enumerate(Photons):
pTmap[i] = [h["Eta"], h["Phi"], h["ET"]]
return pTmap
"""
Get the particle ISO
"""
def PFIso(p, DR, PtMap, subtractPt):
if p.Pt() <= 0.: return 0.
DeltaEta = PtMap[:,0] - p.Eta()
DeltaPhi = PtMap[:,1] - p.Phi()
twopi = 2.* math.pi
DeltaPhi = DeltaPhi - twopi*(DeltaPhi > twopi) + twopi*(DeltaPhi < -1.*twopi)
isInCone = DeltaPhi*DeltaPhi + DeltaEta*DeltaEta < DR*DR
Iso = PtMap[isInCone, 2].sum()/p.Pt()
    if subtractPt: Iso = Iso - 1
return float(Iso)
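# Usage sketch (hypothetical values): build a muon four-vector, turn a couple
# of Row-like track records into a Pt map, and compute the charged isolation
# inside a DR = 0.3 cone.
if __name__ == '__main__':
    mu = LorentzVector()
    mu.SetPtEtaPhiM(35.0, 0.4, 1.2, 0.105)
    tracks = [{"Eta": 0.5, "Phi": 1.1, "PT": 3.0},
              {"Eta": -1.8, "Phi": 0.0, "PT": 10.0}]
    print(PFIso(mu, 0.3, ChPtMapp2(tracks), subtractPt=False))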
|
50182
|
import pytest, warnings, numpy as np
from sequentia.classifiers import _Topology, _LeftRightTopology, _ErgodicTopology, _LinearTopology
from ....support import assert_equal, assert_all_equal, assert_distribution
# Set seed for reproducible randomness
seed = 0
np.random.seed(seed)
rng = np.random.RandomState(seed)
# ========= #
# _Topology #
# ========= #
# --------------------------- #
# _Topology.uniform_initial() #
# --------------------------- #
def test_uniform_initial_min():
"""Generate a uniform initial state distribution with the minimum number of states"""
topology = _Topology(n_states=1, random_state=rng)
initial = topology.uniform_initial()
assert_distribution(initial)
assert_equal(initial, np.array([
1.
]))
def test_uniform_initial_small():
"""Generate a uniform initial state distribution with a few states"""
topology = _Topology(n_states=2, random_state=rng)
initial = topology.uniform_initial()
assert_distribution(initial)
assert_equal(initial, np.array([
0.5, 0.5
]))
def test_uniform_initial_many():
"""Generate a uniform initial state distribution with many states"""
topology = _Topology(n_states=5, random_state=rng)
initial = topology.uniform_initial()
assert_distribution(initial)
assert_equal(initial, np.array([
0.2, 0.2, 0.2, 0.2, 0.2
]))
# -------------------------- #
# _Topology.random_initial() #
# -------------------------- #
def test_random_initial_min():
"""Generate a random initial state distribution with minimal states"""
topology = _Topology(n_states=1, random_state=rng)
initial = topology.random_initial()
assert_distribution(initial)
assert_equal(initial, np.array([
1.
]))
def test_random_initial_small():
"""Generate a random initial state distribution with few states"""
topology = _Topology(n_states=2, random_state=rng)
initial = topology.random_initial()
assert_distribution(initial)
assert_equal(initial, np.array([
0.57633871, 0.42366129
]))
def test_random_initial_many():
"""Generate a random initial state distribution with many states"""
topology = _Topology(n_states=5, random_state=rng)
initial = topology.random_initial()
assert_distribution(initial)
assert_equal(initial, np.array([
0.15210286, 0.10647349, 0.20059295, 0.11120171, 0.42962898
]))
# ================== #
# _LeftRightTopology #
# ================== #
# ---------------------------------------- #
# _LeftRightTopology.uniform_transitions() #
# ---------------------------------------- #
def test_left_right_uniform_transitions_min():
"""Generate a uniform left-right transition matrix with minimal states"""
topology = _LeftRightTopology(n_states=1, random_state=rng)
transitions = topology.uniform_transitions()
assert_distribution(transitions)
assert_equal(transitions, np.array([
[1.]
]))
def test_left_right_uniform_transitions_small():
"""Generate a uniform left-right transition matrix with few states"""
topology = _LeftRightTopology(n_states=2, random_state=rng)
transitions = topology.uniform_transitions()
assert_distribution(transitions)
assert_equal(transitions, np.array([
[0.5, 0.5],
[0. , 1. ]
]))
def test_left_right_uniform_transitions_many():
"""Generate a uniform left-right transition matrix with many states"""
topology = _LeftRightTopology(n_states=5, random_state=rng)
transitions = topology.uniform_transitions()
assert_distribution(transitions)
assert_equal(transitions, np.array([
[0.2, 0.2 , 0.2 , 0.2 , 0.2 ],
[0. , 0.25, 0.25 , 0.25 , 0.25 ],
[0. , 0. , 0.33333333, 0.33333333, 0.33333333],
[0. , 0. , 0. , 0.5 , 0.5 ] ,
[0. , 0. , 0. , 0. , 1. ]
]))
# --------------------------------------- #
# _LeftRightTopology.random_transitions() #
# --------------------------------------- #
def test_left_right_random_transitions_min():
"""Generate a random left-right transition matrix with minimal states"""
topology = _LeftRightTopology(n_states=1, random_state=rng)
transitions = topology.random_transitions()
assert_distribution(transitions)
assert_equal(transitions, np.array([
[1.]
]))
def test_left_right_random_transitions_small():
"""Generate a random left-right transition matrix with few states"""
topology = _LeftRightTopology(n_states=2, random_state=rng)
transitions = topology.random_transitions()
assert_distribution(transitions)
assert_equal(transitions, np.array([
[0.23561633, 0.76438367],
[0. , 1. ]
]))
def test_left_right_random_transitions_many():
"""Generate a random left-right transition matrix with many states"""
topology = _LeftRightTopology(n_states=5, random_state=rng)
transitions = topology.random_transitions()
assert_distribution(transitions)
assert_equal(transitions, np.array([
[0.23169814, 0.71716356, 0.02033845, 0.02516204, 0.00563782],
[0. , 0.19474072, 0.16405008, 0.22228532, 0.41892388],
[0. , 0. , 0.42912755, 0.16545797, 0.40541448],
[0. , 0. , 0. , 0.109713 , 0.890287 ],
[0. , 0. , 0. , 0. , 1. ]
]))
# ----------------------------------------- #
# _LeftRightTopology.validate_transitions() #
# ----------------------------------------- #
def test_left_right_validate_transitions_invalid():
"""Validate an invalid left-right transition matrix"""
topology = _LeftRightTopology(n_states=5, random_state=rng)
transitions = _ErgodicTopology(n_states=5, random_state=rng).random_transitions()
with pytest.raises(ValueError) as e:
topology.validate_transitions(transitions)
assert str(e.value) == 'Left-right transition matrix must be upper-triangular'
def test_left_right_validate_transitions_valid():
"""Validate a valid left-right transition matrix"""
topology = _LeftRightTopology(n_states=5, random_state=rng)
transitions = topology.random_transitions()
topology.validate_transitions(transitions)
# -------------------------------------- #
# _ErgodicTopology.uniform_transitions() #
# -------------------------------------- #
def test_ergodic_uniform_transitions_min():
"""Generate a uniform ergodic transition matrix with minimal states"""
topology = _ErgodicTopology(n_states=1, random_state=rng)
transitions = topology.uniform_transitions()
assert_distribution(transitions)
assert_equal(transitions, np.array([
[1.]
]))
def test_ergodic_uniform_transitions_small():
"""Generate a uniform ergodic transition matrix with few states"""
topology = _ErgodicTopology(n_states=2, random_state=rng)
transitions = topology.uniform_transitions()
assert_distribution(transitions)
assert_equal(transitions, np.array([
[0.5, 0.5],
[0.5, 0.5]
]))
def test_ergodic_uniform_transitions_many():
"""Generate a uniform ergodic transition matrix with many states"""
topology = _ErgodicTopology(n_states=5, random_state=rng)
transitions = topology.uniform_transitions()
assert_distribution(transitions)
assert_equal(transitions, np.array([
[0.2, 0.2, 0.2, 0.2, 0.2],
[0.2, 0.2, 0.2, 0.2, 0.2],
[0.2, 0.2, 0.2, 0.2, 0.2],
[0.2, 0.2, 0.2, 0.2, 0.2],
[0.2, 0.2, 0.2, 0.2, 0.2]
]))
# ------------------------------------- #
# _ErgodicTopology.random_transitions() #
# ------------------------------------- #
def test_ergodic_random_transitions_min():
"""Generate a random ergodic transition matrix with minimal states"""
topology = _ErgodicTopology(n_states=1, random_state=rng)
transitions = topology.random_transitions()
assert_distribution(transitions)
assert_equal(transitions, np.array([
[1.]
]))
def test_ergodic_random_transitions_small():
"""Generate a random ergodic transition matrix with few states"""
topology = _ErgodicTopology(n_states=2, random_state=rng)
transitions = topology.random_transitions()
assert_distribution(transitions)
assert_equal(transitions, np.array([
[0.9474011 , 0.0525989 ],
[0.85567599, 0.14432401]
]))
def test_ergodic_random_transitions_many():
"""Generate a random ergodic transition matrix with many states"""
topology = _ErgodicTopology(n_states=5, random_state=rng)
transitions = topology.random_transitions()
assert_distribution(transitions)
assert_equal(transitions, np.array([
[0.58715548, 0.14491542, 0.20980762, 0.00623944, 0.05188205],
[0.0840705 , 0.23055049, 0.08297536, 0.25124688, 0.35115677],
[0.02117615, 0.37664662, 0.26705912, 0.09851123, 0.23660688],
[0.01938041, 0.16853843, 0.52046123, 0.07535256, 0.21626737],
[0.04996846, 0.44545843, 0.12079423, 0.07154241, 0.31223646]
]))
# --------------------------------------- #
# _ErgodicTopology.validate_transitions() #
# --------------------------------------- #
def test_ergodic_validate_transitions_invalid():
"""Validate an invalid ergodic transition matrix"""
topology = _ErgodicTopology(n_states=5, random_state=rng)
transitions = _LeftRightTopology(n_states=5, random_state=rng).random_transitions()
with pytest.warns(UserWarning):
topology.validate_transitions(transitions)
def test_ergodic_validate_transitions_valid():
"""Validate a valid ergodic transition matrix"""
topology = _ErgodicTopology(n_states=5, random_state=rng)
transitions = topology.random_transitions()
topology.validate_transitions(transitions)
# =============== #
# _LinearTopology #
# =============== #
# ------------------------------------- #
# _LinearTopology.uniform_transitions() #
# ------------------------------------- #
def test_linear_uniform_transitions_min():
"""Generate a uniform linear transition matrix with minimal states"""
topology = _LinearTopology(n_states=1, random_state=rng)
transitions = topology.uniform_transitions()
assert_distribution(transitions)
assert_equal(transitions, np.array([
[1.]
]))
def test_linear_uniform_transitions_small():
"""Generate a uniform linear transition matrix with few states"""
topology = _LinearTopology(n_states=2, random_state=rng)
transitions = topology.uniform_transitions()
assert_distribution(transitions)
assert_equal(transitions, np.array([
[0.5, 0.5],
[0. , 1. ]
]))
def test_linear_uniform_transitions_many():
"""Generate a uniform linear transition matrix with many states"""
topology = _LinearTopology(n_states=5, random_state=rng)
transitions = topology.uniform_transitions()
assert_distribution(transitions)
assert_equal(transitions, np.array([
[0.5, 0.5, 0. , 0. , 0. ],
[0. , 0.5, 0.5, 0. , 0. ],
[0. , 0. , 0.5, 0.5, 0. ],
[0. , 0. , 0. , 0.5, 0.5],
[0. , 0. , 0. , 0. , 1. ]
]))
# ------------------------------------ #
# _LinearTopology.random_transitions() #
# ------------------------------------ #
def test_linear_random_transitions_min():
"""Generate a random linear transition matrix with minimal states"""
topology = _LinearTopology(n_states=1, random_state=rng)
transitions = topology.random_transitions()
assert_distribution(transitions)
assert_equal(transitions, np.array([
[1.]
]))
def test_linear_random_transitions_small():
"""Generate a random linear transition matrix with few states"""
topology = _LinearTopology(n_states=2, random_state=rng)
transitions = topology.random_transitions()
assert_distribution(transitions)
assert_equal(transitions, np.array([
[0.65157396, 0.34842604],
[0. , 1. ]
]))
def test_linear_random_transitions_many():
"""Generate a random linear transition matrix with many states"""
topology = _LinearTopology(n_states=5, random_state=rng)
transitions = topology.random_transitions()
assert_distribution(transitions)
assert_equal(transitions, np.array([
[0.44455421, 0.55544579, 0. , 0. , 0. ],
[0. , 0.57553614, 0.42446386, 0. , 0. ],
[0. , 0. , 0.92014965, 0.07985035, 0. ],
[0. , 0. , 0. , 0.66790982, 0.33209018],
[0. , 0. , 0. , 0. , 1. ]
]))
# -------------------------------------- #
# _LinearTopology.validate_transitions() #
# -------------------------------------- #
def test_linear_validate_transitions_invalid():
"""Validate an invalid linear transition matrix"""
topology = _LinearTopology(n_states=5, random_state=rng)
transitions = _ErgodicTopology(n_states=5, random_state=rng).random_transitions()
with pytest.raises(ValueError) as e:
topology.validate_transitions(transitions)
assert str(e.value) == 'Left-right transition matrix must be upper-triangular'
def test_linear_validate_transitions_valid():
"""Validate a valid linear transition matrix"""
topology = _LinearTopology(n_states=5, random_state=rng)
transitions = topology.random_transitions()
topology.validate_transitions(transitions)
|
50196
|
from django.conf import settings
from django.http import HttpResponse
from django.urls import include, path
from django.contrib.flatpages.views import flatpage as flatpage_view
from django.apps import apps as django_apps
from django_distill import distill_url, distill_path, distill_re_path
def test_no_param_view(request):
return HttpResponse(b'test', content_type='application/octet-stream')
def test_positional_param_view(request, param):
return HttpResponse(b'test' + param.encode(),
content_type='application/octet-stream')
def test_named_param_view(request, param=None):
return HttpResponse(b'test' + param.encode(),
content_type='application/octet-stream')
def test_session_view(request):
request.session['test'] = 'test'
return HttpResponse(b'test', content_type='application/octet-stream')
def test_broken_view(request):
# Trigger a normal Python exception when rendering
a = 1 / 0
def test_http404_view(request):
response = HttpResponse(b'404', content_type='application/octet-stream')
response.status_code = 404
return response
def test_no_param_func():
return None
def test_positional_param_func():
return ('12345',)
def test_named_param_func():
return [{'param': 'test'}]
def test_flatpages_func():
Site = django_apps.get_model('sites.Site')
current_site = Site.objects.get_current()
flatpages = current_site.flatpage_set.filter(registration_required=False)
for flatpage in flatpages:
yield {'url': flatpage.url}
urlpatterns = [
distill_url(r'^url/$',
test_no_param_view,
name='url-no-param',
distill_func=test_no_param_func,
distill_file='test'),
distill_url(r'^url-no-func/$',
test_no_param_view,
name='url-no-param-no-func',
distill_file='test'),
distill_url(r'^url/([\d]+)$',
test_positional_param_view,
name='url-positional-param',
distill_func=test_positional_param_func),
distill_url(r'^url/(?P<param>[\w]+)$',
test_named_param_view,
name='url-named-param',
distill_func=test_named_param_func),
path('path/namespace1/',
include('tests.namespaced_urls', namespace='test_namespace')),
path('path/no-namespace/',
include('tests.no_namespaced_urls')),
]
if settings.HAS_RE_PATH:
urlpatterns += [
distill_re_path(r'^re_path/$',
test_no_param_view,
name='re_path-no-param',
distill_func=test_no_param_func,
distill_file='test'),
distill_re_path(r'^re_path-no-func/$',
test_no_param_view,
name='re_path-no-param-no-func',
distill_file='test'),
distill_re_path(r'^re_path/([\d]+)$',
test_positional_param_view,
name='re_path-positional-param',
distill_func=test_positional_param_func),
distill_re_path(r'^re_path/(?P<param>[\w]+)$',
test_named_param_view,
name='re_path-named-param',
distill_func=test_named_param_func),
distill_re_path(r'^re_path/broken$',
test_broken_view,
name='re_path-broken',
distill_func=test_no_param_func),
distill_re_path(r'^re_path/ignore-sessions$',
test_session_view,
name='re_path-ignore-sessions',
distill_func=test_no_param_func),
distill_re_path(r'^re_path/404$',
test_http404_view,
name='re_path-404',
distill_status_codes=(404,),
distill_func=test_no_param_func),
distill_re_path(r'^re_path/flatpage(?P<url>.+)$',
flatpage_view,
name='re_path-flatpage',
distill_func=test_flatpages_func),
]
if settings.HAS_PATH:
urlpatterns += [
distill_path('path/',
test_no_param_view,
name='path-no-param',
distill_func=test_no_param_func,
distill_file='test'),
distill_path('path-no-func/',
test_no_param_view,
name='path-no-param-no-func',
distill_file='test'),
distill_path('path/<int>',
test_positional_param_view,
name='path-positional-param',
distill_func=test_positional_param_func),
distill_path('path/<str:param>',
test_named_param_view,
name='path-named-param',
distill_func=test_named_param_func),
distill_path('path/broken',
test_broken_view,
name='path-broken',
distill_func=test_no_param_func),
distill_path('path/ignore-sessions',
test_session_view,
name='path-ignore-sessions',
distill_func=test_no_param_func),
distill_path('path/404',
test_http404_view,
name='path-404',
distill_status_codes=(404,),
distill_func=test_no_param_func),
distill_path('path/flatpage<path:url>',
flatpage_view,
name='path-flatpage',
distill_func=test_flatpages_func),
]
|
50238
|
from ClassCalculator import Calculator
class Main:
def get_input(self,message):
return input(message)
def menu(self):
t = True
while t:
m = "---Menu---\n"
m += "Escribe la abreviación adecuada\n"
m += "Suma: x mas y\nResta: x menos y\nMultiplicacion: x por y\nDivision: x entre y\n"
m += "Potencia: x elevado_a y\nRaiz: x raiz_de y\nPara salir escriba 'salir'\n\n"
inpt = self.get_input(m)
if inpt == 'Exit':
print("\nHasta pronto.")
t= False
else:
data = inpt.split(' ')
print("Resultado = "+ str(self.Calc(int(data[0]),data[1],int(data[2]))))
t = True
def Calc(self,a, oper, b):
X=Calculator(a,b)
switch={
'mas':X.suma,
'menos':X.resta,
'por':X.multi,
'entre':X.divi,
'elevado_a':X.pote,
'raiz_de':X.raiz
}
return switch.get(oper)()
Cycle = Main()
Cycle.menu()
|
50246
|
import aio_pika
import asyncio
import config
import inspect
import logging
import orjson
import sys
import traceback
import zangy
from classes.misc import Status, Session
from classes.state import State
from discord import utils
from discord.ext import commands
from discord.ext.commands import DefaultHelpCommand, Context
from discord.ext.commands.core import _CaseInsensitiveDict
from discord.ext.commands.view import StringView
from discord.gateway import DiscordWebSocket
from discord.http import HTTPClient
from discord.utils import parse_time, to_json
log = logging.getLogger(__name__)
class Bot(commands.AutoShardedBot):
def __init__(self, command_prefix, help_command=DefaultHelpCommand(), description=None, **kwargs):
self.command_prefix = command_prefix
self.extra_events = {}
self._BotBase__cogs = {}
self._BotBase__extensions = {}
self._checks = []
self._check_once = []
self._before_invoke = None
self._after_invoke = None
self._help_command = None
self.description = inspect.cleandoc(description) if description else ""
self.owner_id = kwargs.get("owner_id")
self.owner_ids = kwargs.get("owner_ids", set())
self._skip_check = lambda x, y: x == y
self.help_command = help_command
self.case_insensitive = kwargs.get("case_insensitive", False)
self.all_commands = _CaseInsensitiveDict() if self.case_insensitive else {}
self.ws = None
self.loop = asyncio.get_event_loop()
self.http = HTTPClient(None, loop=self.loop)
self._handlers = {"ready": self._handle_ready}
self._hooks = {}
self._listeners = {}
self._connection = None
self._closed = False
self._ready = asyncio.Event()
self._redis = None
self._amqp = None
self._amqp_channel = None
self._amqp_queue = None
@property
def config(self):
return config
async def user(self):
return await self._connection.user()
async def users(self):
return await self._connection._users()
async def guilds(self):
return await self._connection.guilds()
async def emojis(self):
return await self._connection.emojis()
async def cached_messages(self):
return await self._connection._messages()
async def private_channels(self):
return await self._connection.private_channels()
async def shard_count(self):
return int(await self._redis.get("gateway_shards"))
async def started(self):
        return parse_time(str(await self._connection._get("gateway_started")).split(".")[0])
async def statuses(self):
return [Status(x) for x in await self._connection._get("gateway_statuses")]
async def sessions(self):
return {int(x): Session(y) for x, y in (await self._connection._get("gateway_sessions")).items()}
async def get_channel(self, channel_id):
return await self._connection.get_channel(channel_id)
async def get_guild(self, guild_id):
return await self._connection._get_guild(guild_id)
async def get_user(self, user_id):
return await self._connection.get_user(user_id)
async def get_emoji(self, emoji_id):
return await self._connection.get_emoji(emoji_id)
async def get_all_channels(self):
for guild in await self.guilds():
for channel in await guild.channels():
yield channel
async def get_all_members(self):
for guild in await self.guilds():
for member in await guild.members():
yield member
async def _get_state(self, **options):
return State(
dispatch=self.dispatch,
handlers=self._handlers,
hooks=self._hooks,
http=self.http,
loop=self.loop,
redis=self._redis,
shard_count=await self.shard_count(),
**options,
)
async def get_context(self, message, *, cls=Context):
view = StringView(message.content)
ctx = cls(prefix=None, view=view, bot=self, message=message)
if self._skip_check((await message.author()).id, (await self.user()).id):
return ctx
prefix = await self.get_prefix(message)
invoked_prefix = prefix
if isinstance(prefix, str):
if not view.skip_string(prefix):
return ctx
else:
try:
if message.content.startswith(tuple(prefix)):
invoked_prefix = utils.find(view.skip_string, prefix)
else:
return ctx
except TypeError:
if not isinstance(prefix, list):
raise TypeError("get_prefix must return either a string or a list of string, "
"not {}".format(prefix.__class__.__name__))
for value in prefix:
if not isinstance(value, str):
raise TypeError("Iterable command_prefix or list returned from get_prefix must "
"contain only strings, not {}".format(value.__class__.__name__))
raise
invoker = view.get_word()
ctx.invoked_with = invoker
ctx.prefix = invoked_prefix
ctx.command = self.all_commands.get(invoker)
return ctx
async def process_commands(self, message):
if (await message.author()).bot:
return
ctx = await self.get_context(message)
await self.invoke(ctx)
async def receive_message(self, msg):
self.ws._dispatch("socket_raw_receive", msg)
msg = orjson.loads(msg)
self.ws._dispatch("socket_response", msg)
op = msg.get("op")
data = msg.get("d")
event = msg.get("t")
old = msg.get("old")
if op != self.ws.DISPATCH:
return
try:
func = self.ws._discord_parsers[event]
except KeyError:
log.debug("Unknown event %s.", event)
else:
try:
await func(data, old)
except asyncio.CancelledError:
pass
except Exception:
try:
await self.on_error(event)
except asyncio.CancelledError:
pass
removed = []
for index, entry in enumerate(self.ws._dispatch_listeners):
if entry.event != event:
continue
future = entry.future
if future.cancelled():
removed.append(index)
continue
try:
valid = entry.predicate(data)
except Exception as exc:
future.set_exception(exc)
removed.append(index)
else:
if valid:
ret = data if entry.result is None else entry.result(data)
future.set_result(ret)
removed.append(index)
for index in reversed(removed):
del self.ws._dispatch_listeners[index]
async def send_message(self, msg):
data = to_json(msg)
self.ws._dispatch("socket_raw_send", data)
await self._amqp_channel.default_exchange.publish(aio_pika.Message(body=data), routing_key="gateway.send")
async def start(self):
log.info("Starting...")
self._redis = await zangy.create_pool(self.config.redis_url, 5)
self._amqp = await aio_pika.connect_robust(self.config.amqp_url)
self._amqp_channel = await self._amqp.channel()
self._amqp_queue = await self._amqp_channel.get_queue("gateway.recv")
self._connection = await self._get_state()
self._connection._get_client = lambda: self
self.ws = DiscordWebSocket(socket=None, loop=self.loop)
self.ws.token = self.http.token
self.ws._connection = self._connection
self.ws._discord_parsers = self._connection.parsers
self.ws._dispatch = self.dispatch
self.ws.call_hooks = self._connection.call_hooks
await self.http.static_login(self.config.token, bot=True)
for extension in self.config.cogs:
try:
self.load_extension("cogs." + extension)
except Exception:
log.error(f"Failed to load extension {extension}.", file=sys.stderr)
log.error(traceback.print_exc())
async with self._amqp_queue.iterator() as queue_iter:
async for message in queue_iter:
async with message.process(ignore_processed=True):
await self.receive_message(message.body)
message.ack()
|
50250
|
from enum import Enum
from abc import ABC, abstractmethod
import logging
import json
import trio
class DeviceType(Enum):
"""
The DeviceType defines which kind of Elro device this is
"""
CO_ALARM = "0000"
WATER_ALARM = "0004"
HEAT_ALARM = "0003"
FIRE_ALARM = "0005"
DOOR_WINDOW_SENSOR = "0101"
class Device(ABC):
"""
A Device is an Elro device that is connected to the system
"""
def __init__(self, device_id, device_type):
"""
Constructor
:param device_id: The device ID
:param device_type: The device type
"""
self.id = device_id
self._name = ""
self._battery_level = -1
self._device_state = ""
self.device_type = device_type
self.updated = trio.Event()
self.alarm = trio.Event()
@property
def name(self):
"""
The name of the device
:return: The name
"""
return self._name
@name.setter
def name(self, name):
self._name = name
self._send_update_event()
@property
def device_state(self):
"""
The current state of the device as a string
:return: The device state
"""
return self._device_state
@device_state.setter
def device_state(self, device_state):
self._device_state = device_state
self._send_update_event()
@property
def battery_level(self):
"""
The current battery level of the device in percent.
:return: The battery level
"""
return self._battery_level
@battery_level.setter
def battery_level(self, battery_level):
self._battery_level = battery_level
self._send_update_event()
def _send_update_event(self):
"""
Triggers the self.updated event
"""
self.updated.set()
self.updated = trio.Event()
def send_alarm_event(self):
"""
Triggers the self.alarm event.
"""
self.alarm.set()
self.alarm = trio.Event()
def update(self, data):
"""
Updates this device with the data received from the actual device
:param data: The data dict received from the actual device
"""
self.device_type = data["data"]["device_name"]
# set battery status
batt = int(data["data"]["device_status"][2:4], 16)
self.battery_level = batt
self.device_state = "Unknown"
self.update_specifics(data)
self._send_update_event()
@abstractmethod
def update_specifics(self, data):
"""
An abstract method that is called to update type specific things.
:param data: The data dict received from the actual device
"""
pass
def __str__(self):
return f"<{self.device_type}: {self.name} (id: {self.id})>"
def __repr__(self):
return str(self)
@property
def json(self):
"""
A json representation of the device.
:return: A str containing json.
"""
return json.dumps({"name": self.name,
"id": self.id,
"type": self.device_type,
"state": self.device_state,
"battery": self.battery_level})
class WindowSensor(Device):
"""
A sensor that can detect open/close state of a window.
"""
def __init__(self, device_id):
"""
Constructor
:param device_id: The device ID
"""
super().__init__(device_id, "0101")
def update_specifics(self, data):
"""
Updates the window "Open"/"Closed" state
:param data: The data dict received from the actual device
"""
if data["data"]["device_name"] != DeviceType.DOOR_WINDOW_SENSOR.value:
AttributeError(f"Tried to update a window sensor to type "
f"{DeviceType(data['data']['device_name'])}")
if data["data"]["device_status"][4:-2] == "55":
logging.debug("Door/window id " + str(self.id) + " open!")
self.device_state = "Open"
elif data["data"]["device_status"][4:-2] == "AA":
logging.debug("Door/window id " + str(self.id) + " closed!")
self.device_state = "Closed"
class AlarmSensor(Device):
"""
A device that can ring an alarm (HeatAlarm, WaterAlarm, FireAlarm, COAlarm)
"""
def __init__(self, device_id, device_type):
"""
Constructor
:param device_id: The device ID
:param device_type: The device type
"""
super().__init__(device_id, device_type)
def update_specifics(self, data):
"""
Updates the alarm state of the device.
:param data: The data dict received from the actual device
"""
if data["data"]["device_status"][4:-2] == "BB":
self.device_state = "Alarm"
elif data["data"]["device_status"][4:-2] == "AA":
self.device_state = "Normal"
def create_device_from_data(data):
"""
Factory method to create a device from a data dict
:param data: The data dict received from the actual device
:return: A Device object
"""
if data["data"]["device_name"] == DeviceType.DOOR_WINDOW_SENSOR.value:
return WindowSensor(data["data"]["device_ID"])
else:
return AlarmSensor(data["data"]["device_ID"], data["data"]["device_name"])
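# Usage sketch with a hypothetical payload: device_status encodes the battery
# level as hex at [2:4] and the open/closed marker at [4:-2].
#
#   payload = {"data": {"device_ID": 1,
#                       "device_name": "0101",
#                       "device_status": "0A445500"}}
#   sensor = create_device_from_data(payload)  # -> WindowSensor
#   sensor.update(payload)                     # battery 68%, state "Open"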
|
50272
|
from extraction.runnables import Extractor, RunnableError, ExtractorResult
import extractor.csxextract.interfaces as interfaces
import extractor.csxextract.config as config
import extractor.csxextract.filters as filters
import defusedxml.ElementTree as safeET
import xml.etree.ElementTree as ET
import xml.sax.saxutils as xmlutils
import extraction.utils
import tempfile
import requests
import re
import os
# Returns full TEI xml document of the PDF
class GrobidTEIExtractor(interfaces.FullTextTEIExtractor):
dependencies = frozenset([filters.AcademicPaperFilter])
result_file_name = '.tei'
def extract(self, data, dep_results):
xml = _call_grobid_method(data, 'processFulltextDocument')
return ExtractorResult(xml_result=xml)
# Returns TEI xml document only of the PDF's header info
class GrobidHeaderTEIExtractor(interfaces.HeaderTEIExtractor):
dependencies = frozenset([filters.AcademicPaperFilter])
result_file_name = '.header.tei'
def extract(self, data, dep_results):
xml = _call_grobid_method(data, 'processHeaderDocument')
return ExtractorResult(xml_result=xml)
class GrobidCitationTEIExtractor(Extractor):
dependencies = frozenset([filters.AcademicPaperFilter])
result_file_name = '.cite.tei'
def extract(self, data, dep_results):
xml = _call_grobid_method(data, 'processReferences')
return ExtractorResult(xml_result=xml)
def _call_grobid_method(data, method):
url = '{0}/api/{1}'.format(config.GROBID_HOST, method)
# Write the pdf data to a temporary location so Grobid can process it
path = extraction.utils.temp_file(data, suffix='.pdf')
files = {'input': (path, open(path, 'rb')),}
try:
resp = requests.post(url, files=files)
except requests.exceptions.RequestException as ex:
raise RunnableError('Request to Grobid server failed')
finally:
os.remove(path)
if resp.status_code != 200:
raise RunnableError('Grobid returned status {0} instead of 200\nPossible Error:\n{1}'.format(resp.status_code, resp.text))
# remove all namespace info from xml string
# this is hacky but makes parsing it much much easier down the road
xmlstring = re.sub(' xmlns="[^"]+"', '', resp.content, count=1)
xml = safeET.fromstring(xmlstring)
return xml
|
50304
|
class Error:
    none_or_invalid_attribute = "main attributes should have a value."
    unacceptable_json = "JSON input has an unacceptable format."
    unacceptable_object_type = "object has an unacceptable type."
|
50325
|
class Solution:
"""
@param grids: a maxtrix with alphabet
@return: return sorted lists
"""
def CounterDiagonalSort(self, grids):
m = len(grids)
n = len(grids[0])
table = []
for i in range(m):
temp = []
row = i
col = 0
while row >= 0 and col < n:
temp.append(grids[row][col])
row -= 1
col += 1
table.append(temp)
for i in range(m, m + n - 1):
temp = []
row = m - 1
col = i - m + 1
while row >= 0 and col < n:
temp.append(grids[row][col])
row -= 1
col += 1
table.append(temp)
l = min(m, n)
result = []
for t in table:
temp = []
i = 0
while len(temp) < l:
temp.append(t[i % len(t)])
i += 1
result.append(temp)
return sorted(result)
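# Quick illustration: for a 2x2 grid the anti-diagonals (read bottom-left to
# top-right) are ['a'], ['c', 'b'] and ['d']; each is padded cyclically to
# length min(m, n) == 2 and the padded lists are then sorted.
if __name__ == '__main__':
    print(Solution().CounterDiagonalSort([['a', 'b'], ['c', 'd']]))
    # [['a', 'a'], ['c', 'b'], ['d', 'd']]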
|
50354
|
import torch
import numpy as np
class ToTensorGjz(object):
def __call__(self, pic):
img = torch.from_numpy(pic.transpose((2, 0, 1)))
return img.float()
def __repr__(self):
return self.__class__.__name__ + '()'
class NormalizeGjz(object):
def __init__(self, mean, std):
self.mean = mean
self.std = std
def __call__(self, tensor):
tensor.sub_(self.mean).div_(self.std)
return tensor
def crop_img(img, roi_box):
h, w = img.shape[:2]
sx, sy, ex, ey = [int(round(_)) for _ in roi_box]
dh, dw = ey - sy, ex - sx
if len(img.shape) == 3:
res = np.zeros((dh, dw, 3), dtype=np.uint8)
else:
res = np.zeros((dh, dw), dtype=np.uint8)
if sx < 0:
sx, dsx = 0, -sx
else:
dsx = 0
if ex > w:
ex, dex = w, dw - (ex - w)
else:
dex = dw
if sy < 0:
sy, dsy = 0, -sy
else:
dsy = 0
if ey > h:
ey, dey = h, dh - (ey - h)
else:
dey = dh
res[dsy:dey, dsx:dex] = img[sy:ey, sx:ex]
return res
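# Usage sketch: roi boxes may extend past the image border; the overhang is
# zero-padded rather than clipped, so the returned patch always has the
# requested size.
if __name__ == '__main__':
    img = np.zeros((100, 100, 3), dtype=np.uint8)
    patch = crop_img(img, (-10.0, -10.0, 50.0, 50.0))
    print(patch.shape)  # (60, 60, 3)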
|
50356
|
import unittest
from flexmock import flexmock
import swiftclient
import swiftclient.utils
from ansible.modules.network.fabric import swift_fileutil
from test_fabric_base import TestFabricModule
from test_fabric_base import set_module_args
from ansible.module_utils import fabric_utils
class TestSwiftFileUtilModule(TestFabricModule):
module = swift_fileutil
def setUp(self):
super(TestSwiftFileUtilModule, self).setUp()
# Mocking the swift connection object
self.mockobj = flexmock().should_receive('get_account').and_return(['storageurl']).mock()
flexmock(swiftclient.client).should_receive('Connection').and_return(self.mockobj)
flexmock(self.mockobj).should_receive('post_account').and_return(None)
flexmock(self.mockobj).url = "storage_url"
flexmock(self.mockobj).should_receive("close").and_return(None)
fake_logger = flexmock()
flexmock(fake_logger).should_receive('error')
flexmock(fake_logger).should_receive('debug')
flexmock(fabric_utils).should_receive('fabric_ansible_logger').and_return(fake_logger)
self.args_dict = dict(authtoken="<PASSWORD>", authurl="auth_url", user="admin", key="contrail", tenant_name="project",
auth_version="3.0", temp_url_key="temp_url_key1",
temp_url_key_2="temp_url_key2", chosen_temp_url_key="temp_url_key",
container_name="container", filename="sample.txt", expirytime=3600)
# Testing the swift utility module
def test_fileutility01(self):
fake_image_url = "/v1/sample.txt"
flexmock(swiftclient.utils).should_receive('generate_temp_url').and_return(fake_image_url)
set_module_args(self.args_dict)
result = self.execute_module()
self.assertTrue(result["url"])
self.assertEqual(result["url"], fake_image_url)
# Testing when generate_temp_url returns None
def test_fileutility02(self):
flexmock(swiftclient.utils).should_receive('generate_temp_url').and_return(None)
set_module_args(self.args_dict)
        self.assertRaises(Exception, self.execute_module)
# Testing when generate_temp_url raises exception
def test_fileutility_03(self):
flexmock(swiftclient.utils).should_receive('generate_temp_url').and_raise(Exception)
set_module_args(self.args_dict)
        self.assertRaises(Exception, self.execute_module)
# #Testing the swift connection after retry
def test_fileutility04(self):
flexmock(swiftclient.client).should_receive('Connection').and_return(None)
self.args_dict['connection_retry_count'] = 1
set_module_args(self.args_dict)
        self.assertRaises(Exception, self.execute_module)
# Testing the update account error
def test_fileutility05(self):
flexmock(self.mockobj).should_receive('post_account').and_raise(Exception)
set_module_args(self.args_dict)
        self.assertRaises(Exception, self.execute_module)
# Testing the case where optional args are not passed and it should take default value
def test_fileutility06(self):
for e in ["tenant_name","auth_version","chosen_temp_url_key","connection_retry_count"]:
self.args_dict.pop(e, None)
set_module_args(self.args_dict)
fake_image_url = "/v1/sample.txt"
flexmock(swiftclient.utils).should_receive('generate_temp_url').and_return(fake_image_url)
result = self.execute_module()
self.assertTrue(result["url"])
self.assertEqual(result["url"], fake_image_url)
|
50397
|
from __future__ import (
absolute_import, division, print_function, unicode_literals)
import collections
import copy
import functools
import itertools
import json
import jsonschema
import logging
import os
import random
from ruamel.yaml import YAML
import six
from six import iteritems
from six.moves import range
from ..job_steps import step_runners
from ..utils import download_utils, fs_utils, job_utils
def experiment_paths(local_dirs, job_name, exp_name, urls_file):
output_dir = os.path.join(local_dirs['output'], job_name, exp_name)
return {
'client_dir': os.path.dirname(os.path.realpath(__file__)),
'archives_dir': local_dirs['archives'],
'metadata_dir': local_dirs['metadata'],
'output_dir': output_dir,
'fasta_output_dir': os.path.join(output_dir, 'fasta'),
'metadata_output_file': os.path.join(output_dir, 'metadata.json'),
'log_file': os.path.join(output_dir, 'log.txt'),
'experiment_rerun_file': os.path.join(output_dir,
'rerun_experiment.yml'),
'urls_file': urls_file
}
def preprocess_experiments(experiments, select_copy_for_options):
def inflate_expand_option(option_vals):
if isinstance(option_vals, six.string_types):
[start, end] = option_vals.split('..')
return range(int(start), int(end)+1)
else:
return option_vals
def exp_name_with_options(exp_name, option_values):
return exp_name + '-' + '-'.join('{}={}'.format(option_key, option_val)
for (option_key, option_val)
in iteritems(dict(option_values)))
# handle expand_options
final_experiments = collections.OrderedDict()
for exp_name, exp_options in iteritems(experiments):
if 'expand_options' in exp_options:
nonexpanded_options = exp_options.copy()
nonexpanded_options.pop('expand_options')
expand_options = exp_options['expand_options']
expand_values = list(itertools.product(*(
[(option_key, option_val)
for option_val in inflate_expand_option(option_vals)]
for (option_key, option_vals) in iteritems(expand_options)
)))
for expanded_options in expand_values:
new_exp_name = exp_name_with_options(exp_name,
expanded_options)
new_exp_options = dict(expanded_options, **nonexpanded_options)
# handle selection copy_for_options
if select_copy_for_options:
sliced_options = [o for o in expanded_options if
o[0] not in select_copy_for_options]
for opts in expand_values:
if opts == expanded_options:
break
elif all(o in opts for o in sliced_options):
new_exp_options['selection_copy_from'] = \
exp_name_with_options(exp_name, opts)
break
final_experiments[new_exp_name] = new_exp_options
else:
final_experiments[exp_name] = exp_options
return final_experiments
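# Deep-copy the job's step list and specialize it for one experiment:
# relative output paths are resolved against the experiment's output dir,
# and per-step settings (paths, k, disable_avx, kmer generation options)
# are injected based on each step's 'type'.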
def preprocess_steps(steps, paths, exp_options, disable_avx):
def make_output_paths(options, keys):
for key in keys:
if key in options and not os.path.isabs(options[key]):
options[key] = os.path.join(paths['output_dir'], options[key])
steps = copy.deepcopy(steps)
for step_options in steps:
if step_options['type'] == 'select':
step_options.update(paths)
elif step_options['type'] == 'kmers':
step_options['fasta_output_dir'] = paths['fasta_output_dir']
step_options['disable_avx'] = disable_avx
if step_options['k'] == 'from_options':
step_options['k'] = exp_options['k']
make_output_paths(step_options, ['output_file'])
elif step_options['type'] == 'distances':
step_options['disable_avx'] = disable_avx
make_output_paths(step_options, ['input_file', 'output_prefix'])
elif step_options['type'] == 'mds':
make_output_paths(step_options, ['dists_file', 'output_file'])
elif step_options['type'] == 'classify':
step_options['metadata_file'] = paths['metadata_output_file']
make_output_paths(step_options, ['features_file', 'output_file'])
generation_opts = next(
(step for step in steps if step['type'] == 'kmers' and
step['output_file'] == step_options['features_file']),
None
)
if generation_opts:
step_options['generation_options'] = {
k: generation_opts[k] for k in
{'mode', 'k', 'bits_per_element'}
}
return steps
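# Validate data against the JSON schema stored at ../schemas/<schema_name>.json.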
def validate_schema(data, schema_name):
with open(os.path.normpath(os.path.join(
os.path.dirname(__file__), '..', 'schemas',
schema_name + '.json'
))) as schema:
        try:
            jsonschema.validate(data, json.load(schema))
        except jsonschema.ValidationError as e:
            # only ValidationError carries a .message attribute to annotate;
            # other errors (e.g. a malformed schema file) propagate unchanged
            e.message = ('error while validating {}: {}'
                         .format(schema_name, e.message))
            raise e
def validate_job_options(options):
validate_schema(options, 'job_options')
# check lambdas under experiments
if isinstance(options['experiments'], six.string_types):
job_utils.parse_multiline_lambda_str(options['experiments'])
else:
for exp_opts in options['experiments'].values():
if isinstance(exp_opts['groups'], six.string_types):
job_utils.parse_multiline_lambda_str(exp_opts['groups'])
# check select step
select_steps = [s for s in options['steps'] if s['type'] == 'select']
if len(select_steps) > 1:
raise Exception('at most one step of type select is allowed in a job')
elif select_steps:
select_step = select_steps[0]
job_utils.parse_multiline_lambda_str(select_step['pick_group'])
if 'postprocess' in select_step:
job_utils.parse_multiline_lambda_str(select_step['postprocess'])
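# Load <name>.json from the local metadata dir, downloading it first via the
# urls file if it is not already cached.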
def load_metadata(metadata_dir, urls_file, name):
file_path = os.path.join(metadata_dir, name + '.json')
if not os.path.exists(file_path):
download_utils.download_file(
download_utils.url_for_file(file_path, urls_file, 'metadata'),
file_path
)
with open(file_path, 'r') as f:
metadata = json.load(f)
return metadata
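# Run each step through its registered runner, logging progress as
# "step '<type>' (i/n)".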
def run_experiment_steps(steps, exp_options):
for i, step_options in enumerate(steps):
step_desc = "step '{}' ({}/{})".format(step_options['type'], i+1,
len(steps))
with job_utils.log_step(step_desc, start_stars=True):
step_runners[step_options['type']](step_options, exp_options)
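# Entry point: load and validate the job and settings files, expand the
# experiment list, then run every step of every experiment, writing a
# per-experiment log and a rerun_experiment.yml snapshot along the way.
# An illustrative (hypothetical) minimal job file follows; the field values
# here are made up, and the authoritative schema is
# ../schemas/job_options.json:
#
#   name: demo-job
#   random_seed: 42
#   experiments:
#     baseline:
#       groups: "lambda options, metadata: {...}"
#       expand_options:
#         k: 2..4
#   steps:
#     - type: kmers
#       k: from_options
#       output_file: kmers_output
#     - type: distances
#       input_file: kmers_output
#       output_prefix: dists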
def run(args):
job_options = YAML(typ='safe').load(
download_utils.read_file_or_url(args.job_file)
)
validate_job_options(job_options)
settings = YAML(typ='safe').load(
download_utils.read_file_or_url(args.settings_file)
)
validate_schema(settings, 'settings')
if args.validate_only:
if args.urls_file:
validate_schema(YAML(typ='safe').load(
download_utils.read_file_or_url(args.urls_file)
), 'file_urls')
print('INFO options files validated successfully')
return
local_dirs = settings['local_dirs']
job_name = job_options['name']
# create archives/metadata dirs if they don't exist
fs_utils.mkdir_p(local_dirs['archives'])
fs_utils.mkdir_p(local_dirs['metadata'])
experiments = job_options['experiments']
if isinstance(experiments, six.string_types):
        paths = experiment_paths(local_dirs, job_name, '', args.urls_file)
experiments = job_utils.parse_multiline_lambda_str(
experiments, load_metadata=functools.partial(
load_metadata, paths['metadata_dir'], args.urls_file
)
)()
first_select = next((step for step in job_options['steps'] if
step['type'] == 'select'), {})
experiments = preprocess_experiments(experiments,
first_select.get('copy_for_options'))
log, formatter = job_utils.setup_logging(job_name, settings)
for i, (exp_name, exp_options) in enumerate(iteritems(experiments)):
with job_utils.log_step("experiment '{}' ({}/{})"
.format(exp_name, i+1, len(experiments)),
start_stars=True):
exp_options = exp_options.copy()
exp_options['experiment_name'] = exp_name
# get ready
paths = experiment_paths(local_dirs, job_name, exp_name,
args.urls_file)
steps = preprocess_steps(job_options['steps'], paths, exp_options,
args.disable_avx)
if isinstance(exp_options['groups'], six.string_types):
metadata = None
if 'dataset' in exp_options and ('metadata' in
exp_options['dataset']):
metadata_name = exp_options['dataset']['metadata']
metadata = load_metadata(paths['metadata_dir'],
args.urls_file, metadata_name)
exp_options['groups'] = job_utils.parse_multiline_lambda_str(
exp_options['groups'],
load_metadata=functools.partial(
load_metadata, paths['metadata_dir'], args.urls_file
)
)(dict(exp_options, **paths), metadata)
fs_utils.mkdir_p(paths['output_dir'])
# start file log
file_logger = logging.FileHandler(paths['log_file'], mode='w')
file_logger.setFormatter(formatter)
log.addHandler(file_logger)
# seed the RNG
if 'random_seed' in job_options:
random_seed = job_options['random_seed']
else:
random_seed = random.getrandbits(32)
log.info('using random seed value %d', random_seed)
random.seed(random_seed)
# create a re-run file
with open(paths['experiment_rerun_file'], 'w') as rerun_file:
YAML().dump({
'name': job_name,
'random_seed': random_seed,
'experiments': {
exp_name: exp_options
},
'steps': job_options['steps']
}, rerun_file)
# run steps
run_experiment_steps(steps, exp_options)
# finish file log
file_logger.close()
log.removeHandler(file_logger)
|