import uuid  # uuid gives each uploaded image a unique name/id
import os  # os.path is used to build a valid destination path for the uploaded file
from django.db import models
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager, PermissionsMixin
from django.conf import settings #as we want to use AUTH_USER_MODEL to apply foreign key.https://docs.djangoproject.com/en/2.1/ref/models/fields/#django.db.models.ForeignKey
def recipe_image_file_path(instance, filename): #Generate file path for new recipe image
ext = filename.split('.')[-1] #stripping the extension part of the filename and storing it in variable ext
filename = f'{uuid.uuid4()}.{ext}'
return os.path.join('uploads/recipe/', filename)
class UserManager(BaseUserManager): #to pull in all features of BaseUserManager and override some functions to handle our email instead of username
def create_user(self, email, password=None, **extra_fields): #creates and saves a new user
if not email:
raise ValueError('Users must have an email address')
user = self.model(email=self.normalize_email(email), **extra_fields)
user.set_password(password) #to store password as a hash
user.save(using=self._db)
return user
def create_superuser(self, email, password): #creates and saves a new super user
user = self.create_user(email, password)
user.is_staff = True
user.is_superuser = True
user.save(using=self._db)
return user
class User(AbstractBaseUser, PermissionsMixin): #custom user model that supports using email instead of username
email = models.EmailField(max_length=250, unique=True)
name = models.CharField(max_length=250)
is_active = models.BooleanField(default=True)
is_staff = models.BooleanField(default=False)
    objects = UserManager()  # assign UserManager to the objects attribute, i.e. UserManager handles creation of every new User object
USERNAME_FIELD = 'email' #so we can use email as a field to login
class Tag(models.Model): # Tag to be used for a recipe
name = models.CharField(max_length=255)
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
        on_delete=models.CASCADE,  # when we delete the user we delete the tags as well
) #assigning foreign key to the User object.
def __str__(self): #using dunder method to add string rep of the model
return self.name
class Ingredient(models.Model): #Ingredient to be used in a recipe
name = models.CharField(max_length=255)
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE
)
def __str__(self):
return self.name
class Recipe(models.Model): #Recipe model/object
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
        on_delete=models.CASCADE
)
title = models.CharField(max_length=255)
time_minutes = models.IntegerField()
price = models.DecimalField(max_digits=5, decimal_places=2)
link = models.CharField(max_length=255, blank=True)
ingredients = models.ManyToManyField('Ingredient')
    tags = models.ManyToManyField('Tag')  # ManyToManyField because many recipes can share many tags and ingredients; it works much like ForeignKey. Note: the model name is passed as the string 'Tag' so the class does not have to be defined above this one, which gets tricky once there are many models.
image = models.ImageField(null=True, upload_to=recipe_image_file_path) # passing reference to the function so it can be called every time we upload in the background.
    def __str__(self):
return self.title
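# Illustrative usage sketch (not part of the original models module). It assumes a
# configured Django project where this app is installed and migrations have run;
# the app path 'core.models' below is an assumption:
#
#   from django.contrib.auth import get_user_model
#   from core.models import Recipe, Tag, recipe_image_file_path
#
#   user = get_user_model().objects.create_user(email='test@example.com', password='pass123')
#   recipe = Recipe.objects.create(user=user, title='Soup', time_minutes=5, price=3.50)
#   recipe.tags.add(Tag.objects.create(user=user, name='Vegan'))
#   # recipe_image_file_path(None, 'photo.jpg') -> 'uploads/recipe/<uuid4>.jpg'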
|
import asyncio
from contextlib import suppress
from os.path import dirname
from os.path import join
import pyperf # type: ignore
from dipdup.config import DipDupConfig
from dipdup.dipdup import DipDup
from dipdup.test import with_operation_index_fuzzer
def add_cmdline_args(cmd, args):
cmd += ['--quiet']
runner = pyperf.Runner(add_cmdline_args=add_cmdline_args)
paths = [
join(dirname(__file__), '..', 'integration_tests', name)
for name in [
'hic_et_nunc.yml',
]
]
async def _match():
for path in paths:
config = DipDupConfig.load([path])
config.database.path = ':memory:'
config.initialize()
with with_operation_index_fuzzer(10, 3):
dipdup = DipDup(config)
with suppress(asyncio.CancelledError):
await dipdup.run()
runner.bench_func('index_match_operations', lambda: asyncio.run(_match()))
|
from gym.spaces import Box, Dict, Discrete, MultiDiscrete, Tuple
import numpy as np
import unittest
import ray
from ray.tune import register_env
from ray.rllib.algorithms.qmix import QMixConfig
from ray.rllib.env.multi_agent_env import MultiAgentEnv
class AvailActionsTestEnv(MultiAgentEnv):
num_actions = 10
action_space = Discrete(num_actions)
observation_space = Dict(
{
"obs": Dict(
{
"test": Dict({"a": Discrete(2), "b": MultiDiscrete([2, 3, 4])}),
"state": MultiDiscrete([2, 2, 2]),
}
),
"action_mask": Box(0, 1, (num_actions,)),
}
)
def __init__(self, env_config):
super().__init__()
self.state = None
self.avail = env_config.get("avail_actions", [3])
self.action_mask = np.array([0] * 10)
for a in self.avail:
self.action_mask[a] = 1
def reset(self):
self.state = 0
return {
"agent_1": {
"obs": self.observation_space["obs"].sample(),
"action_mask": self.action_mask,
},
"agent_2": {
"obs": self.observation_space["obs"].sample(),
"action_mask": self.action_mask,
},
}
def step(self, action_dict):
if self.state > 0:
assert (
action_dict["agent_1"] in self.avail
and action_dict["agent_2"] in self.avail
), "Failed to obey available actions mask!"
self.state += 1
rewards = {"agent_1": 1, "agent_2": 0.5}
obs = {
"agent_1": {
"obs": self.observation_space["obs"].sample(),
"action_mask": self.action_mask,
},
"agent_2": {
"obs": self.observation_space["obs"].sample(),
"action_mask": self.action_mask,
},
}
dones = {"__all__": self.state >= 20}
return obs, rewards, dones, {}
class TestQMix(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
ray.init()
@classmethod
def tearDownClass(cls) -> None:
ray.shutdown()
def test_avail_actions_qmix(self):
grouping = {
"group_1": ["agent_1", "agent_2"],
}
obs_space = Tuple(
[
AvailActionsTestEnv.observation_space,
AvailActionsTestEnv.observation_space,
]
)
act_space = Tuple(
[AvailActionsTestEnv.action_space, AvailActionsTestEnv.action_space]
)
register_env(
"action_mask_test",
lambda config: AvailActionsTestEnv(config).with_agent_groups(
grouping, obs_space=obs_space, act_space=act_space
),
)
config = (
QMixConfig()
.framework(framework="torch")
.environment(
env="action_mask_test",
env_config={"avail_actions": [3, 4, 8]},
)
.rollouts(num_envs_per_worker=5)
) # Test with vectorization on.
trainer = config.build()
for _ in range(4):
trainer.train() # OK if it doesn't trip the action assertion error
assert trainer.train()["episode_reward_mean"] == 30.0
trainer.stop()
ray.shutdown()
if __name__ == "__main__":
import pytest
import sys
sys.exit(pytest.main(["-v", __file__]))
|
import getpass
from collections import OrderedDict as oDict
from collections.abc import Iterable, Mapping
from enum import Enum
from glTF_editor.common.utils_py import \
        UnicodeType
from glTF_editor.common.data_serializer import \
        serializer
class Asset(object):
"""
...
"""
def __init__(self,
version=None,
copyright=None,
generator=None,
minVersion=None,
extensions=None,
extras=None):
        self._version = "2.0"
        self._copyright = getpass.getuser()
        self._generator = "Krita glTF Editor"
self._minVersion = None
self._extensions = None
self._extras = None
if version is not None:
self.version = version
if copyright is not None:
self.copyright = copyright
if generator is not None:
self.generator = generator
if minVersion is not None:
self.minVersion = minVersion
if extensions is not None:
self.extensions = extensions
if extras is not None:
self.extras = extras
@classmethod
def cast(cls, obj):
if isinstance(obj, cls):
return cls(
version=obj.version,
copyright=obj.copyright,
generator=obj.generator,
minVersion=obj.minVersion,
extensions=obj.extensions,
extras=obj.extras)
elif isinstance(obj, Mapping):
return cls(
version=obj.get("version"),
copyright=obj.get("copyright"),
generator=obj.get("generator"),
minVersion=obj.get("minVersion"),
extensions=obj.get("extensions"),
extras=obj.get("extras"))
elif isinstance(obj, UnicodeType):
unserialized = serializer.loads(obj)
if not isinstance(unserialized, UnicodeType):
return cls.cast(unserialized)
elif isinstance(obj, Iterable):
it = iter(obj)
return cls(
version=next(it),
copyright=next(it),
generator=next(it),
minVersion=next(it),
extensions=next(it),
extras=next(it))
raise RuntimeError("Unable to Cast {obj!r} to {cls} instance.".format(**locals()))
def getVersion(self):
if self._version is None:
return None
else:
return iter(self._version)
def setVersion(self, newVersion):
if newVersion is None:
self._version = None
else:
self._version = [UnicodeType(n) for n in newVersion]
version = property(getVersion, setVersion)
def getCopyright(self):
if self._copyright is None:
return None
else:
return iter(self._copyright)
def setCopyright(self, newCopyright):
if newCopyright is None:
self._copyright = None
else:
self._copyright = [UnicodeType(n) for n in newCopyright]
copyright = property(getCopyright, setCopyright)
def getGenerator(self):
if self._generator is None:
return None
else:
return iter(self._generator)
def setGenerator(self, newGenerator):
if newGenerator is None:
self._generator = None
else:
self._generator = [UnicodeType(n) for n in newGenerator]
generator = property(getGenerator, setGenerator)
def getMinVersion(self):
if self._minVersion is None:
return None
else:
return iter(self._minVersion)
def setMinVersion(self, newMinVersion):
if newMinVersion is None:
self._minVersion = None
else:
self._minVersion = [UnicodeType(n) for n in newMinVersion]
minVersion = property(getMinVersion, setMinVersion)
def getExtensions(self):
if self._extensions is None:
return None
else:
return iter(self._extensions)
def setExtensions(self, newExtensions):
if newExtensions is None:
self._extensions = None
else:
self._extensions = [UnicodeType(n) for n in newExtensions]
extensions = property(getExtensions, setExtensions)
def getExtras(self):
if self._extras is None:
return None
else:
return iter(self._extras)
def setExtras(self, newExtras):
if newExtras is None:
self._extras = None
else:
self._extras = [UnicodeType(n) for n in newExtras]
extras = property(getExtras, setExtras)
def __str__(self):
return serializer.dumps(self, type_hints=False)
def __repr__(self):
cls = type(self)
return ("{cls.__name__}("
"version={self.version!r}, "
"copyright={self.copyright!r}, "
"generator={self.generator!r}, "
"minVersion={self.minVersion!r}, "
"extensions={self.extensions!r}, "
"extras={self.extras!r}")").format(**locals())
def to_dict(obj):
result = oDict()
def add_valid(attr_name):
value = getattr(obj, attr_name, None)
if value is not None:
result[attr_name] = value.value if isinstance(value, Enum) else value
add_valid("version")
add_valid("copyright")
add_valid("generator")
add_valid("minVersion")
add_valid("extensions")
add_valid("extras")
return result
serializer.register(
data_cls=Asset,
from_dict=lambda dct: Asset.cast(dct),
to_dict=to_dict)
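# Illustrative only (not part of the original module): Asset.cast() accepts an existing
# Asset, a mapping, a serialized string, or an iterable of field values. A minimal
# sketch, assuming the glTF_editor package is importable:
#
#   asset = Asset.cast({"version": "2.0", "generator": "Krita glTF Editor"})
#   print(serializer.dumps(asset, type_hints=False))  # same output as str(asset)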
|
# Copyright 2014-2016 Insight Software Consortium.
# Copyright 2004-2008 Roman Yakovenko.
# Distributed under the Boost Software License, Version 1.0.
# See http://www.boost.org/LICENSE_1_0.txt
"""
Implementation details
"""
import re
class parser_t(object):
"""implementation details"""
def __init__(
self,
pattern_char_begin,
pattern_char_end,
pattern_char_separator):
self.__begin = pattern_char_begin
self.__end = pattern_char_end
self.__separator = pattern_char_separator
# right now parser does not take into account next qualifiers, but it
# will
self.__text_qualifier = '"'
self.__char_qualifier = "'"
self.__escape = '\\'
def has_pattern(self, decl_string):
"""
Implementation detail
"""
if self.__begin == "<":
# Cleanup parentheses blocks before checking for the pattern
# See also the args() method (in this file) for more explanations.
            decl_string = re.sub(r"\s\(.*?\)", "", decl_string).strip()
last_part = decl_string.split('::')[-1]
        return (
            decl_string.find(self.__begin) != -1
            and last_part.find(self.__end) != -1
        )
def name(self, decl_string):
"""implementation details"""
if not self.has_pattern(decl_string):
return decl_string
args_begin = decl_string.find(self.__begin)
return decl_string[0: args_begin].strip()
def __find_args_separator(self, decl_string, start_pos):
"""implementation details"""
bracket_depth = 0
for index, ch in enumerate(decl_string[start_pos:]):
if ch not in (self.__begin, self.__end, self.__separator):
continue # I am interested only in < and >
elif self.__separator == ch:
if not bracket_depth:
return index + start_pos
elif self.__begin == ch:
bracket_depth += 1
elif not bracket_depth:
return index + start_pos
else:
bracket_depth -= 1
return -1
def args(self, decl_string):
"""
Extracts a list of arguments from the provided declaration string.
Implementation detail. Example usages:
Input: myClass<std::vector<int>, std::vector<double>>
Output: [std::vector<int>, std::vector<double>]
Args:
decl_string (str): the full declaration string
Returns:
list: list of arguments as strings
"""
args_begin = decl_string.find(self.__begin)
args_end = decl_string.rfind(self.__end)
if -1 in (args_begin, args_end) or args_begin == args_end:
raise RuntimeError(
"%s doesn't validate template instantiation string" %
decl_string)
args_only = decl_string[args_begin + 1: args_end].strip()
# The list of arguments to be returned
args = []
parentheses_blocks = []
prev_span = 0
if self.__begin == "<":
# In case where we are splitting template names, there
# can be parentheses blocks (for arguments) that need to be taken
# care of.
# Build a regex matching a space (\s)
# + something inside parentheses
            regex = re.compile(r"\s\(.*?\)")
for m in regex.finditer(args_only):
# Store the position and the content
parentheses_blocks.append([m.start() - prev_span, m.group()])
prev_span = m.end() - m.start()
# Cleanup the args_only string by removing the parentheses and
# their content.
args_only = args_only.replace(m.group(), "")
# Now we are trying to split the args_only string in multiple arguments
previous_found, found = 0, 0
while True:
found = self.__find_args_separator(args_only, previous_found)
if -1 == found:
args.append(args_only[previous_found:].strip())
# This is the last argument. Break out of the loop.
break
else:
args.append(args_only[previous_found: found].strip())
previous_found = found + 1 # skip found separator
# Get the size and position for each argument
absolute_pos_list = []
absolute_pos = 0
for arg in args:
absolute_pos += len(arg)
absolute_pos_list.append(absolute_pos)
for item in parentheses_blocks:
# In case where there are parentheses blocks we add them back
# to the right argument
parentheses_block_absolute_pos = item[0]
parentheses_block_string = item[1]
current_arg_absolute_pos = 0
for arg_index, arg_absolute_pos in enumerate(absolute_pos_list):
current_arg_absolute_pos += arg_absolute_pos
if current_arg_absolute_pos >= parentheses_block_absolute_pos:
# Add the parentheses block back and break out of the loop.
args[arg_index] += parentheses_block_string
break
return args
NOT_FOUND = (-1, -1)
"""implementation details"""
def find_args(self, text, start=None):
"""implementation details"""
if start is None:
start = 0
first_occurance = text.find(self.__begin, start)
if first_occurance == -1:
return self.NOT_FOUND
previous_found, found = first_occurance + 1, 0
while True:
found = self.__find_args_separator(text, previous_found)
if -1 == found:
return self.NOT_FOUND
elif text[found] == self.__end:
return first_occurance, found
else:
previous_found = found + 1 # skip found sep
def split(self, decl_string):
"""implementation details"""
assert self.has_pattern(decl_string)
return self.name(decl_string), self.args(decl_string)
def split_recursive(self, decl_string):
"""implementation details"""
assert self.has_pattern(decl_string)
answer = []
to_go = [decl_string]
while to_go:
name, args = self.split(to_go.pop())
answer.append((name, args))
for arg in args:
if self.has_pattern(arg):
to_go.append(arg)
return answer
def join(self, name, args, arg_separator=None):
"""implementation details"""
if None is arg_separator:
arg_separator = ', '
args = [_f for _f in args if _f]
if not args:
args_str = ' '
elif 1 == len(args):
args_str = ' ' + args[0] + ' '
else:
args_str = ' ' + arg_separator.join(args) + ' '
return ''.join([name, self.__begin, args_str, self.__end])
def normalize(self, decl_string, arg_separator=None):
"""implementation details"""
if not self.has_pattern(decl_string):
return decl_string
name, args = self.split(decl_string)
for i, arg in enumerate(args):
args[i] = self.normalize(arg)
return self.join(name, args, arg_separator)
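# Illustrative usage sketch (not part of the original pygccxml module). With the
# angle-bracket configuration assumed below, split() separates a template name from its
# arguments, and join() rebuilds the declaration string:
#
#   parser = parser_t('<', '>', ',')
#   parser.split('myClass<std::vector<int>, std::vector<double>>')
#   # -> ('myClass', ['std::vector<int>', 'std::vector<double>'])
#   parser.join('myClass', ['int', 'double'])
#   # -> 'myClass< int, double >'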
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from __future__ import print_function
import os.path
import shutil
import llnl.util.tty as tty
from llnl.util.filesystem import working_dir
import spack.cmd.common.arguments as arguments
import spack.config
import spack.paths
import spack.util.gpg
from spack.util.executable import which
from spack.util.spack_yaml import syaml_dict
description = "set up spack for our tutorial (WARNING: modifies config!)"
section = "config"
level = "long"
# tutorial configuration parameters
tutorial_branch = "releases/v%d.%d" % spack.spack_version_info[:2]
tutorial_mirror = "file:///mirror"
tutorial_key = os.path.join(spack.paths.share_path, "keys", "tutorial.pub")
# configs to remove
rm_configs = [
"~/.spack/linux/compilers.yaml",
"~/.spack/packages.yaml",
"~/.spack/mirrors.yaml",
"~/.spack/modules.yaml",
"~/.spack/config.yaml",
]
def setup_parser(subparser):
arguments.add_common_arguments(subparser, ['yes_to_all'])
def tutorial(parser, args):
if not spack.cmd.spack_is_git_repo():
tty.die("This command requires a git installation of Spack!")
if not args.yes_to_all:
tty.msg("This command will set up Spack for the tutorial at "
"https://spack-tutorial.readthedocs.io.",
"")
tty.warn("This will modify your Spack configuration by:",
" - deleting some configuration in ~/.spack",
" - adding a mirror and trusting its public key",
" - checking out a particular branch of Spack",
"")
if not tty.get_yes_or_no("Are you sure you want to proceed?"):
tty.die("Aborted")
rm_cmds = ["rm -f %s" % f for f in rm_configs]
tty.msg("Reverting compiler and repository configuration", *rm_cmds)
    for path in rm_configs:
        path = os.path.expanduser(path)
        if os.path.isdir(path):
            shutil.rmtree(path, ignore_errors=True)
        elif os.path.exists(path):
            os.remove(path)
tty.msg("Ensuring that the tutorial binary mirror is configured:",
"spack mirror add tutorial %s" % tutorial_mirror)
mirror_config = syaml_dict()
mirror_config["tutorial"] = tutorial_mirror
spack.config.set('mirrors', mirror_config, scope="user")
tty.msg("Ensuring that we trust tutorial binaries",
"spack gpg trust %s" % tutorial_key)
spack.util.gpg.trust(tutorial_key)
# Note that checkout MUST be last. It changes Spack under our feet.
# If you don't put this last, you'll get import errors for the code
# that follows (exacerbated by the various lazy singletons we use)
tty.msg("Ensuring we're on the releases/v0.16 branch")
git = which("git", required=True)
with working_dir(spack.paths.prefix):
git("checkout", tutorial_branch)
# NO CODE BEYOND HERE
|
from tkinter import *
import ged_lib as gl
import tag_records as tag
import write_ged_file as wgf
want_ui = True
def clicked1():
gl.process_ged_file()
def clicked2():
tag.tag_ancestors_families()
def clicked3():
wgf.write_ged_file()
def exit():
root.destroy()
if want_ui:
font = "Tahoma"
font_size = 10
root = Tk()
root.title("")
root.geometry('340x520') #width, height
f00 = Label(root, text=" V1.00",font=(font, font_size))
f00.grid(row=0, column=0)
f11 = Label(root, text=" ",font=(font, 10))
f11.grid(row=1, column=1)
f31 = Label(root, text="Family Tree Splitter",font=(font, font_size))
f31.grid(row=3, column=1)
f41 = Label(root, text=" ",font=(font, font_size))
f41.grid(row=4, column=1)
f51 = Button(root, text=" Process GED file ", font=(font, font_size), command=clicked1)
f51.grid(row=5, column=1)
f61 = Label(root, text=" ",font=(font, font_size))
f61.grid(row=6, column=1)
f71 = Button(root, text=" Tag Records ", font=(font, font_size), command=clicked2)
f71.grid(row=7, column=1)
f81 = Label(root, text=" ",font=(font, font_size))
f81.grid(row=8, column=1)
f91 = Button(root, text=" Write New GED File ", font=(font, font_size), command=clicked3)
f91.grid(row=9, column=1)
f101 = Label(root, text=" ",font=(font, font_size))
f101.grid(row=10, column=1)
f111 = Button(root, text=" Exit ", font=(font, font_size), command=exit)
f111.grid(row=11, column=1)
f121 = Label(root, text=" ",font=(font, font_size))
f121.grid(row=12, column=1)
f131 = Button(root, text=" Edit Parameters ", font=(font, font_size), command=gl.edit_params)
f131.grid(row=13, column=1)
root.mainloop()
else:
gl.process_ged_file()
tag.tag_ancestors_families()
tag.tag_individuals_families()
wgf.write_ged_file()
|
import re
from sphinx.ext.autodoc import (
ALL, Documenter,
bool_option, members_option, members_set_option)
from .domain import SolidityDomain
from .sourceregistry import SolidityObject
from sphinx.util.logging import getLogger
logger = getLogger(__name__)
class SolidityObjectDocumenter(Documenter):
domain = 'sol'
option_spec = {
'members': members_option,
'undoc-members': bool_option,
'noindex': bool_option,
'exclude-members': members_set_option,
}
@classmethod
def can_document_member(cls, member, membername, isattr, parent):
# type: (Any, unicode, bool, Any) -> bool
"""Called to see if a member of a Python object can be documented by this documenter.
Will always answer no because this Documenter is built for Solidity."""
return False
def get_sourcename(self):
return '{}:docstring of {} {}'.format(
self.object.file,
self.object.objtype,
'.'.join(filter(lambda x: x,
(self.object.contract_name,
self.object.name))),
)
def add_directive_header(self):
domain = getattr(self, 'domain', 'sol')
directive = getattr(self, 'directivetype', self.objtype)
sourcename = self.get_sourcename()
self.add_line('.. {domain}:{directive}:: {signature}'.format(
domain=domain, directive=directive, signature=self.object.signature
), sourcename)
if self.options.noindex:
self.add_line(u' :noindex:', sourcename)
def add_content(self, more_content):
"""Add content from source docs and user."""
sourcename = self.get_sourcename()
if self.object.docs:
self.add_line('', sourcename)
for line in self.object.docs.splitlines():
self.add_line(line, sourcename)
# add additional content (e.g. from document), if present
if more_content:
self.add_line('', sourcename)
for line, src in zip(more_content.data, more_content.items):
self.add_line(line, src[0], src[1])
def document_members(self, all_members=False):
# type: (bool) -> None
"""Generate reST for member documentation.
If *all_members* is True, do all members, else those given by
*self.options.members*.
"""
sourcename = self.get_sourcename()
want_all = all_members or self.options.members is ALL
if not want_all and not self.options.members:
return
expressions = [
SolidityObject.file == self.object.file,
SolidityObject.contract_name == self.object.name
]
if not want_all:
members_inset = set()
should_include_fallback = False
should_include_constructor = False
for member in self.options.members:
if member == '<fallback>':
should_include_fallback = True
elif member == 'constructor':
should_include_constructor = True
elif member:
members_inset.add(member)
expr = SolidityObject.name.in_(members_inset)
if should_include_fallback:
expr |= (SolidityObject.objtype == 'function') & (SolidityObject.name.is_null(True))
if should_include_constructor:
expr |= (SolidityObject.objtype == 'constructor') & (SolidityObject.name.is_null(True))
expressions.append(expr)
if self.options.exclude_members:
should_exclude_fallback = False
should_exclude_constructor = False
if '<fallback>' in self.options.exclude_members:
self.options.exclude_members.remove('<fallback>')
should_exclude_fallback = True
if 'constructor' in self.options.exclude_members:
self.options.exclude_members.remove('constructor')
should_exclude_constructor = True
expr = SolidityObject.name.not_in(self.options.exclude_members)
subexpr = SolidityObject.name.is_null(True)
if should_exclude_fallback:
subexpr &= (SolidityObject.objtype != 'function')
if should_exclude_constructor:
subexpr &= (SolidityObject.objtype != 'constructor')
expr |= subexpr
expressions.append(expr)
for member in SolidityObject.select().where(*expressions):
self.add_line('', sourcename)
full_mname = '{file}:{contract}{name}{paramtypes}'.format(
file=member.file,
contract='' if member.contract_name is None
else member.contract_name + '.',
name=member.name or '',
paramtypes='' if member.paramtypes is None
else '(' + member.paramtypes + ')',
)
documenter = all_solidity_documenters[member.objtype](
self.directive, full_mname, self.indent)
documenter.generate(all_members=True)
def generate(self, more_content=None, all_members=False):
        # type: (Any, bool) -> None
"""Generate reST for the object given by *self.name*, and possibly for
its members.
If *more_content* is given, include that content.
If *all_members* is True, document all members.
"""
directive = getattr(self, 'directivetype', self.objtype)
# parse components out of name
(file, _, namepath) = self.name.rpartition(':')
(contract_name, _, fullname) = namepath.partition('.')
(name, _, paramtypes) = fullname.partition('(')
# normalize components
name = name.strip() or None
if directive in ('contract', 'interface', 'library') and name is None:
name = contract_name
contract_name = None
paramtypes = ','.join(ptype.strip() for ptype in paramtypes.split(','))
paramtypes = re.sub(r'\s+', ' ', paramtypes)
if paramtypes.endswith(')'):
paramtypes = paramtypes[:-1]
# build query
expressions = [
SolidityObject.objtype == directive,
SolidityObject.name == name,
]
if file:
expressions.append(SolidityObject.file == file)
if contract_name:
expressions.append(SolidityObject.contract_name == contract_name)
if paramtypes:
expressions.append(SolidityObject.paramtypes == paramtypes)
# get associated object
query = SolidityObject.select().where(*expressions)
sol_objects = tuple(query)
if len(sol_objects) == 0:
logger.warning('{} {} could not be found via query:\n{}'.format(
directive, self.name, ',\n'.join(
' ' + str(expr.lhs.column_name) +
str(expr.op) + ('' if expr.rhs is None else expr.rhs)
for expr in expressions
)))
return
elif len(sol_objects) > 1:
logger.warning('multiple candidates for {} {} found:\n{}'.format(
directive, self.name,
'\n'.join(' ' + obj.signature for obj in sol_objects)))
self.object = sol_objects[0]
# begin rendering output
sourcename = self.get_sourcename()
# make sure that the result starts with an empty line. This is
# necessary for some situations where another directive preprocesses
# reST and no starting newline is present
self.add_line('', sourcename)
# generate the directive header and options, if applicable
self.add_directive_header()
# make sure content is indented
# TODO: consider adding a source unit directive
self.indent += self.content_indent
# add all content (from docstrings, attribute docs etc.)
self.add_content(more_content)
# document members, if possible
if directive in ('contract', 'interface', 'library'):
self.add_line('', sourcename)
self.document_members(all_members)
def method_stub(self):
raise NotImplementedError
for method_name in (
'parse_name', 'import_object', 'get_real_modname', 'check_module',
'format_args', 'format_name', 'format_signature', 'get_doc', 'process_doc',
'get_object_members', 'filter_members',
):
setattr(SolidityObjectDocumenter, method_name, method_stub)
all_solidity_documenters = dict(
(objtype, type(
objtype.capitalize() + 'Documenter',
(SolidityObjectDocumenter,),
{
'objtype': 'sol' + objtype,
'directivetype': objtype,
}
)) for objtype in SolidityDomain.directives.keys()
)
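# Illustrative only: how an extension's setup() hook might register these documenters
# with Sphinx. The original module does not include this; app.add_autodocumenter() is
# standard Sphinx autodoc API, but wiring it up here is an assumption.
#
#   def setup(app):
#       app.setup_extension('sphinx.ext.autodoc')
#       for documenter in all_solidity_documenters.values():
#           app.add_autodocumenter(documenter)
#       return {'parallel_read_safe': True}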
|
import pyodbc
import sqlalchemy as sa
import sqlalchemy.dialects.mssql as mssql
import ibis.common.exceptions as com
import ibis.expr.datatypes as dt
import ibis.expr.operations as ops
import ibis.backends.base_sqlalchemy.alchemy as alch
# used for literal translate
from ibis.backends.base_sqlalchemy.alchemy import fixed_arity, unary
def raise_unsupported_op_error(translator, expr, *args):
msg = "SQLServer backend doesn't support {} operation!"
op = expr.op()
raise com.UnsupportedOperationError(msg.format(type(op)))
# Aggregation
# copied from postgresql compiler
# support for bit columns in aggregate methods
def _reduction(func_name, cast_type='int32'):
def reduction_compiler(t, expr):
arg, where = expr.op().args
if arg.type().equals(dt.boolean):
arg = arg.cast(cast_type)
func = getattr(sa.func, func_name)
if where is not None:
arg = where.ifelse(arg, None)
return func(t.translate(arg))
return reduction_compiler
# String
# TODO: substr and find are copied from SQLite, we should really have a
# "base" set of SQL functions that are the most common APIs across the major
# RDBMS
def _substr(t, expr):
f = sa.func.substring
arg, start, length = expr.op().args
sa_arg = t.translate(arg)
sa_start = t.translate(start)
if length is None:
return f(sa_arg, sa_start + 1)
else:
sa_length = t.translate(length)
return f(sa_arg, sa_start + 1, sa_length)
def _string_find(t, expr):
arg, substr, start, _ = expr.op().args
sa_arg = t.translate(arg)
sa_substr = t.translate(substr)
if start is not None:
sa_start = t.translate(start)
return sa.func.charindex(sa_substr, sa_arg, sa_start) - 1
return sa.func.charindex(sa_substr, sa_arg) - 1
# Numerical
def _floor_divide(t, expr):
left, right = map(t.translate, expr.op().args)
return sa.func.floor(left / right)
def _extract(fmt):
def translator(t, expr):
(arg,) = expr.op().args
sa_arg = t.translate(arg)
        # sa.literal_column is used because it makes the argument pass
        # through as a literal, NOT as a bound parameter
return sa.cast(
sa.func.datepart(sa.literal_column(fmt), sa_arg), sa.SMALLINT
)
return translator
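# Illustrative only: with the _extract() translator above, an expression such as
# table.timestamp_col.year() is expected to compile to SQL along the lines of
#   CAST(DATEPART(year, t0.timestamp_col) AS SMALLINT)
# (the table/column aliases shown here are assumptions, not produced by this module).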
_operation_registry = alch._operation_registry.copy()
_operation_registry.update(
{
# aggregate methods
        ops.Count: _reduction('count'),
ops.Max: _reduction('max'),
ops.Min: _reduction('min'),
ops.Sum: _reduction('sum'),
ops.Mean: _reduction('avg', 'float64'),
# string methods
ops.LStrip: unary(sa.func.ltrim),
ops.Lowercase: unary(sa.func.lower),
ops.RStrip: unary(sa.func.rtrim),
ops.Repeat: fixed_arity(sa.func.replicate, 2),
ops.Reverse: unary(sa.func.reverse),
ops.StringFind: _string_find,
ops.StringLength: unary(sa.func.length),
ops.StringReplace: fixed_arity(sa.func.replace, 3),
ops.Strip: unary(sa.func.trim),
ops.Substring: _substr,
ops.Uppercase: unary(sa.func.upper),
# math
ops.Abs: unary(sa.func.abs),
ops.Acos: unary(sa.func.acos),
ops.Asin: unary(sa.func.asin),
ops.Atan2: fixed_arity(sa.func.atn2, 2),
ops.Atan: unary(sa.func.atan),
ops.Ceil: unary(sa.func.ceiling),
ops.Cos: unary(sa.func.cos),
ops.Floor: unary(sa.func.floor),
ops.FloorDivide: _floor_divide,
ops.Power: fixed_arity(sa.func.power, 2),
ops.Sign: unary(sa.func.sign),
ops.Sin: unary(sa.func.sin),
ops.Sqrt: unary(sa.func.sqrt),
ops.Tan: unary(sa.func.tan),
# timestamp methods
ops.TimestampNow: fixed_arity(sa.func.GETDATE, 0),
ops.ExtractYear: _extract('year'),
ops.ExtractMonth: _extract('month'),
ops.ExtractDay: _extract('day'),
ops.ExtractHour: _extract('hour'),
ops.ExtractMinute: _extract('minute'),
ops.ExtractSecond: _extract('second'),
ops.ExtractMillisecond: _extract('millisecond'),
}
)
_unsupported_ops = [
# standard operations
ops.NotContains,
ops.NullIf,
ops.NotAny,
# miscellaneous
ops.Least,
ops.Greatest,
# numeric
ops.Round,
ops.Log2,
ops.Ln,
ops.Log10,
ops.Log,
ops.Exp,
ops.Modulus,
# string
ops.Contains,
ops.LPad,
ops.RPad,
ops.Capitalize,
ops.RegexSearch,
ops.RegexExtract,
ops.RegexReplace,
ops.StringAscii,
ops.StringSQLLike,
# aggregate methods
ops.CumulativeMax,
ops.CumulativeMin,
ops.CumulativeMean,
ops.CumulativeSum,
# datetime methods
ops.TimestampTruncate,
]
_unsupported_ops = {k: raise_unsupported_op_error for k in _unsupported_ops}
_operation_registry.update(_unsupported_ops)
class MSSQLExprTranslator(alch.AlchemyExprTranslator):
_registry = _operation_registry
_rewrites = alch.AlchemyExprTranslator._rewrites.copy()
_type_map = alch.AlchemyExprTranslator._type_map.copy()
_type_map.update(
{
dt.Boolean: pyodbc.SQL_BIT,
dt.Int8: mssql.TINYINT,
dt.Int32: mssql.INTEGER,
dt.Int64: mssql.BIGINT,
dt.Float: mssql.REAL,
dt.Double: mssql.REAL,
dt.String: mssql.VARCHAR,
}
)
rewrites = MSSQLExprTranslator.rewrites
compiles = MSSQLExprTranslator.compiles
class MSSQLDialect(alch.AlchemyDialect):
translator = MSSQLExprTranslator
dialect = MSSQLDialect
|
import math
class Config:
# input dim
window_width = 800
window_height = 800
intruder_size = 0
EPISODES = 1000
G = 9.8
tick = 30
scale = 30
# distance param
minimum_separation = 555 / scale
NMAC_dist = 150 / scale
horizon_dist = 4000 / scale
initial_min_dist = 3000 / scale
goal_radius = 600 / scale
# speed
min_speed = 50 / scale
max_speed = 80 / scale
d_speed = 5 / scale
speed_sigma = 2 / scale
position_sigma = 10 / scale
# heading in rad TBD
d_heading = math.radians(5)
heading_sigma = math.radians(2)
# bank
min_bank = -25
max_bank = 25
d_bank = 5
bank_sigma = 4
#maximum steps of one episode
max_steps = 1000
|
import unittest
from kiss_headers import Header, Headers, lock_output_type, parse_it
from kiss_headers.utils import decode_partials
RAW_HEADERS = """accept-ch: DPR
accept-ch-lifetime: 2592000
alt-svc: quic=":443"; ma=2592000; v="46,43", h3-Q050=":443"; ma=2592000, h3-Q049=":443"; ma=2592000, h3-Q048=":443"; ma=2592000, h3-Q046=":443"; ma=2592000, h3-Q043=":443"; ma=2592000
cache-control: private, max-age=0
content-encoding: br
content-length: 64032
content-type: text/html; charset=UTF-8
date: Mon, 16 Mar 2020 21:27:31 GMT
expires: -1
p3p: CP="This is not a P3P policy! See g.co/p3phelp for more info."
server: gws
set-cookie: 1P_JAR=2020-03-16-21; expires=Wed, 15-Apr-2020 21:27:31 GMT; path=/; domain=.google.fr; Secure; SameSite=none
set-cookie: NID=200=IGpBMMA3G7tki0niFFATFQ2BnsNceVP6XBtwOutoyw97AJ4_YFT5l1oLfLeX22xeI_STiP4omAB4rmMP3Sxgyo287ldQGwdZSdPOOZ_Md3roDOMAOtXEQ_hFbUvo0VPjS2gL1y00_6kQwpVxCghI2Ozrx-A4Xks3ZIXRj11RsWs; expires=Tue, 15-Sep-2020 21:27:31 GMT; path=/; domain=.google.fr; Secure; HttpOnly; SameSite=none
set-cookie: CONSENT=WP.284b10; expires=Fri, 01-Jan-2038 00:00:00 GMT; path=/; domain=.google.fr
status: 200
strict-transport-security: max-age=31536000
x-frame-options: SAMEORIGIN
x-xss-protection: 0""".replace(
"\n", "\r\n"
)
RAW_HEADERS_MOZILLA = """GET /home.html HTTP/1.1
Host: developer.mozilla.org
User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10.9; rv:50.0) Gecko/20100101 Firefox/50.0
Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8
Accept-Language: en-US,en;q=0.5
Accept-Encoding: gzip, deflate, br
Referer: https://developer.mozilla.org/testpage.html
Connection: keep-alive
Upgrade-Insecure-Requests: 1
If-Modified-Since: Mon, 18 Jul 2016 02:36:04 GMT
If-None-Match: "c561c68d0ba92bbeb8b0fff2a9199f722e3a621a"
Cache-Control: max-age=0""".replace(
"\n", "\r\n"
)
class MyKissHeadersFromStringTest(unittest.TestCase):
headers: Headers
def setUp(self) -> None:
MyKissHeadersFromStringTest.headers = parse_it(RAW_HEADERS)
def test_decode_partials(self):
self.assertEqual(
[("Subject", "pöstal")],
decode_partials([("Subject", "=?iso-8859-1?q?p=F6stal?=")]),
)
def test_bytes_headers(self):
self.assertEqual(
MyKissHeadersFromStringTest.headers, parse_it(RAW_HEADERS.encode("utf-8"))
)
def test_two_headers_eq(self):
self.assertEqual(MyKissHeadersFromStringTest.headers, parse_it(RAW_HEADERS))
self.assertNotEqual(
MyKissHeadersFromStringTest.headers, parse_it(RAW_HEADERS_MOZILLA)
)
def test_headers_get_has(self):
self.assertIsNone(MyKissHeadersFromStringTest.headers.get("received"))
self.assertFalse(MyKissHeadersFromStringTest.headers.has("received"))
self.assertEqual(
"SAMEORIGIN", MyKissHeadersFromStringTest.headers.get("x-frame-options")
)
def test_repr_dict(self):
dict_ = MyKissHeadersFromStringTest.headers.to_dict()
self.assertIn("set-cookie", dict_)
self.assertIn("p3p", dict_)
self.assertTrue(
dict_["set-cookie"].startswith(
"1P_JAR=2020-03-16-21; expires=Wed, 15-Apr-2020 21:27:31 GMT; path=/;"
)
)
self.assertTrue(
dict_["set-cookie"].endswith(
"CONSENT=WP.284b10; expires=Fri, 01-Jan-2038 00:00:00 GMT; path=/; domain=.google.fr"
)
)
def test_repr_str(self):
self.assertEqual(RAW_HEADERS, repr(MyKissHeadersFromStringTest.headers))
self.assertEqual(RAW_HEADERS, str(MyKissHeadersFromStringTest.headers))
self.assertEqual(
"SAMEORIGIN", str(MyKissHeadersFromStringTest.headers.x_frame_options)
)
self.assertEqual(
"x-frame-options: SAMEORIGIN",
repr(MyKissHeadersFromStringTest.headers.x_frame_options),
)
def test_control_basis_exist(self):
self.assertEqual("DPR", MyKissHeadersFromStringTest.headers.accept_ch)
self.assertEqual(3, len(MyKissHeadersFromStringTest.headers.set_cookie))
self.assertIn("Secure", MyKissHeadersFromStringTest.headers.set_cookie[0])
self.assertEqual(
"This is not a P3P policy! See g.co/p3phelp for more info.",
MyKissHeadersFromStringTest.headers.p3p.cp,
)
self.assertTrue(MyKissHeadersFromStringTest.headers.has("Cache-Control"))
self.assertTrue(MyKissHeadersFromStringTest.headers.content_type.has("charset"))
self.assertEqual(
"UTF-8", MyKissHeadersFromStringTest.headers.content_type.get("charset")
)
def test_control_first_line_not_header(self):
headers = parse_it(RAW_HEADERS_MOZILLA)
self.assertEqual(17, len(headers))
self.assertIn("host", headers)
self.assertIn("Cache-Control", headers)
def test_headers_to_bytes(self):
headers = parse_it(RAW_HEADERS_MOZILLA)
self.assertEqual(headers, parse_it(bytes(headers)))
def test_verify_autocompletion_capability(self):
headers = parse_it(RAW_HEADERS_MOZILLA)
self.assertIn("accept_encoding", dir(headers))
self.assertIn("accept_language", dir(headers))
self.assertTrue(headers.accept)
self.assertIn("q", dir(headers.accept[-1]))
def test_fixed_type_output(self):
headers = parse_it(RAW_HEADERS_MOZILLA)
self.assertEqual(Header, type(headers.host))
lock_output_type()
self.assertEqual(list, type(headers.host))
self.assertEqual(1, len(headers.host))
lock_output_type(False)
self.assertEqual(Header, type(headers.host))
if __name__ == "__main__":
unittest.main()
|
from flask import request
from flask_restful import Resource
from src.models.alert import AlertModel
from src.models.user import User
from src.utils.checkauth import authrequired
class Alert(Resource):
@authrequired
def put(self):
data = request.get_json()
if data is None:
return {'message': 'data not correct'}, 400
username = request.headers.get('audience')
if None in [data.get('alerts'), username]:
return {'message': 'data not correct'}, 400
user = User.get_by_username(username)
for alertdata in data.get('alerts'):
if None in [user.id, alertdata.get('product'), alertdata.get('price')]:
return {'message': 'data not correct'}, 400
alert = AlertModel(user.id, alertdata.get('product'), alertdata.get('price'),
alertdata.get('currency') or 'PLN')
alert.add_alert()
        return {'message': 'alerts added successfully'}, 201
@authrequired
def delete(self):
data = request.get_json()
if data is None:
return {'message': 'data not correct'}, 400
username = request.headers.get('audience')
if None in [data.get('alerts'), username]:
return {'message': 'data not correct'}, 400
user = User.get_by_username(username)
for alert in [AlertModel.get_alert_by_id(_id) for _id in data['alerts']]:
if alert is None:
continue
if alert.user != user.id:
return {'message': "you cannot delete alerts that aren't yours"}, 401
alert.delete_alert()
return {'message': 'deleted successfully'}, 201
@authrequired
def patch(self):
data = request.get_json()
if data is None:
return {'message': 'data not correct'}, 400
username = request.headers.get('audience')
if None in [username, data.get('alerts')]:
return {'message': 'data not correct'}, 400
user = User.get_by_username(username)
for alertdict in data.get('alerts'):
if None in [alertdict.get('id'), alertdict.get('product'), alertdict.get('price'),
alertdict.get('currency')]:
return {'message': 'data not correct'}, 400
alert = AlertModel.get_alert_by_id(alertdict['id'])
if alert is None:
return {'message': 'data not correct'}, 400
if user.id != alert.user:
return {'message': "you cannot change alerts that aren't yours"}, 401
alert.update_info(alertdict['product'], alertdict['price'], alertdict.get('currency'))
return {'message': 'updated successfully'}
@authrequired
def get(self):
data = request.get_json()
if data is None:
return {'message': 'data not correct'}, 400
username = request.headers.get('audience')
if None in [username, data.get('alerts')]:
return {'message': 'data not correct'}, 400
user = User.get_by_username(username)
alerts = []
for alertid in data.get('alerts'):
alert = AlertModel.get_alert_by_id(alertid)
if alert is None:
continue
if user.id != alert.user:
return {'message': 'you can get only your own alerts'}, 401
alerts.append(alert)
return {'alerts': AlertModel.list_to_dict(alerts)}, 201
class Alerts(Resource):
@authrequired
def get(self, username):
if not username == request.headers.get('audience'):
return {'message': 'you can get only your own alerts'}, 400
user = User.get_by_username(username)
alerts = AlertModel.get_alerts_by_user_id(user.id)
return {'alerts': AlertModel.list_to_dict(alerts)}, 201
class ChangeActive(Resource):
@authrequired
def post(self):
data = request.get_json()
if data is None:
return {'message': 'data not correct'}, 400
username = request.headers.get('audience')
if None in [username, data.get('alerts')]:
return {'message': 'data not correct'}, 400
user = User.get_by_username(username)
for alertid in data.get('alerts'):
alert = AlertModel.get_alert_by_id(alertid)
if alert is None:
continue
if user.id != alert.user:
return {'message': 'you can modify only your own alerts'}, 401
alert.change_active()
return {'message': 'updated active states successfully'}, 201
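# Illustrative request sketch (not part of the original module). The URL prefix and the
# exact auth headers depend on how these resources are registered and on
# checkauth.authrequired, so treat them as assumptions:
#
#   PUT /alert
#   Headers:  audience: some_user   (plus whatever token authrequired expects)
#   Body:     {"alerts": [{"product": "GPU", "price": 1500, "currency": "PLN"}]}
#
#   A missing field returns {"message": "data not correct"} with HTTP 400.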
|
import json
import logging
import re
from datetime import datetime, timedelta
from uuid import uuid4
import requests
import pytz
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import Column, ForeignKey
from sqlalchemy import Integer, Text, Boolean, String, Enum, DateTime, JSON
from sqlalchemy import asc
from flask_login import UserMixin
from sqlalchemy.schema import DefaultClause
from edutalk.ag_ccmapi import project, deviceobject, devicefeature, networkapplication as na
from edutalk.exceptions import CCMAPIError
from edutalk.config import config
db = config.db
log = logging.getLogger('edutalk.models')
pid_list = [0]
def get_project_info(pid,user):
return project.get(pid)['odo'][1]['do_id']
def set_pid(pid):
pid_list.append(pid)
return 200
def get_pre_pid():
return pid_list[-1]
class DictMixin:
def to_dict(self, fields=None):
if fields is None:
fields = map(lambda x: x.name, self.__table__.columns)
return {x: getattr(self, x) for x in fields}
class TimestampMixin():
# Ref: https://myapollo.com.tw/zh-tw/sqlalchemy-mixin-and-custom-base-classes/
created_at = Column(
DateTime(timezone=True), default=lambda: datetime.now(pytz.UTC)
)
updated_at = Column(
DateTime(timezone=True),
nullable=True,
onupdate=lambda: datetime.now(pytz.UTC)
)
class User(db.Model, DictMixin, TimestampMixin, UserMixin):
__tablename__ = 'User'
id = Column(Integer, primary_key=True, nullable=False)
username = Column(String(255)) # this is a cache stored the result of ccm api query
approved = Column(Boolean, default=False)
# iottalk_id = Column(Integer, nullable=False)
is_superuser = Column(Boolean, default=False)
group_id = Column(Integer, ForeignKey('Group.id'), nullable=False)
lecture_projects = db.relationship('LectureProject', cascade='all,delete',
backref='user')
_token = Column(String, nullable=False, unique=True, default=lambda: uuid4().hex) # just a UUID
@property
def token(self): # readonly
return self._token
# _cookies = Column(String()) # the iottalk ccm cookies
# @property
# def cookies(self):
# return json.loads(self._cookies)
# @cookies.setter
# def cookies(self, val: dict):
# self._cookies = json.dumps(val)
# cookies = db.synonym('_cookies', descriptor=cookies)
# @property
# def ccm_session(self):
# s = getattr(self, '__ccm_session', None) # cache
# if not s:
# s = self.__ccm_session = requests.Session()
# s.cookies.update(self.cookies)
# return s
@property
def is_teacher(self):
return self.group.name == 'teacher'
@property
def is_admin(self):
return self.group.name == 'administrator'
# for OAuth
sub = Column(String(255), unique=True)
email = Column(String(255))
refresh_token = db.relationship(
'RefreshToken',
back_populates='user',
uselist=False, # For one-to-one relationship, ref: https://tinyurl.com/jemrw6uf
cascade='all, delete-orphan',
passive_deletes=True,
)
access_tokens = db.relationship(
'AccessToken',
back_populates='user',
cascade='all, delete-orphan',
passive_deletes=True
)
class Group(db.Model):
__tablename__ = 'Group'
id = Column(Integer, primary_key=True, nullable=False)
name = Column(String(255), unique=True)
users = db.relationship('User', cascade='all,delete', backref='group')
@classmethod
def default(cls):
if len(User.query.all()) == 0: # assume the first user is admin
return cls.query.filter_by(name='administrator').first()
return cls.query.filter_by(name='student').first()
class Lecture(db.Model, DictMixin):
__tablename__ = 'Lecture'
id = Column(Integer, primary_key=True, nullable=False)
name = Column(String(255), nullable=False, unique=True)
idx = Column(Integer, nullable=False) # lecture orders
url = Column(Text) # HackMD url
idm = Column(String(255)) # the input device model name
odm = Column(String(255), nullable=False, unique=True) # the output device model name
joins = Column(JSON)
lecture_projects = db.relationship('LectureProject', cascade='all,delete',
backref='lecture')
code = Column(String) # the vpython program
def __init__(self, **kwargs):
if 'code_path' in kwargs:
p = kwargs.pop('code_path')
with open(p) as f:
kwargs['code'] = f.read()
return super().__init__(**kwargs)
@classmethod
def list_(cls):
return tuple(map(
lambda x: x.to_dict(['id', 'name', 'url', 'idm', 'odm']),
cls.query.order_by(asc(cls.idx)).all()))
@property
def da_name(self): # readonly
return self.odm
@classmethod
def isexist(cls, name):
return cls.query.filter_by(name=name).first() is not None
class Template(db.Model, DictMixin):
__tablename__ = 'Template'
id = Column(Integer, primary_key=True, nullable=False)
dm = Column(String(255), nullable=False, unique=True)
code = Column(String) # the vpython program template
def __init__(self, **kwargs):
if 'code_path' in kwargs:
p = kwargs.pop('code_path')
with open(p) as f:
kwargs['code'] = f.read()
return super().__init__(**kwargs)
@classmethod
def isexist(cls, **kwargs):
return cls.query.filter_by(**kwargs).first() is not None
class LectureProject(db.Model, DictMixin):
__tablename__ = 'LectureProject'
id = Column(Integer, primary_key=True, nullable=False)
u_id = Column(Integer, ForeignKey('User.id'), nullable=False)
lec_id = Column(Integer, ForeignKey('Lecture.id'), nullable=False)
p_id = Column(Integer, nullable=False) # iottalk project id
code = Column(String) # the vpython program
    # ``user`` is available via backref
    # ``lecture`` is available via backref
@classmethod
def get_or_create(cls, user, lecture):
x = cls.query.filter(cls.user == user, cls.lecture == lecture).first()
if not x:
p_id = project.create(name=lecture.name)
project.on(p_id)
x = cls(user=user, lecture=lecture, p_id=p_id, code=lecture.code)
x.create_na()
db.session.add(x)
db.session.commit()
# TODO: rollback all ccmapi if any exception
return x
def create_na(self): # create device objects
# create device objects
# logger_df_id_list = []
# for idf in self.lecture.joins:
# if idf=="Acceleration-I" or idf=="Gyroscope-I" or idf=="Orientation-I" or idf=="Magnetometer-I" or idf=="Humidity-I" or idf=="UV-I" or idf=="Alcohol-I":
# logger_odf_name = idf[:-1]+"logger"
# logger_df = devicefeature.feature_get_by_name(logger_odf_name)
# logger_df_id = logger_df['df_id']
# logger_df_id_list.append(logger_df_id)
ido_id = deviceobject.create(self.p_id, self.lecture.idm)[0]
odo_id = deviceobject.create(self.p_id, self.lecture.odm)[0]
# logger_odo_id = deviceobject.create(self.p_id, "FileLogger1")
for idf, odf, default_value in self.lecture.joins:
idf = re.sub(r'_', r'-', idf)
odf = re.sub(r'_', r'-', odf)
# if idf=="Acceleration-I" or idf=="Gyroscope-I" or idf=="Orientation-I" or idf=="Magnetometer-I" or idf=="Humidity-I" or idf=="UV-I" or idf=="Alcohol-I":
# logger_odf = idf[:-2]+"_logger"
# na.create(self.p_id, [(ido_id, idf), (odo_id, odf), (logger_odo_id, logger_odf)])
# else:
# na_id = na.create(self.p_id, [(ido_id, idf), (odo_id, odf)])
na_id = na.create(self.p_id, [(ido_id, idf), (odo_id, odf)])
# def get_logger_df_name(df_id_list, idf):
# if idf=="Acceleration-I" or idf=="Gyroscope-I" or idf=="Orientation-I" or idf=="Magnetometer-I" or idf=="Humidity-I" or idf=="UV-I" or idf=="Alcohol-I":
# logger_odf = idf[:-1]+"logger"
# logger_df_id = devicefeature.feature_get_by_name(logger_odf)
# df_id_list.append(logger_df_id)
@property
def ido(self):
return deviceobject.get(
self.p_id,
project.get(self.p_id)['ido'][0]['do_id'])
@property
def odo(self):
return deviceobject.get(
self.p_id,
project.get(self.p_id)['odo'][0]['do_id'])
@classmethod
def get_by_lec_user(cls, lecture, user):
return cls.query.filter(cls.lecture == lecture, cls.user == user).first()
def delete(self):
try:
project.delete(self.p_id)
except CCMAPIError as e:
log.warning('user %s project %s delete failed',
self.user.username, self.p_id)
db.session.delete(self)
db.session.commit()
class RefreshToken(db.Model, TimestampMixin):
id = Column(Integer, primary_key=True)
token = Column(Text)
user_id = Column(Integer, ForeignKey('User.id'))
user = db.relationship('User', back_populates='refresh_token')
access_tokens = db.relationship(
'AccessToken',
back_populates='refresh_token',
cascade='all, delete-orphan',
passive_deletes=True
)
class AccessToken(db.Model, TimestampMixin):
id = Column(Integer, primary_key=True)
token = Column(Text)
expires_at = Column(db.DateTime())
user_id = Column(Integer, ForeignKey('User.id'))
refresh_token_id = Column(Integer, ForeignKey('refresh_token.id'))
user = db.relationship('User', back_populates='access_tokens')
refresh_token = db.relationship('RefreshToken', back_populates='access_tokens')
|
'''
Created on 18 Jul 2017
@author: spotnuru
'''
from cassandra.cluster import Cluster
cluster = Cluster(['127.0.0.1'])
session = cluster.connect('task1')
session.execute("CREATE TABLE watches(id int PRIMARY KEY, "+ "name text);")
print("Table Created") |
from pathlib import Path
from mseg_semantic.utils.dataset import SemData, make_dataset
TEST_DATA_ROOT = Path(__file__).resolve().parent / "test_data"
def test_make_dataset() -> None:
"""Ensure make_dataset() returns the proper outputs
"""
split = "train"
data_root = "/home/dummy_data_root"
data_list_fpath = str(TEST_DATA_ROOT / "dummy_camvid_train.txt")
image_label_list = make_dataset(split, data_root, data_list_fpath)
expected_image_label_list = [
(f"{data_root}/701_StillsRaw_full/0001TP_006690.png", f"{data_root}/semseg11/0001TP_006690_L.png"),
(f"{data_root}/701_StillsRaw_full/0001TP_006720.png", f"{data_root}/semseg11/0001TP_006720_L.png"),
(f"{data_root}/701_StillsRaw_full/0001TP_006750.png", f"{data_root}/semseg11/0001TP_006750_L.png")
]
assert image_label_list == expected_image_label_list
|
import _ast
from daipecore.lineage.DecoratorParserInterface import DecoratorParserInterface
from pysparkbundle.lineage.PathWriter import PathWriter
class PathWriterParser(DecoratorParserInterface):
def __init__(self, name: str, mode: str):
self.__name = name
self.__mode = mode
def parse(self, decorator: _ast.Call):
arg: _ast.Str = decorator.args[0]
return PathWriter(arg.s, self.__mode)
def get_name(self) -> str:
return self.__name
|
"""
Indico Request Handler
"""
import json, traceback
import tornado.web
from intercombot.error import IndicoError, RouteNotFound, ServerError
from intercombot.utils import LOGGER
class JSONEncoder(json.JSONEncoder):
def default(self, o):
return json.JSONEncoder.default(self, o)
class IndicoHandler(tornado.web.RequestHandler):
@tornado.web.asynchronous
def post(self, action):
try:
# Fetch appropriate handler
if not action:
action = "_base"
if not hasattr(self, str(action)):
raise RouteNotFound(action)
# Pass along the data and get a result
handler = getattr(self, str(action))
result = handler(self.request.body)
self.respond(result, 200)
except IndicoError as e:
self.respond(e.message, e.code)
except Exception as e:
LOGGER.exception("======== INDICO SERVER ERROR ========",)
error = ServerError()
self.respond(error.message, error.code)
def respond(self, data, code=200):
self.set_status(code)
self.write(JSONEncoder().encode({
"status": code,
"data": data
}))
self.finish()
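# Illustrative subclass sketch (not part of the original module): post() dispatches on
# the URL "action" segment via getattr, so an action is simply a method that takes the
# raw request body and returns JSON-serializable data. The method name, payload handling
# and URL pattern below are assumptions.
#
#   class EchoHandler(IndicoHandler):
#       def echo(self, body):
#           return {"echo": body.decode("utf-8", errors="replace")}
#
#   # e.g. routed as (r"/handle/(.*)", EchoHandler) in the tornado Application.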
|
import torch as th
from torch.optim.optimizer import Optimizer, required
def normalize_param(W):
return W / W.norm(2).clamp(min=1e-12)
def to_vector(tensors):
"""Flatten a list of parameters/gradients to a vector"""
return th.cat([t.view(-1) for t in tensors]).detach()
def from_vector(tensors, vector):
"""Reverse `to_vector` (overwrites the tensor values)"""
pointer = 0
for tensor in tensors:
new_val = vector[pointer:pointer+tensor.numel()].view(tensor.size())
tensor.copy_(new_val)
pointer += tensor.numel()
class MultiObjSGD(Optimizer):
"""
    This optimizer works like SGD except that:
    1. it stores the gradient from an auxiliary task with `.save_auxiliary()`
    2. it combines those auxiliary gradients with the main ones using
       `.combine_gradients()` before applying the update
Args:
full_gradients (bool): do gradient combination ops on the full
gradients (as opposed to separately for each parameter)
"""
def __init__(self, params, lr=required, momentum=0, dampening=0,
weight_decay=0, nesterov=False, always_project=True,
full_gradients=False):
if lr is not required and lr < 0.0:
raise ValueError("Invalid learning rate: {}".format(lr))
if momentum < 0.0:
raise ValueError("Invalid momentum value: {}".format(momentum))
if weight_decay < 0.0:
raise ValueError(
"Invalid weight_decay value: {}".format(weight_decay))
defaults = dict(lr=lr, momentum=momentum, dampening=dampening,
weight_decay=weight_decay, nesterov=nesterov,
frozen=False)
if nesterov and (momentum <= 0 or dampening != 0):
raise ValueError(
"Nesterov momentum requires a momentum and zero dampening")
super(MultiObjSGD, self).__init__(params, defaults)
self.always_project = always_project
self.full_gradients = full_gradients
def __setstate__(self, state):
super(MultiObjSGD, self).__setstate__(state)
for group in self.param_groups:
group.setdefault("nesterov", False)
def save_auxiliary(self):
"""This saves the gradients wrt. the auxiliary objective"""
for group in self.param_groups:
for p in group["params"]:
param_state = self.state[p]
# skip frozen parameters (TODO: remove this)
if getattr(param_state, "frozen", False):
continue
# Actually save the gradient
param_state["aux_grad"] = th.zeros_like(p.data)
if p.grad is None:
continue
d_p = p.grad.data
param_state["aux_grad"].add_(d_p)
def combine_gradients(self, g_p, aux_g_p):
"""Manipulate the gradient g_p using the gradient from the auxiliary
objective aux_g_p"""
raise NotImplementedError()
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
# Apply momentum and everything to get final gradient
params = []
lrs = []
grads = []
aux_grads = []
for group in self.param_groups:
weight_decay = group["weight_decay"]
momentum = group["momentum"]
dampening = group["dampening"]
nesterov = group["nesterov"]
for p in group["params"]:
if p.grad is None:
continue
d_p = p.grad.data
if weight_decay != 0:
d_p.add_(weight_decay, p.data)
param_state = self.state[p]
# skip frozen parameters
if getattr(param_state, "frozen", False):
print("Skipping parameter of size", p.dim())
continue
if momentum != 0:
if "momentum_buffer" not in param_state:
buf = param_state["momentum_buffer"] = th.zeros_like(
p.data)
buf.mul_(momentum).add_(d_p)
else:
buf = param_state["momentum_buffer"]
buf.mul_(momentum).add_(1 - dampening, d_p)
if nesterov:
d_p = d_p.add(momentum, buf)
else:
d_p = buf
# Track parameters, learning rate, gradients and auxiliary
# gradients
params.append(p)
lrs.append(group["lr"])
grads.append(d_p)
if "aux_grad" in param_state:
aux_grads.append(param_state["aux_grad"])
else:
aux_grads.append(th.zeros_like(d_p))
# Combine gradients
if self.full_gradients:
# Consider parameters as one vector
new_grad_vec = self.combine_gradients(
to_vector(grads),
to_vector(aux_grads)
)
# Overwrite gradients
from_vector(grads, new_grad_vec)
else:
# Treat each parameter independently
grads = [self.combine_gradients(g, aux_g)
for g, aux_g in zip(grads, aux_grads)]
# Apply the update
for p, lr, g in zip(params, lrs, grads):
p.data.add_(-lr, g)
return loss
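# --- Hedged example (not part of the original module) ------------------------
# One plausible concrete subclass, sketched here only to illustrate how
# `combine_gradients` could be implemented: project the task gradient away from
# the auxiliary gradient when the two conflict (negative inner product), in the
# spirit of gradient-projection methods. The original project may use a
# different combination rule.
class ProjectedMultiObjSGD(MultiObjSGD):
    def combine_gradients(self, g_p, aux_g_p):
        dot = (g_p * aux_g_p).sum()
        if dot >= 0 and not self.always_project:
            # Gradients agree and projection is not forced: keep the task gradient.
            return g_p
        aux_sq_norm = (aux_g_p * aux_g_p).sum().clamp(min=1e-12)
        # Remove the component of g_p along aux_g_p.
        return g_p - (dot / aux_sq_norm) * aux_g_p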
|
#
# Copyright (c) 2021 Red Hat, Inc.
# This program and the accompanying materials are made
# available under the terms of the Eclipse Public License 2.0
# which is available at https://www.eclipse.org/legal/epl-2.0/
#
# SPDX-License-Identifier: EPL-2.0
#
# Contributors:
# Red Hat, Inc. - initial API and implementation
#
import os
import argparse
from typing import Any, Dict, List, Tuple
import yaml
def write_contents(filename: str, mode: str, contents: str) -> None:
"""
Write the string to the specified filename
"""
with open(filename, mode) as out:
out.write(contents)
def get_crd_metadata(output_path: str) -> None:
"""
    Read in the devworkspace and devworkspacetemplate CRDs and generate TypeScript metadata into constants/constants.ts
"""
crd_path = "crds"
typescript_contents = ""
devworkspace_crd_path = os.path.join(crd_path, 'workspace.devfile.io_devworkspaces.yaml')
with open(devworkspace_crd_path, 'r') as devfile_file:
yaml_data = yaml.load(devfile_file, Loader=yaml.FullLoader)
spec, group, kind, plural, singular, versions, latest_version, latest_api_version = extract_fields(yaml_data)
typescript_contents += generate_typescript(latest_api_version, group, kind, plural, singular, versions,
latest_version)
devworkspacetemplate_crd_path = os.path.join(crd_path, 'workspace.devfile.io_devworkspacetemplates.yaml')
with open(devworkspacetemplate_crd_path, 'r') as devfile_file:
yaml_data = yaml.load(devfile_file, Loader=yaml.FullLoader)
spec, group, kind, plural, singular, versions, latest_version, latest_api_version = extract_fields(yaml_data)
typescript_contents += generate_typescript(latest_api_version, group, kind, plural, singular, versions,
latest_version)
write_contents(os.path.join(output_path, "constants", "constants.ts"), "w", typescript_contents)
def extract_fields(yaml_data: Dict[str, Any]) -> Tuple[Dict[str, Any], str, str, str, str, List[str], str, str]:
"""
Extract metadata from the crds
"""
spec = yaml_data['spec']
group = spec['group']
kind = spec['names']['kind']
plural = spec['names']['plural']
singular = spec['names']['singular']
versions = [version['name'] for version in spec['versions']]
    latest_version = versions[-1]
latest_api_version = "{}/{}".format(group, latest_version)
return spec, group, kind, plural, singular, versions, latest_version, latest_api_version
def generate_typescript(api_version: str, group: str, kind: str, plural: str, singular: str, versions: List[str],
                        latest_version: str) -> str:
"""
Export a string representation of the typescript
"""
return f"""
export const {singular + "ApiVersion"} = '{api_version}';
export const {singular + "Group"} = '{group}';
export const {singular + "Kind"} = '{kind}';
export const {singular + "Plural"} = '{plural}';
export const {singular + "Singular"} = '{singular}';
export const {singular + "Versions"} = {versions};
export const {singular + "LatestVersion"} = '{latest_version}';
"""
def export_typescript_api(output_path: str) -> None:
"""
Export constants into api.ts
"""
export_contents = """
export * from './constants/constants';
"""
write_contents(os.path.join(output_path, "api.ts"), "a", export_contents)
if __name__ == "__main__":
# Get any additional metadata we can from the crds
parser = argparse.ArgumentParser(description='Generate metadata from crds')
parser.add_argument('-p', '--path', action='store', type=str, help='The path to the constants directory')
args = parser.parse_args()
if not args.path:
parser.print_help()
parser.exit()
path = args.path
# Grab the metadata from the crds and put it into constants/constant.ts in typescript-model
get_crd_metadata(path)
# Export constants/constant.ts so that you can import constants from the package
export_typescript_api(path)
|
from ..base import ShopifyResource
from .usage_charge import UsageCharge
def _get_first_by_status(resources, status):
for resource in resources:
if resource.status == status:
return resource
return None
class RecurringApplicationCharge(ShopifyResource):
def usage_charges(self):
return UsageCharge.find(recurring_application_charge_id=self.id)
def customize(self, **kwargs):
        self._load_attributes_from_response(self.put("customize", recurring_application_charge=kwargs))
@classmethod
def current(cls):
"""
Returns first RecurringApplicationCharge object with status=active.
If not found, None will be returned.
"""
return _get_first_by_status(cls.find(), "active")
def activate(self):
self._load_attributes_from_response(self.post("activate"))
|
import numpy
from .observable import BaseObservable
def get_flat_local_connections_log_values(wave_function, local_connections, all_use_conn):
local_connections_reshape = numpy.moveaxis(local_connections, 1, 0).reshape((-1,)
+ local_connections.shape[2:])
    flat_conn = local_connections_reshape[all_use_conn.astype(bool).T.flatten(), ...]
return wave_function(flat_conn)[:, 0]
class Observable(BaseObservable):
"""docstring for ExactVariational"""
def __init__(self, operator):
super(Observable, self).__init__()
self.operator = operator
def local_values_optimized_for_unbalanced_local_connections(self, wave_function, local_connections,
hamiltonian_values,
all_use_conn):
batch_size = all_use_conn.shape[1]
local_values = numpy.zeros((batch_size,), dtype=numpy.complex128)
flat_log_values = get_flat_local_connections_log_values(wave_function, local_connections, all_use_conn)
conn_per_sample = all_use_conn.sum(axis=0).astype(numpy.int32)
idx = 0
for i in range(batch_size):
sample_log_values = flat_log_values[idx:idx + conn_per_sample[i]]
idx += conn_per_sample[i]
sample_val_division = numpy.exp(sample_log_values - sample_log_values[0])
            local_values[i] = numpy.multiply(hamiltonian_values[all_use_conn.astype(bool)
                                                                [:, i], i], sample_val_division).sum()
return local_values
def local_values_optimized_for_balanced_local_connections(self, wave_function, local_connections, hamiltonian_values):
flat_conn = local_connections.reshape((-1,) + self.operator.hilbert_state_shape)
flat_log_values = wave_function(flat_conn)[:, 0]
log_values = flat_log_values.reshape(local_connections.shape[0:2])
log_val_diff = log_values - log_values[0, :]
connections_division = numpy.exp(log_val_diff)
return numpy.multiply(hamiltonian_values, connections_division).sum(axis=0)
def local_values(self, wave_function, configurations):
local_connections, hamiltonian_values, all_use_conn = self.operator.find_conn(configurations)
if all_use_conn.mean() < 0.95:
return self.local_values_optimized_for_unbalanced_local_connections(wave_function,
local_connections,
hamiltonian_values,
all_use_conn)
else:
return self.local_values_optimized_for_balanced_local_connections(wave_function,
local_connections,
hamiltonian_values)
|
"""VIMS calibration data module.
Source: https://pds-imaging.jpl.nasa.gov/data/cassini/cassini_orbiter/vims-calibration-files/vims-pipeline-RC19-files-2018/  # noqa: E501
"""
import numpy as np
from .vars import DATA, RC
from ..interp import lin_interp
from ..pds.times import dyear
class VIMSCalibData(type):
"""Abstract VIMS data."""
name = None
__vis = None
__ir = None
def __str__(cls):
return cls.__name__
def __repr__(cls):
return f'<{cls.__class__.__name__}> {cls}'
def __call__(cls, year, vis=False):
if hasattr(year, 'channel'):
vis = year.channel == 'VIS'
if hasattr(year, 'start'):
year = dyear(year.start)
years, data = cls.vis if vis else cls.ir
return lin_interp(year, years, data)
def csv(cls, vis=False):
"""Calibration multiplier data file."""
return DATA / f'{RC}-VIMS_{"VIS" if vis else "IR"}-{cls.name}.csv'
@property
def vis(cls):
"""Visible multiplier factors."""
if cls.__vis is None:
cls.__vis = cls._load_data(vis=True)
return cls.__vis
@property
def ir(cls):
"""Infrared multiplier factors."""
if cls.__ir is None:
cls.__ir = cls._load_data(vis=False)
return cls.__ir
def _load_data(cls, vis=False):
"""Load csv data."""
years, *data = np.loadtxt(cls.csv(vis=vis), delimiter=', ', unpack=True)
return years, data
class Multiplier(metaclass=VIMSCalibData):
"""VIMS calibration multiplier."""
name = 'calibration_multiplier'
class SolarFlux(metaclass=VIMSCalibData):
"""VIMS calibration solar flux."""
name = 'solar'
class Efficiency(metaclass=VIMSCalibData):
"""VIMS calibration efficiency (photon calibration)."""
name = 'wave_photon_cal'
class Wavelengths(metaclass=VIMSCalibData):
"""VIMS calibration wavelengths."""
name = 'wavelengths'
|
# coding=utf-8
#
levels={
'SXP':0,
'USE':0,
'MGR':0,
'MM3':0,
'CCK':0,
}
class Config(object):
preprocessorPrint=0
realtimeImportPrint=0
realtimeIssuePrint=0
realtimeUSE=0
realtimeCheckers=0
modules={
'USE':'modelscript.use.engine',
'MGR':'modelscript.use.engine.merger',
'MM3':'modelscript.megamodels.metametamodel',
'CCK':'modelscript.megamodels.checkers',
'SXP':'modelscript.use.sex.parser',
}
# from modelscript.base.issues import DEBUG
# print(DEBUG)
# ISS=0
# import modelscript.megamodels.metametamodel
# import modelscript.megamodels.checkers
# import modelscript.use.sex.parser
#
#
# def setDebugLevels():
# for key in modules.keys():
# print('BB' + key)
#
# modname=modules[key]
# module = __import__(modname, globals(), locals(), ['DEBUG'], 0)
# module.DEBUG=levels[key]
# print(modname+'.DEBUG='+str(levels[key]))
#
#
# setDebugLevels()
|
from collections import OrderedDict
from feature.feature import *
class FeatureMeta:
def __init__(self):
super().__init__()
self.continuous_feats = OrderedDict()
self.categorical_feats = OrderedDict()
self.multi_category_feats = OrderedDict()
self.feat_dict = {}
def add_continuous_feat(self, name, transformation=None, discretize=False, discretize_bin=10):
self.delete_feat(name)
self.continuous_feats[name] = ContinuousFeature(name, transformation, discretize, discretize_bin)
self.feat_dict[name] = 'continuous'
def add_categorical_feat(self, name, all_categories=None):
self.delete_feat(name)
self.categorical_feats[name] = CategoricalFeature(name, all_categories)
self.feat_dict[name] = 'categorical'
def add_multi_category_feat(self, name, all_categories=None):
self.delete_feat(name)
self.multi_category_feats[name] = MultiCategoryFeature(name, all_categories)
self.feat_dict[name] = 'multi_category'
def delete_feat(self, name):
if name in self.feat_dict:
feat_type = self.feat_dict[name]
if feat_type == 'continuous':
del self.continuous_feats[name]
elif feat_type == 'categorical':
del self.categorical_feats[name]
elif feat_type == 'multi_category':
del self.multi_category_feats[name]
def get_num_feats(self):
total_dim = 0
total_dim += len(self.continuous_feats)
for key in self.categorical_feats:
feat = self.categorical_feats[key]
total_dim += feat.dim
for key in self.multi_category_feats:
feat = self.multi_category_feats[key]
total_dim += feat.dim
return total_dim
def get_num_fields(self):
return len(self.feat_dict.keys())
def get_num_continuous_fields(self):
return len(self.continuous_feats.keys())
def __str__(self):
feats_list = [self.continuous_feats, self.categorical_feats, self.multi_category_feats]
info_strs = []
for feats in feats_list:
info_str = ''
for key in feats:
feat = feats[key]
info_str += str(feat)
info_str += '\n'
info_strs.append(info_str)
return 'Continuous Features:\n{}Categorical Features:\n{}Multi-Category Features:\n{}'.format(*info_strs)
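# --- Hedged usage sketch (not part of the original module) -------------------
# A minimal demonstration of how FeatureMeta is assembled. The feature names
# and categories below are made up for the example, and the concrete Feature
# classes imported above may expose more options than shown here.
if __name__ == '__main__':
    meta = FeatureMeta()
    meta.add_continuous_feat('age')
    meta.add_categorical_feat('gender', all_categories=['M', 'F'])
    meta.add_multi_category_feat('interests', all_categories=['sports', 'music', 'tech'])
    print(meta)                              # summary of all registered features
    print(meta.get_num_fields())             # 3 fields in total
    print(meta.get_num_continuous_fields())  # 1 continuous field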
|
"""
urllib patcher module.
"""
from __future__ import absolute_import
import wrapt
from epsagon.modules.general_wrapper import wrapper
from ..events.urllib import UrllibEventFactory
def _wrapper(wrapped, instance, args, kwargs):
"""
    General wrapper for urllib instrumentation.
:param wrapped: wrapt's wrapped
:param instance: wrapt's instance
:param args: wrapt's args
:param kwargs: wrapt's kwargs
:return: None
"""
return wrapper(UrllibEventFactory, wrapped, instance, args, kwargs)
def patch():
"""
Patch module.
:return: None
"""
try:
wrapt.wrap_function_wrapper(
'urllib.request',
'OpenerDirector._open',
_wrapper
)
except Exception: # pylint: disable=broad-except
# Can happen in different Python versions.
pass
|
"""Tests for treadmill.runtime.linux.image.docker.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import io
import os
import shutil
import tempfile
import unittest
import mock
# Disable W0611: Unused import
import treadmill.tests.treadmill_test_skip_windows # pylint: disable=W0611
from treadmill import fs
from treadmill.runtime.linux.image import _docker
class DockerTest(unittest.TestCase):
"""test docker function for linux native runtime
"""
def setUp(self):
self.root_dir = tempfile.mkdtemp()
fs.mkdir_safe(os.path.join(self.root_dir, 'docker', 'etc'))
def tearDown(self):
if self.root_dir and os.path.isdir(self.root_dir):
shutil.rmtree(self.root_dir)
@mock.patch('treadmill.utils.get_uid_gid', mock.Mock(return_value=(1, 1)))
def test__create_overlay_passwd(self):
"""Test create overlay passwd file
"""
# pylint: disable=protected-access
_docker._create_overlay_passwd(self.root_dir, 'me')
passwd = os.path.join(self.root_dir, 'docker', 'etc', 'passwd')
self.assertTrue(os.path.isfile(passwd))
with io.open(passwd) as f:
self.assertEqual(
'root:x:0:0:root:/root:/bin/sh\nme:x:1:1::/:/sbin/nologin\n',
f.read()
)
@mock.patch(
'grp.getgrgid',
mock.Mock(return_value=mock.Mock(gr_name='foo'))
)
@mock.patch('treadmill.utils.get_uid_gid', mock.Mock(return_value=(1, 1)))
def test__create_overlay_group(self):
"""Test create overlay group file
"""
# pylint: disable=protected-access
_docker._create_overlay_group(self.root_dir, 'me')
group = os.path.join(self.root_dir, 'docker', 'etc', 'group')
self.assertTrue(os.path.isfile(group))
with io.open(group) as f:
self.assertEqual(
'root:x:0\nfoo:x:1\n',
f.read()
)
if __name__ == '__main__':
unittest.main()
|
import urllib.parse
from datetime import timedelta
import pytest
from fastapi import status
from fastapi.testclient import TestClient
from api import db
from api.services.deck import create_snapshot_for_deck
from api.utils.auth import create_access_token
from ..utils import create_admin_token, create_user_token
from .deck_utils import create_deck_for_user
@pytest.fixture(scope="module", autouse=True)
def user1(cards_session):
user1, _ = create_user_token(cards_session)
return user1
@pytest.fixture(scope="module", autouse=True)
def deck1(cards_session, user1):
return create_deck_for_user(cards_session, user1, release_stub="master-set")
@pytest.fixture(scope="module", autouse=True)
def snapshot1(cards_session, user1, deck1):
return create_snapshot_for_deck(
cards_session,
user1,
deck1,
title="First Snapshot",
description="First description",
is_public=True,
)
@pytest.fixture(scope="module", autouse=True)
def private_snapshot1(cards_session, user1, deck1):
return create_snapshot_for_deck(
cards_session,
user1,
deck1,
title="Private Snapshot",
description="Private description",
is_public=False,
)
@pytest.fixture(scope="module", autouse=True)
def private_deck1(cards_session, user1):
return create_deck_for_user(cards_session, user1, release_stub="expansion")
@pytest.fixture(scope="module", autouse=True)
def user2(cards_session):
user2, _ = create_user_token(cards_session)
return user2
@pytest.fixture(scope="module", autouse=True)
def deck2(cards_session, user2):
return create_deck_for_user(cards_session, user2, release_stub="expansion")
@pytest.fixture(scope="module", autouse=True)
def snapshot2(cards_session, user2, deck2):
return create_snapshot_for_deck(
cards_session,
user2,
deck2,
title="Second Snapshot",
is_public=True,
)
def test_get_decks(client: TestClient, snapshot1, snapshot2):
"""Basic deck filtration must work properly"""
# Public deck listings return all public decks, but no private decks
response = client.get("/v2/decks")
assert response.status_code == status.HTTP_200_OK
data = response.json()
assert data["count"] == 2
# Decks are in reverse chronological order, hence the index order being backward
assert data["results"][1]["id"] == snapshot1.id
assert data["results"][0]["id"] == snapshot2.id
def test_get_decks_legacy_decks(
client: TestClient, session: db.Session, user1, snapshot1, snapshot2
):
"""Legacy decks must be shown when requested, and not otherwise"""
# We can't create legacy decks, so for the purposes of this test we'll fake it
legacy_deck = create_deck_for_user(session, user1)
legacy_snapshot = create_snapshot_for_deck(
session, user1, legacy_deck, title="Legacy Deck", is_public=True
)
legacy_deck.is_legacy = True
legacy_snapshot.is_legacy = True
session.commit()
response = client.get("/v2/decks")
assert response.status_code == status.HTTP_200_OK
data = response.json()
assert data["count"] == 2
assert data["results"][1]["id"] == snapshot1.id
assert data["results"][0]["id"] == snapshot2.id
response = client.get("/v2/decks?show_legacy=true")
assert response.status_code == status.HTTP_200_OK
data = response.json()
assert data["count"] == 1
assert data["results"][0]["id"] == legacy_snapshot.id
def test_get_decks_deleted_snapshots(
client: TestClient, session: db.Session, snapshot1, user1, deck1
):
"""Deleted snapshots must be excluded from the listing"""
snapshot1_2 = create_snapshot_for_deck(session, user1, deck1, is_public=True)
# Verify our new snapshot is the first item in the listing
response = client.get("/v2/decks")
assert response.status_code == status.HTTP_200_OK
data = response.json()
assert data["results"][0]["id"] == snapshot1_2.id
# Delete our snapshot, and verify it is gone
snapshot1_2.is_deleted = True
session.commit()
response = client.get("/v2/decks")
assert response.status_code == status.HTTP_200_OK
data = response.json()
assert data["results"][0]["id"] != snapshot1_2.id
def test_get_decks_filter_preconstructed(
client: TestClient, session: db.Session, user1
):
"""Filtering by preconstructed decks must work"""
# Create a preconstructed deck
precon_deck = create_deck_for_user(session, user1, release_stub="master-set")
precon_snapshot = create_snapshot_for_deck(
session, user1, precon_deck, is_public=True
)
precon_snapshot.is_preconstructed = True
session.commit()
response = client.get("/v2/decks?show_preconstructed=true")
assert response.status_code == status.HTTP_200_OK
data = response.json()
assert data["count"] == 1
assert data["results"][0]["id"] == precon_snapshot.id
def test_get_decks_filter_title(client: TestClient, session, snapshot1):
"""Filtering by snapshot title must work"""
response = client.get(f"/v2/decks?q={urllib.parse.quote(snapshot1.title)}")
assert response.status_code == status.HTTP_200_OK
data = response.json()
assert data["count"] == 1, data
assert data["results"][0]["id"] == snapshot1.id, data
def test_get_decks_filter_phoenixborn(client: TestClient, snapshot1):
"""Filtering by snapshot Phoenixborn must work"""
response = client.get("/v2/decks?phoenixborn=one-phoenixborn")
assert response.status_code == status.HTTP_200_OK
data = response.json()
assert data["count"] == 1
assert data["results"][0]["id"] == snapshot1.id
def test_get_decks_filter_card(client: TestClient, snapshot2):
"""Filtering by included card must work"""
response = client.get("/v2/decks?card=two-ready-spell")
assert response.status_code == status.HTTP_200_OK
data = response.json()
assert data["count"] == 1
assert data["results"][0]["id"] == snapshot2.id
def test_get_decks_filter_user(client: TestClient, user1, snapshot1):
"""Filtering by user badge must work"""
# Public deck listings offer filtration by user
response = client.get(f"/v2/decks?player={urllib.parse.quote(user1.badge)}")
assert response.status_code == status.HTTP_200_OK
data = response.json()
assert data["count"] == 1
assert data["results"][0]["id"] == snapshot1.id
def test_get_mine(client: TestClient, user1, deck1, private_deck1):
"""Listing private decks returns the current user's decks"""
# This endpoint is functionally identical to the generic deck filter, aside from returning saved
# decks, so no need to test all the filters
token = create_access_token(
data={"sub": user1.badge},
expires_delta=timedelta(minutes=15),
)
response = client.get(
f"/v2/decks/mine", headers={"Authorization": f"Bearer {token}"}
)
assert response.status_code == status.HTTP_200_OK
data = response.json()
assert data["count"] == 2
assert data["results"][0]["id"] == private_deck1.id
assert data["results"][1]["id"] == deck1.id
def test_get_private_share_deck(client: TestClient, private_deck1):
"""Direct share UUIDs must allow access to the exact deck or snapshot"""
response = client.get(f"/v2/decks/shared/{private_deck1.direct_share_uuid}")
assert response.status_code == status.HTTP_200_OK
assert response.json()["id"] == private_deck1.id
def test_get_private_share_published_snapshot(client: TestClient, snapshot1):
"""Direct share UUIDs must allow access to public snapshots"""
response = client.get(f"/v2/decks/shared/{snapshot1.direct_share_uuid}")
assert response.status_code == status.HTTP_200_OK
assert response.json()["id"] == snapshot1.id
def test_get_private_share_deleted(
client: TestClient, session: db.Session, user1, deck1
):
"""Deleted decks must throw an error when accessing their direct share UUID"""
snapshot2 = create_snapshot_for_deck(session, user1, deck1)
snapshot2.is_deleted = True
session.commit()
response = client.get(f"/v2/decks/shared/{snapshot2.direct_share_uuid}")
assert response.status_code == status.HTTP_404_NOT_FOUND
def test_get_deck_deleted(client: TestClient, session: db.Session, user1):
"""Deleted decks must not provide access"""
deck = create_deck_for_user(session, user1)
deck.is_deleted = True
session.commit()
token = create_access_token(
data={"sub": user1.badge},
expires_delta=timedelta(minutes=15),
)
# Invisible to the owning user
response = client.get(
f"/v2/decks/{deck.id}", headers={"Authorization": f"Bearer {token}"}
)
assert response.status_code == status.HTTP_404_NOT_FOUND
# Invisible to unauthenticated, too
response = client.get(f"/v2/decks/{deck.id}")
assert response.status_code == status.HTTP_404_NOT_FOUND
def test_get_deck_no_record(client: TestClient, session: db.Session, user1):
"""Trying to fetch an ID that no longer exists must fail correctly"""
deck = create_deck_for_user(session, user1)
deleted_id = deck.id
session.delete(deck)
session.commit()
token = create_access_token(
data={"sub": user1.badge},
expires_delta=timedelta(minutes=15),
)
response = client.get(
f"/v2/decks/{deleted_id}", headers={"Authorization": f"Bearer {token}"}
)
assert response.status_code == status.HTTP_404_NOT_FOUND
def test_get_deck_deleted_public_snapshot(
client: TestClient, session: db.Session, user1
):
"""Decks with a deleted public snapshot must throw an error"""
deck = create_deck_for_user(session, user1)
snapshot = create_snapshot_for_deck(session, user1, deck, is_public=True)
snapshot.is_deleted = True
session.commit()
token = create_access_token(
data={"sub": user1.badge},
expires_delta=timedelta(minutes=15),
)
# Invisible to the owning user
response = client.get(
f"/v2/decks/{deck.id}", headers={"Authorization": f"Bearer {token}"}
)
assert response.status_code == status.HTTP_404_NOT_FOUND
# Invisible to unauthenticated, too
response = client.get(f"/v2/decks/{deck.id}")
assert response.status_code == status.HTTP_404_NOT_FOUND
def test_get_deck_private_snapshot(client: TestClient, session: db.Session, user1):
"""Unauthenticated users must not be able to access private snapshots"""
deck = create_deck_for_user(session, user1)
snapshot = create_snapshot_for_deck(session, user1, deck)
response = client.get(f"/v2/decks/{snapshot.id}")
assert response.status_code == status.HTTP_403_FORBIDDEN
def test_get_deck_private_saved(client: TestClient, deck1):
"""Unauthenticated users must not be able to access private decks via show_saved"""
response = client.get(f"/v2/decks/{deck1.id}", params={"show_saved": True})
assert response.status_code == status.HTTP_403_FORBIDDEN
def test_get_deck_public_snapshot(client: TestClient, snapshot1):
"""Public snapshots must return the snapshot"""
response = client.get(f"/v2/decks/{snapshot1.id}")
assert response.status_code == status.HTTP_200_OK
assert response.json()["deck"]["id"] == snapshot1.id
def test_get_deck_private_snapshot_owned(
client: TestClient, session: db.Session, user1
):
"""Private snapshots must be returned if requested by the owner"""
deck = create_deck_for_user(session, user1)
snapshot = create_snapshot_for_deck(session, user1, deck)
token = create_access_token(
data={"sub": user1.badge},
expires_delta=timedelta(minutes=15),
)
response = client.get(
f"/v2/decks/{snapshot.id}", headers={"Authorization": f"Bearer {token}"}
)
assert response.status_code == status.HTTP_200_OK
assert response.json()["deck"]["id"] == snapshot.id
def test_get_deck(client: TestClient, deck1, snapshot1):
"""By default, the latest public snapshot is returned"""
response = client.get(f"/v2/decks/{deck1.id}")
assert response.status_code == status.HTTP_200_OK
assert response.json()["deck"]["id"] == snapshot1.id
def test_get_deck_saved(client: TestClient, deck1, user1):
"""Showing saved decks must work for the owner"""
token = create_access_token(
data={"sub": user1.badge},
expires_delta=timedelta(minutes=15),
)
response = client.get(
f"/v2/decks/{deck1.id}",
params={"show_saved": True},
headers={"Authorization": f"Bearer {token}"},
)
assert response.status_code == status.HTTP_200_OK
data = response.json()
assert data["deck"]["id"] == deck1.id
assert data["deck"]["is_saved"] == True
def test_list_snapshots_bad_id(client: TestClient, session: db.Session, user1):
"""Not found error thrown when viewing non-existent deck"""
deck = create_deck_for_user(session, user1)
deleted_id = deck.id
session.delete(deck)
session.commit()
response = client.get(f"/v2/decks/{deleted_id}/snapshots")
assert response.status_code == status.HTTP_404_NOT_FOUND
def test_list_snapshots_deleted_deck(client: TestClient, session: db.Session, deck1):
"""Not found error thrown when viewing a deleted deck"""
deck1.is_deleted = True
session.commit()
response = client.get(f"/v2/decks/{deck1.id}/snapshots")
assert response.status_code == status.HTTP_404_NOT_FOUND
def test_list_snapshots_snapshot_id(client: TestClient, snapshot1):
"""Not found error thrown when viewing a snapshot instead of a source deck"""
response = client.get(f"/v2/decks/{snapshot1.id}/snapshots")
assert response.status_code == status.HTTP_404_NOT_FOUND
def test_list_snapshots_anonymous_user(client: TestClient, private_snapshot1):
"""Anonymous users can only view public snapshots"""
response = client.get(f"/v2/decks/{private_snapshot1.source_id}/snapshots")
assert response.status_code == status.HTTP_200_OK
data = response.json()
assert data["count"] == 1
assert len(data["results"]) == 1
def test_list_snapshots_other_user(client: TestClient, user2, private_snapshot1):
"""Users cannot view private snapshots for other user's decks"""
token = create_access_token(
data={"sub": user2.badge},
expires_delta=timedelta(minutes=15),
)
response = client.get(
f"/v2/decks/{private_snapshot1.source_id}/snapshots",
headers={"Authorization": f"Bearer {token}"},
)
assert response.status_code == status.HTTP_200_OK
data = response.json()
assert data["count"] == 1
assert len(data["results"]) == 1
def test_list_snapshots(client: TestClient, user1, private_snapshot1):
"""Users can view both private and public snapshots for decks they own"""
token = create_access_token(
data={"sub": user1.badge},
expires_delta=timedelta(minutes=15),
)
response = client.get(
f"/v2/decks/{private_snapshot1.source_id}/snapshots",
headers={"Authorization": f"Bearer {token}"},
)
assert response.status_code == status.HTTP_200_OK
data = response.json()
assert data["count"] == 2
assert len(data["results"]) == 2
def test_list_snapshots_public_only(client: TestClient, user1, private_snapshot1):
"""Users can view listings that include only public snapshots"""
token = create_access_token(
data={"sub": user1.badge},
expires_delta=timedelta(minutes=15),
)
response = client.get(
f"/v2/decks/{private_snapshot1.source_id}/snapshots",
params={"show_public_only": True},
headers={"Authorization": f"Bearer {token}"},
)
assert response.status_code == status.HTTP_200_OK
data = response.json()
assert data["count"] == 1
assert len(data["results"]) == 1
def test_edit_snapshot_bad_id(client: TestClient, session: db.Session, user1, deck1):
"""Not found error thrown when viewing non-existent ID"""
snapshot = create_snapshot_for_deck(session, user1, deck1)
deleted_id = snapshot.id
session.delete(snapshot)
session.commit()
token = create_access_token(
data={"sub": user1.badge},
expires_delta=timedelta(minutes=15),
)
response = client.patch(
f"/v2/decks/snapshots/{deleted_id}",
headers={"Authorization": f"Bearer {token}"},
json={"title": "New title"},
)
assert response.status_code == status.HTTP_404_NOT_FOUND, response.json()
def test_edit_snapshot_others_snapshot(client: TestClient, user1, snapshot2):
"""Permissions error when attempting to edit other people's decks"""
token = create_access_token(
data={"sub": user1.badge},
expires_delta=timedelta(minutes=15),
)
response = client.patch(
f"/v2/decks/snapshots/{snapshot2.id}",
headers={"Authorization": f"Bearer {token}"},
json={"title": "New title"},
)
assert response.status_code == status.HTTP_403_FORBIDDEN
def test_edit_snaphot_not_snapshot(client: TestClient, user1, deck1):
"""Generic error when trying to edit something that is not a snapshot"""
token = create_access_token(
data={"sub": user1.badge},
expires_delta=timedelta(minutes=15),
)
response = client.patch(
f"/v2/decks/snapshots/{deck1.id}",
headers={"Authorization": f"Bearer {token}"},
json={"title": "New title"},
)
assert response.status_code == status.HTTP_400_BAD_REQUEST
def test_edit_snapshot_notes_required_for_moderation(
client: TestClient, session: db.Session, snapshot1
):
"""Moderation notes are required for admin moderation"""
admin, token = create_admin_token(session)
response = client.patch(
f"/v2/decks/snapshots/{snapshot1.id}",
headers={"Authorization": f"Bearer {token}"},
json={"title": "New title"},
)
assert response.status_code == status.HTTP_400_BAD_REQUEST
def test_edit_snapshot_moderate_description(
client: TestClient, session: db.Session, snapshot1
):
"""Moderating a description saves the old description"""
admin, token = create_admin_token(session)
old_description = snapshot1.description
new_description = "New description"
moderation_notes = "Changed description"
response = client.patch(
f"/v2/decks/snapshots/{snapshot1.id}",
headers={"Authorization": f"Bearer {token}"},
json={"description": new_description, "moderation_notes": moderation_notes},
)
assert response.status_code == status.HTTP_200_OK
session.refresh(snapshot1)
assert snapshot1.description == new_description
assert snapshot1.original_description == old_description
assert snapshot1.is_moderated is True
assert snapshot1.moderation_notes == moderation_notes
def test_edit_snapshot(client: TestClient, session: db.Session, user1, snapshot1):
"""Users can edit their own snapshots"""
token = create_access_token(
data={"sub": user1.badge},
expires_delta=timedelta(minutes=15),
)
new_title = "New title"
new_description = "New description"
response = client.patch(
f"/v2/decks/snapshots/{snapshot1.id}",
headers={"Authorization": f"Bearer {token}"},
json={"title": new_title, "description": new_description},
)
assert response.status_code == status.HTTP_200_OK
session.refresh(snapshot1)
assert snapshot1.title == new_title
assert snapshot1.description == new_description
def test_edit_snapshot_clear_description(
client: TestClient, session: db.Session, user1, snapshot1
):
"""Users can pass empty strings to clear descriptions"""
token = create_access_token(
data={"sub": user1.badge},
expires_delta=timedelta(minutes=15),
)
old_title = snapshot1.title
new_description = ""
response = client.patch(
f"/v2/decks/snapshots/{snapshot1.id}",
headers={"Authorization": f"Bearer {token}"},
json={"description": new_description},
)
assert response.status_code == status.HTTP_200_OK
session.refresh(snapshot1)
assert snapshot1.title == old_title
assert snapshot1.description is None
|
"""
[2015-10-09] Challenge #235 [Hard] Contiguous Chain Variation
https://www.reddit.com/r/dailyprogrammer/comments/3o36b6/20151009_challenge_235_hard_contiguous_chain/
# Description
Based on [Challenge #227 Contiguous chains](http://redd.it/3gpjn3)
... but with a chain meaning 1 *continuous* strand, where each link in the chain can be connected to *at most* two
neighbors. For the purposes of this problem, chains can only be contiguous if they connect horizontally or vertically,
not diagonally (which is the same original constraint).
For example, the input:
4 9
xxxx xxxx
xxx
x x x
xxxxxxxxx
has at least 3 chains, with several valid layouts for the chains. One possible layout that shows 3 chains:
1111 2222
112
3 1 3
333333333
Another way to find 3:
1111 1111
111
2 2 3
222223333
There is also a valid set of 4 chains:
1111 2222
111
3 3 4
333334444
but 4 is not a correct (minimal) output, because 3 is possible.
Your challenge, should you choose to accept it, is to find the minimum number of chains in a given input.
# Challenge Input
4 9
xxxx xxxx
xxx
x x x
xxxxxxxxx
# Challenge Output
3
# Credit
This challenge was suggested by /u/BarqsDew over in /r/DailyProgrammer_Ideas. If you have any suggested challenges,
please share them and there's a good chance we'll use them.
"""
def main():
pass
if __name__ == "__main__":
main()
|
from tensorflow.python.compiler.tensorrt import trt_convert as trt
conversion_params = trt.DEFAULT_TRT_CONVERSION_PARAMS._replace(
precision_mode=trt.TrtPrecisionMode.FP16,
max_workspace_size_bytes=8000000000)
converter = trt.TrtGraphConverterV2(
input_saved_model_dir='checkpoints/LPD_mobilenet_v2_keras_pretrained_v1/saved_model',
conversion_params=conversion_params)
converter.convert()
converter.save('checkpoints/LPD_mobilenet_v2_keras_pretrained_v1/trt')
print('Done Converting to TF-TRT FP16')
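# --- Hedged follow-up sketch (not part of the original script) ---------------
# Loading the converted TF-TRT SavedModel back and grabbing its inference
# function. The path matches the save location above; the 'serving_default'
# signature key is the usual default and is an assumption here.
import tensorflow as tf
loaded = tf.saved_model.load('checkpoints/LPD_mobilenet_v2_keras_pretrained_v1/trt')
infer = loaded.signatures['serving_default']  # default signature exported by converter.save()
print(infer.structured_outputs)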
|
""" Leetcode 322 - Coin Change
https://leetcode.com/problems/coin-change/
1. Time: O(n*amount) Memory: O(amount)
2. Time: O(n*amount) Memory: O(amount)
3. Time: Pending...
"""
from typing import List
class Solution1:
""" 1. Dynamic Programming
    Borrowed from: https://leetcode.com/problems/coin-change/discuss/77360/C%2B%2B-O(n*amount)-time-O(amount)-space-DP-solution
"""
def coin_change(self, coins: List[int], amount: int) -> int:
if amount == 0:
return 0
dp = [0] + [amount+1] * amount
for i in range(1, amount+1):
dp[i] = min(
[dp[i-x] + 1 if i >= x else amount + 1 for x in coins])
return -1 if dp[amount] > amount else dp[amount]
class Solution2:
""" 2. BFS
    Borrowed from: https://leetcode.com/problems/coin-change/discuss/77361/Fast-Python-BFS-Solution
"""
def coin_change(self, coins, amount):
if amount == 0:
return 0
values = [0]
temp = []
visited = [True] + [False] * amount
count = 0
while values:
count += 1
for value in values:
for coin in coins:
if value + coin == amount:
return count
if value + coin > amount:
continue
elif not visited[value+coin]:
visited[value+coin] = True
temp.append(value+coin)
values, temp = temp, []
return -1
class Solution3:
""" 3. DFS """
def coin_change(self, coins, amount):
coins.sort(reverse=True)
THRESHOLD = 10 ** 4 + 1
count = THRESHOLD
def helper(total, current_count, idx):
nonlocal count
if total == amount:
count = min(count, current_count)
if idx < len(coins):
if coins[idx] <= amount < total + coins[idx] * (count - current_count):
for x in range(1, min(count-current_count, (amount-total)//coins[idx])+1):
helper(total+x*coins[idx], current_count+x, idx+1)
helper(total, current_count, idx+1)
helper(0, 0, 0)
return -1 if count == THRESHOLD else count
if __name__ == '__main__':
coins = [1, 2, 5]
amount = 11
res = Solution3().coin_change(coins, amount)
print(res)
|
# %%
'''Example script for using differentiable WDFs to determine the parameters of an RC lowpass filter'''
import sys
sys.path.insert(0, "../lib")
import numpy as np
import tf_wdf as wdf
from tf_wdf import tf
import tqdm as tqdm
import matplotlib.pyplot as plt
import audio_dspy as adsp
import scipy.signal as signal
FS = 48000
# %%
# Construct Differentiable WDF circuit model:
# (based loosely on: https://github.com/andreofner/APC/blob/master/IIR.py)
class Model(tf.Module):
def __init__(self):
super(Model, self).__init__()
self.Vs = wdf.IdealVoltageSource()
self.R1 = wdf.Resistor(1000, True)
self.C1 = wdf.Capacitor(1.0e-6, FS, True)
self.S1 = wdf.Series(self.R1, self.C1)
self.I1 = wdf.Inverter(self.S1)
def forward(self, input):
sequence_length = input.shape[1]
batch_size = input.shape[0]
input = tf.cast(tf.expand_dims(input, axis=-1), dtype=tf.float32)
output_sequence = tf.TensorArray(
dtype=tf.float32, size=sequence_length, clear_after_read=False
)
self.I1.calc_impedance()
for i in range(sequence_length):
self.Vs.set_voltage(input[:, i])
self.Vs.incident(self.I1.reflected())
self.I1.incident(self.Vs.reflected())
output = wdf.voltage(self.C1)
output_sequence = output_sequence.write(i, output)
output_sequence = output_sequence.stack()
return output_sequence
# %%
# Generate data:
batch_size = 256
n_batches = 5
freq = 720
sweep = adsp.sweep_log(100, 10000, (batch_size * n_batches) / FS, FS)[
: batch_size * n_batches
]
b, a = adsp.design_LPF1(freq, FS)
sweep_filt = signal.lfilter(b, a, sweep)
data_in = np.array([sweep])
data_in_batched = np.array(np.array_split(data_in[0], n_batches))
data_target = np.transpose(np.array([sweep_filt]))
print(data_in.shape)
print(data_in_batched.shape)
print(data_target.shape)
plt.plot(data_in[0])
plt.plot(data_in_batched[0])
plt.plot(data_target[:, 0])
# %%
# Training loop:
model = Model()
loss_func = tf.keras.losses.MeanSquaredError()
R_optimizer = tf.keras.optimizers.Adam(learning_rate=25.0)
C_optimizer = tf.keras.optimizers.Adam(learning_rate=10.0e-9)
Rs = []
Cs = []
losses = []
for epoch in tqdm.tqdm(range(100)):
with tf.GradientTape() as tape:
outs = model.forward(data_in)[..., 0]
loss = loss_func(outs, data_target)
grads = tape.gradient(loss, model.trainable_variables)
if epoch % 25 == 0:
print(f"\nCheckpoint (Epoch = {epoch}):")
print(f" Loss: {loss}")
print(f" Grads: {[g.numpy() for g in grads]}")
print(f" Trainables: {[t.numpy() for t in model.trainable_variables]}")
R_optimizer.apply_gradients([(grads[1], model.R1.R)])
C_optimizer.apply_gradients([(grads[0], model.C1.C)])
Rs.append(model.R1.R.numpy())
Cs.append(model.C1.C.numpy())
losses.append(loss)
print(f"\nFinal Results:")
print(f" Loss: {loss}")
print(f" Grads: {[g.numpy() for g in grads]}")
print(f" Trainables: {[t.numpy() for t in model.trainable_variables]}")
# %%
# Print results:
final_freq = 1.0 / (2 * np.pi * model.R1.R * model.C1.C)
print(final_freq)
outs = model.forward(data_in)[..., 0]
plt.plot(data_target[:, 0])
plt.plot(outs, "--")
# %%
# Plot results:
fig, ax = plt.subplots()
fig.subplots_adjust(right=0.75)
twin1 = ax.twinx()
twin2 = ax.twinx()
twin2.spines.right.set_position(("axes", 1.2))
(Rs_plot,) = ax.plot(Rs, "b-", label="R")
(Cs_plot,) = twin1.plot(Cs, "r-", label="C")
(losses_plot,) = twin2.plot(losses, "g-", label="Error")
ax.set_xlabel("Epoch")
ax.set_ylabel("Resistance [Ohms]")
twin1.set_ylabel("Capacitor [Farads]")
twin2.set_ylabel("Error")
ax.yaxis.label.set_color(Rs_plot.get_color())
twin1.yaxis.label.set_color(Cs_plot.get_color())
twin2.yaxis.label.set_color(losses_plot.get_color())
ax.legend(handles=[Rs_plot, Cs_plot, losses_plot])
plt.title("Diff. RC Lowpass Training")
fig.tight_layout() # otherwise the right y-label is slightly clipped
plt.savefig("plots/RC_lpf.png")
# %%
|
#! /usr/bin/env python3
# Released to the public domain, by Tim Peters, 03 October 2000.
"""reindent [-d][-r][-v] [ path ... ]
-d (--dryrun) Dry run. Analyze, but don't make any changes to, files.
-r (--recurse) Recurse. Search for all .py files in subdirectories too.
-n (--nobackup) No backup. Does not make a ".bak" file before reindenting.
-v (--verbose) Verbose. Print informative msgs; else no output.
(--newline) Newline. Specify the newline character to use (CRLF, LF).
Default is the same as the original file.
-h (--help) Help. Print this usage information and exit.
Change Python (.py) files to use 4-space indents and no hard tab characters.
Also trim excess spaces and tabs from ends of lines, and remove empty lines
at the end of files. Also ensure the last line ends with a newline.
If no paths are given on the command line, reindent operates as a filter,
reading a single source file from standard input and writing the transformed
source to standard output. In this case, the -d, -r and -v flags are
ignored.
You can pass one or more file and/or directory paths. When a directory
path, all .py files within the directory will be examined, and, if the -r
option is given, likewise recursively for subdirectories.
If output is not to standard output, reindent overwrites files in place,
renaming the originals with a .bak extension. If it finds nothing to
change, the file is left alone. If reindent does change a file, the changed
file is a fixed-point for future runs (i.e., running reindent on the
resulting .py file won't change it again).
The hard part of reindenting is figuring out what to do with comment
lines. So long as the input files get a clean bill of health from
tabnanny.py, reindent should do a good job.
The backup file is a copy of the one that is being reindented. The ".bak"
file is generated with shutil.copy(), but some corner cases regarding
user/group and permissions could leave the backup file more readable than
you'd prefer. You can always use the --nobackup option to prevent this.
"""
__version__ = "1"
import tokenize
import os
import shutil
import sys
verbose = False
recurse = False
dryrun = False
makebackup = True
# A specified newline to be used in the output (set by --newline option)
spec_newline = None
def usage(msg=None):
    if msg is None:
        msg = __doc__
    print(msg, file=sys.stderr)
def errprint(*args):
    sys.stderr.write(" ".join(str(arg) for arg in args))
    sys.stderr.write("\n")
def main():
    import getopt
    global verbose, recurse, dryrun, makebackup, spec_newline
    try:
        opts, args = getopt.getopt(sys.argv[1:], "drnvh",
                                   ["dryrun", "recurse", "nobackup", "verbose", "newline=", "help"])
    except getopt.error as msg:
        usage(msg)
        return
    for o, a in opts:
        if o in ('-d', '--dryrun'):
            dryrun = True
        elif o in ('-r', '--recurse'):
            recurse = True
        elif o in ('-n', '--nobackup'):
            makebackup = False
        elif o in ('-v', '--verbose'):
            verbose = True
        elif o in ('--newline',):
            if not a.upper() in ('CRLF', 'LF'):
                usage()
                return
            spec_newline = dict(CRLF='\r\n', LF='\n')[a.upper()]
        elif o in ('-h', '--help'):
            usage()
            return
    if not args:
        r = Reindenter(sys.stdin)
        r.run()
        r.write(sys.stdout)
        return
    for arg in args:
        check(arg)
def check(file):
    if os.path.isdir(file) and not os.path.islink(file):
        if verbose:
            print("listing directory", file)
        names = os.listdir(file)
        for name in names:
            fullname = os.path.join(file, name)
            if ((recurse and os.path.isdir(fullname) and
                 not os.path.islink(fullname) and
                 not os.path.split(fullname)[1].startswith("."))
                or name.lower().endswith(".py")):
                check(fullname)
        return
    if verbose:
        print("checking", file, "...", end=' ')
    with open(file, 'rb') as f:
        encoding, _ = tokenize.detect_encoding(f.readline)
    try:
        with open(file, encoding=encoding) as f:
            r = Reindenter(f)
    except IOError as msg:
        errprint("%s: I/O Error: %s" % (file, str(msg)))
        return
    newline = spec_newline if spec_newline else r.newlines
    if isinstance(newline, tuple):
        errprint("%s: mixed newlines detected; cannot continue without --newline" % file)
        return
    if r.run():
        if verbose:
            print("changed.")
            if dryrun:
                print("But this is a dry run, so leaving it alone.")
        if not dryrun:
            bak = file + ".bak"
            if makebackup:
                shutil.copyfile(file, bak)
                if verbose:
                    print("backed up", file, "to", bak)
            with open(file, "w", encoding=encoding, newline=newline) as f:
                r.write(f)
            if verbose:
                print("wrote new", file)
        return True
    else:
        if verbose:
            print("unchanged.")
        return False
def _rstrip(line, JUNK='\n \t'):
    """Return line stripped of trailing spaces, tabs, newlines.
    Note that line.rstrip() instead also strips sundry control characters,
    but at least one known Emacs user expects to keep junk like that, not
    mentioning Barry by name or anything <wink>.
    """
    i = len(line)
    while i > 0 and line[i - 1] in JUNK:
        i -= 1
    return line[:i]
class Reindenter:
    def __init__(self, f):
        self.find_stmt = 1  # next token begins a fresh stmt?
        self.level = 0  # current indent level
        # Raw file lines.
        self.raw = f.readlines()
        # File lines, rstripped & tab-expanded. Dummy at start is so
        # that we can use tokenize's 1-based line numbering easily.
        # Note that a line is all-blank iff it's "\n".
        self.lines = [_rstrip(line).expandtabs() + "\n"
                      for line in self.raw]
        self.lines.insert(0, None)
        self.index = 1  # index into self.lines of next line
        # List of (lineno, indentlevel) pairs, one for each stmt and
        # comment line. indentlevel is -1 for comment lines, as a
        # signal that tokenize doesn't know what to do about them;
        # indeed, they're our headache!
        self.stats = []
        # Save the newlines found in the file so they can be used to
        # create output without mutating the newlines.
        self.newlines = f.newlines
    def run(self):
        tokens = tokenize.generate_tokens(self.getline)
        for _token in tokens:
            self.tokeneater(*_token)
        # Remove trailing empty lines.
        lines = self.lines
        while lines and lines[-1] == "\n":
            lines.pop()
        # Sentinel.
        stats = self.stats
        stats.append((len(lines), 0))
        # Map count of leading spaces to # we want.
        have2want = {}
        # Program after transformation.
        after = self.after = []
        # Copy over initial empty lines -- there's nothing to do until
        # we see a line with *something* on it.
        i = stats[0][0]
        after.extend(lines[1:i])
        for i in range(len(stats) - 1):
            thisstmt, thislevel = stats[i]
            nextstmt = stats[i + 1][0]
            have = getlspace(lines[thisstmt])
            want = thislevel * 4
            if want < 0:
                # A comment line.
                if have:
                    # An indented comment line. If we saw the same
                    # indentation before, reuse what it most recently
                    # mapped to.
                    want = have2want.get(have, -1)
                    if want < 0:
                        # Then it probably belongs to the next real stmt.
                        for j in range(i + 1, len(stats) - 1):
                            jline, jlevel = stats[j]
                            if jlevel >= 0:
                                if have == getlspace(lines[jline]):
                                    want = jlevel * 4
                                break
                    if want < 0:  # Maybe it's a hanging
                        # comment like this one,
                        # in which case we should shift it like its base
                        # line got shifted.
                        for j in range(i - 1, -1, -1):
                            jline, jlevel = stats[j]
                            if jlevel >= 0:
                                want = have + (getlspace(after[jline - 1]) -
                                               getlspace(lines[jline]))
                                break
                    if want < 0:
                        # Still no luck -- leave it alone.
                        want = have
                else:
                    want = 0
            assert want >= 0
            have2want[have] = want
            diff = want - have
            if diff == 0 or have == 0:
                after.extend(lines[thisstmt:nextstmt])
            else:
                for line in lines[thisstmt:nextstmt]:
                    if diff > 0:
                        if line == "\n":
                            after.append(line)
                        else:
                            after.append(" " * diff + line)
                    else:
                        remove = min(getlspace(line), -diff)
                        after.append(line[remove:])
        return self.raw != self.after
    def write(self, f):
        f.writelines(self.after)
    # Line-getter for tokenize.
    def getline(self):
        if self.index >= len(self.lines):
            line = ""
        else:
            line = self.lines[self.index]
            self.index += 1
        return line
    # Line-eater for tokenize.
    def tokeneater(self, type, token, slinecol, end, line,
                   INDENT=tokenize.INDENT,
                   DEDENT=tokenize.DEDENT,
                   NEWLINE=tokenize.NEWLINE,
                   COMMENT=tokenize.COMMENT,
                   NL=tokenize.NL):
        if type == NEWLINE:
            # A program statement, or ENDMARKER, will eventually follow,
            # after some (possibly empty) run of tokens of the form
            # (NL | COMMENT)* (INDENT | DEDENT+)?
            self.find_stmt = 1
        elif type == INDENT:
            self.find_stmt = 1
            self.level += 1
        elif type == DEDENT:
            self.find_stmt = 1
            self.level -= 1
        elif type == COMMENT:
            if self.find_stmt:
                self.stats.append((slinecol[0], -1))
                # but we're still looking for a new stmt, so leave
                # find_stmt alone
        elif type == NL:
            pass
        elif self.find_stmt:
            # This is the first "real token" following a NEWLINE, so it
            # must be the first token of the next program statement, or an
            # ENDMARKER.
            self.find_stmt = 0
            if line:  # not endmarker
                self.stats.append((slinecol[0], self.level))
# Count number of leading blanks.
def getlspace(line):
    i, n = 0, len(line)
    while i < n and line[i] == " ":
        i += 1
    return i
if __name__ == '__main__':
main()
|
class お布団(object):  # "お布団" = futon (bedding), used here as a context manager
    def __init__(self):
        print("眠いよ")  # "I'm sleepy"
    def __enter__(self):
        print("入眠")  # "falling asleep"
        return self
    def __exit__(self, type_, value, traceback):
        print(type_, value, traceback)
        print("起床")  # "waking up"
        return True  # swallow any exception raised inside the futon
    def 状態確認(self):  # "check status"
        print("オフトニアなうZzz")  # roughly "snug in the futon right now Zzz"
def main():
    with お布団() as 布団:  # "布団" = futon
        # 布団.起きたいか()  # "do you want to get up?"
        布団.状態確認()
if __name__ == '__main__':
main()
|
def dedup_list(items):
    """
    Deduplicate a list while preserving the original order.
    """
    new_list = []
    for item in items:
        if item not in new_list:
            new_list.append(item)
    return new_list
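# --- Hedged usage note (not part of the original module) ---------------------
# dedup_list preserves first-seen order but is O(n^2); for hashable items the
# same result can be obtained in O(n) with dict.fromkeys.
if __name__ == '__main__':
    print(dedup_list([3, 1, 3, 2, 1]))           # [3, 1, 2]
    print(list(dict.fromkeys([3, 1, 3, 2, 1])))  # [3, 1, 2]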
|
import lab as B
from wbml.warning import warn_upmodule
from ..matrix import AbstractMatrix
from ..triangular import LowerTriangular, UpperTriangular
__all__ = []
@B.dispatch
def triangular_solve(a: LowerTriangular, b: AbstractMatrix, lower_a: bool = True):
if not lower_a:
warn_upmodule(
f'Solving against {a}, but "lower_a" is set to "False": ignoring flag.',
category=UserWarning,
)
return B.solve(a, b)
@B.dispatch
def triangular_solve(a: UpperTriangular, b: AbstractMatrix, lower_a: bool = True):
if lower_a:
warn_upmodule(
f'Solving against {a}, but "lower_a" is set to "True": ignoring flag.',
category=UserWarning,
)
return B.solve(a, b)
|
import json
import jsonpickle
def encode_indent(obj):
    """
    Encode an object as pretty-printed JSON
    Parameters
    ----------
    obj : object
        Object to encode
    Returns
    -------
    str
        Indented JSON string representation of the object
    """
    return json.dumps(json.loads(jsonpickle.encode(obj)), indent=4, sort_keys=True)
def save_any_json(dict2save, path):
frozen = jsonpickle.encode(dict2save)
with open(path, 'w') as fp:
json.dump(frozen, fp)
def load_any_json(path):
with open(path, 'r') as fp:
read_dict = json.load(fp)
return jsonpickle.decode(read_dict)
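# --- Hedged usage sketch (not part of the original module) -------------------
# Round-trip demonstration of the helpers above; the temporary file name is
# arbitrary and chosen only for this example.
if __name__ == '__main__':
    import os
    import tempfile
    payload = {'name': 'example', 'values': [1, 2, 3]}
    path = os.path.join(tempfile.gettempdir(), 'example_payload.json')
    save_any_json(payload, path)
    restored = load_any_json(path)
    assert restored == payload
    print(encode_indent(payload))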
|
import os
def create_key(template, outtype=("nii.gz",), annotation_classes=None):
if template is None or not template:
raise ValueError("Template must be a valid format string")
return template, outtype, annotation_classes
def infotodict(seqinfo):
"""Heuristic evaluator for determining which runs belong where
allowed template fields - follow python string module:
item: index within category
subject: participant id
seqitem: run number during scanning
subindex: sub index within group
"""
t1w = create_key(
"sub-{subject}/{session}/anat/sub-{subject}_run-00{item:01d}_T1w"
)
task_interoception = create_key(
"sub-{subject}/{session}/func/sub-{subject}_task-interoception_run-00{item:01d}_bold"
)
task_mw = create_key(
"sub-{subject}/{session}/func/sub-{subject}_task-mw_run-00{item:01d}_bold"
)
task_faces = create_key(
"sub-{subject}/{session}/func/sub-{subject}_task-faces_run-00{item:01d}_bold"
)
info = {t1w: [], task_interoception: [], task_mw: [], task_faces: []}
for s in seqinfo:
if (
(s.dim1 == 320)
and (s.dim2 == 300)
and ("T1w_MPR" in s.protocol_name)
):
info[t1w].append(s.series_id)
elif (
(s.dim1 == 94)
and (s.dim2 == 94)
and ("INTEROCEPTION" in s.protocol_name)
):
info[task_interoception].append(s.series_id)
elif (
(s.dim1 == 94)
and (s.dim2 == 94)
and ("MIND_WANDERING" in s.protocol_name)
):
info[task_mw].append(s.series_id)
elif (
(s.dim1 == 94)
and (s.dim2 == 94)
and ("FEAR_FACES" in s.protocol_name)
):
info[task_faces].append(s.series_id)
return info
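# --- Hedged usage note (not part of the original heuristic) ------------------
# heudiconv normally imports this module and calls infotodict(seqinfo) itself;
# the snippet below only illustrates what create_key returns for one template.
if __name__ == '__main__':
    key = create_key("sub-{subject}/{session}/anat/sub-{subject}_run-00{item:01d}_T1w")
    print(key)  # (template, ("nii.gz",), None)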
|
import pdb
import argparse
import random
import timeit
from typing import Any, Dict, List, Iterator, Optional, Sequence, Set, Tuple
import numpy as np
import pandas as pd
from tqdm import tqdm
from functools import partial
import deepchem as dc
from deepchem.data import Dataset
from deepchem.splits import Splitter
from deepchem.splits.splitters import _generate_scaffold
import rdkit
from rdkit import Chem
from rdkit.Chem import AllChem
from atomsci.ddm.pipeline import chem_diversity as cd
from atomsci.ddm.pipeline import dist_metrics
from atomsci.ddm.pipeline import GeneticAlgorithm as ga
def _generate_scaffold_hists(scaffold_sets: List[Set[int]],
                             w: np.ndarray) -> np.ndarray:
"""Counts the number of labelled samples per task per scaffold
Returns an np.array M where each row i represents a scaffold and
each column j represents a task and M[i,j] contains the number of
labelled examples scaffold j has for task i.
Parameters
----------
scaffold_sets: List[Set[int]]
A list of scaffolds. Each scaffold is a set of indexes.
w: np.array
This is the w member of a Dataset. It is a binary matrix
denoting if compound i has a label for task j.
Returns
-------
scaffold_hists: np.array
An np.array M where each row i represents a scaffold and
each column j represents a task and M[i,j] contains the number of
labelled examples scaffold j has for task i.
"""
    scaffold_hists = np.zeros((len(scaffold_sets), w.shape[1]), int)
for i, scaffold_set in enumerate(scaffold_sets):
scaffold_hists[i] = np.sum(w[list(scaffold_set)], axis=0)
return scaffold_hists
def smush_small_scaffolds(scaffolds: List[Set[int]],
num_super_scaffolds: int = 100) -> List[Set[int]]:
"""Combines small scaffolds into super scaffolds
    Since using Murcko scaffolds usually results in mostly scaffolds containing a
    single compound, small scaffolds are merged here into 'super scaffolds'. Each
    super scaffold is made up of Murcko scaffolds, combined using the same method
    as ScaffoldSplitter, just extended to make n 'super scaffolds'.
Parameters
----------
scaffolds: List[Set[int]]
A list of scaffolds
num_super_scaffolds: int
The number of desired super scaffolds
Returns
-------
new_scaffolds: List[Set[int]]
A list of super scaffolds. All roughly the same size. Unless the original
list of scaffolds is shorter than the desired number of scaffolds
"""
if len(scaffolds) <= num_super_scaffolds:
return scaffolds
total_length = np.sum([len(s) for s in scaffolds])
size_per_scaffold = int(total_length)/(num_super_scaffolds-1)
new_scaffolds = [set()]
for scaffold in scaffolds:
current_scaffold = new_scaffolds[-1]
if ((len(current_scaffold) + len(scaffold)) < size_per_scaffold) or (len(current_scaffold) == 0):
current_scaffold.update(scaffold)
else:
new_scaffolds.append(scaffold)
print('new scaffold lengths')
print([len(s) for s in new_scaffolds])
return new_scaffolds
def calc_ecfp(smiles: List[str],
workers: int = 8) -> List[rdkit.DataStructs.cDataStructs.ExplicitBitVect]:
"""Giving a list of strings return a list of ecfp features
Calls AllChem.GetMorganFingerprintAsBitVect for each smiles in parallel
    Parameters
    ----------
smiles: List[str]
List of smiles strings
workers: int
Number of parallel workers
Returns
-------
    fprints: List[rdkit.DataStructs.cDataStructs.ExplicitBitVect]
        A list of Morgan fingerprint bit vectors produced by
        AllChem.GetMorganFingerprintAsBitVect. This datatype is used
        specifically with dist_smiles_from_ecfp.
"""
from functools import partial
func = partial(calc_ecfp,workers=1)
if workers > 1:
from multiprocessing import pool
batchsize = 200
batches = [smiles[i:i+batchsize] for i in range(0, len(smiles), batchsize)]
with pool.Pool(workers) as p:
ecfps = p.map(func,batches)
fprints = [y for x in ecfps for y in x] #Flatten results
else:
mols = [Chem.MolFromSmiles(s) for s in smiles]
fprints = [AllChem.GetMorganFingerprintAsBitVect(mol, 2, 1024) for mol in mols]
return fprints
def dist_smiles_from_ecfp(ecfp1: List[rdkit.DataStructs.cDataStructs.ExplicitBitVect],
ecfp2: List[rdkit.DataStructs.cDataStructs.ExplicitBitVect]) -> List[float]:
"""Calculate tanimoto distance distribution between two lists of ecpf features
Parameters
----------
ecfp1: List[rdkit.DataStructs.cDataStructs.ExplicitBitVect],
A list of ECPF finger prints
ecfp2: List[rdkit.DataStructs.cDataStructs.ExplicitBitVect]
A list of ECPF finger prints
Returns
-------
List[float]
A list of tanimoto distances between 0 and 1
"""
if len(ecfp1) == 0 or len(ecfp2) == 0:
pass
#pdb.set_trace()
return cd.calc_summary(dist_metrics.tanimoto(ecfp1, ecfp2), calc_type='nearest',
num_nearest=1, within_dset=False)
def _generate_scaffold_dist_matrix(scaffold_lists: List[Set[int]],
ecfp_features: List[rdkit.DataStructs.cDataStructs.ExplicitBitVect]) -> np.ndarray:
"""Returns a nearest neighbors distance matrix between each scaffold.
The distance between two scaffolds is defined as the
the distance between the two closest compounds between the two
scaffolds.
Parameters
----------
scaffold_lists: List[Set[int]]
List of scaffolds. A scaffold is a set of indicies into ecfp_features
ecfp_features: List[rdkit.DataStructs.cDataStructs.ExplicitBitVect]
List of ecfp features, one for each compound in the dataset
Returns
-------
dist_mat: np.ndarray
Distance matrix, symmetric.
"""
print('start generating big dist mat')
start = timeit.default_timer()
mat_shape = (len(scaffold_lists), len(scaffold_lists))
scaff_dist_mat = np.zeros(mat_shape)
for i, scaff1 in tqdm(enumerate(scaffold_lists)):
ecfp1 = [ecfp_features[s] for s in scaff1]
for j, scaff2 in enumerate(scaffold_lists[:i]):
if i == j:
continue
ecfp2 = [ecfp_features[s] for s in scaff2]
dists = dist_smiles_from_ecfp(ecfp1, ecfp2)
            # note: despite the name, this summarizes the nearest-neighbor
            # distances between the two scaffolds with their median
            min_dist = np.median(dists)
if min_dist==0:
print("two scaffolds match exactly?!?", i, j)
print(len(set(scaff2).intersection(set(scaff1))))
scaff_dist_mat[i,j] = min_dist
scaff_dist_mat[j,i] = min_dist
print("finished scaff dist mat: %0.2f min"%((timeit.default_timer()-start)/60))
return scaff_dist_mat
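# Hedged usage sketch (comment only): the returned matrix is symmetric with a
# zero diagonal, indexed by super-scaffold position. `smiles_list` is hypothetical.
#
#   ecfps = calc_ecfp(smiles_list, workers=1)
#   scaffs = [{0, 1}, {2, 3}]
#   dmat = _generate_scaffold_dist_matrix(scaffs, ecfps)
#   dmat.shape   # (2, 2), with dmat[0, 1] == dmat[1, 0]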
class MultitaskScaffoldSplitter(Splitter):
"""MultitaskScaffoldSplitter Splitter class.
    Tries to perform a scaffold split across multiple tasks while maintaining
    training, validation, and test fractions for each task, using a GeneticAlgorithm
self.ss: List[Set[int]]
Contains a list of sets of compound indexes. Since using Murcko scaffolds
usually results in mostly scaffolds with 1 compound, these are 'super scaffolds'.
        Each of these super scaffolds is made up of Murcko scaffolds. Murcko scaffolds
are combined using the same method as ScaffoldSplitter, just extended to make
n 'super scaffolds'. Therefore it is possible to get very close to having the
ScaffoldSplitter result, if that is the best split possible.
"""
def generate_scaffolds(self,
dataset: Dataset) -> List[Set[int]]:
"""Returns all scaffolds from the dataset.
Parameters
----------
dataset: Dataset
Dataset to be split.
Returns
-------
scaffold_sets: List[Set[int]]
            List of compound-index sets, one per scaffold, sorted from largest to smallest.
"""
scaffolds = {}
data_len = len(dataset)
for ind, smiles in enumerate(dataset.ids):
scaffold = _generate_scaffold(smiles)
if scaffold is None:
continue
if scaffold not in scaffolds:
scaffolds[scaffold] = {ind}
else:
scaffolds[scaffold].add(ind)
# Sort from largest to smallest scaffold sets
#scaffolds = {key: sorted(value) for key, value in scaffolds.items()}
scaffold_sets = [
scaffold_set for (scaffold, scaffold_set) in sorted(
scaffolds.items(), key=lambda x: len(x[1]), reverse=True)
]
return scaffold_sets
def expand_scaffolds(self,
scaffold_list: List[int]) -> List[int]:
        '''Turns a list of scaffold indices into a list of compound indices
        Given a list of scaffold indices in self.ss return a list of compound
        indices into self.dataset
        Parameters
        ----------
        scaffold_list: List[int]
            A list of indices into self.ss.
        Returns
        -------
        compound_list: List[int]
            A list of compound indices into dataset
'''
compound_list = [i for scaffold in scaffold_list for i in self.ss[scaffold]]
return compound_list
def split_chromosome_to_compound_split(self,
split_chromosome: List[str]) -> Tuple:
'''Turns a split of scaffolds into a split of compounds
A chromosome is represented as a list of strings. Each string is
either 'train', 'valid', or 'test' which means that corresponding
scaffold belongs in 'train', 'valid', or 'test'
Parameters
----------
split_chromosome: List[str]
            A list of strings that are either 'train', 'valid', or 'test'.
Returns
-------
        split: Tuple[List[int], List[int], List[int]]
A tuple of length 3. Each element of this tuple contains a list of
indexes into self.dataset. You can use these indexes to pick out compounds
that belong to each partition
'''
train_part = self.expand_scaffolds([scaff_ind for scaff_ind, part in enumerate(split_chromosome) if part=='train'])
valid_part = self.expand_scaffolds([scaff_ind for scaff_ind, part in enumerate(split_chromosome) if part=='valid'])
test_part = self.expand_scaffolds([scaff_ind for scaff_ind, part in enumerate(split_chromosome) if part=='test'])
split = (train_part, valid_part, test_part)
return split
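    # Hedged illustration (comment only), assuming self.ss == [{0, 1}, {2}, {3, 4}]:
    #
    #   splitter.split_chromosome_to_compound_split(['train', 'test', 'valid'])
    #   # -> ([0, 1], [3, 4], [2])  # train, valid, test compound indices respectively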
def scaffold_diff_fitness(self,
split_chromosome: List[str]) -> float:
'''Grades a chromosome based on how well the partitions are separated
        Grades the quality of the split based on which scaffolds were allotted to
        which partitions. The difference between two partitions is measured as
        the minimum distance over all pairs of scaffolds drawn from the training
        and test partitions
Parameters
----------
        split_chromosome: List[str]
A chromosome is represented as a list of strings. Index i in the
chromosome contains the partition for scaffold i.
Returns
-------
score: float
            Floating point value between 0-1. 1 is the best score and 0 is the worst
'''
train_scaffolds = [i for i, part in enumerate(split_chromosome) if part=='train']
test_scaffolds = [i for i, part in enumerate(split_chromosome) if part=='test']
# if a partition is completely empty, return 0
if len(train_scaffolds) == 0 or len(test_scaffolds) == 0:
return 0
min_dist = 1e20
for ind1 in train_scaffolds:
for ind2 in test_scaffolds:
assert(not (ind1 == ind2))
# use the cached distance matrix to speed up computation
dist = self.scaff_scaff_distmat[ind1, ind2]
min_dist = np.min([min_dist, np.min(dist)])
return min_dist
def ratio_fitness_old(self, split_chromosome: List[str]) -> float:
"""Calculates a fitness score based on how accurately divided training/test/validation.
Parameters
----------
List[str]: split_chromosome
A list of strings, index i contains a string, 'train', 'valid', 'test'
which determines the partition that scaffold belongs to
Returns
-------
List[str]
A list of strings, index i contains a string, 'train', 'valid', 'test'
which determines the partition that scaffold belongs to
"""
start = timeit.default_timer()
total_counts = np.sum(self.dataset.w, axis=0)
subset_counts = [np.sum(self.dataset.w[subset], axis=0) for subset in \
self.split_chromosome_to_compound_split(split_chromosome)]
# subset order goes train, valid, test. just consider train for the moment
subset_ratios = [subset_count/total_counts for subset_count in subset_counts]
#print("mean: %0.2f, median: %0.2f, std: %0.2f"%(np.mean(subset_ratios[0]), np.median(subset_ratios[0]), np.std(subset_ratios[0])))
# fitness of split is the size of the smallest training dataset
# ratio_fit = min(subset_ratios[0]) # this resulted in a split with 0 test. shoulda seen that coming
num_tasks = self.dataset.w.shape[1]
        # imagine the perfect split is a point in 2*num_tasks space
        # (the train and valid fractions for each task)
target_split = np.concatenate([[self.frac_train]*num_tasks,
[self.frac_valid]*num_tasks])
        # this is the current split, also in 2*num_tasks space
current_split = np.concatenate(subset_ratios[:2])
# if any partition is 0, then this split fails
if min(current_split) == 0:
return 0
# worst possible distance to normalize this between 0 and 1
worst_distance = np.linalg.norm(np.ones(num_tasks*2))
ratio_fit = 1 - np.linalg.norm(target_split-current_split)/worst_distance
#print("\tratio_fitness: %0.2f min"%((timeit.default_timer()-start)/60))
return ratio_fit
def ratio_fitness(self, split_chromosome: List[str]) -> float:
"""Calculates a fitness score based on how accurately divided training/test/validation.
Treats the min,median,max of each of the three partitions as a 9D point and uses
euclidean distance to measure the distance to that point
Parameters
----------
List[str]: split_chromosome
A list of strings, index i contains a string, 'train', 'valid', 'test'
which determines the partition that scaffold belongs
Returns
-------
float
A float between 0 and 1. 1 best 0 is worst
"""
start = timeit.default_timer()
total_counts = np.sum(self.dataset.w, axis=0)
subset_counts = [np.sum(self.dataset.w[subset], axis=0) for subset in \
self.split_chromosome_to_compound_split(split_chromosome)]
# subset order goes train, valid, test. just consider train for the moment
subset_ratios = [subset_count/total_counts for subset_count in subset_counts]
# imagine the perfect split is a point in 9D space. For each subset we measure
# 3 values, min, median, max. Ideally, these 3 values all equal the desired fraction
target_split = np.concatenate([[self.frac_train]*3,
[self.frac_valid]*3,
[self.frac_test]*3])
# this is the current split also in 9D space
current_split = np.concatenate([[np.min(subset), np.median(subset), np.max(subset)] \
for subset in subset_ratios])
# if any partition is 0, then this split fails
if min(current_split) == 0:
return 0
# worst possible distance to normalize this between 0 and 1
worst_distance = np.linalg.norm(np.ones(len(target_split)))
ratio_fit = 1 - np.linalg.norm(target_split-current_split)/worst_distance
#print("\tratio_fitness: %0.2f min"%((timeit.default_timer()-start)/60))
return ratio_fit
def grade(self, split_chromosome: List[str]) -> float:
"""Assigns a score to a given chromosome
Balances the score between how well stratified the split is and how
different the training and test partitions are.
Parameters
----------
        split_chromosome: List[str]
            A list of strings, index i contains a string, 'train', 'valid', 'test'
            which determines the partition that scaffold belongs to
        Returns
        -------
        float
            A float between 0 and 1. 1 is the best score and 0 is the worst
"""
fitness = self.diff_fitness_weight*self.scaffold_diff_fitness(split_chromosome) \
+ self.ratio_fitness_weight*self.ratio_fitness(split_chromosome)
return fitness
def init_scaffolds(self,
dataset: Dataset,
num_super_scaffolds: int = 20) -> None:
"""Creates super scaffolds used in splitting
        This function sets the member variables self.ss and self.scaffold_hists.
Parameters
----------
dataset: Dataset
Deepchem Dataset. The parameter w must be created and ids must contain
smiles.
num_super_scaffolds: int
The number of super scaffolds.
Returns
-------
None
Only sets member variables self.ss and self.scaffold_hists
"""
# First assign each of the samples to a scaffold bin
# list of lists. one list per scaffold
big_ss = self.generate_scaffolds(dataset)
        # using the same strategy as scaffold split, combine the scaffolds
        # together until you have roughly num_super_scaffolds scaffold sets
self.ss = smush_small_scaffolds(big_ss, num_super_scaffolds=num_super_scaffolds)
# rows is the number of scaffolds
# columns is number of tasks
self.scaffold_hists = _generate_scaffold_hists(self.ss, dataset.w)
def split(self,
dataset: Dataset,
frac_train: float = 0.8,
frac_valid: float = 0.1,
frac_test: float = 0.1,
seed: Optional[int] = None,
diff_fitness_weight: float = 0,
ratio_fitness_weight: float = 1,
num_super_scaffolds: int = 20,
num_pop: int = 100,
workers: int = 10,
num_generations: int=30,
print_timings: bool = False) -> Tuple:
"""Creates a split for the given datset
This split splits the dataset into a list of super scaffolds then
assigns each scaffold into one of three partitions. The scaffolds
are assigned using a GeneticAlgorithm and tries to maximize the
difference between the training and test partitions as well as ensuring
all tasks have an appropriate number of training/validation/test samples
Parameters
----------
dataset: Dataset
Deepchem Dataset. The parameter w must be created and ids must contain
smiles.
frac_train: float
The fraction of data that each task should have in the train partition
frac_valid: float
The fraction of data that each task should have in the valid partition
frac_test: float
The fraction of data that each task should have in the test partition
seed: Optional[int]
Seed for random number generator
diff_fitness_weight: float
Weight for the importance of the difference between training and test
partitions
        ratio_fitness_weight: float
Weight for the importance of ensuring each task has the appropriate
number of samples in training/validation/test
num_super_scaffolds: int
The number of super scaffolds.
num_pop: int
Size of the population for the genetic algorithm
workers: int
Number of workers to parallelize the genetic algorithm
num_generations: int
Number of generations to run the genetic algorithm
Returns
-------
Tuple
A tuple with 3 elements that are training, validation, and test compound
indexes into dataset, respectively
"""
self.dataset = dataset
self.diff_fitness_weight = diff_fitness_weight
self.ratio_fitness_weight = ratio_fitness_weight
self.frac_train = frac_train
self.frac_valid = frac_valid
self.frac_test = frac_test
# set up super scaffolds
self.init_scaffolds(self.dataset, num_super_scaffolds)
# ecpf features
self.ecfp_features = calc_ecfp(dataset.ids)
# calculate ScaffoldxScaffold distance matrix
start = timeit.default_timer()
self.scaff_scaff_distmat = _generate_scaffold_dist_matrix(self.ss, self.ecfp_features)
#print('scaffold dist mat %0.2f min'%((timeit.default_timer()-start)/60))
# initial population
population = []
for i in range(num_pop):
start = timeit.default_timer()
split_chromosome = self._split(frac_train=frac_train, frac_valid=frac_valid,
frac_test=frac_test)
#print("per_loop: %0.2f min"%((timeit.default_timer()-start)/60))
population.append(split_chromosome)
gene_alg = ga.GeneticAlgorithm(population, self.grade, ga_crossover,
ga_mutate)
#gene_alg.iterate(num_generations)
for i in range(num_generations):
gene_alg.step(print_timings=print_timings)
best_fitness = gene_alg.pop_scores[0]
print("step %d: best_fitness %0.2f"%(i, best_fitness))
#print("%d: %0.2f"%(i, gene_alg.grade_population()[0][0]))
best_fit = gene_alg.pop_scores[0]
best = gene_alg.pop[0]
#print('best ever fitness %0.2f'%best_ever_fit)
result = self.split_chromosome_to_compound_split(best)
return result
def _split(self,
frac_train: float = 0.8,
frac_valid: float = 0.1,
frac_test: float = 0.1,
seed: Optional[int] = None) -> List[str]:
"""Return indices for specified split
Parameters
----------
seed: int, optional (default None)
Random seed to use.
frac_train: float, optional (default 0.8)
The fraction of data to be used for the training split.
frac_valid: float, optional (default 0.1)
The fraction of data to be used for the validation split.
frac_test: float, optional (default 0.1)
The fraction of data to be used for the test split.
Returns
-------
List[str]
A list of strings, index i contains a string, 'train', 'valid', 'test'
which determines the partition that scaffold belongs to
"""
if seed is not None:
np.random.seed(seed)
# Figure out how many positive samples we want for each task in each dataset.
n_tasks = self.scaffold_hists.shape[1]
y_present = self.scaffold_hists != 0
indices_for_task = [
# look at which scaffolds contain samples for a task
            # (random shuffling of the indices is currently disabled below)
np.nonzero(y_present[:, i])[0]
#np.random.permutation(np.nonzero(y_present[:, i])[0])
for i in range(n_tasks)
]
count_for_task = np.array([len(x) for x in indices_for_task])
        # use the builtin int dtype; np.int was removed in newer numpy releases
        train_target = np.round(frac_train * count_for_task).astype(int)
        valid_target = np.round(frac_valid * count_for_task).astype(int)
        test_target = np.round(frac_test * count_for_task).astype(int)
# Assign the positive samples to datasets. Since a sample may be positive
# on more than one task, we need to keep track of the effect of each added
# sample on each task. To try to keep everything balanced, we cycle through
# tasks, assigning one positive sample for each one.
        train_counts = np.zeros(n_tasks, int)
        valid_counts = np.zeros(n_tasks, int)
        test_counts = np.zeros(n_tasks, int)
set_target = [train_target, valid_target, test_target]
set_counts = [train_counts, valid_counts, test_counts]
set_inds: List[List[int]] = [[], [], []]
assigned = set()
for i in range(len(self.scaffold_hists)):
for task in range(n_tasks):
indices = indices_for_task[task]
if i < len(indices) and indices[i] not in assigned:
# We have a sample that hasn't been assigned yet. Assign it to
# whichever set currently has the lowest fraction of its target for
# this task.
index = indices[i]
set_frac = [
1 if set_target[i][task] == 0 else
set_counts[i][task] / set_target[i][task] for i in range(3)
]
s = np.argmin(set_frac)
set_inds[s].append(index)
assigned.add(index)
set_counts[s] += self.scaffold_hists[index]
split_chromosome = ['']*len(self.ss)
for part_name, scaffolds in zip(['train', 'valid', 'test'], set_inds):
for s in scaffolds:
split_chromosome[s] = part_name
return split_chromosome
def ga_crossover(parents: List[List[str]],
num_pop: int) -> List[List[str]]:
"""Create the next generation from parents
    A random crossover point is chosen; genes up to that index are taken from
    the first parent and genes from that index to the end are taken from the
    second parent.
Parameters
----------
parents: List[List[str]]
A list of chromosomes.
num_pop: int
The number of new chromosomes to make
Returns
-------
List[List[str]]
A list of chromosomes. The next generation
"""
# just single crossover point
new_pop = []
for i in range(num_pop):
parent1 = parents[i%len(parents)]
parent2 = parents[(i+1)%len(parents)]
crossover_point = random.randint(0, len(parents[0])-1)
new_pop.append(parent1[:crossover_point]+parent2[crossover_point:])
return new_pop
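# Hedged usage sketch (comment only): single-point crossover on two toy
# chromosomes. The exact offspring depend on the random crossover point.
#
#   parents = [['train', 'train', 'test'], ['valid', 'test', 'train']]
#   children = ga_crossover(parents, num_pop=4)
#   len(children)      # 4
#   len(children[0])   # 3, same chromosome length as the parents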
def ga_mutate(new_pop: List[List[str]],
mutation_rate: float = .02) -> List[List[str]]:
"""Mutate the population
    Each chromosome is copied and each gene mutates with probability
    mutation_rate. When a gene mutates, it is randomly reassigned to a
    partition, possibly the same one.
Parameters
----------
new_pop: List[List[str]]
A list of chromosomes.
mutation_rate: float
How often a mutation occurs. 0.02 is a good rate for
my test sets.
Returns
-------
List[List[str]]
A list of chromosomes. Mutated chromosomes.
"""
mutated = []
for solution in new_pop:
new_solution = list(solution)
for i, gene in enumerate(new_solution):
if random.random() < mutation_rate:
new_solution[i] = ['train', 'valid', 'test'][random.randint(0,2)]
mutated.append(new_solution)
return mutated
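# Hedged usage sketch (comment only): with a high mutation_rate most genes are
# re-drawn uniformly from ('train', 'valid', 'test'); with the default 0.02
# roughly 2% of genes change per generation.
#
#   pop = [['train'] * 10]
#   mutated = ga_mutate(pop, mutation_rate=0.5)
#   # mutated[0] is a copy of pop[0] with about half of its genes re-sampled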
def make_y_w(dataframe: pd.DataFrame,
columns: List[str]) -> Tuple:
"""Create y and w matrices for Deepchem's Dataset
Extracts labels and builds the w matrix for a dataset.
The w matrix contains a 1 if there's a label and 0
if not.
Parameters
----------
dataframe: pd.DataFrame
Pandas DataFrame
columns: List[str]
A list of columns that contain labels.
Returns
-------
Tuple
Two numpy arrays, y and w.
"""
y = dataframe[columns].values
w = np.ones_like(y)
nan_indx = np.argwhere(np.isnan(y))
for r, c in nan_indx:
w[r, c] = 0
return y, w
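# Hedged usage sketch (comment only): NaN labels get weight 0, everything else
# weight 1. The column names here are made up for the example.
#
#   df = pd.DataFrame({'task_a': [1.0, np.nan], 'task_b': [0.0, 1.0]})
#   y, w = make_y_w(df, ['task_a', 'task_b'])
#   # y -> [[1., 0.], [nan, 1.]] and w -> [[1., 1.], [0., 1.]]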
def split_using_MultitaskScaffoldSplit(df: pd.DataFrame,
id_col: str,
target_cols: List[str],
smiles_col: str,
**kwargs) -> pd.DataFrame:
'''Produces an AMPL compatible split file given a dataframe
Parameters
----------
    df: pd.DataFrame
Dataframe containing compounds to split
id_col: str
Column containing compound ids
target_cols: List[str]
List of target columns. Can be of length 1
smiles_col: str
Column containing base_rdkit_smiles strings
**kwargs:
Any arguments you want to pass to MultitaskScaffoldSplit
Returns
-------
pd.DataFrame
Returns a DataFrame that's compatible with AMPL. Plus
an extra column that shows which scaffold each compound was
assigned to.
'''
# Build a deepchem Dataset. X isn't used and can be ignored
X = np.ones((len(df), 10))
y, w = make_y_w(df, target_cols)
ids = df[smiles_col].values
# build deepchem Dataset
dataset = dc.data.DiskDataset.from_numpy(X, y, w=w, ids=ids)
mss = MultitaskScaffoldSplitter()
splits = mss.split(dataset, **kwargs)
split_df = pd.DataFrame({'cmpd_id':df[id_col].values,
'fold': [0]*len(df)})
    # build the subset column as an array first; assigning through
    # split_df['subset'].iloc[...] is chained indexing and may silently fail
    # to update the frame on newer pandas versions
    subset_array = np.array([''] * split_df.shape[0], dtype=object)
    subset_array[splits[0]] = 'train'
    subset_array[splits[1]] = 'valid'
    subset_array[splits[2]] = 'test'
    split_df['subset'] = subset_array
return split_df
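# Hedged usage sketch (comment only); the column names are hypothetical and the
# keyword arguments are forwarded to MultitaskScaffoldSplitter.split():
#
#   split_df = split_using_MultitaskScaffoldSplit(
#       df, id_col='compound_id', target_cols=['pIC50'],
#       smiles_col='base_rdkit_smiles', num_generations=10)
#   split_df.columns   # ['cmpd_id', 'fold', 'subset']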
def split_with(df, splitter, smiles_col, id_col, response_cols, **kwargs):
'''
    Given a dataframe and a splitter, perform the split.
    Returns a split dataframe keyed by cmpd_id, with a subset column
    containing train, valid, or test
'''
# Build a deepchem Dataset. X isn't used and can be ignored
X = np.ones((len(df), 10))
y, w = make_y_w(df, response_cols)
ids = df[smiles_col].values
dataset = dc.data.DiskDataset.from_numpy(X, y, w=w, ids=ids)
splits = splitter.split(dataset, **kwargs)
split_df = pd.DataFrame(df[[id_col]])
split_df = split_df.rename(columns={id_col:'cmpd_id'})
split_array = np.array(['unassigned']*split_df.shape[0])
split_array[splits[0]] = 'train'
split_array[splits[1]] = 'valid'
split_array[splits[2]] = 'test'
split_df['subset'] = split_array
    if hasattr(splitter, 'ss'):
ss = splitter.ss
scaffold_array = np.ones(split_df.shape[0])
for i, scaffold in enumerate(ss):
scaffold_array[list(scaffold)] = i
split_df['scaffold'] = scaffold_array
return split_df
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('data', type=str, help='path to input csv')
parser.add_argument('dist_weight', type=float,
help='Weight for the importance of the difference between training and test partitions')
parser.add_argument('ratio_weight', type=float,
help='Weight for the importance of ensuring each task has the appropriate number of samples in training/validation/test')
parser.add_argument('num_gens', type=int,
help='Number of generations to run.')
parser.add_argument('smiles_col', type=str, help='the column containing smiles')
parser.add_argument('id_col', type=str, help='the column containing ids')
    parser.add_argument('response_cols', type=str, help='comma separated string of response columns')
parser.add_argument('output', type=str, help='name of the split file')
return parser.parse_args()
if __name__ == '__main__':
args = parse_args()
total_df = pd.read_csv(args.data)
dfw = args.dist_weight
rfw = args.ratio_weight
response_cols = args.response_cols.split(',')
mss = MultitaskScaffoldSplitter()
mss_split_df = split_with(total_df, mss,
smiles_col=args.smiles_col, id_col=args.id_col, response_cols=response_cols,
diff_fitness_weight=dfw, ratio_fitness_weight=rfw, num_generations=args.num_gens)
mss_split_df.to_csv(args.output, index=False)
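# Hedged command-line sketch (comment only); the script name, file names, and
# column names below are placeholders for whatever dataset is being split:
#
#   python multitask_scaffold_split.py data.csv 0.7 0.3 20 \
#       base_rdkit_smiles compound_id pIC50,logS split.csv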
|
import numpy as np
from numpy.random import default_rng
import scipy.sparse
import scipy.sparse.linalg
from melvin import BasisFunctions
def sech(x):
return 1.0 / np.cosh(x)
def load_scipy_sparse(xp):
if xp.__name__ == "numpy":
return scipy.sparse
elif xp.__name__ == "cupy":
import cupyx
return cupyx.scipy.sparse
def load_scipy_sparse_linalg(xp):
if xp.__name__ == "numpy":
return scipy.sparse.linalg
elif xp.__name__ == "cupy":
import cupyx.scipy.sparse.linalg
return cupyx.scipy.sparse.linalg
def init_var_with_noise(var, epsilon, seed=0):
rng = default_rng(seed)
var_shape = var.getp().shape
data_p = np.zeros(var_shape)
data_p += epsilon * (2 * rng.random(var_shape) - 1.0)
var.load(data_p, is_physical=True)
def calc_kinetic_energy(ux, uz, xp, params):
"""
Calculates the kinetic energy from velocity
Args:
ux (np.ndarray): x-velocity
uz (np.ndarray): z-velocity
        xp (module): array module (numpy or cupy)
params (Parameters): parameters
Returns:
float: kinetic energy
"""
nx, nz = params.nx, params.nz
ke = uz.getp() ** 2 + ux.getp() ** 2
total_ke = 0.5 * xp.sum(ke) / (nx * nz)
return total_ke
def calc_velocity_from_vorticity(
vorticity, streamfunction, ux, uz, laplacian_solver
):
laplacian_solver.solve(-vorticity.gets(), out=streamfunction._sdata)
if streamfunction._basis_functions[1] is BasisFunctions.FDM:
streamfunction.to_physical()
ux.setp(-streamfunction.pddz())
else:
ux[:] = -streamfunction.sddz()
ux.to_physical()
if streamfunction._basis_functions[0] is BasisFunctions.FDM:
streamfunction.to_physical()
uz.setp(streamfunction.pddx())
else:
uz[:] = streamfunction.sddx()
uz.to_physical()
|
from __future__ import division, absolute_import, print_function
from opendeep.optimization.loss.loss import *
from opendeep.optimization.loss.binary_crossentropy import *
from opendeep.optimization.loss.categorical_crossentropy import *
from opendeep.optimization.loss.isotropic_gaussian_LL import *
from opendeep.optimization.loss.mse import *
from opendeep.optimization.loss.neg_LL import *
from opendeep.optimization.loss.zero_one import *
from opendeep.optimization.loss import utils
|
from .__version__ import __version__
from .cleanups import add_cleanup, add_critical_cleanup
from .conf import config
from .ctx import context
from .ctx import g, session, test
from .core.session import Session
# assertions
from . import assertions
should = assertions
from .assertions import (
assert_contains,
assert_equal,
assert_equals,
assert_false,
assert_in,
assert_is,
assert_is_none,
assert_empty,
assert_not_empty,
assert_is_not,
assert_is_not_none,
assert_isinstance,
assert_not_contain,
assert_not_contains,
assert_not_equal,
assert_not_equals,
assert_not_in,
assert_not_isinstance,
assert_raises,
assert_true,
)
from .core.test import Test
from .core.test import abstract_test_class
from .core.fixtures import parametrize, parameters
from .core.fixtures.utils import fixture
from .utils import skip_test, skipped, add_error, add_failure
from .app import get_application_context
from .runner import run_tests
import logbook
logger = logbook.Logger(__name__)
|
from api.utils.db import db
from flask import request
class User(db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
github_access_token = db.Column(db.String(255))
github_id = db.Column(db.Integer)
github_login = db.Column(db.String(255))
username = db.Column(db.String(255))
email = db.Column(db.String(255))
image = db.Column(db.String(255))
def __init__(self, github_access_token):
self.github_access_token = github_access_token
def update_(self, **kwargs):
"""
        Update the stored github_access_token for the given username.
"""
username, github_access_token = kwargs.get('username'), kwargs.get('github_access_token')
sql = """UPDATE users SET github_access_token = %s
WHERE username = %s"""
if username and github_access_token:
db.engine.execute(sql, (github_access_token, username))
# db.session.commit() |
import os
from textwrap import dedent
import pytest
from bisect_scanner.config import Config, load_config, configure
import bisect_scanner.config as config
@pytest.fixture
def valid_config_file(tmp_path):
content = dedent("""
[node_urls]
W3_URL=wss://w3_url/ws
POLYGON_URL=wss://polygon_url/ws
ETHEREUM_URL=wss://ethereum_url/ws
""")
fn = tmp_path / 'valid.conf'
with open(fn, 'w+') as f:
f.write(content)
return fn
@pytest.fixture
def invalid_config_file(tmp_path):
content = dedent("""
[urls]
W3_URL=wss://w3_url/ws
POLYGON_URL=wss://polygon_url/ws
ETHEREUM_URL=wss://ethereum_url/ws
""")
    fn = tmp_path / 'invalid.conf'
with open(fn, 'w+') as f:
f.write(content)
return fn
def test_load_config_valid(valid_config_file):
config = load_config(valid_config_file)
assert [*config['node_urls'].keys()] == ['w3_url', 'polygon_url', 'ethereum_url']
assert config['node_urls']['W3_URL'] == "wss://w3_url/ws"
assert config['node_urls']['POLYGON_URL'] == "wss://polygon_url/ws"
assert config['node_urls']['ETHEREUM_URL'] == "wss://ethereum_url/ws"
def test_no_config():
config = Config('nonexistent.ini')
assert config.ETHEREUM_URL is None
os.environ['BISECTSCANNER_ETHEREUM_URL'] = 'wss://url'
config = Config('nonexistent.ini')
assert config.ETHEREUM_URL == 'wss://url'
def test_configure():
os.environ['BISECTSCANNER_POLYGON_URL'] = 'wss://polygon_url'
configure('nonexistent.ini')
assert config.config.POLYGON_URL == 'wss://polygon_url'
|
#!python
# #define VAR foo
if __name__ == '__main__':
foo = 1
print VAR
|
#! /usr/bin/env python3
# image to ascii pixelwise
# Copyright Lesmana Zimmer [email protected]
# Licensed under WTFPL version 2
# http://www.wtfpl.net/about/
import sys
import itertools
import pprint
from PIL import Image
try:
filename = sys.argv[1]
except IndexError:
print('need argument: filename of image')
sys.exit(1)
with Image.open(filename) as im:
pixels = im.getdata()
width = im.width
height = im.height
#print(im.getbands())
iterpixels = iter(pixels)
rows = []
for y in range(height):
row = []
for x in range(width):
pixel = next(iterpixels)
row.append(pixel)
rows.append(row)
pairs = [iter(rows)] * 2
doublerows = itertools.zip_longest(*pairs)
upperhalfblock = '\u2580'
lowerhalfblock = '\u2584'
fullblock = '\u2588'
noblock = ' '
for upperrow, lowerrow in doublerows:
for upperpixel, lowerpixel in zip(upperrow, lowerrow):
#print(upperpixel, lowerpixel)
ur, ug, ub, ua = upperpixel
lr, lg, lb, la = lowerpixel
if ua == 0 and la == 0:
sys.stdout.write(noblock)
elif ua == 0 and la == 255:
sys.stdout.write(f'\033[38;2;{lr};{lg};{lb}m' + lowerhalfblock + '\033[0m')
elif ua == 255 and la == 0:
sys.stdout.write(f'\033[38;2;{ur};{ug};{ub}m' + upperhalfblock + '\033[0m')
elif ua == 255 and la == 255:
sys.stdout.write(f'\033[38;2;{ur};{ug};{ub};48;2;{lr};{lg};{lb}m' + upperhalfblock + '\033[0m')
else:
raise Exception(f'unexpected alpha value: {ua}, {la}')
sys.stdout.write('\n')
|
import pytest
from wtforms import Form
from tests import MultiDict
from wtforms_alchemy import DataRequired, PhoneNumberField
class TestPhoneNumberField(object):
def setup_method(self, method):
self.valid_phone_numbers = [
'040 1234567',
'+358 401234567',
'09 2501234',
'+358 92501234',
'0800 939393',
'09 4243 0456',
'0600 900 500'
]
self.invalid_phone_numbers = [
'abc',
'+040 1234567',
'0111234567',
'358'
]
def init_form(self, **kwargs):
class TestForm(Form):
phone_number = PhoneNumberField(**kwargs)
return TestForm
def test_valid_phone_numbers(self):
form_class = self.init_form(region='FI')
for phone_number in self.valid_phone_numbers:
form = form_class(MultiDict(phone_number=phone_number))
form.validate()
assert len(form.errors) == 0
def test_invalid_phone_numbers(self):
form_class = self.init_form(region='FI')
for phone_number in self.invalid_phone_numbers:
form = form_class(MultiDict(phone_number=phone_number))
form.validate()
assert len(form.errors['phone_number']) == 1
def test_render_empty_phone_number_value(self):
form_class = self.init_form(region='FI')
form = form_class(MultiDict(phone_number=''))
assert 'value=""' in form.phone_number()
def test_empty_phone_number_value_passed_as_none(self):
form_class = self.init_form(region='FI')
form = form_class(MultiDict(phone_number=''))
form.validate()
assert len(form.errors) == 0
assert form.data['phone_number'] is None
def test_default_display_format(self):
form_class = self.init_form(region='FI')
form = form_class(MultiDict(phone_number='+358401234567'))
assert 'value="040 1234567"' in form.phone_number()
def test_international_display_format(self):
form_class = self.init_form(
region='FI',
display_format='international'
)
form = form_class(MultiDict(phone_number='0401234567'))
assert 'value="+358 40 1234567"' in form.phone_number()
def test_e164_display_format(self):
form_class = self.init_form(
region='FI',
display_format='e164'
)
form = form_class(MultiDict(phone_number='0401234567'))
assert 'value="+358401234567"' in form.phone_number()
def test_field_rendering_when_invalid_phone_number(self):
form_class = self.init_form()
form = form_class(MultiDict(phone_number='invalid'))
form.validate()
assert 'value="invalid"' in form.phone_number()
@pytest.mark.parametrize(
'number,error_msg,check_value',
(
(
'',
'This field is required.',
lambda v, orig: v is None
),
(
'1',
'Not a valid phone number value',
lambda v, orig: v is not None
),
(
'123',
'Not a valid phone number value',
lambda v, orig: v is not None
),
(
'+46123456789',
None,
lambda v, orig: v.e164 == orig
),
)
)
def test_required_phone_number_form(self, number, error_msg, check_value):
class PhoneNumberForm(Form):
phone = PhoneNumberField(
'Phone number',
validators=[DataRequired()]
)
form = PhoneNumberForm(MultiDict(
phone=number
))
form.validate()
if error_msg:
assert len(form.errors) == 1
assert form.errors['phone'][0] == error_msg
else:
assert len(form.errors) == 0
assert check_value(form.phone.data, number) is True
|
#!/usr/bin/env python
import _init_paths
import gym
from tf_rl.controller import DiscreteDeepQ, NL
specname = 'CartPole-v0'
serializedname = 'dqntest_'+specname+'.pbx'
spec = gym.spec(specname)
env = spec.make()
episode_count = 250
max_steps = 10000
action_space = env.action_space
maxaction = action_space.n
observation_space = env.observation_space
maxobservation = observation_space.shape[0]
batchsize = 32 # minibatch size used for each DQN training step
controller = DiscreteDeepQ(maxobservation, [128, 128, maxaction],
[NL.TANH, NL.TANH, NL.IDENTITY], learning_rate=0.001,
decay=0.9, minibatch_size=batchsize, discount_rate=0.99,
exploration_period=5000, max_experience=10000, )
controller.initialize(serializedname)
# training step
for ep in xrange(episode_count):
observation = env.reset()
reward = done = None
total_reward = 0
nsteps = 0
for step_it in range(max_steps):
action = controller.action(observation)
new_observation, reward, done, _ = env.step(action)
controller.store(observation, action, reward, new_observation)
controller.training_step()
observation = new_observation
total_reward = total_reward + reward
# env.render()
nsteps = step_it # record step iteration since episode can end early
if done:
break
print 'episode {}: total reward of {} in {} steps'.format(ep, total_reward, nsteps+1)
# controller.save(serializedname)
|
import environ
ROOT_DIR = environ.Path(__file__) - 3
APPS_DIR = ROOT_DIR.path('app')
env = environ.Env()
READ_DOT_ENV_FILE = env.bool('DJANGO_READ_DOT_ENV_FILE', default=True)
if READ_DOT_ENV_FILE:
env_file = str(ROOT_DIR.path('.env'))
env.read_env(env_file)
DJANGO_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sitemaps',
'django.contrib.admin',
'django_admin_json_editor',
'import_export',
'mapwidgets',
]
THIRD_PARTY_APPS = [
'crispy_forms',
'rest_framework_filters',
'django_filters',
'rest_framework',
'rest_framework_swagger',
'django_extensions',
'storages',
'django.contrib.gis',
]
LOCAL_APPS = [
'app.users',
'app.map',
'app.pets',
]
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
class AllSameOriginMiddleware(object):
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
response = self.get_response(request)
response['Access-Control-Allow-Origin'] = "http://localhost:4200"
response['Access-Control-Allow-Credentials'] = "true"
response["Access-Control-Allow-Headers"] = "Origin, X-Requested-With, Content-Type, Accept, X-CSRFToken, Cookie"
return response
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'config.settings.settings.AllSameOriginMiddleware',
]
FIXTURE_DIRS = (
str(APPS_DIR.path('fixtures')),
)
ADMINS = [
("""Evgeny Rubanenko""", '[email protected]'),
]
MANAGERS = ADMINS
TIME_ZONE = 'Europe/Moscow'
LANGUAGE_CODE = 'ru-RU'
SITE_ID = 1
USE_I18N = True
USE_L10N = True
USE_TZ = True
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
str(APPS_DIR.path('templates')),
],
'OPTIONS': {
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
],
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
],
},
},
]
CRISPY_TEMPLATE_PACK = 'bootstrap4'
STATIC_ROOT = env('DJANGO_STATIC_ROOT', default=str(ROOT_DIR('staticfiles')))
STATIC_URL = '/collected_static/'
STATICFILES_DIRS = [
]
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
]
MEDIA_ROOT = str(APPS_DIR('media'))
MEDIA_URL = '/media/'
ROOT_URLCONF = 'config.urls'
WSGI_APPLICATION = 'config.wsgi.application'
PASSWORD_HASHERS = [
'django.contrib.auth.hashers.Argon2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
'django.contrib.auth.hashers.BCryptSHA256PasswordHasher',
'django.contrib.auth.hashers.BCryptPasswordHasher',
]
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
AUTHENTICATION_BACKENDS = [
'django.contrib.auth.backends.ModelBackend',
]
LOGIN_REDIRECT_URL = '/'
AUTOSLUG_SLUGIFY_FUNCTION = 'slugify.slugify'
ADMIN_URL = r'^admin/'
ALLOWED_HOSTS = ["*"]
DEFAULTS = {
'USE_SESSION_AUTH': False,
'SECURITY_DEFINITIONS': {
'basic': {
'type': 'basic'
}
}
}
CELERY_ALWAYS_EAGER = True
REST_FRAMEWORK = {
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',
'DEFAULT_FILTER_BACKENDS': [
'rest_framework_filters.backends.RestFrameworkFilterBackend',
],
'PAGE_SIZE': 15
}
CKEDITOR_UPLOAD_PATH = "uploads_ck/"
DEBUG = True
TEMPLATES[0]['OPTIONS']['debug'] = DEBUG
SECRET_KEY = '}Ft=26Vc:otWU@5u`VQb?J:!dz_rZX7f_p=@7;}g%208iJ7#)`'
DATABASES = {
'default': {
'ENGINE': 'django.contrib.gis.db.backends.postgis',
'NAME': 'postgres',
'USER': 'postgres',
'PASSWORD': 'postgres',
'HOST': 'postgres',
'PORT': '5432',
}
}
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_USE_SSL = True
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 465
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
DEFAULT_FROM_EMAIL = EMAIL_HOST_USER
|
from py_to_win_app import Project
app_name = "fastapi-desktop"
p = Project(
input_dir=f"examples/{app_name}", main_file="main.py", app_name=app_name
)
p.build(
python_version="3.9.7",
requirements_file=f"examples/{app_name}/requirements.txt",
exe_name=app_name,
)
p.make_dist(delete_build_dir=True)
|
from __future__ import unicode_literals
from django import forms
from django.contrib.auth import authenticate
from django.contrib.auth.forms import UserCreationForm, UserChangeForm
from django.utils.translation import ugettext_lazy as _
from .models import User
class UserCreationForm(UserCreationForm):
class Meta:
model = User
fields = ('email',)
class UserChangeForm(UserChangeForm):
pass
class UserAuthenticationForm(forms.Form):
email = forms.EmailField(label=_('Email'))
password = forms.CharField(label=_("Password"), widget=forms.PasswordInput)
error_messages = {
'invalid_login': _("Please enter a correct %(email)s and password. "
"Note that both fields may be case-sensitive."),
'inactive': _("This account is inactive."),
}
def __init__(self, request=None, *args, **kwargs):
self.request = request
self.user_cache = None
super(UserAuthenticationForm, self).__init__(*args, **kwargs)
def clean(self):
email = self.cleaned_data.get('email')
password = self.cleaned_data.get('password')
if email and password:
self.user_cache = authenticate(email=email,
password=password)
if self.user_cache is None:
raise forms.ValidationError(
self.error_messages['invalid_login'],
code='invalid_login',
params={'email': 'email'},
)
else:
self.confirm_login_allowed(self.user_cache)
return self.cleaned_data
def confirm_login_allowed(self, user):
if not user.is_active:
raise forms.ValidationError(
self.error_messages['inactive'],
code='inactive',
)
def get_user_id(self):
if self.user_cache:
return self.user_cache.id
return None
def get_user(self):
return self.user_cache
|
# Generated by Django 2.0.5 on 2018-05-26 10:23
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0005_auto_20180504_1652'),
]
operations = [
migrations.AlterModelOptions(
name='artist',
options={'verbose_name': 'Исполнитель', 'verbose_name_plural': 'Исполнители'},
),
migrations.AlterModelOptions(
name='release',
options={'verbose_name': 'Релиз', 'verbose_name_plural': 'Релизы'},
),
migrations.AlterModelOptions(
name='tag',
options={'verbose_name': 'Тег', 'verbose_name_plural': 'Теги'},
),
migrations.AlterModelOptions(
name='track',
options={'verbose_name': 'Трек', 'verbose_name_plural': 'Треки'},
),
migrations.AddField(
model_name='tag',
name='verified',
field=models.BooleanField(default=False, verbose_name='Проверенный тег'),
),
migrations.AlterField(
model_name='artist',
name='tags',
field=models.ManyToManyField(blank=True, to='api.Tag', verbose_name='Теги'),
),
migrations.AlterField(
model_name='release',
name='author',
field=models.ManyToManyField(to='api.Artist', verbose_name='Исполнитель'),
),
migrations.AlterField(
model_name='release',
name='relation',
field=models.TextField(default='{}', help_text='JSON FIELD', verbose_name='Отношения'),
),
migrations.AlterField(
model_name='release',
name='tags',
field=models.ManyToManyField(blank=True, to='api.Tag', verbose_name='Теги'),
),
migrations.AlterField(
model_name='release',
name='type',
field=models.CharField(choices=[('Album', 'Музыкальный альбом'), ('Single', 'Сингл'), ('Remixes', 'Ремиксы'), ('EP', 'EP'), ('Compilation', 'Сборник'), ('Mixtape', 'Микстейп'), ('Soundtrack', 'Саундтрек'), ('Live', 'Живая запись'), ('Bootleg', 'Бутлег'), ('Other', 'Другое')], max_length=32),
),
migrations.AlterField(
model_name='track',
name='author',
field=models.ManyToManyField(to='api.Artist', verbose_name='Исполнитель'),
),
migrations.AlterField(
model_name='track',
name='relation',
field=models.TextField(default='{}', help_text='JSON FIELD', verbose_name='Отношения'),
),
migrations.AlterField(
model_name='track',
name='tags',
field=models.ManyToManyField(to='api.Tag', verbose_name='Теги'),
),
]
|
"Utils functions."
import matplotlib.pyplot as plt
import numpy as np
from functools import wraps
def unpack_first_arg(f):
"Treat the second dimension of the first arg as independent inputs"
@wraps(f)
def g(*args, **kwargs):
if len(args) == 1 and isinstance(args[0], np.ndarray):
return f(*(args[0].T), **kwargs)
else:
return f(*args, **kwargs)
return g
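# Hedged usage sketch (comment only): a 2D array's columns are forwarded as
# separate positional arguments; the decorated function below is hypothetical.
#
#   @unpack_first_arg
#   def add(x, y):
#       return x + y
#
#   pts = np.array([[1, 10], [2, 20]])
#   add(pts)    # equivalent to add(pts[:, 0], pts[:, 1]) -> array([11, 22])
#   add(3, 4)   # plain calls still work -> 7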
def list_args_to_array(f):
"Convert input args that are list to arrays"
@wraps(f)
def g(*args, **kwargs):
args_new = [np.array(e) if isinstance(e, list) else e for e in args]
return f(*args_new, **kwargs)
return g
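# Hedged usage sketch (comment only): list positional arguments are converted
# to numpy arrays before the call; the decorated function is hypothetical.
#
#   @list_args_to_array
#   def norm(v):
#       return np.linalg.norm(v)
#
#   norm([3, 4])   # 5.0, the list is silently promoted to np.array([3, 4])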
def get_integer_dtype(max_value, min_value=0):
"Return the best numpy dtype for integers with input max value"
if min_value >= 0:
if max_value < 2**8:
return np.uint8
elif max_value < 2**16:
return np.uint16
elif max_value < 2**32:
return np.uint32
else:
return np.uint64
else:
max_value = max(max_value, abs(min_value))
if max_value < 2**7:
return np.int8
elif max_value < 2**15:
return np.int16
elif max_value < 2**31:
return np.int32
else:
return np.int64
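# Hedged usage sketch (comment only):
#
#   get_integer_dtype(200)                # np.uint8  (fits in 8 unsigned bits)
#   get_integer_dtype(70000)              # np.uint32 (2**16 <= 70000 < 2**32)
#   get_integer_dtype(100, min_value=-5)  # np.int8   (a signed range is needed)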
def store_value_on_first_computation(f):
"Compute and store the result of a method its first call"
@wraps(f)
def g(self, *args, **kwargs):
"Store the value of f on the first call and return it"
method_name = f.__name__
stored_result_name = "_" + method_name
if getattr(self, stored_result_name, None) is None:
setattr(self, stored_result_name, f(self, *args, **kwargs))
return getattr(self, stored_result_name)
return g
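# Hedged usage sketch (comment only): the wrapped method runs once and its
# result is cached on the instance under "_<method name>"; the class below is
# hypothetical.
#
#   class Expensive:
#       @store_value_on_first_computation
#       def answer(self):
#           print("computing...")
#           return 42
#
#   e = Expensive()
#   e.answer()   # prints "computing..." and returns 42
#   e.answer()   # returns the cached 42 without recomputing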
def grab_current_axis(f):
"Grab the current axis if None is provided"
@wraps(f)
def g(*args, **kwargs):
"Call f after grabbing the current axis if None is provided"
if kwargs.get("ax") is None:
kwargs.update({"ax": plt.gca()})
return f(*args, **kwargs)
return g
@grab_current_axis
def set_3Dlim(xlim=None, ylim=None, zlim=None, ax=None):
"Set the x, y and z axis limits of a 3D axis"
if xlim is not None:
ax.set_xlim(xlim)
if ylim is not None:
ax.set_ylim(ylim)
if zlim is not None:
ax.set_zlim(zlim)
return ax
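# Hedged usage sketch (comment only): thanks to @grab_current_axis the ax
# argument may be omitted and the current matplotlib axes is used instead.
#
#   from mpl_toolkits.mplot3d import Axes3D  # noqa: F401, enables 3D projection
#   fig = plt.figure()
#   ax3d = fig.add_subplot(111, projection="3d")
#   set_3Dlim(xlim=(0, 1), ylim=(0, 2), zlim=(0, 3))  # acts on ax3d via plt.gca()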
|
from gibson.envs.mobile_robots_env import TurtlebotNavigateSpeedControlEnv
from gibson.utils.play import play
import argparse
import os
import pybullet as p
import pybullet_data
import numpy as np
config_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'configs', 'play', 'tr_position_control.yaml')
print(config_file)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--config', type=str, default=config_file)
args = parser.parse_args()
env = TurtlebotNavigateSpeedControlEnv(config = args.config)
env.reset()
vid = p.createVisualShape(p.GEOM_MESH, fileName=os.path.join(pybullet_data.getDataPath(), 'cube.obj'),
rgbaColor=[1, 0, 0, 0.7])
p.createMultiBody(baseVisualShapeIndex=vid, baseCollisionShapeIndex=-1,
basePosition=env.robot.get_target_position())
while env.robot.dist_to_target() > 0.2:
v_signal = min(env.robot.dist_to_target() * 0.2, 0.2)
print(env.robot.angle_to_target, env.robot.dist_to_target(), env.robot.body_xyz, env.robot.get_target_position())
omega_signal = (-env.robot.angle_to_target) / 10
omega_signal = np.clip(omega_signal, -0.02, 0.02)
obs, _, _, _ = env.step([v_signal, omega_signal])
#print(obs["nonviz_sensor"])
for i in range(1000):
env.step([0, 0]) |
import tensorflow as tf
from tensorflow.contrib.tpu.python.tpu import keras_support
from tensorflow.keras.applications import NASNetLarge
from tensorflow.keras.layers import Input, Conv2D, BatchNormalization, Activation, Add, AveragePooling2D, GlobalAveragePooling2D, Dense
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.models import Model
from keras.datasets import cifar100
from keras.utils import to_categorical
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.callbacks import Callback, History
import tensorflow.keras.backend as K
import numpy as np
import os, time, pickle
class Timer(Callback):
def __init__(self):
self.inital_time_starts = time.time()
def on_train_begin(self, logs):
self.inital_time = time.time() - self.inital_time_starts
self.epoch_starts = time.time()
self.times = []
def on_epoch_end(self, epoch, logs):
self.times.append(time.time()-self.epoch_starts)
self.epoch_starts = time.time()
def create_residual_blocks(input_tensor, base_ch, k, N):
start_tensor = input_tensor
for i in range(N):
x = Conv2D(base_ch*k, 7, padding="same")(start_tensor)
x = BatchNormalization()(x)
x = Activation("relu")(x)
x = Conv2D(base_ch*k, 7, padding="same")(x)
x = BatchNormalization()(x)
x = Activation("relu")(x)
x = Add()([start_tensor, x])
start_tensor = x
return x
# WideResNet
def create_wideresnet(k, N, use_tpu):
input = Input(shape=(32, 32, 3))
# conv1 : 32x32
x = Conv2D(16*k, 1)(input)
x = create_residual_blocks(x, 16, k, N)
# downsampling 32->16
x = AveragePooling2D(2)(x)
x = Conv2D(32*k, 1)(x)
# conv2 : 16x16
x = create_residual_blocks(x, 32, k, N)
# downsampling 16->8
x = AveragePooling2D(2)(x)
x = Conv2D(64*k, 1)(x)
# conv4 : 8x8
x = create_residual_blocks(x, 64, k, N)
x = GlobalAveragePooling2D()(x)
x = Dense(100, activation="softmax")(x)
model = Model(input, x)
model.compile(Adam(), loss="categorical_crossentropy", metrics=["acc"])
if use_tpu:
tpu_grpc_url = "grpc://"+os.environ["COLAB_TPU_ADDR"]
tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(tpu_grpc_url)
strategy = keras_support.TPUDistributionStrategy(tpu_cluster_resolver)
model = tf.contrib.tpu.keras_to_tpu_model(model, strategy=strategy)
return model
def single_trial(use_tpu, batch_size, use_validation, use_augment, from_storage, parallel_workers):
K.clear_session()
model = create_wideresnet(7, 4, use_tpu)
train_gen = ImageDataGenerator(
rescale=1.0/255,
width_shift_range=4.0/32,
height_shift_range=4.0/32,
horizontal_flip=True)
val_gen = ImageDataGenerator(
rescale=1.0/255)
if not from_storage:
(X_train, y_train), (X_test, y_test) = cifar100.load_data()
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
if not use_augment:
X_train = (X_train / 255.0).astype(np.float32)
X_test = (X_test / 255.0).astype(np.float32)
timer = Timer()
hist = History()
n_train_examples, n_test_examples = 50000, 10000
n_epochs = 1
multiprocess = False if parallel_workers <= 1 else True
print("Start training...")
print(f"use_tpu:{use_tpu}, batch_size:{batch_size}, use_validation:{use_validation}, use_augment:{use_augment}, from_storage:{from_storage}, workers:{parallel_workers}")
if from_storage:
if use_augment:
if use_validation:
model.fit_generator(train_gen.flow_from_directory("cifar100-raw/train", target_size=(32, 32),
class_mode="categorical", shuffle=True,
batch_size=batch_size),
steps_per_epoch=n_train_examples//batch_size, epochs=n_epochs,
callbacks=[timer, hist],
workers=parallel_workers, use_multiprocessing=multiprocess,
validation_data=val_gen.flow_from_directory("cifar100-raw/test", target_size=(32, 32),
class_mode="categorical", shuffle=True,
batch_size=batch_size),
validation_steps=n_test_examples//batch_size)
else:
model.fit_generator(train_gen.flow_from_directory("cifar100-raw/train", target_size=(32, 32),
class_mode="categorical", shuffle=True,
batch_size=batch_size),
steps_per_epoch=n_train_examples//batch_size, epochs=n_epochs,
callbacks=[timer, hist],
workers=parallel_workers, use_multiprocessing=multiprocess)
else:
if use_validation:
model.fit_generator(val_gen.flow_from_directory("cifar100-raw/train", target_size=(32, 32),
class_mode="categorical", shuffle=True,
batch_size=batch_size),
steps_per_epoch=n_train_examples//batch_size, epochs=n_epochs,
callbacks=[timer, hist],
workers=parallel_workers, use_multiprocessing=multiprocess,
validation_data=val_gen.flow_from_directory("cifar100-raw/test", target_size=(32, 32),
class_mode="categorical", shuffle=True,
batch_size=batch_size),
validation_steps=n_test_examples//batch_size)
else:
model.fit_generator(val_gen.flow_from_directory("cifar100-raw/train", target_size=(32, 32),
class_mode="categorical", shuffle=True,
batch_size=batch_size),
steps_per_epoch=n_train_examples//batch_size, epochs=n_epochs,
callbacks=[timer, hist],
workers=parallel_workers, use_multiprocessing=multiprocess)
else:
if use_augment:
if use_validation:
model.fit_generator(train_gen.flow(X_train, y_train, batch_size=batch_size, shuffle=True),
steps_per_epoch=n_train_examples//batch_size,
epochs=n_epochs, callbacks=[timer, hist],
workers=parallel_workers, use_multiprocessing=multiprocess,
validation_data=val_gen.flow(X_test, y_test), validation_steps=n_test_examples//batch_size)
else:
model.fit_generator(train_gen.flow(X_train, y_train, batch_size=batch_size, shuffle=True),
steps_per_epoch=n_train_examples//batch_size,
epochs=n_epochs, callbacks=[timer, hist],
workers=parallel_workers, use_multiprocessing=multiprocess)
else:
            # plain model.fit cannot be parallelized across workers
if use_validation:
model.fit(X_train, y_train, batch_size=batch_size, epochs=n_epochs, callbacks=[timer, hist],
validation_data=(X_test, y_test))
else:
model.fit(X_train, y_train, batch_size=batch_size, epochs=n_epochs, callbacks=[timer, hist])
history = hist.history
history["initial_time"] = timer.inital_time
history["times"] = timer.times
result = {
"device": "tpu" if use_tpu else "gpu",
"batch_size" : batch_size,
"use_validation" : use_validation,
"use_augmentation" : use_augment,
"from_storage": from_storage,
"result" : history,
"num_workers" : parallel_workers
}
return result
def trial(use_tpu, batch_size, separate_mode=-1):
flag = "tpu" if use_tpu else "gpu"
if separate_mode == -1:
filename = f"{flag}_batchsize_{batch_size}.dat"
else:
filename = f"{flag}_batchsize_{batch_size}_sep{separate_mode}.dat"
result = []
if separate_mode in [-1, 0]:
result.append(single_trial(use_tpu, batch_size, use_validation=False, use_augment=False, from_storage=False, parallel_workers=1))
result.append(single_trial(use_tpu, batch_size, use_validation=True, use_augment=False, from_storage=False, parallel_workers=1))
if separate_mode in [-1, 1]:
result.append(single_trial(use_tpu, batch_size, use_validation=True, use_augment=True, from_storage=False, parallel_workers=1))
result.append(single_trial(use_tpu, batch_size, use_validation=False, use_augment=False, from_storage=True, parallel_workers=1))
if separate_mode in [-1, 2]:
result.append(single_trial(use_tpu, batch_size, use_validation=True, use_augment=False, from_storage=True, parallel_workers=1))
result.append(single_trial(use_tpu, batch_size, use_validation=True, use_augment=True, from_storage=True, parallel_workers=1))
if separate_mode in [-1, 3]:
result.append(single_trial(use_tpu, batch_size, use_validation=False, use_augment=False, from_storage=True, parallel_workers=4))
result.append(single_trial(use_tpu, batch_size, use_validation=True, use_augment=True, from_storage=True, parallel_workers=4))
with open(filename, "wb") as fp:
pickle.dump(result, fp)
return filename
def appendix_trial(batch_size, use_tpu=True, sep=-1):
tpu_flag = "tpu" if use_tpu else "gpu"
filename = f"appendix_{tpu_flag}_batch_size_{batch_size}"
if sep >= 0: filename += f"_sep_{sep}"
filename += ".dat"
result = {}
for mode in range(3):
if sep >= 0:
if sep != mode: continue
K.clear_session()
model = create_wideresnet(7, 4, use_tpu)
        # mode 0 = fit on the arrays as-is
        # mode 1 = truncate to a multiple of the batch size, then fit
        # mode 2 = fit_generator
data_gen = ImageDataGenerator(rescale=1.0/255)
nb_epochs = 20
(X_train, y_train), (_, _) = cifar100.load_data()
timer = Timer()
hist = History()
print("Start training...")
print("mode = ", mode)
if mode == 0:
X_train = X_train / 255.0
y_train = to_categorical(y_train)
model.fit(X_train, y_train, batch_size=batch_size, epochs=nb_epochs, callbacks=[timer, hist])
elif mode == 1:
n_train = (X_train.shape[0] // batch_size) * batch_size
X_train = X_train[:n_train, :, :, :] / 255.0
y_train = to_categorical(y_train[:n_train, :])
model.fit(X_train, y_train, batch_size=batch_size, epochs=nb_epochs, callbacks=[timer, hist])
elif mode == 2:
y_train = to_categorical(y_train)
steps_per_epoch = X_train.shape[0] // batch_size
model.fit_generator(data_gen.flow(X_train, y_train, batch_size=batch_size, shuffle=True),
steps_per_epoch=steps_per_epoch, epochs=nb_epochs, callbacks=[timer, hist])
history = hist.history
history["initial_time"] = timer.inital_time
history["times"] = timer.times
result[mode] = history
with open(filename, "wb") as fp:
pickle.dump(result, fp)
return filename
if __name__ == "__main__":
filename = trial(False, 256, 2) # True if use TPU
#filename = appendix_trial(4096, sep=0) |
#!/usr/bin/env python
import threading, os, time
from scapy.all import *
################################## USER INPUT VARIABLES #####################################
pcapFilename = 'droneAlert.pcap'
isDebug = True # Set isDebug = False when monitor mode is needed to be setup
interface = 'wlan1mon'
kp = 0.7 # Probability Threshold Constant, this is the suspicion level, decreasing this will make the detection faster
kd = 3.5 # Detection Threshold Constant, this is the average deviation threshold, decreasing this will make detection more aggressive but prone to error
ki = 8 # Idling Detection Threshold Constant, this is the ratio of number of times the channel is encountered over the number of encounters,
# decreasing ki will decrease the time needed to filter mobile phone beacon frames
MB_ch = 3 # MeshBulb Operating Channel, choose a channel where there are less noise
################################# GLOBAL VARIABLES #####################################
# Format for macAddrList ["aa:bb:cc:00:11:22","SSID","RSSI","Count","RSSI Deviation","Probability","Channel","Channel Encounter"]
macAddrList = [] # The list of unfiltered mac address encountered
filteredMacList = [] # Filtered mac address list, mac addr in this list is whitelisted
knownVendorList = ["60:60:1f","e0:b6:f5","00:12:1c","00:26:7e","90:03:b7","90:3a:e6","a0:14:3d"] # Drone Vendor Specific Mac Addrs
whiteList = ["b0:be:76","7c:8b:ca"] # Known Vendor Specific Mac Addrs for home WiFi AP
foundDroneList = [] # Temporary list for found drone mac addrs
channels = [] # List of channels where the AP is encountered
isPacketAvail = False
isChannelHopping = True
availLocalizer = [] # Temporary list to store the available ESP which contributes RSSI values
sleepCount = 0
################################# FUNCTIONS #######################################
# To start monitor mode
def startMonMode(iface):
print '[+] Setting ' + iface + ' into monitor mode'
try:
os.system('sudo airmon-ng start ' + iface)
time.sleep(4)
except:
print '[ERROR] Aircrack Suite is not installed, please install by typing sudo apt-get install aircrack-ng'
exit()
# A function to prevent the channelHopping thread from changing the channel while getting the RSSI values from supporting MeshBulbs
def suspendThread(iface):
global isPacketAvail
global sleepCount
os.system('iwconfig %s channel %d' % (iface, MB_ch))
sendp(pkt[0], iface=iface)
while isPacketAvail:
time.sleep(1) # time.sleep() function is used to suspend the thread
sleepCount +=1
if sleepCount > 20:
break
# Channel hopping must be done > 100ms because beacon frames are usually sent every 100ms
# Total of 14 channels defined for use by WiFi 802.11 2.4Ghz ISM Band
# If there are no found beacons, the channel will hop from channel 1 to channel 14
# If a particular beacon is found, it will only focus on that channel until it has made sure they are filtered
def channelHopping(iface):
global isPacketAvail
global isChannelHopping
while isChannelHopping:
if len(channels) == 0:
for ch in range(1,14):
os.system('iwconfig %s channel %d' % (iface, ch))
print "Current channel: " + str(ch)
time.sleep(0.220)
else:
for ch in channels:
os.system('iwconfig %s channel %d' % (iface, ch))
# Only announce the presence of a drone when iwconfig has changed to the channel MeshBulb is operating in
if isPacketAvail:
suspendThread(iface)
# Increment the Channel Encounter
for beacons in macAddrList:
if int(beacons[6]) == ch:
beacons[7] += 1
print "Current channel: " + str(ch)
time.sleep(0.305)
# Function to execute when a drone has been found
# Because the ESP can sometimes only detect either the SSID or the MAC address, we send both
def announceDroneDetected(ssid, mac, detected_ch):
global isPacketAvail
print "[+] Drone Detected"
isPacketAvail = True
pkt[0].load = "\x7f\x18\xfe4\x9a\xab\x9f\x15\xdd\x11\x18\xfe4\x04\x01" + str(mac) + ":" + str(detected_ch) + ":" + ssid + ":" +"\x00\x00\x00\x00\x00\x00\x85"
print "SSID: "+ ssid + "\nMAC: " + mac + "\nChannel: " + str(detected_ch) + "\n"
# Cross check with the vendor specific mac address
def OUICheck(mac):
if mac[0:8] in knownVendorList:
return True
else:
return False
# Filter beacons based on the probability
def beaconFilter(kp,kd,ki):
# Clear the channels list
del channels[:]
for i, beacons in enumerate(macAddrList):
# Update the channels list for focused channel hopping
if beacons[6] not in channels:
channels.append(beacons[6])
        # If the probability is greater than the preset probability constant, we have found a drone, so send the mac addr and channel info to ESPNOW
if beacons[5] >= kp:
announceDroneDetected(beacons[1],beacons[0],beacons[6])
if beacons[0] not in foundDroneList:
foundDroneList.append(beacons[0])
macAddrList.pop(i)
break
        # Increment the probability by 0.2 if the average RSSI deviation is higher than the detection constant
if float(beacons[4]/beacons[3]) > kd:
beacons[5] += 0.2
beacons[3] = 1
beacons[4] = 0
        # Filter out as non-drones if the AP stayed static for a long time / is in the white list / has 'unifi' in its SSID / has been encountered on its channel more than 40 times
if beacons[3] > 20 or beacons[0][0:8] in whiteList or "unifi" in beacons[1] or beacons[-1] > 40:
filteredMacList.append(beacons[0])
macAddrList.pop(i)
        # If beacon frames are sent too infrequently, the suspect might be a mobile phone
if beacons[7]/beacons[3] > ki:
macAddrList.pop(i)
# print "Filtered MAC List: ", filteredMacList
# This function only handles incoming new packets
def PacketHandler(packet):
global isPacketAvail
global sleepCount
# If it is data from ESP NOW
if packet.subtype == 13 and packet.addr2 and isPacketAvail:
payload = str(packet.load).split(':')
# Usually legit payload after splitting is in the format of ['Rubbish HEX','x_coord','y_coord','RSSI','Rubbish HEX']
if len(payload) == 5 or len(payload) == 6:
# payload[-3] is the x_coord and payload[-4] is the y_coord
currentCoords = [payload[-3],payload[-4]]
currentRSSI = payload[-2]
            # Keep track of the number of nodes contributing RSSI values; if none are left, the suspended thread can be released
if currentCoords not in availLocalizer:
availLocalizer.append(currentCoords)
if currentRSSI == "0":
availLocalizer.remove(currentCoords)
if len(availLocalizer) == 0:
isPacketAvail = False
sleepCount = 0
print availLocalizer
print "x_coord: " + payload[-3] + " y_coord: " + payload[-4] + " RSSI: " + currentRSSI
# If packet has a beacon frame
if packet.haslayer(Dot11Beacon) and len(availLocalizer) == 0:
prob = 0
if packet.addr2 not in filteredMacList:
# Primary Filter, use vendor OUI Mac Address
if OUICheck(packet.addr2): prob = 1
# Secondary Filter, common drone has underscore and the 'drone' word in SSID
ssid = packet.getlayer(Dot11Elt).info
# Get channel information based on https://learntomato.flashrouters.com/wp-content/uploads/wifi-frequency-channels.jpg
channel = int(((packet.ChannelFrequency - 2412) / 5) + 1)
# Populate the macAddrList to be filtered
if len(macAddrList) == 0:
macAddrList.append([packet.addr2, ssid, packet.dBm_AntSignal,0,0,prob,channel,1])
for foundBeacons in macAddrList:
if packet.addr2 in foundBeacons:
                # Increment counter
foundBeacons[3] += 1
# Get RSSI deviation and update RSSI
foundBeacons[4] += abs(foundBeacons[2]-packet.dBm_AntSignal)
foundBeacons[2] = packet.dBm_AntSignal
break
                # If we reached the end of the loop without finding the mac address in macAddrList
if foundBeacons == macAddrList[-1]:
                    # Format: [macAddr, SSID, RSSI, Count, RSSI Deviation, Probability, Channel, Channel Encounter]
macAddrList.append([packet.addr2, ssid, packet.dBm_AntSignal,0,0,prob,channel,1])
beaconFilter(kp,kd,ki)
# print "macAddrList: ", macAddrList
# print "foundDroneList: ", foundDroneList
if __name__ == "__main__":
# Reading the pcap file
pkt = rdpcap(pcapFilename)
if not isDebug:
startMonMode(interface[:-3])
# Creating and starting the thread for channel hopping
chHopthread = threading.Thread(target=channelHopping, args=(interface, ), name="channelHopping")
chHopthread.daemon = True
chHopthread.start()
# Sniff all packets on a single channel
sniff(iface=interface, store=False, prn = PacketHandler)
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Beam fn API log handler."""
# pytype: skip-file
# mypy: disallow-untyped-defs
import logging
import math
import queue
import sys
import threading
import time
import traceback
from typing import TYPE_CHECKING
from typing import Iterable
from typing import Iterator
from typing import List
from typing import Union
from typing import cast
import grpc
from apache_beam.portability.api import beam_fn_api_pb2
from apache_beam.portability.api import beam_fn_api_pb2_grpc
from apache_beam.runners.worker import statesampler
from apache_beam.runners.worker.channel_factory import GRPCChannelFactory
from apache_beam.runners.worker.worker_id_interceptor import WorkerIdInterceptor
from apache_beam.utils.sentinel import Sentinel
if TYPE_CHECKING:
from apache_beam.portability.api import endpoints_pb2
# This module is experimental. No backwards-compatibility guarantees.
class FnApiLogRecordHandler(logging.Handler):
"""A handler that writes log records to the fn API."""
# Maximum number of log entries in a single stream request.
_MAX_BATCH_SIZE = 1000
# Used to indicate the end of stream.
_FINISHED = Sentinel.sentinel
# Size of the queue used to buffer messages. Once full, messages will be
# dropped. If the average log size is 1KB this may use up to 10MB of memory.
_QUEUE_SIZE = 10000
# Mapping from logging levels to LogEntry levels.
LOG_LEVEL_MAP = {
logging.FATAL: beam_fn_api_pb2.LogEntry.Severity.CRITICAL,
logging.ERROR: beam_fn_api_pb2.LogEntry.Severity.ERROR,
logging.WARNING: beam_fn_api_pb2.LogEntry.Severity.WARN,
logging.INFO: beam_fn_api_pb2.LogEntry.Severity.INFO,
logging.DEBUG: beam_fn_api_pb2.LogEntry.Severity.DEBUG,
-float('inf'): beam_fn_api_pb2.LogEntry.Severity.DEBUG,
}
def __init__(self, log_service_descriptor):
# type: (endpoints_pb2.ApiServiceDescriptor) -> None
super(FnApiLogRecordHandler, self).__init__()
self._alive = True
self._dropped_logs = 0
self._log_entry_queue = queue.Queue(
maxsize=self._QUEUE_SIZE
) # type: queue.Queue[Union[beam_fn_api_pb2.LogEntry, Sentinel]]
ch = GRPCChannelFactory.insecure_channel(log_service_descriptor.url)
# Make sure the channel is ready to avoid [BEAM-4649]
grpc.channel_ready_future(ch).result(timeout=60)
self._log_channel = grpc.intercept_channel(ch, WorkerIdInterceptor())
self._reader = threading.Thread(
target=lambda: self._read_log_control_messages(),
name='read_log_control_messages')
self._reader.daemon = True
self._reader.start()
def connect(self):
# type: () -> Iterable
if hasattr(self, '_logging_stub'):
del self._logging_stub # type: ignore[has-type]
self._logging_stub = beam_fn_api_pb2_grpc.BeamFnLoggingStub(
self._log_channel)
return self._logging_stub.Logging(self._write_log_entries())
def map_log_level(self, level):
# type: (int) -> beam_fn_api_pb2.LogEntry.Severity.Enum
try:
return self.LOG_LEVEL_MAP[level]
except KeyError:
return max(
beam_level for python_level,
beam_level in self.LOG_LEVEL_MAP.items() if python_level <= level)
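  # Worked example (illustrative): a custom level such as 25 (between INFO and
  # WARNING) is not a key in LOG_LEVEL_MAP, so map_log_level() falls back to the
  # highest Beam severity whose Python level does not exceed it, i.e. INFO here.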
def emit(self, record):
# type: (logging.LogRecord) -> None
log_entry = beam_fn_api_pb2.LogEntry()
log_entry.severity = self.map_log_level(record.levelno)
log_entry.message = self.format(record)
log_entry.thread = record.threadName
log_entry.log_location = '%s:%s' % (
record.pathname or record.module, record.lineno or record.funcName)
(fraction, seconds) = math.modf(record.created)
nanoseconds = 1e9 * fraction
log_entry.timestamp.seconds = int(seconds)
log_entry.timestamp.nanos = int(nanoseconds)
if record.exc_info:
log_entry.trace = ''.join(traceback.format_exception(*record.exc_info))
instruction_id = statesampler.get_current_instruction_id()
if instruction_id:
log_entry.instruction_id = instruction_id
tracker = statesampler.get_current_tracker()
if tracker:
current_state = tracker.current_state()
if (current_state and current_state.name_context and
current_state.name_context.transform_id):
log_entry.transform_id = current_state.name_context.transform_id
try:
self._log_entry_queue.put(log_entry, block=False)
except queue.Full:
self._dropped_logs += 1
def close(self):
# type: () -> None
"""Flush out all existing log entries and unregister this handler."""
try:
self._alive = False
# Acquiring the handler lock ensures ``emit`` is not run until the lock is
# released.
self.acquire()
self._log_entry_queue.put(self._FINISHED, timeout=5)
# wait on server to close.
self._reader.join()
self.release()
# Unregister this handler.
super(FnApiLogRecordHandler, self).close()
except Exception:
# Log rather than raising exceptions, to avoid clobbering
# underlying errors that may have caused this to close
# prematurely.
logging.error("Error closing the logging channel.", exc_info=True)
def _write_log_entries(self):
# type: () -> Iterator[beam_fn_api_pb2.LogEntry.List]
done = False
while not done:
log_entries = [self._log_entry_queue.get()]
try:
for _ in range(self._MAX_BATCH_SIZE):
log_entries.append(self._log_entry_queue.get_nowait())
except queue.Empty:
pass
if log_entries[-1] is self._FINISHED:
done = True
log_entries.pop()
if log_entries:
# typing: log_entries was initialized as List[Union[..., Sentinel]],
# but now that we've popped the sentinel out (above) we can safely cast
yield beam_fn_api_pb2.LogEntry.List(
log_entries=cast(List[beam_fn_api_pb2.LogEntry], log_entries))
def _read_log_control_messages(self):
# type: () -> None
# Only reconnect when we are alive.
    # We may drop some logs in the unlikely event that the logging connection is
    # dropped (not closed) during termination while we still have logs to send.
    # This case is unlikely, and the chance of reconnecting and successfully
    # transmitting the remaining logs is very low since the process is
    # terminating, so we choose not to handle it to avoid unnecessary complexity.
alive = True # Force at least one connection attempt.
while alive:
# Loop for reconnection.
log_control_iterator = self.connect()
if self._dropped_logs > 0:
logging.warning(
"Dropped %d logs while logging client disconnected",
self._dropped_logs)
self._dropped_logs = 0
try:
for _ in log_control_iterator:
# Loop for consuming messages from server.
# TODO(vikasrk): Handle control messages.
pass
# iterator is closed
return
except Exception as ex:
print(
"Logging client failed: {}... resetting".format(ex),
file=sys.stderr)
# Wait a bit before trying a reconnect
time.sleep(0.5) # 0.5 seconds
alive = self._alive
|
#!/usr/bin/env python3
"""
Spark API Python example.
This script retrieves an access token then fetches available Spark contracts.
It requires Python 3 (version >=3.1)
Usage:
- By providing the client_credentials file path
$ python spark_api_example.py <client_credentials_csv_file_path>
- By providing 2 environment variables:
$ export SPARK_CLIENT_ID=XXXX
    $ export SPARK_CLIENT_SECRET=YYYY
$ python spark_api_example.py
"""
import json
import os
import sys
from base64 import b64encode
from urllib.parse import urljoin
try:
from urllib import request, parse
from urllib.error import HTTPError
except ImportError:
raise RuntimeError("Python 3 required")
API_BASE_URL = "https://api.sparkcommodities.com"
def retrieve_credentials(file_path=None):
"""
Find credentials either by reading the client_credentials file or reading
environment variables
"""
if file_path is None:
client_id = os.getenv("SPARK_CLIENT_ID")
client_secret = os.getenv("SPARK_CLIENT_SECRET")
if not client_id or not client_secret:
raise RuntimeError(
"SPARK_CLIENT_ID and SPARK_CLIENT_SECRET environment vars required"
)
else:
# Parse the file
if not os.path.isfile(file_path):
raise RuntimeError("The file {} doesn't exist".format(file_path))
with open(file_path) as fp:
first_line = fp.readline()
if "clientId,clientSecret" not in first_line:
print("First line: {}".format(first_line))
raise RuntimeError(
"The specified file {} doesn't look like to be a Spark API client "
"credentials file".format(file_path)
)
second_line = fp.readline()
client_id, client_secret = second_line.split(",")
print(">>>> Found credentials!")
print(
">>>> Client_id={}, client_secret={}****".format(client_id, client_secret[:5])
)
return client_id, client_secret
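# Illustrative client_credentials file layout (placeholder values), matching what
# retrieve_credentials() expects: the header on the first line and the actual
# credentials, comma-separated, on the second line:
#
#   clientId,clientSecret
#   00000000-0000-0000-0000-000000000000,my-client-secret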
def do_api_post_query(uri, body, headers):
url = urljoin(API_BASE_URL, uri)
data = json.dumps(body).encode("utf-8")
# HTTP POST request
req = request.Request(url, data=data, headers=headers)
try:
response = request.urlopen(req)
except HTTPError as e:
print("HTTP Error: ", e.code)
print(e.read())
sys.exit(1)
resp_content = response.read()
# The server must return HTTP 201. Raise an error if this is not the case
assert response.status == 201, resp_content
# The server returned a JSON response
content = json.loads(resp_content)
return content
def do_api_get_query(uri, access_token):
url = urljoin(API_BASE_URL, uri)
headers = {
"Authorization": "Bearer {}".format(access_token),
"Accept": "application/json",
}
    # HTTP GET request
req = request.Request(url, headers=headers)
try:
response = request.urlopen(req)
except HTTPError as e:
print("HTTP Error: ", e.code)
print(e.read())
sys.exit(1)
resp_content = response.read()
    # The server must return HTTP 200. Raise an error if this is not the case
assert response.status == 200, resp_content
# The server returned a JSON response
content = json.loads(resp_content)
return content
def get_access_token(client_id, client_secret):
"""
    Get a new access_token. Access tokens are what applications use to make
    API requests, and they must be kept confidential in storage.
# Procedure:
Do a POST query with `grantType` and `scopes` in the body. A basic authorization
HTTP header is required. The "Basic" HTTP authentication scheme is defined in
RFC 7617, which transmits credentials as `clientId:clientSecret` pairs, encoded
using base64.
"""
# Note: for the sake of this example, we choose to use the Python urllib from the
# standard lib. One should consider using https://requests.readthedocs.io/
payload = "{}:{}".format(client_id, client_secret).encode()
headers = {
"Authorization": b64encode(payload).decode(),
"Accept": "application/json",
"Content-Type": "application/json",
}
body = {
"grantType": "clientCredentials",
"scopes": "read:lng-freight-prices",
}
content = do_api_post_query(uri="/oauth/token/", body=body, headers=headers)
print(
">>>> Successfully fetched an access token {}****, valid {} seconds.".format(
content["accessToken"][:5], content["expiresIn"]
)
)
return content["accessToken"]
#
# Spark data fetching functions:
#
def list_contracts(access_token):
"""
Fetch available contracts. Return contract ticker symbols
# Procedure:
Do a GET query to /v1.0/contracts/ with a Bearer token authorization HTTP header.
"""
content = do_api_get_query(uri="/v1.0/contracts/", access_token=access_token)
print(">>>> Contracts:")
tickers = []
for contract in content["data"]:
print(contract["fullName"])
tickers.append(contract["id"])
return tickers
def get_latest_price_releases(access_token, ticker):
"""
For the contract, fetch then display the latest price release
# Procedure:
Do GET queries to /v1.0/contracts/{contract_ticker_symbol}/price-releases/latest/
with a Bearer token authorization HTTP header.
"""
content = do_api_get_query(
uri="/v1.0/contracts/{}/price-releases/latest/".format(ticker),
access_token=access_token,
)
release_date = content["data"]["releaseDate"]
print(">>>> Get latest price release for {}".format(ticker))
print("release date =", release_date)
data_points = content["data"]["data"][0]["dataPoints"]
for data_point in data_points:
period_start_at = data_point["deliveryPeriod"]["startAt"]
spark_price = data_point["derivedPrices"]["usdPerDay"]["spark"]
print(
"Spark Price is USD",
"{:>6}".format(spark_price),
"/day for period starting on",
period_start_at,
)
def fetch_historical_price_releases(access_token, ticker, limit=4, offset=None):
"""
For a selected contract, this endpoint returns all the Price Releases you can
access according to your current subscription, ordered by release date descending.
**Note**: Unlimited access to historical data and full forward curves is only
available to those with Premium access. Get in touch to find out more.
**Params**
limit: optional integer value to set an upper limit on the number of price
releases returned by the endpoint. Default here is 4.
offset: optional integer value to set from where to start returning data.
Default is 0.
# Procedure:
Do GET queries to /v1.0/contracts/{contract_ticker_symbol}/price-releases/
with a Bearer token authorization HTTP header.
"""
query_params = "?limit={}".format(limit)
if offset is not None:
query_params += "&offset={}".format(offset)
content = do_api_get_query(
uri="/v1.0/contracts/{}/price-releases/{}".format(ticker, query_params),
access_token=access_token,
)
print(">>>> Get price releases for {}".format(ticker))
for release in content["data"]:
release_date = release["releaseDate"]
print("- release date =", release_date)
data_points = release["data"][0]["dataPoints"]
for data_point in data_points:
period_start_at = data_point["deliveryPeriod"]["startAt"]
spark_price = data_point["derivedPrices"]["usdPerDay"]["spark"]
print(
" Spark Price is USD",
"{:>6}".format(spark_price),
"/day for period starting on",
period_start_at,
)
def main(file_path=None):
print(">>>> Running Spark API Python sample...")
client_id, client_secret = retrieve_credentials(file_path)
# Authenticate:
access_token = get_access_token(client_id, client_secret)
# Fetch data:
tickers = list_contracts(access_token)
for ticker in tickers:
get_latest_price_releases(access_token, ticker)
# For only 1 contract:
fetch_historical_price_releases(access_token, tickers[0], limit=4)
print(">>>> Done!")
if __name__ == "__main__":
if len(sys.argv) >= 2:
main(file_path=sys.argv[1])
else:
main()
|
# coding=utf-8
"""Classes for handling experimental datasets used by mmCIF models.
"""
class Dataset(object):
"""A set of input data, for example, a crystal structure or EM map.
:param location: a pointer to where the
dataset is stored. This is usually a subclass of
:class:`~ihm.location.DatabaseLocation` if the dataset is
deposited in a database such as PDB or EMDB, or
:class:`~ihm.location.InputFileLocation` if the dataset is stored
in an external file.
:type location: :class:`ihm.location.Location`
:param str details: Text giving more information about the dataset.
"""
_eq_keys = ['location']
# Datasets compare equal iff they are the same class and have the
# same attributes
def _eq_vals(self):
return tuple([self.__class__]
+ [getattr(self, x) for x in self._eq_keys])
def __eq__(self, other):
return self._eq_vals() == other._eq_vals()
def __hash__(self):
return hash(self._eq_vals())
data_type = 'Other'
def __init__(self, location, details=None):
self.location, self.details = location, details
#: A list of :class:`Dataset` and/or :class:`TransformedDataset`
#: objects from which this one was derived.
#: For example, a 3D EM map may be derived from a set of 2D images.
self.parents = []
def add_primary(self, dataset):
"""Add another Dataset from which this one was ultimately derived,
i.e. it is added as a parent, unless a parent already exists,
in which case it is added as a grandparent, and so on."""
root = self
while root.parents:
if len(root.parents) > 1:
raise ValueError("This dataset has multiple parents - don't "
"know which one to add to")
root = root.parents[0]
root.parents.append(dataset)
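# A minimal usage sketch (hypothetical locations loc1/loc2, which would be
# ihm.location objects): repeated calls to add_primary() extend the ancestry
# chain rather than adding siblings.
#
#   raw = MassSpecDataset(loc1)
#   xlinks = CXMSDataset(loc2)
#   xlinks.add_primary(raw)      # raw becomes the parent of xlinks
#   xlinks.add_primary(other)    # a further dataset becomes the grandparent (parent of raw)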
class TransformedDataset(object):
"""A :class:`Dataset` that should be rotated or translated before using.
This is typically used for derived datasets
(see :attr:`Dataset.parents`) where the derived dataset lies in a
different dataset from the parent (for example, it was moved to better
align with the model's reference frame or other experimental data).
The transformation that places the derived dataset on the parent
is recorded here.
:param dataset: The (parent) dataset.
:type dataset: :class:`Dataset`
:param transform: The rotation and translation that places a
derived dataset on this dataset.
:type transform: :class:`ihm.geometry.Transformation`
"""
def __init__(self, dataset, transform):
self.dataset, self.transform = dataset, transform
class DatasetGroup(list):
"""A set of :class:`Dataset` objects that are handled together.
This is implemented as a simple list.
:param sequence elements: Initial set of datasets.
:param str name: Short text name of this group.
:param str application: Text that shows how this group is used.
:param str details: Longer text that describes this group.
Normally a group is passed to one or more
:class:`~ihm.protocol.Protocol` or :class:`~ihm.analysis.Analysis`
objects, although unused groups can still be included in the file
if desired by adding them to :attr:`ihm.System.orphan_dataset_groups`.
"""
# For backwards compatibility with earlier versions of this class which
# didn't specify name/application/details
name = application = details = None
def __init__(self, elements=(), name=None, application=None, details=None):
super(DatasetGroup, self).__init__(elements)
self.name, self.application = name, application
self.details = details
class CXMSDataset(Dataset):
"""Processed cross-links from a CX-MS experiment"""
data_type = 'CX-MS data'
class MassSpecDataset(Dataset):
"""Raw mass spectrometry files such as peaklists"""
data_type = 'Mass Spectrometry data'
class HDXDataset(Dataset):
"""Data from a hydrogen/deuterium exchange experiment"""
data_type = 'H/D exchange data'
class PDBDataset(Dataset):
"""An experimentally-determined 3D structure as a set of a coordinates,
usually in a PDB file"""
data_type = 'Experimental model'
class ComparativeModelDataset(Dataset):
"""A 3D structure determined by comparative modeling"""
data_type = 'Comparative model'
class IntegrativeModelDataset(Dataset):
"""A 3D structure determined by integrative modeling"""
data_type = 'Integrative model'
class DeNovoModelDataset(Dataset):
"""A 3D structure determined by de novo modeling"""
data_type = 'De Novo model'
class NMRDataset(Dataset):
"""A nuclear magnetic resonance (NMR) dataset"""
data_type = 'NMR data'
class MutagenesisDataset(Dataset):
"""Mutagenesis data"""
data_type = 'Mutagenesis data'
class EMDensityDataset(Dataset):
"""A 3D electron microscopy dataset"""
data_type = '3DEM volume'
class EMMicrographsDataset(Dataset):
"""Raw 2D electron micrographs"""
data_type = 'EM raw micrographs'
class EM2DClassDataset(Dataset):
"""2DEM class average"""
data_type = '2DEM class average'
class SASDataset(Dataset):
"""SAS data"""
data_type = 'SAS data'
class FRETDataset(Dataset):
"""Data from a Förster resonance energy transfer (FRET) experiment"""
data_type = 'Single molecule FRET data'
class YeastTwoHybridDataset(Dataset):
"""Yeast two-hybrid data"""
data_type = 'Yeast two-hybrid screening data'
class GeneticInteractionsDataset(Dataset):
"""Quantitative measurements of genetic interactions"""
data_type = 'Quantitative measurements of genetic interactions'
|
from .blame import *
from .subcommand import *
SUBCOMMANDS = {"blame": Subcommand(blame.blame_args_validator, blame.blame_handler)}
|
# -*- coding: utf-8 -*-
#==========================================
# Title: optimization.py
# Author: Binxin Ru and Ahsan Alvi
# Date: 20 August 2019
# Link: https://arxiv.org/abs/1906.08878
#==========================================
"""Optimization utilities"""
from typing import Callable, Optional, Dict
import numpy as np
import scipy as sp
from scipy import optimize
def minimize_with_restarts(optimiser_func, restart_bounds, num_restarts=5,
min_successes=3, max_tries=None, hard_bounds=None,
jac=None, minimize_options=None, verbose=False):
"""
Runs scipy.optimize.minimize() with random restarts
"""
# Hard upper limit to kill the optimization if we keep on failing
if max_tries is None:
max_tries = (num_restarts + min_successes) * 3
# If options (maxiter) or jac is provided, pass that to minimize
# minimize_options is a dict like {'maxiter':100} or None
if jac is None:
def minimizer(x):
return optimize.minimize(optimiser_func,
x,
bounds=hard_bounds,
options=minimize_options)
else:
def minimizer(x):
return optimize.minimize(optimiser_func,
x,
jac=jac,
bounds=hard_bounds,
options=minimize_options)
if type(restart_bounds) is list:
restart_bounds = np.array(restart_bounds)
best_eval = None
best_opt_result = None
nfev = 0
ncrashes = 0
n_runs = 0
continue_trying = True
# for ii in range(num_restarts):
while continue_trying:
x0 = (restart_bounds[:, 1] - restart_bounds[:, 0]) \
* np.random.random_sample((restart_bounds.shape[0],)) \
+ restart_bounds[:, 0]
if verbose:
print("multistart iteration", n_runs, 'out of', num_restarts)
print("starting optimisation from x =", x0)
print(
f"n_runs = {n_runs}, ncrashes = {ncrashes}, max_tries = "
f"{max_tries}")
try:
opt_result = minimizer(x0)
nfev += opt_result.nfev
if opt_result.status == 1:
if verbose:
print("optimisation failed!")
else:
curr_x = opt_result.x
if best_opt_result is None:
best_opt_result = opt_result
best_eval = (curr_x, optimiser_func(curr_x))
if verbose:
print("Updating best to", best_eval)
else:
if optimiser_func(curr_x) < best_eval[1]:
best_opt_result = opt_result
best_eval = (curr_x, optimiser_func(curr_x))
if verbose:
print("Updating best to", best_eval)
except (np.linalg.LinAlgError, sp.linalg.LinAlgError):
if verbose:
print("multistart iteration {} failed".format(n_runs))
ncrashes += 1
        # Keep trying until we have reached both the desired number of restarts
        # and the minimum number of successful optimizations, or hit max_tries.
n_runs += 1
if n_runs >= num_restarts and (n_runs - ncrashes) > min_successes:
if verbose:
print("Reached desired number of restarts and successes.")
continue_trying = False
elif n_runs >= max_tries:
if verbose:
print("Maximum number of tries reached. " +
"Not enough successes, but stopping anyway.")
continue_trying = False
if ncrashes == n_runs: # if all optimizations failed
print("All multi-started optimizations encountered LinAlgErrors!")
if verbose:
print("Completed multigrad with", num_restarts,
" restarts and total nfev =", nfev)
return best_opt_result
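# A minimal usage sketch (not part of the original module): minimize a 1-D quadratic
# with restarts drawn uniformly from [-5, 5].
#
#   result = minimize_with_restarts(lambda x: float((x - 2) ** 2),
#                                   restart_bounds=np.array([[-5., 5.]]),
#                                   num_restarts=5, min_successes=3,
#                                   hard_bounds=[(-5., 5.)], verbose=True)
#   # result.x should be close to 2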
def sample_then_minimize(
optimiser_func: Callable,
bounds: np.ndarray,
num_samples: Optional[int] = 1000,
num_chunks: Optional[int] = 4,
num_local: Optional[int] = 5,
jac: Optional[Callable] = None,
minimize_options: Optional[Dict] = None,
evaluate_sequentially: Optional[bool] = True,
        extra_locs: Optional[np.ndarray] = None,
verbose: Optional[bool] = False) -> optimize.OptimizeResult:
"""Samples from the func and then optimizes the most promising locations
Parameters
----------
optimiser_func
Function to be minimized. Inputs are expected to be 2D.
bounds
Bounds for sampling and optimization
num_samples
Number of initial samples to take. Sampling is done uniformly
using the bounds as limits
num_chunks
Number of batches to evaluate the samples in
num_local
Number of local optimizations. This is the number of most promising
samples used as starting points for minimize()
jac
If available, the jacobian of optimiser_func
minimize_options
Options passed to minimize(), e.g. maxiter
evaluate_sequentially
Whether the optimiser_func can return the result for multiple inputs.
This is not the case for e.g. the log likelihood of a GP, but may
be possible for an acquisition function. Default behaviour is to
evaluate the optimiser_func sequentially.
extra_locs
Additional locations to consider for starting gradient descent opt
Useful for e.g. minimizing a surrogate, and adding the surrogate's X
as extra_locs
    verbose
        If True, print progress information
Returns
-------
scipy OptimizeResult of the best local optimization
"""
x_samples = np.random.uniform(bounds[:, 0],
bounds[:, 1],
(num_samples, bounds.shape[0]))
if extra_locs is not None:
assert extra_locs.ndim == x_samples.ndim
assert extra_locs.shape[-1] == x_samples.shape[-1]
x_samples = np.vstack((x_samples, extra_locs))
if evaluate_sequentially:
if verbose:
print(f"Evaluating {num_samples} locations sequentially")
f_samples = np.zeros(num_samples)
for ii in range(num_samples):
f_samples[ii] = optimiser_func(x_samples[ii])
else:
if verbose:
print(f"Evaluating {num_samples} locations")
x_chunks = np.split(x_samples, num_chunks)
f_samples_list = []
for x_chunk in x_chunks:
f_samples_list.append(optimiser_func(x_chunk))
f_samples = np.hstack(f_samples_list)
if num_local > 0:
best_indexes = f_samples.argsort()[::-1][-num_local:]
x_locals = x_samples[best_indexes]
if verbose:
print(f"Locally optimizing the top {num_local} locations")
best_result = None
best_f = np.inf
for ii in range(num_local):
x0 = np.atleast_2d(x_locals[ii])
res = sp.optimize.minimize(
optimiser_func, x0, jac=jac,
bounds=bounds,
options=minimize_options) # type: optimize.OptimizeResult
if res.fun < best_f:
best_result = res
best_f = res.fun
else:
min_idx = np.argmin(f_samples)
best_result = optimize.OptimizeResult(
x=x_samples[min_idx],
fun=f_samples[min_idx].item())
if verbose:
print(f"Best result found: {best_result.x} "
f"has function value {best_result.fun}")
return best_result
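# A minimal usage sketch (assumed inputs): sample 200 points in [-5, 5] and locally
# refine the 3 most promising ones with sequential evaluation.
#
#   res = sample_then_minimize(lambda x: float(np.sum(np.asarray(x) ** 2)),
#                              bounds=np.array([[-5., 5.]]),
#                              num_samples=200, num_local=3,
#                              evaluate_sequentially=True)
#   # res.x should be close to 0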
if __name__ == '__main__':
def f(x):
raise np.linalg.LinAlgError
minimize_with_restarts(f, [[0, 20]], num_restarts=20, min_successes=2,
verbose=True)
|
import re
import string
import random
import jsonlines
# read in 5 lines at a time
# line one is sentence with a mask
# line two is [MASK]
# line three is the options (candidates)
# line four is answer
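# Illustrative 5-line block (made-up content) of the expected pdp-test.txt format:
#
#   The trophy does not fit in the suitcase because it is too big.
#   it
#   the trophy,the suitcase
#   the trophy
#   (blank line)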
def main():
with open('pdp-test.txt', 'r') as wsc:
for i in range(564):
encoded_schema = read_schema(wsc, i % 2 + 1)
with jsonlines.open('pdp-test.jsonl', 'a') as writer:
writer.write(encoded_schema)
def read_schema(wsc, id):
s = wsc.readline().strip()
pronoun = wsc.readline()
mask = re.compile(' ' + pronoun.strip() + ' ')
sentence = mask.sub('_', s)
candidates = wsc.readline().split(',')
candidates[0] = candidates[0].strip()
candidates[1] = candidates[1].strip()
answer = wsc.readline().strip()
correct = 1 if candidates[0] == answer else 2
wsc.readline() # discard empty line
return {
'qID': qID(30) + '-' + str(id),
'sentence': sentence,
'option1': candidates[0],
'option2': candidates[1],
'answer': str(correct)
}
def qID(size, chars=string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for x in range(size))
main()
|
# -*- coding: UTF-8 -*-
# @desc while loop
count = 10
while (count > 0):
    print 'the count is:', count
    count = count - 1
print 'it is over...'
|
from collections import defaultdict
import sys
import io
import os
from tqdm import tqdm
import math
import torch
from torch import nn
import numpy as np
import thop
import time
from copy import deepcopy
from torchvision import ops
import contextlib
from typing import Dict, List, Tuple
import torch.distributed as dist
from torch.cuda.amp import autocast
import torch.backends.cudnn as cudnn
import torch.nn.utils.prune as prune
def autopad(k, p=None):
if p is None: # pad s.t. same spatial shape after convolution
p = k // 2 if isinstance(k, int) else [x // 2 for x in k]
return p
def fuse_conv_and_bn(conv, bn):
# Fuse convolution and batchnorm layers
# https://tehnokv.com/posts/fusing-batchnorm-and-conv/
    if isinstance(conv, nn.Conv2d):
        conv_type = nn.Conv2d
    elif isinstance(conv, ops.DeformConv2d):
        conv_type = ops.DeformConv2d
    else:  # guard against unsupported layer types instead of failing later with a NameError
        raise TypeError('fuse_conv_and_bn: unsupported convolution type {}'.format(type(conv)))
fusedconv = conv_type(conv.in_channels,
conv.out_channels,
kernel_size=conv.kernel_size,
stride=conv.stride,
padding=conv.padding,
dilation=conv.dilation,
groups=conv.groups,
bias=True).requires_grad_(False).to(conv.weight.device)
# prepare filters
w_conv = conv.weight.clone().view(conv.out_channels, -1)
w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var)))
fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.size()))
# prepare spatial bias
b_conv = torch.zeros(conv.weight.size(0), device=conv.weight.device) if conv.bias is None else conv.bias
b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps))
fusedconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn)
return fusedconv
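# A minimal usage sketch (assumed layer sizes): fuse a Conv2d/BatchNorm2d pair in
# eval mode and check that the fused layer reproduces the original two-layer output.
#
#   conv = nn.Conv2d(3, 8, 3, padding=1, bias=False).eval()
#   bn = nn.BatchNorm2d(8).eval()
#   fused = fuse_conv_and_bn(conv, bn)
#   x = torch.randn(1, 3, 32, 32)
#   assert torch.allclose(bn(conv(x)), fused(x), atol=1e-4)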
@torch.no_grad()
def profile(model, input_size=512, nruns=100, verbose=False, amp=False):
# used to benchmark inference speed of model on different devices
if not (isinstance(input_size, tuple) or isinstance(input_size, list)):
input_size = (input_size, input_size)
x = torch.randn(1, 3, *input_size)
model.eval()
param = next(model.parameters())
device = param.device
x = x.to(device)
if param.is_cuda:
if torch.backends.cudnn.benchmark:
# have to do warm up iterations for fair comparison
print('benchmark warm up...')
with autocast(enabled=amp):
for _ in range(50):
_ = model(x)
start = time_synchronized()
with autocast(enabled=amp):
for _ in range(nruns):
o = model(x)
end = time_synchronized()
print(f'Forward time: {(end - start) * 1000 / nruns:.3f}ms (cuda)',
'@ input:', x.shape)
else:
start = time_synchronized()
for _ in range(nruns):
o = model(x)
end = time_synchronized() # seconds
print(f'Forward time: {(end - start) * 1000 / nruns:.3f}ms (cpu)',
'@ input:', x.shape)
if verbose:
if isinstance(o, dict):
for head_key, head in o.items():
print(f'{head_key} output: {head.size()}')
elif isinstance(o, list) or isinstance(o, tuple):
print('output:', end=' ')
for head in o:
print(head.size(), end=', ')
print('')
else:
print('output:', o.size())
def profile_training(model, input_size=512, nruns=100, amp=False, batch_size=16):
if not (isinstance(input_size, tuple) or isinstance(input_size, list)):
input_size = (input_size, input_size)
x = torch.randn(batch_size, 3, *input_size)
assert torch.cuda.is_available()
model.cuda().train()
x = x.cuda()
o = model(x)
if isinstance(o, list) or isinstance(o, tuple):
g0 = [torch.rand_like(item) for item in o]
elif isinstance(o, dict):
g0 = [torch.rand_like(item) for item in o.values()]
else:
g0 = [torch.rand_like(o)]
if torch.backends.cudnn.benchmark:
# have to do warm up iterations for fair comparison
print('benchmark warm up forward...')
with torch.no_grad():
with autocast(enabled=amp):
for _ in range(50):
o = model(x)
print('benchmark warm up backward...')
with autocast(enabled=amp):
for _ in range(50):
o = model(x)
for param in model.parameters():
param.grad = None
o = o.values() if isinstance(o, dict) else ([o] if isinstance(o, torch.Tensor) else o)
for i, v in enumerate(o):
v.backward(g0[i], retain_graph=i < len(o) - 1)
print(f'run through forward pass for {nruns} runs...')
start = time_synchronized()
with torch.no_grad():
with autocast(enabled=amp):
for _ in range(nruns):
o = model(x)
end = time_synchronized()
fwd_time = end - start # fwd only
print(f'run through forward and backward pass for {nruns} runs...')
torch.cuda.reset_peak_memory_stats(device='cuda')
start = time_synchronized()
with autocast(enabled=amp):
for _ in range(nruns):
o = model(x)
for param in model.parameters():
param.grad = None
o = o.values() if isinstance(o, dict) else ([o] if isinstance(o, torch.Tensor) else o)
for i, v in enumerate(o):
v.backward(g0[i], retain_graph=i < len(o) - 1)
end = time_synchronized()
mem = torch.cuda.max_memory_reserved(device='cuda') # bytes
bwd_time = end - start # fwd + bwd
bwd_time = (bwd_time - fwd_time) # bwd only
print(f'Forward time: {fwd_time * 1000 / nruns:.3f}ms (cuda)',
'@ input:', x.shape)
print(f'Backward time: {bwd_time * 1000 / nruns:.3f}ms (cuda)',
'@ input:', x.shape)
print(f'Maximum of managed memory: {mem / 10**9}GB')
def init_torch_seeds(seed=0, verbose=False):
torch.manual_seed(seed)
if seed == 0: # slower, more reproducible
cudnn.benchmark, cudnn.deterministic = False, True
else: # faster, less reproducible
cudnn.benchmark, cudnn.deterministic = True, False
if verbose:
print('PyTorch version {}'.format(torch.__version__))
print('CUDA version {}'.format(torch.version.cuda))
print('cuDNN version {}'.format(cudnn.version()))
print('cuDNN deterministic {}'.format(cudnn.deterministic))
print('cuDNN benchmark {}'.format(cudnn.benchmark))
def initialize_weights(model):
for m in model.modules():
t = type(m)
if t is nn.Conv2d:
pass # not applicable for Center Net since parts have special initialization
elif t is nn.BatchNorm2d:
pass
elif t in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6]:
m.inplace = True
def model_info(model, in_channels=3, in_height=512, in_width=512, verbose=False):
n_p = sum(x.numel() for x in model.parameters()) # number parameters
n_g = sum(x.numel() for x in model.parameters() if x.requires_grad) # number gradients
if verbose:
print('%5s %40s %9s %12s %20s %10s %10s' % ('layer', 'name', 'gradient', 'parameters', 'shape', 'mu', 'sigma'))
for i, (name, p) in enumerate(model.named_parameters()):
name = name.replace('module_list.', '')
print('%5g %40s %9s %12g %20s %10.3g %10.3g' %
(i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std()))
x = torch.randn(1, in_channels, in_height, in_width,
device=next(model.parameters()).device)
# macs ... multiply-add computations
# flops ... floating point operations
macs, _ = thop.profile(deepcopy(model), inputs=(x,), verbose=False)
flops = macs / 1E9 * 2 # each mac = 2 flops (addition + multiplication)
n_layers = len(list(model.modules()))
print(f'{model.__class__.__name__}: {n_layers} layers, {n_p/10**6:0.3}M parameters, {n_g/10**6:0.3}M gradients, {flops:.1f}GFLOPs')
return n_layers, n_p, n_g, flops
def time_synchronized():
# pytorch-accurate time
if torch.cuda.is_available():
torch.cuda.synchronize()
return time.time() # seconds
def setup(rank, world_size):
    # IP address of the machine that will host the process with rank 0.
    os.environ['MASTER_ADDR'] = 'localhost'
    # A free port on the machine that will host the process with rank 0.
    os.environ['MASTER_PORT'] = '12355'
# The total number of processes, so master knows how many workers to wait for.
os.environ['WORLD_SIZE'] = str(world_size)
    # Rank of each process, so it knows whether it is the master or a worker.
os.environ['RANK'] = str(rank)
# initialize the process group
dist.init_process_group(backend='nccl', rank=rank, world_size=world_size)
def cleanup():
dist.destroy_process_group()
def is_parallel(model):
return type(model) in (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel)
def _to_float(x):
return float(f"{x:.2f}")
def sparsity(model):
# Return global model sparsity
a, b = 0., 0.
for p in model.parameters():
a += p.numel()
b += (p == 0).sum()
return b / a
def prune_weights(model, amount=0.1):
# Prune model to requested global sparsity
print('Pruning model... ', end='')
for name, m in model.named_modules():
if isinstance(m, nn.Conv2d):
prune.l1_unstructured(m, name='weight', amount=amount) # prune
prune.remove(m, 'weight') # make permanent
print('==> %.3g global sparsity' % sparsity(model).item())
def item_transform_image_only(item: Dict):
# to yield images for dataset statistic calculation
image = item['image'] # [0, 255] and unit8
image = image / 255.0 # [0, 1] and float32
return {'image': image}
def data_mean_and_std(dataloader, channel_first=False):
# calculate statistics of datasets
# if channel_first: b x c x h x w else: b x h x w x c
# dataloader should yield non-normalized images with
# floating point values in [0, 1] range
# note: Var[x] = E[X^2] - E^2[X]
N = 0
C = next(iter(dataloader))['image'].size(1 if channel_first else 3)
channelwise_sum = torch.zeros(C)
channelwise_sum_squared = torch.zeros(C)
for batch in tqdm(dataloader, desc='gather data statistics'):
images = batch['image']
#import pdb; pdb.set_trace()
if not channel_first: # from: b x h x w x c
images = images.permute(0, 3, 1, 2) # to: b x c x h x w
N += images.size(0) * images.size(2) * images.size(3) # pixels per channel
channelwise_sum += images.sum([0, 2, 3]) # C,
channelwise_sum_squared += torch.square(images).sum([0, 2, 3]) # C,
mean = channelwise_sum / N # C,
std = torch.sqrt(channelwise_sum_squared / N - torch.square(mean)) # C,
return mean, std
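# A minimal usage sketch (hypothetical dataset): the dataloader must yield dicts with
# an 'image' entry holding float images scaled to [0, 1].
#
#   loader = torch.utils.data.DataLoader(my_dataset, batch_size=32)
#   mean, std = data_mean_and_std(loader, channel_first=True)
#   # mean and std are per-channel tensors, e.g. of size 3 for RGB input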
def generate_heatmap(shape, xy: np.ndarray, mask=None, sigma=2, cutoff=1e-3, bleed=True):
"""
    Generates a Gaussian belief map of size 'shape', with one peak per point in 'xy',
    combined into a single channel by taking the per-pixel maximum.
Parameters
----------
shape: tuple
h x w of image
xy: n x 2
n points with x, y coordinates (image coordinate system)
mask: n,
zero-one mask to select points from xy
sigma: scalar
gaussian sigma
cutoff: scalar
set belief to zero if it is less then cutoff
Returns
-------
belief map: 1 x h x w
"""
n = xy.shape[0]
h, w = shape[:2]
if n == 0:
return np.zeros((1, h, w), dtype=np.float32)
if not bleed:
wh = np.asarray([w - 1, h - 1])[None, :]
mask_ = np.logical_or(xy[..., :2] < 0, xy[..., :2] > wh).any(-1)
xy = xy.copy()
xy[mask_] = np.nan
    # grid is 2 x h x w
grid = np.array(np.meshgrid(np.arange(w), np.arange(h)), dtype=np.float32)
# reshape grid to 1 x 2 x h x w
grid = grid.reshape((1, 2, h, w))
# reshape xy to n x 2 x 1 x 1
xy = xy.reshape((n, 2, 1, 1))
# compute squared distances to joints
d = ((grid - xy) ** 2).sum(1)
# compute gaussian
b = np.nan_to_num(np.exp(-(d / (2.0 * sigma ** 2))))
b[(b < cutoff)] = 0 # b is n x h x w
if mask is not None:
# set the invalid center point maps to all zero
b *= mask[:, None, None] # n x h x w
b = b.max(0, keepdims=True) # 1 x h x w
b[b >= 0.95] = 1 # targets are exactly 1 at discrete positions
return b # 1 x h x w
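# A minimal usage sketch: two keypoints on a 64 x 64 map, the second one masked out,
# yields a single-channel map with one Gaussian peak at (x=10, y=20).
#
#   xy = np.array([[10.0, 20.0], [40.0, 50.0]])
#   mask = np.array([1.0, 0.0])
#   bmap = generate_heatmap((64, 64), xy, mask=mask, sigma=2)
#   # bmap.shape == (1, 64, 64)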
def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-9):
# from https://github.com/ultralytics/yolov5/blob/master/utils/general.py
# Returns the IoU of box1 to box2. box1 is 4, box2 is nx4
box2 = box2.T # 4xn
# Get the coordinates of bounding boxes
if x1y1x2y2: # x1, y1, x2, y2 = box1
b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
else: # transform from xywh to xyxy
b1_x1, b1_x2 = box1[0] - box1[2] / 2, box1[0] + box1[2] / 2
b1_y1, b1_y2 = box1[1] - box1[3] / 2, box1[1] + box1[3] / 2
b2_x1, b2_x2 = box2[0] - box2[2] / 2, box2[0] + box2[2] / 2
b2_y1, b2_y2 = box2[1] - box2[3] / 2, box2[1] + box2[3] / 2
#import pdb; pdb.set_trace()
# Intersection area
inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \
(torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0)
# Union Area
w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps
w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps
union = w1 * h1 + w2 * h2 - inter + eps
iou = inter / union
if GIoU or DIoU or CIoU:
cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1) # convex (smallest enclosing box) width
ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1) # convex height
if CIoU or DIoU: # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1
c2 = cw ** 2 + ch ** 2 + eps # convex diagonal squared
rho2 = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 +
(b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4 # center distance squared
if DIoU:
return iou - rho2 / c2 # DIoU
elif CIoU: # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47
v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2)
with torch.no_grad():
alpha = v / ((1 + eps) - iou + v)
# nan for perfect alignment!
#return torch.nan_to_num(iou - (rho2 / c2 + v * alpha), nan=1.0) # CIoU
return iou - (rho2 / c2 + v * alpha) # CIoU
else: # GIoU https://arxiv.org/pdf/1902.09630.pdf
c_area = cw * ch + eps # convex area
return iou - (c_area - union) / c_area # GIoU
else:
return iou # IoU
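# A minimal usage sketch: IoU between one box and two boxes, all in x1y1x2y2 format.
#
#   box1 = torch.tensor([0., 0., 10., 10.])
#   boxes = torch.tensor([[0., 0., 10., 10.], [5., 5., 15., 15.]])
#   bbox_iou(box1, boxes)             # ~[1.000, 0.143]
#   bbox_iou(box1, boxes, CIoU=True)  # complete-IoU variant used for box regression losses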
class AverageMeter:
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
if isinstance(val, torch.Tensor):
val = val.item()
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
class MetricMeter:
def __init__(self, delimiter='\t'):
self.meters = defaultdict(AverageMeter)
self.delimiter = delimiter
def update(self, input_dict):
if input_dict is None:
return
if not isinstance(input_dict, dict):
raise TypeError(
'Input to MetricMeter.update() must be a dictionary'
)
for k, v in input_dict.items():
if isinstance(v, torch.Tensor):
v = v.item()
self.meters[k].update(v)
def reset(self): # to clear history for average calculation
for meter in self.meters.values(): # each of type AvarageMeter
meter.reset()
def to_writer(self, writer, tag, n_iter):
for name, meter in self.meters.items():
writer.add_scalar(f"{tag}/{name}", meter.val, n_iter)
def get_avg(self, tag):
return self.meters[tag].avg
def get_val(self, tag):
return self.meters[tag].val
def __str__(self):
output_str = []
for name, meter in self.meters.items():
output_str.append(
'{} {:.4f} ({:.4f})'.format(name, meter.val, meter.avg)
)
return self.delimiter.join(output_str)
class Config:
def __init__(self, config_path):
with open(config_path, "r") as fh:
content = fh.read()
self.parse(content)
def parse(self, content):
for line in content.split('\n'):
if len(line) == 0 or line.startswith('#'):
continue # skip comments and empty lines
try:
k, v = line.split(':')
except ValueError as e:
print(e, 'error in line:', line)
raise AttributeError
if '[' in v: # parse lists
is_float = True if '.' in v else False
v = v.strip()[1:-1].split(',')
v = [x for x in v if x != ''] # case: [0, ]
v = list(map(float if is_float else int, v))
dtype = np.float32 if is_float else np.int32
v = np.array(v, dtype=dtype)
elif '/' in v or "'" in v or '"' in v: # parse paths or strings
v = v.strip().strip("'").strip('"')
else: # parse integer, floating point or string values
is_float = True if '.' in v else False
try:
v = float(v) if is_float else int(v)
except ValueError:
if "True" in v:
v = True
elif "False" in v:
v = False
setattr(self, k, v)
# import pdb; pdb.set_trace()
def __repr__(self):
info = []
for k, v in self.__dict__.items():
info.append(f"{k}: {v}")
return "\n".join(info)
def __str__(self):
return self.__repr__()
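# Illustrative config file content (hypothetical keys) that Config.parse() accepts:
# comments and blank lines are skipped, lists become numpy arrays, quoted or
# slash-containing values become strings, and True/False are parsed as booleans.
#
#   # training settings
#   batch_size: 16
#   lr: 0.001
#   use_amp: True
#   mean: [0.485, 0.456, 0.406]
#   model_path: './models/best.pth'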
class FileStream:
# context manager to save print output
def __init__(self, filepath: str, parser=None):
self.filepath = filepath
self.file = None
self.buffer = io.StringIO()
self.parser = parser
def write(self, s):
self.buffer.write(s) # redirect to buffer
sys.__stdout__.write(s) # and print it to console
def __enter__(self):
self.file = open(self.filepath, "w+")
sys.stdout = self
def __exit__(self, exc_type, exc_value, exc_traceback):
if self.parser is not None:
output = self.parser(self.buffer.getvalue())
else:
output = self.buffer.getvalue()
self.file.write(output)
self.buffer.close()
self.file.close()
sys.stdout = sys.__stdout__
|
import re
from datetime import datetime, timedelta, tzinfo
from dimagi.utils.parsing import ISO_DATE_FORMAT
def map_reduce(emitfunc=lambda rec: [(None,)], reducefunc=lambda v: v, data=None, include_docs=False):
"""perform a "map-reduce" on the data
emitfunc(rec): return an iterable of key-value pairings as (key, value). alternatively, may
simply emit (key,) (useful for include_docs=True or reducefunc=len)
reducefunc(values): applied to each list of values with the same key; defaults to just
returning the list
    data: list of records to operate on.
include_docs: if True, each emitted value v will be implicitly converted to (v, doc) (if
only key is emitted, v == doc)
"""
mapped = {}
for rec in data:
for emission in emitfunc(rec):
try:
k, v = emission
if include_docs:
v = (v, rec)
except ValueError:
k, v = emission[0], rec if include_docs else None
if k not in mapped:
mapped[k] = []
mapped[k].append(v)
return dict((k, reducefunc(v)) for k, v in mapped.items())
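# A minimal usage sketch (hypothetical records): group records by owner and count them.
#
#   data = [{'owner': 'a'}, {'owner': 'b'}, {'owner': 'a'}]
#   counts = map_reduce(emitfunc=lambda rec: [(rec['owner'],)],
#                       reducefunc=len, data=data)
#   # counts == {'a': 2, 'b': 1}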
def parse_date(s):
for pattern, parsefunc in DATE_REGEXP:
match = pattern.match(s)
if match:
return parsefunc(**match.groupdict())
raise ValueError('did not match any date pattern')
def parse_iso_date(p):
return datetime.strptime(p, ISO_DATE_FORMAT).date()
def parse_iso_timestamp(p, frac, tz):
return parse_full_timestamp('%Y-%m-%dT%H:%M:%S', p, frac, tz)
def parse_js_timestamp(p, tz):
return parse_full_timestamp('%b %d %Y %H:%M:%S', p, None, tz)
def parse_full_timestamp(pattern, p, frac, tz):
stamp = datetime.strptime(p, pattern)
if frac:
stamp += timedelta(seconds=float(frac))
if tz:
try:
stamp = stamp.replace(tzinfo=TZ(tz))
except ValueError:
pass
return stamp
DATE_REGEXP = [
(re.compile(r'(?P<p>\d{4}-\d{2}-\d{2})$'), parse_iso_date),
(re.compile(r'(?P<p>\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2})(?P<frac>\.\d+)?(?P<tz>Z|[+-]\d{2,4})?$'), parse_iso_timestamp),
(re.compile(r'\w{3} (?P<p>\w{3} \d{2} \d{4} \d{2}:\d{2}:\d{2}) (GMT|UTC)?(?P<tz>[+-]\d{4})'), parse_js_timestamp),
]
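# Illustrative inputs matched by the patterns above (values are examples only):
#
#   parse_date('2019-05-01')                        # ISO date -> datetime.date
#   parse_date('2019-05-01T12:30:45.5Z')            # ISO timestamp -> tz-aware datetime (UTC)
#   parse_date('Wed May 01 2019 12:30:45 GMT+0200') # JS-style timestamp -> tz-aware datetime (+02:00)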
#do i really have to define this myself???
class TZ(tzinfo):
def __init__(self, tz):
if isinstance(tz, int):
self.offset = tz
self.name = '%s%02d%02d' % ('+' if tz >= 0 else '-', abs(tz) // 60, abs(tz) % 60)
else:
if tz in ('Z', 'UTC'):
tz = '+0000'
self.name = tz
try:
sign = {'+': 1, '-': -1}[tz[0]]
h = int(tz[1:3])
m = int(tz[3:5]) if len(tz) == 5 else 0
except:
raise ValueError('invalid tz spec')
self.offset = sign * (60 * h + m)
def utcoffset(self, dt):
return timedelta(minutes=self.offset)
def tzname(self, dt):
return self.name
def dst(self, dt):
return timedelta()
def __getinitargs__(self):
return (self.offset,)
def __repr__(self):
return self.name
|
from branch.branch.report.accounts_receivable_branch.accounts_receivable_branch import ReceivablePayableReport
def execute(filters=None):
args = {
"party_type": "Supplier",
"naming_by": ["Buying Settings", "supp_master_name"],
}
return ReceivablePayableReport(filters).run(args) |
from collections import namedtuple
EnvInfo = namedtuple('EnvInfo',
['discount',
'game_score',
'traj_done',
'internal_state'
])
|
class Solution:
def diffWaysToCompute(self, input: str) -> [int]:
end = []
op = {'+': lambda x, y: x + y,
'-': lambda x, y: x - y,
'*': lambda x, y: x * y}
for i in range(len(input)):
if input[i] in op.keys():
for left in self.diffWaysToCompute(input[:i]):
for right in self.diffWaysToCompute(input[i + 1:len(input)]):
output = op[input[i]](left, right)
end.append(output)
if len(end) == 0:
end.append(int(input))
return end |
# This module performs the conversion of T^{mu nu}
# in Spherical or Cartesian coordinates
# given as *numerical* expressions (i.e., given as
# numerical values with fixed floating-point precision;
# e.g., in the case of an initial data solver), to
# rescaled BSSN stress-energy source terms.
# Author: Zachariah B. Etienne
# zachetie **at** gmail **dot** com
# Step P1: Import needed modules
from outputC import * # NRPy+: Core C code output module
import finite_difference as fin # NRPy+: Finite difference C code generation module
import grid as gri # NRPy+: Functions having to do with numerical grids
import indexedexp as ixp # NRPy+: Symbolic indexed expression (e.g., tensors, vectors, etc.) support
import reference_metric as rfm # NRPy+: Reference metric support
import sympy as sp # SymPy: The Python computer algebra package upon which NRPy+ depends
import NRPy_param_funcs as par # NRPy+: Parameter interface
import BSSN.BSSN_quantities as Bq # NRPy+: This module depends on the parameter EvolvedConformalFactor_cf,
# which is defined in BSSN.BSSN_quantities
import sys # Standard Python modules for multiplatform OS-level functions
import BSSN.BSSN_RHSs as bssnrhs # NRPy+: BSSN RHS quantities
import loop as lp # NRPy+: Helper module for writing C-code loops
def T4UUmunu_Numerical_Spherical_or_Cartesian_to_BSSNCurvilinear(CoordType_in,
Tmunu_input_function_name,
pointer_to_ID_inputs=False):
# The ADM & BSSN formalisms only work in 3D; they are 3+1 decompositions of Einstein's equations.
# To implement axisymmetry or spherical symmetry, simply set all spatial derivatives in
# the relevant angular directions to zero; DO NOT SET DIM TO ANYTHING BUT 3.
# Step 0: Set spatial dimension (must be 3 for BSSN)
DIM = 3
# Step 1: Define the input variables: the 4D stress-energy tensor, and the ADM 3-metric, lapse, & shift:
T4SphorCartUU = ixp.declarerank2("T4SphorCartUU", "sym01", DIM=4)
gammaSphorCartDD = ixp.declarerank2("gammaSphorCartDD", "sym01")
alphaSphorCart = sp.symbols("alphaSphorCart")
betaSphorCartU = ixp.declarerank1("betaSphorCartU")
# Step 2: All Tmunu initial data quantities are functions of xx0,xx1,xx2, but
# in the Spherical or Cartesian basis.
# We first define the BSSN stress-energy source terms in the Spherical
# or Cartesian basis, respectively.
# To get \gamma_{\mu \nu} = gammabar4DD[mu][nu], we'll need to construct the 4-metric, using Eq. 2.122 in B&S:
# S_{ij} = \gamma_{i \mu} \gamma_{j \nu} T^{\mu \nu}
# S_{i} = -\gamma_{i\mu} n_\nu T^{\mu\nu}
# S = \gamma^{ij} S_{ij}
# rho = n_\mu n_\nu T^{\mu\nu},
# where
# \gamma_{\mu\nu} = g_{\mu\nu} + n_\mu n_\nu
# and
# n_mu = {-\alpha,0,0,0},
# Step 2.1: Construct the 4-metric based on the input ADM quantities.
# This is provided by Eq 4.47 in [Gourgoulhon](https://arxiv.org/pdf/gr-qc/0703035.pdf):
# g_{tt} = -\alpha^2 + \beta^k \beta_k
# g_{ti} = \beta_i
# g_{ij} = \gamma_{ij}
# Eq. 2.121 in B&S
betaSphorCartD = ixp.zerorank1()
for i in range(DIM):
for j in range(DIM):
betaSphorCartD[i] += gammaSphorCartDD[i][j] * betaSphorCartU[j]
# Now compute the beta contraction.
beta2 = sp.sympify(0)
for i in range(DIM):
beta2 += betaSphorCartU[i] * betaSphorCartD[i]
# Eq. 2.122 in B&S
g4SphorCartDD = ixp.zerorank2(DIM=4)
g4SphorCartDD[0][0] = -alphaSphorCart ** 2 + beta2
for i in range(DIM):
g4SphorCartDD[i + 1][0] = g4SphorCartDD[0][i + 1] = betaSphorCartD[i]
for i in range(DIM):
for j in range(DIM):
g4SphorCartDD[i + 1][j + 1] = gammaSphorCartDD[i][j]
# Step 2.2: Construct \gamma_{mu nu} = g_{mu nu} + n_mu n_nu:
n4SphorCartD = ixp.zerorank1(DIM=4)
n4SphorCartD[0] = -alphaSphorCart
gamma4SphorCartDD = ixp.zerorank2(DIM=4)
for mu in range(4):
for nu in range(4):
gamma4SphorCartDD[mu][nu] = g4SphorCartDD[mu][nu] + n4SphorCartD[mu] * n4SphorCartD[nu]
# Step 2.3: We now have all we need to construct the BSSN source
# terms in the current basis (Spherical or Cartesian):
# S_{ij} = \gamma_{i \mu} \gamma_{j \nu} T^{\mu \nu}
# S_{i} = -\gamma_{i\mu} n_\nu T^{\mu\nu}
# S = \gamma^{ij} S_{ij}
# rho = n_\mu n_\nu T^{\mu\nu},
SSphorCartDD = ixp.zerorank2()
SSphorCartD = ixp.zerorank1()
SSphorCart = sp.sympify(0)
rhoSphorCart = sp.sympify(0)
for i in range(DIM):
for j in range(DIM):
for mu in range(4):
for nu in range(4):
SSphorCartDD[i][j] += gamma4SphorCartDD[i + 1][mu] * gamma4SphorCartDD[j + 1][nu] * T4SphorCartUU[mu][nu]
for i in range(DIM):
for mu in range(4):
for nu in range(4):
SSphorCartD[i] += -gamma4SphorCartDD[i + 1][mu] * n4SphorCartD[nu] * T4SphorCartUU[mu][nu]
gammaSphorCartUU, gammaDET = ixp.symm_matrix_inverter3x3(gammaSphorCartDD)
for i in range(DIM):
for j in range(DIM):
SSphorCart += gammaSphorCartUU[i][j] * SSphorCartDD[i][j]
for mu in range(4):
for nu in range(4):
rhoSphorCart += n4SphorCartD[mu] * n4SphorCartD[nu] * T4SphorCartUU[mu][nu]
# Step 3: Perform basis conversion to the xx0,xx1,xx2 (reference-metric) basis.
# Make sure that rfm.reference_metric() has been called.
# We'll need the variables it defines throughout this module.
if rfm.have_already_called_reference_metric_function == False:
print("Error. Called Tmunu_Numerical_Spherical_or_Cartesian_to_BSSNCurvilinear() without")
print(" first setting up reference metric, by calling rfm.reference_metric().")
sys.exit(1)
# Step 1: All input quantities are in terms of r,th,ph or x,y,z. We want them in terms
# of xx0,xx1,xx2, so here we call sympify_integers__replace_rthph() to replace
# r,th,ph or x,y,z, respectively, with the appropriate functions of xx0,xx1,xx2
# as defined for this particular reference metric in reference_metric.py's
# xxSph[] or xxCart[], respectively:
r_th_ph_or_Cart_xyz_oID_xx = []
if CoordType_in == "Spherical":
r_th_ph_or_Cart_xyz_oID_xx = rfm.xxSph
elif CoordType_in == "Cartesian":
r_th_ph_or_Cart_xyz_oID_xx = rfm.xxCart
else:
print("Error: Can only convert ADM Cartesian or Spherical initial data to BSSN Curvilinear coords.")
sys.exit(1)
# Next apply Jacobian transformations to convert into the (xx0,xx1,xx2) basis
# alpha is a scalar, so no Jacobian transformation is necessary.
alpha = alphaSphorCart
Jac_dUSphorCart_dDrfmUD = ixp.zerorank2()
for i in range(DIM):
for j in range(DIM):
Jac_dUSphorCart_dDrfmUD[i][j] = sp.diff(r_th_ph_or_Cart_xyz_oID_xx[i], rfm.xx[j])
Jac_dUrfm_dDSphorCartUD, dummyDET = ixp.generic_matrix_inverter3x3(Jac_dUSphorCart_dDrfmUD)
betaU = ixp.zerorank1()
BU = ixp.zerorank1()
gammaSphorCartDD = ixp.zerorank2()
KDD = ixp.zerorank2()
for i in range(DIM):
for j in range(DIM):
betaU[i] += Jac_dUrfm_dDSphorCartUD[i][j] * betaSphorCartU[j]
BU[i] += Jac_dUrfm_dDSphorCartUD[i][j] * BSphorCartU[j]
for k in range(DIM):
for l in range(DIM):
gammaDD[i][j] += Jac_dUSphorCart_dDrfmUD[k][i] * Jac_dUSphorCart_dDrfmUD[l][j] * \
gammaSphorCartDD[k][l]
KDD[i][j] += Jac_dUSphorCart_dDrfmUD[k][i] * Jac_dUSphorCart_dDrfmUD[l][j] * KSphorCartDD[k][l]
# Step 3: All ADM quantities were input into this function in the Spherical or Cartesian
# basis, as functions of r,th,ph or x,y,z, respectively. In Steps 1 and 2 above,
# we converted them to the xx0,xx1,xx2 basis, and as functions of xx0,xx1,xx2.
# Here we convert ADM quantities to their BSSN Curvilinear counterparts:
# Step 3.1: Convert ADM $\gamma_{ij}$ to BSSN $\bar{\gamma}_{ij}$:
# We have (Eqs. 2 and 3 of [Ruchlin *et al.*](https://arxiv.org/pdf/1712.07658.pdf)):
# \bar{\gamma}_{i j} = \left(\frac{\bar{\gamma}}{\gamma}\right)^{1/3} \gamma_{ij}.
gammaUU, gammaDET = ixp.symm_matrix_inverter3x3(gammaDD)
gammabarDD = ixp.zerorank2()
for i in range(DIM):
for j in range(DIM):
gammabarDD[i][j] = (rfm.detgammahat / gammaDET) ** (sp.Rational(1, 3)) * gammaDD[i][j]
# Step 3.2: Convert the extrinsic curvature $K_{ij}$ to the trace-free extrinsic
# curvature $\bar{A}_{ij}$, plus the trace of the extrinsic curvature $K$,
# where (Eq. 3 of [Baumgarte *et al.*](https://arxiv.org/pdf/1211.6632.pdf)):
# K = \gamma^{ij} K_{ij}, and
# \bar{A}_{ij} &= \left(\frac{\bar{\gamma}}{\gamma}\right)^{1/3} \left(K_{ij} - \frac{1}{3} \gamma_{ij} K \right)
trK = sp.sympify(0)
for i in range(DIM):
for j in range(DIM):
trK += gammaUU[i][j] * KDD[i][j]
AbarDD = ixp.zerorank2()
for i in range(DIM):
for j in range(DIM):
AbarDD[i][j] = (rfm.detgammahat / gammaDET) ** (sp.Rational(1, 3)) * (
KDD[i][j] - sp.Rational(1, 3) * gammaDD[i][j] * trK)
# Step 3.3: Set the conformal factor variable $\texttt{cf}$, which is set
# by the "BSSN_RHSs::EvolvedConformalFactor_cf" parameter. For example if
# "EvolvedConformalFactor_cf" is set to "phi", we can use Eq. 3 of
# [Ruchlin *et al.*](https://arxiv.org/pdf/1712.07658.pdf),
# which in arbitrary coordinates is written:
# \phi = \frac{1}{12} \log\left(\frac{\gamma}{\bar{\gamma}}\right).
# Alternatively if "BSSN_RHSs::EvolvedConformalFactor_cf" is set to "chi", then
# \chi = e^{-4 \phi} = \exp\left(-4 \frac{1}{12} \log\left(\frac{\gamma}{\bar{\gamma}}\right)\right)
# = \exp\left(-\frac{1}{3} \log\left(\frac{\gamma}{\bar{\gamma}}\right)\right) = \left(\frac{\gamma}{\bar{\gamma}}\right)^{-1/3}.
#
# Finally if "BSSN_RHSs::EvolvedConformalFactor_cf" is set to "W", then
# W = e^{-2 \phi} = \exp\left(-2 \frac{1}{12} \log\left(\frac{\gamma}{\bar{\gamma}}\right)\right) =
# \exp\left(-\frac{1}{6} \log\left(\frac{\gamma}{\bar{\gamma}}\right)\right) =
# \left(\frac{\gamma}{\bar{\gamma}}\right)^{-1/6}.
# First compute gammabarDET:
gammabarUU, gammabarDET = ixp.symm_matrix_inverter3x3(gammabarDD)
cf = sp.sympify(0)
if par.parval_from_str("EvolvedConformalFactor_cf") == "phi":
cf = sp.Rational(1, 12) * sp.log(gammaDET / gammabarDET)
elif par.parval_from_str("EvolvedConformalFactor_cf") == "chi":
cf = (gammaDET / gammabarDET) ** (-sp.Rational(1, 3))
elif par.parval_from_str("EvolvedConformalFactor_cf") == "W":
cf = (gammaDET / gammabarDET) ** (-sp.Rational(1, 6))
else:
print("Error EvolvedConformalFactor_cf type = \"" + par.parval_from_str("EvolvedConformalFactor_cf") + "\" unknown.")
sys.exit(1)
# Step 4: Rescale tensorial quantities according to the prescription described in
# the [BSSN in curvilinear coordinates tutorial module](Tutorial-BSSNCurvilinear.ipynb)
# (also [Ruchlin *et al.*](https://arxiv.org/pdf/1712.07658.pdf)):
#
# h_{ij} &= (\bar{\gamma}_{ij} - \hat{\gamma}_{ij})/\text{ReDD[i][j]}\\
# a_{ij} &= \bar{A}_{ij}/\text{ReDD[i][j]}\\
# \lambda^i &= \bar{\Lambda}^i/\text{ReU[i]}\\
# \mathcal{V}^i &= \beta^i/\text{ReU[i]}\\
# \mathcal{B}^i &= B^i/\text{ReU[i]}\\
hDD = ixp.zerorank2()
aDD = ixp.zerorank2()
vetU = ixp.zerorank1()
betU = ixp.zerorank1()
for i in range(DIM):
vetU[i] = betaU[i] / rfm.ReU[i]
betU[i] = BU[i] / rfm.ReU[i]
for j in range(DIM):
hDD[i][j] = (gammabarDD[i][j] - rfm.ghatDD[i][j]) / rfm.ReDD[i][j]
aDD[i][j] = AbarDD[i][j] / rfm.ReDD[i][j]
# Step 5: Output all ADM-to-BSSN expressions to a C function. This function
# must first call the ID_ADM_SphorCart() defined above. Using these
# Spherical or Cartesian data, it sets up all quantities needed for
# BSSNCurvilinear initial data, *except* $\lambda^i$, which must be
# computed from numerical data using finite-difference derivatives.
with open("BSSN/ID_ADM_xx0xx1xx2_to_BSSN_xx0xx1xx2__ALL_BUT_LAMBDAs.h", "w") as file:
file.write("void ID_ADM_xx0xx1xx2_to_BSSN_xx0xx1xx2__ALL_BUT_LAMBDAs(const REAL xx0xx1xx2[3],")
if pointer_to_ID_inputs:
file.write("ID_inputs *other_inputs,")
else:
file.write("ID_inputs other_inputs,")
file.write("""
REAL *hDD00,REAL *hDD01,REAL *hDD02,REAL *hDD11,REAL *hDD12,REAL *hDD22,
REAL *aDD00,REAL *aDD01,REAL *aDD02,REAL *aDD11,REAL *aDD12,REAL *aDD22,
REAL *trK,
REAL *vetU0,REAL *vetU1,REAL *vetU2,
REAL *betU0,REAL *betU1,REAL *betU2,
REAL *alpha, REAL *cf) {
REAL gammaSphorCartDD00,gammaSphorCartDD01,gammaSphorCartDD02,
gammaSphorCartDD11,gammaSphorCartDD12,gammaSphorCartDD22;
REAL KSphorCartDD00,KSphorCartDD01,KSphorCartDD02,
KSphorCartDD11,KSphorCartDD12,KSphorCartDD22;
REAL alphaSphorCart,betaSphorCartU0,betaSphorCartU1,betaSphorCartU2;
REAL BSphorCartU0,BSphorCartU1,BSphorCartU2;
const REAL xx0 = xx0xx1xx2[0];
const REAL xx1 = xx0xx1xx2[1];
const REAL xx2 = xx0xx1xx2[2];
REAL xyz_or_rthph[3];\n""")
outCparams = "preindent=1,outCfileaccess=a,outCverbose=False,includebraces=False"
outputC(r_th_ph_or_Cart_xyz_oID_xx[0:3], ["xyz_or_rthph[0]", "xyz_or_rthph[1]", "xyz_or_rthph[2]"],
"BSSN/ID_ADM_xx0xx1xx2_to_BSSN_xx0xx1xx2__ALL_BUT_LAMBDAs.h", outCparams + ",CSE_enable=False")
with open("BSSN/ID_ADM_xx0xx1xx2_to_BSSN_xx0xx1xx2__ALL_BUT_LAMBDAs.h", "a") as file:
file.write(" "+ADM_input_function_name+"""(xyz_or_rthph, other_inputs,
&gammaSphorCartDD00,&gammaSphorCartDD01,&gammaSphorCartDD02,
&gammaSphorCartDD11,&gammaSphorCartDD12,&gammaSphorCartDD22,
&KSphorCartDD00,&KSphorCartDD01,&KSphorCartDD02,
&KSphorCartDD11,&KSphorCartDD12,&KSphorCartDD22,
&alphaSphorCart,&betaSphorCartU0,&betaSphorCartU1,&betaSphorCartU2,
&BSphorCartU0,&BSphorCartU1,&BSphorCartU2);
// Next compute all rescaled BSSN curvilinear quantities:\n""")
outCparams = "preindent=1,outCfileaccess=a,outCverbose=False,includebraces=False"
outputC([hDD[0][0], hDD[0][1], hDD[0][2], hDD[1][1], hDD[1][2], hDD[2][2],
aDD[0][0], aDD[0][1], aDD[0][2], aDD[1][1], aDD[1][2], aDD[2][2],
trK, vetU[0], vetU[1], vetU[2], betU[0], betU[1], betU[2],
alpha, cf],
["*hDD00", "*hDD01", "*hDD02", "*hDD11", "*hDD12", "*hDD22",
"*aDD00", "*aDD01", "*aDD02", "*aDD11", "*aDD12", "*aDD22",
"*trK", "*vetU0", "*vetU1", "*vetU2", "*betU0", "*betU1", "*betU2",
"*alpha", "*cf"],
"BSSN/ID_ADM_xx0xx1xx2_to_BSSN_xx0xx1xx2__ALL_BUT_LAMBDAs.h", params=outCparams)
with open("BSSN/ID_ADM_xx0xx1xx2_to_BSSN_xx0xx1xx2__ALL_BUT_LAMBDAs.h", "a") as file:
file.write("}\n")
# Step 5.A: Output the driver function for the above
# ID_ADM_xx0xx1xx2_to_BSSN_xx0xx1xx2__ALL_BUT_LAMBDAs() function:
with open("BSSN/ID_BSSN__ALL_BUT_LAMBDAs.h", "w") as file:
file.write("void ID_BSSN__ALL_BUT_LAMBDAs(const int Nxx_plus_2NGHOSTS[3],REAL *xx[3],")
if pointer_to_ID_inputs:
file.write("ID_inputs *other_inputs,")
else:
file.write("ID_inputs other_inputs,")
file.write("REAL *in_gfs) {\n")
file.write(lp.loop(["i2", "i1", "i0"], ["0", "0", "0"],
["Nxx_plus_2NGHOSTS[2]", "Nxx_plus_2NGHOSTS[1]", "Nxx_plus_2NGHOSTS[0]"],
["1", "1", "1"], ["#pragma omp parallel for",
" const REAL xx2 = xx[2][i2];",
" const REAL xx1 = xx[1][i1];"], "",
"""const REAL xx0 = xx[0][i0];
const int idx = IDX3(i0,i1,i2);
const REAL xx0xx1xx2[3] = {xx0,xx1,xx2};
ID_ADM_xx0xx1xx2_to_BSSN_xx0xx1xx2__ALL_BUT_LAMBDAs(xx0xx1xx2,other_inputs,
&in_gfs[IDX4pt(HDD00GF,idx)],&in_gfs[IDX4pt(HDD01GF,idx)],&in_gfs[IDX4pt(HDD02GF,idx)],
&in_gfs[IDX4pt(HDD11GF,idx)],&in_gfs[IDX4pt(HDD12GF,idx)],&in_gfs[IDX4pt(HDD22GF,idx)],
&in_gfs[IDX4pt(ADD00GF,idx)],&in_gfs[IDX4pt(ADD01GF,idx)],&in_gfs[IDX4pt(ADD02GF,idx)],
&in_gfs[IDX4pt(ADD11GF,idx)],&in_gfs[IDX4pt(ADD12GF,idx)],&in_gfs[IDX4pt(ADD22GF,idx)],
&in_gfs[IDX4pt(TRKGF,idx)],
&in_gfs[IDX4pt(VETU0GF,idx)],&in_gfs[IDX4pt(VETU1GF,idx)],&in_gfs[IDX4pt(VETU2GF,idx)],
&in_gfs[IDX4pt(BETU0GF,idx)],&in_gfs[IDX4pt(BETU1GF,idx)],&in_gfs[IDX4pt(BETU2GF,idx)],
&in_gfs[IDX4pt(ALPHAGF,idx)],&in_gfs[IDX4pt(CFGF,idx)]);
"""))
file.write("}\n")
# Step 6: Compute $\bar{\Lambda}^i$ (Eqs. 4 and 5 of
# [Baumgarte *et al.*](https://arxiv.org/pdf/1211.6632.pdf)),
# from finite-difference derivatives of rescaled metric
# quantities $h_{ij}$:
# \bar{\Lambda}^i = \bar{\gamma}^{jk}\left(\bar{\Gamma}^i_{jk} - \hat{\Gamma}^i_{jk}\right).
# The reference_metric.py module provides us with analytic expressions for
# $\hat{\Gamma}^i_{jk}$, so here we need only compute
# finite-difference expressions for $\bar{\Gamma}^i_{jk}$, based on
# the values for $h_{ij}$ provided in the initial data. Once
# $\bar{\Lambda}^i$ has been computed, we apply the usual rescaling
# procedure:
# \lambda^i = \bar{\Lambda}^i/\text{ReU[i]},
# and then output the result to a C file using the NRPy+
# finite-difference C output routine.
# We will need all BSSN gridfunctions to be defined, as well as
# expressions for gammabarDD_dD in terms of exact derivatives of
# the rescaling matrix and finite-difference derivatives of
# hDD's.
gammabarDD = bssnrhs.gammabarDD
gammabarUU, gammabarDET = ixp.symm_matrix_inverter3x3(gammabarDD)
gammabarDD_dD = bssnrhs.gammabarDD_dD
# Next compute Christoffel symbols \bar{\Gamma}^i_{jk}:
GammabarUDD = ixp.zerorank3()
for i in range(DIM):
for j in range(DIM):
for k in range(DIM):
for l in range(DIM):
GammabarUDD[i][j][k] += sp.Rational(1, 2) * gammabarUU[i][l] * (gammabarDD_dD[l][j][k] +
gammabarDD_dD[l][k][j] -
gammabarDD_dD[j][k][l])
# Next evaluate \bar{\Lambda}^i, based on GammabarUDD above and GammahatUDD
# (from the reference metric):
LambdabarU = ixp.zerorank1()
for i in range(DIM):
for j in range(DIM):
for k in range(DIM):
LambdabarU[i] += gammabarUU[j][k] * (GammabarUDD[i][j][k] - rfm.GammahatUDD[i][j][k])
# Finally apply rescaling:
# lambda^i = Lambdabar^i/\text{ReU[i]}
lambdaU = ixp.zerorank1()
for i in range(DIM):
lambdaU[i] = LambdabarU[i] / rfm.ReU[i]
outCparams = "preindent=1,outCfileaccess=a,outCverbose=False,includebraces=False"
lambdaU_expressions = [lhrh(lhs=gri.gfaccess("in_gfs", "lambdaU0"), rhs=lambdaU[0]),
lhrh(lhs=gri.gfaccess("in_gfs", "lambdaU1"), rhs=lambdaU[1]),
lhrh(lhs=gri.gfaccess("in_gfs", "lambdaU2"), rhs=lambdaU[2])]
lambdaU_expressions_FDout = fin.FD_outputC("returnstring", lambdaU_expressions, outCparams)
with open("BSSN/ID_BSSN_lambdas.h", "w") as file:
file.write("""
void ID_BSSN_lambdas(const int Nxx[3],const int Nxx_plus_2NGHOSTS[3],REAL *xx[3],const REAL dxx[3],REAL *in_gfs) {\n""")
file.write(lp.loop(["i2", "i1", "i0"], ["NGHOSTS", "NGHOSTS", "NGHOSTS"],
["NGHOSTS+Nxx[2]", "NGHOSTS+Nxx[1]", "NGHOSTS+Nxx[0]"],
["1", "1", "1"], ["const REAL invdx0 = 1.0/dxx[0];\n" +
"const REAL invdx1 = 1.0/dxx[1];\n" +
"const REAL invdx2 = 1.0/dxx[2];\n" +
"#pragma omp parallel for",
" const REAL xx2 = xx[2][i2];",
" const REAL xx1 = xx[1][i1];"], "",
"const REAL xx0 = xx[0][i0];\n" + lambdaU_expressions_FDout))
file.write("}\n")
|
import time
import pandas as pd
start_time = time.time()
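# Read only the timestamp, particulate (pm25/pm10), temperature and humidity columns from the
# semicolon-separated sensor log; usecols drops the remaining columns after parsing.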
data = pd.read_csv('../input/aqi-first-data-from-april-2018/data-esp8266-1129419-2018-04-20.csv',
sep=';', header=0, skiprows=1, index_col=False,
names=['date','2','3','4','5','6','7','pm25','pm10','10','11','temp','hum',
'14','15','16','17','18','19','20','21'],
usecols=['date','pm25','pm10','temp','hum'])
#data.drop(["Time","durP1","ratioP1","P1","durP2","SDS_P1","SDS_P2","BMP_temperature","BMP_pressure","BME280_temperature","BME280_humidity","BME280_pressure","Min_cycle","Max_cycle","Samples","Signal"],axis = 1, inplace = True)
print(data)
time_passed = time.time() - start_time
print('Import took %s seconds' % time_passed)
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
from jd_assistant import Assistant
if __name__ == '__main__':
"""
Important note: this is one of the example scripts. See the link below for the usage tutorial 👇
https://github.com/tychxn/jd-assistant/wiki/1.-%E4%BA%AC%E4%B8%9C%E6%8A%A2%E8%B4%AD%E5%8A%A9%E6%89%8B%E7%94%A8%E6%B3%95
"""
# Execute a reservation flash-sale purchase
# 5 parameters:
# sku_id: product id
# buy_time: time to place the order, e.g. '2019-11-10 22:41:30.000'
# retry: number of purchase attempts, optional, default 4
# interval: interval between attempts, optional, default 4 seconds
# num: quantity to buy, optional, default 1
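# A rough sketch of the reservation purchase described above (hypothetical illustration --
# confirm the exact method name and signature against the wiki linked above before use):
# asst.exec_reserve_seckill_by_time(sku_id='123456', buy_time='2019-11-10 22:41:30.000',
#                                   retry=4, interval=4, num=1)
# The code below instead buys the listed items as soon as they come back in stock.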
sku_ids = '56657322838:1,56655493806:1,56655493809:1,56657322841:1,100010159778:1,100010159774:1,19028117913:1,65425816569:1,10183475858:1,1835968:1,1835967:1,11609510701:1,41286382373:1,41286382376:1,41286382378:1,100009442472:1,100009445348:1,100006444236:1,100009394518:1,100005294853:1,7263128:1,7498167:1,51137726168:1,51137726169:1,851157:1,100005151507:1,100010159728:1,100010159728:1,100010159802:1,62268661448:1,7498167:1,5724402:1,100009450890:1,25411367334:1,2291586:1,1060200558:1,100006784140:1,3842341:1,4492421:1,100011294222:1,100004142807:1,100002824548:1' # product ids (sku_id:quantity pairs)
area = '19_1601_3633' # area id
asst = Assistant() # initialize the assistant
asst.login_by_QRcode() # log in by scanning a QR code
asst.buy_item_in_stock(sku_ids=sku_ids, area=area, wait_all=False, stock_interval=5)
|
from django.urls import path
from petstagram.pets.views import PetsListView, CreatePetView, UpdatePetView, DeletePetView, PetDetailsView, \
LikePetView, CommentPetView # like_pet, details_or_comment_pet, list_pets, create_pet, edit_pet, delete_pet
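# Each commented-out route below keeps the earlier function-based view for reference;
# the active routes use the class-based views imported above.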
urlpatterns = [
# path('', list_pets, name='list pets'),
path('', PetsListView.as_view(), name='list pets'),
# path('detail/<int:pk>/', details_or_comment_pet, name='pet details or comment'),
path('detail/<int:pk>/', PetDetailsView.as_view(), name='pet details'),
# path('like/<int:pk>/', like_pet, name='like pet'),
path('like/<int:pk>/', LikePetView.as_view(), name='like pet'),
# path('edit/<int:pk>/', edit_pet, name='edit pet'),
path('edit/<int:pk>/', UpdatePetView.as_view(), name='edit pet'),
# path('delete/<int:pk>/', delete_pet, name='delete pet'),
path('delete/<int:pk>/', DeletePetView.as_view(), name='delete pet'),
# path('create/', create_pet, name='create pet'),
path('create/', CreatePetView.as_view(), name='create pet'),
path('comment/<int:pk>/', CommentPetView.as_view(), name='comment pet'),
]
|
from watchmen.topic.topic import Topic
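# No-op stubs: topic index/table creation and updates are left unimplemented here,
# presumably because this storage backend needs no explicit schema management.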
def create_topic_index(topic: Topic):
pass
def __create_topic_table(topic: Topic, config=None):
pass
def update_topic_index(topic: Topic):
pass
def create_topic_table(topic: Topic):
pass
|
#!/usr/bin/env python
from math import fabs
import rospy
import actionlib
from std_msgs.msg import Float32
from chapter15.msg import RotationAction, RotationFeedback, RotationResult
from chapter15.srv import Light, LightResponse
from fake_actuator import FakeActuator
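# The callbacks below rely on the module-level globals `actuator` and `a` (the action server),
# both of which are created in the __main__ block at the bottom of this file.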
def volume_callback(msg):
volume = min(100, max(0, int(msg.data * 100)))
print('Setting volume to {}'.format(volume))
actuator.volume = volume
def light_callback(request):
actuator.toggle_light(request.on)
print('Toggled light to {}'.format(request.on))
return LightResponse(actuator.light_on)
def rotation_callback(goal):
feedback = RotationFeedback()
result = RotationResult()
print('Setting actuator position to {}'.format(goal.orientation))
actuator.set_position(goal.orientation)
success = True
rate = rospy.Rate(10)
while fabs(goal.orientation - actuator.position) > 0.01:
if a.is_preempt_requested():
print('Actuator movement was preempted')
success = False
break
print('Current actuator position: {}'.format(actuator.position))
feedback.current_orientation = actuator.position
a.publish_feedback(feedback)
rate.sleep()
result.final_orientation = actuator.position
if success:
print('Actuator movement succeeded; final orientation is {}'.format(
actuator.position))
a.set_succeeded(result)
else:
print('Actuator movement failed; final orientation is {}'.format(
actuator.position))
a.set_preempted(result)
if __name__ == '__main__':
actuator = FakeActuator()
# Initialize the node
rospy.init_node('fake')
# Topic for the volume
t = rospy.Subscriber('fake/volume', Float32, volume_callback)
# Service for the light
s = rospy.Service('fake/light', Light, light_callback)
# Action for the position
a = actionlib.SimpleActionServer('fake/position', RotationAction,
execute_cb=rotation_callback,
auto_start=False)
a.start()
# Start everything
rospy.spin()
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: plugins/shuffle/protobuf/message.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='plugins/shuffle/protobuf/message.proto',
package='',
syntax='proto3',
serialized_pb=_b('\n&plugins/shuffle/protobuf/message.proto\"@\n\x06Signed\x12\x17\n\x06packet\x18\x01 \x01(\x0b\x32\x07.Packet\x12\x1d\n\tsignature\x18\x02 \x01(\x0b\x32\n.Signature\"\xc6\x01\n\x06Packet\x12\x0f\n\x07session\x18\x01 \x01(\x0c\x12\x0e\n\x06number\x18\x02 \x01(\r\x12\"\n\x08\x66rom_key\x18\x03 \x01(\x0b\x32\x10.VerificationKey\x12 \n\x06to_key\x18\x04 \x01(\x0b\x32\x10.VerificationKey\x12\x15\n\x05phase\x18\x05 \x01(\x0e\x32\x06.Phase\x12\x19\n\x07message\x18\x06 \x01(\x0b\x32\x08.Message\x12#\n\x0cregistration\x18\x07 \x01(\x0b\x32\r.Registration\"\x16\n\x05\x43oins\x12\r\n\x05\x63oins\x18\x01 \x03(\t\"9\n\nSignatures\x12\x0c\n\x04utxo\x18\x01 \x01(\t\x12\x1d\n\tsignature\x18\x02 \x01(\x0b\x32\n.Signature\"\xf8\x01\n\x07Message\x12\x19\n\x07\x61\x64\x64ress\x18\x01 \x01(\x0b\x32\x08.Address\x12\x1b\n\x03key\x18\x02 \x01(\x0b\x32\x0e.EncryptionKey\x12\x13\n\x04hash\x18\x03 \x01(\x0b\x32\x05.Hash\x12\x1f\n\nsignatures\x18\x04 \x03(\x0b\x32\x0b.Signatures\x12\x0b\n\x03str\x18\x05 \x01(\t\x12\x15\n\x05\x62lame\x18\x06 \x01(\x0b\x32\x06.Blame\x12$\n\x06inputs\x18\x07 \x03(\x0b\x32\x14.Message.InputsEntry\x1a\x35\n\x0bInputsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x15\n\x05value\x18\x02 \x01(\x0b\x32\x06.Coins:\x02\x38\x01\"\x1a\n\x07\x41\x64\x64ress\x12\x0f\n\x07\x61\x64\x64ress\x18\x01 \x01(\t\"K\n\x0cRegistration\x12\x0e\n\x06\x61mount\x18\x01 \x01(\x04\x12\x1a\n\x04type\x18\x02 \x01(\x0e\x32\x0c.ShuffleType\x12\x0f\n\x07version\x18\x03 \x01(\x04\"\x1e\n\x0fVerificationKey\x12\x0b\n\x03key\x18\x01 \x01(\t\"\x1c\n\rEncryptionKey\x12\x0b\n\x03key\x18\x01 \x01(\t\",\n\rDecryptionKey\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x0e\n\x06public\x18\x02 \x01(\t\"\x14\n\x04Hash\x12\x0c\n\x04hash\x18\x01 \x01(\x0c\"\x1e\n\tSignature\x12\x11\n\tsignature\x18\x01 \x01(\x0c\"\"\n\x0bTransaction\x12\x13\n\x0btransaction\x18\x01 \x01(\x0c\"\xb9\x01\n\x05\x42lame\x12\x17\n\x06reason\x18\x01 \x01(\x0e\x32\x07.Reason\x12!\n\x07\x61\x63\x63used\x18\x02 \x01(\x0b\x32\x10.VerificationKey\x12\x1b\n\x03key\x18\x03 \x01(\x0b\x32\x0e.DecryptionKey\x12!\n\x0btransaction\x18\x04 \x01(\x0b\x32\x0c.Transaction\x12\x19\n\x07invalid\x18\x05 \x01(\x0b\x32\x08.Invalid\x12\x19\n\x07packets\x18\x06 \x01(\x0b\x32\x08.Packets\"\x1a\n\x07Invalid\x12\x0f\n\x07invalid\x18\x01 \x01(\x0c\"(\n\x06Inputs\x12\x0f\n\x07\x61\x64\x64ress\x18\x01 \x01(\t\x12\r\n\x05\x63oins\x18\x02 \x03(\t\"\"\n\x07Packets\x12\x17\n\x06packet\x18\x01 \x03(\x0b\x32\x07.Signed*\x90\x01\n\x05Phase\x12\x08\n\x04NONE\x10\x00\x12\x10\n\x0c\x41NNOUNCEMENT\x10\x01\x12\x0b\n\x07SHUFFLE\x10\x02\x12\r\n\tBROADCAST\x10\x03\x12\x16\n\x12\x45QUIVOCATION_CHECK\x10\x04\x12\x0b\n\x07SIGNING\x10\x05\x12\x1f\n\x1bVERIFICATION_AND_SUBMISSION\x10\x06\x12\t\n\x05\x42LAME\x10\x07*$\n\x0bShuffleType\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\x08\n\x04\x44UST\x10\x01*\xc6\x01\n\x06Reason\x12\x15\n\x11INSUFFICIENTFUNDS\x10\x00\x12\x0f\n\x0b\x44OUBLESPEND\x10\x01\x12\x17\n\x13\x45QUIVOCATIONFAILURE\x10\x02\x12\x12\n\x0eSHUFFLEFAILURE\x10\x03\x12!\n\x1dSHUFFLEANDEQUIVOCATIONFAILURE\x10\x04\x12\x14\n\x10INVALIDSIGNATURE\x10\x05\x12\x11\n\rMISSINGOUTPUT\x10\x06\x12\x08\n\x04LIAR\x10\x07\x12\x11\n\rINVALIDFORMAT\x10\x08\x62\x06proto3')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_PHASE = _descriptor.EnumDescriptor(
name='Phase',
full_name='Phase',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='NONE', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ANNOUNCEMENT', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SHUFFLE', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='BROADCAST', index=3, number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='EQUIVOCATION_CHECK', index=4, number=4,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SIGNING', index=5, number=5,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='VERIFICATION_AND_SUBMISSION', index=6, number=6,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='BLAME', index=7, number=7,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=1241,
serialized_end=1385,
)
_sym_db.RegisterEnumDescriptor(_PHASE)
Phase = enum_type_wrapper.EnumTypeWrapper(_PHASE)
_SHUFFLETYPE = _descriptor.EnumDescriptor(
name='ShuffleType',
full_name='ShuffleType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='DEFAULT', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DUST', index=1, number=1,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=1387,
serialized_end=1423,
)
_sym_db.RegisterEnumDescriptor(_SHUFFLETYPE)
ShuffleType = enum_type_wrapper.EnumTypeWrapper(_SHUFFLETYPE)
_REASON = _descriptor.EnumDescriptor(
name='Reason',
full_name='Reason',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='INSUFFICIENTFUNDS', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DOUBLESPEND', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='EQUIVOCATIONFAILURE', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SHUFFLEFAILURE', index=3, number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SHUFFLEANDEQUIVOCATIONFAILURE', index=4, number=4,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INVALIDSIGNATURE', index=5, number=5,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MISSINGOUTPUT', index=6, number=6,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='LIAR', index=7, number=7,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INVALIDFORMAT', index=8, number=8,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=1426,
serialized_end=1624,
)
_sym_db.RegisterEnumDescriptor(_REASON)
Reason = enum_type_wrapper.EnumTypeWrapper(_REASON)
NONE = 0
ANNOUNCEMENT = 1
SHUFFLE = 2
BROADCAST = 3
EQUIVOCATION_CHECK = 4
SIGNING = 5
VERIFICATION_AND_SUBMISSION = 6
BLAME = 7
DEFAULT = 0
DUST = 1
INSUFFICIENTFUNDS = 0
DOUBLESPEND = 1
EQUIVOCATIONFAILURE = 2
SHUFFLEFAILURE = 3
SHUFFLEANDEQUIVOCATIONFAILURE = 4
INVALIDSIGNATURE = 5
MISSINGOUTPUT = 6
LIAR = 7
INVALIDFORMAT = 8
_SIGNED = _descriptor.Descriptor(
name='Signed',
full_name='Signed',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='packet', full_name='Signed.packet', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='signature', full_name='Signed.signature', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=42,
serialized_end=106,
)
_PACKET = _descriptor.Descriptor(
name='Packet',
full_name='Packet',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='session', full_name='Packet.session', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='number', full_name='Packet.number', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='from_key', full_name='Packet.from_key', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='to_key', full_name='Packet.to_key', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='phase', full_name='Packet.phase', index=4,
number=5, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='message', full_name='Packet.message', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='registration', full_name='Packet.registration', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=109,
serialized_end=307,
)
_COINS = _descriptor.Descriptor(
name='Coins',
full_name='Coins',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='coins', full_name='Coins.coins', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=309,
serialized_end=331,
)
_SIGNATURES = _descriptor.Descriptor(
name='Signatures',
full_name='Signatures',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='utxo', full_name='Signatures.utxo', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='signature', full_name='Signatures.signature', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=333,
serialized_end=390,
)
_MESSAGE_INPUTSENTRY = _descriptor.Descriptor(
name='InputsEntry',
full_name='Message.InputsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='Message.InputsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='value', full_name='Message.InputsEntry.value', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=588,
serialized_end=641,
)
_MESSAGE = _descriptor.Descriptor(
name='Message',
full_name='Message',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='address', full_name='Message.address', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='key', full_name='Message.key', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='hash', full_name='Message.hash', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='signatures', full_name='Message.signatures', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='str', full_name='Message.str', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='blame', full_name='Message.blame', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='inputs', full_name='Message.inputs', index=6,
number=7, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_MESSAGE_INPUTSENTRY, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=393,
serialized_end=641,
)
_ADDRESS = _descriptor.Descriptor(
name='Address',
full_name='Address',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='address', full_name='Address.address', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=643,
serialized_end=669,
)
_REGISTRATION = _descriptor.Descriptor(
name='Registration',
full_name='Registration',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='amount', full_name='Registration.amount', index=0,
number=1, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='type', full_name='Registration.type', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='version', full_name='Registration.version', index=2,
number=3, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=671,
serialized_end=746,
)
_VERIFICATIONKEY = _descriptor.Descriptor(
name='VerificationKey',
full_name='VerificationKey',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='VerificationKey.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=748,
serialized_end=778,
)
_ENCRYPTIONKEY = _descriptor.Descriptor(
name='EncryptionKey',
full_name='EncryptionKey',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='EncryptionKey.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=780,
serialized_end=808,
)
_DECRYPTIONKEY = _descriptor.Descriptor(
name='DecryptionKey',
full_name='DecryptionKey',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='DecryptionKey.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='public', full_name='DecryptionKey.public', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=810,
serialized_end=854,
)
_HASH = _descriptor.Descriptor(
name='Hash',
full_name='Hash',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='hash', full_name='Hash.hash', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=856,
serialized_end=876,
)
_SIGNATURE = _descriptor.Descriptor(
name='Signature',
full_name='Signature',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='signature', full_name='Signature.signature', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=878,
serialized_end=908,
)
_TRANSACTION = _descriptor.Descriptor(
name='Transaction',
full_name='Transaction',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='transaction', full_name='Transaction.transaction', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=910,
serialized_end=944,
)
_BLAME = _descriptor.Descriptor(
name='Blame',
full_name='Blame',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='reason', full_name='Blame.reason', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='accused', full_name='Blame.accused', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='key', full_name='Blame.key', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='transaction', full_name='Blame.transaction', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='invalid', full_name='Blame.invalid', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='packets', full_name='Blame.packets', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=947,
serialized_end=1132,
)
_INVALID = _descriptor.Descriptor(
name='Invalid',
full_name='Invalid',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='invalid', full_name='Invalid.invalid', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1134,
serialized_end=1160,
)
_INPUTS = _descriptor.Descriptor(
name='Inputs',
full_name='Inputs',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='address', full_name='Inputs.address', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='coins', full_name='Inputs.coins', index=1,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1162,
serialized_end=1202,
)
_PACKETS = _descriptor.Descriptor(
name='Packets',
full_name='Packets',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='packet', full_name='Packets.packet', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1204,
serialized_end=1238,
)
_SIGNED.fields_by_name['packet'].message_type = _PACKET
_SIGNED.fields_by_name['signature'].message_type = _SIGNATURE
_PACKET.fields_by_name['from_key'].message_type = _VERIFICATIONKEY
_PACKET.fields_by_name['to_key'].message_type = _VERIFICATIONKEY
_PACKET.fields_by_name['phase'].enum_type = _PHASE
_PACKET.fields_by_name['message'].message_type = _MESSAGE
_PACKET.fields_by_name['registration'].message_type = _REGISTRATION
_SIGNATURES.fields_by_name['signature'].message_type = _SIGNATURE
_MESSAGE_INPUTSENTRY.fields_by_name['value'].message_type = _COINS
_MESSAGE_INPUTSENTRY.containing_type = _MESSAGE
_MESSAGE.fields_by_name['address'].message_type = _ADDRESS
_MESSAGE.fields_by_name['key'].message_type = _ENCRYPTIONKEY
_MESSAGE.fields_by_name['hash'].message_type = _HASH
_MESSAGE.fields_by_name['signatures'].message_type = _SIGNATURES
_MESSAGE.fields_by_name['blame'].message_type = _BLAME
_MESSAGE.fields_by_name['inputs'].message_type = _MESSAGE_INPUTSENTRY
_REGISTRATION.fields_by_name['type'].enum_type = _SHUFFLETYPE
_BLAME.fields_by_name['reason'].enum_type = _REASON
_BLAME.fields_by_name['accused'].message_type = _VERIFICATIONKEY
_BLAME.fields_by_name['key'].message_type = _DECRYPTIONKEY
_BLAME.fields_by_name['transaction'].message_type = _TRANSACTION
_BLAME.fields_by_name['invalid'].message_type = _INVALID
_BLAME.fields_by_name['packets'].message_type = _PACKETS
_PACKETS.fields_by_name['packet'].message_type = _SIGNED
DESCRIPTOR.message_types_by_name['Signed'] = _SIGNED
DESCRIPTOR.message_types_by_name['Packet'] = _PACKET
DESCRIPTOR.message_types_by_name['Coins'] = _COINS
DESCRIPTOR.message_types_by_name['Signatures'] = _SIGNATURES
DESCRIPTOR.message_types_by_name['Message'] = _MESSAGE
DESCRIPTOR.message_types_by_name['Address'] = _ADDRESS
DESCRIPTOR.message_types_by_name['Registration'] = _REGISTRATION
DESCRIPTOR.message_types_by_name['VerificationKey'] = _VERIFICATIONKEY
DESCRIPTOR.message_types_by_name['EncryptionKey'] = _ENCRYPTIONKEY
DESCRIPTOR.message_types_by_name['DecryptionKey'] = _DECRYPTIONKEY
DESCRIPTOR.message_types_by_name['Hash'] = _HASH
DESCRIPTOR.message_types_by_name['Signature'] = _SIGNATURE
DESCRIPTOR.message_types_by_name['Transaction'] = _TRANSACTION
DESCRIPTOR.message_types_by_name['Blame'] = _BLAME
DESCRIPTOR.message_types_by_name['Invalid'] = _INVALID
DESCRIPTOR.message_types_by_name['Inputs'] = _INPUTS
DESCRIPTOR.message_types_by_name['Packets'] = _PACKETS
DESCRIPTOR.enum_types_by_name['Phase'] = _PHASE
DESCRIPTOR.enum_types_by_name['ShuffleType'] = _SHUFFLETYPE
DESCRIPTOR.enum_types_by_name['Reason'] = _REASON
Signed = _reflection.GeneratedProtocolMessageType('Signed', (_message.Message,), dict(
DESCRIPTOR = _SIGNED,
__module__ = 'plugins.shuffle.protobuf.message_pb2'
# @@protoc_insertion_point(class_scope:Signed)
))
_sym_db.RegisterMessage(Signed)
Packet = _reflection.GeneratedProtocolMessageType('Packet', (_message.Message,), dict(
DESCRIPTOR = _PACKET,
__module__ = 'plugins.shuffle.protobuf.message_pb2'
# @@protoc_insertion_point(class_scope:Packet)
))
_sym_db.RegisterMessage(Packet)
Coins = _reflection.GeneratedProtocolMessageType('Coins', (_message.Message,), dict(
DESCRIPTOR = _COINS,
__module__ = 'plugins.shuffle.protobuf.message_pb2'
# @@protoc_insertion_point(class_scope:Coins)
))
_sym_db.RegisterMessage(Coins)
Signatures = _reflection.GeneratedProtocolMessageType('Signatures', (_message.Message,), dict(
DESCRIPTOR = _SIGNATURES,
__module__ = 'plugins.shuffle.protobuf.message_pb2'
# @@protoc_insertion_point(class_scope:Signatures)
))
_sym_db.RegisterMessage(Signatures)
Message = _reflection.GeneratedProtocolMessageType('Message', (_message.Message,), dict(
InputsEntry = _reflection.GeneratedProtocolMessageType('InputsEntry', (_message.Message,), dict(
DESCRIPTOR = _MESSAGE_INPUTSENTRY,
__module__ = 'plugins.shuffle.protobuf.message_pb2'
# @@protoc_insertion_point(class_scope:Message.InputsEntry)
))
,
DESCRIPTOR = _MESSAGE,
__module__ = 'plugins.shuffle.protobuf.message_pb2'
# @@protoc_insertion_point(class_scope:Message)
))
_sym_db.RegisterMessage(Message)
_sym_db.RegisterMessage(Message.InputsEntry)
Address = _reflection.GeneratedProtocolMessageType('Address', (_message.Message,), dict(
DESCRIPTOR = _ADDRESS,
__module__ = 'plugins.shuffle.protobuf.message_pb2'
# @@protoc_insertion_point(class_scope:Address)
))
_sym_db.RegisterMessage(Address)
Registration = _reflection.GeneratedProtocolMessageType('Registration', (_message.Message,), dict(
DESCRIPTOR = _REGISTRATION,
__module__ = 'plugins.shuffle.protobuf.message_pb2'
# @@protoc_insertion_point(class_scope:Registration)
))
_sym_db.RegisterMessage(Registration)
VerificationKey = _reflection.GeneratedProtocolMessageType('VerificationKey', (_message.Message,), dict(
DESCRIPTOR = _VERIFICATIONKEY,
__module__ = 'plugins.shuffle.protobuf.message_pb2'
# @@protoc_insertion_point(class_scope:VerificationKey)
))
_sym_db.RegisterMessage(VerificationKey)
EncryptionKey = _reflection.GeneratedProtocolMessageType('EncryptionKey', (_message.Message,), dict(
DESCRIPTOR = _ENCRYPTIONKEY,
__module__ = 'plugins.shuffle.protobuf.message_pb2'
# @@protoc_insertion_point(class_scope:EncryptionKey)
))
_sym_db.RegisterMessage(EncryptionKey)
DecryptionKey = _reflection.GeneratedProtocolMessageType('DecryptionKey', (_message.Message,), dict(
DESCRIPTOR = _DECRYPTIONKEY,
__module__ = 'plugins.shuffle.protobuf.message_pb2'
# @@protoc_insertion_point(class_scope:DecryptionKey)
))
_sym_db.RegisterMessage(DecryptionKey)
Hash = _reflection.GeneratedProtocolMessageType('Hash', (_message.Message,), dict(
DESCRIPTOR = _HASH,
__module__ = 'plugins.shuffle.protobuf.message_pb2'
# @@protoc_insertion_point(class_scope:Hash)
))
_sym_db.RegisterMessage(Hash)
Signature = _reflection.GeneratedProtocolMessageType('Signature', (_message.Message,), dict(
DESCRIPTOR = _SIGNATURE,
__module__ = 'plugins.shuffle.protobuf.message_pb2'
# @@protoc_insertion_point(class_scope:Signature)
))
_sym_db.RegisterMessage(Signature)
Transaction = _reflection.GeneratedProtocolMessageType('Transaction', (_message.Message,), dict(
DESCRIPTOR = _TRANSACTION,
__module__ = 'plugins.shuffle.protobuf.message_pb2'
# @@protoc_insertion_point(class_scope:Transaction)
))
_sym_db.RegisterMessage(Transaction)
Blame = _reflection.GeneratedProtocolMessageType('Blame', (_message.Message,), dict(
DESCRIPTOR = _BLAME,
__module__ = 'plugins.shuffle.protobuf.message_pb2'
# @@protoc_insertion_point(class_scope:Blame)
))
_sym_db.RegisterMessage(Blame)
Invalid = _reflection.GeneratedProtocolMessageType('Invalid', (_message.Message,), dict(
DESCRIPTOR = _INVALID,
__module__ = 'plugins.shuffle.protobuf.message_pb2'
# @@protoc_insertion_point(class_scope:Invalid)
))
_sym_db.RegisterMessage(Invalid)
Inputs = _reflection.GeneratedProtocolMessageType('Inputs', (_message.Message,), dict(
DESCRIPTOR = _INPUTS,
__module__ = 'plugins.shuffle.protobuf.message_pb2'
# @@protoc_insertion_point(class_scope:Inputs)
))
_sym_db.RegisterMessage(Inputs)
Packets = _reflection.GeneratedProtocolMessageType('Packets', (_message.Message,), dict(
DESCRIPTOR = _PACKETS,
__module__ = 'plugins.shuffle.protobuf.message_pb2'
# @@protoc_insertion_point(class_scope:Packets)
))
_sym_db.RegisterMessage(Packets)
_MESSAGE_INPUTSENTRY.has_options = True
_MESSAGE_INPUTSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
# @@protoc_insertion_point(module_scope)
|
# encoding: utf-8
import json
import random
from datetime import timedelta
import pytest
from api.authenticator import BasicAuthenticationProvider
from api.circulation import CirculationAPI, FulfillmentInfo, HoldInfo, LoanInfo
from api.circulation_exceptions import *
from api.config import Configuration, temp_config
from api.overdrive import (
MockOverdriveAPI,
NewTitlesOverdriveCollectionMonitor,
OverdriveAPI,
OverdriveCirculationMonitor,
OverdriveCollectionReaper,
OverdriveFormatSweep,
OverdriveManifestFulfillmentInfo,
RecentOverdriveCollectionMonitor,
)
from core.metadata_layer import TimestampData
from core.model import (
CirculationEvent,
ConfigurationSetting,
DataSource,
DeliveryMechanism,
Edition,
ExternalIntegration,
Identifier,
LicensePool,
MediaTypes,
Representation,
RightsStatus,
)
from core.testing import DatabaseTest, DummyHTTPClient, MockRequestsResponse
from core.util.datetime_helpers import datetime_utc, utc_now
from . import sample_data
class OverdriveAPITest(DatabaseTest):
def setup_method(self):
super(OverdriveAPITest, self).setup_method()
library = self._default_library
self.collection = MockOverdriveAPI.mock_collection(self._db)
self.circulation = CirculationAPI(
self._db, library, api_map={ExternalIntegration.OVERDRIVE: MockOverdriveAPI}
)
self.api = self.circulation.api_for_collection[self.collection.id]
@classmethod
def sample_data(cls, filename):
return sample_data(filename, "overdrive")
@classmethod
def sample_json(cls, filename):
data = cls.sample_data(filename)
return data, json.loads(data)
def error_message(self, error_code, message=None, token=None):
"""Create a JSON document that simulates the message served by
Overdrive given a certain error condition.
"""
message = message or self._str
token = token or self._str
data = dict(errorCode=error_code, message=message, token=token)
return json.dumps(data)
class TestOverdriveAPI(OverdriveAPITest):
def test_external_integration(self):
assert self.collection.external_integration == self.api.external_integration(
self._db
)
def test_lock_in_format(self):
# Verify which formats do or don't need to be locked in before
# fulfillment.
needs_lock_in = self.api.LOCK_IN_FORMATS
# Streaming and manifest-based formats are exempt; all
# other formats need lock-in.
exempt = list(self.api.STREAMING_FORMATS) + list(
self.api.MANIFEST_INTERNAL_FORMATS
)
for i in self.api.FORMATS:
if i not in exempt:
assert i in needs_lock_in
for i in exempt:
assert i not in needs_lock_in
def test__run_self_tests(self):
# Verify that OverdriveAPI._run_self_tests() calls the right
# methods.
class Mock(MockOverdriveAPI):
"Mock every method used by OverdriveAPI._run_self_tests."
# First we will call check_creds() to get a fresh credential.
mock_credential = object()
def check_creds(self, force_refresh=False):
self.check_creds_called_with = force_refresh
return self.mock_credential
# Then we will call get_advantage_accounts().
mock_advantage_accounts = [object(), object()]
def get_advantage_accounts(self):
return self.mock_advantage_accounts
# Then we will call get() on the _all_products_link.
def get(self, url, extra_headers, exception_on_401=False):
self.get_called_with = (url, extra_headers, exception_on_401)
return 200, {}, json.dumps(dict(totalItems=2010))
# Finally, for every library associated with this
# collection, we'll call get_patron_credential() using
# the credentials of that library's test patron.
mock_patron_credential = object()
get_patron_credential_called_with = []
def get_patron_credential(self, patron, pin):
self.get_patron_credential_called_with.append((patron, pin))
return self.mock_patron_credential
# Now let's make sure two Libraries have access to this
# Collection -- one library with a default patron and one
# without.
no_default_patron = self._library()
self.collection.libraries.append(no_default_patron)
with_default_patron = self._default_library
integration = self._external_integration(
"api.simple_authentication",
ExternalIntegration.PATRON_AUTH_GOAL,
libraries=[with_default_patron],
)
p = BasicAuthenticationProvider
integration.setting(p.TEST_IDENTIFIER).value = "username1"
integration.setting(p.TEST_PASSWORD).value = "password1"
# Now that everything is set up, run the self-test.
api = Mock(self._db, self.collection)
results = sorted(api._run_self_tests(self._db), key=lambda x: x.name)
[
no_patron_credential,
default_patron_credential,
global_privileges,
collection_size,
advantage,
] = results
# Verify that each test method was called and returned the
# expected SelfTestResult object.
assert (
"Checking global Client Authentication privileges" == global_privileges.name
)
assert True == global_privileges.success
assert api.mock_credential == global_privileges.result
assert "Looking up Overdrive Advantage accounts" == advantage.name
assert True == advantage.success
assert "Found 2 Overdrive Advantage account(s)." == advantage.result
assert "Counting size of collection" == collection_size.name
assert True == collection_size.success
assert "2010 item(s) in collection" == collection_size.result
url, headers, error_on_401 = api.get_called_with
assert api._all_products_link == url
assert (
"Acquiring test patron credentials for library %s" % no_default_patron.name
== no_patron_credential.name
)
assert False == no_patron_credential.success
assert "Library has no test patron configured." == str(
no_patron_credential.exception
)
assert (
"Checking Patron Authentication privileges, using test patron for library %s"
% with_default_patron.name
== default_patron_credential.name
)
assert True == default_patron_credential.success
assert api.mock_patron_credential == default_patron_credential.result
# Although there are two libraries associated with this
# collection, get_patron_credential was only called once, because
# one of the libraries doesn't have a default patron.
[(patron1, password1)] = api.get_patron_credential_called_with
assert "username1" == patron1.authorization_identifier
assert "password1" == password1
def test_run_self_tests_short_circuit(self):
"""If OverdriveAPI.check_creds can't get credentials, the rest of
the self-tests aren't even run.
This probably doesn't matter much, because if check_creds doesn't
work we won't be able to instantiate the OverdriveAPI class.
"""
def explode(*args, **kwargs):
raise Exception("Failure!")
self.api.check_creds = explode
# Only one test will be run.
[check_creds] = self.api._run_self_tests(self._db)
assert "Failure!" == str(check_creds.exception)
def test_default_notification_email_address(self):
"""Test the ability of the Overdrive API to detect an email address
previously given by the patron to Overdrive for the purpose of
notifications.
"""
ignore, patron_with_email = self.sample_json("patron_info.json")
self.api.queue_response(200, content=patron_with_email)
patron = self._patron()
# The site default for notification emails will never be used.
configuration_setting = ConfigurationSetting.for_library(
Configuration.DEFAULT_NOTIFICATION_EMAIL_ADDRESS, self._default_library
)
configuration_setting.value = "[email protected]"
# If the patron has used a particular email address to put
# books on hold, use that email address, not the site default.
assert "[email protected]" == self.api.default_notification_email_address(
patron, "pin"
)
# If the patron's email address according to Overdrive _is_
# the site default, it is ignored. This can only happen if
# this patron placed a hold using an older version of the
# circulation manager.
patron_with_email["lastHoldEmail"] = configuration_setting.value
self.api.queue_response(200, content=patron_with_email)
assert None == self.api.default_notification_email_address(patron, "pin")
# If the patron has never before put an Overdrive book on
# hold, their JSON object has no `lastHoldEmail` key. In this
# case we return None -- again, ignoring the site default.
patron_with_no_email = dict(patron_with_email)
del patron_with_no_email["lastHoldEmail"]
self.api.queue_response(200, content=patron_with_no_email)
assert None == self.api.default_notification_email_address(patron, "pin")
# If there's an error getting the information from Overdrive,
# we return None.
self.api.queue_response(404)
assert None == self.api.default_notification_email_address(patron, "pin")
def test_scope_string(self):
# scope_string() puts the website ID of the Overdrive
# integration and the ILS name associated with the library
# into the form expected by Overdrive.
expect = "websiteid:%s authorizationname:%s" % (
self.api.website_id.decode("utf-8"),
self.api.ils_name(self._default_library),
)
assert expect == self.api.scope_string(self._default_library)
def test_checkout(self):
# Verify the process of checking out a book.
patron = object()
pin = object()
pool = self._licensepool(edition=None, collection=self.collection)
identifier = pool.identifier
class Mock(MockOverdriveAPI):
MOCK_EXPIRATION_DATE = object()
PROCESS_CHECKOUT_ERROR_RESULT = Exception(
"exception in _process_checkout_error"
)
def __init__(self, *args, **kwargs):
super(Mock, self).__init__(*args, **kwargs)
self.extract_expiration_date_called_with = []
self._process_checkout_error_called_with = []
def extract_expiration_date(self, loan):
self.extract_expiration_date_called_with.append(loan)
return self.MOCK_EXPIRATION_DATE
def _process_checkout_error(self, patron, pin, licensepool, data):
self._process_checkout_error_called_with.append(
(patron, pin, licensepool, data)
)
result = self.PROCESS_CHECKOUT_ERROR_RESULT
if isinstance(result, Exception):
raise result
return result
# First, test the successful path.
api = Mock(self._db, self.collection)
api_response = json.dumps("some data")
api.queue_response(201, content=api_response)
loan = api.checkout(patron, pin, pool, "internal format is ignored")
# Verify that a good-looking patron request went out.
endpoint, ignore, kwargs = api.requests.pop()
assert endpoint.endswith("/me/checkouts")
assert patron == kwargs.pop("_patron")
extra_headers = kwargs.pop("extra_headers")
assert {"Content-Type": "application/json"} == extra_headers
data = json.loads(kwargs.pop("data"))
assert {
"fields": [{"name": "reserveId", "value": pool.identifier.identifier}]
} == data
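# For reference, the request body asserted above looks like this (a
# sketch reconstructed from the assertion itself, not captured from a
# live Overdrive call):
#   {"fields": [{"name": "reserveId", "value": "<the pool's Overdrive ID>"}]}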
# The API response was passed into extract_expiration_date.
#
# The most important thing here is not the content of the response but the
# fact that the response code was not 400.
assert "some data" == api.extract_expiration_date_called_with.pop()
# The return value is a LoanInfo object with all relevant info.
assert isinstance(loan, LoanInfo)
assert pool.collection.id == loan.collection_id
assert pool.data_source.name == loan.data_source_name
assert identifier.type == loan.identifier_type
assert identifier.identifier == loan.identifier
assert None == loan.start_date
assert api.MOCK_EXPIRATION_DATE == loan.end_date
# _process_checkout_error was not called
assert [] == api._process_checkout_error_called_with
# Now let's test error conditions.
# Most of the time, an error simply results in an exception.
api.queue_response(400, content=api_response)
with pytest.raises(Exception) as excinfo:
api.checkout(patron, pin, pool, "internal format is ignored")
assert "exception in _process_checkout_error" in str(excinfo.value)
assert (
patron,
pin,
pool,
"some data",
) == api._process_checkout_error_called_with.pop()
# However, if _process_checkout_error is able to recover from
# the error and ends up returning something, the return value
# is propagated from checkout().
api.PROCESS_CHECKOUT_ERROR_RESULT = "Actually, I was able to recover"
api.queue_response(400, content=api_response)
assert "Actually, I was able to recover" == api.checkout(
patron, pin, pool, "internal format is ignored"
)
assert (
patron,
pin,
pool,
"some data",
) == api._process_checkout_error_called_with.pop()
def test__process_checkout_error(self):
# Verify that _process_checkout_error handles common API-side errors,
# making follow-up API calls if necessary.
class Mock(MockOverdriveAPI):
MOCK_LOAN = object()
MOCK_EXPIRATION_DATE = object()
def __init__(self, *args, **kwargs):
super(Mock, self).__init__(*args, **kwargs)
self.update_licensepool_called_with = []
self.get_loan_called_with = []
self.extract_expiration_date_called_with = []
def update_licensepool(self, identifier):
self.update_licensepool_called_with.append(identifier)
def get_loan(self, patron, pin, identifier):
self.get_loan_called_with.append((patron, pin, identifier))
return self.MOCK_LOAN
def extract_expiration_date(self, loan):
self.extract_expiration_date_called_with.append(loan)
return self.MOCK_EXPIRATION_DATE
patron = object()
pin = object()
pool = self._licensepool(edition=None, collection=self.collection)
identifier = pool.identifier
api = Mock(self._db, self.collection)
m = api._process_checkout_error
# Most of the error handling is pretty straightforward.
def with_error_code(code):
# Simulate the response of the Overdrive API with a given error code.
error = dict(errorCode=code)
# Handle the error.
return m(patron, pin, pool, error)
# Errors not specifically known become generic CannotLoan exceptions.
with pytest.raises(CannotLoan) as excinfo:
with_error_code("WeirdError")
assert "WeirdError" in str(excinfo.value)
# If the data passed in to _process_checkout_error is not what
# the real Overdrive API would send, the error is even more
# generic.
with pytest.raises(CannotLoan) as excinfo:
m(patron, pin, pool, "Not a dict")
assert "Unknown Error" in str(excinfo.value)
with pytest.raises(CannotLoan) as excinfo:
m(patron, pin, pool, dict(errorCodePresent=False))
assert "Unknown Error" in str(excinfo.value)
# Some known errors become specific subclasses of CannotLoan.
pytest.raises(
PatronLoanLimitReached, with_error_code, "PatronHasExceededCheckoutLimit"
)
pytest.raises(
PatronLoanLimitReached,
with_error_code,
"PatronHasExceededCheckoutLimit_ForCPC",
)
# There are two cases where we need to make follow-up API
# requests as the result of a failure during the loan process.
# First, if the error is "NoCopiesAvailable", we know we have
# out-of-date availability information and we need to call
# update_licensepool before raising NoAvailableCopies().
pytest.raises(NoAvailableCopies, with_error_code, "NoCopiesAvailable")
assert identifier.identifier == api.update_licensepool_called_with.pop()
# If the error is "TitleAlreadyCheckedOut", then the problem
# is that the patron tried to take out a new loan instead of
# fulfilling an existing loan. In this case we don't raise an
# exception at all; we fulfill the loan and return a LoanInfo
# object.
loan = with_error_code("TitleAlreadyCheckedOut")
# get_loan was called with the patron's details.
assert (patron, pin, identifier.identifier) == api.get_loan_called_with.pop()
# extract_expiration_date was called on the return value of get_loan.
assert api.MOCK_LOAN == api.extract_expiration_date_called_with.pop()
# And a LoanInfo was created with all relevant information.
assert isinstance(loan, LoanInfo)
assert pool.collection.id == loan.collection_id
assert pool.data_source.name == loan.data_source_name
assert identifier.type == loan.identifier_type
assert identifier.identifier == loan.identifier
assert None == loan.start_date
assert api.MOCK_EXPIRATION_DATE == loan.end_date
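# To recap the error handling exercised above (only the cases asserted
# here, not an exhaustive list of Overdrive error codes):
#   PatronHasExceededCheckoutLimit[_ForCPC] -> PatronLoanLimitReached
#   NoCopiesAvailable                       -> update_licensepool, then NoAvailableCopies
#   TitleAlreadyCheckedOut                  -> get_loan + LoanInfo, no exception
#   anything else, or malformed data        -> generic CannotLoan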
def test_extract_expiration_date(self):
# Test the code that finds and parses a loan expiration date.
m = OverdriveAPI.extract_expiration_date
# Success
assert datetime_utc(2020, 1, 2, 3, 4, 5) == m(
dict(expires="2020-01-02T03:04:05Z")
)
# Various failure cases.
assert None == m(dict(expiresPresent=False))
assert None == m(dict(expires="Wrong date format"))
assert None == m("Not a dict")
assert None == m(None)
def test_place_hold(self):
# Verify that an appropriate request is made to HOLDS_ENDPOINT
# to create a hold.
#
# The request will include different form fields depending on
# whether default_notification_email_address returns something.
class Mock(MockOverdriveAPI):
def __init__(self, *args, **kwargs):
super(Mock, self).__init__(*args, **kwargs)
self.DEFAULT_NOTIFICATION_EMAIL_ADDRESS = None
def default_notification_email_address(self, patron, pin):
self.default_notification_email_address_called_with = (patron, pin)
return self.DEFAULT_NOTIFICATION_EMAIL_ADDRESS
def fill_out_form(self, **form_fields):
# Record the form fields and return some dummy values.
self.fill_out_form_called_with = form_fields
return "headers", "filled-out form"
def patron_request(self, *args, **kwargs):
# Pretend to make a request to an API endpoint.
self.patron_request_called_with = (args, kwargs)
return "A mock response"
def process_place_hold_response(self, response, patron, pin, licensepool):
self.process_place_hold_response_called_with = (
response,
patron,
pin,
licensepool,
)
return "OK, I processed it."
# First, test the case where no notification email address is
# provided and there is no default.
patron = object()
pin = object()
pool = self._licensepool(edition=None, collection=self.collection)
api = Mock(self._db, self.collection)
response = api.place_hold(patron, pin, pool, None)
# Now we can trace the path of the input through the method calls.
# The patron and PIN were passed into
# default_notification_email_address.
assert (patron, pin) == api.default_notification_email_address_called_with
# The return value was None, and so 'ignoreHoldEmail' was
# added to the form to be filled out, rather than
# 'emailAddress' being added.
fields = api.fill_out_form_called_with
identifier = str(pool.identifier.identifier)
assert dict(ignoreHoldEmail=True, reserveId=identifier) == fields
# patron_request was called with the filled-out form and other
# information necessary to authenticate the request.
args, kwargs = api.patron_request_called_with
assert (patron, pin, api.HOLDS_ENDPOINT, "headers", "filled-out form") == args
assert {} == kwargs
# Finally, process_place_hold_response was called on
# the return value of patron_request
assert (
"A mock response",
patron,
pin,
pool,
) == api.process_place_hold_response_called_with
assert "OK, I processed it." == response
# Now we need to test two more cases.
#
# First, the patron has a holds notification address
# registered with Overdrive.
email = "[email protected]"
api.DEFAULT_NOTIFICATION_EMAIL_ADDRESS = email
response = api.place_hold(patron, pin, pool, None)
# Same result.
assert "OK, I processed it." == response
# Different variables were passed in to fill_out_form.
fields = api.fill_out_form_called_with
assert dict(emailAddress=email, reserveId=identifier) == fields
# Finally, test that when a specific address is passed in, it
# takes precedence over the patron's holds notification address.
response = api.place_hold(patron, pin, pool, "[email protected]")
assert "OK, I processed it." == response
fields = api.fill_out_form_called_with
assert dict(emailAddress="[email protected]", reserveId=identifier) == fields
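# To summarize the precedence just exercised: an explicitly supplied
# notification address wins, then the address Overdrive already has on
# file for the patron, and only when both are missing is
# ignoreHoldEmail=True sent in place of an emailAddress field.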
def test_process_place_hold_response(self):
# Verify that we can handle various error and non-error responses
# to a HOLDS_ENDPOINT request.
ignore, successful_hold = self.sample_json("successful_hold.json")
class Mock(MockOverdriveAPI):
def get_hold(self, patron, pin, overdrive_id):
# Return a sample hold representation rather than
# making another API request.
self.get_hold_called_with = (patron, pin, overdrive_id)
return successful_hold
api = Mock(self._db, self.collection)
def process_error_response(message):
# Attempt to process a response that resulted in an error.
if isinstance(message, (bytes, str)):
data = dict(errorCode=message)
else:
data = message
response = MockRequestsResponse(400, content=data)
return api.process_place_hold_response(response, None, None, None)
# Some error messages result in specific CirculationExceptions.
pytest.raises(CannotRenew, process_error_response, "NotWithinRenewalWindow")
pytest.raises(
PatronHoldLimitReached, process_error_response, "PatronExceededHoldLimit"
)
# An unrecognized error message results in a generic
# CannotHold.
pytest.raises(CannotHold, process_error_response, "SomeOtherError")
# Same if the error message is missing or the response can't be
# processed.
pytest.raises(CannotHold, process_error_response, dict())
pytest.raises(CannotHold, process_error_response, None)
# Same if the error code isn't in the 4xx or 2xx range
# (which shouldn't happen in real life).
response = MockRequestsResponse(999)
pytest.raises(
CannotHold, api.process_place_hold_response, response, None, None, None
)
# At this point patron and book details become important --
# we're going to return a HoldInfo object and potentially make
# another API request.
patron = self._patron()
pin = object()
licensepool = self._licensepool(edition=None)
# The remaining tests will end up running the same code on the
# same data, so they will return the same HoldInfo. Define a
# helper method to make this easier.
def assert_correct_holdinfo(x):
assert isinstance(x, HoldInfo)
assert licensepool.collection == x.collection(self._db)
assert licensepool.data_source.name == x.data_source_name
assert identifier.identifier == x.identifier
assert identifier.type == x.identifier_type
assert datetime_utc(2015, 3, 26, 11, 30, 29) == x.start_date
assert None == x.end_date
assert 1 == x.hold_position
# Test the case where the 'error' is that the book is already
# on hold.
already_on_hold = dict(errorCode="AlreadyOnWaitList")
response = MockRequestsResponse(400, content=already_on_hold)
result = api.process_place_hold_response(response, patron, pin, licensepool)
# get_hold() was called with the arguments we expect.
identifier = licensepool.identifier
assert (patron, pin, identifier.identifier) == api.get_hold_called_with
# The result was converted into a HoldInfo object. The
# effective result is exactly as if we had successfully put
# the book on hold.
assert_correct_holdinfo(result)
# Finally, let's test the case where there was no hold and now
# there is.
api.get_hold_called_with = None
response = MockRequestsResponse(200, content=successful_hold)
result = api.process_place_hold_response(response, patron, pin, licensepool)
assert_correct_holdinfo(result)
# Here, get_hold was _not_ called, because the hold didn't
# already exist.
assert None == api.get_hold_called_with
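# Recap of process_place_hold_response as tested here: AlreadyOnWaitList
# is treated as success (after a follow-up get_hold call), a 2xx response
# is success outright, and everything else -- unknown codes, malformed
# bodies, out-of-range status codes -- raises CannotHold or a more
# specific exception where one is known.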
def test_checkin(self):
class Mock(MockOverdriveAPI):
EARLY_RETURN_SUCCESS = False
def perform_early_return(self, *args):
self.perform_early_return_call = args
return self.EARLY_RETURN_SUCCESS
def patron_request(self, *args, **kwargs):
self.patron_request_call = (args, kwargs)
overdrive = Mock(self._db, self.collection)
overdrive.perform_early_return_call = None
# In most circumstances we do not bother calling
# perform_early_return; we just call patron_request.
pool = self._licensepool(None)
patron = self._patron()
pin = object()
expect_url = overdrive.endpoint(
overdrive.CHECKOUT_ENDPOINT, overdrive_id=pool.identifier.identifier
)
def assert_no_early_return():
"""Call this to verify that patron_request is
called within checkin() instead of perform_early_return.
"""
overdrive.checkin(patron, pin, pool)
# perform_early_return was not called.
assert None == overdrive.perform_early_return_call
# patron_request was called in an attempt to
# DELETE an active loan.
args, kwargs = overdrive.patron_request_call
assert (patron, pin, expect_url) == args
assert dict(method="DELETE") == kwargs
overdrive.patron_request_call = None
# If there is no loan, there is no perform_early_return.
assert_no_early_return()
# Same if the loan is not fulfilled...
loan, ignore = pool.loan_to(patron)
assert_no_early_return()
# If the loan is fulfilled but its LicensePoolDeliveryMechanism has
# no DeliveryMechanism for some reason...
loan.fulfillment = pool.delivery_mechanisms[0]
dm = loan.fulfillment.delivery_mechanism
loan.fulfillment.delivery_mechanism = None
assert_no_early_return()
# If the loan is fulfilled but the delivery mechanism uses DRM...
loan.fulfillment.delivery_mechanism = dm
assert_no_early_return()
# If the loan is fulfilled with a DRM-free delivery mechanism,
# perform_early_return _is_ called.
dm.drm_scheme = DeliveryMechanism.NO_DRM
overdrive.checkin(patron, pin, pool)
assert (patron, pin, loan) == overdrive.perform_early_return_call
# But if it fails, patron_request is _also_ called.
args, kwargs = overdrive.patron_request_call
assert (patron, pin, expect_url) == args
assert dict(method="DELETE") == kwargs
# Finally, if the loan is fulfilled with a DRM-free delivery mechanism
# and perform_early_return succeeds, patron_request_call is not
# called -- the title was already returned.
overdrive.patron_request_call = None
overdrive.EARLY_RETURN_SUCCESS = True
overdrive.checkin(patron, pin, pool)
assert (patron, pin, loan) == overdrive.perform_early_return_call
assert None == overdrive.patron_request_call
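# In short: checkin() only attempts an early return for loans fulfilled
# with a DRM-free delivery mechanism. In every other case -- and whenever
# the early return itself fails -- it falls back to DELETE-ing the loan
# through patron_request.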
def test_perform_early_return(self):
class Mock(MockOverdriveAPI):
EARLY_RETURN_URL = "http://early-return/"
def get_fulfillment_link(self, *args):
self.get_fulfillment_link_call = args
return ("http://fulfillment/", "content/type")
def _extract_early_return_url(self, *args):
self._extract_early_return_url_call = args
return self.EARLY_RETURN_URL
overdrive = Mock(self._db, self.collection)
# This patron has a loan.
pool = self._licensepool(None)
patron = self._patron()
pin = object()
loan, ignore = pool.loan_to(patron)
# The loan has been fulfilled and now the patron wants to
# do early return.
loan.fulfillment = pool.delivery_mechanisms[0]
# perform_early_return will make two HTTP requests via the mock HTTP client.
# The first will be to the fulfill link returned by our mock
# get_fulfillment_link. The response to this request is a
# redirect that includes an early return link.
http = DummyHTTPClient()
http.responses.append(
MockRequestsResponse(
302, dict(location="http://fulfill-this-book/?or=return-early")
)
)
# The second HTTP request made will be to the early return
# link 'extracted' from that link by our mock
# _extract_early_return_url. The response here is a copy of
# the actual response Overdrive sends in this situation.
http.responses.append(MockRequestsResponse(200, content="Success"))
# Do the thing.
success = overdrive.perform_early_return(patron, pin, loan, http.do_get)
# The title was 'returned'.
assert True == success
# It worked like this:
#
# get_fulfillment_link was called with appropriate arguments.
assert (
patron,
pin,
pool.identifier.identifier,
"ebook-epub-adobe",
) == overdrive.get_fulfillment_link_call
# The URL returned by that method was 'requested'.
assert "http://fulfillment/" == http.requests.pop(0)
# The resulting URL was passed into _extract_early_return_url.
assert (
"http://fulfill-this-book/?or=return-early",
) == overdrive._extract_early_return_url_call
# Then the URL returned by _that_ method was 'requested'.
assert "http://early-return/" == http.requests.pop(0)
# If no early return URL can be extracted from the fulfillment URL,
# perform_early_return has no effect.
#
overdrive._extract_early_return_url_call = None
overdrive.EARLY_RETURN_URL = None
http.responses.append(
MockRequestsResponse(302, dict(location="http://fulfill-this-book/"))
)
success = overdrive.perform_early_return(patron, pin, loan, http.do_get)
assert False == success
# _extract_early_return_url was called, but since it returned
# None, no second HTTP request was made.
assert "http://fulfillment/" == http.requests.pop(0)
assert (
"http://fulfill-this-book/",
) == overdrive._extract_early_return_url_call
assert [] == http.requests
# If we can't map the delivery mechanism to one of Overdrive's
# internal formats, perform_early_return has no effect.
#
loan.fulfillment.delivery_mechanism.content_type = "not-in/overdrive"
success = overdrive.perform_early_return(patron, pin, loan, http.do_get)
assert False == success
# In this case, no HTTP requests were made at all, since we
# couldn't figure out which arguments to pass into
# get_fulfillment_link.
assert [] == http.requests
# If the final attempt to hit the return URL doesn't result
# in a 200 status code, perform_early_return has no effect.
http.responses.append(
MockRequestsResponse(
302, dict(location="http://fulfill-this-book/?or=return-early")
)
)
http.responses.append(MockRequestsResponse(401, content="Unauthorized!"))
success = overdrive.perform_early_return(patron, pin, loan, http.do_get)
assert False == success
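# Recap: early return succeeds only when we can (1) map the delivery
# mechanism to an Overdrive internal format, (2) extract an early return
# URL from the fulfillment redirect, and (3) get a 200 back from that
# URL. Failing any of those steps, perform_early_return reports failure
# and leaves the loan alone.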
def test_extract_early_return_url(self):
m = OverdriveAPI._extract_early_return_url
assert None == m("http://no-early-return/")
assert None == m("")
assert None == m(None)
# This is based on a real Overdrive early return URL.
has_early_return = "https://openepub-gk.cdn.overdrive.com/OpenEPUBStore1/1577-1/%7B5880F6D0-48AC-44DE-8BF1-FD1CE62E97A8%7DFzr418.epub?e=1518753718&loanExpirationDate=2018-03-01T17%3a12%3a33Z&loanEarlyReturnUrl=https%3a%2f%2fnotifications-ofs.contentreserve.com%2fEarlyReturn%2fnypl%2f037-1374147-00279%2f5480F6E1-48F3-00DE-96C1-FD3CE32D94FD-312%3fh%3dVgvxBQHdQxtsbgb43AH6%252bEmpni9LoffkPczNiUz7%252b10%253d&sourceId=nypl&h=j7nGk7qxE71X2ZcdLw%2bqa04jqEw%3d"
assert (
"https://notifications-ofs.contentreserve.com/EarlyReturn/nypl/037-1374147-00279/5480F6E1-48F3-00DE-96C1-FD3CE32D94FD-312?h=VgvxBQHdQxtsbgb43AH6%2bEmpni9LoffkPczNiUz7%2b10%3d"
== m(has_early_return)
)
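# Judging by this example, _extract_early_return_url pulls the
# loanEarlyReturnUrl query parameter out of the fulfillment URL and
# URL-decodes it once; the expected value above is exactly that decoded
# parameter.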
def test_place_hold_raises_exception_if_patron_over_hold_limit(self):
over_hold_limit = self.error_message(
"PatronExceededHoldLimit",
"Patron cannot place any more holds, already has maximum holds placed.",
)
edition, pool = self._edition(
identifier_type=Identifier.OVERDRIVE_ID,
data_source_name=DataSource.OVERDRIVE,
with_license_pool=True,
)
self.api.queue_response(400, content=over_hold_limit)
pytest.raises(
PatronHoldLimitReached,
self.api.place_hold,
self._patron(),
"pin",
pool,
notification_email_address="[email protected]",
)
def test_place_hold_looks_up_notification_address(self):
edition, pool = self._edition(
identifier_type=Identifier.OVERDRIVE_ID,
data_source_name=DataSource.OVERDRIVE,
with_license_pool=True,
)
# The first request we make will be to get patron info,
# so that we know that the most recent email address used
# to put a book on hold is [email protected].
ignore, patron_with_email = self.sample_json("patron_info.json")
# The second request we make will be to put a book on hold,
# and when we do so we will ask for the notification to be
# sent to [email protected].
ignore, successful_hold = self.sample_json("successful_hold.json")
self.api.queue_response(200, content=patron_with_email)
self.api.queue_response(200, content=successful_hold)
with temp_config() as config:
config["default_notification_email_address"] = "[email protected]"
hold = self.api.place_hold(
self._patron(), "pin", pool, notification_email_address=None
)
# The book was placed on hold.
assert 1 == hold.hold_position
assert pool.identifier.identifier == hold.identifier
# And when we placed it on hold, we passed in [email protected]
# as the email address -- not [email protected].
url, positional_args, kwargs = self.api.requests[-1]
headers, body = positional_args
assert '{"name": "emailAddress", "value": "[email protected]"}' in body
def test_fulfill_returns_fulfillmentinfo_if_returned_by_get_fulfillment_link(self):
# If get_fulfillment_link returns a FulfillmentInfo, it is returned
# immediately and the rest of fulfill() does not run.
fulfillment = FulfillmentInfo(self.collection, *[None] * 7)
class MockAPI(OverdriveAPI):
def get_fulfillment_link(*args, **kwargs):
return fulfillment
# Since most of the data is not provided, if fulfill() tried
# to actually run to completion, it would crash.
edition, pool = self._edition(with_license_pool=True)
api = MockAPI(self._db, self.collection)
result = api.fulfill(None, None, pool, None)
assert fulfillment == result
def test_fulfill_raises_exception_and_updates_formats_for_outdated_format(self):
edition, pool = self._edition(
identifier_type=Identifier.OVERDRIVE_ID,
data_source_name=DataSource.OVERDRIVE,
with_license_pool=True,
)
# This pool has a format that's no longer available from Overdrive.
pool.set_delivery_mechanism(
Representation.PDF_MEDIA_TYPE,
DeliveryMechanism.ADOBE_DRM,
RightsStatus.IN_COPYRIGHT,
None,
)
ignore, loan = self.sample_json("single_loan.json")
ignore, lock_in_format_not_available = self.sample_json(
"lock_in_format_not_available.json"
)
# We will get the loan, try to lock in the format, and fail.
self.api.queue_response(200, content=loan)
self.api.queue_response(400, content=lock_in_format_not_available)
# Trying to get a fulfillment link raises an exception.
pytest.raises(
FormatNotAvailable,
self.api.get_fulfillment_link,
self._patron(),
"pin",
pool.identifier.identifier,
"ebook-epub-adobe",
)
# Fulfill will also update the formats.
ignore, bibliographic = self.sample_json("bibliographic_information.json")
# To avoid a mismatch, make it look like the information is
# for the correct Identifier.
bibliographic["id"] = pool.identifier.identifier
# If we have the LicensePool available (as opposed to just the
# identifier), we will get the loan, try to lock in the
# format, fail, and then update the bibliographic information.
self.api.queue_response(200, content=loan)
self.api.queue_response(400, content=lock_in_format_not_available)
self.api.queue_response(200, content=bibliographic)
pytest.raises(
FormatNotAvailable,
self.api.fulfill,
self._patron(),
"pin",
pool,
"ebook-epub-adobe",
)
# The delivery mechanisms have been updated.
assert 4 == len(pool.delivery_mechanisms)
assert set(
[
MediaTypes.EPUB_MEDIA_TYPE,
DeliveryMechanism.KINDLE_CONTENT_TYPE,
DeliveryMechanism.STREAMING_TEXT_CONTENT_TYPE,
MediaTypes.OVERDRIVE_EBOOK_MANIFEST_MEDIA_TYPE,
]
) == set(
[lpdm.delivery_mechanism.content_type for lpdm in pool.delivery_mechanisms]
)
assert set(
[
DeliveryMechanism.ADOBE_DRM,
DeliveryMechanism.KINDLE_DRM,
DeliveryMechanism.LIBBY_DRM,
DeliveryMechanism.STREAMING_DRM,
]
) == set(
[lpdm.delivery_mechanism.drm_scheme for lpdm in pool.delivery_mechanisms]
)
def test_get_fulfillment_link_from_download_link(self):
patron = self._patron()
ignore, streaming_fulfill_link = self.sample_json(
"streaming_fulfill_link_response.json"
)
self.api.queue_response(200, content=streaming_fulfill_link)
href, type = self.api.get_fulfillment_link_from_download_link(
patron, "1234", "http://download-link", fulfill_url="http://fulfill"
)
assert (
"https://fulfill.contentreserve.com/PerfectLife9780345530967.epub-sample.overdrive.com?RetailerID=nypl&Expires=1469825647&Token=dd0e19b4-eb70-439d-8c50-a65201060f4c&Signature=asl67/G154KeeUsL1mHPwEbZfgc="
== href
)
assert "text/html" == type
def test_get_fulfillment_link_returns_fulfillmentinfo_for_manifest_format(self):
# When the format requested would result in a link to a
# manifest file, the manifest link is returned as-is (wrapped
# in an OverdriveFulfillmentInfo) rather than being retrieved
# and processed.
# To keep things simple, our mock API will always return the same
# fulfillment link.
loan_info = {"isFormatLockedIn": False}
class MockAPI(MockOverdriveAPI):
def get_loan(self, patron, pin, overdrive_id):
self.get_loan_called_with = (patron, pin, overdrive_id)
return loan_info
def get_download_link(self, loan, format_type, error_url):
self.get_download_link_called_with = (loan, format_type, error_url)
return "http://fulfillment-link/"
def get_fulfillment_link_from_download_link(self, *args, **kwargs):
# We want to verify that this method is never called.
raise Exception("explode!")
api = MockAPI(self._db, self.collection)
api.queue_response(200, content=json.dumps({"some": "data"}))
# Randomly choose one of the formats that must be fulfilled as
# a link to a manifest.
overdrive_format = random.choice(list(OverdriveAPI.MANIFEST_INTERNAL_FORMATS))
# Get the fulfillment link.
patron = self._patron()
fulfillmentinfo = api.get_fulfillment_link(
patron,
"1234",
"http://download-link",
overdrive_format,
)
assert isinstance(fulfillmentinfo, OverdriveManifestFulfillmentInfo)
# Before looking at the OverdriveManifestFulfillmentInfo,
# let's see how we got there.
# First, our mocked get_loan() was called.
assert (patron, "1234", "http://download-link") == api.get_loan_called_with
# It returned a dictionary that contained no information
# except isFormatLockedIn: false.
# Since the manifest formats do not lock the loan, this
# skipped most of the code in get_fulfillment_link, and the
# loan info was passed into our mocked get_download_link.
assert (
loan_info,
overdrive_format,
api.DEFAULT_ERROR_URL,
) == api.get_download_link_called_with
# Since the manifest formats cannot be retrieved by the
# circulation manager, the result of get_download_link was
# wrapped in an OverdriveManifestFulfillmentInfo and returned.
# get_fulfillment_link_from_download_link was never called.
assert "http://fulfillment-link/" == fulfillmentinfo.content_link
assert None == fulfillmentinfo.content_type
def test_update_formats(self):
# Create a LicensePool with an inaccurate delivery mechanism
# and the wrong medium.
edition, pool = self._edition(
data_source_name=DataSource.OVERDRIVE,
identifier_type=Identifier.OVERDRIVE_ID,
with_license_pool=True,
)
edition.medium = Edition.PERIODICAL_MEDIUM
# Add the bad delivery mechanism.
pool.set_delivery_mechanism(
Representation.PDF_MEDIA_TYPE,
DeliveryMechanism.ADOBE_DRM,
RightsStatus.IN_COPYRIGHT,
None,
)
# Prepare the bibliographic information.
ignore, bibliographic = self.sample_json("bibliographic_information.json")
# To avoid a mismatch, make it look like the information is
# for the new pool's Identifier.
bibliographic["id"] = pool.identifier.identifier
self.api.queue_response(200, content=bibliographic)
self.api.update_formats(pool)
# The delivery mechanisms have been updated.
assert 4 == len(pool.delivery_mechanisms)
assert set(
[
MediaTypes.EPUB_MEDIA_TYPE,
DeliveryMechanism.KINDLE_CONTENT_TYPE,
DeliveryMechanism.STREAMING_TEXT_CONTENT_TYPE,
MediaTypes.OVERDRIVE_EBOOK_MANIFEST_MEDIA_TYPE,
]
) == set(
[lpdm.delivery_mechanism.content_type for lpdm in pool.delivery_mechanisms]
)
assert set(
[
DeliveryMechanism.ADOBE_DRM,
DeliveryMechanism.KINDLE_DRM,
DeliveryMechanism.LIBBY_DRM,
DeliveryMechanism.STREAMING_DRM,
]
) == set(
[lpdm.delivery_mechanism.drm_scheme for lpdm in pool.delivery_mechanisms]
)
# The Edition's medium has been corrected.
assert Edition.BOOK_MEDIUM == edition.medium
def test_update_availability(self):
# Test the Overdrive implementation of the update_availability
# method defined by the CirculationAPI interface.
# Create a LicensePool that needs updating.
edition, pool = self._edition(
identifier_type=Identifier.OVERDRIVE_ID,
data_source_name=DataSource.OVERDRIVE,
with_license_pool=True,
collection=self.collection,
)
# We have never checked the circulation information for this
# LicensePool. Put some random junk in the pool to make sure
# it gets replaced.
pool.licenses_owned = 10
pool.licenses_available = 4
pool.patrons_in_hold_queue = 3
assert None == pool.last_checked
# Prepare availability information.
ignore, availability = self.sample_json(
"overdrive_availability_information.json"
)
# Since this is the first time we've seen this book,
# we'll also be updating the bibliographic information.
ignore, bibliographic = self.sample_json("bibliographic_information.json")
# To avoid a mismatch, make it look like the information is
# for the new pool's Identifier.
availability["id"] = pool.identifier.identifier
bibliographic["id"] = pool.identifier.identifier
self.api.queue_response(200, content=availability)
self.api.queue_response(200, content=bibliographic)
self.api.update_availability(pool)
# The availability information has been updated, as has the
# date the availability information was last checked.
assert 5 == pool.licenses_owned
assert 1 == pool.licenses_available
assert 0 == pool.patrons_in_hold_queue
assert pool.last_checked is not None
def test_circulation_lookup(self):
"""Test the method that actually looks up Overdrive circulation
information.
"""
self.api.queue_response(200, content="foo")
# If passed an identifier, we'll use the endpoint() method to
# construct a v2 availability URL and make a request to
# it.
book, (status_code, headers, content) = self.api.circulation_lookup(
"an-identifier"
)
assert dict(id="an-identifier") == book
assert 200 == status_code
assert b"foo" == content
request_url, ignore1, ignore2 = self.api.requests.pop()
expect_url = self.api.endpoint(
self.api.AVAILABILITY_ENDPOINT,
collection_token=self.api.collection_token,
product_id="an-identifier",
)
assert request_url == expect_url
assert "/v2/collections" in request_url
# If passed the result of an API call that includes an
# availability link, we'll clean up the URL in the link and
# use it to get our availability data.
self.api.queue_response(200, content="foo")
v1 = "https://qa.api.overdrive.com/v1/collections/abcde/products/12345/availability"
v2 = "https://qa.api.overdrive.com/v2/collections/abcde/products/12345/availability"
previous_result = dict(availability_link=v1)
book, (status_code, headers, content) = self.api.circulation_lookup(
previous_result
)
assert previous_result == book
assert 200 == status_code
assert b"foo" == content
request_url, ignore1, ignore2 = self.api.requests.pop()
# The v1 URL was converted to a v2 url.
assert v2 == request_url
def test_update_licensepool_error(self):
# Create an identifier.
identifier = self._identifier(identifier_type=Identifier.OVERDRIVE_ID)
ignore, availability = self.sample_json(
"overdrive_availability_information.json"
)
self.api.queue_response(500, content="An error occurred.")
book = dict(id=identifier.identifier, availability_link=self._url)
pool, was_new, changed = self.api.update_licensepool(book)
assert None == pool
def test_update_licensepool_not_found(self):
# If the Overdrive API says a book is not found in the
# collection, that's treated as useful information, not an error.
# Create an identifier.
identifier = self._identifier(identifier_type=Identifier.OVERDRIVE_ID)
ignore, not_found = self.sample_json("overdrive_availability_not_found.json")
# Queue the 'not found' response twice -- once for the circulation
# lookup and once for the metadata lookup.
self.api.queue_response(404, content=not_found)
self.api.queue_response(404, content=not_found)
book = dict(id=identifier.identifier, availability_link=self._url)
pool, was_new, changed = self.api.update_licensepool(book)
assert 0 == pool.licenses_owned
assert 0 == pool.licenses_available
assert 0 == pool.patrons_in_hold_queue
def test_update_licensepool_provides_bibliographic_coverage(self):
# Create an identifier.
identifier = self._identifier(identifier_type=Identifier.OVERDRIVE_ID)
# Prepare bibliographic and availability information
# for this identifier.
ignore, availability = self.sample_json(
"overdrive_availability_information.json"
)
ignore, bibliographic = self.sample_json("bibliographic_information.json")
# To avoid a mismatch, make it look like the information is
# for the newly created Identifier.
availability["id"] = identifier.identifier
bibliographic["id"] = identifier.identifier
self.api.queue_response(200, content=availability)
self.api.queue_response(200, content=bibliographic)
# Now we're ready. When we call update_licensepool, the
# OverdriveAPI will retrieve the availability information,
# then the bibliographic information. It will then trigger the
# OverdriveBibliographicCoverageProvider, which will
# create an Edition and a presentation-ready Work.
pool, was_new, changed = self.api.update_licensepool(identifier.identifier)
assert True == was_new
assert availability["copiesOwned"] == pool.licenses_owned
edition = pool.presentation_edition
assert "Ancillary Justice" == edition.title
assert True == pool.work.presentation_ready
assert pool.work.cover_thumbnail_url.startswith(
"http://images.contentreserve.com/"
)
# The book has been run through the bibliographic coverage
# provider.
coverage = [
x
for x in identifier.coverage_records
if x.operation is None and x.data_source.name == DataSource.OVERDRIVE
]
assert 1 == len(coverage)
# Call update_licensepool on an identifier that is missing a work and make
# sure that it provides bibliographic coverage in that case.
self._db.delete(pool.work)
self._db.commit()
pool, is_new = LicensePool.for_foreign_id(
self._db,
DataSource.OVERDRIVE,
Identifier.OVERDRIVE_ID,
identifier.identifier,
collection=self.collection,
)
assert not pool.work
self.api.queue_response(200, content=availability)
self.api.queue_response(200, content=bibliographic)
pool, was_new, changed = self.api.update_licensepool(identifier.identifier)
assert False == was_new
assert True == pool.work.presentation_ready
def test_update_new_licensepool(self):
data, raw = self.sample_json("overdrive_availability_information.json")
# Create an identifier
identifier = self._identifier(identifier_type=Identifier.OVERDRIVE_ID)
# Make it look like the availability information is for the
# newly created Identifier.
raw["reserveId"] = identifier.identifier
pool, was_new = LicensePool.for_foreign_id(
self._db,
DataSource.OVERDRIVE,
identifier.type,
identifier.identifier,
collection=self.collection,
)
pool, was_new, changed = self.api.update_licensepool_with_book_info(
raw, pool, was_new
)
assert True == was_new
assert True == changed
self._db.commit()
assert raw["copiesOwned"] == pool.licenses_owned
assert raw["copiesAvailable"] == pool.licenses_available
assert 0 == pool.licenses_reserved
assert raw["numberOfHolds"] == pool.patrons_in_hold_queue
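# The field mapping exercised above: copiesOwned -> licenses_owned,
# copiesAvailable -> licenses_available, numberOfHolds ->
# patrons_in_hold_queue, with licenses_reserved left at 0.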
def test_update_existing_licensepool(self):
data, raw = self.sample_json("overdrive_availability_information.json")
# Create a LicensePool.
wr, pool = self._edition(
data_source_name=DataSource.OVERDRIVE,
identifier_type=Identifier.OVERDRIVE_ID,
with_license_pool=True,
)
# Make it look like the availability information is for the
# newly created LicensePool.
raw["id"] = pool.identifier.identifier
wr.title = "The real title."
assert 1 == pool.licenses_owned
assert 1 == pool.licenses_available
assert 0 == pool.licenses_reserved
assert 0 == pool.patrons_in_hold_queue
p2, was_new, changed = self.api.update_licensepool_with_book_info(
raw, pool, False
)
assert False == was_new
assert True == changed
assert p2 == pool
# The title didn't change to the one given in the availability
# information, because we already set a title for that work.
assert "The real title." == wr.title
assert raw["copiesOwned"] == pool.licenses_owned
assert raw["copiesAvailable"] == pool.licenses_available
assert 0 == pool.licenses_reserved
assert raw["numberOfHolds"] == pool.patrons_in_hold_queue
def test_update_new_licensepool_when_same_book_has_pool_in_different_collection(
self,
):
old_edition, old_pool = self._edition(
data_source_name=DataSource.OVERDRIVE,
identifier_type=Identifier.OVERDRIVE_ID,
with_license_pool=True,
)
old_pool.calculate_work()
collection = self._collection()
data, raw = self.sample_json("overdrive_availability_information.json")
# Make it look like the availability information is for the
# old pool's Identifier.
identifier = old_pool.identifier
raw["id"] = identifier.identifier
new_pool, was_new = LicensePool.for_foreign_id(
self._db,
DataSource.OVERDRIVE,
identifier.type,
identifier.identifier,
collection=collection,
)
# The new pool doesn't have a presentation edition yet,
# but it will be updated to share the old pool's edition.
assert None == new_pool.presentation_edition
new_pool, was_new, changed = self.api.update_licensepool_with_book_info(
raw, new_pool, was_new
)
assert True == was_new
assert True == changed
assert old_edition == new_pool.presentation_edition
assert old_pool.work == new_pool.work
def test_update_licensepool_with_holds(self):
data, raw = self.sample_json("overdrive_availability_information_holds.json")
identifier = self._identifier(identifier_type=Identifier.OVERDRIVE_ID)
raw["id"] = identifier.identifier
license_pool, is_new = LicensePool.for_foreign_id(
self._db,
DataSource.OVERDRIVE,
identifier.type,
identifier.identifier,
collection=self._default_collection,
)
pool, was_new, changed = self.api.update_licensepool_with_book_info(
raw, license_pool, is_new
)
assert 10 == pool.patrons_in_hold_queue
assert True == changed
def test_refresh_patron_access_token(self):
"""Verify that patron information is included in the request
when refreshing a patron access token.
"""
patron = self._patron()
patron.authorization_identifier = "barcode"
credential = self._credential(patron=patron)
data, raw = self.sample_json("patron_token.json")
self.api.queue_response(200, content=raw)
# Try to refresh the patron access token with a PIN, and
# then without a PIN.
self.api.refresh_patron_access_token(credential, patron, "a pin")
self.api.refresh_patron_access_token(credential, patron, None)
# Verify that the requests that were made correspond to what
# Overdrive is expecting.
with_pin, without_pin = self.api.access_token_requests
url, payload, headers, kwargs = with_pin
assert "https://oauth-patron.overdrive.com/patrontoken" == url
assert "barcode" == payload["username"]
expect_scope = "websiteid:%s authorizationname:%s" % (
self.api.website_id.decode("utf-8"),
self.api.ils_name(patron.library),
)
assert expect_scope == payload["scope"]
assert "a pin" == payload["password"]
assert not "password_required" in payload
url, payload, headers, kwargs = without_pin
assert "https://oauth-patron.overdrive.com/patrontoken" == url
assert "barcode" == payload["username"]
assert expect_scope == payload["scope"]
assert "false" == payload["password_required"]
assert "[ignore]" == payload["password"]
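# So the two payloads differ only in how the PIN is handled: with a PIN
# we send it as the password; without one we send
# password_required=false plus the placeholder password "[ignore]".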
class TestOverdriveAPICredentials(OverdriveAPITest):
def test_patron_correct_credentials_for_multiple_overdrive_collections(self):
# Verify that the correct credential will be used
# when a library has more than one OverDrive collection.
def _optional_value(self, obj, key):
return obj.get(key, "none")
def _make_token(scope, username, password, grant_type="password"):
return "%s|%s|%s|%s" % (grant_type, scope, username, password)
class MockAPI(MockOverdriveAPI):
def token_post(self, url, payload, headers={}, **kwargs):
url = self.endpoint(url)
self.access_token_requests.append((url, payload, headers, kwargs))
token = _make_token(
_optional_value(self, payload, "scope"),
_optional_value(self, payload, "username"),
_optional_value(self, payload, "password"),
grant_type=_optional_value(self, payload, "grant_type"),
)
response = self.mock_access_token_response(token)
from core.util.http import HTTP
return HTTP._process_response(url, response, **kwargs)
library = self._default_library
patron = self._patron(library=library)
patron.authorization_identifier = "patron_barcode"
pin = "patron_pin"
# clear out any collections added before we add ours
library.collections = []
# Distinct credentials for the two OverDrive collections in which our
# library has membership.
library_collection_properties = [
dict(
library=library,
name="Test OD Collection 1",
client_key="client_key_1",
client_secret="client_secret_1",
library_id="lib_id_1",
website_id="ws_id_1",
ils_name="lib1_coll1_ils",
),
dict(
library=library,
name="Test OD Collection 2",
client_key="client_key_2",
client_secret="client_secret_2",
library_id="lib_id_2",
website_id="ws_id_2",
ils_name="lib1_coll2_ils",
),
]
# These are the credentials we'll expect for each of our collections.
expected_credentials = {
props["name"]: _make_token(
"websiteid:%s authorizationname:%s"
% (props["website_id"], props["ils_name"]),
patron.authorization_identifier,
pin,
)
for props in library_collection_properties
}
# Add the collections.
collections = [
MockAPI.mock_collection(self._db, **props)
for props in library_collection_properties
]
circulation = CirculationAPI(
self._db, library, api_map={ExternalIntegration.OVERDRIVE: MockAPI}
)
od_apis = {
api.collection.name: api
for api in list(circulation.api_for_collection.values())
}
# Ensure that we have the correct number of OverDrive collections.
assert len(library_collection_properties) == len(od_apis)
# Verify that the expected credentials match what we got.
for name in list(expected_credentials.keys()) + list(
reversed(list(expected_credentials.keys()))
):
credential = od_apis[name].get_patron_credential(patron, pin)
assert expected_credentials[name] == credential.credential
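# The point of checking in both orders: because the token scope embeds
# each collection's website_id and ILS name, the credential cached for
# one OverDrive collection can never be served up for the other.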
class TestExtractData(OverdriveAPITest):
def test_get_download_link(self):
data, json = self.sample_json("checkout_response_locked_in_format.json")
url = MockOverdriveAPI.get_download_link(
json, "ebook-epub-adobe", "http://foo.com/"
)
assert (
"http://patron.api.overdrive.com/v1/patrons/me/checkouts/76C1B7D0-17F4-4C05-8397-C66C17411584/formats/ebook-epub-adobe/downloadlink?errorpageurl=http://foo.com/"
== url
)
pytest.raises(
NoAcceptableFormat,
MockOverdriveAPI.get_download_link,
json,
"no-such-format",
"http://foo.com/",
)
def test_get_download_link_raises_exception_if_loan_fulfilled_on_incompatible_platform(
self,
):
data, json = self.sample_json("checkout_response_book_fulfilled_on_kindle.json")
pytest.raises(
FulfilledOnIncompatiblePlatform,
MockOverdriveAPI.get_download_link,
json,
"ebook-epub-adobe",
"http://foo.com/",
)
def test_get_download_link_for_manifest_format(self):
# If you ask for the download link for an 'x-manifest' format,
# it's treated as a variant of the 'x' format.
data, json = self.sample_json("checkout_response_book_fulfilled_on_kindle.json")
# This is part of the URL from `json` that we expect
# get_download_link to use as a base.
base_url = "http://patron.api.overdrive.com/v1/patrons/me/checkouts/98EA8135-52C0-4480-9C0E-1D0779670D4A/formats/ebook-overdrive/downloadlink"
# First, let's ask for the streaming format.
link = MockOverdriveAPI.get_download_link(
json, "ebook-overdrive", "http://foo.com/"
)
# The base URL is returned, with {errorpageurl} filled in and
# {odreadauthurl} left for other code to fill in.
assert (
base_url + "?errorpageurl=http://foo.com/&odreadauthurl={odreadauthurl}"
== link
)
# Now let's ask for the manifest format.
link = MockOverdriveAPI.get_download_link(
json, "ebook-overdrive-manifest", "http://bar.com/"
)
# The {errorpageurl} and {odreadauthurl} parameters
# have been removed, and contentfile=true has been appended.
assert base_url + "?contentfile=true" == link
def test_extract_download_link(self):
# Verify that extract_download_link can or cannot find a
# download link for a given format subdocument.
class Mock(OverdriveAPI):
called_with = None
@classmethod
def make_direct_download_link(cls, download_link):
cls.called_with = download_link
return "http://manifest/"
m = Mock.extract_download_link
error_url = "http://error/"
# Here we don't even know the name of the format.
empty = dict()
with pytest.raises(IOError) as excinfo:
m(empty, error_url)
assert "No linkTemplates for format (unknown)" in str(excinfo.value)
# Here we know the name, but there are no link templates.
no_templates = dict(formatType="someformat")
with pytest.raises(IOError) as excinfo:
m(no_templates, error_url)
assert "No linkTemplates for format someformat" in str(excinfo.value)
# Here there's a link template structure, but no downloadLink
# inside.
no_download_link = dict(formatType="someformat", linkTemplates=dict())
with pytest.raises(IOError) as excinfo:
m(no_download_link, error_url)
assert "No downloadLink for format someformat" in str(excinfo.value)
# Here there's a downloadLink structure, but no href inside.
href_is_missing = dict(
formatType="someformat", linkTemplates=dict(downloadLink=dict())
)
with pytest.raises(IOError) as excinfo:
m(href_is_missing, error_url)
assert "No downloadLink href for format someformat" in str(excinfo.value)
# Now we finally get to the cases where there is an actual
# download link. The behavior is different based on whether
# or not we want to return a link to the manifest file.
working = dict(
formatType="someformat",
linkTemplates=dict(
downloadLink=dict(href="http://download/?errorpageurl={errorpageurl}")
),
)
# If we don't want a manifest, make_direct_download_link is
# not called.
do_not_fetch_manifest = m(working, error_url, fetch_manifest=False)
assert None == Mock.called_with
# The errorpageurl template is filled in.
assert "http://download/?errorpageurl=http://error/" == do_not_fetch_manifest
# If we do want a manifest, make_direct_download_link is called
# without errorpageurl being affected.
do_fetch_manifest = m(working, error_url, fetch_manifest=True)
assert "http://download/?errorpageurl={errorpageurl}" == Mock.called_with
assert "http://manifest/" == do_fetch_manifest
def test_make_direct_download_link(self):
# Verify that make_direct_download_link handles the various more or
# less weird URLs that Overdrive might or might not serve.
base = "http://overdrive/downloadlink"
m = OverdriveAPI.make_direct_download_link
assert base + "?contentfile=true" == m(base)
assert base + "?contentfile=true" == m(base + "?odreadauthurl={odreadauthurl}")
assert base + "?other=other&contentfile=true" == m(
base + "?odreadauthurl={odreadauthurl}&other=other"
)
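# In other words, make_direct_download_link drops any {odreadauthurl}
# template parameter, keeps every other query parameter, and appends
# contentfile=true.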
def test_extract_data_from_checkout_resource(self):
data, json = self.sample_json("checkout_response_locked_in_format.json")
expires, url = MockOverdriveAPI.extract_data_from_checkout_response(
json, "ebook-epub-adobe", "http://foo.com/"
)
assert 2013 == expires.year
assert 10 == expires.month
assert 4 == expires.day
assert (
"http://patron.api.overdrive.com/v1/patrons/me/checkouts/76C1B7D0-17F4-4C05-8397-C66C17411584/formats/ebook-epub-adobe/downloadlink?errorpageurl=http://foo.com/"
== url
)
def test_process_checkout_data(self):
data, json = self.sample_json(
"shelf_with_book_already_fulfilled_on_kindle.json"
)
[on_kindle, not_on_kindle] = json["checkouts"]
# The book already fulfilled on Kindle doesn't get turned into
# LoanInfo at all.
assert None == MockOverdriveAPI.process_checkout_data(
on_kindle, self.collection
)
# The book not yet fulfilled does show up as a LoanInfo.
loan_info = MockOverdriveAPI.process_checkout_data(
not_on_kindle, self.collection
)
assert "2fadd2ac-a8ec-4938-a369-4c3260e8922b" == loan_info.identifier
# Since there are two usable formats (Adobe EPUB and Adobe
# PDF), the LoanInfo is not locked to any particular format.
assert None == loan_info.locked_to
# A book that's on loan and locked to a specific format has a
# DeliveryMechanismInfo associated with that format.
data, format_locked_in = self.sample_json(
"checkout_response_locked_in_format.json"
)
loan_info = MockOverdriveAPI.process_checkout_data(
format_locked_in, self.collection
)
delivery = loan_info.locked_to
assert Representation.EPUB_MEDIA_TYPE == delivery.content_type
assert DeliveryMechanism.ADOBE_DRM == delivery.drm_scheme
# This book is on loan and the choice between Kindle and Adobe
# EPUB has not yet been made, but as far as we're concerned,
# Adobe EPUB is the only *usable* format, so it's effectively
# locked.
data, no_format_locked_in = self.sample_json(
"checkout_response_no_format_locked_in.json"
)
loan_info = MockOverdriveAPI.process_checkout_data(
no_format_locked_in, self.collection
)
assert loan_info != None
delivery = loan_info.locked_to
assert Representation.EPUB_MEDIA_TYPE == delivery.content_type
assert DeliveryMechanism.ADOBE_DRM == delivery.drm_scheme
# TODO: In the future both of these cases should return a
# LoanInfo with an appropriate FulfillmentInfo. The calling code
# would then decide whether or not to show the loan.
class TestSyncBookshelf(OverdriveAPITest):
def test_sync_bookshelf_creates_local_loans(self):
loans_data, json_loans = self.sample_json(
"shelf_with_some_checked_out_books.json"
)
holds_data, json_holds = self.sample_json("no_holds.json")
self.api.queue_response(200, content=loans_data)
self.api.queue_response(200, content=holds_data)
patron = self._patron()
loans, holds = self.circulation.sync_bookshelf(patron, "dummy pin")
# All four loans in the sample data were created.
assert 4 == len(loans)
assert set(loans) == set(patron.loans)
# We have created previously unknown LicensePools and
# Identifiers.
identifiers = [loan.license_pool.identifier.identifier for loan in loans]
assert (
sorted(
[
"a5a3d737-34d4-4d69-aad8-eba4e46019a3",
"99409f99-45a5-4238-9e10-98d1435cde04",
"993e4b33-823c-40af-8f61-cac54e1cba5d",
"a2ec6f3a-ebfe-4c95-9638-2cb13be8de5a",
]
)
== sorted(identifiers)
)
# We have recorded a new DeliveryMechanism associated with
# each loan.
mechanisms = []
for loan in loans:
if loan.fulfillment:
mechanism = loan.fulfillment.delivery_mechanism
mechanisms.append((mechanism.content_type, mechanism.drm_scheme))
assert [
(Representation.EPUB_MEDIA_TYPE, DeliveryMechanism.NO_DRM),
(Representation.EPUB_MEDIA_TYPE, DeliveryMechanism.ADOBE_DRM),
(Representation.PDF_MEDIA_TYPE, DeliveryMechanism.ADOBE_DRM),
(Representation.EPUB_MEDIA_TYPE, DeliveryMechanism.ADOBE_DRM),
] == mechanisms
# There are no holds.
assert [] == holds
# Running the sync again leaves all four loans in place.
patron.last_loan_activity_sync = None
self.api.queue_response(200, content=loans_data)
self.api.queue_response(200, content=holds_data)
loans, holds = self.circulation.sync_bookshelf(patron, "dummy pin")
assert 4 == len(loans)
assert set(loans) == set(patron.loans)
def test_sync_bookshelf_removes_loans_not_present_on_remote(self):
loans_data, json_loans = self.sample_json(
"shelf_with_some_checked_out_books.json"
)
holds_data, json_holds = self.sample_json("no_holds.json")
self.api.queue_response(200, content=loans_data)
self.api.queue_response(200, content=holds_data)
# Create a loan not present in the sample data.
patron = self._patron()
overdrive_edition, new = self._edition(
data_source_name=DataSource.OVERDRIVE,
with_license_pool=True,
collection=self.collection,
)
[pool] = overdrive_edition.license_pools
overdrive_loan, new = pool.loan_to(patron)
yesterday = utc_now() - timedelta(days=1)
overdrive_loan.start = yesterday
# Sync with Overdrive, and the loan not present in the sample
# data is removed.
loans, holds = self.circulation.sync_bookshelf(patron, "dummy pin")
assert 4 == len(loans)
assert set(loans) == set(patron.loans)
assert overdrive_loan not in patron.loans
def test_sync_bookshelf_ignores_loans_from_other_sources(self):
patron = self._patron()
gutenberg, new = self._edition(
data_source_name=DataSource.GUTENBERG, with_license_pool=True
)
[pool] = gutenberg.license_pools
gutenberg_loan, new = pool.loan_to(patron)
loans_data, json_loans = self.sample_json(
"shelf_with_some_checked_out_books.json"
)
holds_data, json_holds = self.sample_json("no_holds.json")
# Overdrive doesn't know about the Gutenberg loan, but it was
# not destroyed, because it came from another source.
self.api.queue_response(200, content=loans_data)
self.api.queue_response(200, content=holds_data)
loans, holds = self.circulation.sync_bookshelf(patron, "dummy pin")
assert 5 == len(patron.loans)
assert gutenberg_loan in patron.loans
def test_sync_bookshelf_creates_local_holds(self):
loans_data, json_loans = self.sample_json("no_loans.json")
holds_data, json_holds = self.sample_json("holds.json")
self.api.queue_response(200, content=loans_data)
self.api.queue_response(200, content=holds_data)
patron = self._patron()
loans, holds = self.circulation.sync_bookshelf(patron, "dummy pin")
# All four holds in the sample data were created.
assert 4 == len(holds)
assert sorted(holds) == sorted(patron.holds)
# Running the sync again leaves all four holds in place.
patron.last_loan_activity_sync = None
self.api.queue_response(200, content=loans_data)
self.api.queue_response(200, content=holds_data)
loans, holds = self.circulation.sync_bookshelf(patron, "dummy pin")
assert 4 == len(holds)
assert sorted(holds) == sorted(patron.holds)
def test_sync_bookshelf_removes_holds_not_present_on_remote(self):
loans_data, json_loans = self.sample_json("no_loans.json")
holds_data, json_holds = self.sample_json("holds.json")
patron = self._patron()
overdrive_edition, new = self._edition(
data_source_name=DataSource.OVERDRIVE,
with_license_pool=True,
collection=self.collection,
)
[pool] = overdrive_edition.license_pools
overdrive_hold, new = pool.on_hold_to(patron)
self.api.queue_response(200, content=loans_data)
self.api.queue_response(200, content=holds_data)
# The hold not present in the sample data has been removed
loans, holds = self.circulation.sync_bookshelf(patron, "dummy pin")
assert 4 == len(holds)
assert holds == patron.holds
assert overdrive_hold not in patron.holds
def test_sync_bookshelf_ignores_holds_from_other_collections(self):
loans_data, json_loans = self.sample_json("no_loans.json")
holds_data, json_holds = self.sample_json("holds.json")
patron = self._patron()
# This patron has an Overdrive book on hold, but it derives
# from an Overdrive Collection that's not managed by
# self.circulation.
overdrive, new = self._edition(
data_source_name=DataSource.OVERDRIVE,
with_license_pool=True,
collection=self._collection(),
)
[pool] = overdrive.license_pools
overdrive_hold, new = pool.on_hold_to(patron)
self.api.queue_response(200, content=loans_data)
self.api.queue_response(200, content=holds_data)
# self.api doesn't know about the hold, but it was not
# destroyed, because it came from a different collection.
loans, holds = self.circulation.sync_bookshelf(patron, "dummy pin")
assert 5 == len(patron.holds)
assert overdrive_hold in patron.holds
class TestOverdriveManifestFulfillmentInfo(OverdriveAPITest):
def test_as_response(self):
# An OverdriveManifestFulfillmentInfo just links the client
# directly to the manifest file, bypassing normal FulfillmentInfo
# processing.
info = OverdriveManifestFulfillmentInfo(
self._default_collection,
"http://content-link/",
"abcd-efgh",
"scope string",
)
response = info.as_response
assert 302 == response.status_code
assert "" == response.get_data(as_text=True)
headers = response.headers
assert "text/plain" == headers["Content-Type"]
# These are the important headers; the location of the manifest file
# and the scope necessary to initiate Patron Authentication for
# it.
assert "scope string" == headers["X-Overdrive-Scope"]
assert "http://content-link/" == headers["Location"]
class TestOverdriveCirculationMonitor(OverdriveAPITest):
def test_run(self):
# An end-to-end test verifying that this Monitor manages its
# state across multiple runs.
#
# This tests a lot of code that's technically not in Monitor,
# but when the Monitor API changes, it may require changes to
# this particular monitor, and it's good to have a test that
# will fail if that's true.
class Mock(OverdriveCirculationMonitor):
def catch_up_from(self, start, cutoff, progress):
self.catch_up_from_called_with = (start, cutoff, progress)
monitor = Mock(self._db, self.collection)
monitor.run()
start, cutoff, progress = monitor.catch_up_from_called_with
now = utc_now()
# The first time this Monitor is called, its 'start time' is
# the current time, and we ask for an overlap of one minute.
# This isn't very effective, but we have to start somewhere.
#
# (This isn't how the Overdrive collection is initially
# populated, BTW -- that's NewTitlesOverdriveCollectionMonitor.)
self.time_eq(start, now - monitor.OVERLAP)
self.time_eq(cutoff, now)
timestamp = monitor.timestamp()
assert start == timestamp.start
assert cutoff == timestamp.finish
# The second time the Monitor is called, its 'start time'
# is one minute before the previous cutoff time.
monitor.run()
new_start, new_cutoff, new_progress = monitor.catch_up_from_called_with
now = utc_now()
assert new_start == cutoff - monitor.OVERLAP
self.time_eq(new_cutoff, now)
def test_catch_up_from(self):
# catch_up_from() asks Overdrive about recent changes by
# calling recently_changed_ids().
#
# It mirrors those changes locally by calling
# update_licensepool().
#
# If this is our first time encountering a book, a
# DISTRIBUTOR_TITLE_ADD analytics event is sent out.
#
# The method stops when should_stop() -- called on every book
# -- returns True.
class MockAPI(object):
def __init__(self, *ignore, **kwignore):
self.licensepools = []
self.update_licensepool_calls = []
def update_licensepool(self, book_id):
pool, is_new, is_changed = self.licensepools.pop(0)
self.update_licensepool_calls.append((book_id, pool))
return pool, is_new, is_changed
class MockAnalytics(object):
def __init__(self, _db):
self._db = _db
self.events = []
def collect_event(self, *args):
self.events.append(args)
class MockMonitor(OverdriveCirculationMonitor):
recently_changed_ids_called_with = None
should_stop_calls = []
def recently_changed_ids(self, start, cutoff):
self.recently_changed_ids_called_with = (start, cutoff)
return [1, 2, None, 3, 4]
def should_stop(self, start, book, is_changed):
# We're going to stop after the third valid book,
# ensuring that we never ask 'Overdrive' for the
# fourth book.
self.should_stop_calls.append((start, book, is_changed))
if book == 3:
return True
return False
monitor = MockMonitor(
self._db, self.collection, api_class=MockAPI, analytics_class=MockAnalytics
)
api = monitor.api
# A MockAnalytics object was created and is ready to receive analytics
# events.
assert isinstance(monitor.analytics, MockAnalytics)
assert self._db == monitor.analytics._db
# The 'Overdrive API' is ready to tell us about four books,
# but only one of them (the first) represents a change from what
# we already know.
lp1 = self._licensepool(None)
lp1.last_checked = utc_now()
lp2 = self._licensepool(None)
lp3 = self._licensepool(None)
lp4 = object()
api.licensepools.append((lp1, True, True))
api.licensepools.append((lp2, False, False))
api.licensepools.append((lp3, False, True))
api.licensepools.append(lp4)
progress = TimestampData()
start = object()
cutoff = object()
monitor.catch_up_from(start, cutoff, progress)
# The monitor called recently_changed_ids with the start and
# cutoff times. It returned five 'books', one of which was None --
# simulating a lack of data from Overdrive.
assert (start, cutoff) == monitor.recently_changed_ids_called_with
# The monitor ignored the empty book and called
# update_licensepool on the first three valid 'books'. The
# mock API delivered the first three LicensePools from the
# queue.
assert [(1, lp1), (2, lp2), (3, lp3)] == api.update_licensepool_calls
        # After each book was processed, should_stop was called with
        # the start date, the book ID, and information about whether
        # the LicensePool was changed (or created) during
        # update_licensepool().
assert [
(start, 1, True),
(start, 2, False),
(start, 3, True),
] == monitor.should_stop_calls
# should_stop returned True on the third call, and at that
# point we gave up.
# The fourth (bogus) LicensePool is still in api.licensepools,
# because we never asked for it.
assert [lp4] == api.licensepools
# A single analytics event was sent out, for the first LicensePool,
# the one that update_licensepool said was new.
[[library, licensepool, event, last_checked]] = monitor.analytics.events
        # The event commemorates the addition of this LicensePool to the
# collection.
assert lp1.collection.libraries == [library]
assert lp1 == licensepool
assert CirculationEvent.DISTRIBUTOR_TITLE_ADD == event
assert lp1.last_checked == last_checked
# The incoming TimestampData object was updated with
# a summary of what happened.
#
# We processed four books: 1, 2, None (which was ignored)
# and 3.
assert "Books processed: 4." == progress.achievements
class TestNewTitlesOverdriveCollectionMonitor(OverdriveAPITest):
def test_recently_changed_ids(self):
class MockAPI(object):
def __init__(self, *args, **kwargs):
pass
def all_ids(self):
return "all of the ids"
monitor = NewTitlesOverdriveCollectionMonitor(
self._db, self.collection, api_class=MockAPI
)
assert "all of the ids" == monitor.recently_changed_ids(object(), object())
def test_should_stop(self):
monitor = NewTitlesOverdriveCollectionMonitor(
self._db, self.collection, api_class=MockOverdriveAPI
)
m = monitor.should_stop
# If the monitor has never run before, we need to keep going
# until we run out of books.
assert False == m(None, object(), object())
assert False == m(monitor.NEVER, object(), object())
# If information is missing or invalid, we assume that we
# should keep going.
start = datetime_utc(2018, 1, 1)
assert False == m(start, {}, object())
assert False == m(start, {"date_added": None}, object())
assert False == m(start, {"date_added": "Not a date"}, object())
# Here, we're actually comparing real dates, using the date
# format found in the Overdrive API. A date that's after the
# `start` date means we should keep going backwards. A date before
# the `start` date means we should stop.
assert False == m(
start, {"date_added": "2019-07-12T11:06:38.157+01:00"}, object()
)
assert True == m(
start, {"date_added": "2017-07-12T11:06:38.157-04:00"}, object()
)
class TestRecentOverdriveCollectionMonitor(OverdriveAPITest):
def test_should_stop(self):
monitor = RecentOverdriveCollectionMonitor(
self._db, self.collection, api_class=MockOverdriveAPI
)
assert 0 == monitor.consecutive_unchanged_books
m = monitor.should_stop
# This book hasn't been changed, but we're under the limit, so we should
# keep going.
assert False == m(object(), object(), False)
assert 1 == monitor.consecutive_unchanged_books
assert False == m(object(), object(), False)
assert 2 == monitor.consecutive_unchanged_books
# This book has changed, so our counter gets reset.
assert False == m(object(), object(), True)
assert 0 == monitor.consecutive_unchanged_books
# When we're at the limit, and another book comes along that hasn't
# been changed, _then_ we decide to stop.
monitor.consecutive_unchanged_books = (
monitor.MAXIMUM_CONSECUTIVE_UNCHANGED_BOOKS
)
assert True == m(object(), object(), False)
assert (
monitor.MAXIMUM_CONSECUTIVE_UNCHANGED_BOOKS + 1
== monitor.consecutive_unchanged_books
)
class TestOverdriveFormatSweep(OverdriveAPITest):
def test_process_item(self):
# Validate the standard CollectionMonitor interface.
monitor = OverdriveFormatSweep(
self._db, self.collection, api_class=MockOverdriveAPI
)
monitor.api.queue_collection_token()
# We're not testing that the work actually gets done (that's
# tested in test_update_formats), only that the monitor
# implements the expected process_item API without crashing.
monitor.api.queue_response(404)
edition, pool = self._edition(with_license_pool=True)
monitor.process_item(pool.identifier)
def test_process_item_multiple_licence_pools(self):
# Make sure that we only call update_formats once when an item
# is part of multiple licensepools.
class MockApi(MockOverdriveAPI):
update_format_calls = 0
def update_formats(self, licensepool):
self.update_format_calls += 1
monitor = OverdriveFormatSweep(self._db, self.collection, api_class=MockApi)
monitor.api.queue_collection_token()
monitor.api.queue_response(404)
edition = self._edition()
collection1 = self._collection(name="Collection 1")
pool1 = self._licensepool(edition, collection=collection1)
collection2 = self._collection(name="Collection 2")
pool2 = self._licensepool(edition, collection=collection2)
monitor.process_item(pool1.identifier)
assert 1 == monitor.api.update_format_calls
class TestReaper(OverdriveAPITest):
def test_instantiate(self):
# Validate the standard CollectionMonitor interface.
monitor = OverdriveCollectionReaper(
self._db, self.collection, api_class=MockOverdriveAPI
)
|
# Generated by Django 2.2.6 on 2019-11-18 19:12
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('products', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='course',
name='length',
field=models.PositiveIntegerField(blank=True, editable=False, null=True),
),
migrations.AlterField(
model_name='course',
name='size',
field=models.PositiveIntegerField(blank=True, editable=False, null=True),
),
]
|
import warnings
warnings.simplefilter(action="ignore", category=RuntimeWarning)
warnings.simplefilter(action="ignore", category=PendingDeprecationWarning)
import pytest
import os
from tempfile import NamedTemporaryFile, mkdtemp
ROOT = os.path.join(os.path.dirname(os.path.abspath(__file__)), "test_data/")
from sparse_neighbors_search import MinHash
from scipy.sparse import csr_matrix, vstack, save_npz, load_npz
def test_minHash():
path = ROOT + 'nagano_1mb_intermediate.npz'
neighborhood_matrix = load_npz(path)
minHash_object = MinHash(n_neighbors=500, number_of_hash_functions=20, number_of_cores=4,
shingle_size=5, fast=True, maxFeatures=int(max(neighborhood_matrix.getnnz(1))), absolute_numbers=False, max_bin_size=100000,
minimal_blocks_in_common=400, excess_factor=1, prune_inverse_index=False)
minHash_object.fit(neighborhood_matrix[0:10, :])
knn_graph = minHash_object.kneighbors_graph(mode='distance')
|
from nideconv.hierarchical_bayes.backends import HierarchicalStanModel
import numpy as np
from nose.tools import assert_greater
from numpy.testing import assert_allclose
def test_simple_model():
n_subjects = 15
n_cols = 5
length_signal = 10
beta_subject = np.random.randn(n_subjects, n_cols) + np.arange(n_cols)
X = np.random.randn(n_subjects * length_signal,
n_cols)
X[:, 0] = 1
subj_ix = np.repeat(np.arange(n_subjects), length_signal)
beta = beta_subject[subj_ix]
Y = np.einsum('ij,ij->i', beta, X)
Y += np.random.randn(*Y.shape)
model = HierarchicalStanModel(X, subj_ix)
model.sample(Y)
    r = np.corrcoef(model.get_subject_traces().mean(),
                    beta_subject.ravel())[0, 1]
assert_greater(r, 0.95)
assert_allclose(model.get_group_traces().mean(),
np.arange(n_cols),
atol=.5)
|
z = int(input())
while z > 0:
    num = int(input())
    # Alternately add and subtract 1 for num steps: the running total s
    # ends at 1 when num is odd and at 0 when num is even.
    temp = s = 0
    for y in range(0, num):
        if temp == 0:
            s += 1
            temp = 1
        else:
            s -= 1
            temp = 0
    print(s)
    z -= 1
|
#!/usr/bin/env python
"""
__author__ = "Axelle Apvrille"
__status__ = "Alpha"
__license__ = "MIT License"
"""
country = { 'af' : 0,
'ax':1,
'al':2,
'dz':3,
'as':4,
'ad':5,
'ao':6,
'ai':7,
'aq':8,
'ag':9,
'ar':10,
'am':11,
'aw':12,
'au':13,
'at':14,
'az':15,
'bs':16,
'bh':17,
'bd':18,
'bb':19,
'by':20,
'be':21,
'bz':22,
'bj':23,
'bm':24,
'bt':25,
'bo':26,
'bq':27,
'ba':28,
'bw':29,
'bv':30,
'br':31,
'io':32,
'bn':33,
'bg':34,
'bf':35,
'bi':36,
'kh':37,
'cm':38,
'ca':39,
'cv':40,
'ky':41,
'cf':42,
'td':43,
'cl':44,
'cn':45,
'cx':46,
'cc':47,
'co':48,
'km':49,
'cg':50,
'cd':51,
'ck':52,
'cr':53,
'ci':54,
'hr':55,
'cu':56,
'cw':57,
'cy':58,
'cz':59,
'dk':60,
'dj':61,
'dm':62,
'do':63,
'ec':64,
'eg':65,
'sv':66,
'gq':67,
'er':68,
'ee':69,
'et':70,
'fk':71,
'fo':72,
'fj':73,
'fi':74,
'fr':75,
'gf':76,
'pf':77,
'tf':78,
'ga':79,
'gm':80,
'ge':81,
'de':82,
'gh':83,
'gi':84,
'gr':85,
'gl':86,
'gd':87,
'gp':88,
'gu':89,
'gt':90,
'gg':91,
'gn':92,
'gw':93,
'gy':94,
'ht':95,
'hm':96,
'va':97,
'hn':98,
'hk':99,
'hu':100,
'is':101,
'in':102,
'id':103,
'ir':104,
'iq':105,
'ie':106,
'im':107,
'il':108,
'it':109,
'jm':110,
'jp':111,
'je':112,
'jo':113,
'kz':114,
'ke':115,
'ki':116,
'kp':117,
'kr':118,
'kw':119,
'kg':120,
'la':121,
'lv':122,
'lb':123,
'ls':124,
'lr':125,
'ly':126,
'li':127,
'lt':128,
'lu':129,
'mo':130,
'mk':131,
'mg':132,
'mw':133,
'my':134,
'mv':135,
'ml':136,
'mt':137,
'mh':138,
'mq':139,
'mr':140,
'mu':141,
'yt':142,
'mx':143,
'fm':144,
'md':145,
'mc':146,
'mn':147,
'me':148,
'ms':149,
'ma':150,
'mz':151,
'mm':152,
'na':153,
'nr':154,
'np':155,
'nl':156,
'nc':157,
'nz':158,
'ni':159,
'ne':160,
'ng':161,
'nu':162,
'nf':163,
'mp':164,
'no':165,
'om':166,
'pk':167,
'pw':168,
'ps':169,
'pa':170,
'pg':171,
'py':172,
'pe':173,
'ph':174,
'pn':175,
'pl':176,
'pt':177,
'pr':178,
'qa':179,
're':180,
'ro':181,
'ru':182,
'rw':183,
'bl':184,
'sh':185,
'kn':186,
'lc':187,
'mf':188,
'pm':189,
'vc':190,
'ws':191,
'sm':192,
'st':193,
'sa':194,
'sn':195,
'rs':196,
'sc':197,
'sl':198,
'sg':199,
'sx':200,
'sk':201,
'si':202,
'sb':203,
'so':204,
'za':205,
'gs':206,
'ss':207,
'es':208,
'lk':209,
'sd':210,
'sr':211,
'sj':212,
'sz':213,
'se':214,
'ch':215,
'sy':216,
'tw':217,
'tj':218,
'tz':219,
'th':220,
'tl':221,
'tg':222,
'tk':223,
'to':224,
'tt':225,
'tn':226,
'tr':227,
'tm':228,
'tc':229,
'tv':230,
'ug':231,
'ua':232,
'ae':233,
'gb':234,
'us':235,
'um':236,
'uy':237,
'uz':238,
'vu':239,
've':240,
'vn':241,
'vg':242,
'vi':243,
'wf':244,
'eh':245,
'ye':246,
'zm':247,
'zw':248,
'unknown':500
}
def to_int(country_string):
'''Converts a 2 letter country string like fr to the appropriate enum value
If not found, returns the value for unknown'''
lowercase = country_string.lower()
if lowercase in country.keys():
return country[lowercase]
else:
return country['unknown']
def to_key(code):
'''Converts the enum value to a 2 letter country code. If not found,
returns unknown'''
    for name, number in country.items():
if code == number:
return name
return "unknown"
|
#
# Copyright (C) 2021 Satoru SATOH <satoru.satoh @ gmail.com>
# SPDX-License-Identifier: MIT
#
# pylint: disable=inherit-non-class,too-few-public-methods
"""anyconfig basic data types.
"""
import pathlib
import typing
IOI_PATH_STR: str = 'path'
IOI_PATH_OBJ: str = 'pathlib.Path'
IOI_STREAM: str = 'stream'
IOI_TYPES: typing.FrozenSet[str] = frozenset(
(IOI_PATH_STR, IOI_PATH_OBJ, IOI_STREAM)
)
class IOInfo(typing.NamedTuple):
"""Equivalent to collections.namedtuple."""
src: typing.Union[str, pathlib.Path, typing.IO]
type: str
path: str
extension: str
IOI_KEYS: typing.Tuple[str, ...] = IOInfo._fields
PathOrIOT = typing.Union[str, pathlib.Path, typing.IO]
PathOrIOInfoT = typing.Union[PathOrIOT, IOInfo]
InDataT = typing.Mapping[str, typing.Any]
PrimitiveT = typing.Union[None, int, float, bool, str, InDataT]
InDataExT = typing.Union[PrimitiveT, InDataT]
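# Hedged illustration (not part of the original module): what an IOInfo for a
# plain path string might look like; the concrete values are examples only.
#
#     info = IOInfo(src='conf/00.yml', type=IOI_PATH_STR,
#                   path='conf/00.yml', extension='yml')
#     assert info.type in IOI_TYPES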
# vim:sw=4:ts=4:et:
|
import numpy as np
a = np.zeros((2, 3))      # 2x3 array of zeros
print(a)
p = np.full((3, 4), 12)   # 3x4 array filled with the value 12
print(p)
k = np.eye(3)             # 3x3 identity matrix
print(k)
k = np.arange(9)          # integers 0 through 8
print(k)
|
import turtle
bob = turtle.Turtle()
def square(bob):
for i in range(4):
bob.fd(100)
bob.lt(90)
print(bob)
square(bob)
turtle.mainloop()
|
"""Contsructor to take a Python dict containing an API Documentation and
create a HydraDoc object for it
"""
import re
import json
from pyld import jsonld
import requests
from hydra_python_core.doc_writer import (HydraDoc, HydraClass, HydraClassProp,
HydraClassOp, HydraStatus, HydraLink,
HydraCollection, DocUrl)
from typing import Any, Dict, Match, Optional, Tuple, Union, List
from hydra_python_core.namespace import hydra, rdfs
from urllib.parse import urlparse
jsonld.set_document_loader(jsonld.requests_document_loader())
def create_doc(doc: Dict[str, Any], HYDRUS_SERVER_URL: str = None,
API_NAME: str = None) -> HydraDoc:
"""
Create the HydraDoc object from the API Documentation.
:param doc: dictionary of hydra api doc
:param HYDRUS_SERVER_URL: url of the hydrus server
:param API_NAME: name of the api
:return: instance of HydraDoc which server and agent can understand
:raise SyntaxError: If the `doc` doesn't have an entry for `@id` , `@context`, `@type` key.
"""
# These keys must be there in the APIDOC: @context, @id, @type
if not all(key in doc for key in ('@context', '@id', '@type')):
raise SyntaxError("Please make sure doc contains @context, @id and @type")
_context = doc['@context']
base_url = ''
entrypoint = ''
doc_name = 'vocab'
doc_url = ''
_id = ''
_entrypoint = ''
_title = "The default title"
_description = "This is the default description"
_classes = []
_collections = []
_endpoints = []
_possible_status = []
_endpoint_class = []
_endpoint_collection = []
_non_endpoint_classes = []
expanded_doc = jsonld.expand(doc)
for item in expanded_doc:
_id = item['@id']
# Extract base_url, entrypoint and API name
        base_url = urlparse(_id).scheme + '://' + urlparse(_id).netloc
entrypoint = _entrypoint
doc_name = urlparse(_id).path.split('/')[-1]
doc_url = DocUrl(HYDRUS_SERVER_URL, api_name=API_NAME, doc_name=doc_name).doc_url
for entrypoint in item[hydra['entrypoint']]:
_entrypoint = entrypoint['@id']
if hydra['title'] in item:
for title in item[hydra['title']]:
_title = title['@value']
if hydra['description'] in item:
for description in item[hydra['description']]:
_description = description['@value']
for classes in item[hydra['supportedClass']]:
isCollection = False
if hydra['manages'] in classes:
isCollection = True
_collections.append(classes)
for supported_prop in classes[hydra['supportedProperty']]:
for prop in supported_prop[hydra['property']]:
if '@type' in prop:
for prop_type in prop['@type']:
if prop_type == hydra['Link']:
# find the range of the link
for resource_range in prop[rdfs['range']]:
_endpoints.append(check_namespace(resource_range['@id']))
if not isCollection:
_classes.append(classes)
for status in item[hydra['possibleStatus']]:
_possible_status.append(status)
for classes in _classes:
if classes['@id'] == hydra['Resource'] or classes['@id'] == hydra['Collection']:
continue
endpoint = False
if classes['@id'].find("EntryPoint") != -1:
classes['@id'] = "{}{}".format(doc_url, "EntryPoint")
else:
classes['@id'] = check_namespace(classes['@id'])
for endpoints in _endpoints:
if classes['@id'] == endpoints:
endpoint = True
_endpoint_class.append(classes)
if not endpoint:
_non_endpoint_classes.append(classes)
for collections in _collections:
collections['@id'] = check_namespace(collections['@id'])
for endpoints in _endpoints:
if collections['@id'] == endpoints:
_endpoint_collection.append(collections)
# Main doc object
if HYDRUS_SERVER_URL is not None and API_NAME is not None:
apidoc = HydraDoc(
API_NAME, _title, _description, API_NAME, HYDRUS_SERVER_URL, doc_name)
elif entrypoint.get('@id') is not None:
apidoc = HydraDoc(
entrypoint.get('@id'), _title, _description, entrypoint.get('@id'), base_url, doc_name)
else:
raise Exception("No EntryPoint found, please set the API variables.")
# additional context entries
for entry in _context:
apidoc.add_to_context(entry, _context[entry])
# make endpoint classes
for endpoint_classes in _endpoint_class:
if endpoint_classes['@id'] == hydra['Resource'] or \
endpoint_classes['@id'] == hydra['Collection'] or \
endpoint_classes['@id'].find("EntryPoint") != -1:
continue
class_ = create_class(endpoint_classes, endpoint=True)
apidoc.add_supported_class(class_)
# make non-endpoint classes
for classes in _non_endpoint_classes:
if classes['@id'] == hydra['Resource'] or classes['@id'] == hydra['Collection'] or \
classes['@id'].find("EntryPoint") != -1:
continue
class_ = create_class(classes, endpoint=False)
apidoc.add_supported_class(class_)
# make endpoint collections
for endpoint_collection in _endpoint_collection:
collection_ = create_collection(endpoint_collection)
apidoc.add_supported_collection(collection_)
# add possibleStatus
status_list = create_status(_possible_status)
for status in status_list:
apidoc.add_possible_status(status)
# add base collection and resource
apidoc.add_baseResource()
apidoc.add_baseCollection()
apidoc.gen_EntryPoint()
return apidoc
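# Hedged usage sketch (not part of the original module); the file name,
# server URL and API name below are hypothetical placeholders.
#
#     with open("apidoc.jsonld") as f:
#         apidoc_dict = json.load(f)
#     apidoc = create_doc(apidoc_dict,
#                         HYDRUS_SERVER_URL="http://localhost:8080/",
#                         API_NAME="api")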
def create_collection(endpoint_collection: Dict[str, Any]) -> HydraCollection:
"""
Creates the instance of HydraCollection from expanded APIDOC
:param endpoint_collection: creates HydraCollection from expanded API doc
:return: instance of HydraCollection
"""
collection_name = "The default collection name"
collection_description = "The default collection description"
if hydra['title'] in endpoint_collection:
collection_name = endpoint_collection[hydra['title']][0]['@value']
if hydra['description'] in endpoint_collection:
collection_description = endpoint_collection[hydra['description']][0]['@value']
manages = {}
if hydra['object'] in endpoint_collection[hydra['manages']][0]:
object_id = endpoint_collection[hydra['manages']][0][hydra['object']][0]['@id']
manages['object'] = check_namespace(object_id)
if hydra['subject'] in endpoint_collection[hydra['manages']][0]:
subject_id = endpoint_collection[hydra['manages']][0][hydra['subject']][0]['@id']
manages['subject'] = check_namespace(subject_id)
if hydra['property'] in endpoint_collection[hydra['manages']][0]:
property_id = endpoint_collection[hydra['manages']][0][hydra['property']][0]['@id']
manages['property'] = check_namespace(property_id)
is_get = False
is_post = False
is_put = False
is_del = False
for supported_operations in endpoint_collection[hydra['supportedOperation']]:
if supported_operations[hydra['method']][0]['@value'] == 'GET':
is_get = True
        if supported_operations[hydra['method']][0]['@value'] == 'POST':
            is_post = True
        if supported_operations[hydra['method']][0]['@value'] == 'PUT':
            is_put = True
        if supported_operations[hydra['method']][0]['@value'] == 'DELETE':
            is_del = True
collection_ = HydraCollection(collection_name=collection_name,
collection_description=collection_description,
manages=manages, get=is_get,
post=is_post, put=is_put, delete=is_del)
return collection_
def create_class(expanded_class: Dict[str, Any], endpoint: bool) -> HydraClass:
"""
    Creates HydraClass from the expanded API document.
:param expanded_class: the expanded class
:param endpoint: boolean True if class is an endpoint, False if class is not endpoint
:return: HydraClass object that can be added to api doc
"""
class_title = "A Class"
class_description = "The description of the class"
if hydra['title'] in expanded_class:
class_title = expanded_class[hydra['title']][0]['@value']
if hydra['description'] in expanded_class:
class_description = expanded_class[hydra['description']][0]['@value']
class_ = HydraClass(class_title,
class_description, endpoint=endpoint)
# add supported Property
for supported_property in expanded_class[hydra["supportedProperty"]]:
prop_ = create_property(supported_property)
class_.add_supported_prop(prop_)
# add supported operations
for supported_operations in expanded_class[hydra['supportedOperation']]:
op_ = create_operation(supported_operations)
class_.add_supported_op(op_)
return class_
def create_operation(supported_operation: Dict[str, Any]) -> HydraClassOp:
"""
Creates the instance of HydraClassOp
:param supported_operation: The expanded supported operation from the API DOC
:return: HydraClassOp
"""
op_title = "The title of the operation"
op_expects = "null"
op_returns = "null"
op_expects_header = []
op_returns_header = []
op_possible_status = []
if hydra['title'] in supported_operation:
op_title = supported_operation[hydra['title']][0]['@value']
op_method = supported_operation[hydra['method']][0]['@value']
if hydra['expects'] in supported_operation:
op_expects = check_namespace(supported_operation[hydra['expects']][0]['@id'])
if hydra['returns'] in supported_operation:
op_returns = check_namespace(supported_operation[hydra['returns']][0]['@id'])
if hydra['expectsHeader'] in supported_operation:
for header in supported_operation[hydra['expectsHeader']]:
op_expects_header.append(header['@value'])
if hydra['returnsHeader'] in supported_operation:
for header in supported_operation[hydra['returnsHeader']]:
op_returns_header.append(header['@value'])
if hydra['possibleStatus'] in supported_operation:
op_possible_status = create_status(supported_operation[hydra['possibleStatus']])
op_ = HydraClassOp(title=op_title,
method=op_method,
expects=op_expects,
returns=op_returns,
expects_header=op_expects_header,
returns_header=op_returns_header,
possible_status=op_possible_status)
return op_
def create_status(possible_status: List[Any]) -> List[HydraStatus]:
"""
Creates instance of HydraStatus from expanded API doc
:param possible_status: possible status from the expanded API doc
:return: List of instances of HydraStatus
"""
status_list = []
for status in possible_status:
status_id = None
status_title = "The default title for status"
status_desc = "The default description of status"
if hydra['description'] in status:
status_desc = status[hydra['description']][0]['@value']
status_code = status[hydra['statusCode']][0]['@value']
if '@id' in status:
status_id = status['@id']
if hydra['title'] in status:
status_title = status[hydra['title']][0]['@value']
status_ = HydraStatus(status_code, status_id, status_title, status_desc)
status_list.append(status_)
return status_list
def create_property(supported_property: Dict[str, Any]) -> Union[HydraLink, HydraClassProp]:
"""
Creates the HydraClassProp from the expanded supported property
:param supported_property: supported property dict from the expanded api doc
:return: HydraClassProp
"""
prop_id = ""
prop_title = "The title of Property"
if hydra['property'] in supported_property:
prop_id = check_namespace(supported_property[hydra['property']][0]['@id'])
if '@type' in supported_property[hydra['property']][0]:
if supported_property[hydra['property']][0]['@type'][0] == hydra['Link']:
prop_id = create_link(supported_property[hydra['property']][0])
else:
raise KeyError("{} is missing".format(hydra['property']))
if hydra['title'] in supported_property:
prop_title = supported_property[hydra['title']][0]['@value']
prop_read = supported_property[hydra['readable']][0]['@value']
prop_require = supported_property[hydra['required']][0]['@value']
prop_write = supported_property[hydra['writeable']][0]['@value']
prop_ = HydraClassProp(prop=prop_id,
title=prop_title,
required=prop_require,
read=prop_read,
write=prop_write)
return prop_
def create_link(supported_property: Dict[str, Any]) -> HydraLink:
"""
Creates the instances of HydraLink
:param supported_property: expanded Property
:return: instance of HydraLink
"""
prop_title = 'The default Link title'
prop_desc = 'The default Link description'
prop_id = check_namespace(supported_property['@id'])
if hydra['description'] in supported_property:
        prop_desc = supported_property[hydra['description']][0]['@value']
if hydra['title'] in supported_property:
prop_title = supported_property[hydra['title']][0]['@value']
prop_domain = check_namespace(supported_property[rdfs['domain']][0]['@id'])
prop_range = check_namespace(supported_property[rdfs['range']][0]['@id'])
link_ = HydraLink(prop_id, prop_title, prop_desc, prop_domain, prop_range)
if hydra['supportedOperation'] in supported_property:
for operations in supported_property[hydra['supportedOperation']]:
operation = create_operation(operations)
link_.add_supported_op(operation)
return link_
def check_namespace(id_: str = None) -> str:
"""
A helper method to check if the classes and properties are in the same
namespace and if not bring them into the right namespace
:param id_ The id to check
:return: correct url
"""
if id_.find(DocUrl.doc_url) == -1 and id_ != "null":
if id_.find('?resource=') != -1:
resource_name = id_.split('?resource=')[-1]
id_ = "{}{}".format(DocUrl.doc_url, resource_name)
elif id_.find('#type') != -1:
id_ = "{}{}".format(DocUrl.doc_url, id_.split('#')[-1])
else:
return id_
return id_
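# Hedged illustration of check_namespace (not part of the original module);
# the exact value of DocUrl.doc_url depends on how the doc was created.
#
#     check_namespace("http://example.com/api?resource=Drone")
#     # -> DocUrl.doc_url + "Drone"
#     check_namespace("http://example.com/api/vocab#type")
#     # -> DocUrl.doc_url + "type"
#     check_namespace("null")
#     # -> "null" (returned unchanged)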
|
import json
from signbank.dictionary.models import *
from signbank.settings.server_specific import *
# these are the categories that are displayed in chartjs in the GlossFrequencyView template
SEX_CATEGORIES = ['Female', 'Male']
AGE_CATEGORIES = ['< 25', '25 - 35', '36 - 65', '> 65']
def collect_speaker_age_data(speakers_summary, age_range):
# the following collects the speakers distributed over a range of ages to display on the x axis
# for display in chartjs, the age labels are stored separately from the number of speakers having that age
# ages to display data for across the x axis
speaker_age_data = []
for i in range(0, 100):
# the age is a string for javascript
i_key = str(i)
if i_key in speakers_summary.keys():
# some speakers have this age
# set the number of speakers with this age
speaker_age_data.append(speakers_summary[i_key])
age_range[i] = True
else:
# no speakers have this age
# only show labels for ages that have speakers of that age
speaker_age_data.append(0)
return (speaker_age_data, age_range)
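# Hedged illustration (not part of the original module): with
# speakers_summary = {'30': 2, '45': 1} and age_range a list of 100 booleans,
# the returned speaker_age_data has 100 entries (2 at index 30, 1 at index 45,
# 0 elsewhere) and age_range[30] / age_range[45] are set to True.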
def collect_variants_data(variants):
# parameter is a list of variants objects
# returns a tuple
# variants data quick access: dictionary mapping variant annotation to speaker data for variant
# sorted variants with keys: sorted list of pairs ( variant annotation, variant object )
if not variants:
return ({}, [])
variants_with_keys = []
if len(variants) > 1:
for v in variants:
# get the annotation explicitly
# do not use the __str__ property idgloss
try:
v_idgloss = v.annotationidglosstranslation_set.get(language=v.lemma.dataset.default_language).text
except ObjectDoesNotExist:
# no translation found for annotation of gloss, display gloss id instead
v_idgloss = str(v.id)
variants_with_keys.append((v_idgloss, v))
sorted_variants_with_keys = sorted(variants_with_keys, key=lambda tup: tup[0])
variants_data_quick_access = {}
for (og_idgloss, variant_of_gloss) in sorted_variants_with_keys:
variants_speaker_data = variant_of_gloss.speaker_data()
variants_data_quick_access[og_idgloss] = variants_speaker_data
return (variants_data_quick_access, sorted_variants_with_keys)
def collect_variants_age_range_data(sorted_variants_with_keys, age_range):
variants_age_range_distribution_data = {}
for (variant_idgloss, variant_of_gloss) in sorted_variants_with_keys:
variant_speaker_age_data_v = variant_of_gloss.speaker_age_data()
speaker_age_data_v = []
for i in range(0, 100):
i_key = str(i)
if i_key in variant_speaker_age_data_v.keys():
speaker_age_data_v.append(variant_speaker_age_data_v[i_key])
age_range[i] = True
else:
speaker_age_data_v.append(0)
variants_age_range_distribution_data[variant_idgloss] = speaker_age_data_v
return (variants_age_range_distribution_data, age_range)
def collect_variants_age_sex_raw_percentage(sorted_variants_with_keys, variants_data_quick_access):
variants_sex_distribution_data_raw = {}
variants_sex_distribution_data_percentage = {}
variants_sex_distribution_data_totals = {}
for i_key in SEX_CATEGORIES:
variants_sex_distribution_data_raw[i_key] = {}
variants_sex_distribution_data_percentage[i_key] = {}
variants_sex_distribution_data_totals[i_key] = 0
variants_age_distribution_data_raw = {}
variants_age_distribution_data_percentage = {}
variants_age_distribution_data_totals = {}
for i_key in AGE_CATEGORIES:
variants_age_distribution_data_raw[i_key] = {}
variants_age_distribution_data_percentage[i_key] = {}
variants_age_distribution_data_totals[i_key] = 0
for (variant_idgloss, variant_of_gloss) in sorted_variants_with_keys:
for i_key in SEX_CATEGORIES:
variants_sex_distribution_data_totals[i_key] += variants_data_quick_access[variant_idgloss][i_key]
for i_key in AGE_CATEGORIES:
variants_age_distribution_data_totals[i_key] += variants_data_quick_access[variant_idgloss][i_key]
for i_key in SEX_CATEGORIES:
total_gender_across_variants = variants_sex_distribution_data_totals[i_key]
for (variant_idgloss, variant_of_gloss) in sorted_variants_with_keys:
variant_speaker_data_v = variants_data_quick_access[variant_idgloss]
i_value = variant_speaker_data_v[i_key]
speaker_data_v = i_value
if total_gender_across_variants > 0:
speaker_data_p = i_value / total_gender_across_variants
else:
speaker_data_p = 0
variants_sex_distribution_data_raw[i_key][variant_idgloss] = speaker_data_v
variants_sex_distribution_data_percentage[i_key][variant_idgloss] = speaker_data_p
for i_key in AGE_CATEGORIES:
total_age_across_variants = variants_age_distribution_data_totals[i_key]
for (variant_idgloss, variant_of_gloss) in sorted_variants_with_keys:
variant_speaker_data_v = variants_data_quick_access[variant_idgloss]
i_value = variant_speaker_data_v[i_key]
speaker_data_v = i_value
if total_age_across_variants > 0:
speaker_data_p = i_value / total_age_across_variants
else:
speaker_data_p = 0
variants_age_distribution_data_raw[i_key][variant_idgloss] = speaker_data_v
variants_age_distribution_data_percentage[i_key][variant_idgloss] = speaker_data_p
return (variants_sex_distribution_data_raw, variants_sex_distribution_data_percentage,
variants_age_distribution_data_raw, variants_age_distribution_data_percentage)
|
# Generated by Django 3.1.13 on 2021-10-26 13:54
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('app', '0087_auto_20211026_0131'),
]
operations = [
migrations.RemoveField(
model_name='descrierepage',
name='bolta_peste_pronaos_material',
),
]
|
"""INSTEON Standard Receive Message Type 0x50."""
from insteonplm.constants import (MESSAGE_STANDARD_MESSAGE_RECEIVED_0X50,
MESSAGE_STANDARD_MESSAGE_RECIEVED_SIZE)
from insteonplm.address import Address
from insteonplm.messages.message import Message
from insteonplm.messages.messageFlags import MessageFlags
class StandardReceive(Message):
"""Insteon Standard Length Message Received.
Message type 0x50
"""
_code = MESSAGE_STANDARD_MESSAGE_RECEIVED_0X50
_sendSize = MESSAGE_STANDARD_MESSAGE_RECIEVED_SIZE
_receivedSize = MESSAGE_STANDARD_MESSAGE_RECIEVED_SIZE
_description = 'INSTEON Standard Message Received'
def __init__(self, address, target, commandtuple, cmd2=None, flags=0x00):
"""Initialize the StandardReceive message class."""
if commandtuple.get('cmd1') is not None:
cmd1 = commandtuple['cmd1']
cmd2out = commandtuple['cmd2']
else:
raise ValueError
if cmd2 is not None:
cmd2out = cmd2
if cmd2out is None:
raise ValueError
self._address = Address(address)
self._target = Address(target)
self._messageFlags = MessageFlags(flags)
# self._messageFlags.extended = 0
self._cmd1 = cmd1
self._cmd2 = cmd2out
@classmethod
def from_raw_message(cls, rawmessage):
"""Create message from a raw byte stream."""
return StandardReceive(rawmessage[2:5],
rawmessage[5:8],
{'cmd1': rawmessage[9],
'cmd2': rawmessage[10]},
flags=rawmessage[8])
# pylint: disable=protected-access
@classmethod
def template(cls, address=None, target=None, commandtuple={},
cmd2=-1, flags=None):
"""Create a message template used for callbacks."""
msgraw = bytearray([0x02, cls._code])
msgraw.extend(bytes(cls._receivedSize))
msg = StandardReceive.from_raw_message(msgraw)
cmd1 = commandtuple.get('cmd1')
cmd2out = commandtuple.get('cmd2')
        if cmd2 != -1:
cmd2out = cmd2
msg._address = Address(address)
msg._target = Address(target)
msg._messageFlags = MessageFlags(flags)
msg._cmd1 = cmd1
msg._cmd2 = cmd2out
return msg
@property
def address(self):
"""Return the address of the device."""
return self._address
@property
def target(self):
"""Return the address of the target device."""
return self._target
@property
def cmd1(self):
"""Return the cmd1 property of the message."""
return self._cmd1
@property
def cmd2(self):
"""Return the cmd2 property of the message."""
return self._cmd2
@property
def flags(self):
"""Return the message flags."""
return self._messageFlags
@property
def targetLow(self):
"""Return the low byte of the target message property.
Used in All-Link Cleanup message types.
"""
low_byte = None
if self.target.addr is not None and self._messageFlags.isBroadcast:
low_byte = self.target.bytes[0]
return low_byte
@property
def targetMed(self):
"""Return the middle byte of the target message property.
Used in All-Link Cleanup message types.
"""
med_byte = None
if self.target.addr is not None and self._messageFlags.isBroadcast:
med_byte = self.target.bytes[1]
return med_byte
@property
def targetHi(self):
"""Return the high byte of the target message property.
Used in All-Link Cleanup message types.
"""
hi_byte = None
if self.target.addr is not None and self._messageFlags.isBroadcast:
hi_byte = self.target.bytes[2]
return hi_byte
def _message_properties(self):
return [{'address': self._address},
{'target': self._target},
{'flags': self._messageFlags},
{'cmd1': self._cmd1},
{'cmd2': self._cmd2}]
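# Hedged example (not part of the original module): building a StandardReceive
# from an 11-byte raw message, following from_raw_message() above (bytes 2-4
# address, 5-7 target, 8 flags, 9 cmd1, 10 cmd2). The addresses and command
# bytes below are made up.
#
#     raw = bytearray([0x02, 0x50,
#                      0x1a, 0x2b, 0x3c,   # from address
#                      0x4d, 0x5e, 0x6f,   # target address
#                      0x00,               # flags
#                      0x11, 0xff])        # cmd1, cmd2
#     msg = StandardReceive.from_raw_message(raw)
#     assert msg.cmd1 == 0x11 and msg.cmd2 == 0xff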
|
import unittest
import tempfile
try:
from io import StringIO
except ImportError:
from StringIO import StringIO
from vcfremapper.sort_vcf import sort_vcf
class TestSortVcf(unittest.TestCase):
''' test sort_vcf function
'''
def test_sort_vcf(self):
''' check sort_vcf function
'''
lines = ['1\t100\t.\tA\tG\t100\tPASS\tAC=100\n',
'2\t150\t.\tA\tG\t100\tPASS\tAC=100\n',
'2\t150\t.\tA\tC\t100\tPASS\tAC=100\n',
'1\t200\t.\tA\tG\t100\tPASS\tAC=100\n',
'1\t180\t.\tA\tG\t100\tPASS\tAC=100\n']
input = tempfile.NamedTemporaryFile(mode='w+t')
output = tempfile.NamedTemporaryFile(mode='w+t')
input.writelines(lines)
input.flush()
header = '##fileformat=VCFv4.1\n' \
'#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\n'
# define the byte offsets for the lines by their coordinates
coords = {
'1': {
(100, 'A', ('G',)): 0,
(200, 'A', ('G',)): 84,
(180, 'A', ('G',)): 112,
},
'2': {
(150, 'A', ('G',)): 28,
(150, 'A', ('C',)): 56,
},
}
# sort the VCF
sort_vcf(coords, input, output, header)
output.seek(0)
self.assertEqual(output.read(), header + ''.join(sorted(lines)))
|
"""
Given a binary tree, return all root-to-leaf paths.
Note: A leaf is a node with no children.
Example:
Input:
1
/ \
2 3
\
5
Output: ["1->2->5", "1->3"]
Explanation: All root-to-leaf paths are: 1->2->5, 1->3
"""
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def binaryTreePaths(self, root):
"""
:type root: TreeNode
:rtype: List[str]
"""
"""
Similar to Q.Path Sum
DFS + Stack
"""
# #Boundary Conditions
if not root:
return []
res = []
res_min_path_sum = []
stack = [[root, "0"]]
while stack:
elem = stack.pop()
node = elem[0]
res.append([node.val, elem[1] + "->" + str(node.val)])
if not node.right and not node.left:
res_min_path_sum.append(res[-1])
if node.right:
stack.append([node.right, elem[1] + "->" + str(node.val)])
if node.left:
stack.append([node.left, elem[1] + "->" + str(node.val)])
# print([stack], res)
        # print(res_min_path_sum)
        return [ele[1][3:] for ele in res_min_path_sum]
|
import csv
import numpy as np
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
y = np.zeros(19)
x = np.zeros(19)
index = 0
N = 19
ind = np.arange(N)
width = 0.5
msg = ax.annotate('Click bars for annotation', xy=(0, 0), xytext=(2012, 20000))
category = []
categoryFoodName = []
with open("food_imports.csv", 'r') as fil:
data = csv.DictReader(fil, delimiter=',')
for row in data:
foodName = row['Food Type']
del(row['Food Type'])
for year, dollarAmount in row.items():
temp = dollarAmount.replace(',', '')
if len(temp) != 0:
y[index] = float(temp)
x[index] = int(year)
index = index + 1
if year == '1999':
categoryFoodName.append(foodName)
        category.append(y.copy())  # copy, since y is reused for the next row
index = 0
def onpick(event):
global msg
amount = event.artist.get_height()
date = int(event.mouseevent.xdata)
categories = event.artist.get_label()
msg.remove()
msg = ax.annotate("Category: {},\nYear: {},\nPrice ($) per Million: {}\n".format(categories, amount, x.item(1)),
xy=(0, 0), xytext=(4, 18000))
print(date, amount)
category1Plot = ax.bar(x, category[0], picker=1, label=categoryFoodName[0])
category2Plot = ax.bar(x, category[1], picker=1, label=categoryFoodName[1], bottom=category[0])
category3Plot = ax.bar(x, category[2], picker=1, label=categoryFoodName[2], bottom=category[0]+category[1])
category4Plot = ax.bar(x, category[3], picker=1, label=categoryFoodName[3], bottom=category[0]+category[1]+category[2])
category5Plot = ax.bar(x, category[4], picker=1, label=categoryFoodName[4],
bottom=category[0]+category[1]+category[2]+category[3])
category6Plot = ax.bar(x, category[5], picker=1, label=categoryFoodName[5],
bottom=category[0]+category[1]+category[2]+category[3]+category[4])
category7Plot = ax.bar(x, category[6], picker=1, label=categoryFoodName[6],
bottom=category[0]+category[1]+category[2]+category[3]+category[4]+category[5])
category8Plot = ax.bar(x, category[7], picker=1, label=categoryFoodName[7],
bottom=category[0]+category[1]+category[2]+category[3]+category[4]+category[5]+category[6])
category9Plot = ax.bar(x, category[8], picker=1, label=categoryFoodName[8],
bottom=category[0]+category[1]+category[2]+category[3]+category[4]+category[5]+category[6]+
category[7])
category10Plot = ax.bar(x, category[9], picker=1, label=categoryFoodName[9],
bottom=category[0]+category[1]+category[2]+category[3]+category[4]+category[5]+category[6]+
category[7]+category[8])
category11Plot = ax.bar(x, category[10], picker=1, label=categoryFoodName[10],
bottom=category[0]+category[1]+category[2]+category[3]+category[4]+category[5]+category[6]+
category[7]+category[8]+category[9])
category12Plot = ax.bar(x, category[11], picker=1, label=categoryFoodName[11],
bottom=category[0]+category[1]+category[2]+category[3]+category[4]+category[5]+category[6]+
category[7]+category[8]+category[9]+category[10])
category13Plot = ax.bar(x, category[12], picker=1, label=categoryFoodName[12],
bottom=category[0]+category[1]+category[2]+category[3]+category[4]+category[5]+category[6]+
category[7]+category[8]+category[9]+category[10]+category[11])
category14Plot = ax.bar(x, category[13], picker=1, label=categoryFoodName[13],
bottom=category[0]+category[1]+category[2]+category[3]+category[4]+category[5]+category[6]+
                        category[7]+category[8]+category[9]+category[10]+category[11]+category[12])
category15Plot = ax.bar(x, category[14], picker=1, label=categoryFoodName[14],
bottom=category[0]+category[1]+category[2]+category[3]+category[4]+category[5]+category[6]+
category[7]+category[8]+category[9]+category[10]+category[11]+category[12]+category[13])
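# A possible refactor (hedged sketch, not part of the original script): the
# repetitive category*Plot calls above could be generated in a loop, keeping a
# running total for the 'bottom' of each stacked segment.
#
#     bottoms = np.zeros_like(category[0])
#     for values, label in zip(category, categoryFoodName):
#         ax.bar(x, values, picker=1, label=label, bottom=bottoms)
#         bottoms = bottoms + values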
plt.xlabel('Year')
plt.ylabel('Price ($) per Million')
plt.title('Food Imports by Year')
plt.xticks(x)
plt.legend(loc='upper left', bbox_to_anchor=(0., 1.1))
fig.canvas.mpl_connect('pick_event', onpick)
plt.show()
|
import click
import six
from corgie import scheduling, residuals, helpers, stack
from corgie.log import logger as corgie_logger
from corgie.layers import get_layer_types, DEFAULT_LAYER_TYPE, \
str_to_layer_type
from corgie.boundingcube import get_bcube_from_coords
from corgie.argparsers import LAYER_HELP_STR, \
create_layer_from_spec, corgie_optgroup, corgie_option, \
create_stack_from_spec
class RenderJob(scheduling.Job):
def __init__(self, src_stack, dst_stack, mips, pad, render_masks,
blackout_masks, seethrough, bcube, chunk_xy, chunk_z,
additional_fields=[], seethrough_offset=-1):
self.src_stack = src_stack
self.dst_stack = dst_stack
self.mips = mips
self.pad = pad
self.bcube = bcube
self.chunk_xy = chunk_xy
self.chunk_z = chunk_z
self.render_masks = render_masks
self.blackout_masks = blackout_masks
self.additional_fields = additional_fields
self.seethrough = seethrough
self.seethrough_offset = seethrough_offset
super().__init__()
def task_generator(self):
for mip in self.mips:
chunks = self.dst_stack.get_layers()[0].break_bcube_into_chunks(
bcube=self.bcube,
chunk_xy=self.chunk_xy,
chunk_z=self.chunk_z,
mip=mip)
tasks = [RenderTask(self.src_stack,
self.dst_stack,
blackout_masks=self.blackout_masks,
render_masks=self.render_masks,
mip=mip,
pad=self.pad,
bcube=input_chunk,
additional_fields=self.additional_fields,
seethrough=self.seethrough,
seethrough_offset=self.seethrough_offset) \
for input_chunk in chunks]
corgie_logger.info(f"Yielding render tasks for bcube: {self.bcube}, MIP: {mip}")
yield tasks
class RenderTask(scheduling.Task):
def __init__(self, src_stack, dst_stack, additional_fields, render_masks,
blackout_masks, seethrough, seethrough_offset, mip,
pad, bcube):
super().__init__(self)
self.src_stack = src_stack
self.dst_stack = dst_stack
self.render_masks = render_masks
self.blackout_masks = blackout_masks
self.mip = mip
self.bcube = bcube
self.pad = pad
self.additional_fields = additional_fields
self.seethrough = seethrough
self.seethrough_offset = seethrough_offset
self.blackout_value = 0.0
def execute(self):
padded_bcube = self.bcube.uncrop(self.pad, self.mip)
seethrough_bcube = self.bcube.translate(
z_offset=self.seethrough_offset)
for f in self.additional_fields:
#just in case the "additional" field is actually already a part of src_stack
if f not in self.src_stack.layers.values():
self.src_stack.add_layer(f)
src_translation, src_data_dict = self.src_stack.read_data_dict(padded_bcube,
mip=self.mip, add_prefix=False)
agg_field = src_data_dict[f"agg_field"]
agg_mask = None
if self.blackout_masks or self.seethrough:
mask_layers = self.dst_stack.get_layers_of_type(["mask"])
mask_layer_names = [l.name for l in mask_layers]
for n, d in six.iteritems(src_data_dict):
if n in mask_layer_names:
if agg_mask is None:
agg_mask = d
else:
agg_mask = ((agg_mask + d) > 0).byte()
if agg_mask is not None:
coarsen_factor = int(2**(6 - self.mip))
agg_mask = helpers.coarsen_mask(agg_mask, coarsen_factor)
if agg_field is not None:
warped_mask = residuals.res_warp_img(
agg_mask.float(),
agg_field)
else:
warped_mask = agg_mask
warped_mask = (warped_mask > 0.4).byte()
else:
warped_mask = None
if self.render_masks:
write_layers = self.dst_stack.get_layers_of_type(["img", "mask"])
else:
write_layers = self.dst_stack.get_layers_of_type("img")
for l in write_layers:
src = src_data_dict[f"{l.name}"]
if agg_field is not None:
warped_src = residuals.res_warp_img(src.float(), agg_field)
else:
warped_src = src
cropped_out = helpers.crop(warped_src, self.pad)
if self.blackout_masks or self.seethrough:
if warped_mask is not None:
warped_mask = helpers.crop(warped_mask, self.pad)
if l.get_layer_type() == "img" and self.blackout_masks and warped_mask is not None:
cropped_out[warped_mask] = self.blackout_value
if l.get_layer_type() == "img" and self.seethrough and warped_mask is not None:
seethrough_data = l.read(
bcube=seethrough_bcube,
mip=self.mip)
coarsen_factor = int(2**(6 - self.mip))
seethrough_mask = helpers.coarsen_mask(warped_mask, coarsen_factor)
seethrough_mask[cropped_out == 0] = True
cropped_out[seethrough_mask] = \
seethrough_data[seethrough_mask]
seenthru = (cropped_out[seethrough_mask] != 0).sum()
corgie_logger.debug(f"Seenthrough {seenthru} pixels")
l.write(cropped_out, bcube=self.bcube, mip=self.mip)
@click.command()
@corgie_optgroup('Layer Parameters')
@corgie_option('--src_layer_spec', '-s', nargs=1,
type=str, required=True, multiple=True,
help='Source layer spec. Use multiple times to include all masks, fields, images. ' + \
LAYER_HELP_STR)
#
@corgie_option('--dst_folder', nargs=1,
type=str, required=True,
help= "Folder where rendered stack will go")
@corgie_optgroup('Render Method Specification')
@corgie_option('--chunk_xy', '-c', nargs=1, type=int, default=1024)
@corgie_option('--chunk_z', nargs=1, type=int, default=1)
@corgie_option('--pad', nargs=1, type=int, default=512)
@corgie_option('--mip', 'mips', nargs=1, type=int, required=True, multiple=True)
@corgie_option('--render_masks/--no_render_masks', default=True)
@corgie_option('--blackout_masks/--no_blackout_masks', default=False)
@corgie_option('--seethrough/--no_seethrough', default=False)
@corgie_option('--force_chunk_xy', is_flag=True)
@corgie_option('--force_chunk_z', is_flag=True)
@corgie_optgroup('Data Region Specification')
@corgie_option('--start_coord', nargs=1, type=str, required=True)
@corgie_option('--end_coord', nargs=1, type=str, required=True)
@corgie_option('--coord_mip', nargs=1, type=int, default=0)
@click.option('--suffix', nargs=1, type=str, default=None)
@click.pass_context
def render(ctx, src_layer_spec, dst_folder, pad, render_masks, blackout_masks,
seethrough, chunk_xy, chunk_z, start_coord, end_coord, mips,
coord_mip, force_chunk_xy, force_chunk_z, suffix):
scheduler = ctx.obj['scheduler']
if suffix is None:
suffix = '_rendered'
else:
suffix = f"_{suffix}"
corgie_logger.debug("Setting up layers...")
src_stack = create_stack_from_spec(src_layer_spec,
name='src', readonly=True)
if force_chunk_xy:
force_chunk_xy = chunk_xy
else:
force_chunk_xy = None
if force_chunk_z:
force_chunk_z = chunk_z
else:
force_chunk_z = None
dst_stack = stack.create_stack_from_reference(reference_stack=src_stack,
folder=dst_folder, name="dst", types=["img", "mask"],
force_chunk_xy=force_chunk_xy, force_chunk_z=force_chunk_z,
suffix=suffix, overwrite=True)
bcube = get_bcube_from_coords(start_coord, end_coord, coord_mip)
render_job = RenderJob(src_stack=src_stack,
dst_stack=dst_stack,
mips=mips,
pad=pad,
bcube=bcube,
chunk_xy=chunk_xy,
chunk_z=chunk_z,
render_masks=render_masks,
blackout_masks=blackout_masks,
seethrough=seethrough)
# create scheduler and execute the job
scheduler.register_job(render_job, job_name="Render {}".format(bcube))
scheduler.execute_until_completion()
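# Hedged CLI sketch (not part of the original file): one way the options above
# might be combined when this command is exposed through the corgie CLI; the
# layer spec, folder and coordinates are placeholders.
#
#     corgie render \
#         -s '<source layer spec JSON, see LAYER_HELP_STR>' \
#         --dst_folder gs://bucket/rendered \
#         --mip 2 --chunk_xy 1024 --pad 512 \
#         --start_coord "0,0,0" --end_coord "1024,1024,10" --coord_mip 0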
|
# Copyright 2021 Modelyst LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
reserved_words = {
"and",
"del",
"for",
"is",
"raise",
"assert",
"elif",
"from",
"lambda",
"return",
"break",
"else",
"global",
"not",
"try",
"class",
"except",
"if",
"or",
"while",
"continue",
"exec",
"import",
"pass",
"yield",
"def",
"finally",
"in",
"print",
"as",
}
def which(program):
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, _ = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
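# Hedged example (not part of the original module): the result depends
# entirely on the local environment.
#
#     which("python3")   # e.g. "/usr/bin/python3", or None if not on PATH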
|
from django.contrib.auth.models import User
from allauth.socialaccount.helpers import complete_social_login
from allauth.socialaccount.helpers import render_authentication_error
from allauth.socialaccount import requests
from allauth.socialaccount.models import SocialAccount, SocialLogin
from provider import PersonaProvider
def persona_login(request):
assertion = request.POST.get('assertion', '')
audience = request.build_absolute_uri('/')
resp = requests.post('https://verifier.login.persona.org/verify',
{ 'assertion': assertion,
'audience': audience })
if resp.json['status'] != 'okay':
return render_authentication_error(request)
email = resp.json['email']
user = User(email=email)
extra_data = resp.json
account = SocialAccount(uid=email,
provider=PersonaProvider.id,
extra_data=extra_data,
user=user)
# TBD: Persona e-mail addresses are verified, so we could check if
# a matching local user account already exists with an identical
# verified e-mail address and short-circuit the social login. Then
# again, this holds for all social providers that guarantee
# verified e-mail addresses, so if at all, short-circuiting should
# probably not be handled here...
login = SocialLogin(account)
login.state = SocialLogin.state_from_request(request)
return complete_social_login(request, login)
|
"""A collection of functions to parse STEM input and output files"""
from __future__ import division
import os.path
from glob import glob
import re
import numpy as np
import pandas as pd
import datetime
from netCDF4 import Dataset
# --------------------------------------------------
# helper classes, functions
class BlockReader(object):
"""
class to read ASCII text in n-line "blocks"
"""
def __init__(self, f, n=1):
self.f = f
self.n = n
def __iter__(self):
return self
    def next(self):
        return [next(self.f) for i in range(self.n)]
    __next__ = next  # Python 3 compatibility
def string_strip(lst):
"""
apply strip to each string in an iterable container of strings
ARGS:
lst (list): an iterable containing strings
"""
return([x.strip() for x in lst])
def file_len(fname):
"""returns the number of lines contained in the file fname.
ARGS:
fname (string): path of the file"""
with open(fname) as f:
for i, l in enumerate(f):
pass
return i + 1
class StemRunFile(object):
"""
Class to parse variable/value pairs from STEM run files.
CLASS ATTRIBUTES:
fname (string): the full path to the STEM run file
lines (list of strings): all lines from the run file that contain
variable assignments
vars (dict): variable (name, value) pairs
t_start (datetime.datetime): the starting time of the STEM simulation
t_end (datetime.datetime): the ending time of the STEM simulation
"""
def __init__(self, fname):
"""Parses the specified file and populates lines and vars.
ARGS:
fname (string): full path to a STEM run file
"""
self.fname = fname
self.parse_to_list()
self.trim_lines()
self.create_dict()
self.sub_vars()
self.calc_run_start()
self.calc_run_end()
def parse_to_list(self):
"""parse a STEM run file to a list of strings containing each line."""
f = open(self.fname)
self.lines = f.readlines()
f.close()
def trim_lines(self):
"""discard lines that do not contain variable/value assignments.
Lines that do not begin with a '#' and do contain '=' or
'setenv' are assumed to be assignments
"""
# get rid of comments
        self.lines = [ln for ln in self.lines if ln[0] != '#']
# keep variable assigments (shell and environment)
self.lines = [ln for ln in self.lines if
('=' in ln) or ('setenv' in ln)]
def create_dict(self):
"""populate a dict from (variable, value) pairs.
for each line in the format "var=val" or "sentenv var val",
put var and val into a dict
"""
# re to match "var = val", with arbitrary whitespace around the =
m_eq = [re.search('(?P<var>\w+)=(?P<val>.+)', ln) for
ln in self.lines]
m_eq = [mat for mat in m_eq if mat is not None]
# re to match "setenv var val", with arbitrary whitespace separating
m_env = [re.search('setenv\s(?P<var>.+)\s(?P<val>.+)', ln) for
ln in self.lines]
m_env = [mat for mat in m_env if mat is not None]
# combine the two lists of dicts into one big dict
merged_dict = {}
for m in m_eq:
d = m.groupdict()
merged_dict[d['var']] = d['val']
for m in m_env:
d = m.groupdict()
merged_dict[d['var']] = d['val']
self.vars = merged_dict
def sub_vars(self):
"""get values of environment vars referenced in the run file
Substitute environment variables referenced in the run file
with their values if they specify paths. For variables in
the format $ABC_DEF, try first to replace from the other
variables defined in the run file. If the variable is not
present, look within os environment variables. If both fail
leave the variable unchanged.
"""
for k in self.vars.keys():
# find environment variables
match = re.search('\$(?P<varname>[A-Z0-9_]+)', self.vars[k])
if match is not None:
varname = match.group('varname')
if ((varname in self.vars.keys()) and
(os.path.exists(self.vars[varname]))):
full_val = self.vars[varname]
self.vars[k] = self.vars[k].replace('$' + varname,
full_val)
elif ((os.getenv(varname) is not None) and
(os.path.exists(os.getenv(varname)))):
full_val = os.getenv(varname)
self.vars[k] = self.vars[k].replace('$' + varname,
full_val)
def calc_run_start(self):
"""place start time of run in datetime.datetime object"""
t = (datetime.datetime.strptime(self.vars['istday'], '%Y %m %d') +
datetime.timedelta(hours=int(self.vars['isthr'])))
self.t_start = t
def calc_run_end(self):
"""place end time of run in datetime.datetime object"""
dt = datetime.timedelta(hours=int(self.vars['iperiod']))
self.t_end = self.t_start + dt
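# Illustrative usage sketch, not part of the original module; the run-file
# path below is hypothetical.
def _example_stem_run_file():
    """Parse a STEM run file and report the simulation window it defines."""
    run = StemRunFile('/path/to/stem/run_file')  # hypothetical path
    print('run covers {} to {}'.format(run.t_start, run.t_end))
    return run.vars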
# --------------------------------------------------
# parser functions
def parse_inputdat(fname):
"""parse the STEM input.dat file specified by fname to a pandas
data frame.
RETURNS: a data frame with the variables t, x, y, z, and COS.
"""
input_dat = pd.read_csv(fname,
sep=None,
header=None,
skiprows=4,
names=('t', 'x', 'y', 'z', 'COS'))
return(input_dat)
def parse_tobspred(fname, inputdat_fname=None, emi_fac_l_no=None):
""" parse model and observed OCS concentrations and emissions
scaling factors ('emi_fac') from a STEM t_obs_pred.dat file
specified by fname to a two pandas data frames.
ARGS:
fname (string): full path to the t_obs_pred.dat file to be read
inputdat_fname (string): optional; full path to the input.dat file
        that drove the generation of the t_obs_pred.dat file. If
specified the input.dat file is parsed to determine how many
lines of concentrations there are in the t_obs_pred.dat file.
emi_fac_l_no (integer): optional; The line number of the first line
in t_obs_pred.dat that contains emissions scaling factors.
Ignored if inputdat_fname is specified. Must be specified if
inputdat_fname is not specified.
RETURNS:
a dict with two entries: ocs_conc and emi_fac. Each entry
contains a pandas data frame. ocs_conc contains time stamps (t),
observed OCS concentrations (obs), and model OCS concentrations
(mod). emi_fac contains STEM grid X coordinate (x), STEM grid Y
coordinate (y), and the emissions scaling factor (emi_fac).
.. note::
The input.dat file is used to identify the line in the
t_obs_pred.dat file where the data change from concentrations to
emissions scaling factors. The only way I could think of to do
this completely from the information within the t_obs_pred.dat
file is to identify the first line where the first number is an
integer, rather than a floating point number in exponential
notation. This approach would be vulnerable to a change in output
format within STEM, though; therefore I decided to go with using
input.dat.
"""
if inputdat_fname is not None:
n_hdr_lines = 4
emi_fac_l_no = file_len(inputdat_fname) - n_hdr_lines
elif emi_fac_l_no is None:
        raise TypeError('Neither inputdat_fname nor '
                        'emi_fac_l_no was specified')
ocs_conc = pd.read_csv(fname,
                           sep=r'\s+',
header=None,
skipinitialspace=False,
nrows=emi_fac_l_no,
names=('t', 'obs', 'mod'))
emi_fac = pd.read_csv(fname,
                          sep=r'\s+',
header=None,
skipinitialspace=False,
skiprows=emi_fac_l_no,
names=('x', 'y', 'emi_fac'))
return({"ocs_conc": ocs_conc,
"emi_fac": emi_fac})
def parse_reportopt(fname, block_sz=11):
"""parse a STEM inverse run Report.opt file to a pandas data frame.
ARGS:
fname (string): path to the Report.opt file
block_sz (int): how many Report.opt lines to parse in one go.
Default is 11.
RETURNS:
a pandas data frame with variables:
it: model iteration
mod_runs: number of model runs
        cost: the value of the cost function
misfit: the 'misfit' component of the cost function
bckg: the 'background' component of the cost function
task: the L-BFGS 'task' for the iteration
"""
block_list = []
with open(fname, 'r') as f:
        for block in BlockReader(f, block_sz):
# build a regex to match floating point numbers
re_flt = r"[+-]? *(?:\d+(?:\.\d*)?|\.\d+)(?:[eE][+-]?\d+)?"
for this_line in block:
if re.search("LBFGS-TASK", this_line):
task = re.split('LBFGS-TASK =', this_line)[1].strip()
if re.search("It=", this_line):
it, nruns = string_strip(re.findall(re_flt, this_line))
if re.search("Cost", this_line):
cost, = string_strip(re.findall(re_flt, this_line))
if re.search("Misfit", this_line):
misfit, bckg = string_strip(re.findall(re_flt, this_line))
block_list.append({'task': task,
'it': int(it),
'nruns': int(nruns),
'cost': float(cost),
'misfit': float(misfit),
'bckg': float(bckg)})
df = pd.DataFrame(block_list)
return(df)
def get_all_tobspred_fnames(run_dir):
"""find t_obs_pred.dat files in a STEM run directory
ARGS:
run_dir: path to the STEM run directory
RETURNS:
list of full paths to all files in the specified directory
matching t_obs_pred*.dat. The results are sorted lexically.
"""
file_list = glob(os.path.join(run_dir,
't_obs_pred*.dat'))
file_list = sorted(file_list)
return(file_list)
def parse_all_emifac(run_dir, mask_ones=True):
"""parse all emi_fac values from multiple t_obs_pred*.dat files
Parse all emi_fac values from all t_obs_pred_ABC.dat files present
within a specified directory into a numpy array, one column per
STEM iteration. Provides an option (on by default) to mask
emi_fac values equal to 1.0.
ARGS:
run_dir (string): path to a directory containing at least one
t_obs_pred.dat that get_all_tobspred_fnames() can locate.
mask_ones ({True}|False): if True, returns a numpy.ma masked array
with values of 1.0 masked.
RETURNS:
a numpy array or numpy.ma array containing all emi_fac values
from the parsed files, one column per STEM iteration.
"""
emifac = None
fnames = get_all_tobspred_fnames(run_dir)
if fnames:
emifac_list = [parse_tobspred(f) for f in fnames]
emifac = [x['emi_fac']['emi_fac'].values for x in emifac_list]
emifac = np.transpose(np.array(emifac))
# mask emifac values == 1.0
if mask_ones:
            emifac = np.ma.masked_array(emifac, np.abs(emifac - 1.0) < 1e-10)
return(emifac)
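# Illustrative sketch (hypothetical run directory): gather emi_fac values from
# every t_obs_pred*.dat file and summarize each STEM iteration.
def _example_parse_all_emifac():
    emifac = parse_all_emifac('/path/to/STEM/run_dir', mask_ones=True)
    if emifac is not None:
        # one column per iteration; masked entries (emi_fac == 1.0) are ignored
        print('per-iteration mean emi_fac: {}'.format(emifac.mean(axis=0)))
    return emifac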
def parse_STEM_tflag(nc_fname, out_format='datetime', varname=None):
"""Parse STEM time stamps to datetime.datetime objects.
Parse the TFLAG variable of a STEM Models-3 I/O API file to
datetime.datetime values. TFLAG is the time variable in STEM
input and output files. Its format is a text string:
YYYYDDD,HHMMSS. The specified file must contain the variable
TFLAG.
if the I/O API file is time-independent (TSTEP == 0), returns
np.NaN
ARGS:
nc_fname (string): the full path to the IO/API file.
out_format ({'datetime'}|'hour'): format to return time. If
'datetime', returns array of datetime.datetime objects. If
'hour', returns array of integers containing hours past the
first time stamp.
    varname (string): the variable name whose timestamp should be extracted.
If unspecified extracts the file's first variable's TFLAG
(i.e. TFLAG[..., 1, :])
RETURNS:
numpy array of datetime.datetime or integers (depending on
out_format parameter)
"""
SECONDS_PER_HOUR = 60*60
try:
nc = Dataset(nc_fname, 'r', format='NETCDF4')
    except Exception:
print('error opening {}'.format(nc_fname))
raise
    if nc.TSTEP == 0:
        # "time-independent" I/O API file
        nc.close()
        result = np.empty((1))
        result[0] = np.NaN
        return(result)
if varname is None:
var_idx = 1
else:
var_idx = [varname == k for k in nc.variables.keys()]
if np.sum(var_idx) > 1:
raise ValueError(
'more than one variable matches {}'.format(varname))
if np.sum(var_idx) == 0:
raise KeyError(
'{} not present in {}'.format(varname, nc_fname))
else:
var_idx = np.where(var_idx)[0] - 1
# read timestamps to datetime.datetime
t = np.squeeze(nc.variables['TFLAG'][:, var_idx, ...])
t_dt = np.array(
([datetime.datetime.strptime(str(this[0]) +
str(this[1]).zfill(6), '%Y%j%H%M%S')
for this in t]))
nc.close()
    if out_format == 'hour':
        # convert the datetime objects to hours past the first timestamp.
t0 = t_dt[0]
# t_dt has dtype 'O'; numpy refuses to convert these to floats
# after hour calculation, so initialize a new array of dtype
# float to hold hours.
t_hr = np.empty_like(t_dt, dtype=float)
for i in range(t_dt.size):
td = t_dt[i] - t0
t_hr[i] = td.total_seconds() / SECONDS_PER_HOUR
t_dt = t_hr
return(t_dt)
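# Small sketch of the TFLAG decoding performed above: a (YYYYDDD, HHMMSS) pair
# is concatenated and parsed with '%Y%j%H%M%S'. The values below are made up.
def _example_decode_tflag():
    yyyyddd, hhmmss = 2008196, 50000  # hypothetical: day 196 of 2008, 05:00:00
    return datetime.datetime.strptime(
        str(yyyyddd) + str(hhmmss).zfill(6), '%Y%j%H%M%S')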
def parse_STEM_var(nc_fname=None,
t_idx=None,
z_idx=None,
t0=None,
t1=None,
varname=None):
"""Parse a variable from a STEM I/O API netcdf file.
Varname (type str) must be a variable in the netcdf file. The file
must also contain a variable TFLAG containing timestamps in the
format <YYYYDDD,HHMMSS>.
A subset of time stamps may be specified via the t_idx, t0, and t1
parameters. A subset of vertical layers may be specified via the
z_idx parameter.
    There are two ways to specify the time slices to extract:
    (1) specify t_idx: array-like indices directly into the time
    dimension of the I/O API file. If t_idx is specified, t0 and t1 are
    ignored.
    (2) specify t0 and/or t1 (datetime.datetime): restrict the
    returned data to timestamps that satisfy t0 <= timestamp <= t1.
    If none of t_idx, t0, and t1 is specified, all timestamps are
    returned.
    z_idx specifies the vertical layers to extract. Must be None (all
    vertical layers), a scalar integer, or a tuple.
ARGS:
nc_fname (string): path to the I/O API data file
t_idx (numpy array-like, integer): indices of time stamps to
extract. If specified t0 and t1 are ignored.
t0: (datetime.datetime): first time stamp to parse. Ignored if
t_idx is specified.
t1: (datetime.datetime): last time stamp to parse. Ignored if
t_idx is specified.
varname (string): the variable to parse from nc_fname.
RETURNS:
a dict with keys 'data' and 't'. 'data' contains the values in
varname (np.ndarray) and 't' contains the timestamps
    (datetime.datetime objects).
"""
try:
nc = Dataset(nc_fname, 'r', format='NETCDF4')
    except Exception:
print('error opening {}'.format(nc_fname))
raise
t_dt = parse_STEM_tflag(nc_fname, varname=varname)
if pd.isnull(t_dt).any():
t_idx = 0
elif t_idx is None:
# find the requested timestamps
if t0 is None:
t0 = t_dt.min()
if t1 is None:
t1 = t_dt.max()
t_idx = (t_dt >= t0) & (t_dt <= t1)
if z_idx is None:
z_idx = np.arange(nc.variables[varname].shape[1])
elif type(z_idx) is not tuple:
z_idx = (z_idx,)
# retrieve the requested [OCS] data
data = nc.variables[varname][t_idx, z_idx, ...]
nc.close()
return({'data': data, 't': t_dt[t_idx]})
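# Illustrative sketch of the t0/t1 slicing described above; the file name,
# variable name, and date range are hypothetical.
def _example_parse_stem_var():
    result = parse_STEM_var(nc_fname='AQOUT.nc',
                            varname='CO2_TRACER1',
                            t0=datetime.datetime(2008, 7, 1),
                            t1=datetime.datetime(2008, 7, 8),
                            z_idx=0)
    return result['data'].shape, result['t']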
def parse_STEM_coordinates(topo_fname):
"""Parse STEM grid latitude and longitude.
.. warning:: deprecated. Instead use stem_pytools.domain.get_lat
and stem_pytools.domain.get_lon, stem_pytools.domain.get_topo.
"""
try:
topo = Dataset(topo_fname, 'r', format='NETCDF4')
    except Exception:
print('error opening {}'.format(topo_fname))
raise
    lat = np.squeeze(topo.variables['LAT'])
    lon = np.squeeze(topo.variables['LON'])
    elevation = np.squeeze(topo.variables['TOPO'])
    topo.close()
    return(lon, lat, elevation)
def get_CO2grossflux_varname(nc_fname):
"""
determine whether the CO2 gross flux variable is 'GPP' or 'GEE'.
    Examine the variables in netCDF file nc_fname and return 'GEE' if
    present. If 'GEE' is not present, return 'GPP' if present. If
    neither 'GEE' nor 'GPP' is present, return None.
ARGS:
nc_fname (string): path to netcdf file to examine
RETURNS:
string containing 'GPP', string containing 'GEE', or None
"""
try:
nc = Dataset(nc_fname)
    except Exception:
print('error opening {}'.format(nc_fname))
raise
    if 'GEE' in nc.variables.keys():
        result = 'GEE'
    elif 'GPP' in nc.variables.keys():
        result = 'GPP'
    else:
        result = None
    nc.close()
    return(result)
|
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
# @param {ListNode} l1
# @param {ListNode} l2
# @return {ListNode}
def mergeTwoLists(self, l1, l2):
dummy = ListNode(None)
temp1 = l1
temp2 = l2
temp3 = dummy
while temp1 and temp2:
if temp1.val < temp2.val:
temp3.next = temp1
temp1 = temp1.next
temp3 = temp3.next
else:
temp3.next = temp2
temp2 = temp2.next
temp3 = temp3.next
if temp1:
temp3.next = temp1
if temp2:
temp3.next = temp2
return dummy.next
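# Sketch of how this solution might be exercised locally (not part of the
# original submission). LeetCode normally supplies ListNode; the definition
# below simply mirrors the commented one above so the demo can run.
class ListNode:
    def __init__(self, x):
        self.val = x
        self.next = None
def _demo_merge_two_lists():
    def build(values):
        dummy = ListNode(None)
        tail = dummy
        for v in values:
            tail.next = ListNode(v)
            tail = tail.next
        return dummy.next
    merged = Solution().mergeTwoLists(build([1, 3, 5]), build([2, 4, 6]))
    out = []
    while merged:
        out.append(merged.val)
        merged = merged.next
    return out  # expected: [1, 2, 3, 4, 5, 6]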
|
# -*- coding: utf-8 -*-
import datetime
from gurobipy import *
from itertools import product
def maxpooling2d(model, layer, inputs):
    # Placeholder: no Gurobi variables or constraints are added for the
    # max-pooling layer; the inputs are returned unchanged.
    return inputs
|
import numpy as np
from scipy import ndimage
_categories = (-1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1,
1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 6, 6, 6,
6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 10, 10, 10,
10, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11,
11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13,
13, 13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14, 14, 14, 14, 14,
14, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
15, 15, 15, 15, 15, 16, 16, 16, 16, 16, 16, 16)
RPC_SUPPORT_CATEGORIES = (1, 17, 200)
_coco_categories = (-1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8,
8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10, 11, 11,
11, 11, 11, 11, 11)
COCO_SUPPORT_CATEGORIES = (1, 12, 80)
def contiguous_coco_category_to_super_category(category_id, num_classes):
    """Map a contiguous (1-based) COCO category id to a super-category id
    Args:
        category_id: contiguous COCO category id, 1-based
        num_classes: 1, 12 or 80 (see COCO_SUPPORT_CATEGORIES)
    Returns:
        super-category id, 0-based
    """
    cat_id = -1
    assert num_classes in COCO_SUPPORT_CATEGORIES, \
        'Unsupported number of density categories: {}'.format(num_classes)
if num_classes == 12:
cat_id = _coco_categories[category_id]
elif num_classes == 1:
cat_id = 0
elif num_classes == 80:
cat_id = category_id - 1
assert 79 >= cat_id >= 0
return cat_id
def rpc_category_to_super_category(category_id, num_classes):
"""Map category to super-category id
Args:
        category_id: fine-grained RPC category id, 1-based
num_classes: 1, 17, 200
Returns:
super-category id, 0-based
"""
cat_id = -1
assert num_classes in RPC_SUPPORT_CATEGORIES, \
        'Unsupported number of density categories: {}'.format(num_classes)
if num_classes == 17:
cat_id = _categories[category_id]
elif num_classes == 1:
cat_id = 0
elif num_classes == 200:
cat_id = category_id - 1
assert 199 >= cat_id >= 0
return cat_id
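# Illustrative check (not part of the original module): with 17 super-categories
# the fine-grained RPC ids collapse via the _categories table above.
def _example_rpc_super_category():
    assert rpc_category_to_super_category(1, 17) == 0       # first fine id -> super 0
    assert rpc_category_to_super_category(1, 1) == 0        # single-class setting
    assert rpc_category_to_super_category(200, 200) == 199  # identity, shifted to 0-based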
def generate_density_map(labels,
boxes,
scale=50.0 / 800,
size=50,
num_classes=200,
min_sigma=1):
density_map = np.zeros((num_classes, size, size), dtype=np.float32)
for category, box in zip(labels, boxes):
x1, y1, x2, y2 = [x * scale for x in box]
w, h = x2 - x1, y2 - y1
box_radius = min(w, h) / 2
sigma = max(min_sigma, box_radius * 5 / (4 * 3))
cx, cy = round((x1 + x2) / 2), round((y1 + y2) / 2)
density = np.zeros((size, size), dtype=np.float32)
density[cy, cx] = 100
density = ndimage.filters.gaussian_filter(
density, sigma, mode='constant')
density_map[category, :, :] += density
return density_map
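# Minimal usage sketch of generate_density_map: one (hypothetical) box of
# category 3 on an 800x800 image yields a single Gaussian blob in channel 3
# whose mass is approximately 100.
def _example_density_map():
    labels = [3]
    boxes = [[100, 100, 300, 260]]  # x1, y1, x2, y2 in image pixels
    dmap = generate_density_map(labels, boxes, scale=50.0 / 800, size=50,
                                num_classes=200)
    return dmap.shape, dmap[3].sum()  # (200, 50, 50), ~100.0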
def generate_density_map_v1(labels,
boxes,
scale=50.0 / 800,
size=50,
num_classes=200,
min_sigma=1):
num_classes = 3
density_map = np.zeros((num_classes, size, size), dtype=np.float32)
for category, box in zip(labels, boxes):
x1, y1, x2, y2 = [x * scale for x in box]
w, h = x2 - x1, y2 - y1
box_radius = min(w, h) / 2
# 3/5 of gaussian kernel is in box
sigma = max(min_sigma, box_radius * 5 / (4 * 3))
cx, cy = round((x1 + x2) / 2), round((y1 + y2) / 2)
x1, y1, x2, y2 = round(x1), round(y1), round(x2), round(y2)
density = np.zeros((size, size), dtype=np.float32)
density[cy, cx] = 100
density = ndimage.filters.gaussian_filter(
density, sigma, mode='constant')
density_map[0, :, :] += density
        # add foreground info
density_map[1, y1:y2, x1:x2] = 1.0 # mark area
density_map[2, cy, cx] = 1.0 # mark center
return density_map
def gaussian(kernel):
sigma = ((kernel - 1) * 0.3 - 1) * 0.3 + 0.8
s = 2 * (sigma**2)
dx = np.exp(-np.square(np.arange(kernel) - int(kernel / 2)) / s)
return np.reshape(dx, (-1, 1))
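# Quick sketch of the 1-D Gaussian column produced by gaussian(): for an odd
# kernel size the peak value 1.0 sits at the centre element.
def _example_gaussian_column():
    col = gaussian(5)  # shape (5, 1)
    return col.shape, float(col.max())  # (5, 1), 1.0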
|
import torch.nn as nn
import torch
import numpy as np
import torch.nn.functional as F
def calculate_pad_same(image_size, kernel_size, stride):
"""
Calculates the padding to get the "same" size as in Tensorflow
    Only works for images where the filter covers the complete image in the convolution
"""
print((image_size[0] - (kernel_size[0] - 1) - 1) % stride[0] == 0)
print("Image size", image_size)
print("Kernel size", kernel_size)
print("Stride size", stride)
assert (image_size[0] - (kernel_size[0] - 1) - 1) % stride[
0] == 0, "Image can't be convoluted on the height exactly"
assert (image_size[1] - (kernel_size[1] - 1) - 1) % stride[1] == 0, "Image can't be convoluted on the width exactly"
pad = tuple(
[(image_size[num] * (stride[num] - 1) - stride[num] + kernel_size[num]) // 2 for num in range(len(image_size))])
return pad
class Encoder(nn.Module):
def __init__(self,
n_out=4,
n_channels=3,
image_size=(64, 64),
conv_hid=64,
conv_kernel=(3, 3),
conv_stride=(1, 1),
maxpool_kernel=(2, 2)):
super().__init__()
conv_pad = calculate_pad_same(image_size, conv_kernel, conv_stride)
maxpool_pad = calculate_pad_same(image_size, maxpool_kernel, maxpool_kernel)
self.maxpool_pad = [maxpool_pad[1], maxpool_pad[1], maxpool_pad[0], maxpool_pad[0]]
self.conv1 = nn.Conv2d(n_channels, conv_hid, conv_kernel, stride=conv_stride, padding=conv_pad)
self.maxpool1 = nn.MaxPool2d(maxpool_kernel, None)
self.conv2 = nn.Conv2d(conv_hid, conv_hid, conv_kernel, stride=conv_stride, padding=conv_pad)
self.maxpool2 = nn.MaxPool2d(maxpool_kernel, None)
self.conv3 = nn.Conv2d(conv_hid, conv_hid, conv_kernel, stride=conv_stride, padding=conv_pad)
self.maxpool3 = nn.MaxPool2d(maxpool_kernel, None)
        final_size = np.prod((conv_hid, image_size[0] // (2 ** 3), image_size[1] // (2 ** 3)))
self.fc1 = nn.Linear(final_size, conv_hid)
self.fc2 = nn.Linear(conv_hid, n_out)
def forward(self, x):
x = x.unsqueeze(0)
x = F.relu(self.conv1(x))
x = self.maxpool1(x)
x = F.relu(self.conv2(x))
x = self.maxpool2(x)
x = F.relu(self.conv3(x))
x = self.maxpool3(x)
x = torch.flatten(x, 1)
x = F.relu(self.fc1(x))
x = self.fc2(x)
return F.normalize(x).squeeze()
class Decoder(nn.Module):
def __init__(self, image_size=(64, 64),
n_in=4,
conv_hid=64,
conv_kernel=(3, 3),
conv_stride=(1, 1),
n_channels=3
):
super().__init__()
self.convdim = (conv_hid, image_size[0] // (2 ** 3), image_size[1] // (2 ** 3))
self.fc1 = nn.Linear(n_in, conv_hid)
        self.fc2 = nn.Linear(conv_hid, np.prod(self.convdim))
conv_pad = calculate_pad_same(image_size, conv_kernel, conv_stride)
self.up1 = nn.Upsample(scale_factor=2)
self.conv1 = nn.Conv2d(conv_hid, conv_hid, conv_kernel, stride=conv_stride, padding=conv_pad)
self.conv2 = nn.Conv2d(conv_hid, conv_hid, conv_kernel, stride=conv_stride, padding=conv_pad)
self.conv3 = nn.Conv2d(conv_hid, n_channels, conv_kernel, stride=conv_stride, padding=conv_pad)
def forward(self, x):
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = torch.reshape(x, (-1, self.convdim[0], self.convdim[1], self.convdim[2]))
x = self.up1(x)
x = F.relu(self.conv1(x))
x = self.up1(x)
x = F.relu(self.conv2(x))
x = self.up1(x)
x = self.conv3(x)
return torch.sigmoid(x).squeeze(dim=0)
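# Round-trip shape sketch (not part of the original module): encode a single
# 3x64x64 image to a 4-d latent vector and decode it back to 3x64x64.
def _example_autoencoder_shapes():
    encoder = Encoder(n_out=4, n_channels=3, image_size=(64, 64))
    decoder = Decoder(n_in=4, n_channels=3, image_size=(64, 64))
    image = torch.rand(3, 64, 64)     # Encoder.forward adds the batch dimension
    latent = encoder(image)           # shape: (4,)
    reconstruction = decoder(latent)  # shape: (3, 64, 64)
    return latent.shape, reconstruction.shape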
|
import os
import subprocess
import pytest
from tests import config as conf
from tests import experiment as exp
@pytest.mark.e2e_cpu
def test_support_bundle() -> None:
exp_id = exp.run_basic_test(
config_file=conf.fixtures_path("no_op/single-one-short-step.yaml"),
model_def_file=conf.fixtures_path("no_op"),
expected_trials=1,
)
trial_id = exp.experiment_first_trial(exp_id)
output_dir = f"e2etest_trial_{trial_id}"
os.mkdir(output_dir)
command = ["det", "trial", "support-bundle", str(trial_id), "-o", output_dir]
completed_process = subprocess.run(
command, universal_newlines=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
assert completed_process.returncode == 0, "\nstdout:\n{} \nstderr:\n{}".format(
completed_process.stdout, completed_process.stderr
)
|
# Generated by Django 2.2.11 on 2020-04-03 02:15
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("core", "0011_auto_20191104_0104")]
operations = [
migrations.AddField(
model_name="reportentry",
name="report_type",
field=models.TextField(
choices=[("checkout", "Check-Out"), ("checkin", "Check-In"), ("other", "Other")],
default="other",
null=True,
),
),
migrations.AddField(
model_name="station",
name="station_type",
field=models.TextField(
choices=[("standard", "Standard"), ("checkin", "Check-In/Check-Out")],
default="standard",
null=True,
),
),
]
|