max_stars_repo_path (stringlengths 3-269) | max_stars_repo_name (stringlengths 4-119) | max_stars_count (int64 0-191k) | id (stringlengths 1-7) | content (stringlengths 6-1.05M) | score (float64 0.23-5.13) | int_score (int64 0-5) |
---|---|---|---|---|---|---|
python/generator_conf.py | chakpongchung/katana | 64 | 12793451 | <reponame>chakpongchung/katana
import re
from abc import ABCMeta
from collections import namedtuple
NON_IDENTIFIER_CHAR_RE = re.compile(r"[^a-zA-Z0-9]")
def identifier_for_string(s):
return NON_IDENTIFIER_CHAR_RE.sub("_", s)
class TypeInstantiation(metaclass=ABCMeta):
element_c_type: str
element_py_type: str
by_pointer: bool
fixed_dtype: str or None
type_key: str
def dtype(self, dynamic):
return "np.dtype({})".format(self.fixed_dtype or dynamic)
@property
def type_scab(self):
return identifier_for_string(self.element_c_type)
class PrimitiveTypeInstantiation(
namedtuple("PrimitiveTypeInstantiation", ["element_c_type", "element_py_type",]), TypeInstantiation,
):
@property
def fixed_dtype(self):
return self.element_py_type
@property
def by_pointer(self):
return False
@property
def type_key(self):
return self.element_py_type
primitive_type_instantiations = [
PrimitiveTypeInstantiation("uint64_t", "np.uint64"),
PrimitiveTypeInstantiation("int64_t", "int"),
PrimitiveTypeInstantiation("uint32_t", "np.uint32"),
PrimitiveTypeInstantiation("int32_t", "np.int32"),
PrimitiveTypeInstantiation("double", "float"),
PrimitiveTypeInstantiation("float", "np.float32"),
]
class OpaqueTypeInstantiation(namedtuple("OpaqueTypeInstantiation", ["size"]), TypeInstantiation):
@property
def fixed_dtype(self):
return None
@property
def element_c_type(self):
return "Opaque{}".format(self.size)
@property
def element_py_type(self):
return "StructInstance"
@property
def by_pointer(self):
return True
@property
def type_key(self):
return self.size
opaque_type_instantiations = [OpaqueTypeInstantiation(s) for s in [8, 16, 32, 48, 64, 128]]
type_instantiations = primitive_type_instantiations + opaque_type_instantiations
type_instantiation_imports = """
import numpy as np
from libc.stdint cimport int64_t, uint64_t, int32_t, uint32_t
"""
exports = dict(
primitive_type_instantiations=primitive_type_instantiations,
opaque_type_instantiations=opaque_type_instantiations,
type_instantiations=type_instantiations,
type_instantiation_imports=type_instantiation_imports,
)
| 2.640625 | 3 |
aspen_ssh/parser/exceptions.py | thinkwelltwd/aspen_ssh | 1 | 12793452 | <reponame>thinkwelltwd/aspen_ssh
class SSHCertificateParserError(Exception):
pass
class UnsupportedKeyTypeError(SSHCertificateParserError):
"""This key has a type which we do not know how to parse"""
class InputTooShortError(SSHCertificateParserError):
pass
| 2.4375 | 2 |
src/auto_change.py | yoland68/junit-auto-migrate | 0 | 12793453 | <reponame>yoland68/junit-auto-migrate
#!/usr/bin/env python
import parser
import chrome_convert_agents
import webview_convert_agents
import instrumentation_convert_agents
import test_base_convert_agent
import content_convert_agents
import logging
import argparse
import os
import sys
_TEST_AGENT_DICT = {
"chrome-base-test-case": chrome_convert_agents.ChromeActivityBaseCaseAgent,
"chrome-permission-test": chrome_convert_agents.PermissionTestAgent,
"chrome-tabbed-test": chrome_convert_agents.ChromeTabbedTestAgent,
"instrumentation":
instrumentation_convert_agents.InstrumentationTestCaseAgent,
"multiactivity-test": chrome_convert_agents.MultiActivityTestAgent,
"vr-test": chrome_convert_agents.ChromeVrTestAgent,
"payment-test": chrome_convert_agents.PaymentRequestAgent,
"mojo-test": chrome_convert_agents.MojoTestAgent,
"cast-test": chrome_convert_agents.CastTestAgent,
"provider-test": chrome_convert_agents.ProviderTestAgent,
"customtabs-test": chrome_convert_agents.CustomTabActivityTestAgent,
"notification-test": chrome_convert_agents.NotificationTestAgent,
#"download-test": chrome_convert_agents.DownloadTestAgent,
"bottom-sheet-test": chrome_convert_agents.BottomSheetTestAgent,
"connectivity-checker-test":
content_convert_agents.ConnectivityCheckerTestAgent,
"tab-model-selector-observer-test":
content_convert_agents.SelectorObserverTest,
"native-library-test": content_convert_agents.NativeLibraryTestAgent,
"content-shell-test": content_convert_agents.ContentShellTestAgent,
"dialog-overlay-impl-test": content_convert_agents.DialogOverlayImplTestAgent,
"webview-test": webview_convert_agents.WebViewTestAgent,
"cronet-test": chrome_convert_agents.CronetTestAgent,
"partner-unit-test": chrome_convert_agents.PartnerUnitTestAgent,
"sync-test": chrome_convert_agents.SyncTestAgent,
"partner-integration-test": chrome_convert_agents.PartnerIntegrationTestAgent,
"crash-test": chrome_convert_agents.CrashTestAgent,
}
_AGENT_DICT = _TEST_AGENT_DICT.copy()
_AGENT_DICT.update({"base-class": test_base_convert_agent.BaseCaseAgent})
def ConvertDirectory(directory, java_parser, agent_strings,
save_as_new=False, logging_level=logging.WARNING,
use_base_class=False):
agent = None
for (dirpath, _, filenames) in os.walk(directory):
for filename in filenames:
whole_path = os.path.join(dirpath, filename)
agent = ConvertFile(
java_parser, agent_strings, whole_path, save_as_new,
previous_agent=agent, logging_level=logging_level,
use_base_class=use_base_class)
def ConvertFile(java_parser, agent_strings, whole_path, save_as_new,
previous_agent=None, logging_level=logging.WARNING,
use_base_class=False):
logger = SetLogger(logging_level, whole_path)
agent = previous_agent
for agent_class in [_AGENT_DICT[i] for i in agent_strings if
_AGENT_DICT[i].filename_match(whole_path)]:
agent = agent_class(java_parser, whole_path, logger=logger, agent=agent,
save_as_new=save_as_new, use_base_class=use_base_class)
if agent._failed_to_parse:
continue
if use_base_class or not agent.skip():
agent.actions()
return agent
logger.error('Failed to match to any agent')
def SetLogger(logging_level, filepath):
log = logging.getLogger()
filename = filepath.split('/')[-1]
f = logging.Formatter(
filename + ':%(levelname)s:%(module)s:%(lineno)s: %(message)s')
fh = logging.StreamHandler()
fh.setLevel(logging_level)
fh.setFormatter(f)
log.propagate = False
if len(log.handlers) > 0:
log.removeHandler(log.handlers[0])
log.setLevel(logging_level)
log.addHandler(fh)
return log
def CreateJavaParser(logging_level=logging.ERROR):
logger = logging.getLogger('parser_logger')
logger.setLevel(logging_level)
return parser.Parser(logger)
def main():
argument_parser = argparse.ArgumentParser()
argument_parser.add_argument(
'-u', '--use-base-class', help='Use another base class to convert',
default=False, action='store_true')
argument_parser.add_argument(
'--no-skip', help='Do not skip the specified file', action='store_true',
default=False)
argument_parser.add_argument('-f', '--java-file', help='Java file')
argument_parser.add_argument('-d', '--directory',
help='Directory where all java file lives')
argument_parser.add_argument('-v', '--verbose', help='Log info',
action='store_true')
argument_parser.add_argument(
'-l', '--list-agents', help='List all available agents',
action='store_true', default=False)
argument_parser.add_argument('-n', '--save-as-new', default=False,
action='store_true', help='Save as a new file')
argument_parser.add_argument(
'-a', '--agent', help='Specify the agent for the current file',
default='all')
arguments = argument_parser.parse_args(sys.argv[1:])
logging_level = logging.INFO
if arguments.verbose:
logging_level = logging.DEBUG
if arguments.list_agents:
print('Available agents and description:\n')
    for agent, agent_class in _AGENT_DICT.items():
print("%25s:\t%s" % (agent, agent_class.__doc__.strip()))
return
if arguments.java_file and arguments.directory:
raise Exception(
'Can not specify --jave-file and --directory at the same time')
if arguments.agent == 'all':
agents = _TEST_AGENT_DICT.keys()
else:
agents = [arguments.agent]
java_parser = CreateJavaParser()
if arguments.java_file:
ConvertFile(java_parser, agents, arguments.java_file,
arguments.save_as_new, logging_level=logging_level,
use_base_class=arguments.use_base_class)
else:
ConvertDirectory(
arguments.directory, java_parser, agents,
save_as_new=arguments.save_as_new, logging_level=logging_level,
use_base_class=arguments.use_base_class)
if __name__ == '__main__':
main()
| 1.9375 | 2 |
jshbot/plugins.py | AmberHarris/Shaco | 0 | 12793454 | import asyncio
import logging
import importlib.util
import os.path
import sys
# Debug
import traceback
from jshbot import commands
from jshbot.exceptions import ErrorTypes, BotException
EXCEPTION = 'Plugins'
def add_plugins(bot):
"""
Gets a list of all of the plugins and stores them as a key/value pair of
the plugin name and the module itself (renamed to plugin for the user).
In addition, this also sets the commands given by each plugin.
"""
directory = '{}/plugins'.format(bot.path)
data_directory = '{}/plugins/plugin_data'.format(bot.path)
if os.path.isdir(data_directory):
logging.debug("Setting plugin_data as plugin import path.")
sys.path.append(data_directory)
try:
plugins_list = os.listdir(directory)
except FileNotFoundError:
raise BotException(
EXCEPTION, "Plugins directory not found",
error_type=ErrorTypes.STARTUP)
valid_plugins = {}
# Add base plugin
from jshbot import base
plugin_commands = base.get_commands()
commands.add_commands(bot, plugin_commands, base)
valid_plugins['base'] = [base, plugin_commands]
# Get plugin commands
for plugin in plugins_list:
if (plugin[0] in ('.', '_') or
plugin == 'base' or
not plugin.endswith('.py')):
continue
try:
spec = importlib.util.spec_from_file_location(
plugin, '{}/{}'.format(directory, plugin))
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
plugin_commands = module.get_commands()
commands.add_commands(bot, plugin_commands, module)
except Exception as e:
traceback.print_exc()
raise BotException(
EXCEPTION, "Failed to import external plugin",
plugin, e=e, error_type=ErrorTypes.STARTUP)
else:
logging.debug("Adding plugin {}".format(plugin))
valid_plugins[plugin] = [module, plugin_commands]
if len(valid_plugins):
logging.debug("Loaded {} plugin(s)".format(len(valid_plugins)))
bot.plugins = valid_plugins
def broadcast_event(bot, event, *args, **kwargs):
"""
    Loops through all of the plugins and looks to see if the event index
    specified is associated with it. If it is, call that function with args.
"""
for plugin in bot.plugins.values():
function = getattr(plugin[0], event, None)
if function:
try:
asyncio.ensure_future(function(bot, *args, **kwargs))
except TypeError as e:
logging.error(traceback.format_exc())
logging.error("Bypassing event error: " + e)
| 2.296875 | 2 |
DailyProgrammer/DP20131128C.py | DayGitH/Python-Challenges | 2 | 12793455 | <gh_stars>1-10
"""
[11/28/13] Challenge #137 [Intermediate / Hard] Banquet Planning
https://www.reddit.com/r/dailyprogrammer/comments/1rnrs2/112813_challenge_137_intermediate_hard_banquet/
# [](#IntermediateIcon) *(Intermediate)*: Banquet Planning
You and your friends are planning a big banquet, but need to figure out the order in which food will be served. Some
food, like a turkey, have to be served after appetizers, but before desserts. Other foods are more simple, like a pecan
pie, which can be eaten any time after the main meal. Given a list of foods and the order-relationships they have,
print the banquet schedule. If a given food item cannot be placed in this schedule, write an error message for it.
# Formal Inputs & Outputs
## Input Description
On standard console input, you will be given two space-delimited integers, N and M. N is the number of food items,
while M is the number of food-relationships. Food-items are unique single-word lower-case names with optional
underscores (the '_' character), while food-relationships are two food items that are space delimited. All food-items
will be listed first on their own lines, then all food-relationships will be listed on their own lines afterwards. A
food-relationship is where the first item must be served before the second item.
Note that in the food-relationships list, some food-item names can use the
[wildcard-character](http://en.wikipedia.org/wiki/Wildcard_character) '\*'. You must support this by expanding the rule
to fulfill any combination of strings that fit the wildcard. For example, using the items from Sample Input 2, the rule
"turkey\* \*_pie" expands to the following four rules:
turkey almond_pie
turkey_stuffing almond_pie
turkey pecan_pie
turkey_stuffing pecan_pie
A helpful way to think about the wildcard expansion is to use the phrase "any item A must be before any item B". An
example would be the food-relationship "\*pie coffee", which can be read as "any pie must be before coffee".
Some orderings may be ambiguous: you might have two desserts before coffee, but the ordering of desserts may not be
explicit. In such a case, group the items together.
## Output Description
Print the correct order of food-items with a preceding index, starting from 1. If there are ambiguous ordering for
items, list them together on the same line as a comma-delimited array of food-items. Any items that do not have a
relationship must be printed with a warning or error message.
# Sample Inputs & Outputs
## Sample Input 1
3 3
salad
turkey
dessert
salad dessert
turkey dessert
salad turkey
## Sample Output 1
1. salad
2. turkey
3. dessert
## Sample Input 2
8 5
turkey
pecan_pie
salad
crab_cakes
almond_pie
rice
coffee
turkey_stuffing
turkey_stuffing turkey
turkey* *_pie
*pie coffee
salad turkey*
crab_cakes salad
## Sample Output 2
1. crab_cakes
2. salad
3. turkey_stuffing
4. turkey
5. almond_pie, pecan_pie
6. coffee
Warning: Rice does not have any ordering.
# Author's Note:
This challenge has some subtle ordering logic that might be hard to understand at first. Work through sample data 2 by
hand to better understand the ordering rules before writing code. Make sure to expand all widecard rules as well.
"""
def main():
pass
if __name__ == "__main__":
main()
| 3.9375 | 4 |
z3py/examples/fixedpoint.3.py | rainoftime/rainoftime.github.io | 1 | 12793456 | from z3 import *
fp = Fixedpoint()
fp.set(engine='datalog')
s = BitVecSort(3)
edge = Function('edge', s, s, BoolSort())
path = Function('path', s, s, BoolSort())
a = Const('a',s)
b = Const('b',s)
c = Const('c',s)
fp.register_relation(path,edge)
fp.declare_var(a,b,c)
fp.rule(path(a,b), edge(a,b))
fp.rule(path(a,c), [edge(a,b),path(b,c)])
v1 = BitVecVal(1,s)
v2 = BitVecVal(2,s)
v3 = BitVecVal(3,s)
v4 = BitVecVal(4,s)
fp.fact(edge(v1,v2))
fp.fact(edge(v1,v3))
fp.fact(edge(v2,v4))
print "current set of rules", fp
print fp.query(path(v1,v4)), "yes we can reach v4 from v1"
print fp.query(path(v3,v4)), "no we cannot reach v4 from v3"
| 2.53125 | 3 |
src/neon/frontend/utils.py | MUTTERSCHIFF/ngraph-neon | 13 | 12793457 | <filename>src/neon/frontend/utils.py
from __future__ import absolute_import
import neon as ng
from .axis import ax
def make_convolution_placeholder(shape=None):
"""
Create a placeholder op for inputs to a convolution layer
Arguments:
shape (tuple): The desired shape of the placeholder,
with axes in the order of C, D, H, W, N
Returns:
5-D placeholder op
"""
H = ng.make_axis(name="H", docstring="Height")
W = ng.make_axis(name="W", docstring="Width")
D = ng.make_axis(name="D", docstring="Depth")
C = ng.make_axis(name="C", docstring="Channel")
x = ng.placeholder(axes=ng.make_axes([C, D, H, W, ax.N]))
if shape is not None:
x.axes.set_shape(shape)
return x
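# Illustrative usage sketch (the shape values are arbitrary examples): the axes
# are ordered C, D, H, W, N, so a batch of 128 32x32 RGB images with depth 1
# would be created as:
#
#     x = make_convolution_placeholder((3, 1, 32, 32, 128))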
def get_function_or_class_name(obj):
if hasattr(obj, "__name__"):
name = obj.__name__
elif callable(obj):
name = type(obj).__name__
else:
name = None
return name
| 2.9375 | 3 |
wildlifecompliance/migrations/0125_auto_20190228_1127.py | preranaandure/wildlifecompliance | 1 | 12793458 | <gh_stars>1-10
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2019-02-28 03:27
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('wildlifecompliance', '0124_auto_20190228_1035'),
]
operations = [
migrations.RemoveField(
model_name='application',
name='proposed_decline_status',
),
migrations.RemoveField(
model_name='application',
name='proposed_issuance_licence',
),
]
| 1.273438 | 1 |
environments/inmoov/inmoov_client.py | BillChan226/Robotic | 3 | 12793459 | import zmq
from zmq import ssh
import numpy as np
from environments.inmoov.inmoov_p2p_client_ready import InmoovGymEnv
from .inmoov_server import server_connection, client_ssh_connection, client_connection
SERVER_PORT = 7777
HOSTNAME = 'localhost'
def send_array(socket, A, flags=0, copy=True, track=False):
"""send a numpy array with metadata"""
md = dict(
dtype = str(A.dtype),
shape = A.shape,
)
socket.send_json(md, flags|zmq.SNDMORE)
return socket.send(A, flags, copy=copy, track=track)
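# Sketch only: the receiving side would need a matching helper. This mirrors
# the standard pyzmq send/recv-array recipe and is not part of the original file.
def recv_array(socket, flags=0, copy=True, track=False):
    """Receive a numpy array with metadata (illustrative counterpart to send_array)."""
    md = socket.recv_json(flags=flags)
    msg = socket.recv(flags=flags, copy=copy, track=track)
    buf = memoryview(msg)
    A = np.frombuffer(buf, dtype=md["dtype"])
    return A.reshape(md["shape"])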
def test_inmoov_gym():
while True:
k = input()
try:
# time.sleep(0.5)
action = np.zeros(shape=(joints_num,))
signal = k.split()
joint, move = int(signal[0]), float(signal[1])
action[joint] = move
robot.step(action)
except:
continue
# robot.step()
if __name__ == "__main__":
socket = server_connection()
robot = InmoovGymEnv(debug_mode=True, positional_control=True)
init_pose = robot._inmoov.get_joints_pos()
joints_num = len(init_pose)
while True:
msg = socket.recv_json()
command = msg["command"]
if command == "position":
data = robot.server_step(msg[command])
joint_state, reward, done, infos, px, end_position = data
send_array(socket, joint_state, flags=0, copy=True, track=False)
send_array(socket, np.array(reward), flags=0, copy=True, track=False)
send_array(socket, np.array(done), flags=0, copy=True, track=False)
send_array(socket, px, flags=0, copy=True, track=False)
send_array(socket, end_position, flags=0, copy=True, track=False)
print("message sent")
elif command == "action":
print(1)
elif command == "done":
print(2)
elif command == "reset":
print(3)
| 2.296875 | 2 |
PHY407/gaussxw.py | ngrisouard/TenureApplicationCode | 1 | 12793460 | <filename>PHY407/gaussxw.py
from pylab import *
def gaussxw(N):
# Initial approximation to roots of the Legendre polynomial
a = linspace(3,4*N-1,N)/(4*N+2)
x = cos(pi*a+1/(8*N*N*tan(a)))
# Find roots using Newton's method
epsilon = 1e-15
delta = 1.0
while delta>epsilon:
p0 = ones(N,float)
p1 = copy(x)
for k in range(1,N):
p0,p1 = p1,((2*k+1)*x*p1-k*p0)/(k+1)
dp = (N+1)*(p0-x*p1)/(1-x*x)
dx = p1/dp
x -= dx
delta = max(abs(dx))
# Calculate the weights
w = 2*(N+1)*(N+1)/(N*N*(1-x*x)*dp*dp)
return x,w
def gaussxwab(N,a,b):
x,w = gaussxw(N)
return 0.5*(b-a)*x+0.5*(b+a),0.5*(b-a)*w
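# Illustrative check (not part of the original module): Gauss-Legendre
# quadrature with N points integrates polynomials up to degree 2N-1 exactly,
# so 3 points suffice for x**4 on [0, 2] (exact value 32/5 = 6.4).
def _demo():
    x, w = gaussxwab(3, 0.0, 2.0)
    print(sum(w*x**4))  # ~6.4
if __name__ == "__main__":
    _demo()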
| 2.609375 | 3 |
第11章/program/baidu/pipelines.py | kingname/SourceCodeOfBook | 274 | 12793461 | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import pymongo
from scrapy.conf import settings
class BaiduPipeline(object):
def __init__(self):
host = settings['MONGODB_HOST']
port = settings['MONGODB_PORT']
db_name = settings['MONGODB_DBNAME']
client = pymongo.MongoClient(host=host, port=port)
db = client[db_name]
self.post = db[settings['MONGODB_DOCNAME']]
def process_item(self, item, spider):
person_info = dict(item)
self.post.insert(person_info)
return item
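# A minimal sketch of the settings.py entries this pipeline assumes (the
# pipeline path and all values are placeholders to adapt to your project, as
# the note at the top of this file suggests):
#
#     ITEM_PIPELINES = {'baidu.pipelines.BaiduPipeline': 300}
#     MONGODB_HOST = '127.0.0.1'
#     MONGODB_PORT = 27017
#     MONGODB_DBNAME = 'baidu'
#     MONGODB_DOCNAME = 'people'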
| 2.390625 | 2 |
surftrace/execCmd.py | aliyun/surftrace | 32 | 12793462 | # -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name: execCmd
Description :
Author : liaozhaoyan
date: 2022/3/19
-------------------------------------------------
Change Activity:
2022/3/19:
-------------------------------------------------
"""
__author__ = 'liaozhaoyan'
import os
import sys
import shlex
from subprocess import PIPE, Popen
from threading import Thread
import select
from .surfException import FileNotExistException
ON_POSIX = 'posix' in sys.builtin_module_names
class CasyncPipe(Thread):
def __init__(self, f, func):
if not os.path.exists(f):
FileNotExistException("%s is not exist." % f)
self.__callBack = func
super(CasyncPipe, self).__init__()
self.daemon = True # thread dies with the program
self.__pipe = open(f, 'r')
self.__loop = True
self.start()
def newCb(self, func):
self.__callBack = func
def run(self):
while self.__loop:
line = self.__pipe.readline().strip()
self.__callBack(line)
def terminate(self):
self.__loop = False
self.join(1)
class CexecCmd(object):
def __init__(self):
pass
def cmd(self, cmds):
p = Popen(shlex.split(cmds), stdout=PIPE)
if sys.version_info.major == 2:
return p.stdout.read().strip()
else:
return p.stdout.read().decode().strip()
def system(self, cmds):
cmds = cmds.replace('\0', '').strip()
return os.popen(cmds).read(8192)
class CasyncCmdQue(object):
def __init__(self, cmd):
super(CasyncCmdQue, self).__init__()
self.daemon = True # thread dies with the program
self.__p = Popen(shlex.split(cmd), stdout=PIPE, stdin=PIPE, close_fds=ON_POSIX)
self.__e = select.epoll()
self.__e.register(self.__p.stdout.fileno(), select.EPOLLIN)
def __del__(self):
self.__p.kill()
def write(self, cmd):
try:
self.__p.stdin.write(cmd.encode())
self.__p.stdin.flush()
except IOError:
return -1
def writeLine(self, cmd):
self.write(cmd + "\n")
def read(self, tmout=0.2, l=16384):
while True:
es = self.__e.poll(tmout)
if not es:
return ""
for f, e in es:
if e & select.EPOLLIN:
if sys.version_info.major == 2:
s = os.read(f, l)
else:
s = os.read(f, l).decode()
return s
def readw(self, want, tries=100):
i = 0
r = ""
while i < tries:
line = self.read()
if want in line:
return r + line
r += line
i += 1
raise Exception("get want args %s overtimes" % want)
def terminate(self):
self.__p.terminate()
return self.__p.wait()
if __name__ == "__main__":
pass
| 2.484375 | 2 |
models/validation/OneOfValidator.py | meguia/virtualroom | 1 | 12793463 | <reponame>meguia/virtualroom
from .Validator import Validator
class OneOfValidator(Validator):
def __init__(self, *options):
self.options = set(options)
def validate(self, value):
if value not in self.options:
raise ValueError(f'Expected {value!r} to be one of {self.options!r}')
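# Illustrative usage sketch (the exact attachment hook depends on the Validator
# base class defined elsewhere); validate() itself can also be called directly:
#
#     room_type = OneOfValidator('classroom', 'office', 'lab')
#     room_type.validate('lab')        # passes silently
#     room_type.validate('hallway')    # raises ValueError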
| 2.96875 | 3 |
reactivated/apps.py | silviogutierrez/reactivated | 178 | 12793464 | import importlib
import json
import logging
import os
import subprocess
from typing import Any, Dict, NamedTuple, Tuple
from django.apps import AppConfig
from django.conf import settings
from . import (
definitions_registry,
extract_views_from_urlpatterns,
global_types,
template_registry,
type_registry,
value_registry,
)
from .serialization import create_schema
logger = logging.getLogger("django.server")
def get_urls_schema() -> Dict[str, Any]:
urlconf = importlib.import_module(settings.ROOT_URLCONF)
urlpatterns = urlconf.urlpatterns # type: ignore[attr-defined]
from django.urls import converters
from django.urls.resolvers import RoutePattern
converter_mapping = {
converters.IntConverter: "number",
converters.StringConverter: "string",
converters.UUIDConverter: "string",
converters.SlugConverter: "string",
converters.PathConverter: "string",
}
urls = extract_views_from_urlpatterns(urlpatterns) # type: ignore[no-untyped-call]
reverse = {}
for _, regex, name, pattern in urls:
if not isinstance(pattern, RoutePattern):
continue
reverse[name or regex] = {
"route": f"/{regex}",
"args": {
arg_name: converter_mapping.get(arg_converter.__class__, "string")
for arg_name, arg_converter in pattern.converters.items()
},
}
return reverse
def get_types_schema() -> Any:
""" The package json-schema-to-typescript does expose a way to
automatically export any interface it sees. However, this can bloat our
generated files.
Instead, while creating the schema, we occasionally run into types that we
want available globally but are not directly referenced by templates.
    These aren't exported by `json-schema-to-typescript` because they're
    referenced using `tsType`, so the library is unaware of their usage.
So we register them in `globals` and force `json-schema-to-typescript` to
expose them.
We can't just add these types to the `type_registry` because that's only
parsed once when generating the parent tuple.
We could explore doing two passes in the future.
See `unreachableDefinitions` in json-schema-to-typescript
"""
type_registry["globals"] = Any # type: ignore[assignment]
context_processors = []
from .serialization.context_processors import create_context_processor_type
for engine in settings.TEMPLATES:
if engine["BACKEND"] == "reactivated.backend.JSX":
context_processors.extend(engine["OPTIONS"]["context_processors"]) # type: ignore[index]
type_registry["Context"] = create_context_processor_type(context_processors)
ParentTuple = NamedTuple("ParentTuple", type_registry.items()) # type: ignore[misc]
parent_schema, definitions = create_schema(ParentTuple, definitions_registry)
definitions_registry.update(definitions)
return {
"definitions": definitions,
**{
**definitions["reactivated.apps.ParentTuple"],
"properties": {
**definitions["reactivated.apps.ParentTuple"]["properties"],
"globals": {
"type": "object",
"additionalProperties": False,
"required": list(global_types.keys()),
"properties": global_types,
},
},
},
}
def get_templates() -> Dict[str, Tuple[Any]]:
return template_registry
def get_values() -> Dict[str, Any]:
return value_registry
def get_schema() -> str:
schema = {
"urls": get_urls_schema(),
"templates": get_templates(),
"types": get_types_schema(),
"values": get_values(),
}
return json.dumps(schema, indent=4)
class ReactivatedConfig(AppConfig):
name = "reactivated"
def ready(self) -> None:
"""
Django's dev server actually starts twice. So we prevent generation on
the first start. TODO: handle noreload.
"""
schema = get_schema()
if (
os.environ.get("WERKZEUG_RUN_MAIN") == "true"
or os.environ.get("RUN_MAIN") == "true"
):
# Triggers for the subprocess of the dev server after restarts or initial start.
pass
is_server_started = "DJANGO_SEVER_STARTING" in os.environ
if is_server_started is False:
os.environ["DJANGO_SEVER_STARTING"] = "true"
return
generate_schema(schema)
def generate_schema(schema: str, skip_cache: bool = False) -> None:
"""
For development usage only, this requires Node and Python installed
You can use this function for your E2E test prep.
"""
logger.info("Generating interfaces and client side code")
encoded_schema = schema.encode()
import hashlib
digest = hashlib.sha1(encoded_schema).hexdigest().encode()
if skip_cache is False and os.path.exists("client/generated/index.tsx"):
with open("client/generated/index.tsx", "r+b") as existing:
already_generated = existing.read()
if digest in already_generated:
logger.info("Skipping generation as nothing has changed")
return
#: Note that we don't pass the file object to stdout, because otherwise
# webpack gets confused with the half-written file when we make updates.
# Maybe there's a way to force it to be a single atomic write? I tried
# open('w+b', buffering=0) but no luck.
process = subprocess.Popen(
["node", "./node_modules/reactivated/generator.js"],
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
)
out, error = process.communicate(encoded_schema)
os.makedirs("client/generated", exist_ok=True)
with open("client/generated/index.tsx", "w+b") as output:
output.write(b"// Digest: %s\n" % digest)
output.write(out)
logger.info("Finished generating.")
| 2.140625 | 2 |
tests/keypresses.py | nateonguitar/PythonConsoleGameEngine | 1 | 12793465 | import threading
from pynput import keyboard
class KeyPresses():
def __init__(self):
self.keep_from_dying_thread = None
self.holding_shift = False
self.key_listener = keyboard.Listener(on_press=self.on_keydown, on_release=self.on_keyup)
self.key_listener.start()
def on_keydown(self, key: keyboard.Key):
char = hasattr(key, 'char')
if char:
if self.holding_shift and key.char.lower() == 'b':
print('Shift B')
else:
if key == keyboard.Key.esc:
self.key_listener.stop()
self.keep_from_dying_thread.cancel()
if key == keyboard.Key.shift:
self.holding_shift = True
def on_keyup(self, key: keyboard.Key):
if key == keyboard.Key.shift:
self.holding_shift = False
def keep_from_dying(self):
self.keep_from_dying_thread = threading.Timer(1000000, lambda : None)
self.keep_from_dying_thread.start()
k = KeyPresses()
k.keep_from_dying()
| 3.25 | 3 |
Plot3D.py | MohFarahani/dance_generator | 1 | 12793466 | <filename>Plot3D.py
import dataclasses
from typing import List, Mapping, Optional, Tuple, Union
from model_setup import Model_Setup
import matplotlib.pyplot as plt
import pandas as pd
import imageio
from pathlib import Path
import os
import cv2
NUM_COORDS = 33
WHITE_COLOR = (224, 224, 224)
BLACK_COLOR = (0, 0, 0)
RED_COLOR = (0, 0, 255)
GREEN_COLOR = (0, 128, 0)
BLUE_COLOR = (255, 0, 0)
_PRESENCE_THRESHOLD = 0.5
_VISIBILITY_THRESHOLD = 0.5
_RGB_CHANNELS = 3
class Landmark:
def __init__(self, x=None, y=None, z=None, visibility=None):
        self.x = x
        self.y = y
        self.z = z
        self.visibility = visibility
class Landmark_list:
def __init__(self, config):
self.config = config
self.list_size = self.config.NUM_COORDS
self.landmark_list = []
for i in range(self.list_size):
obj = Landmark()
self.landmark_list.append(obj)
self.Min_Max_axis = {
"x_min": None,
"x_max": None,
"y_min": None,
"y_max": None,
"z_min": None,
"z_max": None,
}
def load_xyz(self, x, y, z, visibility=None):
for i, landmark in enumerate(self.landmark_list):
self.landmark_list[i].x = x[i]
self.landmark_list[i].y = y[i]
self.landmark_list[i].z = z[i]
if visibility != None:
self.landmark_list[i].visibility = visibility[i]
def load_df(self, df):
x = df.loc[:, df.columns.str.startswith("x")].to_numpy().flatten()
y = df.loc[:, df.columns.str.startswith("y")].to_numpy().flatten()
z = df.loc[:, df.columns.str.startswith("z")].to_numpy().flatten()
if df.columns.str.startswith("v").any() != False:
visibility = df.loc[:, df.columns.str.startswith("v")].to_numpy().flatten()
self.Min_Max_axis["x_min"] = min(x)
self.Min_Max_axis["x_max"] = max(x)
self.Min_Max_axis["y_min"] = min(y)
self.Min_Max_axis["y_max"] = max(y)
self.Min_Max_axis["z_min"] = min(z)
self.Min_Max_axis["z_max"] = max(z)
if df.columns.str.startswith("v").any():
self.load_xyz(x, y, z, visibility)
else:
self.load_xyz(x, y, z)
def load_csv(self, RESULT_CSV):
df = pd.read_csv(RESULT_CSV)
self.load_df(df)
return df
def save_image(config, csv_file, IMAGE_PATH):
landmark_list_all = Landmark_list(config)
df = landmark_list_all.load_csv(csv_file)
landmark_list_all.load_df(df)
# Plot every frame
index = 0
counter = 0
for i in range(480,len(df)):
if index % 1 == 0:
landmark_list = Landmark_list(config)
df_temp = df.iloc[i, :]
x = df_temp[df.columns.str.startswith("x")].to_numpy().flatten()
y = df_temp[df.columns.str.startswith("y")].to_numpy().flatten()
z = df_temp[df.columns.str.startswith("z")].to_numpy().flatten()
visibility = df_temp[df.columns.str.startswith("v")].to_numpy().flatten()
landmark_list.load_xyz(x, y, z, visibility)
plot_landmarks(
landmark_list,
config.POSE_CONNECTIONS,
counter=counter,
IMAGE_PATH=IMAGE_PATH,
Min_Max_axis=landmark_list_all.Min_Max_axis,
)
counter += 1
index += 1
# Adopt the plot_landmarks from MediaPipe
# https://github.com/google/mediapipe/blob/master/mediapipe/python/solutions/drawing_utils.py
@dataclasses.dataclass
class DrawingSpec:
# Color for drawing the annotation. Default to the white color.
color: Tuple[int, int, int] = WHITE_COLOR
# Thickness for drawing the annotation. Default to 2 pixels.
thickness: int = 2
# Circle radius. Default to 2 pixels.
circle_radius: int = 2
def _normalize_color(color):
return tuple(v / 255.0 for v in color)
def plot_landmarks(
landmark_list,
connections: Optional[List[Tuple[int, int]]] = None,
counter=None,
IMAGE_PATH=None,
Min_Max_axis=None,
landmark_drawing_spec: DrawingSpec = DrawingSpec(color=RED_COLOR, thickness=5),
connection_drawing_spec: DrawingSpec = DrawingSpec(color=BLACK_COLOR, thickness=5),
elevation: int = 10,
azimuth: int = 10,
):
"""Plot the landmarks and the connections in matplotlib 3d.
Args:
landmark_list: A normalized landmark list proto message to be plotted.
connections: A list of landmark index tuples that specifies how landmarks to
be connected.
landmark_drawing_spec: A DrawingSpec object that specifies the landmarks'
drawing settings such as color and line thickness.
connection_drawing_spec: A DrawingSpec object that specifies the
connections' drawing settings such as color and line thickness.
elevation: The elevation from which to view the plot.
azimuth: the azimuth angle to rotate the plot.
Raises:
      ValueError: If any connections contain an invalid landmark index.
"""
if not landmark_list:
return
fig = plt.figure(figsize=(10, 10))
ax = plt.axes(projection="3d")
if Min_Max_axis:
ax.set_xlim3d(-1 * Min_Max_axis["z_max"], -1 * Min_Max_axis["z_min"])
ax.set_ylim3d(Min_Max_axis["x_min"], Min_Max_axis["x_max"])
ax.set_zlim3d(-1 * Min_Max_axis["y_max"], -1 * Min_Max_axis["y_min"])
ax.view_init(elev=elevation, azim=azimuth)
plotted_landmarks = {}
for idx, landmark in enumerate(landmark_list.landmark_list):
if landmark.visibility and landmark.visibility < _VISIBILITY_THRESHOLD:
continue
ax.scatter3D(
xs=[-landmark.z],
ys=[landmark.x],
zs=[-landmark.y],
color=_normalize_color(landmark_drawing_spec.color[::-1]),
linewidth=landmark_drawing_spec.thickness,
)
plotted_landmarks[idx] = (-landmark.z, landmark.x, -landmark.y)
if connections:
num_landmarks = landmark_list.list_size
# Draws the connections if the start and end landmarks are both visible.
for connection in connections:
start_idx = connection[0]
end_idx = connection[1]
if not (0 <= start_idx < num_landmarks and 0 <= end_idx < num_landmarks):
raise ValueError(
f"Landmark index is out of range. Invalid connection "
f"from landmark #{start_idx} to landmark #{end_idx}."
)
if start_idx in plotted_landmarks and end_idx in plotted_landmarks:
landmark_pair = [
plotted_landmarks[start_idx],
plotted_landmarks[end_idx],
]
ax.plot3D(
xs=[landmark_pair[0][0], landmark_pair[1][0]],
ys=[landmark_pair[0][1], landmark_pair[1][1]],
zs=[landmark_pair[0][2], landmark_pair[1][2]],
color=_normalize_color(connection_drawing_spec.color[::-1]),
linewidth=connection_drawing_spec.thickness,
)
plt.savefig(os.path.join(IMAGE_PATH, "fram_sec_{}.png".format(counter)), dpi=50)
def save_gif(IMAGE_PATH):
# https://medium.com/swlh/python-animated-images-6a85b9b68f86
image_path = Path(IMAGE_PATH)
images = list(image_path.glob("*.png"))
    images.sort(key=lambda x: int(x.name.split("_")[2].split(".")[0]), reverse=False)
image_list = []
for file_name in images:
image_list.append(imageio.imread(file_name))
imageio.mimwrite("animated_from_images.gif", image_list)
"""
from pygifsicle import optimize
gif_path = 'animated_from_video.gif'# create a new one
optimize(gif_path, 'animated_from_video_optimized.gif')# overwrite the original one
optimize(gif_path)
"""
def save_video(IMAGE_PATH):
image_folder = IMAGE_PATH
video_name = "video.avi"
images = [img for img in os.listdir(image_folder) if img.endswith(".png")]
images.sort(key=lambda x: int(x.split("_")[2].split(".")[0]), reverse=False)
frame = cv2.imread(os.path.join(image_folder, images[0]))
height, width, layers = frame.shape
fps = 24
video = cv2.VideoWriter(
video_name, cv2.VideoWriter_fourcc(*"DIVX"), fps=fps, frameSize=(width, height)
)
for image in images:
video.write(cv2.imread(os.path.join(image_folder, image)))
cv2.destroyAllWindows()
video.release()
| 2.578125 | 3 |
examples/sync_cmdclass_pyproject/sync_cmdclass_pyproject/__init__.py | linshoK/pysen | 423 | 12793467 | from typing import Any, Callable, Optional, Sequence, Set, Tuple
def foo(
a: Any,
b: Callable[[], Tuple[int, int, str]],
c: Set[str],
d: Optional[Sequence[int]] = None,
e: Any = None,
) -> None:
pass
print("Hello world")
foo(a=1, b=lambda: (1, 2, "hoge"), c=set(), d=None, e=None)
| 3.1875 | 3 |
tenseal/enc_context.py | rand0musername/TenSEAL | 0 | 12793468 | <gh_stars>0
"""The Context manages everything related to the encrypted computation, including keys, which
optimization should be enabled, and how many threads should run for a parallel computation.
"""
import multiprocessing
from enum import Enum
from typing import List, Union
from abc import ABC
import tenseal as ts
class ENCRYPTION_TYPE(Enum):
ASYMMETRIC = ts._ts_cpp.ENCRYPTION_TYPE.ASYMMETRIC
SYMMETRIC = ts._ts_cpp.ENCRYPTION_TYPE.SYMMETRIC
class SCHEME_TYPE(Enum):
NONE = ts._ts_cpp.SCHEME_TYPE.NONE
BFV = ts._ts_cpp.SCHEME_TYPE.BFV
CKKS = ts._ts_cpp.SCHEME_TYPE.CKKS
class Key(ABC):
"""Wrapper class for encryption keys"""
def __init__(self, data):
self.data = data
@property
def data(
self,
) -> Union[
ts._ts_cpp.PublicKey, ts._ts_cpp.SecretKey, ts._ts_cpp.GaloisKeys, ts._ts_cpp.RelinKeys
]:
"""Get the wrapped low level key object"""
return self._data
@data.setter
def data(
self,
value: Union[
ts._ts_cpp.PublicKey, ts._ts_cpp.SecretKey, ts._ts_cpp.GaloisKeys, ts._ts_cpp.RelinKeys
],
):
"""Set the wrapped low level key object"""
native_type = getattr(ts._ts_cpp, self.__class__.__name__)
if not isinstance(value, native_type):
raise TypeError(f"value must be of type {native_type}")
self._data = value
@classmethod
def _wrap(
cls,
data: Union[
ts._ts_cpp.PublicKey, ts._ts_cpp.SecretKey, ts._ts_cpp.GaloisKeys, ts._ts_cpp.RelinKeys
],
):
"""Return a new key object wrapping the low level key object"""
return cls(data)
# We have a class for every key type, to differentiate between them only
class SecretKey(Key):
pass
class PublicKey(Key):
pass
class GaloisKeys(Key):
pass
class RelinKeys(Key):
pass
class Context:
def __init__(
self,
scheme: SCHEME_TYPE = None,
poly_modulus_degree: int = None,
plain_modulus: int = None,
coeff_mod_bit_sizes: List[int] = [],
encryption_type: ENCRYPTION_TYPE = ENCRYPTION_TYPE.ASYMMETRIC,
n_threads: int = None,
data: ts._ts_cpp.TenSEALContext = None,
):
"""Construct a context that holds keys and parameters needed for operating
encrypted tensors using either BFV or CKKS scheme.
Args:
scheme : define the scheme to be used, either SCHEME_TYPE.BFV or SCHEME_TYPE.CKKS.
poly_modulus_degree: The degree of the polynomial modulus, must be a power of two.
plain_modulus: The plaintext modulus. Should not be passed when the scheme is CKKS.
coeff_mod_bit_sizes: List of bit size for each coeffecient modulus.
Can be an empty list for BFV, a default value will be given.
encryption_type : define the encryption type to be used, either ENCRYPTION_TYPE.ASYMMETRIC, or ENCRYPTION_TYPE.SYMMETRIC.
n_threads: define number of threads that shall be later used for parallel computation.
data: A TenSEALContext to wrap. We won't construct a new object if it's passed.
Returns:
A Context object.
"""
# wrapping
if data is not None:
self.data = data
return
# constructing a new object
if scheme == SCHEME_TYPE.BFV:
if plain_modulus is None:
raise ValueError("plain_modulus must be provided")
elif scheme == SCHEME_TYPE.CKKS:
# must be int, but the value doesn't matter for ckks
plain_modulus = 0
else:
raise ValueError("Invalid scheme type, use either SCHEME_TYPE.BFV or SCHEME_TYPE.CKKS")
# We can't pass None here, everything should be set prior to this call
if not (isinstance(n_threads, int) and n_threads > 0):
n_threads = multiprocessing.cpu_count()
self.data = ts._ts_cpp.TenSEALContext.new(
scheme.value,
poly_modulus_degree,
plain_modulus,
coeff_mod_bit_sizes,
encryption_type.value,
n_threads,
)
@property
def data(self) -> ts._ts_cpp.TenSEALContext:
"""Get the wrapped low level TenSEALContext object"""
return self._data
@data.setter
def data(self, value: ts._ts_cpp.TenSEALContext):
"""Set the wrapped low level TenSEALContext object"""
native_type = ts._ts_cpp.TenSEALContext
if not isinstance(value, native_type):
raise TypeError(f"value must be of type {native_type}")
self._data = value
def copy(self) -> "Context":
return self._wrap(self.data.copy())
def __copy__(self) -> "Context":
return self.copy()
@classmethod
def load(cls, data: bytes, n_threads: int = None) -> "Context":
"""Construct a context from a serialized buffer.
Args:
data : bytes buffer from the original context.
n_threads: define number of threads that shall be later used for parallel computation.
Returns:
A Context object.
"""
if n_threads:
return cls._wrap(ts._ts_cpp.TenSEALContext.deserialize(data, n_threads))
return cls._wrap(ts._ts_cpp.TenSEALContext.deserialize(data))
def serialize(
self,
save_public_key: bool = True,
save_secret_key: bool = False,
save_galois_keys: bool = True,
save_relin_keys: bool = True,
) -> bytes:
"""Serialize the context into a stream of bytes."""
return self.data.serialize(
save_public_key, save_secret_key, save_galois_keys, save_relin_keys
)
@property
def global_scale(self) -> float:
return self.data.global_scale
@global_scale.setter
def global_scale(self, value: float):
self.data.global_scale = value
@classmethod
def _wrap(cls, data: ts._ts_cpp.TenSEALContext) -> "Context":
"""Return a new Context object wrapping the low level TenSEALContext object"""
return cls(data=data)
@property
def auto_mod_switch(self) -> bool:
return self.data.auto_mod_switch
@auto_mod_switch.setter
def auto_mod_switch(self, value: bool):
self.data.auto_mod_switch = value
@property
def auto_relin(self) -> bool:
return self.data.auto_relin
@auto_relin.setter
def auto_relin(self, value: bool):
self.data.auto_relin = value
@property
def auto_rescale(self) -> bool:
return self.data.auto_rescale
@auto_rescale.setter
def auto_rescale(self, value: bool):
self.data.auto_rescale = value
def has_galois_keys(self) -> bool:
return self.data.has_galois_keys()
def galois_keys(self) -> GaloisKeys:
return GaloisKeys(self.data.galois_keys())
def generate_galois_keys(self, secret_key: SecretKey = None):
if secret_key is None:
self.data.generate_galois_keys()
elif isinstance(secret_key, SecretKey):
self.data.generate_galois_keys(secret_key.data)
else:
raise TypeError(f"incorrect type: {type(secret_key)} != SecretKey")
def has_relin_keys(self) -> bool:
return self.data.has_relin_keys()
def relin_keys(self) -> RelinKeys:
return RelinKeys(self.data.relin_keys())
def generate_relin_keys(self, secret_key: SecretKey = None):
if secret_key is None:
self.data.generate_relin_keys()
elif isinstance(secret_key, SecretKey):
self.data.generate_relin_keys(secret_key.data)
else:
raise TypeError(f"incorrect type: {type(secret_key)} != SecretKey")
def has_secret_key(self) -> bool:
return self.data.has_secret_key()
def secret_key(self) -> SecretKey:
return SecretKey(self.data.secret_key())
def has_public_key(self) -> bool:
return self.data.has_public_key()
def public_key(self) -> PublicKey:
return PublicKey(self.data.public_key())
def is_private(self) -> bool:
return self.data.is_private()
def is_public(self) -> bool:
return self.data.is_public()
def make_context_public(
self, generate_galois_keys: bool = False, generate_relin_keys: bool = False
):
"""Drop secret part from the context. This is useful before sending the context for remote
computation, as we don't want to send the secret-key that can be used to decrypt values.
Args:
generate_galois_keys: should we generate galois-keys before dropping the secret-key?
generate_relin_keys: should we generate relin-keys before dropping the secret-key?
"""
self.data.make_context_public(
generate_galois_keys=generate_galois_keys, generate_relin_keys=generate_relin_keys
)
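# Illustrative usage sketch (the parameter values are common CKKS examples, not
# project defaults): construct a context, set the global scale, generate keys,
# then strip the secret part before sharing.
#
#     ctx = Context(
#         scheme=SCHEME_TYPE.CKKS,
#         poly_modulus_degree=8192,
#         coeff_mod_bit_sizes=[60, 40, 40, 60],
#     )
#     ctx.global_scale = 2 ** 40
#     ctx.generate_galois_keys()
#     public_ctx = ctx.copy()
#     public_ctx.make_context_public()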
| 2.46875 | 2 |
AmbieNet/users/tests/test_login.py | sansuaza/Backend-AmbieNet | 0 | 12793469 | <filename>AmbieNet/users/tests/test_login.py
#django
from django.test import TestCase
from django.urls import reverse, path
# Django REST Framework
from rest_framework import status
from rest_framework.test import APITestCase
#Model
from AmbieNet.users.models import User, Profile
from AmbieNet.posts.models import Post
class LoginAPITestCase(APITestCase):
"""User login test case."""
def setUp(self):
"""Test case setup, building de instances that are needes in the test case."""
self.user = User.objects.create(
username= 'saenzavs',
email= '<EMAIL>',
password= '<PASSWORD>',
phone_number= '31212231232',
first_name= 'steven',
last_name= 'saenz',
)
self.profile = Profile.objects.create(
user= self.user,
latitude= '1.23',
longitude= '1.22'
)
self.post = Post.objects.create(
user= self.user,
profile = self.profile,
photo='string',
title='Stunami en la casa de saenz',
description='se les creció el rio calarca',
type_catastrophe='maremoto',
latitude='3.2',
longitude= '3.4'
)
self.data = {
'username' : 'saenzavs',
'password' : '<PASSWORD>'
}
self.url = '/posts/'
def test_persist_user(self):
listUser = User.objects.all()
self.assertEqual(len(listUser), 1)
def test_persist_profile(self):
listUser = Profile.objects.all()
self.assertEqual(len(listUser), 1)
def test_persistence_post(self):
post = Post.objects.filter(user= self.user)
self.assertIsNotNone(post)
def test_filter_post_for_ubication(self):
post = Post.objects.filter (latitude = 3.2, longitude= 3.4)
self.assertIsNotNone(post)
def test_search_users_home (self):
post = Post.objects.filter(latitude = 3.2, longitude= 3.4)
self.assertIsNotNone(post)
def test_search_post_for_type_catastrophe(self):
post = Post.objects.filter (type_catastrophe='maremoto')
self.assertIsNotNone(post)
def test_search_user_for_username(self):
user = User.objects.filter(username='saenzavs')
self.assertIsNotNone(user)
def test_search_post_for_user(self):
user = User.objects.get(username='saenzavs')
post = Post.objects.get(user=user)
| 2.796875 | 3 |
tests/_testsite/dummyapp01/dummymodule01.py | bastiedotorg/django-precise-bbcode | 30 | 12793470 | from precise_bbcode.bbcode.tag import BBCodeTag
from precise_bbcode.tag_pool import tag_pool
class LoadDummyTag(BBCodeTag):
name = 'loaddummy01'
definition_string = '[loaddummy01]{TEXT}[/loaddummy01]'
format_string = '<loaddummy>{TEXT}</loaddummy>'
tag_pool.register_tag(LoadDummyTag)
| 1.960938 | 2 |
importify/__init__.py | litcoderr/loadit | 1 | 12793471 | # Copyright 2020 by <NAME>.
# Github: https://github.com/litcoderr
# All rights reserved.
# This file is released under the "MIT License Agreement".
# Please see the LICENSE file.
from .interface import Serializable
| 0.902344 | 1 |
main.py | Vrim/TicTacToe | 0 | 12793472 | <reponame>Vrim/TicTacToe
""" Tic Tac Toe
Author: <NAME>
Date: Oct. 11, 2019
"""
from __future__ import annotations
from typing import Any, List
from Player import Player
from TicTac import TicTac
import os
from AI import AI as ai
board: TicTac
def main():
mode = _selectMode()
if mode == 0:
p1 = Player('X', _askName("P1: "))
p2 = Player('O', "AI")
elif mode == 1:
p1 = Player('X', _askName("P1: "))
p2 = Player('O', _askName("P2: "))
else:
p1 = Player('X', "AI 1")
p2 = Player('O', "AI 2")
board = TicTac(mode, p1, p2)
a = False
c = ''
players = [p1, p2]
curr = 0
print(board)
while not a:
_playTurn(board, mode, players, curr)
print(board)
a, c = board.checkWin()
curr = abs(curr - 1)
print("{} Wins".format("Nooone" if c is None else c.name))
def _playTurn(board: TicTac, mode: int, players: List, curr: int) -> None:
if mode == 0:
if curr == 0:
a = False
while not a:
a = board.place(players[0], _askPlace())
else:
x, y = ai(board, players[1])
board.place(players[1], y * 3 + x + 1)
elif mode == 1:
a = False
while not a:
a = board.place(players[curr], _askPlace())
else:
x, y = ai(board, players[curr])
board.place(players[curr], y * 3 + x + 1)
def _askPlace() -> int:
a = ''
while not a.isdigit() or not int(a) in range(1, 9 + 1):
a = input("Spot #: ")
return int(a)
def _selectMode() -> int:
a = ''
while not a.isnumeric() or not int(a) in range(3):
_clear()
a = input("Select gamemode:\n\
0. One-Player\n\
1. Two-Player\n\
2. AI vs AI\n")
return int(a)
def _askName(prompt: str) -> str:
a = ''
while len(a) < 1:
a = input(prompt)
return a
def _clear() -> None:
""" Clears the console
"""
os.system('cls' if os.name == 'nt' else 'clear')
if __name__ == "__main__":
main() | 3.453125 | 3 |
apps/log_extract/models.py | yiqiwang-17/bk-log | 0 | 12793473 | # coding=utf-8
"""
Tencent is pleased to support the open source community by making BK-LOG 蓝鲸日志平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-LOG 蓝鲸日志平台 is licensed under the MIT License.
License for BK-LOG 蓝鲸日志平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import operator
from datetime import datetime
from functools import reduce
from typing import List
from django.db import models
from django.db.models import Q
from django.utils.translation import ugettext_lazy as _
from apps.utils.log import logger
from apps.models import (
OperateRecordModel,
SoftDeleteModel,
JsonField,
MultiStrSplitByCommaFieldText,
OperateRecordModelManager,
EncryptionField,
)
from apps.log_extract.constants import ExtractLinkType, PIPELINE_TIME_FORMAT
from pipeline.service import task_service
class Strategies(SoftDeleteModel):
"""用户策略表"""
strategy_id = models.AutoField(_("策略ID"), primary_key=True, default=None)
bk_biz_id = models.IntegerField(_("业务ID"), db_index=True)
strategy_name = models.TextField(_("策略名称"))
user_list = MultiStrSplitByCommaFieldText(_("用户ID"))
select_type = models.CharField(_("目标选择类型"), max_length=16)
modules = JsonField(_("模块列表"))
visible_dir = MultiStrSplitByCommaFieldText(_("目录列表"))
file_type = MultiStrSplitByCommaFieldText(_("文件类型"))
operator = models.CharField(_("作业执行人"), max_length=64, default="")
class Meta:
ordering = ["-updated_at"]
class TasksManager(OperateRecordModelManager):
search_fields = ["ip_list", "file_path", "created_by", "remark"]
def search(self, keyword):
if keyword:
filter_query = [Q(**{f"{field}__icontains": keyword}) for field in self.search_fields]
filter_q = reduce(operator.or_, filter_query)
return self.filter(filter_q)
return self
class Tasks(OperateRecordModel):
"""任务记录 一个"下载"行为记作一个"Task" """
objects = TasksManager()
task_id = models.AutoField(_("任务记录id"), primary_key=True)
bk_biz_id = models.IntegerField(_("业务id"), db_index=True)
ip_list = MultiStrSplitByCommaFieldText(_("业务机器ip"))
file_path = MultiStrSplitByCommaFieldText(_("文件列表"))
filter_type = models.CharField(_("过滤类型"), max_length=16, null=True, blank=True)
filter_content = JsonField(_("过滤内容"), null=True, blank=True)
download_status = models.CharField(_("当前文件下载状态"), max_length=64, null=True, blank=True)
expiration_date = models.DateTimeField(_("任务过期时间"), default=None)
pipeline_id = models.CharField(_("流水线ID"), max_length=128, null=True, blank=True, db_index=True)
pipeline_components_id = JsonField(_("流水线组件ID"), null=True, blank=True)
job_task_id = models.BigIntegerField(_("文件分发ID"), null=True, blank=True)
    # Call the API that creates the upload task
cstone_upload_ticket = models.BigIntegerField(_("上传票据"), null=True, blank=True)
cstone_upload_random = models.TextField(_("上传随机值"), null=True, blank=True)
    # Create the upload task from the transit server to the CStone disk
    job_upload_task_id = models.BigIntegerField(_("任务上传ID"), null=True, blank=True)  # query the upload script's result; it contains the task_id returned by CStone
    cstone_upload_task_id = models.BigIntegerField(_("云石上传ID"), null=True, blank=True)  # used to query upload progress from the transit server to CStone
    # Path of the file on CStone that is waiting to be downloaded
cstone_file_path = models.CharField(_("云石文件路径"), default=None, max_length=64, null=True, blank=True)
    # After the upload finishes, call the API that creates the download link
cstone_download_task_id = models.BigIntegerField(_("云石任务ID"), null=True, blank=True)
cstone_download_bk_biz_id = models.BigIntegerField(_("云石下载业务ID"), null=True, blank=True)
    cstone_download_ticket = models.BigIntegerField(_("下载票据"), null=True, blank=True)  # use this ticket to issue the download request to the CStone network disk
cstone_download_random = models.TextField(_("下载随机值"), null=True, blank=True)
task_process_info = models.TextField(_("任务过程信息"), null=True, blank=True)
remark = models.TextField(_("备注"), null=True, blank=True)
preview_directory = models.CharField(_("预览目录"), null=True, blank=True, max_length=255)
preview_ip = models.TextField(_("预览地址ip"), null=True, blank=True)
preview_time_range = models.CharField(_("预览日期"), max_length=10, null=True, blank=True)
preview_is_search_child = models.BooleanField(_("预览是否搜索子目录"), default=False, blank=True)
preview_start_time = models.CharField(_("预览开始日期"), null=True, blank=True, max_length=20)
preview_end_time = models.CharField(_("预览结束日期"), null=True, blank=True, max_length=20)
ex_data = JsonField(_("额外数据"), null=True, blank=True)
cos_file_name = models.CharField(_("cos对象文件名称"), null=True, blank=True, max_length=255)
link_id = models.IntegerField(_("链路id"), null=True, blank=True)
class Meta:
ordering = ["-created_at"]
def get_link_type(self):
try:
return ExtractLink.objects.get(link_id=self.link_id).link_type
except ExtractLink.DoesNotExist:
return ""
def get_extract(self):
from apps.log_extract.handlers.extract import ExtractLinkFactory
return ExtractLinkFactory.get_link(self.get_link_type())()
def get_link(self):
return ExtractLink.objects.filter(link_id=self.link_id).first()
def total_elapsed(self):
try:
task_status = task_service.get_state(self.pipeline_id)
except Exception:
            # Downloads may cover multiple hosts or a single host, so some pipeline nodes may never have executed
logger.info("pipeline任务不存在,pipeline_id=>[{}]".format(self.pipeline_id))
return "0s"
component_status_list = []
if "activities" not in self.pipeline_components_id:
return "0s"
for component_id, component_info in self.pipeline_components_id["activities"].items():
            # Some pipeline components here may not have been executed
try:
task_status["children"][component_id]["name"] = component_info["name"]
component_status_list.append(task_status["children"][component_id])
except KeyError:
pass
return f"{self._cal_total_time(component_status_list)}s"
def _cal_total_time(self, components: List[dict]):
return sum(
[
(
datetime.strptime(component["finish_time"], PIPELINE_TIME_FORMAT)
- datetime.strptime(component["start_time"], PIPELINE_TIME_FORMAT)
).seconds
for component in components
if component["finish_time"] is not None
]
)
total_elapsed.short_description = _("总耗时")
def ip_num(self):
return len(self.ip_list)
ip_num.short_description = _("IP数量")
def download_file_detail(self):
all_file_size = sum(int(ip.get("all_origin_file_size", 0)) for ip in self.ex_data.values())
all_file_num = sum(int(ip.get("file_count", 0)) for ip in self.ex_data.values())
all_pack_file_size = sum(int(ip.get("all_pack_file_size", 0)) for ip in self.ex_data.values())
ret = [
f"{_('下载文件总大小')}: {all_pack_file_size}kb",
f"{_('下载原始文件原始总大小')}: {all_file_size}kb",
f"{_('下载文件总数量')}: {all_file_num}",
]
return " ".join(ret)
download_file_detail.short_description = _("下载文件统计")
class ExtractLink(OperateRecordModel):
name = models.CharField(_("链路名称"), max_length=255)
link_id = models.AutoField(_("链路id"), primary_key=True)
link_type = models.CharField(_("链路类型"), max_length=20, default=ExtractLinkType.COMMON.value)
operator = models.CharField(_("执行人"), max_length=255)
op_bk_biz_id = models.IntegerField(_("执行bk_biz_id"))
qcloud_secret_id = EncryptionField(_("腾讯云SecretId"), default="", null=True, blank=True, help_text=_("内网链路不需要填写"))
qcloud_secret_key = EncryptionField(_("腾讯云SecretKey"), default="", null=True, blank=True, help_text=_("内网链路不需要填写"))
qcloud_cos_bucket = models.CharField(
_("腾讯云Cos桶名称"), max_length=255, default="", blank=True, help_text=_("内网链路不需要填写")
)
qcloud_cos_region = models.CharField(
_("腾讯云Cos区域"), max_length=255, default="", blank=True, help_text=_("内网链路不需要填写")
)
is_enable = models.BooleanField(_("是否启用"), default=True)
created_at = models.DateTimeField(_("创建时间"), auto_now_add=True, blank=True, db_index=True, null=True)
class Meta:
verbose_name = _("提取链路 (第一次配置链路之后 需要重新部署saas && 暂时只支持linux及安装了cgwin的系统)")
verbose_name_plural = _("提取链路 (第一次配置链路之后 需要重新部署saas && 暂时只支持linux及安装了cgwin的系统)")
class ExtractLinkHost(models.Model):
target_dir = models.CharField(_("挂载目录"), max_length=255, default="")
bk_cloud_id = models.IntegerField(_("主机云区域id"))
ip = models.GenericIPAddressField(_("主机ip"))
link = models.ForeignKey(ExtractLink, on_delete=models.CASCADE)
class Meta:
verbose_name = _("链路中转机")
verbose_name_plural = _("链路中转机")
| 1.335938 | 1 |
sort_algorithms.py | vsaliievaa/DSA_Lab1 | 0 | 12793474 | """sorting algorithms"""
def merge(lst1: list, lst2: list) -> list:
"""Returns two lists merged into one."""
counter = 0
new_list = []
one, two = 0, 0
while one != len(lst1) and two != len(lst2):
if lst1[one] <= lst2[two]:
new_list.append(lst1[one])
one += 1
else:
new_list.append(lst2[two])
two += 1
counter += 1
new_list.extend(lst1[one:])
new_list.extend(lst2[two:])
return (new_list, counter)
def merge_sort(lst: list) -> tuple:
    """Merge Sort implementation. Sorts the list in place and returns it together with the comparison count."""
counter = 0
result = []
lst_copy = lst
for i in range(len(lst_copy)):
result.append([lst_copy[i]])
i = 0
while i < len(result)-1:
lst1 = result[i]
lst2 = result[i+1]
new_list, num = (merge(lst1, lst2))
counter += num
result.append(new_list)
i += 2
if len(result) != 0:
lst_copy[:] = result[-1][:]
return (lst_copy, counter)
def selection_sort(lst: list) -> int:
    """Selection Sort implementation. Sorts in place and returns the comparison count."""
length = len(lst)
counter = 0
lst_copy = lst
for i in range(length - 1):
min_index = i
for j in range(i + 1, length):
counter += 1
if lst_copy[min_index] > lst_copy[j]:
min_index = j
lst_copy[i], lst_copy[min_index] = lst_copy[min_index], lst_copy[i]
return counter
def insertion_sort(lst: list) -> int:
    """Insertion Sort implementation. Sorts in place and returns the comparison count."""
length = len(lst)
lst_copy, counter = lst, 0
for i in range(1, length):
curr_element = lst_copy[i]
idx = i-1
        if not (idx >= 0 and lst_copy[idx] > curr_element):
            counter += 1
        else:
            while idx >= 0 and lst_copy[idx] > curr_element:
counter += 1
lst_copy[idx+1] = lst_copy[idx]
idx -= 1
if idx >= 0:
counter += 1
lst_copy[idx+1] = curr_element
return counter
def shell_sort(lst: list):
"""Shell sort implementation."""
counter = 0
lst_copy = lst
length = len(lst_copy)
interval = length // 2
while interval > 0:
for i in range(interval, length):
temp = lst_copy[i]
j = i
counter += 1
while j >= interval and lst_copy[j-interval] > temp:
if lst_copy[j-interval] > temp:
counter += 1
lst_copy[j], lst_copy[j -
interval] = lst_copy[j-interval], lst_copy[j]
j -= interval
else:
break
lst_copy[j] = temp
interval = interval // 2
return counter
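if __name__ == "__main__":
    # Small illustrative self-check (the sample data is an arbitrary assumption):
    # every sorter mutates its input list and reports a comparison count;
    # merge_sort also returns the sorted list alongside the count.
    sample = [5, 2, 9, 1, 7]
    print(merge_sort(list(sample)))
    print(selection_sort(list(sample)))
    print(insertion_sort(list(sample)))
    print(shell_sort(list(sample)))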
| 4.1875 | 4 |
src/smtv_api/helpers/file_utils.py | AdamDomagalsky/smtv-micro-scpr | 0 | 12793475 | from functools import wraps
import os
import tempfile
import tarfile
def TemporaryDirectory(func):
    '''Run the decorated function inside a temporary directory, restoring the previous working directory afterwards.'''
@wraps(func)
def wrapper(*args, **kwargs):
cwd = os.getcwd()
with tempfile.TemporaryDirectory() as tmp_path:
os.chdir(tmp_path)
result = func(*args, **kwargs)
os.chdir(cwd)
return result
return wrapper
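# Illustrative usage sketch (the function body below is an assumption for the
# example): any function decorated with TemporaryDirectory executes with a
# fresh temporary directory as its current working directory.
@TemporaryDirectory
def _example_write_scratch_file():
    with open("scratch.txt", "w") as handle:
        handle.write("temporary data")
    return os.path.abspath("scratch.txt")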
def make_tarfile(output_filename, source_dir):
with tarfile.open(output_filename, "w:gz") as tar:
tar.add(source_dir, arcname=os.path.basename(source_dir))
| 3.46875 | 3 |
data/models/simulator/pepsenum.py | SIXMON/peps | 5 | 12793476 | from enum import IntEnum
from abc import abstractmethod
class PepsEnum(IntEnum):
@property
@abstractmethod
def display_text(self):
raise NotImplementedError('display_text method not implemented')
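# Illustrative sketch of a concrete subclass (the members and labels are
# assumptions for demonstration only): subclasses are expected to override
# display_text with a human-readable label.
class ExamplePepsChoice(PepsEnum):
    NEVER = 1
    SOMETIMES = 2

    @property
    def display_text(self):
        return {1: 'Never', 2: 'Sometimes'}[self.value]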
| 3.125 | 3 |
mmdet/ops/corner_pool/__init__.py | vanyalzr/mmdetection | 274 | 12793477 | <reponame>vanyalzr/mmdetection
from .corner_pool import CornerPool
__all__ = ['CornerPool']
| 1.148438 | 1 |
panelapp/panels/models/genepanel.py | genomicsengland/panelapp | 7 | 12793478 | <filename>panelapp/panels/models/genepanel.py
##
## Copyright (c) 2016-2019 Genomics England Ltd.
##
## This file is part of PanelApp
## (see https://panelapp.genomicsengland.co.uk).
##
## Licensed to the Apache Software Foundation (ASF) under one
## or more contributor license agreements. See the NOTICE file
## distributed with this work for additional information
## regarding copyright ownership. The ASF licenses this file
## to you under the Apache License, Version 2.0 (the
## "License"); you may not use this file except in compliance
## with the License. You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing,
## software distributed under the License is distributed on an
## "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
## KIND, either express or implied. See the License for the
## specific language governing permissions and limitations
## under the License.
##
from django.db import models
from django.db.models import Sum
from django.db.models import Case
from django.db.models import When
from django.db.models import Value
from django.urls import reverse
from django.utils.functional import cached_property
from model_utils import Choices
from model_utils.models import TimeStampedModel
from .panel_types import PanelType
class GenePanelManager(models.Manager):
def get_panel(self, pk):
if pk.isdigit():
return super().get_queryset().get(pk=pk)
else:
return super().get_queryset().get(old_pk=pk)
def get_active_panel(self, pk):
return self.get_panel(pk).active_panel
class GenePanel(TimeStampedModel):
STATUS = Choices("promoted", "public", "retired", "internal", "deleted")
old_pk = models.CharField(
max_length=24, null=True, blank=True, db_index=True
) # Mongo ObjectID hex string
name = models.CharField(max_length=255, db_index=True)
status = models.CharField(
choices=STATUS, default=STATUS.internal, max_length=36, db_index=True
)
types = models.ManyToManyField(PanelType)
objects = GenePanelManager()
def __str__(self):
ap = self.active_panel
return "{} version {}.{}".format(self.name, ap.major_version, ap.minor_version)
@property
def unique_id(self):
return self.old_pk if self.old_pk else str(self.pk)
def approve(self):
self.status = GenePanel.STATUS.public
self.save()
def is_approved(self):
return self.status in [GenePanel.STATUS.public, GenePanel.STATUS.promoted]
def is_public(self):
return self.status in [GenePanel.STATUS.public, GenePanel.STATUS.promoted]
def is_deleted(self):
return self.status == GenePanel.STATUS.deleted
def reject(self):
self.status = GenePanel.STATUS.internal
self.save()
def get_absolute_url(self):
return reverse("panels:detail", args=(self.pk,))
def _prepare_panel_query(self):
"""Returns a queryset for all snapshots ordered by version"""
return (
self.genepanelsnapshot_set.prefetch_related("panel", "level4title")
.annotate(
number_of_green_genes=Sum(
Case(
When(
genepanelentrysnapshot__saved_gel_status__gt=3,
then=Value(1),
),
default=Value(0),
output_field=models.IntegerField(),
)
),
number_of_amber_genes=Sum(
Case(
When(genepanelentrysnapshot__saved_gel_status=2, then=Value(1)),
default=Value(0),
output_field=models.IntegerField(),
)
),
number_of_red_genes=Sum(
Case(
When(genepanelentrysnapshot__saved_gel_status=1, then=Value(1)),
default=Value(0),
output_field=models.IntegerField(),
)
),
number_of_gray_genes=Sum(
Case(
When(genepanelentrysnapshot__saved_gel_status=0, then=Value(1)),
default=Value(0),
output_field=models.IntegerField(),
)
),
)
.order_by("-major_version", "-minor_version", "-modified", "-pk")
)
def clear_cache(self):
if self.active_panel:
del self.__dict__["active_panel"]
@cached_property
def active_panel(self):
"""Return the panel with the largest version"""
return self.genepanelsnapshot_set.order_by(
"-major_version", "-minor_version", "-modified", "-pk"
).first()
@cached_property
def active_panel_extra(self):
"""Return the panel with the largest version and related info"""
return (
self.genepanelsnapshot_set.prefetch_related(
"panel",
"level4title",
"genepanelentrysnapshot_set",
"genepanelentrysnapshot_set__tags",
"genepanelentrysnapshot_set__evidence",
"genepanelentrysnapshot_set__gene_core",
"genepanelentrysnapshot_set__evaluation__comments",
)
.order_by("-major_version", "-minor_version", "-modified", "-pk")
.first()
)
def get_panel_version(self, version):
"""Get a specific version. Version argument should be a string"""
major_version, minor_version = version.split(".")
return (
self._prepare_panel_query()
.filter(major_version=int(major_version), minor_version=int(minor_version))
.first()
)
def add_activity(self, user, text, entity=None):
"""Adds activity for this panel"""
self.active_panel.add_activity(user, text)
| 1.804688 | 2 |
series_tiempo_ar_api/apps/api/tests/endpoint_tests/pagination_tests.py | datosgobar/series-tiempo-ar-api | 28 | 12793479 | from django.urls import reverse
from series_tiempo_ar_api.apps.api.tests.endpoint_tests.endpoint_test_case import EndpointTestCase
class PaginationTests(EndpointTestCase):
def test_get_single_value(self):
resp = self.client.get(reverse('api:series:series'), data={'ids': self.increasing_month_series_id, 'limit': 1})
self.assertEqual(len(resp.json()['data']), 1)
def test_get_five_offset_values(self):
data = {'ids': self.increasing_month_series_id, 'start': 5, 'limit': 5}
resp = self.run_query(data)
data = [
['1999-06-01', 105],
['1999-07-01', 106],
['1999-08-01', 107],
['1999-09-01', 108],
['1999-10-01', 109],
]
self.assertEqual(resp['data'], data)
| 2.203125 | 2 |
vae/main.py | fomorians/vae | 2 | 12793480 | <gh_stars>1-10
import os
import attr
import random
import argparse
import numpy as np
import tensorflow as tf
import tensorflow.contrib.eager as tfe
import tensorflow_probability as tfp
from tqdm import trange
from vae import losses
from vae.data import prep_images, get_dataset
from vae.model import Model
@attr.s
class Params:
"""
Container for hyperparameters.
"""
learning_rate = attr.ib(default=1e-3)
epochs = attr.ib(default=100)
batch_size = attr.ib(default=1024)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--job-dir', required=True)
parser.add_argument('--seed', default=67, type=int)
args = parser.parse_args()
print('args:', args)
# create a job directory if it doesn't already exist
if not os.path.exists(args.job_dir):
os.makedirs(args.job_dir)
# enable eager execution
tf.enable_eager_execution()
# set random seeds for consistent execution
random.seed(args.seed)
np.random.seed(args.seed)
tf.set_random_seed(args.seed)
# define hyperparameters
params = Params()
print('params:', params)
# load MNIST dataset
((images_train, labels_train),
(images_test, labels_test)) = tf.keras.datasets.mnist.load_data()
# prepare the images by casting and rescaling
images_train = prep_images(images_train)
images_test = prep_images(images_test)
# compute statistics from the training set
images_loc = images_train.mean()
images_scale = images_train.std()
# define datasets for sampling batches
dataset_train = get_dataset(
(images_train, labels_train),
batch_size=params.batch_size,
shuffle=True)
dataset_test = get_dataset(
(images_test, labels_test), batch_size=params.batch_size)
# model / optimization
global_step = tf.train.get_or_create_global_step()
optimizer = tf.train.AdamOptimizer(learning_rate=params.learning_rate)
model = Model(
inputs_loc=images_loc,
inputs_scale=images_scale,
inputs_shape=[28, 28, 1])
latent_prior = tfp.distributions.MultivariateNormalDiag(
loc=tf.zeros(shape=[2], dtype=tf.float32),
scale_identity_multiplier=1.0)
# checkpoints
checkpoint = tf.train.Checkpoint(
optimizer=optimizer, model=model, global_step=global_step)
checkpoint_path = tf.train.latest_checkpoint(args.job_dir)
if checkpoint_path is not None:
checkpoint.restore(checkpoint_path).assert_consumed()
# summaries
summary_writer = tf.contrib.summary.create_file_writer(
args.job_dir, max_queue=1, flush_millis=1000)
summary_writer.set_as_default()
with trange(params.epochs) as pbar:
for epoch in pbar:
loss_train = tfe.metrics.Mean(name='loss/train')
for images, labels in dataset_train:
with tf.GradientTape() as tape:
outputs_dist, z_dist, z = model(
images, labels, training=True)
loss = losses.variational(outputs_dist, z_dist, images,
latent_prior)
loss_train(loss)
grads = tape.gradient(loss, model.trainable_variables)
grads_and_vars = zip(grads, model.trainable_variables)
optimizer.apply_gradients(
grads_and_vars, global_step=global_step)
with tf.contrib.summary.always_record_summaries():
loss_train.result()
tf.contrib.summary.scalar(
name='grad_norm', tensor=tf.global_norm(grads))
tf.contrib.summary.image(
name='image/train',
tensor=images,
max_images=1,
step=global_step)
tf.contrib.summary.image(
name='outputs/train',
tensor=outputs_dist.mean(),
max_images=1,
step=global_step)
loss_test = tfe.metrics.Mean(name='loss/eval')
for images, labels in dataset_test:
outputs_dist, z_dist, z = model(images, labels)
loss = losses.variational(outputs_dist, z_dist, images,
latent_prior)
loss_test(loss)
with tf.contrib.summary.always_record_summaries():
loss_test.result()
tf.contrib.summary.image(
name='image/eval',
tensor=images,
max_images=1,
step=global_step)
tf.contrib.summary.image(
name='outputs/eval',
tensor=outputs_dist.mean(),
max_images=1,
step=global_step)
pbar.set_description('loss (train): {}, loss (eval): {}'.format(
loss_train.result().numpy(),
loss_test.result().numpy()))
checkpoint_prefix = os.path.join(args.job_dir, 'ckpt')
checkpoint.save(checkpoint_prefix)
if __name__ == '__main__':
main()
| 2.0625 | 2 |
possum/utils/pos_util_midpoints.py | morales-gregorio/poSSum3 | 10 | 12793481 | #!/usr/bin/env python
# encoding: utf-8
import os
import sys
import itk
from possum import pos_itk_core
from possum import pos_itk_transforms
from possum.pos_common import r

try:
    # vtk is an optional dependency used by points_to_vtk_points; see the note below.
    import vtk
except ImportError:
    vtk = None
"""
.. note::
    Some of the non-crucial, optional functions in this module require the vtk
    module to be installed. If it is not available, VTK support will be
disabled.
"""
def calculate_labels_midpoints(itk_image):
"""
    This function introduces a workflow for calculating the middle midpoints of
    the labelled images. The term 'middle midpoints' is used on purpose: you might
    think that centroids are calculated here, but they are not, which is why the
    term 'middle midpoints' is used instead.
    In short, this function calculates the middle midpoints of the labels in the provided image.
The midpoints are calculated in the following way:
Now iterate over all available labels except the background label which
has been removed. The overall idea of this loop is to:
1) Extract given label from the segmentation
2) Extract the largest patch of the segmentation as there
might be multiple disjoint regions colored with given label
3) Apply the distance transform to the largest path with
given segmentation
4) Pick the maximum of the distance transform for given segmentation
and by this define the 'middle point' of given label.
    .. note :: Please keep in mind that this procedure returns the position of the
    first (index-wise) voxel with the maximum value. This means that if more than
    one pixel holds the maximum value of the distance transform, the location of
    the first one is returned. One could argue that the centre of mass of those
    maximum voxels should be returned instead, but it is unknown whether such a
    centre would lie inside or outside the actual structure. Therefore some of the
    results may look weird, but they are actually correct.
:param itk_image: Labelled image, the image is expected to be a labelled
image in which individual discrete values correspond
to individual structures. Formally this means that
the image has to be of `uchar` or `ushort` type,
to have a single component and to have
a dimensionality of two or three. Images having
different properties will not be processed.
:type itk_image: `itk.Image`
:return: Middle midpoints of the labels in the image.
:rtype: {int: ((float, float, float), (float, float, float)), ...}
    And now it is time to do some unit testing. Please also consider this set
    of unit tests as an example of how to use this function.
>>> import base64
>>> from possum import pos_itk_transforms
>>> example_two_dimensions='<KEY>
>>> input_filename="/tmp/pos_itk_centroids_example_two_dimensions.nii.gz"
>>> open(input_filename, "w").write(base64.decodestring(example_two_dimensions))
>>> itk_image = pos_itk_transforms.read_itk_image(input_filename)
>>> midpoints = calculate_labels_midpoints(itk_image)
>>> sorted(midpoints.keys()) == [1, 2, 3, 10, 11, 12, 13, 20, 21, 22, 23, 30, 31, 32, 33]
True
>>> map(int, midpoints[1][0]) == [14, 0, 0]
True
>>> map(int, midpoints[21][0]) == [14, 24, 0]
True
>>> midpoints[30] == ((0.0, 39.0, 0), (0, 39, 0))
True
>>> type(midpoints[30][1][1]) == type(1)
True
>>> type(midpoints[30][0][1]) == type(1)
False
>>> type(midpoints[30][0][1]) == type(1.0)
True
>>> os.remove(input_filename)
Now we will try to process a 3D image
>>> example_three_dimensions="<KEY>
>>> input_filename="/tmp/pos_itk_centroids_example_three_dimensions.nii.gz"
>>> open(input_filename, "w").write(base64.decodestring(example_three_dimensions))
>>> itk_image = pos_itk_transforms.read_itk_image(input_filename)
>>> midpoints = calculate_labels_midpoints(itk_image)
>>> os.remove(input_filename)
>>> str(type(midpoints)) == "<type 'dict'>"
True
>>> len(midpoints.keys()) == 63
True
>>> str(midpoints.get(0,None)) == "None"
True
>>> midpoints[1] == ((5.0, 0.0, 0.0), (5, 0, 0))
True
>>> type(midpoints[30][0][1]) == type(1)
False
>>> type(midpoints[30][0][1]) == type(1)
False
>>> type(midpoints[30][0][1]) == type(1.0)
True
>>> midpoints[183] == ((15.0, 15.0, 15.0), (15, 15, 15))
True
>>> midpoints[111] == ((5.0, 5.0, 9.0), (5, 5, 9))
True
>>> midpoints[53] == ((13.0, 0.0, 5.0), (13, 0, 5))
True
"""
C_BACKGROUND_LABEL_IDX = 0
# Define the dimensionality, data type and number of components
# of the label image
label_type = \
pos_itk_core.io_image_type_to_component_string_name[
itk_image.__class__]
# Extract the details of the image provided and check if they are
# ok to use in the routine.
n_dim = len(itk_image.GetLargestPossibleRegion().GetSize())
number_of_components = itk_image.GetNumberOfComponentsPerPixel()
data_type = label_type[1]
assert n_dim in [2, 3], \
"Incorrect dimensionality."
assert number_of_components == 1, \
"Only single component images are allowed."
assert data_type in ["unsigned_char", "unsigned_short"], \
r("Incorrect data type for a labelled image only unsigned_char\
and unsigned_short are accepted.")
# t_label_img is the ITK image type class to be used in filters
# templates.
t_label_img = itk_image.__class__
# We'll be also using another image type. This one is identical
# in terms of size and dimensionality as the labelled image.
# The differe is in data type: this one has to be float to handle
# the distance transform well.
float_type = list(label_type)
float_type[1] = "float"
t_float_img = \
pos_itk_core.io_component_string_name_to_image_type[tuple(float_type)]
# The purpose of the filter below is to define the unique labels
# given segmentation contains.
unique_labels = \
itk.LabelGeometryImageFilter[(t_label_img, t_label_img)].New()
unique_labels.SetInput(itk_image)
unique_labels.CalculatePixelIndicesOff()
unique_labels.Update()
# This is where we'll collect the results. We collect, both, the physical
# location as well as the
middle_points = {}
# We have to map the available labels returned by itk
# as sometimes strange things happen and they are returned as longints
    # which are apparently incompatible with Python's int type.
# Consider it a safety precaution
available_labels = map(int, unique_labels.GetLabels())
# Now we need to remove the background label (if such
# label actually exists)
C_BACKGROUND_LABEL_IDX
try:
available_labels.remove(C_BACKGROUND_LABEL_IDX)
except:
pass
# Now iterate over all available labels except the background label which
# has been removed. The overall idea of this loop is to:
# 1) Extract given label from the segmentation
# 2) Extract the largest patch of the segmentation as there
# might be multiple disjoint regions colored with given label
# 3) Apply the distance transform to the largest path with
# given segmentation
# 4) Pick the maximum of the distance transform for given segmentation
# and by this define the 'middle point' of given label
# I call the midpoints 'middle midpoints' not centroids as centroids
# are something different and they are calculated in a different
# way. Our center midpoints cannot be called centroids.
for label_idx in available_labels:
extract_label = \
itk.BinaryThresholdImageFilter[
(t_label_img, t_label_img)].New()
extract_label.SetInput(itk_image)
extract_label.SetUpperThreshold(label_idx)
extract_label.SetLowerThreshold(label_idx)
extract_label.SetOutsideValue(0)
extract_label.SetInsideValue(1)
extract_label.Update()
patches = \
itk.ConnectedComponentImageFilter[
(t_label_img, t_label_img)].New()
patches.SetInput(extract_label.GetOutput())
patches.Update()
largest_patch = \
itk.LabelShapeKeepNObjectsImageFilter[t_label_img].New()
largest_patch.SetInput(patches.GetOutput())
largest_patch.SetBackgroundValue(0)
largest_patch.SetNumberOfObjects(1)
largest_patch.SetAttribute(100)
largest_patch.Update()
distance_transform = \
itk.SignedMaurerDistanceMapImageFilter[
(t_label_img, t_float_img)].New()
distance_transform.SetInput(largest_patch.GetOutput())
distance_transform.InsideIsPositiveOn()
distance_transform.Update()
centroid = itk.MinimumMaximumImageCalculator[t_float_img].New()
centroid.SetImage(distance_transform.GetOutput())
centroid.Compute()
centroid.GetIndexOfMaximum()
index = centroid.GetIndexOfMaximum()
point = itk_image.TransformIndexToPhysicalPoint(index)
# We need to slightly refine the results returned by itk
# The results have to be processed in a slightly different way for
        # two dimensional results and slightly differently for 3D results.
        # Again, we do a lot of explicit casting to assure type
# compatibility. The 2D midpoints are converted into 3D midpoints since
# it is easier to use them in vtk if they're 3D midpoints.
if n_dim == 2:
point = map(float, point) + [0]
index = map(int, index) + [0]
if n_dim == 3:
point = map(float, point)
index = map(int, index)
middle_points[label_idx] = (tuple(point), tuple(index))
# Below there is some debugging code. Not really important for everyday
# use.
# print middle_points.__repr__()
return middle_points
def points_to_vtk_points(points_list):
"""
The function converts the location of the middle points into a vtkPolyData
structure and assigns appropriate label IDs to the individual points of the
    vtk points structure. Basically, you can use the resulting vtkPolyData() to
    know where the centre of a particular structure is.
    .. note ::
This function will not work if the vtk module is not loaded.
:param point_list: List of points to turn into vtk points
:type point_list: {int: ((float, float, float), (float, float, float)), ...}
:return: Midpoints of the individual structures expressed as
vtk.vtkPolyData()
:rtype: `vtk.vtkPolyData`
"""
try:
vtk.vtkVersion()
except:
return None
n_points = len(points_list.keys())
points = vtk.vtkPoints()
vertices = vtk.vtkCellArray()
id_array = vtk.vtkUnsignedCharArray()
id_array.SetName("Label_ID")
id_array.SetNumberOfComponents(1)
id_array.SetNumberOfTuples(n_points)
for (i, (pt, idx)) in points_list.items():
id_ = points.InsertNextPoint(pt)
vertices.InsertNextCell(1)
vertices.InsertCellPoint(id_)
id_array.SetTuple1(id_, i)
point = vtk.vtkPolyData()
point.SetPoints(points)
point.SetVerts(vertices)
point.GetPointData().AddArray(id_array)
return point
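# Illustrative sketch tying the two helpers above together (the input path is
# an assumption for the example, not part of the original module):
def _example_midpoints_to_polydata(path_to_labelled_image):
    """Read a labelled image, locate the label midpoints and wrap them as vtkPolyData."""
    labelled_image = pos_itk_transforms.read_itk_image(path_to_labelled_image)
    midpoints = calculate_labels_midpoints(labelled_image)
    return points_to_vtk_points(midpoints)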
if __name__ == '__main__':
import doctest
print doctest.testmod(verbose=True)
| 2.609375 | 3 |
MLSD/Transformers/activeTrans.py | HaoranXue/Machine_Learning_For_Structured_Data | 4 | 12793482 | <filename>MLSD/Transformers/activeTrans.py
from sklearn.base import TransformerMixin
class activeTrans(TransformerMixin):
def __init__(self,
ifSData=False,
Trans_in=True,
Multi_col=False,
New_Trans=None,
Reset_default=False):
self.Trans_in = Trans_in
self.Multi_col = Multi_col
self.New_Trans = New_Trans
self.Reset_default = Reset_default
self.ifSData = ifSData
def fit(self, X, y=None):
if self.New_Trans != None:
X.transformer = self.New_Trans
if self.ifSData == True:
trans = X.transformer
trans.fit(X)
self.trans = trans
elif self.ifSData == False:
self.trans = []
for i in X.data:
self.trans.append(i.transformer.fit(i))
def transform(self, X, y=None):
if self.New_Trans != None:
X.transformer = self.New_Trans
if self.ifSData == True:
return self.trans.transform(X)
elif self.ifSData == False:
features = self.trans[0].transform(X.data[0])
            for i in range(1, len(X.data)):
                features = features.join(self.trans[i].transform(X.data[i]))
return features
def fit_transform(self, X, y=None):
if self.New_Trans != None:
X.transformer = self.New_Trans
if self.ifSData == True:
self.trans = X.transformer
return self.trans.fit_transform(X)
elif self.ifSData == False:
self.trans = []
for i in X.data:
self.trans.append(i.transformer)
features = self.trans[0].fit_transform(X.data[0])
            for i in range(1, len(X.data)):
                features = features.join(self.trans[i].fit_transform(X.data[i]))
return features
| 2.640625 | 3 |
WalkerBuddyAPI/app.py | gardyna/WalkerAppGame | 0 | 12793483 | # standard lib
from functools import wraps
# third party packages
from flask import Flask, jsonify, abort, request, Response
from flask.ext.sqlalchemy import SQLAlchemy
app = Flask(__name__)
app.debug = True
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///test.db'
app.secret_key = '<KEY>'
db = SQLAlchemy(app)
# region dbClasses
class User(db.Model):
"""
Represent a user in database
"""
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(80), unique=True)
email = db.Column(db.String(120), unique=True)
def __init__(self, username, email):
self.username = username
self.email = email
def __repr__(self):
return '<User {}>'.format(self.username)
def to_dict(self):
return {
'id': self.id,
'username': self.username,
'email': self.email
}
# endregion
# region authorization
def check_auth(username, password):
return username == 'admin' and password == '<PASSWORD>'
def authenticate():
return Response(
'Could not verify your access level for that URL.\n'
'You have to login with proper credentials', 401,
{'WWW-Authenticate': 'Basic realm="Login Required"'})
def requires_auth(f):
@wraps(f)
def decorated(*args, **kwargs):
auth = request.authorization
if not auth or not check_auth(auth.username, auth.password):
return authenticate()
return f(*args, **kwargs)
return decorated
# endregion
@app.route('/')
@requires_auth
def hello_world():
return 'Hello World!'
@app.route('/users', methods=['GET', 'POST'])
def get_users():
if request.method == 'POST':
if (request.json['username'] is None
or request.json['email'] is None):
            abort(400)
user = User(request.json['username'],
request.json['email'])
db.session.add(user)
db.session.commit()
return jsonify({'user': user.to_dict()}), 201
elif request.method == 'GET':
users = User.query.all()
users_dto = [user.to_dict() for user in users]
return jsonify({'users': users_dto}), 200
else:
abort(405, "Method not supported")
@app.errorhandler(405)
def custom405(error):
response = jsonify({'message': error.description})
return response, 405
if __name__ == '__main__':
if app.debug:
app.run()
else:
app.run(host='0.0.0.0')
| 2.765625 | 3 |
Auto2DSelect/helper.py | MPI-Dortmund/sphire_classes_autoselect | 3 | 12793484 | """
Automatic 2D class selection tool.
MIT License
Copyright (c) 2019 <NAME> Institute of Molecular Physiology
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from os import path, listdir
import h5py
from PIL import Image # install it via pip install pillow
import numpy as np
import mrcfile
"""
The format of the .hdf file is the following:
['MDF']['images']['i']['image'] where i is a number representing the i-th image;
hence, to get image number 5:
['MDF']['images']['5']['image'][()]
"""
def create_circular_mask(h, w, center=None, radius=None):
if center is None: # use the middle of the image
center = (int(w / 2), int(h / 2))
if radius is None: # use the smallest distance between the center and image walls
radius = min(center[0], center[1], w - center[0], h - center[1])
Y, X = np.ogrid[:h, :w]
dist_from_center = np.sqrt((X - center[0]) ** 2 + (Y - center[1]) ** 2)
mask = dist_from_center <= radius
return mask
def checkfiles(path_to_files):
"""
checks if the hdf files are in the correct path and returns True if all of them exists
:param path_to_files: list of paths
:return:
"""
if isinstance(path_to_files, (list, tuple)):
for p in path_to_files:
if not path.isfile(p):
return False
elif isinstance(path_to_files, str):
return path.isfile(path_to_files)
return True
def calc_2d_spectra(img):
from scipy import fftpack
import numpy as np
F1 = fftpack.fft2(img)
F2 = fftpack.fftshift(F1)
psd2D = np.abs(F2) ** 2
return psd2D
def getList_files(paths):
"""
Returns the list of the valid hdf files in the given paths. It is called recursively
:param paths: path or list of paths
:return:
"""
if isinstance(paths, str):
paths = [paths]
list_new_paths = list()
iterate = False
for p in paths:
if path.isdir(p):
iterate = True
list_new_paths += [path.join(p, f) for f in listdir(p)]
elif path.isfile(p):
list_new_paths.append(p)
else:
print(
"WARNING: The given path '"
+ str(p)
+ "' is not a folder or a file and it will be ignored"
)
if iterate is True:
return getList_files(list_new_paths)
return list_new_paths
def getList_relevant_files(path_to_files):
"""
Check if the given files are hdf/mrcs/st with a valid format. Return The list of valid hdf
:param path_to_files: list of all the files present in the folder (and subfolder)given from the user
:return: list of valid hdf
"""
return [
path_to_file
for path_to_file in path_to_files
if path_to_file.endswith("mrcs")
or path_to_file.endswith("mrc")
or path_to_file.endswith("st")
or h5py.is_hdf5(path_to_file)
]
""" FUNCTION TO READ THE HDF"""
def get_key_list_images(path):
"""
    Returns the list of keys representing the images in the hdf/mrcs/st file, converted to a list of integers.
:param path:
:return:
"""
print("Try to list images on", path)
import os
filename_ext = os.path.basename(path).split(".")[-1]
result_list = None
try:
if filename_ext in {"mrcs", "st"}:
with mrcfile.mmap(path, permissive=True, mode="r") as mrc:
list_candidate = [i for i in range(mrc.header.nz)]
if len(list_candidate) > 0:
result_list = list_candidate
if filename_ext == "mrc":
with mrcfile.mmap(path, permissive=True, mode="r") as mrc:
result_list = list(range(1))
except Exception as e:
print(e)
print(
"WARNING in get_list_images: the file '"
+ path
+ " is not an valid mrc file. It will be ignored"
)
if filename_ext == "hdf":
try:
with h5py.File(path, "r") as f:
list_candidate = [int(v) for v in list(f["MDF"]["images"])]
except:
print(
"WARNING in get_list_images: the file '"
+ path
+ " is not an HDF file with the following format:\n\t['MDF']['images']. It will be ignored"
)
if len(list_candidate) > 0:
result_list = list_candidate
return result_list
def getImages_fromList_key(file_index_tubles):
"""
Returns the images in the hdf file (path_to_file) listed in (list_images)
:param path_to_file: path to hdf file
:param list_images: list of keys of the DB. It is the output( or part of its) given from 'get_list_images'
:return: Returns a list of numpy arrays
"""
# driver="core"
result_data = list()
for path_to_file, list_images in file_index_tubles:
data = list()
if path.isfile(path_to_file):
if path.basename(path_to_file).split(".")[-1] == "hdf":
try:
with h5py.File(path_to_file, 'r') as f:
if isinstance(list_images, list) or isinstance(
list_images, tuple
):
data = [
np.nan_to_num(f["MDF"]["images"][str(i)]["image"][()])
for i in list_images
] # [()] is used instead of .value
elif isinstance(list_images, int):
data = np.nan_to_num(f["MDF"]["images"][str(list_images)]["image"][()])
else:
print(
"\nERROR in getImages_fromList_key: invalid list_images, it should be a string or a list/tuple of strings:",
type(list_images),
)
print("you try to get the following images")
print(list_images)
exit()
except Exception as e:
print(e)
print(
"\nERROR in getImages_fromList_key: the file '"
+ path_to_file
+ " is not an HDF file with the following format:\n\t['MDF']['images']['0']['image']"
)
print("you try to get the following images")
print(list_images)
print("there are " + str(len(f["MDF"]["images"])))
exit()
elif path.basename(path_to_file).split(".")[-1] in ["mrc", "mrcs", "st"]:
data = []
with mrcfile.mmap(path_to_file, permissive=True, mode="r") as mrc:
if isinstance(list_images, int):
list_images = [list_images]
if isinstance(list_images, list) or isinstance(list_images, tuple):
if mrc.header.nz > 1:
if len(list_images)==1:
data = np.nan_to_num(mrc.data[list_images[0]])
else:
data = [np.nan_to_num(mrc.data[i]) for i in list_images]
elif len(list_images) == 1:
data = np.nan_to_num(mrc.data)
result_data.append(data)
return result_data
def getImages_fromList_key_old(path_to_file, list_images):
"""
Returns the images in the hdf file (path_to_file) listed in (list_images)
:param path_to_file: path to hdf file
:param list_images: list of keys of the DB. It is the output( or part of its) given from 'get_list_images'
:return: Returns a list of numpy arrays
"""
data = list()
if path.isfile(path_to_file):
if path.basename(path_to_file).split(".")[-1] == "hdf":
try:
with h5py.File(path_to_file, driver="core") as f:
if isinstance(list_images, list) or isinstance(list_images, tuple):
data = [
f["MDF"]["images"][str(i)]["image"][()] for i in list_images
] # [()] is used instead of .value
elif isinstance(list_images, int):
data = f["MDF"]["images"][str(list_images)]["image"][()]
else:
print(
"\nERROR in getImages_fromList_key: invalid list_images, it should be a string or a list/tuple of strings:",
type(list_images),
)
print("you try to get the following images")
print(list_images)
exit()
except Exception as e:
print(e)
print(
"\nERROR in getImages_fromList_key: the file '"
+ path_to_file
+ " is not an HDF file with the following format:\n\t['MDF']['images']['0']['image']"
)
print("you try to get the following images")
print(list_images)
print("there are " + str(len(f["MDF"]["images"])))
exit()
elif path.basename(path_to_file).split(".")[-1] in ["mrc", "mrcs", "st"]:
data = []
with mrcfile.mmap(path_to_file, permissive=True, mode="r") as mrc:
if isinstance(list_images, int):
list_images = [list_images]
if isinstance(list_images, list) or isinstance(list_images, tuple):
if mrc.header.nz > 1:
data = [mrc.data[i] for i in list_images]
elif len(list_images) == 1:
data = [mrc.data]
return data
""" FUNCTION TO MANIPULATE THE IMAGES"""
def apply_mask(img, mask):
mean = np.mean(img)
img[mask==False]=mean
return img
def resize_img(img, resize=(76, 76)):
"""
Resize the given image into the given size
:param img: as numpy array
:param resize: resize size
:return: return the resized img
"""
im = Image.fromarray(img)
return np.array(im.resize(resize, resample=Image.BILINEAR))
def normalize_img(img):
"""
    normalize the image based on its mean and standard deviation
:param img:
:return:
"""
import numpy as np
# img = img.astype(np.float64, copy=False)
mean = np.mean(img)
std = np.std(img)
img = (img - mean) / (std+0.00001)
# img = img.astype(np.float32, copy=False)
return img
def flip_img(img, t=None):
"""
It flip the image in function of the given typ
:param img:
:param t: type of the flip
1 --> flip over the row. Flipped array in up-down direction.(X)
2 --> flip over the column Flipped array in right-left direction(Y)
3 --> flip over the column and the row (X and Y)
otherwise --> no flip
:return:
"""
if t == 1:
return np.flipud(img)
elif t == 2:
return np.fliplr(img)
elif t == 3:
return np.flipud(np.fliplr(img))
return img
| 2.265625 | 2 |
blacktape/util.py | carascap/blacktape | 0 | 12793485 | <filename>blacktape/util.py
import signal
def worker_init():
"""
Initializer for worker processes that makes them ignore interrupt signals
https://docs.python.org/3/library/signal.html#signal.signal
https://docs.python.org/3/library/signal.html#signal.SIG_IGN
"""
signal.signal(signal.SIGINT, signal.SIG_IGN)
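# Illustrative usage sketch (the pool size and task are arbitrary assumptions):
# worker_init is intended to be passed as the initializer of a worker pool so
# that only the parent process reacts to Ctrl-C.
if __name__ == "__main__":
    from multiprocessing import Pool

    with Pool(processes=2, initializer=worker_init) as pool:
        print(pool.map(abs, [-3, -2, -1]))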
| 1.851563 | 2 |
PSET 4/problem-5.py | AtharvaPusalkar/MITx-6.00.1x | 1 | 12793486 | def playHand(hand, wordList, n):
"""
Allows the user to play the given hand, as follows:
* The hand is displayed.
* The user may input a word or a single period (the string ".")
to indicate they're done playing
* Invalid words are rejected, and a message is displayed asking
the user to choose another word until they enter a valid word or "."
* When a valid word is entered, it uses up letters from the hand.
* After every valid word: the score for that word is displayed,
the remaining letters in the hand are displayed, and the user
is asked to input another word.
* The sum of the word scores is displayed when the hand finishes.
* The hand finishes when there are no more unused letters or the user
inputs a "."
hand: dictionary (string -> int)
wordList: list of lowercase strings
n: integer (HAND_SIZE; i.e., hand size required for additional points)
"""
# BEGIN PSEUDOCODE (download ps4a.py to see)
# Keep track of the total score
score = 0
# As long as there are still letters left in the hand:
while calculateHandlen(hand) > 0:
# Display the hand
print('Current Hand:', end=' '); displayHand(hand)
# Ask user for input
guess = str(input('Enter word, or a "." to indicate that you are finished: '))
# If the input is a single period:
if guess == '.':
# End the game (break out of the loop)
break
# Otherwise (the input is not a single period):
else:
# If the word is not valid:
if isValidWord(guess, hand, wordList) == False:
# Reject invalid word (print a message followed by a blank line)
print('Invalid word, please try again.', '\n')
# Otherwise (the word is valid):
else:
# Tell the user how many points the word earned, and the updated total score, in one line followed by a blank line
score += getWordScore(guess, n)
print('"'+guess+'"', "earned", getWordScore(guess, n), "points. Total:", score, "points", '\n')
# Update the hand
hand = updateHand(hand, guess)
# Game is over (user entered a '.' or ran out of letters), so tell user the total score
if guess == '.':
print('Goodbye! Total score:', score, 'points.')
else:
print('Run out of letters. Total score:', score, 'points.')
| 3.890625 | 4 |
tests/printing/test_registry_rendering.py | anna-naden/qalgebra | 2 | 12793487 | <gh_stars>1-10
import os
import pytest
from qalgebra.utils.testing import datadir
# TODO
| 0.976563 | 1 |
src/question/migrations/0006_auto_20190215_0755.py | DevTeamSCH/vikoverflow-backend | 0 | 12793488 | # Generated by Django 2.1.7 on 2019-02-15 07:55
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [("question", "0005_merge_20190215_0616")]
operations = [
migrations.RemoveField(model_name="answer", name="is_visible"),
migrations.RemoveField(model_name="comment", name="is_visible"),
migrations.RemoveField(model_name="question", name="is_visible"),
]
| 1.523438 | 2 |
highlights_notifications.py | skontar/hexchat-plugins | 0 | 12793489 | """
Plugin for better notifications with actions.
HexChat Python Interface: http://hexchat.readthedocs.io/en/latest/script_python.html
IRC String Formatting: https://github.com/myano/jenni/wiki/IRC-String-Formatting
"""
import logging
import re
import subprocess
import sys
from os import path
import dbus
import hexchat
__module_name__ = 'highlights_notifications'
__module_description__ = 'Better notifications with actions'
__module_version__ = '1.1'
NOTIFICATION_SERVER = '/home/skontar/Repos/hexchat-plugins/notification_server.py'
LOG = '~/highlights_notifications.log'
FORMAT = '%(asctime)-24s %(levelname)-9s %(message)s'
logging.basicConfig(filename=path.expanduser(LOG), format=FORMAT, level=logging.DEBUG)
def handle_exception(exc_type, exc_value, exc_traceback):
logging.error('Uncaught exception', exc_info=(exc_type, exc_value, exc_traceback))
sys.__excepthook__(exc_type, exc_value, exc_traceback)
sys.excepthook = handle_exception
def server_start():
logging.info('Starting server')
subprocess.Popen('python3 {}'.format(NOTIFICATION_SERVER), shell=True)
def get_dbus_interface():
logging.info('Getting DBus interface for Notification Server')
try:
session_bus = dbus.SessionBus()
proxy = session_bus.get_object('com.skontar.HexChat', '/com/skontar/HexChat')
interface = dbus.Interface(proxy, dbus_interface='com.skontar.HexChat')
logging.debug('DBus interface Success')
return interface
except dbus.exceptions.DBusException:
logging.debug('DBus interface Fail')
server_start()
return None
def on_focus_tab(word, word_eol, userdata):
global active_channel
active_channel = hexchat.get_info('channel')
logging.info('Changed active tab to %s', active_channel)
def on_highlight_notification(word, word_eol, userdata):
global interface
win_status = hexchat.get_info('win_status')
network = hexchat.get_info('network')
channel = hexchat.get_info('channel')
nickname = word[0]
nickname = re.sub(r'^\x03\d+', '', nickname) # Remove color
text = word[1]
message_type = userdata
if message_type == 'HLT':
title = 'Highlighted message from: {} ({})'.format(nickname, channel)
else:
title = 'Private message from: {} ({})'.format(nickname, network)
logging.info('New notification [%s | %s | %s]', network, channel, repr(str(nickname)))
logging.debug('Application details: [%s | %s]', win_status, active_channel)
logging.debug('Message type: "%s"', message_type)
logging.debug('Message: %s', repr(text))
# Ignore notification if window is active and active channel is the one where message arrived
if win_status == 'active' and channel == active_channel:
logging.info('Not showing notifications as channel is already active')
return hexchat.EAT_NONE
if interface is None:
logging.debug('No DBus interface prepared')
interface = get_dbus_interface()
if interface is None:
logging.warning('DBus connection to Notification Server fail')
logging.warning('Notification fallback')
hexchat.command('TRAY -b "{}" {}'.format(title, text))
else:
try:
logging.info('Sending message to Notification Server through DBus')
interface.create_notification(nickname, network, channel, title, text, message_type)
except dbus.exceptions.DBusException:
logging.warning('DBus message to Notification Server fail')
logging.warning('Notification fallback')
hexchat.command('TRAY -b "{}" {}'.format(title, text))
interface = get_dbus_interface()
return hexchat.EAT_NONE
def on_unload(userdata):
global interface
logging.info('HexChat notification server ending')
hexchat.prnt('Unloading {}, version {}'.format(__module_name__, __module_version__))
logging.info('Setting common notifications to normal')
hexchat.command('set input_balloon_hilight 1')
hexchat.command('set input_balloon_priv 1')
try:
logging.info('Sending Quit message to Notification Server')
interface.quit()
except (AttributeError, dbus.exceptions.DBusException):
logging.warning('Quit message to Notification Server failed')
logging.info('Explicitly quit')
# Unfortunately, this also kills whole HexChat, so the plugin cannot be restarted.
# However, I did not find a better way, as if the plugin used DBus interface it seems to hang
# on exit. Only other workaround I have found was to raise an Exception, but that stopped to
# work when I hooked `sys.excepthook`. I have tried to unhook it just before exit, but that did
# not work either. I find the proper Exception logging more useful than ability to restart
# plugin.
exit(1)
active_channel = None
win_status = None
interface = None
logging.info('HexChat notification plugin starting ==============================')
server_start()
hexchat.prnt('{}, version {}'.format(__module_name__, __module_version__))
logging.info('Setting common notifications to suspended')
hexchat.command('set input_balloon_hilight 0')
hexchat.command('set input_balloon_priv 0')
hexchat.hook_print('Focus Tab', on_focus_tab)
hexchat.hook_unload(on_unload)
hexchat.hook_print('Channel Action Hilight', on_highlight_notification, userdata='HLT')
hexchat.hook_print('Channel Msg Hilight', on_highlight_notification, userdata='HLT')
hexchat.hook_print('Private Message', on_highlight_notification, userdata='PVT')
hexchat.hook_print('Private Message to Dialog', on_highlight_notification, userdata='PVT')
hexchat.hook_print('Private Action to Dialog', on_highlight_notification, userdata='PVT')
| 2.21875 | 2 |
plugin/lighthouse/reader/__init__.py | x9090/lighthouse | 1,741 | 12793490 | from .coverage_reader import CoverageReader
| 1.03125 | 1 |
records_mover/records/pandas/to_csv_options.py | ellyteitsworth/records-mover | 0 | 12793491 | import csv
from ...utils import quiet_remove
from ..delimited import cant_handle_hint
from ..processing_instructions import ProcessingInstructions
from ..records_format import DelimitedRecordsFormat
from records_mover.mover_types import _assert_never
import logging
from typing import Set, Dict
logger = logging.getLogger(__name__)
def pandas_to_csv_options(records_format: DelimitedRecordsFormat,
unhandled_hints: Set[str],
processing_instructions: ProcessingInstructions) -> Dict[str, object]:
# https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.to_csv.html
hints = records_format.\
validate(fail_if_cant_handle_hint=processing_instructions.fail_if_cant_handle_hint)
fail_if_cant_handle_hint = processing_instructions.fail_if_cant_handle_hint
pandas_options: Dict[str, object] = {}
pandas_options['encoding'] = hints.encoding
quiet_remove(unhandled_hints, 'encoding')
if hints.compression is None:
# hints['compression']=None will output an uncompressed csv,
# which is the pandas default.
pass
elif hints.compression == 'GZIP':
pandas_options['compression'] = 'gzip'
elif hints.compression == 'BZIP':
pandas_options['compression'] = 'bz2'
else:
cant_handle_hint(fail_if_cant_handle_hint, 'compression', hints)
quiet_remove(unhandled_hints, 'compression')
if hints.quoting is None:
pandas_options['quoting'] = csv.QUOTE_NONE
elif hints.quoting == 'all':
pandas_options['quoting'] = csv.QUOTE_ALL
elif hints.quoting == 'minimal':
pandas_options['quoting'] = csv.QUOTE_MINIMAL
elif hints.quoting == 'nonnumeric':
pandas_options['quoting'] = csv.QUOTE_NONNUMERIC
else:
_assert_never(hints.quoting)
quiet_remove(unhandled_hints, 'quoting')
pandas_options['doublequote'] = hints.doublequote
quiet_remove(unhandled_hints, 'doublequote')
pandas_options['quotechar'] = hints.quotechar
quiet_remove(unhandled_hints, 'quotechar')
if hints.escape is None:
pass
else:
pandas_options['escapechar'] = hints.escape
quiet_remove(unhandled_hints, 'escape')
pandas_options['header'] = hints.header_row
quiet_remove(unhandled_hints, 'header-row')
if hints.dateformat is None:
if hints.datetimeformattz == hints.datetimeformat:
# BigQuery requires that timezone offsets have a colon;
# Python (and thus Pandas) doesn't support adding the
# colon with strftime. However, we can specify things
# without a timezone delimiter just fine.
#
# Unfortunately Python/Pandas will drop the timezone info
# instead of converting the timestamp to UTC. This
# corrupts the time, as BigQuery assumes what it gets in
# is UTC format. Boo.
#
# $ python3
# >>> import pytz
# >>> us_eastern = pytz.timezone('US/Eastern')
# >>> import datetime
# >>> us_eastern.localize(datetime.datetime(2000, 1, 2, 12, 34, 56, 789012))
# .strftime('%Y-%m-%d %H:%M:%S.%f')
# '2000-01-02 12:34:56.789012'
# >>>
#
# https://github.com/bluelabsio/records-mover/issues/95
pandas_options['date_format'] = '%Y-%m-%d %H:%M:%S.%f'
else:
pandas_options['date_format'] = '%Y-%m-%d %H:%M:%S.%f%z'
elif hints.dateformat == 'YYYY-MM-DD':
if hints.datetimeformattz == hints.datetimeformat:
pandas_options['date_format'] = '%Y-%m-%d %H:%M:%S.%f'
else:
pandas_options['date_format'] = '%Y-%m-%d %H:%M:%S.%f%z'
elif hints.dateformat == 'MM-DD-YYYY':
if hints.datetimeformattz == hints.datetimeformat:
pandas_options['date_format'] = '%m-%d-%Y %H:%M:%S.%f'
else:
pandas_options['date_format'] = '%m-%d-%Y %H:%M:%S.%f%z'
elif hints.dateformat == 'DD-MM-YYYY':
if hints.datetimeformattz == hints.datetimeformat:
pandas_options['date_format'] = '%d-%m-%Y %H:%M:%S.%f'
else:
pandas_options['date_format'] = '%d-%m-%Y %H:%M:%S.%f%z'
elif hints.dateformat == 'MM/DD/YY':
if hints.datetimeformattz == hints.datetimeformat:
pandas_options['date_format'] = '%m/%d/%y %H:%M:%S.%f'
else:
pandas_options['date_format'] = '%m/%d/%y %H:%M:%S.%f%z'
else:
cant_handle_hint(fail_if_cant_handle_hint, 'dateformat', hints)
quiet_remove(unhandled_hints, 'dateformat')
# pandas can't seem to export a date and time together :(
#
# might be nice someday to only emit the errors if the actual data
# being moved is affected by whatever limitation...
if (hints.datetimeformattz not in (f"{hints.dateformat} HH24:MI:SSOF",
f"{hints.dateformat} HH:MI:SSOF",
f"{hints.dateformat} HH24:MI:SS",
f"{hints.dateformat} HH:MI:SS",
f"{hints.dateformat} HH:MIOF",
f"{hints.dateformat} HH:MI",
f"{hints.dateformat} HH24:MIOF",
f"{hints.dateformat} HH24:MI")):
cant_handle_hint(fail_if_cant_handle_hint, 'datetimeformattz', hints)
quiet_remove(unhandled_hints, 'datetimeformattz')
valid_datetimeformat = [
f"{hints.dateformat} HH24:MI:SS",
f"{hints.dateformat} HH:MI:SS",
f"{hints.dateformat} HH24:MI",
f"{hints.dateformat} HH:MI",
]
if (hints.datetimeformat not in valid_datetimeformat):
cant_handle_hint(fail_if_cant_handle_hint, 'datetimeformat', hints)
quiet_remove(unhandled_hints, 'datetimeformat')
if hints.timeonlyformat != 'HH24:MI:SS':
cant_handle_hint(fail_if_cant_handle_hint, 'timeonlyformat', hints)
quiet_remove(unhandled_hints, 'timeonlyformat')
pandas_options['sep'] = hints.field_delimiter
quiet_remove(unhandled_hints, 'field-delimiter')
pandas_options['line_terminator'] = hints.record_terminator
quiet_remove(unhandled_hints, 'record-terminator')
return pandas_options
| 2.53125 | 3 |
tests/unit/raptiformica/actions/prune/test_ensure_neighbour_removed_from_config_by_host.py | vdloo/raptiformica | 21 | 12793492 | from raptiformica.actions.prune import ensure_neighbour_removed_from_config_by_host
from tests.testcase import TestCase
class TestEnsureNeighbourRemovedFromConfigByHost(TestCase):
def setUp(self):
self._del_neighbour_by_key = self.set_up_patch(
'raptiformica.actions.prune._del_neighbour_by_key'
)
def test_ensure_neighbour_removed_from_config_by_host_deleted_neighbour_by_host(self):
ensure_neighbour_removed_from_config_by_host('1.2.3.4')
self._del_neighbour_by_key.assert_called_once_with(
'host', '1.2.3.4'
)
| 1.976563 | 2 |
src/driving_curriculum/agents/algorithmic/teacher_quintic_polynomials.py | takeitallsource/pac-simulator | 1 | 12793493 | from math import cos, sin
import numpy as np
from ....simulator import Agent
from .quintic_polynomials_planner import quinic_polynomials_planner
class TeacherQuinticPolynomials(Agent):
def learn(self, state, action):
raise NotImplementedError()
def explore(self, state, horizon=1):
raise NotImplementedError()
def __init__(self, world, lane):
Agent.__init__(self, world)
self.lane = lane
self.navigation_plan = None
self.goal = self.lane.end_middle()
self.goal = self.goal[0], self.goal[1], 0.0 # the angle depends on the lane direction
def plan(self, horizon=10):
trajectory = quinic_polynomials_planner(sx=self.x, sy=self.y, syaw=self.theta, sv=self.v, sa=0.0,
gx=self.goal[0], gy=self.goal[1], gyaw=self.goal[2], gv=0.0, ga=0.0,
max_accel=0.0, max_jerk=0.1, dt=1)
return np.array(trajectory[3])[:horizon]
def exploit(self, state, horizon=1):
if self.navigation_plan is None:
self.navigation_plan = self.plan()
        for step in range(horizon):
            self.execute(self.navigation_plan[step])
def execute(self, action, horizon=1):
for _ in range(horizon):
self.x = self.x + self.v * cos(action)
self.y = self.y + self.v * sin(action)
| 3.125 | 3 |
research/cv/meta-baseline/postprocess.py | mindspore-ai/models | 77 | 12793494 | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
postprocess
"""
import os
import argparse
from functools import reduce
import numpy as np
import mindspore as ms
from mindspore import ops, Tensor, context
import src.util as util
def cal_acc(args):
"""
:return: meta-baseline eval
"""
temp = 5.
n_shots = [args.num_shots]
file_num = int(len(os.listdir(args.post_result_path)) / args.num_shots)
aves_keys = ['tl', 'ta', 'vl', 'va']
for n_shot in n_shots:
aves_keys += ['fsa-' + str(n_shot)]
aves = {k: util.Averager() for k in aves_keys}
label_list = np.load(os.path.join(args.pre_result_path, "label.npy"), allow_pickle=True)
shape_list = np.load(os.path.join(args.pre_result_path, "shape.npy"), allow_pickle=True)
x_shot_shape = shape_list[0]
x_query_shape = shape_list[1]
shot_shape = x_shot_shape[:-3]
query_shape = x_query_shape[:-3]
x_shot_len = reduce(lambda x, y: x*y, shot_shape)
x_query_len = reduce(lambda x, y: x*y, query_shape)
for i, n_shot in enumerate(n_shots):
np.random.seed(0)
label_shot = label_list[i]
for j in range(file_num):
labels = Tensor(label_shot[j])
f = os.path.join(args.post_result_path, "nshot_" + str(i) + "_" + str(j) + "_0.bin")
x_tot = Tensor(np.fromfile(f, np.float32).reshape(args.batch_size, 512))
x_shot, x_query = x_tot[:x_shot_len], x_tot[-x_query_len:]
x_shot = x_shot.view(*shot_shape, -1)
x_query = x_query.view(*query_shape, -1)
########## cross-class bias ############
bs = x_shot.shape[0]
fs = x_shot.shape[-1]
bias = x_shot.view(bs, -1, fs).mean(1) - x_query.mean(1)
x_query = x_query + ops.ExpandDims()(bias, 1)
x_shot = x_shot.mean(axis=-2)
x_shot = ops.L2Normalize(axis=-1)(x_shot)
x_query = ops.L2Normalize(axis=-1)(x_query)
logits = ops.BatchMatMul()(x_query, x_shot.transpose(0, 2, 1))
logits = logits * temp
ret = ops.Argmax()(logits) == labels.astype(ms.int32)
acc = ret.astype(ms.float32).mean()
aves['fsa-' + str(n_shot)].add(acc.asnumpy())
for k, v in aves.items():
aves[k] = v.item()
for n_shot in n_shots:
key = 'fsa-' + str(n_shot)
print("epoch {}, {}-shot, val acc {:.4f}".format(str(1), n_shot, aves[key]))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--device_target', type=str, default='CPU', choices=['Ascend', 'GPU', 'CPU'])
parser.add_argument('--dataset', default='mini-imagenet')
parser.add_argument('--post_result_path', default='./result_Files')
parser.add_argument('--pre_result_path', type=str, default='./preprocess_Result')
parser.add_argument('--batch_size', type=int, default=320)
parser.add_argument('--num_shots', type=int, default=1)
args_opt = parser.parse_args()
context.set_context(mode=context.GRAPH_MODE, device_target=args_opt.device_target, save_graphs=False)
cal_acc(args_opt)
| 2.015625 | 2 |
answers/Khushi/Day 6/Question 2.py | arc03/30-DaysOfCode-March-2021 | 22 | 12793495 | <gh_stars>10-100
# For the output shown in example enter the required array as shown [1,4,2,5,3]
def OddLengthSum(arr):
sum=0
l=len(arr)
for i in range(l):
for j in range(i,l,2):
for k in range(i,j+1,1):
sum+=arr[k]
return sum
print("Enter the array of 5 elements: ")
arr = []
for x in range(5):
element=int(input())
arr.append(element)
print("Sum of possible odd length sub-arrays:")
print(OddLengthSum(arr)) | 3.921875 | 4 |
PiCode/test_docker/test.py | SilentByte/healthcam | 2 | 12793496 | from picamera import PiCamera
from picamera.exc import PiCameraMMALError
from time import sleep
from io import StringIO
from glob import glob
from os.path import getsize
if __name__ == "__main__":
tries = 0
while tries < 5:
try:
cam = PiCamera(camera_num=0)
except PiCameraMMALError:
# Sometimes happens if something else is hogging the resource
            sleep(10)
            tries += 1
            continue
cam.resolution = (512, 512)
cam.start_preview()
sleep(4)
byte_buffer = StringIO()
byte_buffer.seek(0)
cam.start_recording('/home/test.mjpeg', format='mjpeg')
cam.wait_recording(10)
cam.stop_recording()
cam.capture('/home/foo.jpeg')
cam.stop_preview()
print("Recording")
cam.close()
print(glob("/home/*"))
print(getsize('home/test.mjpeg'))
print(getsize('home/foo.jpeg'))
        print(byte_buffer.read())
        break
| 2.671875 | 3 |
nn/framework.py | thunlp/Chinese_NRE | 272 | 12793497 | import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from .encoder import BiLstmEncoder
from .classifier import AttClassifier
from torch.autograd import Variable
from torch.nn import functional, init
class MGLattice_model(nn.Module):
def __init__(self, data):
super(MGLattice_model, self).__init__()
# MG-Lattice encoder
self.encoder = BiLstmEncoder(data)
# Attentive classifier
self.classifier = AttClassifier(data)
def forward(self, gaz_list, word_inputs, biword_inputs, word_seq_lengths, char_inputs, char_seq_lengths, char_seq_recover, pos1_inputs, pos2_inputs, ins_label, scope):
# ins_num * seq_len * hidden_dim
hidden_out = self.encoder.get_seq_features(gaz_list, word_inputs, biword_inputs, word_seq_lengths, char_inputs, char_seq_lengths, char_seq_recover, pos1_inputs, pos2_inputs)
# batch_size * num_classes
logit = self.classifier.get_logit(hidden_out, ins_label, scope)
return logit
| 2.625 | 3 |
tools/gen/plugin_base.py | mingkaic/tenncor | 1 | 12793498 | #!/usr/bin/env python3
''' Plugin interface definition '''
import abc
class PluginBase(metaclass=abc.ABCMeta):
@abc.abstractmethod
def plugin_id(self) -> str:
'''
Return plugin identifier
'''
@abc.abstractmethod
def process(self,
generated_files: dict, arguments: dict, **kwargs) -> dict:
'''
Given
output path
dictionary of generated_files (
mapping filename to FileRep) and
dictionary of arguments,
Return {filename: FileRep,...}
'''
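
# A minimal concrete plugin, sketched here for illustration only; the class name and
# behaviour below are examples, not part of the generator framework itself.
class EchoPlugin(PluginBase):
    def plugin_id(self) -> str:
        return 'ECHO'

    def process(self,
        generated_files: dict, arguments: dict, **kwargs) -> dict:
        # Pass the generated files through unchanged
        return generated_files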
| 2.90625 | 3 |
crawl/dirbot/spiders/data.py | okfde/odm-datenerfassung | 5 | 12793499 | import unicodecsv
import metautils
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.selector import Selector
from dirbot.items import Website
class DataSpider(CrawlSpider):
name = "data"
rules = (
# Extract all links and parse them with the spider's method parse_page
Rule(SgmlLinkExtractor(),callback='parse_page',follow=True),
)
def __init__(self, domain=None, *a, **kw):
super(DataSpider, self).__init__(*a, **kw)
self.domain = domain
self.fileoutall = domain + ".csv"
self.fileoutdata = domain + ".data.csv"
self.allowed_domains = [self.domain]
self.start_urls = [
"http://www." + domain + "/",
]
#File types to search for (non-geo); list so that we can extend
self.filetypes = ['.' + ft for ft in metautils.fileformats]
#Geographic file types
self.geofiletypes = ['.' + ft for ft in metautils.geoformats]
#Combined list to search for at first
self.filetypes.extend(self.geofiletypes)
#Better for searching later
self.filetypes = tuple(self.filetypes)
self.fields = ('Stadt_URL', 'URL_Datei', 'URL_Text', 'URL_Dateiname', 'Format', 'geo', 'URL_PARENT', 'Title_PARENT')
        # Note: Python 2's built-in open() has no 'encoding' argument; let unicodecsv encode the output
        self.writer = unicodecsv.DictWriter(open(self.fileoutall, "wb"), self.fields, encoding='utf-8')
self.writer.writeheader()
        self.writerdata = unicodecsv.DictWriter(open(self.fileoutdata, "wb"), self.fields, encoding='utf-8')
self.writerdata.writeheader()
print "Searching " + domain + "..."
def parse_page(self, response):
for ext in self.filetypes:
if (ext[1:] in response.headers['Content-Type'].upper() or ('Content-Disposition' in response.headers and ext in response.headers['Content-Disposition'].upper())):
print "Detected a downloadable, generated file"
item = Website()
item['URL_Datei'] = response.url
item['Stadt_URL'] = unicode(self.domain, 'utf-8')
#Not applicable
item['URL_Text'] = unicode('', 'utf-8')
if ('Content-Disposition' in response.headers):
item['URL_Dateiname'] = unicode(response.headers['Content-Disposition'], 'utf-8')
else:
item['URL_Dateiname'] = unicode(item['URL_Datei']).split('/')[-1]
item['Format'] = ext[1:]
#if we just have e.g. "json" and we are dealing with DKAN, then we are probably dealing with an API item description and not a file
if (item['URL_Dateiname'].upper() == item['Format']) and 'node' in item['URL_Datei']:
return []
if (ext in self.geofiletypes):
item['geo'] = 'x'
else:
item['geo'] = u''
item['URL_PARENT'] = u'Nicht moeglich kann aber nachtraeglich ermittelt werden'
item['Title_PARENT'] = u'Nicht moeglich kann aber nachtraeglich ermittelt werden'
self.writerdata.writerow(item)
#Done
return []
if ('Content-Type' in response.headers and 'text/html' not in response.headers['Content-Type']):
print "Not HTML or anything else of interest, giving up"
print response.headers
return []
#Otherwise, its html and we process all links on the page
sel = Selector(response)
#Title of the page we are on (this will be the 'parent')
parent_title = sel.xpath('//title/text()').extract()
if (len(parent_title)>0): parent_title = parent_title[0]
#URL of the page we are on (parent)
parent_url = response.url
#Get all links
sites = sel.xpath('//body//a')
#items = []
for site in sites:
item = Website()
item['URL_Datei'] = unicode('', 'utf-8')
url_file = site.xpath('@href').extract()
if (len(url_file)>0):
item['URL_Datei'] = url_file[0]
item['Stadt_URL'] = unicode(self.domain, 'utf-8')
#Get ALL text of everything inside the link
#First any sub-elements like <span>
textbits = site.xpath('child::node()')
item['URL_Text'] = unicode('', 'utf-8')
for text in textbits:
thetext = text.xpath('text()').extract()
if (len(thetext) > 0): item['URL_Text'] += thetext[0]
#Then the actual text
directText = site.xpath('text()').extract()
#If there's something there and it isn't a repetition, use it
if (len(directText) > 0) and (directText != thetext):
item['URL_Text'] += directText[0]
item['URL_Text'] = item['URL_Text'].replace("\t", " ").replace("\n", "").strip()
#If that got us nothing, then look at the title and alt elements
title_text = site.xpath('@title').extract()
if (len(title_text)>0) and (item['URL_Text'] == u''):
                item['URL_Text'] = title_text[0]
alt_text = site.xpath('@alt').extract()
if (len(alt_text)>0) and (item['URL_Text'] == u''):
                item['URL_Text'] = alt_text[0]
item['URL_Dateiname'] = unicode(item['URL_Datei']).split('/')[-1]
item['Format'] = u'Not interesting'
item['geo'] = u''
item['URL_PARENT'] = parent_url
item['Title_PARENT'] = parent_title
#Is it a file (does it have any of the extensions (including the '.' in the filename,
#then remove the '.'
for ext in self.filetypes:
if ext in item['URL_Dateiname'].encode('ascii', errors='ignore').upper():
item['Format'] = ext[1:len(ext)]
#And is it one of our special geo filetypes?
if ext in self.geofiletypes:
item['geo'] = 'x'
self.writerdata.writerow(item)
self.writer.writerow(item)
#items.append(item)
return []
| 2.828125 | 3 |
kerlas/gym_env.py | imandr/KeRLas | 0 | 12793500 | <filename>kerlas/gym_env.py<gh_stars>0
import gym, random, numpy as np
class MultiEnv(object):
pass
class MultiGymEnv(MultiEnv):
#
# Convert 1-agent Gym environment into a multi-agent environment
#
NAgents = 1
def __init__(self, env, tlimit=None, random_observation_space=None):
if isinstance(env, str):
env = gym.make(env)
self.observation_space = env.observation_space
self.action_space = env.action_space
self.TLimit = tlimit
self.Env = env
self.Obs = None
self.Reward = None
self.Done = False
self.Info = None
self.RandomObservationSpace = random_observation_space
def __str__(self):
return "GymEnv(%s)" % (self.Env,)
def randomStates(self, n):
space = self.RandomObservationSpace or self.Env.observation_space
        return np.array([space.sample() for _ in range(n)])
    def randomActions(self, n):
        return np.array([self.Env.action_space.sample() for _ in range(n)])
def reset(self, agents, random = False):
assert len(agents) == 1, "Converted Gym environments can not handle multiple agents"
self.Agent = agents[0]
obs = self.Env.reset()
if random and self.RandomObservationSpace is not None:
obs = self.RandomObservationSpace.sample()
self.Env.state = obs
self.Obs = obs
self.Done = False
self.T = self.TLimit
return [self.Obs]
def addAgent(self, agent, random = False):
raise NotImplementedError
def step(self, actions):
assert len(actions) == 1, "Converted Gym environments can not handle multiple agents"
self.Obs, self.Reward, self.Done, self.Info = self.Env.step(actions[0][1])
if self.T is not None:
self.T -= 1
self.Done = self.Done or (self.T <= 0)
def feedback(self):
return False, [(self.Agent, self.Obs, self.Reward, self.Done, self.Info)]
def __getattr__(self, name):
return getattr(self.Env, name)
class TimedGymEnv(object):
def __init__(self, env, tlimit=None, random_observation_space=None):
if isinstance(env, str):
env = gym.make(env)
self.TLimit = tlimit
self.Env = env
self.RandomObservationSpace = random_observation_space
self.T = self.TLimit = tlimit
def reset(self, random=False):
self.T = self.TLimit
state = self.Env.reset()
if random:
state = self.randomStates(1)[0]
self.Env.state = state
return state
def step(self, action):
obs, reward, done, info = self.Env.step(action)
if self.T is not None:
self.T -= 1
if self.T <= 0:
done = True
return obs, reward, done, info
def randomStates(self, n):
space = self.RandomObservationSpace or self.Env.observation_space
        return np.array([space.sample() for _ in range(n)])
    def randomActions(self, n):
        return np.array([self.Env.action_space.sample() for _ in range(n)])
def randomAction(self):
return self.randomActions(1)[0]
def __getattr__(self, name):
return getattr(self.Env, name)
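
# Example usage (a sketch; "CartPole-v1" is just a stock Gym environment id):
#   env = MultiGymEnv("CartPole-v1", tlimit=200)
#   [obs] = env.reset(agents=["agent0"])
#   env.step([("agent0", env.action_space.sample())])
#   _, results = env.feedback()   # [(agent, observation, reward, done, info)]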
| 2.9375 | 3 |
lib/to_geojson.py | erictheise/trctr-pllr | 0 | 12793501 | <reponame>erictheise/trctr-pllr
import json
def array_to_geojson(array):
props = []
for i in range(len(array[0])-1):
props.append(array[0][i])
feature_collection = {
"type": "FeatureCollection",
"features": []
}
features = []
for i in range(1, len(array)):
feature = {
"type": "Feature",
"geometry": array[i][len(array[i])-1],
"properties": {
},
}
for j in range(len(array[i])-1):
feature['properties'][props[j]] = array[i][j]
feature_collection["features"].append(feature)
return json.dumps(feature_collection)
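
if __name__ == "__main__":
    # Quick illustration of the expected input layout (values below are made up):
    # the header row lists the property names and each data row ends with a GeoJSON
    # geometry dict.
    rows = [
        ["name", "rating", "geometry"],
        ["Sample Point", 5, {"type": "Point", "coordinates": [-122.39, 37.79]}],
    ]
    print(array_to_geojson(rows))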
| 2.859375 | 3 |
setup.py | 465b/General-Ecosystem-Modeling-Framework | 1 | 12793502 | <filename>setup.py
from setuptools import setup, find_packages
from os import path
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
readme_as_long_description = f.read()
setup(
name="nemf",
version="0.3.4",
packages=find_packages(),
# install_requires=[""],
# metadata to display on PyPI
author="<NAME>",
author_email="<EMAIL>",
description="Network-based ecosystem Modelling Framework",
long_description=readme_as_long_description,
long_description_content_type="text/markdown",
keywords="ecosystem modelling framework inverse-modelling",
url="https://github.com/465b/nemf/",
classifiers=[
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 3',
'Topic :: Scientific/Engineering',
'Intended Audience :: Science/Research',
'Operating System :: POSIX :: Linux',
'Development Status :: 3 - Alpha',
],
license='BSD',
install_requires=[
'numpy','seaborn','pandas','matplotlib','networkx','pyyaml',
'termcolor']
) | 1.53125 | 2 |
simulator/entities/garage.py | lucassm02/fiap-cptm | 0 | 12793503 | <reponame>lucassm02/fiap-cptm
from typing import List
from .train import Train
class Garage(object):
def __init__(self, name: str, key: str, cars: List[Train], volume: int):
self.name = name
self.key = key
self.cars = cars
self.volume = volume
| 2.671875 | 3 |
3. Python Advanced (September 2021)/3.1 Python Advanced (September 2021)/06. Multidimensional Lists/05_primary_diagonal.py | kzborisov/SoftUni | 1 | 12793504 | size = int(input())
matrix = [[int(x) for x in input().split()] for _ in range(size)]
print(sum([matrix[x][x] for x in range(size)]))
| 3.421875 | 3 |
Python-Data-Science/code.py | Prakhar1212/greyatom-python-for-data-science | 0 | 12793505 | # --------------
# Code starts here
class_1 = ['<NAME>','<NAME>','<NAME>','<NAME>']
class_2 = ['<NAME>','<NAME>','<NAME>']
new_class = class_1 + class_2
new_class.append('<NAME>')
new_class.remove('<NAME>')
print(new_class)
# Code ends here
# --------------
# Code starts here
courses = {'Math':65, 'English': 70, 'History': 80, 'French': 70, 'Science':60}
total = 65+70+80+70+60
percentage = total * 100 / 500
print(total)
print(percentage)
# Code ends here
# --------------
# Code starts here
mathematics = {'<NAME> ': 78, '<NAME>': 95, '<NAME>':65, '<NAME>':50, '<NAME>':70, '<NAME>': 66, '<NAME>':75}
topper = max(mathematics,key = mathematics.get)
print(topper)
# Code ends here
# --------------
# Given string
topper = '<NAME>'
last_name = topper.split()[1]
first_name = topper.split()[0]
full_name = last_name + " " + first_name
certificate_name = full_name.upper()
print(certificate_name)
# Code starts here
# Code ends here
| 3.78125 | 4 |
src/sklearn_evaluation/SQLiteTracker.py | abcnishant007/sklearn-evaluation | 351 | 12793506 | <gh_stars>100-1000
from uuid import uuid4
import sqlite3
import json
import pandas as pd
from sklearn_evaluation.table import Table
class SQLiteTracker:
"""A simple experiment tracker using SQLite
:doc:`Click here <../user_guide/SQLiteTracker>` to see the user guide.
Parameters
----------
path
Database location
"""
def __init__(self, path: str):
self.conn = sqlite3.connect(path)
cur = self.conn.cursor()
cur.execute("""
CREATE TABLE IF NOT EXISTS experiments (
uuid TEXT NOT NULL UNIQUE,
created TIMESTAMP default current_timestamp,
parameters TEXT,
comment TEXT
)
""")
cur.close()
def __getitem__(self, uuid):
"""Get experiment with a given uuid
"""
# TODO: make it work for a list of uuids
return pd.read_sql('SELECT * FROM experiments WHERE uuid = ?',
self.conn,
params=[uuid],
index_col='uuid')
def recent(self, n=5, normalize=False):
"""Get most recent experiments as a pandas.DataFrame
"""
query = """
SELECT uuid, created, parameters, comment
FROM experiments
ORDER BY created DESC
LIMIT ?
"""
df = pd.read_sql(query, self.conn, params=[n], index_col='uuid')
if normalize:
# parse and normalize json
parameters = pd.json_normalize(
df.pop('parameters').apply(lambda s: json.loads(s))).set_index(
df.index)
df = df.join(parameters)
# re order columns to show "comment" at the end
comment = df.pop('comment')
df.insert(len(df.columns), 'comment', comment)
return df
def query(self, code):
"""Query the database, returns a pandas.DataFrame
Examples
--------
>>> from sklearn_evaluation import SQLiteTracker
>>> tracker = SQLiteTracker(':memory:') # example in-memory db
>>> tracker.insert('my_uuid', {'a': 1})
>>> df = tracker.query(
... "SELECT uuid, json_extract(parameters, '$.a') FROM experiments")
"""
df = pd.read_sql(code, self.conn)
if 'uuid' in df:
df = df.set_index('uuid')
return df
def new(self):
"""Create a new experiment, returns a uuid
"""
uuid = uuid4().hex
cur = self.conn.cursor()
cur.execute(
"""
INSERT INTO experiments (uuid)
VALUES(?)
""", [uuid])
cur.close()
self.conn.commit()
return uuid
def update(self, uuid, parameters):
"""Update the parameters of an empty experiment given its uuid
"""
self._can_update(uuid)
cur = self.conn.cursor()
cur.execute(
"""
UPDATE experiments
SET parameters = ?
WHERE uuid = ?
""", [json.dumps(parameters), uuid])
cur.close()
self.conn.commit()
def insert(self, uuid, parameters):
"""Insert a new experiment
"""
cur = self.conn.cursor()
cur.execute(
"""
INSERT INTO experiments (uuid, parameters)
VALUES(?, ?)
""", [uuid, json.dumps(parameters)])
cur.close()
self.conn.commit()
def comment(self, uuid, comment):
"""Add a comment to an experiment given its uuid
"""
# TODO: add overwrite (false by default) and append options
cur = self.conn.cursor()
cur.execute(
"""
UPDATE experiments
SET comment = ?
WHERE uuid = ?
""", [comment, uuid])
cur.close()
self.conn.commit()
def _recent(self, n=5, fmt='html'):
if fmt not in {'html', 'plain'}:
raise ValueError('fmt must be one "html" or "plain"')
cur = self.conn.cursor()
cur.execute(
"""
SELECT uuid, created, parameters, comment
FROM experiments
ORDER BY created DESC
LIMIT ?
""", [n])
res = cur.fetchall()
table = Table(res, header=['uuid', 'created', 'parameters', 'comment'])
title_template = '<h4> {} </h4>' if fmt == 'html' else '{}\n'
title = title_template.format(type(self).__name__)
if not len(table):
title += '(No experiments saved yet)'
if fmt == 'plain':
title += '\n'
if len(table):
footer = (('<br>' if fmt == 'html' else '\n') +
'(Most recent experiments)')
else:
footer = ''
return (title + (table.to_html() if fmt == 'html' else str(table)) +
footer)
def _can_update(self, uuid):
"""Check if an experiment with a given uuid can be updated
"""
cur = self.conn.cursor()
cur.execute(
"""
SELECT parameters
FROM experiments
WHERE uuid = ?
""", [uuid])
row = cur.fetchone()
exists = row is not None
if exists:
empty = row[0] is None
if not empty:
raise ValueError('Cannot update non-empty experiment with '
'uuid "{}"'.format(uuid))
else:
raise ValueError('Cannot update experiment with '
'uuid "{}" because it does '
'not exist'.format(uuid))
def __repr__(self):
return self._recent(fmt='plain')
def _repr_html_(self):
return self._recent(fmt='html')
def __del__(self):
self.conn.close()
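
# Typical workflow, sketched for reference (the database path and parameters are arbitrary):
#   tracker = SQLiteTracker('experiments.db')
#   uuid = tracker.new()
#   tracker.update(uuid, {'model': 'rf', 'n_estimators': 100})
#   tracker.comment(uuid, 'baseline run')
#   tracker.recent(n=1, normalize=True)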
| 3.171875 | 3 |
run.pyw | xue0228/keyboard | 0 | 12793507 | <gh_stars>0
from xue_macro import macro
if __name__ == '__main__':
# macro.run(fg='#ECB1AC')
macro.run()
| 1.226563 | 1 |
redis/__init__.py | RuiCoreSci/auth | 0 | 12793508 | from redis.client import Redis
redis = Redis()
__all__ = ['redis']
| 1.25 | 1 |
src/RIOT/tests/blob/tests/01-run.py | ARte-team/ARte | 2 | 12793509 | <reponame>ARte-team/ARte
#!/usr/bin/env python3
# Copyright (C) 2019 <NAME> <<EMAIL>>
#
# This file is subject to the terms and conditions of the GNU Lesser
# General Public License v2.1. See the file LICENSE in the top level
# directory for more details.
import sys
from testrunner import run
def testfunc(child):
child.expect_exact("Hello blob!")
child.expect_exact("Hello blob_subdir!")
child.expect_exact("0x00")
child.expect_exact("0x01")
child.expect_exact("0x02")
child.expect_exact("0x03")
child.expect_exact("0xFF")
if __name__ == "__main__":
sys.exit(run(testfunc))
| 1.5 | 2 |
backend/gifz_api/gifs/__init__.py | mkusiciel/terraform-workshops | 3 | 12793510 | <reponame>mkusiciel/terraform-workshops
default_app_config = 'gifz_api.gifs.apps.GifsConfig'
| 1.015625 | 1 |
consumer_lag.py | aseev-xx/kafka-consumer-lag-metrics | 0 | 12793511 | <reponame>aseev-xx/kafka-consumer-lag-metrics<filename>consumer_lag.py
#!/usr/bin/python2.7
# -*- coding: utf-8 -*-
import sys
import requests
import ConfigParser
import time
import socket
import os
# get base json object for next
def get_json_object(url):
try:
obj = requests.get(url)
except requests.exceptions.RequestException as e:
print e
sys.exit(1)
return obj.json()
# get all exist clusters for url concatenate in future
def get_clusters(base_url):
obj = get_json_object(base_url + '/api/status/clusters')
clusters = []
for el in obj["clusters"]["active"]:
clusters.append(el["name"])
return clusters
def prepare_graphite_metrics(base_url,graphite_prefix):
clusters = get_clusters(base_url)
metrics = []
timestamp = int(time.time())
for cluster in clusters:
url = base_url + '/api/status/' + cluster + '/consumersSummary'
obj = get_json_object(url)
consumers = obj["consumers"]
for consumer in consumers:
for topic in consumer['topics']:
cons = consumer['name']
value = graphite_prefix + cluster.replace('.','_') + '.' + consumer['type'] + '.' + cons.replace('.','_') + '.' + topic.replace('.','_')
message = '%s %s %d' % (value, consumer['lags'][topic], timestamp)
metrics.append(message)
return metrics
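# Each entry follows Graphite's plaintext protocol, "<metric.path> <value> <timestamp>",
# where the path is "<prefix><cluster>.<consumer_type>.<consumer>.<topic>" with dots in
# the individual names replaced by underscores.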
def send_graphite_metrics(message,graphite_host,graphite_port):
print 'sending message:\n%s' % message
sock = socket.socket()
sock.connect((graphite_host,graphite_port))
sock.sendall(message)
sock.close()
def main():
config = ConfigParser.SafeConfigParser()
config.read(os.path.dirname(__file__) + '/consumer_lag.ini')
base_url = config.get('api','url')
bulk = prepare_graphite_metrics(base_url,config.get('graphite','prefix'))
    message = '\n'.join(bulk) + '\n'
send_graphite_metrics(message,config.get('graphite','host'),int(config.get('graphite','port')))
if __name__ == "__main__":
main()
| 2.421875 | 2 |
cmasher/cli_tools.py | ajdittmann/CMasher | 0 | 12793512 | <filename>cmasher/cli_tools.py
# -*- coding: utf-8 -*-
# %% IMPORTS
# Built-in imports
import argparse
from importlib import import_module
import os
import sys
# Package imports
import e13tools as e13
from matplotlib import cm as mplcm
import numpy as np
# CMasher imports
from cmasher import __version__
import cmasher as cmr
# All declaration
__all__ = ['main']
# %% GLOBALS
# Define main description of this package
main_desc = ("CMasher: Scientific colormaps for making accessible, informative"
" and 'cmashing' plots")
# %% CLASS DEFINITIONS
# Define formatter that automatically extracts help strings of subcommands
class HelpFormatterWithSubCommands(argparse.ArgumentDefaultsHelpFormatter):
# Override the add_argument function
def add_argument(self, action):
# Check if the help of this action is required
if action.help is not argparse.SUPPRESS:
# Check if this action is a subparser's action
if isinstance(action, argparse._SubParsersAction):
# If so, sort action.choices on name
names = sorted(action.choices.keys())
# Loop over all subcommands defined in the action
for name in names:
# Obtain corresponding subparser
subparser = action.choices[name]
# Format the description of this subcommand and add it
self._add_item(self.format_subcommands,
[name, subparser.description])
# Call super method in all other cases
else:
super().add_argument(action)
# This function formats the description of a subcommand with given name
def format_subcommands(self, name, description):
# Determine the positions and widths of the help texts
help_position = min(self._action_max_length+2, self._max_help_position)
help_width = max(self._width-help_position, 11)
name_width = help_position-self._current_indent-2
# Transform name to the proper formatting
name = "{0}{1: <{2}}{3}".format(
' '*self._current_indent, name, name_width,
' ' if(len(name) <= name_width) else '\n'+' '*help_position)
# Split the lines of the subcommand description
desc_lines = self._split_lines(description, help_width)
# Create list of all parts of the description of this subcommand
parts = [name, desc_lines.pop(0), '\n']
# Loop over all remaining desc_lines
for line in desc_lines:
# Format and add to parts
parts.append("%s%s\n" % (' '*help_position, line))
# Convert to a single string and return
return(''.join(parts))
# %% COMMAND FUNCTION DEFINITIONS
# This function handles the 'bibtex' subcommand
def cli_bibtex():
cmr.get_bibtex()
# This function handles the 'cmap_type' subcommand
def cli_cmap_type():
# Import cmap packages
import_cmap_pkgs()
# Print cmap type
print(cmr.get_cmap_type(get_cmap(ARGS.cmap)))
# This function handles the 'take_cmap_colors' subcommand
def cli_cmap_colors():
# Import cmap packages
import_cmap_pkgs()
# Obtain the colors
colors = cmr.take_cmap_colors(get_cmap(ARGS.cmap), ARGS.ncolors,
cmap_range=ARGS.cmap_range,
return_fmt=ARGS.return_fmt)
# Print the colors line-by-line
if ARGS.return_fmt in ('float', 'norm'):
np.savetxt(sys.stdout, colors, '%.8f')
elif ARGS.return_fmt in ('int', '8bit'):
np.savetxt(sys.stdout, colors, '%i')
else:
np.savetxt(sys.stdout, colors, '%s')
# This function handles the 'mkcmod' subcommand
def cli_mk_cmod():
# Create cmap module
cmap_path = cmr.create_cmap_mod(ARGS.cmap, save_dir=ARGS.dir)
# Print on commandline that module has been created
print("Created standalone colormap module of %r in %r."
% (ARGS.cmap, cmap_path))
# %% FUNCTION DEFINITIONS
# This function obtains the colormap that was requested
def get_cmap(cmap):
# Try to obtain the colormap from MPL
try:
cmap = mplcm.get_cmap(cmap)
# If this does not work, try to expand given cmap in setuptools-style
except ValueError:
# Check if cmap contains a colon
if ':' in cmap:
# Split cmap up into mod_name and obj_name
mod_name, obj_name = cmap.split(':', 1)
obj_path = obj_name.split('.')
# Import the provided module as cmap
cmap = import_module(mod_name)
# Import the provided object from this module
for obj in obj_path:
cmap = getattr(cmap, obj)
# If cmap is still a string, raise error
if isinstance(cmap, str):
# Print error and exit
print("Requested 'CMAP' ({!r}) cannot be found!".format(cmap))
sys.exit()
# Return cmap
return(cmap)
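
# For example (hypothetical module path): passing "mypkg.cmaps:my_cmap" as CMAP makes
# this function import mypkg.cmaps and walk the attribute path to reach my_cmap.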
# This function attempts to import a collection of packages with colormaps
def import_cmap_pkgs():
# Define set of packages with colormaps
cmap_pkgs = {'cmocean', 'colorcet', 'palettable'}
# Obtain packages from CMR_CMAP_PKGS environment variable
env_pkgs = os.environ.get('CMR_CMAP_PKGS', None)
# Add env_pkgs to cmap_pkgs if it is not empty
if env_pkgs is not None:
# If Windows, split variable at semicolons
if sys.platform.startswith('win'):
env_pkgs = env_pkgs.split(';')
# Else, if UNIX, split variable at colons
elif sys.platform.startswith(('darwin', 'linux')):
env_pkgs = env_pkgs.split(':')
# Else, ignore the variable
else:
env_pkgs = []
# Add pkgs
cmap_pkgs.update(env_pkgs)
# Attempt to import each package
for cmap_pkg in cmap_pkgs:
try:
import_module(cmap_pkg)
except ImportError:
pass
# %% MAIN FUNCTION
def main():
"""
This is the main function of the CLI and is called whenever `cmr` is
invoked from the command-line.
"""
# Initialize argparser
parser = argparse.ArgumentParser(
'cmr',
description=main_desc,
formatter_class=HelpFormatterWithSubCommands,
add_help=True,
allow_abbrev=True)
# Add subparsers
subparsers = parser.add_subparsers(
title='commands',
metavar='COMMAND')
# OPTIONAL ARGUMENTS
# Add 'version' argument
parser.add_argument(
'-v', '--version',
action='version',
version="CMasher v{}".format(__version__))
# Create a cmap parser for several commands
cmap_parent_parser = argparse.ArgumentParser(add_help=False)
# Add 'cmap' argument
cmap_parent_parser.add_argument(
'cmap',
help=("Name of colormap to use as registered in *matplotlib* or the "
"object path of a colormap (e.g., 'a.b:c.d' -> import a.b; "
"cmap = a.b.c.d)"),
metavar='CMAP',
action='store',
type=str)
# BIBTEX COMMAND
# Add bibtex subparser
bibtex_parser = subparsers.add_parser(
'bibtex',
description=e13.get_main_desc(cmr.get_bibtex),
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
add_help=True)
# Set defaults for bibtex_parser
bibtex_parser.set_defaults(func=cli_bibtex)
# CMAP_TYPE COMMAND
# Add cmap_type subparser
cmap_type_parser = subparsers.add_parser(
'cmtype',
parents=[cmap_parent_parser],
description=e13.get_main_desc(cmr.get_cmap_type),
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
add_help=True)
# Set defaults for cmap_type_parser
cmap_type_parser.set_defaults(func=cli_cmap_type)
# CMAP_COLORS COMMAND
# Obtain the optional default arguments of take_cmap_colors
defaults = cmr.take_cmap_colors.__kwdefaults__
# Create a take_colors parser
take_colors_parent_parser = argparse.ArgumentParser(add_help=False)
# Add 'cmap_range' optional argument
take_colors_parent_parser.add_argument(
'--range',
help=("Normalized value range in the colormap from which colors should"
" be taken"),
metavar=('LOWER', 'UPPER'),
action='store',
nargs=2,
default=defaults['cmap_range'],
type=float,
dest='cmap_range')
# Add 'fmt' optional argument
take_colors_parent_parser.add_argument(
'--fmt',
help="Format to return colors in",
action='store',
default=defaults['return_fmt'],
choices=['float', 'norm', 'int', '8bit', 'str', 'hex'],
type=str,
dest='return_fmt')
# Add cmap_colors subparser
cmap_colors_parser = subparsers.add_parser(
'cmcolors',
parents=[cmap_parent_parser, take_colors_parent_parser],
description=e13.get_main_desc(cmr.take_cmap_colors),
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
add_help=True)
# Add 'N' argument
cmap_colors_parser.add_argument(
'ncolors',
help="Number of colors to take",
metavar='N',
action='store',
type=int)
# Set defaults for cmap_colors_parser
cmap_colors_parser.set_defaults(func=cli_cmap_colors)
# RGB_TABLE COMMAND
# Add rgb_table subparser
rgb_table_parser = subparsers.add_parser(
'rgbtable',
parents=[cmap_parent_parser, take_colors_parent_parser],
description="Retrieves the RGB values of the provided `cmap`.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
add_help=True)
# Set defaults for rgb_table_parser
rgb_table_parser.set_defaults(func=cli_cmap_colors,
ncolors=None)
# MK_CMOD COMMAND
# Add mk_cmod subparser
mk_cmod_parser = subparsers.add_parser(
'mkcmod',
description=e13.get_main_desc(cmr.create_cmap_mod),
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
add_help=True)
# Add 'cmap' argument
mk_cmod_parser.add_argument(
'cmap',
help="Name of *CMasher* colormap to create standalone module for",
metavar='CMAP',
action='store',
type=str)
# Add 'dir' optional argument
mk_cmod_parser.add_argument(
'-d', '--dir',
help="Path to directory where the module must be saved",
action='store',
default=cmr.create_cmap_mod.__kwdefaults__['save_dir'],
type=str)
# Set defaults for mk_cmod_parser
mk_cmod_parser.set_defaults(func=cli_mk_cmod)
# Parse the arguments
global ARGS
ARGS = parser.parse_args()
# If arguments is empty (no func was provided), show help
if 'func' not in ARGS:
parser.print_help()
# Else, call the corresponding function
else:
ARGS.func()
| 2.125 | 2 |
dataprep_example/ingest_retailrocket_dataset.py | DynamicYieldProjects/funnel-rocket | 56 | 12793513 | <reponame>DynamicYieldProjects/funnel-rocket
import sys
import time
import argparse
from pathlib import Path
from contextlib import contextmanager
import pandas as pd
from pandas import DataFrame
EVENTS_FILE = 'events.csv'
PROPS_FILE_1 = 'item_properties_part1.csv'
PROPS_FILE_2 = 'item_properties_part2.csv'
INPUT_FILENAMES = {EVENTS_FILE, PROPS_FILE_1, PROPS_FILE_2}
ITEM_PROPERTY_COLUMNS = {'categoryid', 'available', '790', '888'}
EXPECTED_EVENT_COUNT = 2_500_516
def progress_msg(msg: str):
print(f"\033[33m{msg}\033[0m") # Yellow, just yellow
@contextmanager
def timed(caption: str):
start = time.time()
yield
total = time.time() - start
print(f"Time to {caption}: {total:.3f} seconds")
# Read item properties files, filter for relevant columns and 'pivot' its structure from rows to columns
def read_item_props(filepath: Path) -> DataFrame:
df = pd.read_csv(filepath)
df = df[df['property'].isin(ITEM_PROPERTY_COLUMNS)]
first_value_per_item = df.groupby(["itemid", "property"])["value"].first()
df = first_value_per_item.to_frame()
df = df.unstack(level=-1)
df.columns = df.columns.droplevel(0)
return df
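# The returned frame has one row per itemid and one column per retained property
# ('categoryid', 'available', '790', '888'), keeping the first recorded value of each.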
def ingest(path: Path):
with timed("read & transform item properties of all products"):
item_props_tempfile = path / "item_props.parquet"
if item_props_tempfile.exists():
progress_msg(f"Reading item properties from cached file {item_props_tempfile}")
item_props_df = pd.read_parquet(item_props_tempfile)
else:
progress_msg("Reading item properties... (this takes a bit)")
item_props_df1 = read_item_props(path / PROPS_FILE_1)
item_props_df2 = read_item_props(path / PROPS_FILE_2)
item_props_df = item_props_df1.combine_first(item_props_df2)
progress_msg(f"Storing item properties to {item_props_tempfile} for faster re-runs...")
item_props_df.to_parquet(item_props_tempfile)
with timed("read & transform user events"):
progress_msg("Reading user events...")
events = pd.read_csv(path / EVENTS_FILE)
progress_msg("Joining events with item properties...")
events = pd.merge(events, item_props_df, how='inner', on='itemid')
progress_msg("Making columns more queryable...")
events['price'] = events['790'].str[1:].astype(float) / 1000
events.drop(columns=['790'], inplace=True)
events['available'] = events['available'].astype(int).astype(bool)
events['categoryid'] = events['categoryid'].astype('category')
events['event'] = events['event'].astype('category')
events.rename(columns={'888': 'cryptic_attrs'}, inplace=True)
progress_msg("Storing 'cryptic_attrs' also as categorical column 'cryptic_attrs_cat'...")
events['cryptic_attrs_cat'] = events['cryptic_attrs'].astype('category')
events.reset_index(drop=True)
progress_msg("Excerpt from final DataFrame:")
print(events)
progress_msg("Columns types (a.k.a. dtypes):")
print(events.dtypes)
progress_msg("Breakdown of event types:")
print(events['event'].value_counts())
if len(events) != EXPECTED_EVENT_COUNT:
progress_msg(f"WARNING: Expected {EXPECTED_EVENT_COUNT} events, but final DataFrame has {len(events)}")
output_file = path / 'retailrocket.parquet'
events.to_parquet(output_file)
col_memory_sizes = (events.memory_usage(deep=True) / 1024 ** 2).round(decimals=2)
progress_msg(f'Size of DataFrame columns in memory (in MB):')
print(col_memory_sizes)
progress_msg(f"==> Saved output file to: {output_file}, size: {output_file.stat().st_size / 1024 ** 2:.1f}MB")
with timed("load file - all columns"):
pd.read_parquet(output_file)
with timed("load file - just the 'cryptic_attrs' column"):
pd.read_parquet(output_file, columns=['cryptic_attrs'])
with timed("load file - just the 'cryptic_attrs_cat' column"):
pd.read_parquet(output_file, columns=['cryptic_attrs_cat'])
with timed("load file - all columns *except* these two"):
cols = [col for col in events.dtypes.index
if col not in ['cryptic_attrs', 'cryptic_attrs_cat']]
pd.read_parquet(output_file, columns=cols)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Ingest RetailRocket dataset (to download: https://www.kaggle.com/retailrocket/ecommerce-dataset/)')
parser.add_argument(
'path', type=str,
help='Directory where downloaded dataset files are found and output file will be written')
args = parser.parse_args()
path = Path(args.path)
if not path.exists() or not path.is_dir():
sys.exit(f'No such directory: {path}')
files_in_path = {f.name for f in path.iterdir()}
if not files_in_path >= INPUT_FILENAMES:
sys.exit(f'Missing one or more input files: {INPUT_FILENAMES}')
ingest(path)
| 2.5 | 2 |
multipole-graph-neural-operator/utilities.py | vir-k01/graph-pde | 121 | 12793514 | <gh_stars>100-1000
import torch
import numpy as np
import scipy.io
import h5py
import sklearn.metrics
from torch_geometric.data import Data
import torch.nn as nn
from scipy.ndimage import gaussian_filter
#################################################
#
# Utilities
#
#################################################
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# reading data
class MatReader(object):
def __init__(self, file_path, to_torch=True, to_cuda=False, to_float=True):
super(MatReader, self).__init__()
self.to_torch = to_torch
self.to_cuda = to_cuda
self.to_float = to_float
self.file_path = file_path
self.data = None
self.old_mat = None
self._load_file()
def _load_file(self):
try:
self.data = scipy.io.loadmat(self.file_path)
self.old_mat = True
except:
self.data = h5py.File(self.file_path)
self.old_mat = False
def load_file(self, file_path):
self.file_path = file_path
self._load_file()
def read_field(self, field):
x = self.data[field]
if not self.old_mat:
x = x[()]
x = np.transpose(x, axes=range(len(x.shape) - 1, -1, -1))
if self.to_float:
x = x.astype(np.float32)
if self.to_torch:
x = torch.from_numpy(x)
if self.to_cuda:
x = x.cuda()
return x
def set_cuda(self, to_cuda):
self.to_cuda = to_cuda
def set_torch(self, to_torch):
self.to_torch = to_torch
def set_float(self, to_float):
self.to_float = to_float
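
# Example usage (a sketch; the file name and field are placeholders):
#   reader = MatReader('darcy_data.mat')
#   coeff = reader.read_field('coeff')   # torch.FloatTensor by default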
# normalization, pointwise gaussian
class UnitGaussianNormalizer(object):
def __init__(self, x, eps=0.00001):
super(UnitGaussianNormalizer, self).__init__()
# x could be in shape of ntrain*n or ntrain*T*n or ntrain*n*T
self.mean = torch.mean(x, 0)
self.std = torch.std(x, 0)
self.eps = eps
def encode(self, x):
x = (x - self.mean) / (self.std + self.eps)
return x
def decode(self, x, sample_idx=None):
if sample_idx is None:
std = self.std + self.eps # n
mean = self.mean
else:
if len(self.mean.shape) == len(sample_idx[0].shape):
std = self.std[sample_idx] + self.eps # batch*n
mean = self.mean[sample_idx]
if len(self.mean.shape) > len(sample_idx[0].shape):
std = self.std[:,sample_idx]+ self.eps # T*batch*n
mean = self.mean[:,sample_idx]
# x is in shape of batch*n or T*batch*n
x = (x * std) + mean
return x
def cuda(self):
self.mean = self.mean.cuda()
self.std = self.std.cuda()
def cpu(self):
self.mean = self.mean.cpu()
self.std = self.std.cpu()
# normalization, Gaussian
class GaussianNormalizer(object):
def __init__(self, x, eps=0.00001):
super(GaussianNormalizer, self).__init__()
self.mean = torch.mean(x)
self.std = torch.std(x)
self.eps = eps
def encode(self, x):
x = (x - self.mean) / (self.std + self.eps)
return x
def decode(self, x, sample_idx=None):
x = (x * (self.std + self.eps)) + self.mean
return x
def cuda(self):
self.mean = self.mean.cuda()
self.std = self.std.cuda()
def cpu(self):
self.mean = self.mean.cpu()
self.std = self.std.cpu()
# normalization, scaling by range
class RangeNormalizer(object):
def __init__(self, x, low=0.0, high=1.0):
super(RangeNormalizer, self).__init__()
mymin = torch.min(x, 0)[0].view(-1)
mymax = torch.max(x, 0)[0].view(-1)
self.a = (high - low)/(mymax - mymin)
self.b = -self.a*mymax + high
def encode(self, x):
s = x.size()
x = x.view(s[0], -1)
x = self.a*x + self.b
x = x.view(s)
return x
def decode(self, x):
s = x.size()
x = x.view(s[0], -1)
x = (x - self.b)/self.a
x = x.view(s)
return x
#loss function with rel/abs Lp loss
class LpLoss(object):
def __init__(self, d=2, p=2, size_average=True, reduction=True):
super(LpLoss, self).__init__()
#Dimension and Lp-norm type are postive
assert d > 0 and p > 0
self.d = d
self.p = p
self.reduction = reduction
self.size_average = size_average
def abs(self, x, y):
num_examples = x.size()[0]
#Assume uniform mesh
h = 1.0 / (x.size()[1] - 1.0)
all_norms = (h**(self.d/self.p))*torch.norm(x.view(num_examples,-1) - y.view(num_examples,-1), self.p, 1)
if self.reduction:
if self.size_average:
return torch.mean(all_norms)
else:
return torch.sum(all_norms)
return all_norms
def rel(self, x, y):
num_examples = x.size()[0]
diff_norms = torch.norm(x.reshape(num_examples,-1) - y.reshape(num_examples,-1), self.p, 1)
y_norms = torch.norm(y.reshape(num_examples,-1), self.p, 1)
if self.reduction:
if self.size_average:
return torch.mean(diff_norms/y_norms)
else:
return torch.sum(diff_norms/y_norms)
return diff_norms/y_norms
def __call__(self, x, y):
return self.rel(x, y)
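
# Example usage (a sketch): relative Lp error between predictions and ground truth
#   myloss = LpLoss(size_average=True)
#   err = myloss(pred.view(batch_size, -1), truth.view(batch_size, -1))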
# A simple feedforward neural network
class DenseNet(torch.nn.Module):
def __init__(self, layers, nonlinearity, out_nonlinearity=None, normalize=False):
super(DenseNet, self).__init__()
self.n_layers = len(layers) - 1
assert self.n_layers >= 1
self.layers = nn.ModuleList()
for j in range(self.n_layers):
self.layers.append(nn.Linear(layers[j], layers[j+1]))
if j != self.n_layers - 1:
if normalize:
self.layers.append(nn.BatchNorm1d(layers[j+1]))
self.layers.append(nonlinearity())
if out_nonlinearity is not None:
self.layers.append(out_nonlinearity())
def forward(self, x):
for _, l in enumerate(self.layers):
x = l(x)
return x
class DenseNet_sin(torch.nn.Module):
def __init__(self, layers, nonlinearity, out_nonlinearity=None, normalize=False):
super(DenseNet_sin, self).__init__()
self.n_layers = len(layers) - 1
assert self.n_layers >= 1
self.layers = nn.ModuleList()
for j in range(self.n_layers):
self.layers.append(nn.Linear(layers[j], layers[j+1]))
def forward(self, x):
for j, l in enumerate(self.layers):
x = l(x)
if j != self.n_layers - 1:
x = torch.sin(x)
return x
# generate graphs on square domain
class SquareMeshGenerator(object):
def __init__(self, real_space, mesh_size):
super(SquareMeshGenerator, self).__init__()
self.d = len(real_space)
self.s = mesh_size[0]
assert len(mesh_size) == self.d
if self.d == 1:
self.n = mesh_size[0]
self.grid = np.linspace(real_space[0][0], real_space[0][1], self.n).reshape((self.n, 1))
else:
self.n = 1
grids = []
for j in range(self.d):
grids.append(np.linspace(real_space[j][0], real_space[j][1], mesh_size[j]))
self.n *= mesh_size[j]
self.grid = np.vstack([xx.ravel() for xx in np.meshgrid(*grids)]).T
def ball_connectivity(self, r):
pwd = sklearn.metrics.pairwise_distances(self.grid)
self.edge_index = np.vstack(np.where(pwd <= r))
self.n_edges = self.edge_index.shape[1]
return torch.tensor(self.edge_index, dtype=torch.long)
def gaussian_connectivity(self, sigma):
pwd = sklearn.metrics.pairwise_distances(self.grid)
rbf = np.exp(-pwd**2/sigma**2)
sample = np.random.binomial(1,rbf)
self.edge_index = np.vstack(np.where(sample))
self.n_edges = self.edge_index.shape[1]
return torch.tensor(self.edge_index, dtype=torch.long)
def get_grid(self):
return torch.tensor(self.grid, dtype=torch.float)
def attributes(self, f=None, theta=None):
if f is None:
if theta is None:
edge_attr = self.grid[self.edge_index.T].reshape((self.n_edges,-1))
else:
edge_attr = np.zeros((self.n_edges, 2*self.d+2))
edge_attr[:,0:2*self.d] = self.grid[self.edge_index.T].reshape((self.n_edges,-1))
edge_attr[:, 2 * self.d] = theta[self.edge_index[0]]
edge_attr[:, 2 * self.d +1] = theta[self.edge_index[1]]
else:
xy = self.grid[self.edge_index.T].reshape((self.n_edges,-1))
if theta is None:
edge_attr = f(xy[:,0:self.d], xy[:,self.d:])
else:
edge_attr = f(xy[:,0:self.d], xy[:,self.d:], theta[self.edge_index[0]], theta[self.edge_index[1]])
return torch.tensor(edge_attr, dtype=torch.float)
def get_boundary(self):
s = self.s
n = self.n
boundary1 = np.array(range(0, s))
boundary2 = np.array(range(n - s, n))
boundary3 = np.array(range(s, n, s))
boundary4 = np.array(range(2 * s - 1, n, s))
self.boundary = np.concatenate([boundary1, boundary2, boundary3, boundary4])
def boundary_connectivity2d(self, stride=1):
boundary = self.boundary[::stride]
boundary_size = len(boundary)
vertice1 = np.array(range(self.n))
vertice1 = np.repeat(vertice1, boundary_size)
vertice2 = np.tile(boundary, self.n)
self.edge_index_boundary = np.stack([vertice2, vertice1], axis=0)
self.n_edges_boundary = self.edge_index_boundary.shape[1]
return torch.tensor(self.edge_index_boundary, dtype=torch.long)
def attributes_boundary(self, f=None, theta=None):
# if self.edge_index_boundary == None:
# self.boundary_connectivity2d()
if f is None:
if theta is None:
edge_attr_boundary = self.grid[self.edge_index_boundary.T].reshape((self.n_edges_boundary,-1))
else:
edge_attr_boundary = np.zeros((self.n_edges_boundary, 2*self.d+2))
edge_attr_boundary[:,0:2*self.d] = self.grid[self.edge_index_boundary.T].reshape((self.n_edges_boundary,-1))
edge_attr_boundary[:, 2 * self.d] = theta[self.edge_index_boundary[0]]
edge_attr_boundary[:, 2 * self.d +1] = theta[self.edge_index_boundary[1]]
else:
xy = self.grid[self.edge_index_boundary.T].reshape((self.n_edges_boundary,-1))
if theta is None:
edge_attr_boundary = f(xy[:,0:self.d], xy[:,self.d:])
else:
edge_attr_boundary = f(xy[:,0:self.d], xy[:,self.d:], theta[self.edge_index_boundary[0]], theta[self.edge_index_boundary[1]])
return torch.tensor(edge_attr_boundary, dtype=torch.float)
# generate graphs with sampling
class RandomMeshGenerator(object):
def __init__(self, real_space, mesh_size, sample_size, attr_features=1):
super(RandomMeshGenerator, self).__init__()
self.d = len(real_space)
self.m = sample_size
self.attr_features = attr_features
assert len(mesh_size) == self.d
if self.d == 1:
self.n = mesh_size[0]
self.grid = np.linspace(real_space[0][0], real_space[0][1], self.n).reshape((self.n, 1))
else:
self.n = 1
grids = []
for j in range(self.d):
grids.append(np.linspace(real_space[j][0], real_space[j][1], mesh_size[j]))
self.n *= mesh_size[j]
self.grid = np.vstack([xx.ravel() for xx in np.meshgrid(*grids)]).T
if self.m > self.n:
self.m = self.n
self.idx = np.array(range(self.n))
self.grid_sample = self.grid
def sample(self):
perm = torch.randperm(self.n)
self.idx = perm[:self.m]
self.grid_sample = self.grid[self.idx]
return self.idx
def get_grid(self):
return torch.tensor(self.grid_sample, dtype=torch.float)
def ball_connectivity(self, r, is_forward=False):
pwd = sklearn.metrics.pairwise_distances(self.grid_sample)
self.edge_index = np.vstack(np.where(pwd <= r))
self.n_edges = self.edge_index.shape[1]
if is_forward:
print(self.edge_index.shape)
self.edge_index = self.edge_index[:, self.edge_index[0] >= self.edge_index[1]]
print(self.edge_index.shape)
self.n_edges = self.edge_index.shape[1]
return torch.tensor(self.edge_index, dtype=torch.long)
def torus1d_connectivity(self, r):
grid = self.grid_sample
pwd0 = sklearn.metrics.pairwise_distances(grid, grid)
grid1 = grid
grid1[:,0] = grid[:,0]+1
pwd1 = sklearn.metrics.pairwise_distances(grid, grid1)
PWD = np.stack([pwd0,pwd1], axis=2)
pwd = np.min(PWD, axis=2)
self.edge_index = np.vstack(np.where(pwd <= r))
self.n_edges = self.edge_index.shape[1]
return torch.tensor(self.edge_index, dtype=torch.long)
def gaussian_connectivity(self, sigma):
pwd = sklearn.metrics.pairwise_distances(self.grid_sample)
rbf = np.exp(-pwd**2/sigma**2)
sample = np.random.binomial(1,rbf)
self.edge_index = np.vstack(np.where(sample))
self.n_edges = self.edge_index.shape[1]
return torch.tensor(self.edge_index, dtype=torch.long)
def attributes(self, f=None, theta=None):
if f is None:
if theta is None:
edge_attr = self.grid[self.edge_index.T].reshape((self.n_edges, -1))
else:
theta = theta[self.idx]
edge_attr = np.zeros((self.n_edges, 2 * self.d + 2*self.attr_features))
edge_attr[:, 0:2 * self.d] = self.grid_sample[self.edge_index.T].reshape((self.n_edges, -1))
edge_attr[:, 2 * self.d : 2 * self.d + self.attr_features] = theta[self.edge_index[0]].view(-1, self.attr_features)
edge_attr[:, 2 * self.d + self.attr_features: 2 * self.d + 2*self.attr_features] = theta[self.edge_index[1]].view(-1, self.attr_features)
else:
xy = self.grid_sample[self.edge_index.T].reshape((self.n_edges, -1))
if theta is None:
edge_attr = f(xy[:, 0:self.d], xy[:, self.d:])
else:
theta = theta[self.idx]
edge_attr = f(xy[:, 0:self.d], xy[:, self.d:], theta[self.edge_index[0]], theta[self.edge_index[1]])
return torch.tensor(edge_attr, dtype=torch.float)
# # generate two-level graph
class RandomTwoMeshGenerator(object):
def __init__(self, real_space, mesh_size, sample_size, induced_point):
super(RandomTwoMeshGenerator, self).__init__()
self.d = len(real_space)
self.m = sample_size
self.m_i = induced_point
assert len(mesh_size) == self.d
if self.d == 1:
self.n = mesh_size[0]
self.grid = np.linspace(real_space[0][0], real_space[0][1], self.n).reshape((self.n, 1))
else:
self.n = 1
grids = []
for j in range(self.d):
grids.append(np.linspace(real_space[j][0], real_space[j][1], mesh_size[j]))
self.n *= mesh_size[j]
self.grid = np.vstack([xx.ravel() for xx in np.meshgrid(*grids)]).T
if self.m > self.n:
self.m = self.n
self.idx = np.array(range(self.n))
self.idx_i = self.idx
self.idx_both = self.idx
self.grid_sample = self.grid
self.grid_sample_i = self.grid
self.grid_sample_both = self.grid
def sample(self):
perm = torch.randperm(self.n)
self.idx = perm[:self.m]
self.idx_i = perm[self.m: self.m+self.m_i]
self.idx_both = perm[: self.m+self.m_i]
self.grid_sample = self.grid[self.idx]
self.grid_sample_i = self.grid[self.idx_i]
self.grid_sample_both = self.grid[self.idx_both]
return self.idx, self.idx_i, self.idx_both
def get_grid(self):
return torch.tensor(self.grid_sample, dtype=torch.float), \
torch.tensor(self.grid_sample_i, dtype=torch.float), \
torch.tensor(self.grid_sample_both, dtype=torch.float)
def ball_connectivity(self, r11, r12, r22):
pwd = sklearn.metrics.pairwise_distances(self.grid_sample)
pwd12 = sklearn.metrics.pairwise_distances(self.grid_sample, self.grid_sample_i)
pwd22 = sklearn.metrics.pairwise_distances(self.grid_sample_i)
self.edge_index = np.vstack(np.where(pwd <= r11))
self.edge_index_12 = np.vstack(np.where(pwd12 <= r12))
self.edge_index_12[1,:] = self.edge_index_12[1,:] + self.m
self.edge_index_21 = self.edge_index_12[[1,0],:]
self.edge_index_22 = np.vstack(np.where(pwd22 <= r22)) + self.m
self.n_edges = self.edge_index.shape[1]
self.n_edges_12 = self.edge_index_12.shape[1]
self.n_edges_22 = self.edge_index_22.shape[1]
return torch.tensor(self.edge_index, dtype=torch.long), \
torch.tensor(self.edge_index_12, dtype=torch.long), \
torch.tensor(self.edge_index_21, dtype=torch.long), \
torch.tensor(self.edge_index_22, dtype=torch.long)
def attributes(self, theta=None):
if theta is None:
edge_attr = self.grid_sample_both[self.edge_index.T].reshape((self.n_edges, -1))
edge_attr_12 = self.grid_sample_both[self.edge_index_12.T].reshape((self.n_edges_12, -1))
edge_attr_21 = self.grid_sample_both[self.edge_index_21.T].reshape((self.n_edges_12, -1))
edge_attr_22 = self.grid_sample_both[self.edge_index_22.T].reshape((self.n_edges_22, -1))
else:
theta = theta[self.idx_both]
edge_attr = np.zeros((self.n_edges, 3 * self.d))
edge_attr[:, 0:2 * self.d] = self.grid_sample_both[self.edge_index.T].reshape((self.n_edges, -1))
edge_attr[:, 2 * self.d] = theta[self.edge_index[0]]
edge_attr[:, 2 * self.d + 1] = theta[self.edge_index[1]]
edge_attr_12 = np.zeros((self.n_edges_12, 3 * self.d))
edge_attr_12[:, 0:2 * self.d] = self.grid_sample_both[self.edge_index_12.T].reshape((self.n_edges_12, -1))
edge_attr_12[:, 2 * self.d] = theta[self.edge_index_12[0]]
edge_attr_12[:, 2 * self.d + 1] = theta[self.edge_index_12[1]]
edge_attr_21 = np.zeros((self.n_edges_12, 3 * self.d))
edge_attr_21[:, 0:2 * self.d] = self.grid_sample_both[self.edge_index_21.T].reshape((self.n_edges_12, -1))
edge_attr_21[:, 2 * self.d] = theta[self.edge_index_21[0]]
edge_attr_21[:, 2 * self.d + 1] = theta[self.edge_index_21[1]]
edge_attr_22 = np.zeros((self.n_edges_22, 3 * self.d))
edge_attr_22[:, 0:2 * self.d] = self.grid_sample_both[self.edge_index_22.T].reshape((self.n_edges_22, -1))
edge_attr_22[:, 2 * self.d] = theta[self.edge_index_22[0]]
edge_attr_22[:, 2 * self.d + 1] = theta[self.edge_index_22[1]]
return torch.tensor(edge_attr, dtype=torch.float), \
torch.tensor(edge_attr_12, dtype=torch.float), \
torch.tensor(edge_attr_21, dtype=torch.float), \
torch.tensor(edge_attr_22, dtype=torch.float)
# generate multi-level graph
class RandomMultiMeshGenerator(object):
def __init__(self, real_space, mesh_size, level, sample_sizes):
super(RandomMultiMeshGenerator, self).__init__()
self.d = len(real_space)
self.m = sample_sizes
self.level = level
assert len(sample_sizes) == level
assert len(mesh_size) == self.d
if self.d == 1:
self.n = mesh_size[0]
self.grid = np.linspace(real_space[0][0], real_space[0][1], self.n).reshape((self.n, 1))
else:
self.n = 1
grids = []
for j in range(self.d):
grids.append(np.linspace(real_space[j][0], real_space[j][1], mesh_size[j]))
self.n *= mesh_size[j]
self.grid = np.vstack([xx.ravel() for xx in np.meshgrid(*grids)]).T
self.idx = []
self.idx_all = None
self.grid_sample = []
self.grid_sample_all = None
self.edge_index = []
self.edge_index_down = []
self.edge_index_up = []
self.edge_attr = []
self.edge_attr_down = []
self.edge_attr_up = []
self.n_edges_inner = []
self.n_edges_inter = []
def sample(self):
self.idx = []
self.grid_sample = []
perm = torch.randperm(self.n)
index = 0
for l in range(self.level):
self.idx.append(perm[index: index+self.m[l]])
self.grid_sample.append(self.grid[self.idx[l]])
index = index+self.m[l]
self.idx_all = perm[:index]
self.grid_sample_all = self.grid[self.idx_all]
return self.idx, self.idx_all
def get_grid(self):
grid_out = []
for grid in self.grid_sample:
grid_out.append(torch.tensor(grid, dtype=torch.float))
return grid_out, torch.tensor(self.grid_sample_all, dtype=torch.float)
def ball_connectivity(self, radius_inner, radius_inter):
assert len(radius_inner) == self.level
assert len(radius_inter) == self.level - 1
self.edge_index = []
self.edge_index_down = []
self.edge_index_up = []
self.n_edges_inner = []
self.n_edges_inter = []
edge_index_out = []
edge_index_down_out = []
edge_index_up_out = []
index = 0
for l in range(self.level):
pwd = sklearn.metrics.pairwise_distances(self.grid_sample[l])
edge_index = np.vstack(np.where(pwd <= radius_inner[l])) + index
self.edge_index.append(edge_index)
edge_index_out.append(torch.tensor(edge_index, dtype=torch.long))
self.n_edges_inner.append(edge_index.shape[1])
index = index + self.grid_sample[l].shape[0]
index = 0
for l in range(self.level-1):
pwd = sklearn.metrics.pairwise_distances(self.grid_sample[l], self.grid_sample[l+1])
edge_index = np.vstack(np.where(pwd <= radius_inter[l])) + index
edge_index[1, :] = edge_index[1, :] + self.grid_sample[l].shape[0]
self.edge_index_down.append(edge_index)
edge_index_down_out.append(torch.tensor(edge_index, dtype=torch.long))
self.edge_index_up.append(edge_index[[1,0],:])
edge_index_up_out.append(torch.tensor(edge_index[[1,0],:], dtype=torch.long))
self.n_edges_inter.append(edge_index.shape[1])
index = index + self.grid_sample[l].shape[0]
edge_index_out = torch.cat(edge_index_out, dim=1)
edge_index_down_out = torch.cat(edge_index_down_out, dim=1)
edge_index_up_out = torch.cat(edge_index_up_out, dim=1)
return edge_index_out, edge_index_down_out, edge_index_up_out
def get_edge_index_range(self):
# in order to use graph network's data structure,
# the edge index shall be stored as tensor instead of list
# we concatenate the edge index list and label the range of each level
edge_index_range = torch.zeros((self.level,2), dtype=torch.long)
edge_index_down_range = torch.zeros((self.level-1,2), dtype=torch.long)
edge_index_up_range = torch.zeros((self.level-1,2), dtype=torch.long)
n_edge_index = 0
for l in range(self.level):
edge_index_range[l, 0] = n_edge_index
n_edge_index = n_edge_index + self.edge_index[l].shape[1]
edge_index_range[l, 1] = n_edge_index
n_edge_index = 0
for l in range(self.level-1):
edge_index_down_range[l, 0] = n_edge_index
edge_index_up_range[l, 0] = n_edge_index
n_edge_index = n_edge_index + self.edge_index_down[l].shape[1]
edge_index_down_range[l, 1] = n_edge_index
edge_index_up_range[l, 1] = n_edge_index
return edge_index_range, edge_index_down_range, edge_index_up_range
def attributes(self, theta=None):
self.edge_attr = []
self.edge_attr_down = []
self.edge_attr_up = []
if theta is None:
for l in range(self.level):
edge_attr = self.grid_sample_all[self.edge_index[l].T].reshape((self.n_edges_inner[l], 2*self.d))
self.edge_attr.append(torch.tensor(edge_attr))
for l in range(self.level - 1):
edge_attr_down = self.grid_sample_all[self.edge_index_down[l].T].reshape((self.n_edges_inter[l], 2*self.d))
edge_attr_up = self.grid_sample_all[self.edge_index_up[l].T].reshape((self.n_edges_inter[l], 2*self.d))
self.edge_attr_down.append(torch.tensor(edge_attr_down))
self.edge_attr_up.append(torch.tensor(edge_attr_up))
else:
theta = theta[self.idx_all]
for l in range(self.level):
edge_attr = np.zeros((self.n_edges_inner[l], 2 * self.d + 2))
edge_attr[:, 0:2 * self.d] = self.grid_sample_all[self.edge_index[l].T].reshape(
(self.n_edges_inner[l], 2 * self.d))
edge_attr[:, 2 * self.d] = theta[self.edge_index[l][0]]
edge_attr[:, 2 * self.d + 1] = theta[self.edge_index[l][1]]
self.edge_attr.append(torch.tensor(edge_attr, dtype=torch.float))
for l in range(self.level - 1):
edge_attr_down = np.zeros((self.n_edges_inter[l], 2 * self.d + 2))
edge_attr_up = np.zeros((self.n_edges_inter[l], 2 * self.d + 2))
edge_attr_down[:, 0:2 * self.d] = self.grid_sample_all[self.edge_index_down[l].T].reshape(
(self.n_edges_inter[l], 2 * self.d))
edge_attr_down[:, 2 * self.d] = theta[self.edge_index_down[l][0]]
edge_attr_down[:, 2 * self.d + 1] = theta[self.edge_index_down[l][1]]
self.edge_attr_down.append(torch.tensor(edge_attr_down, dtype=torch.float))
edge_attr_up[:, 0:2 * self.d] = self.grid_sample_all[self.edge_index_up[l].T].reshape(
(self.n_edges_inter[l], 2 * self.d))
edge_attr_up[:, 2 * self.d] = theta[self.edge_index_up[l][0]]
edge_attr_up[:, 2 * self.d + 1] = theta[self.edge_index_up[l][1]]
self.edge_attr_up.append(torch.tensor(edge_attr_up, dtype=torch.float))
edge_attr_out = torch.cat(self.edge_attr, dim=0)
edge_attr_down_out = torch.cat(self.edge_attr_down, dim=0)
edge_attr_up_out = torch.cat(self.edge_attr_up, dim=0)
return edge_attr_out, edge_attr_down_out, edge_attr_up_out
# generate graph, with split and assemble
class RandomGridSplitter(object):
def __init__(self, grid, resolution, d=2, m=200, l=1, radius=0.25):
super(RandomGridSplitter, self).__init__()
self.grid = grid
self.resolution = resolution
self.n = resolution**d
self.d = d
self.m = m
self.l = l
self.radius = radius
assert self.n % self.m == 0
self.num = self.n // self.m # number of sub-grid
def get_data(self, theta, edge_features=1):
data = []
for i in range(self.l):
perm = torch.randperm(self.n)
perm = perm.reshape(self.num, self.m)
for j in range(self.num):
idx = perm[j,:].reshape(-1,)
grid_sample = self.grid.reshape(self.n,-1)[idx]
theta_sample = theta.reshape(self.n,-1)[idx]
X = torch.cat([grid_sample,theta_sample],dim=1)
pwd = sklearn.metrics.pairwise_distances(grid_sample)
edge_index = np.vstack(np.where(pwd <= self.radius))
n_edges = edge_index.shape[1]
edge_index = torch.tensor(edge_index, dtype=torch.long)
if edge_features == 0:
edge_attr = grid_sample[edge_index.T].reshape(n_edges, -1)
else:
edge_attr = np.zeros((n_edges, 2*self.d+2))
a = theta_sample[:,0]
edge_attr[:, :2*self.d] = grid_sample[edge_index.T].reshape(n_edges, -1)
edge_attr[:, 2*self.d] = a[edge_index[0]]
edge_attr[:, 2*self.d+1] = a[edge_index[1]]
edge_attr = torch.tensor(edge_attr, dtype=torch.float)
data.append(Data(x=X, edge_index=edge_index, edge_attr=edge_attr, split_idx=idx))
print('test', len(data), X.shape, edge_index.shape, edge_attr.shape)
return data
def assemble(self, pred, split_idx, batch_size2, sigma=1, cuda=False):
assert len(pred) == len(split_idx)
assert len(pred) == self.num * self.l // batch_size2
out = torch.zeros(self.n, )
if cuda:
out = out.cuda()
for i in range(len(pred)):
pred_i = pred[i].reshape(batch_size2, self.m)
split_idx_i = split_idx[i].reshape(batch_size2, self.m)
for j in range(batch_size2):
pred_ij = pred_i[j,:].reshape(-1,)
idx = split_idx_i[j,:].reshape(-1,)
out[idx] = out[idx] + pred_ij
out = out / self.l
# out = gaussian_filter(out, sigma=sigma, mode='constant', cval=0)
# out = torch.tensor(out, dtype=torch.float)
return out.reshape(-1,)
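# --- Illustrative usage sketch (added for clarity; not part of the original
# module). The resolution, sub-grid size and radius below are assumed values
# chosen so that n % m == 0, and the zero "predictions" stand in for whatever
# model is evaluated on each sub-graph.
def _random_grid_splitter_demo():
    import torch
    resolution, m = 16, 64                         # n = 256 nodes, 4 sub-grids
    xs = torch.linspace(0, 1, resolution)
    grid = torch.stack(torch.meshgrid(xs, xs), dim=-1).reshape(-1, 2)
    theta = torch.rand(resolution ** 2, 1)         # one coefficient per node
    splitter = RandomGridSplitter(grid, resolution, d=2, m=m, l=1, radius=0.25)
    data_list = splitter.get_data(theta)           # one Data object per sub-grid
    preds = [torch.zeros(m) for _ in data_list]    # placeholder model outputs
    split_idx = [d.split_idx for d in data_list]
    full_pred = splitter.assemble(preds, split_idx, batch_size2=1)
    return full_pred.shape                         # torch.Size([256])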
# generate multi-level graph, with split and assemble
class RandomMultiMeshSplitter(object):
def __init__(self, real_space, mesh_size, level, sample_sizes):
super(RandomMultiMeshSplitter, self).__init__()
self.d = len(real_space)
self.ms = sample_sizes
self.m = sample_sizes[0]
self.level = level
assert len(sample_sizes) == level
assert len(mesh_size) == self.d
if self.d == 1:
self.n = mesh_size[0]
self.grid = np.linspace(real_space[0][0], real_space[0][1], self.n).reshape((self.n, 1))
else:
self.n = 1
grids = []
for j in range(self.d):
grids.append(np.linspace(real_space[j][0], real_space[j][1], mesh_size[j]))
self.n *= mesh_size[j]
self.grid = np.vstack([xx.ravel() for xx in np.meshgrid(*grids)]).T
self.splits = self.n // self.m # number of sub-grid
if self.splits * self.m < self.n:
self.splits = self.splits + 1
print('n:',self.n,' m:',self.m, ' number of splits:', self.splits )
self.perm = None
self.idx = []
self.idx_all = None
self.grid_sample = []
self.grid_sample_all = None
self.edge_index = []
self.edge_index_down = []
self.edge_index_up = []
self.edge_attr = []
self.edge_attr_down = []
self.edge_attr_up = []
self.n_edges_inner = []
self.n_edges_inter = []
def sample(self, new_sample=True, index0=0):
self.idx = []
self.grid_sample = []
if (new_sample) or (self.perm is None):
self.perm = torch.randperm(self.n)
index = index0
for l in range(self.level):
index = index % self.n
index_end = (index+self.ms[l]) % self.n
if index < index_end:
idx = self.perm[index: index_end]
else:
idx = torch.cat((self.perm[index: ],self.perm[: index_end]), dim=0)
self.idx.append(idx)
self.grid_sample.append(self.grid[idx])
index = index_end
if index0 < index_end:
idx_all = self.perm[index0: index_end]
else:
idx_all = torch.cat((self.perm[index0:], self.perm[: index_end]), dim=0)
self.idx_all = idx_all
self.grid_sample_all = self.grid[self.idx_all]
return self.idx, self.idx_all
def get_grid(self):
grid_out = []
for grid in self.grid_sample:
grid_out.append(torch.tensor(grid, dtype=torch.float))
return grid_out, torch.tensor(self.grid_sample_all, dtype=torch.float)
def ball_connectivity(self, radius_inner, radius_inter):
assert len(radius_inner) == self.level
assert len(radius_inter) == self.level - 1
self.edge_index = []
self.edge_index_down = []
self.edge_index_up = []
self.n_edges_inner = []
self.n_edges_inter = []
edge_index_out = []
edge_index_down_out = []
edge_index_up_out = []
index = 0
for l in range(self.level):
pwd = sklearn.metrics.pairwise_distances(self.grid_sample[l])
edge_index = np.vstack(np.where(pwd <= radius_inner[l])) + index
self.edge_index.append(edge_index)
edge_index_out.append(torch.tensor(edge_index, dtype=torch.long))
self.n_edges_inner.append(edge_index.shape[1])
index = index + self.grid_sample[l].shape[0]
index = 0
for l in range(self.level-1):
pwd = sklearn.metrics.pairwise_distances(self.grid_sample[l], self.grid_sample[l+1])
edge_index = np.vstack(np.where(pwd <= radius_inter[l])) + index
edge_index[1, :] = edge_index[1, :] + self.grid_sample[l].shape[0]
self.edge_index_down.append(edge_index)
edge_index_down_out.append(torch.tensor(edge_index, dtype=torch.long))
self.edge_index_up.append(edge_index[[1,0],:])
edge_index_up_out.append(torch.tensor(edge_index[[1,0],:], dtype=torch.long))
self.n_edges_inter.append(edge_index.shape[1])
index = index + self.grid_sample[l].shape[0]
edge_index_out = torch.cat(edge_index_out, dim=1)
edge_index_down_out = torch.cat(edge_index_down_out, dim=1)
edge_index_up_out = torch.cat(edge_index_up_out, dim=1)
return edge_index_out, edge_index_down_out, edge_index_up_out
def get_edge_index_range(self):
        # To use the graph network's data structure, the edge indices must be
        # stored as a single tensor rather than a list, so we concatenate the
        # per-level edge index lists and record the index range of each level.
edge_index_range = torch.zeros((self.level,2), dtype=torch.long)
edge_index_down_range = torch.zeros((self.level-1,2), dtype=torch.long)
edge_index_up_range = torch.zeros((self.level-1,2), dtype=torch.long)
n_edge_index = 0
for l in range(self.level):
edge_index_range[l, 0] = n_edge_index
n_edge_index = n_edge_index + self.edge_index[l].shape[1]
edge_index_range[l, 1] = n_edge_index
n_edge_index = 0
for l in range(self.level-1):
edge_index_down_range[l, 0] = n_edge_index
edge_index_up_range[l, 0] = n_edge_index
n_edge_index = n_edge_index + self.edge_index_down[l].shape[1]
edge_index_down_range[l, 1] = n_edge_index
edge_index_up_range[l, 1] = n_edge_index
return edge_index_range, edge_index_down_range, edge_index_up_range
def attributes(self, theta=None):
self.edge_attr = []
self.edge_attr_down = []
self.edge_attr_up = []
if theta is None:
for l in range(self.level):
edge_attr = self.grid_sample_all[self.edge_index[l].T].reshape((self.n_edges_inner[l], 2*self.d))
self.edge_attr.append(torch.tensor(edge_attr))
for l in range(self.level - 1):
edge_attr_down = self.grid_sample_all[self.edge_index_down[l].T].reshape((self.n_edges_inter[l], 2*self.d))
edge_attr_up = self.grid_sample_all[self.edge_index_up[l].T].reshape((self.n_edges_inter[l], 2*self.d))
self.edge_attr_down.append(torch.tensor(edge_attr_down))
self.edge_attr_up.append(torch.tensor(edge_attr_up))
else:
theta = theta[self.idx_all]
for l in range(self.level):
edge_attr = np.zeros((self.n_edges_inner[l], 2 * self.d + 2))
edge_attr[:, 0:2 * self.d] = self.grid_sample_all[self.edge_index[l].T].reshape(
(self.n_edges_inner[l], 2 * self.d))
edge_attr[:, 2 * self.d] = theta[self.edge_index[l][0]]
edge_attr[:, 2 * self.d + 1] = theta[self.edge_index[l][1]]
self.edge_attr.append(torch.tensor(edge_attr, dtype=torch.float))
for l in range(self.level - 1):
edge_attr_down = np.zeros((self.n_edges_inter[l], 2 * self.d + 2))
edge_attr_up = np.zeros((self.n_edges_inter[l], 2 * self.d + 2))
edge_attr_down[:, 0:2 * self.d] = self.grid_sample_all[self.edge_index_down[l].T].reshape(
(self.n_edges_inter[l], 2 * self.d))
edge_attr_down[:, 2 * self.d] = theta[self.edge_index_down[l][0]]
edge_attr_down[:, 2 * self.d + 1] = theta[self.edge_index_down[l][1]]
self.edge_attr_down.append(torch.tensor(edge_attr_down, dtype=torch.float))
edge_attr_up[:, 0:2 * self.d] = self.grid_sample_all[self.edge_index_up[l].T].reshape(
(self.n_edges_inter[l], 2 * self.d))
edge_attr_up[:, 2 * self.d] = theta[self.edge_index_up[l][0]]
edge_attr_up[:, 2 * self.d + 1] = theta[self.edge_index_up[l][1]]
self.edge_attr_up.append(torch.tensor(edge_attr_up, dtype=torch.float))
edge_attr_out = torch.cat(self.edge_attr, dim=0)
edge_attr_down_out = torch.cat(self.edge_attr_down, dim=0)
edge_attr_up_out = torch.cat(self.edge_attr_up, dim=0)
return edge_attr_out, edge_attr_down_out, edge_attr_up_out
def splitter(self, radius_inner, radius_inter, theta_a, theta_all):
# give a test mesh, generate a list of data
data = []
index = 0
for i in range(self.splits):
if i==0:
idx, idx_all = self.sample(new_sample=True, index0=index)
else:
idx, idx_all = self.sample(new_sample=False, index0=index)
index = (index + self.m) % self.n
grid, grid_all = self.get_grid()
edge_index, edge_index_down, edge_index_up = self.ball_connectivity(radius_inner, radius_inter)
edge_index_range, edge_index_down_range, edge_index_up_range = self.get_edge_index_range()
edge_attr, edge_attr_down, edge_attr_up = self.attributes(theta=theta_a)
x = torch.cat([grid_all, theta_all[idx_all,:] ], dim=1)
data.append(Data(x=x,
edge_index_mid=edge_index, edge_index_down=edge_index_down, edge_index_up=edge_index_up,
edge_index_range=edge_index_range, edge_index_down_range=edge_index_down_range, edge_index_up_range=edge_index_up_range,
edge_attr_mid=edge_attr, edge_attr_down=edge_attr_down, edge_attr_up=edge_attr_up,
sample_idx=idx[0]))
return data
def assembler(self, out_list, sample_idx_list, is_cuda=False):
assert len(out_list) == self.splits
if is_cuda:
pred = torch.zeros(self.n, ).cuda()
else:
pred = torch.zeros(self.n, )
for i in range(self.splits):
pred[sample_idx_list[i]] = out_list[i].reshape(-1)
return pred
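# --- Illustrative usage sketch (added for clarity; not part of the original
# module). Mesh size, sample sizes and radii are assumed values; the zero
# outputs stand in for a model evaluated on each split.
def _random_multi_mesh_splitter_demo():
    import torch
    splitter = RandomMultiMeshSplitter(real_space=[[0, 1], [0, 1]],
                                       mesh_size=[16, 16],
                                       level=2, sample_sizes=[96, 32])
    theta = torch.rand(16 * 16)                       # nodal coefficient field
    data_list = splitter.splitter(radius_inner=[0.25, 0.5], radius_inter=[0.35],
                                  theta_a=theta, theta_all=theta.reshape(-1, 1))
    out_list = [torch.zeros(96) for _ in data_list]   # placeholder predictions
    sample_idx_list = [d.sample_idx for d in data_list]
    full_pred = splitter.assembler(out_list, sample_idx_list)
    return full_pred.shape                            # torch.Size([256])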
# generate graph, with split and assemble with downsample
class DownsampleGridSplitter(object):
def __init__(self, grid, resolution, r, m=100, radius=0.15, edge_features=1):
super(DownsampleGridSplitter, self).__init__()
        # instead of randomly sampling sub-grids, here we downsample the grid into sub-grids
self.grid = grid.reshape(resolution, resolution,2)
# self.theta = theta.reshape(resolution, resolution,-1)
# self.y = y.reshape(resolution, resolution,1)
self.resolution = resolution
if resolution%2==1:
self.s = int(((resolution - 1)/r) + 1)
else:
self.s = int(resolution/r)
self.r = r
self.n = resolution**2
self.m = m
self.radius = radius
self.edge_features = edge_features
self.index = torch.tensor(range(self.n), dtype=torch.long).reshape(self.resolution, self.resolution)
def ball_connectivity(self, grid):
pwd = sklearn.metrics.pairwise_distances(grid)
edge_index = np.vstack(np.where(pwd <= self.radius))
n_edges = edge_index.shape[1]
return torch.tensor(edge_index, dtype=torch.long), n_edges
def get_data(self, theta):
theta_d = theta.shape[1]
theta = theta.reshape(self.resolution, self.resolution, theta_d)
data = []
for x in range(self.r):
for y in range(self.r):
grid_sub = self.grid[x::self.r, y::self.r,:].reshape(-1,2)
theta_sub = theta[x::self.r, y::self.r,:].reshape(-1,theta_d)
perm = torch.randperm(self.n)
m = self.m - grid_sub.shape[0]
idx = perm[:m]
grid_sample = self.grid.reshape(self.n,-1)[idx]
theta_sample = theta.reshape(self.n,-1)[idx]
grid_split = torch.cat([grid_sub, grid_sample],dim=0)
theta_split = torch.cat([theta_sub, theta_sample],dim=0)
X = torch.cat([grid_split,theta_split],dim=1)
edge_index, n_edges = self.ball_connectivity(grid_split)
edge_attr = np.zeros((n_edges, 4+self.edge_features*2))
a = theta_split[:, :self.edge_features]
edge_attr[:, :4] = grid_split[edge_index.T].reshape(n_edges, -1)
edge_attr[:, 4:4 + self.edge_features] = a[edge_index[0]]
edge_attr[:, 4 + self.edge_features: 4 + self.edge_features * 2] = a[edge_index[1]]
edge_attr = torch.tensor(edge_attr, dtype=torch.float)
split_idx = torch.tensor([x,y],dtype=torch.long).reshape(1,2)
data.append(Data(x=X, edge_index=edge_index, edge_attr=edge_attr, split_idx=split_idx))
print('test', len(data), X.shape, edge_index.shape, edge_attr.shape)
return data
def sample(self, theta, Y):
theta_d = theta.shape[1]
theta = theta.reshape(self.resolution, self.resolution, theta_d)
Y = Y.reshape(self.resolution, self.resolution)
x = torch.randint(0,self.r,(1,))
y = torch.randint(0,self.r,(1,))
grid_sub = self.grid[x::self.r, y::self.r, :].reshape(-1, 2)
theta_sub = theta[x::self.r, y::self.r, :].reshape(-1, theta_d)
Y_sub = Y[x::self.r, y::self.r].reshape(-1,)
index_sub = self.index[x::self.r, y::self.r].reshape(-1,)
n_sub = Y_sub.shape[0]
if self.m >= n_sub:
m = self.m - n_sub
perm = torch.randperm(self.n)
idx = perm[:m]
grid_sample = self.grid.reshape(self.n, -1)[idx]
theta_sample = theta.reshape(self.n, -1)[idx]
Y_sample = Y.reshape(self.n, )[idx]
grid_split = torch.cat([grid_sub, grid_sample], dim=0)
theta_split = torch.cat([theta_sub, theta_sample], dim=0)
Y_split = torch.cat([Y_sub, Y_sample], dim=0).reshape(-1,)
index_split = torch.cat([index_sub, idx], dim=0).reshape(-1,)
X = torch.cat([grid_split, theta_split], dim=1)
else:
grid_split = grid_sub
theta_split = theta_sub
Y_split = Y_sub.reshape(-1,)
index_split = index_sub.reshape(-1,)
X = torch.cat([grid_split, theta_split], dim=1)
edge_index, n_edges = self.ball_connectivity(grid_split)
edge_attr = np.zeros((n_edges, 4+self.edge_features*2))
a = theta_split[:, :self.edge_features]
edge_attr[:, :4] = grid_split[edge_index.T].reshape(n_edges, -1)
edge_attr[:, 4:4+self.edge_features] = a[edge_index[0]]
edge_attr[:, 4+self.edge_features: 4+self.edge_features*2] = a[edge_index[1]]
edge_attr = torch.tensor(edge_attr, dtype=torch.float)
split_idx = torch.tensor([x, y], dtype=torch.long).reshape(1, 2)
data = Data(x=X, y=Y_split, edge_index=edge_index, edge_attr=edge_attr, split_idx=split_idx, sample_idx=index_split)
print('train', X.shape, Y_split.shape, edge_index.shape, edge_attr.shape, index_split.shape)
return data
def assemble(self, pred, split_idx, batch_size2, sigma=1):
assert len(pred) == len(split_idx)
assert len(pred) == self.r**2 // batch_size2
out = torch.zeros((self.resolution,self.resolution))
for i in range(len(pred)):
pred_i = pred[i].reshape(batch_size2, self.m)
split_idx_i = split_idx[i]
for j in range(batch_size2):
pred_ij = pred_i[j,:]
x, y = split_idx_i[j]
if self.resolution%2==1:
if x==0:
nx = self.s
else:
nx = self.s-1
if y==0:
ny = self.s
else:
ny = self.s-1
else:
nx = self.s
ny = self.s
# pred_ij = pred_i[idx : idx + nx * ny]
out[x::self.r, y::self.r] = pred_ij[:nx * ny].reshape(nx,ny)
out = gaussian_filter(out, sigma=sigma, mode='constant', cval=0)
out = torch.tensor(out, dtype=torch.float)
return out.reshape(-1,)
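# --- Illustrative usage sketch (added for clarity; not part of the original
# module). Resolution, downsample factor r and radius are assumed values; the
# zero predictions stand in for a model evaluated on each sub-graph.
def _downsample_grid_splitter_demo():
    import torch
    res, r = 16, 4
    xs = torch.linspace(0, 1, res)
    grid = torch.stack(torch.meshgrid(xs, xs), dim=-1).reshape(-1, 2)
    theta = torch.rand(res ** 2, 1)
    splitter = DownsampleGridSplitter(grid, res, r, m=100, radius=0.15)
    data_list = splitter.get_data(theta)              # r*r = 16 sub-graphs
    preds = [torch.zeros(100) for _ in data_list]
    split_idx = [d.split_idx for d in data_list]
    full_pred = splitter.assemble(preds, split_idx, batch_size2=1, sigma=1)
    return full_pred.shape                            # torch.Size([256])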
# generate graph on Torus, with split and assemble
class TorusGridSplitter(object):
def __init__(self, grid, resolution, r, m=100, radius=0.15, T=None, edge_features=1, ):
super(TorusGridSplitter, self).__init__()
self.grid = grid.reshape(resolution, resolution,2)
# self.theta = theta.reshape(resolution, resolution,-1)
# self.y = y.reshape(resolution, resolution,1)
self.resolution = resolution
if resolution%2==1:
self.s = int(((resolution - 1)/r) + 1)
else:
self.s = int(resolution/r)
self.r = r
self.n = resolution**2
self.m = m
self.T = T
self.radius = radius
self.edge_features = edge_features
self.index = torch.tensor(range(self.n), dtype=torch.long).reshape(self.resolution, self.resolution)
def pairwise_difference(self,grid1, grid2):
n = grid1.shape[0]
x1 = grid1[:,0]
y1 = grid1[:,1]
x2 = grid2[:,0]
y2 = grid2[:,1]
X1 = np.tile(x1.reshape(n, 1), [1, n])
X2 = np.tile(x2.reshape(1, n), [n, 1])
X_diff = X1 - X2
Y1 = np.tile(y1.reshape(n, 1), [1, n])
Y2 = np.tile(y2.reshape(1, n), [n, 1])
Y_diff = Y1 - Y2
return X_diff, Y_diff
def torus_connectivity(self, grid):
pwd0 = sklearn.metrics.pairwise_distances(grid, grid)
X_diff0, Y_diff0 = self.pairwise_difference(grid, grid)
        # shifted periodic copies of the grid; clone them so the input grid is
        # not modified in place by the assignments below
        grid1 = grid.clone()
        grid1[:, 0] = grid[:, 0] + 1
        pwd1 = sklearn.metrics.pairwise_distances(grid, grid1)
        X_diff1, Y_diff1 = self.pairwise_difference(grid, grid1)
        grid2 = grid.clone()
        grid2[:, 1] = grid[:, 1] + 1
        pwd2 = sklearn.metrics.pairwise_distances(grid, grid2)
        X_diff2, Y_diff2 = self.pairwise_difference(grid, grid2)
        grid3 = grid.clone()
        grid3[:, :] = grid[:, :] + 1
        pwd3 = sklearn.metrics.pairwise_distances(grid, grid3)
        X_diff3, Y_diff3 = self.pairwise_difference(grid, grid3)
        grid4 = grid.clone()
        grid4[:, 0] = grid[:, 0] + 1
        grid4[:, 1] = grid[:, 1] - 1
        pwd4 = sklearn.metrics.pairwise_distances(grid, grid4)
        X_diff4, Y_diff4 = self.pairwise_difference(grid, grid4)
PWD = np.stack([pwd0,pwd1,pwd2,pwd3,pwd4], axis=2)
X_DIFF = np.stack([X_diff0,X_diff1,X_diff2,X_diff3,X_diff4], axis=2)
Y_DIFF = np.stack([Y_diff0, Y_diff1, Y_diff2, Y_diff3, Y_diff4], axis=2)
pwd = np.min(PWD, axis=2)
pwd_index = np.argmin(PWD, axis=2)
edge_index = np.vstack(np.where(pwd <= self.radius))
pwd_index = pwd_index[np.where(pwd <= self.radius)]
PWD_index = (np.where(pwd <= self.radius)[0], np.where(pwd <= self.radius)[1], pwd_index)
distance = PWD[PWD_index]
X_difference = X_DIFF[PWD_index]
Y_difference = Y_DIFF[PWD_index]
n_edges = edge_index.shape[1]
return torch.tensor(edge_index, dtype=torch.long), n_edges, distance, X_difference, Y_difference
def get_data(self, theta, params=None):
theta_d = theta.shape[1]
theta = theta.reshape(self.resolution, self.resolution, theta_d)
data = []
for x in range(self.r):
for y in range(self.r):
grid_sub = self.grid[x::self.r, y::self.r,:].reshape(-1,2)
theta_sub = theta[x::self.r, y::self.r,:].reshape(-1,theta_d)
perm = torch.randperm(self.n)
m = self.m - grid_sub.shape[0]
idx = perm[:m]
grid_sample = self.grid.reshape(self.n,-1)[idx]
theta_sample = theta.reshape(self.n,-1)[idx]
grid_split = torch.cat([grid_sub, grid_sample],dim=0)
theta_split = torch.cat([theta_sub, theta_sample],dim=0)
X = torch.cat([grid_split,theta_split],dim=1)
edge_index, n_edges, distance, X_difference, Y_difference = self.torus_connectivity(grid_split)
edge_attr = np.zeros((n_edges, 3+self.edge_features*2))
a = theta_split[:, :self.edge_features]
edge_attr[:, 0] = X_difference.reshape(n_edges, )
edge_attr[:, 1] = Y_difference.reshape(n_edges, )
edge_attr[:, 2] = distance.reshape(n_edges, )
edge_attr[:, 3:3 + self.edge_features] = a[edge_index[0]]
                edge_attr[:, 3 + self.edge_features: 3 + self.edge_features * 2] = a[edge_index[1]]
edge_attr = torch.tensor(edge_attr, dtype=torch.float)
split_idx = torch.tensor([x,y],dtype=torch.long).reshape(1,2)
                if params is None:
data.append(Data(x=X, edge_index=edge_index, edge_attr=edge_attr, split_idx=split_idx))
else:
data.append(Data(x=X, edge_index=edge_index, edge_attr=edge_attr, split_idx=split_idx, params=params))
print('test', len(data), X.shape, edge_index.shape, edge_attr.shape)
return data
def sample(self, theta, Y):
theta_d = theta.shape[1]
theta = theta.reshape(self.resolution, self.resolution, theta_d)
Y = Y.reshape(self.resolution, self.resolution)
x = torch.randint(0,self.r,(1,))
y = torch.randint(0,self.r,(1,))
grid_sub = self.grid[x::self.r, y::self.r, :].reshape(-1, 2)
theta_sub = theta[x::self.r, y::self.r, :].reshape(-1, theta_d)
Y_sub = Y[x::self.r, y::self.r].reshape(-1,)
index_sub = self.index[x::self.r, y::self.r].reshape(-1,)
n_sub = Y_sub.shape[0]
if self.m >= n_sub:
m = self.m - n_sub
perm = torch.randperm(self.n)
idx = perm[:m]
grid_sample = self.grid.reshape(self.n, -1)[idx]
theta_sample = theta.reshape(self.n, -1)[idx]
Y_sample = Y.reshape(self.n, )[idx]
grid_split = torch.cat([grid_sub, grid_sample], dim=0)
theta_split = torch.cat([theta_sub, theta_sample], dim=0)
Y_split = torch.cat([Y_sub, Y_sample], dim=0).reshape(-1,)
index_split = torch.cat([index_sub, idx], dim=0).reshape(-1,)
X = torch.cat([grid_split, theta_split], dim=1)
else:
grid_split = grid_sub
theta_split = theta_sub
Y_split = Y_sub.reshape(-1,)
index_split = index_sub.reshape(-1,)
X = torch.cat([grid_split, theta_split], dim=1)
edge_index, n_edges, distance, X_difference, Y_difference = self.torus_connectivity(grid_split)
edge_attr = np.zeros((n_edges, 3+self.edge_features*2))
a = theta_split[:, :self.edge_features]
edge_attr[:, 0] = X_difference.reshape(n_edges, )
edge_attr[:, 1] = Y_difference.reshape(n_edges, )
edge_attr[:, 2] = distance.reshape(n_edges, )
edge_attr[:, 3:3+self.edge_features] = a[edge_index[0]]
        edge_attr[:, 3+self.edge_features: 3+self.edge_features*2] = a[edge_index[1]]
edge_attr = torch.tensor(edge_attr, dtype=torch.float)
split_idx = torch.tensor([x, y], dtype=torch.long).reshape(1, 2)
data = Data(x=X, y=Y_split, edge_index=edge_index, edge_attr=edge_attr, split_idx=split_idx, sample_idx=index_split)
print('train', X.shape, Y_split.shape, edge_index.shape, edge_attr.shape, index_split.shape)
return data
def sampleT(self, theta, Y, params=None):
theta_d = theta.shape[1]
theta = theta.reshape(self.resolution, self.resolution, theta_d)
Y = Y.reshape(self.T, self.resolution, self.resolution)
x = torch.randint(0, self.r, (1,))
y = torch.randint(0, self.r, (1,))
grid_sub = self.grid[x::self.r, y::self.r, :].reshape(-1, 2)
theta_sub = theta[x::self.r, y::self.r, :].reshape(-1, theta_d)
Y_sub = Y[:,x::self.r, y::self.r].reshape(self.T,-1)
index_sub = self.index[x::self.r, y::self.r].reshape(-1, )
n_sub = Y_sub.shape[1]
if self.m >= n_sub:
m = self.m - n_sub
perm = torch.randperm(self.n)
idx = perm[:m]
grid_sample = self.grid.reshape(self.n, -1)[idx]
theta_sample = theta.reshape(self.n, -1)[idx]
Y_sample = Y.reshape(self.T, self.n)[:,idx]
grid_split = torch.cat([grid_sub, grid_sample], dim=0)
theta_split = torch.cat([theta_sub, theta_sample], dim=0)
Y_split = torch.cat([Y_sub, Y_sample], dim=1).reshape(self.T,-1)
index_split = torch.cat([index_sub, idx], dim=0).reshape(-1, )
X = torch.cat([grid_split, theta_split], dim=1)
else:
grid_split = grid_sub
theta_split = theta_sub
Y_split = Y_sub.reshape(self.T, -1)
index_split = index_sub.reshape(-1, )
X = torch.cat([grid_split, theta_split], dim=1)
edge_index, n_edges, distance, X_difference, Y_difference = self.torus_connectivity(grid_split)
edge_attr = np.zeros((n_edges, 3 + self.edge_features * 2))
a = theta_split[:, :self.edge_features]
edge_attr[:, 0] = X_difference.reshape(n_edges, )
edge_attr[:, 1] = Y_difference.reshape(n_edges, )
edge_attr[:, 2] = distance.reshape(n_edges, )
edge_attr[:, 3:3 + self.edge_features] = a[edge_index[0]]
        edge_attr[:, 3 + self.edge_features: 3 + self.edge_features * 2] = a[edge_index[1]]
edge_attr = torch.tensor(edge_attr, dtype=torch.float)
split_idx = torch.tensor([x, y], dtype=torch.long).reshape(1, 2)
        if params is None:
data = Data(x=X, y=Y_split, edge_index=edge_index, edge_attr=edge_attr, split_idx=split_idx,
sample_idx=index_split)
else:
data = Data(x=X, y=Y_split, edge_index=edge_index, edge_attr=edge_attr, split_idx=split_idx,
sample_idx=index_split, params=params)
print('train', X.shape, Y_split.shape, edge_index.shape, edge_attr.shape, index_split.shape)
return data
def assemble(self, pred, split_idx, batch_size2, sigma=1):
assert len(pred) == len(split_idx)
assert len(pred) == self.r**2 // batch_size2
out = torch.zeros((self.resolution,self.resolution))
for i in range(len(pred)):
pred_i = pred[i].reshape(batch_size2, self.m)
split_idx_i = split_idx[i]
for j in range(batch_size2):
pred_ij = pred_i[j,:]
x, y = split_idx_i[j]
if self.resolution%2==1:
if x==0:
nx = self.s
else:
nx = self.s-1
if y==0:
ny = self.s
else:
ny = self.s-1
else:
nx = self.s
ny = self.s
# pred_ij = pred_i[idx : idx + nx * ny]
out[x::self.r, y::self.r] = pred_ij[:nx * ny].reshape(nx,ny)
out = gaussian_filter(out, sigma=sigma, mode='wrap')
out = torch.tensor(out, dtype=torch.float)
return out.reshape(-1,)
def assembleT(self, pred, split_idx, batch_size2, sigma=1):
# pred is a list (batches) of list (time seq)
assert len(pred) == len(split_idx)
assert len(pred[0]) == self.T
assert len(pred) == self.r**2 // batch_size2
out = torch.zeros((self.T, self.resolution,self.resolution))
for t in range(self.T):
for i in range(len(pred)):
pred_i = pred[i][t].reshape(batch_size2, self.m)
split_idx_i = split_idx[i]
for j in range(batch_size2):
pred_ij = pred_i[j,:]
x, y = split_idx_i[j]
if self.resolution%2==1:
if x==0:
nx = self.s
else:
nx = self.s-1
if y==0:
ny = self.s
else:
ny = self.s-1
else:
nx = self.s
ny = self.s
# pred_ij = pred_i[idx : idx + nx * ny]
out[t, x::self.r, y::self.r] = pred_ij[:nx * ny].reshape(nx,ny)
out = gaussian_filter(out, sigma=sigma, mode='wrap')
out = torch.tensor(out, dtype=torch.float)
return out.reshape(self.T,self.n)
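# --- Illustrative usage sketch (added for clarity; not part of the original
# module). Same shape conventions as DownsampleGridSplitter, but the edges wrap
# around the torus; all sizes below are assumed values.
def _torus_grid_splitter_demo():
    import torch
    res, r = 16, 4
    xs = torch.linspace(0, 1, res)
    grid = torch.stack(torch.meshgrid(xs, xs), dim=-1).reshape(-1, 2)
    theta = torch.rand(res ** 2, 1)
    splitter = TorusGridSplitter(grid, res, r, m=100, radius=0.15)
    data_list = splitter.get_data(theta)              # r*r = 16 sub-graphs
    preds = [torch.zeros(100) for _ in data_list]
    split_idx = [d.split_idx for d in data_list]
    full_pred = splitter.assemble(preds, split_idx, batch_size2=1, sigma=1)
    return full_pred.shape                            # torch.Size([256])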
def downsample(data, grid_size, l):
data = data.reshape(-1, grid_size, grid_size)
data = data[:, ::l, ::l]
data = data.reshape(-1, (grid_size // l) ** 2)
return data
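# Illustrative only (added): stride-2 subsampling of one flattened 8x8 field.
def _downsample_demo():
    import numpy as np
    field = np.arange(64).reshape(1, 64)
    return downsample(field, grid_size=8, l=2).shape   # (1, 16)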
def simple_grid(n_x, n_y):
xs = np.linspace(0.0, 1.0, n_x)
ys = np.linspace(0.0, 1.0, n_y)
# xs = np.array(range(n_x))
# ys = np.array(range(n_y))
grid = np.vstack([xx.ravel() for xx in np.meshgrid(xs, ys)]).T
edge_index = []
edge_attr = []
for y in range(n_y):
for x in range(n_x):
i = y * n_x + x
if (x != n_x - 1):
edge_index.append((i, i + 1))
edge_attr.append((1, 0, 0))
edge_index.append((i + 1, i))
edge_attr.append((-1, 0, 0))
if (y != n_y - 1):
edge_index.append((i, i + n_x))
edge_attr.append((0, 1, 0))
edge_index.append((i + n_x, i))
edge_attr.append((0, -1, 0))
X = torch.tensor(grid, dtype=torch.float)
# Exact = torch.tensor(Exact, dtype=torch.float).view(-1)
edge_index = torch.tensor(edge_index, dtype=torch.long).transpose(0, 1)
edge_attr = torch.tensor(edge_attr, dtype=torch.float)
return X, edge_index, edge_attr
def grid_edge(n_x, n_y, a=None):
    if a is not None:
a = a.reshape(n_x, n_y)
xs = np.linspace(0.0, 1.0, n_x)
ys = np.linspace(0.0, 1.0, n_y)
# xs = np.array(range(n_x))
# ys = np.array(range(n_y))
grid = np.vstack([xx.ravel() for xx in np.meshgrid(xs, ys)]).T
edge_index = []
edge_attr = []
for y in range(n_y):
for x in range(n_x):
i = y * n_x + x
if (x != n_x - 1):
d = 1 / n_x
edge_index.append((i, i + 1))
edge_index.append((i + 1, i ))
                if a is not None:
a1 = a[x, y]
a2 = a[x + 1, y]
edge_attr.append((x / n_x, y / n_y, a1, a2))
edge_attr.append((y/n_y, x/n_x, a2, a1))
if (y != n_y - 1):
d = 1 / n_y
edge_index.append((i, i + n_x))
edge_index.append((i + n_x, i))
                if a is not None:
a1 = a[x, y]
a2 = a[x, y+1]
edge_attr.append((x/n_x, y/n_y, a1, a2))
edge_attr.append((y/n_y, x/n_x, a2, a1))
X = torch.tensor(grid, dtype=torch.float)
# Exact = torch.tensor(Exact, dtype=torch.float).view(-1)
edge_index = torch.tensor(edge_index, dtype=torch.long).transpose(0, 1)
edge_attr = torch.tensor(edge_attr, dtype=torch.float)
return X, edge_index, edge_attr
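# --- Illustrative usage sketch (added for clarity; not part of the original
# module): build a small uniform grid graph with and without nodal values.
# The 4x4 size and the random field are assumed values.
def _grid_edge_demo():
    import numpy as np
    X0, ei0, ea0 = simple_grid(4, 4)        # edge attrs are (dx, dy, level) flags
    a = np.random.rand(4 * 4)               # nodal coefficient field
    X1, ei1, ea1 = grid_edge(4, 4, a)       # edge attrs carry endpoint coords and values
    return X0.shape, ea0.shape, X1.shape, ea1.shape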
def grid_edge1d(n_x, a=None):
    if a is not None:
a = a.reshape(n_x)
xs = np.linspace(0.0, 1.0, n_x)
# xs = np.array(range(n_x))
# ys = np.array(range(n_y))
edge_index = []
edge_attr = []
for x in range(n_x):
i = x
i1 = (x+1)%n_x
edge_index.append((i, i1))
edge_index.append((i1, i ))
i2 = (x + 2) % n_x
edge_index.append((i, i2))
edge_index.append((i2, i ))
        if a is not None:
            a1 = a[x]
            a2 = a[(x + 1) % n_x]  # periodic neighbour, matching the wrapped edge above
edge_attr.append((x / n_x, a1, a2))
edge_attr.append((x / n_x, a2, a1))
X = torch.tensor(xs, dtype=torch.float)
# Exact = torch.tensor(Exact, dtype=torch.float).view(-1)
edge_index = torch.tensor(edge_index, dtype=torch.long).transpose(0, 1)
edge_attr = torch.tensor(edge_attr, dtype=torch.float)
return X, edge_index, edge_attr
def grid_edge_aug(n_x, n_y, a):
a = a.reshape(n_x, n_y)
xs = np.linspace(0.0, 1.0, n_x)
ys = np.linspace(0.0, 1.0, n_y)
# xs = np.array(range(n_x))
# ys = np.array(range(n_y))
grid = np.vstack([xx.ravel() for xx in np.meshgrid(xs, ys)]).T
edge_index = []
edge_attr = []
for y in range(n_y):
for x in range(n_x):
i = y * n_x + x
if (x != n_x - 1):
d = 1 / n_x
a1 = a[x, y]
a2 = a[x + 1, y]
edge_index.append((i, i + 1))
edge_attr.append((d, a1, a2, 1 / np.sqrt(np.abs(a1 * a2)),
np.exp(-(d) ** 2), np.exp(-(d / 0.1) ** 2), np.exp(-(d / 0.01) ** 2)))
edge_index.append((i + 1, i))
edge_attr.append((d, a2, a1, 1 / np.sqrt(np.abs(a1 * a2)),
np.exp(-(d) ** 2), np.exp(-(d / 0.1) ** 2), np.exp(-(d / 0.01) ** 2)))
if (y != n_y - 1):
d = 1 / n_y
a1 = a[x, y]
a2 = a[x, y+1]
edge_index.append((i, i + n_x))
edge_attr.append((d, a1, a2, 1 / np.sqrt(np.abs(a1 * a2)),
np.exp(-(d) ** 2), np.exp(-(d / 0.1) ** 2), np.exp(-(d / 0.01) ** 2)))
edge_index.append((i + n_x, i))
edge_attr.append((d, a2, a1, 1 / np.sqrt(np.abs(a1 * a2)),
np.exp(-(d) ** 2), np.exp(-(d / 0.1) ** 2), np.exp(-(d / 0.01) ** 2)))
X = torch.tensor(grid, dtype=torch.float)
# Exact = torch.tensor(Exact, dtype=torch.float).view(-1)
edge_index = torch.tensor(edge_index, dtype=torch.long).transpose(0, 1)
edge_attr = torch.tensor(edge_attr, dtype=torch.float)
return X, edge_index, edge_attr
def grid_edge_aug_full(n_x, n_y, r, a):
n = n_x * n_y
xs = np.linspace(0.0, 1.0, n_x)
ys = np.linspace(0.0, 1.0, n_y)
grid = np.vstack([xx.ravel() for xx in np.meshgrid(xs, ys)]).T
edge_index = []
edge_attr = []
for i1 in range(n):
x1 = grid[i1]
for i2 in range(n):
x2 = grid[i2]
d = np.linalg.norm(x1-x2)
if(d<=r):
a1 = a[i1]
a2 = a[i2]
edge_index.append((i1, i2))
edge_attr.append((d, a1, a2, 1 / np.sqrt(np.abs(a1 * a2)),
np.exp(-(d) ** 2), np.exp(-(d / 0.1) ** 2), np.exp(-(d / 0.01) ** 2)))
edge_index.append((i2, i1))
edge_attr.append((d, a2, a1, 1 / np.sqrt(np.abs(a1 * a2)),
np.exp(-(d) ** 2), np.exp(-(d / 0.1) ** 2), np.exp(-(d / 0.01) ** 2)))
X = torch.tensor(grid, dtype=torch.float)
# Exact = torch.tensor(Exact, dtype=torch.float).view(-1)
edge_index = torch.tensor(edge_index, dtype=torch.long).transpose(0, 1)
edge_attr = torch.tensor(edge_attr, dtype=torch.float)
return X, edge_index, edge_attr
def multi_grid(depth, n_x, n_y, grid, params):
edge_index_global = []
edge_attr_global = []
X_global = []
num_nodes = 0
# build connected graph
for l in range(depth):
h_x_l = n_x // (2 ** l)
h_y_l = n_y // (2 ** l)
n_l = h_x_l * h_y_l
a = downsample(params, n_x, (2 ** l))
        if grid == 'grid':
            X, edge_index_inner, edge_attr_inner = simple_grid(h_y_l, h_x_l)
        elif grid == 'grid_edge':
            X, edge_index_inner, edge_attr_inner = grid_edge(h_y_l, h_x_l, a)
        elif grid == 'grid_edge_aug':
            X, edge_index_inner, edge_attr_inner = grid_edge_aug(h_y_l, h_x_l, a)
# update index
edge_index_inner = edge_index_inner + num_nodes
edge_index_global.append(edge_index_inner)
edge_attr_global.append(edge_attr_inner)
# construct X
# if (is_high):
# X = torch.cat([torch.zeros(n_l, l * 2), X, torch.zeros(n_l, (depth - 1 - l) * 2)], dim=1)
# else:
# X_l = torch.tensor(l, dtype=torch.float).repeat(n_l, 1)
# X = torch.cat([X, X_l], dim=1)
X_global.append(X)
# construct edges
index1 = torch.tensor(range(n_l), dtype=torch.long)
index1 = index1 + num_nodes
num_nodes += n_l
# #construct inter-graph edge
if l != depth-1:
index2 = np.array(range(n_l//4)).reshape(h_x_l//2, h_y_l//2) # torch.repeat is different from numpy
index2 = index2.repeat(2, axis = 0).repeat(2, axis = 1)
index2 = torch.tensor(index2).reshape(-1)
index2 = index2 + num_nodes
index2 = torch.tensor(index2, dtype=torch.long)
edge_index_inter1 = torch.cat([index1,index2], dim=-1).reshape(2,-1)
edge_index_inter2 = torch.cat([index2,index1], dim=-1).reshape(2,-1)
edge_index_inter = torch.cat([edge_index_inter1, edge_index_inter2], dim=1)
edge_attr_inter1 = torch.tensor((0, 0, 1), dtype=torch.float).repeat(n_l, 1)
edge_attr_inter2 = torch.tensor((0, 0,-1), dtype=torch.float).repeat(n_l, 1)
edge_attr_inter = torch.cat([edge_attr_inter1, edge_attr_inter2], dim=0)
edge_index_global.append(edge_index_inter)
edge_attr_global.append(edge_attr_inter)
X = torch.cat(X_global, dim=0)
edge_index = torch.cat(edge_index_global, dim=1)
edge_attr = torch.cat(edge_attr_global, dim=0)
mask_index = torch.tensor(range(n_x * n_y), dtype=torch.long)
# print('create multi_grid with size:', X.shape, edge_index.shape, edge_attr.shape, mask_index.shape)
return (X, edge_index, edge_attr, mask_index, num_nodes)
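# --- Illustrative usage sketch (added for clarity; not part of the original
# module): a 3-level hierarchy on an 8x8 grid using the plain 'grid'
# connectivity; the zero parameter field is a placeholder.
def _multi_grid_demo():
    import numpy as np
    params = np.zeros(8 * 8)
    X, edge_index, edge_attr, mask_index, num_nodes = multi_grid(
        depth=3, n_x=8, n_y=8, grid='grid', params=params)
    return X.shape, edge_index.shape, edge_attr.shape, num_nodes   # 84 nodes in total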
def multi_pole_grid1d(theta, theta_d, s, N, is_periodic=False):
grid_list = []
theta_list = []
edge_index_list = []
edge_index_list_cuda = []
level = int(np.log2(s) - 1)
print(level)
for l in range(1, level+1):
r_l = 2 ** (l - 1)
s_l = s // r_l
n_l = s_l
print('level',s_l,r_l,n_l)
xs = np.linspace(0.0, 1.0, s_l)
grid_l = xs
grid_l = torch.tensor(grid_l, dtype=torch.float)
print(grid_l.shape)
grid_list.append(grid_l)
theta_l = theta[:,:,:theta_d].reshape(N, s, theta_d)
theta_l = theta_l[:, ::r_l, :]
theta_l = theta_l.reshape(N, n_l, theta_d)
theta_l = torch.tensor(theta_l, dtype=torch.float)
print(theta_l.shape)
theta_list.append(theta_l)
# for the finest level, we construct the nearest neighbors (NN)
if l==1:
edge_index_nn = []
for x_i in range(s_l):
for x in (-1,1):
x_j = x_i + x
if is_periodic:
x_j = x_j % s_l
                    # if x_j is a valid node
if (x_j in range(s_l)):
edge_index_nn.append([x_i,x_j])
edge_index_nn = torch.tensor(edge_index_nn, dtype=torch.long)
edge_index_nn = edge_index_nn.transpose(0,1)
edge_index_list.append(edge_index_nn)
edge_index_list_cuda.append(edge_index_nn.cuda())
print('edge', edge_index_nn.shape)
        # we then compute the interactive neighbors -- nodes whose parents are NN but which are not nearest neighbors themselves
edge_index_inter = []
for x_i in range(s_l):
for x in range(-3,4):
x_j = x_i + x
                # if x_j is a valid node
if is_periodic:
x_j = x_j % s_l
if (x_j in range(s_l)):
                    # if x_i and x_j are not nearest neighbors
if abs(x)>=2:
# if their parents are NN
if abs(x_i//2 - x_j//2)%(s_l//2) <=1:
edge_index_inter.append([x_i,x_j])
edge_index_inter = torch.tensor(edge_index_inter, dtype=torch.long)
edge_index_inter = edge_index_inter.transpose(0,1)
edge_index_list.append(edge_index_inter)
edge_index_list_cuda.append(edge_index_inter.cuda())
print('edge_inter', edge_index_inter.shape)
print(len(grid_list),len(edge_index_list),len(theta_list))
return grid_list, theta_list, edge_index_list, edge_index_list_cuda
def get_edge_attr(grid, theta, edge_index):
n_edges = edge_index.shape[1]
edge_attr = np.zeros((n_edges, 4))
edge_attr[:, 0:2] = grid[edge_index.transpose(0,1)].reshape((n_edges, -1))
edge_attr[:, 2] = theta[edge_index[0]]
edge_attr[:, 3] = theta[edge_index[1]]
return torch.tensor(edge_attr, dtype=torch.float)
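# Illustrative only (added): get_edge_attr expects 1-D node positions (as used
# by multi_pole_grid1d above); the sizes below are assumed values.
def _get_edge_attr_demo():
    import torch
    grid = torch.linspace(0, 1, 16)                # 1-D node positions
    theta = torch.rand(16)
    edge_index = torch.randint(0, 16, (2, 40))
    return get_edge_attr(grid, theta, edge_index).shape   # torch.Size([40, 4])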
| 2.109375 | 2 |
bitbots_motion/bitbots_hcm/scripts/test_subscriber.py | MosHumanoid/bitbots_thmos_meta | 3 | 12793515 | <reponame>MosHumanoid/bitbots_thmos_meta
#!/usr/bin/env python3
import rospy
import time
from sensor_msgs.msg import Imu
class SubTest():
def __init__(self):
rospy.init_node("test_sub")
self.arrt = []
self.arrn = []
        self.sum = 0
        self.count = 0
self.max = 0
self.sub = rospy.Subscriber("test", Imu, self.cb, queue_size=1)
self.f = open("latencies", 'w')
while not rospy.is_shutdown():
time.sleep(1)
if self.count !=0:
print("mean: " + str((self.sum/self.count)*1000))
print("max: " + str(self.max*1000))
i = 0
for n in self.arrn:
self.f.write(str(n) + "," + str(self.arrt[i]*1000) + "\n")
i+=1
self.f.close()
def cb(self, msg:Imu):
diff = rospy.get_time() - msg.header.stamp.to_sec()
self.arrt.append(diff)
self.arrn.append(msg.header.seq)
self.sum += diff
self.count +=1
self.max = max(self.max, diff)
if __name__ == "__main__":
SubTest()
| 2.34375 | 2 |
test_hello.py | bkwin66/python_testx | 0 | 12793516 | print("Hello World")
print("Print something else")
for i in range(10):
    print(i)
| 3.390625 | 3 |
pycqed/measurement/qcodes_QtPlot_colors_override.py | nuttamas/PycQED_py3 | 60 | 12793517 | """
[2020-02-03] Modified version of the original qcodes.plots.colors
Modified by <NAME> for Measurement Control
This module makes available all the color maps from qcodes, the ones from the
color bar context menu of pyqtgraph, the circular colormap created by me
(Victo), and the reversed versions of all of them.
Feel free to add new colors.
See "make_qcodes_anglemap45" and "make_anglemap45_colorlist" below to get you
started.
"""
from pycqed.analysis.tools.plotting import make_anglemap45_colorlist
# default colors and colorscales, taken from plotly
color_cycle = [
"#1f77b4", # muted blue
"#ff7f0e", # safety orange
"#2ca02c", # cooked asparagus green
"#d62728", # brick red
"#9467bd", # muted purple
"#8c564b", # chestnut brown
"#e377c2", # raspberry yogurt pink
"#7f7f7f", # middle gray
"#bcbd22", # curry yellow-green
"#17becf", # blue-teal
]
colorscales_raw = {
"Greys": [[0, "rgb(0,0,0)"], [1, "rgb(255,255,255)"]],
"YlGnBu": [
[0, "rgb(8, 29, 88)"],
[0.125, "rgb(37, 52, 148)"],
[0.25, "rgb(34, 94, 168)"],
[0.375, "rgb(29, 145, 192)"],
[0.5, "rgb(65, 182, 196)"],
[0.625, "rgb(127, 205, 187)"],
[0.75, "rgb(199, 233, 180)"],
[0.875, "rgb(237, 248, 217)"],
[1, "rgb(255, 255, 217)"],
],
"Greens": [
[0, "rgb(0, 68, 27)"],
[0.125, "rgb(0, 109, 44)"],
[0.25, "rgb(35, 139, 69)"],
[0.375, "rgb(65, 171, 93)"],
[0.5, "rgb(116, 196, 118)"],
[0.625, "rgb(161, 217, 155)"],
[0.75, "rgb(199, 233, 192)"],
[0.875, "rgb(229, 245, 224)"],
[1, "rgb(247, 252, 245)"],
],
"YlOrRd": [
[0, "rgb(128, 0, 38)"],
[0.125, "rgb(189, 0, 38)"],
[0.25, "rgb(227, 26, 28)"],
[0.375, "rgb(252, 78, 42)"],
[0.5, "rgb(253, 141, 60)"],
[0.625, "rgb(254, 178, 76)"],
[0.75, "rgb(254, 217, 118)"],
[0.875, "rgb(255, 237, 160)"],
[1, "rgb(255, 255, 204)"],
],
"bluered": [[0, "rgb(0,0,255)"], [1, "rgb(255,0,0)"]],
# modified RdBu based on
# www.sandia.gov/~kmorel/documents/ColorMaps/ColorMapsExpanded.pdf
"RdBu": [
[0, "rgb(5, 10, 172)"],
[0.35, "rgb(106, 137, 247)"],
[0.5, "rgb(190,190,190)"],
[0.6, "rgb(220, 170, 132)"],
[0.7, "rgb(230, 145, 90)"],
[1, "rgb(178, 10, 28)"],
],
# Scale for non-negative numeric values
"Reds": [
[0, "rgb(220, 220, 220)"],
[0.2, "rgb(245, 195, 157)"],
[0.4, "rgb(245, 160, 105)"],
[1, "rgb(178, 10, 28)"],
],
# Scale for non-positive numeric values
"Blues": [
[0, "rgb(5, 10, 172)"],
[0.35, "rgb(40, 60, 190)"],
[0.5, "rgb(70, 100, 245)"],
[0.6, "rgb(90, 120, 245)"],
[0.7, "rgb(106, 137, 247)"],
[1, "rgb(220, 220, 220)"],
],
"picnic": [
[0, "rgb(0,0,255)"],
[0.1, "rgb(51,153,255)"],
[0.2, "rgb(102,204,255)"],
[0.3, "rgb(153,204,255)"],
[0.4, "rgb(204,204,255)"],
[0.5, "rgb(255,255,255)"],
[0.6, "rgb(255,204,255)"],
[0.7, "rgb(255,153,255)"],
[0.8, "rgb(255,102,204)"],
[0.9, "rgb(255,102,102)"],
[1, "rgb(255,0,0)"],
],
"rainbow": [
[0, "rgb(150,0,90)"],
[0.125, "rgb(0, 0, 200)"],
[0.25, "rgb(0, 25, 255)"],
[0.375, "rgb(0, 152, 255)"],
[0.5, "rgb(44, 255, 150)"],
[0.625, "rgb(151, 255, 0)"],
[0.75, "rgb(255, 234, 0)"],
[0.875, "rgb(255, 111, 0)"],
[1, "rgb(255, 0, 0)"],
],
"portland": [
[0, "rgb(12,51,131)"],
[0.25, "rgb(10,136,186)"],
[0.5, "rgb(242,211,56)"],
[0.75, "rgb(242,143,56)"],
[1, "rgb(217,30,30)"],
],
"jet": [
[0, "rgb(0,0,131)"],
[0.125, "rgb(0,60,170)"],
[0.375, "rgb(5,255,255)"],
[0.625, "rgb(255,255,0)"],
[0.875, "rgb(250,0,0)"],
[1, "rgb(128,0,0)"],
],
"hot": [
[0, "rgb(0,0,0)"],
[0.3, "rgb(230,0,0)"],
[0.6, "rgb(255,210,0)"],
[1, "rgb(255,255,255)"],
],
"blackbody": [
[0, "rgb(0,0,0)"],
[0.2, "rgb(230,0,0)"],
[0.4, "rgb(230,210,0)"],
[0.7, "rgb(255,255,255)"],
[1, "rgb(160,200,255)"],
],
"earth": [
[0, "rgb(0,0,130)"],
[0.1, "rgb(0,180,180)"],
[0.2, "rgb(40,210,40)"],
[0.4, "rgb(230,230,50)"],
[0.6, "rgb(120,70,20)"],
[1, "rgb(255,255,255)"],
],
"electric": [
[0, "rgb(0,0,0)"],
[0.15, "rgb(30,0,100)"],
[0.4, "rgb(120,0,100)"],
[0.6, "rgb(160,90,0)"],
[0.8, "rgb(230,200,0)"],
[1, "rgb(255,250,220)"],
],
"viridis": [
[0, "#440154"],
[0.06274509803921569, "#48186a"],
[0.12549019607843137, "#472d7b"],
[0.18823529411764706, "#424086"],
[0.25098039215686274, "#3b528b"],
[0.3137254901960784, "#33638d"],
[0.3764705882352941, "#2c728e"],
[0.4392156862745098, "#26828e"],
[0.5019607843137255, "#21918c"],
[0.5647058823529412, "#1fa088"],
[0.6274509803921569, "#28ae80"],
[0.6901960784313725, "#3fbc73"],
[0.7529411764705882, "#5ec962"],
[0.8156862745098039, "#84d44b"],
[0.8784313725490196, "#addc30"],
[0.9411764705882353, "#d8e219"],
[1, "#fde725"],
],
}
# Extracted https://github.com/pyqtgraph/pyqtgraph/blob/develop/pyqtgraph/graphicsItems/GradientEditorItem.py
Gradients = {
"thermal": [
(0.3333, (185, 0, 0, 255)),
(0.6666, (255, 220, 0, 255)),
(1, (255, 255, 255, 255)),
(0, (0, 0, 0, 255)),
],
"flame": [
(0.2, (7, 0, 220, 255)),
(0.5, (236, 0, 134, 255)),
(0.8, (246, 246, 0, 255)),
(1.0, (255, 255, 255, 255)),
(0.0, (0, 0, 0, 255)),
],
"yellowy": [
(0.0, (0, 0, 0, 255)),
(0.2328863796753704, (32, 0, 129, 255)),
(0.8362738179251941, (255, 255, 0, 255)),
(0.5257586450247, (115, 15, 255, 255)),
(1.0, (255, 255, 255, 255)),
],
"bipolar": [
(0.0, (0, 255, 255, 255)),
(1.0, (255, 255, 0, 255)),
(0.5, (0, 0, 0, 255)),
(0.25, (0, 0, 255, 255)),
(0.75, (255, 0, 0, 255)),
],
"spectrum": [
(1.0, (255, 0, 255, 255)),
(0.0, (255, 0, 0, 255)),
], # this is a hsv, didn't patch qcodes to allow the specification of that part...
"cyclic": [
(0.0, (255, 0, 4, 255)),
(1.0, (255, 0, 0, 255)),
], # this is a hsv, didn't patch qcodes to allow the specification of that part...
# "greyclip": [
# (0.0, (0, 0, 0, 255)),
# (0.99, (255, 255, 255, 255)),
# (1.0, (255, 0, 0, 255)),
# ],
"grey": [(0.0, (0, 0, 0, 255)), (1.0, (255, 255, 255, 255))],
# Perceptually uniform sequential colormaps from Matplotlib 2.0
"viridis": [
(0.0, (68, 1, 84, 255)),
(0.25, (58, 82, 139, 255)),
(0.5, (32, 144, 140, 255)),
(0.75, (94, 201, 97, 255)),
(1.0, (253, 231, 36, 255)),
],
"inferno": [
(0.0, (0, 0, 3, 255)),
(0.25, (87, 15, 109, 255)),
(0.5, (187, 55, 84, 255)),
(0.75, (249, 142, 8, 255)),
(1.0, (252, 254, 164, 255)),
],
"plasma": [
(0.0, (12, 7, 134, 255)),
(0.25, (126, 3, 167, 255)),
(0.5, (203, 71, 119, 255)),
(0.75, (248, 149, 64, 255)),
(1.0, (239, 248, 33, 255)),
],
"magma": [
(0.0, (0, 0, 3, 255)),
(0.25, (80, 18, 123, 255)),
(0.5, (182, 54, 121, 255)),
(0.75, (251, 136, 97, 255)),
(1.0, (251, 252, 191, 255)),
],
}
def make_qcodes_anglemap45():
anglemap_colorlist = make_anglemap45_colorlist(N=9, use_hpl=False)
len_colorlist = len(anglemap_colorlist)
color_scale = [
[i / (len_colorlist - 1), "rgb" + repr(tuple((int(x * 255) for x in col)))]
for i, col in enumerate(anglemap_colorlist)
]
return color_scale
qcodes_anglemap45 = make_qcodes_anglemap45()
colorscales_raw["anglemap45"] = qcodes_anglemap45
def make_rgba(colorscale):
return [(v, one_rgba(c)) for v, c in colorscale]
def one_rgba(c):
"""
convert a single color value to (r, g, b, a)
input can be an rgb string 'rgb(r,g,b)', '#rrggbb'
if we decide we want more we can make more, but for now this is just
to convert plotly colorscales to pyqtgraph tuples
"""
if c[0] == "#" and len(c) == 7:
return (int(c[1:3], 16), int(c[3:5], 16), int(c[5:7], 16), 255)
if c[:4] == "rgb(":
return tuple(map(int, c[4:-1].split(","))) + (255,)
raise ValueError("one_rgba only supports rgb(r,g,b) and #rrggbb colors")
colorscales = {}
for scale_name, scale in colorscales_raw.items():
colorscales[scale_name] = make_rgba(scale)
for scale_name, scale in Gradients.items():
colorscales[scale_name] = scale
for name, scale in list(colorscales.items()):
last_idx = len(scale) - 1
reversed_scale = [
(scale[last_idx - i][0], color[1]) for i, color in enumerate(scale)
]
colorscales[name + "_reversed"] = reversed_scale
# Generate also all scales with cliping at green
for name, scale in list(colorscales.items()):
clip_percent = 0.03
clip_color = (0, 255, 0, 255)
scale_low = list(scale)
scale_low.insert(1, scale[0])
scale_low[0] = (0.0, clip_color)
if scale[1][0] < clip_percent:
scale_low[1] = ((scale[1][0] + scale[0][0]) / 2, scale_low[1][1])
else:
scale_low[1] = (clip_percent, scale_low[1][1])
colorscales[name + "_clip_low"] = scale_low
scale_high = list(scale)
scale_high.insert(-1, scale[-1])
scale_high[-1] = (1.0, clip_color)
if scale[-2][0] > 1 - clip_percent:
scale_high[-2] = ((scale[-1][0] + scale[-2][0]) / 2, scale_high[-2][1])
else:
scale_high[-2] = (1 - clip_percent, scale_high[-2][1])
colorscales[name + "_clip_high"] = scale_high
| 2.0625 | 2 |
backend/src/baserow/config/asgi.py | ashishdhngr/baserow | 839 | 12793518 | import django
from channels.routing import ProtocolTypeRouter
from baserow.ws.routers import websocket_router
from django.core.asgi import get_asgi_application
django.setup()
django_asgi_app = get_asgi_application()
application = ProtocolTypeRouter(
{"http": django_asgi_app, "websocket": websocket_router}
)
| 1.671875 | 2 |
source/refresh-ta-check-lambda.py | awslabs/aws-trusted-advisor-explorer | 14 | 12793519 | ######################################################################################################################
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Apache License Version 2.0 (the "License"). You may not use this file except in compliance #
# with the License. A copy of the License is located at #
# #
# http://www.apache.org/licenses/ #
# #
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES #
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions #
# and limitations under the License. #
######################################################################################################################
import re,boto3,logging,os
from datetime import date
from botocore.exceptions import ClientError
class AWSTrustedAdvisorExplorerGenericException(Exception): pass
logger = logging.getLogger()
if "LOG_LEVEL" in os.environ:
numeric_level = getattr(logging, os.environ['LOG_LEVEL'].upper(), None)
if not isinstance(numeric_level, int):
        raise ValueError('Invalid log level: %s' % os.environ['LOG_LEVEL'])
logger.setLevel(level=numeric_level)
def sanitize_json(x):
d = x.copy()
if os.environ['MASK_PII'].lower() == 'true':
for k, v in d.items():
if 'AccountId' in k:
d[k] = sanitize_string(v)
if 'AccountName' in k:
d[k] = v[:3]+'-MASKED-'+v[-3:]
if 'AccountEmail' in k:
d[k] = v[:3]+'-MASKED-'+v[-3:]
return d
def sanitize_string(x):
y = str(x)
if os.environ['MASK_PII'].lower() == 'true':
pattern=re.compile('\d{12}')
y = re.sub(pattern,lambda match: ((match.group()[1])+'XXXXXXX'+(match.group()[-4:])), y)
return y
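# Illustrative only (added): with MASK_PII enabled, 12-digit account ids keep
# the second digit and the last four digits; the ARN below is a made-up value.
def _sanitize_string_example():
    os.environ['MASK_PII'] = 'true'
    return sanitize_string('arn:aws:iam::123456789012:role/Example')
    # -> 'arn:aws:iam::2XXXXXXX9012:role/Example'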
def refresh_trusted_advisor_checks(supportClient,checkId):
logger.info('Refreshing Trusted Advisor Check:'+checkId)
response = supportClient.refresh_trusted_advisor_check(
checkId=checkId
)
logger.info(sanitize_json(response))
return response
def checkAssumeRoleFailure(error):
if "(AccessDenied) when calling the AssumeRole operation" in error:
pattern=re.compile('.*iam::(\d{12}):.*$')
match=pattern.match(error)
logger.info('Assume Role Error for Account:'+match.group(1))
if match != None:
key_name='Logs/AssumeRoleFailure/'+ str(date.today().year)+ '/'+str(date.today().month)+'/'+str(date.today().day)+'/'+str(match.group(1))+'.log'
client = boto3.client('s3')
client.put_object(ACL='bucket-owner-full-control',StorageClass='STANDARD',Body=error, Bucket=os.environ['S3BucketName'],Key=key_name)
return
#Assume Role in Child Account
def assumeRole(accountId):
logger.info('Variables passed to assumeRole(): '+sanitize_string(accountId))
roleArn="arn:aws:iam::"+str(accountId)+":role/"+os.environ['IAMRoleName']
#STS assume role call
stsClient = boto3.client('sts')
roleCredentials = stsClient.assume_role(RoleArn=roleArn, RoleSessionName="AWSTrustedAdvisorExplorerAssumeRole")
return roleCredentials
def lambda_handler(event, context):
try:
logger.info(sanitize_json(event))
logger.info("Assume Role in child account")
roleCredentials=assumeRole(event['AccountId'])
logger.info("Create boto3 support client using the temporary credentials")
supportClient=boto3.client("support",region_name="us-east-1",
aws_access_key_id = roleCredentials['Credentials']['AccessKeyId'],
aws_secret_access_key =
roleCredentials['Credentials']['SecretAccessKey'],
aws_session_token=roleCredentials['Credentials']['SessionToken'])
response = refresh_trusted_advisor_checks(
supportClient, event['CheckId'])
logger.info("Append the Refresh Status '"+response['status']['status']+"' to response." +
" This will be consumed by downstream Lambda")
event["RefreshStatus"] = response['status']['status']
return event
except ClientError as e:
checkAssumeRoleFailure(str(e))
e=sanitize_string(e)
logger.error("Unexpected client error %s" % e)
raise AWSTrustedAdvisorExplorerGenericException(e)
except Exception as f:
checkAssumeRoleFailure(str(f))
f=sanitize_string(f)
logger.error("Unexpected exception: %s" % f)
raise AWSTrustedAdvisorExplorerGenericException(f) | 1.828125 | 2 |
Homework/ArrayList.py | Frumka/python_developer | 0 | 12793520 | <reponame>Frumka/python_developer
from array import array, ArrayType
from typing import TypeVar, Iterable
from copy import deepcopy
T = TypeVar("T")
class ArrayList(object):
class Iterator(object):
__data: ArrayType
__index: int
def __init__(self, data: ArrayType):
self.__data = data
self.__index = -1
def __iter__(self) -> 'ArrayList.Iterator':
return self
def __next__(self) -> T:
if self.__index > len(self.__data) - 2:
raise StopIteration()
self.__index += 1
return self.__data[self.__index]
class ReverseIterator(object):
__data: ArrayType
__index: int
def __init__(self, data: ArrayType):
self.__data = data
self.__index = len(data)
def __iter__(self) -> 'ArrayList.Iterator':
return self
def __next__(self):
if self.__index == 0:
raise StopIteration()
self.__index -= 1
return self.__data[self.__index]
__array: ArrayType
def __init__(self, type_char: str, *args) -> None:
self.__array = array(type_char, args)
def __str__(self) -> str:
return self.__array.__str__()
def __getitem__(self, index: int) -> T:
return self.__array[index]
def __len__(self) -> int:
return len(self.__array)
def __contains__(self, target: T) -> bool:
for item in self.__array:
if target == item:
return True
return False
def __add__(self, other: "ArrayList") -> "ArrayList":
copy = deepcopy(self)
copy += other
return copy
def __iadd__(self, other: "ArrayList") -> "ArrayList":
self.__array += other.__array
return self
def __lt__(self, other: "ArrayList") -> bool:
if len(self.__array) < len(other.__array):
return True
elif len(self.__array) > len(other.__array):
return False
else:
not_equal = False
for i, elem in enumerate(self.__array):
if elem > other.__array[i]:
return False
if elem < other.__array[i]:
not_equal = True
return not_equal
def __eq__(self, other: "ArrayList") -> bool:
if len(self.__array) != len(other.__array):
return False
else:
for i, elem in enumerate(self.__array):
if elem != other.__array[i]:
return False
return True
def __le__(self, other: "ArrayList") -> bool:
if self < other:
return True
elif self == other:
return True
return False
def __ne__(self, other: "ArrayList") -> bool:
return not self == other
def __gt__(self, other: "ArrayList") -> bool:
return not self <= other
def __ge__(self, other: "ArrayList") -> bool:
return not self < other
def __mul__(self, mult: int) -> "ArrayList":
result = deepcopy(self)
result *= mult
return result
def __imul__(self, mult: int) -> "ArrayList":
old_len = len(self.__array)
new_len = old_len * mult
new_array = array(self.__array.typecode, [0 for _ in range(new_len)])
for i in range(new_len):
new_array[i] = self.__array[i % old_len]
self.__array = new_array
return self
def append(self, item: T) -> None:
self.__array += array(self.__array.typecode, [item])
def count(self, item: T) -> int:
count = 0
for elem in self.__array:
if elem == item:
count += 1
return count
def index(self, target: T, start=0, stop=None) -> int:
stop = stop if stop is not None else len(self.__array)
for i in range(start, stop):
if self.__array[i] == target:
return i
raise ValueError
def extend(self, *args: Iterable) -> None:
for elem in args:
self.__array += array(self.__array.typecode, elem)
def insert(self, index: int, item: T) -> None:
if index == -1:
self.append(item)
return
elif index < 0:
index = len(self.__array) + index + 1
old_array = self.__array
self.__array = array(old_array.typecode)
for i, elem in enumerate(old_array):
if i == index:
self.append(item)
self.append(elem)
else:
self.append(elem)
def __iter__(self) -> 'ArrayList.Iterator':
return ArrayList.Iterator(self.__array)
def pop(self, index=-1) -> T:
if index < 0:
index = len(self.__array) + index
if index > len(self.__array) - 1:
raise IndexError
new_array = array(self.__array.typecode, [0 for _ in range(len(self.__array) - 1)])
i = 0
is_found = False
for elem in self.__array:
if i == index and not is_found:
item = self.__array[i]
is_found = True
continue
new_array[i] = elem
i += 1
self.__array = new_array
return item
def remove(self, target: T) -> None:
index = self.index(target)
self.pop(index)
def __reversed__(self) -> 'ArrayList.ReverseIterator':
return ArrayList.ReverseIterator(self.__array)
def __setitem__(self, key: int, value: T) -> None:
self.__array[key] = value
def __delitem__(self, key: int) -> None:
self.pop(key)
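# Illustrative usage (added; not part of the original homework file): the
# 'i' type code gives a signed-int backing array.
def _arraylist_demo():
    numbers = ArrayList('i', 3, 1, 2)
    numbers.append(5)
    numbers.insert(1, 4)          # -> 3, 4, 1, 2, 5
    numbers.remove(1)             # -> 3, 4, 2, 5
    return [x for x in numbers], numbers.count(4), len(numbers)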
| 3.46875 | 3 |
scripts/render.py | vitchyr/handful-of-trials | 1 | 12793521 | <reponame>vitchyr/handful-of-trials<gh_stars>1-10
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import os
import argparse
import pprint
from dotmap import DotMap
from dmbrl.misc.MBExp import MBExperiment
from dmbrl.controllers.MPC import MPC
from dmbrl.config import create_config
def main(
env,
ctrl_type, ctrl_args, overrides, model_dir, logdir,
init_iter,
last_iter,
nrecord,
rawdir,
):
ctrl_args = DotMap(**{key: val for (key, val) in ctrl_args})
overrides.append(["ctrl_cfg.prop_cfg.model_init_cfg.model_dir", model_dir])
overrides.append(["ctrl_cfg.prop_cfg.model_init_cfg.load_model", "True"])
overrides.append(["ctrl_cfg.prop_cfg.model_pretrained", "True"])
overrides.append(["exp_cfg.exp_cfg.ninit_rollouts", "0"])
overrides.append(["exp_cfg.exp_cfg.init_iter", str(init_iter)])
overrides.append(["exp_cfg.exp_cfg.ntrain_iters", str(last_iter)])
overrides.append(["exp_cfg.log_cfg.nrecord", str(nrecord)])
overrides.append(["exp_cfg.log_cfg.rawdir", str(rawdir)])
cfg = create_config(env, ctrl_type, ctrl_args, overrides, logdir)
cfg.pprint()
if ctrl_type == "MPC":
cfg.exp_cfg.exp_cfg.policy = MPC(cfg.ctrl_cfg)
exp = MBExperiment(cfg.exp_cfg)
if os.path.exists(exp.logdir):
overwrite = user_prompt(
"{} already exists. Overwrite?".format(exp.logdir)
)
if not overwrite:
return
else:
os.makedirs(exp.logdir)
with open(os.path.join(exp.logdir, "config.txt"), "w") as f:
f.write(pprint.pformat(cfg.toDict()))
exp.run_experiment()
print("Saved to")
print(exp.logdir)
def user_prompt(question: str) -> bool:
"""
Prompt the yes/no-*question* to the user.
https://stackoverflow.com/questions/3041986/apt-command-line-interface-like-yes-no-input
"""
from distutils.util import strtobool
while True:
user_input = input(question + " [y/n]: ").lower()
try:
result = strtobool(user_input)
return result
except ValueError:
print("Please use y/n or yes/no.\n")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-env', type=str, required=True)
parser.add_argument('-ca', '--ctrl_arg', action='append', nargs=2,
default=[])
parser.add_argument('-o', '--override', action='append', nargs=2,
default=[])
parser.add_argument('-model-dir', type=str, required=True)
parser.add_argument('-logdir', type=str, required=True)
parser.add_argument('-init-iter', type=int, default=0)
parser.add_argument('-last-iter', type=int, default=1)
parser.add_argument('-nrecord', type=int, default=1)
parser.add_argument('-no-raw-dir', action='store_true')
args = parser.parse_args()
main(
args.env,
"MPC",
args.ctrl_arg,
args.override,
args.model_dir,
args.logdir,
args.init_iter,
args.last_iter,
args.nrecord,
not args.no_raw_dir,
)
| 1.8125 | 2 |
layers/modules/multibox_loss_combined.py | Ze-Yang/Context-Transformer | 86 | 12793522 | import torch
import torch.nn as nn
import torch.nn.functional as F
from utils.box_utils import match
class MultiBoxLoss_combined(nn.Module):
"""SSD Weighted Loss Function
Compute Targets:
1) Produce Confidence Target Indices by matching ground truth boxes
with (default) 'priorboxes' that have jaccard index > threshold parameter
(default threshold: 0.5).
2) Produce localization target by 'encoding' variance into offsets of ground
truth boxes and their matched 'priorboxes'.
3) Hard negative mining to filter the excessive number of negative examples
that comes with using a large number of default bounding boxes.
(default negative:positive ratio 3:1)
Objective Loss:
L(x,c,l,g) = (Lconf(x, c) + αLloc(x,l,g)) / N
Where, Lconf is the CrossEntropy Loss and Lloc is the SmoothL1 Loss
weighted by α which is set to 1 by cross val.
Args:
c: class confidences,
l: predicted boxes,
g: ground truth boxes
N: number of matched default boxes
See: https://arxiv.org/pdf/1512.02325.pdf for more details.
"""
def __init__(self, num_classes, overlap_thresh, prior_for_matching, bkg_label, neg_mining, neg_pos, neg_overlap, encode_target):
super(MultiBoxLoss_combined, self).__init__()
self.num_classes = num_classes
self.threshold = overlap_thresh
self.background_label = bkg_label
self.encode_target = encode_target
self.use_prior_for_matching = prior_for_matching
self.do_neg_mining = neg_mining
self.negpos_ratio = neg_pos
self.neg_overlap = neg_overlap
self.variance = [0.1, 0.2]
def forward(self, predictions, priors, targets):
"""Multibox Loss
Args:
predictions (tuple): A tuple containing loc preds, conf preds,
and prior boxes from SSD net.
conf shape: torch.size(batch_size,num_priors,num_classes)
loc shape: torch.size(batch_size,num_priors,4)
priors shape: torch.size(num_priors,4)
ground_truth (tensor): Ground truth boxes and labels for a batch,
shape: [batch_size,num_objs,5] (last idx is the label).
"""
# loc_data[batch_size, num_priors, 4]
# conf_data[batch_size, num_priors, num_classes]
# obj_data[batch_size, num_priors, 2]
loc_data, conf_data, obj_data = predictions
device = loc_data.device
targets = [anno.to(device) for anno in targets]
num = loc_data.size(0)
num_priors = priors.size(0)
# match priors (default boxes) and ground truth boxes
loc_t = torch.Tensor(num, num_priors, 4).to(device)
conf_t = torch.Tensor(num, num_priors, 2).to(device)
obj_t = torch.BoolTensor(num, num_priors).to(device)
# match priors with gt
for idx in range(num): # batch_size
truths = targets[idx][:, :-2].data # [obj_num, 4]
labels = targets[idx][:, -2:].data # [obj_num]
defaults = priors.data # [num_priors,4]
match(self.threshold, truths, defaults, self.variance, labels, loc_t, conf_t, obj_t, idx)
pos = (conf_t[:, :, 0] > 0).bool() # [num, num_priors]
num_pos = (conf_t[:, :, 1] * pos.float()).sum(1, keepdim=True).long()
# Localization Loss (Smooth L1)
# Shape: [batch,num_priors,4]
loc_p = loc_data[pos]
loc_t = loc_t[pos]
loss_l = F.smooth_l1_loss(loc_p, loc_t, reduction='none')
weight_pos = conf_t[pos][:, 1]
loss_l = torch.sum(torch.sum(loss_l, dim=1) * weight_pos)
# Compute object loss across batch for hard negative mining
with torch.no_grad():
loss_obj = F.cross_entropy(obj_data.view(-1, 2), obj_t.long().view(-1), reduction='none')
# Hard Negative Mining
loss_obj[obj_t.view(-1)] = 0 # filter out pos boxes (label>0) and ignored boxes (label=-1) for now
loss_obj = loss_obj.view(num, -1)
_, loss_idx = loss_obj.sort(1, descending=True)
_, idx_rank = loss_idx.sort(1)
num_neg = torch.clamp(self.negpos_ratio * num_pos, max=num_priors - 1)
neg = idx_rank < num_neg.expand_as(idx_rank) # [num, num_priors]
# Object Loss Including Positive and Negative Examples
mask = pos | neg
weight = conf_t[mask][:, 1]
loss_obj = torch.sum(F.cross_entropy(obj_data[mask], obj_t[mask].long(), reduction='none') * weight)
# Confidence Loss (cosine distance to classes center)
# pos [num, num_priors]
# conf_data [num, num_priors, feature_dim]
batch_conf = conf_data.view(-1, self.num_classes-1)
        # Fold the 2-way objectness logits into the per-class logits ("logit-combined" scores)
batch_obj = obj_data.view(-1, 2) # [num*num_priors, 2]
logit_0 = batch_obj[:, 0].unsqueeze(1) + torch.log(
torch.exp(batch_conf).sum(dim=1, keepdim=True))
logit_k = batch_obj[:, 1].unsqueeze(1).expand_as(batch_conf) + batch_conf
logit = torch.cat((logit_0, logit_k), 1)
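        # Note: these combined logits factor the prediction as
        # p(background) = softmax(obj)[0] and p(class k) = softmax(obj)[1] * softmax(conf)[k],
        # so one cross-entropy over `logit` trains objectness and classification jointly.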
# Confidence Loss Including Positive and Negative Examples
logit = logit.view(num, -1, self.num_classes)
loss_c = torch.sum(F.cross_entropy(logit[mask], conf_t[mask][:, 0].long(), reduction='none') * weight)
N = num_pos.sum()
loss_l /= N
loss_c /= N
loss_obj /= N
return {'loss_box_reg': loss_l, 'loss_cls': loss_c, 'loss_obj': loss_obj}
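# --- Usage sketch (not part of the original file) ---
# A hedged example of how this criterion might be wired up; the exact prior/target
# conventions depend on utils.box_utils.match, which is not shown here.
# Shapes follow the comments in forward():
#   loc_data:  [batch, num_priors, 4]
#   conf_data: [batch, num_priors, num_classes - 1]   (foreground classes only)
#   obj_data:  [batch, num_priors, 2]                 (background / foreground)
#   priors:    [num_priors, 4]
#   targets:   list of [num_objs, 6] tensors (4 box coords, class label, weight)
#
# criterion = MultiBoxLoss_combined(num_classes=21, overlap_thresh=0.5,
#                                   prior_for_matching=True, bkg_label=0,
#                                   neg_mining=True, neg_pos=3,
#                                   neg_overlap=0.5, encode_target=False)
# losses = criterion((loc_data, conf_data, obj_data), priors, targets)
# total_loss = losses['loss_box_reg'] + losses['loss_cls'] + losses['loss_obj']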
| 2.359375 | 2 |
tests/test_simple.py | mosquito/argclass | 2 | 12793523 | import logging
import os
import re
import uuid
from typing import List, Optional, FrozenSet
import pytest
import argclass
class TestBasics:
class Parser(argclass.Parser):
integers: List[int] = argclass.Argument(
"integers", type=int,
nargs=argclass.Nargs.ONE_OR_MORE, metavar="N",
help="an integer for the accumulator",
)
accumulate = argclass.Argument(
"--sum", action=argclass.Actions.STORE_CONST, const=sum,
default=max, help="sum the integers (default: find the max)",
)
def test_simple(self):
parser = self.Parser()
parser.parse_args(["1", "2", "3"])
assert parser.integers
assert parser.integers == [1, 2, 3]
class HostPortGroup(argclass.Group):
host: str
port: int
class TestFoo:
class Parser(argclass.Parser):
foo: str = argclass.Argument(help="foo")
http: HostPortGroup = HostPortGroup(
title="HTTP host and port", prefix="api", defaults={
"port": 80, "host": "0.0.0.0",
},
)
grpc: HostPortGroup = HostPortGroup(
title="GRPC host and port",
defaults={"port": 6000, "host": "::"},
)
def test_simple(self):
parser = self.Parser()
parser.parse_args(["--foo", "bar"])
assert parser.foo == "bar"
parser.parse_args(["--foo=bar"])
assert parser.foo == "bar"
def test_group(self):
parser = self.Parser()
parser.parse_args(["--foo", "bar"])
assert parser.foo == "bar"
parser.parse_args([
"--foo=bar",
"--api-host=127.0.0.1",
"--api-port=8080",
"--grpc-host=127.0.0.2",
"--grpc-port=9000",
])
assert parser.foo == "bar"
assert parser.http.host == "127.0.0.1"
assert parser.http.port == 8080
assert parser.grpc.host == "127.0.0.2"
assert parser.grpc.port == 9000
def test_group_defaults(self):
parser = self.Parser()
parser.parse_args(["--foo=bar"])
assert parser.foo == "bar"
assert parser.http.host == "0.0.0.0"
assert parser.http.port == 80
assert parser.grpc.host == "::"
assert parser.grpc.port == 6000
def test_parser_repr(self):
parser = self.Parser()
r = repr(parser)
assert r == "<Parser: 1 arguments, 2 groups, 0 subparsers>"
def test_access_to_not_parsed_attrs(self):
parser = self.Parser()
with pytest.raises(AttributeError):
_ = parser.foo
def test_environment(self, request: pytest.FixtureRequest):
prefix = re.sub(r"\d+", "", uuid.uuid4().hex + uuid.uuid4().hex).upper()
expected = uuid.uuid4().hex
os.environ[f"{prefix}_FOO"] = expected
request.addfinalizer(lambda: os.environ.pop(f"{prefix}_FOO"))
parser = self.Parser(auto_env_var_prefix=f"{prefix}_")
parser.parse_args([])
assert parser.foo == expected
def test_env_var(request: pytest.FixtureRequest):
env_var = re.sub(r"\d+", "", uuid.uuid4().hex + uuid.uuid4().hex).upper()
class Parser(argclass.Parser):
foo: str = argclass.Argument(env_var=env_var)
expected = uuid.uuid4().hex
os.environ[env_var] = expected
request.addfinalizer(lambda: os.environ.pop(env_var))
parser = Parser()
parser.parse_args([])
assert parser.foo == expected
def test_nargs():
class Parser(argclass.Parser):
foo: List[int] = argclass.Argument(
nargs=argclass.Nargs.ZERO_OR_MORE, type=int,
)
bar: int = argclass.Argument(nargs="*")
spam: int = argclass.Argument(nargs=1)
parser = Parser()
parser.parse_args(["--foo", "1", "2", "--bar=3", "--spam=4"])
assert parser.foo == [1, 2]
assert parser.bar == [3]
assert parser.spam == [4]
def test_group_aliases():
class Group(argclass.Group):
foo: str = argclass.Argument("-F")
class Parser(argclass.Parser):
group = Group()
parser = Parser()
parser.parse_args(["-F", "egg"])
assert parser.group.foo == "egg"
def test_short_parser_definition():
class Parser(argclass.Parser):
foo: str
bar: int
parser = Parser()
parser.parse_args(["--foo=spam", "--bar=1"])
assert parser.foo == "spam"
assert parser.bar == 1
def test_print_help(capsys: pytest.CaptureFixture):
class Parser(argclass.Parser):
foo: str
bar: int = 0
parser = Parser()
parser.print_help()
captured = capsys.readouterr()
assert "--foo" in captured.out
assert "--bar" in captured.out
assert "--help" in captured.out
assert "--foo FOO" in captured.out
assert "[--bar BAR]" in captured.out
def test_print_log_level(capsys: pytest.CaptureFixture):
class Parser(argclass.Parser):
log_level: int = argclass.LogLevel
parser = Parser()
parser.parse_args(["--log-level", "info"])
assert parser.log_level == logging.INFO
parser.parse_args(["--log-level=warning"])
assert parser.log_level == logging.WARNING
def test_optional_type():
class Parser(argclass.Parser):
flag: bool
optional: Optional[bool]
parser = Parser()
parser.parse_args([])
assert parser.optional is None
assert not parser.flag
parser.parse_args(["--flag"])
assert parser.flag
for variant in ("yes", "Y", "yeS", "enable", "ENABLED", "1"):
parser.parse_args([f"--optional={variant}"])
assert parser.optional is True
for variant in ("no", "crap", "false", "disabled", "MY_HANDS_TYPING_WORDS"):
parser.parse_args([f"--optional={variant}"])
assert parser.optional is False
def test_argument_defaults():
class Parser(argclass.Parser):
debug: bool = False
confused_default: bool = True
pool_size: int = 4
forks: int = 2
parser = Parser()
parser.parse_args([])
assert parser.debug is False
assert parser.confused_default is True
assert parser.pool_size == 4
assert parser.forks == 2
parser.parse_args([
"--debug", "--forks=8", "--pool-size=2", "--confused-default",
])
assert parser.debug is True
assert parser.confused_default is False
assert parser.pool_size == 2
assert parser.forks == 8
def test_inheritance():
class AddressPort(argclass.Group):
address: str
port: int
class Parser(argclass.Parser, AddressPort):
pass
parser = Parser()
parser.parse_args(["--address=0.0.0.0", "--port=9876"])
assert parser.address == "0.0.0.0"
assert parser.port == 9876
def test_config_for_required(tmp_path):
class Parser(argclass.Parser):
required: int = argclass.Argument(required=True)
config_path = tmp_path / "config.ini"
with open(config_path, "w") as fp:
fp.write("[DEFAULT]\n")
fp.write("required = 10\n")
fp.write("\n")
parser = Parser(config_files=[config_path])
parser.parse_args([])
assert parser.required == 10
parser = Parser(config_files=[])
with pytest.raises(SystemExit):
parser.parse_args([])
def test_minimal_optional(tmp_path):
class Parser(argclass.Parser):
optional: Optional[int]
parser = Parser()
parser.parse_args([])
assert parser.optional is None
parser.parse_args(["--optional=10"])
assert parser.optional == 10
def test_optional_is_not_required(tmp_path):
class Parser(argclass.Parser):
optional: Optional[int] = argclass.Argument(required=False)
parser = Parser()
parser.parse_args([])
assert parser.optional is None
parser.parse_args(["--optional=20"])
assert parser.optional == 20
def test_minimal_required(tmp_path):
class Parser(argclass.Parser):
required: int
parser = Parser()
with pytest.raises(SystemExit):
parser.parse_args([])
parser.parse_args(["--required=20"])
assert parser.required == 20
def test_log_group():
class LogGroup(argclass.Group):
level: int = argclass.LogLevel
format = argclass.Argument(
choices=("json", "stream"), default="stream"
)
class Parser(argclass.Parser):
log = LogGroup()
parser = Parser()
parser.parse_args([])
assert parser.log.level == logging.INFO
assert parser.log.format == "stream"
parser.parse_args(["--log-level=debug", "--log-format=json"])
assert parser.log.level == logging.DEBUG
assert parser.log.format == "json"
def test_log_group_defaults():
class LogGroup(argclass.Group):
level: int = argclass.LogLevel
format: str = argclass.Argument(
choices=("json", "stream")
)
class Parser(argclass.Parser):
log = LogGroup(defaults=dict(format="json", level="error"))
parser = Parser()
parser.parse_args([])
assert parser.log.level == logging.ERROR
assert parser.log.format == "json"
def test_environment_required():
class Parser(argclass.Parser):
required: int
parser = Parser(auto_env_var_prefix="TEST_")
os.environ['TEST_REQUIRED'] = "100"
parser.parse_args([])
assert parser.required == 100
os.environ.pop('TEST_REQUIRED')
with pytest.raises(SystemExit):
parser.parse_args([])
def test_nargs_and_converter():
class Parser(argclass.Parser):
args_set: FrozenSet[int] = argclass.Argument(
type=int, nargs="+", converter=frozenset
)
parser = Parser()
parser.parse_args(["--args-set", "1", "2", "3", "4", "5"])
assert isinstance(parser.args_set, frozenset)
assert parser.args_set == frozenset([1, 2, 3, 4, 5])
def test_nargs_and_converter_not_required():
class Parser(argclass.Parser):
args_set: FrozenSet[int] = argclass.Argument(
type=int, nargs="*", converter=frozenset
)
parser = Parser()
parser.parse_args([])
assert isinstance(parser.args_set, frozenset)
assert parser.args_set == frozenset([])
parser.parse_args(["--args-set", "1", "2", "3", "4", "5"])
assert isinstance(parser.args_set, frozenset)
assert parser.args_set == frozenset([1, 2, 3, 4, 5])
def test_nargs_1():
class Parser(argclass.Parser):
args_set: FrozenSet[int] = argclass.Argument(
type=int, nargs=1, converter=frozenset
)
parser = Parser()
parser.parse_args([])
assert isinstance(parser.args_set, frozenset)
assert parser.args_set == frozenset([])
parser.parse_args(["--args-set", "1"])
assert isinstance(parser.args_set, frozenset)
assert parser.args_set == frozenset([1])
def test_nargs_env_var():
class Parser(argclass.Parser):
nargs: FrozenSet[int] = argclass.Argument(
type=int, nargs="*", converter=frozenset, env_var="NARGS"
)
os.environ['NARGS'] = "[1, 2, 3]"
try:
parser = Parser()
parser.parse_args([])
finally:
del os.environ['NARGS']
assert parser.nargs == frozenset({1, 2, 3})
def test_nargs_config_list(tmp_path):
class Parser(argclass.Parser):
nargs: FrozenSet[int] = argclass.Argument(
type=int, nargs="*", converter=frozenset, env_var="NARGS"
)
conf_file = tmp_path / "config.ini"
with open(conf_file, "w") as fp:
fp.write("[DEFAULT]\n")
fp.write("nargs = [1, 2, 3, 4]\n")
parser = Parser(config_files=[conf_file])
parser.parse_args([])
assert parser.nargs == frozenset({1, 2, 3, 4})
def test_nargs_config_set(tmp_path):
class Parser(argclass.Parser):
nargs: FrozenSet[int] = argclass.Argument(
type=int, nargs="*", converter=frozenset, env_var="NARGS"
)
conf_file = tmp_path / "config.ini"
with open(conf_file, "w") as fp:
fp.write("[DEFAULT]\n")
fp.write("nargs = {1, 2, 3, 4}\n")
parser = Parser(config_files=[conf_file])
parser.parse_args([])
assert parser.nargs == frozenset({1, 2, 3, 4})
| 2.625 | 3 |
tmapi/tests/models/test_association.py | ajenhl/django-tmapi | 2 | 12793524 | # Copyright 2011 <NAME> (<EMAIL>)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing tests for the Association model.
Most if not all of these tests are ported from the public domain tests
that come with the TMAPI 2.0 distribution (http://www.tmapi.org/2.0/).
"""
from tmapi.exceptions import ModelConstraintException
from tmapi_test_case import TMAPITestCase
class AssociationTest (TMAPITestCase):
def test_parent (self):
parent = self.tms.create_topic_map(
'http://www.tmapi.org/test/assoc/parent')
self.assertEqual(parent.get_associations().count(), 0,
'Expected new topic maps to be created with no ' +
'associations')
association = parent.create_association(parent.create_topic())
self.assertEqual(parent, association.get_parent(), 'Unexpected ' +
'association parent after creation')
self.assertEqual(1, parent.get_associations().count(),
'Expected association list size to increment for ' +
'topic map')
self.assertTrue(association in parent.get_associations(),
'Association is not part of get_associations()')
association.remove()
self.assertEqual(0, parent.get_associations().count(),
'Expected association list size to decrement for ' +
'topic map')
def test_role_creation (self):
association = self.create_association()
self.assertEqual(0, association.get_roles().count(),
'Expected no roles in a newly created association')
role_type = self.create_topic()
player = self.create_topic()
self.assertEqual(0, player.get_roles_played().count())
role = association.create_role(role_type, player)
self.assertEqual(role_type, role.get_type(), 'Unexpected role type')
self.assertEqual(player, role.get_player(), 'Unexpected role player')
self.assertEqual(1, player.get_roles_played().count())
self.assertTrue(role in player.get_roles_played())
def test_role_types (self):
association = self.create_association()
type1 = self.create_topic()
type2 = self.create_topic()
self.assertEqual(0, association.get_role_types().count())
role1 = association.create_role(type1, self.create_topic())
self.assertEqual(1, association.get_role_types().count())
self.assertTrue(type1 in association.get_role_types())
role2 = association.create_role(type2, self.create_topic())
self.assertEqual(2, association.get_role_types().count())
self.assertTrue(type1 in association.get_role_types())
self.assertTrue(type2 in association.get_role_types())
role3 = association.create_role(type2, self.create_topic())
self.assertEqual(2, association.get_role_types().count())
self.assertTrue(type1 in association.get_role_types())
self.assertTrue(type2 in association.get_role_types())
role3.remove()
self.assertEqual(2, association.get_role_types().count())
self.assertTrue(type1 in association.get_role_types())
self.assertTrue(type2 in association.get_role_types())
role2.remove()
self.assertEqual(1, association.get_role_types().count())
self.assertTrue(type1 in association.get_role_types())
self.assertFalse(type2 in association.get_role_types())
role1.remove()
self.assertEqual(0, association.get_role_types().count())
def test_role_filter (self):
association = self.create_association()
type1 = self.create_topic()
type2 = self.create_topic()
unused_type = self.create_topic()
self.assertEqual(0, association.get_roles(type1).count())
self.assertEqual(0, association.get_roles(type2).count())
self.assertEqual(0, association.get_roles(unused_type).count())
role1 = association.create_role(type1, self.create_topic())
self.assertEqual(1, association.get_roles(type1).count())
self.assertTrue(role1 in association.get_roles(type1))
self.assertEqual(0, association.get_roles(type2).count())
self.assertEqual(0, association.get_roles(unused_type).count())
role2 = association.create_role(type2, self.create_topic())
self.assertEqual(1, association.get_roles(type2).count())
self.assertTrue(role2 in association.get_roles(type2))
role3 = association.create_role(type2, self.create_topic())
self.assertEqual(2, association.get_roles(type2).count())
self.assertTrue(role2 in association.get_roles(type2))
self.assertTrue(role3 in association.get_roles(type2))
self.assertEqual(0, association.get_roles(unused_type).count())
role3.remove()
self.assertEqual(1, association.get_roles(type2).count())
self.assertTrue(role2 in association.get_roles(type2))
role2.remove()
self.assertEqual(0, association.get_roles(type2).count())
role1.remove()
self.assertEqual(0, association.get_roles(type1).count())
self.assertEqual(0, association.get_roles(unused_type).count())
def test_role_filter_illegal (self):
# This test is not applicable in this implementation.
pass
def test_role_creation_invalid_player (self):
association = self.create_association()
self.assertEqual(0, association.get_roles().count())
self.assertRaises(ModelConstraintException, association.create_role,
self.create_topic(), None)
def test_role_creation_invalid_type (self):
association = self.create_association()
self.assertEqual(0, association.get_roles().count())
self.assertRaises(ModelConstraintException, association.create_role,
None, self.create_topic())
| 1.96875 | 2 |
robustfpm/util/io.py | andreevnick/robust-financial-portfolio-management-framework | 1 | 12793525 | <reponame>andreevnick/robust-financial-portfolio-management-framework<filename>robustfpm/util/io.py<gh_stars>1-10
# Copyright 2021 portfolio-robustfpm-framework Authors
# Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
# http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
# http://opensource.org/licenses/MIT>, at your option. This file may not be
# copied, modified, or distributed except according to those terms.
import os
__all__ = [
'fig2files'
]
def fig2files(plt, dirname, filename, dpi=None):
os.makedirs('{0}/png'.format(dirname), exist_ok=True)
plt.savefig('{0}/png/{1}.png'.format(dirname, filename), dpi=dpi)
os.makedirs('{0}/eps'.format(dirname), exist_ok=True)
plt.savefig('{0}/eps/{1}.eps'.format(dirname, filename), format='eps')
os.makedirs('{0}/svg'.format(dirname), exist_ok=True)
plt.savefig('{0}/svg/{1}.svg'.format(dirname, filename), format='svg')
| 1.984375 | 2 |
mmdet/core/anchor/builder.py | mrzhuzhe/mmdetection | 0 | 12793526 | # Copyright (c) OpenMMLab. All rights reserved.
import warnings
from mmcv.utils import Registry, build_from_cfg
PRIOR_GENERATORS = Registry('Generator for anchors and points')
ANCHOR_GENERATORS = PRIOR_GENERATORS
def build_prior_generator(cfg, default_args=None):
return build_from_cfg(cfg, PRIOR_GENERATORS, default_args)
def build_anchor_generator(cfg, default_args=None):
warnings.warn(
'``build_anchor_generator`` would be deprecated soon, please use '
'``build_prior_generator`` ')
return build_prior_generator(cfg, default_args=default_args)
| 2.03125 | 2 |
list2/task2.py | ErykKrupa/python-course | 0 | 12793527 | <filename>list2/task2.py
from sys import argv, stderr
import re
_base64_str = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'
def encode(raw_file_path, encrypted_file_path):
raw_file = open(raw_file_path, 'r')
encrypted_file = open(encrypted_file_path, 'w')
blob = ''.join(f'{char:08b}'
for char in raw_file.read().encode('utf-8'))
bits_list = re.findall(".{1,6}", blob)
if len(bits_list[-1]) == 2:
bits_list[-1] += '0000'
end = '=='
elif len(bits_list[-1]) == 4:
bits_list[-1] += '00'
end = '='
else:
end = ''
encrypted_file.write(''.join(_base64_str[int(bits, 2)] for bits in bits_list))
encrypted_file.write(end)
raw_file.close()
encrypted_file.close()
def decode(encrypted_file_path, raw_file_path):
encrypted_file = open(encrypted_file_path, 'r')
raw_file = open(raw_file_path, 'w')
blob = ''.join(f'{_base64_str.index(char):06b}'
for char in encrypted_file.read() if char != '=')
bits_list = re.findall(".{1,8}", blob)
if len(bits_list[-1]) != 8:
del bits_list[-1]
    # reassemble the UTF-8 bytes produced by encode() before decoding back to text
    raw_file.write(bytes(int(bits, 2) for bits in bits_list).decode('utf-8'))
raw_file.close()
encrypted_file.close()
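# Worked example (illustrative, not part of the original script): if 'demo.txt'
# contains exactly the three ASCII characters "abc" (no trailing newline), then
#   encode('demo.txt', 'demo.b64')       # writes "YWJj", the standard Base64 of "abc"
#   decode('demo.b64', 'demo_copy.txt')  # restores the original "abc"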
if __name__ == '__main__':
if len(argv) <= 3:
print("Not enough parameters.", file=stderr)
exit(1)
try:
if argv[1] == '--encode' or argv[1] == '-e':
encode(argv[2], argv[3])
elif argv[1] == '--decode' or argv[1] == '-d':
decode(argv[2], argv[3])
else:
print(f"Unknown parameter {argv[1]}.", file=stderr)
exit(2)
except EnvironmentError:
print(f"Problem with access {argv[2]} or {argv[3]} occurred."
f"Does {argv[2]} exist?", file=stderr)
exit(3)
| 3.078125 | 3 |
x-tree/x-tree.py | bzliu94/algorithms | 0 | 12793528 | <reponame>bzliu94/algorithms<gh_stars>0
# 2016-08-21
# x-tree featuring enclosure and containment queries
# dimension is implicit (determined using points sampled) and assumed to be consistent
# we never split a super-node
# updated on 2016-08-23 to fix traditional/non-traditional isLeafNode() distinction
# updated on 2016-08-25 to fix overlap logic for determining when to attempt an overlap-minimal split
# updated on 2016-11-03 to re-structure and modify adjustTree();
# stop at root instead of non-existent parent of root;
# also, we implement delete(); note that our tree
# has entry-aware nodes; made bug fix for adjustTree();
# fixed bug with parent pointers for xtreeInsert();
# have supernode demotion when size decreases to or below M
# updated on 2016-11-06 to add single-start-rectangle-based
# close-descendant finding that takes O(log(n)) time on average
# for start rectangle taken from set of actual rectangles
# for an r-tree and O(n * log(n)) time at worst;
# and to add all-start-rectangles close-ancestor finding,
# which for a well-formed r-tree, takes O(n * log(n)) time;
# these times involve n, which is number of actual rectangles
# or leaves in r-tree; these times assume "maximal disjointedness"
# and depth-first stack for internal nodes and
# best-first priority queue for leaf nodes
# updated on 2016-11-16 to fix margin calculation
# note that we assume rectangles are unique for close-descendant
# and close-ancestor finding; the assumption is necessary
# to make strong running time estimates; the reason is that
# otherwise the directed graph implied by the r-tree
# is not acyclic and we have cliques
# note that we don't necessarily need PythonMagick
# note that nodes always point to same entries
# unless we explicitly create new entries,
# which we do do occasionally
# note that M of two works
import sys
# import PythonMagick
import heapq
from collections import deque
# min-pq
class PriorityQueue:
def __init__(self):
self.heap = []
def push(self, item, priority):
pair = (priority,item)
heapq.heappush(self.heap,pair)
def pop(self):
(priority,item) = heapq.heappop(self.heap)
return item
def isEmpty(self):
return len(self.heap) == 0
def peek(self):
heap = self.heap
pair = heap[0]
result = pair
return result
def toList(self):
pair_list = self.heap
items = [x[1] for x in pair_list]
return items
def getSize(self):
return len(self.heap)
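# Illustrative sketch (hypothetical values): the min-PQ above pops the lowest-priority
# item first, which the best-first leaf traversal mentioned in the header relies on.
#   pq = PriorityQueue()
#   pq.push("far rectangle", 9.0)
#   pq.push("near rectangle", 1.5)
#   pq.pop()  # -> "near rectangle"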
import math
def getDistance(point1, point2):
x1, y1 = point1
x2, y2 = point2
change_x = x2 - x1
change_y = y2 - y1
distance = math.sqrt(change_x ** 2 + change_y ** 2)
return distance
class RTreeNode:
def __init__(self, parent, entries, is_leaf, entry = None, split_history_root_dimension = None, is_supernode = False):
self.parent = parent
self.is_leaf = is_leaf
self.m = 8
self.M = 16
self.child_to_entry_dict = {}
for curr_entry in entries:
curr_child = curr_entry.getChild()
(self.child_to_entry_dict)[curr_child] = curr_entry
self.split_history_root_dimension = split_history_root_dimension
self.is_supernode = is_supernode
self.entry = entry
def getEntry(self):
return self.entry
def setEntry(self, entry):
self.entry = entry
def isSuperNode(self):
return self.is_supernode
def setToSuperNode(self, is_supernode):
self.is_supernode = is_supernode
def getSplitHistoryRootDimension(self):
return self.split_history_root_dimension
def setSplitHistoryRootDimension(self, dim):
self.split_history_root_dimension = dim
def getParent(self):
return self.parent
def getEntries(self):
return (self.child_to_entry_dict).values()
def getEntryForChild(self, child_node):
return (self.child_to_entry_dict)[child_node]
def getChildren(self):
return (self.child_to_entry_dict).keys()
def getNumEntries(self):
return len(self.child_to_entry_dict)
def getNumChildren(self):
return self.getNumEntries()
def setParent(self, node):
self.parent = node
def isNonTraditionalLeafNode(self):
is_non_traditional_leaf_node = (self.getParent() == None and self.getNumChildren() == 0) or (self.getNumChildren() != 0 and False not in [x.getChild().getNumEntries() == 0 for x in self.getEntries()])
return is_non_traditional_leaf_node
"""
def isTraditionalLeafNode(self):
is_traditional_leaf_node = self.getNumEntries() == 0
return is_traditional_leaf_node
"""
def isLeafNode(self):
# is_leaf_node = (self.getParent() == None and self.getNumChildren() == 0) or (self.getNumChildren() != 0 and False not in [x.getChild().getNumEntries() == 0 for x in self.getEntries()])
is_leaf_node = self.getNumChildren() == 0
return is_leaf_node
def addEntry(self, entry):
curr_child = entry.getChild()
(self.child_to_entry_dict)[curr_child] = entry
def removeEntry(self, entry):
curr_child = entry.getChild()
(self.child_to_entry_dict).pop(curr_child)
def getMinimumNumEntriesPerNode(self):
return self.m
def getMaximumNumEntriesPerNode(self):
return self.M
def isFull(self):
return self.getNumEntries() >= self.getMaximumNumEntriesPerNode()
def isUnderfull(self):
return self.getNumEntries() < self.getMinimumNumEntriesPerNode()
def retrieveEntryForChild(self, node):
return (self.child_to_entry_dict)[node]
def toString(self):
return str(self.getEntries())
class RTreeEntry:
def __init__(self, mbr, child):
self.mbr = mbr
self.child = child
def getMBR(self):
return self.mbr
def setMBR(self, mbr):
self.mbr = mbr
def getChild(self):
return self.child
def setChild(self, node):
self.child = node
@staticmethod
def draw(tree, entries, image, depth):
for entry in entries:
RTreeEntry.drawHelper(tree, entry, image, depth)
@staticmethod
def drawHelper(tree, entry, image, depth):
node = entry.getChild()
entries = node.getEntries()
mbr_list = [entry.getMBR()]
for mbr in mbr_list:
upper_left = mbr.getUpperLeft()
lower_right = mbr.getLowerRight()
x1, y1 = upper_left
x2, y2 = lower_right
multiplier = 1 / (1.0 * 6.5) * 0.8
offset = (1536 * 0.2) / 2
next_x1, next_y1 = (multiplier * x1 + offset, multiplier * y1 + offset)
next_x2, next_y2 = (multiplier * x2 + offset, multiplier * y2 + offset)
if depth != 0:
pass
color_choice = depth % 3
color = None
if color_choice == 0:
color = PythonMagick.Color(65535, 0, 0, 32767)
elif color_choice == 1:
color = PythonMagick.Color(0, 0, 65535, 32767)
elif color_choice == 2:
color = PythonMagick.Color(0, 65535, 0, 32767)
if upper_left == lower_right:
image.strokeColor("none")
image.fillColor(color)
center_x = next_x1
center_y = next_y1
radius = 4
perimeter_x = next_x1
perimeter_y = next_y1 + radius
image.draw(PythonMagick.DrawableCircle(center_x, center_y, perimeter_x, perimeter_y))
else:
image.strokeColor(color)
image.fillColor("none")
image.strokeWidth(4)
image.draw(PythonMagick.DrawableRectangle(next_x1, next_y1, next_x2, next_y2))
if len(entries) == 0:
parent = entry.getChild().getParent()
mbr = entry.getMBR()
location = Point.toPoint(mbr)
x, y = location
multiplier = 1 / (1.0 * 6.5) * 0.8
offset = (1536 * 0.2) / 2
next_x = multiplier * x
next_y = multiplier * y
image.strokeColor("none")
image.fillColor("black")
center_x = next_x + offset
center_y = next_y + offset
radius = 2
perimeter_x = next_x + offset
perimeter_y = next_y + offset + radius
image.draw(PythonMagick.DrawableCircle(center_x, center_y, perimeter_x, perimeter_y))
children = [x.getChild() for x in entries]
entry.draw(tree, entries, image, depth + 1)
class MBR:
def __init__(self, upper_left, lower_right):
self.upper_left = upper_left
self.lower_right = lower_right
def isRaw(self):
return False
def isComposite(self):
return False
def getUpperLeft(self):
return self.upper_left
def getLowerRight(self):
return self.lower_right
def getArea(self):
upper_left = self.getUpperLeft()
lower_right = self.getLowerRight()
sides = []
for i in xrange(self.getDimension()):
comp1 = upper_left[i]
comp2 = lower_right[i]
side = comp2 - comp1
sides.append(side)
area = reduce(lambda x, y: x * y, sides)
return area
@staticmethod
def getEnlargedMBR(base_mbr, mbr):
mbr_list = [base_mbr, mbr]
upper_left_points = [x.getUpperLeft() for x in mbr_list]
lower_right_points = [x.getLowerRight() for x in mbr_list]
points = upper_left_points + lower_right_points
min_components = []
max_components = []
for i in xrange(base_mbr.getDimension()):
components = [x[i] for x in points]
min_comp_value = min(components)
max_comp_value = max(components)
min_components.append(min_comp_value)
max_components.append(max_comp_value)
upper_left_point = tuple(min_components)
lower_right_point = tuple(max_components)
result_mbr_list = base_mbr.getMBRList() + [mbr]
mbr = CompositeMBR(upper_left_point, lower_right_point, result_mbr_list)
return mbr
@staticmethod
def getAreaEnlargement(base_mbr, mbr):
base_mbr_area = base_mbr.getArea()
enlarged_mbr = MBR.getEnlargedMBR(base_mbr, mbr)
enlarged_mbr_area = enlarged_mbr.getArea()
area_change = enlarged_mbr_area - base_mbr_area
return area_change
@staticmethod
def doOverlap(mbr_a, mbr_b, without_borders = False):
upper_left_a = mbr_a.getUpperLeft()
lower_right_a = mbr_a.getLowerRight()
upper_left_b = mbr_b.getUpperLeft()
lower_right_b = mbr_b.getLowerRight()
do_overlap = True
# assume that rectangles never have negative area
for i in xrange(mbr_a.getDimension()):
# a "left"
comp_a1 = min(upper_left_a[i], lower_right_a[i])
# a "right"
comp_a2 = max(upper_left_a[i], lower_right_a[i])
# b "left"
comp_b1 = min(upper_left_b[i], lower_right_b[i])
# b "right"
comp_b2 = max(upper_left_b[i], lower_right_b[i])
# print comp_a1, comp_a2, comp_b1, comp_b2
# do_overlap = True
if without_borders == True:
do_overlap = do_overlap and comp_a1 < comp_b2 and comp_a2 > comp_b1
else:
do_overlap = do_overlap and comp_a1 <= comp_b2 and comp_a2 >= comp_b1
if do_overlap == False:
break
return do_overlap
@staticmethod
def findOverlapArea(mbr_a, mbr_b):
if MBR.doOverlap(mbr_a, mbr_b) == False:
return 0
else:
upper_left_a = mbr_a.getUpperLeft()
lower_right_a = mbr_a.getLowerRight()
upper_left_b = mbr_b.getUpperLeft()
lower_right_b = mbr_b.getLowerRight()
dimension = mbr_a.getDimension()
sides = []
for i in xrange(dimension):
comp_a1 = upper_left_a[i]
comp_a2 = lower_right_a[i]
comp_b1 = upper_left_b[i]
comp_b2 = lower_right_b[i]
side = max(0, min(comp_a2, comp_b2) - max(comp_a1, comp_b1))
sides.append(side)
intersection_volume = reduce(lambda x, y: x * y, sides)
return intersection_volume
def getMarginValue(self):
upper_left = self.getUpperLeft()
lower_right = self.getLowerRight()
if self.getDimension() == 0:
raise Exception()
if self.getDimension() == 1:
x1 = upper_left[0]
x2 = lower_right[0]
margin = x2 - x1
return margin
if self.getDimension() == 2:
x1, y1 = upper_left
x2, y2 = lower_right
margin = 2 * (x2 - x1) + 2 * (y2 - y1)
return margin
surface_area = 0
for i in xrange(self.getDimension()):
comp_1a = upper_left[i]
comp_1b = lower_right[i]
term1 = comp_1b - comp_1a
for j in xrange(i + 1, self.getDimension()):
comp_2a = upper_left[j]
comp_2b = lower_right[j]
term2 = comp_2b - comp_2a
term = 2 * term1 * term2
surface_area += term
margin = surface_area
return margin
def toString(self):
upper_left = self.getUpperLeft()
lower_right = self.getLowerRight()
result = str(list(upper_left + lower_right) + [self.isRaw()])
return result
def getDimension(self):
return len(self.getUpperLeft())
def doesEnclose(self, mbr):
dimension = self.getDimension()
does_enclose = True
for i in xrange(dimension):
left_value1 = self.getUpperLeft()[i]
left_value2 = mbr.getUpperLeft()[i]
right_value1 = self.getLowerRight()[i]
right_value2 = mbr.getLowerRight()[i]
component_does_enclose = left_value1 <= left_value2 and right_value1 >= right_value2
if component_does_enclose == False:
does_enclose = False
break
return does_enclose
def isEqualTo(self, mbr):
upper_left1 = self.getUpperLeft()
lower_right1 = self.getLowerRight()
upper_left2 = mbr.getUpperLeft()
lower_right2 = mbr.getLowerRight()
is_equal = upper_left1 == upper_left2 and lower_right1 == lower_right2
return is_equal
class RawMBR(MBR):
def __init__(self, upper_left, lower_right, contained_item):
MBR.__init__(self, upper_left, lower_right)
self.contained_item = contained_item
def isRaw(self):
return True
@staticmethod
def makeMBRFromPoint(point):
upper_left = point
lower_right = point
result_mbr = RawMBR(upper_left, lower_right, point)
return result_mbr
def getContainedItem(self):
return self.contained_item
def getMBRList(self):
return [self]
def clone(self):
upper_left = self.getUpperLeft()
lower_right = self.getLowerRight()
contained_item = self.getContainedItem()
mbr = RawMBR(upper_left, lower_right, contained_item)
return mbr
def doesMatch(self, mbr):
upper_left_matches = self.getUpperLeft() == mbr.getUpperLeft()
lower_right_matches = self.getLowerRight() == mbr.getLowerRight()
result = upper_left_matches == True and lower_right_matches == True
return result
class CompositeMBR(MBR):
def __init__(self, upper_left, lower_right, mbr_list):
MBR.__init__(self, upper_left, lower_right)
self.mbr_list = mbr_list
def getMBRList(self):
return self.mbr_list
def isComposite(self):
return True
@staticmethod
def makeMBR(component_mbr_list):
upper_left_points = [x.getUpperLeft() for x in component_mbr_list]
lower_right_points = [x.getLowerRight() for x in component_mbr_list]
points = upper_left_points + lower_right_points
min_components = []
max_components = []
for i in xrange(component_mbr_list[0].getDimension()):
components = [x[i] for x in points]
min_comp_value = min(components)
max_comp_value = max(components)
min_components.append(min_comp_value)
max_components.append(max_comp_value)
upper_left_point = tuple(min_components)
lower_right_point = tuple(max_components)
result_mbr = CompositeMBR(upper_left_point, lower_right_point, component_mbr_list)
return result_mbr
class HyperRectangle:
def __init__(self, upper_left, lower_right, id_value):
self.upper_left = upper_left
self.lower_right = lower_right
self.id_value = id_value
def getUpperLeft(self):
return self.upper_left
def getLowerRight(self):
return self.lower_right
def getIDValue(self):
return self.id_value
class Point:
def __init__(self, vec, id_value):
self.vec = vec
self.id_value = id_value
@staticmethod
def toPoint(mbr):
if mbr.getUpperLeft() != mbr.getLowerRight():
raise Exception("attempted to turn a non-point mbr to a point")
return mbr.getUpperLeft()
def getVec(self):
return self.vec
def getComponent(self, d):
return self.getVec()[d]
def getIDValue(self):
return self.id_value
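# Hedged construction sketch (the original driver code is not included here): judging
# from xtreeInsertHelper(), a leaf entry is an RTreeEntry whose child is an (initially
# empty) RTreeNode, e.g. for a point p:
#   mbr = RawMBR.makeMBRFromPoint(p)
#   node = RTreeNode(None, [], True)
#   entry = RTreeEntry(mbr, node)
#   node.setEntry(entry)
#   tree = RTree()
#   tree.insert(entry)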
import string
class RTree:
def __init__(self):
root_node = RTreeNode(None, [], True)
root_mbr = CompositeMBR(None, None, None)
root_entry = RTreeEntry(root_mbr, root_node)
root_node.setEntry(root_entry)
self.setRootEntry(root_entry)
def getRootEntry(self):
return self.root_entry
def setRootEntry(self, root_entry):
self.root_entry = root_entry
def hasConsistentNonTraditionalLeafDepthValues(self):
root = self.getRootEntry().getChild()
curr_node = root
depth = 0
while curr_node.isLeafNode() == False:
curr_node = curr_node.getChildren()[0]
depth = depth + 1
return self.hasConsistentNonTraditionalLeafDepthValuesHelper(root, depth, 0)
def hasConsistentNonTraditionalLeafDepthValuesHelper(self, node, depth, curr_depth):
if node == None:
return
elif node.isLeafNode() == True:
if depth != curr_depth:
return False
else:
return True
else:
for curr_node in node.getChildren():
result = self.hasConsistentNonTraditionalLeafDepthValuesHelper(curr_node, depth, curr_depth + 1)
if result == False:
return False
return True
def toNumChildrenString(self):
root = self.getRootEntry().getChild()
return self.toNumChildrenStringHelper(root)
def toNumChildrenStringHelper(self, node):
if node == None:
return ""
entries = node.getEntries()
children = node.getChildren()
have_node_str = True
overall_str_list = None
if have_node_str == True:
curr_leaf_status = str(node.getNumChildren())
overall_str_list = [curr_leaf_status]
else:
overall_str_list = []
for entry in entries:
child = entry.getChild()
child_str = self.toNumChildrenStringHelper(child)
curr_str = child_str
overall_str_list.append(curr_str)
overall_str = "(" + string.join(overall_str_list, " ") + ")"
return overall_str
def toEntriesArePresentString(self):
root = self.getRootEntry().getChild()
return self.toEntriesArePresentStringHelper(root)
def toEntriesArePresentStringHelper(self, node):
if node == None:
return ""
entries = node.getEntries()
children = node.getChildren()
have_node_str = True
overall_str_list = None
if have_node_str == True:
curr_leaf_status = "-" if (node.getParent() == None or (node.getParent() != None and node in node.getParent().getChildren())) == False else "+"
overall_str_list = [curr_leaf_status]
else:
overall_str_list = []
for entry in entries:
child = entry.getChild()
child_str = self.toEntriesArePresentStringHelper(child)
curr_str = child_str
overall_str_list.append(curr_str)
overall_str = "(" + string.join(overall_str_list, " ") + ")"
return overall_str
def toLeafStatusString(self):
root = self.getRootEntry().getChild()
return self.toLeafStatusStringHelper(root)
def toLeafStatusStringHelper(self, node):
if node == None:
return ""
entries = node.getEntries()
children = node.getChildren()
have_node_str = True
overall_str_list = None
if have_node_str == True:
curr_leaf_status = "-" if node.isLeafNode() == False else "+"
overall_str_list = [curr_leaf_status]
else:
overall_str_list = []
for entry in entries:
child = entry.getChild()
child_str = self.toLeafStatusStringHelper(child)
curr_str = child_str
overall_str_list.append(curr_str)
overall_str = "(" + string.join(overall_str_list, " ") + ")"
return overall_str
def toDepthString(self):
root = self.getRootEntry().getChild()
return self.toDepthStringHelper(root, 0)
def toDepthStringHelper(self, node, depth):
if node == None:
return ""
entries = node.getEntries()
children = node.getChildren()
have_node_str = True
overall_str_list = None
if have_node_str == True:
curr_depth = "-" if node.getNumEntries() != 0 else str(depth)
overall_str_list = [curr_depth]
else:
overall_str_list = []
for entry in entries:
child = entry.getChild()
child_str = self.toDepthStringHelper(child, depth + 1)
curr_str = child_str
overall_str_list.append(curr_str)
overall_str = "(" + string.join(overall_str_list, " ") + ")"
return overall_str
def toString(self):
root = self.getRootEntry().getChild()
return self.toStringHelper(root)
def toStringHelper(self, node):
if node == None:
return ""
entries = node.getEntries()
children = node.getChildren()
have_node_str = True
is_root_node = node == self.getRootEntry().getChild()
if is_root_node == True:
have_node_str = True
overall_str_list = None
if is_root_node == False:
overall_str_list = [node.getEntry().getMBR().toString()]
# overall_str_list = [node.getEntry().getMBR().toString(), str(node)]
else:
overall_str_list = [] if node.getNumChildren() == 0 else [node.getEntry().getMBR().toString()]
# overall_str_list = [] if node.getNumChildren() == 0 else [node.getEntry().getMBR().toString(), str(node)]
for entry in entries:
child = entry.getChild()
child_str = self.toStringHelper(child)
curr_str = child_str
overall_str_list.append(curr_str)
overall_str = "(" + string.join(overall_str_list, " ") + ")"
return overall_str
def chooseEntriesWithMinimalOverlapEnlargement(self, entries, entry):
mbr_to_entry_dict = {}
for i in range(len(entries)):
curr_entry = entries[i]
curr_mbr = curr_entry.getMBR()
mbr_to_entry_dict[curr_mbr] = curr_entry
mbr_list = [x.getMBR() for x in entries]
mbr = entry.getMBR()
tagged_enlargement_values = [(MBR.findOverlapArea(x, mbr), x) for x in mbr_list]
enlargement_values = [x[0] for x in tagged_enlargement_values]
min_enlargement_value = min(enlargement_values)
candidate_tagged_enlargement_values = [x for x in tagged_enlargement_values if x[0] == min_enlargement_value]
candidate_entries = [mbr_to_entry_dict[x[1]] for x in candidate_tagged_enlargement_values]
return candidate_entries
def chooseEntriesWithMinimalAreaEnlargement(self, entries, entry):
mbr_to_entry_dict = {}
for i in range(len(entries)):
curr_entry = entries[i]
curr_mbr = curr_entry.getMBR()
mbr_to_entry_dict[curr_mbr] = curr_entry
mbr_list = [x.getMBR() for x in entries]
mbr = entry.getMBR()
tagged_enlargement_values = [(MBR.getAreaEnlargement(x, mbr), x) for x in mbr_list]
enlargement_values = [x[0] for x in tagged_enlargement_values]
min_enlargement_value = min(enlargement_values)
candidate_tagged_enlargement_values = [x for x in tagged_enlargement_values if x[0] == min_enlargement_value]
candidate_entries = [mbr_to_entry_dict[x[1]] for x in candidate_tagged_enlargement_values]
return candidate_entries
def resolveEnlargementTie(self, entries, entry):
mbr = entry.getMBR()
tagged_mbr_list = []
for curr_entry in entries:
base_mbr = curr_entry.getMBR()
curr_mbr = MBR.getEnlargedMBR(base_mbr, mbr)
tagged_mbr_list.append((curr_mbr, curr_entry))
tagged_area_values = [(x[0].getArea(), x[1]) for x in tagged_mbr_list]
area_values = [x[0] for x in tagged_area_values]
min_area = min(area_values)
candidate_tagged_area_values = [x for x in tagged_area_values if x[0] == min_area]
candidate_entries = [x[1] for x in candidate_tagged_area_values]
return candidate_entries
@staticmethod
def rstarGenDistributions(entries, M, m):
result_list = []
if len(entries) > (M + 1):
raise Exception()
window_left_sizes = [m - 1 + k for k in range(1, M - 2 * m + 2 + 1)]
window_left_sizes = [x for x in window_left_sizes if x <= M and x >= m and (len(entries) - x) <= M and (len(entries) - x) >= m]
window_size_pairs = [(window_left_sizes[i], len(entries) - window_left_sizes[i]) for i in range(len(window_left_sizes))]
window_size_pairs = [x for x in window_size_pairs if x[0] <= M and x[0] >= m and x[1] <= M and x[1] >= m]
for i in xrange(entries[0].getMBR().getDimension()):
low_sorted_entries = entries[ : ]
low_sorted_entries.sort(key = lambda x: x.getMBR().getUpperLeft()[i])
low_distributions = [(low_sorted_entries[ : window_left_sizes[j]], low_sorted_entries[window_left_sizes[j] : ]) for j in xrange(len(window_left_sizes))]
upper_sorted_entries = entries[ : ]
upper_sorted_entries.sort(key = lambda x: x.getMBR().getLowerRight()[i])
upper_distributions = [(upper_sorted_entries[ : window_left_sizes[j]], upper_sorted_entries[window_left_sizes[j] : ]) for j in xrange(len(window_left_sizes))]
curr_tuple = (low_distributions, upper_distributions)
result_list.append(curr_tuple)
return result_list
@staticmethod
def rstarChooseSplitAxis(entries, M, m):
result = RTree.rstarGenDistributions(entries, M, m)
S_comp_dict = {}
for i in xrange(entries[0].getMBR().getDimension()):
low_comp_distributions, upper_comp_distributions = result[i]
S_comp_value = 0
low_constituent_mbr_list_pairs = [([y.getMBR() for y in x[0]], [y.getMBR() for y in x[1]]) for x in low_comp_distributions]
low_mbr_pairs = [(CompositeMBR.makeMBR(x[0]), CompositeMBR.makeMBR(x[1])) for x in low_constituent_mbr_list_pairs]
low_margin_values = [x[0].getMarginValue() + x[1].getMarginValue() for x in low_mbr_pairs]
low_margin_value_sum = sum(low_margin_values)
S_comp_value += low_margin_value_sum
upper_constituent_mbr_list_pairs = [([y.getMBR() for y in x[0]], [y.getMBR() for y in x[1]]) for x in upper_comp_distributions]
upper_mbr_pairs = [(CompositeMBR.makeMBR(x[0]), CompositeMBR.makeMBR(x[1])) for x in upper_constituent_mbr_list_pairs]
upper_margin_values = [x[0].getMarginValue() + x[1].getMarginValue() for x in upper_mbr_pairs]
upper_margin_value_sum = sum(upper_margin_values)
S_comp_value += upper_margin_value_sum
S_comp_dict[i] = S_comp_value
d_S_pairs = S_comp_dict.items()
min_S_value = min([x[1] for x in d_S_pairs])
min_S_value_d_S_pair_candidates = [x for x in d_S_pairs if x[1] == min_S_value]
chosen_d_S_pair = min_S_value_d_S_pair_candidates[0]
chosen_d_value = chosen_d_S_pair[0]
return chosen_d_value
@staticmethod
def rstarChooseSplitIndex(entries, axis, M, m):
result = RTree.rstarGenDistributions(entries, M, m)
candidate_distributions = None
candidate_distributions = result[axis][0] + result[axis][1]
mbr_list_pair_tagged_candidate_distributions = [(([y.getMBR() for y in x[0]], [y.getMBR() for y in x[1]]), x) for x in candidate_distributions]
mbr_pair_tagged_candidate_distributions = [((CompositeMBR.makeMBR(x[0][0]), CompositeMBR.makeMBR(x[0][1])), x[1]) for x in mbr_list_pair_tagged_candidate_distributions]
overlap_value_tagged_candidate_distributions = [(MBR.findOverlapArea(x[0][0], x[0][1]), x[1]) for x in mbr_pair_tagged_candidate_distributions]
overlap_values = [x[0] for x in overlap_value_tagged_candidate_distributions]
min_overlap_value = min(overlap_values)
matching_overlap_value_tagged_candidate_distributions = [x for x in overlap_value_tagged_candidate_distributions if x[0] == min_overlap_value]
next_next_candidates = [x[1] for x in matching_overlap_value_tagged_candidate_distributions]
if len(matching_overlap_value_tagged_candidate_distributions) > 1:
next_candidate_distributions = next_next_candidates
mbr_list_pair_tagged_candidate_distributions = [(([y.getMBR() for y in x[0]], [y.getMBR() for y in x[1]]), x) for x in next_candidate_distributions]
mbr_pair_tagged_next_candidate_distributions = [((CompositeMBR.makeMBR(x[0][0]), CompositeMBR.makeMBR(x[0][1])), x[1]) for x in mbr_list_pair_tagged_candidate_distributions]
combined_area_tagged_next_candidate_distributions = [(x[0][0].getArea() + x[0][1].getArea(), x[1]) for x in mbr_pair_tagged_next_candidate_distributions]
combined_area_values = [x[0] for x in combined_area_tagged_next_candidate_distributions]
min_combined_area_value = min(combined_area_values)
matching_combined_area_tagged_next_candidate_distributions = [x for x in combined_area_tagged_next_candidate_distributions if x[0] == min_combined_area_value]
next_next_candidates = [x[1] for x in matching_combined_area_tagged_next_candidate_distributions]
chosen_distribution_pair = next_next_candidates[0]
return chosen_distribution_pair
def chooseLeaf(self, entry):
return self.chooseLeafHelper(entry, self.getRootEntry().getChild())
def chooseLeafHelper(self, entry, node):
if node.isLeafNode() == True:
if node == self.getRootEntry().getChild():
return node
else:
return node.getParent()
else:
entries = node.getEntries()
candidate_entries = self.chooseEntriesWithMinimalAreaEnlargement(entries, entry)
if len(candidate_entries) != 1:
candidate_entries = self.resolveEnlargementTie(candidate_entries, entry)
chosen_entry = candidate_entries[0]
chosen_child = chosen_entry.getChild()
return self.chooseLeafHelper(entry, chosen_child)
def rstarChooseLeaf(self, entry):
return self.rstarChooseLeafHelper(entry, self.getRootEntry().getChild())
def rstarChooseLeafHelper(self, entry, node):
if node.isLeafNode() == True:
if node == self.getRootEntry().getChild():
return node
else:
return node.getParent()
else:
entries = node.getEntries()
candidate_entries = None
# if node.isLeafNode() == True:
candidate_entries = self.chooseEntriesWithMinimalOverlapEnlargement(entries, entry)
if len(candidate_entries) != 1:
candidate_entries = self.chooseEntriesWithMinimalAreaEnlargement(candidate_entries, entry)
if len(candidate_entries) != 1:
candidate_entries = self.resolveEnlargementTie(candidate_entries, entry)
"""
else:
candidate_entries = self.chooseEntriesWithMinimalAreaEnlargement(entries, entry)
if len(candidate_entries) != 1:
candidate_entries = self.resolveEnlargementTie(candidate_entries, entry)
"""
chosen_entry = candidate_entries[0]
chosen_child = chosen_entry.getChild()
return self.rstarChooseLeafHelper(entry, chosen_child)
def insert(self, entry):
return self.xtreeInsert(entry)
def chooseSubtree(self, entry, node):
entries = node.getEntries()
candidate_entries = None
# if node.isLeafNode() == True:
candidate_entries = self.chooseEntriesWithMinimalOverlapEnlargement(entries, entry)
if len(candidate_entries) != 1:
candidate_entries = self.chooseEntriesWithMinimalAreaEnlargement(candidate_entries, entry)
if len(candidate_entries) != 1:
candidate_entries = self.resolveEnlargementTie(candidate_entries, entry)
"""
else:
candidate_entries = self.chooseEntriesWithMinimalAreaEnlargement(entries, entry)
if len(candidate_entries) != 1:
candidate_entries = self.resolveEnlargementTie(candidate_entries, entry)
"""
chosen_entry = candidate_entries[0]
chosen_child = chosen_entry.getChild()
return chosen_entry
def xtreeInsert(self, entry):
# print "insert"
return self.xtreeInsertHelper(entry, self.getRootEntry().getChild())
SPLIT = 0
SUPERNODE = 1
NO_SPLIT = 2
def xtreeInsertHelper(self, entry, node):
split_status = None
next_mbr = None
if True:
# if node.getNumChildren() == 0 and node == self.getRootEntry().getChild():
# if node.getNumChildren() == 0:
# if node.isNonTraditionalLeafNode() == True:
if node.isLeafNode() == True and node == self.getRootEntry().getChild():
node.addEntry(entry)
curr_node = entry.getChild()
curr_node.setParent(node)
mbr = CompositeMBR.makeMBR([entry.getMBR()])
node.getEntry().setMBR(mbr)
# print "no split"
return (RTree.NO_SPLIT, [node])
if node.isLeafNode() == True:
# split just in case
# print "split"
return (RTree.SPLIT, [node])
elif node.isNonTraditionalLeafNode() == True:
node.addEntry(entry)
entry.getChild().setParent(node)
"""
elif node.getNumChildren() == 0:
pass
return (RTree.NO_SPLIT, [node])
"""
follow = self.chooseSubtree(entry, node).getChild()
result = self.xtreeInsertHelper(entry, follow)
split_status, added_nodes = result
curr_entry = node.getEntry()
curr_mbr = curr_entry.getMBR()
mbr = entry.getMBR()
next_mbr = MBR.getEnlargedMBR(curr_mbr, mbr)
node.getEntry().setMBR(next_mbr)
# this parent-setting step is crucial
# if node.isNonTraditionalLeafNode() == False:
# this is idempotent
for added_node in added_nodes:
node.addEntry(added_node.getEntry())
added_node.setParent(node)
if split_status == RTree.SPLIT:
# added_node.setParent(node)
if node.getNumChildren() > node.getMaximumNumEntriesPerNode():
split_result = self.xtreeSplitNode(node, entry)
was_successful, entry_collection1, entry_collection2, dimension = split_result
if was_successful == True:
mbr_collection1 = [x.getMBR() for x in entry_collection1]
mbr_collection2 = [x.getMBR() for x in entry_collection2]
# this line presumes that we have parent set correctly for a leaf,
# which is not the case when we initially insert
parent = node.getParent()
entry1 = RTreeEntry(CompositeMBR.makeMBR(mbr_collection1), None)
node1 = RTreeNode(parent, entry_collection1, None, entry1)
entry1.setChild(node1)
entry2 = RTreeEntry(CompositeMBR.makeMBR(mbr_collection2), None)
node2 = RTreeNode(parent, entry_collection2, None, entry2)
entry2.setChild(node2)
for curr_entry in entry_collection1:
curr_entry.getChild().setParent(node1)
for curr_entry in entry_collection2:
curr_entry.getChild().setParent(node2)
node1.setSplitHistoryRootDimension(dimension)
node2.setSplitHistoryRootDimension(dimension)
if self.getRootEntry().getChild() == node:
next_root_entry = RTreeEntry(next_mbr, None)
next_root = RTreeNode(None, [entry1, entry2], None, next_root_entry)
next_root_entry.setChild(next_root)
self.setRootEntry(next_root_entry)
node1.setParent(next_root)
node2.setParent(next_root)
else:
parent.removeEntry(node.getEntry())
parent.addEntry(entry1)
parent.addEntry(entry2)
# print "split #2"
return (RTree.SPLIT, [node1, node2])
else:
self.xtreeSupernodeInsert(node, [x.getEntry() for x in added_nodes])
# print "supernode #1"
return (RTree.SUPERNODE, [node])
elif split_status == RTree.SUPERNODE:
pass
# print "no split"
return (RTree.NO_SPLIT, [node])
def rstarInsert(self, entry):
leaf_node = self.rstarChooseLeaf(entry)
adjust_result = None
if leaf_node.isFull() == False:
leaf_node.addEntry(entry)
entry.getChild().setParent(leaf_node)
adjust_result = RTree.rstarAdjustTree(self, leaf_node, [entry], False)
else:
split_result = self.rstarSplitNode(leaf_node, entry)
l, ll, e, ee = split_result
adjust_result = RTree.rstarAdjustTree(self, l, [e, ee], True)
ended_with_split2, resulting_entries_from_split = adjust_result
if ended_with_split2 == True:
e, ee = resulting_entries_from_split
l = e.getChild()
ll = ee.getChild()
if (self.getRootEntry().getChild().getNumEntries() + 1) <= self.getRootEntry().getChild().getMaximumNumEntriesPerNode():
self.getRootEntry().getChild().addEntry(ee)
ll.setParent(self.getRootEntry().getChild())
else:
split_result = self.rstarSplitNode(self.getRootEntry().getChild(), ee)
l, ll, e, ee = split_result
resulting_entries_from_split = [e, ee]
next_root = RTreeNode(None, resulting_entries_from_split, False, self.getRootEntry())
l.setParent(next_root)
ll.setParent(next_root)
self.getRootEntry().setChild(next_root)
else:
pass
MAX_OVERLAP_RATIO = 0.2
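  # Split strategy (as implemented below): try an R*-style topological split first;
  # if the two resulting MBRs overlap by more than MAX_OVERLAP_RATIO of their union,
  # retry along the node's split-history dimension; if that fails or leaves a group
  # underfull, report failure so the caller keeps the node as a supernode instead.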
def xtreeSplitNode(self, node, entry):
# we never split a super-node
if node.isSuperNode() == True:
# raise Exception()
return (False, None, None, None)
dimension = None
result1 = self.xtreeTopologicalSplit(node, entry)
entry_collection1, entry_collection2, dimension = result1
mbr_collection1 = [x.getMBR() for x in entry_collection1]
mbr_collection2 = [x.getMBR() for x in entry_collection2]
mbr1 = CompositeMBR.makeMBR(mbr_collection1)
mbr2 = CompositeMBR.makeMBR(mbr_collection2)
overlap_area = MBR.findOverlapArea(mbr1, mbr2)
area1 = mbr1.getArea()
area2 = mbr2.getArea()
union_area = area1 + area2 - overlap_area
    overlap_ratio = None
if union_area == 0:
if mbr1.isEqualTo(mbr2) == True:
overlap_ratio = 1
else:
overlap_ratio = 0
else:
overlap_ratio = overlap_area / (1.0 * union_area)
# raise Exception()
if overlap_ratio > RTree.MAX_OVERLAP_RATIO:
# raise Exception()
result2 = self.xtreeOverlapMinimalSplit(node, entry)
entry_collection3, entry_collection4, dimension, do_fail = result2
# raise Exception()
if do_fail == True or len(entry_collection3) < node.getMinimumNumEntriesPerNode() or len(entry_collection4) < node.getMinimumNumEntriesPerNode():
return (False, None, None, dimension)
else:
return (True, entry_collection3, entry_collection4, dimension)
else:
return (True, entry_collection1, entry_collection2, dimension)
def xtreeTopologicalSplit(self, node, entry):
m = self.getRootEntry().getChild().getMinimumNumEntriesPerNode()
M = self.getRootEntry().getChild().getMaximumNumEntriesPerNode()
E_overall = node.getEntries()
axis = RTree.rstarChooseSplitAxis(E_overall, M, m)
result = RTree.rstarChooseSplitIndex(E_overall, axis, M, m)
entry_group1, entry_group2 = result
next_result = (entry_group1, entry_group2, axis)
return next_result
def xtreeOverlapMinimalSplit(self, node, entry):
if node.getSplitHistoryRootDimension() == None:
return (None, None, None, True)
else:
m = self.getRootEntry().getChild().getMinimumNumEntriesPerNode()
M = self.getRootEntry().getChild().getMaximumNumEntriesPerNode()
E_overall = node.getEntries()
axis = node.getSplitHistoryRootDimension()
result = RTree.rstarChooseSplitIndex(E_overall, axis, M, m)
entry_group1, entry_group2 = result
next_result = (entry_group1, entry_group2, axis, False)
return next_result
def xtreeSupernodeInsert(self, node, entries):
if node.isSuperNode() == False:
node.setToSuperNode(True)
# questionable if this is really necessary
for entry in entries:
curr_node = entry.getChild()
node.addEntry(entry)
# needed this
curr_node.setParent(node)
"""
entries = node.getEntries()
mbr_list = [x.getMBR() for x in entries]
tight_overall_mbr = CompositeMBR.makeMBR(mbr_list)
"""
def rstarSplitNode(self, node, entry):
curr_node = node
E_overall = list(set(curr_node.getEntries() + [entry]))
return self.rstarSplitNodeHelper(node, E_overall, entry)
def rstarSplitNodeHelper(self, node, E_overall, entry):
# prev_leaf_status = node.isLeafNode()
prev_leaf_status = None
curr_node = node
m = self.getRootEntry().getChild().getMinimumNumEntriesPerNode()
M = self.getRootEntry().getChild().getMaximumNumEntriesPerNode()
axis = RTree.rstarChooseSplitAxis(E_overall, M, m)
result = RTree.rstarChooseSplitIndex(E_overall, axis, M, m)
entry_group1, entry_group2 = result
parent = curr_node.getParent()
"""
if parent != None and (node in parent.getChildren()):
pass
"""
node1 = RTreeNode(parent, entry_group1, prev_leaf_status)
node2 = RTreeNode(parent, entry_group2, prev_leaf_status)
for curr_entry in entry_group1:
curr_entry.getChild().setParent(node1)
for curr_entry in entry_group2:
curr_entry.getChild().setParent(node2)
mbr_group1 = [x.getMBR() for x in entry_group1]
mbr_group2 = [x.getMBR() for x in entry_group2]
curr_overall_mbr1 = CompositeMBR.makeMBR(mbr_group1)
curr_overall_mbr2 = CompositeMBR.makeMBR(mbr_group2)
for curr_entry in entry_group1:
next_curr_node = curr_entry.getChild()
if curr_entry != entry:
curr_node.removeEntry(curr_entry)
next_curr_node.setParent(node1)
for curr_entry in entry_group2:
next_curr_node = curr_entry.getChild()
if curr_entry != entry:
curr_node.removeEntry(curr_entry)
next_curr_node.setParent(node2)
entry1 = RTreeEntry(curr_overall_mbr1, node1)
entry2 = RTreeEntry(curr_overall_mbr2, node2)
node1.setEntry(entry1)
node2.setEntry(entry2)
if parent != None:
original_entry = parent.retrieveEntryForChild(curr_node)
parent.removeEntry(original_entry)
if node != self.getRootEntry().getChild():
parent.addEntry(entry1)
parent.addEntry(entry2)
node1.setParent(parent)
node2.setParent(parent)
else:
next_root = RTreeNode(None, [entry1, entry2], False)
self.getRootEntry().setChild(next_root)
next_root.setEntry(self.getRootEntry())
node1.setParent(next_root)
node2.setParent(next_root)
pass
return (node1, node2, entry1, entry2)
@staticmethod
def rstarPreadjustTree(self, leaf_node):
node = leaf_node
parent = node.getParent()
if parent != None:
curr_entries = node.getEntries()
entry = node.getParent().retrieveEntryForChild(node)
children = [x.getChild() for x in curr_entries]
mbr_list = [x.getMBR() for x in curr_entries]
tight_overall_mbr = CompositeMBR.makeMBR(mbr_list)
entry.setMBR(tight_overall_mbr)
@staticmethod
def rstarAdjustTree(tree, node, resulting_entries_from_split, have_resulting_second_entry_from_split):
return tree.rstarAdjustTreeHelper(tree, node, resulting_entries_from_split, have_resulting_second_entry_from_split)
@staticmethod
def rstarAdjustTreeHelper(tree, node, resulting_entries_from_split,
have_resulting_second_entry_from_split):
if node.getParent() == None:
entry = tree.getRootEntry()
curr_entries = entry.getChild().getEntries()
children = [x.getChild() for x in curr_entries]
mbr_list = [x.getMBR() for x in curr_entries]
tight_overall_mbr = CompositeMBR.makeMBR(mbr_list)
entry.setMBR(tight_overall_mbr)
return (have_resulting_second_entry_from_split, resulting_entries_from_split)
else:
parent = node.getParent()
curr_entries = node.getEntries()
entry = None
"""
if node.getParent() == None:
entry = tree.getRootEntry()
else:
entry = node.getParent().retrieveEntryForChild(node)
"""
entry = parent.retrieveEntryForChild(node)
children = [x.getChild() for x in curr_entries]
mbr_list = [x.getMBR() for x in curr_entries]
tight_overall_mbr = CompositeMBR.makeMBR(mbr_list)
entry.setMBR(tight_overall_mbr)
partner_entry = None
if have_resulting_second_entry_from_split == True:
first_entry, second_entry = resulting_entries_from_split
partner_entry = second_entry
if have_resulting_second_entry_from_split == True:
partner_node = partner_entry.getChild()
partner_entries = partner_node.getEntries()
partner_children = [x.getChild() for x in partner_entries]
partner_mbr_list = [x.getMBR() for x in partner_entries]
partner_tight_overall_mbr = CompositeMBR.makeMBR(partner_mbr_list)
partner_entry.setMBR(partner_tight_overall_mbr)
if node.isLeafNode() == False:
if have_resulting_second_entry_from_split == True:
if (parent.getNumChildren() + 1) <= parent.getMaximumNumEntriesPerNode():
parent.addEntry(partner_entry)
partner_entry.getChild().setParent(parent)
return RTree.rstarAdjustTreeHelper(tree, node.getParent(), [entry], False)
else:
split_result = tree.rstarSplitNode(parent, partner_entry)
l, ll, e, ee = split_result
return RTree.rstarAdjustTreeHelper(tree, node.getParent(), [e, ee], True)
else:
return RTree.rstarAdjustTreeHelper(tree, node.getParent(), [entry], False)
else:
return RTree.rstarAdjustTreeHelper(tree, node.getParent(), resulting_entries_from_split,
have_resulting_second_entry_from_split)
"""
@staticmethod
def adjustTree(tree, node, resulting_entries_from_split, have_resulting_second_entry_from_split, is_first_call_after_first_pass):
if node == None:
return (False, [])
else:
parent = node.getParent()
curr_entries = node.getEntries()
entry = None
if node.getParent() == None:
entry = tree.getRootEntry()
else:
entry = parent.retrieveEntryForChild(node)
children = [x.getChild() for x in curr_entries]
mbr_list = [x.getMBR() for x in curr_entries]
tight_overall_mbr = CompositeMBR.makeMBR(mbr_list)
entry.setMBR(tight_overall_mbr)
partner_entry = None
if have_resulting_second_entry_from_split == True:
first_entry, second_entry = resulting_entries_from_split
partner_entry = second_entry
if have_resulting_second_entry_from_split == True and is_first_call_after_first_pass != True:
partner_node = partner_entry.getChild()
partner_entries = partner_node.getEntries()
partner_children = [x.getChild() for x in partner_entries]
partner_mbr_list = [x.getMBR() for x in partner_entries]
partner_tight_overall_mbr = CompositeMBR.makeMBR(partner_mbr_list)
partner_entry.setMBR(partner_tight_overall_mbr)
if have_resulting_second_entry_from_split == True:
parent.removeEntry(entry)
if (parent.getNumChildren() + 2) <= parent.getMaximumNumEntriesPerNode():
parent.addEntry(entry)
parent.addEntry(partner_entry)
entry.getChild().setParent(parent)
partner_entry.getChild().setParent(parent)
return tree.adjustTree(tree, parent, [entry], False, False)
else:
parent.addEntry(entry)
entry.getChild().setParent(parent)
split_result = tree.splitNode(parent, partner_entry)
l, ll, e, ee = split_result
return tree.adjustTree(tree, l, [e, ee], True, False)
else:
return (False, [])
"""
# assume item is in tree
# returns a node, which can be None if no match is found
# finds one match if such a node exists
# def delete(self, E, RN):
def findLeaf(self, entry):
return self.findLeafHelper(entry, self.getRootEntry())
def findLeafHelper(self, entry, curr_entry):
"""
if node.isLeafNode() == False:
curr_mbr = entry.getMBR()
entries = self.getEntries()
tagged_mbr_list = [(x.getMBR(), x) for x in entries]
tagged_overlapped_mbr_list = [x for x in tagged_mbr_list if MBR.doOverlap(curr_mbr, x[0]) == True]
for tagged_overlapped_mbr in tagged_overlapped_mbr_list:
curr_mbr, curr_entry = tagged_overlapped_mbr
curr_node = curr_entry.getChild()
result = self.findLeafHelper(entry, curr_node)
if result == None:
continue
else:
return curr_node
return None
"""
# a little stilted since we don't need a O(log(n)) time operation
# to find the entry containing node; just look at parent of entry child
if curr_entry.getMBR().isRaw() == True:
if entry == curr_entry:
return True
else:
return False
else:
entries = curr_entry.getChild().getEntries()
for next_entry in entries:
if MBR.doOverlap(curr_entry.getMBR(), entry.getMBR()) == True:
result = self.findLeafHelper(entry, next_entry)
if result == True:
return result
return False
def delete(self, entry):
# print "hello"
did_find_leaf = self.findLeaf(entry)
child_node = entry.getChild()
# root node never has a raw mbr
# leaf is a non-traditional leaf
leaf_node = child_node.getParent() if entry != self.getRootEntry() else None
if leaf_node == None:
raise Exception("expected a node to be found for a delete")
# if parent has zero entries after removing this entry, this should be okay
leaf_node.removeEntry(entry)
self.condenseTree(leaf_node)
# root = self.getRootEntry().getChild()
"""
if root.getNumChildren() == 1:
# shorten tree
entries = root.getEntries()
chosen_entry = entries[0]
chosen_child = chosen_entry.getChild()
self.setRoot(chosen_child)
"""
# if RN is a leaf node
# search all entries of RN to find E.mbr
# else:
# RN is an internal node
# find all entries of RN that cover E.mbr
        # follow the corresponding subtrees until the leaf L that contains E is found
# remove E from L
# call algorithm condenseTree(L)
# if the root has only one child (and it is not a leaf)
# remove the root
# set as new root its only child
pass
def condenseTree(self, leaf_node):
Q = []
self.condenseTreeHelper(leaf_node, Q)
# Q is in order of low-level to high-level;
# wish to insert using order of high-level to low-level
# Q = list(set(Q))
Q.reverse()
for curr_node in Q:
curr_entry = curr_node.getEntry()
# print "mbr:", curr_entry.getMBR().toString()
# print "tree:", self.toString()
self.insert(curr_entry)
def condenseTreeHelper(self, node, Q):
# demote super-node if necessary
if node.isSuperNode() == True and node.getNumChildren() <= node.getMaximumNumEntriesPerNode():
node.setToSuperNode(False)
if node.getParent() == None:
# we are a root node
if self.getRootEntry().getChild().getNumChildren() == 0:
root_node = RTreeNode(None, [], True)
root_mbr = CompositeMBR(None, None, None)
root_entry = RTreeEntry(root_mbr, root_node)
root_node.setEntry(root_entry)
self.setRootEntry(root_entry)
return
else:
entry = self.getRootEntry()
curr_entries = entry.getChild().getEntries()
children = [x.getChild() for x in curr_entries]
mbr_list = [x.getMBR() for x in curr_entries]
tight_overall_mbr = CompositeMBR.makeMBR(mbr_list)
entry.setMBR(tight_overall_mbr)
return
else:
# raise Exception()
# print "decision point"
"""
if node.isSuperNode() == True:
# print "supernode encountered"
parent = node.getParent()
parent.removeEntry(parent.retrieveEntryForChild(node))
Q.append(node)
# raise Exception()
if node.getNumChildren() <= 1:
# raise Exception()
node.setToSuperNode(False)
elif node.getNumChildren() <= node.getMaximumNumEntriesPerNode():
mbr_list = [x.getMBR() in node.getEntries()]
curr_x_tree = RTree()
overlap_area_sum = sum([x.getArea() for x in mbr_list])
for curr_mbr in mbr_list:
next_mbr = RawMBR(curr_mbr.getUpperLeft(), curr_mbr.getLowerRight(), None)
next_node = RTreeNode(None, [], True)
next_entry = RTreeEntry(next_mbr, next_node)
next_node.setEntry(next_entry)
curr_x_tree.insert(next_entry)
union_area = curr_x_tree.getUnionArea()
multi_overlap_ratio = overlap_area_sum / (1.0 * union_area)
if multi_overlap_ratio <= RTree.MAX_OVERLAP_RATIO:
node.setToSuperNode(False)
elif node.isUnderfull() == True:
"""
if node.isUnderfull() == True:
# print "underfull"
parent = node.getParent()
parent.removeEntry(parent.retrieveEntryForChild(node))
# don't use isLeafNode() for this, as internal nodes can temporarily look like leaf nodes
# keep_nodes = [x for x in self.getNodesForNode(node) if x.getEntry().getMBR().isRaw() == True]
keep_nodes = [x for x in self.getNodesForNode(node) if x.getEntry().getMBR().isRaw() == True]
for keep_node in keep_nodes:
Q.append(keep_node)
# only makes sense to speak of modifying mbr if we plan on keeping the node
if node.isUnderfull() == False:
# print "not underfull"
parent = node.getParent()
curr_entries = node.getEntries()
entry = parent.retrieveEntryForChild(node)
children = [x.getChild() for x in curr_entries]
mbr_list = [x.getMBR() for x in curr_entries]
tight_overall_mbr = CompositeMBR.makeMBR(mbr_list)
entry.setMBR(tight_overall_mbr)
self.condenseTreeHelper(node.getParent(), Q)
return
# not tested
# returns entries
# does intersection query
def doOverlapQuery(self, mbr, without_borders = False):
partial_result = []
self.doOverlapQueryHelper(mbr, self.getRootEntry(), partial_result, without_borders)
return partial_result
def doOverlapQueryHelper(self, mbr, entry, partial_result, without_borders):
if entry.getMBR().isRaw() == True:
if MBR.doOverlap(entry.getMBR(), mbr, without_borders) == True:
partial_result.append(entry)
else:
entries = entry.getChild().getEntries()
for curr_entry in entries:
if MBR.doOverlap(curr_entry.getMBR(), mbr) == True:
self.doOverlapQueryHelper(mbr, curr_entry, partial_result, without_borders)
# returns entries
def doEnclosureQuery(self, mbr):
partial_result = []
self.doEnclosureQueryHelper(mbr, self.getRootEntry(), partial_result)
return partial_result
def doEnclosureQueryHelper(self, mbr, entry, partial_result):
if entry.getMBR().isRaw() == True:
if entry.getMBR().doesEnclose(mbr) == True:
partial_result.append(entry)
else:
entries = entry.getChild().getEntries()
for curr_entry in entries:
if curr_entry.getMBR().doesEnclose(mbr) == True:
self.doEnclosureQueryHelper(mbr, curr_entry, partial_result)
def doEnclosureQueryWithEarlyStopping(self, mbr):
result = self.doEnclosureQueryWithEarlyStoppingHelper(mbr, self.getRootEntry())
return result
def doEnclosureQueryWithEarlyStoppingHelper(self, mbr, entry):
if entry.getMBR().isRaw() == True:
if entry.getMBR().doesEnclose(mbr) == True:
return True
else:
entries = entry.getChild().getEntries()
for curr_entry in entries:
if curr_entry.getMBR().doesEnclose(mbr) == True:
result = self.doEnclosureQueryWithEarlyStoppingHelper(mbr, curr_entry)
if result == True:
return True
return False
# returns entries
def doContainmentQuery(self, mbr):
partial_result = []
self.doContainmentQueryHelper(mbr, self.getRootEntry(), partial_result)
return partial_result
def doContainmentQueryHelper(self, mbr, entry, partial_result):
if entry.getMBR().isRaw() == True:
# print mbr.toString(), entry.getMBR().toString()
if mbr.doesEnclose(entry.getMBR()) == True:
partial_result.append(entry)
else:
entries = entry.getChild().getEntries()
for curr_entry in entries:
if MBR.doOverlap(curr_entry.getMBR(), mbr) == True:
self.doContainmentQueryHelper(mbr, curr_entry, partial_result)
# prefix order
def getNodes(self):
node_list = []
self.getNodesHelper(self.getRootEntry().getChild(), node_list)
return node_list
def getNodesHelper(self, node, partial_result):
partial_result.append(node)
for curr_node in node.getChildren():
self.getNodesHelper(curr_node, partial_result)
def getNodesForNode(self, node):
node_list = []
self.getNodesHelper(node, node_list)
return node_list
"""
def getUnionArea(self):
pass
"""
# takes O(log(n)) time on average for start rectangle
# taken from set of actual rectangles for an r-tree;
# takes O(n * log(n)) time at worst;
# assumes that rectangles are distinct
# return a list of entries
def getRectangleCloseDescendants(self, reference_entry):
# repeatedly pop nodes, prune using enclosure/containment
# w.r.t. reference rectangle, add children to priority queue,
# ignore if contained rectangle is contained by a rectangle in conflict x-tree,
# add actual rectangles to conflict x-tree,
# use as priority (prefer_contained, prefer_large_area_if_contained_else_small)
if self.getRootEntry().getChild().getNumChildren() == 0:
return []
reference_mbr = reference_entry.getMBR()
root_entry = self.getRootEntry()
root_node = root_entry.getChild()
root_mbr = root_entry.getMBR()
root_mbr_is_actual = root_mbr.isRaw()
root_mbr_is_contained = reference_mbr.doesEnclose(root_mbr)
root_mbr_area = root_mbr.getArea()
first_priority_component = 0 if root_mbr_is_contained == True else 1
second_priority_component = (-1 if root_mbr_is_contained == True else 1) * root_mbr_area
# min-pq
priority = (first_priority_component, second_priority_component)
# priority = -1 * root_mbr_area
# entry_pq = PriorityQueue()
heap = []
# entry_pq.push(root_entry, priority)
item = root_entry
pair = (priority,item)
heapq.heappush(heap,pair)
# print entry_pq
# raise Exception()
result_entry_list = []
self.getRectangleCloseDescendantsHelper(heap, reference_mbr, result_entry_list, reference_entry)
return result_entry_list
# def TopicKNearestNeighborBestFirstSearchHelper(self, heap, point, TopicKNearest, k):
def getRectangleCloseDescendantsHelper(self, heap, reference_mbr, result_entry_list, ignore_entry):
conflict_x_tree = RTree()
internal_node_stack_deque = deque()
# while len(heap) != 0:
while len(internal_node_stack_deque) != 0 or len(heap) != 0:
# entry = entry_pq.pop()
item = None
if len(heap) != 0:
(priority,item) = heapq.heappop(heap)
elif len(internal_node_stack_deque) != 0:
item = internal_node_stack_deque.popleft()
# (priority,item) = heapq.heappop(heap)
entry = item
node = entry.getChild()
mbr = entry.getMBR()
if mbr.doesEnclose(reference_mbr) == False and reference_mbr.doesEnclose(mbr) == False:
# ignore node if associated mbr does not enclose reference mbr
# and associated mbr is not contained within reference mbr
continue
if conflict_x_tree.doEnclosureQueryWithEarlyStopping(mbr) == True:
# ignore node if enclosing mbr exists in conflict x-tree
continue
if entry == ignore_entry:
# ignore node if its entry matches the ignore entry
continue
if node.isLeafNode() == True:
# could have a safe path to a leaf where the leaf mbr
# is not contained by reference rectangle;
# check explicitly for this case
if reference_mbr.doesEnclose(mbr) == False:
continue
# kick out close descendant candidates on occasion,
# if containment query for conflict x-tree returns entries
matching_entries = conflict_x_tree.doContainmentQuery(mbr)
for matching_entry in matching_entries:
# raise Exception()
conflict_x_tree.delete(matching_entry)
# if node is a leaf node, it has an actual rectangle
# decide whether to include associated entry in result;
# if we made it this far, we should add to conflict x-tree
result_entry_list.append(entry)
raw_mbr = mbr
next_mbr = raw_mbr.clone()
next_node = RTreeNode(None, [], True)
next_entry = RTreeEntry(next_mbr, next_node)
next_node.setEntry(next_entry)
conflict_x_tree.insert(next_entry)
elif node.isLeafNode() == False:
# if we made it this far, we should add children to priority queue
entries = node.getEntries()
priority_tagged_internal_entries = []
for curr_entry in entries:
# set priority correctly and add to priority queue
curr_node = curr_entry.getChild()
curr_mbr = curr_entry.getMBR()
curr_mbr_is_actual = curr_mbr.isRaw()
curr_mbr_is_contained = reference_mbr.doesEnclose(curr_mbr)
curr_mbr_area = curr_mbr.getArea()
first_priority_component = 0 if curr_mbr_is_contained == True else 1
second_priority_component = (-1 if curr_mbr_is_contained == True else 1) * curr_mbr_area
# min-pq
# priority = (first_priority_component, second_priority_component)
if curr_mbr.isRaw() == True:
priority = -1 * curr_mbr_area
item = curr_entry
pair = (priority,item)
heapq.heappush(heap,pair)
elif curr_mbr.isRaw() == False:
if curr_mbr.doesEnclose(reference_mbr) == False and reference_mbr.doesEnclose(curr_mbr) == False:
continue
# item = curr_entry
# internal_node_stack_deque.appendleft(item)
priority = (first_priority_component, second_priority_component)
priority_tagged_internal_entry = (priority, curr_entry)
priority_tagged_internal_entries.append(priority_tagged_internal_entry)
# item = curr_entry
# pair = (priority,item)
# if curr_mbr.doesEnclose(reference_mbr) == True or reference_mbr.doesEnclose(curr_mbr) == True:
# heapq.heappush(heap,pair)
priority_tagged_internal_entries.sort(key = lambda x: x[0], reverse = True)
for priority_tagged_internal_entry in priority_tagged_internal_entries:
priority, internal_entry = priority_tagged_internal_entry
item = internal_entry
internal_node_stack_deque.appendleft(item)
# print "conflict x-tree:", conflict_x_tree.toString()
# for a well-formed r-tree, this takes O(n * log(n)) time,
# where n is number of actual rectangles or leaves;
# assumes that rectangles are distinct
def getAllRectangleCloseAncestors(self):
start_rectangle_nodes = [x for x in self.getNodes() if x.getEntry().getMBR().isRaw() == True]
start_rectangle_entries = [x.getEntry() for x in start_rectangle_nodes]
start_rectangle_to_close_ancestor_entries_dict = {}
for start_rectangle_entry in start_rectangle_entries:
start_rectangle_to_close_ancestor_entries_dict[start_rectangle_entry] = []
for start_rectangle_entry in start_rectangle_entries:
close_descendant_entries = self.getRectangleCloseDescendants(start_rectangle_entry)
for close_descendant_entry in close_descendant_entries:
start_rectangle_to_close_ancestor_entries_dict[close_descendant_entry].append(start_rectangle_entry)
return start_rectangle_to_close_ancestor_entries_dict
def draw(self):
# im = Image.new("RGB", (512, 512), "white")
"""
im = Image.new("RGB", (768, 768), "white")
draw = ImageDraw.Draw(im)
root = self.getRoot()
root.draw(self, draw, 0)
im.save("tree.png", "PNG")
"""
# image = PythonMagick.Image(PythonMagick.Geometry("768x768"), "white")
image = PythonMagick.Image(PythonMagick.Geometry("1536x1536"), "white")
root_entry = self.getRootEntry()
entries = [root_entry]
RTreeEntry.draw(self, entries, image, 0)
"""
image.strokeColor("orange")
image.fillColor("none")
image.strokeWidth(4)
multiplier = 3 * 0.8
# offset = (768 * 0.2) / 2
offset = (1536 * 0.2) / 2
x1 = 0
y1 = 0
x2 = 47
y2 = 60
next_x1 = x1 * multiplier + offset
next_y1 = y1 * multiplier + offset
next_x2 = x2 * multiplier + offset
next_y2 = y2 * multiplier + offset
"""
# image.draw(PythonMagick.DrawableRectangle(next_x1, next_y1, next_x2, next_y2))
image.write("tree.png")
def main():
point1 = (30, 100, 0)
point2 = (40, 100, 0)
point3 = (50, 100, 0)
point4 = (60, 100, 0)
point5 = (70, 100, 0)
point6 = (80, 100, 0)
point7 = (90, 100, 0)
point8 = (110, 100, 0)
curr_mbr1 = RawMBR((100, 100, 0), (100, 100, 0), (100, 100, 0))
curr_mbr2 = RawMBR((50, 100, 0), (50, 100, 0), point3)
curr_mbr2b = RawMBR((50, 50, 0), (100, 100, 0), HyperRectangle((50, 50, 0), (100, 100, 0), 1))
tree = RTree()
print tree.toString()
curr_root = tree.getRootEntry().getChild()
mbr1 = RawMBR(point1, (110, 200, 100), point1)
node1 = RTreeNode(None, [], True)
entry1 = RTreeEntry(mbr1, node1)
node1.setEntry(entry1)
tree.insert(entry1)
mbr2 = RawMBR(point2, (110, 200, 100), point2)
node2 = RTreeNode(None, [], True)
entry2 = RTreeEntry(mbr2, node2)
node2.setEntry(entry2)
tree.insert(entry2)
mbr3 = RawMBR(point3, (110, 200, 100), point3)
node3 = RTreeNode(None, [], True)
entry3 = RTreeEntry(mbr3, node3)
node3.setEntry(entry3)
tree.insert(entry3)
mbr4 = RawMBR(point4, (110, 200, 100), point4)
node4 = RTreeNode(None, [], True)
entry4 = RTreeEntry(mbr4, node4)
node4.setEntry(entry4)
tree.insert(entry4)
mbr5 = RawMBR(point5, (110, 200, 100), point5)
node5 = RTreeNode(None, [], True)
entry5 = RTreeEntry(mbr5, node5)
node5.setEntry(entry5)
tree.insert(entry5)
mbr6 = RawMBR(point6, (110, 200, 100), point6)
node6 = RTreeNode(None, [], True)
entry6 = RTreeEntry(mbr6, node6)
node6.setEntry(entry6)
tree.insert(entry6)
mbr7 = RawMBR(point7, (110, 200, 100), point7)
node7 = RTreeNode(None, [], True)
entry7 = RTreeEntry(mbr7, node7)
node7.setEntry(entry7)
tree.insert(entry7)
mbr8 = RawMBR(point8, (110, 200, 100), point8)
node8 = RTreeNode(None, [], True)
entry8 = RTreeEntry(mbr8, node8)
node8.setEntry(entry8)
# problem here
tree.insert(entry8)
print tree.toString()
print tree.doEnclosureQuery(curr_mbr2)
curr_mbr3 = RawMBR((50, 100, 0), (110, 200, 100), None)
print tree.doContainmentQuery(curr_mbr3)
# raise Exception()
print tree.doOverlapQuery(curr_mbr2)
# raise Exception()
print tree.toString()
# tree.delete(entry1)
print tree.toString()
# tree.delete(entry8)
# tree.insert(entry1)
"""
tree.delete(entry1)
tree.delete(entry2)
tree.delete(entry3)
tree.delete(entry4)
tree.delete(entry5)
tree.delete(entry6)
tree.delete(entry7)
tree.delete(entry8)
"""
print tree.toString()
tree2 = RTree()
import random
entries = []
# lower_rights = [(3, 10, 10), (1, 10, 10), (8, 10, 10), (6, 10, 10), (9, 10, 10), (6, 10, 10), (9, 10, 10), (3, 10, 10), (1, 10, 10), (3, 10, 10)]
# for i in xrange(10):
# for i in xrange(4):
"""
ul_lr_pairs = [((797, 989, 602), (910, 1248, 1035)), \
((920, 974, 724), (1802, 1524, 1378)), \
((911, 953, 196), (1776, 1662, 455)), \
((596, 892, 131), (1543, 1838, 669)), \
((879, 319, 789), (1877, 744, 791)), \
((1081, 1056, 1020), (1708, 1075, 1542)), \
((358, 815, 372), (761, 1089, 594)), \
((294, 238, 1036), (785, 378, 1963)), \
((803, 1054, 307), (1776, 1597, 501)), \
((803, 233, 521), (1314, 717, 1487)), \
((660, 268, 962), (1293, 619, 1521)), \
((798, 928, 1028), (1762, 1795, 1309)), \
((225, 359, 290), (579, 950, 700)), \
((297, 196, 750), (1085, 718, 1259)), \
((808, 926, 151), (889, 1755, 320)), \
((945, 260, 1091), (1932, 332, 1133)), \
((262, 221, 872), (500, 279, 1521)), \
((332, 886, 493), (822, 1305, 1149)), \
((800, 709, 871), (1390, 1402, 1548)), \
((433, 499, 483), (1300, 1330, 1055))]
"""
# n = 10,000 works in 1 min. 54 sec. for pypy with m = 2 and M = 4
# n = 1,000 works in 2.996 sec. for pypy with m = 2 and M = 4
# n = 1,000 works in 3.428 sec. for pypy with m = 8 and M = 16
# n = 6,000 works in 56.672 sec. for pypy with m = 8 and M = 16
# these numbers are for upper-left's in (100, 10100) and
# lower-right's in (ul_i, ul_i + 10000)
# two strange things going on - saturation occurs
# if we increase n and do not increase domains and
# high inter-group overlap means maximal disjointedness
# is not going to be good enough to cut down branches explored;
# to counter saturation, domain has to grow with n
# n = 100 # 0.427 seconds (~1x slower for 1x growth; expected 1x slower)
# n = 1000 # 1.1649 seconds (~2.72x slower for 10x growth; expected 33x slower)
# n = 5500 # 23.899 seconds (~55.96x slower for 55x growth; expected 317x slower)
# n = 10000 # 84.222 seconds (~197x slower for 100x growth; expected 664x slower)
# n = 14500 # 170.053 seconds (~398x slower for 145x growth; expected 1040x slower)
# n = 20000 # 230.0411 seconds (~538x slower for 200x growth; expected 1528x slower)
# n = 2000
# n = 1000
# n = 20000
n = 1000
import math
for i in xrange(n):
upper_left = None
lower_right = None
"""
if i % 4 == 0:
upper_left = (0, 0)
lower_right = (10, 10)
elif i % 4 == 1:
upper_left = (20, 20)
lower_right = (40, 40)
elif i % 4 == 2:
upper_left = (60, 60)
lower_right = (80, 80)
elif i % 4 == 3:
upper_left = (100, 100)
lower_right = (120, 120)
"""
denominator = (100 * math.log(100, 2)) ** (1 / 3.0)
k = 1
# k = int(round(denominator / denominator)) # for n = 100
# k = int(round((1000 * math.log(1000, 2)) ** (1 / 3.0) / denominator)) # for n = 1000
# k = int(round((5500 * math.log(5500, 2)) ** (1 / 3.0) / denominator)) # for n = 5500
# k = int(round((10000 * math.log(10000, 2)) ** (1 / 3.0) / denominator)) # for n = 10000
# k = int(round((20000 * math.log(20000, 2)) ** (1 / 3.0) / denominator)) # for n = 20000
# k = int(round((14500 * math.log(14500, 2)) ** (1 / 3.0) / denominator)) # for n = 14500
# x1 = int(100 + random.randint(0, k) * 100)
# y1 = int(100 + random.randint(0, k) * 100)
# z1 = int(100 + random.randint(0, k) * 100)
# x2 = int(x1 + random.random() * 100)
# y2 = int(y1 + random.random() * 100)
# z2 = int(z1 + random.random() * 100)
x = random.randint(0, 10000)
y = random.randint(0, 10000)
# upper_left = (x1, y1, z1)
# lower_right = (x2, y2, z2)
upper_left = (x, y)
lower_right = (x, y)
# upper_left = ul_lr_pairs[i][0]
# lower_right = ul_lr_pairs[i][1]
# x = int(random.randint(1, 100))
# y = 10
# z = 10
# lower_right = (x, y, z)
# lower_right = lower_rights[i]
mbr = RawMBR(upper_left, lower_right, None)
node = RTreeNode(None, [], True)
entry = RTreeEntry(mbr, node)
node.setEntry(entry)
entries.append(entry)
"""
for i in xrange(10):
upper_left = (20, 20)
lower_right = (40, 40)
mbr = RawMBR(upper_left, lower_right, None)
node = RTreeNode(None, [], True)
entry = RTreeEntry(mbr, node)
node.setEntry(entry)
entries.append(entry)
for i in xrange(1000):
upper_left = (0, 0)
lower_right = (10, 10)
mbr = RawMBR(upper_left, lower_right, None)
node = RTreeNode(None, [], True)
entry = RTreeEntry(mbr, node)
node.setEntry(entry)
# entries.append(entry)
"""
# for entry in entries[0 : 4]:
# for entry in entries[0 : 15]:
for entry in entries:
tree2.insert(entry)
"""
if entry.getChild().getParent() == None:
raise Exception()
"""
# print tree.toString()
# for entry in entries[0 : 4]:
# print "supernodes:", [x for x in tree.getNodes() if x.isSuperNode() == True], tree.getRootEntry().getChild()
# tree2.draw()
print len(tree2.getNodes())
import time
time1 = time.time()
result = tree2.getAllRectangleCloseAncestors()
time2 = time.time()
time_diff = time2 - time1
print "time difference:", time_diff, "seconds"
# raise Exception()
for entry_to_close_ancestor_entry_list_pair in result.items():
entry, close_ancestor_entry_list = entry_to_close_ancestor_entry_list_pair
print "start rectangle:", entry.getMBR().toString()
for close_ancestor_entry in close_ancestor_entry_list:
print "close ancestor:", close_ancestor_entry.getMBR().toString()
# raise Exception()
# for entry in entries[0 : 15]:
for entry in entries:
# if len(tree.getNodes()) != 0:
# print "removing entry with mbr:", entry.getMBR().toString()
# print "tree, currently:", tree.toString()
# tree2.delete(entry)
pass
# print tree.toString()
result = tree.getRectangleCloseDescendants(entry8)
print result
result = tree.getAllRectangleCloseAncestors()
print result
print len(result)
for entry_to_close_ancestor_entry_list_pair in result.items():
entry, close_ancestor_entry_list = entry_to_close_ancestor_entry_list_pair
print "start rectangle:", entry.getMBR().toString()
for close_ancestor_entry in close_ancestor_entry_list:
print "close ancestor:", close_ancestor_entry.getMBR().toString()
if __name__ == "__main__":
main()
| 2.90625 | 3 |
tests/test_mailmerge.py | plysytsya/mailmerge | 0 | 12793529 | """
System tests.
<NAME> <<EMAIL>>
"""
import os
import re
import sh
from . import utils
def test_stdout():
"""Verify stdout and stderr.
pytest docs on capturing stdout and stderr
https://pytest.readthedocs.io/en/2.7.3/capture.html
"""
mailmerge_cmd = sh.Command("mailmerge")
output = mailmerge_cmd(
"--template", os.path.join(utils.TESTDATA, "simple_template.txt"),
"--database", os.path.join(utils.TESTDATA, "simple_database.csv"),
"--config", os.path.join(utils.TESTDATA, "server_open.conf"),
"--no-limit",
"--dry-run",
)
# Verify mailmerge output. We'll filter out the Date header because it
# won't match exactly.
stdout = output.stdout.decode("utf-8")
stderr = output.stderr.decode("utf-8")
assert stderr == ""
assert "Date:" in stdout
stdout = re.sub(r"Date.*\n", "", stdout)
assert stdout == """>>> message 0
TO: <EMAIL>
SUBJECT: Testing mailmerge
FROM: My Self <<EMAIL>>
MIME-Version: 1.0
Content-Type: text/plain; charset="us-ascii"
Content-Transfer-Encoding: 7bit
Hi, Myself,
Your number is 17.
>>> sent message 0
>>> message 1
TO: <EMAIL>
SUBJECT: Testing mailmerge
FROM: My Self <<EMAIL>>
MIME-Version: 1.0
Content-Type: text/plain; charset="us-ascii"
Content-Transfer-Encoding: 7bit
Hi, Bob,
Your number is 42.
>>> sent message 1
>>> This was a dry run. To send messages, use the --no-dry-run option.
"""
| 2.6875 | 3 |
src/vcslinks/tests/test_py_typed.py | tkf/vcslinks | 1 | 12793530 | <filename>src/vcslinks/tests/test_py_typed.py
from pathlib import Path
def test_py_typed():
assert (Path(__file__).parents[1] / "py.typed").exists()
| 1.859375 | 2 |
bogrod/banking/models.py | joostrijneveld/bogrod | 0 | 12793531 | <gh_stars>0
from django.db import models
from django.db.models import Sum
from django.utils.translation import ugettext_lazy as _
from django.core.exceptions import ValidationError
class Account(models.Model):
iban = models.CharField(_('iban'), max_length=34, unique=True)
ACCOUNT_TYPES = (
('checking', 'Checking account'),
('savings', 'Savings account'),
# ('investment', 'Investment account'), # TODO how to differentiate
('secondparty', 'Second party account'),
('other', 'Other account'),
)
account_type = models.CharField(max_length=10, choices=ACCOUNT_TYPES,
default='other')
def __str__(self):
return self.iban
class Category(models.Model):
name = models.CharField(max_length=100)
# This model is purposefully specific to ASN bank transactions, as that is
# the main use case for development of bogrod at the moment.
class Transaction(models.Model):
booking_date = models.DateField()
account = models.ForeignKey(Account)
counter_account = models.ForeignKey(Account,
related_name='counter_transactions')
counter_name = models.CharField(max_length=70)
account_currency = models.CharField(max_length=3)
balance_before = models.DecimalField(max_digits=12, decimal_places=2)
mutation_currency = models.CharField(max_length=3)
mutation_value = models.DecimalField(max_digits=12, decimal_places=2)
journal_date = models.DateField()
value_date = models.DateField()
# ASN bank uses this field to internally identify transaction types.
# These are translated to the (potentially more generic) global code.
internal_code = models.IntegerField()
BOOKING_CODES = (
('ACC', 'Acceptgirobetaling'),
('AF', 'Afboeking'),
('AFB', 'Afbetalen'),
('BEA', 'Betaalautomaat'),
('BIJ', 'Bijboeking'),
('BTL', 'Buitenlandse Overboeking'),
('CHP', 'Chipknip'),
('CHQ', 'Cheque'),
('COR', 'Correctie'),
('DIV', 'Diversen'),
('EFF', 'Effectenboeking'),
('ETC', 'Euro traveller cheques'),
('GBK', 'GiroBetaalkaart'),
('GEA', 'Geldautomaat'),
('INC', 'Incasso'),
('IDB', 'iDEAL betaling'),
('IMB', 'iDEAL betaling via mobiel'),
('IOB', 'Interne Overboeking'),
('KAS', 'Kas post'),
('KTN', 'Kosten/provisies'),
('KST', 'Kosten/provisies'),
('OVB', 'Overboeking'),
('PRM', 'Premies'),
('PRV', 'Provisies'),
('RNT', 'Rente'),
('STO', 'Storno'),
('TEL', 'Telefonische Overboeking'),
('VV', 'Vreemde valuta'),
)
global_code = models.CharField(max_length=3, choices=BOOKING_CODES)
sequence_number = models.IntegerField()
reference = models.CharField(max_length=16)
description = models.CharField(max_length=140)
statement_number = models.IntegerField()
class Meta:
unique_together = ('sequence_number', 'journal_date')
class Flow(models.Model):
transaction = models.ForeignKey(Transaction)
value = models.DecimalField(max_digits=12, decimal_places=2)
category = models.ForeignKey(Category, blank=True, null=True)
    def clean(self):
        # aggregate() returns a dict and the sum is None when no sibling flows exist
        flow_sum = (Flow.objects.filter(transaction=self.transaction)
                    .exclude(pk=self.pk)
                    .aggregate(total=Sum('value'))['total']) or 0
        if abs(flow_sum + self.value) > abs(self.transaction.mutation_value):
            raise ValidationError("Sum of flows cannot exceed transaction!")
class ExpectedTransaction(models.Model):
from_date = models.DateField(blank=True, null=True)
to_date = models.DateField(blank=True, null=True)
from_value = models.DecimalField(max_digits=12, decimal_places=2,
blank=True, null=True)
to_value = models.DecimalField(max_digits=12, decimal_places=2,
blank=True, null=True)
account = models.ForeignKey(Account, blank=True, null=True)
counter_account = models.ForeignKey(
Account, blank=True, null=True,
related_name='counter_expected_transactions'
)
category = models.ForeignKey(Category, blank=True, null=True)
# This could be much more generic, but as of yet there are no use-cases.
repeat_after_months = models.IntegerField(blank=True, null=True)
# Initialize flow once the transaction has occurred
flows = models.ManyToManyField(Flow, blank=True)
class Loan(models.Model):
title = models.CharField(max_length=100)
description = models.TextField(blank=True, null=True)
receipt = models.ImageField()
outgoing = models.ManyToManyField(Flow, blank=True)
payment = models.ManyToManyField(Flow, blank=True,
related_name='repaid_loans')
class InterestPeriod(models.Model):
annual_percentage = models.DecimalField(max_digits=5, decimal_places=2)
from_date = models.DateField()
to_date = models.DateField(blank=True, null=True) # NULL signifies today
loan = models.ForeignKey(Loan)
| 2.140625 | 2 |
readsdb_plot.py | ondrolexa/readsdb | 3 | 12793532 | <gh_stars>1-10
# -*- coding: utf-8 -*-
"""
/***************************************************************************
ReadSDBDialog
A QGIS plugin
Read PySDB structural data into QGIS
Generated by Plugin Builder: http://g-sherman.github.io/Qgis-Plugin-Builder/
-------------------
begin : 2018-11-03
git sha : $Format:%H$
copyright : (C) 2018 by <NAME>
email : <EMAIL>
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
import os
from PyQt5 import uic
from PyQt5 import QtWidgets
from PyQt5.QtCore import Qt
from qgis.core import *
import matplotlib
# Make sure that we are using QT5
matplotlib.use('Qt5Agg')
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
from apsg import *
# qhull workaroud
import platform
qgis_qhull_fails = platform.platform().startswith('Linux')
if qgis_qhull_fails:
from .stereogrid_workaround import StereoGrid as StereoGridQGIS
FORM_CLASS, _ = uic.loadUiType(os.path.join(
os.path.dirname(__file__), 'ui/readsdb_plot.ui'))
class MyMplCanvas(FigureCanvas):
"""Ultimately, this is a QWidget (as well as a FigureCanvasAgg, etc.)."""
def __init__(self, parent=None):
# fig, self.axes = plt.subplots()
# t = np.arange(0.0, 3.0, 0.01)
# s = np.sin(2 * np.pi * t)
# self.axes.plot(t, s)
self.net = StereoNet()
FigureCanvas.__init__(self, self.net.fig)
self.setParent(parent)
FigureCanvas.setSizePolicy(self,
QtWidgets.QSizePolicy.Expanding,
QtWidgets.QSizePolicy.Expanding)
FigureCanvas.updateGeometry(self)
class ReadSDBPlotDialog(QtWidgets.QDialog, FORM_CLASS):
def __init__(self, readsdb, parent=None):
"""Constructor."""
super(ReadSDBPlotDialog, self).__init__(parent, Qt.WindowStaysOnTopHint)
# Set up the user interface from Designer.
# After setupUI you can access any designer object by doing
# self.<objectname>, and you can use autoconnect slots - see
# http://qt-project.org/doc/qt-4.8/designer-using-a-ui-file.html
# #widgets-and-dialogs-with-auto-connect
self.setupUi(self)
self.pushApply.clicked.connect(self.plotnet)
self.data_layers = []
self.canvas = MyMplCanvas(self)
self.toolbar = NavigationToolbar(self.canvas, self)
self.mplLayout.addWidget(self.canvas)
self.mplLayout.addWidget(self.toolbar)
self.net = self.canvas.net
def opt(self, index, type, name):
return self.tabWidget.widget(index).findChild(type, name)
def plotnet(self):
self.net.grid = self.checkGrid.isChecked()
self.net.cla()
for idx, layer in self.data_layers[::-1]: # plot in right order
if layer.selectedFeatureCount():
features = layer.getSelectedFeatures()
else:
features = layer.getFeatures()
# Create data Group
if layer._is_planar:
g = Group([Fol(f.attribute('azi'), f.attribute('inc')) for f in features], layer.name())
else:
g = Group([Lin(f.attribute('azi'), f.attribute('inc')) for f in features], layer.name())
label = repr(g) if self.checkLabels.isChecked() else None
# contours
if self.opt(idx, QtWidgets.QCheckBox, 'checkContours').isChecked():
nlevels = self.opt(idx, QtWidgets.QSpinBox, 'spinLevels').value()
sigma = self.opt(idx, QtWidgets.QDoubleSpinBox, 'spinSigma').value()
if qgis_qhull_fails:
kwargs = {'cmap': 'Greys', 'zorder': 1}
d = StereoGridQGIS(g, sigma=sigma)
mn = d.values.min()
mx = d.values.max()
levels = np.linspace(mn, mx, nlevels)
levels[-1] += 1e-8
legend = True
if self.opt(idx, QtWidgets.QCheckBox, 'checkContoursFilled').isChecked():
if qgis_qhull_fails:
cs = self.net.fig.axes[self.net.active].tricontourf(d.triang, d.values, levels, **kwargs)
self.net.fig.axes[self.net.active].tricontour(d.triang, d.values, levels, colors="k")
else:
self.net.contourf(StereoGrid(g), levels=nlevels, sigma=sigma)
else:
if qgis_qhull_fails:
cs = self.net.fig.axes[self.net.active].tricontour(d.triang, d.values, levels, **kwargs)
else:
self.net.contour(StereoGrid(g), levels=nlevels, sigma=sigma)
if qgis_qhull_fails:
if legend:
ab = self.net.fig.axes[self.net.active].get_position().bounds
cbaxes = self.net.fig.add_axes([0.1, ab[1] + 0.1 * ab[3], 0.03, 0.8 * ab[3]])
cb = self.net.fig.colorbar(cs, cax=cbaxes)
if label:
cb.ax.set_title(label)
# principal
eigf = self.opt(idx, QtWidgets.QCheckBox, 'checkEigPlanes').isChecked()
eigl = self.opt(idx, QtWidgets.QCheckBox, 'checkEigLines').isChecked()
self.net.tensor(g.ortensor, eigenfols=eigf, eigenlins=eigl)
# plot data
markersize = self.opt(idx, QtWidgets.QSpinBox, 'spinSize').value()
marker = self.opt(idx, QtWidgets.QComboBox, 'comboStyle').currentText()
if layer._is_planar:
if self.opt(idx, QtWidgets.QCheckBox, 'checkShowData').isChecked():
if self.opt(idx, QtWidgets.QCheckBox, 'checkAsPoles').isChecked():
self.net.pole(g, marker=marker, markersize=markersize, label=label)
else:
self.net.plane(g, label=label)
else:
if self.opt(idx, QtWidgets.QCheckBox, 'checkShowData').isChecked():
self.net.line(g, marker=marker, markersize=markersize, label=label)
self.canvas.draw()
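# Minimal apsg sketch of what plotnet() assembles for a planar layer: a Group of
# Fol measurements plotted on a StereoNet. The azimuth/inclination values below
# are made up; in the plugin they come from the layer's 'azi'/'inc' attributes.
def stereonet_sketch():
    g = Group([Fol(120, 30), Fol(140, 35), Fol(100, 25)], "example planes")
    net = StereoNet()
    net.plane(g)
    return net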
| 1.601563 | 2 |
Volume Estimation/demo.py | JessieRamaux/Food-Volume-Estimation | 10 | 12793533 | import argparse
import torch
import cv2
import os
import torch.nn.parallel
import modules, net, resnet, densenet, senet
import numpy as np
import loaddata_demo as loaddata
import pdb
import argparse
from volume import get_volume
from mask import get_mask
import matplotlib.image
import matplotlib.pyplot as plt
parser = argparse.ArgumentParser(description='KD-network')
parser.add_argument('--img', metavar='DIR',default="./input/test.jpg",
help='img to input')
parser.add_argument('--json', metavar='DIR',default="./input/test.json",
help='json file to input')
parser.add_argument('--output', metavar='DIR',default="./output",
help='dir to output')
args=parser.parse_args()
def define_model(is_resnet, is_densenet, is_senet):
if is_resnet:
original_model = resnet.resnet50(pretrained = True)
Encoder = modules.E_resnet(original_model)
model = net.model(Encoder, num_features=2048, block_channel = [256, 512, 1024, 2048])
if is_densenet:
original_model = densenet.densenet161(pretrained=True)
Encoder = modules.E_densenet(original_model)
model = net.model(Encoder, num_features=2208, block_channel = [192, 384, 1056, 2208])
if is_senet:
original_model = senet.senet154(pretrained='imagenet')
Encoder = modules.E_senet(original_model)
model = net.model(Encoder, num_features=2048, block_channel = [256, 512, 1024, 2048])
return model
def main():
if (not os.path.exists(args.output)):
print("Output directory doesn't exist! Creating...")
os.makedirs(args.output)
model = define_model(is_resnet=False, is_densenet=False, is_senet=True)
model = torch.nn.DataParallel(model).cuda()
model.load_state_dict(torch.load('./pretrained_model/model_senet'))
model.eval()
    print()
img = cv2.imread(args.img)
nyu2_loader = loaddata.readNyu2(args.img)
test(nyu2_loader, model, img.shape[1], img.shape[0])
def test(nyu2_loader, model, width, height):
for i, image in enumerate(nyu2_loader):
image = torch.autograd.Variable(image, volatile=True).cuda()
out = model(image)
out = out.view(out.size(2),out.size(3)).data.cpu().numpy()
max_pix = out.max()
min_pix = out.min()
out = (out-min_pix)/(max_pix-min_pix)*255
out = cv2.resize(out,(width,height),interpolation=cv2.INTER_CUBIC)
cv2.imwrite(os.path.join(args.output, "out_grey.png"),out)
out_grey = cv2.imread(os.path.join(args.output, "out_grey.png"),0)
out_color = cv2.applyColorMap(out_grey, cv2.COLORMAP_JET)
cv2.imwrite(os.path.join(args.output, "out_color.png"),out_color)
vol = get_volume(out_grey, args.json)
print("Volume:")
print(vol)
print("unit: cm^3")
out_file = open(os.path.join(args.output, "out.txt"), "w")
out_file.write("Volume:\n")
out_file.write(str(vol))
out_file.write("\n")
out_file.write("unit: cm^3")
out_file.close()
get_mask(out_grey, args.json, args.output)
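# Standalone restatement of the depth post-processing done in test(): the raw
# network output is min-max normalised to the 0-255 range before being written
# out. The helper assumes a non-constant float array (a constant map would
# divide by zero).
def normalize_depth_to_uint8(depth):
    d_min, d_max = depth.min(), depth.max()
    scaled = (depth - d_min) / (d_max - d_min) * 255
    return scaled.astype(np.uint8)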
if __name__ == '__main__':
main()
| 2.28125 | 2 |
plugins/fake_msg/__init__.py | Orilx/Niko-py | 4 | 12793534 | <filename>plugins/fake_msg/__init__.py
from nonebot import on_command
from nonebot.adapters.onebot.v11 import GroupMessageEvent, Message, MessageSegment
from nonebot.params import CommandArg
from utils.message_builder import fake_forward_msg
from utils.utils import send_group_forward_msg
fake = on_command('fake', aliases={"假消息"}, priority=5, block=True)
@fake.handle()
async def _(event: GroupMessageEvent, args: Message = CommandArg()):
at = []
msg = []
fake_msg = []
for i in args:
if i.type == 'at':
if i.data["qq"] == 'all':
continue
else:
at.append(i.data["qq"])
else:
msg.append(i)
for i in msg:
if i.type == 'text':
if i.data["text"].strip():
fake_msg.append(MessageSegment.text(i.data["text"].strip()))
else:
fake_msg.append(i)
if at and fake_msg:
group_forward_msg = await fake_forward_msg(at, event.group_id, fake_msg)
await send_group_forward_msg(event.group_id, group_forward_msg)
else:
await fake.finish("参数有误~")
| 2.171875 | 2 |
devops-console/apps/features/migrations/0017_auto_20190911_1550.py | lilinghell/devops | 4 | 12793535 | # Generated by Django 2.1.5 on 2019-09-11 07:50
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('features', '0016_auto_20190605_1830'),
]
operations = [
migrations.AlterField(
model_name='feature',
name='apps',
field=models.ManyToManyField(related_name='app_features', to='applications.Application', verbose_name='关联应用'),
),
]
| 1.445313 | 1 |
hextech_core/blog/models.py | duonghao314/hextech-core | 0 | 12793536 | from django.db import models
from django.utils import timezone
from django.utils.text import slugify
from django.utils.translation import gettext_lazy as _
from modelcluster.fields import ParentalKey
from modelcluster.models import ClusterableModel
from wagtail.core.fields import RichTextField
from hextech_core.core.models.base_model import BaseModel, MetadataModel
from hextech_core.core.utils import no_accent_vietnamese
from hextech_core.core.utils.id import RandomID
from hextech_core.users.models import User
class BlogCategory(MetadataModel):
parent = models.ForeignKey(
"self",
on_delete=models.PROTECT,
related_name="child_categories",
null=True,
blank=True,
)
name = models.CharField(max_length=100)
slug = models.SlugField(blank=True, unique=True, db_index=True)
def save(self, *args, **kwargs):
self.slug = slugify(self.name)
super().save(*args, **kwargs)
def __str__(self):
return self.name
class BlogTag(BaseModel):
tag = models.CharField(max_length=50, unique=True)
def __str__(self):
return self.tag
@classmethod
def tagger(cls, tag: str) -> str:
tag = no_accent_vietnamese(tag)
tag = "".join([ele.title() for ele in tag.split(" ")])
return tag
def save(self, *args, **kwargs):
if not self.pk:
self.tag = self.tagger(self.tag)
return super().save()
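# Illustrative calls to BlogTag.tagger() above. The Vietnamese example assumes
# no_accent_vietnamese strips diacritics ("lập trình" -> "lap trinh"); the ASCII
# example follows directly from the title-casing join.
def tagger_examples():
    return [
        BlogTag.tagger("machine learning"),  # -> "MachineLearning"
        BlogTag.tagger("lập trình python"),  # assumed -> "LapTrinhPython"
    ]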
class Blog(ClusterableModel, MetadataModel):
id = models.BigIntegerField(
_("Random id"), default=RandomID("blog.Blog"), primary_key=True
)
author = models.ForeignKey(
"users.User", on_delete=models.CASCADE, related_name="blogs", db_index=True
)
category = models.ForeignKey(
BlogCategory,
on_delete=models.SET_NULL,
related_name="blogs",
db_index=True,
null=True,
)
title = models.CharField(max_length=400)
content = RichTextField()
slug = models.SlugField(blank=True, unique=True, db_index=True, max_length=450)
tags = models.ManyToManyField(BlogTag, blank=True)
published = models.BooleanField(default=True)
published_at = models.DateTimeField(null=True, blank=True)
class Meta:
unique_together = ("author", "title")
def save(self, *args, **kwargs):
print(self.__dict__)
if self.published and not self.published_at:
self.published_at = timezone.now()
self.slug = f"{slugify(self.title)}-{self.author.id}"
super().save(*args, **kwargs)
def __str__(self):
return self.title
class BlogComment(BaseModel):
blog = ParentalKey(Blog, on_delete=models.CASCADE, related_name="comments")
content = RichTextField()
title = models.CharField(max_length=255)
created_by = models.ForeignKey(
"users.User", on_delete=models.CASCADE, related_name="+", null=True, blank=True
)
def __str__(self):
return f"#{self.blog.id} - {self.title if self.title else 'Untitled'}"
class BlogLike(BaseModel):
blog = models.ForeignKey(Blog, on_delete=models.CASCADE, related_name="likes")
user = models.ForeignKey(User, on_delete=models.CASCADE, related_name="liked")
is_like = models.BooleanField(default=True)
class Meta:
unique_together = ("blog", "user")
| 1.960938 | 2 |
models/drocc.py | jbr-ai-labs/PU-OC | 0 | 12793537 | import torch
import torch.nn.functional as F
import torch.utils.data
import torch.utils.data
from models.base_models import OCModel, PUModelRandomBatch
from models.classifiers import Net
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# The DROCC code is borrowed from https://github.com/microsoft/EdgeML
class DROCC(OCModel):
def __init__(self,
model=Net,
lam=0.5,
radius=8,
gamma=2,
warmup_epochs=6,
ascent_step_size=0.001,
ascent_num_steps=50,
half=True):
super().__init__(model, 0)
self.lam = lam
self.radius = radius
self.gamma = gamma
self.warmup_epochs = warmup_epochs
self.ascent_step_size = ascent_step_size
self.ascent_num_steps = ascent_num_steps
self.half = half
def batch_loss(self, batch):
data, target = batch[0], batch[2]
data, target = data.to(device), target.to(device)
# Data Processing
data = data.to(torch.float)
target = target.to(torch.float)
target = torch.squeeze(target)
# Extract the logits for cross entropy loss
logits_start = self.model.forward_start(data)
logits = self.model.forward_end(logits_start)
logits = torch.squeeze(logits, dim=1)
ce_loss = F.binary_cross_entropy_with_logits(logits, target)
# Add to the epoch variable for printing average CE Loss
'''
Adversarial Loss is calculated only for the positive data points (label==1).
'''
if self.epoch >= self.warmup_epochs:
logits_start = logits_start[target == 1]
# AdvLoss
if not self.half:
adv_loss = self.one_class_adv_loss(data[target == 1].detach(), self.half)
else:
adv_loss = self.one_class_adv_loss(logits_start.detach(), self.half)
loss = ce_loss + adv_loss * self.lam
else:
# If only CE based training has to be done
loss = ce_loss
return loss
def one_class_adv_loss(self, x_train_data, half=True):
"""Computes the adversarial loss:
1) Sample points initially at random around the positive training
data points
2) Gradient ascent to find the most optimal point in set N_i(r)
classified as +ve (label=0). This is done by maximizing
the CE loss wrt label 0
3) Project the points between spheres of radius R and gamma * R
(set N_i(r))
4) Pass the calculated adversarial points through the model,
and calculate the CE loss wrt target class 0
Parameters
----------
x_train_data: Batch of data to compute loss on.
"""
batch_size = len(x_train_data)
# Randomly sample points around the training data
# We will perform SGD on these to find the adversarial points
x_adv = torch.randn(x_train_data.shape).to(device).detach().requires_grad_()
x_adv_sampled = x_adv + x_train_data
for step in range(self.ascent_num_steps):
with torch.enable_grad():
new_targets = torch.zeros(batch_size, 1).to(device)
# new_targets = (1 - targets).to(self.device)
new_targets = torch.squeeze(new_targets)
new_targets = new_targets.to(torch.float)
if half:
logits = self.model.forward_end(x_adv_sampled)
else:
logits = self.model(x_adv_sampled)
logits = torch.squeeze(logits, dim=1)
new_loss = F.binary_cross_entropy_with_logits(logits, new_targets)
grad = torch.autograd.grad(new_loss, [x_adv_sampled])[0]
grad_norm = torch.norm(grad, p=2, dim=tuple(range(1, grad.dim())))
grad_norm = grad_norm.view(-1, *[1] * (grad.dim() - 1))
grad_normalized = grad / grad_norm
with torch.no_grad():
x_adv_sampled.add_(self.ascent_step_size * grad_normalized)
if (step + 1) % 10 == 0:
# Project the normal points to the set N_i(r)
h = x_adv_sampled - x_train_data
norm_h = torch.sqrt(torch.sum(h ** 2,
dim=tuple(range(1, h.dim()))))
alpha = torch.clamp(norm_h, self.radius,
self.gamma * self.radius).to(device)
# Make use of broadcast to project h
proj = (alpha / norm_h).view(-1, *[1] * (h.dim() - 1))
h = proj * h
x_adv_sampled = x_train_data + h # These adv_points are now on the surface of hyper-sphere
if half:
adv_pred = self.model.forward_end(x_adv_sampled)
else:
adv_pred = self.model(x_adv_sampled)
adv_pred = torch.squeeze(adv_pred, dim=1)
adv_loss = F.binary_cross_entropy_with_logits(adv_pred, new_targets)
return adv_loss
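# Standalone restatement of the projection step described in the
# one_class_adv_loss docstring (step 3): the perturbation h = x_adv - x_train is
# rescaled so that its norm lies in [radius, gamma * radius]. Neither model
# class calls this helper; it is illustrative only.
def project_to_annulus(x_adv, x_train, radius, gamma):
    h = x_adv - x_train
    norm_h = torch.sqrt(torch.sum(h ** 2, dim=tuple(range(1, h.dim()))))
    alpha = torch.clamp(norm_h, radius, gamma * radius)
    proj = (alpha / norm_h).view(-1, *[1] * (h.dim() - 1))
    return x_train + proj * h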
# class DROCC(nn.Module):
# def __init__(self, ):
# super().__init__()
#
# self.model = CIFAR10_LeNet()
#
# def run_train(self,
# train_data,
# test_data,
# lamda=0.5,
# radius=8,
# gamma=2,
# verbose=False,
# learning_rate=1e-3,
# total_epochs=30,
# only_ce_epochs=6,
# ascent_step_size=0.001,
# ascent_num_steps=50,
# gamma_lr=1,
# batch_size=128,
# half=True):
#
# self.best_score = -np.inf
# best_model = None
# self.ascent_num_steps = ascent_num_steps
# self.ascent_step_size = ascent_step_size
# self.lamda = lamda
# self.radius = radius
# self.gamma = gamma
#
# self.optimizer = optim.Adam(self.model.parameters(), lr=learning_rate)
# lr_scheduler = optim.lr_scheduler.ExponentialLR(self.optimizer, gamma=gamma_lr)
#
# train_loader = torch.utils.data.DataLoader(train_data,
# batch_size=batch_size,
# shuffle=True)
#
# test_loader = torch.utils.data.DataLoader(test_data,
# batch_size=batch_size,
# shuffle=True)
#
# for epoch in range(total_epochs):
# # Make the weights trainable
# self.model.train()
#
# # Placeholder for the respective 2 loss values
# epoch_adv_loss = torch.tensor([0]).type(torch.float32).to(device) # AdvLoss
# epoch_ce_loss = 0 # Cross entropy Loss
#
# batch_idx = -1
# for data, target, _ in train_loader:
# batch_idx += 1
# data, target = data.to(device), target.to(device)
# # Data Processing
# data = data.to(torch.float)
# target = target.to(torch.float)
# target = torch.squeeze(target)
#
# self.optimizer.zero_grad()
#
# # Extract the logits for cross entropy loss
# logits_start = self.model.half_forward_start(data)
# logits = self.model.half_forward_end(logits_start)
#
# logits = torch.squeeze(logits, dim=1)
# ce_loss = F.binary_cross_entropy_with_logits(logits, target)
# # Add to the epoch variable for printing average CE Loss
# epoch_ce_loss += ce_loss
#
# '''
# Adversarial Loss is calculated only for the positive data points (label==1).
# '''
# if epoch >= only_ce_epochs:
# logits_start = logits_start[target == 1]
# # AdvLoss
# if not half:
# adv_loss = self.one_class_adv_loss(data[target == 1].detach(), target[target == 1], half)
# else:
# adv_loss = self.one_class_adv_loss(logits_start.detach(), target[target == 1], half)
# epoch_adv_loss += adv_loss
#
# loss = ce_loss + adv_loss * self.lamda
# else:
# # If only CE based training has to be done
# loss = ce_loss
#
# # Backprop
# loss.backward()
# self.optimizer.step()
#
# epoch_ce_loss = epoch_ce_loss / (batch_idx + 1) # Average CE Loss
# epoch_adv_loss = epoch_adv_loss / (batch_idx + 1) # Average AdvLoss
#
# if verbose:
# test_score = self.test(test_loader)
# if test_score > self.best_score:
# self.best_score = test_score
# best_model = copy.deepcopy(self.model)
#
# print('Epoch: {}, CE Loss: {}, AdvLoss: {}, {}: {}'.format(
# epoch, epoch_ce_loss.item(), epoch_adv_loss.item(),
# 'AUC', test_score))
# lr_scheduler.step()
# if verbose:
# self.model = copy.deepcopy(best_model)
# print('\nBest test {}: {}'.format(
# 'AUC', self.best_score
# ))
#
# def test(self, test_loader, metric='AUC'):
# """Evaluate the model on the given test dataset.
# Parameters
# ----------
# test_loader: Dataloader object for the test dataset.
# metric: Metric used for evaluation (AUC / F1).
# """
# self.model.eval()
# label_score = []
# batch_idx = -1
# for data, target, _ in test_loader:
# batch_idx += 1
# data, target = data.to(device), target.to(device)
# data = data.to(torch.float)
# target = target.to(torch.float)
# target = torch.squeeze(target)
#
# logits = self.model(data)
# logits = torch.squeeze(logits, dim=1)
# sigmoid_logits = torch.sigmoid(logits)
# scores = logits
# label_score += list(zip(target.cpu().data.numpy().tolist(),
# scores.cpu().data.numpy().tolist()))
# # Compute test score
# labels, scores = zip(*label_score)
# labels = np.array(labels)
# scores = np.array(scores)
# if metric == 'AUC':
# test_metric = roc_auc_score(labels, scores)
# if metric == 'alpha':
# test_metric = (scores > 0.5).mean()
# return test_metric
#
# def one_class_adv_loss(self, x_train_data, targets, half=True):
# """Computes the adversarial loss:
# 1) Sample points initially at random around the positive training
# data points
# 2) Gradient ascent to find the most optimal point in set N_i(r)
# classified as +ve (label=0). This is done by maximizing
# the CE loss wrt label 0
# 3) Project the points between spheres of radius R and gamma * R
# (set N_i(r))
# 4) Pass the calculated adversarial points through the model,
# and calculate the CE loss wrt target class 0
#
# Parameters
# ----------
# x_train_data: Batch of data to compute loss on.
# """
# batch_size = len(x_train_data)
# # Randomly sample points around the training data
# # We will perform SGD on these to find the adversarial points
# x_adv = torch.randn(x_train_data.shape).to(device).detach().requires_grad_()
# x_adv_sampled = x_adv + x_train_data
#
# for step in range(self.ascent_num_steps):
# with torch.enable_grad():
#
# new_targets = torch.zeros(batch_size, 1).to(device)
# # new_targets = (1 - targets).to(self.device)
# new_targets = torch.squeeze(new_targets)
# new_targets = new_targets.to(torch.float)
#
# if half:
# logits = self.model.half_forward_end(x_adv_sampled)
# else:
# logits = self.model(x_adv_sampled)
#
# logits = torch.squeeze(logits, dim=1)
# new_loss = F.binary_cross_entropy_with_logits(logits, new_targets)
#
# grad = torch.autograd.grad(new_loss, [x_adv_sampled])[0]
# grad_norm = torch.norm(grad, p=2, dim=tuple(range(1, grad.dim())))
# grad_norm = grad_norm.view(-1, *[1] * (grad.dim() - 1))
# grad_normalized = grad / grad_norm
# with torch.no_grad():
# x_adv_sampled.add_(self.ascent_step_size * grad_normalized)
#
# if (step + 1) % 10 == 0:
# # Project the normal points to the set N_i(r)
# h = x_adv_sampled - x_train_data
# norm_h = torch.sqrt(torch.sum(h ** 2,
# dim=tuple(range(1, h.dim()))))
# alpha = torch.clamp(norm_h, self.radius,
# self.gamma * self.radius).to(device)
# # Make use of broadcast to project h
# proj = (alpha / norm_h).view(-1, *[1] * (h.dim() - 1))
# h = proj * h
# x_adv_sampled = x_train_data + h # These adv_points are now on the surface of hyper-sphere
#
# if half:
# adv_pred = self.model.half_forward_end(x_adv_sampled)
# else:
# adv_pred = self.model(x_adv_sampled)
#
# adv_pred = torch.squeeze(adv_pred, dim=1)
# adv_loss = F.binary_cross_entropy_with_logits(adv_pred, (new_targets))
#
# return adv_loss
#
# def save(self, path):
# torch.save(self.model.state_dict(), os.path.join(path, 'model.pt'))
#
# def load(self, path):
# self.model.load_state_dict(torch.load(os.path.join(path, 'model.pt')))
class PU_DROCC(PUModelRandomBatch):
def __init__(self,
model=Net,
lam=0.5,
radius=8,
gamma=2,
warmup_epochs=6,
ascent_step_size=0.001,
ascent_num_steps=50,
half=True):
super().__init__(model, 0)
self.lam = lam
self.radius = radius
self.gamma = gamma
self.warmup_epochs = warmup_epochs
self.ascent_step_size = ascent_step_size
self.ascent_num_steps = ascent_num_steps
self.half = half
def batch_loss(self, batch):
data, target = batch[0], batch[2]
data, target = data.to(device), target.to(device)
lab_ind = target == 1
unl_ind = target == 0
# lab_cnt = max(lab_ind.sum(), 1)
unl_cnt = max(unl_ind.sum(), 1)
# Extract the logits for cross entropy loss
logits_start = self.model.forward_start(data)
logits = self.model.forward_end(logits_start[lab_ind])
logits = torch.squeeze(logits, dim=1)
ce_loss = F.binary_cross_entropy_with_logits(logits, target[lab_ind])
# Add to the epoch variable for printing average CE Loss
'''
Adversarial Loss is calculated only for the positive data points (label==1).
'''
if self.epoch >= self.warmup_epochs and unl_cnt > 1:
logits_start = logits_start[unl_ind]
# AdvLoss
if not self.half:
adv_loss = self.one_class_adv_loss(data[unl_ind].detach(), self.half)
else:
                adv_loss = self.one_class_adv_loss(logits_start.detach(), self.half)  # already filtered to unlabeled rows above
loss = ce_loss + adv_loss * self.lam
else:
# If only CE based training has to be done
loss = ce_loss
return loss
def one_class_adv_loss(self, x_train_data, half=True):
"""Computes the adversarial loss:
1) Sample points initially at random around the positive training
data points
2) Gradient ascent to find the most optimal point in set N_i(r)
classified as +ve (label=0). This is done by maximizing
the CE loss wrt label 0
3) Project the points between spheres of radius R and gamma * R
(set N_i(r))
4) Pass the calculated adversarial points through the model,
and calculate the CE loss wrt target class 0
Parameters
----------
x_train_data: Batch of data to compute loss on.
"""
batch_size = len(x_train_data)
# Randomly sample points around the training data
# We will perform SGD on these to find the adversarial points
x_adv = torch.randn(x_train_data.shape).to(device).detach().requires_grad_()
x_adv_sampled = x_adv + x_train_data
for step in range(self.ascent_num_steps):
with torch.enable_grad():
new_targets = torch.zeros(batch_size, 1).to(device)
# new_targets = (1 - targets).to(self.device)
new_targets = torch.squeeze(new_targets)
new_targets = new_targets.to(torch.float)
if half:
logits = self.model.forward_end(x_adv_sampled)
else:
logits = self.model(x_adv_sampled)
logits = torch.squeeze(logits, dim=1)
new_loss = F.binary_cross_entropy_with_logits(logits, new_targets)
grad = torch.autograd.grad(new_loss, [x_adv_sampled])[0]
grad_norm = torch.norm(grad, p=2, dim=tuple(range(1, grad.dim())))
grad_norm = grad_norm.view(-1, *[1] * (grad.dim() - 1))
grad_normalized = grad / grad_norm
with torch.no_grad():
x_adv_sampled.add_(self.ascent_step_size * grad_normalized)
if (step + 1) % 10 == 0:
# Project the normal points to the set N_i(r)
h = x_adv_sampled - x_train_data
norm_h = torch.sqrt(torch.sum(h ** 2,
dim=tuple(range(1, h.dim()))))
alpha = torch.clamp(norm_h, self.radius,
self.gamma * self.radius).to(device)
# Make use of broadcast to project h
proj = (alpha / norm_h).view(-1, *[1] * (h.dim() - 1))
h = proj * h
x_adv_sampled = x_train_data + h # These adv_points are now on the surface of hyper-sphere
if half:
adv_pred = self.model.forward_end(x_adv_sampled)
else:
adv_pred = self.model(x_adv_sampled)
adv_pred = torch.squeeze(adv_pred, dim=1)
adv_loss = F.binary_cross_entropy_with_logits(adv_pred, new_targets)
return adv_loss
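# Illustrative usage sketch (hypothetical values; assumes `Net`, `device` and the
# PUModelRandomBatch training loop defined elsewhere in this module):
#   pu_model = PU_DROCC(model=Net, lam=0.5, radius=8, gamma=2,
#                       warmup_epochs=6, ascent_step_size=0.001,
#                       ascent_num_steps=50, half=True)
#   # batch_loss expects a (data, _, target) batch with target in {0, 1}
#   # (1 = labeled positive, 0 = unlabeled), matching the indexing above:
#   # loss = pu_model.batch_loss(batch)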
| 2.453125 | 2 |
chat/chat.py | tima-fey/devman | 0 | 12793538 | import argparse
import asyncio
import logging
import datetime
import sys
import json
from aiofile import AIOFile
async def read_from_socket(host, port):
timer = 0
reader, writer = None, None
async with AIOFile("text.txt", 'a') as _file:
while True:
try:
if not reader or not writer:
reader, writer = await asyncio.open_connection(host=host, port=port)
text = await reader.readline()
time_now = datetime.datetime.now().strftime("%y.%m.%d %H.%M")
await _file.write('[{}] {}'.format(time_now, text.decode("utf-8")))
print(text.decode("utf-8"))
except (ConnectionRefusedError, ConnectionResetError):
logging.warning('sleep %s seconds', 2 ** timer)
await asyncio.sleep(2 ** timer)
reader, writer = None, None
timer += 1
            except asyncio.CancelledError:
                if writer:
                    writer.close()
                raise
async def submit_message(host, port, args):
timer = 0
try:
async with AIOFile(args.token_file, 'r') as _file:
token = await _file.read()
except FileNotFoundError:
token = None
    reader, writer = None, None
    while True:
try:
reader, writer = await asyncio.open_connection(host=host, port=port)
temp = await reader.readline()
logging.debug(temp.decode("utf-8"))
if not token:
writer.write('\n'.encode())
await register(reader, writer, args)
else:
await authorise(reader, writer, args, token)
writer.write('{}\n\n'.format(args.text.replace('\n', ' ')).encode())
logging.info('text has been successfully sent')
return
except (ConnectionRefusedError, ConnectionResetError):
logging.warning('sleep %s seconds', 2 ** timer)
await asyncio.sleep(2 ** timer)
timer += 1
        except asyncio.CancelledError:
            if writer:
                writer.close()
            raise
async def register(reader, writer, args):
if not args.user:
logging.error("It's obligated to specidy login if you do not have the correct token file")
logging.error('exiting')
sys.exit()
temp = await reader.readline()
logging.debug(temp.decode("utf-8"))
user = '{}\n'.format(args.user.replace('\n', ' '))
writer.write(user.encode())
answer = await reader.readline()
logging.debug(answer.decode("utf-8"))
answer_dict = json.loads(answer)
token = answer_dict['account_hash']
logging.debug(token)
async with AIOFile(args.token_file, 'w') as _file:
await _file.write(token)
async def authorise(reader, writer, args, token):
writer.write('{}\n'.format(token.replace('\n', '')).encode())
answer = await reader.readline()
logging.debug(answer.decode("utf-8"))
if answer.decode("utf-8") == 'null\n':
logging.warning("Wrong token, let's get another one")
await register(reader, writer, args)
async def main():
logging.basicConfig(level=logging.INFO)
parser = argparse.ArgumentParser(description='connect to secret chat')
parser.add_argument('--host', default='minechat.dvmn.org', help='Host to connect')
parser.add_argument('--rport', default=5000, type=int, help='Specify port to receive msg')
parser.add_argument('--sport', default=5050, type=int, help='Specify port to send msg')
    parser.add_argument('--user', help="set a username; it's obligatory for the first run")
parser.add_argument('--token_file', default="token.txt", help="set a file with token")
parser.add_argument('--text', help="set a text to send")
parser.add_argument('--send_only', action='store_true', help="set a send only mode")
args = parser.parse_args()
tasks = []
if not args.send_only:
tasks.append(asyncio.create_task(read_from_socket(args.host, args.rport)))
if args.text:
tasks.append(asyncio.create_task(submit_message(args.host, args.sport, args)))
for task in tasks:
await task
if __name__ == "__main__":
try:
asyncio.run(main())
except KeyboardInterrupt:
pass
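# Illustrative invocations (flags as defined in main(); values are examples only):
#   python chat.py --host minechat.dvmn.org --rport 5000            # listen and log to text.txt
#   python chat.py --send_only --user my_nick --text "hello there"  # register (first run) and send
# Later runs reuse the token stored in the file given by --token_file (token.txt by default).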
| 2.84375 | 3 |
sdk/python/pulumi_oci/identity/get_group.py | EladGabay/pulumi-oci | 5 | 12793539 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetGroupResult',
'AwaitableGetGroupResult',
'get_group',
]
@pulumi.output_type
class GetGroupResult:
"""
A collection of values returned by getGroup.
"""
def __init__(__self__, compartment_id=None, defined_tags=None, description=None, freeform_tags=None, group_id=None, id=None, inactive_state=None, name=None, state=None, time_created=None):
if compartment_id and not isinstance(compartment_id, str):
raise TypeError("Expected argument 'compartment_id' to be a str")
pulumi.set(__self__, "compartment_id", compartment_id)
if defined_tags and not isinstance(defined_tags, dict):
raise TypeError("Expected argument 'defined_tags' to be a dict")
pulumi.set(__self__, "defined_tags", defined_tags)
if description and not isinstance(description, str):
raise TypeError("Expected argument 'description' to be a str")
pulumi.set(__self__, "description", description)
if freeform_tags and not isinstance(freeform_tags, dict):
raise TypeError("Expected argument 'freeform_tags' to be a dict")
pulumi.set(__self__, "freeform_tags", freeform_tags)
if group_id and not isinstance(group_id, str):
raise TypeError("Expected argument 'group_id' to be a str")
pulumi.set(__self__, "group_id", group_id)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if inactive_state and not isinstance(inactive_state, str):
raise TypeError("Expected argument 'inactive_state' to be a str")
pulumi.set(__self__, "inactive_state", inactive_state)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if state and not isinstance(state, str):
raise TypeError("Expected argument 'state' to be a str")
pulumi.set(__self__, "state", state)
if time_created and not isinstance(time_created, str):
raise TypeError("Expected argument 'time_created' to be a str")
pulumi.set(__self__, "time_created", time_created)
@property
@pulumi.getter(name="compartmentId")
def compartment_id(self) -> str:
"""
The OCID of the tenancy containing the group.
"""
return pulumi.get(self, "compartment_id")
@property
@pulumi.getter(name="definedTags")
def defined_tags(self) -> Mapping[str, Any]:
"""
Defined tags for this resource. Each key is predefined and scoped to a namespace. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"Operations.CostCenter": "42"}`
"""
return pulumi.get(self, "defined_tags")
@property
@pulumi.getter
def description(self) -> str:
"""
The description you assign to the group. Does not have to be unique, and it's changeable.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="freeformTags")
def freeform_tags(self) -> Mapping[str, Any]:
"""
Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"Department": "Finance"}`
"""
return pulumi.get(self, "freeform_tags")
@property
@pulumi.getter(name="groupId")
def group_id(self) -> str:
return pulumi.get(self, "group_id")
@property
@pulumi.getter
def id(self) -> str:
"""
The OCID of the group.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="inactiveState")
def inactive_state(self) -> str:
"""
The detailed status of INACTIVE lifecycleState.
"""
return pulumi.get(self, "inactive_state")
@property
@pulumi.getter
def name(self) -> str:
"""
The name you assign to the group during creation. The name must be unique across all groups in the tenancy and cannot be changed.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def state(self) -> str:
"""
The group's current state.
"""
return pulumi.get(self, "state")
@property
@pulumi.getter(name="timeCreated")
def time_created(self) -> str:
"""
Date and time the group was created, in the format defined by RFC3339. Example: `2016-08-25T21:10:29.600Z`
"""
return pulumi.get(self, "time_created")
class AwaitableGetGroupResult(GetGroupResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetGroupResult(
compartment_id=self.compartment_id,
defined_tags=self.defined_tags,
description=self.description,
freeform_tags=self.freeform_tags,
group_id=self.group_id,
id=self.id,
inactive_state=self.inactive_state,
name=self.name,
state=self.state,
time_created=self.time_created)
def get_group(group_id: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetGroupResult:
"""
This data source provides details about a specific Group resource in Oracle Cloud Infrastructure Identity service.
Gets the specified group's information.
This operation does not return a list of all the users in the group. To do that, use
[ListUserGroupMemberships](https://docs.cloud.oracle.com/iaas/api/#/en/identity/20160918/UserGroupMembership/ListUserGroupMemberships) and
provide the group's OCID as a query parameter in the request.
## Example Usage
```python
import pulumi
import pulumi_oci as oci
test_group = oci.identity.get_group(group_id=oci_identity_group["test_group"]["id"])
```
:param str group_id: The OCID of the group.
"""
__args__ = dict()
__args__['groupId'] = group_id
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('oci:identity/getGroup:getGroup', __args__, opts=opts, typ=GetGroupResult).value
return AwaitableGetGroupResult(
compartment_id=__ret__.compartment_id,
defined_tags=__ret__.defined_tags,
description=__ret__.description,
freeform_tags=__ret__.freeform_tags,
group_id=__ret__.group_id,
id=__ret__.id,
inactive_state=__ret__.inactive_state,
name=__ret__.name,
state=__ret__.state,
time_created=__ret__.time_created)
| 1.757813 | 2 |
divulga/views.py | SaviorsServices/CommunityService | 0 | 12793540 | <filename>divulga/views.py
from email.mime.text import MIMEText
from django.shortcuts import get_object_or_404
from django.shortcuts import render
from django.urls import reverse
from django.http import HttpResponseRedirect
from django.http import HttpResponse
from django.contrib.auth.decorators import login_required
from .forms import CommunityActionForm, DonationForm, HealthServiceForm, EstablishmentForm, VoluntaryServiceForm
from .models import Establishment, HealthService, Donation, VoluntaryService
# from .models import Divulgacoes
import requests
import urllib, json
import smtplib
# @login_required
# def formevent(request):
# if request.method == 'POST':
# divulgacao = Establishment()
# divulgacao.nomeEvento = request.POST['nomeEvento']
# divulgacao.categoria = request.POST['categoria']
# divulgacao.cidade = request.POST['cidade']
# #divulgacao.bairro = request.POST['bairro']
# divulgacao.endereco = request.POST['endereco']
# #divulgacao.cep = request.POST['cep']
# divulgacao.telefone = request.POST['telefone']
# divulgacao.horarioInicio = request.POST['horarioInicio']
# divulgacao.horarioFim = request.POST['horarioFim']
# divulgacao.data = request.POST['data']
# divulgacao.user = request.user
# divulgacao.save()
# return render(request , 'index.html')
# return render(request , 'formevent.html')
def mapa(request, id):
latitude = 0
longitude = 0
# if request.method == "POST":
chave = "<KEY>"
div = Establishment.objects.get(id=id)
address = div.endereco+"+"+div.cidade
r = requests.get("https://maps.googleapis.com/maps/api/geocode/json?address="+address+"&key="+chave)
if r.status_code == 200:
dados = json.loads(r.content)
latitude = dados["results"][0]["geometry"]["location"]["lat"]
longitude = dados["results"][0]["geometry"]["location"]["lng"]
#print(latitude)
#print(longitude)
return render(request, 'mapa.html', {'latitude':latitude, 'longitude': longitude})
# return render(request, 'index.html')
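# Abridged shape of the geocoding JSON parsed above (coordinate values illustrative):
#   {"results": [{"geometry": {"location": {"lat": -15.79, "lng": -47.88}}}], "status": "OK"}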
def servicolist(request):
estabelecimentos = Establishment.objects.all()
saude = HealthService.objects.all()
doacao = Donation.objects.all()
return render(request, 'servicolist.html', {'estabelecimentos':estabelecimentos,'saude':saude,'doacao':doacao})
@login_required
def create_establishment(request):
title = "Cadastrar Estabelecimento"
if request.method == "POST":
form = EstablishmentForm(request.POST)
if form.is_valid():
form.save()
return HttpResponseRedirect(reverse("index"))
return render(request, 'establishment.html', {'form': form,'title':title})
else:
form = EstablishmentForm()
return render(request, 'establishment.html', {'form': form,'title':title})
@login_required
def create_voluntary(request):
title = "Cadastrar Voluntário"
if request.method == "POST":
form = VoluntaryServiceForm(request.POST)
if form.is_valid():
form.save()
return HttpResponseRedirect(reverse("index"))
return render(request, 'establishment.html', {'form': form,'title':title})
else:
form = VoluntaryServiceForm()
return render(request, 'establishment.html', {'form': form,'title':title})
@login_required
def create_health_service(request):
title = "Cadastrar Serviço"
if request.method == "POST":
form = HealthServiceForm(request.POST)
if form.is_valid():
form.save()
return HttpResponseRedirect(reverse("index"))
return render(request, 'establishment.html', {'form': form,'title':title})
else:
form = HealthServiceForm()
return render(request, 'establishment.html', {'form': form,'title':title})
@login_required
def create_donation(request):
title = "Cadastrar Doação"
if request.method == "POST":
form = DonationForm(request.POST)
if form.is_valid():
form.save()
return HttpResponseRedirect(reverse("index"))
return render(request, 'establishment.html', {'form': form,'title':title})
else:
form = DonationForm()
return render(request, 'establishment.html', {'form': form,'title':title})
@login_required
def edit_establishment(request, id):
title = "Editar Estabelecimento"
establishment = get_object_or_404(Establishment, id=id)
if request.method == "POST":
        form = EstablishmentForm(request.POST, instance=establishment)
if form.is_valid():
form.save()
return HttpResponseRedirect(reverse('index'))
return render(request, 'establishment.html', {'form': form,'title':title})
else:
form = EstablishmentForm(instance=establishment)
return render(request, 'establishment.html', {'form': form,'title':title})
@login_required
def edit_health_service(request, id):
title = "Editar Serviço de Saude"
health_service = get_object_or_404(HealthService, id=id)
if request.method == "POST":
form = HealthServiceForm(request.POST, instance=health_service)
if form.is_valid():
form.save()
return HttpResponseRedirect(reverse('index'))
return render(request, 'establishment.html', {'form': form,'title':title})
else:
form = HealthServiceForm(instance=health_service)
return render(request, 'establishment.html', {'form': form,'title':title})
@login_required
def edit_donation(request, id):
title = "Editar Doação"
donation = get_object_or_404(Donation, id=id)
if request.method == "POST":
form = DonationForm(request.POST, instance=donation)
if form.is_valid():
form.save()
return HttpResponseRedirect(reverse('index'))
return render(request, 'establishment.html', {'form': form,'title':title})
else:
form = DonationForm(instance=donation)
return render(request, 'establishment.html', {'form': form,'title':title})
@login_required
def edit_voluntary(request, id):
title = "Editar Voluntário"
voluntary = get_object_or_404(VoluntaryService, id=id)
if request.method == "POST":
form = VoluntaryServiceForm(request.POST, instance=voluntary)
if form.is_valid():
form.save()
return HttpResponseRedirect(reverse('index'))
return render(request, 'establishment.html', {'form': form,'title':title})
else:
form = VoluntaryServiceForm(instance=voluntary)
return render(request, 'establishment.html', {'form': form,'title':title})
@login_required
def delete_establishment(request, id):
establishment = get_object_or_404(Establishment, id=id)
establishment.delete()
return render(request, 'delete.html')
@login_required
def delete_health_service(request, id):
health_service = get_object_or_404(HealthService, id=id)
health_service.delete()
return render(request, 'delete.html')
@login_required
def delete_donation(request, id):
delete_donation = get_object_or_404(Donation, id=id)
delete_donation.delete()
return render(request, 'delete.html')
@login_required
def delete_voluntary(request, id):
delete_voluntary = get_object_or_404(VoluntaryService, id=id)
delete_voluntary.delete()
return render(request, 'delete.html')
def list_establishment(request):
establishments = Establishment.objects.all()
print(establishments)
return render(request, 'list_establishment.html', {"establishments": establishments})
def list_health_service(request):
health_services = HealthService.objects.all()
return render(request, 'list_health_service.html', {"health_services": health_services})
def list_donation(request):
donations = Donation.objects.all()
return render(request, 'list_donation.html', {"donations": donations})
def list_voluntary(request):
volunteers = VoluntaryService.objects.all()
print(volunteers)
return render(request, 'list_voluntary.html', {"volunteers": volunteers})
def fale_conosco(request, id):
servico = HealthService.objects.get(id=id)
if request.method == "POST":
nome = request.POST['nome']
email = request.POST['email']
texto = request.POST['mensagem']
m = MIMEText(texto)
m.set_charset('utf-8')
m['Subject'] = email
mail = smtplib.SMTP('smtp.gmail.com', 587)
mail.ehlo()
mail.starttls()
mail.login('<EMAIL>', 'fiscaeunb')
mail.sendmail('<EMAIL>', email, m.as_string())
return render(request, 'fale-conosco.html', {"servico": servico})
def perfil(request, id):
servico = HealthService.objects.get(id=id)
return render(request, 'perfil.html', {"servico":servico})
| 2.0625 | 2 |
voting/templatetags/voting/custom_helpers.py | lzh9102/ee-voting | 0 | 12793541 | <gh_stars>0
from django import template
from django.core.urlresolvers import reverse
register = template.Library()
def strip_quotes(s):
if s[0] == s[-1] and s.startswith(('"', "'")): # is quoted string
return s[1:-1] # strip quotes
else:
return s
@register.simple_tag(takes_context=True)
def css_active(context, url):
request = context['request']
if request.path == url:
return "active"
return ""
@register.filter(name='css_class')
def css_class(value, arg):
return value.as_widget(attrs={'class': arg})
@register.tag(name='navlink')
def do_navlink(parser, token):
""" usage: {% navlink <view> %} ... {% endnavlink %}
Generate navigation link for <view>. The navigation link is a listitem
(<li><a>...</a></li>). If the link is the same as the current page, the
css class "active" will be added to the <li> tag
(i.e. <li class="active">...</li>).
"""
try:
args = token.split_contents()
if len(args) < 2:
raise template.TemplateSyntaxError(
"%r tag requires at least 2 arguments" % token.contents.split()[0])
# required arguments
tag_name = strip_quotes(args.pop(0))
view = strip_quotes(args.pop(0))
args = [template.Variable(arg) for arg in args]
nodelist = parser.parse(('endnavlink',))
parser.delete_first_token()
return NavigationLink(nodelist, view, args)
except ValueError:
raise template.TemplateSyntaxError("%r tag requires a single argument"
                % (token.contents.split()[0]))
class NavigationLink(template.Node):
def __init__(self, nodelist, view, args):
self.nodelist = nodelist
self.view = view
self.args = args
def render(self, context):
args = [arg.resolve(context) for arg in self.args]
url = reverse(self.view, args=args)
return '<li class="%(active)s"><a href="%(url)s">%(content)s</a></li>' % {
'url': url,
'active': css_active(context, url),
'content': self.nodelist.render(context),
}
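# Illustrative template usage (assumes this library is loaded in the template and
# 'poll_list' is a named URL in the project):
#   {% navlink 'poll_list' %}Polls{% endnavlink %}
# renders <li class="active"><a href="...">Polls</a></li> when the current request
# path matches the reversed URL, and <li class=""> otherwise.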
| 2.484375 | 2 |
bin/cmssw_wm_create_process.py | khurtado/cmssw-wm-tools | 0 | 12793542 | <filename>bin/cmssw_wm_create_process.py<gh_stars>0
#!/usr/bin/env python
import FWCore.ParameterSet.Config as cms
import pickle
try:
import argparse
except ImportError: #get it from this package instead
import archived_argparse as argparse
import sys, re, os
import json
from tweak_program_helpers import make_parser
def create_process(args,func_args):
if args.funcname == "merge":
if not args.useErrorDataset:
func_args['outputmod_label'] = "MergedError"
try:
from Configuration.DataProcessing.Merge import mergeProcess
process = mergeProcess(**func_args)
except Exception as ex:
msg = "Failed to create a merge process."
print(msg)
raise ex
elif args.funcname == "repack":
try:
from Configuration.DataProcessing.Repack import repackProcess
process = repackProcess(**func_args)
except Exception as ex:
msg = "Failed to create a repack process."
print(msg)
raise ex
else:
try:
from Configuration.DataProcessing.GetScenario import getScenario
            scenario = func_args.pop('scenario', None)  # assumed to be supplied as a key in the --funcargs JSON
            scenarioInst = getScenario(scenario)
except Exception as ex:
msg = "Failed to retrieve the Scenario named "
msg += str(scenario)
msg += "\nWith Error:"
msg += str(ex)
print(msg)
raise ex
try:
process = getattr(scenarioInst, args.funcname)(**func_args)
except Exception as ex:
msg = "Failed to load process from Scenario %s (%s)." % (scenario, scenarioInst)
print(msg)
raise ex
return process
def init_argparse():
parser = argparse.ArgumentParser(
usage="%(prog)s [OPTION] [FILE]...",
description="Process creator (merge, DataProcessing etc)"
)
parser.add_argument('--funcname', required=True)
parser.add_argument('--funcargs', required=True)
parser.add_argument('--useErrorDataset', action="store_true", required=False)
parser.add_argument('--output_pkl', required=True)
return parser
def main():
parser = init_argparse()
args = parser.parse_args()
func_args={}
try:
with open(args.funcargs) as json_file:
json_data = json.load(json_file)
except Exception as e:
print("Error opening file "+args.funcargs)
sys.exit(1)
if not isinstance(json_data,dict):
print("Error loading dictionary "+args.funcargs)
sys.exit(1)
func_args = json_data
process=create_process(args, func_args)
with open(args.output_pkl, "wb") as output_file:
if output_file.closed:
print("Error loading pickle input "+args.output_pkl[i])
sys.exit(1)
pickle.dump(process, output_file, protocol=0)
main()
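# Illustrative invocation (the JSON passed via --funcargs must hold a dict of keyword
# arguments for the selected Configuration.DataProcessing function; the key below is
# an assumption for illustration only):
#   echo '{"output_file": "Merged.root"}' > funcargs.json
#   cmssw_wm_create_process.py --funcname merge --funcargs funcargs.json --output_pkl PSet.pkl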
| 2.046875 | 2 |
posters/apps.py | postersession/postersession | 0 | 12793543 | <reponame>postersession/postersession<filename>posters/apps.py
from django.apps import AppConfig
class PostersConfig(AppConfig):
name = 'posters'
| 1.304688 | 1 |
tests/filters/test_interface.py | FlaskGuys/Flask-Imagine | 1 | 12793544 | import unittest
from flask.ext.imagine.filters.interface import ImagineFilterInterface
class TestImagineFilterInterface(unittest.TestCase):
interface = None
def setUp(self):
self.interface = ImagineFilterInterface()
def test_not_implemented_apply_method(self):
with self.assertRaises(NotImplementedError):
self.interface.apply('')
| 2.375 | 2 |
alura-python/gamelib/dao.py | wiltonpaulo/python-fullcourse | 0 | 12793545 | <filename>alura-python/gamelib/dao.py
from models import Game, User
SQL_DELETE_GAME = "delete from game where id = %s"
SQL_GAME_BY_ID = "SELECT id, name, category, console from game where id = %s"
SQL_USER_BY_ID = "SELECT id, name, password from user where id = %s"
SQL_UPDATE_GAME = "UPDATE game SET name=%s, category=%s, console=%s where id = %s"
SQL_SEARCH_GAMES = "SELECT id, name, category, console from game"
SQL_CREATE_GAME = "INSERT into game (name, category, console) values (%s, %s, %s)"
class GameDao:
def __init__(self, db):
self.__db = db
def save_game(self, game):
cursor = self.__db.connection.cursor()
if game.id:
cursor.execute(
SQL_UPDATE_GAME, (game.name, game.category, game.console, game.id)
)
else:
cursor.execute(SQL_CREATE_GAME, (game.name, game.category, game.console))
game.id = cursor.lastrowid
self.__db.connection.commit()
return game
def list_game(self):
cursor = self.__db.connection.cursor()
cursor.execute(SQL_SEARCH_GAMES)
games = translate_games(cursor.fetchall())
return games
def search_by_id(self, id):
cursor = self.__db.connection.cursor()
cursor.execute(SQL_GAME_BY_ID, (id,))
game_tuple = cursor.fetchone()
return Game(game_tuple[1], game_tuple[2], game_tuple[3], id=game_tuple[0])
def delete_game(self, id):
self.__db.connection.cursor().execute(SQL_DELETE_GAME, (id,))
self.__db.connection.commit()
class UserDao:
def __init__(self, db):
self.__db = db
def search_by_id(self, id):
cursor = self.__db.connection.cursor()
cursor.execute(SQL_USER_BY_ID, (id,))
data = cursor.fetchone()
user = translate_user(data) if data else None
return user
def translate_games(games):
def create_game_with_tuple(game_tuple):
return Game(game_tuple[1], game_tuple[2], game_tuple[3], id=game_tuple[0])
return list(map(create_game_with_tuple, games))
def translate_user(user_tuple):
return User(user_tuple[0], user_tuple[1], user_tuple[2])
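# Illustrative usage sketch (assumes a Flask-MySQLdb style object exposing `.connection`
# and the Game model imported above; values are examples only):
#   dao = GameDao(mysql)
#   dao.save_game(Game('Metroid', 'Adventure', 'Switch'))  # INSERT when id is None, UPDATE otherwise
#   games = dao.list_game()
#   game = dao.search_by_id(1)
#   dao.delete_game(1)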
| 3.21875 | 3 |
src/bgapi/gatt_server/rsp.py | GetAmbush/python-bgapi | 5 | 12793546 | <reponame>GetAmbush/python-bgapi
from struct import (unpack_from, calcsize, error)
def find_attribute(data: bytes, offset: int = 0):
FORMAT = '<HH'
result, sent_len = unpack_from(FORMAT, data, offset=offset)
offset += calcsize(FORMAT)
payload = {
'result': result,
'sent_len': sent_len,
}
return payload, offset
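# Example: decoding a little-endian '<HH' response payload.
#   >>> find_attribute(b"\x00\x00\x05\x00")
#   ({'result': 0, 'sent_len': 5}, 4)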
def read_attribute_type(data: bytes, offset: int = 0):
FORMAT = '<HB'
result, n = unpack_from(FORMAT, data, offset=offset)
offset += calcsize(FORMAT)
_type = data[offset:offset + n]
offset += n
if len(_type) < n:
raise error
payload = {
'result': result,
'type': _type,
}
return payload, offset
def read_attribute_value(data: bytes, offset: int = 0):
FORMAT = '<HB'
result, n = unpack_from(FORMAT, data, offset=offset)
offset += calcsize(FORMAT)
value = data[offset:offset + n]
offset += n
if len(value) < n:
raise error
payload = {
'result': result,
'value': value,
}
return payload, offset
def send_characteristic_notification(data: bytes, offset: int = 0):
FORMAT = '<HH'
result, sent_len = unpack_from(FORMAT, data, offset=offset)
offset += calcsize(FORMAT)
payload = {
'result': result,
'sent_len': sent_len,
}
return payload, offset
def send_user_read_response(data: bytes, offset: int = 0):
FORMAT = '<HH'
result, sent_len = unpack_from(FORMAT, data, offset=offset)
offset += calcsize(FORMAT)
payload = {
'result': result,
'sent_len': sent_len,
}
return payload, offset
def send_user_write_response(data: bytes, offset: int = 0):
FORMAT = '<H'
result, = unpack_from(FORMAT, data, offset=offset)
offset += calcsize(FORMAT)
payload = {
'result': result,
}
return payload, offset
def set_capabilities(data: bytes, offset: int = 0):
FORMAT = '<H'
result, = unpack_from(FORMAT, data, offset=offset)
offset += calcsize(FORMAT)
payload = {
'result': result,
}
return payload, offset
def write_attribute_value(data: bytes, offset: int = 0):
FORMAT = '<H'
result, = unpack_from(FORMAT, data, offset=offset)
offset += calcsize(FORMAT)
payload = {
'result': result,
}
return payload, offset
| 2.296875 | 2 |
backend/api/migrations/0004_auto_20191114_1008.py | a-mazalov/django-vuejs | 0 | 12793547 | <filename>backend/api/migrations/0004_auto_20191114_1008.py
# Generated by Django 2.2.1 on 2019-11-14 07:08
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('api', '0003_auto_20191113_1639'),
]
operations = [
migrations.CreateModel(
name='Course',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('direction', models.CharField(blank=True, choices=[('h', 'html'), ('j', 'js'), ('p', 'php'), ('py', 'python')], default='py', help_text='Направление курса', max_length=1)),
('date_start', models.DateField(blank=True, null=True)),
('date_start_registration', models.DateField(blank=True, null=True, verbose_name='Начало регистрации')),
('level', models.CharField(blank=True, choices=[('j', 'Junior'), ('m', 'Middle'), ('s', 'Senior'), ('l', 'Lead')], default='j', help_text='Уровень курса', max_length=1)),
('duration', models.IntegerField(blank=True, null=True)),
('description', models.TextField(help_text='Описание курса', max_length=2000, null=True)),
('members', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='course', to=settings.AUTH_USER_MODEL)),
],
),
migrations.AlterField(
model_name='courseparticipants',
name='id_course',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='api.Course'),
),
migrations.DeleteModel(
name='Courses',
),
]
| 1.5625 | 2 |
Utils/dicts/sensor_dicts.py | isse-augsburg/PermeabilityNets | 1 | 12793548 | sensor_indices = {
"1140": ((0, 1), (0, 1)),
"285": ((1, 2), (1, 2)),
"80": ((1, 4), (1, 4)),
"20": ((1, 8), (1, 8))
}
sensor_shape = {
"1140": (38, 30),
"285": (19, 15),
"80": (10, 8),
"20": (5, 4)
}
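# Each key is the flattened sensor count of its grid (38*30 = 1140, 19*15 = 285,
# 10*8 = 80, 5*4 = 20); the sensor_indices entries appear to be (start, step)
# pairs that subsample the full 38x30 grid down to the corresponding shape.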
| 1.632813 | 2 |
data/altera_file_splitter.py | bradysalz/fpga-pin-trends | 0 | 12793549 | #!/usr/bin/env python3
"""Separates Altera's junky concatenated CSV files into unique files
We do this in two steps:
1. Move all existing *.txt files to *.tmp files
2. Go through and break up at the start of each CSV file into a new file
"""
import os
import sys
from pathlib import Path
def main(root: str):
root_path = Path(root)
# Stash old files
tmp_files = [f for f in root_path.glob('*.txt')]
for tmp_file in tmp_files:
os.rename(tmp_file, tmp_file.with_suffix('.tmp'))
# Loop through and separate the new files
orig_files = [f for f in root_path.glob('*.tmp')]
for orig_file in orig_files:
idx = 0
new_file_name = orig_file.parent.joinpath(orig_file.stem + '-' +
str(idx) + '.txt')
new_file = open(new_file_name, 'w')
with open(orig_file, 'r', encoding='cp1252') as orig:
lines = orig.readlines()
for line in lines:
if line.startswith('Bank'):
new_file.close()
idx = idx + 1
new_file_name = orig_file.parent.joinpath(
orig_file.stem + '-' + str(idx) + '.txt')
new_file = open(new_file_name, 'w')
new_file.write(line)
else:
new_file.write(line)
new_file.close()
if __name__ == '__main__':
if len(sys.argv) == 1:
main(os.getcwd())
elif len(sys.argv) == 2:
main(sys.argv[1])
else:
print("Wrong number of args.")
| 3.375 | 3 |
B2G/gecko/testing/tps/tps/mozhttpd.py | wilebeast/FireFox-OS | 3 | 12793550 | <gh_stars>1-10
#!/usr/bin/python
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import BaseHTTPServer
import SimpleHTTPServer
import threading
import sys
import os
import urllib
import re
from urlparse import urlparse
from SocketServer import ThreadingMixIn
DOCROOT = '.'
class EasyServer(ThreadingMixIn, BaseHTTPServer.HTTPServer):
allow_reuse_address = True
class MozRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
def translate_path(self, path):
# It appears that the default path is '/' and os.path.join makes the '/'
o = urlparse(path)
sep = '/'
if sys.platform == 'win32':
sep = ''
ret = '%s%s' % ( sep, DOCROOT.strip('/') )
# Stub out addons.mozilla.org search API, which is used when installing
# add-ons. The version is hard-coded because we want tests to fail when
# the API updates so we can update our stubbed files with the changes.
if o.path.find('/en-US/firefox/api/1.5/search/guid:') == 0:
ids = urllib.unquote(o.path[len('/en-US/firefox/api/1.5/search/guid:'):])
if ids.count(',') > 0:
raise Exception('Searching for multiple IDs is not supported.')
base = ids
at_loc = ids.find('@')
if at_loc > 0:
base = ids[0:at_loc]
ret += '/%s.xml' % base
else:
ret += '/%s' % o.path.strip('/')
return ret
# I found on my local network that calls to this were timing out
# I believe all of these calls are from log_message
def address_string(self):
return "a.b.c.d"
# This produces a LOT of noise
def log_message(self, format, *args):
pass
class MozHttpd(object):
def __init__(self, host="127.0.0.1", port=8888, docroot='.'):
global DOCROOT
self.host = host
self.port = int(port)
DOCROOT = docroot
def start(self):
self.httpd = EasyServer((self.host, self.port), MozRequestHandler)
self.server = threading.Thread(target=self.httpd.serve_forever)
self.server.setDaemon(True) # don't hang on exit
self.server.start()
#self.testServer()
#TODO: figure this out
def testServer(self):
fileList = os.listdir(DOCROOT)
filehandle = urllib.urlopen('http://%s:%s' % (self.host, self.port))
data = filehandle.readlines();
filehandle.close()
for line in data:
found = False
# '@' denotes a symlink and we need to ignore it.
webline = re.sub('\<[a-zA-Z0-9\-\_\.\=\"\'\/\\\%\!\@\#\$\^\&\*\(\) ]*\>', '', line.strip('\n')).strip('/').strip().strip('@')
if webline != "":
if webline == "Directory listing for":
found = True
else:
for fileName in fileList:
if fileName == webline:
found = True
if (found == False):
print "NOT FOUND: " + webline.strip()
def stop(self):
if self.httpd:
self.httpd.shutdown()
self.httpd.server_close()
__del__ = stop
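# Illustrative usage sketch (paths and port are examples only):
#   httpd = MozHttpd(host="127.0.0.1", port=8888, docroot="/path/to/docroot")
#   httpd.start()   # serves on a daemon thread
#   httpd.stop()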
| 2.171875 | 2 |