hexsha (string, len 40) | size (int64, 5-2.06M) | ext (string, 10 classes) | lang (string, 1 class) | max_stars_repo_path (string, len 3-248) | max_stars_repo_name (string, len 5-125) | max_stars_repo_head_hexsha (string, len 40-78) | max_stars_repo_licenses (list, len 1-10) | max_stars_count (int64, 1-191k, nullable) | max_stars_repo_stars_event_min_datetime (string, len 24, nullable) | max_stars_repo_stars_event_max_datetime (string, len 24, nullable) | max_issues_repo_path (string, len 3-248) | max_issues_repo_name (string, len 5-125) | max_issues_repo_head_hexsha (string, len 40-78) | max_issues_repo_licenses (list, len 1-10) | max_issues_count (int64, 1-67k, nullable) | max_issues_repo_issues_event_min_datetime (string, len 24, nullable) | max_issues_repo_issues_event_max_datetime (string, len 24, nullable) | max_forks_repo_path (string, len 3-248) | max_forks_repo_name (string, len 5-125) | max_forks_repo_head_hexsha (string, len 40-78) | max_forks_repo_licenses (list, len 1-10) | max_forks_count (int64, 1-105k, nullable) | max_forks_repo_forks_event_min_datetime (string, len 24, nullable) | max_forks_repo_forks_event_max_datetime (string, len 24, nullable) | content (string, len 5-2.06M) | avg_line_length (float64, 1-1.02M) | max_line_length (int64, 3-1.03M) | alphanum_fraction (float64, 0-1) | count_classes (int64, 0-1.6M) | score_classes (float64, 0-1) | count_generators (int64, 0-651k) | score_generators (float64, 0-1) | count_decorators (int64, 0-990k) | score_decorators (float64, 0-1) | count_async_functions (int64, 0-235k) | score_async_functions (float64, 0-1) | count_documentation (int64, 0-1.04M) | score_documentation (float64, 0-1) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6f62e27b225bd0318b85812c8f42343dc3b0fca8 | 1,436 | py | Python | Author/admin.py | CMPUT404-Fa21-Organization/CMPUT404-Project-Social-Distribution | 63c0ba2a03f0b462e3673ce7a4bf6bae7999440c | [
"Apache-2.0"
]
| 3 | 2021-12-11T13:43:56.000Z | 2022-03-31T02:36:05.000Z | Author/admin.py | CMPUT404-Fa21-Organization/CMPUT404-Project-Social-Distribution | 63c0ba2a03f0b462e3673ce7a4bf6bae7999440c | [
"Apache-2.0"
]
| 9 | 2021-10-01T22:46:57.000Z | 2021-12-16T18:01:31.000Z | Author/admin.py | CMPUT404-Fa21-Organization/CMPUT404-Project-Social-Distribution | 63c0ba2a03f0b462e3673ce7a4bf6bae7999440c | [
"Apache-2.0"
]
| 2 | 2021-12-16T16:37:10.000Z | 2021-12-16T20:30:12.000Z | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from .models import *
# Register your models here.
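# Bulk admin actions that activate or deactivate the selected user accounts.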
def set_active(modeladmin, request, queryset):
for user in queryset:
user.is_active = True
user.save()
set_active.short_description = 'Set Account Status: Active'
def deactivate(modeladmin, request, queryset):
for user in queryset:
user.is_active = False
user.save()
deactivate.short_description = 'Set Account Status: Inactive'
class AuthorAdmin(UserAdmin):
# display fields
fieldsets = (
(None, {'fields': ('email', 'displayName','github')}),
(('Permissions'), {'fields': ('is_active', 'is_staff', 'is_superuser',
'groups', 'user_permissions')}),
)
add_fieldsets = (
(None, {
'classes': ('wide',),
'fields': ('email', 'password1', 'password2'),
}),
)
ordering = ('email',)
search_fields = ('email', 'displayName')
# list_display = ('email', 'displayName', 'is_staff', 'url')
list_display = ('email', 'auth_pk', 'displayName', 'github', 'is_active', 'is_staff', 'url')
actions = [set_active, deactivate,]
# admin.site.unregister(User)
admin.site.register(Author, AuthorAdmin)
admin.site.register(Inbox)
admin.site.register(Like)
admin.site.register(Liked)
admin.site.register(FriendRequest)
admin.site.register(Followers) | 31.217391 | 96 | 0.639972 | 711 | 0.495125 | 0 | 0 | 0 | 0 | 0 | 0 | 451 | 0.314067 |
6f637c0d8807f40cbf867588212e880e31335fd4 | 26,912 | py | Python | TP2/pyApp/venv/lib/python3.8/site-packages/pyloco/task.py | MariusBallot/09-2021-Robotics-EFREI-Files | cf6bdb7b9d3b9f368970fbed42c6b403f56b0eeb | [
"W3C"
]
| null | null | null | TP2/pyApp/venv/lib/python3.8/site-packages/pyloco/task.py | MariusBallot/09-2021-Robotics-EFREI-Files | cf6bdb7b9d3b9f368970fbed42c6b403f56b0eeb | [
"W3C"
]
| null | null | null | TP2/pyApp/venv/lib/python3.8/site-packages/pyloco/task.py | MariusBallot/09-2021-Robotics-EFREI-Files | cf6bdb7b9d3b9f368970fbed42c6b403f56b0eeb | [
"W3C"
]
| null | null | null | # -*- coding: utf-8 -*-
"""task module."""
from __future__ import unicode_literals
import sys
import os
import pydoc
import time
import json
import logging
import collections
import pkg_resources
import subprocess
import webbrowser
import websocket
from pyloco.parse import TaskArgParser, PylocoArgParser
from pyloco.proxy import ParentProxy
from pyloco.util import (load_pymod, type_check, pyloco_print, OS, urlparse, teval,
split_assert_expr, get_port, pack_websocket_message,
is_ipv6, pyloco_import, PylocoPickle, import_modulepath)
from pyloco.error import TestError, InternalError, UsageError
from pyloco.base import Object, Global, pyloco_builtins
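# Resolve a task path (a local .py/.plx/.plz file, an installed 'pyloco.task'
# entry point, or a built-in standard/management task) to a Task class,
# handling any --import options found in argv along the way.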
def load_taskclass(taskpath, argv, subargv):
if not taskpath:
return None, None, None, None
# TODO: handle aliased task
if isinstance(taskpath, type):
if issubclass(taskpath, Task):
return taskpath, argv, subargv, None
raise UsageError("Not compatible task type: %s" % type(taskpath))
# TODO: move to callsite to load_taskclass
objs = {}
while "--import" in argv:
idx = argv.index("--import")
mpath = argv.pop(idx+1)
argv.pop(idx)
key, obj = import_modulepath(mpath)
objs[key] = obj
task_class = None
_p = taskpath.split("#", 1)
if len(_p) == 2:
taskpath, fragment = [x.strip() for x in _p]
else:
fragment = ""
if os.path.exists(taskpath):
mods = []
if os.path.isfile(taskpath):
head, base = os.path.split(taskpath)
if base.endswith(".py"):
mods.append(load_pymod(head, base[:-3]))
elif base.endswith(".plx"):
from pyloco.plxtask import PlXTask
task_class = PlXTask
argv.insert(0, taskpath)
elif base.endswith(".plz"):
from pyloco.plztask import PlZTask
task_class = PlZTask
argv.insert(0, taskpath)
elif os.path.isdir(taskpath):
# TODO: support Python package
pass
import pdb; pdb.set_trace()
candidates = {}
for mod in mods:
for name in dir(mod):
if not name.startswith("_"):
obj = getattr(mod, name)
if (type(obj) == type(Task) and issubclass(obj, Task) and
(obj.__module__ is None or
not obj.__module__.startswith("pyloco."))):
candidates[name] = obj
if candidates:
if fragment:
if hasattr(candidates, fragment):
task_class = getattr(candidates, fragment)
else:
raise UsageError("No task is found with a fragment of "
"'%s'." % fragment)
elif len(candidates) == 1:
task_class = candidates.popitem()[1]
else:
raise UsageError(
"More than one frame are found."
"Please add fragment to select one: %s" %
list(candidates.keys())
)
if task_class:
setattr(task_class, "_path_", os.path.abspath(taskpath))
#else:
# raise UsageError("Task class is not found. Please check path: %s" % taskpath)
if task_class is None:
from pyloco.manage import _ManagerBase
if taskpath in _ManagerBase._default_tasks_:
task_class = _ManagerBase._default_tasks_[taskpath]
if task_class is None:
for ep in pkg_resources.iter_entry_points(group='pyloco.task'):
if taskpath == ep.name:
task_class = ep.load()
from pyloco.plxtask import PlXTask
if task_class is PlXTask:
task_mod = pyloco_import(taskpath)
task_dir = os.path.dirname(task_mod.__file__)
argv.insert(0, os.path.join(task_dir, getattr(task_mod, "plx")))
break
if not task_class:
from pyloco.mgmttask import mgmt_tasks
from pyloco.stdtask import standard_tasks
if taskpath in mgmt_tasks:
task_class = mgmt_tasks[taskpath]
elif taskpath in standard_tasks:
task_class = standard_tasks[taskpath]
# TODO support remote task
# if not task_class:
#
# url = urlparse(taskpath)
#
# if url.netloc or url.scheme:
# argv.insert(0, taskpath)
# task_class = RemoteTask
if not task_class:
raise UsageError("Task '%s' is not found. Please check path." % taskpath)
return task_class, argv, subargv, objs
def taskclass(taskpath):
cls, _, _, _ = load_taskclass(taskpath, [], [])
return cls
class Task(Object):
"""Base class for pyloco Tasks
"""
_version_ = "0.1.0"
_argparser_ = TaskArgParser
def __new__(cls, parent, *vargs, **kwargs):
obj = super(Task, cls).__new__(cls)
obj.parent = parent
obj.subargv = None
obj.taskattr = {}
if not hasattr(obj, "_name_"):
obj._name_ = kwargs.pop("name", cls.__name__)
#obj._parser = TaskArgParser(obj)
obj._parser = cls._argparser_(obj)
obj._env = {"__builtins__": pyloco_builtins,
"__arguments__": {}}
obj._fwddefs = {}
obj._fwds = {}
obj._shrdefs = {}
#obj._rdcdefs = {}
obj._rdcs = {}
obj._logger = None
obj._verbose = False
obj._websocket_server = None
obj._websocket_client = None
obj._webserver = None
obj.tglobal = Global()
obj.parse_known_args = False
obj.unknown_args = []
return obj
def clone(self):
Task(self.parent)
def add_logger(self, logpath):
root, ext = os.path.splitext(logpath)
if ext == ".log":
self.parent.log_setup(filename=logpath)
else:
self.parent.log_setup(filename=logpath+".log")
self._logger = logging.getLogger(self.get_name())
def _log_level(self, level, *vargs, **kwargs):
logger = self._logger if self._logger else self.parent._logger
if logger:
getattr(logger, level)(*vargs, **kwargs)
def log_debug(self, *vargs, **kwargs):
self._log_level("debug", *vargs, **kwargs)
def log_info(self, *vargs, **kwargs):
self._log_level("info", *vargs, **kwargs)
def log_warn(self, *vargs, **kwargs):
self._log_level("warn", *vargs, **kwargs)
def log_warning(self, *vargs, **kwargs):
self._log_level("warning", *vargs, **kwargs)
def log_error(self, *vargs, **kwargs):
self._log_level("error", *vargs, **kwargs)
def log_critical(self, *vargs, **kwargs):
self._log_level("critical", *vargs, **kwargs)
def log_exception(self, *vargs, **kwargs):
self._log_level("exception", *vargs, **kwargs)
def get_name(self):
return self.parent.shared["parent_name"] + "." + self._name_
def get_mgrname(self):
return self.get_name().split(".")[0]
def get_proxy(self, proxycls=None, inherit_shared=False):
if proxycls is None:
proxycls = ParentProxy
proxy = proxycls(self)
if inherit_shared:
proxy.shared.update(self.parent.shared)
return proxy
def _register_check(self, dest):
if not dest:
raise UsageError("Incorrect name: %s" % dest)
if dest.startswith("_"):
raise UsageError("'Forward-name' should not start with an "
"underscore ('_'): %s" % dest)
if dest in self._fwddefs:
raise UsageError("'%s' is already registered for forwarding" %
dest)
if dest in self._shrdefs:
raise UsageError("'%s' is already registered for sharing" % dest)
#if dest in self._rdcdefs:
# raise UsageError("'%s' is already registered for reducing" % dest)
def register_forward(self, dest, type=None, help=None):
self._register_check(dest)
self._fwddefs[dest] = (type, help)
def register_shared(self, dest, type=None, help=None):
self._register_check(dest)
self._shrdefs[dest] = (type, help)
#def register_reduce(self, dest, type=None, help=None):
# self._register_check(dest)
# self._rdcdefs[dest] = (type, help)
def _add_transfer(self, defs, cont, **kwargs):
for dest, value in kwargs.items():
if dest not in defs:
raise UsageError("'%s' is not registered for data transfer." %
dest)
if type_check(value, defs[dest][0]):
cont[dest] = value
else:
if isinstance(value, str) and os.path.isfile(value):
import pdb; pdb.set_trace() # noqa: E702
else:
raise TestError("Data transfer type check failure: %s" % dest)
def add_forward(self, **kwargs):
self._add_transfer(self._fwddefs, self._fwds, **kwargs)
def add_shared(self, **kwargs):
self._add_transfer(self._shrdefs, self.parent.shared, **kwargs)
#def add_reduce(self, **kwargs):
# self._add_transfer(self._rdcdefs, self._rcds, **kwargs)
def write_pickle(self, pickler, data):
return data
def pre_perform(self, targs):
if targs.log:
self.add_logger(targs.log)
if targs.verbose:
self._verbose = True
if hasattr(targs, "assert_input") and targs.assert_input:
env = {"__builtins__": pyloco_builtins}
for k, v in self._env.items():
if not k.startswith("_"):
env[k] = v
for key, value in targs.__dict__.items():
if key == "assert_input":
continue
env[key] = value
for boolexpr in targs.assert_input:
for varg in boolexpr.vargs:
assert_result = eval(varg, env)
if assert_result:
if self._verbose:
pyloco_print('\nINPUT TEST PASSED with "%s"' %
varg)
else:
pairs = split_assert_expr(varg)
if not pairs:
raise TestError(
"\nINPUT TEST FAILED with '%s' =>"
" not True" % varg
)
elif len(pairs) == 1:
sep, (lexpr, rexpr) = pairs.popitem()
msg = (
"\nINPUT TEST(%s) is FAILED.\n "
"Left expr(%s) of '%s' is evaluated to '%s'"
" and\n right expr(%s) of '%s' "
"is evaluated to '%s'.\n"
) % (varg, lexpr, sep, eval(lexpr, env), rexpr,
sep, eval(rexpr, env))
raise TestError(msg)
else:
msg = (
"\nINPUT TEST(%s) FAILED: detected multiple"
" possibilities of this test failure\n") % varg
idx = 0
for sep, (lexpr, rexpr) in pairs.items():
idx += 1
try:
msg += (
"CASE%d:\n Left expr(%s)" " of"
" '%s' is evaluated to '%s' and\n"
" right expr(%s) of '%s' is "
"evaluated to '%s'.\n"
) % (idx, lexpr, sep, eval(lexpr, env),
rexpr, sep, eval(rexpr, env))
except Exception:
pass
raise TestError(msg)
# if targs.import_module:
# modpath = targs.import_module
# head, base = os.path.split(modpath)
# mod = None
#
# if os.path.isfile(modpath) and modpath.endswith(".py"):
# modname = base[:-3]
# mod = load_pymod(head, modname)
#
# elif (os.path.isdir(modpath) and
# os.path.isfile(os.path.join(modpath, "__init__.py"))):
# if base[-1] == os.sep:
# modname = base[:-1]
#
# else:
# modname = base
#
# mod = load_pymod(head, modname)
#
# else:
# try:
# modname = modpath
# mod = pyloco_import(modname)
#
# except ModuleNotFoundError as err:
# raise UsageError("'%s' module is not found." % modname)
# if mod:
# self._env[modname] = mod
if targs.calculate:
for calc in targs.calculate:
for expr in calc.vargs:
self._env["_"] = teval(expr, self._env)
for lhs, rhs in calc.kwargs.items():
self._env[lhs.strip()] = teval(rhs, self._env)
if targs.webapp:
appath = targs.webapp
# TODO: reuse webserver and websocket
# TODO: user-provided js can control if reuse or not through
# websocket init msg
if appath.endswith(".js"):
webapp = os.path.abspath(appath)[:-3]
elif appath.endswith(".plw"):
import pdb; pdb.set_trace() # noqa: E702
else:
webapp = os.path.abspath(appath)
here = os.path.dirname(__file__)
websocket_port = get_port()
websocket_path = os.path.join(here, "websocket.py")
webserver_port = get_port()
webserver_path = os.path.join(here, "webserver.py")
self._websocket_server = subprocess.Popen(
[sys.executable, websocket_path, str(websocket_port),
str(webserver_port)], stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
self._webserver = subprocess.Popen(
[sys.executable, webserver_path, str(webserver_port),
str(websocket_port)] + [webapp], stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
webbrowser.open("http://127.0.0.1:%d" % webserver_port)
if OS == "windows" and is_ipv6():
self._websocket_client = websocket.create_connection(
"ws://[::1]:%d/" % websocket_port)
else:
self._websocket_client = websocket.create_connection(
"ws://127.0.0.1:%d/" % websocket_port)
self._websocket_client.send("pyloco")
maxiter = 100
count = 0
while count < maxiter:
self._websocket_client.send("check_browser")
out = self._websocket_client.recv()
if out == "True":
break
time.sleep(0.1)
count += 1
def send_websocket_message(self, sender, msgtype, msg):
if self._websocket_client:
self._websocket_client.send(
json.dumps(pack_websocket_message(sender, msgtype, msg))
)
self._websocket_client.recv()
def post_perform(self, targs):
if targs.webapp:
appath = targs.webapp
wait2close = self.taskattr.get("webapp.wait2close", True)
if wait2close:
if self._websocket_server:
pyloco_print("Waiting for '%s' to be completed..." %
appath, end="")
sys.stdout.flush()
self._websocket_server.communicate(input=None)
if self._websocket_client:
self._websocket_client.close()
if self._webserver:
self._webserver.communicate(input=None)
pyloco_print("DONE.")
sys.stdout.flush()
env = dict(self._env)
env.update(self.parent.shared)
env.update(self._fwds)
lenv = {}
if targs.forward:
try:
for fwd in targs.forward:
for varg in fwd.vargs:
self._fwds[varg] = env[varg]
for dest, value in fwd.kwargs.items():
self._fwds[dest] = eval(value, env, lenv)
except Exception as err:
raise UsageError("failed on forwarding: %s" % str(err))
if targs.shared:
self._handle_sharedarg(targs.shared)
if hasattr(targs, "assert_output") and targs.assert_output:
aenv = {"__builtins__": pyloco_builtins}
for k, v in self._env.items():
if not k.startswith("_"):
aenv[k] = v
aenv.update(self.parent.shared)
aenv.update(self._fwds)
for boolexpr in targs.assert_output:
for varg in boolexpr.vargs:
assert_result = eval(varg, aenv)
if assert_result:
if self._verbose:
pyloco_print(
'\nOUTPUT TEST PASSED with "%s"' % varg
)
else:
pairs = split_assert_expr(varg)
if not pairs:
raise TestError(
"\nOUTPUT TEST FAILED with '%s' =>"
" not True" % varg
)
elif len(pairs) == 1:
sep, (lexpr, rexpr) = pairs.popitem()
msg = (
"\nOUTPUT TEST(%s) is FAILED.\n "
"Left expr(%s) of '%s' is evaluated to '%s'"
" and\n right expr(%s) of '%s' "
"is evaluated to '%s'.\n"
) % (varg, lexpr, sep, eval(lexpr, aenv), rexpr,
sep, eval(rexpr, aenv))
raise TestError(msg)
else:
msg = (
"\nOUTPUT TEST(%s) FAILED: detected multiple"
" possibilities of this test failure\n"
) % varg
idx = 0
for sep, (lexpr, rexpr) in pairs.items():
idx += 1
try:
msg += (
"CASE%d:\n Left expr(%s)" " of"
" '%s' is evaluated to '%s' and\n"
" right expr(%s) of '%s' is "
"evaluated to '%s'.\n"
) % (idx, lexpr, sep, eval(lexpr, aenv),
rexpr, sep, eval(rexpr, aenv))
except Exception:
pass
raise TestError(msg)
if targs.write_pickle:
ppf = PylocoPickle()
data = dict(self.parent.shared)
data.update(self._fwds)
data.pop("parent_name", None)
pdata = self.write_pickle(ppf, data)
ppf.dump(pdata, targs.write_pickle)
def write_pickle(self, pickler, data):
return data
def read_pickle(self, path):
import pdb; pdb.set_trace()
def _handle_sharedarg(self, shared, forwards):
try:
env = dict(self._env)
env.update(forwards)
for shr in shared:
for varg in shr.vargs:
self.parent.shared[varg] = env[varg]
for dest, value in shr.kwargs.items():
self.parent.shared[dest] = eval(value, env, {})
except Exception as err:
raise UsageError("failed on sharing variable: %s" % str(err))
def run(self, argv, subargv=None, forward=None):
"""task run function
"""
self.subargv = subargv
# attribute setting
if forward is None:
forward = {}
elif not isinstance(forward, dict):
raise InternalError("forward is not a dict type: %s" %
str(forward))
fpenv = {}
fpenv.update(forward)
fpenv.update(self.parent.shared)
if "--read-pickle" in argv:
idx = argv.index("--read-pickle")
ppath = argv.pop(idx+1)
argv.pop(idx)
ppickle = PylocoPickle()
penv = ppickle.load(ppath)
fpenv.update(penv)
# argument parsing
targs, self.unknown_args = self._parser.parse_args(argv, fpenv, parse_known_args=self.parse_known_args)
# pre perform
self.pre_perform(targs)
self.send_websocket_message("pyloco", "task", "Task '%s' is started."
% self._name_)
# perform
if hasattr(self, "_group_perform"):
retval = self._group_perform(targs)
else:
if "_pathid_" in fpenv and isinstance(fpenv["_pathid_"], int):
self._env["_pathid_"] = fpenv["_pathid_"]
retval = self.perform(targs)
if retval is None:
retval = 0
self.send_websocket_message("pyloco", "task", "Task '%s' is finished."
% self._name_)
# post perform
self.post_perform(targs)
_fwds = self._fwds
self._fwds = {}
return retval, _fwds
def perform(self, targs):
"""task perform functiion
Task should implement this function.
"""
raise NotImplementedError("'perform' method is not implemented in %s." % str(self.__class__))
def add_data_argument(self, *vargs, **kwargs):
self._parser.add_data_argument(*vargs, **kwargs)
def del_data_argument(self, name):
self._parser.del_data_argument(name)
def set_data_argument(self, *vargs, **kwargs):
self._parser.set_data_argument(*vargs, **kwargs)
def add_option_argument(self, *vargs, **kwargs):
self._parser.add_option_argument(*vargs, **kwargs)
class OptionTask(Task):
def _lines(name, title, tasks):
lines = [title]
lines.append("-"*len(title))
for task in sorted(tasks):
docs = tasks[task].__doc__
if docs:
lines.append("{0:10} : {1}".format(task,
pydoc.splitdoc(docs)[0]))
else:
lines.append("{0:10} : {0}".format(task))
return lines
def show_installed_tasks(self, tasks):
#installed_tasks = dict((n, t) for n, t in tasks.items)
return self._lines("installed tasks", tasks)
def show_standard_tasks(self):
from pyloco.stdtask import standard_tasks
return self._lines("standard tasks", standard_tasks)
def show_mgmt_tasks(self):
from pyloco.mgmttask import mgmt_tasks
return self._lines("management tasks", mgmt_tasks)
def run(self, argv, subargv=None, forward=None):
mgrname = self.parent.get_managerattr("name")
mgrver = self.parent.get_managerattr("version")
if not argv:
print(self.parent.get_managerattr("usage").format(manager=mgrname))
return 0, None
usage = self.parent.get_managerattr("usage").format(manager=mgrname)
if "--verbose" in argv:
long_desc = self.parent.get_managerattr("long_description")
list_help = self.parent.get_managerattr("list_help").format(
manager=mgrname)
epilog = self.parent.get_managerattr("epilog")
desc = long_desc + " " + list_help
parser = PylocoArgParser(mgrname, mgrver, description=desc,
usage=usage, epilog=epilog)
else:
desc = self.parent.get_managerattr("description")
parser = PylocoArgParser(mgrname, mgrver, description=desc,
usage=usage)
targs = parser.parse_args(argv)
if targs.list:
pyloco_print("")
pyloco_print("Please run '%s <task> -h' for task-specific "
"information." % mgrname)
# installed
installed_tasks = collections.OrderedDict()
default_tasks = self.parent.get_managerattr("default_tasks")
if default_tasks is not None:
for name, cls in default_tasks.items():
installed_tasks[name] = cls
for ep in pkg_resources.iter_entry_points(group='pyloco.task'):
if ep.name not in installed_tasks:
task_class = ep.load()
installed_tasks[ep.name] = task_class
pyloco_print("")
for line in self.show_installed_tasks(installed_tasks):
pyloco_print(line)
pyloco_print("")
for line in self.show_standard_tasks():
pyloco_print(line)
pyloco_print("")
for line in self.show_mgmt_tasks():
pyloco_print(line)
elif targs.verbose:
parser.print_help()
return 0, None
class RemoteTask(Task):
"""Remote task
RemoteTask downloads a remote task and runs it locally.
"""
_version_ = "0.1.0"
def run(self, argv, subargv=None, forward=None):
raise Exception("REMOTETASK")
import pdb; pdb.set_trace() # noqa: E702
class StandardTask(Task):
_installation_ = """'{name}' task is one of pyloco standard tasks.
Standard tasks are already installed when pyloco was installed."""
class ManagementTask(Task):
_installation_ = """'{name}' task is one of pyloco management tasks.
Management tasks are always available once pyloco is installed."""
| 31.329453 | 111 | 0.506986 | 21,994 | 0.817256 | 0 | 0 | 0 | 0 | 0 | 0 | 4,870 | 0.18096 |
6f644b09bfe662762ed95cb2b170c8fc73f84411 | 1,376 | py | Python | azure-iot-device/azure/iot/device/aio/__init__.py | olivakar/azure-iot-sdk-python | d8f2403030cf94510d381d8d5ac37af6e8d306f8 | [
"MIT"
]
| null | null | null | azure-iot-device/azure/iot/device/aio/__init__.py | olivakar/azure-iot-sdk-python | d8f2403030cf94510d381d8d5ac37af6e8d306f8 | [
"MIT"
]
| null | null | null | azure-iot-device/azure/iot/device/aio/__init__.py | olivakar/azure-iot-sdk-python | d8f2403030cf94510d381d8d5ac37af6e8d306f8 | [
"MIT"
]
| null | null | null | """Azure IoT Device Library - Asynchronous
This library provides asynchronous clients for communicating with Azure IoT services
from an IoT device.
"""
from azure.iot.device.iothub.aio import *
from azure.iot.device.provisioning.aio import *
from . import patch_documentation
# Dynamically patch the clients to add shim implementations for all the inherited methods.
# This is necessary to generate accurate online docs.
# It SHOULD not impact the functionality of the methods themselves in any way.
# NOTE In the event of addition of new methods and generation of accurate documentation
# for those methods we have to append content to "patch_documentation.py" file.
# In order to do so please uncomment the "patch.add_shims" lines below,
# enable logging with level "DEBUG" in a python terminal and do
# "import azure.iot.device". The delta between the newly generated output
# and the existing content of "patch_documentation.py" should be appended to
# the function "execute_patch_for_sync" in "patch_documentation.py".
# Once done please again comment out the "patch.add_shims" lines below.
# patch.add_shims_for_inherited_methods(IoTHubDeviceClient) # noqa: F405
# patch.add_shims_for_inherited_methods(IoTHubModuleClient) # noqa: F405
# patch.add_shims_for_inherited_methods(ProvisioningDeviceClient) # noqa: F405
patch_documentation.execute_patch_for_async()
| 45.866667 | 90 | 0.805233 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,185 | 0.861192 |
6f647632e2c96c2063ca3a82382e2a10a7664a9e | 1,716 | py | Python | lino_xl/lib/reception/__init__.py | khchine5/xl | b1634937a9ce87af1e948eb712b934b11f221d9d | [
"BSD-2-Clause"
]
| 1 | 2018-01-12T14:09:48.000Z | 2018-01-12T14:09:48.000Z | lino_xl/lib/reception/__init__.py | khchine5/xl | b1634937a9ce87af1e948eb712b934b11f221d9d | [
"BSD-2-Clause"
]
| 1 | 2019-09-10T05:03:47.000Z | 2019-09-10T05:03:47.000Z | lino_xl/lib/reception/__init__.py | khchine5/xl | b1634937a9ce87af1e948eb712b934b11f221d9d | [
"BSD-2-Clause"
]
| null | null | null | # -*- coding: UTF-8 -*-
# Copyright 2013-2016 Luc Saffre
#
# License: BSD (see file COPYING for details)
"""This module is for managing a reception desk and a waiting queue:
register clients into a waiting queue as they present themselves at a
reception desk (Empfangsschalter), and unregister them when they leave
again.
It depends on :mod:`lino_xl.lib.cal`. It does not add any model, but
adds some workflow states, actions and tables.
Extended by :mod:`lino_welfare.modlib.reception`.
.. autosummary::
:toctree:
models
workflows
"""
from lino.api import ad, _
class Plugin(ad.Plugin):
"See :class:`lino.core.Plugin`."
verbose_name = _("Reception")
needs_plugins = ['lino.modlib.system', 'lino_xl.lib.cal']
required_user_groups = 'reception'
"""The required user groups for viewing actors of this plugin.
This is overridden by Lino Welfare to include "coaching".
    This way of configuring permissions is an example of why it would
    be useful to replace user groups with a UserType class (and to
    populate UserTypes with subclasses of it).
"""
def setup_main_menu(config, site, user_type, m):
app = site.plugins.reception
m = m.add_menu(app.app_name, app.verbose_name)
m.add_action('cal.EntriesByDay')
m.add_action('reception.WaitingVisitors')
m.add_action('reception.BusyVisitors')
m.add_action('reception.GoneVisitors')
# MyWaitingVisitors is maybe not needed as a menu entry since it
# is also a get_dashboard_items. if i remove it then i must edit
# `pcsw_tests.py`. Waiting for user feedback before doing this.
m.add_action('reception.MyWaitingVisitors')
| 28.6 | 72 | 0.701049 | 1,130 | 0.658508 | 0 | 0 | 0 | 0 | 0 | 0 | 1,276 | 0.74359 |
6f659c58598cee6e53216640aed93bdbc6f2a194 | 320 | py | Python | old/accent_analyser/rules/RuleRemoveThe.py | stefantaubert/eng2ipa-accent-transformer | d620c70b06c83119402e255085046747ade87444 | [
"MIT"
]
| null | null | null | old/accent_analyser/rules/RuleRemoveThe.py | stefantaubert/eng2ipa-accent-transformer | d620c70b06c83119402e255085046747ade87444 | [
"MIT"
]
| null | null | null | old/accent_analyser/rules/RuleRemoveThe.py | stefantaubert/eng2ipa-accent-transformer | d620c70b06c83119402e255085046747ade87444 | [
"MIT"
]
| null | null | null | from accent_analyser.rules.EngRule import EngRule
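# Accent rule that removes the word "the" from the converted output.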
class RuleRemoveThe(EngRule):
def __init__(self, likelihood=1.0):
super().__init__(likelihood)
def _convert_core(self, words: list, current_index: int):
word = words[current_index].content
if word == "the":
return ""
else:
return word
| 22.857143 | 59 | 0.69375 | 267 | 0.834375 | 0 | 0 | 0 | 0 | 0 | 0 | 7 | 0.021875 |
6f6a38a0483844a3f770817a9b327db335f9b10a | 255 | py | Python | ssrl/providers/base.py | AspirinGeyer/PySSRL | bdb27d7ada2fc317b8e2ea18f389e280d58e24ac | [
"Apache-2.0"
]
| 6 | 2019-06-07T19:25:51.000Z | 2019-10-30T01:56:29.000Z | ssrl/providers/base.py | AspirinGeyer/PySSRL | bdb27d7ada2fc317b8e2ea18f389e280d58e24ac | [
"Apache-2.0"
]
| 1 | 2019-08-26T00:05:50.000Z | 2019-08-26T00:05:50.000Z | ssrl/providers/base.py | AspirinGeyer/PySSRL | bdb27d7ada2fc317b8e2ea18f389e280d58e24ac | [
"Apache-2.0"
]
| 1 | 2019-10-30T01:56:33.000Z | 2019-10-30T01:56:33.000Z | # -*- coding:utf-8 -*-
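# Abstract provider interface: concrete providers implement loads(link_url) and dumps(conf).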
class BaseProvider(object):
@staticmethod
def loads(link_url):
raise NotImplementedError("Implemetion required.")
@staticmethod
def dumps(conf):
raise NotImplementedError("Implemetion required.")
| 19.615385 | 58 | 0.670588 | 229 | 0.898039 | 0 | 0 | 190 | 0.745098 | 0 | 0 | 68 | 0.266667 |
6f6b28d63b93b95d61bab409bb560af9d95cf417 | 1,505 | py | Python | tornado_demo/web2py/applications/examples/controllers/global.py | ls-2018/tips | 1f5f5195d7181b5dd4616db02166f7f92c97f1cd | [
"MIT"
]
| 2 | 2019-05-07T03:08:25.000Z | 2020-05-22T10:10:00.000Z | tornado_demo/web2py/applications/examples/controllers/global.py | ls-2018/tips | 1f5f5195d7181b5dd4616db02166f7f92c97f1cd | [
"MIT"
]
| 7 | 2020-05-22T13:29:42.000Z | 2021-09-23T23:30:25.000Z | tornado_demo/web2py/applications/examples/controllers/global.py | ls-2018/py | 1f5f5195d7181b5dd4616db02166f7f92c97f1cd | [
"MIT"
]
| null | null | null | session.forget()
def get(args):
if args[0].startswith('__'):
return None
try:
        obj = globals()[args[0]]
for k in range(1, len(args)):
obj = getattr(obj, args[k])
return obj
except:
return None
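# Look up the object named by request.args and expose its docstring, type,
# class, bases and attributes to the view.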
def vars():
"""the running controller function!"""
title = '.'.join(request.args)
attributes = {}
if not request.args:
(doc, keys, t, c, d, value) = ('Global variables', globals(), None, None, [], None)
elif len(request.args) < 3:
obj = get(request.args)
if obj:
doc = getattr(obj, '__doc__', 'no documentation')
keys = dir(obj)
t = type(obj)
c = getattr(obj, '__class__', None)
d = getattr(obj, '__bases__', None)
for key in keys:
a = getattr(obj, key, None)
if a and not isinstance(a, DAL):
doc1 = getattr(a, '__doc__', '')
t1 = type(a)
c1 = getattr(a, '__class__', None)
d1 = getattr(a, '__bases__', None)
key = '.'.join(request.args) + '.' + key
attributes[key] = (doc1, t1, c1, d1)
else:
            doc = 'Unknown'
keys = []
t = c = d = None
else:
raise HTTP(400)
return dict(
title=title,
args=request.args,
t=t,
c=c,
d=d,
doc=doc,
attributes=attributes,
)
| 27.363636 | 91 | 0.453821 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 159 | 0.105648 |
6f6c2c1c13418649733376c632ea6395a15039ac | 857 | py | Python | medium/python3/c0108_223_rectangle-area/00_leetcode_0108.py | drunkwater/leetcode | 8cc4a07763e71efbaedb523015f0c1eff2927f60 | [
"Ruby"
]
| null | null | null | medium/python3/c0108_223_rectangle-area/00_leetcode_0108.py | drunkwater/leetcode | 8cc4a07763e71efbaedb523015f0c1eff2927f60 | [
"Ruby"
]
| null | null | null | medium/python3/c0108_223_rectangle-area/00_leetcode_0108.py | drunkwater/leetcode | 8cc4a07763e71efbaedb523015f0c1eff2927f60 | [
"Ruby"
]
| 3 | 2018-02-09T02:46:48.000Z | 2021-02-20T08:32:03.000Z | # DRUNKWATER TEMPLATE(add description and prototypes)
# Question Title and Description on leetcode.com
# Function Declaration and Function Prototypes on leetcode.com
#223. Rectangle Area
#Find the total area covered by two rectilinear rectangles in a 2D plane.
#Each rectangle is defined by its bottom left corner and top right corner as shown in the figure.
#Assume that the total area is never beyond the maximum possible value of int.
#Credits:
#Special thanks to @mithmatt for adding this problem, creating the above image and all test cases.
#class Solution:
# def computeArea(self, A, B, C, D, E, F, G, H):
# """
# :type A: int
# :type B: int
# :type C: int
# :type D: int
# :type E: int
# :type F: int
# :type G: int
# :type H: int
# :rtype: int
# """
# Time Is Money | 32.961538 | 98 | 0.655776 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 832 | 0.970828 |
6f6c63911e71ae7c84e18bedf35df7f0d63d41aa | 437 | py | Python | serialTest.py | fmuno003/SeniorDesign | 113bdcf4cc906042f44736a1ffddb6ffff3a217e | [
"BSD-3-Clause"
]
| 1 | 2019-04-29T16:07:51.000Z | 2019-04-29T16:07:51.000Z | serialTest.py | fmuno003/SeniorDesign | 113bdcf4cc906042f44736a1ffddb6ffff3a217e | [
"BSD-3-Clause"
]
| null | null | null | serialTest.py | fmuno003/SeniorDesign | 113bdcf4cc906042f44736a1ffddb6ffff3a217e | [
"BSD-3-Clause"
]
| null | null | null | import serial
import RPi.GPIO as GPIO
import time
ser=serial.Serial("/dev/ttyACM0",9600)
start_time = time.time()
imu = open("IMU.txt","w")
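# Discard serial readings for the first second, then log nonzero IMU values
# until 8 seconds have elapsed.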
while time.time() - start_time <= 1:
ser.readline()
while time.time() - start_time <= 8:
read_ser=ser.readline()
if float(read_ser) == 0.00:
pass
else:
read = read_ser.strip('\n')
imu.write(read)
imu.write('\n')
imu.close()
| 19.863636 | 39 | 0.578947 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 34 | 0.077803 |
6f6e858702c8ce5b6a0c7be5155f97db4b0d395c | 1,950 | py | Python | src/pyg_base/_zip.py | nclarey/pyg-base | a7b90ea2ad4d740d8e7f8c4a7c9d341d36373862 | [
"MIT"
]
| null | null | null | src/pyg_base/_zip.py | nclarey/pyg-base | a7b90ea2ad4d740d8e7f8c4a7c9d341d36373862 | [
"MIT"
]
| null | null | null | src/pyg_base/_zip.py | nclarey/pyg-base | a7b90ea2ad4d740d8e7f8c4a7c9d341d36373862 | [
"MIT"
]
| 1 | 2022-01-03T21:56:14.000Z | 2022-01-03T21:56:14.000Z | from pyg_base._types import is_iterable
from pyg_base._loop import len0
__all__ = ['zipper', 'lens']
def lens(*values):
"""
measures (and enforces) a common length across all values
:Parameters:
----------------
*values : lists
Raises
------
ValueError
if you have values with multi lengths.
:Returns:
-------
int
common length.
:Example:
--------------
>>> assert lens() == 0
>>> assert lens([1,2,3], [2,4,5]) == 3
>>> assert lens([1,2,3], [2,4,5], [6]) == 3
"""
if len0(values) == 0:
return 0
all_lens = [len0(value) for value in values]
lens = set(all_lens) - {1}
if len(lens)>1:
raise ValueError('found multiple lengths %s '%lens)
return list(lens)[0] if lens else 1
def zipper(*values):
"""
a safer version of zip
:Examples: zipper works with single values as well as full list:
---------------
>>> assert list(zipper([1,2,3], 4)) == [(1, 4), (2, 4), (3, 4)]
>>> assert list(zipper([1,2,3], [4,5,6])) == [(1, 4), (2, 5), (3, 6)]
>>> assert list(zipper([1,2,3], [4,5,6], [7])) == [(1, 4, 7), (2, 5, 7), (3, 6, 7)]
>>> assert list(zipper([1,2,3], [4,5,6], None)) == [(1, 4, None), (2, 5, None), (3, 6, None)]
>>> assert list(zipper((1,2,3), np.array([4,5,6]), None)) == [(1, 4, None), (2, 5, None), (3, 6, None)]
:Examples: zipper rejects multi-length lists
---------------
>>> import pytest
>>> with pytest.raises(ValueError):
>>> zipper([1,2,3], [4,5])
:Parameters:
----------------
*values : lists
values to be zipped
:Returns:
-------
zipped values
"""
values = [list(value) if isinstance(value, zip) else value if is_iterable(value) else [value] for value in values]
n = lens(*values)
values = [value * n if len(value) == 1 else value for value in values]
return zip(*values)
| 27.083333 | 118 | 0.510256 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,354 | 0.694359 |
6f6e961109cfe080e1074fb4fb957b034dcf9958 | 1,997 | py | Python | cli/pawls/preprocessors/grobid.py | vtcaregorodtcev/pawls-1 | 32cfb7bc56edac2fe972467a1133a31ae901c727 | [
"Apache-2.0"
]
| null | null | null | cli/pawls/preprocessors/grobid.py | vtcaregorodtcev/pawls-1 | 32cfb7bc56edac2fe972467a1133a31ae901c727 | [
"Apache-2.0"
]
| 13 | 2022-02-17T06:05:44.000Z | 2022-03-17T02:47:49.000Z | cli/pawls/preprocessors/grobid.py | vtcaregorodtcev/pawls-1 | 32cfb7bc56edac2fe972467a1133a31ae901c727 | [
"Apache-2.0"
]
| 2 | 2021-09-28T08:01:42.000Z | 2021-09-28T08:18:31.000Z | import json
from typing import List
import requests
from pawls.preprocessors.model import Page
def fetch_grobid_structure(pdf_file: str, grobid_host: str = "http://localhost:8070"):
files = {
"input": (pdf_file, open(pdf_file, "rb"), "application/pdf", {"Expires": "0"})
}
url = "{}/api/processPdfStructure".format(grobid_host)
resp = requests.post(url, files=files)
if resp.status_code == 200:
return json.loads(resp.text)
else:
raise Exception("Grobid returned status code {}".format(resp.status_code))
def parse_annotations(grobid_structure) -> List[Page]:
pages = []
for grobid_page in grobid_structure["tokens"]["pages"]:
tokens = []
for token in grobid_page["tokens"]:
tokens.append(
dict(
text=token["text"],
x=token["x"],
y=token["y"],
width=token["width"],
height=token["height"],
)
)
page = dict(
page=dict(
width=grobid_page["page"]["width"],
height=grobid_page["page"]["height"],
index=grobid_page["page"]["pageNumber"] - 1,
),
tokens=tokens,
)
pages.append(page)
return pages
def process_grobid(
pdf_file: str,
grobid_host: str = "http://localhost:8070"
):
"""
Integration for importing annotations from grobid.
Depends on a grobid API built from our fork https://github.com/allenai/grobid.
    Sends the PDF at the given path to the Grobid API and returns the parsed annotations.
pdf_file: str
The path to the pdf file to process.
grobid_host: str (optional, default="http://localhost:8070")
The forked grobid API which we use to produce the annotations.
"""
grobid_structure = fetch_grobid_structure(pdf_file, grobid_host)
annotations = parse_annotations(grobid_structure)
return annotations
| 31.203125 | 86 | 0.593891 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 661 | 0.330996 |
6f6fd6c2d42d2b9282a1e6483b23196da4a8aeeb | 2,614 | py | Python | scripts/run_custom_eslint_tests.py | lheureuxe13/oppia | 7110e3e5d5a53527c31d7b33e14d25e8d5b981f9 | [
"Apache-2.0"
]
| 4 | 2021-09-16T16:46:53.000Z | 2022-02-06T13:00:14.000Z | scripts/run_custom_eslint_tests.py | lheureuxe13/oppia | 7110e3e5d5a53527c31d7b33e14d25e8d5b981f9 | [
"Apache-2.0"
]
| 80 | 2020-10-31T09:14:46.000Z | 2021-01-12T23:38:15.000Z | scripts/run_custom_eslint_tests.py | lheureuxe13/oppia | 7110e3e5d5a53527c31d7b33e14d25e8d5b981f9 | [
"Apache-2.0"
]
| 1 | 2020-10-02T13:28:26.000Z | 2020-10-02T13:28:26.000Z | # coding: utf-8
#
# Copyright 2020 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script for running tests for custom eslint checks."""
from __future__ import absolute_import
from __future__ import unicode_literals
import os
import re
import subprocess
import sys
from core import python_utils
from scripts import common
def main():
"""Run the tests."""
node_path = os.path.join(common.NODE_PATH, 'bin', 'node')
nyc_path = os.path.join('node_modules', 'nyc', 'bin', 'nyc.js')
mocha_path = os.path.join('node_modules', 'mocha', 'bin', 'mocha')
filepath = 'scripts/linters/custom_eslint_checks/rules/'
proc_args = [node_path, nyc_path, mocha_path, filepath]
proc = subprocess.Popen(
proc_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
encoded_tests_stdout, encoded_tests_stderr = proc.communicate()
# Standard and error output is in bytes, we need to decode the line to
# print it.
tests_stdout = encoded_tests_stdout.decode('utf-8')
tests_stderr = encoded_tests_stderr.decode('utf-8')
if tests_stderr:
python_utils.PRINT(tests_stderr)
sys.exit(1)
python_utils.PRINT(tests_stdout)
if 'failing' in tests_stdout:
python_utils.PRINT('---------------------------')
python_utils.PRINT('Tests not passed')
python_utils.PRINT('---------------------------')
sys.exit(1)
else:
python_utils.PRINT('---------------------------')
python_utils.PRINT('All tests passed')
python_utils.PRINT('---------------------------')
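    # Parse the "All files" row of the nyc coverage summary and require 100%
    # in every column.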
    coverage_result = re.search(
r'All files\s*\|\s*(?P<stmts>\S+)\s*\|\s*(?P<branch>\S+)\s*\|\s*'
r'(?P<funcs>\S+)\s*\|\s*(?P<lines>\S+)\s*\|\s*', tests_stdout)
if (coverage_result.group('stmts') != '100' or
coverage_result.group('branch') != '100' or
coverage_result.group('funcs') != '100' or
coverage_result.group('lines') != '100'):
raise Exception('Eslint test coverage is not 100%')
if __name__ == '__main__':
main()
| 35.808219 | 74 | 0.649579 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,265 | 0.483933 |
6f6fddc36a83d5396bed90f0e96d5995bd58f9a5 | 6,274 | py | Python | nmpc_mhe/tst_algorithmsv2_nmpc_hi_t0115_setp.py | joycezyu/cappresse | 45b40d0e9202180a0a07e1c03960cf30b08a4557 | [
"BSD-3-Clause"
]
| null | null | null | nmpc_mhe/tst_algorithmsv2_nmpc_hi_t0115_setp.py | joycezyu/cappresse | 45b40d0e9202180a0a07e1c03960cf30b08a4557 | [
"BSD-3-Clause"
]
| null | null | null | nmpc_mhe/tst_algorithmsv2_nmpc_hi_t0115_setp.py | joycezyu/cappresse | 45b40d0e9202180a0a07e1c03960cf30b08a4557 | [
"BSD-3-Clause"
]
| null | null | null | from __future__ import print_function
from pyomo.environ import *
from pyomo.core.base import Constraint, Objective, Suffix, minimize
from pyomo.opt import ProblemFormat, SolverFactory
from nmpc_mhe.dync.NMPCGenv2 import NmpcGen
from nmpc_mhe.mods.bfb.nob5_hi_t import bfb_dae
from snap_shot import snap
import sys, os
import itertools, sys
from numpy.random import normal as npm
# SWITCH TO JUST ONE COLLOCATION POINT AND FINITE ELEMENT
states = ["Hgc", "Nsc", "Hsc", "Hge", "Nse", "Hse"]
# x_noisy = ["Ngb", "Hgb", "Ngc", "Hgc", "Nsc", "Hsc", "Nge", "Hge", "Nse", "Hse", "mom"]
# x_noisy = ["Hse"]
x_noisy = ["Hgc", "Nsc", "Hsc", "Hge", "Nse", "Hse"]
u = ["u1"]
u_bounds = {"u1":(162.183495794 * 0.0005, 162.183495794 * 10000)}
ref_state = {("c_capture", ((),)): 0.63}
# Known targets 0.38, 0.4, 0.5
nfe_mhe = 10
y = ["Tgb", "vg"]
nfet = 10
ncpx = 3
nfex = 5
tfe = [i for i in range(1, nfe_mhe + 1)]
lfe = [i for i in range(1, nfex + 1)]
lcp = [i for i in range(1, ncpx + 1)]
lc = ['c', 'h', 'n']
y_vars = {
"Tgb": [i for i in itertools.product(lfe, lcp)],
"vg": [i for i in itertools.product(lfe, lcp)]
}
# x_vars = dict()
x_vars = {
# "Nge": [i for i in itertools.product(lfe, lcp, lc)],
# "Hge": [i for i in itertools.product(lfe, lcp)],
"Nsc": [i for i in itertools.product(lfe, lcp, lc)],
"Hsc": [i for i in itertools.product(lfe, lcp)],
"Nse": [i for i in itertools.product(lfe, lcp, lc)],
"Hse": [i for i in itertools.product(lfe, lcp)],
"Hgc": [i for i in itertools.product(lfe, lcp)],
"Hge": [i for i in itertools.product(lfe, lcp)],
# "mom": [i for i in itertools.product(lfe, lcp)]
}
# States -- (5 * 3 + 6) * fe_x * cp_x.
# For fe_x = 5 and cp_x = 3 we will have 315 differential-states.
e = NmpcGen(bfb_dae, 400/nfe_mhe, states, u,
ref_state=ref_state, u_bounds=u_bounds,
nfe_tnmpc=nfe_mhe, ncp_tnmpc=1,
nfe_t=5, ncp_t=1)
# 10 fe & _t=1000 definitely degenerate
# 10 fe & _t=900 definitely degenerate
# 10 fe & _t=120 sort-of degenerate
# 10 fe & _t=50 sort-of degenerate
# 10 fe & _t=50 eventually sort-of degenerate
# 10 fe & _t=1 eventually sort-of degenerate
e.SteadyRef.dref = snap
e.load_iguess_steady()
e.SteadyRef.create_bounds()
e.solve_steady_ref()
e.SteadyRef.report_zL(filename="mult_ss")
e.load_d_s(e.PlantSample)
e.PlantSample.create_bounds()
e.solve_dyn(e.PlantSample)
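# Covariance entries below are scaled to 0.5% of the nominal state, input and
# measurement values.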
q_cov = {}
for i in tfe:
for j in itertools.product(lfe, lcp, lc):
q_cov[("Nse", j), ("Nse", j), i] = 7525.81478168 * 0.005
q_cov[("Nsc", j), ("Nsc", j), i] = 117.650089456 * 0.005
# q_cov[("Nse", j), ("Nse", j), i] = 735.706082714 * 0.005
for i in tfe:
for j in itertools.product(lfe, lcp):
# q_cov[("Hge", j), ("Hge", j), i] = 2194.25390583 * 0.005
q_cov[("Hse", j), ("Hse", j), i] = 731143.716603 * 0.005
q_cov[("Hsc", j), ("Hsc", j), i] = 16668.3312216 * 0.005
q_cov[("Hge", j), ("Hge", j), i] = 2166.86838591 * 0.005
q_cov[("Hgc", j), ("Hgc", j), i] = 47.7911012193 * 0.005
# q_cov[("mom", j), ("mom", j), i] = 1.14042251669 * 0.005
# for i in lfe:
# for j in [(1,1, 'c'), (5,3, 'c')]:
# m_cov[("yb", j), ("yb", j), i] = 1e-04
u_cov = {}
for i in [i for i in range(1, nfe_mhe+1)]:
u_cov["u1", i] = 162.183495794 * 0.005
m_cov = {}
for i in tfe:
for j in itertools.product(lfe, lcp):
m_cov[("Tgb", j), ("Tgb", j), i] = 40 * 0.005
m_cov[("vg", j), ("vg", j), i] = 0.902649386907 * 0.005
e.find_target_ss() #: Compute target-steady state (beforehand)
#: Create NMPC
e.create_nmpc()
e.update_targets_nmpc()
e.compute_QR_nmpc(n=-1)
e.new_weights_olnmpc(10000, 1e+08)
e.solve_dyn(e.PlantSample, stop_if_nopt=True)
ipsr = SolverFactory('ipopt', executable="/home/dav0/Apps/IpoptSR/Ipopt/build/bin/ipoptSR")
ref_state = {("c_capture", ((),)): 0.50}
e.find_target_ss(ref_state=ref_state) #: Compute target-steady state (beforehand)
e.update_targets_nmpc()
e.compute_QR_nmpc(n=-1)
e.new_weights_olnmpc(10000, 1e+08)
for i in range(1, 1000):
ps = e.solve_dyn(e.PlantSample, stop_if_nopt=False)
e.PlantSample.write_nl(name="baddie.nl")
e.PlantSample.pprint(filename="baddie.txt")
e.PlantSample.snap_shot(filename="baddie.py")
e.PlantSample.report_zL(filename="bad_bounds")
if ps != 0:
e.PlantSample.write_nl(name="baddie.nl")
e.PlantSample.pprint(filename="baddie.txt")
e.PlantSample.snap_shot(filename="baddie.py")
e.PlantSample.report_zL(filename="bad_bounds")
e.solve_dyn(e.PlantSample, stop_if_nopt=True)
e.update_state_real() # update the current state
e.update_soi_sp_nmpc()
#
e.initialize_olnmpc(e.PlantSample, "real")
e.load_init_state_nmpc(src_kind="state_dict", state_dict="real")
stat_nmpc = e.solve_dyn(e.olnmpc, skip_update=False, max_cpu_time=300)
# if stat_nmpc != 0:
# stat_nmpc = e.solve_dyn(e.olnmpc,
# stop_if_nopt=True,
# skip_update=False,
# iter_max=300, ma57_pivtol=1e-12)
if stat_nmpc != 0:
strategy = 1
if strategy == 1:
if e.nfe_tnmpc == 1:
pass
else:
e.create_nmpc(newnfe=e.ncp_tnmpc-1, newncp=1)
e.update_targets_nmpc()
e.compute_QR_nmpc(n=-1)
e.new_weights_olnmpc(10000, 1e+02)
e.initialize_olnmpc(e.PlantSample, "real")
e.load_init_state_nmpc(src_kind="state_dict", state_dict="real")
stat_nmpc = e.solve_dyn(e.olnmpc, skip_update=False, max_cpu_time=300)
else:
e.olnmpc.write_nl(name="bad.nl")
# e.olnmpc.pprint(filename="bad_" + str(i))
with open("ipopt.opt", "w") as f:
f.write("linear_solver ma57\n"
"ma57_dep_tol 1e-8\nbig_M 1e30\n")
f.close()
ipsr.solve(e.olnmpc, tee=True)
e.update_u(e.olnmpc)
e.print_r_nmpc()
e.cycleSamPlant(plant_step=True)
e.plant_uinject(e.PlantSample, src_kind="dict", nsteps=10, skip_homotopy=True)
# e.plant_input_gen(e.PlantSample, "mod", src=e.ss2)
| 36.690058 | 91 | 0.603602 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,951 | 0.310966 |
6f709ca217e6ed7e435bf5ff768925bbdc7d9c7c | 493 | py | Python | csvapi/security.py | quaxsze/csvapi | 7e5ab5839fb6cbf667c756798a55c9b719394602 | [
"MIT"
]
| 15 | 2019-08-23T09:57:54.000Z | 2021-11-08T10:38:03.000Z | csvapi/security.py | quaxsze/csvapi | 7e5ab5839fb6cbf667c756798a55c9b719394602 | [
"MIT"
]
| 36 | 2019-08-21T10:05:53.000Z | 2022-03-23T08:58:02.000Z | csvapi/security.py | opendatateam/csvapi | 4e4ea7167f7265782c8f654619b060dc04112392 | [
"MIT"
]
| 1 | 2018-04-25T09:55:25.000Z | 2018-04-25T09:55:25.000Z | from urllib.parse import urlparse
from quart import current_app as app, request, jsonify
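# Return None when the request is allowed (no filter configured, or the
# referrer hostname ends with a configured suffix); otherwise return a 403.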
def filter_referrers():
filters = app.config.get('REFERRERS_FILTER')
if not filters:
return None
referrer = request.referrer
if referrer:
parsed = urlparse(referrer)
for filter in filters:
if parsed.hostname.endswith(filter):
return None
return jsonify({
'ok': False,
'error': 'Unauthorized',
}), 403
| 24.65 | 54 | 0.614604 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 43 | 0.087221 |
6f70b2504b0ddf0927280e069e308de02195aea2 | 447 | py | Python | linkit/models.py | what-digital/linkit | 58fb7dc966e7b76b654c9bc5e52253eb81731e98 | [
"MIT"
]
| 8 | 2019-06-11T14:09:12.000Z | 2021-09-09T09:37:47.000Z | linkit/models.py | what-digital/linkit | 58fb7dc966e7b76b654c9bc5e52253eb81731e98 | [
"MIT"
]
| 7 | 2020-02-12T02:55:11.000Z | 2020-08-27T09:54:54.000Z | linkit/models.py | what-digital/linkit | 58fb7dc966e7b76b654c9bc5e52253eb81731e98 | [
"MIT"
]
| 2 | 2020-06-18T09:54:20.000Z | 2022-02-17T08:33:13.000Z | from django.db import models
from filer.fields.file import FilerFileField
class FakeLink(models.Model):
"""
    In our widget we need to manually render an AdminFileFormField. For every other field type this is not
    a problem at all, but Filer needs a rel attribute which consists of a reverse relationship. We fake it
    with this model.
"""
fake_file = FilerFileField(blank=True, null=True, on_delete=models.CASCADE)
| 37.25 | 115 | 0.753915 | 370 | 0.82774 | 0 | 0 | 0 | 0 | 0 | 0 | 256 | 0.572707 |
6f722918045c200389c503a068fc9c4194103a3f | 9,679 | py | Python | tests/helper.py | nirs/python-manhole | 26821e083eefdc87492b13ebdd20ba000a616141 | [
"BSD-2-Clause"
]
| null | null | null | tests/helper.py | nirs/python-manhole | 26821e083eefdc87492b13ebdd20ba000a616141 | [
"BSD-2-Clause"
]
| null | null | null | tests/helper.py | nirs/python-manhole | 26821e083eefdc87492b13ebdd20ba000a616141 | [
"BSD-2-Clause"
]
| null | null | null | from __future__ import print_function
import atexit
import errno
import logging
import os
import select
import signal
import sys
import time
from process_tests import setup_coverage
TIMEOUT = int(os.getenv('MANHOLE_TEST_TIMEOUT', 10))
SOCKET_PATH = '/tmp/manhole-socket'
OUTPUT = sys.__stdout__
def handle_sigterm(signo, _frame):
# Simulate real termination
print("Terminated", file=OUTPUT)
sys.exit(128 + signo)
# Handling sigterm ensure that atexit functions are called, and we do not leave
# leftover /tmp/manhole-pid sockets.
signal.signal(signal.SIGTERM, handle_sigterm)
@atexit.register
def log_exit():
print("In atexit handler.", file=OUTPUT)
def setup_greenthreads(patch_threads=False):
try:
from gevent import monkey
monkey.patch_all(thread=False)
except (ImportError, SyntaxError):
pass
try:
import eventlet
eventlet.monkey_patch(thread=False)
except (ImportError, SyntaxError):
pass
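# Fork a child that just sleeps; the parent registers an atexit handler that
# signals and then reaps the child.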
def do_fork():
pid = os.fork()
if pid:
@atexit.register
def cleanup():
try:
os.kill(pid, signal.SIGINT)
time.sleep(0.2)
os.kill(pid, signal.SIGTERM)
except OSError as e:
if e.errno != errno.ESRCH:
raise
os.waitpid(pid, 0)
else:
time.sleep(TIMEOUT * 10)
if __name__ == '__main__':
logging.basicConfig(
level=logging.DEBUG,
format='[pid=%(process)d - %(asctime)s]: %(name)s - %(levelname)s - %(message)s',
)
test_name = sys.argv[1]
try:
setup_coverage()
if os.getenv('PATCH_THREAD', False):
import manhole
setup_greenthreads(True)
else:
setup_greenthreads(True)
import manhole
if test_name == 'test_activate_on_usr2':
manhole.install(activate_on='USR2')
for i in range(TIMEOUT * 100):
time.sleep(0.1)
elif test_name == 'test_install_once':
manhole.install()
try:
manhole.install()
except manhole.AlreadyInstalled:
print('ALREADY_INSTALLED')
else:
raise AssertionError("Did not raise AlreadyInstalled")
elif test_name == 'test_stderr_doesnt_deadlock':
import subprocess
manhole.install()
for i in range(50):
print('running iteration', i)
p = subprocess.Popen(['true'])
print('waiting for process', p.pid)
p.wait()
print('process ended')
path = '/tmp/manhole-%d' % p.pid
if os.path.exists(path):
os.unlink(path)
raise AssertionError(path + ' exists !')
print('SUCCESS')
elif test_name == 'test_fork_exec':
manhole.install(reinstall_delay=5)
print("Installed.")
time.sleep(0.2)
pid = os.fork()
print("Forked, pid =", pid)
if pid:
os.waitpid(pid, 0)
path = '/tmp/manhole-%d' % pid
if os.path.exists(path):
os.unlink(path)
raise AssertionError(path + ' exists !')
else:
try:
time.sleep(1)
print("Exec-ing `true`")
os.execvp('true', ['true'])
finally:
os._exit(1)
print('SUCCESS')
elif test_name == 'test_activate_on_with_oneshot_on':
manhole.install(activate_on='USR2', oneshot_on='USR2')
for i in range(TIMEOUT * 100):
time.sleep(0.1)
elif test_name == 'test_interrupt_on_accept':
def handle_usr2(_sig, _frame):
print('Got USR2')
signal.signal(signal.SIGUSR2, handle_usr2)
import ctypes
import ctypes.util
libpthread_path = ctypes.util.find_library("pthread")
if not libpthread_path:
raise ImportError
libpthread = ctypes.CDLL(libpthread_path)
if not hasattr(libpthread, "pthread_setname_np"):
raise ImportError
pthread_kill = libpthread.pthread_kill
pthread_kill.argtypes = [ctypes.c_void_p, ctypes.c_int]
pthread_kill.restype = ctypes.c_int
manhole.install(sigmask=None)
for i in range(15):
time.sleep(0.1)
print("Sending signal to manhole thread ...")
pthread_kill(manhole._INST.ident, signal.SIGUSR2)
for i in range(TIMEOUT * 100):
time.sleep(0.1)
elif test_name == 'test_oneshot_on_usr2':
manhole.install(oneshot_on='USR2')
for i in range(TIMEOUT * 100):
time.sleep(0.1)
elif test_name.startswith('test_signalfd_weirdness'):
if 'negative' in test_name:
manhole.install(sigmask=None)
else:
manhole.install(sigmask=[signal.SIGCHLD])
time.sleep(0.3) # give the manhole a bit enough time to start
print('Starting ...')
import signalfd
signalfd.sigprocmask(signalfd.SIG_BLOCK, [signal.SIGCHLD])
fd = signalfd.signalfd(0, [signal.SIGCHLD], signalfd.SFD_NONBLOCK|signalfd.SFD_CLOEXEC)
for i in range(200):
print('Forking %s:' % i)
pid = os.fork()
print(' - [%s/%s] forked' % (i, pid))
if pid:
while 1:
print(' - [%s/%s] selecting on: %s' % (i, pid, [fd]))
read_ready, _, errors = select.select([fd], [], [fd], 1)
if read_ready:
try:
print(' - [%s/%s] reading from signalfd ...' % (i, pid))
print(' - [%s] read from signalfd: %r ' % (i, os.read(fd, 128)))
break
except OSError as exc:
print(' - [%s/%s] reading from signalfd failed with errno %s' % (i, pid, exc.errno))
else:
print(' - [%s/%s] reading from signalfd failed - not ready !' % (i, pid))
if 'negative' in test_name:
time.sleep(1)
if errors:
raise RuntimeError("fd has error")
else:
print(' - [%s/%s] exiting' % (i, pid))
os._exit(0)
time.sleep(TIMEOUT * 10)
elif test_name == 'test_auth_fail':
manhole.get_peercred = lambda _: (-1, -1, -1)
manhole.install()
time.sleep(TIMEOUT * 10)
elif test_name == 'test_socket_path':
manhole.install(socket_path=SOCKET_PATH)
time.sleep(TIMEOUT * 10)
elif test_name == 'test_daemon_connection':
manhole.install(daemon_connection=True)
time.sleep(TIMEOUT)
elif test_name == 'test_socket_path_with_fork':
manhole.install(socket_path=SOCKET_PATH)
time.sleep(TIMEOUT)
do_fork()
elif test_name == 'test_locals':
manhole.install(socket_path=SOCKET_PATH,
locals={'k1': 'v1', 'k2': 'v2'})
time.sleep(TIMEOUT)
elif test_name == 'test_locals_after_fork':
manhole.install(locals={'k1': 'v1', 'k2': 'v2'})
do_fork()
elif test_name == 'test_redirect_stderr_default':
manhole.install(socket_path=SOCKET_PATH)
time.sleep(TIMEOUT)
elif test_name == 'test_redirect_stderr_disabled':
manhole.install(socket_path=SOCKET_PATH, redirect_stderr=False)
time.sleep(TIMEOUT)
elif test_name == 'test_sigmask':
manhole.install(socket_path=SOCKET_PATH, sigmask=[signal.SIGUSR1])
time.sleep(TIMEOUT)
else:
manhole.install()
time.sleep(0.3) # give the manhole a bit enough time to start
if test_name == 'test_simple':
time.sleep(TIMEOUT * 10)
elif test_name == 'test_with_forkpty':
time.sleep(1)
pid, masterfd = os.forkpty()
if pid:
@atexit.register
def cleanup():
try:
os.kill(pid, signal.SIGINT)
time.sleep(0.2)
os.kill(pid, signal.SIGTERM)
except OSError as e:
if e.errno != errno.ESRCH:
raise
while not os.waitpid(pid, os.WNOHANG)[0]:
try:
os.write(2, os.read(masterfd, 1024))
except OSError as e:
print("Error while reading from masterfd:", e)
else:
time.sleep(TIMEOUT * 10)
elif test_name == 'test_with_fork':
time.sleep(1)
do_fork()
else:
raise RuntimeError('Invalid test spec.')
except: # pylint: disable=W0702
print('Died with %s.' % sys.exc_info()[0].__name__, file=OUTPUT)
import traceback
traceback.print_exc(file=OUTPUT)
print('DIED.', file=OUTPUT)
| 37.226923 | 116 | 0.510073 | 0 | 0 | 0 | 0 | 731 | 0.075524 | 0 | 0 | 1,630 | 0.168406 |
6f73d54d3a1a664d942bd0ee6d760eedb4233760 | 1,054 | py | Python | ecommerce/User/admin.py | AwaleRohin/commerce-fm | cb5b43c999ae5be37957b29de9c07d5affc66fb0 | [
"MIT"
]
| 18 | 2020-12-05T14:12:32.000Z | 2022-03-11T20:15:22.000Z | ecommerce/User/admin.py | AwaleRohin/commerce-fm | cb5b43c999ae5be37957b29de9c07d5affc66fb0 | [
"MIT"
]
| 1 | 2021-07-22T09:23:13.000Z | 2021-07-22T09:23:13.000Z | ecommerce/User/admin.py | shakyasaijal/commerce-fm | 358b6925f4b569dc374010d7cc7d4d560ede2b48 | [
"MIT"
]
| 13 | 2020-10-15T10:17:35.000Z | 2022-01-29T06:56:24.000Z | from django.contrib import admin
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from . import models
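# When HAS_ADDITIONAL_USER_DATA is enabled, extra profile data is edited
# inline on the user admin page.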
if settings.HAS_ADDITIONAL_USER_DATA:
try:
class UserProfileInline(admin.TabularInline):
model = models.UserProfile
extra = 0
except (Exception, KeyError) as e:
raise ImproperlyConfigured("User/admin.py:: Multi Vendor is turned on.")
class UserAdmin(admin.ModelAdmin):
list_display = ['get_full_name', 'email', 'is_verified']
search_fields = ['get_full_name', 'email', 'date_joined', 'username']
list_filter = ('groups',)
if settings.HAS_ADDITIONAL_USER_DATA:
inlines = [ UserProfileInline, ]
def save_model(self, request, obj, form, change):
if 'password' in form.changed_data:
obj.set_password(request.POST['password'])
obj.save()
admin.site.register(models.User, UserAdmin)
admin.site.register(models.IpAddress)
admin.site.register(models.CityFromIpAddress)
admin.site.register(models.Marketing) | 31 | 80 | 0.712524 | 563 | 0.534156 | 0 | 0 | 0 | 0 | 0 | 0 | 152 | 0.144213 |
6f741a22f6b69a36890074cd2db8d9ec2d946c37 | 38,189 | py | Python | client/external/xp_tracker.py | Suirdna/OR-Origin | 8eb7d99a87d835a7d590d56e0088ec79746f4630 | [
"MIT"
]
| null | null | null | client/external/xp_tracker.py | Suirdna/OR-Origin | 8eb7d99a87d835a7d590d56e0088ec79746f4630 | [
"MIT"
]
| null | null | null | client/external/xp_tracker.py | Suirdna/OR-Origin | 8eb7d99a87d835a7d590d56e0088ec79746f4630 | [
"MIT"
]
| null | null | null | from client import exception, embed_creator, console_interface, discord_manager, file_manager, ini_manager, json_manager, origin, permissions, server_timer
from client.config import config as c, language as l
from discord.ext import commands, tasks
from client.external.hiscores import hiscores_xp
from PIL import Image, ImageDraw, ImageFont
import discord, locale
class xp_tracker(commands.Cog):
# ▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬
name = 'xp_tracker'
# ▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬
@staticmethod
async def fun_xptracker(ctx):
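        # `.xptracker` overview command: after the organizer/owner permission
        # check, builds the tracker help embed from embed.tracker.json in the
        # guild's language and posts it to the invoking channel.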
try:
path = c.GUILD_PATH['special_member.json'].format(ctx.guild.id)
guild_l = await origin.get_language(ctx.guild.id)
target_keys = ['user_id', 'user_status']
target_values = [ctx.author.id, c.USER_PERMISSIONS['organizer']]
if await permissions.get_user_permission(path, target_keys, target_values) or ctx.author.id == ctx.guild.owner.id or ctx.author.id == c.CLIENT_ADMINISTRATION_ID:
if ctx.message.content == '.xptracker':
path3 = c.ORIGIN_PATH['embed.tracker.json']
json_string = await json_manager.get_json(path3)
new_json_string = {'data': []}
for key, value in json_string[guild_l]['xp_tracker']['tracker'].items():
if int(key) == 1:
new_json_string['data'].append({
'name{}'.format(key): value['name'],
'value{}'.format(key): value['value']
})
else:
new_json_string['data'].append({
'name{}'.format(key): value['name'],
'value{}'.format(key): value['value']
})
await embed_creator.create_embed(ctx, discord.Color.dark_green(), False, ctx.guild.icon_url, c.CLIENT_ICON, l.xp_tracker[guild_l]['embed_1'].format(ctx.guild.name), new_json_string['data'], False)
else:
await ctx.author.send(l.user_permissions[guild_l]['msg_restricted_1'])
except Exception as error:
await exception.error(error)
async def fun_addxpevent(self, ctx):
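        # Creates a new XP event. Expected invocation (inferred from the parsing
        # below): .addxpevent <xp_target> <skill> <prize_count> <date_start>
        # <date_end> <time_start> <time_end> <extra...>. Stores the event record,
        # posts the announcement embed and notifies the configured chat channels.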
try:
path1 = c.GUILD_PATH['special_member.json'].format(ctx.guild.id)
guild_l = await origin.get_language(ctx.guild.id)
target_keys = ['user_id', 'user_status']
target_values = [ctx.author.id, c.USER_PERMISSIONS['organizer']]
if await permissions.get_user_permission(path1, target_keys, target_values) or ctx.author.id == ctx.guild.owner.id or ctx.author.id == c.CLIENT_ADMINISTRATION_ID:
STRING = str(ctx.message.content).split(' ')
if len(STRING) >= 9:
path = c.CLIENT_PATH['guild'] + str(ctx.guild.id) + c.CLIENT_JSON['server']
path2 = c.GUILD_PATH['event.json'].format(ctx.guild.id)
server_config = await json_manager.get_json(path)
LIST1 = self.PRE
LIST2 = self.NAME
LIST3 = self.ICON
DATA1 = await json_manager.get_data(path2)
ID = await origin.randomize()
                    STATUS = True
                    while STATUS:
                        # Reset the collision flag on every pass; otherwise a single
                        # duplicate ID would keep this loop spinning forever.
                        STATUS2 = False
                        for data in DATA1:
                            if data['id'] == ID:
                                STATUS2 = True
                        if not STATUS2:
                            STATUS = False
                        else:
                            ID = await origin.randomize()
EXTRA = ''
NAME = ''
for value in LIST2:
if str(value).lower() == STRING[2].lower():
NAME = str(value)
for index, event in enumerate(LIST1):
if STRING[2] == event:
RUSH = None
if STRING[1].isdigit() and int(STRING[1]) > 1:
RUSH = l.xp_tracker[guild_l]['configuration']['rush_point'].format(locale.format_string('%d', int(STRING[1]), grouping=True))
path4 = c.ORIGIN_PATH['embed.tracker.json']
DESCRIPTION = l.xp_tracker[guild_l]['description_1'].format(
ctx.author.mention,
STRING[4], STRING[6], NAME, STRING[5] if not RUSH else l.xp_tracker[guild_l]['extra_4'], STRING[7] if not RUSH else l.xp_tracker[guild_l]['extra_4'], RUSH if RUSH else ''
)
if len(STRING) >= 8:
for value in STRING[8:]:
EXTRA += '{} '.format(value)
json_string = await json_manager.get_json(path4)
new_json_string = {'data': []}
for key, value in json_string[guild_l]['xp_tracker']['addevent'].items():
if int(key) == 1:
new_json_string['data'].append({
'name{}'.format(key): value['name'],
'value{}'.format(key): str(value['value']).format(EXTRA)
})
if int(key) == 2:
new_json_string['data'].append({
'name{}'.format(key): value['name'],
'value{}'.format(key): value['value']
})
if STRING[1].isdigit():
mode_type = 0
if int(STRING[1]) == c.EVENT_MODE[0]:
mode_type = 1
elif int(STRING[1]) >= c.EVENT_MODE[1]:
mode_type = 2
EVENT_CHANNEL = await discord_manager.get_channel(self.client, ctx.guild.id, server_config['events'])
embed = await embed_creator.create_embed(ctx, discord.Color.dark_green(), False, ctx.guild.icon_url, LIST3[index], l.xp_tracker[guild_l]['embed_2'].format(ctx.guild.name), new_json_string['data'], False, False, EVENT_CHANNEL, DESCRIPTION)
json_string = {'id': ID, 'user_id': ctx.author.id, 'message_id': embed.id, 'event_name': STRING[2], 'xp_target': int(STRING[1]), 'prize_count': int(STRING[3]), 'date_start': STRING[4], 'date_end': STRING[5], 'time_start': int(STRING[6]), 'time_end': int(STRING[7]), 'participants': 0, 'status': 0, 'type': mode_type, 'win_message': 0}
await json_manager.create(path2, json_string)
await ctx.author.send(l.xp_tracker[guild_l]['msg_success_1'])
CHANNEL1 = await discord_manager.get_channel(self.client, ctx.guild.id, server_config['chat0'])
CHANNEL2 = await discord_manager.get_channel(self.client, ctx.guild.id, server_config['chat1'])
if CHANNEL1:
await CHANNEL1.send(l.xp_tracker[guild_l]['msg_post_1'].format(NAME, server_config['events']))
if CHANNEL2:
await CHANNEL2.send(l.xp_tracker[guild_l]['msg_post_1'].format(NAME, server_config['events']))
else:
await ctx.author.send(l.xp_tracker[guild_l]['msg_badformat_1'])
else:
await ctx.author.send(l.xp_tracker[guild_l]['msg_badformat_1'])
else:
await ctx.author.send(l.user_permissions[guild_l]['msg_restricted_1'])
except Exception as error:
await exception.error(error)
@staticmethod
async def fun_removeallxp(ctx, system=None):
try:
guild_l = await origin.get_language(ctx.guild.id if hasattr(ctx, 'guild') else ctx)
path1 = c.GUILD_PATH['special_member.json'].format(ctx.guild.id if hasattr(ctx, 'guild') else ctx)
path2 = c.GUILD_PATH['tracker.json'].format(ctx.guild.id if hasattr(ctx, 'guild') else ctx)
path3 = c.GUILD_PATH['event.json'].format(ctx.guild.id if hasattr(ctx, 'guild') else ctx)
LIST1 = await json_manager.get_data(path3)
NEW_LIST1 = {'data': []}
NEW_LIST2 = {'data': []}
if hasattr(ctx, 'guild'):
target_keys = ['user_id', 'user_status']
target_values = [ctx.author.id, c.USER_PERMISSIONS['organizer']]
if await permissions.get_user_permission(path1, target_keys, target_values) or ctx.author.id == ctx.guild.owner.id or ctx.author.id == c.CLIENT_ADMINISTRATION_ID:
for data in LIST1:
if data['type'] == 0 and data['status'] == 0:
NEW_LIST2['data'].append(data)
elif data['type'] == 3 and data['status'] >= 0:
NEW_LIST2['data'].append(data)
elif data['type'] == 4 and data['status'] >= 0:
NEW_LIST2['data'].append(data)
await json_manager.clear_and_update(path2, NEW_LIST1)
await json_manager.clear_and_update(path3, NEW_LIST2)
await ctx.author.send(l.xp_tracker[guild_l]['msg_success_2'])
else:
await ctx.author.send(l.user_permissions[guild_l]['msg_restricted_1'])
elif system == 1:
if LIST1:
for data in LIST1:
if data['type'] == 0 and data['status'] == 0:
NEW_LIST2['data'].append(data)
elif data['type'] == 3 and data['status'] >= 0:
NEW_LIST2['data'].append(data)
elif data['type'] == 4 and data['status'] >= 0:
NEW_LIST2['data'].append(data)
await json_manager.clear_and_update(path2, NEW_LIST1)
await json_manager.clear_and_update(path3, NEW_LIST2)
except Exception as error:
await exception.error(error)
async def fun_axp(self, ctx):
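        # `.axp <rsn>`: registers the caller for the active XP events (or adds
        # newly started events to an existing registration) by fetching the
        # player's hiscores, storing the starting XP per tracked skill, bumping
        # the participant counters and granting the event role.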
try:
guild_l = await origin.get_language(ctx.guild.id)
STRING = str(ctx.message.content).split(' ')
if len(STRING) >= 2:
path = c.CLIENT_PATH['guild'] + str(ctx.guild.id) + c.CLIENT_JSON['server']
path1 = c.GUILD_PATH['tracker.json'].format(ctx.guild.id)
path2 = c.GUILD_PATH['event.json'].format(ctx.guild.id)
server_config = await json_manager.get_json(path)
LIST1 = await json_manager.get_data(path1)
LIST2 = await json_manager.get_data(path2)
CHECK = True
user = self.client.get_user(ctx.author.id)
STATUS1 = False
STATUS2 = False
EVENT_NAME = []
EVENT_LIST_DATA = []
SAFE_CHECK = 0
userName = ''
for name in STRING[1:]:
userName += '{} '.format(name)
userName = userName.replace('_', ' ')
userName = userName.rstrip()
for value in LIST1:
if value['user_id'] == ctx.author.id or value['user_rsn'] == userName:
STATUS1 = True
if not STATUS1:
for value2 in LIST2:
if value2['type'] == 1 or value2['type'] == 2:
STATUS2 = True
EVENT_NAME.append(value2['event_name'])
SUM = value2['participants'] + 1
EVENT_LIST_DATA.append({'id': value2['id'], 'type': value2['type'], 'sum': SUM})
if STATUS2:
while CHECK:
USERNAME = userName.replace(' ', '%20')
USER = hiscores_xp.Hiscores(USERNAME, 'N')
USERNAME = USERNAME.replace('%20', ' ')
if USER.status != 404:
if hasattr(USER, 'stats'):
CHECK = False
json_string = {'user_id': ctx.author.id, 'user_username': ctx.author.mention, 'user_rsn': userName}
for value in EVENT_NAME:
json_string.update({value: USER.stats[value]['experience']})
json_string.update({'{}_current'.format(value): USER.stats[value]['experience']})
await json_manager.create(path1, json_string)
for event_data in EVENT_LIST_DATA:
await json_manager.update(path2, 'id', event_data['id'], 'participants', event_data['sum'])
path4 = c.GUILD_PATH['{}.ini'.format(self.name)].format(ctx.guild.id)
role_id = await ini_manager.get_data('SECTION1', 'EVENT_ROLE', path4)
role = await discord_manager.get_role(self.client, ctx.guild.id, int(role_id))
if role:
user = await discord_manager.get_member(self.client, ctx.guild.id, ctx.author.id)
await user.add_roles(role, reason='{}'.format(c.DISCORD_MESSAGES['event_role_added']), atomic=True)
await ctx.send(l.xp_tracker[guild_l]['msg_1'].format(USERNAME, server_config['events']))
else:
SAFE_CHECK += 1
if SAFE_CHECK >= 10:
CHECK = False
await user.send(l.xp_tracker[guild_l]['msg_error_1'])
else:
CHECK = False
await user.send(l.xp_tracker[guild_l]['msg_error_1'])
else:
await user.send(l.xp_tracker[guild_l]['msg_2'])
else:
EVENT_STATUS = False
MEMBER_DATA = None
for MEMBER in LIST1:
if ctx.author.id == MEMBER['user_id']:
MEMBER_DATA = MEMBER
for EVENT in LIST2:
for key, value in MEMBER_DATA.items():
if (EVENT['type'] == 1 or EVENT['type'] == 2) and key == EVENT['event_name']:
EVENT_STATUS = True
if not EVENT_STATUS and (EVENT['type'] == 1 or EVENT['type'] == 2):
EVENT_STATUS = False
CHECK = True
while CHECK:
USERNAME = userName.replace(' ', '%20')
USER = hiscores_xp.Hiscores(USERNAME, 'N')
if USER.status != 404:
if hasattr(USER, 'stats'):
CHECK = False
target_keys = ['{}'.format(EVENT['event_name']), '{}_current'.format(EVENT['event_name'])]
target_values = [USER.stats[EVENT['event_name']]['experience'], USER.stats[EVENT['event_name']]['experience']]
await json_manager.update(path1, 'user_id', ctx.author.id, target_keys, target_values)
await user.send(l.xp_tracker[guild_l]['msg_6'].format(str(EVENT['event_name']).capitalize()))
for value2 in LIST2:
if value2['type'] == 1 or value2['type'] == 2:
EVENT_NAME.append(value2['event_name'])
SUM = value2['participants'] + 1
EVENT_LIST_DATA.append({'id': value2['id'], 'type': value2['type'], 'sum': SUM})
for event_data in EVENT_LIST_DATA:
await json_manager.update(path2, 'id', event_data['id'], 'participants', event_data['sum'])
else:
SAFE_CHECK += 1
if SAFE_CHECK >= 10:
CHECK = False
await user.send(l.xp_tracker[guild_l]['msg_error_1'])
else:
CHECK = False
await user.send(l.xp_tracker[guild_l]['msg_error_1'])
else:
EVENT_STATUS = False
await user.send(l.xp_tracker[guild_l]['msg_7'])
else:
await ctx.send(l.xp_tracker[guild_l]['msg_badformat_2'].format(ctx.author.mention))
except Exception as error:
await exception.error(error)
async def fun_xpupdate(self, ctx):
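        # `.xpupdate`: re-fetches the caller's hiscores and refreshes the
        # "<skill>_current" snapshot for every active event, logging the update
        # to the console.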
try:
guild_l = await origin.get_language(ctx.guild.id)
guild_t = await origin.get_region(ctx.guild.id)
path1 = c.GUILD_PATH['tracker.json'].format(ctx.guild.id)
path2 = c.GUILD_PATH['event.json'].format(ctx.guild.id)
LIST1 = await json_manager.get_data(path1)
LIST2 = await json_manager.get_data(path2)
CHECK = True
user = self.client.get_user(ctx.author.id)
guild_current = await server_timer.get_current_time(guild_t)
STATUS1 = False
STATUS2 = False
EVENT_NAME = []
SAFE_CHECK = 0
MEMBER = None
userName = ''
for value in LIST1:
if value['user_id'] == ctx.author.id:
STATUS1 = True
userName = value['user_rsn']
MEMBER = value
if STATUS1:
for value2 in LIST2:
if value2['type'] == 1 or value2['type'] == 2:
STATUS2 = True
EVENT_NAME.append(value2['event_name'])
if STATUS2:
while CHECK:
USERNAME = userName.replace(' ', '%20')
USER = hiscores_xp.Hiscores(USERNAME, 'N')
if USER.status != 404:
if hasattr(USER, 'stats'):
CHECK = False
for value in EVENT_NAME:
await json_manager.update(path1, 'user_id', ctx.author.id, '{}_current'.format(value), USER.stats[value]['experience'])
client_message = 'Guild id: {} | Event: {} | RSN: {} | Registration XP: {} | Current XP: {} | Guild time: {} | Status: {}'.format(ctx.guild.id, value, userName, MEMBER[value], USER.stats[value]['experience'], guild_current.strftime('%H:%M'), 'XP self update')
await console_interface.console_message('XP self update', client_message)
await user.send(l.xp_tracker[guild_l]['msg_success_4'])
else:
SAFE_CHECK += 1
if SAFE_CHECK >= 10:
CHECK = False
await user.send(l.xp_tracker[guild_l]['msg_error_3'])
else:
CHECK = False
await user.send(l.xp_tracker[guild_l]['msg_error_4'].format(userName))
else:
await user.send(l.xp_tracker[guild_l]['msg_2'])
else:
await user.send(l.xp_tracker[guild_l]['msg_error_5'])
except Exception as error:
await exception.error(error)
async def fun_xprank(self, ctx):
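        # `.xprank [@member]`: renders a PIL rank card per active event showing
        # the member's RSN, rank and XP gained since registration, and posts it.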
try:
guild_l = await origin.get_language(ctx.guild.id)
path = c.CLIENT_PATH['guild'] + str(ctx.guild.id) + c.CLIENT_JSON['server']
path1 = c.GUILD_PATH['{}.ini'.format(self.name)].format(ctx.guild.id)
path2 = c.GUILD_PATH['event.json'].format(ctx.guild.id)
path3 = c.GUILD_PATH['tracker.json'].format(ctx.guild.id)
ini = await ini_manager.get_ini(path1)
LIST1 = self.PNG
LIST2 = self.PRE
INFO_PANEL_IMAGE = self.INFO_PANEL_IMAGE
INFO_PANEL_FIRST_IMAGE = self.INFO_PANEL_FIRST_IMAGE
INFO_PANEL_SECOND_IMAGE = self.INFO_PANEL_SECOND_IMAGE
INFO_PANEL_THIRD_IMAGE = self.INFO_PANEL_THIRD_IMAGE
COLOR_PLACE_FIRST = (255, 30, 215)
COLOR_PLACE_SECOND = (0, 174, 255)
COLOR_PLACE_THIRD = (255, 31, 31)
COLOR_PLACE_DEFAULT = (0, 239, 0)
FONT_PATH = self.FONT_PATH
INFO_PANEL_OBJECT = None
RANK = 0
sum = None
CHANNEL_PERMISSIONS = int(ini['CHANNEL_PERMISSIONS']['STATUS'])
server_config = await json_manager.get_json(path)
CHANNEL_STATUS = True
if CHANNEL_PERMISSIONS == 1:
pass
else:
if ctx.message.channel.id == server_config['chat0']:
CHANNEL_STATUS = False
if CHANNEL_STATUS:
STRING = str(ctx.message.content).split(' ')
def get_id(data_value):
return int(data_value.get('sum'))
if len(STRING) == 1:
user = self.client.get_user(ctx.author.id)
else:
DCID = await origin.find_and_replace(STRING[1])
user = self.client.get_user(DCID)
TEMP_DATA = await json_manager.get_data(path2)
DATA1 = []
DATA2 = await json_manager.get_data(path3)
DATA3 = []
STATUS = None
for value in TEMP_DATA:
if value['type'] == 1 or value['type'] == 2:
DATA1.append(value)
if DATA1:
for index, data in enumerate(DATA1):
if DATA2:
for index2, data2 in enumerate(DATA2):
for key, value in data2.items():
if str(data['event_name']) == str(key):
sum = data2['{}_current'.format(key)] - data2[key]
DATA3.append({'user_rsn': data2['user_rsn'], 'user_id': data2['user_id'], 'sum': sum})
for index3, value3 in enumerate(LIST2):
if str(value3) == str(key):
INFO_PANEL_OBJECT = LIST1[index3]
DATA3.sort(key=get_id, reverse=True)
for index3, data3 in enumerate(DATA3):
RANK += 1
if RANK == 1:
PLACE_IMAGE = INFO_PANEL_FIRST_IMAGE
PLACE_COLOR = COLOR_PLACE_FIRST
elif RANK == 2:
PLACE_IMAGE = INFO_PANEL_SECOND_IMAGE
PLACE_COLOR = COLOR_PLACE_SECOND
elif RANK == 3:
PLACE_IMAGE = INFO_PANEL_THIRD_IMAGE
PLACE_COLOR = COLOR_PLACE_THIRD
else:
PLACE_IMAGE = INFO_PANEL_IMAGE
PLACE_COLOR = COLOR_PLACE_DEFAULT
if hasattr(user, 'id'):
if user.id == data3['user_id']:
with Image.open(PLACE_IMAGE).convert('RGBA') as im:
with Image.open(INFO_PANEL_OBJECT).convert('RGBA') as im2:
size1 = im.size
size2 = im2.size
y = int(size1[1] / 2) - int(size2[1] / 2)
im.paste(im2, (18, y), im2)
draw = ImageDraw.Draw(im)
font = ImageFont.truetype(FONT_PATH, 16)
draw.text((50, y - 12), l.xp_tracker[guild_l]['configuration']['rsn'], PLACE_COLOR, font=font)
draw.text((50, y + 2), l.xp_tracker[guild_l]['configuration']['rank'], PLACE_COLOR, font=font)
draw.text((50, y + 18), l.xp_tracker[guild_l]['configuration']['xp'], PLACE_COLOR, font=font)
draw.text((110 if guild_l == 'LT' else 95, y - 12), '{}'.format(data3['user_rsn']), (255, 255, 255), font=font)
draw.text((130 if guild_l == 'LT' else 100, y + 2), '{}'.format(RANK), (255, 255, 255), font=font)
draw.text((98 if guild_l == 'LT' else 70, y + 18), '{} XP'.format(locale.format_string('%d', data3['sum'], grouping=True)), (255, 255, 255), font=font)
TEMP_FILE = '{}_{}_{}.png'.format(data3['user_rsn'], data['event_name'], sum)
im.save(TEMP_FILE, 'PNG')
rank = open(TEMP_FILE, 'rb')
await ctx.send(file=discord.File(rank))
rank.close()
await file_manager.delete_file(TEMP_FILE)
STATUS = True
if not STATUS:
await ctx.send(l.xp_tracker[guild_l]['msg_error_6'].format(ctx.author.mention))
RANK = 0
DATA3.clear()
else:
await ctx.send(l.xp_tracker[guild_l]['msg_4'])
else:
await ctx.send(l.xp_tracker[guild_l]['msg_5'])
else:
await ctx.send(l.module_permissions[guild_l]['msg_restricted'])
except Exception as error:
await exception.error(error)
async def fun_xpstats(self, ctx):
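        # `.xpstats`: posts a leaderboard embed per active event with the top-10
        # XP gains and the combined total.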
try:
guild_l = await origin.get_language(ctx.guild.id)
path = c.CLIENT_PATH['guild'] + str(ctx.guild.id) + c.CLIENT_JSON['server']
server_config = await json_manager.get_json(path)
path1 = c.GUILD_PATH['{}.ini'.format(self.name)].format(ctx.guild.id)
ini = await ini_manager.get_ini(path1)
CHANNEL_PERMISSIONS = int(ini['CHANNEL_PERMISSIONS']['STATUS'])
CHANNEL_STATUS = True
if CHANNEL_PERMISSIONS == 1:
pass
else:
if ctx.message.channel.id == server_config['chat0']:
CHANNEL_STATUS = False
if CHANNEL_STATUS:
path2 = c.GUILD_PATH['event.json'].format(ctx.guild.id)
path3 = c.GUILD_PATH['tracker.json'].format(ctx.guild.id)
LIST1 = self.ICON
LIST2 = self.PRE
IMAGE = None
EVENT_NAME = None
await origin.get_locale()
TEMP_DATA = await json_manager.get_data(path2)
DATA1 = []
DATA2 = await json_manager.get_data(path3)
DATA3 = []
for value in TEMP_DATA:
if value['type'] == 1 or value['type'] == 2:
DATA1.append(value)
def get_id(INFO):
return int(INFO.get('sum'))
if DATA1:
for data1 in DATA1:
if DATA2:
for data2 in DATA2:
for key, value in data2.items():
if str(key) == str(data1['event_name']):
sum = data2['{}_current'.format(key)]-data2[key]
DATA3.append({'user_username': data2['user_username'], 'user_rsn': data2['user_rsn'], 'sum': sum})
if data1['type'] == 1:
EVENT_NAME = '{} [ S ]'.format(str(data1['event_name']).capitalize())
if data1['type'] == 2:
EVENT_NAME = '{} [ R ]'.format(str(data1['event_name']).capitalize())
for index, value3 in enumerate(LIST2):
if str(value3) == str(key):
IMAGE = LIST1[index]
DATA3.sort(key=get_id, reverse=True)
path4 = c.ORIGIN_PATH['embed.tracker.json']
json_string = await json_manager.get_json(path4)
new_json_string = {'data': []}
STRING = ''
SUM = 0
for key, value in json_string[guild_l]['xp_tracker']['stats'].items():
if DATA3:
if int(key) == 1:
for index, data in enumerate(DATA3):
index = index + 1
if index <= 10:
if index == 1:
title = ':first_place: {}'.format(l.DISCORD_TOP[guild_l][index - 1])
elif index == 2:
title = ':second_place: {}'.format(l.DISCORD_TOP[guild_l][index - 1])
elif index == 3:
title = ':third_place: {}'.format(l.DISCORD_TOP[guild_l][index - 1])
else:
title = '{}'.format(l.DISCORD_TOP[guild_l][index - 1])
STRING += l.xp_tracker[guild_l]['configuration']['current_xp'].format(title, data['user_username'], data['user_rsn'], locale.format_string('%d', data['sum'], grouping=True))
SUM += data['sum']
STRING += l.xp_tracker[guild_l]['configuration']['total_xp'].format(locale.format_string('%d', SUM, grouping=True))
new_json_string['data'].append({
'name{}'.format(key): value['name'].format('\u200D'),
'value{}'.format(key): str(value['value']).format(STRING)
})
else:
STRING += l.xp_tracker[guild_l]['configuration']['total_xp'].format(locale.format_string('%d', SUM, grouping=True))
new_json_string['data'].append({
'name{}'.format(key): value['name'],
'value{}'.format(key): str(value['value']).format(ctx.guild.name)
})
await embed_creator.create_embed(ctx, discord.Color.dark_green(), False, ctx.guild.icon_url, IMAGE, l.xp_tracker[guild_l]['embed_3'].format(ctx.guild.name, EVENT_NAME), new_json_string['data'], False)
DATA3.clear()
else:
await ctx.send(l.xp_tracker[guild_l]['msg_4'])
else:
await ctx.send(l.xp_tracker[guild_l]['msg_5'])
else:
await ctx.send(l.module_permissions[guild_l]['msg_restricted'])
except Exception as error:
await exception.error(error)
# ▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬
@tasks.loop(count=1)
async def variable_init(self):
try:
path_global = c.GUILD_PATH['{}_g.ini'.format(self.name)]
ini = await ini_manager.get_ini(path_global)
self.PRE = await json_manager.get_ini_list(path_global, 'CONSTANT2', 'PRE')
self.NAME = await json_manager.get_ini_list(path_global, 'CONSTANT1', 'NAME')
self.ICON = await json_manager.get_ini_list(path_global, 'CONSTANT3', 'ICON')
self.PNG = await json_manager.get_ini_list(path_global, 'CONSTANT5', 'PNG')
self.INFO_PANEL_IMAGE = ini['CONSTANT5']['INFO_PANEL']
self.INFO_PANEL_FIRST_IMAGE = ini['CONSTANT5']['INFO_PANEL_FIRST']
self.INFO_PANEL_SECOND_IMAGE = ini['CONSTANT5']['INFO_PANEL_SECOND']
self.INFO_PANEL_THIRD_IMAGE = ini['CONSTANT5']['INFO_PANEL_THIRD']
self.FONT_PATH = ini['CONSTANT5']['FONT']
await console_interface.console_message(c.CLIENT_MESSAGES['variable_init'].format(self.name))
except Exception as error:
await exception.error(error)
def __init__(self, client):
self.PRE = None
self.NAME = None
self.ICON = None
self.PNG = None
self.INFO_PANEL_IMAGE = None
self.INFO_PANEL_FIRST_IMAGE = None
self.INFO_PANEL_SECOND_IMAGE = None
self.INFO_PANEL_THIRD_IMAGE = None
self.FONT_PATH = None
self.variable_init.start()
self.client = client
@commands.command()
async def xptracker(self, ctx):
try:
if str(ctx.message.channel.type) == 'text':
await permissions.module_status_check(ctx, self.name, c.GUILD_PATH['{}.ini'.format(self.name)], self.fun_xptracker)
else:
await ctx.send(l.module_permissions['EN']['msg_restricted_pm'])
except Exception as error:
await exception.error(error)
@commands.command()
async def addxpevent(self, ctx):
try:
if str(ctx.message.channel.type) == 'text':
await permissions.module_status_check(ctx, self.name, c.GUILD_PATH['{}.ini'.format(self.name)], self.fun_addxpevent)
else:
await ctx.send(l.module_permissions['EN']['msg_restricted_pm'])
except Exception as error:
await exception.error(error)
@commands.command()
async def removeallxp(self, ctx):
try:
if str(ctx.message.channel.type) == 'text':
await permissions.module_status_check(ctx, self.name, c.GUILD_PATH['{}.ini'.format(self.name)], self.fun_removeallxp)
else:
await ctx.send(l.module_permissions['EN']['msg_restricted_pm'])
except Exception as error:
await exception.error(error)
@commands.command()
async def axp(self, ctx):
try:
if str(ctx.message.channel.type) == 'text':
await permissions.module_status_check(ctx, self.name, c.GUILD_PATH['{}.ini'.format(self.name)], self.fun_axp)
else:
await ctx.send(l.module_permissions['EN']['msg_restricted_pm'])
except Exception as error:
await exception.error(error)
@commands.command()
async def xpupdate(self, ctx):
try:
if str(ctx.message.channel.type) == 'text':
await permissions.module_status_check(ctx, self.name, c.GUILD_PATH['{}.ini'.format(self.name)], self.fun_xpupdate)
else:
await ctx.send(l.module_permissions['EN']['msg_restricted_pm'])
except Exception as error:
await exception.error(error)
@commands.command()
async def xprank(self, ctx):
try:
if str(ctx.message.channel.type) == 'text':
await permissions.module_status_check(ctx, self.name, c.GUILD_PATH['{}.ini'.format(self.name)], self.fun_xprank)
else:
await ctx.send(l.module_permissions['EN']['msg_restricted_pm'])
except Exception as error:
await exception.error(error)
@commands.command()
async def xpstats(self, ctx):
try:
if str(ctx.message.channel.type) == 'text':
await permissions.module_status_check(ctx, self.name, c.GUILD_PATH['{}.ini'.format(self.name)], self.fun_xpstats)
else:
await ctx.send(l.module_permissions['EN']['msg_restricted_pm'])
except Exception as error:
await exception.error(error)
def setup(client):
client.add_cog(xp_tracker(client)) | 52.601928 | 366 | 0.468224 | 37,902 | 0.988911 | 0 | 0 | 8,310 | 0.216818 | 36,904 | 0.962872 | 3,777 | 0.098547 |
6f75a1523bdd37ab1cd4cc70ef59345c182747bf | 1,437 | py | Python | days/01/part1.py | gr3yknigh1/aoc2021 | 55dca0685cc4213f0a14970ae9bfc882a59e82aa | [
"MIT"
]
| null | null | null | days/01/part1.py | gr3yknigh1/aoc2021 | 55dca0685cc4213f0a14970ae9bfc882a59e82aa | [
"MIT"
]
| null | null | null | days/01/part1.py | gr3yknigh1/aoc2021 | 55dca0685cc4213f0a14970ae9bfc882a59e82aa | [
"MIT"
]
| null | null | null | from __future__ import annotations
import os
import collections
BASE_PATH = os.path.dirname(__file__)
INPUT_PATH = os.path.join(BASE_PATH, "input.txt")
OUTPUT_PATH = os.path.join(BASE_PATH, "output.txt")
def proceed_buffer(buffer: str) -> list[int]:
return [int(line) for line in buffer.splitlines()]
def main() -> int:
buffer: str = ""
with open(INPUT_PATH, mode='r', encoding="utf-8") as f:
buffer = f.read()
measurements: list[int] = proceed_buffer(buffer)
measurements_counter = collections.Counter()
output_buffer: str = ""
    prev: int | None = None
for i in measurements:
if prev is None:
output_buffer += f"{i} (N/A - no previous measurement)\n"
measurements_counter["None"] += 1
elif prev > i:
output_buffer += f"{i} (decrease)\n"
measurements_counter["Decreased"] += 1
elif prev < i:
output_buffer += f"{i} (increase)\n"
measurements_counter["Increased"] += 1
elif prev == i:
output_buffer += f"{i} (not changed)\n"
measurements_counter["Equal"] += 1
prev = i
output_buffer += "\n====\n"
output_buffer += "Total:\n"
output_buffer += f": {measurements_counter}"
with open(OUTPUT_PATH, mode='w', encoding="utf-8") as f:
f.write(output_buffer)
return 0
if __name__ == "__main__":
raise SystemExit(main())
| 26.127273 | 69 | 0.599165 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 239 | 0.166319 |
6f75fde6361af1d1bfaca77b15e701086bf2e3b2 | 13,684 | py | Python | src/ensemble_nn/agent_nn.py | AbhinavGopal/ts_tutorial | 147ff28dc507172774693f225071f8e244e5994e | [
"MIT"
]
| 290 | 2017-12-29T01:55:21.000Z | 2022-03-28T10:00:32.000Z | src/ensemble_nn/agent_nn.py | AbhinavGopal/ts_tutorial | 147ff28dc507172774693f225071f8e244e5994e | [
"MIT"
]
| 3 | 2018-08-02T11:45:51.000Z | 2020-09-24T14:34:58.000Z | src/ensemble_nn/agent_nn.py | AbhinavGopal/ts_tutorial | 147ff28dc507172774693f225071f8e244e5994e | [
"MIT"
]
| 76 | 2018-01-17T06:19:51.000Z | 2021-11-10T06:18:20.000Z | """Agents for neural net bandit problems.
We implement three main types of agent:
- epsilon-greedy (fixed epsilon, annealing epsilon)
- dropout (arXiv:1506.02142)
- ensemble sampling
All code is specialized to the setting of 2-layer fully connected MLPs.
"""
import numpy as np
import numpy.random as rd
from base.agent import Agent
from ensemble_nn.env_nn import TwoLayerNNBandit
class TwoLayerNNEpsilonGreedy(Agent):
def __init__(self,
input_dim,
hidden_dim,
actions,
time_horizon,
prior_var,
noise_var,
epsilon_param=0.0,
learning_rate=1e-1,
num_gradient_steps=1,
batch_size=64,
lr_decay=1,
leaky_coeff=0.01):
"""Epsilon-greedy agent with two-layer neural network model.
Args:
input_dim: int dimension of input.
hidden_dim: int size of hidden layer.
actions: numpy array of valid actions (generated by environment).
time_horizon: int size to pre-allocate data storage.
prior_var: prior variance for random initialization.
noise_var: noise variance for update.
epsilon_param: fixed epsilon choice.
learning_rate: sgd learning rate.
num_gradient_steps: how many sgd to do.
batch_size: size of batch.
lr_decay: decay learning rate.
leaky_coeff: slope of "negative" part of the Leaky ReLU.
"""
self.W1 = 1e-2 * rd.randn(hidden_dim, input_dim) # initialize weights
self.W2 = 1e-2 * rd.randn(hidden_dim)
self.actions = actions
self.num_actions = len(actions)
self.T = time_horizon
self.prior_var = prior_var
self.noise_var = noise_var
self.epsilon_param = epsilon_param
self.lr = learning_rate
self.num_gradient_steps = num_gradient_steps # number of gradient steps we
# take during each time period
self.batch_size = batch_size
self.lr_decay = lr_decay
self.leaky_coeff = leaky_coeff
self.action_hist = np.zeros((self.T, input_dim))
self.reward_hist = np.zeros(self.T)
def _model_forward(self, input_actions):
"""Neural network forward pass.
Args:
input_actions: actions to evaluate (numpy array).
Returns:
out: network prediction.
cache: tuple holding intermediate activations for backprop.
"""
affine_out = np.sum(input_actions[:, np.newaxis, :] * self.W1, axis=2)
relu_out = np.maximum(self.leaky_coeff * affine_out, affine_out)
out = np.sum(relu_out * self.W2, axis=1)
cache = (input_actions, affine_out, relu_out)
return out, cache
def _model_backward(self, out, cache, y):
"""Neural network backward pass (for backpropagation).
Args:
out: output of batch of predictions.
cache: intermediate activations from _model_forward.
y: target labels.
Returns:
dW1: gradients for layer 1.
dW2: gradients for layer 2.
"""
input_actions, affine_out, relu_out = cache
dout = -(2 / self.noise_var) * (y - out)
dW2 = np.sum(dout[:, np.newaxis] * relu_out, axis=0)
drelu_out = dout[:, np.newaxis] * self.W2
mask = (affine_out >= 0) + self.leaky_coeff * (affine_out < 0)
daffine_out = mask * drelu_out
dW1 = np.dot(daffine_out.T, input_actions)
return dW1, dW2
def _update_model(self, t):
"""Update the model by taking a few gradient steps."""
for i in range(self.num_gradient_steps):
# sample minibatch
batch_ind = rd.randint(t + 1, size=self.batch_size)
action_batch = self.action_hist[batch_ind]
reward_batch = self.reward_hist[batch_ind]
out, cache = self._model_forward(action_batch)
dW1, dW2 = self._model_backward(out, cache, reward_batch)
dW1 /= self.batch_size
dW2 /= self.batch_size
dW1 += 2 / (self.prior_var * (t + 1)) * self.W1
dW2 += 2 / (self.prior_var * (t + 1)) * self.W2
self.W1 -= self.lr * dW1
self.W2 -= self.lr * dW2
def update_observation(self, observation, action, reward):
"""Learn from observations."""
t = observation
self.action_hist[t] = self.actions[action]
self.reward_hist[t] = reward
self._update_model(t)
self.lr *= self.lr_decay
def pick_action(self, observation):
"""Fixed epsilon-greedy action selection."""
u = rd.rand()
if u < self.epsilon_param:
action = rd.randint(self.num_actions)
else:
model_out, _ = self._model_forward(self.actions)
action = np.argmax(model_out)
return action
class TwoLayerNNEpsilonGreedyAnnealing(TwoLayerNNEpsilonGreedy):
"""Epsilon-greedy with an annealing epsilon:
epsilon = self.epsilon_param / (self.epsilon_param + t)
"""
def pick_action(self, observation):
"""Overload pick_action to dynamically recalculate epsilon-greedy."""
t = observation
epsilon = self.epsilon_param / (self.epsilon_param + t)
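    # e.g. with epsilon_param = 50 (an illustrative value), epsilon decays from
    # 1.0 at t = 0 to 0.5 at t = 50 and 0.1 at t = 450.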
u = rd.rand()
if u < epsilon:
action = rd.randint(self.num_actions)
else:
model_out, _ = self._model_forward(self.actions)
action = np.argmax(model_out)
return action
class TwoLayerNNDropout(TwoLayerNNEpsilonGreedy):
"""Dropout is used to represent model uncertainty.
ICML paper suggests this is Bayesian uncertainty: arXiv:1506.02142.
Follow up work suggests that this is flawed: TODO(iosband) add link.
"""
def __init__(self,
input_dim,
hidden_dim,
actions,
time_horizon,
prior_var,
noise_var,
drop_prob=0.5,
learning_rate=1e-1,
num_gradient_steps=1,
batch_size=64,
lr_decay=1,
leaky_coeff=0.01):
"""Dropout agent with two-layer neural network model.
Args:
input_dim: int dimension of input.
hidden_dim: int size of hidden layer.
actions: numpy array of valid actions (generated by environment).
time_horizon: int size to pre-allocate data storage.
prior_var: prior variance for random initialization.
noise_var: noise variance for update.
drop_prob: probability of randomly zero-ing out weight component.
learning_rate: sgd learning rate.
num_gradient_steps: how many sgd to do.
batch_size: size of batch.
lr_decay: decay learning rate.
leaky_coeff: slope of "negative" part of the Leaky ReLU.
"""
self.W1 = 1e-2 * rd.randn(hidden_dim, input_dim)
self.W2 = 1e-2 * rd.randn(hidden_dim)
self.actions = actions
self.num_actions = len(actions)
self.T = time_horizon
self.prior_var = prior_var
self.noise_var = noise_var
self.p = drop_prob
self.lr = learning_rate
self.num_gradient_steps = num_gradient_steps
self.batch_size = batch_size
self.lr_decay = lr_decay
self.leaky_coeff = leaky_coeff
self.action_hist = np.zeros((self.T, input_dim))
self.reward_hist = np.zeros(self.T)
def _model_forward(self, input_actions):
"""Neural network forward pass.
Note that dropout remains "on" so that forward pass is stochastic.
Args:
input_actions: actions to evaluate (numpy array).
Returns:
out: network prediction.
cache: tuple holding intermediate activations for backprop.
"""
affine_out = np.sum(input_actions[:, np.newaxis, :] * self.W1, axis=2)
relu_out = np.maximum(self.leaky_coeff * affine_out, affine_out)
dropout_mask = rd.rand(*relu_out.shape) > self.p
dropout_out = relu_out * dropout_mask
out = np.sum(dropout_out * self.W2, axis=1)
cache = (input_actions, affine_out, relu_out, dropout_mask, dropout_out)
return out, cache
def _model_backward(self, out, cache, y):
"""Neural network backward pass (for backpropagation).
Args:
out: output of batch of predictions.
cache: intermediate activations from _model_forward.
y: target labels.
Returns:
dW1: gradients for layer 1.
dW2: gradients for layer 2.
"""
input_actions, affine_out, relu_out, dropout_mask, dropout_out = cache
dout = -(2 / self.noise_var) * (y - out)
dW2 = np.sum(dout[:, np.newaxis] * relu_out, axis=0)
ddropout_out = dout[:, np.newaxis] * self.W2
drelu_out = ddropout_out * dropout_mask
relu_mask = (affine_out >= 0) + self.leaky_coeff * (affine_out < 0)
daffine_out = relu_mask * drelu_out
dW1 = np.dot(daffine_out.T, input_actions)
return dW1, dW2
def pick_action(self, observation):
"""Select the greedy action according to the output of a stochastic
forward pass."""
model_out, _ = self._model_forward(self.actions)
action = np.argmax(model_out)
return action
class TwoLayerNNEnsembleSampling(Agent):
"""An ensemble sampling agent maintains an ensemble of neural nets, each
fitted to a perturbed prior and perturbed observations."""
def __init__(self,
input_dim,
hidden_dim,
actions,
time_horizon,
prior_var,
noise_var,
num_models=10,
learning_rate=1e-1,
num_gradient_steps=1,
batch_size=64,
lr_decay=1,
leaky_coeff=0.01):
"""Ensemble sampling agent with two-layer neural network model.
Args:
input_dim: int dimension of input.
hidden_dim: int size of hidden layer.
actions: numpy array of valid actions (generated by environment).
time_horizon: int size to pre-allocate data storage.
prior_var: prior variance for random initialization.
noise_var: noise variance for update.
num_models: Number of ensemble models to train.
learning_rate: sgd learning rate.
num_gradient_steps: how many sgd to do.
batch_size: size of batch.
lr_decay: decay learning rate.
leaky_coeff: slope of "negative" part of the Leaky ReLU.
"""
self.M = num_models
# initialize models by sampling perturbed prior means
self.W1_model_prior = np.sqrt(prior_var) * rd.randn(self.M, hidden_dim,
input_dim)
self.W2_model_prior = np.sqrt(prior_var) * rd.randn(self.M, hidden_dim)
self.W1 = np.copy(self.W1_model_prior)
self.W2 = np.copy(self.W2_model_prior)
self.actions = actions
self.num_actions = len(actions)
self.T = time_horizon
self.prior_var = prior_var
self.noise_var = noise_var
self.lr = learning_rate
self.num_gradient_steps = num_gradient_steps
self.batch_size = batch_size
self.lr_decay = lr_decay
self.leaky_coeff = leaky_coeff
self.action_hist = np.zeros((self.T, input_dim))
self.model_reward_hist = np.zeros((self.M, self.T))
def _model_forward(self, m, input_actions):
"""Neural network forward pass for single model of ensemble.
Args:
m: index of which network to evaluate.
input_actions: actions to evaluate (numpy array).
Returns:
out: network prediction.
cache: tuple holding intermediate activations for backprop.
"""
affine_out = np.sum(input_actions[:, np.newaxis, :] * self.W1[m], axis=2)
relu_out = np.maximum(self.leaky_coeff * affine_out, affine_out)
out = np.sum(relu_out * self.W2[m], axis=1)
cache = (input_actions, affine_out, relu_out)
return out, cache
def _model_backward(self, m, out, cache, y):
"""Neural network backward pass (for backpropagation) for single network.
Args:
m: index of which network to evaluate.
out: output of batch of predictions.
cache: intermediate activations from _model_forward.
y: target labels.
Returns:
dW1: gradients for layer 1.
dW2: gradients for layer 2.
"""
input_actions, affine_out, relu_out = cache
dout = -(2 / self.noise_var) * (y - out)
dW2 = np.sum(dout[:, np.newaxis] * relu_out, axis=0)
drelu_out = dout[:, np.newaxis] * self.W2[m]
mask = (affine_out >= 0) + self.leaky_coeff * (affine_out < 0)
daffine_out = mask * drelu_out
dW1 = np.dot(daffine_out.T, input_actions)
return dW1, dW2
def _update_model(self, m, t):
"""Apply SGD to model m."""
for i in range(self.num_gradient_steps):
# sample minibatch
batch_ind = rd.randint(t + 1, size=self.batch_size)
action_batch = self.action_hist[batch_ind]
reward_batch = self.model_reward_hist[m][batch_ind]
out, cache = self._model_forward(m, action_batch)
dW1, dW2 = self._model_backward(m, out, cache, reward_batch)
dW1 /= self.batch_size
dW2 /= self.batch_size
dW1 += 2 / (self.prior_var * (t + 1)) * (
self.W1[m] - self.W1_model_prior[m])
dW2 += 2 / (self.prior_var * (t + 1)) * (
self.W2[m] - self.W2_model_prior[m])
self.W1[m] -= self.lr * dW1
self.W2[m] -= self.lr * dW2
return
def update_observation(self, observation, action, reward):
"""Learn from observations, shared across all models.
However, perturb the reward independently for each model and then update.
"""
t = observation
self.action_hist[t] = self.actions[action]
for m in range(self.M):
m_noise = np.sqrt(self.noise_var) * rd.randn()
self.model_reward_hist[m, t] = reward + m_noise
self._update_model(m, t)
self.lr *= self.lr_decay
def pick_action(self, observation):
"""Select action via ensemble sampling.
Choose active network uniformly at random, then act greedily wrt that model.
"""
m = rd.randint(self.M)
model_out, _ = self._model_forward(m, self.actions)
action = np.argmax(model_out)
return action
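# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): drives one of
# the agents above against a toy action set. The problem sizes, prior/noise
# variances and the linear-plus-noise reward model below are assumptions made
# only for demonstration; the real environment is TwoLayerNNBandit from
# ensemble_nn.env_nn.
if __name__ == '__main__':
  T = 200
  toy_actions = rd.randn(30, 10)  # 30 candidate actions in a 10-d input space
  agent = TwoLayerNNEpsilonGreedyAnnealing(
      input_dim=10, hidden_dim=50, actions=toy_actions, time_horizon=T,
      prior_var=1.0, noise_var=0.1, epsilon_param=50)
  for t in range(T):
    a = agent.pick_action(t)  # observation is the current timestep
    reward = float(np.sum(toy_actions[a])) + np.sqrt(0.1) * rd.randn()
    agent.update_observation(t, a, reward)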
| 33.621622 | 80 | 0.656168 | 13,282 | 0.970623 | 0 | 0 | 0 | 0 | 0 | 0 | 5,175 | 0.378179 |
6f762afe905140cf74ce1d262513f6770e5cf96a | 1,314 | py | Python | leaflet_storage/management/commands/storagei18n.py | Biondilbiondo/django-leaflet-storage-concurrent-editing | 98cc3be7c74ea545ed8a75b9ae198acfcbba03a3 | [
"WTFPL"
]
| null | null | null | leaflet_storage/management/commands/storagei18n.py | Biondilbiondo/django-leaflet-storage-concurrent-editing | 98cc3be7c74ea545ed8a75b9ae198acfcbba03a3 | [
"WTFPL"
]
| null | null | null | leaflet_storage/management/commands/storagei18n.py | Biondilbiondo/django-leaflet-storage-concurrent-editing | 98cc3be7c74ea545ed8a75b9ae198acfcbba03a3 | [
"WTFPL"
]
| null | null | null | import io
import os
from django.core.management.base import BaseCommand
from django.conf import settings
from django.contrib.staticfiles import finders
from django.template.loader import render_to_string
from django.utils.translation import to_locale
class Command(BaseCommand):
def handle(self, *args, **options):
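        # For every configured language, look up storage/src/locale/<code>.json
        # among the static files and render it into a <code>.js bundle (see
        # render() below, which writes under STATIC_ROOT).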
for code, name in settings.LANGUAGES:
code = to_locale(code)
print("Processing", name)
path = finders.find('storage/src/locale/{code}.json'.format(
code=code))
if not path:
print("No file for", code, "Skipping")
else:
with io.open(path, "r", encoding="utf-8") as f:
print("Found file", path)
self.render(code, f.read())
def render(self, code, json):
path = os.path.join(
settings.STATIC_ROOT,
"storage/src/locale/",
"{code}.js".format(code=code)
)
with io.open(path, "w", encoding="utf-8") as f:
content = render_to_string('leaflet_storage/locale.js', {
"locale": json,
"locale_code": code
})
print("Exporting to", path)
f.write(content)
| 33.692308 | 79 | 0.542618 | 1,059 | 0.805936 | 0 | 0 | 0 | 0 | 0 | 0 | 193 | 0.14688 |
6f76bbd91ccc6729e6385bce7b0f809d9736e91f | 37,910 | py | Python | spikemetrics/metrics.py | MarineChap/spikemetrics | c83a2e1e12efab5d2987d38d129ee6862cb4a454 | [
"MIT"
]
| null | null | null | spikemetrics/metrics.py | MarineChap/spikemetrics | c83a2e1e12efab5d2987d38d129ee6862cb4a454 | [
"MIT"
]
| null | null | null | spikemetrics/metrics.py | MarineChap/spikemetrics | c83a2e1e12efab5d2987d38d129ee6862cb4a454 | [
"MIT"
]
| null | null | null | # Copyright © 2019. Allen Institute. All rights reserved.
import numpy as np
import pandas as pd
from collections import OrderedDict
import math
import warnings
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
from sklearn.neighbors import NearestNeighbors
from sklearn.metrics import silhouette_score
from scipy.spatial.distance import cdist
from scipy.stats import chi2
from scipy.ndimage.filters import gaussian_filter1d
from .utils import Epoch
from .utils import printProgressBar, get_spike_positions
def calculate_metrics(spike_times, spike_clusters, amplitudes, pc_features, pc_feature_ind, params,
duration, channel_locations=None, cluster_ids=None, epochs=None, seed=None, verbose=True):
""" Calculate metrics for all units on one probe
Inputs:
------
spike_times : numpy.ndarray (num_spikes x 0)
Spike times in seconds (same timebase as epochs)
spike_clusters : numpy.ndarray (num_spikes x 0)
Cluster IDs for each spike time
pc_features : numpy.ndarray (num_spikes x num_pcs x num_channels)
Pre-computed PCs for blocks of channels around each spike
pc_feature_ind : numpy.ndarray (num_units x num_channels)
Channel indices of PCs for each unit
epochs : list of Epoch objects
contains information on Epoch start and stop times
duration : length of recording (seconds)
channel_locations : numpy.ndarray (num_channels x 2)
Channel locations (if None, a linear geometry is assumed)
params : dict of parameters
'isi_threshold' : minimum time for isi violations
'min_isi'
'num_channels_to_compare'
'max_spikes_for_unit'
'max_spikes_for_nn'
        'n_neighbors'
        'n_silhouette'
        'drift_metrics_interval_s'
        'drift_metrics_min_spikes_per_interval'
Outputs:
--------
metrics : pandas.DataFrame
one column for each metric
one row per unit per epoch
"""
metrics = pd.DataFrame()
if epochs is None:
epochs = [Epoch('complete_session', 0, np.inf)]
total_units = np.max(spike_clusters) + 1
total_epochs = len(epochs)
for epoch in epochs:
in_epoch = np.logical_and(spike_times >= epoch.start_time, spike_times < epoch.end_time)
spikes_in_epoch = np.sum(in_epoch)
spikes_for_nn = min(spikes_in_epoch, params['max_spikes_for_nn'])
spikes_for_silhouette = min(spikes_in_epoch, params['n_silhouette'])
print("Calculating isi violations")
isi_viol = calculate_isi_violations(spike_times=spike_times[in_epoch],
spike_clusters=spike_clusters[in_epoch],
total_units=total_units,
isi_threshold=params['isi_threshold'],
min_isi=params['min_isi'],
duration=duration,
verbose=verbose)
print("Calculating presence ratio")
presence_ratio = calculate_presence_ratio(spike_times=spike_times[in_epoch],
spike_clusters=spike_clusters[in_epoch],
total_units=total_units,
duration=duration, verbose=verbose)
print("Calculating firing rate")
firing_rate = calculate_firing_rates(spike_times=spike_times[in_epoch],
spike_clusters=spike_clusters[in_epoch],
total_units=total_units, duration=duration, verbose=verbose)
print("Calculating amplitude cutoff")
amplitude_cutoff = calculate_amplitude_cutoff(spike_clusters=spike_clusters[in_epoch],
amplitudes=amplitudes[in_epoch],
total_units=total_units,
verbose=verbose)
print("Calculating PC-based metrics")
isolation_distance, l_ratio, d_prime, nn_hit_rate, nn_miss_rate = \
calculate_pc_metrics(spike_clusters=spike_clusters[in_epoch],
total_units=total_units,
pc_features=pc_features[in_epoch, :, :],
pc_feature_ind=pc_feature_ind,
num_channels_to_compare=params['num_channels_to_compare'],
max_spikes_for_cluster=params['max_spikes_for_unit'],
spikes_for_nn=spikes_for_nn,
n_neighbors=params['n_neighbors'],
channel_locations=
channel_locations,
seed=seed,
verbose=verbose)
print("Calculating silhouette score")
silhouette_score = calculate_silhouette_score(spike_clusters=spike_clusters[in_epoch],
total_units=total_units,
pc_features=pc_features[in_epoch, :, :],
pc_feature_ind=pc_feature_ind,
spikes_for_silhouette=spikes_for_silhouette,
seed=seed, verbose=verbose)
print("Calculating drift metrics")
max_drift, cumulative_drift = calculate_drift_metrics(spike_times=spike_times[in_epoch],
spike_clusters=spike_clusters[in_epoch],
total_units=total_units,
pc_features=pc_features[in_epoch, :, :],
pc_feature_ind=pc_feature_ind,
interval_length=params['drift_metrics_interval_s'],
min_spikes_per_interval=
params['drift_metrics_min_spikes_per_interval'],
channel_locations=
channel_locations,
verbose=verbose)
if cluster_ids is None:
cluster_ids_out = np.arange(total_units)
else:
cluster_ids_out = cluster_ids
epoch_name = [epoch.name] * len(cluster_ids_out)
metrics = pd.concat((metrics, pd.DataFrame(data=OrderedDict((('cluster_id', cluster_ids_out),
('firing_rate', firing_rate),
('presence_ratio', presence_ratio),
('isi_violation', isi_viol),
('amplitude_cutoff', amplitude_cutoff),
('isolation_distance', isolation_distance),
('l_ratio', l_ratio),
('d_prime', d_prime),
('nn_hit_rate', nn_hit_rate),
('nn_miss_rate', nn_miss_rate),
('silhouette_score', silhouette_score),
('max_drift', max_drift),
('cumulative_drift', cumulative_drift),
('epoch_name', epoch_name),
)))))
return metrics
# ===============================================================
# HELPER FUNCTIONS TO LOOP THROUGH CLUSTERS:
# ===============================================================
def calculate_isi_violations(spike_times, spike_clusters, total_units, isi_threshold, min_isi, duration,
spike_cluster_subset=None, verbose=True):
if spike_cluster_subset is not None:
cluster_ids = spike_cluster_subset
else:
cluster_ids = np.unique(spike_clusters)
viol_rates = np.zeros((total_units,))
for idx, cluster_id in enumerate(cluster_ids):
if verbose:
printProgressBar(cluster_id + 1, total_units)
for_this_cluster = (spike_clusters == cluster_id)
viol_rates[cluster_id], num_violations = isi_violations(spike_times[for_this_cluster],
duration=duration,
isi_threshold=isi_threshold,
min_isi=min_isi)
return viol_rates
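def _example_isi_violation_usage():
    """Illustrative sketch only (not part of the original API): builds a toy
    spike train for two fake units and runs the ISI-violation helper above.
    The duration, thresholds and spike counts are made-up values."""
    rng = np.random.RandomState(0)
    toy_spike_times = np.sort(rng.uniform(0, 100.0, size=2000))  # seconds
    toy_spike_clusters = rng.randint(0, 2, size=2000)  # unit IDs 0 and 1
    return calculate_isi_violations(spike_times=toy_spike_times,
                                    spike_clusters=toy_spike_clusters,
                                    total_units=2,
                                    isi_threshold=0.0015,
                                    min_isi=0.0005,
                                    duration=100.0,
                                    verbose=False)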
def calculate_presence_ratio(spike_times, spike_clusters, total_units, duration, spike_cluster_subset=None,
verbose=True):
if spike_cluster_subset is not None:
cluster_ids = spike_cluster_subset
else:
cluster_ids = np.unique(spike_clusters)
ratios = np.zeros((total_units,))
for idx, cluster_id in enumerate(cluster_ids):
if verbose:
printProgressBar(cluster_id + 1, total_units)
for_this_cluster = (spike_clusters == cluster_id)
ratios[cluster_id] = presence_ratio(spike_times[for_this_cluster],
duration=duration)
return ratios
def calculate_num_spikes(spike_times, spike_clusters, total_units, spike_cluster_subset=None, verbose=True):
num_spikes = np.zeros((total_units,))
if spike_cluster_subset is not None:
cluster_ids = spike_cluster_subset
else:
cluster_ids = np.unique(spike_clusters)
for idx, cluster_id in enumerate(cluster_ids):
if verbose:
printProgressBar(cluster_id + 1, total_units)
for_this_cluster = (spike_clusters == cluster_id)
num_spikes[cluster_id] = len(spike_times[for_this_cluster])
return num_spikes
def calculate_firing_rates(spike_times, spike_clusters, total_units, duration, spike_cluster_subset=None, verbose=True):
if spike_cluster_subset is not None:
cluster_ids = spike_cluster_subset
else:
cluster_ids = np.unique(spike_clusters)
firing_rates = np.zeros((total_units,))
for idx, cluster_id in enumerate(cluster_ids):
if verbose:
printProgressBar(cluster_id + 1, total_units)
for_this_cluster = (spike_clusters == cluster_id)
firing_rates[cluster_id] = firing_rate(spike_times[for_this_cluster],
duration=duration)
return firing_rates
def calculate_amplitude_cutoff(spike_clusters, amplitudes, total_units, spike_cluster_subset=None, verbose=True):
if spike_cluster_subset is not None:
cluster_ids = spike_cluster_subset
else:
cluster_ids = np.unique(spike_clusters)
amplitude_cutoffs = np.zeros((total_units,))
for idx, cluster_id in enumerate(cluster_ids):
if verbose:
printProgressBar(cluster_id + 1, total_units)
for_this_cluster = (spike_clusters == cluster_id)
amplitude_cutoffs[cluster_id] = amplitude_cutoff(amplitudes[for_this_cluster])
return amplitude_cutoffs
def calculate_pc_metrics(spike_clusters, total_units, pc_features, pc_feature_ind,
num_channels_to_compare, max_spikes_for_cluster, spikes_for_nn,
n_neighbors, channel_locations, min_num_pcs=10, metric_names=None,
seed=None, spike_cluster_subset=None, verbose=True):
"""
Computes metrics from projection of waveforms to principal components
including: isolation distance, l ratio, d prime, nn hit rate, nn miss rate
Parameters
----------
spike_clusters: numpy.ndarray (num_spikes,)
Unit ID for each spike time
total_units: int
Total number of units
pc_features: numpy.ndarray (num_spikes, num_pcs, num_channels)
Pre-computed PCs for blocks of channels around each spike
pc_feature_ind: numpy.ndarray (num_units, num_channels)
Channel indices of PCs for each unit
num_channels_to_compare: int
Number of channels around the max channel over which to compute the
metrics (e.g. only units from these channels will be considered for the
nearest neighbor metrics)
max_spikes_for_cluster: int
Total number of spikes to use for computing the metrics
spikes_for_nn: int
Number of spikes in a unit to use for computing nearest neighbor metrics
(nn_hit_rate, nn_miss_rate)
n_neighbors: int
Number of nearest neighbor spikes to compare membership
channel_locations: array, (channels, 2)
(x,y) location of channels; used to identify neighboring channels
min_num_pcs: int, default=10
Minimum number of spikes a unit must have to compute these metrics
metric_names: list of str, default=None
List of metrics to compute
seed: int, default=None
Random seed for subsampling spikes from the unit
spike_cluster_subset: numpy.array (units,), default=None
If specified compute metrics for only these units
verbose: bool, default=True
Prints out progress bar if True
Returns (all 1d numpy.arrays)
-------
isolation_distances
l_ratios
d_primes
nn_hit_rates
nn_miss_rates
"""
if metric_names is None:
metric_names = ['isolation_distance', 'l_ratio', 'd_prime', 'nearest_neighbor']
if num_channels_to_compare > channel_locations.shape[0]:
num_channels_to_compare = channel_locations.shape[0]
all_cluster_ids = np.unique(spike_clusters)
if spike_cluster_subset is not None:
cluster_ids = spike_cluster_subset
else:
cluster_ids = all_cluster_ids
peak_channels = np.zeros((total_units,), dtype='uint16')
neighboring_channels = np.zeros((total_units, num_channels_to_compare))
isolation_distances = np.zeros((total_units,))
l_ratios = np.zeros((total_units,))
d_primes = np.zeros((total_units,))
nn_hit_rates = np.zeros((total_units,))
nn_miss_rates = np.zeros((total_units,))
for idx, cluster_id in enumerate(all_cluster_ids):
for_unit = np.squeeze(spike_clusters == cluster_id)
pc_max = np.argmax(np.mean(pc_features[for_unit, 0, :], 0))
peak_channels[idx] = pc_feature_ind[idx, pc_max]
# find neighboring channels
neighboring_channels[idx] = find_neighboring_channels(pc_feature_ind[idx, pc_max],
pc_feature_ind[idx, :],
num_channels_to_compare,
channel_locations)
for idx, cluster_id in enumerate(cluster_ids):
if verbose:
printProgressBar(idx + 1, total_units)
peak_channel = peak_channels[idx]
# units_for_channel: index (not ID) of units defined at the target unit's peak channel
units_for_channel, channel_index = np.unravel_index(np.where(pc_feature_ind.flatten() == peak_channel)[0],
pc_feature_ind.shape)
# units_in_range: list of bool, True for units whose peak channels are in the neighborhood of target unit
units_in_range = [channel in neighboring_channels[idx] for channel in peak_channels[units_for_channel]]
channels_to_use = neighboring_channels[idx]
# only get index of units who are in the neighborhood of target unit
units_for_channel = units_for_channel[units_in_range]
spike_counts = np.zeros(units_for_channel.shape)
for idx2, cluster_id2 in enumerate(units_for_channel):
spike_counts[idx2] = np.sum(spike_clusters == all_cluster_ids[cluster_id2])
# index of target unit within the subset of units in its neighborhood (including itself)
this_unit_idx = np.where(units_for_channel == idx)[0]
if spike_counts[this_unit_idx] > max_spikes_for_cluster:
relative_counts = spike_counts / spike_counts[this_unit_idx] * max_spikes_for_cluster
else:
relative_counts = spike_counts
all_pcs = np.zeros((0, pc_features.shape[1], channels_to_use.size))
all_labels = np.zeros((0,))
for idx2, cluster_id2 in enumerate(units_for_channel):
try:
channel_mask = make_channel_mask(cluster_id2, pc_feature_ind, channels_to_use)
except IndexError:
# Occurs when pc_feature_ind does not contain all channels of interest
# In that case, we will exclude this unit for the calculation
                print('Unit outside the range set by channels_to_use, skipping...')
pass
else:
subsample = int(relative_counts[idx2])
index_mask = make_index_mask(spike_clusters, all_cluster_ids[cluster_id2], min_num=0, max_num=subsample,
seed=seed)
pcs = get_unit_pcs(pc_features, index_mask, channel_mask)
labels = np.ones((pcs.shape[0],)) * all_cluster_ids[cluster_id2]
all_pcs = np.concatenate((all_pcs, pcs), 0)
all_labels = np.concatenate((all_labels, labels), 0)
all_pcs = np.reshape(all_pcs, (all_pcs.shape[0], pc_features.shape[1] * channels_to_use.size))
if all_pcs.shape[0] > min_num_pcs:
if 'isolation_distance' in metric_names or 'l_ratio' in metric_names:
isolation_distances[idx], l_ratios[idx] = mahalanobis_metrics(all_pcs, all_labels,
cluster_id)
else:
isolation_distances[idx] = np.nan
l_ratios[idx] = np.nan
if 'd_prime' in metric_names:
d_primes[idx] = lda_metrics(all_pcs, all_labels, cluster_id)
else:
d_primes[idx] = np.nan
if 'nearest_neighbor' in metric_names:
nn_hit_rates[idx], nn_miss_rates[idx] = nearest_neighbors_metrics(all_pcs, all_labels,
cluster_id,
spikes_for_nn,
n_neighbors)
else:
nn_hit_rates[idx] = np.nan
nn_miss_rates[idx] = np.nan
else:
            print(f'Unit {cluster_id} only has {all_pcs.shape[0]} spikes, '
                  'which is not enough to compute metrics; assigning nan...')
isolation_distances[idx] = np.nan
l_ratios[idx] = np.nan
d_primes[idx] = np.nan
nn_hit_rates[idx] = np.nan
nn_miss_rates[idx] = np.nan
return isolation_distances, l_ratios, d_primes, nn_hit_rates, nn_miss_rates
def calculate_silhouette_score(spike_clusters,
total_units,
pc_features,
pc_feature_ind,
spikes_for_silhouette,
seed=None,
spike_cluster_subset=None,
verbose=True):
random_spike_inds = np.random.RandomState(seed=seed).permutation(spike_clusters.size)
random_spike_inds = random_spike_inds[:spikes_for_silhouette]
num_pc_features = pc_features.shape[1]
num_channels = np.max(pc_feature_ind) + 1
all_pcs = np.zeros((spikes_for_silhouette, num_channels * num_pc_features))
for idx, i in enumerate(random_spike_inds):
unit_id = spike_clusters[i]
channels = pc_feature_ind[unit_id, :]
for j in range(0, num_pc_features):
all_pcs[idx, channels + num_channels * j] = pc_features[i, j, :]
cluster_labels = spike_clusters[random_spike_inds]
all_cluster_ids = np.unique(spike_clusters)
if spike_cluster_subset is not None:
cluster_ids = spike_cluster_subset
else:
cluster_ids = all_cluster_ids
SS = np.empty((total_units, total_units))
SS[:] = np.nan
seen_unit_pairs = set()
for idx1, i in enumerate(cluster_ids):
if verbose:
printProgressBar(idx1 + 1, len(cluster_ids))
for idx2, j in enumerate(all_cluster_ids):
if (i, j) not in seen_unit_pairs and (j, i) not in seen_unit_pairs and i != j:
inds = np.in1d(cluster_labels, np.array([i, j]))
X = all_pcs[inds, :]
labels = cluster_labels[inds]
if len(labels) > 2:
SS[i, j] = silhouette_score(X, labels, random_state=seed)
seen_unit_pairs.add((i, j))
with warnings.catch_warnings():
warnings.simplefilter("ignore")
a = np.nanmin(SS, 0)
b = np.nanmin(SS, 1)
return np.array([np.nanmin([a, b]) for a, b in zip(a, b)])
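# --- Illustrative usage sketch (added for clarity; not part of the original module). ---
# Synthetic shapes only: 2000 spikes, 4 units, 3 PCs and 4 stored channels per spike
# are arbitrary assumptions chosen to make the call self-consistent; numpy is assumed
# to be imported as np at the top of this file, as elsewhere in this module.
def _example_silhouette_score(seed=0):
    rng = np.random.RandomState(seed)
    n_spikes, n_units = 2000, 4
    spike_clusters = rng.randint(0, n_units, n_spikes)
    pc_features = rng.randn(n_spikes, 3, 4)
    pc_feature_ind = np.vstack([np.arange(4) + (u % 4) for u in range(n_units)])
    return calculate_silhouette_score(spike_clusters, n_units, pc_features,
                                      pc_feature_ind, spikes_for_silhouette=500,
                                      seed=seed, verbose=False)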
def calculate_drift_metrics(spike_times,
spike_clusters,
total_units,
pc_features,
pc_feature_ind,
interval_length,
min_spikes_per_interval,
vertical_channel_spacing=10,
channel_locations=None,
spike_cluster_subset=None,
verbose=True):
max_drift = np.zeros((total_units,))
cumulative_drift = np.zeros((total_units,))
positions = get_spike_positions(spike_clusters, pc_features, pc_feature_ind, channel_locations,
vertical_channel_spacing)
interval_starts = np.arange(np.min(spike_times), np.max(spike_times), interval_length)
interval_ends = interval_starts + interval_length
if spike_cluster_subset is not None:
cluster_ids = spike_cluster_subset
else:
cluster_ids = np.unique(spike_clusters)
for idx, cluster_id in enumerate(cluster_ids):
if verbose:
            printProgressBar(idx + 1, len(cluster_ids))
in_cluster = spike_clusters == cluster_id
times_for_cluster = spike_times[in_cluster]
positions_for_cluster = positions[in_cluster]
median_positions = []
for t1, t2 in zip(interval_starts, interval_ends):
in_range = (times_for_cluster > t1) * (times_for_cluster < t2)
if np.sum(in_range) >= min_spikes_per_interval:
median_positions.append(np.median(positions_for_cluster[in_range], 0))
else:
median_positions.append([np.nan, np.nan])
median_positions = np.array(median_positions)
        # Extract the upper-triangular (semi-)matrix of position shifts (used to compute max_drift and cumulative_drift)
position_diffs = np.zeros((len(median_positions), len(median_positions)))
for i, pos_i in enumerate(median_positions):
for j, pos_j in enumerate(median_positions):
if j > i:
if not np.isnan(pos_i[0]) and not np.isnan(pos_j[0]):
position_diffs[i, j] = np.linalg.norm(pos_i - pos_j)
else:
position_diffs[i, j] = 0
# Maximum drift among all periods
if np.any(position_diffs > 0):
max_drift[cluster_id] = np.around(np.max(position_diffs[position_diffs > 0]), 2)
# The +1 diagonal contains the step-by-step drifts between intervals.
# Summing them up we obtain cumulative drift
cumulative_drift[cluster_id] = np.around(np.sum(np.diag(position_diffs, 1)), 2)
else:
# not enough spikes
max_drift[cluster_id] = 0
cumulative_drift[cluster_id] = 0
return max_drift, cumulative_drift
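# --- Worked example of the two drift summaries above (illustrative comment only). ---
# Taking a 1-D position for simplicity: if a unit's median position in four
# consecutive intervals is 100, 103, 101 and 108 um, the pairwise shifts are
# |100-103|=3, |100-101|=1, |100-108|=8, |103-101|=2, |103-108|=5, |101-108|=7.
# max_drift is the largest of these (8), while cumulative_drift sums only the
# consecutive (+1 diagonal) steps: 3 + 2 + 7 = 12.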
# ==========================================================
# IMPLEMENTATION OF ACTUAL METRICS:
# ==========================================================
def isi_violations(spike_train, duration, isi_threshold, min_isi=0):
"""Calculate Inter-Spike Interval (ISI) violations for a spike train.
Based on metric described in Hill et al. (2011) J Neurosci 31: 8699-8705
Originally written in Matlab by Nick Steinmetz (https://github.com/cortex-lab/sortingQuality)
Converted to Python by Daniel Denman
Inputs:
-------
spike_train : array of monotonically increasing spike times (in seconds) [t1, t2, t3, ...]
duration : length of recording (seconds)
isi_threshold : threshold for classifying adjacent spikes as an ISI violation
- this is the biophysical refractory period
min_isi : minimum possible inter-spike interval (default = 0)
- this is the artificial refractory period enforced by the data acquisition system
or post-processing algorithms
Outputs:
--------
fpRate : rate of contaminating spikes as a fraction of overall rate
- higher values indicate more contamination
num_violations : total number of violations detected
"""
isis_initial = np.diff(spike_train)
if min_isi > 0:
duplicate_spikes = np.where(isis_initial <= min_isi)[0]
spike_train = np.delete(spike_train, duplicate_spikes + 1)
isis = np.diff(spike_train)
num_spikes = len(spike_train)
num_violations = sum(isis < isi_threshold)
violation_time = 2 * num_spikes * (isi_threshold - min_isi)
total_rate = firing_rate(spike_train, duration)
violation_rate = num_violations / violation_time
fpRate = violation_rate / total_rate
return fpRate, num_violations
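# --- Illustrative usage sketch (added for clarity; not part of the original module). ---
# Assumes numpy is imported as np at the top of this file, as elsewhere in this module.
# The 1.5 ms refractory threshold and 0.5 ms censored period below are common choices,
# not values mandated by this code.
def _example_isi_violations(seed=0):
    rng = np.random.RandomState(seed)
    duration = 600.0  # a hypothetical 10-minute recording, in seconds
    spike_train = np.sort(rng.uniform(0.0, duration, size=3000))
    fp_rate, num_violations = isi_violations(spike_train, duration,
                                             isi_threshold=0.0015, min_isi=0.0005)
    return fp_rate, num_violations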
def presence_ratio(spike_train, duration, num_bin_edges=101):
"""Calculate fraction of time the unit is present within an epoch.
Inputs:
-------
spike_train : array of spike times
duration : length of recording (seconds)
num_bin_edges : number of bin edges for histogram
- total bins = num_bin_edges - 1
Outputs:
--------
presence_ratio : fraction of time bins in which this unit is spiking
"""
h, b = np.histogram(spike_train, np.linspace(0, duration, num_bin_edges))
return np.sum(h > 0) / (num_bin_edges - 1)
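# --- Illustrative usage sketch (added for clarity; synthetic values only). ---
# A unit firing throughout the epoch scores close to 1.0; a unit that is silent
# for the second half of the recording scores close to 0.5.
def _example_presence_ratio():
    duration = 100.0
    spikes_full = np.linspace(0.0, duration, 1000)        # covers every time bin
    spikes_half = np.linspace(0.0, duration / 2.0, 1000)  # covers only the first half
    return presence_ratio(spikes_full, duration), presence_ratio(spikes_half, duration)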
def firing_rate(spike_train, duration):
"""Calculate firing rate for a spike train.
    The rate is the total number of spikes divided by the duration of the recording.
Inputs:
-------
spike_train : array of spike times (in seconds)
duration : length of recording (in seconds)
Outputs:
--------
fr : float
Firing rate in Hz
"""
fr = spike_train.size / duration
return fr
def amplitude_cutoff(amplitudes, num_histogram_bins=500, histogram_smoothing_value=3):
""" Calculate approximate fraction of spikes missing from a distribution of amplitudes
Assumes the amplitude histogram is symmetric (not valid in the presence of drift)
Inspired by metric described in Hill et al. (2011) J Neurosci 31: 8699-8705
Input:
------
amplitudes : numpy.ndarray
Array of amplitudes (don't need to be in physical units)
num_histogram_bins : int
Number of bins for calculating amplitude histogram
histogram_smoothing_value : float
Gaussian filter window for smoothing amplitude histogram
Output:
-------
fraction_missing : float
Fraction of missing spikes (ranges between 0 and 0.5)
If more than 50% of spikes are missing, an accurate estimate isn't possible
"""
h, b = np.histogram(amplitudes, num_histogram_bins, density=True)
pdf = gaussian_filter1d(h, histogram_smoothing_value)
support = b[:-1]
peak_index = np.argmax(pdf)
G = np.argmin(np.abs(pdf[peak_index:] - pdf[0])) + peak_index
bin_size = np.mean(np.diff(support))
fraction_missing = np.sum(pdf[G:]) * bin_size
fraction_missing = np.min([fraction_missing, 0.5])
return fraction_missing
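# --- Illustrative usage sketch (synthetic amplitudes; for clarity only). ---
# A symmetric Gaussian amplitude distribution yields an estimate near 0, while
# truncating its lower tail (simulating spikes lost below a detection threshold)
# pushes the estimated missing fraction up. All values are assumptions.
def _example_amplitude_cutoff(seed=0):
    rng = np.random.RandomState(seed)
    amplitudes = rng.normal(loc=100.0, scale=10.0, size=20000)
    truncated = amplitudes[amplitudes > 95.0]
    return amplitude_cutoff(amplitudes), amplitude_cutoff(truncated)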
def mahalanobis_metrics(all_pcs, all_labels, this_unit_id):
""" Calculates isolation distance and L-ratio (metrics computed from Mahalanobis distance)
Based on metrics described in Schmitzer-Torbert et al. (2005) Neurosci 131: 1-11
Inputs:
-------
all_pcs : numpy.ndarray (num_spikes x PCs)
2D array of PCs for all spikes
all_labels : numpy.ndarray (num_spikes x 0)
1D array of cluster labels for all spikes
this_unit_id : Int
number corresponding to unit for which these metrics will be calculated
Outputs:
--------
isolation_distance : float
Isolation distance of this unit
l_ratio : float
L-ratio for this unit
"""
pcs_for_this_unit = all_pcs[all_labels == this_unit_id, :]
pcs_for_other_units = all_pcs[all_labels != this_unit_id, :]
mean_value = np.expand_dims(np.mean(pcs_for_this_unit, 0), 0)
try:
VI = np.linalg.inv(np.cov(pcs_for_this_unit.T))
    except np.linalg.LinAlgError:  # case of singular covariance matrix
return np.nan, np.nan
mahalanobis_other = np.sort(cdist(mean_value,
pcs_for_other_units,
'mahalanobis', VI=VI)[0])
mahalanobis_self = np.sort(cdist(mean_value,
pcs_for_this_unit,
'mahalanobis', VI=VI)[0])
n = np.min([pcs_for_this_unit.shape[0], pcs_for_other_units.shape[0]]) # number of spikes
if n >= 2:
dof = pcs_for_this_unit.shape[1] # number of features
l_ratio = np.sum(1 - chi2.cdf(pow(mahalanobis_other, 2), dof)) / mahalanobis_self.shape[0]
isolation_distance = pow(mahalanobis_other[n - 1], 2)
# if math.isnan(l_ratio):
# print("NaN detected", mahalanobis_other, VI)
else:
l_ratio = np.nan
isolation_distance = np.nan
return isolation_distance, l_ratio
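# --- Illustrative usage sketch (synthetic clusters; for clarity only). ---
# Two well-separated Gaussian clusters in PC space should give a large isolation
# distance and a small L-ratio for the target unit. Shapes and offsets are assumptions.
def _example_mahalanobis_metrics(seed=0):
    rng = np.random.RandomState(seed)
    all_pcs = np.vstack([rng.randn(500, 6),           # unit 0, centred at the origin
                         rng.randn(500, 6) + 10.0])   # unit 1, far away in PC space
    all_labels = np.repeat([0, 1], 500)
    return mahalanobis_metrics(all_pcs, all_labels, this_unit_id=0)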
def lda_metrics(all_pcs, all_labels, this_unit_id):
""" Calculates d-prime based on Linear Discriminant Analysis
Based on metric described in Hill et al. (2011) J Neurosci 31: 8699-8705
Inputs:
-------
all_pcs : numpy.ndarray (num_spikes x PCs)
2D array of PCs for all spikes
all_labels : numpy.ndarray (num_spikes x 0)
1D array of cluster labels for all spikes
this_unit_id : Int
number corresponding to unit for which these metrics will be calculated
Outputs:
--------
    d_prime : float
        d-prime of this unit: the discriminability of this unit from all other
        spikes along the LDA projection
"""
X = all_pcs
y = np.zeros((X.shape[0],), dtype='bool')
y[all_labels == this_unit_id] = True
lda = LDA(n_components=1)
X_flda = lda.fit_transform(X, y)
flda_this_cluster = X_flda[np.where(y)[0]]
flda_other_cluster = X_flda[np.where(np.invert(y))[0]]
d_prime = (np.mean(flda_this_cluster) - np.mean(flda_other_cluster)) / np.sqrt(
0.5 * (np.std(flda_this_cluster) ** 2 + np.std(flda_other_cluster) ** 2))
return d_prime
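# --- Illustrative usage sketch (synthetic clusters; for clarity only). ---
# The same two-cluster toy data: a large separation gives a large d-prime,
# whereas fully overlapping clusters would give a d-prime near 0.
def _example_lda_metrics(seed=0):
    rng = np.random.RandomState(seed)
    all_pcs = np.vstack([rng.randn(500, 6), rng.randn(500, 6) + 5.0])
    all_labels = np.repeat([0, 1], 500)
    return lda_metrics(all_pcs, all_labels, this_unit_id=0)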
def nearest_neighbors_metrics(all_pcs, all_labels, this_unit_id, spikes_for_nn, n_neighbors):
""" Calculates unit contamination based on NearestNeighbors search in PCA space
Based on metrics described in Chung, Magland et al. (2017) Neuron 95: 1381-1394
A is a (hopefully) representative subset of cluster X
    NN_hit(X) = 1/k * sum_{i=1..k} |{x in A : the i-th closest neighbor of x is in X}| / |A|
Inputs:
-------
all_pcs : numpy.ndarray (num_spikes x PCs)
2D array of PCs for all spikes
all_labels : numpy.ndarray (num_spikes x 0)
1D array of cluster labels for all spikes
this_unit_id : Int
number corresponding to unit for which these metrics will be calculated
spikes_for_nn : Int
number of spikes to use (calculation can be very slow when this number is >20000)
n_neighbors : Int
number of neighbors to use
Outputs:
--------
hit_rate : float
Fraction of neighbors for target cluster that are also in target cluster
miss_rate : float
Fraction of neighbors outside target cluster that are in target cluster
"""
total_spikes = all_pcs.shape[0]
ratio = spikes_for_nn / total_spikes
this_unit = all_labels == this_unit_id
X = np.concatenate((all_pcs[this_unit, :], all_pcs[np.invert(this_unit), :]), 0)
n = np.sum(this_unit)
if ratio < 1:
inds = np.arange(0, X.shape[0] - 1, 1 / ratio).astype('int')
X = X[inds, :]
n = int(n * ratio)
nbrs = NearestNeighbors(n_neighbors=n_neighbors, algorithm='ball_tree').fit(X)
distances, indices = nbrs.kneighbors(X)
this_cluster_inds = np.arange(n)
this_cluster_nearest = indices[:n, 1:].flatten()
other_cluster_nearest = indices[n:, 1:].flatten()
hit_rate = np.mean(this_cluster_nearest < n)
miss_rate = np.mean(other_cluster_nearest < n)
return hit_rate, miss_rate
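# --- Illustrative usage sketch (synthetic clusters; for clarity only). ---
# With well-separated clusters the hit rate approaches 1 and the miss rate
# approaches 0; spikes_for_nn and n_neighbors below are arbitrary small values.
def _example_nearest_neighbors_metrics(seed=0):
    rng = np.random.RandomState(seed)
    all_pcs = np.vstack([rng.randn(400, 6), rng.randn(400, 6) + 8.0])
    all_labels = np.repeat([0, 1], 400)
    return nearest_neighbors_metrics(all_pcs, all_labels, this_unit_id=0,
                                     spikes_for_nn=200, n_neighbors=4)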
# ==========================================================
# HELPER FUNCTIONS:
# ==========================================================
def make_index_mask(spike_clusters, unit_id, min_num, max_num, seed=None):
""" Create a mask for the spike index dimensions of the pc_features array
Inputs:
-------
spike_clusters : numpy.ndarray (num_spikes x 0)
Contains cluster IDs for all spikes in pc_features array
unit_id : Int
ID for this unit
min_num : Int
Minimum number of spikes to return; if there are not enough spikes for this unit, return all False
max_num : Int
Maximum number of spikes to return; if too many spikes for this unit, return a random subsample
seed: int
Random seed for reproducibility
Output:
-------
index_mask : numpy.ndarray (boolean)
Mask of spike indices for pc_features array
"""
index_mask = spike_clusters == unit_id
inds = np.where(index_mask)[0]
if len(inds) < min_num:
index_mask = np.zeros((spike_clusters.size,), dtype='bool')
else:
index_mask = np.zeros((spike_clusters.size,), dtype='bool')
order = np.random.RandomState(seed=seed).permutation(inds.size)
index_mask[inds[order[:max_num]]] = True
return index_mask
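# --- Behaviour sketch for make_index_mask (illustrative comment only). ---
# With spike_clusters = [0, 1, 1, 1, 0] and unit_id = 1:
#   * min_num=4 returns an all-False mask (only three unit-1 spikes exist);
#   * min_num=0, max_num=2 returns a mask with exactly two of the three unit-1
#     positions set to True, chosen by the seeded permutation.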
def make_channel_mask(unit_id, pc_feature_ind, channels_to_use):
""" Create a mask for the channel dimension of the pc_features array
Inputs:
-------
unit_id : Int
ID for this unit
pc_feature_ind : np.ndarray
Channels used for PC calculation for each unit
channels_to_use : np.ndarray
Channels to use for calculating metrics
Output:
-------
channel_mask : numpy.ndarray
Channel indices to extract from pc_features array
"""
these_inds = pc_feature_ind[unit_id, :]
channel_mask = [np.argwhere(these_inds == i)[0][0] for i in channels_to_use]
return np.array(channel_mask)
def get_unit_pcs(these_pc_features, index_mask, channel_mask):
""" Use the index_mask and channel_mask to return PC features for one unit
Inputs:
-------
these_pc_features : numpy.ndarray (float)
Array of pre-computed PC features (num_spikes x num_PCs x num_channels)
index_mask : numpy.ndarray (boolean)
Mask for spike index dimension of pc_features array
channel_mask : numpy.ndarray (boolean)
Mask for channel index dimension of pc_features array
Output:
-------
unit_PCs : numpy.ndarray (float)
PCs for one unit (num_spikes x num_PCs x num_channels)
"""
unit_PCs = these_pc_features[index_mask, :, :]
unit_PCs = unit_PCs[:, :, channel_mask]
return unit_PCs
def find_neighboring_channels(peak_channel, channel_list, num_channels_to_compare, channel_locations):
"""
Finds k nearest channels to the peak channel of a unit
Parameters
----------
peak_channel: int
ID of channel with largest waveform amplitude
channel_list: numpy.ndarray
IDs of channels being considered
num_channels_to_compare: int
Number of nearest channels to return
channel_locations: numpy.ndarray, (n_channels, 2)
x,y coordinates of the channels in channel_list
Returns
-------
neighboring_channels: array_like
id of k channels that neighbor peak channel (including the peak channel itself)
"""
# get peak channel location
channel_idx = list(channel_list).index(peak_channel)
peak_channel_location = channel_locations[channel_idx]
# compute pairwise distance
distances = [np.linalg.norm(peak_channel_location - loc) for loc in channel_locations]
# get k closest channels (+1 because distance 0 is peak_channel)
neighboring_channels_inds = np.argsort(distances)[:num_channels_to_compare]
neighboring_channels = channel_list[neighboring_channels_inds]
return neighboring_channels
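# --- Illustrative usage sketch (synthetic probe geometry; for clarity only). ---
# Four channels spaced 20 um apart along a single vertical column: the three
# channels closest to channel 1 (itself included) are channels 1, 0 and 2.
def _example_find_neighboring_channels():
    channel_list = np.arange(4)
    channel_locations = np.array([[0.0, 0.0], [0.0, 20.0], [0.0, 40.0], [0.0, 60.0]])
    return find_neighboring_channels(peak_channel=1, channel_list=channel_list,
                                     num_channels_to_compare=3,
                                     channel_locations=channel_locations)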
| 39.163223 | 120 | 0.603324 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 12,550 | 0.331038 |
6f76bcfc2a09b5cceb410578869827df3cb772bb | 23,746 | py | Python | pdpbox/pdp_plot_utils.py | flinder/PDPbox | b832e37f840ae885d39a0ba8ff458f4be27dcc65 | [
"MIT"
]
| null | null | null | pdpbox/pdp_plot_utils.py | flinder/PDPbox | b832e37f840ae885d39a0ba8ff458f4be27dcc65 | [
"MIT"
]
| null | null | null | pdpbox/pdp_plot_utils.py | flinder/PDPbox | b832e37f840ae885d39a0ba8ff458f4be27dcc65 | [
"MIT"
]
| null | null | null | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
import copy
from .pdp_calc_utils import _sample_data, _find_onehot_actual, _find_closest
from sklearn.cluster import MiniBatchKMeans, KMeans
def _pdp_plot_title(n_grids, feature_name, ax, multi_flag, which_class, plot_params):
"""
Draw pdp plot title
:param n_grids: number of grids
:param feature_name: name of the feature
:param ax: axes to plot on
:param multi_flag: whether it is a subplot of a multi-classes plot
:param which_class: which class to plot
:param plot_params: values of plot parameters
"""
font_family = 'Arial'
title = 'PDP for %s' % feature_name
subtitle = "Number of unique grid points: %d" % n_grids
title_fontsize = 15
subtitle_fontsize = 12
if plot_params is not None:
if 'font_family' in plot_params.keys():
font_family = plot_params['font_family']
if 'title' in plot_params.keys():
title = plot_params['title']
if 'title_fontsize' in plot_params.keys():
title_fontsize = plot_params['title_fontsize']
if 'subtitle_fontsize' in plot_params.keys():
subtitle_fontsize = plot_params['subtitle_fontsize']
ax.set_facecolor('white')
if multi_flag:
ax.text(0, 0.7, title, va="top", ha="left", fontsize=title_fontsize, fontname=font_family)
ax.text(0, 0.45, "For Class %d" % which_class, va="top", ha="left", fontsize=subtitle_fontsize, fontname=font_family)
ax.text(0, 0.25, subtitle, va="top", ha="left", fontsize=subtitle_fontsize, fontname=font_family, color='grey')
else:
ax.text(0, 0.7, title, va="top", ha="left", fontsize=title_fontsize, fontname=font_family)
ax.text(0, 0.4, subtitle, va="top", ha="left", fontsize=subtitle_fontsize, fontname=font_family, color='grey')
ax.axis('off')
def _axes_modify(font_family, ax, top=False, right=False, legend=False):
# modify the axes
for tick in ax.get_xticklabels():
tick.set_fontname(font_family)
for tick in ax.get_yticklabels():
tick.set_fontname(font_family)
ax.set_facecolor('white')
ax.tick_params(axis='both', which='major', labelsize=10, labelcolor='#424242', colors='#9E9E9E')
for d in ['top', 'bottom', 'right', 'left']:
ax.spines[d].set_visible(False)
if not legend:
if top:
ax.get_xaxis().tick_top()
elif right:
ax.get_yaxis().tick_right()
else:
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
ax.grid(True, 'major', 'x', ls='--', lw=.5, c='k', alpha=.3)
ax.grid(True, 'major', 'y', ls='--', lw=.5, c='k', alpha=.3)
else:
ax.set_xticks([])
ax.set_yticks([])
def _pdp_plot(pdp_isolate_out, feature_name, center, plot_org_pts, plot_lines, frac_to_plot,
cluster, n_cluster_centers, cluster_method, x_quantile, ax, plot_params):
"""
Plot partial dependent plot
:param pdp_isolate_out: instance of pdp_isolate_obj
a calculated pdp_isolate_obj instance
:param feature_name: string
name of the feature, not necessary the same as the column name
:param center: boolean, default=True
whether to center the plot
:param plot_org_pts: boolean, default=False
whether to plot out the original points
:param plot_lines: boolean, default=False
whether to plot out the individual lines
:param frac_to_plot: float or integer, default=1
        how many points or lines to plot, can be an integer or a float
:param cluster: boolean, default=False
whether to cluster the individual lines and only plot out the cluster centers
:param n_cluster_centers: integer, default=None
number of cluster centers
:param cluster_method: string, default=None
cluster method to use, default is KMeans, if 'approx' is passed, MiniBatchKMeans is used
:param x_quantile: boolean, default=False
whether to construct x axis ticks using quantiles
:param ax: axes to plot on
:param plot_params: dict, default=None
values of plot parameters
"""
font_family = 'Arial'
xticks_rotation = 0
if plot_params is not None:
if 'font_family' in plot_params.keys():
font_family = plot_params['font_family']
if 'xticks_rotation' in plot_params.keys():
xticks_rotation = plot_params['xticks_rotation']
# modify axes
_axes_modify(font_family, ax)
ax.set_xlabel(feature_name, fontsize=10)
feature_type = pdp_isolate_out.feature_type
feature_grids = pdp_isolate_out.feature_grids
display_columns = pdp_isolate_out.display_columns
actual_columns = pdp_isolate_out.actual_columns
if feature_type == 'binary' or feature_type == 'onehot' or x_quantile:
x = range(len(feature_grids))
ax.set_xticks(x)
ax.set_xticklabels(display_columns, rotation=xticks_rotation)
else:
# for numeric feature
x = feature_grids
ice_lines = copy.deepcopy(pdp_isolate_out.ice_lines)
pdp_y = copy.deepcopy(pdp_isolate_out.pdp)
# whether to fill between std upper and lower
# whether to highlight pdp line
std_fill = True
pdp_hl = False
# whether to center the plot
if center:
pdp_y -= pdp_y[0]
for col in feature_grids[1:]:
ice_lines[col] -= ice_lines[feature_grids[0]]
ice_lines['actual_preds'] -= ice_lines[feature_grids[0]]
ice_lines[feature_grids[0]] = 0
if cluster or plot_lines:
std_fill = False
pdp_hl = True
if cluster:
_ice_cluster_plot(x=x, ice_lines=ice_lines, feature_grids=feature_grids, n_cluster_centers=n_cluster_centers,
cluster_method=cluster_method, ax=ax, plot_params=plot_params)
else:
ice_plot_data = _sample_data(ice_lines=ice_lines, frac_to_plot=frac_to_plot)
_ice_line_plot(x=x, ice_plot_data=ice_plot_data, feature_grids=feature_grids, ax=ax, plot_params=plot_params)
if plot_org_pts:
ice_lines_temp = ice_lines.copy()
if feature_type == 'onehot':
ice_lines_temp['x'] = ice_lines_temp[actual_columns].apply(lambda x: _find_onehot_actual(x), axis=1)
ice_lines_temp = ice_lines_temp[~ice_lines_temp['x'].isnull()].reset_index(drop=True)
elif feature_type == 'numeric':
feature_grids = pdp_isolate_out.feature_grids
ice_lines_temp = ice_lines_temp[(ice_lines_temp[actual_columns[0]] >= feature_grids[0])
& (ice_lines_temp[actual_columns[0]] <= feature_grids[-1])]
if x_quantile:
ice_lines_temp['x'] = ice_lines_temp[actual_columns[0]].apply(lambda x: _find_closest(x, feature_grids))
else:
ice_lines_temp['x'] = ice_lines_temp[actual_columns[0]]
else:
ice_lines_temp['x'] = ice_lines_temp[actual_columns[0]]
ice_plot_data_pts = _sample_data(ice_lines=ice_lines_temp, frac_to_plot=frac_to_plot)
_ice_plot_pts(ice_plot_data_pts=ice_plot_data_pts, ax=ax, plot_params=plot_params)
std = ice_lines[feature_grids].std().values
_pdp_std_plot(x=x, y=pdp_y, std=std, std_fill=std_fill, pdp_hl=pdp_hl, ax=ax, plot_params=plot_params)
def _pdp_std_plot(x, y, std, std_fill, pdp_hl, ax, plot_params):
"""
PDP basic plot
:param x: x axis values
:param y: pdp values
:param std: std values
:param std_fill: whether to fill between std upper and lower
:param pdp_hl: whether to highlight pdp line
:param ax: axes to plot on
:param plot_params: dictionary of plot config
"""
upper = y + std
lower = y - std
pdp_color = '#1A4E5D'
pdp_hl_color = '#FEDC00'
pdp_linewidth = 2
zero_color = '#E75438'
zero_linewidth = 1.5
fill_color = '#66C2D7'
fill_alpha = 0.2
markersize = 5
if plot_params is not None:
if 'pdp_color' in plot_params.keys():
pdp_color = plot_params['pdp_color']
if 'pdp_hl_color' in plot_params.keys():
pdp_hl_color = plot_params['pdp_hl_color']
if 'pdp_linewidth' in plot_params.keys():
pdp_linewidth = plot_params['pdp_linewidth']
if 'zero_color' in plot_params.keys():
zero_color = plot_params['zero_color']
if 'zero_linewidth' in plot_params.keys():
zero_linewidth = plot_params['zero_linewidth']
if 'fill_color' in plot_params.keys():
fill_color = plot_params['fill_color']
if 'fill_alpha' in plot_params.keys():
fill_alpha = plot_params['fill_alpha']
if 'markersize' in plot_params.keys():
markersize = plot_params['markersize']
if pdp_hl:
ax.plot(x, y, color=pdp_hl_color, linewidth=pdp_linewidth * 3, alpha=0.8)
ax.plot(x, y, color=pdp_color, linewidth=pdp_linewidth, marker='o', markersize=markersize)
    ax.plot(x, np.zeros_like(y), linestyle='--', linewidth=zero_linewidth, color=zero_color)
if std_fill:
ax.fill_between(x, upper, lower, alpha=fill_alpha, color=fill_color)
ax.set_ylim(np.min([np.min(lower) * 2, 0]), np.max([np.max(upper) * 2, 0]))
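# --- Illustrative usage sketch (internal helper; synthetic values only). ---
# Shows the expected argument shapes for _pdp_std_plot; the numbers below are
# assumptions, not values produced by pdpbox itself.
def _example_pdp_std_plot():
    fig, ax = plt.subplots()
    x = np.arange(5)
    y = np.array([0.0, 0.1, 0.25, 0.3, 0.28])
    std = np.full(5, 0.05)
    _pdp_std_plot(x=x, y=y, std=std, std_fill=True, pdp_hl=False, ax=ax, plot_params=None)
    return fig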
def _ice_plot_pts(ice_plot_data_pts, ax, plot_params):
"""
Plot the real data points
:param ice_plot_data_pts: data points to plot
:param ax: axes to plot on
:param plot_params: dictionary of plot config
"""
point_size = 50
point_pos_color = '#5BB573'
point_neg_color = '#E75438'
if plot_params is not None:
if 'point_size' in plot_params.keys():
point_size = plot_params['point_size']
if 'point_pos_color' in plot_params.keys():
point_pos_color = plot_params['point_pos_color']
if 'point_neg_color' in plot_params.keys():
point_neg_color = plot_params['point_neg_color']
ice_plot_data_pts['color'] = ice_plot_data_pts['actual_preds'].apply(lambda x: point_pos_color if x >= 0 else point_neg_color)
ax.scatter(ice_plot_data_pts['x'], ice_plot_data_pts['actual_preds'], s=point_size, marker="+", linewidth=1,
color=ice_plot_data_pts['color'])
def _ice_line_plot(x, ice_plot_data, feature_grids, ax, plot_params):
"""
Plot the ice lines
:param x: x axis values
:param ice_plot_data: ice lines to plot
:param ax: axes to plot on
:param plot_params: dictionary of plot config
"""
linewidth = np.max([1.0 / np.log10(ice_plot_data.shape[0]), 0.3])
linealpha = np.max([1.0 / np.log10(ice_plot_data.shape[0]), 0.3])
line_cmap = 'Blues'
if plot_params is not None:
if 'line_cmap' in plot_params.keys():
line_cmap = plot_params['line_cmap']
colors = plt.get_cmap(line_cmap)(np.linspace(0, 1, 20))[5:15]
for i in range(len(ice_plot_data)):
y = list(ice_plot_data[feature_grids].iloc[i].values)
ax.plot(x, y, linewidth=linewidth, c=colors[i % 10], alpha=linealpha)
def _ice_cluster_plot(x, ice_lines, feature_grids, n_cluster_centers, cluster_method, ax, plot_params):
"""
Cluster the ice lines and plot out the cluster centers
:param x: x axis values
:param ice_lines: ice lines
:param n_cluster_centers: number of cluster centers
:param cluster_method: cluster method
:param ax: axes to plot on
:param plot_params: dictionary of plot config
"""
if cluster_method == 'approx':
kmeans = MiniBatchKMeans(n_clusters=n_cluster_centers, random_state=0, verbose=0)
else:
kmeans = KMeans(n_clusters=n_cluster_centers, random_state=0, n_jobs=1)
kmeans.fit(ice_lines[feature_grids])
cluster_plot_data = pd.DataFrame(kmeans.cluster_centers_, columns=feature_grids)
cluster_cmap = 'Blues'
if plot_params is not None:
if 'cluster_cmap' in plot_params.keys():
cluster_cmap = plot_params['cluster_cmap']
colors = plt.get_cmap(cluster_cmap)(np.linspace(0, 1, 20))[5:15]
for i in range(len(cluster_plot_data)):
y = list(cluster_plot_data[feature_grids].iloc[i].values)
ax.plot(x, y, linewidth=1, c=colors[i % 10])
def _pdp_interact_plot_title(pdp_interact_out, feature_names, ax,
multi_flag, which_class, only_inter, plot_params):
"""
Draw pdp interaction plot title
:param pdp_interact_out: instance of pdp_interact_obj
    :param feature_names: list of feature names
    :param ax: axes to plot on
:param multi_flag: whether it is a subplot of a multi-classes plot
:param which_class: which class to plot
:param only_inter: whether only draw interaction plot
:param plot_params: values of plot parameters
"""
font_family = 'Arial'
title = 'Interaction PDP between %s and %s' % (feature_names[0], feature_names[1])
title_fontsize = 14
subtitle_fontsize = 12
if type(pdp_interact_out) == dict:
subtitle1 = 'Number of unique grid points of %s: %d' % (
feature_names[0], len(pdp_interact_out['class_0'].feature_grids[0]))
subtitle2 = 'Number of unique grid points of %s: %d' % (
feature_names[1], len(pdp_interact_out['class_0'].feature_grids[1]))
else:
subtitle1 = 'Number of unique grid points of %s: %d' % (
feature_names[0], len(pdp_interact_out.feature_grids[0]))
subtitle2 = 'Number of unique grid points of %s: %d' % (
feature_names[1], len(pdp_interact_out.feature_grids[1]))
if plot_params is not None:
if 'pdp_inter' in plot_params.keys():
if 'font_family' in plot_params.keys():
font_family = plot_params['font_family']
if 'title' in plot_params.keys():
title = plot_params['title']
if 'title_fontsize' in plot_params.keys():
title_fontsize = plot_params['title_fontsize']
if 'subtitle_fontsize' in plot_params.keys():
subtitle_fontsize = plot_params['subtitle_fontsize']
ax.set_facecolor('white')
if only_inter:
ax.text(0, 0.8, title, va="top", ha="left", fontsize=title_fontsize, fontname=font_family)
if multi_flag:
ax.text(0, 0.62, "For Class %d" % which_class, va="top", ha="left", fontsize=title_fontsize,
fontname=font_family)
ax.text(0, 0.45, subtitle1, va="top", ha="left", fontsize=subtitle_fontsize, fontname=font_family,
color='grey')
ax.text(0, 0.3, subtitle2, va="top", ha="left", fontsize=subtitle_fontsize, fontname=font_family,
color='grey')
else:
ax.text(0, 0.55, subtitle1, va="top", ha="left", fontsize=subtitle_fontsize, fontname=font_family,
color='grey')
ax.text(0, 0.4, subtitle2, va="top", ha="left", fontsize=subtitle_fontsize, fontname=font_family,
color='grey')
else:
ax.text(0, 0.6, title, va="top", ha="left", fontsize=title_fontsize, fontname=font_family)
if multi_flag:
ax.text(0, 0.53, "For Class %d" % which_class, va="top", ha="left", fontsize=title_fontsize,
fontname=font_family)
ax.text(0, 0.4, subtitle1, va="top", ha="left", fontsize=subtitle_fontsize, fontname=font_family,
color='grey')
ax.text(0, 0.35, subtitle2, va="top", ha="left", fontsize=subtitle_fontsize, fontname=font_family,
color='grey')
else:
ax.text(0, 0.4, subtitle1, va="top", ha="left", fontsize=subtitle_fontsize, fontname=font_family,
color='grey')
ax.text(0, 0.35, subtitle2, va="top", ha="left", fontsize=subtitle_fontsize, fontname=font_family,
color='grey')
ax.axis('off')
def _pdp_interact_plot(pdp_interact_out, feature_names, center, plot_org_pts, plot_lines, frac_to_plot, cluster,
n_cluster_centers, cluster_method, x_quantile, figsize, plot_params, multi_flag, which_class):
"""
Plot interaction plot
:param pdp_interact_out: instance of pdp_interact_obj
a calculated pdp_interact_obj instance
:param feature_names: list of feature names
:param center: boolean, default=True
whether to center the plot
:param plot_org_pts: boolean, default=False
whether to plot out the original points
:param plot_lines: boolean, default=False
whether to plot out the individual lines
:param frac_to_plot: float or integer, default=1
        how many points or lines to plot, can be an integer or a float
:param cluster: boolean, default=False
whether to cluster the individual lines and only plot out the cluster centers
:param n_cluster_centers: integer, default=None
number of cluster centers
:param cluster_method: string, default=None
cluster method to use, default is KMeans, if 'approx' is passed, MiniBatchKMeans is used
:param x_quantile: boolean, default=False
whether to construct x axis ticks using quantiles
:param figsize: figure size
:param plot_params: dict, default=None
values of plot parameters
:param multi_flag: boolean, default=False
whether it is a subplot of a multi-class plot
:param which_class: integer, default=None
must not be None under multi-class mode
"""
if figsize is None:
fig = plt.figure(figsize=(15, 15))
else:
fig = plt.figure(figsize=figsize)
pdp_plot_params = None
if plot_params is not None:
if 'pdp' in plot_params.keys():
pdp_plot_params = plot_params['pdp']
gs = GridSpec(2, 2)
ax0 = plt.subplot(gs[0, 0])
_pdp_interact_plot_title(pdp_interact_out=pdp_interact_out, feature_names=feature_names, ax=ax0,
multi_flag=multi_flag, which_class=which_class, only_inter=False, plot_params=plot_params)
ax1 = plt.subplot(gs[0, 1])
_pdp_plot(pdp_isolate_out=pdp_interact_out.pdp_isolate_out1, feature_name=feature_names[0], center=center,
plot_org_pts=plot_org_pts, plot_lines=plot_lines, frac_to_plot=frac_to_plot, cluster=cluster,
n_cluster_centers=n_cluster_centers, cluster_method=cluster_method, x_quantile=x_quantile,
ax=ax1, plot_params=pdp_plot_params)
ax2 = plt.subplot(gs[1, 0])
_pdp_plot(pdp_isolate_out=pdp_interact_out.pdp_isolate_out2, feature_name=feature_names[1], center=center,
plot_org_pts=plot_org_pts, plot_lines=plot_lines, frac_to_plot=frac_to_plot, cluster=cluster,
n_cluster_centers=n_cluster_centers, cluster_method=cluster_method, x_quantile=x_quantile, ax=ax2,
plot_params=pdp_plot_params)
ax3 = plt.subplot(gs[1, 1])
_pdp_contour_plot(pdp_interact_out=pdp_interact_out, feature_names=feature_names, x_quantile=x_quantile,
ax=ax3, fig=fig, plot_params=plot_params)
class ColorBarLocator(object):
def __init__(self, pax, pad=60, width=20):
self.pax = pax
self.pad = pad
self.width = width
def __call__(self, ax, renderer):
x, y, w, h = self.pax.get_position().bounds
fig = self.pax.get_figure()
inv_trans = fig.transFigure.inverted()
pad, _ = inv_trans.transform([self.pad, 0])
width, _ = inv_trans.transform([self.width, 0])
return [x, y - pad, w, width]
def _pdp_contour_plot(pdp_interact_out, feature_names, x_quantile, ax, fig, plot_params):
"""
Plot PDP contour
:param pdp_interact_out: instance of pdp_interact_obj
a calculated pdp_interact_obj instance
:param feature_names: list of feature names
:param x_quantile: boolean, default=False
whether to construct x axis ticks using quantiles
:param ax: axes to plot on
:param fig: plt figure
:param plot_params: dict, default=None
values of plot parameters
"""
font_family = 'Arial'
contour_color = 'white'
contour_cmap = 'viridis'
xticks_rotation = 0
if plot_params is not None:
if 'pdp_inter' in plot_params.keys():
if 'contour_color' in plot_params['pdp_inter'].keys():
contour_color = plot_params['pdp_inter']['contour_color']
if 'contour_cmap' in plot_params['pdp_inter'].keys():
contour_cmap = plot_params['pdp_inter']['contour_cmap']
if 'font_family' in plot_params['pdp_inter'].keys():
font_family = plot_params['pdp_inter']['font_family']
if 'xticks_rotation' in plot_params.keys():
xticks_rotation = plot_params['xticks_rotation']
_axes_modify(font_family, ax)
feature_types = pdp_interact_out.feature_types
pdp = copy.deepcopy(pdp_interact_out.pdp)
new_feature_names = []
for i, feature_type in enumerate(feature_types):
if feature_type == 'onehot':
new_col = 'onehot_%d' % (i)
pdp[new_col] = pdp.apply(lambda x: list(x[pdp_interact_out.features[i]]).index(1), axis=1)
new_feature_names.append(new_col)
else:
new_feature_names.append(pdp_interact_out.features[i])
if (feature_types[0] == 'numeric') and x_quantile:
pdp[new_feature_names[0]] = pdp[new_feature_names[0]].apply(
lambda x: list(pdp_interact_out.feature_grids[0]).index(x))
if (feature_types[1] == 'numeric') and x_quantile:
pdp[new_feature_names[1]] = pdp[new_feature_names[1]].apply(
lambda x: list(pdp_interact_out.feature_grids[1]).index(x))
X, Y = np.meshgrid(pdp[new_feature_names[0]].unique(), pdp[new_feature_names[1]].unique())
Z = []
for i in range(X.shape[0]):
zs = []
for j in range(X.shape[1]):
x = X[i, j]
y = Y[i, j]
z = pdp[(pdp[new_feature_names[0]] == x) & (pdp[new_feature_names[1]] == y)]['preds'].values[0]
zs.append(z)
Z.append(zs)
Z = np.array(Z)
if feature_types[0] == 'onehot':
ax.set_xticks(range(X.shape[1]))
ax.set_xticklabels(pdp_interact_out.pdp_isolate_out1.display_columns, rotation=xticks_rotation)
elif feature_types[0] == 'binary':
ax.set_xticks([0, 1])
ax.set_xticklabels(pdp_interact_out.pdp_isolate_out1.display_columns, rotation=xticks_rotation)
else:
if x_quantile:
ax.set_xticks(range(len(pdp_interact_out.feature_grids[0])))
ax.set_xticklabels(pdp_interact_out.feature_grids[0], rotation=xticks_rotation)
if feature_types[1] == 'onehot':
ax.set_yticks(range(Y.shape[0]))
ax.set_yticklabels(pdp_interact_out.pdp_isolate_out2.display_columns)
elif feature_types[1] == 'binary':
ax.set_yticks([0, 1])
ax.set_yticklabels(pdp_interact_out.pdp_isolate_out2.display_columns)
else:
if x_quantile:
ax.set_yticks(range(len(pdp_interact_out.feature_grids[1])))
ax.set_yticklabels(pdp_interact_out.feature_grids[1])
level = np.min([X.shape[0], X.shape[1]])
    c1 = ax.contourf(X, Y, Z, level, origin='lower', cmap=contour_cmap)
    c2 = ax.contour(c1, levels=c1.levels, colors=contour_color, origin='lower')
    ax.clabel(c2, fontsize=9, inline=1)
ax.set_xlabel(feature_names[0], fontsize=10)
ax.set_ylabel(feature_names[1], fontsize=10)
ax.get_yaxis().tick_right()
if fig is not None:
cax = fig.add_axes([0, 0, 0, 0], axes_locator=ColorBarLocator(ax))
fig.colorbar(c1, cax=cax, orientation='horizontal')
| 40.730703 | 130 | 0.660111 | 470 | 0.019793 | 0 | 0 | 0 | 0 | 0 | 0 | 6,722 | 0.283079 |
6f76ec963af630c9f2623b7e32036a92ed42bb1c | 8,778 | py | Python | tests/basic_step_tests.py | kodexa-ai/kodexa | 568466b3dc4758babf2d318dc91b1c09ec60845d | [
"Apache-2.0"
]
| 1 | 2020-08-31T09:32:39.000Z | 2020-08-31T09:32:39.000Z | tests/basic_step_tests.py | kodexa-ai/kodexa | 568466b3dc4758babf2d318dc91b1c09ec60845d | [
"Apache-2.0"
]
| 13 | 2020-04-08T10:53:26.000Z | 2022-03-30T09:51:29.000Z | tests/basic_step_tests.py | kodexa-ai/kodexa | 568466b3dc4758babf2d318dc91b1c09ec60845d | [
"Apache-2.0"
]
| 1 | 2020-04-12T13:10:51.000Z | 2020-04-12T13:10:51.000Z | import os
import pytest
from kodexa import Document, Pipeline, PipelineContext, TagsToKeyValuePairExtractor, RollupTransformer
def get_test_directory():
return os.path.dirname(os.path.abspath(__file__)) + "/../test_documents/"
@pytest.mark.skip
def test_html_rollup():
document = Document.from_msgpack(open(os.path.join(get_test_directory(), 'news.kdxa'), 'rb').read())
# before rollup
assert document.select('//a')[0].content == 'HSBC'
assert document.select('//a')[1].content == 'Hang Seng Index'
assert len(document.select('//*[contentRegex(".*Hang Seng Index.*")]')[0].get_content_parts()) == 1
# Collapse out all the <a> tags
step = RollupTransformer(collapse_type_res=["a"])
step.process(document)
# after rollup
assert len(document.select('//a')) == 0
# see where the href rolled up
assert document.select('//*[contentRegex(".*Hang Seng Index.*")]')[0].get_all_content() == 'The London-headquartered bank is a heavyweight component of the Hang Seng Index . HSBC shares in Hong Kong closed 2.78% lower.'
assert len(document.select('//*[contentRegex(".*Hang Seng Index.*")]')[0].get_content_parts()) == 3
def test_tag_key_value():
document = Document.from_msgpack(open(os.path.join(get_test_directory(), 'news-tagged.kdxa'), 'rb').read())
step = TagsToKeyValuePairExtractor(store_name='test_store')
context = PipelineContext()
step.process(document, context)
assert context.get_store('test_store').count() == 45
assert context.get_store('test_store').rows[14][0] == 'LOC'
assert context.get_store('test_store').rows[14][1] == 'Europe'
def test_tag_key_value_include_exclude():
# Testing include parameter
include_tags = ['DATE', 'LOC']
document = Document.from_msgpack(open(os.path.join(get_test_directory(), 'news-tagged.kdxa'), 'rb').read())
step = TagsToKeyValuePairExtractor(store_name='test_store', include=include_tags)
context = PipelineContext()
step.process(document, context)
assert context.get_store('test_store').count() == 11
# Testing exclude parameter
exclude_tags = ['DATE', 'LOC']
document = Document.from_msgpack(open(os.path.join(get_test_directory(), 'news-tagged.kdxa'), 'rb').read())
step = TagsToKeyValuePairExtractor(store_name='test_store', exclude=exclude_tags)
context = PipelineContext()
step.process(document, context)
assert context.get_store('test_store').count() == 34
# Testing both include and exclude parameters
include_tags = ['LOC']
exclude_tags = ['DATE']
document = Document.from_msgpack(open(os.path.join(get_test_directory(), 'news-tagged.kdxa'), 'rb').read())
step = TagsToKeyValuePairExtractor(store_name='test_store', include=include_tags, exclude=exclude_tags)
context = PipelineContext()
step.process(document, context)
assert context.get_store('test_store').count() == 5
# Testing both include - this should be the same as before as 'exclude' shouldn't have really done anything
include_tags = ['LOC']
document = Document.from_msgpack(open(os.path.join(get_test_directory(), 'news-tagged.kdxa'), 'rb').read())
step = TagsToKeyValuePairExtractor(store_name='test_store', include=include_tags)
context = PipelineContext()
step.process(document, context)
assert context.get_store('test_store').count() == 5
@pytest.mark.skip
def test_rollup_of_pdf():
# first test - collapsing words and lines up to their common parent
test_doc = Document.from_kdxa(get_test_directory() + '20200709.kdxa')
# how many pre-rollup lines?
assert len(test_doc.select('//line')) == 3824
# how many pre-rollup words?
assert len(test_doc.select('//word')) == 52903
# how many pre-rollup content-areas?
assert len(test_doc.select('//content-area')) == 817
# what is the pre-rollup length of ALL the content in the document?
assert len(test_doc.get_root().get_all_content()) == 329792
rollup_pipeline = Pipeline(test_doc)
rollup_pipeline.add_step(RollupTransformer(collapse_type_res=["word", "line"], separator_character=' '))
rollup_pipeline.run()
collapsed_doc = rollup_pipeline.context.output_document
# how many post-rollup lines?
assert len(test_doc.select('//line')) == 0
# how many post-rollup words?
assert len(test_doc.select('//word')) == 0
# how many post-rollup content-areas?
assert len(test_doc.select('//content-area')) == 817
# what is the post-rollup length of ALL the content in the document?
assert len(test_doc.get_root().get_all_content()) == 329792
assert len(collapsed_doc.select("//content-area")[12].get_all_content()) == 235
# second test - just collapse the line up to its parent (content-area) - roll up the line's children
test_doc = Document.from_kdxa(get_test_directory() + '20200709.kdxa')
rollup_pipeline = Pipeline(test_doc)
rollup_pipeline.add_step(
RollupTransformer(collapse_type_res=["line"], separator_character=' ', get_all_content=True))
rollup_pipeline.run()
collapsed_doc = rollup_pipeline.context.output_document
# how many post-rollup lines?
assert len(test_doc.select('//line')) == 0
# how many post-rollup words?
assert len(test_doc.select('//word')) == 0
# how many post-rollup content-areas?
assert len(test_doc.select('//content-area')) == 817
# what is the post-rollup length of ALL the content in the document?
assert len(test_doc.get_root().get_all_content()) == 329792
# verify that we can collapse line nodes AND include their children
assert len(collapsed_doc.select("//content-area")[12].get_all_content()) == 235
# third test - select specific nodes in which we'll do the roll ups
test_doc = Document.from_kdxa(get_test_directory() + '20200709.kdxa')
node_selector = "//content-area[contentRegex('.*LOAN AGREEMENT.*', true)]"
# verify we have 3 nodes match this selector
node_matches = test_doc.select(node_selector)
assert len(node_matches) == 3
# before we rollup, let's make sure the matching nodes conform to known expectations
assert len(node_matches[0].select('//word')) == 2
assert len(node_matches[0].select('//line')) == 1
assert len(node_matches[0].select('//content-area')) == 1
assert len(node_matches[0].get_all_content()) == 14
assert len(node_matches[1].select('//word')) == 2
assert len(node_matches[1].select('//line')) == 1
assert len(node_matches[1].select('//content-area')) == 1
assert len(node_matches[1].get_all_content()) == 14
assert len(node_matches[2].select('//word')) == 71
assert len(node_matches[2].select('//line')) == 6
assert len(node_matches[2].select('//content-area')) == 1
assert len(node_matches[2].get_all_content()) == 500
rollup_pipeline = Pipeline(test_doc)
rollup_pipeline.add_step(RollupTransformer(selector="//content-area[contentRegex('.*LOAN AGREEMENT.*', true)]",
collapse_type_res=["line"], separator_character=' ',
get_all_content=True))
rollup_pipeline.run()
collapsed_doc = rollup_pipeline.context.output_document
# check those matching nodes - we shouldn't have any words or lines, but
# all other node_types should exist and the content should stay the same.
assert len(node_matches[0].select('//word')) == 0
assert len(node_matches[0].select('//line')) == 0
assert len(node_matches[0].select('//content-area')) == 1
assert len(node_matches[0].get_all_content()) == 14
assert len(node_matches[1].select('//word')) == 0
assert len(node_matches[1].select('//line')) == 0
assert len(node_matches[1].select('//content-area')) == 1
assert len(node_matches[1].get_all_content()) == 14
assert len(node_matches[2].select('//word')) == 0
assert len(node_matches[2].select('//line')) == 0
assert len(node_matches[2].select('//content-area')) == 1
assert len(node_matches[2].get_all_content()) == 500
# how many post-rollup lines? (still have some lines, but fewer than we started with)
assert len(test_doc.select('//line')) == 3816
# how many post-rollup words? (still have some words, but fewer than we started with)
assert len(test_doc.select('//word')) == 52828
# how many post-rollup content-areas? (same number of content-areas)
assert len(test_doc.select('//content-area')) == 817
# what is the post-rollup length of ALL the content in the document?
assert len(test_doc.get_root().get_all_content()) == 329792
# verify that we can collapse line nodes AND include their children
assert len(collapsed_doc.select("//content-area")[12].get_all_content()) == 235
| 45.71875 | 224 | 0.691729 | 0 | 0 | 0 | 0 | 6,339 | 0.722146 | 0 | 0 | 2,927 | 0.333447 |
6f79392055980ee88fc9adbd173f470e11c846bf | 158 | py | Python | dftimewolf/lib/containers/__init__.py | fooris/dftimewolf | 5df863dad1518e4c4109f0563efa7458df26f7d2 | [
"Apache-2.0"
]
| 1 | 2021-01-21T19:53:37.000Z | 2021-01-21T19:53:37.000Z | dftimewolf/lib/containers/__init__.py | joachimmetz/dftimewolf | 9181bd9e860a467495ca4ab66e2c3873cbcbf529 | [
"Apache-2.0"
]
| null | null | null | dftimewolf/lib/containers/__init__.py | joachimmetz/dftimewolf | 9181bd9e860a467495ca4ab66e2c3873cbcbf529 | [
"Apache-2.0"
]
| null | null | null | """Make containers available here."""
from .report import Report
from .threat_intelligence import ThreatIntelligence
from .stackdriver import StackdriverLogs
| 31.6 | 51 | 0.835443 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 37 | 0.234177 |
6f79949d19627c5156b74487a315345109a1b4e7 | 2,327 | py | Python | egs/skl_historical_poly_regression_variable_window_overmqtt/client_mqtt_random.py | COMEA-TUAS/mcx-public | 8ff486739f5332d075aeaaf7ea5dd33a04857b5c | [
"MIT"
]
| null | null | null | egs/skl_historical_poly_regression_variable_window_overmqtt/client_mqtt_random.py | COMEA-TUAS/mcx-public | 8ff486739f5332d075aeaaf7ea5dd33a04857b5c | [
"MIT"
]
| null | null | null | egs/skl_historical_poly_regression_variable_window_overmqtt/client_mqtt_random.py | COMEA-TUAS/mcx-public | 8ff486739f5332d075aeaaf7ea5dd33a04857b5c | [
"MIT"
]
| 1 | 2022-03-01T06:42:04.000Z | 2022-03-01T06:42:04.000Z | #!/usr/bin/env python3
"""Script for simulating IOT measurement stream to ModelConductor experiment."""
import pandas as pd
import numpy as np
import sqlalchemy as sqla
from datetime import datetime as dt
from time import sleep, time
import logging
import sys, os, asyncio
from hbmqtt.client import MQTTClient, ConnectException
from hbmqtt.version import get_version
from docopt import docopt
from hbmqtt.utils import read_yaml_config
from hbmqtt.mqtt.constants import QOS_0, QOS_1, QOS_2
logger = logging.getLogger(__name__)
formatter = "[%(asctime)s] :: %(levelname)s - %(message)s"
logging.basicConfig(level=logging.DEBUG, format=formatter)
csv_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'experiment_2019-10-03_20-37-36.csv')
data = np.random.rand(100, 4)
data = np.insert(data, 0, np.arange(100), axis=1)
data = pd.DataFrame(data, columns =['time', 'A', 'B', 'C', 'D'])
BROKER_URL = "mqtt://localhost:1883"
def main():
if sys.version_info[:2] < (3, 4):
logger.fatal("Error: Python 3.4+ is required")
sys.exit(-1)
config = None
config = read_yaml_config(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'default_client.yaml'))
logger.debug("Using default configuration")
loop = asyncio.get_event_loop()
client_id = "mqtt_publisher_exp"
client = MQTTClient(client_id=client_id, config=config, loop=loop)
try:
logger.info("%s Connecting to broker" % client.client_id)
loop.run_until_complete(client.connect(uri=BROKER_URL))
qos = QOS_1
topic = "topic_1"
for _, row in data.iterrows():
row['TIMING_client_request_timestamp'] = time()
message = row.to_json().encode(encoding='utf-8')
logger.info("%s Publishing to '%s'" % (client.client_id, topic))
loop.run_until_complete(client.publish(topic, message, qos))
sleep(0.1)
except KeyboardInterrupt:
loop.run_until_complete(client.disconnect())
logger.info("%s Disconnected from broker" % client.client_id)
except ConnectException as ce:
logger.fatal("connection to '%s' failed: %r" % (BROKER_URL, ce))
except asyncio.CancelledError as cae:
logger.fatal("Publish canceled due to prvious error")
if __name__ == "__main__":
main() | 33.724638 | 111 | 0.689729 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 533 | 0.22905 |
6f7aa07e116a65a70f05b7ef70691b8f299b021f | 694 | py | Python | 5/challenge2.py | roryeiffe/Adent-of-Code | 80f123663fcf04bf5f0d6733807b4a2dd53bc68c | [
"MIT"
]
| null | null | null | 5/challenge2.py | roryeiffe/Adent-of-Code | 80f123663fcf04bf5f0d6733807b4a2dd53bc68c | [
"MIT"
]
| null | null | null | 5/challenge2.py | roryeiffe/Adent-of-Code | 80f123663fcf04bf5f0d6733807b4a2dd53bc68c | [
"MIT"
]
| null | null | null | import sys
import math
L = []
f = open(sys.argv[1],"r")
for item in f:
L.append(item.strip())
def find_id(sequence):
rows = sequence[:7]
seats = sequence[7:]
upper = 127
lower = 0
for letter in rows:
half = math.ceil((upper-lower)/2)
if letter == "F":
upper -= half
if letter == "B":
lower += half
row = upper
lower = 0
upper = 7
for letter in seats:
half = math.ceil((upper-lower)/2)
if letter == "L":
upper -= half
if letter == "R":
lower += half
seat = lower
return 8*row+seat
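# Worked example (illustrative comment): for "FBFBBFFRLR", the row characters
# FBFBBFF narrow 0-127 down to row 44 and the seat characters RLR narrow 0-7
# down to column 5, so find_id("FBFBBFFRLR") returns 8 * 44 + 5 = 357.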
ids = []
max_id = 0
for sequence in L:
id = find_id(sequence)
ids.append(id)
if id > max_id:
max_id = id
ids.sort()
old = ids[0] - 1
for id in ids:
	print(id)
	if id - old == 2:
		print("missing seat id:", id - 1)
	old = id
| 13.09434 | 35 | 0.597983 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 15 | 0.021614 |
488a4b657eabc94e1d145860d1dd73207641241d | 2,890 | py | Python | Injector/injector.py | MateusGabi/Binary-Hacking-on-Super-Mario | e75292aee6b419aad2d8fe173c2fab85d9ce23ee | [
"MIT"
]
| 1 | 2018-03-26T23:46:01.000Z | 2018-03-26T23:46:01.000Z | Injector/injector.py | MateusGabi/Binary-Hacking-on-Super-Mario | e75292aee6b419aad2d8fe173c2fab85d9ce23ee | [
"MIT"
]
| 4 | 2018-03-24T15:41:12.000Z | 2018-04-08T23:49:35.000Z | Injector/injector.py | MateusGabi/Binary-Hacking-on-Super-Mario | e75292aee6b419aad2d8fe173c2fab85d9ce23ee | [
"MIT"
]
| 1 | 2018-04-03T23:49:37.000Z | 2018-04-03T23:49:37.000Z | # -*- coding: utf-8 -*-
"""
Injector.
Given a binary file, a binary conversion table generated with the Finder,
and a replacement text file, the Injector injects the new text
into the binary, swapping the in-game text.
The Injector automatically fits the text to the size of the text box,
truncating it when it is too long and inserting the line breaks in the right places.
@author Yan Uehara
"""
from __future__ import print_function
import os
import sys
import binascii
import pickle
class Injector:
def __init__(self, sfc, tbl, substituto):
self.sfc = sfc
self.tbl = tbl
self.substituto = substituto
self.bytefile = None
self.dictionary = None
self.inv_dictionary = None
self.offset = 0
"""
pega o arquivo e retorna seus bytes em um array de bytes
"""
def fileToByteArray(self):
with open(self.sfc, 'rb') as f:
hexdata = binascii.hexlify(f.read())
self.bytefile = map(''.join, zip(hexdata[::2], hexdata[1::2]))
"""
Lê a tabela binaria de conversao
"""
def readBinaryTbl(self):
with open(self.tbl, 'rb') as btblobj:
self.dictionary = pickle.Unpickler(btblobj).load()
self.offset = self.dictionary["offset"]
del self.dictionary["offset"]
self.inv_dictionary = {v: k for k, v in self.dictionary.items()}
def inject(self):
_txt = []
char_count = 0
with open(self.substituto, "r") as _txtfile:
_txt = _txtfile.read().replace('\n', '')
for numero_linha in xrange(1, 9):
for numero_coluna in xrange(1, 18):
try:
self.bytefile[self.offset] = self.inv_dictionary[_txt[char_count]]
                    if numero_coluna == 18:
self.bytefile[self.offset] = self.inv_dictionary[_txt[char_count]+"\n"]
except IndexError:
pass
char_count = char_count + 1
self.offset = self.offset + 1
# with open(self.sfc.replace(".sfc", ".modified.sfc"), "wb") as sfc_file:
sfc_file = open(self.sfc.replace(".sfc", ".modified.sfc"), "wb")
for byte in self.bytefile:
sfc_file.write(
binascii.unhexlify(byte)
)
"""
Entry-point da classe
"""
def run(self):
self.fileToByteArray()
self.readBinaryTbl()
self.inject()
if __name__ == '__main__':
if len(sys.argv) != 4:
print("Use: python extractor.py [sfc] [tbl] [substituto]")
sys.exit(1)
sfc = sys.argv[1]
tbl = sys.argv[2]
substituto = sys.argv[3]
if os.path.exists(sfc) and os.path.isfile(tbl):
inj = Injector(sfc, tbl, substituto)
inj.run()
| 29.489796 | 96 | 0.565052 | 2,033 | 0.702003 | 0 | 0 | 0 | 0 | 0 | 0 | 772 | 0.266575 |
488aa98c813700f0bcd537993c300646573e9ada | 10,556 | py | Python | var/spack/repos/scs_io/packages/cudnn/package.py | scs-lab/spack | 77956aad6aa523c2a6c7256eb3c75094bf955c35 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
]
| null | null | null | var/spack/repos/scs_io/packages/cudnn/package.py | scs-lab/spack | 77956aad6aa523c2a6c7256eb3c75094bf955c35 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
]
| null | null | null | var/spack/repos/scs_io/packages/cudnn/package.py | scs-lab/spack | 77956aad6aa523c2a6c7256eb3c75094bf955c35 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
]
| 2 | 2020-09-15T02:37:59.000Z | 2020-09-21T04:34:38.000Z | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
from spack import *
class Cudnn(Package):
"""NVIDIA cuDNN is a GPU-accelerated library of primitives for deep
neural networks"""
homepage = "https://developer.nvidia.com/cudnn"
# Latest versions available at:
# https://developer.nvidia.com/rdp/cudnn-download
# Archived versions available at:
# https://developer.nvidia.com/rdp/cudnn-archive
# Note that download links don't work from command line,
# need to use modified URLs like in url_for_version.
maintainers = ['adamjstewart']
# cuDNN 8.0.2
version('8.0.2.39-11.0-linux-x64',
sha256='672f46288b8edd98f8d156a4f1ff518201ca6de0cff67915ceaa37f6d6d86345')
version('8.0.2.39-11.0-linux-ppc64le',
sha256='b7c1ce5b1191eb007ba3455ea5f497fdce293a646545d8a6ed93e9bb06d7f057')
version('8.0.2.39-10.2-linux-x64',
sha256='c9cbe5c211360f3cfbc0fb104f0e9096b37e53f89392525679f049276b2f701f')
version('8.0.2.39-10.2-linux-ppc64le',
sha256='c32325ff84a8123491f2e58b3694885a9a672005bc21764b38874688c0e43262')
version('8.0.2.39-10.1-linux-x64',
sha256='82148a68bd6bdaab93af5e05bb1842b8ccb3ab7de7bed41f609a7616c102213d')
version('8.0.2.39-10.1-linux-ppc64le',
sha256='8196ec4f031356317baeccefbc4f61c8fccb2cf0bdef0a6431438918ddf68fb9')
# cuDNN 8.0
version('8.0.0.180-11.0-linux-x64',
sha256='9e75ea70280a77de815e0bdc85d08b67e081bc99a708b574092142344d2ba07e')
version('8.0.0.180-11.0-linux-ppc64le',
sha256='1229e94731bbca63ee7f5a239f4e1838a51a301d896f3097fbf7377d74704060')
version('8.0.0.180-10.2-linux-x64',
sha256='0c87c12358ee2b99d57c2a8c7560e3bb93e54bb929f5f8bec4964a72a2bb261d')
version('8.0.0.180-10.2-linux-ppc64le',
sha256='59e4ad6db15fcc374976e8052fe39e3f30f34079710fb3c7751a64c853d9243f')
# cuDNN 7.6.5
version('7.6.5.32-10.2-linux-x64',
sha256='600267f2caaed2fd58eb214ba669d8ea35f396a7d19b94822e6b36f9f7088c20',
preferred=True)
version('7.6.5.32-10.2-linux-ppc64le',
sha256='7dc08b6ab9331bfd12207d4802c61db1ad7cace7395b67a6e7b16efa0335668b')
version('7.6.5.32-10.1-linux-x64',
sha256='7eaec8039a2c30ab0bc758d303588767693def6bf49b22485a2c00bf2e136cb3')
version('7.6.5.32-10.1-osx-x64',
sha256='8ecce28a5ed388a2b9b2d239e08d7c550f53b79288e6d9e5eb4c152bfc711aff')
version('7.6.5.32-10.1-linux-ppc64le',
sha256='97b2faf73eedfc128f2f5762784d21467a95b2d5ba719825419c058f427cbf56')
version('7.6.5.32-10.0-linux-x64',
sha256='28355e395f0b2b93ac2c83b61360b35ba6cd0377e44e78be197b6b61b4b492ba')
version('7.6.5.32-10.0-osx-x64',
sha256='6fa0b819374da49102e285ecf7fcb8879df4d0b3cc430cc8b781cdeb41009b47')
version('7.6.5.32-10.0-linux-ppc64le',
sha256='b1717f4570083bbfc6b8b59f280bae4e4197cc1cb50e9d873c05adf670084c5b')
version('7.6.5.32-9.2-linux-x64',
sha256='a2a2c7a8ba7b16d323b651766ee37dcfdbc2b50d920f73f8fde85005424960e4')
version('7.6.5.32-9.2-linux-ppc64le',
sha256='a11f44f9a827b7e69f527a9d260f1637694ff7c1674a3e46bd9ec054a08f9a76')
version('7.6.5.32-9.0-linux-x64',
sha256='bd0a4c0090d5b02feec3f195738968690cc2470b9bc6026e6fe8ff245cd261c8')
# cuDNN 7.6.4
version('7.6.4.38-10.1-linux-x64',
sha256='32091d115c0373027418620a09ebec3658a6bc467d011de7cdd0eb07d644b099')
version('7.6.4.38-10.1-osx-x64',
sha256='bfced062c3689ced2c1fb49c7d5052e6bc3da6974c1eb707e4dcf8cd209d4236')
version('7.6.4.38-10.1-linux-ppc64le',
sha256='f3615fea50986a4dfd05d7a0cf83396dfdceefa9c209e8bf9691e20a48e420ce')
version('7.6.4.38-10.0-linux-x64',
sha256='417bb5daf51377037eb2f5c87649000ca1b9cec0acb16cfe07cb1d3e9a961dbf')
version('7.6.4.38-10.0-osx-x64',
sha256='af01ab841caec25087776a6b8fc7782883da12e590e24825ad1031f9ae0ed4b1')
version('7.6.4.38-10.0-linux-ppc64le',
sha256='c1725ad6bd7d7741e080a1e6da4b62eac027a94ac55c606cce261e3f829400bb')
version('7.6.4.38-9.2-linux-x64',
sha256='c79156531e641289b6a6952888b9637059ef30defd43c3cf82acf38d67f60a27')
version('7.6.4.38-9.2-linux-ppc64le',
sha256='98d8aae2dcd851558397a9a30b73242f257e1556be17c83650e63a0685969884')
version('7.6.4.38-9.0-linux-x64',
sha256='8db78c3623c192d4f03f3087b41c32cb0baac95e13408b5d9dabe626cb4aab5d')
# cuDNN 7.6.3
version('7.6.3.30-10.1-linux-x64',
sha256='352557346d8111e2f954c494be1a90207103d316b8777c33e62b3a7f7b708961')
version('7.6.3.30-10.1-linux-ppc64le',
sha256='f274735a8fc31923d3623b1c3d2b1d0d35bb176687077c6a4d4353c6b900d8ee')
# cuDNN 7.5.1
version('7.5.1.10-10.1-linux-x64',
sha256='2c833f43c9147d9a25a20947a4c5a5f5c33b2443240fd767f63b330c482e68e0')
version('7.5.1.10-10.1-linux-ppc64le',
sha256='a9e23bc83c970daec20874ccd1d8d80b648adf15440ecd0164818b330b1e2663')
version('7.5.1.10-10.0-linux-x64',
sha256='c0a4ec438920aa581dd567117b9c316745b4a451ac739b1e04939a3d8b229985')
version('7.5.1.10-10.0-linux-ppc64le',
sha256='d9205718da5fbab85433476f9ff61fcf4b889d216d6eea26753bbc24d115dd70')
# cuDNN 7.5.0
version('7.5.0.56-10.1-linux-x64',
sha256='c31697d6b71afe62838ad2e57da3c3c9419c4e9f5635d14b683ebe63f904fbc8')
version('7.5.0.56-10.1-linux-ppc64le',
sha256='15415eb714ab86ab6c7531f2cac6474b5dafd989479b062776c670b190e43638')
version('7.5.0.56-10.0-linux-x64',
sha256='701097882cb745d4683bb7ff6c33b8a35c7c81be31bac78f05bad130e7e0b781')
version('7.5.0.56-10.0-linux-ppc64le',
sha256='f0c1cbd9de553c8e2a3893915bd5fff57b30e368ef4c964d783b6a877869e93a')
# cuDNN 7.3.0
version('7.3.0.29-9.0-linux-x64',
sha256='403f9043ff2c7b2c5967454872275d07bca11fd41dfc7b21995eadcad6dbe49b')
# cuDNN 7.2.1
version('7.2.1.38-9.0-linux-x64',
sha256='cf007437b9ac6250ec63b89c25f248d2597fdd01369c80146567f78e75ce4e37')
# cuDNN 7.1.3
version('7.1.3-9.1-linux-x64',
sha256='dd616d3794167ceb923d706bf73e8d6acdda770751492b921ee6827cdf190228')
version('7.1.3-9.1-linux-ppc64le',
sha256='e3b4837f711b98a52faacc872a68b332c833917ef3cf87c0108f1d01af9b2931')
# cuDNN 6.0
version('6.0-8.0-linux-x64',
sha256='9b09110af48c9a4d7b6344eb4b3e344daa84987ed6177d5c44319732f3bb7f9c')
# cuDNN 5.1
version('5.1-8.0-linux-x64',
sha256='c10719b36f2dd6e9ddc63e3189affaa1a94d7d027e63b71c3f64d449ab0645ce')
# CUDA 10.2
depends_on('[email protected]:11.0.2', when='@7.6.5.32-10.2-linux-x64')
# CUDA 10.1
depends_on('[email protected]:11.0.2', when='@7.6.5.32-10.1-osx-x64')
depends_on('[email protected]:11.0.2', when='@7.6.5.32-10.1-linux-x64')
depends_on('[email protected]:11.0.2', when='@7.6.5.32-10.1-linux-ppc64le')
depends_on('[email protected]:11.0.2', when='@7.6.4.38-10.1-osx-x64')
depends_on('[email protected]:11.0.2', when='@7.6.4.38-10.1-linux-x64')
depends_on('[email protected]:11.0.2', when='@7.6.4.38-10.1-linux-ppc64le')
depends_on('[email protected]:11.0.2', when='@7.6.3.30-10.1-linux-x64')
depends_on('[email protected]:11.0.2', when='@7.6.3.30-10.1-linux-ppc64le')
depends_on('[email protected]:10.1.999', when='@7.5.0.56-10.1-linux-x64')
depends_on('[email protected]:10.1.999', when='@7.5.0.56-10.1-linux-ppc64le')
# CUDA 10.0
depends_on('[email protected]:11.0.2', when='@7.6.5.32-10.0-osx-x64')
depends_on('[email protected]:11.0.2', when='@7.6.5.32-10.0-linux-x64')
depends_on('[email protected]:11.0.2', when='@7.6.5.32-10.0-linux-ppc64le')
depends_on('[email protected]:11.0.2', when='@7.6.4.38-10.0-osx-x64')
depends_on('[email protected]:11.0.2', when='@7.6.4.38-10.0-linux-x64')
depends_on('[email protected]:11.0.2', when='@7.6.4.38-10.0-linux-ppc64le')
depends_on('[email protected]:11.0.2', when='@7.5.1.10-10.0-linux-x64')
depends_on('[email protected]:11.0.2', when='@7.5.1.10-10.0-linux-ppc64le')
depends_on('[email protected]:11.0.2', when='@7.5.0.56-10.0-linux-x64')
depends_on('[email protected]:11.0.2', when='@7.5.0.56-10.0-linux-ppc64le')
# CUDA 9.2
depends_on('[email protected]:9.2.999', when='@7.6.5.32-9.2-linux-x64')
depends_on('[email protected]:9.2.999', when='@7.6.5.32-9.2-linux-ppc64le')
depends_on('[email protected]:9.2.999', when='@7.6.4.38-9.2-linux-x64')
depends_on('[email protected]:9.2.999', when='@7.6.4.38-9.2-linux-ppc64le')
# CUDA 9.1
depends_on('[email protected]:9.1.999', when='@7.1.3-9.1-linux-x64')
depends_on('[email protected]:9.1.999', when='@7.1.3-9.1-linux-ppc64le')
# CUDA 9.0
depends_on('[email protected]:9.0.999', when='@7.6.5.32-9.0-linux-x64')
depends_on('[email protected]:9.0.999', when='@7.6.4.38-9.0-linux-x64')
depends_on('[email protected]:9.0.999', when='@7.3.0.29-9.0-linux-x64')
depends_on('[email protected]:9.0.999', when='@7.2.1.38-9.0-linux-x64')
# CUDA 8.0
depends_on('[email protected]:8.0.999', when='@6.0-8.0-linux-x64')
depends_on('[email protected]:8.0.999', when='@5.1-8.0-linux-x64')
def url_for_version(self, version):
url = 'https://developer.download.nvidia.com/compute/redist/cudnn/v{0}/cudnn-{1}-v{2}.tgz'
if version >= Version('7.2'):
directory = version[:3]
ver = version[:4]
cuda = version[4:]
elif version >= Version('7.1'):
directory = version[:3]
ver = version[:2]
cuda = version[3:]
elif version >= Version('7.0'):
directory = version[:3]
ver = version[0]
cuda = version[3:]
else:
directory = version[:2]
ver = version[:2]
cuda = version[2:]
return url.format(directory, cuda, ver)
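    # Worked example for url_for_version above (illustrative, not exhaustive):
    # for the preferred version '7.6.5.32-10.2-linux-x64' the slicing yields
    # directory '7.6.5', ver '7.6.5.32' and cuda '10.2-linux-x64', i.e.
    # .../cudnn/v7.6.5/cudnn-10.2-linux-x64-v7.6.5.32.tgz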
def setup_run_environment(self, env):
if 'target=ppc64le: platform=linux' in self.spec:
env.set('cuDNN_ROOT', os.path.join(
self.prefix, 'targets', 'ppc64le-linux'))
def install(self, spec, prefix):
install_tree('.', prefix)
if 'target=ppc64le: platform=linux' in spec:
symlink(os.path.join(prefix, 'targets', 'ppc64le-linux', 'lib'),
prefix.lib)
symlink(
os.path.join(prefix, 'targets', 'ppc64le-linux', 'include'),
prefix.include)
| 45.304721 | 98 | 0.670519 | 10,325 | 0.978117 | 0 | 0 | 0 | 0 | 0 | 0 | 6,853 | 0.649204 |
488b91ca767e9611a3e2258e676d32094fa0687f | 4,023 | py | Python | python/svm.py | mwalton/em-machineLearning | efd76961fa3b78e042ca481733152a683074d15c | [
"MIT"
]
| null | null | null | python/svm.py | mwalton/em-machineLearning | efd76961fa3b78e042ca481733152a683074d15c | [
"MIT"
]
| null | null | null | python/svm.py | mwalton/em-machineLearning | efd76961fa3b78e042ca481733152a683074d15c | [
"MIT"
]
| null | null | null | import numpy as np
import argparse
import os.path
import plots as plot
from sklearn.preprocessing import StandardScaler
from sklearn.grid_search import GridSearchCV
import time
from sklearn import svm
from sklearn.metrics import classification_report
from sklearn.metrics import accuracy_score
from sklearn.externals import joblib
from sklearn.cross_validation import StratifiedKFold
def loadData(XPath, yPath):
X = np.genfromtxt(XPath, delimiter=",", dtype="float32")
y = np.genfromtxt(yPath, delimiter=",", dtype="float32")
return (X, y)
def convertToClasses(targetVector):
return np.argmax(targetVector[:,1:5], axis=1)
def standardize(featureVector):
scaler = StandardScaler()
return scaler.fit_transform(featureVector)
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-x", "--xTrain", required = True,
help = "path to training feature set")
ap.add_argument("-y", "--yTrain", required = True,
help = "path to training target set")
ap.add_argument("-X", "--xTest", required = True,
help = "path to testing feature set")
ap.add_argument("-Y", "--yTest", required = True,
help = "path to testing target set")
ap.add_argument("-o", "--optimize", type = int, default = 0,
help = "optomization mode: 0 use default, 1 optomize, 2 use pkl model if possible")
ap.add_argument("-m", "--multiClass", type = int, default=1,
help = "exclusive multi class or regression")
ap.add_argument("-p", "--pickle", default="models/svmModel.pkl",
help = "pickle dump of model (output if optomize = 1, input if optomize = 0)")
ap.add_argument("-v", "--visualize", type=int, default=0,
help = "whether or not to show visualizations after a run")
args = vars(ap.parse_args())
(trainX, trainY) = loadData(args["xTrain"], args["yTrain"])
(testX, testY) = loadData(args["xTest"], args["yTest"])
# required scaling for SVM
trainX = standardize(trainX)
testX = standardize(testX)
if (args["multiClass"] == 1):
trainY = convertToClasses(trainY)
testY = convertToClasses(testY)
# check to see if a grid search should be done
if args["optimize"] == 1:
#configure stratified k-fold cross validation
cv = StratifiedKFold(y=trainY, n_folds=4, shuffle=True)
# perform a grid search on the 'C' and 'gamma' parameter
# of SVM
print "SEARCHING SVM"
C_range = 2. ** np.arange(-15, 15, step=1)
gamma_range = 2. ** np.arange(-15, 15, step=1)
param_grid = dict(gamma=gamma_range, C=C_range)
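    # The grid spans 2**-15 .. 2**14 in powers of two (30 values per parameter),
    # i.e. 900 (C, gamma) pairs, each evaluated on 4 stratified folds.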
start = time.time()
gs = GridSearchCV(svm.SVC(), param_grid=param_grid, cv=cv, n_jobs = -1, verbose = 2)
gs.fit(trainX, trainY)
# print diagnostic information to the user and grab the
# best model
print "done in %0.3fs" % (time.time() - start)
print "best score: %0.3f" % (gs.best_score_)
print "SVM PARAMETERS"
bestParams = gs.best_estimator_.get_params()
# loop over the parameters and print each of them out
# so they can be manually set
print("Best Estimator: %s" % gs.best_estimator_)
#for p in sorted(params.keys()):
# print "\t %s: %f" % (p, bestParams[p])
print("Accuracy Score On Validation Set: %s\n" % accuracy_score(testY, gs.predict(testX)))
# show a reminder message
print "\nIMPORTANT"
print "Now that your parameters have been searched, manually set"
print "them and re-run this script with --optomize 0"
joblib.dump(gs.best_estimator_, args["pickle"])
# otherwise, use the manually specified parameters
else:
# evaluate using SVM
if (os.path.isfile(args["pickle"]) and args["optimize"] == 2):
clf = joblib.load(args["pickle"])
else:
clf = svm.SVC()
clf.fit(trainX, trainY)
print "SVM PERFORMANCE"
pred = clf.predict(testX)
print classification_report(testY, pred)
print("Accuracy Score: %s\n" % accuracy_score(testY, pred))
if (args["visualize"] == 1):
plot.accuracy(testY, pred, "SVM")
| 35.60177 | 94 | 0.675864 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,469 | 0.36515 |
488d15bc02d47b9fb1ebe771ea194aa64ab3caea | 3,545 | py | Python | aio_logstash/formatter.py | SinaKhorami/aio-logstash | ac820bd683c79389bcc2750c753ea860eb35c686 | [
"MIT"
]
| 4 | 2019-10-23T06:58:31.000Z | 2021-11-01T17:08:34.000Z | aio_logstash/formatter.py | SinaKhorami/aio-logstash | ac820bd683c79389bcc2750c753ea860eb35c686 | [
"MIT"
]
| 1 | 2021-06-02T00:35:23.000Z | 2021-06-02T00:35:23.000Z | aio_logstash/formatter.py | SinaKhorami/aio-logstash | ac820bd683c79389bcc2750c753ea860eb35c686 | [
"MIT"
]
| 1 | 2019-10-23T06:58:45.000Z | 2019-10-23T06:58:45.000Z | import abc
import json
import logging
import socket
import sys
import time
import aio_logstash
import traceback
from aio_logstash import constants
from datetime import datetime, date
class BaseFormatter(logging.Formatter):
def __init__(self, message_type='aio_logstash', fqdn=False):
super().__init__()
self._message_type = message_type
self._host = socket.getfqdn() if fqdn else socket.gethostname()
self._interpreter = sys.executable
        self._interpreter_version = '{major}.{minor}.{micro}'.format(
major=sys.version_info.major,
minor=sys.version_info.minor,
micro=sys.version_info.micro
)
self._program_name = sys.argv[0]
@staticmethod
def _format_timestamp(_time):
tstamp = datetime.utcfromtimestamp(_time)
return tstamp.strftime("%Y-%m-%dT%H:%M:%S") + ".%03d" % (tstamp.microsecond / 1000) + "Z"
@staticmethod
def _format_stack_trace(exc_info):
if exc_info:
return ''.join(traceback.format_exception(*exc_info))
return None
@staticmethod
def _serialize(message):
return bytes(json.dumps(message), encoding='utf-8')
@abc.abstractmethod
def format(self, record):
pass
def _get_base_fields(self):
base_fields = {
'host': self._host,
'type': self._message_type,
'interpreter': self._interpreter,
            'interpreter_version': self._interpreter_version,
'program': self._program_name,
'aio_logstash_version': aio_logstash.__version__,
}
return base_fields
def _get_record_fields(self, record):
record_fields = {
'message': record.getMessage(),
'pid': record.process,
'func_name': record.funcName,
'line': record.lineno,
'logger_name': record.name,
'path': record.pathname,
'thread_name': record.threadName,
'level': record.levelname,
'process_name': record.processName,
'stack_trace': self._format_stack_trace(record.exc_info)
}
return record_fields
def _get_extra_fields(self, record):
extra_fields = dict()
for k, v in record.__dict__.items():
if k not in constants.LOG_RECORD_DEFAULT_ATTRIBUTES:
extra_fields[k] = self._get_value_repr(v)
return extra_fields
def _get_value_repr(self, value):
easy_types = (bool, float, type(None), str, int)
if isinstance(value, dict):
return {k: self._get_value_repr(v) for k, v in value.items()}
elif isinstance(value, (tuple, list)):
return [self._get_value_repr(v) for v in value]
elif isinstance(value, (datetime, date)):
return self._format_timestamp(time.mktime(value.timetuple()))
elif isinstance(value, easy_types):
return value
else:
return repr(value)
class V1Formatter(BaseFormatter):
def format(self, record):
message = {
'@timestamp': self._format_timestamp(record.created),
'@version': '1'
}
base_fields = self._get_base_fields()
message.update(base_fields)
record_fields = self._get_record_fields(record)
message.update(record_fields)
extra_fields = self._get_extra_fields(record)
message.update({
'extra': extra_fields
})
return self._serialize(message)
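# A minimal usage sketch; the helper name below is illustrative and not part of
# the package API. Formatting a plain stdlib LogRecord with V1Formatter returns
# a JSON byte string carrying '@timestamp', '@version', the base/record fields
# and any extra attributes.
def _v1_formatter_example():
    record = logging.LogRecord(
        name='demo', level=logging.INFO, pathname=__file__, lineno=1,
        msg='hello %s', args=('world',), exc_info=None)
    return V1Formatter().format(record)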
| 29.297521 | 97 | 0.618336 | 3,356 | 0.946685 | 0 | 0 | 520 | 0.146685 | 0 | 0 | 283 | 0.079831 |
488df2d8a33bbefd7d27eb53f611e19d0eba095d | 18,352 | py | Python | .venv/lib/python2.7/site-packages/celery/events/cursesmon.py | MansoorHanif/FYP-web-app | 918008d3b5eedaa904f3e720296afde9d73ac3f4 | [
"BSD-3-Clause"
]
| 4 | 2018-10-19T04:36:20.000Z | 2020-02-13T16:14:09.000Z | .venv/lib/python2.7/site-packages/celery/events/cursesmon.py | MansoorHanif/FYP-web-app | 918008d3b5eedaa904f3e720296afde9d73ac3f4 | [
"BSD-3-Clause"
]
| 3 | 2020-02-11T23:03:45.000Z | 2021-06-10T18:05:11.000Z | oo/lib/python3.5/site-packages/celery/events/cursesmon.py | chunky2808/SPOJ-history-Django-App | 490c58b1593cd3626f0ddc27fdd09c6e8d1c56e1 | [
"MIT"
]
| 1 | 2019-10-26T04:20:52.000Z | 2019-10-26T04:20:52.000Z | # -*- coding: utf-8 -*-
"""Graphical monitor of Celery events using curses."""
from __future__ import absolute_import, print_function, unicode_literals
import curses
import sys
import threading
from datetime import datetime
from itertools import count
from textwrap import wrap
from time import time
from math import ceil
from celery import VERSION_BANNER
from celery import states
from celery.app import app_or_default
from celery.five import items, values
from celery.utils.text import abbr, abbrtask
__all__ = ['CursesMonitor', 'evtop']
BORDER_SPACING = 4
LEFT_BORDER_OFFSET = 3
UUID_WIDTH = 36
STATE_WIDTH = 8
TIMESTAMP_WIDTH = 8
MIN_WORKER_WIDTH = 15
MIN_TASK_WIDTH = 16
# this module is considered experimental
# we don't care about coverage.
STATUS_SCREEN = """\
events: {s.event_count} tasks:{s.task_count} workers:{w_alive}/{w_all}
"""
class CursesMonitor(object): # pragma: no cover
"""A curses based Celery task monitor."""
keymap = {}
win = None
screen_delay = 10
selected_task = None
selected_position = 0
selected_str = 'Selected: '
foreground = curses.COLOR_BLACK
background = curses.COLOR_WHITE
online_str = 'Workers online: '
help_title = 'Keys: '
help = ('j:down k:up i:info t:traceback r:result c:revoke ^c: quit')
greet = 'celery events {0}'.format(VERSION_BANNER)
info_str = 'Info: '
def __init__(self, state, app, keymap=None):
self.app = app
self.keymap = keymap or self.keymap
self.state = state
default_keymap = {
'J': self.move_selection_down,
'K': self.move_selection_up,
'C': self.revoke_selection,
'T': self.selection_traceback,
'R': self.selection_result,
'I': self.selection_info,
'L': self.selection_rate_limit,
}
self.keymap = dict(default_keymap, **self.keymap)
self.lock = threading.RLock()
def format_row(self, uuid, task, worker, timestamp, state):
mx = self.display_width
# include spacing
detail_width = mx - 1 - STATE_WIDTH - 1 - TIMESTAMP_WIDTH
uuid_space = detail_width - 1 - MIN_TASK_WIDTH - 1 - MIN_WORKER_WIDTH
if uuid_space < UUID_WIDTH:
uuid_width = uuid_space
else:
uuid_width = UUID_WIDTH
detail_width = detail_width - uuid_width - 1
task_width = int(ceil(detail_width / 2.0))
worker_width = detail_width - task_width - 1
uuid = abbr(uuid, uuid_width).ljust(uuid_width)
worker = abbr(worker, worker_width).ljust(worker_width)
task = abbrtask(task, task_width).ljust(task_width)
state = abbr(state, STATE_WIDTH).ljust(STATE_WIDTH)
timestamp = timestamp.ljust(TIMESTAMP_WIDTH)
row = '{0} {1} {2} {3} {4} '.format(uuid, worker, task,
timestamp, state)
if self.screen_width is None:
self.screen_width = len(row[:mx])
return row[:mx]
@property
def screen_width(self):
_, mx = self.win.getmaxyx()
return mx
@property
def screen_height(self):
my, _ = self.win.getmaxyx()
return my
@property
def display_width(self):
_, mx = self.win.getmaxyx()
return mx - BORDER_SPACING
@property
def display_height(self):
my, _ = self.win.getmaxyx()
return my - 10
@property
def limit(self):
return self.display_height
def find_position(self):
if not self.tasks:
return 0
for i, e in enumerate(self.tasks):
if self.selected_task == e[0]:
return i
return 0
def move_selection_up(self):
self.move_selection(-1)
def move_selection_down(self):
self.move_selection(1)
def move_selection(self, direction=1):
if not self.tasks:
return
pos = self.find_position()
try:
self.selected_task = self.tasks[pos + direction][0]
except IndexError:
self.selected_task = self.tasks[0][0]
keyalias = {curses.KEY_DOWN: 'J',
curses.KEY_UP: 'K',
curses.KEY_ENTER: 'I'}
def handle_keypress(self):
try:
key = self.win.getkey().upper()
except Exception: # pylint: disable=broad-except
return
key = self.keyalias.get(key) or key
handler = self.keymap.get(key)
if handler is not None:
handler()
def alert(self, callback, title=None):
self.win.erase()
my, mx = self.win.getmaxyx()
y = blank_line = count(2)
if title:
self.win.addstr(next(y), 3, title,
curses.A_BOLD | curses.A_UNDERLINE)
next(blank_line)
callback(my, mx, next(y))
self.win.addstr(my - 1, 0, 'Press any key to continue...',
curses.A_BOLD)
self.win.refresh()
while 1:
try:
return self.win.getkey().upper()
except Exception: # pylint: disable=broad-except
pass
def selection_rate_limit(self):
if not self.selected_task:
return curses.beep()
task = self.state.tasks[self.selected_task]
if not task.name:
return curses.beep()
my, mx = self.win.getmaxyx()
r = 'New rate limit: '
self.win.addstr(my - 2, 3, r, curses.A_BOLD | curses.A_UNDERLINE)
self.win.addstr(my - 2, len(r) + 3, ' ' * (mx - len(r)))
rlimit = self.readline(my - 2, 3 + len(r))
if rlimit:
reply = self.app.control.rate_limit(task.name,
rlimit.strip(), reply=True)
self.alert_remote_control_reply(reply)
def alert_remote_control_reply(self, reply):
def callback(my, mx, xs):
y = count(xs)
if not reply:
self.win.addstr(
next(y), 3, 'No replies received in 1s deadline.',
curses.A_BOLD + curses.color_pair(2),
)
return
for subreply in reply:
curline = next(y)
host, response = next(items(subreply))
host = '{0}: '.format(host)
self.win.addstr(curline, 3, host, curses.A_BOLD)
attr = curses.A_NORMAL
text = ''
if 'error' in response:
text = response['error']
attr |= curses.color_pair(2)
elif 'ok' in response:
text = response['ok']
attr |= curses.color_pair(3)
self.win.addstr(curline, 3 + len(host), text, attr)
return self.alert(callback, 'Remote Control Command Replies')
def readline(self, x, y):
buffer = str()
curses.echo()
try:
i = 0
while 1:
ch = self.win.getch(x, y + i)
if ch != -1:
if ch in (10, curses.KEY_ENTER): # enter
break
if ch in (27,):
buffer = str()
break
buffer += chr(ch)
i += 1
finally:
curses.noecho()
return buffer
def revoke_selection(self):
if not self.selected_task:
return curses.beep()
reply = self.app.control.revoke(self.selected_task, reply=True)
self.alert_remote_control_reply(reply)
def selection_info(self):
if not self.selected_task:
return
def alert_callback(mx, my, xs):
my, mx = self.win.getmaxyx()
y = count(xs)
task = self.state.tasks[self.selected_task]
info = task.info(extra=['state'])
infoitems = [
('args', info.pop('args', None)),
('kwargs', info.pop('kwargs', None))
] + list(info.items())
for key, value in infoitems:
if key is None:
continue
value = str(value)
curline = next(y)
keys = key + ': '
self.win.addstr(curline, 3, keys, curses.A_BOLD)
wrapped = wrap(value, mx - 2)
if len(wrapped) == 1:
self.win.addstr(
curline, len(keys) + 3,
abbr(wrapped[0],
self.screen_width - (len(keys) + 3)))
else:
for subline in wrapped:
nexty = next(y)
if nexty >= my - 1:
subline = ' ' * 4 + '[...]'
elif nexty >= my:
break
self.win.addstr(
nexty, 3,
abbr(' ' * 4 + subline, self.screen_width - 4),
curses.A_NORMAL,
)
return self.alert(
alert_callback, 'Task details for {0.selected_task}'.format(self),
)
def selection_traceback(self):
if not self.selected_task:
return curses.beep()
task = self.state.tasks[self.selected_task]
if task.state not in states.EXCEPTION_STATES:
return curses.beep()
def alert_callback(my, mx, xs):
y = count(xs)
for line in task.traceback.split('\n'):
self.win.addstr(next(y), 3, line)
return self.alert(
alert_callback,
'Task Exception Traceback for {0.selected_task}'.format(self),
)
def selection_result(self):
if not self.selected_task:
return
def alert_callback(my, mx, xs):
y = count(xs)
task = self.state.tasks[self.selected_task]
result = (getattr(task, 'result', None) or
getattr(task, 'exception', None))
for line in wrap(result or '', mx - 2):
self.win.addstr(next(y), 3, line)
return self.alert(
alert_callback,
'Task Result for {0.selected_task}'.format(self),
)
def display_task_row(self, lineno, task):
state_color = self.state_colors.get(task.state)
attr = curses.A_NORMAL
if task.uuid == self.selected_task:
attr = curses.A_STANDOUT
timestamp = datetime.utcfromtimestamp(
task.timestamp or time(),
)
timef = timestamp.strftime('%H:%M:%S')
hostname = task.worker.hostname if task.worker else '*NONE*'
line = self.format_row(task.uuid, task.name,
hostname,
timef, task.state)
self.win.addstr(lineno, LEFT_BORDER_OFFSET, line, attr)
if state_color:
self.win.addstr(lineno,
len(line) - STATE_WIDTH + BORDER_SPACING - 1,
task.state, state_color | attr)
def draw(self):
with self.lock:
win = self.win
self.handle_keypress()
x = LEFT_BORDER_OFFSET
y = blank_line = count(2)
my, mx = win.getmaxyx()
win.erase()
win.bkgd(' ', curses.color_pair(1))
win.border()
win.addstr(1, x, self.greet, curses.A_DIM | curses.color_pair(5))
next(blank_line)
win.addstr(next(y), x, self.format_row('UUID', 'TASK',
'WORKER', 'TIME', 'STATE'),
curses.A_BOLD | curses.A_UNDERLINE)
tasks = self.tasks
if tasks:
for row, (uuid, task) in enumerate(tasks):
if row > self.display_height:
break
if task.uuid:
lineno = next(y)
self.display_task_row(lineno, task)
# -- Footer
next(blank_line)
win.hline(my - 6, x, curses.ACS_HLINE, self.screen_width - 4)
# Selected Task Info
if self.selected_task:
win.addstr(my - 5, x, self.selected_str, curses.A_BOLD)
info = 'Missing extended info'
detail = ''
try:
selection = self.state.tasks[self.selected_task]
except KeyError:
pass
else:
info = selection.info()
if 'runtime' in info:
info['runtime'] = '{0:.2f}'.format(info['runtime'])
if 'result' in info:
info['result'] = abbr(info['result'], 16)
info = ' '.join(
'{0}={1}'.format(key, value)
for key, value in items(info)
)
detail = '... -> key i'
infowin = abbr(info,
self.screen_width - len(self.selected_str) - 2,
detail)
win.addstr(my - 5, x + len(self.selected_str), infowin)
# Make ellipsis bold
if detail in infowin:
detailpos = len(infowin) - len(detail)
win.addstr(my - 5, x + len(self.selected_str) + detailpos,
detail, curses.A_BOLD)
else:
win.addstr(my - 5, x, 'No task selected', curses.A_NORMAL)
# Workers
if self.workers:
win.addstr(my - 4, x, self.online_str, curses.A_BOLD)
win.addstr(my - 4, x + len(self.online_str),
', '.join(sorted(self.workers)), curses.A_NORMAL)
else:
win.addstr(my - 4, x, 'No workers discovered.')
# Info
win.addstr(my - 3, x, self.info_str, curses.A_BOLD)
win.addstr(
my - 3, x + len(self.info_str),
STATUS_SCREEN.format(
s=self.state,
w_alive=len([w for w in values(self.state.workers)
if w.alive]),
w_all=len(self.state.workers),
),
curses.A_DIM,
)
# Help
self.safe_add_str(my - 2, x, self.help_title, curses.A_BOLD)
self.safe_add_str(my - 2, x + len(self.help_title), self.help,
curses.A_DIM)
win.refresh()
def safe_add_str(self, y, x, string, *args, **kwargs):
if x + len(string) > self.screen_width:
string = string[:self.screen_width - x]
self.win.addstr(y, x, string, *args, **kwargs)
def init_screen(self):
with self.lock:
self.win = curses.initscr()
self.win.nodelay(True)
self.win.keypad(True)
curses.start_color()
curses.init_pair(1, self.foreground, self.background)
# exception states
curses.init_pair(2, curses.COLOR_RED, self.background)
# successful state
curses.init_pair(3, curses.COLOR_GREEN, self.background)
# revoked state
curses.init_pair(4, curses.COLOR_MAGENTA, self.background)
# greeting
curses.init_pair(5, curses.COLOR_BLUE, self.background)
# started state
curses.init_pair(6, curses.COLOR_YELLOW, self.foreground)
self.state_colors = {states.SUCCESS: curses.color_pair(3),
states.REVOKED: curses.color_pair(4),
states.STARTED: curses.color_pair(6)}
for state in states.EXCEPTION_STATES:
self.state_colors[state] = curses.color_pair(2)
curses.cbreak()
def resetscreen(self):
with self.lock:
curses.nocbreak()
self.win.keypad(False)
curses.echo()
curses.endwin()
def nap(self):
curses.napms(self.screen_delay)
@property
def tasks(self):
return list(self.state.tasks_by_time(limit=self.limit))
@property
def workers(self):
return [hostname for hostname, w in items(self.state.workers)
if w.alive]
class DisplayThread(threading.Thread): # pragma: no cover
def __init__(self, display):
self.display = display
self.shutdown = False
threading.Thread.__init__(self)
def run(self):
while not self.shutdown:
self.display.draw()
self.display.nap()
def capture_events(app, state, display): # pragma: no cover
def on_connection_error(exc, interval):
print('Connection Error: {0!r}. Retry in {1}s.'.format(
exc, interval), file=sys.stderr)
while 1:
print('-> evtop: starting capture...', file=sys.stderr)
with app.connection_for_read() as conn:
try:
conn.ensure_connection(on_connection_error,
app.conf.broker_connection_max_retries)
recv = app.events.Receiver(conn, handlers={'*': state.event})
display.resetscreen()
display.init_screen()
recv.capture()
except conn.connection_errors + conn.channel_errors as exc:
print('Connection lost: {0!r}'.format(exc), file=sys.stderr)
def evtop(app=None): # pragma: no cover
"""Start curses monitor."""
app = app_or_default(app)
state = app.events.State()
display = CursesMonitor(state, app)
display.init_screen()
refresher = DisplayThread(display)
refresher.start()
try:
capture_events(app, state, display)
except Exception:
refresher.shutdown = True
refresher.join()
display.resetscreen()
raise
except (KeyboardInterrupt, SystemExit):
refresher.shutdown = True
refresher.join()
display.resetscreen()
if __name__ == '__main__': # pragma: no cover
evtop()
| 33.797422 | 78 | 0.520325 | 16,025 | 0.873202 | 0 | 0 | 679 | 0.036999 | 0 | 0 | 1,483 | 0.080809 |
488ea1167c4ff5c98e7760397218e331d094d166 | 1,705 | py | Python | features/extraction/3_extraction/feature_extractors/utilization.py | bayesimpact/readmission-risk | 5b0f6c93826601e2dbb9c8c276e92801772e17c4 | [
"Apache-2.0"
]
| 19 | 2016-10-06T18:10:36.000Z | 2018-04-04T02:30:09.000Z | features/extraction/3_extraction/feature_extractors/utilization.py | BeaconLabs/readmission-risk | 5b0f6c93826601e2dbb9c8c276e92801772e17c4 | [
"Apache-2.0"
]
| 2 | 2017-10-26T19:22:58.000Z | 2017-11-16T07:44:58.000Z | features/extraction/3_extraction/feature_extractors/utilization.py | bayesimpact/readmission-risk | 5b0f6c93826601e2dbb9c8c276e92801772e17c4 | [
"Apache-2.0"
]
| 9 | 2016-11-15T14:13:20.000Z | 2021-12-19T20:27:58.000Z | """A feature extractor for patients' utilization."""
from __future__ import absolute_import
import logging
import pandas as pd
from sutter.lib import postgres
from sutter.lib.feature_extractor import FeatureExtractor
log = logging.getLogger('feature_extraction')
class UtilizationExtractor(FeatureExtractor):
"""
Generates features related to the number of previous ER visits.
Features:
`pre_[n]_month_[adm_type]` - Number of [adm_type] (emergency, inpatient, outpatient) visits
                                 during the [n] (3, 6, 12) months before admission
    `er_visits_lace` - LACE score associated with number of ER visits:
                       the number of emergency visits during the 6 months
                       before admission, capped at 4.
"""
def extract(self):
query = """
SELECT
*
FROM {}.bayes_vw_feature_utilization
""".format(self._schema)
engine = postgres.get_connection()
res = pd.read_sql(query, engine)
log.info('The pre-pivot table has %d rows.' % len(res))
pivoted = pd.pivot_table(data=res, index='hsp_acct_study_id', columns='pre_adm_type',
aggfunc=sum, dropna=True, fill_value=0,
values=['pre_3_month', 'pre_6_month', 'pre_12_month'])
df_columns = [top + "_" + bottom.lower() for top, bottom in pivoted.columns.values]
df = pd.DataFrame(index=res.hsp_acct_study_id.unique())
df[df_columns] = pivoted
df.fillna(0, inplace=True)
df['er_visits_lace'] = df['pre_6_month_emergency'].apply(lambda cnt: min(cnt, 4))
return self.emit_df(df)
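# A minimal sketch of the pivot/naming step above; the tiny frame and helper
# name are illustrative only. Long-format rows per admission type become flat
# columns such as 'pre_6_month_emergency'.
def _pivot_naming_example():
    res = pd.DataFrame({
        'hsp_acct_study_id': [1, 1, 2],
        'pre_adm_type': ['EMERGENCY', 'INPATIENT', 'EMERGENCY'],
        'pre_3_month': [1, 0, 2],
        'pre_6_month': [2, 1, 3],
        'pre_12_month': [2, 1, 4],
    })
    pivoted = pd.pivot_table(data=res, index='hsp_acct_study_id',
                             columns='pre_adm_type', aggfunc=sum,
                             dropna=True, fill_value=0,
                             values=['pre_3_month', 'pre_6_month', 'pre_12_month'])
    return [top + "_" + bottom.lower() for top, bottom in pivoted.columns.values]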
| 34.1 | 95 | 0.626979 | 1,434 | 0.841056 | 0 | 0 | 0 | 0 | 0 | 0 | 789 | 0.462757 |
4893210d0b7c805a88b25dd46688e23dd6ed78a0 | 6,517 | py | Python | safe_control_gym/math_and_models/normalization.py | catgloss/safe-control-gym | b3f69bbed8577f64fc36d23677bf50027e991b2d | [
"MIT"
]
| 120 | 2021-08-16T13:55:47.000Z | 2022-03-31T10:31:42.000Z | safe_control_gym/math_and_models/normalization.py | catgloss/safe-control-gym | b3f69bbed8577f64fc36d23677bf50027e991b2d | [
"MIT"
]
| 10 | 2021-10-19T07:19:23.000Z | 2022-03-24T18:43:02.000Z | safe_control_gym/math_and_models/normalization.py | catgloss/safe-control-gym | b3f69bbed8577f64fc36d23677bf50027e991b2d | [
"MIT"
]
| 24 | 2021-08-28T17:21:09.000Z | 2022-03-31T10:31:44.000Z | """Perform normalization on inputs or rewards.
"""
import numpy as np
import torch
from gym.spaces import Box
def normalize_angle(x):
"""Wraps input angle to [-pi, pi].
"""
return ((x + np.pi) % (2 * np.pi)) - np.pi
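# Worked example: normalize_angle(3 * np.pi / 2) maps to -np.pi / 2, while
# angles already in [-pi, pi) are returned unchanged.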
class RunningMeanStd():
"""Calulates the running mean and std of a data stream.
Attributes:
mean (np.array): mean of data stream.
var (np.array): variance of data stream.
count (float): total count of data steam.
"""
def __init__(self, epsilon=1e-4, shape=()):
"""Initializes containers for data mean and variance.
Args:
epsilon (float): helps with arithmetic issues.
shape (tuple): the shape of the data stream's output.
"""
self.mean = np.zeros(shape, np.float64)
self.var = np.ones(shape, np.float64)
self.count = epsilon
def update(self, arr):
"""Update current stats with a new stream of data.
Args:
            arr (np.array): batch of data with shape (batch_size, *shape).
"""
batch_mean = np.mean(arr, axis=0)
batch_var = np.var(arr, axis=0)
batch_count = arr.shape[0]
self.update_from_moments(batch_mean, batch_var, batch_count)
def update_from_moments(self, batch_mean, batch_var, batch_count):
"""Util function for `update` method.
"""
delta = batch_mean - self.mean
tot_count = self.count + batch_count
new_mean = self.mean + delta * batch_count / tot_count
m_a = self.var * self.count
m_b = batch_var * batch_count
m_2 = m_a + m_b + np.square(delta) * self.count * batch_count / (self.count + batch_count)
new_var = m_2 / (self.count + batch_count)
new_count = batch_count + self.count
self.mean = new_mean
self.var = new_var
self.count = new_count
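# A minimal self-check sketch; the helper name is illustrative and not part of
# the module API. Updating in two batches should closely match the statistics
# of the concatenated data; the small `epsilon` pseudo-count makes the match
# approximate rather than exact.
def _running_mean_std_example():
    rms = RunningMeanStd(shape=(3,))
    a = np.random.randn(100, 3)
    b = np.random.randn(50, 3) + 1.0
    rms.update(a)
    rms.update(b)
    full = np.concatenate([a, b], axis=0)
    assert np.allclose(rms.mean, full.mean(axis=0), atol=1e-2)
    assert np.allclose(rms.var, full.var(axis=0), atol=1e-2)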
class BaseNormalizer(object):
"""Template/default normalizer.
Attributes:
read_only (bool): if to freeze the current stats being tracked.
"""
def __init__(self, read_only=False):
self.read_only = read_only
def set_read_only(self):
self.read_only = True
def unset_read_only(self):
self.read_only = False
def __call__(self, x, *args, **kwargs):
"""Invokes normalization on the given input.
"""
return x
def state_dict(self):
"""Returns snapshot of current stats.
"""
return {}
def load_state_dict(self, _):
"""Restores the stats from a snapshot.
"""
pass
class MeanStdNormalizer(BaseNormalizer):
"""Normalize by the running average.
"""
def __init__(self, shape=(), read_only=False, clip=10.0, epsilon=1e-8):
"""Initializes the data stream tracker.
Args:
shape (tuple): shape of data being tracked.
read_only (bool): if to freeze the tracker.
clip (float): bounds on the data.
            epsilon (float): offset to avoid divide-by-zero.
"""
super().__init__(read_only)
self.read_only = read_only
self.rms = RunningMeanStd(shape=shape)
self.clip = clip
self.epsilon = epsilon
def __call__(self, x):
"""Update tracker given data, optionally normalize the data.
"""
x = np.asarray(x)
if not self.read_only:
self.rms.update(x)
return np.clip(
(x - self.rms.mean) / np.sqrt(self.rms.var + self.epsilon),
-self.clip, self.clip)
def state_dict(self):
return {'mean': self.rms.mean, 'var': self.rms.var}
def load_state_dict(self, saved):
self.rms.mean = saved['mean']
self.rms.var = saved['var']
class RewardStdNormalizer(MeanStdNormalizer):
"""Reward normalization by running average of returns.
Papers:
* arxiv.org/pdf/1808.04355.pdf
* arxiv.org/pdf/1810.12894.pdf
Also see:
* github.com/openai/baselines/issues/538
"""
def __init__(self, gamma=0.99, read_only=False, clip=10.0, epsilon=1e-8):
"""Initializes the data stream tracker.
Args:
gamma (float): discount factor for rewards.
read_only (bool): if to freeze the tracker.
clip (float): bounds on the data.
            epsilon (float): offset to avoid divide-by-zero.
"""
# Reward has default shape (1,) or just ().
super().__init__((), read_only, clip, epsilon)
self.gamma = gamma
self.ret = None
def __call__(self, x, dones):
"""Update tracker given reward, optionally normalize the reward (only scaling).
"""
x = np.asarray(x)
if not self.read_only:
# Track running average of forward discounted returns.
if self.ret is None:
self.ret = np.zeros(x.shape[0])
self.ret = self.ret * self.gamma + x
self.rms.update(self.ret)
# Prevent information leak from previous episodes.
            self.ret[dones.astype(bool)] = 0
return np.clip(x / np.sqrt(self.rms.var + self.epsilon), -self.clip, self.clip)
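# A minimal rollout-style sketch; the helper name and shapes are illustrative
# only. The normalizer is fed per-env rewards and done flags each step and
# returns rewards scaled by the running std of discounted returns.
def _reward_normalizer_example():
    normalizer = RewardStdNormalizer(gamma=0.99)
    rewards, dones = np.ones(4), np.zeros(4)
    for _ in range(10):
        scaled = normalizer(rewards, dones)
    return scaled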
class RescaleNormalizer(BaseNormalizer):
"""Apply constant scaling.
"""
def __init__(self, coef=1.0):
"""Initializes with fixed scaling constant.
Args:
coef (float): scaling coefficient.
"""
super().__init__(self)
self.coef = coef
def __call__(self, x):
"""Scale the input.
"""
if not isinstance(x, torch.Tensor):
x = np.asarray(x)
return self.coef * x
class ImageNormalizer(RescaleNormalizer):
"""Scale image pixles from [0,255] to [0,1].
"""
def __init__(self):
super().__init__(self, 1.0 / 255)
class ActionUnnormalizer(BaseNormalizer):
"""Assumes policy output action is in [-1,1], unnormalize it for gym env.
"""
def __init__(self, action_space):
"""Defines the mean and std for the bounded action space.
"""
super().__init__()
assert isinstance(action_space, Box), "action space must be gym.spaces.Box"
low, high = action_space.low, action_space.high
self.mean = (low + high) / 2.0
self.std = (high - low) / 2.0
def __call__(self, action):
"""Unnormalizes given input action.
"""
x = np.asarray(action)
return self.mean + x * self.std
| 27.041494 | 98 | 0.584778 | 6,263 | 0.961025 | 0 | 0 | 0 | 0 | 0 | 0 | 2,690 | 0.412767 |
48935c63c2620e531593d07e9af2473ca805cfae | 5,125 | py | Python | networking/pycat.py | itsbriany/PythonSec | eda5dc3f7ac069cd77d9525e93be5cfecc00db16 | [
"MIT"
]
| 1 | 2016-01-12T19:38:59.000Z | 2016-01-12T19:38:59.000Z | networking/pycat.py | itsbriany/Security-Tools | eda5dc3f7ac069cd77d9525e93be5cfecc00db16 | [
"MIT"
]
| null | null | null | networking/pycat.py | itsbriany/Security-Tools | eda5dc3f7ac069cd77d9525e93be5cfecc00db16 | [
"MIT"
]
| null | null | null | #!/usr/bin/python
import socket
import threading
import sys # Support command line args
import getopt # Support command line option parsing
import os # Kill the application
import signal # Catch an interrupt
import time # Thread sleeping
# Global variables definitions
target = ""
port = False
listen = False
command = ""
upload = False
# This tool should be able to replace netcat
# The tool should be able to act as a server and as a client depending on the arguments
###############################################################################
# Start menu
def menu():
print "pycat, a python implementation of netcat"
print ""
print "Usage:"
print ""
print "-h, --help: Display this menu"
print "-t, --target: The IP to bind to"
print "-l, --listen: Listen mode (act as a server)"
print "-p, --port: The port number to bind to"
print "-c, --command: The command you wish to execute via pycat"
print "-u --upload: Set this flag to upload a file"
print ""
print ""
print "By default, pycat will act as a client unless the -p flag is specified"
print ""
print "Examples will happen later..."
print ""
sys.exit(0)
###############################################################################
# Connect as a client
def connectMode(client_socket, address):
global kill_thread
# Get raw input which is terminated with \n
try:
while True:
buffer = raw_input()
buffer += "\n"
if buffer == "quit\n" or buffer == "q\n":
client_socket.close()
sys.exit(0)
if not client_socket:
print "[!!] No connection on the other end!"
client_socket.close()
break
client_socket.send(buffer)
except Exception as err:
print "[!!] Caught exception in client thread: %s!" % err
client_socket.close()
###############################################################################
# Handle the connection from the client.
def handle_client(client_socket, address):
print "[*] Got a connection from %s:%d" % (address[0], address[1])
try:
while True:
# Wait for a response
request = client_socket.recv(4096)
# If the client disconnects, request is 0
if not request:
break
# Output what the client has given us
print request
client_socket.close()
except Exception as err:
print "[!!] Caught exception in server thread: %s" % err
client_socket.close()
sys.exit(0)
###############################################################################
# This is the listening functionality of the program
def serverMode():
global target
global port
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if not len(target):
target = "0.0.0.0"
try:
server.bind((target, port))
except socket.error as err:
print err
sys.exit(0)
server.listen(5)
print "[*] Listening on %s:%d" % (target, port)
while True:
try:
# This will wait until we get a connection
client, address = server.accept()
# Create a thread to handle incoming responses
# Daemonic threads will die as soon as the main thread dies
listen_thread = threading.Thread(target = handle_client, args = (client, address))
listen_thread.daemon = True
listen_thread.start()
# Create a thread to handle outgoing requests
client_thread = threading.Thread(target = connectMode, args = (client, address))
client_thread.daemon = True
client_thread.start()
time.sleep(1)
'''
            # The problem is that python does NOT pass by reference!
This means that the sockets are simply copies and the actual socket that gets closed
does not do anything!
'''
except (KeyboardInterrupt, SystemExit):
print "Cleaning up sockets..."
client.close()
sys.stdout.write("Exiting form main thread...\n")
sys.exit(0)
###############################################################################
# main definition
def main():
global target
global listen
global port
global command
global upload
# Set the option
# If the options are not parsing properly, then try gnu_getopt
if not len(sys.argv[1:]):
menu()
try:
options, remainder = getopt.getopt(sys.argv[1:], 'ht:lp:cu', ['help', 'target', 'listen', 'port', 'command', 'upload'])
except getopt.GetoptError as err:
print str(err)
menu()
for opt, arg in options:
if opt in ('-h', '--help'):
menu()
elif opt in ('-t', '--target'):
target = arg
elif opt in ('-l', '--listen'):
listen = True
elif opt in ('-p', '--port'):
port = int(arg)
elif opt in ('-c', '--command'):
command = arg
elif opt in ('-u', '--upload'):
upload = True
else:
assert False, "Invalid option" # This throws an error
print "Target: %s" % target
print "Listen: %s" % listen
print "Port: %d" % port
if port > 0:
if not listen and len(target):
print "Client mode"
elif listen:
serverMode()
else: # This could probably be cleaned up a little since the functions will have looping
menu()
else:
menu()
###############################################################################
# Program execution
try:
main()
except KeyboardInterrupt:
print ""
sys.exit(0)
| 23.617512 | 121 | 0.607415 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,533 | 0.494244 |
48938090ba940fdf1245ccfb1e1b41da0dfdb8ec | 4,356 | py | Python | code/striatal_model/neuron_model_tuning.py | weidel-p/go-robot-nogo-robot | 026f1f753125089a03504320cc94a76888a0efc5 | [
"MIT"
]
| 1 | 2020-09-23T22:16:10.000Z | 2020-09-23T22:16:10.000Z | code/striatal_model/neuron_model_tuning.py | weidel-p/go-robot-nogo-robot | 026f1f753125089a03504320cc94a76888a0efc5 | [
"MIT"
]
| null | null | null | code/striatal_model/neuron_model_tuning.py | weidel-p/go-robot-nogo-robot | 026f1f753125089a03504320cc94a76888a0efc5 | [
"MIT"
]
| null | null | null | import nest
import pylab as pl
import pickle
from nest import voltage_trace
from nest import raster_plot as rplt
import numpy as np
from params import *
seed = [np.random.randint(0, 9999999)] * num_threads
def calcFI():
#amplitudesList = np.arange(3.5,4.5,0.1)
amplitudesList = np.arange(100, 500, 50.)
listD1 = []
listD2 = []
for amp in amplitudesList:
nest.ResetKernel()
nest.SetKernelStatus({"resolution": timestep, "overwrite_files": True, "rng_seeds": seed,
"print_time": True, "local_num_threads": num_threads})
nest.CopyModel("iaf_cond_alpha", "d1", d1_params)
#nest.CopyModel("izhikevich", "d1", d1_params_iz)
nest.CopyModel("iaf_cond_alpha", "d2", d2_params)
#nest.CopyModel("izhikevich", "d2", d2_params_iz)
d1 = nest.Create("d1", 1)
d2 = nest.Create("d2", 1)
dc = nest.Create("dc_generator", 1)
sd = nest.Create("spike_detector", 2)
mult = nest.Create("multimeter", 1, params={
"withgid": True, "withtime": True, "record_from": ["V_m"]})
nest.Connect(d1, [sd[0]])
nest.Connect(d2, [sd[1]])
nest.Connect(dc, d1)
nest.Connect(dc, d2)
nest.Connect(mult, d1)
nest.Connect(mult, d2)
nest.SetStatus(dc, params={"amplitude": amp})
nest.Simulate(10000.)
evs_d1 = nest.GetStatus([sd[0]])[0]["events"]["senders"]
ts_d1 = nest.GetStatus([sd[0]])[0]["events"]["times"]
evs_d2 = nest.GetStatus([sd[1]])[0]["events"]["senders"]
ts_d2 = nest.GetStatus([sd[1]])[0]["events"]["times"]
listD1.append(len(ts_d1) / 10.0)
listD2.append(len(ts_d2) / 10.0)
# voltage_trace.from_device(mult)
# pl.show()
FI = dict()
FI["d1"] = listD1
FI["d2"] = listD2
pickle.dump(FI, open("../../data/FI.pickle", "w"))
pl.figure()
pl.text(70, 62, "A", fontweight='bold', fontsize=15)
pl.plot(amplitudesList, listD1, 'bo-', label='D1', linewidth=1.5)
pl.plot(amplitudesList, listD2, 'go-', label='D2', linewidth=1.5)
pl.legend(loc='best')
pl.xlabel("Amplitude(pA)", fontweight='bold', fontsize=14)
pl.ylabel("Firing rate (sps)", fontweight='bold', fontsize=14)
for x in pl.gca().get_xticklabels():
x.set_fontweight('bold')
x.set_fontsize(10)
for x in pl.gca().get_yticklabels():
x.set_fontweight('bold')
x.set_fontsize(10)
pl.savefig("../../data/FI.pdf")
print "d1", FI["d1"], "d2", FI["d2"], amplitudesList
pl.figure()
voltage_trace.from_device(mult)
pl.show()
def checkConninMV():
nest.ResetKernel()
nest.SetKernelStatus({"resolution": timestep, "overwrite_files": True, "rng_seeds": seed,
"print_time": True, "local_num_threads": num_threads})
nest.CopyModel("iaf_cond_alpha", "d21", d2_params)
#nest.CopyModel("izhikevich", "d1", d1_params_iz)
nest.CopyModel("iaf_cond_alpha", "d22", d2_params)
#nest.CopyModel("izhikevich", "d2", d2_params_iz)
d21 = nest.Create("d21", 1)
d22 = nest.Create("d22", 1)
nest.SetStatus(d22, {'I_e': 27.}) # Has to be tuned so that d2 is at -80
# nest.SetStatus(d1,{'I_e':69.}) # Has to be tuned so that d1 is at -80
dc = nest.Create("dc_generator", 1)
sd = nest.Create("spike_detector", 2)
mult = nest.Create("multimeter", 1, params={
"withgid": True, "withtime": True, "record_from": ["V_m"]})
nest.Connect(d21, [sd[0]])
nest.Connect(d22, [sd[1]])
nest.Connect(dc, d21)
# nest.Connect(dc,d2)
# nest.Connect(mult,d1)
nest.Connect(mult, d22)
nest.Connect(d21, d22, syn_spec={'weight': jd2d2})
nest.SetStatus(dc, params={"amplitude": 250.})
nest.Simulate(1000.)
evs_d1 = nest.GetStatus([sd[0]])[0]["events"]["senders"]
ts_d1 = nest.GetStatus([sd[0]])[0]["events"]["times"]
V_m = nest.GetStatus(mult)[0]["events"]["V_m"]
ts = nest.GetStatus(mult)[0]["events"]["times"]
inds = np.where(ts > 400.)
Vmmin = np.min(V_m[inds])
print "conn_strength", Vmmin + 80.
# pl.figure(1)
# rplt.from_device(sd)
pl.figure(2)
voltage_trace.from_device(mult)
pl.plot(ts_d1, np.ones(len(ts_d1)) * -80., 'r|', markersize=10)
pl.show()
calcFI()
# checkConninMV()
| 32.75188 | 97 | 0.597107 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,223 | 0.280762 |
4893c3ed4760195e110268be8d490ec224a54ecd | 1,434 | py | Python | fastf1/tests/test_livetiming.py | JellybeanAsh/Fast-F1 | cf0cb20fdd3e89fdee3755097722db5ced3a23b5 | [
"MIT"
]
| 690 | 2020-07-31T15:37:59.000Z | 2022-03-31T20:51:46.000Z | fastf1/tests/test_livetiming.py | JellybeanAsh/Fast-F1 | cf0cb20fdd3e89fdee3755097722db5ced3a23b5 | [
"MIT"
]
| 90 | 2020-07-25T11:00:15.000Z | 2022-03-31T01:59:59.000Z | fastf1/tests/test_livetiming.py | JellybeanAsh/Fast-F1 | cf0cb20fdd3e89fdee3755097722db5ced3a23b5 | [
"MIT"
]
| 68 | 2020-07-21T23:21:29.000Z | 2022-03-30T16:12:01.000Z | import os
from fastf1.core import Session, Weekend
from fastf1.livetiming.data import LiveTimingData
def test_file_loading_w_errors():
# load file with many errors and invalid data without crashing
livedata = LiveTimingData('fastf1/testing/reference_data/livedata/with_errors.txt')
livedata.load()
def test_file_loading():
# load a valid file
livedata = LiveTimingData('fastf1/testing/reference_data/livedata/2021_1_FP3.txt')
livedata.load()
weekend = Weekend(2021, 1)
session = Session(weekend=weekend, session_name='test_session')
session.load_laps(with_telemetry=True, livedata=livedata)
assert session.laps.shape == (274, 26)
assert session.car_data['44'].shape == (17362, 10)
def test_duplicate_removal(tmpdir):
# create a temporary file with two identical lines of data
tmpfile = os.path.join(tmpdir, 'tmpfile.txt')
data = "['TimingAppData', {'Lines': {'22': {'Stints': {'0': {" \
"'LapFlags': 0, 'Compound': 'UNKNOWN', 'New': 'false'," \
"'TyresNotChanged': '0', 'TotalLaps': 0, 'StartLaps':" \
"0}}}}}, '2021-03-27T12:00:32.086Z']\n"
with open(tmpfile, 'w') as fobj:
fobj.write(data)
fobj.write(data)
livedata = LiveTimingData(tmpfile)
assert len(livedata.get('TimingAppData')) == 1
livedata = LiveTimingData(tmpfile, remove_duplicates=False)
assert len(livedata.get('TimingAppData')) == 2
| 34.142857 | 87 | 0.679219 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 517 | 0.36053 |
48946b441f56097b2a5a11c0168a86635a484d94 | 1,768 | py | Python | src/plot/S0_read_jld2.py | OUCyf/NoiseCC | ad47e6894568bd007cd0425f766ba8aa243f83e1 | [
"MIT"
]
| 4 | 2021-12-13T09:16:07.000Z | 2022-01-06T15:45:02.000Z | src/plot/S0_read_jld2.py | OUCyf/NoiseCC | ad47e6894568bd007cd0425f766ba8aa243f83e1 | [
"MIT"
]
| null | null | null | src/plot/S0_read_jld2.py | OUCyf/NoiseCC | ad47e6894568bd007cd0425f766ba8aa243f83e1 | [
"MIT"
]
| null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 21 20:09:08 2021
######################
##### read h5 ########
######################
# 1.read h5-file
h5_file = h5py.File(files[1],'r')
# 2.show all keys in h5-file
h5_file.keys()
# 3. loop over all keys in h5-file
for key in h5_file.keys():
    onekey = key
    onekey_name = h5_file[key].name
# 4. access a group whose key "NN" is known
h5_file["NN"]
h5_file["NN"].keys()
f_dict = dict(h5_file["NN"])
f_dict.keys() # all keywords
# 5. read the group's datasets
data = f_dict["data"][()] # recommended
data = f_dict["data"].value # data is a numpy ndarray (multi-dimensional array)
trace = data[0] # a single trace
# 6. read the group's Int/Float values
baz = f_dict["baz"].value
baz = h5_file["NN"]["baz"].value
# 7. read the group's strings
# encode converts a unicode string into another encoding, e.g. str2.encode('utf8')
# turns the unicode string str2 into utf-8 bytes; decode('utf-8') does the reverse.
comp = h5_file["NN"]["comp"].value[0].decode('utf-8')
# 8. close the file
h5_file.close()
######################
##### write h5 ########
######################
@author: yf
"""
#%%
import numpy as np
import h5py
import os
import glob
#%% 1. set parameter
file = "../../data/BJ.081_BJ.084__2020_04_11_00_00_00T2021_04_13_00_00_00__all.jld2"
chan = "NN"
dt = 0.005
#%% 2. read h5
# open file
f = h5py.File(file,'r')
# read data
data = f[chan]["data"][0]
# read parameters
azi = f[chan]["azi"][()]
baz = f[chan]["baz"][()]
maxlag = f[chan]["maxlag"][()]
cc_len = f[chan]["cc_len"][()]
cc_step = f[chan]["cc_step"][()]
corr_type = f[chan]["corr_type"][()]
comp = f[chan]["comp"][()]
dist = f[chan]["dist"][()] # dist = f[chan]["dist"].value
lat = f[chan]["lat"][()]
lon = f[chan]["lon"][()]
N_glob = f[chan]["N_glob"][()]
N_read = f[chan]["N_read"][()]
N_good = f[chan]["N_good"][()]
name = f[chan]["name"][()][0].decode('utf-8')
# close h5-file
f.close()
| 19.644444 | 84 | 0.581448 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,461 | 0.754649 |
48946d309358ecb51872d9f0d80dff7d64dcb48a | 872 | py | Python | setup.py | MrJakeSir/theming | fd572c871fb4fd67cc4f9517558570d652ad1f0c | [
"MIT"
]
| 3 | 2021-10-02T02:23:50.000Z | 2021-10-02T16:03:33.000Z | setup.py | MrJakeSir/themify | fd572c871fb4fd67cc4f9517558570d652ad1f0c | [
"MIT"
]
| null | null | null | setup.py | MrJakeSir/themify | fd572c871fb4fd67cc4f9517558570d652ad1f0c | [
"MIT"
]
| null | null | null | from distutils.core import setup
setup(
name = 'colormate',
packages = ['colormate'],
version = '0.2214',
license='MIT',
description = 'A package to theme terminal scripts with custom colors and text formatting',
author = 'Rodrigo',
author_email = '[email protected]',
url = 'https://github.com/mrjakesir/themify',
download_url = 'https://github.com/MrJakeSir/themify/archive/refs/tags/v_0.3.1.tar.gz',
keywords = ['Colors', 'Scripting', 'Theme', 'Theming'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
],
)
| 36.333333 | 93 | 0.661697 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 605 | 0.693807 |
4894cec7ad1d16f91926da91173205b79ee1b463 | 1,620 | py | Python | tests/test_compound_where.py | WinVector/data_algebra | 3d6002ddf8231d310e03537a0435df0554b62234 | [
"BSD-3-Clause"
]
| 37 | 2019-08-28T08:16:48.000Z | 2022-03-14T21:18:39.000Z | tests/test_compound_where.py | WinVector/data_algebra | 3d6002ddf8231d310e03537a0435df0554b62234 | [
"BSD-3-Clause"
]
| 1 | 2019-09-02T23:13:29.000Z | 2019-09-08T01:43:10.000Z | tests/test_compound_where.py | WinVector/data_algebra | 3d6002ddf8231d310e03537a0435df0554b62234 | [
"BSD-3-Clause"
]
| 3 | 2019-08-28T12:23:11.000Z | 2020-02-08T19:22:31.000Z | import data_algebra
import data_algebra.test_util
from data_algebra.data_ops import * # https://github.com/WinVector/data_algebra
import data_algebra.util
import data_algebra.SQLite
def test_compount_where_and():
d = data_algebra.default_data_model.pd.DataFrame(
{
"a": ["a", "b", None, None],
"b": ["c", None, "d", None],
"x": [1, 2, None, None],
"y": [3, None, 4, None],
}
)
ops = describe_table(d, table_name="d").select_rows(
'a == "a" and b == "c" and x > 0 and y < 4'
)
db_handle = data_algebra.SQLite.SQLiteModel().db_handle(conn=None)
sql = db_handle.to_sql(ops)
assert isinstance(sql, str)
expect = data_algebra.default_data_model.pd.DataFrame(
{"a": ["a"], "b": ["c"], "x": [1.0], "y": [3.0],}
)
data_algebra.test_util.check_transform(ops=ops, data=d, expect=expect)
def test_compount_where_amp():
d = data_algebra.default_data_model.pd.DataFrame(
{
"a": ["a", "b", None, None],
"b": ["c", None, "d", None],
"x": [1, 2, None, None],
"y": [3, None, 4, None],
}
)
ops = describe_table(d, table_name="d").select_rows(
'a == "a" & b == "c" & x > 0 & y < 4'
)
db_handle = data_algebra.SQLite.SQLiteModel().db_handle(conn=None)
sql = db_handle.to_sql(ops)
assert isinstance(sql, str)
expect = data_algebra.default_data_model.pd.DataFrame(
{"a": ["a"], "b": ["c"], "x": [1.0], "y": [3.0],}
)
data_algebra.test_util.check_transform(ops=ops, data=d, expect=expect)
| 28.421053 | 80 | 0.56358 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 213 | 0.131481 |
4895a29e1cbfd7f3cbc0290d21c2ee285348e317 | 385 | py | Python | students/admin.py | eustone/sms | 0b785c8a6cc7f8c6035f1b46092d5b8e8750ab7f | [
"Apache-2.0"
]
| null | null | null | students/admin.py | eustone/sms | 0b785c8a6cc7f8c6035f1b46092d5b8e8750ab7f | [
"Apache-2.0"
]
| 7 | 2021-03-19T01:09:50.000Z | 2022-03-12T00:20:49.000Z | students/admin.py | eustone/sms | 0b785c8a6cc7f8c6035f1b46092d5b8e8750ab7f | [
"Apache-2.0"
]
| null | null | null | from django.contrib import admin
from .models import Student
# Register your models here.
class StudentAdmin(admin.ModelAdmin):
list_display = ('first_name','middle_name',
'last_name','identification_number')
search_fields = ('first_name','middle_name',
'last_name','identification_number')
admin.site.register(Student,StudentAdmin)
| 27.5 | 56 | 0.696104 | 248 | 0.644156 | 0 | 0 | 0 | 0 | 0 | 0 | 146 | 0.379221 |
4896bd7de479f88113218577909931ad2456610b | 18,819 | py | Python | lshmm/viterbi/vit_diploid_variants_samples.py | jeromekelleher/lshmm | 58e0c3395f222e756bb10a0063f5118b20176a01 | [
"MIT"
]
| null | null | null | lshmm/viterbi/vit_diploid_variants_samples.py | jeromekelleher/lshmm | 58e0c3395f222e756bb10a0063f5118b20176a01 | [
"MIT"
]
| 9 | 2022-02-24T14:20:09.000Z | 2022-03-01T17:54:47.000Z | lshmm/vit_diploid_variants_samples.py | astheeggeggs/ls_hmm | 11af1eb886ef3db2869cdd50954fba5565fcef51 | [
"MIT"
]
| 1 | 2022-02-28T17:07:36.000Z | 2022-02-28T17:07:36.000Z | """Collection of functions to run Viterbi algorithms on dipoid genotype data, where the data is structured as variants x samples."""
import numba as nb
import numpy as np
# https://github.com/numba/numba/issues/1269
@nb.njit
def np_apply_along_axis(func1d, axis, arr):
"""Create numpy-like functions for max, sum etc."""
assert arr.ndim == 2
assert axis in [0, 1]
if axis == 0:
result = np.empty(arr.shape[1])
for i in range(len(result)):
result[i] = func1d(arr[:, i])
else:
result = np.empty(arr.shape[0])
for i in range(len(result)):
result[i] = func1d(arr[i, :])
return result
@nb.njit
def np_amax(array, axis):
"""Numba implementation of numpy vectorised maximum."""
return np_apply_along_axis(np.amax, axis, array)
@nb.njit
def np_sum(array, axis):
"""Numba implementation of numpy vectorised sum."""
return np_apply_along_axis(np.sum, axis, array)
@nb.njit
def np_argmax(array, axis):
"""Numba implementation of numpy vectorised argmax."""
return np_apply_along_axis(np.argmax, axis, array)
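# A minimal consistency sketch; the helper name is illustrative and not part of
# the module. The numba-friendly wrappers above should agree with numpy's own
# axis-wise reductions on 2D arrays.
def _check_axis_helpers():
    a = np.random.rand(4, 5)
    assert np.allclose(np_amax(a, 0), np.amax(a, axis=0))
    assert np.allclose(np_sum(a, 1), np.sum(a, axis=1))
    assert np.all(np_argmax(a, 0) == np.argmax(a, axis=0))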
# def forwards_viterbi_dip_naive(n, m, G, s, e, r):
# # Initialise
# V = np.zeros((m, n, n))
# P = np.zeros((m, n, n)).astype(np.int64)
# c = np.ones(m)
# index = (
# 4*np.equal(G[0,:,:], s[0,0]).astype(np.int64) +
# 2*(G[0,:,:] == 1).astype(np.int64) +
# np.int64(s[0,0] == 1)
# )
# V[0,:,:] = 1/(n**2) * e[0,index]
# r_n = r/n
# for l in range(1,m):
# index = (
# 4*np.equal(G[l,:,:], s[0,l]).astype(np.int64) +
# 2*(G[l,:,:] == 1).astype(np.int64) +
# np.int64(s[0,l] == 1)
# )
# for j1 in range(n):
# for j2 in range(n):
# # Get the vector to maximise over
# v = np.zeros((n,n))
# for k1 in range(n):
# for k2 in range(n):
# v[k1, k2] = V[l-1,k1, k2]
# if ((k1 == j1) and (k2 == j2)):
# v[k1, k2] *= ((1 - r[l])**2 + 2*(1-r[l]) * r_n[l] + r_n[l]**2)
# elif ((k1 == j1) or (k2 == j2)):
# v[k1, k2] *= (r_n[l] * (1 - r[l]) + r_n[l]**2)
# else:
# v[k1, k2] *= r_n[l]**2
# V[l,j1,j2] = np.amax(v) * e[l, index[j1, j2]]
# P[l,j1,j2] = np.argmax(v)
# c[l] = np.amax(V[l,:,:])
# V[l,:,:] *= 1/c[l]
# ll = np.sum(np.log10(c))
# return V, P, ll
@nb.njit
def forwards_viterbi_dip_naive(n, m, G, s, e, r):
"""Naive implementation of LS diploid Viterbi algorithm."""
# Initialise
V = np.zeros((m, n, n))
P = np.zeros((m, n, n)).astype(np.int64)
c = np.ones(m)
r_n = r / n
for j1 in range(n):
for j2 in range(n):
index_tmp = (
4 * np.int64(np.equal(G[0, j1, j2], s[0, 0]))
+ 2 * np.int64((G[0, j1, j2] == 1))
+ np.int64(s[0, 0] == 1)
)
V[0, j1, j2] = 1 / (n ** 2) * e[0, index_tmp]
for l in range(1, m):
index = (
4 * np.equal(G[l, :, :], s[0, l]).astype(np.int64)
+ 2 * (G[l, :, :] == 1).astype(np.int64)
+ np.int64(s[0, l] == 1)
)
for j1 in range(n):
for j2 in range(n):
# Get the vector to maximise over
v = np.zeros((n, n))
for k1 in range(n):
for k2 in range(n):
v[k1, k2] = V[l - 1, k1, k2]
if (k1 == j1) and (k2 == j2):
v[k1, k2] *= (
(1 - r[l]) ** 2 + 2 * (1 - r[l]) * r_n[l] + r_n[l] ** 2
)
elif (k1 == j1) or (k2 == j2):
v[k1, k2] *= r_n[l] * (1 - r[l]) + r_n[l] ** 2
else:
v[k1, k2] *= r_n[l] ** 2
V[l, j1, j2] = np.amax(v) * e[l, index[j1, j2]]
P[l, j1, j2] = np.argmax(v)
c[l] = np.amax(V[l, :, :])
V[l, :, :] *= 1 / c[l]
ll = np.sum(np.log10(c))
return V, P, ll
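# Emission-index bookkeeping used above (and in the variants below): for each
# reference pair (j1, j2) the index into e[l, :] is
#   4 * (G[l, j1, j2] == s[0, l]) + 2 * (G[l, j1, j2] == 1) + (s[0, l] == 1),
# jointly encoding whether the unphased genotype matches the observation,
# whether the genotype is heterozygous, and whether the observation is
# heterozygous, giving one of 8 emission classes.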
# def forwards_viterbi_dip_naive_low_mem(n, m, G, s, e, r):
# # Initialise
# V = np.zeros((n,n))
# P = np.zeros((m,n,n)).astype(np.int64)
# c = np.ones(m)
# index = (
# 4*np.equal(G[0,:,:], s[0,0]).astype(np.int64) +
# 2*(G[0,:,:] == 1).astype(np.int64) +
# np.int64(s[0,0] == 1)
# )
# V_previous = 1/(n**2) * e[0,index]
# r_n = r/n
# # Take a look at Haploid Viterbi implementation in Jeromes code and see if we can pinch some ideas.
# # Diploid Viterbi, with smaller memory footprint.
# for l in range(1,m):
# index = (
# 4*np.equal(G[l,:,:], s[0,l]).astype(np.int64) +
# 2*(G[l,:,:] == 1).astype(np.int64) +
# np.int64(s[0,l] == 1)
# )
# for j1 in range(n):
# for j2 in range(n):
# # Get the vector to maximise over
# v = np.zeros((n,n))
# for k1 in range(n):
# for k2 in range(n):
# v[k1, k2] = V_previous[k1, k2]
# if ((k1 == j1) and (k2 == j2)):
# v[k1, k2] *= ((1 - r[l])**2 + 2*(1-r[l]) * r_n[l] + r_n[l]**2)
# elif ((k1 == j1) or (k2 == j2)):
# v[k1, k2] *= (r_n[l] * (1 - r[l]) + r_n[l]**2)
# else:
# v[k1, k2] *= r_n[l]**2
# V[j1,j2] = np.amax(v) * e[l,index[j1, j2]]
# P[l,j1,j2] = np.argmax(v)
# c[l] = np.amax(V)
# V_previous = np.copy(V) / c[l]
# ll = np.sum(np.log10(c))
# return V, P, ll
@nb.njit
def forwards_viterbi_dip_naive_low_mem(n, m, G, s, e, r):
"""Naive implementation of LS diploid Viterbi algorithm, with reduced memory."""
# Initialise
V = np.zeros((n, n))
V_previous = np.zeros((n, n))
P = np.zeros((m, n, n)).astype(np.int64)
c = np.ones(m)
r_n = r / n
for j1 in range(n):
for j2 in range(n):
index_tmp = (
4 * np.int64(np.equal(G[0, j1, j2], s[0, 0]))
+ 2 * np.int64((G[0, j1, j2] == 1))
+ np.int64(s[0, 0] == 1)
)
V_previous[j1, j2] = 1 / (n ** 2) * e[0, index_tmp]
    # Take a look at the haploid Viterbi implementation in Jerome's code and see if we can pinch some ideas.
# Diploid Viterbi, with smaller memory footprint.
for l in range(1, m):
index = (
4 * np.equal(G[l, :, :], s[0, l]).astype(np.int64)
+ 2 * (G[l, :, :] == 1).astype(np.int64)
+ np.int64(s[0, l] == 1)
)
for j1 in range(n):
for j2 in range(n):
# Get the vector to maximise over
v = np.zeros((n, n))
for k1 in range(n):
for k2 in range(n):
v[k1, k2] = V_previous[k1, k2]
if (k1 == j1) and (k2 == j2):
v[k1, k2] *= (
(1 - r[l]) ** 2 + 2 * (1 - r[l]) * r_n[l] + r_n[l] ** 2
)
elif (k1 == j1) or (k2 == j2):
v[k1, k2] *= r_n[l] * (1 - r[l]) + r_n[l] ** 2
else:
v[k1, k2] *= r_n[l] ** 2
V[j1, j2] = np.amax(v) * e[l, index[j1, j2]]
P[l, j1, j2] = np.argmax(v)
c[l] = np.amax(V)
V_previous = np.copy(V) / c[l]
ll = np.sum(np.log10(c))
return V, P, ll
@nb.njit
def forwards_viterbi_dip_low_mem(n, m, G, s, e, r):
"""LS diploid Viterbi algorithm, with reduced memory."""
# Initialise
V = np.zeros((n, n))
V_previous = np.zeros((n, n))
P = np.zeros((m, n, n)).astype(np.int64)
c = np.ones(m)
r_n = r / n
for j1 in range(n):
for j2 in range(n):
index_tmp = (
4 * np.int64(np.equal(G[0, j1, j2], s[0, 0]))
+ 2 * np.int64((G[0, j1, j2] == 1))
+ np.int64(s[0, 0] == 1)
)
V_previous[j1, j2] = 1 / (n ** 2) * e[0, index_tmp]
# Diploid Viterbi, with smaller memory footprint, rescaling, and using the structure of the HMM.
for l in range(1, m):
index = (
4 * np.equal(G[l, :, :], s[0, l]).astype(np.int64)
+ 2 * (G[l, :, :] == 1).astype(np.int64)
+ np.int64(s[0, l] == 1)
)
c[l] = np.amax(V_previous)
argmax = np.argmax(V_previous)
V_previous *= 1 / c[l]
V_rowcol_max = np_amax(V_previous, 0)
arg_rowcol_max = np_argmax(V_previous, 0)
no_switch = (1 - r[l]) ** 2 + 2 * (r_n[l] * (1 - r[l])) + r_n[l] ** 2
single_switch = r_n[l] * (1 - r[l]) + r_n[l] ** 2
double_switch = r_n[l] ** 2
j1_j2 = 0
for j1 in range(n):
for j2 in range(n):
V_single_switch = max(V_rowcol_max[j1], V_rowcol_max[j2])
P_single_switch = np.argmax(
np.array([V_rowcol_max[j1], V_rowcol_max[j2]])
)
if P_single_switch == 0:
template_single_switch = j1 * n + arg_rowcol_max[j1]
else:
template_single_switch = arg_rowcol_max[j2] * n + j2
V[j1, j2] = V_previous[j1, j2] * no_switch # No switch in either
P[l, j1, j2] = j1_j2
# Single or double switch?
single_switch_tmp = single_switch * V_single_switch
if single_switch_tmp > double_switch:
# Then single switch is the alternative
if V[j1, j2] < single_switch * V_single_switch:
V[j1, j2] = single_switch * V_single_switch
P[l, j1, j2] = template_single_switch
else:
# Double switch is the alternative
if V[j1, j2] < double_switch:
V[j1, j2] = double_switch
P[l, j1, j2] = argmax
V[j1, j2] *= e[l, index[j1, j2]]
j1_j2 += 1
V_previous = np.copy(V)
ll = np.sum(np.log10(c)) + np.log10(np.amax(V))
return V, P, ll
@nb.jit
def forwards_viterbi_dip_naive_vec(n, m, G, s, e, r):
"""Vectorised LS diploid Viterbi algorithm using numpy."""
# Initialise
V = np.zeros((m, n, n))
P = np.zeros((m, n, n)).astype(np.int64)
c = np.ones(m)
r_n = r / n
for j1 in range(n):
for j2 in range(n):
index_tmp = (
4 * np.int64(np.equal(G[0, j1, j2], s[0, 0]))
+ 2 * np.int64((G[0, j1, j2] == 1))
+ np.int64(s[0, 0] == 1)
)
V[0, j1, j2] = 1 / (n ** 2) * e[0, index_tmp]
# Jumped the gun - vectorising.
for l in range(1, m):
index = (
4 * np.equal(G[l, :, :], s[0, l]).astype(np.int64)
+ 2 * (G[l, :, :] == 1).astype(np.int64)
+ np.int64(s[0, l] == 1)
)
for j1 in range(n):
for j2 in range(n):
v = (r_n[l] ** 2) * np.ones((n, n))
v[j1, j2] += (1 - r[l]) ** 2
v[j1, :] += r_n[l] * (1 - r[l])
v[:, j2] += r_n[l] * (1 - r[l])
v *= V[l - 1, :, :]
V[l, j1, j2] = np.amax(v) * e[l, index[j1, j2]]
P[l, j1, j2] = np.argmax(v)
c[l] = np.amax(V[l, :, :])
V[l, :, :] *= 1 / c[l]
ll = np.sum(np.log10(c))
return V, P, ll
def forwards_viterbi_dip_naive_full_vec(n, m, G, s, e, r):
"""Fully vectorised naive LS diploid Viterbi algorithm using numpy."""
char_both = np.eye(n * n).ravel().reshape((n, n, n, n))
char_col = np.tile(np.sum(np.eye(n * n).reshape((n, n, n, n)), 3), (n, 1, 1, 1))
char_row = np.copy(char_col).T
rows, cols = np.ogrid[:n, :n]
# Initialise
V = np.zeros((m, n, n))
P = np.zeros((m, n, n)).astype(np.int64)
c = np.ones(m)
index = (
4 * np.equal(G[0, :, :], s[0, 0]).astype(np.int64)
+ 2 * (G[0, :, :] == 1).astype(np.int64)
+ np.int64(s[0, 0] == 1)
)
V[0, :, :] = 1 / (n ** 2) * e[0, index]
r_n = r / n
for l in range(1, m):
index = (
4 * np.equal(G[l, :, :], s[0, l]).astype(np.int64)
+ 2 * (G[l, :, :] == 1).astype(np.int64)
+ np.int64(s[0, l] == 1)
)
v = (
(r_n[l] ** 2)
+ (1 - r[l]) ** 2 * char_both
+ (r_n[l] * (1 - r[l])) * (char_col + char_row)
)
v *= V[l - 1, :, :]
P[l, :, :] = np.argmax(v.reshape(n, n, -1), 2) # Have to flatten to use argmax
V[l, :, :] = v.reshape(n, n, -1)[rows, cols, P[l, :, :]] * e[l, index]
c[l] = np.amax(V[l, :, :])
V[l, :, :] *= 1 / c[l]
ll = np.sum(np.log10(c))
return V, P, ll
@nb.jit
def backwards_viterbi_dip(m, V_last, P):
"""Run a backwards pass to determine the most likely path."""
assert V_last.ndim == 2
assert V_last.shape[0] == V_last.shape[1]
# Initialisation
path = np.zeros(m).astype(np.int64)
path[m - 1] = np.argmax(V_last)
# Backtrace
for j in range(m - 2, -1, -1):
path[j] = P[j + 1, :, :].ravel()[path[j + 1]]
return path
def get_phased_path(n, path):
"""Obtain the phased path."""
return np.unravel_index(path, (n, n))
@nb.jit
def path_ll_dip(n, m, G, phased_path, s, e, r):
"""Evaluate log-likelihood path through a reference panel which results in sequence s."""
index = (
4 * np.int64(np.equal(G[0, phased_path[0][0], phased_path[1][0]], s[0, 0]))
+ 2 * np.int64(G[0, phased_path[0][0], phased_path[1][0]] == 1)
+ np.int64(s[0, 0] == 1)
)
log_prob_path = np.log10(1 / (n ** 2) * e[0, index])
old_phase = np.array([phased_path[0][0], phased_path[1][0]])
r_n = r / n
for l in range(1, m):
index = (
4 * np.int64(np.equal(G[l, phased_path[0][l], phased_path[1][l]], s[0, l]))
+ 2 * np.int64(G[l, phased_path[0][l], phased_path[1][l]] == 1)
+ np.int64(s[0, l] == 1)
)
current_phase = np.array([phased_path[0][l], phased_path[1][l]])
phase_diff = np.sum(~np.equal(current_phase, old_phase))
if phase_diff == 0:
log_prob_path += np.log10(
(1 - r[l]) ** 2 + 2 * (r_n[l] * (1 - r[l])) + r_n[l] ** 2
)
elif phase_diff == 1:
log_prob_path += np.log10(r_n[l] * (1 - r[l]) + r_n[l] ** 2)
else:
log_prob_path += np.log10(r_n[l] ** 2)
log_prob_path += np.log10(e[l, index])
old_phase = current_phase
return log_prob_path
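# Illustrative end-to-end sketch (not part of the original module). The shapes
# below are assumptions inferred from the functions above: G is (m, n, n)
# reference genotypes, s is (1, m) query genotypes, e is (m, 8) emission
# probabilities and r is a length-m vector of recombination probabilities.
def _example_diploid_viterbi_run(n, m, G, s, e, r):
    """Run the low-memory diploid Viterbi pass and score the decoded path."""
    V, P, ll = forwards_viterbi_dip_low_mem(n, m, G, s, e, r)
    path = backwards_viterbi_dip(m, V, P)       # most likely flattened path
    phased_path = get_phased_path(n, path)      # unravel into the two haplotypes
    path_ll = path_ll_dip(n, m, G, phased_path, s, e, r)
    return phased_path, ll, path_ll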
| 33.307965 | 132 | 0.435145 | 0 | 0 | 0 | 0 | 10,251 | 0.544715 | 0 | 0 | 8,128 | 0.431904 |
4896e1b1c5caef0d1e5aee9a140b1ba801b67e72 | 6,704 | py | Python | src/test/test_pg_function.py | gyana/alembic_utils | a4bc7f5f025335faad7b178eb84ab78093e525ec | [
"MIT"
]
| null | null | null | src/test/test_pg_function.py | gyana/alembic_utils | a4bc7f5f025335faad7b178eb84ab78093e525ec | [
"MIT"
]
| null | null | null | src/test/test_pg_function.py | gyana/alembic_utils | a4bc7f5f025335faad7b178eb84ab78093e525ec | [
"MIT"
]
| null | null | null | from alembic_utils.pg_function import PGFunction
from alembic_utils.replaceable_entity import register_entities
from alembic_utils.testbase import TEST_VERSIONS_ROOT, run_alembic_command
TO_UPPER = PGFunction(
schema="public",
signature="toUpper(some_text text default 'my text!')",
definition="""
returns text
as
$$ begin return upper(some_text) || 'abc'; end; $$ language PLPGSQL;
""",
)
def test_create_revision(engine) -> None:
register_entities([TO_UPPER])
run_alembic_command(
engine=engine,
command="revision",
command_kwargs={"autogenerate": True, "rev_id": "1", "message": "create"},
)
migration_create_path = TEST_VERSIONS_ROOT / "1_create.py"
with migration_create_path.open() as migration_file:
migration_contents = migration_file.read()
assert "op.create_entity" in migration_contents
assert "op.drop_entity" in migration_contents
assert "op.replace_entity" not in migration_contents
assert "from alembic_utils.pg_function import PGFunction" in migration_contents
# Execute upgrade
run_alembic_command(engine=engine, command="upgrade", command_kwargs={"revision": "head"})
# Execute Downgrade
run_alembic_command(engine=engine, command="downgrade", command_kwargs={"revision": "base"})
def test_update_revision(engine) -> None:
engine.execute(TO_UPPER.to_sql_statement_create())
# Update definition of TO_UPPER
UPDATED_TO_UPPER = PGFunction(
TO_UPPER.schema,
TO_UPPER.signature,
r'''returns text as
$$
select upper(some_text) || 'def' -- """ \n \\
$$ language SQL immutable strict;''',
)
register_entities([UPDATED_TO_UPPER])
# Autogenerate a new migration
# It should detect the change we made and produce a "replace_function" statement
run_alembic_command(
engine=engine,
command="revision",
command_kwargs={"autogenerate": True, "rev_id": "2", "message": "replace"},
)
migration_replace_path = TEST_VERSIONS_ROOT / "2_replace.py"
with migration_replace_path.open() as migration_file:
migration_contents = migration_file.read()
assert "op.replace_entity" in migration_contents
assert "op.create_entity" not in migration_contents
assert "op.drop_entity" not in migration_contents
assert "from alembic_utils.pg_function import PGFunction" in migration_contents
# Execute upgrade
run_alembic_command(engine=engine, command="upgrade", command_kwargs={"revision": "head"})
# Execute Downgrade
run_alembic_command(engine=engine, command="downgrade", command_kwargs={"revision": "base"})
def test_noop_revision(engine) -> None:
engine.execute(TO_UPPER.to_sql_statement_create())
register_entities([TO_UPPER])
output = run_alembic_command(
engine=engine,
command="revision",
command_kwargs={"autogenerate": True, "rev_id": "3", "message": "do_nothing"},
)
migration_do_nothing_path = TEST_VERSIONS_ROOT / "3_do_nothing.py"
with migration_do_nothing_path.open() as migration_file:
migration_contents = migration_file.read()
assert "op.create_entity" not in migration_contents
assert "op.drop_entity" not in migration_contents
assert "op.replace_entity" not in migration_contents
assert "from alembic_utils" not in migration_contents
# Execute upgrade
run_alembic_command(engine=engine, command="upgrade", command_kwargs={"revision": "head"})
# Execute Downgrade
run_alembic_command(engine=engine, command="downgrade", command_kwargs={"revision": "base"})
def test_drop(engine) -> None:
# Manually create a SQL function
engine.execute(TO_UPPER.to_sql_statement_create())
# Register no functions locally
register_entities([], schemas=["public"])
run_alembic_command(
engine=engine,
command="revision",
command_kwargs={"autogenerate": True, "rev_id": "1", "message": "drop"},
)
migration_create_path = TEST_VERSIONS_ROOT / "1_drop.py"
with migration_create_path.open() as migration_file:
migration_contents = migration_file.read()
assert "op.drop_entity" in migration_contents
assert "op.create_entity" in migration_contents
assert "from alembic_utils" in migration_contents
assert migration_contents.index("op.drop_entity") < migration_contents.index("op.create_entity")
# Execute upgrade
run_alembic_command(engine=engine, command="upgrade", command_kwargs={"revision": "head"})
# Execute Downgrade
run_alembic_command(engine=engine, command="downgrade", command_kwargs={"revision": "base"})
def test_has_no_parameters(engine) -> None:
    # An error was occurring in the drop statement when the function had no
    # parameters, related to the parameter parsing used to drop default statements
SIDE_EFFECT = PGFunction(
schema="public",
signature="side_effect()",
definition="""
returns integer
as
$$ select 1; $$ language SQL;
""",
)
# Register no functions locally
register_entities([SIDE_EFFECT], schemas=["public"])
run_alembic_command(
engine=engine,
command="revision",
command_kwargs={"autogenerate": True, "rev_id": "1", "message": "no_arguments"},
)
migration_create_path = TEST_VERSIONS_ROOT / "1_no_arguments.py"
with migration_create_path.open() as migration_file:
migration_contents = migration_file.read()
assert "op.drop_entity" in migration_contents
# Execute upgrade
run_alembic_command(engine=engine, command="upgrade", command_kwargs={"revision": "head"})
# Execute Downgrade
run_alembic_command(engine=engine, command="downgrade", command_kwargs={"revision": "base"})
def test_ignores_extension_functions(engine) -> None:
# Extensions contain functions and don't have local representations
# Unless they are excluded, every autogenerate migration will produce
# drop statements for those functions
try:
engine.execute("create extension if not exists unaccent;")
register_entities([], schemas=["public"])
run_alembic_command(
engine=engine,
command="revision",
command_kwargs={"autogenerate": True, "rev_id": "1", "message": "no_drops"},
)
migration_create_path = TEST_VERSIONS_ROOT / "1_no_drops.py"
with migration_create_path.open() as migration_file:
migration_contents = migration_file.read()
assert "op.drop_entity" not in migration_contents
finally:
engine.execute("drop extension if exists unaccent;")
| 34.735751 | 100 | 0.702118 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,309 | 0.344421 |
48976b6d6b5db52348271fa437cb2c3858865703 | 1,723 | py | Python | proof_of_work/multiagent/turn_based/v6/environmentv6.py | michaelneuder/parkes_lab_fa19 | 18d9f564e0df9c17ac5d54619ed869d778d4f6a4 | [
"MIT"
]
| null | null | null | proof_of_work/multiagent/turn_based/v6/environmentv6.py | michaelneuder/parkes_lab_fa19 | 18d9f564e0df9c17ac5d54619ed869d778d4f6a4 | [
"MIT"
]
| null | null | null | proof_of_work/multiagent/turn_based/v6/environmentv6.py | michaelneuder/parkes_lab_fa19 | 18d9f564e0df9c17ac5d54619ed869d778d4f6a4 | [
"MIT"
]
| null | null | null | import numpy as np
np.random.seed(0)
class Environment(object):
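    """Turn-based block-mining environment: the state is the pair (a, h) of
    chain lengths; actions are 0 = adopt, 1 = override, 2 = mine. `alpha` is
    the probability that a mining step extends a rather than h, and once
    either length reaches `T` a mine action falls back to adopt.
    """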
def __init__(self, alpha, T, mining_cost=0.5):
self.alpha = alpha
self.T = T
self.current_state = None
self.mining_cost = mining_cost
def reset(self):
self.current_state = (0, 0)
return self.current_state
def getNextStateAdopt(self, rand_val):
self.current_state = (0, 0)
return np.asarray(self.current_state), 0
def getNextStateOverride(self, rand_val):
a, h = self.current_state
if a <= h:
self.current_state = (0, 0)
return np.asarray(self.current_state), -100
self.current_state = (a - h - 1, 0)
return np.asarray(self.current_state), h + 1
def getNextStateMine(self, rand_val):
a, h = self.current_state
if (a == self.T) or (h == self.T):
return self.getNextStateAdopt(rand_val)
if rand_val < self.alpha:
self.current_state = (a + 1, h)
else:
self.current_state = (a, h + 1)
return np.asarray(self.current_state), -1*self.alpha*self.mining_cost
def takeAction(self, action, rand_val=None):
assert(action in [0, 1, 2])
if not rand_val:
rand_val = np.random.uniform()
if action == 0:
return self.getNextStateAdopt(rand_val)
elif action == 1:
return self.getNextStateOverride(rand_val)
else:
return self.getNextStateMine(rand_val)
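# Illustrative helper (added for clarity; not part of the original file): roll
# out a uniformly random policy for a fixed number of turns and accumulate the
# reward signal returned by the environment.
def run_random_episode(env, num_steps=100):
    env.reset()
    total_reward = 0.0
    for _ in range(num_steps):
        action = int(np.random.randint(0, 3))  # adopt, override or mine
        _, reward = env.takeAction(action)
        total_reward += reward
    return total_reward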
def main():
env = Environment(alpha=0.35, T=9)
    print(env.reset())  # reset() takes no arguments
print(env.takeAction(2, 0.01))
print(env.takeAction(1, 0.01))
if __name__ == "__main__":
main() | 31.327273 | 77 | 0.585607 | 1,488 | 0.86361 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 0.005804 |
4897778aee005c5aa1bda6eba1bb9679879bf2ca | 549 | py | Python | passgen-py/setup.py | hassanselim0/PassGen | 70e0187bfd58e0dc1fba5dbeea5b95769a599f60 | [
"MIT"
]
| null | null | null | passgen-py/setup.py | hassanselim0/PassGen | 70e0187bfd58e0dc1fba5dbeea5b95769a599f60 | [
"MIT"
]
| 1 | 2020-08-11T22:00:51.000Z | 2020-08-11T23:55:48.000Z | passgen-py/setup.py | hassanselim0/PassGen | 70e0187bfd58e0dc1fba5dbeea5b95769a599f60 | [
"MIT"
]
| 1 | 2020-08-10T15:50:21.000Z | 2020-08-10T15:50:21.000Z | from setuptools import setup, find_packages
setup(
name='passgen-py',
packages=find_packages(),
version='1.1',
description='Generate Passwords Deterministically based on a Master Password.',
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3'
],
python_requires='>=3.6, <4',
entry_points={
'console_scripts': [
'passgen=src:cli',
],
},
install_requires=['click', 'pyperclip'],
)
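# Illustrative usage (assumed typical workflow; not part of the original file):
#   pip install .    # installs the package and exposes the `passgen` console script
#   passgen --help   # entry point defined in entry_points above (a click CLI)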
| 26.142857 | 83 | 0.599271 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 256 | 0.466302 |
489780fa9ccacfe9a097c426e6e4d2cf96e01913 | 163 | py | Python | python-peculiarities/source/MultiplicationComplication.py | noamt/presentations | c5031ae0558d19be920ee1641ba2fc5f4fd88773 | [
"Unlicense"
]
| null | null | null | python-peculiarities/source/MultiplicationComplication.py | noamt/presentations | c5031ae0558d19be920ee1641ba2fc5f4fd88773 | [
"Unlicense"
]
| null | null | null | python-peculiarities/source/MultiplicationComplication.py | noamt/presentations | c5031ae0558d19be920ee1641ba2fc5f4fd88773 | [
"Unlicense"
]
| null | null | null | # https://codegolf.stackexchange.com/a/11480
multiplication = []
for i in range(10):
multiplication.append(i * (i + 1))
for x in multiplication:
print(x) | 20.375 | 44 | 0.680982 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 44 | 0.269939 |
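# The loop above builds i * (i + 1) for i = 0..9, so this prints the first ten
# pronic numbers: 0, 2, 6, 12, 20, 30, 42, 56, 72, 90.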
4898793ace916333da3e62990ff5fb14ce91eb0e | 4,762 | py | Python | bin/mkSampleInfo.py | icbi-lab/nextNEOpi | d9f6ccf5178e7ef1742b95e740ce3f39405f21dd | [
"BSD-3-Clause-Clear"
]
| 24 | 2021-06-16T07:20:43.000Z | 2022-03-23T05:40:01.000Z | bin/mkSampleInfo.py | abyssum/nextNEOpi | f7de4c76c7d98be485f8db0999ad278cd17fa642 | [
"BSD-3-Clause-Clear"
]
| 2 | 2021-12-09T16:43:45.000Z | 2022-02-18T14:03:36.000Z | bin/mkSampleInfo.py | abyssum/nextNEOpi | f7de4c76c7d98be485f8db0999ad278cd17fa642 | [
"BSD-3-Clause-Clear"
]
| 5 | 2021-08-25T06:54:47.000Z | 2022-03-03T06:11:31.000Z | #!/usr/bin/env python
"""
Requirements:
* Python >= 3.7
* Pysam
Copyright (c) 2021 Dietmar Rieder <[email protected]>
MIT License <http://opensource.org/licenses/MIT>
"""
RELEASE = False
__version_info__ = (
"0",
"1",
)
__version__ = ".".join(__version_info__)
__version__ += "-dev" if not RELEASE else ""
import os
import sys
import argparse
def parse_csin(csin_fh, csin_info):
for line in csin_fh:
if line.find("MHC I CSiN") != -1:
_, csin_v = line.split(" = ")
csin_info["MHCI"] = round(float(csin_v.strip()), 3)
if line.find("MHC II CSiN") != -1:
_, csin_v = line.split(" = ")
csin_info["MHCII"] = round(float(csin_v.strip()), 3)
if line.find("Total CSiN") != -1:
_, csin_v = line.split(" = ")
csin_info["combined"] = round(float(csin_v.strip()), 3)
return csin_info
def parse_tmb(tmb_fh, tmb_info, tmb_type):
for line in tmb_fh:
if line.find("Coverage") != -1:
_, v = line.split("\t")
if tmb_type == "all":
tmb_info["cov_genome"] = v.strip()
if tmb_type == "coding":
tmb_info["cov_coding"] = v.strip()
if line.find("Variants") != -1:
_, v = line.split("\t")
if tmb_type == "all":
tmb_info["variants_tot"] = v.strip()
if tmb_type == "coding":
tmb_info["variants_coding"] = v.strip()
if line.find("Mutational load (") != -1:
_, v = line.split("\t")
if tmb_type == "all":
tmb_info["TMB"] = round(float(v.strip()), 3)
if tmb_type == "coding":
tmb_info["TMB_coding"] = round(float(v.strip()), 3)
if line.find("Mutational load clonal") != -1:
_, v = line.split("\t")
if tmb_type == "all":
tmb_info["TMB_clonal"] = round(float(v.strip()), 3)
if tmb_type == "coding":
tmb_info["TMB_clonal_coding"] = round(float(v.strip()), 3)
return tmb_info
def write_output(out_fh, tmb_info, csin_info, sample_name):
header_fields = [
"SampleID",
"TMB",
"TMB_clonal",
"TMB_coding",
"TMB_clonal_coding",
"variants_total",
"variants_coding",
"coverage_genome",
"coverage_coding",
"CSiN_MHC_I",
"CSiN_MHC_II",
"CSiN_combined",
]
data_fields = [
sample_name,
tmb_info["TMB"],
tmb_info["TMB_clonal"],
tmb_info["TMB_coding"],
tmb_info["TMB_clonal_coding"],
tmb_info["variants_tot"],
tmb_info["variants_coding"],
tmb_info["cov_genome"],
tmb_info["cov_coding"],
csin_info["MHCI"],
csin_info["MHCII"],
csin_info["combined"],
]
out_fh.write("\t".join(header_fields) + "\n")
out_fh.write("\t".join(map(str, data_fields)) + "\n")
def _file_write(fname):
"""Returns an open file handle if the given filename exists."""
return open(fname, "w")
def _file_read(fname):
"""Returns an open file handle if the given filename exists."""
return open(fname, "r")
if __name__ == "__main__":
usage = __doc__.split("\n\n\n")
parser = argparse.ArgumentParser(description="Compile sample info sheet")
parser.add_argument(
"--tmb",
required=True,
type=_file_read,
help="TMB file",
)
parser.add_argument(
"--tmb_coding",
required=True,
type=_file_read,
help="TMB coding file",
)
parser.add_argument(
"--csin",
required=True,
type=_file_read,
help="CSiN file",
)
parser.add_argument(
"--out",
required=True,
type=_file_write,
help="Output file",
)
parser.add_argument(
"--sample_name",
required=True,
type=str,
help="Sample name",
)
parser.add_argument(
"--version", action="version", version="%(prog)s " + __version__
)
args = parser.parse_args()
tmb = args.tmb
tmb_coding = args.tmb_coding
csin = args.csin
out = args.out
sample_name = args.sample_name
tmb_info = {
"cov_genome": 0,
"cov_coding": 0,
"variants_tot": 0,
"variants_coding": 0,
"TMB": 0,
"TMB_clonal": 0,
"TMB_coding": 0,
"TMB_clonal_coding": 0,
}
csin_info = {"MHCI": 0, "MHCII": 0, "combined": 0}
tmb_info = parse_tmb(tmb, tmb_info, "all")
tmb_info = parse_tmb(tmb_coding, tmb_info, "coding")
csin_info = parse_csin(csin, csin_info)
write_output(out, tmb_info, csin_info, sample_name)
out.close()
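# Illustrative invocation (the file names below are hypothetical):
#   python mkSampleInfo.py --tmb sample_TMB.txt --tmb_coding sample_TMB_coding.txt \
#       --csin sample_CSiN.txt --out sample_info.tsv --sample_name sample1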
| 26.309392 | 77 | 0.545569 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,284 | 0.269635 |
489a6ae22cd0b248814c3b6aa65494aabadf9db8 | 3,115 | py | Python | garrick.py | SebNickel/garrick | b2ebf24054bc2770ced1674bd102022f8d01b169 | [
"MIT"
]
| null | null | null | garrick.py | SebNickel/garrick | b2ebf24054bc2770ced1674bd102022f8d01b169 | [
"MIT"
]
| null | null | null | garrick.py | SebNickel/garrick | b2ebf24054bc2770ced1674bd102022f8d01b169 | [
"MIT"
]
| null | null | null | #!/usr/bin/env python
import sys
import colorama
from pick_db_file import pick_db_file
import db_connection
import card_repository
from review_cards import review_cards
from new_card import new_card
from new_cards import new_cards
import review
from user_colors import print_info, print_instruction, print_error
from usage_info import print_usage_info
def main():
# Initialise colorama
colorama.init()
valid_args = ['-n', '-n2', '-s', '-s2', '-e', '-e2', '-b', '-bf', '-bb', '-bs', '-bl', '-br']
if len(sys.argv) > 1 and sys.argv[1] not in valid_args:
print_usage_info(sys.argv)
if sys.argv[1] not in ['-h', '--help']:
sys.exit(1)
sys.exit()
db_file = pick_db_file()
conn, cursor = db_connection.connect(db_file)
card_repository.create_table_if_not_exists(conn, cursor)
if len(sys.argv) == 1:
table_is_empty = card_repository.check_if_empty(cursor)
if table_is_empty:
print_error("You don't have any cards yet.")
print_instruction(
'Create some cards by launching garrick with one of the following options first:'
)
print_instruction('\t-n\tCreate cards starting in one-way mode.')
print_instruction('\t-n2\tCreate cards starting in two-way mode.')
print_instruction('\t-s\tCreate cards starting in single-line and one-way mode.')
print_instruction('\t-s2\tCreate cards starting in single-line and two-way mode.')
print_instruction('\t-e\tCreate cards starting in editor mode and in one-way mode.')
            print_instruction('\t-e2\tCreate cards starting in editor mode and in two-way mode.')
else:
review.review(conn, cursor)
elif sys.argv[1] == '-n':
new_cards(conn, cursor, two_way_card=False, single_line_mode=False, editor_mode=False)
elif sys.argv[1] == '-n2':
new_cards(conn, cursor, two_way_card=True, single_line_mode=False, editor_mode=False)
elif sys.argv[1] == '-s':
new_cards(conn, cursor, two_way_card=False, single_line_mode=True, editor_mode=False)
elif sys.argv[1] == '-s2':
new_cards(conn, cursor, two_way_card=True, single_line_mode=True, editor_mode=False)
elif sys.argv[1] == '-e':
new_cards(conn, cursor, two_way_card=False, single_line_mode=False, editor_mode=True)
elif sys.argv[1] == '-e2':
new_cards(conn, cursor, two_way_card=True, single_line_mode=False, editor_mode=True)
elif sys.argv[1] == '-b':
review.browse_by_regex(conn, cursor)
elif sys.argv[1] == '-bf':
review.browse_by_regex_front(conn, cursor)
elif sys.argv[1] == '-bb':
review.browse_by_regex_back(conn, cursor)
elif sys.argv[1] == '-bs':
review.browse_by_score(conn, cursor)
elif sys.argv[1] == '-bl':
review.browse_by_last_viewed(conn, cursor)
elif sys.argv[1] == '-br':
review.browse_by_last_viewed_reverse(conn, cursor)
print_info('Kbai')
db_connection.disconnect(conn, cursor)
if __name__ == '__main__':
main()
| 40.454545 | 97 | 0.65618 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 643 | 0.206421 |
489b07c5f60a2349d39829b932ee2b381db5353d | 14,996 | py | Python | perceiver/train/dataset.py | kawa-work/deepmind-research | 8fb75643598f680fdde8d20342b1b82bd2c0abb2 | [
"Apache-2.0"
]
| 10,110 | 2019-08-27T20:05:30.000Z | 2022-03-31T16:31:56.000Z | perceiver/train/dataset.py | subhayuroy/deepmind-research | 769bfdbeafbcb472cb8e2c6cfa746b53ac82efc2 | [
"Apache-2.0"
]
| 317 | 2019-11-09T10:19:10.000Z | 2022-03-31T00:05:19.000Z | perceiver/train/dataset.py | subhayuroy/deepmind-research | 769bfdbeafbcb472cb8e2c6cfa746b53ac82efc2 | [
"Apache-2.0"
]
| 2,170 | 2019-08-28T12:53:36.000Z | 2022-03-31T13:15:11.000Z | # Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ImageNet dataset with pre-processing and augmentation.
Deng, et al CVPR 2009 - ImageNet: A large-scale hierarchical image database.
https://image-net.org/
"""
import enum
from typing import Any, Generator, Mapping, Optional, Sequence, Text, Tuple
import jax
import numpy as np
import tensorflow.compat.v2 as tf
import tensorflow_datasets as tfds
import tensorflow_probability as tfp
from perceiver.train import autoaugment
Batch = Mapping[Text, np.ndarray]
MEAN_RGB = (0.485 * 255, 0.456 * 255, 0.406 * 255)
STDDEV_RGB = (0.229 * 255, 0.224 * 255, 0.225 * 255)
AUTOTUNE = tf.data.experimental.AUTOTUNE
INPUT_DIM = 224  # The side length, in pixels, to which images are resized.
class Split(enum.Enum):
"""ImageNet dataset split."""
TRAIN = 1
TRAIN_AND_VALID = 2
VALID = 3
TEST = 4
@classmethod
def from_string(cls, name: Text) -> 'Split':
return {'TRAIN': Split.TRAIN, 'TRAIN_AND_VALID': Split.TRAIN_AND_VALID,
'VALID': Split.VALID, 'VALIDATION': Split.VALID,
'TEST': Split.TEST}[name.upper()]
@property
def num_examples(self):
return {Split.TRAIN_AND_VALID: 1281167, Split.TRAIN: 1271167,
Split.VALID: 10000, Split.TEST: 50000}[self]
def load(
split: Split,
*,
is_training: bool,
# batch_dims should be:
# [device_count, per_device_batch_size] or [total_batch_size]
batch_dims: Sequence[int],
augmentation_settings: Mapping[str, Any],
# The shape to which images are resized.
im_dim: int = INPUT_DIM,
threadpool_size: int = 48,
max_intra_op_parallelism: int = 1,
) -> Generator[Batch, None, None]:
"""Loads the given split of the dataset."""
start, end = _shard(split, jax.host_id(), jax.host_count())
im_size = (im_dim, im_dim)
total_batch_size = np.prod(batch_dims)
tfds_split = tfds.core.ReadInstruction(_to_tfds_split(split),
from_=start, to=end, unit='abs')
ds = tfds.load('imagenet2012:5.*.*', split=tfds_split,
decoders={'image': tfds.decode.SkipDecoding()})
options = tf.data.Options()
options.experimental_threading.private_threadpool_size = threadpool_size
options.experimental_threading.max_intra_op_parallelism = (
max_intra_op_parallelism)
options.experimental_optimization.map_parallelization = True
if is_training:
options.experimental_deterministic = False
ds = ds.with_options(options)
if is_training:
if jax.host_count() > 1:
# Only cache if we are reading a subset of the dataset.
ds = ds.cache()
ds = ds.repeat()
ds = ds.shuffle(buffer_size=10 * total_batch_size, seed=0)
else:
if split.num_examples % total_batch_size != 0:
raise ValueError(f'Test/valid must be divisible by {total_batch_size}')
def crop_augment_preprocess(example):
image, _ = _preprocess_image(
example['image'], is_training, im_size, augmentation_settings)
label = tf.cast(example['label'], tf.int32)
out = {'images': image, 'labels': label}
if is_training:
if augmentation_settings['cutmix']:
out['mask'] = cutmix_padding(*im_size)
out['cutmix_ratio'] = tf.reduce_mean(out['mask'])
if augmentation_settings['mixup_alpha'] is not None:
beta = tfp.distributions.Beta(
augmentation_settings['mixup_alpha'],
augmentation_settings['mixup_alpha'])
out['mixup_ratio'] = beta.sample()
return out
ds = ds.map(crop_augment_preprocess, num_parallel_calls=AUTOTUNE)
# Mixup/cutmix by temporarily batching (using the per-device batch size):
use_cutmix = augmentation_settings['cutmix']
use_mixup = augmentation_settings['mixup_alpha'] is not None
if is_training and (use_cutmix or use_mixup):
inner_batch_size = batch_dims[-1]
# Apply mixup, cutmix, or mixup + cutmix on batched data.
# We use data from 2 batches to produce 1 mixed batch.
ds = ds.batch(inner_batch_size * 2)
if not use_cutmix and use_mixup:
ds = ds.map(my_mixup, num_parallel_calls=AUTOTUNE)
elif use_cutmix and not use_mixup:
ds = ds.map(my_cutmix, num_parallel_calls=AUTOTUNE)
elif use_cutmix and use_mixup:
ds = ds.map(my_mixup_cutmix, num_parallel_calls=AUTOTUNE)
# Unbatch for further processing.
ds = ds.unbatch()
for batch_size in reversed(batch_dims):
ds = ds.batch(batch_size)
ds = ds.prefetch(AUTOTUNE)
yield from tfds.as_numpy(ds)
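# Illustrative usage sketch (the augmentation_settings keys below are the ones
# consumed in this file; real training configs may carry additional fields):
#
#   train_ds = load(
#       Split.TRAIN_AND_VALID,
#       is_training=True,
#       batch_dims=[jax.local_device_count(), 32],
#       augmentation_settings={
#           'cutmix': True,
#           'mixup_alpha': 0.2,
#           'randaugment': {'num_layers': 2, 'magnitude': 10},
#       })
#   batch = next(train_ds)  # keys: 'images', 'labels', 'mix_labels', 'ratio'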
# cutmix_padding, my_cutmix, my_mixup, and my_mixup_cutmix taken from:
# https://github.com/deepmind/deepmind-research/blob/master/nfnets/dataset.py
def cutmix_padding(h, w):
"""Returns image mask for CutMix.
Taken from (https://github.com/google/edward2/blob/master/experimental
/marginalization_mixup/data_utils.py#L367)
Args:
h: image height.
w: image width.
"""
r_x = tf.random.uniform([], 0, w, tf.int32)
r_y = tf.random.uniform([], 0, h, tf.int32)
# Beta dist in paper, but they used Beta(1,1) which is just uniform.
image1_proportion = tf.random.uniform([])
patch_length_ratio = tf.math.sqrt(1 - image1_proportion)
r_w = tf.cast(patch_length_ratio * tf.cast(w, tf.float32), tf.int32)
r_h = tf.cast(patch_length_ratio * tf.cast(h, tf.float32), tf.int32)
bbx1 = tf.clip_by_value(tf.cast(r_x - r_w // 2, tf.int32), 0, w)
bby1 = tf.clip_by_value(tf.cast(r_y - r_h // 2, tf.int32), 0, h)
bbx2 = tf.clip_by_value(tf.cast(r_x + r_w // 2, tf.int32), 0, w)
bby2 = tf.clip_by_value(tf.cast(r_y + r_h // 2, tf.int32), 0, h)
# Create the binary mask.
pad_left = bbx1
pad_top = bby1
pad_right = tf.maximum(w - bbx2, 0)
pad_bottom = tf.maximum(h - bby2, 0)
r_h = bby2 - bby1
r_w = bbx2 - bbx1
mask = tf.pad(
tf.ones((r_h, r_w)),
paddings=[[pad_top, pad_bottom], [pad_left, pad_right]],
mode='CONSTANT',
constant_values=0)
mask.set_shape((h, w))
return mask[..., None] # Add channel dim.
def my_cutmix(batch):
"""Apply CutMix: https://arxiv.org/abs/1905.04899."""
batch = dict(**batch)
bs = tf.shape(batch['images'])[0] // 2
mask = batch['mask'][:bs]
images = (mask * batch['images'][:bs] + (1.0 - mask) * batch['images'][bs:])
mix_labels = batch['labels'][bs:]
labels = batch['labels'][:bs]
ratio = batch['cutmix_ratio'][:bs]
return {'images': images, 'labels': labels,
'mix_labels': mix_labels, 'ratio': ratio}
def my_mixup(batch):
"""Apply mixup: https://arxiv.org/abs/1710.09412."""
batch = dict(**batch)
bs = tf.shape(batch['images'])[0] // 2
ratio = batch['mixup_ratio'][:bs, None, None, None]
images = (ratio * batch['images'][:bs] + (1.0 - ratio) * batch['images'][bs:])
mix_labels = batch['labels'][bs:]
labels = batch['labels'][:bs]
ratio = ratio[..., 0, 0, 0] # Unsqueeze
return {'images': images, 'labels': labels,
'mix_labels': mix_labels, 'ratio': ratio}
def my_mixup_cutmix(batch):
"""Apply mixup to half the batch, and cutmix to the other."""
batch = dict(**batch)
bs = tf.shape(batch['images'])[0] // 4
mixup_ratio = batch['mixup_ratio'][:bs, None, None, None]
mixup_images = (mixup_ratio * batch['images'][:bs]
+ (1.0 - mixup_ratio) * batch['images'][bs:2*bs])
mixup_labels = batch['labels'][:bs]
mixup_mix_labels = batch['labels'][bs:2*bs]
cutmix_mask = batch['mask'][2*bs:3*bs]
cutmix_images = (cutmix_mask * batch['images'][2*bs:3*bs]
+ (1.0 - cutmix_mask) * batch['images'][-bs:])
cutmix_labels = batch['labels'][2*bs:3*bs]
cutmix_mix_labels = batch['labels'][-bs:]
cutmix_ratio = batch['cutmix_ratio'][2*bs : 3*bs]
return {'images': tf.concat([mixup_images, cutmix_images], axis=0),
'labels': tf.concat([mixup_labels, cutmix_labels], axis=0),
'mix_labels': tf.concat([mixup_mix_labels, cutmix_mix_labels], 0),
'ratio': tf.concat([mixup_ratio[..., 0, 0, 0], cutmix_ratio], axis=0)}
def _to_tfds_split(split: Split) -> tfds.Split:
"""Returns the TFDS split appropriately sharded."""
# NOTE: Imagenet did not release labels for the test split used in the
# competition, so it has been typical at DeepMind to consider the VALID
# split the TEST split and to reserve 10k images from TRAIN for VALID.
if split in (
Split.TRAIN, Split.TRAIN_AND_VALID, Split.VALID):
return tfds.Split.TRAIN
else:
assert split == Split.TEST
return tfds.Split.VALIDATION
def _shard(
split: Split, shard_index: int, num_shards: int) -> Tuple[int, int]:
"""Returns [start, end) for the given shard index."""
assert shard_index < num_shards
arange = np.arange(split.num_examples)
shard_range = np.array_split(arange, num_shards)[shard_index]
start, end = shard_range[0], (shard_range[-1] + 1)
if split == Split.TRAIN:
# Note that our TRAIN=TFDS_TRAIN[10000:] and VALID=TFDS_TRAIN[:10000].
offset = Split.VALID.num_examples
start += offset
end += offset
return start, end
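# Worked example of _shard: with Split.TEST (50000 examples) and num_shards=2,
# shard 0 reads [0, 25000) and shard 1 reads [25000, 50000). For Split.TRAIN
# the same ranges are offset by the 10000 VALID examples carved out of the
# TFDS TRAIN split.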
def _preprocess_image(
image_bytes: tf.Tensor,
is_training: bool,
image_size: Sequence[int],
augmentation_settings: Mapping[str, Any],
) -> Tuple[tf.Tensor, tf.Tensor]:
"""Returns processed and resized images."""
# Get the image crop.
if is_training:
image, im_shape = _decode_and_random_crop(image_bytes)
image = tf.image.random_flip_left_right(image)
else:
image, im_shape = _decode_and_center_crop(image_bytes)
assert image.dtype == tf.uint8
# Optionally apply RandAugment: https://arxiv.org/abs/1909.13719
if is_training:
if augmentation_settings['randaugment'] is not None:
# Input and output images are dtype uint8.
image = autoaugment.distort_image_with_randaugment(
image,
num_layers=augmentation_settings['randaugment']['num_layers'],
magnitude=augmentation_settings['randaugment']['magnitude'])
# Resize and normalize the image crop.
# NOTE: Bicubic resize (1) casts uint8 to float32 and (2) resizes without
# clamping overshoots. This means values returned will be outside the range
# [0.0, 255.0] (e.g. we have observed outputs in the range [-51.1, 336.6]).
image = tf.image.resize(
image, image_size, tf.image.ResizeMethod.BICUBIC)
image = _normalize_image(image)
return image, im_shape
def _normalize_image(image: tf.Tensor) -> tf.Tensor:
"""Normalize the image to zero mean and unit variance."""
image -= tf.constant(MEAN_RGB, shape=[1, 1, 3], dtype=image.dtype)
image /= tf.constant(STDDEV_RGB, shape=[1, 1, 3], dtype=image.dtype)
return image
def _distorted_bounding_box_crop(
image_bytes: tf.Tensor,
*,
jpeg_shape: tf.Tensor,
bbox: tf.Tensor,
min_object_covered: float,
aspect_ratio_range: Tuple[float, float],
area_range: Tuple[float, float],
max_attempts: int,
) -> Tuple[tf.Tensor, tf.Tensor]:
"""Generates cropped_image using one of the bboxes randomly distorted."""
bbox_begin, bbox_size, _ = tf.image.sample_distorted_bounding_box(
jpeg_shape,
bounding_boxes=bbox,
min_object_covered=min_object_covered,
aspect_ratio_range=aspect_ratio_range,
area_range=area_range,
max_attempts=max_attempts,
use_image_if_no_bounding_boxes=True)
# Crop the image to the specified bounding box.
offset_y, offset_x, _ = tf.unstack(bbox_begin)
target_height, target_width, _ = tf.unstack(bbox_size)
crop_window = [offset_y, offset_x, target_height, target_width]
if image_bytes.dtype == tf.dtypes.string:
image = tf.image.decode_and_crop_jpeg(image_bytes,
tf.stack(crop_window),
channels=3)
else:
image = tf.image.crop_to_bounding_box(image_bytes, *crop_window)
im_shape = tf.stack([target_height, target_width])
return image, im_shape
def _decode_whole_image(image_bytes: tf.Tensor) -> Tuple[tf.Tensor, tf.Tensor]:
image = tf.io.decode_jpeg(image_bytes, channels=3)
im_shape = tf.io.extract_jpeg_shape(image_bytes, output_type=tf.int32)
return image, im_shape
def _decode_and_random_crop(
image_bytes: tf.Tensor
) -> Tuple[tf.Tensor, tf.Tensor]:
"""Make a random crop of INPUT_DIM."""
if image_bytes.dtype == tf.dtypes.string:
jpeg_shape = tf.image.extract_jpeg_shape(image_bytes)
else:
jpeg_shape = tf.shape(image_bytes)
bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4])
image, im_shape = _distorted_bounding_box_crop(
image_bytes,
jpeg_shape=jpeg_shape,
bbox=bbox,
min_object_covered=0.1,
aspect_ratio_range=(3 / 4, 4 / 3),
area_range=(0.08, 1.0),
max_attempts=10)
if tf.reduce_all(tf.equal(jpeg_shape, tf.shape(image))):
# If the random crop failed fall back to center crop.
image, im_shape = _decode_and_center_crop(image_bytes, jpeg_shape)
return image, im_shape
def _center_crop(image, crop_dim):
"""Center crops an image to a target dimension."""
image_height = image.shape[0]
image_width = image.shape[1]
offset_height = ((image_height - crop_dim) + 1) // 2
offset_width = ((image_width - crop_dim) + 1) // 2
return tf.image.crop_to_bounding_box(
image, offset_height, offset_width, crop_dim, crop_dim)
def _decode_and_center_crop(
image_bytes: tf.Tensor,
jpeg_shape: Optional[tf.Tensor] = None,
) -> Tuple[tf.Tensor, tf.Tensor]:
"""Crops to center of image with padding then scales."""
if jpeg_shape is None:
if image_bytes.dtype == tf.dtypes.string:
jpeg_shape = tf.image.extract_jpeg_shape(image_bytes)
else:
jpeg_shape = tf.shape(image_bytes)
image_height = jpeg_shape[0]
image_width = jpeg_shape[1]
padded_center_crop_size = tf.cast(
((INPUT_DIM / (INPUT_DIM + 32)) *
tf.cast(tf.minimum(image_height, image_width), tf.float32)), tf.int32)
offset_height = ((image_height - padded_center_crop_size) + 1) // 2
offset_width = ((image_width - padded_center_crop_size) + 1) // 2
crop_window = [offset_height, offset_width,
padded_center_crop_size, padded_center_crop_size]
if image_bytes.dtype == tf.dtypes.string:
image = tf.image.decode_and_crop_jpeg(image_bytes,
tf.stack(crop_window),
channels=3)
else:
image = tf.image.crop_to_bounding_box(image_bytes, *crop_window)
im_shape = tf.stack([padded_center_crop_size, padded_center_crop_size])
return image, im_shape
| 35.367925 | 80 | 0.684449 | 520 | 0.034676 | 3,217 | 0.214524 | 400 | 0.026674 | 0 | 0 | 3,759 | 0.250667 |
489b33857f5199eb9d0dc568f0aa601495f6f304 | 12,621 | py | Python | pyapprox/manipulate_polynomials.py | samtx/pyapprox | c926d910e30fbcfed7d0621175d3b0268d59f852 | [
"MIT"
]
| null | null | null | pyapprox/manipulate_polynomials.py | samtx/pyapprox | c926d910e30fbcfed7d0621175d3b0268d59f852 | [
"MIT"
]
| null | null | null | pyapprox/manipulate_polynomials.py | samtx/pyapprox | c926d910e30fbcfed7d0621175d3b0268d59f852 | [
"MIT"
]
| null | null | null | import numpy as np
from scipy.special import factorial
from pyapprox.indexing import hash_array
from pyapprox.indexing import compute_hyperbolic_level_indices
def multiply_multivariate_polynomials(indices1,coeffs1,indices2,coeffs2):
"""
    TODO: instead of using a dictionary to collect terms consider using
unique_indices,repeated_idx=np.unique(
indices[active_idx,:],axis=1,return_inverse=True)
as is done in multivariate_polynomials.conditional_moments_of_polynomial_chaos_expansion. Choose which one is faster
    Parameters
    ----------
    indices1, indices2 : np.ndarray (num_vars, num_indices_i)
        The multivariate indices of each polynomial
    coeffs1, coeffs2 : np.ndarray (num_indices_i)
        The coefficients of each polynomial
    Returns
    -------
    indices : np.ndarray (num_vars, num_terms)
        The multivariate indices of the product polynomial
    coeffs : np.ndarray (num_terms)
        The coefficients of the product polynomial
    """
num_vars = indices1.shape[0]
num_indices1 = indices1.shape[1]
num_indices2 = indices2.shape[1]
assert num_indices1==coeffs1.shape[0]
assert num_indices2==coeffs2.shape[0]
assert num_vars==indices2.shape[0]
indices_dict = dict()
max_num_indices = num_indices1*num_indices2
indices = np.empty((num_vars,max_num_indices),int)
coeffs = np.empty((max_num_indices),float)
kk = 0
for ii in range(num_indices1):
index1 = indices1[:,ii]
coeff1 = coeffs1[ii]
for jj in range(num_indices2):
index= index1+indices2[:,jj]
key = hash_array(index)
coeff = coeff1*coeffs2[jj]
if key in indices_dict:
coeffs[indices_dict[key]]+=coeff
else:
indices_dict[key]=kk
indices[:,kk]=index
coeffs[kk]=coeff
kk+=1
indices = indices[:,:kk]
coeffs = coeffs[:kk]
return indices, coeffs
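# Worked example (shapes follow the convention indices.shape == (num_vars, num_terms)):
# multiplying p1 = 1 + x1 (indices [[0, 1]], coeffs [1, 1]) with p2 = x1
# (indices [[1]], coeffs [1]) yields indices [[1, 2]] and coeffs [1, 1],
# i.e. x1 + x1**2.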
def coeffs_of_power_of_nd_linear_polynomial(num_vars, degree, linear_coeffs):
"""
Compute the polynomial (coefficients and indices) obtained by raising
a linear multivariate polynomial (no constant term) to some power.
Parameters
----------
num_vars : integer
The number of variables
degree : integer
The power of the linear polynomial
linear_coeffs: np.ndarray (num_vars)
The coefficients of the linear polynomial
Returns
-------
coeffs: np.ndarray (num_terms)
The coefficients of the new polynomial
indices : np.ndarray (num_vars, num_terms)
The set of multivariate indices that define the new polynomial
"""
assert len(linear_coeffs)==num_vars
coeffs, indices=multinomial_coeffs_of_power_of_nd_linear_polynomial(
num_vars, degree)
for ii in range(indices.shape[1]):
index = indices[:,ii]
for dd in range(num_vars):
degree = index[dd]
coeffs[ii] *= linear_coeffs[dd]**degree
return coeffs, indices
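# Worked example: (x1 + 2*x2)**2 = x1**2 + 4*x1*x2 + 4*x2**2, so
# coeffs_of_power_of_nd_linear_polynomial(2, 2, [1, 2]) returns the
# coefficients [1, 4, 4] paired with the degree-2 indices {(2, 0), (1, 1),
# (0, 2)} (column ordering follows compute_hyperbolic_level_indices).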
def substitute_polynomial_for_variables_in_polynomial(
indices_in,coeffs_in,indices,coeffs,var_idx):
num_vars, num_terms = indices.shape
new_indices = []
new_coeffs = []
for ii in range(num_terms):
index = indices[:,ii]
pows = index[var_idx]
ind,cf = substitute_polynomial_for_variables_in_single_basis_term(
indices_in,coeffs_in,index,coeffs[ii],var_idx,pows)
new_indices.append(ind)
new_coeffs.append(cf)
new_indices = np.hstack(new_indices)
new_coeffs = np.vstack(new_coeffs)
return new_indices, new_coeffs
def substitute_polynomial_for_variables_in_single_basis_term(
indices_in,coeffs_in,basis_index,basis_coeff,var_idx,global_var_idx,
num_global_vars):
"""
var_idx : np.ndarray (nsub_vars)
The dimensions in basis_index which will be substituted
global_var_idx : [ np.ndarray(nvars[ii]) for ii in num_inputs]
The index of the active variables for each input
"""
num_inputs = var_idx.shape[0]
assert num_inputs==len(indices_in)
assert num_inputs==len(coeffs_in)
assert basis_coeff.shape[0]==1
assert var_idx.max()<basis_index.shape[0]
assert basis_index.shape[1]==1
assert len(global_var_idx)==num_inputs
# store input indices in global_var_idx
temp = []
for ii in range(num_inputs):
ind = np.zeros((num_global_vars,indices_in[ii].shape[1]))
ind[global_var_idx,:] = indices_in[ii]
temp.append(ind)
indices_in = temp
jj=0
degree = basis_index[var_idx[jj]]
c1,ind1 = coeffs_of_power_of_polynomial(
indices_in,coeffs_in[:,jj:jj+1],degree)
for jj in range(1,var_idx.shape[0]):
degree = basis_index[var_idx[jj]]
c2,ind2 = coeffs_of_power_of_polynomial(
indices_in,coeffs_in[:,jj:jj+1],degree)
ind1,c1 = multiply_multivariate_polynomials(ind1,c1,ind2,c2)
# this mask may be wrong. I might be confusing global and var idx
mask = np.ones(basis_index.shape[0],dtype=bool); mask[var_idx]=False
print(ind1.shape,mask.shape)
ind1[mask,:] += basis_index[mask]
c1*=basis_coeff
return ind1, c1
def composition_of_polynomials(indices_list,coeffs_list):
npolys = len(indices_list)
assert npolys==len(coeffs_list)
for ii in range(1,npolys):
new_poly = 2
return new_poly
def coeffs_of_power_of_polynomial(indices, coeffs, degree):
"""
Compute the polynomial (coefficients and indices) obtained by raising
a multivariate polynomial to some power.
TODO: Deprecate coeffs_of_power_of_nd_linear_polynomial as that function
can be obtained as a special case of this function
Parameters
----------
indices : np.ndarray (num_vars,num_terms)
The indices of the multivariate polynomial
    coeffs : np.ndarray (num_terms, num_qoi)
        The coefficients of the polynomial
Returns
-------
coeffs: np.ndarray (num_terms)
The coefficients of the new polynomial
indices : np.ndarray (num_vars, num_terms)
The set of multivariate indices that define the new polynomial
"""
num_vars, num_terms = indices.shape
assert indices.shape[1]==coeffs.shape[0]
multinomial_coeffs, multinomial_indices = \
multinomial_coeffs_of_power_of_nd_linear_polynomial(num_terms, degree)
new_indices = np.zeros((num_vars,multinomial_indices.shape[1]))
new_coeffs = np.tile(multinomial_coeffs[:,np.newaxis],coeffs.shape[1])
for ii in range(multinomial_indices.shape[1]):
multinomial_index = multinomial_indices[:,ii]
for dd in range(num_terms):
deg = multinomial_index[dd]
new_coeffs[ii] *= coeffs[dd]**deg
new_indices[:,ii] += indices[:,dd]*deg
return new_coeffs, new_indices
def group_like_terms(coeffs, indices):
if coeffs.ndim==1:
coeffs = coeffs[:,np.newaxis]
num_vars,num_indices = indices.shape
indices_dict = {}
for ii in range(num_indices):
key = hash_array(indices[:,ii])
if not key in indices_dict:
indices_dict[key] = [coeffs[ii],ii]
else:
indices_dict[key] = [indices_dict[key][0]+coeffs[ii],ii]
new_coeffs = np.empty((len(indices_dict),coeffs.shape[1]))
new_indices = np.empty((num_vars,len(indices_dict)),dtype=int)
ii=0
for key, item in indices_dict.items():
new_indices[:,ii] = indices[:,item[1]]
new_coeffs[ii] = item[0]
ii+=1
return new_coeffs, new_indices
def multinomial_coefficient(index):
"""Compute the multinomial coefficient of an index [i1,i2,...,id].
Parameters
----------
index : multidimensional index
multidimensional index specifying the polynomial degree in each
dimension
Returns
-------
coeff : double
the multinomial coefficient
"""
level = index.sum()
denom = np.prod(factorial(index))
coeff = factorial(level)/denom
return coeff
def multinomial_coefficients(indices):
coeffs = np.empty((indices.shape[1]),float)
for i in range(indices.shape[1]):
coeffs[i] = multinomial_coefficient(indices[:,i])
return coeffs
def multinomial_coeffs_of_power_of_nd_linear_polynomial(num_vars,degree):
""" Compute the multinomial coefficients of the individual terms
obtained when taking the power of a linear polynomial
(without constant term).
Given a linear multivariate polynomial e.g.
e.g. (x1+x2+x3)**2 = x1**2+2*x1*x2+2*x1*x3+2*x2**2+x2*x3+x3**2
return the coefficients of each quadratic term, i.e.
[1,2,2,1,2,1]
Parameters
----------
num_vars : integer
the dimension of the multivariate polynomial
degree : integer
the power of the linear polynomial
Returns
-------
coeffs: np.ndarray (num_terms)
the multinomial coefficients of the polynomial obtained when
raising the linear multivariate polynomial to the power=degree
indices: np.ndarray (num_terms)
the indices of the polynomial obtained when
raising the linear multivariate polynomial to the power=degree
"""
indices = compute_hyperbolic_level_indices(num_vars,degree,1.0)
coeffs = multinomial_coefficients(indices)
return coeffs, indices
def add_polynomials(indices_list, coeffs_list):
"""
Add many polynomials together.
Example:
p1 = x1**2+x2+x3, p2 = x2**2+2*x3
p3 = p1+p2
return the degrees of each term in the the polynomial
p3 = x1**2+x2+3*x3+x2**2
[2, 1, 1, 2]
and the coefficients of each of these terms
[1., 1., 3., 1.]
Parameters
----------
indices_list : list [np.ndarray (num_vars,num_indices_i)]
List of polynomial indices. indices_i may be different for each
polynomial
coeffs_list : list [np.ndarray (num_indices_i,num_qoi)]
List of polynomial coefficients. indices_i may be different for each
polynomial. num_qoi must be the same for each list element.
Returns
-------
indices: np.ndarray (num_vars,num_terms)
the polynomial indices of the polynomial obtained from
summing the polynomials. This will be the union of the indices
of the input polynomials
coeffs: np.ndarray (num_terms,num_qoi)
the polynomial coefficients of the polynomial obtained from
summing the polynomials
"""
num_polynomials = len(indices_list)
assert num_polynomials==len(coeffs_list)
indices_dict = dict()
indices = []
coeff = []
ii=0; kk=0
for jj in range(indices_list[ii].shape[1]):
assert coeffs_list[ii].ndim==2
assert coeffs_list[ii].shape[0]==indices_list[ii].shape[1]
index=indices_list[ii][:,jj]
indices_dict[hash_array(index)]=kk
indices.append(index)
coeff.append(coeffs_list[ii][jj,:].copy())
kk+=1
for ii in range(1,num_polynomials):
#print indices_list[ii].T,num_polynomials
assert coeffs_list[ii].ndim==2
assert coeffs_list[ii].shape[0]==indices_list[ii].shape[1]
for jj in range(indices_list[ii].shape[1]):
index=indices_list[ii][:,jj]
key = hash_array(index)
if key in indices_dict:
nn = indices_dict[key]
coeff[nn]+=coeffs_list[ii][jj,:]
else:
indices_dict[key]=kk
indices.append(index)
coeff.append(coeffs_list[ii][jj,:].copy())
kk+=1
indices = np.asarray(indices).T
coeff = np.asarray(coeff)
return indices, coeff
def get_indices_double_set(indices):
"""
    Given multivariate indices
[i1,i2,...,]
Compute its double set by
[i1*i1,i1*i2,...,i2*i2,i2*i3...]
The double set will only contain unique indices
Parameters
----------
indices : np.ndarray (num_vars,num_indices)
The initial indices
Returns
-------
double_set_indices : np.ndarray (num_vars,num_indices)
The double set of indices
"""
dummy_coeffs = np.zeros(indices.shape[1])
double_set_indices = multiply_multivariate_polynomials(
indices,dummy_coeffs,indices,dummy_coeffs)[0]
return double_set_indices
def shift_momomial_expansion(coef,shift,scale):
assert coef.ndim==1
shifted_coef = np.zeros_like(coef)
shifted_coef[0]=coef[0]
nterms = coef.shape[0]
for ii in range(1,nterms):
temp = np.polynomial.polynomial.polypow([1,-shift],ii)
shifted_coef[:ii+1] += coef[ii]*temp[::-1]/scale**ii
return shifted_coef
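# Worked example (the transform above maps p(x) to p((x - shift) / scale) in
# the monomial basis): for coef = np.array([0., 0., 1.]), i.e. p(x) = x**2,
# with shift = 1 and scale = 2 the returned coefficients are
# [0.25, -0.5, 0.25], matching ((x - 1) / 2)**2 = 0.25*x**2 - 0.5*x + 0.25.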
#Some of these functions can be replaced by numpy functions described at
#https://docs.scipy.org/doc/numpy/reference/routines.polynomials.polynomial.html
| 32.44473 | 120 | 0.659853 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,061 | 0.400998 |
489cc2435903d89dac82418e6c3f47ec952a38f4 | 12,303 | py | Python | core/data/load_data.py | Originofamonia/mcan-vqa | e7e9fdc654d72dbbcbc03e43ae8a59c16b6d10d1 | [
"Apache-2.0"
]
| null | null | null | core/data/load_data.py | Originofamonia/mcan-vqa | e7e9fdc654d72dbbcbc03e43ae8a59c16b6d10d1 | [
"Apache-2.0"
]
| null | null | null | core/data/load_data.py | Originofamonia/mcan-vqa | e7e9fdc654d72dbbcbc03e43ae8a59c16b6d10d1 | [
"Apache-2.0"
]
| null | null | null | # --------------------------------------------------------
# mcan-vqa (Deep Modular Co-Attention Networks)
# Licensed under The MIT License [see LICENSE for details]
# Written by Yuhao Cui https://github.com/cuiyuhao1996
# --------------------------------------------------------
import h5py
import pickle
import random
import numpy as np
from numpy.random import default_rng
import pandas as pd
import glob, json, torch, time
from torch.utils.data import Dataset, DataLoader
from core.data.data_utils import img_feat_path_load, img_feat_load, ques_load, tokenize, ans_stat
from core.data.data_utils import pad_img_feat, proc_ques, proc_ans, proc_mimic_ans
class CustomDataset(Dataset):
def __init__(self, opt):
self.opt = opt
# ---- Raw data loading ----
# Loading all image paths
# if self.opt.preload:
self.img_feat_path_list = []
split_list = opt.split[opt.run_mode].split('+') # change this for split
# split_list = ['test']
for split in split_list:
if split in ['train', 'val', 'test']:
self.img_feat_path_list += glob.glob(opt.img_feat_path[split] + '*.npz')
# Loading question word list
self.stat_ques_list = \
json.load(open(opt.question_path['train'], 'r'))['questions'] + \
json.load(open(opt.question_path['val'], 'r'))['questions'] + \
json.load(open(opt.question_path['test'], 'r'))['questions'] + \
json.load(open(opt.question_path['vg'], 'r'))['questions']
# Loading answer word list
# self.stat_ans_list = \
# json.load(open(__C.answer_path['train'], 'r'))['annotations'] + \
# json.load(open(__C.answer_path['val'], 'r'))['annotations']
# Loading question and answer list
self.ques_list = []
self.ans_list = []
# split_list = opt.split[opt.run_mode].split('+')
# split_list = ['train', 'val', 'test']
for split in split_list:
self.ques_list += json.load(open(opt.question_path[split], 'r'))['questions']
# if opt.run_mode in ['train']:
self.ans_list += json.load(open(opt.answer_path[split], 'r'))['annotations']
# Define run data size
if opt.run_mode in ['train']:
self.data_size = self.ans_list.__len__()
else:
self.data_size = self.ques_list.__len__()
print('== Dataset size:', self.data_size)
# ---- Data statistic ----
# {image id} -> {image feature absolutely path}
if self.opt.preload:
print('==== Pre-Loading features ...')
time_start = time.time()
self.iid_to_img_feat = img_feat_load(self.img_feat_path_list)
time_end = time.time()
print('==== Finished in {}s'.format(int(time_end-time_start)))
else:
self.iid_to_img_feat_path = img_feat_path_load(self.img_feat_path_list)
# {question id} -> {question}
self.qid_to_ques = ques_load(self.ques_list)
# Tokenize
self.token_to_ix, self.pretrained_emb = tokenize(self.stat_ques_list, opt.use_glove)
self.token_size = self.token_to_ix.__len__()
print('== Question token vocab size:', self.token_size)
# Answers statistic
        # Building the answer dict during training does not guarantee
        # the same order of {ans_to_ix}, so we published our
# answer dict to ensure that our pre-trained model
# can be adapted on each machine.
# Thanks to Licheng Yu (https://github.com/lichengunc)
# for finding this bug and providing the solutions.
# self.ans_to_ix, self.ix_to_ans = ans_stat(self.stat_ans_list, __C.ANS_FREQ)
self.ans_to_ix, self.ix_to_ans = ans_stat('core/data/answer_dict.json')
self.ans_size = self.ans_to_ix.__len__()
        print('== Answer vocab size (occur more than {} times):'.format(8), self.ans_size)
print('load dataset finished.')
def __getitem__(self, idx):
# For code safety
img_feat_iter = np.zeros(1)
ques_ix_iter = np.zeros(1)
ans_iter = np.zeros(1)
# Process ['train'] and ['val', 'test'] respectively
if self.opt.run_mode in ['train']:
# Load the run data from list
ans = self.ans_list[idx]
ques = self.qid_to_ques[str(ans['question_id'])]
# Process image feature from (.npz) file
if self.opt.preload:
img_feat_x = self.iid_to_img_feat[str(ans['image_id'])]
else:
img_feats = np.load(self.iid_to_img_feat_path[str(ans['image_id'])])
img_feat_x = img_feats['x'].transpose((1, 0))
img_feat_iter = pad_img_feat(img_feat_x, self.opt.img_feat_pad_size)
boxes = pad_img_feat(img_feats['bbox'], self.opt.img_feat_pad_size)
# Process question
ques_ix_iter = proc_ques(ques, self.token_to_ix, self.opt.max_token)
# Process answer
ans_iter = proc_ans(ans, self.ans_to_ix)
return torch.from_numpy(img_feat_iter), \
torch.from_numpy(ques_ix_iter), \
torch.from_numpy(ans_iter), torch.from_numpy(boxes), torch.tensor([idx]), self.opt.run_mode
else:
# Load the run data from list
ques = self.ques_list[idx]
# # Process image feature from (.npz) file
# img_feat = np.load(self.iid_to_img_feat_path[str(ques['image_id'])])
# img_feat_x = img_feat['x'].transpose((1, 0))
# Process image feature from (.npz) file
if self.opt.preload:
img_feat_x = self.iid_to_img_feat[str(ques['image_id'])]
else:
img_feats = np.load(self.iid_to_img_feat_path[str(ques['image_id'])])
img_feat_x = img_feats['x'].transpose((1, 0))
img_feat_iter = pad_img_feat(img_feat_x, self.opt.img_feat_pad_size)
# Process question
ques_ix_iter = proc_ques(ques, self.token_to_ix, self.opt.max_token)
# only works for batchsize=1
return torch.from_numpy(img_feat_iter), \
torch.from_numpy(ques_ix_iter), \
torch.from_numpy(ans_iter), img_feats, idx
def __len__(self):
return self.data_size
class MIMICDatasetBase(Dataset):
"""
MIMIC dataset base: includes everything but (train/val/test) qa pickles
"""
def __init__(self, opt) -> None:
super().__init__()
self.opt = opt
# self.chexpert_df = pd.read_csv(f'/home/qiyuan/2021summer/physionet.org/files/mimic-cxr-jpg/2.0.0/mimic-cxr-2.0.0-chexpert.csv.gz')
f1 = h5py.File(opt.cxr_img_feat_path, 'r')
print(f'keys: {f1.keys()}')
self.image_features = f1['image_features'] # [377110, 60, 1024], 36 ana + 24 finding
# self.bbox_label = f1['bbox_label'] # [377k, 60]
# self.image_adj_matrix = f1['image_adj_matrix'] # [377k, 100, 100]
self.image_bb = f1['image_bb'] # [377k, 60, 4]
# self.pos_boxes = f1['pos_boxes'] # [377k, 2]
# self.semantic_adj_matrix = f1['semantic_adj_matrix'] # [377k, 100, 100]
self.spatial_features = f1['spatial_features'] # [377k, 60, 6]
f5 = h5py.File(opt.ana_pool_finding_path, 'r')
print(f'f5 keys: {f5.keys()}')
# anatomical box pooled findings feature
self.ana_pooled_feats = f5['image_features'] # [377k, 26, 1024]
self.v_dim = self.ana_pooled_feats.chunks[-1] # visual feat dim
self.s_dim = self.spatial_features.chunks[-1] # spatial dim
with open(opt.mimic_ans_dict_path['ans2idx'], 'rb') as f3:
self.ans_to_ix = pickle.load(f3)
# because no_finding becomes yes or no, so 15 labels
with open(opt.mimic_ans_dict_path['idx2ans'], 'rb') as f4:
self.ix_to_ans = pickle.load(f4)
self.ans_size = self.ans_to_ix.__len__() # was self.ans_to_ix.__len__()
        print('== Answer vocab size (occur more than {} times):'.format(8), self.ans_size)
print('load mimic base dataset finished.')
class MIMICDatasetSplit(MIMICDatasetBase):
"""
train/val/test split of MIMIC QA dataset
"""
def __init__(self, opt) -> None:
super().__init__(opt)
with open(opt.mimic_qa_path[opt.run_mode], 'rb') as f2:
self.qa = pickle.load(f2) # qa pairs
# if opt.run_mode == 'train':
# self.qa = random.sample(self.qa, 20000)
self.token_to_ix, self.pretrained_emb = tokenize(self.qa, opt.use_glove)
self.token_size = self.token_to_ix.__len__()
self.data_size = self.qa.__len__()
print('== Question token vocab size:', self.token_size)
def __getitem__(self, idx):
img_feat_iter = np.zeros(1)
ques_ix_iter = np.zeros(1)
ans_iter = np.zeros(1)
qa = self.qa[idx]
# subject_id = int(qa['subject_id'][:-2])
# study_id = int(qa['study_id'][:-2])
# multi_label = (self.chexpert_df[(self.chexpert_df['study_id']==study_id) & (self.chexpert_df['subject_id']==subject_id)] > 0).values
# multi_label = multi_label[0][2:].astype('float32')
# Process question
ques_ix_iter = proc_ques(qa, self.token_to_ix, self.opt.max_token)
# Process answer
ans_iter = np.array(proc_mimic_ans(qa['answer'], self.ans_to_ix)) # only train for yes
if self.opt.run_mode in ['train']:
# randomly dropout some dim of features
rand_dim = np.random.choice(np.arange(self.v_dim), replace=False,
size=int(self.v_dim * 0.2))
img_feats = np.copy(self.image_features[qa['image']]) # must, or can't dropout
img_feats[:, rand_dim] = 0
# img_feats = np.array(self.image_features[qa['image']])
# ana_find_feats = np.array(self.ana_pooled_feats[qa['image']])
# img_feats = ana_find_feats
img_feat_iter = pad_img_feat(img_feats, self.opt.img_feat_pad_size)
# return torch.from_numpy(img_feat_iter), \
# torch.from_numpy(ques_ix_iter), torch.from_numpy(ans_iter), \
# torch.tensor([idx]), # self.opt.run_mode
else: # ['val', 'test']
img_feats = self.image_features[qa['image']]
img_feat_iter = pad_img_feat(img_feats, self.opt.img_feat_pad_size)
boxes = pad_img_feat(self.image_bb[qa['image']], self.opt.img_feat_pad_size)
# only works for batchsize=1
return torch.from_numpy(img_feat_iter), \
torch.from_numpy(ques_ix_iter), torch.from_numpy(ans_iter), \
torch.tensor([idx]) # img_feats, boxes,
def __len__(self):
return self.data_size
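# Illustrative sketch (not part of the original class): the training-time
# feature "dropout" in MIMICDatasetSplit.__getitem__ zeroes a random fraction
# of the visual-feature dimensions before padding. The helper below shows the
# same idea on a standalone array; the name and drop_frac argument are made up.
def _feature_dim_dropout_sketch(feats, drop_frac=0.2, seed=0):
    rng = default_rng(seed)
    n_dims = feats.shape[-1]
    dropped = rng.choice(np.arange(n_dims), size=int(n_dims * drop_frac),
                         replace=False)
    out = np.copy(feats)  # copy first so the hdf5-backed array stays untouched
    out[..., dropped] = 0
    return out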
class CustomLoader(DataLoader):
def __init__(self, dataset, opt):
# self.dataset = dataset
self.opt = opt
self.init_kwargs = {
'dataset': dataset,
'batch_size': self.opt.batch_size,
'shuffle': True,
'collate_fn': self.collate_fn,
'num_workers': self.opt.num_workers,
'pin_memory': self.opt.pin_mem,
'drop_last': True,
}
super().__init__(**self.init_kwargs)
@staticmethod
def collate_fn(data):
# if data[0][-1] == 'train':
# img_feat_iter, ques_ix_iter, ans_iter, idx = zip(*data)
# img_feat_iter = torch.stack(img_feat_iter, dim=0)
# ques_ix_iter = torch.stack(ques_ix_iter, dim=0)
# ans_iter = torch.stack(ans_iter, dim=0)
# idx = torch.stack(idx, dim=0)
# # multi_label = torch.stack(multi_label, dim=0)
# return img_feat_iter, ques_ix_iter, ans_iter, idx
# else:
img_feat_iter, ques_ix_iter, ans_iter, idx = zip(*data)
img_feat_iter = torch.stack(img_feat_iter, dim=0)
ques_ix_iter = torch.stack(ques_ix_iter, dim=0)
ans_iter = torch.stack(ans_iter, dim=0)
idx = torch.stack(idx, dim=0)
# multi_label = torch.stack(multi_label, dim=0)
return img_feat_iter, ques_ix_iter, ans_iter, idx
# tentatively removed img_feats, boxes,
| 42.71875 | 142 | 0.600992 | 11,633 | 0.945542 | 0 | 0 | 943 | 0.076648 | 0 | 0 | 4,608 | 0.374543 |
489dcb5eb95e27bdfa01e5e5808a8eedc54c5b9e | 140 | py | Python | src/scrapers/models/__init__.py | jskroodsma/helpradar | d9a2198db30995e790ab4f1611e15b85540cd3f8 | [
"MIT"
]
| null | null | null | src/scrapers/models/__init__.py | jskroodsma/helpradar | d9a2198db30995e790ab4f1611e15b85540cd3f8 | [
"MIT"
]
| null | null | null | src/scrapers/models/__init__.py | jskroodsma/helpradar | d9a2198db30995e790ab4f1611e15b85540cd3f8 | [
"MIT"
]
| null | null | null | from .database import Db
from .initiatives import InitiativeBase, Platform, ImportBatch, InitiativeImport, BatchImportState, InitiativeGroup | 70 | 115 | 0.864286 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
489e4aad3b2feb84feec86ee49098494b4522381 | 3,987 | py | Python | spyder/widgets/ipythonconsole/debugging.py | Bhanditz/spyder | 903ee4ace0f85ece730bcb670b1b92d464486f1a | [
"MIT"
]
| 1 | 2019-06-12T17:31:10.000Z | 2019-06-12T17:31:10.000Z | spyder/widgets/ipythonconsole/debugging.py | Bhanditz/spyder | 903ee4ace0f85ece730bcb670b1b92d464486f1a | [
"MIT"
]
| null | null | null | spyder/widgets/ipythonconsole/debugging.py | Bhanditz/spyder | 903ee4ace0f85ece730bcb670b1b92d464486f1a | [
"MIT"
]
| 1 | 2019-01-16T06:51:50.000Z | 2019-01-16T06:51:50.000Z | # -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""
Widget that handles communications between a console in debugging
mode and Spyder
"""
import ast
from qtpy.QtCore import Qt
from qtconsole.rich_jupyter_widget import RichJupyterWidget
class DebuggingWidget(RichJupyterWidget):
"""
Widget with the necessary attributes and methods to handle
communications between a console in debugging mode and
Spyder
"""
# --- Public API --------------------------------------------------
def write_to_stdin(self, line):
"""Send raw characters to the IPython kernel through stdin"""
self.kernel_client.input(line)
def set_spyder_breakpoints(self):
"""Set Spyder breakpoints into a debugging session"""
if self._reading:
self.kernel_client.input(
"!get_ipython().kernel._set_spyder_breakpoints()")
def dbg_exec_magic(self, magic, args=''):
"""Run an IPython magic while debugging."""
code = "!get_ipython().kernel.shell.run_line_magic('{}', '{}')".format(
magic, args)
self.kernel_client.input(code)
def refresh_from_pdb(self, pdb_state):
"""
Refresh Variable Explorer and Editor from a Pdb session,
after running any pdb command.
See publish_pdb_state in utils/ipython/spyder_kernel.py and
        notify_spyder in utils/site/sitecustomize.py.
"""
if 'step' in pdb_state and 'fname' in pdb_state['step']:
fname = pdb_state['step']['fname']
lineno = pdb_state['step']['lineno']
self.sig_pdb_step.emit(fname, lineno)
if 'namespace_view' in pdb_state:
self.sig_namespace_view.emit(ast.literal_eval(
pdb_state['namespace_view']))
if 'var_properties' in pdb_state:
self.sig_var_properties.emit(ast.literal_eval(
pdb_state['var_properties']))
# ---- Private API (overrode by us) ----------------------------
def _handle_input_request(self, msg):
"""Save history and add a %plot magic."""
if self._hidden:
raise RuntimeError('Request for raw input during hidden execution.')
# Make sure that all output from the SUB channel has been processed
# before entering readline mode.
self.kernel_client.iopub_channel.flush()
def callback(line):
# Save history to browse it later
self._control.history.append(line)
# This is the Spyder addition: add a %plot magic to display
# plots while debugging
if line.startswith('%plot '):
line = line.split()[-1]
code = "__spy_code__ = get_ipython().run_cell('%s')" % line
self.kernel_client.input(code)
else:
self.kernel_client.input(line)
if self._reading:
self._reading = False
self._readline(msg['content']['prompt'], callback=callback,
password=msg['content']['password'])
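    # Note on the '%plot' handling above: the callback keeps only the last
    # whitespace-separated token of the input, so it is intended for a single
    # expression without spaces (e.g. "%plot df.plot()") typed at the Pdb
    # prompt; that expression is then executed on the kernel side through
    # get_ipython().run_cell().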
def _event_filter_console_keypress(self, event):
"""Handle Key_Up/Key_Down while debugging."""
key = event.key()
if self._reading:
self._control.current_prompt_pos = self._prompt_pos
if key == Qt.Key_Up:
self._control.browse_history(backward=True)
return True
elif key == Qt.Key_Down:
self._control.browse_history(backward=False)
return True
elif key in (Qt.Key_Return, Qt.Key_Enter):
self._control.reset_search_pos()
return super(DebuggingWidget,
self)._event_filter_console_keypress(event)
else:
return super(DebuggingWidget,
self)._event_filter_console_keypress(event)
| 36.577982 | 80 | 0.600953 | 3,639 | 0.912487 | 0 | 0 | 0 | 0 | 0 | 0 | 1,569 | 0.39343 |
489e5789fc9bdd522af9556ca44141058ccb8f59 | 27 | py | Python | python/testData/completion/relativeImport/pkg/main.after.py | jnthn/intellij-community | 8fa7c8a3ace62400c838e0d5926a7be106aa8557 | [
"Apache-2.0"
]
| 2 | 2019-04-28T07:48:50.000Z | 2020-12-11T14:18:08.000Z | python/testData/completion/relativeImport/pkg/main.after.py | Cyril-lamirand/intellij-community | 60ab6c61b82fc761dd68363eca7d9d69663cfa39 | [
"Apache-2.0"
]
| 173 | 2018-07-05T13:59:39.000Z | 2018-08-09T01:12:03.000Z | python/testData/completion/relativeImport/pkg/main.after.py | Cyril-lamirand/intellij-community | 60ab6c61b82fc761dd68363eca7d9d69663cfa39 | [
"Apache-2.0"
]
| 2 | 2020-03-15T08:57:37.000Z | 2020-04-07T04:48:14.000Z | from .string import <caret> | 27 | 27 | 0.777778 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
48a016a1659ce678fbb42661714443c79c8a4486 | 9,848 | py | Python | code/scripts/train_fxns_nonimage.py | estherrolf/representation-matters | 502e351e21fc6b33aaa5c96b8c1409c76807f5a7 | [
"BSD-3-Clause"
]
| 1 | 2021-11-28T09:29:33.000Z | 2021-11-28T09:29:33.000Z | code/scripts/train_fxns_nonimage.py | estherrolf/representation-matters | 502e351e21fc6b33aaa5c96b8c1409c76807f5a7 | [
"BSD-3-Clause"
]
| null | null | null | code/scripts/train_fxns_nonimage.py | estherrolf/representation-matters | 502e351e21fc6b33aaa5c96b8c1409c76807f5a7 | [
"BSD-3-Clause"
]
| null | null | null | import numpy as np
import sklearn.metrics
from dataset_chunking_fxns import subsample_df_by_groups
import sklearn
import sklearn.linear_model
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
import time
# learning function: logistic regression multi-class
def fit_logistic_regression_multiclass(X,
y,
seed,
model_kwargs = {'penalty': 'l2', 'C':1},
weights=None):
if weights is None:
weights = np.ones(len(y))
else:
weights = weights
clf = sklearn.linear_model.LogisticRegression(**model_kwargs,
random_state = seed,
multi_class='multinomial',
max_iter=1000,
n_jobs = None)
clf.fit(X, y, sample_weight=weights)
return clf
# learning function: logistic regression
def fit_logistic_regression(X,
y,
seed,
model_kwargs = {'penalty': 'l2', 'C':1},
weights=None):
if weights is None:
weights = np.ones(len(y))
else:
weights = weights
clf = sklearn.linear_model.LogisticRegression(**model_kwargs,
random_state = seed,
multi_class='ovr',
max_iter=5000,
n_jobs = None)
clf.fit(X, y, sample_weight=weights)
return clf
def fit_rf_classifier(X, y, seed,
model_kwargs = {'max_depth': None, 'n_estimators': 100},
weights=None):
clf = RandomForestClassifier(**model_kwargs, random_state = seed, n_jobs=20)
if weights is None:
weights = np.ones(y.shape)
clf.fit(X, y, sample_weight=weights)
return clf
def fit_rf_regressor(X, y, seed,
model_kwargs = {'max_depth': None, 'n_estimators': 100},
weights=None):
clf = RandomForestRegressor(**model_kwargs, random_state = seed, n_jobs=20)
if weights is None:
weights = 1.0
clf.fit(X, y, sample_weight = weights)
return clf
def fit_ridge_regression(X, y, seed,
model_kwargs = {'alpha': 1.0},
weights=None):
reg = sklearn.linear_model.Ridge(**model_kwargs, normalize=True, random_state = seed, solver='svd')
if weights is None:
weights = np.ones(len(y))
reg.fit(X, y, sample_weight=weights)
return reg
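# Note: the fit_* wrappers above share one signature
# (X, y, seed, model_kwargs, weights) and return a fitted estimator, so any of
# them can be passed as `pred_fxn` to subset_and_train / cv_subset_and_train
# below.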
def subset_and_train(data,
features,
group_key,
label_key,
subset_sizes,
pred_fxn,
model_kwargs,
acc_fxns,
predict_prob=False,
reweight = False,
reweight_target_dist = None,
fold_key = 'fold',
eval_key='test',
seed_start = 0,
num_seeds = 5,
verbose=True):
accs_total, accs_by_group = {}, {}
for acc_key in acc_fxns.keys():
accs_total[acc_key] = np.zeros((subset_sizes.shape[1],num_seeds))
accs_by_group[acc_key] = np.zeros((2,subset_sizes.shape[1],num_seeds))
groups = [[x] for x in range(subset_sizes.shape[0])]
# run the training
for s,seed in enumerate(range(seed_start,seed_start + num_seeds)):
rs_this = np.random.RandomState(seed)
print(seed,": ", end='')
for i in range(subset_sizes.shape[1]):
t1 = time.time()
print(i, end = ' ')
group_sizes_this = subset_sizes[:,i]
if verbose:
print(group_sizes_this, end = '')
# subsample the dataset (training points only)
data_subset = subsample_df_by_groups(data,
group_key,
groups,
fold_key = fold_key,
group_sizes = group_sizes_this,
rs = rs_this,
keep_test_val = True, shuffle=True)
data_subset_train = data_subset[data_subset[fold_key] == 'train']
# eval on the following set
data_subset_val = data_subset[data_subset[fold_key] == eval_key]
# index into features
train_idxs_this_round = data_subset_train['X_idxs']
val_idxs_this_round = data_subset_val['X_idxs']
X_train = features[train_idxs_this_round]
X_val = features[val_idxs_this_round]
y_train, g_train = data_subset_train[label_key].values, data_subset_train[group_key].values
y_val, g_val = data_subset_val[label_key].values, data_subset_val[group_key].values
if reweight:
# weights per group
group_fracs_this = group_sizes_this / group_sizes_this.sum()
train_weights_per_group = np.array(reweight_target_dist) / group_fracs_this
# print('train_weights_per_group ', train_weights_per_group)
# print(train_weights_per_group)
# weight per instance
train_weights = np.array(train_weights_per_group)[g_train.astype(int)]
# scale so that weights sum to n_train
train_weights = len(train_weights) * train_weights / train_weights.sum()
else:
train_weights = None
# allow for passing in lists of model kwargs, in case HPs need to change with allocation
if isinstance(model_kwargs, (list)):
model_kwargs_this = model_kwargs[i]
if verbose:
print(model_kwargs_this)
else:
model_kwargs_this = model_kwargs
clf = pred_fxn(X_train, y_train, seed,
weights=train_weights, model_kwargs=model_kwargs_this)
if predict_prob:
# take probability of class 1 as the prediction
preds = clf.predict_proba(X_val)[:,1]
else:
preds = clf.predict(X_val)
# if preds are already binary, this won't change anything
rounded_preds = np.asarray([int(p > 0.5) for p in preds])
for acc_key, acc_fxn in acc_fxns.items():
if acc_key == 'acc':
accs_total[acc_key][i,s] = acc_fxn(y_val, rounded_preds)
else:
accs_total[acc_key][i,s] = acc_fxn(y_val, preds)
for g in range(2):
for acc_key, acc_fxn in acc_fxns.items():
if acc_key == 'acc':
accs_by_group[acc_key][g,i,s] = acc_fxn(y_val[g_val == g], rounded_preds[g_val == g])
else:
accs_by_group[acc_key][g,i,s] = acc_fxn(y_val[g_val == g], preds[g_val == g])
t2 = time.time()
#print()
if verbose:
print('took {0} minutes'.format((t2-t1)/60))
print()
return accs_by_group, accs_total
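# Illustrative sketch (not part of the original pipeline): the importance-weight
# arithmetic used inside subset_and_train when reweight=True, on made-up
# numbers. With training group sizes (80, 20) and a target distribution of
# (0.5, 0.5), the minority group is up-weighted to 2.5 and the majority group
# down-weighted to 0.625, then the weights are rescaled to sum to n_train.
def _reweighting_sketch():
    group_sizes = np.array([80, 20])
    target_dist = np.array([0.5, 0.5])
    g_train = np.repeat([0, 1], group_sizes)        # per-example group labels
    group_fracs = group_sizes / group_sizes.sum()   # [0.8, 0.2]
    per_group = target_dist / group_fracs           # [0.625, 2.5]
    w = per_group[g_train]
    w = len(w) * w / w.sum()                        # rescale: w.sum() == n_train
    return w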
def cv_subset_and_train(data,
features,
group_key,
label_key,
subset_sizes,
pred_fxn,
model_kwargs,
acc_fxns,
predict_prob = False,
reweight=False,
reweight_target_dist=None,
num_seeds = 5,
verbose=True):
accs_total, accs_by_group = {}, {}
for acc_key in acc_fxns.keys():
accs_total[acc_key] = np.zeros((subset_sizes.shape[1],num_seeds))
accs_by_group[acc_key] = np.zeros((2,subset_sizes.shape[1],num_seeds))
for seed in range(num_seeds):
r = subset_and_train(data,
features,
group_key=group_key,
label_key=label_key,
subset_sizes=subset_sizes,
pred_fxn = pred_fxn,
model_kwargs = model_kwargs,
acc_fxns = acc_fxns,
reweight=reweight,
reweight_target_dist = reweight_target_dist,
predict_prob = predict_prob,
eval_key='val',
fold_key = 'cv_fold_{0}'.format(seed),
seed_start = seed,
num_seeds = 1,
verbose=verbose)
accs_by_group_this_seed, accs_total_this_seed = r
for acc_key in acc_fxns.keys():
accs_total[acc_key][:,seed] = accs_total_this_seed[acc_key].reshape(-1)
accs_by_group[acc_key][:,:,seed] = accs_by_group_this_seed[acc_key].reshape(2,-1)
return accs_by_group, accs_total | 39.870445 | 109 | 0.491166 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 806 | 0.081844 |
48a147ad6df1458c845aa4fd687c23becb0926e9 | 6,206 | py | Python | MITx-6.00.1x-EDX-Introduction-to-Computer-Science/Week-7/PSET-7/phraseTriggers.py | lilsweetcaligula/MIT6.00.1x | ee2902782a08ff685e388b2f40c09ea8c9c5fcfe | [
"MIT"
]
| null | null | null | MITx-6.00.1x-EDX-Introduction-to-Computer-Science/Week-7/PSET-7/phraseTriggers.py | lilsweetcaligula/MIT6.00.1x | ee2902782a08ff685e388b2f40c09ea8c9c5fcfe | [
"MIT"
]
| null | null | null | MITx-6.00.1x-EDX-Introduction-to-Computer-Science/Week-7/PSET-7/phraseTriggers.py | lilsweetcaligula/MIT6.00.1x | ee2902782a08ff685e388b2f40c09ea8c9c5fcfe | [
"MIT"
]
| null | null | null | """
PSET-7
Part 2: Triggers (PhraseTriggers)
At this point, you have no way of writing a trigger that matches on
"New York City" -- the only triggers you know how to write would be
a trigger that would fire on "New" AND "York" AND "City" -- which
also fires on the phrase "New students at York University love the
city". It's time to fix this. Since here you're asking for an exact
match, we will require that the cases match, but we'll be a little
more flexible on word matching. So, "New York City" will match:
* New York City sees movie premiere
* In the heart of New York City's famous cafe
* New York Cityrandomtexttoproveapointhere
but will not match:
* I love new york city
* I love New York City!!!!!!!!!!!!!!
PROBLEM 9
Implement a phrase trigger (PhraseTrigger) that fires when a given
phrase is in any of the story's subject, title, or summary. The
phrase should be an argument to the class's constructor.
"""
# Enter your code for WordTrigger, TitleTrigger,
# SubjectTrigger, SummaryTrigger, and PhraseTrigger in this box
class WordTrigger(Trigger):
def __init__(self, word):
self.word = word
def internalAreCharsEqualIgnoreCase(self, c1, c2):
if type(c1) != str or type(c2) != str:
raise TypeError("Arg not of type str")
if len(c1) > 1 or len(c2) > 1:
raise TypeError("Expected a char. Length not equal to 1")
return c1[0] == c2[0] or \
(ord(c1[0]) > 0x60 and (ord(c1[0]) - 0x20 == ord(c2[0])) or ord(c1[0]) < 0x5A and (ord(c1[0]) + 0x20 == ord(c2[0])))
def isWordIn(self, text):
"""
Returns True if word is present in text as
whole word. False otherwise.
"""
charsMatched = 0
firstCharMatchInd = -1
for i in range( len(text) ):
if self.internalAreCharsEqualIgnoreCase(text[i], self.word[0]):
# case-insensitive check for text[i] == self.word[0]
firstCharMatchInd = i
charsMatched += 1
wordInd = 1
while wordInd < len(self.word) and wordInd + firstCharMatchInd < len(text):
if self.internalAreCharsEqualIgnoreCase(self.word[wordInd], text[wordInd + firstCharMatchInd]):
# case-insensitive check for self.word[wordInd] == text[wordInd + firstCharMatchInd]
charsMatched += 1
wordInd += 1
elif self.internalAreCharsEqualIgnoreCase(self.word[wordInd], self.word[0]):
# case-insensitive check for text[i] == self.word[0]
charsMatched = 1
firstCharMatchInd = wordInd + firstCharMatchInd
wordInd = firstCharMatchInd
continue
else:
charsMatched = 0
i = wordInd + firstCharMatchInd
break
if charsMatched == len(self.word):
if len(self.word) == len(text):
return True
elif firstCharMatchInd > 0 and firstCharMatchInd + len(self.word) == len(text):
if text[firstCharMatchInd - 1].isspace() or text[firstCharMatchInd - 1] in string.punctuation:
return True
elif firstCharMatchInd == 0 and firstCharMatchInd + len(self.word) + 1 < len(text):
if text[firstCharMatchInd + len(self.word)].isspace() or text[firstCharMatchInd + len(self.word)] in string.punctuation:
return True
else:
if (text[firstCharMatchInd - 1].isspace() or text[firstCharMatchInd - 1] in string.punctuation) \
and (text[firstCharMatchInd + len(self.word)].isspace() or text[firstCharMatchInd + len(self.word)] in string.punctuation):
return True
return False
class TitleTrigger(WordTrigger):
def evaluate(self, story):
"""
Returns True if an alert should be generated
for the given news item, or False otherwise.
"""
return self.isWordIn( story.getTitle() )
class SubjectTrigger(WordTrigger):
def evaluate(self, story):
"""
Returns True if an alert should be generated
for the given news item, or False otherwise.
"""
return self.isWordIn( story.getSubject() )
class SummaryTrigger(WordTrigger):
def evaluate(self, story):
"""
Returns True if an alert should be generated
for the given news item, or False otherwise.
"""
return self.isWordIn( story.getSummary() )
class PhraseTrigger(Trigger):
def __init__(self, phrase):
self.word = phrase
def isWordIn(self, text):
charsMatched = 0
firstCharMatchInd = -1
for i in range( len(text) ):
if text[i] == self.word[0]:
firstCharMatchInd = i
charsMatched += 1
wordInd = 1
while wordInd < len(self.word) and wordInd + firstCharMatchInd < len(text):
if self.word[wordInd] == text[wordInd + firstCharMatchInd]:
charsMatched += 1
wordInd += 1
elif self.word[wordInd] == self.word[0]:
charsMatched = 1
firstCharMatchInd = wordInd + firstCharMatchInd
wordInd = firstCharMatchInd
continue
else:
charsMatched = 0
i = wordInd + firstCharMatchInd
break
if charsMatched == len(self.word):
return True
return False
def evaluate(self, story):
return self.isWordIn( story.getTitle() ) or \
self.isWordIn( story.getSubject() ) or \
self.isWordIn( story.getSummary() )
| 41.099338 | 150 | 0.549468 | 5,104 | 0.82243 | 0 | 0 | 0 | 0 | 0 | 0 | 1,795 | 0.289236 |
48a22194d70cb5daa8b009c12fc1c26cc1c8d905 | 1,522 | py | Python | vault/tests/unit/test_views.py | Natan7/vault | ad0e9d5434dc59c9573afefef5e4eb390a7383ae | [
"Apache-2.0"
]
| 1 | 2017-03-02T19:32:31.000Z | 2017-03-02T19:32:31.000Z | vault/tests/unit/test_views.py | Natan7/vault | ad0e9d5434dc59c9573afefef5e4eb390a7383ae | [
"Apache-2.0"
]
| null | null | null | vault/tests/unit/test_views.py | Natan7/vault | ad0e9d5434dc59c9573afefef5e4eb390a7383ae | [
"Apache-2.0"
]
| 2 | 2018-03-14T16:56:53.000Z | 2018-03-14T17:20:07.000Z | # -*- coding: utf-8 -*-
from unittest import TestCase
from mock import Mock, patch
from vault.tests.fakes import fake_request
from vault.views import SetProjectView
from django.utils.translation import ugettext as _
class SetProjectTest(TestCase):
def setUp(self):
self.view = SetProjectView.as_view()
self.request = fake_request(method='GET')
self.request.user.is_authenticated = lambda: False
def tearDown(self):
patch.stopall()
def test_set_project_needs_authentication(self):
response = self.view(self.request)
self.assertEqual(response.status_code, 302)
@patch('vault.views.switch')
def test_set_new_project_id_to_session(self, mock_switch):
self.request.user.is_authenticated = lambda: True
self.assertEqual(self.request.session.get('project_id'), '1')
response = self.view(self.request, project_id=2)
self.assertEqual(self.request.session.get('project_id'), 2)
@patch('vault.views.switch')
def test_set_new_project_id_to_session_exception(self, mock_switch):
self.request.user.is_authenticated = lambda: True
mock_switch.side_effect = ValueError()
self.assertEqual(self.request.session.get('project_id'), '1')
response = self.view(self.request, project_id=2)
self.assertEqual(self.request.session.get('project_id'), 2)
msgs = [msg for msg in self.request._messages]
self.assertEqual(msgs[0].message, _('Unable to change your project.'))
| 33.086957 | 78 | 0.704336 | 1,301 | 0.854796 | 0 | 0 | 884 | 0.580815 | 0 | 0 | 154 | 0.101183 |
48a2c6f00acb55753f06b34ad48a128100334441 | 2,364 | py | Python | qa/tasks/cephfs/test_dump_tree.py | rpratap-bot/ceph | 9834961a66927ae856935591f2fd51082e2ee484 | [
"MIT"
]
| 4 | 2020-04-08T03:42:02.000Z | 2020-10-01T20:34:48.000Z | qa/tasks/cephfs/test_dump_tree.py | rpratap-bot/ceph | 9834961a66927ae856935591f2fd51082e2ee484 | [
"MIT"
]
| 93 | 2020-03-26T14:29:14.000Z | 2020-11-12T05:54:55.000Z | qa/tasks/cephfs/test_dump_tree.py | rpratap-bot/ceph | 9834961a66927ae856935591f2fd51082e2ee484 | [
"MIT"
]
| 23 | 2020-03-24T10:28:44.000Z | 2020-09-24T09:42:19.000Z | from tasks.cephfs.cephfs_test_case import CephFSTestCase
import random
import os
class TestDumpTree(CephFSTestCase):
def get_paths_to_ino(self):
inos = {}
p = self.mount_a.run_shell(["find", "./"])
paths = p.stdout.getvalue().strip().split()
for path in paths:
inos[path] = self.mount_a.path_to_ino(path, False)
return inos
def populate(self):
self.mount_a.run_shell(["git", "clone",
"https://github.com/ceph/ceph-qa-suite"])
def test_basic(self):
self.mount_a.run_shell(["mkdir", "parent"])
self.mount_a.run_shell(["mkdir", "parent/child"])
self.mount_a.run_shell(["touch", "parent/child/file"])
self.mount_a.run_shell(["mkdir", "parent/child/grandchild"])
self.mount_a.run_shell(["touch", "parent/child/grandchild/file"])
inos = self.get_paths_to_ino()
tree = self.fs.mds_asok(["dump", "tree", "/parent/child", "1"])
target_inos = [inos["./parent/child"], inos["./parent/child/file"],
inos["./parent/child/grandchild"]]
for ino in tree:
del target_inos[target_inos.index(ino['ino'])] # don't catch!
assert(len(target_inos) == 0)
def test_random(self):
random.seed(0)
self.populate()
inos = self.get_paths_to_ino()
target = random.sample(inos.keys(), 1)[0]
if target != "./":
target = os.path.dirname(target)
subtree = [path for path in inos.keys() if path.startswith(target)]
target_inos = [inos[path] for path in subtree]
tree = self.fs.mds_asok(["dump", "tree", target[1:]])
for ino in tree:
del target_inos[target_inos.index(ino['ino'])] # don't catch!
assert(len(target_inos) == 0)
target_depth = target.count('/')
maxdepth = max([path.count('/') for path in subtree]) - target_depth
depth = random.randint(0, maxdepth)
target_inos = [inos[path] for path in subtree \
if path.count('/') <= depth + target_depth]
tree = self.fs.mds_asok(["dump", "tree", target[1:], str(depth)])
for ino in tree:
del target_inos[target_inos.index(ino['ino'])] # don't catch!
assert(len(target_inos) == 0)
| 35.283582 | 76 | 0.571489 | 2,281 | 0.96489 | 0 | 0 | 0 | 0 | 0 | 0 | 380 | 0.160745 |
48a598d9751db785f23d9a8e28422d557cff93bc | 966 | py | Python | catkin_ws/src/devel_scripts/stepper.py | AROMAeth/robo_code | d920adee8eb7ab285ba50aa31c71d631adc35480 | [
"MIT"
]
| null | null | null | catkin_ws/src/devel_scripts/stepper.py | AROMAeth/robo_code | d920adee8eb7ab285ba50aa31c71d631adc35480 | [
"MIT"
]
| null | null | null | catkin_ws/src/devel_scripts/stepper.py | AROMAeth/robo_code | d920adee8eb7ab285ba50aa31c71d631adc35480 | [
"MIT"
]
| null | null | null |
import RPi.GPIO as GPIO
import time
GPIO.setmode(GPIO.BOARD)
control_pins = [7,11,13,15]
for pin in control_pins:
GPIO.setup(pin, GPIO.OUT)
GPIO.output(pin, 0)
halfstep_seq = [
[1,0,0,0],
[1,1,0,0],
[0,1,0,0],
[0,1,1,0],
[0,0,1,0],
[0,0,1,1],
[0,0,0,1],
[1,0,0,1]
]
# speed from 0 to 1 (one being the fastest)
# steps: 50 steps = one full rotation (see the degrees_to_steps sketch below)
def move_backward(steps, speed):
for i in range(steps):
for halfstep in range(8):
for pin in range(4):
GPIO.output(control_pins[pin], halfstep_seq[halfstep][pin])
time.sleep(max(0.001/speed,0.001))
def move_forward(steps, speed):
for i in range(steps):
for halfstep in range(7,-1,-1):
for pin in range(4):
GPIO.output(control_pins[pin], halfstep_seq[halfstep][pin])
time.sleep(max(0.001/speed,0.001))
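# Illustrative helper (not in the original script): with 50 steps per full
# rotation, as noted above, an angle in degrees maps to motor steps as below,
# e.g. degrees_to_steps(180) -> 25 and degrees_to_steps(360) -> 50.
def degrees_to_steps(angle_degrees, steps_per_rev=50):
    return int(round(angle_degrees * steps_per_rev / 360.0))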
for k in range(1,10,1):
move_forward(50,0.1)
time.sleep(0.5)
#move_forward(50,0.25)
time.sleep(1)
#move_backward(500,0.5)
GPIO.cleanup()
| 18.576923 | 67 | 0.635611 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 119 | 0.123188 |
48a7ec0bb39b709f9863a091b7d85367791f1dab | 2,924 | py | Python | Experimental/OpenCVExp.py | awesomesauce12/6DBytes-CV | 8e48c6e629eedcd5098a0b0f8c90c48e38d5abf8 | [
"MIT"
]
| 1 | 2016-06-24T23:09:43.000Z | 2016-06-24T23:09:43.000Z | Experimental/OpenCVExp.py | awesomesauce12/image-recognition | 8e48c6e629eedcd5098a0b0f8c90c48e38d5abf8 | [
"MIT"
]
| null | null | null | Experimental/OpenCVExp.py | awesomesauce12/image-recognition | 8e48c6e629eedcd5098a0b0f8c90c48e38d5abf8 | [
"MIT"
]
| null | null | null | import numpy as np
import cv2
import os
import math
os.system("fswebcam -r 507x456 --no-banner image11.jpg")
def showImage(capImg):
cv2.imshow('img', capImg)
cv2.waitKey(0)
cv2.destroyAllWindows()
img = cv2.imread('image11.jpg',-1)
height, width, channel = img.shape
topy = height
topx = width
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
lower_color = np.array([0,255,255])
upper_color = np.array([0,255,255])
mask = cv2.inRange(hsv, lower_color, upper_color)
res = cv2.bitwise_and(img,img, mask=mask)
'''def draw_circle(event, x, y, flags, param):
if event == cv2.EVENT_LBUTTONDBLCLK:
cv2.circle(img, (x,y), 100, (255,255,255), -1)'''
'''cap = cv2.VideoCapture(-1)
while(True):
ret, frame = cap.read()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
cv2.imshow('hjhj', gray)
if cv2.waitKey(0) & 0xFF -- ord('q'):
break
cap.release()
cv2.destroyAllWindows()'''
propx = (topx/512)
propy = (topy/512)
'''lineX1 = int(0*propx)
lineY2 = int(0*propy)
lineX2 = int(511*propx)
lineY1 = int(511*propy)
img = cv2.line(img, (lineX1,lineY1), (lineX2, lineY2), (255,255,255), 5)'''
w = 100*(propx+propy)/2
x1 = int(topx/2 - w/2)
x2 = int(topx/2 + w/2)
y1 = int(topy/2 + w/2)
y2 = int(topy/2 - w/2)
img = cv2.rectangle(res, (x1,y1), (x2,y2), (0,255,0),3)
img = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
showImage(img)
ret, thresh = cv2.threshold(img, 15, 250, 0)
showImage(thresh)
image, contours, heirarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
#showImage(image)
cv2.drawContours(img, contours, 0, (0,255,0), 3)
showImage(img)
print('Num of Contours ', len(contours))
cnt = contours[0]
M = cv2.moments(cnt)
print (M)
cx = int(M['m10']/M['m00'])
cy = int(M['m01']/M['m00'])
area = cv2.contourArea(cnt)
print (cx)
print (cy)
print (area)
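# cx and cy above are the contour centroid obtained from the image moments
# (cx = m10/m00, cy = m01/m00), and `area` is the contour area in pixels,
# which for a closed contour matches the zeroth moment m00.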
'''xCircle = 40*propx
xCircle = int(xCircle)
yCircle = xCircle
radCircle = xCircle
img = cv2.circle(img, (xCircle, yCircle), radCircle, (0,0,255),-1)
x3 = int(topx - 60*propx)
y3 = int(topy - 110*propy)
minAx = int(50*propx)
majAx = int(100*propy)
img = cv2.ellipse(img, (x3, y3), (minAx,majAx), 0, 0, 360, (0,150,255), -1)'''
'''pt1X = int(70*propx)
pt1Y = int(60*propy)
pt2X = int(154*propx)
pt2Y = int(23*propy)
pt3X = int(500*propx)
pt3Y = int(3*propy)'''
#pts = np.array([[pt1X, pt1Y], [pt2X, pt2Y], [pt3X, pt3Y]], np.int32)
#pts = pts.reshape((-1,1,2))
#img = cv2.polylines(img, [pts], True, (100,100,234))
#font = cv2.FONT_HERSHEY_SIMPLEX
#startPtX = int(240*propx)
#startPtY = int(240*propy)
#scale = 2*(propx + propy)/2
#cv2.putText(img, 'Apurva', (startPtX, startPtY), font, scale, (210, 80, 150), 4, cv2.LINE_AA)
#cv2.imshow("kl", img)
'''cv2.setMouseCallback('kl', draw_circle)'''
''''''
#cv2.imshow('frame', img)
#cv2.imshow('mask',mask)
cv2.imshow('res',res)
'''sd = img[130:200, 175:245]
img[20:90, 140:210]=sd
cv2.imshow("kl", img)'''
cv2.waitKey(0)
cv2.destroyAllWindows()
| 21.5 | 94 | 0.651505 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,707 | 0.583789 |
48a84cb7d32acc3cbc3af963ca0e81cc7ff163d9 | 424 | py | Python | poem/Poem/urls_public.py | kzailac/poem | 9f898e3cc3378ef1c49517b4cf6335a93a3f49b0 | [
"Apache-2.0"
]
| null | null | null | poem/Poem/urls_public.py | kzailac/poem | 9f898e3cc3378ef1c49517b4cf6335a93a3f49b0 | [
"Apache-2.0"
]
| null | null | null | poem/Poem/urls_public.py | kzailac/poem | 9f898e3cc3378ef1c49517b4cf6335a93a3f49b0 | [
"Apache-2.0"
]
| null | null | null | from django.conf.urls import include
from django.http import HttpResponseRedirect
from django.urls import re_path
from Poem.poem_super_admin.admin import mysuperadmin
urlpatterns = [
re_path(r'^$', lambda x: HttpResponseRedirect('/poem/superadmin/')),
re_path(r'^superadmin/', mysuperadmin.urls),
re_path(r'^saml2/', include(('djangosaml2.urls', 'poem'),
namespace='saml2')),
]
| 32.615385 | 72 | 0.688679 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 80 | 0.188679 |
48a8ac917e1b840d71d262dd221cf4cb43769865 | 902 | py | Python | optimism/ReadMesh.py | btalamini/optimism | 023e1b2a0b137900a7517e4c7ac5056255cf7bbe | [
"MIT"
]
| null | null | null | optimism/ReadMesh.py | btalamini/optimism | 023e1b2a0b137900a7517e4c7ac5056255cf7bbe | [
"MIT"
]
| 1 | 2022-03-12T00:01:12.000Z | 2022-03-12T00:01:12.000Z | optimism/ReadMesh.py | btalamini/optimism | 023e1b2a0b137900a7517e4c7ac5056255cf7bbe | [
"MIT"
]
| 3 | 2021-12-23T19:53:31.000Z | 2022-03-27T23:12:03.000Z | import json
from optimism.JaxConfig import *
from optimism import Mesh
def read_json_mesh(meshFileName):
with open(meshFileName, 'r', encoding='utf-8') as jsonFile:
meshData = json.load(jsonFile)
coordinates = np.array(meshData['coordinates'])
connectivity = np.array(meshData['connectivity'], dtype=int)
nodeSets = {}
for key in meshData['nodeSets']:
nodeSets[key] = np.array(meshData['nodeSets'][key])
sideSets = {}
exodusSideSets = meshData['sideSets']
for key in exodusSideSets:
elements = np.array(exodusSideSets[key][0], dtype=int)
sides = np.array(exodusSideSets[key][1], dtype=int)
sideSets[key] = np.column_stack((elements, sides))
blocks=None
return Mesh.construct_mesh_from_basic_data(coordinates, connectivity,
blocks, nodeSets, sideSets)
| 31.103448 | 74 | 0.643016 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 67 | 0.074279 |
48a9df7987bdd5e6e1faa8cd6a7c8279d997c6ae | 1,058 | py | Python | networkx/algorithms/approximation/ramsey.py | rakschahsa/networkx | 6cac55b1064c3c346665f9281680fa3b66442ad0 | [
"BSD-3-Clause"
]
| 445 | 2019-01-26T13:50:26.000Z | 2022-03-18T05:17:38.000Z | SLpackage/private/thirdparty/pythonpkgs/networkx/networkx_2.2/lib/python2.7/site-packages/networkx/algorithms/approximation/ramsey.py | fanglab/6mASCOPE | 3f1fdcb7693ff152f17623ce549526ec272698b1 | [
"BSD-3-Clause"
]
| 242 | 2019-01-29T15:48:27.000Z | 2022-03-31T22:09:21.000Z | SLpackage/private/thirdparty/pythonpkgs/networkx/networkx_2.2/lib/python2.7/site-packages/networkx/algorithms/approximation/ramsey.py | fanglab/6mASCOPE | 3f1fdcb7693ff152f17623ce549526ec272698b1 | [
"BSD-3-Clause"
]
| 31 | 2019-03-10T09:51:27.000Z | 2022-02-14T23:11:12.000Z | # -*- coding: utf-8 -*-
"""
Ramsey numbers.
"""
# Copyright (C) 2011 by
# Nicholas Mancuso <[email protected]>
# All rights reserved.
# BSD license.
import networkx as nx
from ...utils import arbitrary_element
__all__ = ["ramsey_R2"]
__author__ = """Nicholas Mancuso ([email protected])"""
def ramsey_R2(G):
r"""Approximately computes the Ramsey number `R(2;s,t)` for graph.
Parameters
----------
G : NetworkX graph
Undirected graph
Returns
-------
max_pair : (set, set) tuple
Maximum clique, Maximum independent set.
"""
if not G:
return set(), set()
node = arbitrary_element(G)
nbrs = nx.all_neighbors(G, node)
nnbrs = nx.non_neighbors(G, node)
c_1, i_1 = ramsey_R2(G.subgraph(nbrs).copy())
c_2, i_2 = ramsey_R2(G.subgraph(nnbrs).copy())
c_1.add(node)
i_2.add(node)
# Choose the larger of the two cliques and the larger of the two
# independent sets, according to cardinality.
return max(c_1, c_2, key=len), max(i_1, i_2, key=len)
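# Illustrative usage (not part of the original module): ramsey_R2 greedily
# builds a (clique, independent set) pair; on a 5-cycle both come out size 2.
def _ramsey_R2_example():
    G = nx.cycle_graph(5)
    clique, indep_set = ramsey_R2(G)
    return len(clique), len(indep_set)  # (2, 2) for the 5-cycle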
| 24.604651 | 70 | 0.63138 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 582 | 0.550095 |
48acd263a6439030b9241f1881827f94f5753592 | 677 | py | Python | pysyte/oss/linux.py | git-wwts/pysyte | 625658138cdb5affc1a6a89a9f2c7e3667ee80c2 | [
"MIT"
]
| 1 | 2021-11-10T15:24:36.000Z | 2021-11-10T15:24:36.000Z | pysyte/oss/linux.py | git-wwts/pysyte | 625658138cdb5affc1a6a89a9f2c7e3667ee80c2 | [
"MIT"
]
| 12 | 2020-01-15T00:19:41.000Z | 2021-05-11T14:52:04.000Z | pysyte/oss/linux.py | git-wwts/pysyte | 625658138cdb5affc1a6a89a9f2c7e3667ee80c2 | [
"MIT"
]
| 2 | 2015-01-31T11:51:06.000Z | 2015-01-31T21:29:19.000Z | """Linux-specific code"""
from pysyte.types import paths
def xdg_home():
"""path to $XDG_CONFIG_HOME
>>> assert xdg_home() == paths.path('~/.config').expand()
"""
return paths.environ_path("XDG_CONFIG_HOME", "~/.config")
def xdg_home_config(filename):
"""path to that file in $XDG_CONFIG_HOME
>>> assert xdg_home_config('fred') == paths.path('~/.config/fred').expand()
"""
return xdg_home() / filename
def xdg_dirs():
"""paths in $XDG_CONFIG_DIRS"""
return paths.environ_paths("XDG_CONFIG_DIRS")
def xdg_homes():
return [xdg_home()]
bash_paste = "xclip -selection clipboard"
bash_copy = "xclip -selection clipboard -o"
| 19.911765 | 79 | 0.660266 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 387 | 0.57164 |
48ae6c1d7db7737a61286051c58656fa1c61b3ae | 387 | py | Python | osu/osu_overlay.py | HQupgradeHQ/Daylight | a110a0f618877f5cccd66c4d75115c765d8f62a0 | [
"MIT"
]
| 2 | 2020-07-30T14:07:19.000Z | 2020-08-01T05:28:29.000Z | osu/osu_overlay.py | HQupgradeHQ/Daylight | a110a0f618877f5cccd66c4d75115c765d8f62a0 | [
"MIT"
]
| null | null | null | osu/osu_overlay.py | HQupgradeHQ/Daylight | a110a0f618877f5cccd66c4d75115c765d8f62a0 | [
"MIT"
]
| null | null | null | import mpv
import keyboard
import time
p = mpv.MPV()
p.play("song_name.mp4")
def play_pause():
p.pause = not p.pause
keyboard.add_hotkey("e", play_pause)
def full():
p.fullscreen = not p.fullscreen
keyboard.add_hotkey("2", full)
def go_to_start():
p.time_pos = 2
keyboard.add_hotkey("1", go_to_start)
while 1:
time.sleep(40)
| 12.09375 | 38 | 0.620155 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 24 | 0.062016 |
48b02d948060f886a636e1dc8c11abff122b4be1 | 91,989 | py | Python | test/unit/common/test_db.py | dreamhost/swift | e90424e88bfaae17bf16f5c32b4d18deb5a6e71f | [
"Apache-2.0"
]
| null | null | null | test/unit/common/test_db.py | dreamhost/swift | e90424e88bfaae17bf16f5c32b4d18deb5a6e71f | [
"Apache-2.0"
]
| null | null | null | test/unit/common/test_db.py | dreamhost/swift | e90424e88bfaae17bf16f5c32b4d18deb5a6e71f | [
"Apache-2.0"
]
| null | null | null | # Copyright (c) 2010-2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Tests for swift.common.db """
from __future__ import with_statement
import hashlib
import os
import unittest
from shutil import rmtree, copy
from StringIO import StringIO
from time import sleep, time
from uuid import uuid4
import simplejson
import sqlite3
import swift.common.db
from swift.common.db import AccountBroker, chexor, ContainerBroker, \
DatabaseBroker, DatabaseConnectionError, dict_factory, get_db_connection
from swift.common.utils import normalize_timestamp
from swift.common.exceptions import LockTimeout
class TestDatabaseConnectionError(unittest.TestCase):
def test_str(self):
err = \
DatabaseConnectionError(':memory:', 'No valid database connection')
self.assert_(':memory:' in str(err))
self.assert_('No valid database connection' in str(err))
err = DatabaseConnectionError(':memory:',
'No valid database connection', timeout=1357)
self.assert_(':memory:' in str(err))
self.assert_('No valid database connection' in str(err))
self.assert_('1357' in str(err))
class TestDictFactory(unittest.TestCase):
def test_normal_case(self):
conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE test (one TEXT, two INTEGER)')
conn.execute('INSERT INTO test (one, two) VALUES ("abc", 123)')
conn.execute('INSERT INTO test (one, two) VALUES ("def", 456)')
conn.commit()
curs = conn.execute('SELECT one, two FROM test')
self.assertEquals(dict_factory(curs, curs.next()),
{'one': 'abc', 'two': 123})
self.assertEquals(dict_factory(curs, curs.next()),
{'one': 'def', 'two': 456})
class TestChexor(unittest.TestCase):
def test_normal_case(self):
self.assertEquals(chexor('d41d8cd98f00b204e9800998ecf8427e',
'new name', normalize_timestamp(1)),
'4f2ea31ac14d4273fe32ba08062b21de')
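    # chexor folds each (name, timestamp) pair into the running hash by
    # XOR-ing in its MD5 digest, giving a rolling, order-independent hash of
    # the database contents; 'd41d8cd98f00b204e9800998ecf8427e' above is the
    # MD5 of the empty string, i.e. the hash of an empty database.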
def test_invalid_old_hash(self):
self.assertRaises(TypeError, chexor, 'oldhash', 'name',
normalize_timestamp(1))
def test_no_name(self):
self.assertRaises(Exception, chexor,
'd41d8cd98f00b204e9800998ecf8427e', None, normalize_timestamp(1))
class TestGetDBConnection(unittest.TestCase):
def test_normal_case(self):
conn = get_db_connection(':memory:')
self.assert_(hasattr(conn, 'execute'))
def test_invalid_path(self):
self.assertRaises(DatabaseConnectionError, get_db_connection,
'invalid database path / name')
class TestDatabaseBroker(unittest.TestCase):
def setUp(self):
self.testdir = os.path.join(os.path.dirname(__file__), 'db')
rmtree(self.testdir, ignore_errors=1)
os.mkdir(self.testdir)
def tearDown(self):
rmtree(self.testdir, ignore_errors=1)
def test_memory_db_init(self):
broker = DatabaseBroker(':memory:')
self.assertEqual(broker.db_file, ':memory:')
self.assertRaises(AttributeError, broker.initialize,
normalize_timestamp('0'))
def test_disk_db_init(self):
db_file = os.path.join(self.testdir, '1.db')
broker = DatabaseBroker(db_file)
self.assertEqual(broker.db_file, db_file)
self.assert_(broker.conn is None)
def test_initialize(self):
self.assertRaises(AttributeError,
DatabaseBroker(':memory:').initialize,
normalize_timestamp('1'))
stub_dict = {}
def stub(*args, **kwargs):
for key in stub_dict.keys():
del stub_dict[key]
stub_dict['args'] = args
for key, value in kwargs.items():
stub_dict[key] = value
broker = DatabaseBroker(':memory:')
broker._initialize = stub
broker.initialize(normalize_timestamp('1'))
self.assert_(hasattr(stub_dict['args'][0], 'execute'))
self.assertEquals(stub_dict['args'][1], '0000000001.00000')
with broker.get() as conn:
conn.execute('SELECT * FROM outgoing_sync')
conn.execute('SELECT * FROM incoming_sync')
broker = DatabaseBroker(os.path.join(self.testdir, '1.db'))
broker._initialize = stub
broker.initialize(normalize_timestamp('1'))
self.assert_(hasattr(stub_dict['args'][0], 'execute'))
self.assertEquals(stub_dict['args'][1], '0000000001.00000')
with broker.get() as conn:
conn.execute('SELECT * FROM outgoing_sync')
conn.execute('SELECT * FROM incoming_sync')
def test_delete_db(self):
def init_stub(conn, put_timestamp):
conn.execute('CREATE TABLE test (one TEXT)')
conn.execute('CREATE TABLE test_stat (id TEXT)')
conn.execute('INSERT INTO test_stat (id) VALUES (?)',
(str(uuid4),))
conn.execute('INSERT INTO test (one) VALUES ("1")')
conn.commit()
stub_called = [False]
def delete_stub(*a, **kw):
stub_called[0] = True
broker = DatabaseBroker(':memory:')
broker.db_type = 'test'
broker._initialize = init_stub
# Initializes a good broker for us
broker.initialize(normalize_timestamp('1'))
self.assert_(broker.conn is not None)
broker._delete_db = delete_stub
stub_called[0] = False
broker.delete_db('2')
self.assert_(stub_called[0])
broker = DatabaseBroker(os.path.join(self.testdir, '1.db'))
broker.db_type = 'test'
broker._initialize = init_stub
broker.initialize(normalize_timestamp('1'))
broker._delete_db = delete_stub
stub_called[0] = False
broker.delete_db('2')
self.assert_(stub_called[0])
# ensure that metadata was cleared
m2 = broker.metadata
self.assert_(not any(v[0] for v in m2.itervalues()))
self.assert_(all(v[1] == normalize_timestamp('2')
for v in m2.itervalues()))
def test_get(self):
broker = DatabaseBroker(':memory:')
got_exc = False
try:
with broker.get() as conn:
conn.execute('SELECT 1')
except Exception:
got_exc = True
broker = DatabaseBroker(os.path.join(self.testdir, '1.db'))
got_exc = False
try:
with broker.get() as conn:
conn.execute('SELECT 1')
except Exception:
got_exc = True
self.assert_(got_exc)
def stub(*args, **kwargs):
pass
broker._initialize = stub
broker.initialize(normalize_timestamp('1'))
with broker.get() as conn:
conn.execute('CREATE TABLE test (one TEXT)')
try:
with broker.get() as conn:
conn.execute('INSERT INTO test (one) VALUES ("1")')
raise Exception('test')
conn.commit()
except Exception:
pass
broker = DatabaseBroker(os.path.join(self.testdir, '1.db'))
with broker.get() as conn:
self.assertEquals(
[r[0] for r in conn.execute('SELECT * FROM test')], [])
with broker.get() as conn:
conn.execute('INSERT INTO test (one) VALUES ("1")')
conn.commit()
broker = DatabaseBroker(os.path.join(self.testdir, '1.db'))
with broker.get() as conn:
self.assertEquals(
[r[0] for r in conn.execute('SELECT * FROM test')], ['1'])
orig_renamer = swift.common.db.renamer
try:
swift.common.db.renamer = lambda a, b: b
qpath = os.path.dirname(os.path.dirname(os.path.dirname(
os.path.dirname(self.testdir))))
if qpath:
qpath += '/quarantined/tests/db'
else:
qpath = 'quarantined/tests/db'
# Test malformed database
copy(os.path.join(os.path.dirname(__file__),
'malformed_example.db'),
os.path.join(self.testdir, '1.db'))
broker = DatabaseBroker(os.path.join(self.testdir, '1.db'))
broker.db_type = 'test'
exc = None
try:
with broker.get() as conn:
conn.execute('SELECT * FROM test')
except Exception, err:
exc = err
self.assertEquals(str(exc),
'Quarantined %s to %s due to malformed database' %
(self.testdir, qpath))
# Test corrupted database
copy(os.path.join(os.path.dirname(__file__),
'corrupted_example.db'),
os.path.join(self.testdir, '1.db'))
broker = DatabaseBroker(os.path.join(self.testdir, '1.db'))
broker.db_type = 'test'
exc = None
try:
with broker.get() as conn:
conn.execute('SELECT * FROM test')
except Exception, err:
exc = err
self.assertEquals(str(exc),
'Quarantined %s to %s due to corrupted database' %
(self.testdir, qpath))
finally:
swift.common.db.renamer = orig_renamer
def test_lock(self):
broker = DatabaseBroker(os.path.join(self.testdir, '1.db'), timeout=.1)
got_exc = False
try:
with broker.lock():
pass
except Exception:
got_exc = True
self.assert_(got_exc)
def stub(*args, **kwargs):
pass
broker._initialize = stub
broker.initialize(normalize_timestamp('1'))
with broker.lock():
pass
with broker.lock():
pass
broker2 = DatabaseBroker(os.path.join(self.testdir, '1.db'), timeout=.1)
broker2._initialize = stub
with broker.lock():
got_exc = False
try:
with broker2.lock():
pass
except LockTimeout:
got_exc = True
self.assert_(got_exc)
try:
with broker.lock():
raise Exception('test')
except Exception:
pass
with broker.lock():
pass
def test_newid(self):
broker = DatabaseBroker(':memory:')
broker.db_type = 'test'
broker.db_contains_type = 'test'
uuid1 = str(uuid4())
def _initialize(conn, timestamp):
conn.execute('CREATE TABLE test (one TEXT)')
conn.execute('CREATE TABLE test_stat (id TEXT)')
conn.execute('INSERT INTO test_stat (id) VALUES (?)', (uuid1,))
conn.commit()
broker._initialize = _initialize
broker.initialize(normalize_timestamp('1'))
uuid2 = str(uuid4())
broker.newid(uuid2)
with broker.get() as conn:
uuids = [r[0] for r in conn.execute('SELECT * FROM test_stat')]
self.assertEquals(len(uuids), 1)
self.assertNotEquals(uuids[0], uuid1)
uuid1 = uuids[0]
points = [(r[0], r[1]) for r in conn.execute('SELECT sync_point, '
'remote_id FROM incoming_sync WHERE remote_id = ?', (uuid2,))]
self.assertEquals(len(points), 1)
self.assertEquals(points[0][0], -1)
self.assertEquals(points[0][1], uuid2)
conn.execute('INSERT INTO test (one) VALUES ("1")')
conn.commit()
uuid3 = str(uuid4())
broker.newid(uuid3)
with broker.get() as conn:
uuids = [r[0] for r in conn.execute('SELECT * FROM test_stat')]
self.assertEquals(len(uuids), 1)
self.assertNotEquals(uuids[0], uuid1)
uuid1 = uuids[0]
points = [(r[0], r[1]) for r in conn.execute('SELECT sync_point, '
'remote_id FROM incoming_sync WHERE remote_id = ?', (uuid3,))]
self.assertEquals(len(points), 1)
self.assertEquals(points[0][1], uuid3)
broker.newid(uuid2)
with broker.get() as conn:
uuids = [r[0] for r in conn.execute('SELECT * FROM test_stat')]
self.assertEquals(len(uuids), 1)
self.assertNotEquals(uuids[0], uuid1)
points = [(r[0], r[1]) for r in conn.execute('SELECT sync_point, '
'remote_id FROM incoming_sync WHERE remote_id = ?', (uuid2,))]
self.assertEquals(len(points), 1)
self.assertEquals(points[0][1], uuid2)
def test_get_items_since(self):
broker = DatabaseBroker(':memory:')
broker.db_type = 'test'
broker.db_contains_type = 'test'
def _initialize(conn, timestamp):
conn.execute('CREATE TABLE test (one TEXT)')
conn.execute('INSERT INTO test (one) VALUES ("1")')
conn.execute('INSERT INTO test (one) VALUES ("2")')
conn.execute('INSERT INTO test (one) VALUES ("3")')
conn.commit()
broker._initialize = _initialize
broker.initialize(normalize_timestamp('1'))
self.assertEquals(broker.get_items_since(-1, 10),
[{'one': '1'}, {'one': '2'}, {'one': '3'}])
self.assertEquals(broker.get_items_since(-1, 2),
[{'one': '1'}, {'one': '2'}])
self.assertEquals(broker.get_items_since(1, 2),
[{'one': '2'}, {'one': '3'}])
self.assertEquals(broker.get_items_since(3, 2), [])
self.assertEquals(broker.get_items_since(999, 2), [])
def test_get_sync(self):
broker = DatabaseBroker(':memory:')
broker.db_type = 'test'
broker.db_contains_type = 'test'
uuid1 = str(uuid4())
def _initialize(conn, timestamp):
conn.execute('CREATE TABLE test (one TEXT)')
conn.execute('CREATE TABLE test_stat (id TEXT)')
conn.execute('INSERT INTO test_stat (id) VALUES (?)', (uuid1,))
conn.execute('INSERT INTO test (one) VALUES ("1")')
conn.commit()
pass
broker._initialize = _initialize
broker.initialize(normalize_timestamp('1'))
uuid2 = str(uuid4())
self.assertEquals(broker.get_sync(uuid2), -1)
broker.newid(uuid2)
self.assertEquals(broker.get_sync(uuid2), 1)
uuid3 = str(uuid4())
self.assertEquals(broker.get_sync(uuid3), -1)
with broker.get() as conn:
conn.execute('INSERT INTO test (one) VALUES ("2")')
conn.commit()
broker.newid(uuid3)
self.assertEquals(broker.get_sync(uuid2), 1)
self.assertEquals(broker.get_sync(uuid3), 2)
self.assertEquals(broker.get_sync(uuid2, incoming=False), -1)
self.assertEquals(broker.get_sync(uuid3, incoming=False), -1)
broker.merge_syncs([{'sync_point': 1, 'remote_id': uuid2}],
incoming=False)
self.assertEquals(broker.get_sync(uuid2), 1)
self.assertEquals(broker.get_sync(uuid3), 2)
self.assertEquals(broker.get_sync(uuid2, incoming=False), 1)
self.assertEquals(broker.get_sync(uuid3, incoming=False), -1)
broker.merge_syncs([{'sync_point': 2, 'remote_id': uuid3}],
incoming=False)
self.assertEquals(broker.get_sync(uuid2, incoming=False), 1)
self.assertEquals(broker.get_sync(uuid3, incoming=False), 2)
def test_merge_syncs(self):
broker = DatabaseBroker(':memory:')
def stub(*args, **kwargs):
pass
broker._initialize = stub
broker.initialize(normalize_timestamp('1'))
uuid2 = str(uuid4())
broker.merge_syncs([{'sync_point': 1, 'remote_id': uuid2}])
self.assertEquals(broker.get_sync(uuid2), 1)
uuid3 = str(uuid4())
broker.merge_syncs([{'sync_point': 2, 'remote_id': uuid3}])
self.assertEquals(broker.get_sync(uuid2), 1)
self.assertEquals(broker.get_sync(uuid3), 2)
self.assertEquals(broker.get_sync(uuid2, incoming=False), -1)
self.assertEquals(broker.get_sync(uuid3, incoming=False), -1)
broker.merge_syncs([{'sync_point': 3, 'remote_id': uuid2},
{'sync_point': 4, 'remote_id': uuid3}],
incoming=False)
self.assertEquals(broker.get_sync(uuid2, incoming=False), 3)
self.assertEquals(broker.get_sync(uuid3, incoming=False), 4)
self.assertEquals(broker.get_sync(uuid2), 1)
self.assertEquals(broker.get_sync(uuid3), 2)
broker.merge_syncs([{'sync_point': 5, 'remote_id': uuid2}])
self.assertEquals(broker.get_sync(uuid2), 5)
def test_get_replication_info(self):
self.get_replication_info_tester(metadata=False)
def test_get_replication_info_with_metadata(self):
self.get_replication_info_tester(metadata=True)
def get_replication_info_tester(self, metadata=False):
broker = DatabaseBroker(':memory:', account='a')
broker.db_type = 'test'
broker.db_contains_type = 'test'
broker_creation = normalize_timestamp(1)
broker_uuid = str(uuid4())
broker_metadata = metadata and simplejson.dumps(
{'Test': ('Value', normalize_timestamp(1))}) or ''
def _initialize(conn, put_timestamp):
if put_timestamp is None:
put_timestamp = normalize_timestamp(0)
conn.executescript('''
CREATE TABLE test (
ROWID INTEGER PRIMARY KEY AUTOINCREMENT,
name TEXT UNIQUE,
created_at TEXT
);
CREATE TRIGGER test_insert AFTER INSERT ON test
BEGIN
UPDATE test_stat
SET test_count = test_count + 1,
hash = chexor(hash, new.name, new.created_at);
END;
CREATE TRIGGER test_update BEFORE UPDATE ON test
BEGIN
SELECT RAISE(FAIL,
'UPDATE not allowed; DELETE and INSERT');
END;
CREATE TRIGGER test_delete AFTER DELETE ON test
BEGIN
UPDATE test_stat
SET test_count = test_count - 1,
hash = chexor(hash, old.name, old.created_at);
END;
CREATE TABLE test_stat (
account TEXT,
created_at TEXT,
put_timestamp TEXT DEFAULT '0',
delete_timestamp TEXT DEFAULT '0',
test_count INTEGER,
hash TEXT default '00000000000000000000000000000000',
id TEXT
%s
);
INSERT INTO test_stat (test_count) VALUES (0);
''' % (metadata and ", metadata TEXT DEFAULT ''" or ""))
conn.execute('''
UPDATE test_stat
SET account = ?, created_at = ?, id = ?, put_timestamp = ?
''', (broker.account, broker_creation, broker_uuid, put_timestamp))
if metadata:
conn.execute('UPDATE test_stat SET metadata = ?',
(broker_metadata,))
conn.commit()
broker._initialize = _initialize
put_timestamp = normalize_timestamp(2)
broker.initialize(put_timestamp)
info = broker.get_replication_info()
self.assertEquals(info, {'count': 0,
'hash': '00000000000000000000000000000000',
'created_at': broker_creation, 'put_timestamp': put_timestamp,
'delete_timestamp': '0', 'max_row': -1, 'id': broker_uuid,
'metadata': broker_metadata})
insert_timestamp = normalize_timestamp(3)
with broker.get() as conn:
conn.execute('''
INSERT INTO test (name, created_at) VALUES ('test', ?)
''', (insert_timestamp,))
conn.commit()
info = broker.get_replication_info()
self.assertEquals(info, {'count': 1,
'hash': 'bdc4c93f574b0d8c2911a27ce9dd38ba',
'created_at': broker_creation, 'put_timestamp': put_timestamp,
'delete_timestamp': '0', 'max_row': 1, 'id': broker_uuid,
'metadata': broker_metadata})
with broker.get() as conn:
conn.execute('DELETE FROM test')
conn.commit()
info = broker.get_replication_info()
self.assertEquals(info, {'count': 0,
'hash': '00000000000000000000000000000000',
'created_at': broker_creation, 'put_timestamp': put_timestamp,
'delete_timestamp': '0', 'max_row': 1, 'id': broker_uuid,
'metadata': broker_metadata})
return broker
def test_metadata(self):
# Initializes a good broker for us
broker = self.get_replication_info_tester(metadata=True)
# Add our first item
first_timestamp = normalize_timestamp(1)
first_value = '1'
broker.update_metadata({'First': [first_value, first_timestamp]})
self.assert_('First' in broker.metadata)
self.assertEquals(broker.metadata['First'],
[first_value, first_timestamp])
# Add our second item
second_timestamp = normalize_timestamp(2)
second_value = '2'
broker.update_metadata({'Second': [second_value, second_timestamp]})
self.assert_('First' in broker.metadata)
self.assertEquals(broker.metadata['First'],
[first_value, first_timestamp])
self.assert_('Second' in broker.metadata)
self.assertEquals(broker.metadata['Second'],
[second_value, second_timestamp])
# Update our first item
first_timestamp = normalize_timestamp(3)
first_value = '1b'
broker.update_metadata({'First': [first_value, first_timestamp]})
self.assert_('First' in broker.metadata)
self.assertEquals(broker.metadata['First'],
[first_value, first_timestamp])
self.assert_('Second' in broker.metadata)
self.assertEquals(broker.metadata['Second'],
[second_value, second_timestamp])
# Delete our second item (by setting to empty string)
second_timestamp = normalize_timestamp(4)
second_value = ''
broker.update_metadata({'Second': [second_value, second_timestamp]})
self.assert_('First' in broker.metadata)
self.assertEquals(broker.metadata['First'],
[first_value, first_timestamp])
self.assert_('Second' in broker.metadata)
self.assertEquals(broker.metadata['Second'],
[second_value, second_timestamp])
# Reclaim at point before second item was deleted
broker.reclaim(normalize_timestamp(3))
self.assert_('First' in broker.metadata)
self.assertEquals(broker.metadata['First'],
[first_value, first_timestamp])
self.assert_('Second' in broker.metadata)
self.assertEquals(broker.metadata['Second'],
[second_value, second_timestamp])
# Reclaim at point second item was deleted
broker.reclaim(normalize_timestamp(4))
self.assert_('First' in broker.metadata)
self.assertEquals(broker.metadata['First'],
[first_value, first_timestamp])
self.assert_('Second' in broker.metadata)
self.assertEquals(broker.metadata['Second'],
[second_value, second_timestamp])
# Reclaim after point second item was deleted
broker.reclaim(normalize_timestamp(5))
self.assert_('First' in broker.metadata)
self.assertEquals(broker.metadata['First'],
[first_value, first_timestamp])
self.assert_('Second' not in broker.metadata)
class TestContainerBroker(unittest.TestCase):
""" Tests for swift.common.db.ContainerBroker """
def test_creation(self):
""" Test swift.common.db.ContainerBroker.__init__ """
broker = ContainerBroker(':memory:', account='a', container='c')
self.assertEqual(broker.db_file, ':memory:')
broker.initialize(normalize_timestamp('1'))
with broker.get() as conn:
curs = conn.cursor()
curs.execute('SELECT 1')
self.assertEqual(curs.fetchall()[0][0], 1)
def test_exception(self):
""" Test swift.common.db.ContainerBroker throwing a conn away after
unhandled exception """
first_conn = None
broker = ContainerBroker(':memory:', account='a', container='c')
broker.initialize(normalize_timestamp('1'))
with broker.get() as conn:
first_conn = conn
try:
with broker.get() as conn:
self.assertEquals(first_conn, conn)
raise Exception('OMG')
except Exception:
pass
self.assert_(broker.conn is None)
def test_empty(self):
""" Test swift.common.db.ContainerBroker.empty """
broker = ContainerBroker(':memory:', account='a', container='c')
broker.initialize(normalize_timestamp('1'))
self.assert_(broker.empty())
broker.put_object('o', normalize_timestamp(time()), 0, 'text/plain',
'd41d8cd98f00b204e9800998ecf8427e')
self.assert_(not broker.empty())
sleep(.00001)
broker.delete_object('o', normalize_timestamp(time()))
self.assert_(broker.empty())
def test_reclaim(self):
broker = ContainerBroker(':memory:', account='test_account',
container='test_container')
broker.initialize(normalize_timestamp('1'))
broker.put_object('o', normalize_timestamp(time()), 0, 'text/plain',
'd41d8cd98f00b204e9800998ecf8427e')
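        # reclaim(age_timestamp, sync_timestamp) only purges rows already
        # flagged deleted=1 and older than age_timestamp; live rows must be
        # left alone, which the next two blocks verify.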
with broker.get() as conn:
self.assertEquals(conn.execute(
"SELECT count(*) FROM object "
"WHERE deleted = 0").fetchone()[0], 1)
self.assertEquals(conn.execute(
"SELECT count(*) FROM object "
"WHERE deleted = 1").fetchone()[0], 0)
broker.reclaim(normalize_timestamp(time() - 999), time())
with broker.get() as conn:
self.assertEquals(conn.execute(
"SELECT count(*) FROM object "
"WHERE deleted = 0").fetchone()[0], 1)
self.assertEquals(conn.execute(
"SELECT count(*) FROM object "
"WHERE deleted = 1").fetchone()[0], 0)
sleep(.00001)
broker.delete_object('o', normalize_timestamp(time()))
with broker.get() as conn:
self.assertEquals(conn.execute(
"SELECT count(*) FROM object "
"WHERE deleted = 0").fetchone()[0], 0)
self.assertEquals(conn.execute(
"SELECT count(*) FROM object "
"WHERE deleted = 1").fetchone()[0], 1)
broker.reclaim(normalize_timestamp(time() - 999), time())
with broker.get() as conn:
self.assertEquals(conn.execute(
"SELECT count(*) FROM object "
"WHERE deleted = 0").fetchone()[0], 0)
self.assertEquals(conn.execute(
"SELECT count(*) FROM object "
"WHERE deleted = 1").fetchone()[0], 1)
sleep(.00001)
broker.reclaim(normalize_timestamp(time()), time())
with broker.get() as conn:
self.assertEquals(conn.execute(
"SELECT count(*) FROM object "
"WHERE deleted = 0").fetchone()[0], 0)
self.assertEquals(conn.execute(
"SELECT count(*) FROM object "
"WHERE deleted = 1").fetchone()[0], 0)
# Test the return values of reclaim()
broker.put_object('w', normalize_timestamp(time()), 0, 'text/plain',
'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('x', normalize_timestamp(time()), 0, 'text/plain',
'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('y', normalize_timestamp(time()), 0, 'text/plain',
'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('z', normalize_timestamp(time()), 0, 'text/plain',
'd41d8cd98f00b204e9800998ecf8427e')
# Test before deletion
res = broker.reclaim(normalize_timestamp(time()), time())
broker.delete_db(normalize_timestamp(time()))
def test_delete_object(self):
""" Test swift.common.db.ContainerBroker.delete_object """
broker = ContainerBroker(':memory:', account='a', container='c')
broker.initialize(normalize_timestamp('1'))
broker.put_object('o', normalize_timestamp(time()), 0, 'text/plain',
'd41d8cd98f00b204e9800998ecf8427e')
with broker.get() as conn:
self.assertEquals(conn.execute(
"SELECT count(*) FROM object "
"WHERE deleted = 0").fetchone()[0], 1)
self.assertEquals(conn.execute(
"SELECT count(*) FROM object "
"WHERE deleted = 1").fetchone()[0], 0)
sleep(.00001)
broker.delete_object('o', normalize_timestamp(time()))
with broker.get() as conn:
self.assertEquals(conn.execute(
"SELECT count(*) FROM object "
"WHERE deleted = 0").fetchone()[0], 0)
self.assertEquals(conn.execute(
"SELECT count(*) FROM object "
"WHERE deleted = 1").fetchone()[0], 1)
def test_put_object(self):
""" Test swift.common.db.ContainerBroker.put_object """
broker = ContainerBroker(':memory:', account='a', container='c')
broker.initialize(normalize_timestamp('1'))
# Create initial object
timestamp = normalize_timestamp(time())
broker.put_object('"{<object \'&\' name>}"', timestamp, 123,
'application/x-test',
'5af83e3196bf99f440f31f2e1a6c9afe')
with broker.get() as conn:
self.assertEquals(conn.execute(
"SELECT name FROM object").fetchone()[0],
'"{<object \'&\' name>}"')
self.assertEquals(conn.execute(
"SELECT created_at FROM object").fetchone()[0], timestamp)
self.assertEquals(conn.execute(
"SELECT size FROM object").fetchone()[0], 123)
self.assertEquals(conn.execute(
"SELECT content_type FROM object").fetchone()[0],
'application/x-test')
self.assertEquals(conn.execute(
"SELECT etag FROM object").fetchone()[0],
'5af83e3196bf99f440f31f2e1a6c9afe')
self.assertEquals(conn.execute(
"SELECT deleted FROM object").fetchone()[0], 0)
# Reput same event
broker.put_object('"{<object \'&\' name>}"', timestamp, 123,
'application/x-test',
'5af83e3196bf99f440f31f2e1a6c9afe')
with broker.get() as conn:
self.assertEquals(conn.execute(
"SELECT name FROM object").fetchone()[0],
'"{<object \'&\' name>}"')
self.assertEquals(conn.execute(
"SELECT created_at FROM object").fetchone()[0], timestamp)
self.assertEquals(conn.execute(
"SELECT size FROM object").fetchone()[0], 123)
self.assertEquals(conn.execute(
"SELECT content_type FROM object").fetchone()[0],
'application/x-test')
self.assertEquals(conn.execute(
"SELECT etag FROM object").fetchone()[0],
'5af83e3196bf99f440f31f2e1a6c9afe')
self.assertEquals(conn.execute(
"SELECT deleted FROM object").fetchone()[0], 0)
# Put new event
sleep(.00001)
timestamp = normalize_timestamp(time())
broker.put_object('"{<object \'&\' name>}"', timestamp, 124,
'application/x-test',
'aa0749bacbc79ec65fe206943d8fe449')
with broker.get() as conn:
self.assertEquals(conn.execute(
"SELECT name FROM object").fetchone()[0],
'"{<object \'&\' name>}"')
self.assertEquals(conn.execute(
"SELECT created_at FROM object").fetchone()[0], timestamp)
self.assertEquals(conn.execute(
"SELECT size FROM object").fetchone()[0], 124)
self.assertEquals(conn.execute(
"SELECT content_type FROM object").fetchone()[0],
'application/x-test')
self.assertEquals(conn.execute(
"SELECT etag FROM object").fetchone()[0],
'aa0749bacbc79ec65fe206943d8fe449')
self.assertEquals(conn.execute(
"SELECT deleted FROM object").fetchone()[0], 0)
# Put old event
otimestamp = normalize_timestamp(float(timestamp) - 1)
broker.put_object('"{<object \'&\' name>}"', otimestamp, 124,
'application/x-test',
'aa0749bacbc79ec65fe206943d8fe449')
with broker.get() as conn:
self.assertEquals(conn.execute(
"SELECT name FROM object").fetchone()[0],
'"{<object \'&\' name>}"')
self.assertEquals(conn.execute(
"SELECT created_at FROM object").fetchone()[0], timestamp)
self.assertEquals(conn.execute(
"SELECT size FROM object").fetchone()[0], 124)
self.assertEquals(conn.execute(
"SELECT content_type FROM object").fetchone()[0],
'application/x-test')
self.assertEquals(conn.execute(
"SELECT etag FROM object").fetchone()[0],
'aa0749bacbc79ec65fe206943d8fe449')
self.assertEquals(conn.execute(
"SELECT deleted FROM object").fetchone()[0], 0)
# Put old delete event
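        # An out-of-date delete must not clobber the newer put: the row keeps
        # its data and stays at deleted = 0.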
dtimestamp = normalize_timestamp(float(timestamp) - 1)
broker.put_object('"{<object \'&\' name>}"', dtimestamp, 0, '', '',
deleted=1)
with broker.get() as conn:
self.assertEquals(conn.execute(
"SELECT name FROM object").fetchone()[0],
'"{<object \'&\' name>}"')
self.assertEquals(conn.execute(
"SELECT created_at FROM object").fetchone()[0], timestamp)
self.assertEquals(conn.execute(
"SELECT size FROM object").fetchone()[0], 124)
self.assertEquals(conn.execute(
"SELECT content_type FROM object").fetchone()[0],
'application/x-test')
self.assertEquals(conn.execute(
"SELECT etag FROM object").fetchone()[0],
'aa0749bacbc79ec65fe206943d8fe449')
self.assertEquals(conn.execute(
"SELECT deleted FROM object").fetchone()[0], 0)
# Put new delete event
sleep(.00001)
timestamp = normalize_timestamp(time())
broker.put_object('"{<object \'&\' name>}"', timestamp, 0, '', '',
deleted=1)
with broker.get() as conn:
self.assertEquals(conn.execute(
"SELECT name FROM object").fetchone()[0],
'"{<object \'&\' name>}"')
self.assertEquals(conn.execute(
"SELECT created_at FROM object").fetchone()[0], timestamp)
self.assertEquals(conn.execute(
"SELECT deleted FROM object").fetchone()[0], 1)
# Put new event
sleep(.00001)
timestamp = normalize_timestamp(time())
broker.put_object('"{<object \'&\' name>}"', timestamp, 123,
'application/x-test',
'5af83e3196bf99f440f31f2e1a6c9afe')
with broker.get() as conn:
self.assertEquals(conn.execute(
"SELECT name FROM object").fetchone()[0],
'"{<object \'&\' name>}"')
self.assertEquals(conn.execute(
"SELECT created_at FROM object").fetchone()[0], timestamp)
self.assertEquals(conn.execute(
"SELECT size FROM object").fetchone()[0], 123)
self.assertEquals(conn.execute(
"SELECT content_type FROM object").fetchone()[0],
'application/x-test')
self.assertEquals(conn.execute(
"SELECT etag FROM object").fetchone()[0],
'5af83e3196bf99f440f31f2e1a6c9afe')
self.assertEquals(conn.execute(
"SELECT deleted FROM object").fetchone()[0], 0)
# We'll use this later
sleep(.0001)
in_between_timestamp = normalize_timestamp(time())
        # Simulated post event: nothing is written here, only the timestamps
        # advance, so the stored row should still reflect the previous put.
sleep(.0001)
previous_timestamp = timestamp
timestamp = normalize_timestamp(time())
with broker.get() as conn:
self.assertEquals(conn.execute(
"SELECT name FROM object").fetchone()[0],
'"{<object \'&\' name>}"')
self.assertEquals(conn.execute(
"SELECT created_at FROM object").fetchone()[0],
previous_timestamp)
self.assertEquals(conn.execute(
"SELECT size FROM object").fetchone()[0], 123)
self.assertEquals(conn.execute(
"SELECT content_type FROM object").fetchone()[0],
'application/x-test')
self.assertEquals(conn.execute(
"SELECT etag FROM object").fetchone()[0],
'5af83e3196bf99f440f31f2e1a6c9afe')
self.assertEquals(conn.execute(
"SELECT deleted FROM object").fetchone()[0], 0)
# Put event from after last put but before last post
timestamp = in_between_timestamp
broker.put_object('"{<object \'&\' name>}"', timestamp, 456,
'application/x-test3',
'6af83e3196bf99f440f31f2e1a6c9afe')
with broker.get() as conn:
self.assertEquals(conn.execute(
"SELECT name FROM object").fetchone()[0],
'"{<object \'&\' name>}"')
self.assertEquals(conn.execute(
"SELECT created_at FROM object").fetchone()[0], timestamp)
self.assertEquals(conn.execute(
"SELECT size FROM object").fetchone()[0], 456)
self.assertEquals(conn.execute(
"SELECT content_type FROM object").fetchone()[0],
'application/x-test3')
self.assertEquals(conn.execute(
"SELECT etag FROM object").fetchone()[0],
'6af83e3196bf99f440f31f2e1a6c9afe')
self.assertEquals(conn.execute(
"SELECT deleted FROM object").fetchone()[0], 0)
def test_get_info(self):
""" Test swift.common.db.ContainerBroker.get_info """
broker = ContainerBroker(':memory:', account='test1', container='test2')
broker.initialize(normalize_timestamp('1'))
info = broker.get_info()
self.assertEquals(info['account'], 'test1')
self.assertEquals(info['container'], 'test2')
self.assertEquals(info['hash'], '00000000000000000000000000000000')
info = broker.get_info()
self.assertEquals(info['object_count'], 0)
self.assertEquals(info['bytes_used'], 0)
broker.put_object('o1', normalize_timestamp(time()), 123, 'text/plain',
'5af83e3196bf99f440f31f2e1a6c9afe')
info = broker.get_info()
self.assertEquals(info['object_count'], 1)
self.assertEquals(info['bytes_used'], 123)
sleep(.00001)
broker.put_object('o2', normalize_timestamp(time()), 123, 'text/plain',
'5af83e3196bf99f440f31f2e1a6c9afe')
info = broker.get_info()
self.assertEquals(info['object_count'], 2)
self.assertEquals(info['bytes_used'], 246)
sleep(.00001)
broker.put_object('o2', normalize_timestamp(time()), 1000,
'text/plain', '5af83e3196bf99f440f31f2e1a6c9afe')
info = broker.get_info()
self.assertEquals(info['object_count'], 2)
self.assertEquals(info['bytes_used'], 1123)
sleep(.00001)
broker.delete_object('o1', normalize_timestamp(time()))
info = broker.get_info()
self.assertEquals(info['object_count'], 1)
self.assertEquals(info['bytes_used'], 1000)
sleep(.00001)
broker.delete_object('o2', normalize_timestamp(time()))
info = broker.get_info()
self.assertEquals(info['object_count'], 0)
self.assertEquals(info['bytes_used'], 0)
info = broker.get_info()
self.assertEquals(info['x_container_sync_point1'], -1)
self.assertEquals(info['x_container_sync_point2'], -1)
def test_set_x_syncs(self):
broker = ContainerBroker(':memory:', account='test1', container='test2')
broker.initialize(normalize_timestamp('1'))
info = broker.get_info()
self.assertEquals(info['x_container_sync_point1'], -1)
self.assertEquals(info['x_container_sync_point2'], -1)
broker.set_x_container_sync_points(1, 2)
info = broker.get_info()
self.assertEquals(info['x_container_sync_point1'], 1)
self.assertEquals(info['x_container_sync_point2'], 2)
def test_get_report_info(self):
broker = ContainerBroker(':memory:', account='test1', container='test2')
broker.initialize(normalize_timestamp('1'))
info = broker.get_info()
self.assertEquals(info['account'], 'test1')
self.assertEquals(info['container'], 'test2')
self.assertEquals(info['object_count'], 0)
self.assertEquals(info['bytes_used'], 0)
self.assertEquals(info['reported_object_count'], 0)
self.assertEquals(info['reported_bytes_used'], 0)
broker.put_object('o1', normalize_timestamp(time()), 123, 'text/plain',
'5af83e3196bf99f440f31f2e1a6c9afe')
info = broker.get_info()
self.assertEquals(info['object_count'], 1)
self.assertEquals(info['bytes_used'], 123)
self.assertEquals(info['reported_object_count'], 0)
self.assertEquals(info['reported_bytes_used'], 0)
sleep(.00001)
broker.put_object('o2', normalize_timestamp(time()), 123, 'text/plain',
'5af83e3196bf99f440f31f2e1a6c9afe')
info = broker.get_info()
self.assertEquals(info['object_count'], 2)
self.assertEquals(info['bytes_used'], 246)
self.assertEquals(info['reported_object_count'], 0)
self.assertEquals(info['reported_bytes_used'], 0)
sleep(.00001)
broker.put_object('o2', normalize_timestamp(time()), 1000,
'text/plain', '5af83e3196bf99f440f31f2e1a6c9afe')
info = broker.get_info()
self.assertEquals(info['object_count'], 2)
self.assertEquals(info['bytes_used'], 1123)
self.assertEquals(info['reported_object_count'], 0)
self.assertEquals(info['reported_bytes_used'], 0)
put_timestamp = normalize_timestamp(time())
sleep(.001)
delete_timestamp = normalize_timestamp(time())
broker.reported(put_timestamp, delete_timestamp, 2, 1123)
info = broker.get_info()
self.assertEquals(info['object_count'], 2)
self.assertEquals(info['bytes_used'], 1123)
self.assertEquals(info['reported_put_timestamp'], put_timestamp)
self.assertEquals(info['reported_delete_timestamp'], delete_timestamp)
self.assertEquals(info['reported_object_count'], 2)
self.assertEquals(info['reported_bytes_used'], 1123)
sleep(.00001)
broker.delete_object('o1', normalize_timestamp(time()))
info = broker.get_info()
self.assertEquals(info['object_count'], 1)
self.assertEquals(info['bytes_used'], 1000)
self.assertEquals(info['reported_object_count'], 2)
self.assertEquals(info['reported_bytes_used'], 1123)
sleep(.00001)
broker.delete_object('o2', normalize_timestamp(time()))
info = broker.get_info()
self.assertEquals(info['object_count'], 0)
self.assertEquals(info['bytes_used'], 0)
self.assertEquals(info['reported_object_count'], 2)
self.assertEquals(info['reported_bytes_used'], 1123)
def test_list_objects_iter(self):
""" Test swift.common.db.ContainerBroker.list_objects_iter """
broker = ContainerBroker(':memory:', account='a', container='c')
broker.initialize(normalize_timestamp('1'))
for obj1 in xrange(4):
for obj2 in xrange(125):
broker.put_object('%d/%04d' % (obj1, obj2),
normalize_timestamp(time()), 0, 'text/plain',
'd41d8cd98f00b204e9800998ecf8427e')
for obj in xrange(125):
broker.put_object('2/0051/%04d' % obj,
normalize_timestamp(time()), 0, 'text/plain',
'd41d8cd98f00b204e9800998ecf8427e')
for obj in xrange(125):
broker.put_object('3/%04d/0049' % obj,
normalize_timestamp(time()), 0, 'text/plain',
'd41d8cd98f00b204e9800998ecf8427e')
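        # The container now holds 500 objects named '<0-3>/<0000-0124>', plus
        # 125 under the '2/0051/' prefix and 125 shaped like '3/NNNN/0049',
        # which exercises marker, end_marker, prefix and delimiter handling.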
listing = broker.list_objects_iter(100, '', None, None, '')
self.assertEquals(len(listing), 100)
self.assertEquals(listing[0][0], '0/0000')
self.assertEquals(listing[-1][0], '0/0099')
listing = broker.list_objects_iter(100, '', '0/0050', None, '')
self.assertEquals(len(listing), 50)
self.assertEquals(listing[0][0], '0/0000')
self.assertEquals(listing[-1][0], '0/0049')
listing = broker.list_objects_iter(100, '0/0099', None, None, '')
self.assertEquals(len(listing), 100)
self.assertEquals(listing[0][0], '0/0100')
self.assertEquals(listing[-1][0], '1/0074')
listing = broker.list_objects_iter(55, '1/0074', None, None, '')
self.assertEquals(len(listing), 55)
self.assertEquals(listing[0][0], '1/0075')
self.assertEquals(listing[-1][0], '2/0004')
listing = broker.list_objects_iter(10, '', None, '0/01', '')
self.assertEquals(len(listing), 10)
self.assertEquals(listing[0][0], '0/0100')
self.assertEquals(listing[-1][0], '0/0109')
listing = broker.list_objects_iter(10, '', None, '0/', '/')
self.assertEquals(len(listing), 10)
self.assertEquals(listing[0][0], '0/0000')
self.assertEquals(listing[-1][0], '0/0009')
listing = broker.list_objects_iter(10, '', None, '', '/')
self.assertEquals(len(listing), 4)
self.assertEquals([row[0] for row in listing],
['0/', '1/', '2/', '3/'])
listing = broker.list_objects_iter(10, '2', None, None, '/')
self.assertEquals(len(listing), 2)
self.assertEquals([row[0] for row in listing], ['2/', '3/'])
        listing = broker.list_objects_iter(10, '2/', None, None, '/')
self.assertEquals(len(listing), 1)
self.assertEquals([row[0] for row in listing], ['3/'])
listing = broker.list_objects_iter(10, '2/0050', None, '2/', '/')
self.assertEquals(len(listing), 10)
self.assertEquals(listing[0][0], '2/0051')
self.assertEquals(listing[1][0], '2/0051/')
self.assertEquals(listing[2][0], '2/0052')
self.assertEquals(listing[-1][0], '2/0059')
listing = broker.list_objects_iter(10, '3/0045', None, '3/', '/')
self.assertEquals(len(listing), 10)
self.assertEquals([row[0] for row in listing],
['3/0045/', '3/0046', '3/0046/', '3/0047',
'3/0047/', '3/0048', '3/0048/', '3/0049',
'3/0049/', '3/0050'])
broker.put_object('3/0049/', normalize_timestamp(time()), 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
listing = broker.list_objects_iter(10, '3/0048', None, None, None)
self.assertEquals(len(listing), 10)
self.assertEquals([row[0] for row in listing],
['3/0048/0049', '3/0049', '3/0049/',
'3/0049/0049', '3/0050', '3/0050/0049', '3/0051', '3/0051/0049',
'3/0052', '3/0052/0049'])
listing = broker.list_objects_iter(10, '3/0048', None, '3/', '/')
self.assertEquals(len(listing), 10)
self.assertEquals([row[0] for row in listing],
['3/0048/', '3/0049', '3/0049/', '3/0050',
'3/0050/', '3/0051', '3/0051/', '3/0052', '3/0052/', '3/0053'])
listing = broker.list_objects_iter(10, None, None, '3/0049/', '/')
self.assertEquals(len(listing), 2)
self.assertEquals([row[0] for row in listing],
['3/0049/', '3/0049/0049'])
listing = broker.list_objects_iter(10, None, None, None, None,
'3/0049')
self.assertEquals(len(listing), 1)
self.assertEquals([row[0] for row in listing], ['3/0049/0049'])
listing = broker.list_objects_iter(2, None, None, '3/', '/')
self.assertEquals(len(listing), 2)
self.assertEquals([row[0] for row in listing], ['3/0000', '3/0000/'])
listing = broker.list_objects_iter(2, None, None, None, None, '3')
self.assertEquals(len(listing), 2)
self.assertEquals([row[0] for row in listing], ['3/0000', '3/0001'])
def test_list_objects_iter_prefix_delim(self):
""" Test swift.common.db.ContainerBroker.list_objects_iter """
broker = ContainerBroker(':memory:', account='a', container='c')
broker.initialize(normalize_timestamp('1'))
broker.put_object('/pets/dogs/1', normalize_timestamp(0), 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('/pets/dogs/2', normalize_timestamp(0), 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('/pets/fish/a', normalize_timestamp(0), 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('/pets/fish/b', normalize_timestamp(0), 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('/pets/fish_info.txt', normalize_timestamp(0), 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('/snakes', normalize_timestamp(0), 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
        # def list_objects_iter(self, limit, marker, end_marker, prefix,
        #                       delimiter, path=None, format=None):
listing = broker.list_objects_iter(100, None, None, '/pets/f', '/')
self.assertEquals([row[0] for row in listing], ['/pets/fish/', '/pets/fish_info.txt'])
listing = broker.list_objects_iter(100, None, None, '/pets/fish', '/')
self.assertEquals([row[0] for row in listing], ['/pets/fish/', '/pets/fish_info.txt'])
listing = broker.list_objects_iter(100, None, None, '/pets/fish/', '/')
self.assertEquals([row[0] for row in listing], ['/pets/fish/a', '/pets/fish/b'])
def test_double_check_trailing_delimiter(self):
""" Test swift.common.db.ContainerBroker.list_objects_iter for a
container that has an odd file with a trailing delimiter """
broker = ContainerBroker(':memory:', account='a', container='c')
broker.initialize(normalize_timestamp('1'))
broker.put_object('a', normalize_timestamp(time()), 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('a/', normalize_timestamp(time()), 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('a/a', normalize_timestamp(time()), 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('a/a/a', normalize_timestamp(time()), 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('a/a/b', normalize_timestamp(time()), 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('a/b', normalize_timestamp(time()), 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('b', normalize_timestamp(time()), 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('b/a', normalize_timestamp(time()), 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('b/b', normalize_timestamp(time()), 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('c', normalize_timestamp(time()), 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
listing = broker.list_objects_iter(15, None, None, None, None)
self.assertEquals(len(listing), 10)
self.assertEquals([row[0] for row in listing],
['a', 'a/', 'a/a', 'a/a/a', 'a/a/b', 'a/b', 'b', 'b/a', 'b/b', 'c'])
listing = broker.list_objects_iter(15, None, None, '', '/')
self.assertEquals(len(listing), 5)
self.assertEquals([row[0] for row in listing],
['a', 'a/', 'b', 'b/', 'c'])
listing = broker.list_objects_iter(15, None, None, 'a/', '/')
self.assertEquals(len(listing), 4)
self.assertEquals([row[0] for row in listing],
['a/', 'a/a', 'a/a/', 'a/b'])
listing = broker.list_objects_iter(15, None, None, 'b/', '/')
self.assertEquals(len(listing), 2)
self.assertEquals([row[0] for row in listing], ['b/a', 'b/b'])
def test_chexor(self):
broker = ContainerBroker(':memory:', account='a', container='c')
broker.initialize(normalize_timestamp('1'))
broker.put_object('a', normalize_timestamp(1), 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('b', normalize_timestamp(2), 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
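        # The reported hash is the byte-wise XOR of md5('<name>-<created_at>')
        # digests over the live rows, rendered as lowercase hex.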
hasha = hashlib.md5('%s-%s' % ('a', '0000000001.00000')).digest()
hashb = hashlib.md5('%s-%s' % ('b', '0000000002.00000')).digest()
        hashc = ''.join(('%02x' % (ord(a)^ord(b)) for a, b in zip(hasha, hashb)))
self.assertEquals(broker.get_info()['hash'], hashc)
broker.put_object('b', normalize_timestamp(3), 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
hashb = hashlib.md5('%s-%s' % ('b', '0000000003.00000')).digest()
hashc = ''.join(('%02x' % (ord(a)^ord(b)) for a, b in zip(hasha, hashb)))
self.assertEquals(broker.get_info()['hash'], hashc)
def test_newid(self):
"""test DatabaseBroker.newid"""
broker = ContainerBroker(':memory:', account='a', container='c')
broker.initialize(normalize_timestamp('1'))
id = broker.get_info()['id']
broker.newid('someid')
self.assertNotEquals(id, broker.get_info()['id'])
def test_get_items_since(self):
"""test DatabaseBroker.get_items_since"""
broker = ContainerBroker(':memory:', account='a', container='c')
broker.initialize(normalize_timestamp('1'))
broker.put_object('a', normalize_timestamp(1), 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
max_row = broker.get_replication_info()['max_row']
broker.put_object('b', normalize_timestamp(2), 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
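        # Only rows added after the remembered max_row should come back,
        # i.e. just 'b'.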
items = broker.get_items_since(max_row, 1000)
self.assertEquals(len(items), 1)
self.assertEquals(items[0]['name'], 'b')
def test_sync_merging(self):
""" exercise the DatabaseBroker sync functions a bit """
broker1 = ContainerBroker(':memory:', account='a', container='c')
broker1.initialize(normalize_timestamp('1'))
broker2 = ContainerBroker(':memory:', account='a', container='c')
broker2.initialize(normalize_timestamp('1'))
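        # get_sync() returns -1 for an unknown remote id; merge_syncs()
        # records a remote's sync point so get_syncs() can hand it on to
        # another broker.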
self.assertEquals(broker2.get_sync('12345'), -1)
broker1.merge_syncs([{'sync_point': 3, 'remote_id': '12345'}])
broker2.merge_syncs(broker1.get_syncs())
self.assertEquals(broker2.get_sync('12345'), 3)
def test_merge_items(self):
broker1 = ContainerBroker(':memory:', account='a', container='c')
broker1.initialize(normalize_timestamp('1'))
broker2 = ContainerBroker(':memory:', account='a', container='c')
broker2.initialize(normalize_timestamp('1'))
broker1.put_object('a', normalize_timestamp(1), 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker1.put_object('b', normalize_timestamp(2), 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
id = broker1.get_info()['id']
broker2.merge_items(broker1.get_items_since(
broker2.get_sync(id), 1000), id)
items = broker2.get_items_since(-1, 1000)
self.assertEquals(len(items), 2)
self.assertEquals(['a', 'b'], sorted([rec['name'] for rec in items]))
broker1.put_object('c', normalize_timestamp(3), 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker2.merge_items(broker1.get_items_since(
broker2.get_sync(id), 1000), id)
items = broker2.get_items_since(-1, 1000)
self.assertEquals(len(items), 3)
self.assertEquals(['a', 'b', 'c'],
sorted([rec['name'] for rec in items]))
def test_merge_items_overwrite(self):
"""test DatabaseBroker.merge_items"""
broker1 = ContainerBroker(':memory:', account='a', container='c')
broker1.initialize(normalize_timestamp('1'))
id = broker1.get_info()['id']
broker2 = ContainerBroker(':memory:', account='a', container='c')
broker2.initialize(normalize_timestamp('1'))
broker1.put_object('a', normalize_timestamp(2), 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker1.put_object('b', normalize_timestamp(3), 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
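        # Replicate once, then overwrite 'a' with a newer timestamp and
        # replicate again: the newer created_at must win on broker2.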
broker2.merge_items(broker1.get_items_since(
broker2.get_sync(id), 1000), id)
broker1.put_object('a', normalize_timestamp(4), 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker2.merge_items(broker1.get_items_since(
broker2.get_sync(id), 1000), id)
items = broker2.get_items_since(-1, 1000)
self.assertEquals(['a', 'b'], sorted([rec['name'] for rec in items]))
for rec in items:
if rec['name'] == 'a':
self.assertEquals(rec['created_at'], normalize_timestamp(4))
if rec['name'] == 'b':
self.assertEquals(rec['created_at'], normalize_timestamp(3))
def test_merge_items_post_overwrite_out_of_order(self):
"""test DatabaseBroker.merge_items"""
broker1 = ContainerBroker(':memory:', account='a', container='c')
broker1.initialize(normalize_timestamp('1'))
id = broker1.get_info()['id']
broker2 = ContainerBroker(':memory:', account='a', container='c')
broker2.initialize(normalize_timestamp('1'))
broker1.put_object('a', normalize_timestamp(2), 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker1.put_object('b', normalize_timestamp(3), 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker2.merge_items(broker1.get_items_since(
broker2.get_sync(id), 1000), id)
broker1.put_object('a', normalize_timestamp(4), 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker2.merge_items(broker1.get_items_since(
broker2.get_sync(id), 1000), id)
items = broker2.get_items_since(-1, 1000)
self.assertEquals(['a', 'b'], sorted([rec['name'] for rec in items]))
for rec in items:
if rec['name'] == 'a':
self.assertEquals(rec['created_at'], normalize_timestamp(4))
if rec['name'] == 'b':
self.assertEquals(rec['created_at'], normalize_timestamp(3))
self.assertEquals(rec['content_type'], 'text/plain')
items = broker2.get_items_since(-1, 1000)
self.assertEquals(['a', 'b'], sorted([rec['name'] for rec in items]))
for rec in items:
if rec['name'] == 'a':
self.assertEquals(rec['created_at'], normalize_timestamp(4))
if rec['name'] == 'b':
self.assertEquals(rec['created_at'], normalize_timestamp(3))
broker1.put_object('b', normalize_timestamp(5), 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker2.merge_items(broker1.get_items_since(
broker2.get_sync(id), 1000), id)
items = broker2.get_items_since(-1, 1000)
self.assertEquals(['a', 'b'], sorted([rec['name'] for rec in items]))
for rec in items:
if rec['name'] == 'a':
self.assertEquals(rec['created_at'], normalize_timestamp(4))
if rec['name'] == 'b':
self.assertEquals(rec['created_at'], normalize_timestamp(5))
self.assertEquals(rec['content_type'], 'text/plain')
def premetadata_create_container_stat_table(self, conn, put_timestamp=None):
"""
Copied from swift.common.db.ContainerBroker before the metadata column was
added; used for testing with TestContainerBrokerBeforeMetadata.
    Create the container_stat table which is specific to the container DB.
:param conn: DB connection object
:param put_timestamp: put timestamp
"""
if put_timestamp is None:
put_timestamp = normalize_timestamp(0)
conn.executescript("""
CREATE TABLE container_stat (
account TEXT,
container TEXT,
created_at TEXT,
put_timestamp TEXT DEFAULT '0',
delete_timestamp TEXT DEFAULT '0',
object_count INTEGER,
bytes_used INTEGER,
reported_put_timestamp TEXT DEFAULT '0',
reported_delete_timestamp TEXT DEFAULT '0',
reported_object_count INTEGER DEFAULT 0,
reported_bytes_used INTEGER DEFAULT 0,
hash TEXT default '00000000000000000000000000000000',
id TEXT,
status TEXT DEFAULT '',
status_changed_at TEXT DEFAULT '0'
);
INSERT INTO container_stat (object_count, bytes_used)
VALUES (0, 0);
""")
conn.execute('''
UPDATE container_stat
SET account = ?, container = ?, created_at = ?, id = ?,
put_timestamp = ?
''', (self.account, self.container, normalize_timestamp(time()),
str(uuid4()), put_timestamp))
class TestContainerBrokerBeforeMetadata(TestContainerBroker):
"""
Tests for swift.common.db.ContainerBroker against databases created before
the metadata column was added.
"""
def setUp(self):
self._imported_create_container_stat_table = \
ContainerBroker.create_container_stat_table
ContainerBroker.create_container_stat_table = \
premetadata_create_container_stat_table
broker = ContainerBroker(':memory:', account='a', container='c')
broker.initialize(normalize_timestamp('1'))
exc = None
with broker.get() as conn:
try:
conn.execute('SELECT metadata FROM container_stat')
except BaseException, err:
exc = err
self.assert_('no such column: metadata' in str(exc))
def tearDown(self):
ContainerBroker.create_container_stat_table = \
self._imported_create_container_stat_table
broker = ContainerBroker(':memory:', account='a', container='c')
broker.initialize(normalize_timestamp('1'))
with broker.get() as conn:
conn.execute('SELECT metadata FROM container_stat')
def prexsync_create_container_stat_table(self, conn, put_timestamp=None):
"""
Copied from swift.common.db.ContainerBroker before the
x_container_sync_point[12] columns were added; used for testing with
TestContainerBrokerBeforeXSync.
    Create the container_stat table which is specific to the container DB.
:param conn: DB connection object
:param put_timestamp: put timestamp
"""
if put_timestamp is None:
put_timestamp = normalize_timestamp(0)
conn.executescript("""
CREATE TABLE container_stat (
account TEXT,
container TEXT,
created_at TEXT,
put_timestamp TEXT DEFAULT '0',
delete_timestamp TEXT DEFAULT '0',
object_count INTEGER,
bytes_used INTEGER,
reported_put_timestamp TEXT DEFAULT '0',
reported_delete_timestamp TEXT DEFAULT '0',
reported_object_count INTEGER DEFAULT 0,
reported_bytes_used INTEGER DEFAULT 0,
hash TEXT default '00000000000000000000000000000000',
id TEXT,
status TEXT DEFAULT '',
status_changed_at TEXT DEFAULT '0',
metadata TEXT DEFAULT ''
);
INSERT INTO container_stat (object_count, bytes_used)
VALUES (0, 0);
""")
conn.execute('''
UPDATE container_stat
SET account = ?, container = ?, created_at = ?, id = ?,
put_timestamp = ?
''', (self.account, self.container, normalize_timestamp(time()),
str(uuid4()), put_timestamp))
class TestContainerBrokerBeforeXSync(TestContainerBroker):
"""
Tests for swift.common.db.ContainerBroker against databases created before
the x_container_sync_point[12] columns were added.
"""
def setUp(self):
self._imported_create_container_stat_table = \
ContainerBroker.create_container_stat_table
ContainerBroker.create_container_stat_table = \
prexsync_create_container_stat_table
broker = ContainerBroker(':memory:', account='a', container='c')
broker.initialize(normalize_timestamp('1'))
exc = None
with broker.get() as conn:
try:
conn.execute('''SELECT x_container_sync_point1
FROM container_stat''')
except BaseException, err:
exc = err
self.assert_('no such column: x_container_sync_point1' in str(exc))
def tearDown(self):
ContainerBroker.create_container_stat_table = \
self._imported_create_container_stat_table
broker = ContainerBroker(':memory:', account='a', container='c')
broker.initialize(normalize_timestamp('1'))
with broker.get() as conn:
conn.execute('SELECT x_container_sync_point1 FROM container_stat')
class TestAccountBroker(unittest.TestCase):
""" Tests for swift.common.db.AccountBroker """
def test_creation(self):
""" Test swift.common.db.AccountBroker.__init__ """
broker = AccountBroker(':memory:', account='a')
self.assertEqual(broker.db_file, ':memory:')
got_exc = False
try:
with broker.get() as conn:
pass
except Exception:
got_exc = True
self.assert_(got_exc)
broker.initialize(normalize_timestamp('1'))
with broker.get() as conn:
curs = conn.cursor()
curs.execute('SELECT 1')
self.assertEqual(curs.fetchall()[0][0], 1)
def test_exception(self):
""" Test swift.common.db.AccountBroker throwing a conn away after
exception """
first_conn = None
broker = AccountBroker(':memory:', account='a')
broker.initialize(normalize_timestamp('1'))
with broker.get() as conn:
first_conn = conn
try:
with broker.get() as conn:
self.assertEquals(first_conn, conn)
raise Exception('OMG')
except Exception:
pass
self.assert_(broker.conn is None)
def test_empty(self):
""" Test swift.common.db.AccountBroker.empty """
broker = AccountBroker(':memory:', account='a')
broker.initialize(normalize_timestamp('1'))
self.assert_(broker.empty())
broker.put_container('o', normalize_timestamp(time()), 0, 0, 0)
self.assert_(not broker.empty())
sleep(.00001)
broker.put_container('o', 0, normalize_timestamp(time()), 0, 0)
self.assert_(broker.empty())
def test_reclaim(self):
broker = AccountBroker(':memory:', account='test_account')
broker.initialize(normalize_timestamp('1'))
broker.put_container('c', normalize_timestamp(time()), 0, 0, 0)
with broker.get() as conn:
self.assertEquals(conn.execute(
"SELECT count(*) FROM container "
"WHERE deleted = 0").fetchone()[0], 1)
self.assertEquals(conn.execute(
"SELECT count(*) FROM container "
"WHERE deleted = 1").fetchone()[0], 0)
broker.reclaim(normalize_timestamp(time() - 999), time())
with broker.get() as conn:
self.assertEquals(conn.execute(
"SELECT count(*) FROM container "
"WHERE deleted = 0").fetchone()[0], 1)
self.assertEquals(conn.execute(
"SELECT count(*) FROM container "
"WHERE deleted = 1").fetchone()[0], 0)
sleep(.00001)
broker.put_container('c', 0, normalize_timestamp(time()), 0, 0)
with broker.get() as conn:
self.assertEquals(conn.execute(
"SELECT count(*) FROM container "
"WHERE deleted = 0").fetchone()[0], 0)
self.assertEquals(conn.execute(
"SELECT count(*) FROM container "
"WHERE deleted = 1").fetchone()[0], 1)
broker.reclaim(normalize_timestamp(time() - 999), time())
with broker.get() as conn:
self.assertEquals(conn.execute(
"SELECT count(*) FROM container "
"WHERE deleted = 0").fetchone()[0], 0)
self.assertEquals(conn.execute(
"SELECT count(*) FROM container "
"WHERE deleted = 1").fetchone()[0], 1)
sleep(.00001)
broker.reclaim(normalize_timestamp(time()), time())
with broker.get() as conn:
self.assertEquals(conn.execute(
"SELECT count(*) FROM container "
"WHERE deleted = 0").fetchone()[0], 0)
self.assertEquals(conn.execute(
"SELECT count(*) FROM container "
"WHERE deleted = 1").fetchone()[0], 0)
# Test reclaim after deletion. Create 3 test containers
broker.put_container('x', 0, 0, 0, 0)
broker.put_container('y', 0, 0, 0, 0)
broker.put_container('z', 0, 0, 0, 0)
res = broker.reclaim(normalize_timestamp(time()), time())
# self.assertEquals(len(res), 2)
# self.assert_(isinstance(res, tuple))
# containers, account_name = res
# self.assert_(containers is None)
# self.assert_(account_name is None)
# Now delete the account
broker.delete_db(normalize_timestamp(time()))
res = broker.reclaim(normalize_timestamp(time()), time())
# self.assertEquals(len(res), 2)
# self.assert_(isinstance(res, tuple))
# containers, account_name = res
# self.assertEquals(account_name, 'test_account')
# self.assertEquals(len(containers), 3)
# self.assert_('x' in containers)
# self.assert_('y' in containers)
# self.assert_('z' in containers)
# self.assert_('a' not in containers)
def test_delete_container(self):
""" Test swift.common.db.AccountBroker.delete_container """
broker = AccountBroker(':memory:', account='a')
broker.initialize(normalize_timestamp('1'))
broker.put_container('o', normalize_timestamp(time()), 0, 0, 0)
with broker.get() as conn:
self.assertEquals(conn.execute(
"SELECT count(*) FROM container "
"WHERE deleted = 0").fetchone()[0], 1)
self.assertEquals(conn.execute(
"SELECT count(*) FROM container "
"WHERE deleted = 1").fetchone()[0], 0)
sleep(.00001)
broker.put_container('o', 0, normalize_timestamp(time()), 0, 0)
with broker.get() as conn:
self.assertEquals(conn.execute(
"SELECT count(*) FROM container "
"WHERE deleted = 0").fetchone()[0], 0)
self.assertEquals(conn.execute(
"SELECT count(*) FROM container "
"WHERE deleted = 1").fetchone()[0], 1)
def test_get_container_timestamp(self):
""" Test swift.common.db.AccountBroker.get_container_timestamp """
broker = AccountBroker(':memory:', account='a')
broker.initialize(normalize_timestamp('1'))
# Create initial container
timestamp = normalize_timestamp(time())
broker.put_container('container_name', timestamp, 0, 0, 0)
        # test an existing container
ts = broker.get_container_timestamp('container_name')
self.assertEquals(ts, timestamp)
        # test a missing container
ts = broker.get_container_timestamp('something else')
self.assertEquals(ts, None)
def test_put_container(self):
""" Test swift.common.db.AccountBroker.put_container """
broker = AccountBroker(':memory:', account='a')
broker.initialize(normalize_timestamp('1'))
# Create initial container
timestamp = normalize_timestamp(time())
broker.put_container('"{<container \'&\' name>}"', timestamp, 0, 0, 0)
with broker.get() as conn:
self.assertEquals(conn.execute(
"SELECT name FROM container").fetchone()[0],
'"{<container \'&\' name>}"')
self.assertEquals(conn.execute(
"SELECT put_timestamp FROM container").fetchone()[0], timestamp)
self.assertEquals(conn.execute(
"SELECT deleted FROM container").fetchone()[0], 0)
# Reput same event
broker.put_container('"{<container \'&\' name>}"', timestamp, 0, 0, 0)
with broker.get() as conn:
self.assertEquals(conn.execute(
"SELECT name FROM container").fetchone()[0],
'"{<container \'&\' name>}"')
self.assertEquals(conn.execute(
"SELECT put_timestamp FROM container").fetchone()[0], timestamp)
self.assertEquals(conn.execute(
"SELECT deleted FROM container").fetchone()[0], 0)
# Put new event
sleep(.00001)
timestamp = normalize_timestamp(time())
broker.put_container('"{<container \'&\' name>}"', timestamp, 0, 0, 0)
with broker.get() as conn:
self.assertEquals(conn.execute(
"SELECT name FROM container").fetchone()[0],
'"{<container \'&\' name>}"')
self.assertEquals(conn.execute(
"SELECT put_timestamp FROM container").fetchone()[0], timestamp)
self.assertEquals(conn.execute(
"SELECT deleted FROM container").fetchone()[0], 0)
# Put old event
otimestamp = normalize_timestamp(float(timestamp) - 1)
broker.put_container('"{<container \'&\' name>}"', otimestamp, 0, 0, 0)
with broker.get() as conn:
self.assertEquals(conn.execute(
"SELECT name FROM container").fetchone()[0],
'"{<container \'&\' name>}"')
self.assertEquals(conn.execute(
"SELECT put_timestamp FROM container").fetchone()[0], timestamp)
self.assertEquals(conn.execute(
"SELECT deleted FROM container").fetchone()[0], 0)
# Put old delete event
dtimestamp = normalize_timestamp(float(timestamp) - 1)
broker.put_container('"{<container \'&\' name>}"', 0, dtimestamp, 0, 0)
with broker.get() as conn:
self.assertEquals(conn.execute(
"SELECT name FROM container").fetchone()[0],
'"{<container \'&\' name>}"')
self.assertEquals(conn.execute(
"SELECT put_timestamp FROM container").fetchone()[0], timestamp)
self.assertEquals(conn.execute(
"SELECT delete_timestamp FROM container").fetchone()[0],
dtimestamp)
self.assertEquals(conn.execute(
"SELECT deleted FROM container").fetchone()[0], 0)
# Put new delete event
sleep(.00001)
timestamp = normalize_timestamp(time())
broker.put_container('"{<container \'&\' name>}"', 0, timestamp, 0, 0)
with broker.get() as conn:
self.assertEquals(conn.execute(
"SELECT name FROM container").fetchone()[0],
'"{<container \'&\' name>}"')
self.assertEquals(conn.execute(
"SELECT delete_timestamp FROM container").fetchone()[0],
timestamp)
self.assertEquals(conn.execute(
"SELECT deleted FROM container").fetchone()[0], 1)
# Put new event
sleep(.00001)
timestamp = normalize_timestamp(time())
broker.put_container('"{<container \'&\' name>}"', timestamp, 0, 0, 0)
with broker.get() as conn:
self.assertEquals(conn.execute(
"SELECT name FROM container").fetchone()[0],
'"{<container \'&\' name>}"')
self.assertEquals(conn.execute(
"SELECT put_timestamp FROM container").fetchone()[0], timestamp)
self.assertEquals(conn.execute(
"SELECT deleted FROM container").fetchone()[0], 0)
def test_get_info(self):
""" Test swift.common.db.AccountBroker.get_info """
broker = AccountBroker(':memory:', account='test1')
broker.initialize(normalize_timestamp('1'))
info = broker.get_info()
self.assertEquals(info['account'], 'test1')
self.assertEquals(info['hash'], '00000000000000000000000000000000')
info = broker.get_info()
self.assertEquals(info['container_count'], 0)
broker.put_container('c1', normalize_timestamp(time()), 0, 0, 0)
info = broker.get_info()
self.assertEquals(info['container_count'], 1)
sleep(.00001)
broker.put_container('c2', normalize_timestamp(time()), 0, 0, 0)
info = broker.get_info()
self.assertEquals(info['container_count'], 2)
sleep(.00001)
broker.put_container('c2', normalize_timestamp(time()), 0, 0, 0)
info = broker.get_info()
self.assertEquals(info['container_count'], 2)
sleep(.00001)
broker.put_container('c1', 0, normalize_timestamp(time()), 0, 0)
info = broker.get_info()
self.assertEquals(info['container_count'], 1)
sleep(.00001)
broker.put_container('c2', 0, normalize_timestamp(time()), 0, 0)
info = broker.get_info()
self.assertEquals(info['container_count'], 0)
def test_list_containers_iter(self):
""" Test swift.common.db.AccountBroker.list_containers_iter """
broker = AccountBroker(':memory:', account='a')
broker.initialize(normalize_timestamp('1'))
for cont1 in xrange(4):
for cont2 in xrange(125):
broker.put_container('%d/%04d' % (cont1, cont2),
normalize_timestamp(time()), 0, 0, 0)
for cont in xrange(125):
broker.put_container('2/0051/%04d' % cont,
normalize_timestamp(time()), 0, 0, 0)
for cont in xrange(125):
broker.put_container('3/%04d/0049' % cont,
normalize_timestamp(time()), 0, 0, 0)
listing = broker.list_containers_iter(100, '', None, None, '')
self.assertEquals(len(listing), 100)
self.assertEquals(listing[0][0], '0/0000')
self.assertEquals(listing[-1][0], '0/0099')
listing = broker.list_containers_iter(100, '', '0/0050', None, '')
self.assertEquals(len(listing), 51)
self.assertEquals(listing[0][0], '0/0000')
self.assertEquals(listing[-1][0], '0/0050')
listing = broker.list_containers_iter(100, '0/0099', None, None, '')
self.assertEquals(len(listing), 100)
self.assertEquals(listing[0][0], '0/0100')
self.assertEquals(listing[-1][0], '1/0074')
listing = broker.list_containers_iter(55, '1/0074', None, None, '')
self.assertEquals(len(listing), 55)
self.assertEquals(listing[0][0], '1/0075')
self.assertEquals(listing[-1][0], '2/0004')
listing = broker.list_containers_iter(10, '', None, '0/01', '')
self.assertEquals(len(listing), 10)
self.assertEquals(listing[0][0], '0/0100')
self.assertEquals(listing[-1][0], '0/0109')
listing = broker.list_containers_iter(10, '', None, '0/01', '/')
self.assertEquals(len(listing), 10)
self.assertEquals(listing[0][0], '0/0100')
self.assertEquals(listing[-1][0], '0/0109')
listing = broker.list_containers_iter(10, '', None, '0/', '/')
self.assertEquals(len(listing), 10)
self.assertEquals(listing[0][0], '0/0000')
self.assertEquals(listing[-1][0], '0/0009')
listing = broker.list_containers_iter(10, '', None, '', '/')
self.assertEquals(len(listing), 4)
self.assertEquals([row[0] for row in listing],
['0/', '1/', '2/', '3/'])
listing = broker.list_containers_iter(10, '2/', None, None, '/')
self.assertEquals(len(listing), 1)
self.assertEquals([row[0] for row in listing], ['3/'])
listing = broker.list_containers_iter(10, '', None, '2', '/')
self.assertEquals(len(listing), 1)
self.assertEquals([row[0] for row in listing], ['2/'])
listing = broker.list_containers_iter(10, '2/0050', None, '2/', '/')
self.assertEquals(len(listing), 10)
self.assertEquals(listing[0][0], '2/0051')
self.assertEquals(listing[1][0], '2/0051/')
self.assertEquals(listing[2][0], '2/0052')
self.assertEquals(listing[-1][0], '2/0059')
listing = broker.list_containers_iter(10, '3/0045', None, '3/', '/')
self.assertEquals(len(listing), 10)
self.assertEquals([row[0] for row in listing],
['3/0045/', '3/0046', '3/0046/', '3/0047',
'3/0047/', '3/0048', '3/0048/', '3/0049',
'3/0049/', '3/0050'])
broker.put_container('3/0049/', normalize_timestamp(time()), 0, 0, 0)
listing = broker.list_containers_iter(10, '3/0048', None, None, None)
self.assertEquals(len(listing), 10)
self.assertEquals([row[0] for row in listing],
['3/0048/0049', '3/0049', '3/0049/', '3/0049/0049',
'3/0050', '3/0050/0049', '3/0051', '3/0051/0049',
'3/0052', '3/0052/0049'])
listing = broker.list_containers_iter(10, '3/0048', None, '3/', '/')
self.assertEquals(len(listing), 10)
self.assertEquals([row[0] for row in listing],
['3/0048/', '3/0049', '3/0049/', '3/0050',
'3/0050/', '3/0051', '3/0051/', '3/0052',
'3/0052/', '3/0053'])
listing = broker.list_containers_iter(10, None, None, '3/0049/', '/')
self.assertEquals(len(listing), 2)
self.assertEquals([row[0] for row in listing],
['3/0049/', '3/0049/0049'])
def test_double_check_trailing_delimiter(self):
""" Test swift.common.db.AccountBroker.list_containers_iter for an
account that has an odd file with a trailing delimiter """
broker = AccountBroker(':memory:', account='a')
broker.initialize(normalize_timestamp('1'))
broker.put_container('a', normalize_timestamp(time()), 0, 0, 0)
broker.put_container('a/', normalize_timestamp(time()), 0, 0, 0)
broker.put_container('a/a', normalize_timestamp(time()), 0, 0, 0)
broker.put_container('a/a/a', normalize_timestamp(time()), 0, 0, 0)
broker.put_container('a/a/b', normalize_timestamp(time()), 0, 0, 0)
broker.put_container('a/b', normalize_timestamp(time()), 0, 0, 0)
broker.put_container('b', normalize_timestamp(time()), 0, 0, 0)
broker.put_container('b/a', normalize_timestamp(time()), 0, 0, 0)
broker.put_container('b/b', normalize_timestamp(time()), 0, 0, 0)
broker.put_container('c', normalize_timestamp(time()), 0, 0, 0)
listing = broker.list_containers_iter(15, None, None, None, None)
self.assertEquals(len(listing), 10)
self.assertEquals([row[0] for row in listing],
['a', 'a/', 'a/a', 'a/a/a', 'a/a/b', 'a/b', 'b',
'b/a', 'b/b', 'c'])
listing = broker.list_containers_iter(15, None, None, '', '/')
self.assertEquals(len(listing), 5)
self.assertEquals([row[0] for row in listing],
['a', 'a/', 'b', 'b/', 'c'])
listing = broker.list_containers_iter(15, None, None, 'a/', '/')
self.assertEquals(len(listing), 4)
self.assertEquals([row[0] for row in listing],
['a/', 'a/a', 'a/a/', 'a/b'])
listing = broker.list_containers_iter(15, None, None, 'b/', '/')
self.assertEquals(len(listing), 2)
self.assertEquals([row[0] for row in listing], ['b/a', 'b/b'])
def test_chexor(self):
broker = AccountBroker(':memory:', account='a')
broker.initialize(normalize_timestamp('1'))
broker.put_container('a', normalize_timestamp(1),
normalize_timestamp(0), 0, 0)
broker.put_container('b', normalize_timestamp(2),
normalize_timestamp(0), 0, 0)
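        # For accounts each per-row digest covers
        # '<name>-<put_timestamp>-<delete_timestamp>-<object_count>-<bytes_used>',
        # XORed together and hex-encoded just like the container hash.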
hasha = hashlib.md5('%s-%s' %
('a', '0000000001.00000-0000000000.00000-0-0')
).digest()
hashb = hashlib.md5('%s-%s' %
('b', '0000000002.00000-0000000000.00000-0-0')
).digest()
hashc = \
''.join(('%02x' % (ord(a)^ord(b)) for a, b in zip(hasha, hashb)))
self.assertEquals(broker.get_info()['hash'], hashc)
broker.put_container('b', normalize_timestamp(3),
normalize_timestamp(0), 0, 0)
hashb = hashlib.md5('%s-%s' %
('b', '0000000003.00000-0000000000.00000-0-0')
).digest()
hashc = \
''.join(('%02x' % (ord(a)^ord(b)) for a, b in zip(hasha, hashb)))
self.assertEquals(broker.get_info()['hash'], hashc)
def test_merge_items(self):
broker1 = AccountBroker(':memory:', account='a')
broker1.initialize(normalize_timestamp('1'))
broker2 = AccountBroker(':memory:', account='a')
broker2.initialize(normalize_timestamp('1'))
broker1.put_container('a', normalize_timestamp(1), 0, 0, 0)
broker1.put_container('b', normalize_timestamp(2), 0, 0, 0)
id = broker1.get_info()['id']
broker2.merge_items(broker1.get_items_since(
broker2.get_sync(id), 1000), id)
items = broker2.get_items_since(-1, 1000)
self.assertEquals(len(items), 2)
self.assertEquals(['a', 'b'], sorted([rec['name'] for rec in items]))
broker1.put_container('c', normalize_timestamp(3), 0, 0, 0)
broker2.merge_items(broker1.get_items_since(
broker2.get_sync(id), 1000), id)
items = broker2.get_items_since(-1, 1000)
self.assertEquals(len(items), 3)
self.assertEquals(['a', 'b', 'c'],
sorted([rec['name'] for rec in items]))
def premetadata_create_account_stat_table(self, conn, put_timestamp):
"""
Copied from swift.common.db.AccountBroker before the metadata column was
added; used for testing with TestAccountBrokerBeforeMetadata.
Create account_stat table which is specific to the account DB.
:param conn: DB connection object
:param put_timestamp: put timestamp
"""
conn.executescript("""
CREATE TABLE account_stat (
account TEXT,
created_at TEXT,
put_timestamp TEXT DEFAULT '0',
delete_timestamp TEXT DEFAULT '0',
container_count INTEGER,
object_count INTEGER DEFAULT 0,
bytes_used INTEGER DEFAULT 0,
hash TEXT default '00000000000000000000000000000000',
id TEXT,
status TEXT DEFAULT '',
status_changed_at TEXT DEFAULT '0'
);
INSERT INTO account_stat (container_count) VALUES (0);
""")
conn.execute('''
UPDATE account_stat SET account = ?, created_at = ?, id = ?,
put_timestamp = ?
''', (self.account, normalize_timestamp(time()), str(uuid4()),
put_timestamp))
class TestAccountBrokerBeforeMetadata(TestAccountBroker):
"""
Tests for swift.common.db.AccountBroker against databases created before
the metadata column was added.
"""
def setUp(self):
self._imported_create_account_stat_table = \
AccountBroker.create_account_stat_table
AccountBroker.create_account_stat_table = \
premetadata_create_account_stat_table
broker = AccountBroker(':memory:', account='a')
broker.initialize(normalize_timestamp('1'))
exc = None
with broker.get() as conn:
try:
conn.execute('SELECT metadata FROM account_stat')
except BaseException, err:
exc = err
self.assert_('no such column: metadata' in str(exc))
def tearDown(self):
AccountBroker.create_account_stat_table = \
self._imported_create_account_stat_table
broker = AccountBroker(':memory:', account='a')
broker.initialize(normalize_timestamp('1'))
with broker.get() as conn:
conn.execute('SELECT metadata FROM account_stat')
if __name__ == '__main__':
unittest.main()
| 45.270177 | 94 | 0.582309 | 86,499 | 0.940319 | 0 | 0 | 0 | 0 | 0 | 0 | 25,042 | 0.272228 |
48b05f987fe0e54d587244c5320a33f91ef59a44 | 10,824 | py | Python | robocorp-code/tests/robocorp_code_tests/fixtures.py | mardukbp/robotframework-lsp | 57b4b2b14b712c9bf90577924a920fb9b9e831c7 | [
"ECL-2.0",
"Apache-2.0"
]
| 92 | 2020-01-22T22:15:29.000Z | 2022-03-31T05:19:16.000Z | robocorp-code/tests/robocorp_code_tests/fixtures.py | mardukbp/robotframework-lsp | 57b4b2b14b712c9bf90577924a920fb9b9e831c7 | [
"ECL-2.0",
"Apache-2.0"
]
| 604 | 2020-01-25T17:13:27.000Z | 2022-03-31T18:58:24.000Z | robocorp-code/tests/robocorp_code_tests/fixtures.py | mardukbp/robotframework-lsp | 57b4b2b14b712c9bf90577924a920fb9b9e831c7 | [
"ECL-2.0",
"Apache-2.0"
]
| 39 | 2020-02-06T00:38:06.000Z | 2022-03-15T06:14:19.000Z | import os
import pytest
from robocorp_ls_core.protocols import IConfigProvider
from robocorp_ls_core.robotframework_log import get_logger
from robocorp_ls_core.unittest_tools.cases_fixture import CasesFixture
from robocorp_code.protocols import IRcc, ActionResult
import sys
from typing import Any
from pathlib import Path
from robocorp_code_tests.protocols import IRobocorpLanguageServerClient
log = get_logger(__name__)
IMAGE_IN_BASE64 = "iVBORw0KGgoAAAANSUhEUgAAAb8AAAAiCAYAAADPnNdbAAAAAXNSR0IArs4c6QAAAJ1JREFUeJzt1TEBACAMwDDAv+fhAo4mCvp1z8wsAAg5vwMA4DXzAyDH/ADIMT8AcswPgBzzAyDH/ADIMT8AcswPgBzzAyDH/ADIMT8AcswPgBzzAyDH/ADIMT8AcswPgBzzAyDH/ADIMT8AcswPgBzzAyDH/ADIMT8AcswPgBzzAyDH/ADIMT8AcswPgBzzAyDH/ADIMT8AcswPgJwLXQ0EQMJRx4AAAAAASUVORK5CYII="
@pytest.fixture
def language_server_client_class():
from robocorp_code_tests.robocode_language_server_client import (
RobocorpLanguageServerClient,
)
return RobocorpLanguageServerClient
@pytest.fixture
def language_server_class():
from robocorp_code.robocorp_language_server import RobocorpLanguageServer
return RobocorpLanguageServer
@pytest.fixture
def main_module():
from robocorp_code import __main__
return __main__
@pytest.fixture
def rcc_location() -> str:
from robocorp_code.rcc import download_rcc
from robocorp_code.rcc import get_default_rcc_location
location = get_default_rcc_location()
download_rcc(location, force=False)
return location
@pytest.fixture
def ci_endpoint() -> str:
ci_endpoint = os.environ.get("CI_ENDPOINT")
if ci_endpoint is None:
raise AssertionError("CI_ENDPOINT env variable must be specified for tests.")
return ci_endpoint
@pytest.fixture
def ci_credentials() -> str:
ci_credentials = os.environ.get("CI_CREDENTIALS")
if ci_credentials is None:
raise AssertionError("ci_credentials env variable must be specified for tests.")
return ci_credentials
@pytest.fixture
def rcc_config_location(tmpdir) -> str:
config_dir = tmpdir.join("config")
os.makedirs(str(config_dir))
return str(config_dir.join("config_test.yaml"))
@pytest.fixture(scope="session")
def cases(tmpdir_factory) -> CasesFixture:
basename = "res áéíóú"
copy_to = str(tmpdir_factory.mktemp(basename))
f = __file__
original_resources_dir = os.path.join(os.path.dirname(f), "_resources")
assert os.path.exists(original_resources_dir)
return CasesFixture(copy_to, original_resources_dir)
@pytest.fixture
def robocorp_home(tmpdir) -> str:
# import shutil
#
# ret = "c:/temp/tests_robohome"
# shutil.rmtree(os.path.join(ret, ".robocorp_code"), ignore_errors=True)
# return ret
return str(tmpdir.join("robocorp_home"))
@pytest.fixture
def config_provider(
ws_root_path: str,
rcc_location: str,
ci_endpoint: str,
rcc_config_location: str,
robocorp_home: str,
):
from robocorp_code.robocorp_config import RobocorpConfig
from robocorp_ls_core.ep_providers import DefaultConfigurationProvider
config = RobocorpConfig()
config.update(
{
"robocorp": {
"home": robocorp_home,
"rcc": {
"location": rcc_location,
"endpoint": ci_endpoint,
"config_location": rcc_config_location,
},
}
}
)
return DefaultConfigurationProvider(config)
@pytest.fixture
def rcc(config_provider: IConfigProvider, rcc_config_location: str) -> IRcc:
from robocorp_code.rcc import Rcc
rcc = Rcc(config_provider)
# We don't want to track tests.
# There's a bug in which the --do-not-track doesn't work the first time.
result = rcc._run_rcc(
"configure identity --do-not-track --config".split() + [rcc_config_location]
)
assert result.success
result_msg = result.result
assert result_msg
if "disabled" not in result_msg:
raise AssertionError(f"Did not expect {result_msg}")
return rcc
@pytest.fixture
def rcc_conda_installed(rcc: IRcc):
result = rcc.check_conda_installed()
    assert result.success, f"Error: {result}"
return rcc
_WS_INFO = (
{
"id": "workspace_id_1",
"name": "CI workspace",
"orgId": "affd282c8f9fe",
"orgName": "My Org Name",
"orgShortName": "654321",
"shortName": "123456", # Can be some generated number or something provided by the user.
"state": "active",
"url": "http://url1",
},
{
"id": "workspace_id_2",
"name": "My Other workspace",
"orgId": "affd282c8f9fe",
"orgName": "My Org Name",
"orgShortName": "1234567",
"shortName": "7654321",
"state": "active",
"url": "http://url2",
},
)
_PACKAGE_INFO_WS_2: dict = {}
_PACKAGE_INFO_WS_1: dict = {
"activities": [
{"id": "452", "name": "Package Name 1"},
{"id": "453", "name": "Package Name 2"},
]
}
class RccPatch(object):
def __init__(self, monkeypatch, tmpdir):
from robocorp_code.rcc import Rcc
self.monkeypatch = monkeypatch
self._current_mock = self.mock_run_rcc_default
self._original = Rcc._run_rcc
self._package_info_ws_1 = _PACKAGE_INFO_WS_1
self.custom_handler: Any = None
self.tmpdir = tmpdir
def mock_run_rcc(self, args, *starargs, **kwargs) -> ActionResult:
return self._current_mock(args, *starargs, **kwargs)
def mock_run_rcc_default(self, args, *sargs, **kwargs) -> ActionResult:
import json
import copy
from robocorp_code.rcc import ACCOUNT_NAME
import shutil
if self.custom_handler is not None:
ret = self.custom_handler(args, *sargs, **kwargs)
if ret is not None:
return ret
if args[:4] == ["cloud", "workspace", "--workspace", "workspace_id_1"]:
# List packages for workspace 1
return ActionResult(
success=True, message=None, result=json.dumps(self._package_info_ws_1)
)
if args[:4] == ["cloud", "workspace", "--workspace", "workspace_id_2"]:
# List packages for workspace 2
return ActionResult(
success=True, message=None, result=json.dumps(_PACKAGE_INFO_WS_2)
)
if args[:3] == ["cloud", "workspace", "--config"]:
# List workspaces
workspace_info = _WS_INFO
return ActionResult(
success=True, message=None, result=json.dumps(workspace_info)
)
if args[:3] == ["cloud", "push", "--directory"]:
if args[4:8] == ["--workspace", "workspace_id_1", "--robot", "2323"]:
return ActionResult(success=True)
if args[4:8] == ["--workspace", "workspace_id_1", "--robot", "453"]:
return ActionResult(success=True)
if args[:5] == ["cloud", "new", "--workspace", "workspace_id_1", "--robot"]:
# Submit a new package to ws 1
cp = copy.deepcopy(self._package_info_ws_1)
cp["activities"].append({"id": "2323", "name": args[5]})
self._package_info_ws_1 = cp
return ActionResult(
success=True,
message=None,
                result=f"Created new robot named {args[5]} with identity 2323.",
)
if args[:4] == ["config", "credentials", "-j", "--verified"]:
return ActionResult(
success=True,
message=None,
result=json.dumps(
[
{
"account": ACCOUNT_NAME,
"identifier": "001",
"endpoint": "https://endpoint.foo.bar",
"secret": "123...",
"verified": 1605525807,
}
]
),
)
if args[:3] == ["holotree", "variables", "--space"]:
space_name = args[3]
conda_prefix = Path(self.tmpdir.join(f"conda_prefix_{space_name}"))
conda_prefix.mkdir()
conda_yaml = args[-2]
assert conda_yaml.endswith("conda.yaml")
shutil.copyfile(conda_yaml, conda_prefix / "identity.yaml")
return ActionResult(
success=True,
message=None,
result=json.dumps(
[
{"key": "PYTHON_EXE", "value": sys.executable},
{"key": "SPACE_NAME", "value": args[3]},
{"key": "CONDA_PREFIX", "value": str(conda_prefix)},
{"key": "TEMP", "value": str(self.tmpdir.join("_temp_dir_"))},
]
),
)
raise AssertionError(f"Unexpected args: {args}")
def mock_run_rcc_should_not_be_called(self, args, *sargs, **kwargs):
raise AssertionError(
"This should not be called at this time (data should be cached)."
)
def apply(self) -> None:
from robocorp_code.rcc import Rcc
self.monkeypatch.setattr(Rcc, "_run_rcc", self.mock_run_rcc)
def disallow_calls(self) -> None:
self._current_mock = self.mock_run_rcc_should_not_be_called
@pytest.fixture
def rcc_patch(monkeypatch, tmpdir):
return RccPatch(monkeypatch, tmpdir)
@pytest.fixture
def initialization_options():
return {"do-not-track": True}
@pytest.fixture
def language_server_initialized(
language_server_tcp: IRobocorpLanguageServerClient,
ws_root_path: str,
rcc_location: str,
ci_endpoint: str,
rcc_config_location: str,
initialization_options,
):
from robocorp_code.commands import ROBOCORP_RUN_IN_RCC_INTERNAL
language_server = language_server_tcp
language_server.initialize(
ws_root_path, initialization_options=initialization_options
)
language_server.settings(
{
"settings": {
"robocorp": {
"rcc": {
"location": rcc_location,
"endpoint": ci_endpoint,
"config_location": rcc_config_location,
}
}
}
}
)
result = language_server.execute_command(
ROBOCORP_RUN_IN_RCC_INTERNAL,
[
{
"args": "configure identity --do-not-track --config".split()
+ [rcc_config_location]
}
],
)
assert result["result"]["success"]
if "disabled" not in result["result"]["result"]:
raise AssertionError(f"Unexpected result: {result}")
return language_server
| 30.490141 | 324 | 0.610957 | 4,396 | 0.405947 | 0 | 0 | 4,820 | 0.445101 | 0 | 0 | 2,475 | 0.228553 |
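RccPatch above routes Rcc._run_rcc through canned responses so tests never launch the real rcc binary. A hypothetical test using the rcc_patch and config_provider fixtures could look like the sketch below; the test name and the exact command line are assumptions made for illustration, only the patched method and fixtures come from the file itself.

def test_list_workspaces_uses_canned_data(rcc_patch, config_provider):
    from robocorp_code.rcc import Rcc

    rcc_patch.apply()  # route every Rcc._run_rcc call through mock_run_rcc

    rcc = Rcc(config_provider)
    result = rcc._run_rcc(["cloud", "workspace", "--config", "unused.yaml"])
    assert result.success  # answered from the canned _WS_INFO, no real rcc run

    # Once the data should be cached, forbid further calls so a cache bypass
    # fails loudly instead of silently re-querying.
    rcc_patch.disallow_calls()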
48b0b31dabd46b83a7d8a1c53e2be4a3ab952b42 | 2,182 | py | Python | tensorflow_model_optimization/python/core/quantization/keras/quantize_emulatable_layer.py | akarmi/model-optimization | 2d3faaa361ecb3639f4a29da56e0e6ed52336318 | [
"Apache-2.0"
]
| 1 | 2019-10-10T06:14:45.000Z | 2019-10-10T06:14:45.000Z | tensorflow_model_optimization/python/core/quantization/keras/quantize_emulatable_layer.py | akarmi/model-optimization | 2d3faaa361ecb3639f4a29da56e0e6ed52336318 | [
"Apache-2.0"
]
| null | null | null | tensorflow_model_optimization/python/core/quantization/keras/quantize_emulatable_layer.py | akarmi/model-optimization | 2d3faaa361ecb3639f4a29da56e0e6ed52336318 | [
"Apache-2.0"
]
| 1 | 2019-10-10T06:14:48.000Z | 2019-10-10T06:14:48.000Z | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Abstract Base Class for quantize emulation in custom keras layers."""
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class QuantizeEmulatableLayer(object):
"""Abstract Base Class for quantize emulation in custom keras layers.
Custom keras layers which want to implement quantization of their operations
during training should implement this class.
"""
@abc.abstractmethod
def get_quantizable_weights(self):
"""Returns list of quantizable weight tensors.
All the weight tensors which the layer wants to be quantized during
training must be returned by this method.
Returns: List of weight tensors/kernels in the keras layer which must be
quantized during training.
"""
raise NotImplementedError('Must be implemented in subclasses.')
@abc.abstractmethod
def set_quantizable_weights(self, weights):
"""Sets list of quantizable weight tensors.
This method replaces the existing quantizable weight tensors for
the layer with the specified set of weights.
Arguments:
weights: a list of Numpy arrays. The number
of arrays and their shape must match
number of the dimensions of the weights
of the layer (i.e. it should match the
output of `get_quantizable_weights`).
Raises:
ValueError: If the provided weights list does not match the
layer's specifications.
"""
raise NotImplementedError('Must be implemented in subclasses.')
| 35.770492 | 80 | 0.706691 | 1,362 | 0.624198 | 0 | 0 | 1,394 | 0.638863 | 0 | 0 | 1,865 | 0.85472 |
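The two abstract methods above define the contract quantization-aware custom layers must honour. Below is a minimal sketch of a Keras layer implementing that contract; it is illustrative only (in the real package the layer would additionally inherit from QuantizeEmulatableLayer) and assumes TensorFlow 2.x is available.

import tensorflow as tf


class QuantAwareDense(tf.keras.layers.Layer):
    """Toy dense layer exposing its kernel for quantize emulation."""

    def __init__(self, units, **kwargs):
        super(QuantAwareDense, self).__init__(**kwargs)
        self.units = units

    def build(self, input_shape):
        self.kernel = self.add_weight(
            'kernel', shape=(int(input_shape[-1]), self.units))

    def call(self, inputs):
        return tf.matmul(inputs, self.kernel)

    def get_quantizable_weights(self):
        # Only the kernel should be fake-quantized during training.
        return [self.kernel]

    def set_quantizable_weights(self, weights):
        if len(weights) != 1 or tuple(weights[0].shape) != tuple(self.kernel.shape):
            raise ValueError('Provided weights do not match the layer kernel.')
        self.kernel.assign(weights[0])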
48b322c1b5c9322a3e7a06f6f8cf4904f59abc42 | 1,373 | py | Python | GEN_cell_culture/phase_plotting.py | dezeraecox/GEN_cell_culture | 70ca933bef53347e916e20e6b86dc9dc9da11825 | [
"MIT"
]
| null | null | null | GEN_cell_culture/phase_plotting.py | dezeraecox/GEN_cell_culture | 70ca933bef53347e916e20e6b86dc9dc9da11825 | [
"MIT"
]
| 1 | 2019-08-04T22:44:54.000Z | 2019-08-04T22:44:54.000Z | GEN_cell_culture/phase_plotting.py | dezeraecox/GEN_cell_culture | 70ca933bef53347e916e20e6b86dc9dc9da11825 | [
"MIT"
]
| null | null | null | import os
import re
import string
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from GEN_Utils import FileHandling
from loguru import logger
logger.info("Import OK")
# Set sample-specific variables
input_path = 'examples/python/gauss_models/'
output_path = 'examples/python/phase_plotting/'
plate_sample = ['TPE only', '1', '1.5', '2', '3', '4']*4
plate_cords = [f'{x}{y}' for x in string.ascii_uppercase[0:4]
for y in range(1, 7)]
sample_map = dict(zip(plate_cords, plate_sample))
if not os.path.exists(output_path):
os.mkdir(output_path)
# Read in summary df and preview
summary = pd.read_excel(f'{input_path}summary.xlsx')
# Assign sample-specific descriptors to summary table
summary['plate'] = summary['sample'].str[0]
summary['well'] = summary['sample'].str[1:]
summary['sample'] = summary['well'].map(sample_map)
phase_name = ['G', 'S', 'M']
phase_num = [1, 2, 3]
phase_map = dict(zip(phase_name, phase_num))
# Generate line-plot
fig = plt.subplots()
for phase in phase_name:
sns.lineplot(summary['sample'], summary[phase], label=phase, ci='sd')
plt.ylabel("Proportion of cells in phase")
plt.xlabel(r'Density(x 10$^ 5$)')
plt.title('Phase distribution')
plt.legend(bbox_to_anchor=(1.1, 1.0), title='Phase')
plt.tight_layout()
plt.autoscale()
plt.savefig(f'{output_path}line_plot.png')
| 27.46 | 73 | 0.718135 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 445 | 0.324108 |
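For reference, the dict(zip(plate_cords, plate_sample)) call above pairs each well coordinate with its density label. A standalone reproduction shows a few resolved entries:

import string

plate_sample = ['TPE only', '1', '1.5', '2', '3', '4'] * 4
plate_cords = [f'{x}{y}' for x in string.ascii_uppercase[0:4] for y in range(1, 7)]
sample_map = dict(zip(plate_cords, plate_sample))

print(sample_map['A1'])  # 'TPE only'
print(sample_map['A4'])  # '2'
print(sample_map['D6'])  # '4'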
48b3cc7ab2adb8652b3ac164a64a50173d354d2a | 4,759 | py | Python | PlatformerGame/malmopy/explorers.py | MrMaik/platformer-ml-game | bbcabe3ddea1e3cfddb01b4cd60c8dd1bd79acac | [
"MIT"
]
| 10 | 2020-01-05T19:33:33.000Z | 2022-02-04T14:56:09.000Z | PlatformerGame/malmopy/explorers.py | MrMaik/platformer-ml-game | bbcabe3ddea1e3cfddb01b4cd60c8dd1bd79acac | [
"MIT"
]
| 1 | 2019-12-18T15:16:44.000Z | 2019-12-18T15:16:44.000Z | PlatformerGame/malmopy/explorers.py | MrMaik/platformer-ml-game | bbcabe3ddea1e3cfddb01b4cd60c8dd1bd79acac | [
"MIT"
]
| 6 | 2019-12-18T14:45:37.000Z | 2021-09-13T12:48:28.000Z | # --------------------------------------------------------------------------------------------------
# Copyright (c) 2018 Microsoft Corporation
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
# associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute,
# sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
# NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# --------------------------------------------------------------------------------------------------
"""Module containing explorer classes"""
from numpy import random as np_random
from .summaries import ScalarSummary
from .triggers import each_step
from .abc import Explorer, EpsilonFunction, Visualizable
class ConstantEpsilon(EpsilonFunction):
"""Epsilon function which returns a constant value regardless of step."""
def __init__(self, epsilon):
"""
Args:
epsilon -- the constant epsilon value
"""
self._epsilon = epsilon
def epsilon(self, step):
return self._epsilon
class LinearEpsilon(EpsilonFunction):
"""
This function uses linear interpolation between epsilon_max and epsilon_min
to linearly anneal epsilon as a function of the current episode.
3 cases exist:
- If 0 <= episode < eps_min_time then epsilon = interpolator(episode)
- If episode >= eps_min_time then epsilon then epsilon = eps_min
- Otherwise epsilon = eps_max
"""
def __init__(self, eps_max, eps_min, eps_min_time):
"""
Args:
eps_max -- the maximum epsilon value
eps_min -- the minimum epsilon value
eps_min_time -- the number of steps until epsilon is at its minimum
"""
assert eps_max > eps_min
assert eps_min_time > 0
self._eps_min_time = eps_min_time
self._eps_min = eps_min
self._eps_max = eps_max
self._delta = -(eps_max - eps_min) / eps_min_time
def epsilon(self, step):
"""The epsilon value at a specific step.
Args:
step -- the step during training
"""
if step < 0:
return self._eps_max
if step > self._eps_min_time:
return self._eps_min
return self._delta * step + self._eps_max
class EpsilonGreedyExplorer(Explorer, Visualizable):
"""Explorer which determines whether to explore by sampling from a Bernoulli distribution."""
def __init__(self, epsilon_function):
"""
Args:
epsilon_function -- an instance of EpsilonFunction
"""
assert isinstance(epsilon_function, EpsilonFunction)
self._epsilon = epsilon_function
self._epsilon_summary = ScalarSummary("EpsilonGreedy/Epsilon", each_step())
@property
def metrics(self):
return [self._epsilon_summary]
def is_exploring(self, step):
epsilon = self._epsilon(step)
self._epsilon_summary.add(epsilon)
return np_random.binomial(1, epsilon)
def explore(self, step, action_space):
return action_space.sample()
class ConstantExplorer(EpsilonGreedyExplorer):
"""Explorer which explores with a constant probability."""
def __init__(self, epsilon):
"""
Args:
epsilon -- the probability that the agent will explore
"""
super(ConstantExplorer, self).__init__(ConstantEpsilon(epsilon))
class LinearEpsilonGreedyExplorer(EpsilonGreedyExplorer):
"""Explorer which uses a LinearEpsilon function."""
def __init__(self, eps_max, eps_min, eps_min_time):
"""
Args:
eps_max -- the maximum epsilon value
eps_min -- the minimum epsilon value
eps_min_time -- the number of steps until epsilon is at its minimum
"""
epsilon_function = LinearEpsilon(eps_max, eps_min, eps_min_time)
super(LinearEpsilonGreedyExplorer, self).__init__(epsilon_function)
| 36.328244 | 100 | 0.65497 | 3,229 | 0.678504 | 0 | 0 | 71 | 0.014919 | 0 | 0 | 2,791 | 0.586468 |
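To make the three cases in LinearEpsilon's docstring concrete, here is a small worked example; the parameter values are arbitrary and the import path simply mirrors the file location shown above.

from malmopy.explorers import LinearEpsilon

schedule = LinearEpsilon(eps_max=1.0, eps_min=0.1, eps_min_time=100)

print(schedule.epsilon(-5))    # 1.0  -> negative steps are clamped to eps_max
print(schedule.epsilon(50))    # 0.55 -> halfway down the linear ramp
print(schedule.epsilon(100))   # ~0.1 -> end of the ramp
print(schedule.epsilon(1000))  # 0.1  -> clamped to eps_min afterwards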
48b5904c4c46f166269a35d1d5aae2ecfb57bef7 | 765 | py | Python | Lib/icecreamscrape/__main__.py | kdwatt15/icecreamscrape | aefe18d795bb9ae8daabda7f8e26653df7d47c44 | [
"MIT"
]
| null | null | null | Lib/icecreamscrape/__main__.py | kdwatt15/icecreamscrape | aefe18d795bb9ae8daabda7f8e26653df7d47c44 | [
"MIT"
]
| 1 | 2020-06-07T17:56:13.000Z | 2020-06-07T17:56:13.000Z | Lib/icecreamscrape/__main__.py | kdwatt15/icecreamscrape | aefe18d795bb9ae8daabda7f8e26653df7d47c44 | [
"MIT"
]
| null | null | null | # Standard imports
import sys
# Project imports
from icecreamscrape.cli import cli
from icecreamscrape.webdriver import driver_factory
from icecreamscrape import composites as comps
from icecreamscrape.composites import create_timestamped_dir
def main(args=sys.argv[1:]):
""" Main function. :param: args is used for testing """
user_inputs = cli(args)
url = user_inputs.params.url
active_features = user_inputs.active_features
if len(active_features) > 0:
time_dir = create_timestamped_dir()
with driver_factory(url) as driver:
for feature in active_features:
getattr(sys.modules[comps.__name__],
feature)(driver, time_dir)
def init():
""" Init construction allows for testing """
if __name__ == "__main__":
sys.exit(main())
init()
| 24.677419 | 60 | 0.75817 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 144 | 0.188235 |
48b8c62b25b3330d58b5291c6fc3a3f2df2e485f | 5,051 | py | Python | tests/models/programdb/mission/mission_unit_test.py | weibullguy/ramstk | 3ec41d7e2933045a7a8028aed6c6b04365495095 | [
"BSD-3-Clause"
]
| 4 | 2018-08-26T09:11:36.000Z | 2019-05-24T12:01:02.000Z | tests/models/programdb/mission/mission_unit_test.py | weibullguy/ramstk | 3ec41d7e2933045a7a8028aed6c6b04365495095 | [
"BSD-3-Clause"
]
| 52 | 2018-08-24T12:51:22.000Z | 2020-12-28T04:59:42.000Z | tests/models/programdb/mission/mission_unit_test.py | weibullguy/ramstk | 3ec41d7e2933045a7a8028aed6c6b04365495095 | [
"BSD-3-Clause"
]
| 1 | 2018-10-11T07:57:55.000Z | 2018-10-11T07:57:55.000Z | # pylint: skip-file
# type: ignore
# -*- coding: utf-8 -*-
#
# tests.controllers.mission.mission_unit_test.py is part of The
# RAMSTK Project
#
# All rights reserved.
# Copyright since 2007 Doyle "weibullguy" Rowland doyle.rowland <AT> reliaqual <DOT> com
"""Test class for testing Mission module algorithms and models."""
# Third Party Imports
import pytest
from pubsub import pub
from treelib import Tree
# RAMSTK Package Imports
from ramstk.models.dbrecords import RAMSTKMissionRecord
from ramstk.models.dbtables import RAMSTKMissionTable
from tests import (
MockDAO,
UnitTestDeleteMethods,
UnitTestGetterSetterMethods,
UnitTestInsertMethods,
UnitTestSelectMethods,
)
@pytest.mark.usefixtures("test_record_model", "unit_test_table_model")
class TestCreateMissionModels:
"""Class for unit testing Mission model __init__() methods.
Because each table model contains unique attributes, these methods must be
local to the module being tested.
"""
__test__ = True
@pytest.mark.unit
def test_record_model_create(self, test_record_model):
"""Should return a Mission record model instance."""
assert isinstance(test_record_model, RAMSTKMissionRecord)
# Verify class attributes are properly initialized.
assert test_record_model.__tablename__ == "ramstk_mission"
assert test_record_model.revision_id == 1
assert test_record_model.description == "Test mission #1"
assert test_record_model.mission_time == 100.0
assert test_record_model.time_units == "hours"
@pytest.mark.unit
def test_data_manager_create(self, unit_test_table_model):
"""Return a Mission table model instance."""
assert isinstance(unit_test_table_model, RAMSTKMissionTable)
assert isinstance(unit_test_table_model.tree, Tree)
assert isinstance(unit_test_table_model.dao, MockDAO)
assert unit_test_table_model._db_id_colname == "fld_mission_id"
assert unit_test_table_model._db_tablename == "ramstk_mission"
assert unit_test_table_model._tag == "mission"
assert unit_test_table_model._root == 0
assert unit_test_table_model._revision_id == 0
assert pub.isSubscribed(
unit_test_table_model.do_get_attributes, "request_get_mission_attributes"
)
assert pub.isSubscribed(
unit_test_table_model.do_get_tree, "request_get_mission_tree"
)
assert pub.isSubscribed(
unit_test_table_model.do_select_all, "selected_revision"
)
assert pub.isSubscribed(
unit_test_table_model.do_update, "request_update_mission"
)
assert pub.isSubscribed(
unit_test_table_model.do_update_all, "request_update_all_mission"
)
assert pub.isSubscribed(
unit_test_table_model.do_delete, "request_delete_mission"
)
assert pub.isSubscribed(
unit_test_table_model.do_insert, "request_insert_mission"
)
@pytest.mark.usefixtures("test_attributes", "unit_test_table_model")
class TestSelectMission(UnitTestSelectMethods):
"""Class for unit testing Mission table do_select() and do_select_all()."""
__test__ = True
_record = RAMSTKMissionRecord
_tag = "mission"
@pytest.mark.usefixtures("test_attributes", "unit_test_table_model")
class TestInsertMission(UnitTestInsertMethods):
"""Class for unit testing Mission table do_insert() method."""
__test__ = True
_next_id = 0
_record = RAMSTKMissionRecord
_tag = "mission"
@pytest.mark.skip(reason="Mission records are non-hierarchical.")
def test_do_insert_child(self, test_attributes, unit_test_table_model):
"""Should not run because Mission records are not hierarchical."""
pass
@pytest.mark.usefixtures("test_attributes", "unit_test_table_model")
class TestDeleteMission(UnitTestDeleteMethods):
"""Class for unit testing Mission table do_delete() method."""
__test__ = True
_next_id = 0
_record = RAMSTKMissionRecord
_tag = "mission"
@pytest.mark.usefixtures("test_attributes", "test_record_model")
class TestGetterSetterMission(UnitTestGetterSetterMethods):
"""Class for unit testing Mission table methods that get or set."""
__test__ = True
_id_columns = [
"revision_id",
"mission_id",
]
_test_attr = "mission_time"
_test_default_value = 0.0
@pytest.mark.unit
def test_get_record_model_attributes(self, test_record_model):
"""Should return a dict of attribute key:value pairs.
This method must be local because the attributes are different for each
database record model.
"""
_attributes = test_record_model.get_attributes()
assert isinstance(_attributes, dict)
assert _attributes["revision_id"] == 1
assert _attributes["description"] == "Test mission #1"
assert _attributes["mission_time"] == 100.0
assert _attributes["time_units"] == "hours"
| 33.673333 | 88 | 0.713324 | 3,988 | 0.789547 | 0 | 0 | 4,331 | 0.857454 | 0 | 0 | 1,848 | 0.365868 |
48b9335e8465f09c7a066bfa90b273be5d354b55 | 569 | py | Python | src/streamlink/packages/flashmedia/flv.py | RomanKornev/streamlink | acdefee0822b9c10628b91a166f9abe084e44800 | [
"BSD-2-Clause"
]
| 5 | 2019-07-26T17:03:26.000Z | 2020-10-17T23:23:43.000Z | src/streamlink/packages/flashmedia/flv.py | RomanKornev/streamlink | acdefee0822b9c10628b91a166f9abe084e44800 | [
"BSD-2-Clause"
]
| 9 | 2018-01-14T15:20:23.000Z | 2021-03-08T20:29:51.000Z | src/streamlink/packages/flashmedia/flv.py | bumplzz69/streamlink | 34abc43875d7663ebafa241573dece272e93d88b | [
"BSD-2-Clause"
]
| 4 | 2018-01-14T13:27:25.000Z | 2021-11-15T22:28:30.000Z | #!/usr/bin/env python
from .error import FLVError
from .compat import is_py2
from .tag import Header, Tag
class FLV(object):
def __init__(self, fd=None, strict=False):
self.fd = fd
self.header = Header.deserialize(self.fd)
self.strict = strict
def __iter__(self):
return self
def __next__(self):
try:
tag = Tag.deserialize(self.fd, strict=self.strict)
except (IOError, FLVError):
raise StopIteration
return tag
if is_py2:
next = __next__
__all__ = ["FLV"]
| 18.966667 | 62 | 0.606327 | 439 | 0.771529 | 0 | 0 | 0 | 0 | 0 | 0 | 26 | 0.045694 |
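The FLV class above parses the header eagerly and then behaves as an iterator over tags until an IOError or FLVError ends the stream. A hypothetical usage sketch follows; the file name and the import path are assumptions based on the repository layout.

from streamlink.packages.flashmedia.flv import FLV

with open("clip.flv", "rb") as fd:
    flv = FLV(fd)          # header is deserialized immediately
    print(flv.header)

    for tag in flv:        # yields Tag objects until EOF or a parse error
        print(tag)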
48b9e626c31a3acad3ffc788ac2313af13310a0d | 120 | py | Python | tests/core/test_core_renderer.py | timvink/pheasant | eb5b0a8b5473baad5ad4903984433fe763f5312e | [
"MIT"
]
| 24 | 2018-05-25T15:23:30.000Z | 2021-06-09T10:56:52.000Z | tests/core/test_core_renderer.py | timvink/pheasant | eb5b0a8b5473baad5ad4903984433fe763f5312e | [
"MIT"
]
| 14 | 2019-04-30T10:51:01.000Z | 2020-09-16T20:37:30.000Z | tests/core/test_core_renderer.py | timvink/pheasant | eb5b0a8b5473baad5ad4903984433fe763f5312e | [
"MIT"
]
| 9 | 2019-06-12T10:54:18.000Z | 2022-01-15T21:19:05.000Z | from pheasant.renderers.jupyter.jupyter import Jupyter
jupyter = Jupyter()
jupyter.findall("{{3}}3{{5}}")
jupyter.page
| 20 | 54 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 13 | 0.108333 |
48ba4f165b3430e0ef9885d29722f28bf1be64bd | 687 | py | Python | chapter2-5-your-code-in-multiple-servers/packer/webapp.py | andrecp/devops-fundamentals-to-k8s | 9ea1cfdcfcc07faf195bd26faa5917628385cdfc | [
"MIT"
]
| null | null | null | chapter2-5-your-code-in-multiple-servers/packer/webapp.py | andrecp/devops-fundamentals-to-k8s | 9ea1cfdcfcc07faf195bd26faa5917628385cdfc | [
"MIT"
]
| null | null | null | chapter2-5-your-code-in-multiple-servers/packer/webapp.py | andrecp/devops-fundamentals-to-k8s | 9ea1cfdcfcc07faf195bd26faa5917628385cdfc | [
"MIT"
]
| null | null | null | #!/usr/bin/env python3
import json
from http.server import HTTPServer, BaseHTTPRequestHandler
num_requests = 0
class Handler(BaseHTTPRequestHandler):
def _set_headers(self):
self.send_response(200)
self.send_header("Content-type", "application/json")
self.end_headers()
def do_GET(self):
self._set_headers()
global num_requests
num_requests += 1
content = json.dumps({"num_requests": num_requests}).encode("utf8")
self.wfile.write(content)
if __name__ == "__main__":
http_service = HTTPServer(("0.0.0.0", 8000), Handler)
print(f"Starting http service on 0.0.0.0:8000")
http_service.serve_forever()
| 27.48 | 75 | 0.678311 | 401 | 0.583697 | 0 | 0 | 0 | 0 | 0 | 0 | 133 | 0.193595 |
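The handler above returns a JSON body whose num_requests counter grows with every GET. A hypothetical client run against a locally started instance illustrates the behaviour:

import json
from urllib.request import urlopen

for _ in range(3):
    with urlopen("http://127.0.0.1:8000/") as resp:
        print(json.loads(resp.read()))
# {'num_requests': 1}
# {'num_requests': 2}
# {'num_requests': 3}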
48bb529c5d5a0817b3c6e3353e857c62a73b8a16 | 91 | py | Python | run.py | ellotecnologia/galadriel | 16b592818d8beb8407805e43f2f881975b245d94 | [
"MIT"
]
| null | null | null | run.py | ellotecnologia/galadriel | 16b592818d8beb8407805e43f2f881975b245d94 | [
"MIT"
]
| null | null | null | run.py | ellotecnologia/galadriel | 16b592818d8beb8407805e43f2f881975b245d94 | [
"MIT"
]
| null | null | null | from app.app import create_app
from config import BaseConfig
app = create_app(BaseConfig)
| 18.2 | 30 | 0.824176 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
48bb6abe88059d9888226636da8508d01f476cba | 3,896 | py | Python | retarget/make_data.py | EggPool/rx-experiments | a8659a94e1b0822a9e7f4121407fb2b5ededa192 | [
"MIT"
]
| 1 | 2021-06-20T16:58:21.000Z | 2021-06-20T16:58:21.000Z | retarget/make_data.py | EggPool/rx-experiments | a8659a94e1b0822a9e7f4121407fb2b5ededa192 | [
"MIT"
]
| null | null | null | retarget/make_data.py | EggPool/rx-experiments | a8659a94e1b0822a9e7f4121407fb2b5ededa192 | [
"MIT"
]
| 2 | 2019-11-26T12:18:18.000Z | 2022-03-28T19:22:55.000Z | """
Create data for simulations
(c) 2019 - EggdraSyl
"""
import json
# from mockup import Blockchain, Block
from minersimulator import MinerSimulator
from math import sin, pi
SPECIAL_MIN_TIME = 5 * 60
def init_stable(
start,
end,
block_time=60,
target="0000000000000028acfa28a803d2000000000000000000000000000000000000",
file="stable.json",
):
start_time = 0
blocks = []
for height in range(start, end):
block = {
"time": start_time,
"height": height,
"special_min": True if block_time > SPECIAL_MIN_TIME else False,
"target": target,
"block_time": block_time, # This one is not native.
}
start_time += block_time
blocks.append(block)
with open("data/init/{}".format(file), "w") as fp:
json.dump(blocks, fp)
def hash_stable(hash_count: int, hash_rate:int, file="stable.json"):
simu = MinerSimulator(hash_rate)
hashes = []
for i in range(hash_count):
hashes.append((simu.hash_rate, simu.HEX(simu.get_min_hash())))
with open("data/live/{}".format(file), "w") as fp:
json.dump(hashes, fp, indent=2)
def hash_arithmetic(hash_count: int, start: int, increment: int, file="arithmetic.json"):
simu = MinerSimulator(start)
hashes = []
for i in range(hash_count):
hashes.append((simu.hash_rate, simu.HEX(simu.get_min_hash())))
simu.hash_rate += increment
with open("data/live/{}".format(file), "w") as fp:
json.dump(hashes, fp, indent=2)
def hash_step(hash_count: int, start: int, h_end: int, file="step.json"):
simu = MinerSimulator(start)
hashes = []
for i in range(hash_count):
        hashes.append((simu.hash_rate, simu.HEX(simu.get_min_hash())))
if i == hash_count//2:
simu.hash_rate = h_end
with open("data/live/{}".format(file), "w") as fp:
json.dump(hashes, fp, indent=2)
def hash_sinus(hash_count: int, base: int, amplitude: int, period: int, file="sinus.json"):
simu = MinerSimulator(base)
hashes = []
for i in range(hash_count):
        hashes.append((simu.hash_rate, simu.HEX(simu.get_min_hash())))
simu.hash_rate = base + amplitude * sin(i * 2 * pi / period)
with open("data/live/{}".format(file), "w") as fp:
json.dump(hashes, fp, indent=2)
if __name__ == "__main__":
init_stable(
0,
1000,
block_time=3600,
target="0000000000000028acfa28a803d2000000000000000000000000000000000000",
file="stable_3600_14.json",
)
init_stable(
0,
1000,
block_time=60 * 5,
target="000000ffffffffff28acfa28a803d20000000000000000000000000000000000",
file="stable_300_6.json",
)
init_stable(
0,
1000,
block_time=60 * 5,
target="00000ffffffffff28acfa28a803d200000000000000000000000000000000000",
file="stable_300_5.json",
)
init_stable(
0,
1000,
block_time=60 * 5,
target="0000ffffffffff28acfa28a803d2000000000000000000000000000000000000",
file="stable_300_4.json",
)
init_stable(
0,
1000,
block_time=60 * 5,
target="000ffffffffff28acfa28a803d2000000000000000000000000000000000000",
file="stable_300_3.json",
)
hash_stable(10000, 167, file="stable_167.json")
hash_stable(10000, 1670, file="stable_1670.json")
hash_stable(10000, 16700, file="stable_16700.json")
hash_arithmetic(10000, 167, 16, file="arithmetic_167_16.json")
hash_step(10000, 167, 500, file="step_up_167_500.json")
hash_step(10000, 500, 167, file="step_down_500_167.json")
hash_sinus(10000, 300, 150, 60*12, file="sinus_300_150_720.json")
hash_sinus(10000, 300, 100, 1440, file="sinus_300_100_1440.json")
hash_sinus(10000, 300, 100, 2880, file="sinus_300_100_2880.json")
| 30.4375 | 91 | 0.641427 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,017 | 0.261037 |
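The fixtures written by the script above are plain JSON and can be sanity-checked directly; the file names below match the calls in its __main__ block.

import json

with open("data/live/stable_167.json") as fp:
    hashes = json.load(fp)            # list of [hash_rate, hex_min_hash] pairs
rates = [rate for rate, _ in hashes]
print(len(hashes), min(rates), max(rates))

with open("data/init/stable_300_5.json") as fp:
    blocks = json.load(fp)            # list of block dicts
print(blocks[0]["height"], blocks[0]["block_time"], blocks[0]["special_min"])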
48bb8a2d0cac5d726a9c18529c0114315a34c2c3 | 13,473 | py | Python | software/pynguin/tests/testcase/statements/test_primitivestatements.py | se2p/artifact-pynguin-ssbse2020 | 32b5f4d27ef1b81e5c541471e98fa6e50f5ce8a6 | [
"CC-BY-4.0"
]
| 3 | 2020-08-20T10:27:13.000Z | 2021-11-02T20:28:16.000Z | software/pynguin/tests/testcase/statements/test_primitivestatements.py | se2p/artifact-pynguin-ssbse2020 | 32b5f4d27ef1b81e5c541471e98fa6e50f5ce8a6 | [
"CC-BY-4.0"
]
| null | null | null | software/pynguin/tests/testcase/statements/test_primitivestatements.py | se2p/artifact-pynguin-ssbse2020 | 32b5f4d27ef1b81e5c541471e98fa6e50f5ce8a6 | [
"CC-BY-4.0"
]
| null | null | null | # This file is part of Pynguin.
#
# Pynguin is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pynguin is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pynguin. If not, see <https://www.gnu.org/licenses/>.
from unittest import mock
from unittest.mock import MagicMock
import pytest
import pynguin.configuration as config
import pynguin.testcase.defaulttestcase as dtc
import pynguin.testcase.statements.primitivestatements as prim
import pynguin.testcase.testcase as tc
import pynguin.testcase.variable.variablereferenceimpl as vri
@pytest.mark.parametrize(
"statement_type,value",
[
pytest.param(prim.IntPrimitiveStatement, 42),
pytest.param(prim.FloatPrimitiveStatement, 42.23),
pytest.param(prim.StringPrimitiveStatement, "foo"),
pytest.param(prim.BooleanPrimitiveStatement, True),
],
)
def test_primitive_statement_value(statement_type, test_case_mock, value):
statement = statement_type(test_case_mock, value)
assert statement.value == value
@pytest.mark.parametrize(
"statement_type",
[
pytest.param(prim.IntPrimitiveStatement),
pytest.param(prim.FloatPrimitiveStatement),
pytest.param(prim.StringPrimitiveStatement),
pytest.param(prim.BooleanPrimitiveStatement),
],
)
def test_primitive_statement_value_none(statement_type, test_case_mock):
statement = statement_type(test_case_mock, None)
assert statement.value is not None
@pytest.mark.parametrize(
"statement_type,value,new_value",
[
pytest.param(prim.IntPrimitiveStatement, 42, 23),
pytest.param(prim.FloatPrimitiveStatement, 2.1, 1.2),
pytest.param(prim.StringPrimitiveStatement, "foo", "bar"),
pytest.param(prim.BooleanPrimitiveStatement, True, False),
],
)
def test_primitive_statement_set_value(
statement_type, test_case_mock, value, new_value
):
statement = statement_type(test_case_mock, value)
statement.value = new_value
assert statement.value == new_value
@pytest.mark.parametrize(
"statement_type,test_case,new_test_case,value",
[
pytest.param(
prim.IntPrimitiveStatement,
MagicMock(tc.TestCase),
MagicMock(tc.TestCase),
42,
),
pytest.param(
prim.FloatPrimitiveStatement,
MagicMock(tc.TestCase),
MagicMock(tc.TestCase),
42.23,
),
pytest.param(
prim.StringPrimitiveStatement,
MagicMock(tc.TestCase),
MagicMock(tc.TestCase),
"foo",
),
pytest.param(
prim.BooleanPrimitiveStatement,
MagicMock(tc.TestCase),
MagicMock(tc.TestCase),
True,
),
],
)
def test_primitive_statement_clone(statement_type, test_case, new_test_case, value):
statement = statement_type(test_case, value)
new_statement = statement.clone(new_test_case)
assert new_statement.test_case == new_test_case
assert (
new_statement.return_value.variable_type == statement.return_value.variable_type
)
assert new_statement.value == statement.value
@pytest.mark.parametrize(
"statement_type,test_case,value,visitor_method",
[
pytest.param(
prim.IntPrimitiveStatement,
MagicMock(tc.TestCase),
42,
"visit_int_primitive_statement",
),
pytest.param(
prim.FloatPrimitiveStatement,
MagicMock(tc.TestCase),
2.1,
"visit_float_primitive_statement",
),
pytest.param(
prim.StringPrimitiveStatement,
MagicMock(tc.TestCase),
"foo",
"visit_string_primitive_statement",
),
pytest.param(
prim.BooleanPrimitiveStatement,
MagicMock(tc.TestCase),
True,
"visit_boolean_primitive_statement",
),
],
)
def test_primitive_statement_accept(statement_type, test_case, value, visitor_method):
stmt = statement_type(test_case, value)
visitor = MagicMock()
stmt.accept(visitor)
getattr(visitor, visitor_method).assert_called_once_with(stmt)
@pytest.mark.parametrize(
"statement_type,value",
[
pytest.param(prim.IntPrimitiveStatement, 42),
pytest.param(prim.FloatPrimitiveStatement, 42.23),
pytest.param(prim.StringPrimitiveStatement, "foo"),
pytest.param(prim.BooleanPrimitiveStatement, True),
],
)
def test_primitive_statement_equals_same(statement_type, value):
test_case = MagicMock(tc.TestCase)
statement = statement_type(test_case, value)
assert statement.__eq__(statement)
@pytest.mark.parametrize(
"statement_type,value",
[
pytest.param(prim.IntPrimitiveStatement, 42),
pytest.param(prim.FloatPrimitiveStatement, 42.23),
pytest.param(prim.StringPrimitiveStatement, "foo"),
pytest.param(prim.BooleanPrimitiveStatement, True),
],
)
def test_primitive_statement_equals_other_type(statement_type, value):
test_case = MagicMock(tc.TestCase)
statement = statement_type(test_case, value)
assert not statement.__eq__(test_case)
@pytest.mark.parametrize(
"statement_type,value",
[
pytest.param(prim.IntPrimitiveStatement, 42),
pytest.param(prim.FloatPrimitiveStatement, 42.23),
pytest.param(prim.StringPrimitiveStatement, "foo"),
pytest.param(prim.BooleanPrimitiveStatement, True),
],
)
def test_primitive_statement_equals_clone(statement_type, value):
test_case = MagicMock(tc.TestCase)
statement = statement_type(test_case, value)
test_case.statements = [statement]
test_case2 = MagicMock(tc.TestCase)
clone = statement.clone(test_case2)
test_case2.statements = [clone]
assert statement.__eq__(clone)
def test_none_statement_equals_clone():
test_case = MagicMock(tc.TestCase)
statement = prim.NoneStatement(test_case, type(None))
test_case.statements = [statement]
test_case2 = MagicMock(tc.TestCase)
clone = statement.clone(test_case2)
test_case2.statements = [clone]
assert statement.__eq__(clone)
@pytest.mark.parametrize(
"statement_type,value",
[
pytest.param(prim.IntPrimitiveStatement, 42),
pytest.param(prim.FloatPrimitiveStatement, 42.23),
pytest.param(prim.StringPrimitiveStatement, "foo"),
pytest.param(prim.BooleanPrimitiveStatement, True),
],
)
def test_primitive_statement_hash(statement_type, value):
statement = statement_type(MagicMock(tc.TestCase), value)
assert statement.__hash__() != 0
def test_int_primitive_statement_randomize_value(test_case_mock):
statement = prim.IntPrimitiveStatement(test_case_mock)
statement.randomize_value()
assert isinstance(statement.value, int)
def test_float_primitive_statement_randomize_value(test_case_mock):
statement = prim.FloatPrimitiveStatement(test_case_mock)
statement.randomize_value()
assert isinstance(statement.value, float)
def test_bool_primitive_statement_randomize_value(test_case_mock):
statement = prim.BooleanPrimitiveStatement(test_case_mock)
statement.randomize_value()
assert statement.value or not statement.value
def test_string_primitive_statement_randomize_value(test_case_mock):
statement = prim.StringPrimitiveStatement(test_case_mock)
statement.randomize_value()
assert 0 <= len(statement.value) <= config.INSTANCE.string_length
def test_none_statement_randomize_value(test_case_mock):
statement = prim.NoneStatement(test_case_mock, type(None))
statement.randomize_value()
assert statement.value is None
def test_none_statement_delta(test_case_mock):
statement = prim.NoneStatement(test_case_mock, type(None))
statement.delta()
assert statement.value is None
def test_string_primitive_statement_random_deletion(test_case_mock):
sample = list("Test")
result = prim.StringPrimitiveStatement._random_deletion(sample)
assert len(result) <= len(sample)
def test_string_primitive_statement_random_insertion(test_case_mock):
sample = list("Test")
result = prim.StringPrimitiveStatement._random_insertion(sample)
assert len(result) >= len(sample)
def test_string_primitive_statement_random_insertion_empty(test_case_mock):
sample = list("")
result = prim.StringPrimitiveStatement._random_insertion(sample)
assert len(result) >= len(sample)
def test_string_primitive_statement_random_replacement(test_case_mock):
sample = list("Test")
result = prim.StringPrimitiveStatement._random_replacement(sample)
assert len(result) == len(sample)
def test_string_primitive_statement_delta_none(test_case_mock):
value = "t"
statement = prim.StringPrimitiveStatement(test_case_mock, value)
with mock.patch("pynguin.utils.randomness.next_float") as float_mock:
float_mock.side_effect = [1.0, 1.0, 1.0]
statement.delta()
assert statement.value == value
def test_string_primitive_statement_delta_all(test_case_mock):
value = "te"
statement = prim.StringPrimitiveStatement(test_case_mock, value)
with mock.patch("pynguin.utils.randomness.next_char") as char_mock:
char_mock.side_effect = ["a", "b"]
with mock.patch("pynguin.utils.randomness.next_int") as int_mock:
int_mock.return_value = 0
with mock.patch("pynguin.utils.randomness.next_float") as float_mock:
deletion = [0.0, 0.0, 1.0]
replacement = [0.0, 0.0]
insertion = [0.0, 0.0, 1.0]
float_mock.side_effect = deletion + replacement + insertion
statement.delta()
assert statement.value == "ba"
def test_int_primitive_statement_delta(test_case_mock):
config.INSTANCE.max_delta = 10
statement = prim.IntPrimitiveStatement(test_case_mock, 1)
with mock.patch("pynguin.utils.randomness.next_gaussian") as gauss_mock:
gauss_mock.return_value = 0.5
statement.delta()
assert statement.value == 6
def test_float_primitive_statement_delta_max(test_case_mock):
config.INSTANCE.max_delta = 10
statement = prim.FloatPrimitiveStatement(test_case_mock, 1.5)
with mock.patch("pynguin.utils.randomness.next_gaussian") as gauss_mock:
gauss_mock.return_value = 0.5
with mock.patch("pynguin.utils.randomness.next_float") as float_mock:
float_mock.return_value = 0.0
statement.delta()
assert statement.value == 6.5
def test_float_primitive_statement_delta_gauss(test_case_mock):
config.INSTANCE.max_delta = 10
statement = prim.FloatPrimitiveStatement(test_case_mock, 1.0)
with mock.patch("pynguin.utils.randomness.next_gaussian") as gauss_mock:
gauss_mock.return_value = 0.5
with mock.patch("pynguin.utils.randomness.next_float") as float_mock:
float_mock.return_value = 1.0 / 3.0
statement.delta()
assert statement.value == 1.5
def test_float_primitive_statement_delta_round(test_case_mock):
statement = prim.FloatPrimitiveStatement(test_case_mock, 1.2345)
with mock.patch("pynguin.utils.randomness.next_int") as int_mock:
int_mock.return_value = 2
with mock.patch("pynguin.utils.randomness.next_float") as float_mock:
float_mock.return_value = 2.0 / 3.0
statement.delta()
assert statement.value == 1.23
def test_boolean_primitive_statement_delta(test_case_mock):
statement = prim.BooleanPrimitiveStatement(test_case_mock, True)
statement.delta()
assert not statement.value
def test_primitive_statement_mutate(test_case_mock):
statement = prim.BooleanPrimitiveStatement(test_case_mock, True)
statement.mutate()
assert not statement.value
def test_primitive_statement_accessible(test_case_mock):
statement = prim.IntPrimitiveStatement(test_case_mock, 0)
assert statement.accessible_object() is None
def test_primitive_statement_references(test_case_mock):
statement = prim.IntPrimitiveStatement(test_case_mock, 0)
assert {statement.return_value} == statement.get_variable_references()
def test_primitive_statement_replace(test_case_mock):
statement = prim.IntPrimitiveStatement(test_case_mock, 0)
new = vri.VariableReferenceImpl(test_case_mock, int)
statement.replace(statement.return_value, new)
assert statement.return_value == new
def test_primitive_statement_replace_ignore(test_case_mock):
statement = prim.IntPrimitiveStatement(test_case_mock, 0)
new = prim.FloatPrimitiveStatement(test_case_mock, 0).return_value
old = statement.return_value
statement.replace(new, new)
assert statement.return_value == old
def test_primitive_statement_get_position():
test_case = dtc.DefaultTestCase()
statement = prim.IntPrimitiveStatement(test_case, 5)
test_case.add_statement(statement)
assert statement.get_position() == 0
| 34.724227 | 88 | 0.720255 | 0 | 0 | 0 | 0 | 5,749 | 0.426705 | 0 | 0 | 1,538 | 0.114154 |
48bbe200dfeacc3fe42e8fdff56e3de41ac32c2b | 725 | py | Python | src/data/download/datasets/download_tencent_test.py | lcn-kul/conferencing-speech-2022 | 1089b2baaf2fcf3ac8ef44c65b80da2e5b2c331b | [
"MIT"
]
| 1 | 2022-03-30T15:06:18.000Z | 2022-03-30T15:06:18.000Z | src/data/download/datasets/download_tencent_test.py | lcn-kul/conferencing-speech-2022 | 1089b2baaf2fcf3ac8ef44c65b80da2e5b2c331b | [
"MIT"
]
| null | null | null | src/data/download/datasets/download_tencent_test.py | lcn-kul/conferencing-speech-2022 | 1089b2baaf2fcf3ac8ef44c65b80da2e5b2c331b | [
"MIT"
]
| null | null | null | from pathlib import Path
from src import constants
from src.data.download.utils.download_dataset_zip import download_dataset_zip
def download_tencent_test(
tmp_dir: Path = None,
tqdm_name: str = None,
tqdm_idx: int = None,
):
"""Download the test set of the Tencent Corpus and extract it to the
appropriate directory."""
download_dataset_zip(
name="tencent_test",
data_url=constants.TENCENT_TEST_URL,
output_dir=constants.TENCENT_TEST_DIR,
extracted_name=constants.TENCENT_TEST_ZIP_FOLDER,
tmp_dir=tmp_dir,
tqdm_name=tqdm_name,
tqdm_idx=tqdm_idx,
)
if __name__ == "__main__":
download_tencent_test(tqdm_name="tencent", tqdm_idx=0)
| 25.892857 | 77 | 0.714483 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 131 | 0.18069 |
48bc446a06d58d6a75df610f9236257a1d789475 | 9,669 | py | Python | malaya_speech/train/model/fastspeechsplit/model.py | ishine/malaya-speech | fd34afc7107af1656dff4b3201fa51dda54fde18 | [
"MIT"
]
| 111 | 2020-08-31T04:58:54.000Z | 2022-03-29T15:44:18.000Z | malaya_speech/train/model/fastspeechsplit/model.py | ishine/malaya-speech | fd34afc7107af1656dff4b3201fa51dda54fde18 | [
"MIT"
]
| 14 | 2020-12-16T07:27:22.000Z | 2022-03-15T17:39:01.000Z | malaya_speech/train/model/fastspeechsplit/model.py | ishine/malaya-speech | fd34afc7107af1656dff4b3201fa51dda54fde18 | [
"MIT"
]
| 29 | 2021-02-09T08:57:15.000Z | 2022-03-12T14:09:19.000Z | import tensorflow as tf
from ..fastspeech.model import (
TFFastSpeechEncoder,
TFTacotronPostnet,
TFFastSpeechLayer,
)
from ..speechsplit.model import InterpLnr
import numpy as np
import copy
class Encoder_6(tf.keras.layers.Layer):
def __init__(self, config, hparams, **kwargs):
super(Encoder_6, self).__init__(name='Encoder_6', **kwargs)
self.dim_neck_3 = hparams.dim_neck_3
self.freq_3 = hparams.freq_3
self.dim_f0 = hparams.dim_f0
self.dim_enc_3 = hparams.dim_enc_3
self.dim_emb = hparams.dim_spk_emb
self.chs_grp = hparams.chs_grp
self.before_dense_1 = tf.keras.layers.Dense(
units=self.dim_enc_3, dtype=tf.float32, name='before_dense_1'
)
config_1 = copy.deepcopy(config)
config_1.hidden_size = self.dim_enc_3
self.layer_1 = [
TFFastSpeechLayer(config_1, name='layer_._{}'.format(i))
for i in range(config_1.num_hidden_layers)
]
self.encoder_dense_1 = tf.keras.layers.Dense(
units=self.dim_neck_3,
dtype=tf.float32,
name='encoder_dense_1',
)
self.interp = InterpLnr(hparams)
def call(self, x, attention_mask, training=True):
x = self.before_dense_1(x)
for no, layer_module in enumerate(self.layer_1):
x = layer_module([x, attention_mask], training=training)[0]
x = self.interp(
x,
tf.tile([tf.shape(x)[1]], [tf.shape(x)[0]]),
training=training,
)
x = self.encoder_dense_1(x)
return x
class Encoder_7(tf.keras.layers.Layer):
def __init__(self, config, hparams, **kwargs):
super(Encoder_7, self).__init__(name='Encoder_7', **kwargs)
self.config = config
self.dim_neck = hparams.dim_neck
self.dim_neck_3 = hparams.dim_neck_3
self.dim_freq = hparams.dim_freq
self.dim_enc = hparams.dim_enc
self.dim_enc_3 = hparams.dim_enc_3
self.before_dense_1 = tf.keras.layers.Dense(
units=self.dim_enc, dtype=tf.float32, name='before_dense_1'
)
self.before_dense_2 = tf.keras.layers.Dense(
units=self.dim_enc_3, dtype=tf.float32, name='before_dense_2'
)
config_1 = copy.deepcopy(config)
config_1.hidden_size = self.dim_enc
self.layer_1 = [
TFFastSpeechLayer(config_1, name='layer_._{}'.format(i))
for i in range(config_1.num_hidden_layers)
]
config_2 = copy.deepcopy(config)
config_2.hidden_size = self.dim_enc_3
self.layer_2 = [
TFFastSpeechLayer(config_2, name='layer_._{}'.format(i))
for i in range(config_2.num_hidden_layers)
]
self.encoder_dense_1 = tf.keras.layers.Dense(
units=self.dim_neck, dtype=tf.float32, name='encoder_dense_1'
)
self.encoder_dense_2 = tf.keras.layers.Dense(
units=self.dim_neck_3,
dtype=tf.float32,
name='encoder_dense_2',
)
self.interp = InterpLnr(hparams)
def call(self, x_f0, attention_mask, training=True):
x = x_f0[:, :, : self.dim_freq]
f0 = x_f0[:, :, self.dim_freq:]
x = self.before_dense_1(x)
f0 = self.before_dense_2(f0)
seq_length = tf.shape(x_f0)[1]
for no, layer_module in enumerate(self.layer_1):
x = layer_module([x, attention_mask], training=training)[0]
f0 = self.layer_2[no]([f0, attention_mask], training=training)[0]
x_f0 = tf.concat((x, f0), axis=2)
x_f0 = self.interp(
x_f0,
tf.tile([tf.shape(x_f0)[1]], [tf.shape(x)[0]]),
training=training,
)
x = x_f0[:, :, : self.dim_enc]
f0 = x_f0[:, :, self.dim_enc:]
x = self.encoder_dense_1(x)
f0 = self.encoder_dense_2(f0)
return x, f0
class Encoder_t(tf.keras.layers.Layer):
def __init__(self, config, hparams, **kwargs):
super(Encoder_t, self).__init__(name='Encoder_t', **kwargs)
self.dim_neck_2 = hparams.dim_neck_2
self.freq_2 = hparams.freq_2
self.dim_freq = hparams.dim_freq
self.dim_enc_2 = hparams.dim_enc_2
self.dim_emb = hparams.dim_spk_emb
self.chs_grp = hparams.chs_grp
config = copy.deepcopy(config)
config.num_hidden_layers = 1
config.hidden_size = self.dim_enc_2
self.config = config
self.before_dense = tf.keras.layers.Dense(
units=self.dim_enc_2, dtype=tf.float32, name='before_dense_1'
)
self.encoder = TFFastSpeechEncoder(config, name='encoder')
self.encoder_dense = tf.keras.layers.Dense(
units=self.dim_neck_2, dtype=tf.float32, name='encoder_dense'
)
def call(self, x, attention_mask, training=True):
x = self.before_dense(x)
seq_length = tf.shape(x)[1]
f = self.encoder([x, attention_mask], training=training)[0]
return self.encoder_dense(f)
class Decoder_3(tf.keras.layers.Layer):
def __init__(self, config, hparams, **kwargs):
super(Decoder_3, self).__init__(name='Decoder_3', **kwargs)
self.config = config
self.encoder = TFFastSpeechEncoder(config, name='encoder')
self.before_dense = tf.keras.layers.Dense(
units=config.hidden_size,
dtype=tf.float32,
name='before_dense_1',
)
self.linear_projection = tf.keras.layers.Dense(
units=hparams.dim_freq,
dtype=tf.float32,
name='self.linear_projection',
)
def call(self, x, attention_mask, training=True):
x = self.before_dense(x)
seq_length = tf.shape(x)[1]
f = self.encoder([x, attention_mask], training=training)[0]
return self.linear_projection(f)
class Decoder_4(tf.keras.layers.Layer):
def __init__(self, config, hparams, **kwargs):
super(Decoder_4, self).__init__(name='Decoder_4', **kwargs)
self.config = config
self.encoder = TFFastSpeechEncoder(config, name='encoder')
self.before_dense = tf.keras.layers.Dense(
units=config.hidden_size,
dtype=tf.float32,
name='before_dense_1',
)
self.linear_projection = tf.keras.layers.Dense(
units=hparams.dim_f0,
dtype=tf.float32,
name='self.linear_projection',
)
def call(self, x, attention_mask, training=True):
x = self.before_dense(x)
seq_length = tf.shape(x)[1]
f = self.encoder([x, attention_mask], training=training)[0]
return self.linear_projection(f)
class Model(tf.keras.Model):
def __init__(self, config, hparams, **kwargs):
super(Model, self).__init__(name='speechsplit', **kwargs)
self.encoder_1 = Encoder_7(
config.encoder_self_attention_params, hparams
)
self.encoder_2 = Encoder_t(
config.encoder_self_attention_params, hparams
)
self.decoder = Decoder_3(config.decoder_self_attention_params, hparams)
self.freq = hparams.freq
self.freq_2 = hparams.freq_2
self.freq_3 = hparams.freq_3
def call(self, x_f0, x_org, c_trg, mel_lengths, training=True):
max_length = tf.cast(tf.reduce_max(mel_lengths), tf.int32)
attention_mask = tf.sequence_mask(
lengths=mel_lengths, maxlen=max_length, dtype=tf.float32
)
attention_mask.set_shape((None, None))
codes_x, codes_f0 = self.encoder_1(
x_f0, attention_mask, training=training
)
codes_2 = self.encoder_2(x_org, attention_mask, training=training)
code_exp_1 = codes_x
code_exp_3 = codes_f0
code_exp_2 = codes_2
c_trg = tf.tile(tf.expand_dims(c_trg, 1), (1, tf.shape(x_f0)[1], 1))
encoder_outputs = tf.concat(
(code_exp_1, code_exp_2, code_exp_3, c_trg), axis=-1
)
mel_outputs = self.decoder(
encoder_outputs, attention_mask, training=training
)
return codes_x, codes_f0, codes_2, encoder_outputs, mel_outputs
class Model_F0(tf.keras.Model):
def __init__(self, config, hparams, **kwargs):
super(Model_F0, self).__init__(name='speechsplit_f0', **kwargs)
self.encoder_2 = Encoder_t(
config.encoder_self_attention_params, hparams
)
self.encoder_3 = Encoder_6(
config.encoder_self_attention_params, hparams
)
self.decoder = Decoder_4(config.decoder_self_attention_params, hparams)
self.freq_2 = hparams.freq_2
self.freq_3 = hparams.freq_3
def call(self, x_org, f0_trg, mel_lengths, training=True):
max_length = tf.cast(tf.reduce_max(mel_lengths), tf.int32)
attention_mask = tf.sequence_mask(
lengths=mel_lengths, maxlen=max_length, dtype=tf.float32
)
attention_mask.set_shape((None, None))
codes_2 = self.encoder_2(x_org, attention_mask, training=training)
code_exp_2 = codes_2
codes_3 = self.encoder_3(f0_trg, attention_mask, training=training)
code_exp_3 = codes_3
self.o = [code_exp_2, code_exp_3]
encoder_outputs = tf.concat((code_exp_2, code_exp_3), axis=-1)
mel_outputs = self.decoder(
encoder_outputs, attention_mask, training=training
)
return codes_2, codes_3, encoder_outputs, mel_outputs
| 34.532143 | 79 | 0.617541 | 9,445 | 0.976833 | 0 | 0 | 0 | 0 | 0 | 0 | 357 | 0.036922 |
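Both Model.call and Model_F0.call derive their attention mask from mel_lengths with tf.sequence_mask. The standalone snippet below shows what that mask looks like for a toy batch (TensorFlow 2.x assumed):

import tensorflow as tf

mel_lengths = tf.constant([2, 4])
max_length = tf.cast(tf.reduce_max(mel_lengths), tf.int32)
attention_mask = tf.sequence_mask(
    lengths=mel_lengths, maxlen=max_length, dtype=tf.float32
)
print(attention_mask.numpy())
# [[1. 1. 0. 0.]
#  [1. 1. 1. 1.]]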
48bc4c72c304a6d7aeeb0dab781f82a2616fe4d3 | 4,766 | py | Python | test/test_memory_leaks.py | elventear/psutil | c159f3352dc5f699143960840e4f6535174690ed | [
"BSD-3-Clause"
]
| 4 | 2015-01-06T01:39:12.000Z | 2019-12-09T10:27:44.000Z | test/test_memory_leaks.py | elventear/psutil | c159f3352dc5f699143960840e4f6535174690ed | [
"BSD-3-Clause"
]
| null | null | null | test/test_memory_leaks.py | elventear/psutil | c159f3352dc5f699143960840e4f6535174690ed | [
"BSD-3-Clause"
]
| 2 | 2016-10-21T03:15:34.000Z | 2018-12-10T03:40:50.000Z | #!/usr/bin/env python
#
# $Id$
#
"""
Note: this is targeted for python 2.x.
To run it under python 3.x you need to use 2to3 tool first:
$ 2to3 -w test/test_memory_leaks.py
"""
import os
import gc
import sys
import unittest
import psutil
from test_psutil import reap_children, skipUnless, skipIf, \
POSIX, LINUX, WINDOWS, OSX, BSD
LOOPS = 1000
TOLERANCE = 4096
class TestProcessObjectLeaks(unittest.TestCase):
"""Test leaks of Process class methods and properties"""
def setUp(self):
gc.collect()
def tearDown(self):
reap_children()
def execute(self, method, *args, **kwarks):
# step 1
p = psutil.Process(os.getpid())
for x in xrange(LOOPS):
obj = getattr(p, method)
if callable(obj):
retvalue = obj(*args, **kwarks)
else:
retvalue = obj # property
del x, p, obj, retvalue
gc.collect()
rss1 = psutil.Process(os.getpid()).get_memory_info()[0]
# step 2
p = psutil.Process(os.getpid())
for x in xrange(LOOPS):
obj = getattr(p, method)
if callable(obj):
retvalue = obj(*args, **kwarks)
else:
retvalue = obj # property
del x, p, obj, retvalue
gc.collect()
rss2 = psutil.Process(os.getpid()).get_memory_info()[0]
# comparison
difference = rss2 - rss1
if difference > TOLERANCE:
self.fail("rss1=%s, rss2=%s, difference=%s" %(rss1, rss2, difference))
def test_name(self):
self.execute('name')
def test_cmdline(self):
self.execute('cmdline')
def test_ppid(self):
self.execute('ppid')
def test_uid(self):
self.execute('uid')
    def test_gid(self):
self.execute('gid')
@skipIf(POSIX)
def test_username(self):
self.execute('username')
def test_create_time(self):
self.execute('create_time')
def test_get_num_threads(self):
self.execute('get_num_threads')
def test_get_threads(self):
self.execute('get_num_threads')
def test_get_cpu_times(self):
self.execute('get_cpu_times')
def test_get_memory_info(self):
self.execute('get_memory_info')
def test_is_running(self):
self.execute('is_running')
@skipUnless(WINDOWS)
def test_resume(self):
self.execute('resume')
@skipUnless(WINDOWS)
def test_getcwd(self):
self.execute('getcwd')
@skipUnless(WINDOWS)
def test_get_open_files(self):
self.execute('get_open_files')
@skipUnless(WINDOWS)
def test_get_connections(self):
self.execute('get_connections')
class TestModuleFunctionsLeaks(unittest.TestCase):
"""Test leaks of psutil module functions."""
def setUp(self):
gc.collect()
def execute(self, function, *args, **kwarks):
# step 1
for x in xrange(LOOPS):
obj = getattr(psutil, function)
if callable(obj):
retvalue = obj(*args, **kwarks)
else:
retvalue = obj # property
del x, obj, retvalue
gc.collect()
rss1 = psutil.Process(os.getpid()).get_memory_info()[0]
# step 2
for x in xrange(LOOPS):
obj = getattr(psutil, function)
if callable(obj):
retvalue = obj(*args, **kwarks)
else:
retvalue = obj # property
del x, obj, retvalue
gc.collect()
rss2 = psutil.Process(os.getpid()).get_memory_info()[0]
# comparison
difference = rss2 - rss1
if difference > TOLERANCE:
self.fail("rss1=%s, rss2=%s, difference=%s" %(rss1, rss2, difference))
def test_get_pid_list(self):
self.execute('get_pid_list')
@skipIf(POSIX)
def test_pid_exists(self):
self.execute('pid_exists', os.getpid())
def test_process_iter(self):
self.execute('process_iter')
def test_used_phymem(self):
self.execute('used_phymem')
def test_avail_phymem(self):
self.execute('avail_phymem')
def test_total_virtmem(self):
self.execute('total_virtmem')
def test_used_virtmem(self):
self.execute('used_virtmem')
def test_avail_virtmem(self):
self.execute('avail_virtmem')
def test_cpu_times(self):
self.execute('cpu_times')
def test_main():
test_suite = unittest.TestSuite()
test_suite.addTest(unittest.makeSuite(TestProcessObjectLeaks))
test_suite.addTest(unittest.makeSuite(TestModuleFunctionsLeaks))
unittest.TextTestRunner(verbosity=2).run(test_suite)
if __name__ == '__main__':
test_main()
| 24.822917 | 82 | 0.599245 | 4,075 | 0.855015 | 0 | 0 | 515 | 0.108057 | 0 | 0 | 747 | 0.156735 |
48bc6e9f0498c16dbcd64706a2f744500361365e | 8,516 | py | Python | ga4gh/search/compliance/util/local_server.py | ga4gh-discovery/ga4gh-search-compliance | 58c693ca2f96d145f4ccba08aec23e4ebe1f7599 | [
"Apache-2.0"
]
| null | null | null | ga4gh/search/compliance/util/local_server.py | ga4gh-discovery/ga4gh-search-compliance | 58c693ca2f96d145f4ccba08aec23e4ebe1f7599 | [
"Apache-2.0"
]
| null | null | null | ga4gh/search/compliance/util/local_server.py | ga4gh-discovery/ga4gh-search-compliance | 58c693ca2f96d145f4ccba08aec23e4ebe1f7599 | [
"Apache-2.0"
]
| null | null | null | # -*- coding: utf-8 -*-
"""Module compliance_suite.report_server.py
This module contains the class definition of a small web server utility. Serves final
report results as HTML.
"""
import datetime
import time
import http.server
import socketserver
import os
import logging
import inspect
import socket
import webbrowser
import shutil
import sys
import threading
import json
import jinja2 as j2
import ga4gh.search.compliance as pkg_dir
from ga4gh.search.compliance.config.configuration import Configuration
def capitalize(text):
"""capitalizes a word, for use in rendering template
Args:
text (str): word to capitalize
Returns:
capitalized (str): capitalized word
"""
return text[0].upper() + text[1:]
def get_route_status(route_obj):
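    # Summarise a route's test case report statuses into the Bootstrap button
    # class and label shown in the rendered HTML report.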
count_d = {
"incomplete": 0,
"pass": 0,
"fail": 0,
"warn": 0,
"skip": 0
}
symbol_d = {
"0": "incomplete",
"1": "pass",
"2": "warn",
"3": "fail",
"4": "skip",
}
ret = {
"btn": "btn-danger",
"text": "No Tests Run"
}
for test_case_report in route_obj["test_case_reports"]:
count_d[symbol_d[str(test_case_report["status"])]] += 1
if count_d["fail"] > 0 or count_d["skip"] > 0:
ret = {
"btn": "btn-danger",
"text": "%s Failed / %s Skipped" % (str(count_d["fail"]),
str(count_d["skip"]))
}
if count_d["pass"] > 0:
ret = {
"btn": "btn-success",
"text": "Pass"
}
return ret
class LocalServer(object):
"""Creates web server, serves test report as HTML
    The LocalServer spins up a small, local web server to host test result
reports once the final JSON object has been generated. The server can be
shut down with CTRL+C.
Attributes:
port (Port): object representing free port to serve content
httpd (TCPServer): handle for web server
thread (Thread): thread serves content indefinitely, can be killed
safely from the outside via CTRL+C
        web_dir (str): directory which hosts web files (CSS and generated HTML)
cwd (str): working directory to change back to after creating server
render_helper (dict): contains data structures and functions to be
passed to rendering engine to aid in rendering HTML
"""
    def __init__(self):
        """instantiates a LocalServer object"""
self.port = None
self.httpd = None
self.thread = None
self.web_dir = Configuration.get_instance().get_output_dir()
self.web_resource_dir = os.path.join(
os.path.dirname(pkg_dir.__file__),
"web"
)
self.cwd = os.getcwd()
self.render_helper = {
"s": { # s: structures
"endpoints": [
"service_info",
"tables",
"table_info",
"table_data",
"search"
],
"formatted": {
"service_info": "Service Info",
"tables": "Tables",
"table_info": "Table Info",
"table_data": "Table Data",
"search": "Search"
},
"status": {
0: {
"status": "INCOMPLETE",
"css_class": "text-danger",
"fa_class": "fa-times-circle"
},
1: {
"status": "PASS",
"css_class": "text-success",
"fa_class": "fa-check-circle"
},
2: {
"status": "WARN",
"css_class": "text-danger",
"fa_class": "fa-times-circle"
},
3: {
"status": "FAIL",
"css_class": "text-danger",
"fa_class": "fa-times-circle"
},
4: {
"status": "SKIP",
"css_class": "text-info",
"fa_class": "fa-ban"
}
}
},
"f": { # f: functions
"capitalize": capitalize,
"format_test_name": lambda text: " ".join(
[capitalize(t) for t in text.split("_")]
),
"server_name_url": lambda name: \
name.lower().replace(" ", "") + ".html",
"rm_space": lambda text: text.replace(" ", "_")\
.replace(",", ""),
"timestamp": lambda: \
datetime.datetime.now(datetime.timezone.utc)\
.strftime("%B %d, %Y at %l:%M %p (%Z)"),
"route_status": get_route_status
}
}
def setup(self):
self.__set_free_port()
self.__copy_web_resource_dir()
self.__render_html()
def serve(self, uptime=3600):
"""serves server as separate thread so it can be stopped from outside
Args:
uptime (int): server will remain up for this time in seconds unless
shutdown by user
"""
try:
self.thread = threading.Thread(target=self.__start_mock_server,
args=(uptime,))
self.thread.start()
time.sleep(uptime)
except KeyboardInterrupt as e:
print("stopping server")
finally:
self.httpd.shutdown()
os.chdir(self.cwd)
def __set_free_port(self):
"""get free port on local machine on which to run the report server
This function is used in conftest and the return of this is a free port
available in the system on which the mock server will be run. This port
will be passed to start_mock_server as a required parameter from
conftest.py
Returns:
(Port): free port on which to run server
"""
s = socket.socket(socket.AF_INET, type=socket.SOCK_STREAM)
s.bind(('localhost', 0))
address, port = s.getsockname()
s.close()
self.port = port
def __copy_web_resource_dir(self):
for subdir in ["public"]:
src = os.path.join(self.web_resource_dir, subdir)
dst = os.path.join(self.web_dir, subdir)
shutil.copytree(src, dst)
def __render_html(self):
data = None
with open(self.web_dir + "/ga4gh-search-compliance-report.json", "r") as f:
data = json.load(f)
# set up jinja2 rendering engine
view_loader = j2.FileSystemLoader(searchpath=self.web_resource_dir)
view_env = j2.Environment(loader=view_loader)
# render the index/homepage
home_template = view_env.get_template("views/home.html")
home_rendered = home_template.render(data=data, h=self.render_helper)
home_path = self.web_dir + "/index.html"
open(home_path, "w").write(home_rendered)
for server_report in data["server_reports"]:
report_template = view_env.get_template("views/report.html")
report_rendered = report_template.render(server_report=server_report,
h=self.render_helper)
report_path = self.web_dir + "/" + \
self.render_helper["f"]["server_name_url"](server_report["name"])
open(report_path, "w").write(report_rendered)
def __start_mock_server(self, uptime):
"""run server to serve final test report
Args:
port (Port): port on which to run the server
"""
os.chdir(self.web_dir)
Handler = http.server.SimpleHTTPRequestHandler
self.httpd = socketserver.TCPServer(("", self.port), Handler)
logging.info("serving at http://localhost:" + str(self.port))
webbrowser.open("http://localhost:" + str(self.port))
logging.info("server will shut down after " + str(uptime) + " seconds, "
+ "press CTRL+C to shut down manually")
self.httpd.serve_forever()
| 33.136187 | 83 | 0.516557 | 6,876 | 0.807421 | 0 | 0 | 0 | 0 | 0 | 0 | 3,238 | 0.380225 |
48bc7c9db7dabf6628ee230ef0c1f45b6794af0d | 2,146 | py | Python | api/routefinder.py | shingkid/DrWatson-ToTheRescue_SCDFXIBM | 009d2b4599b276ea760dbd888718a25332893075 | [
"MIT"
]
| 1 | 2020-06-12T10:24:31.000Z | 2020-06-12T10:24:31.000Z | api/routefinder.py | yankai364/Dr-Watson | 22bd885d028e118fa5abf5a9d0ea373b7020ca1d | [
"MIT"
]
| 3 | 2020-09-24T15:36:33.000Z | 2022-02-10T02:32:42.000Z | api/routefinder.py | shingkid/DrWatson-ToTheRescue_SCDFXIBM | 009d2b4599b276ea760dbd888718a25332893075 | [
"MIT"
]
| 1 | 2020-06-14T10:09:58.000Z | 2020-06-14T10:09:58.000Z | import csv
import pandas as pd
import numpy as np
import networkx as nx
class RouteFinder():
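    # Builds an undirected graph from the node pairs listed in data/node_pairs.csv
    # and computes shortest entry/exit routes; blocked nodes can be removed and the
    # full graph restored with reset_graph().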
def __init__(self):
G = nx.Graph()
with open('data/node_pairs.csv') as csvfile:
readCSV = csv.reader(csvfile, delimiter=',')
for row in readCSV:
# add edges
G.add_edge(row[0],row[1])
self.G = G
def reset_graph(self):
G = nx.Graph()
with open('data/node_pairs.csv') as csvfile:
readCSV = csv.reader(csvfile, delimiter=',')
for row in readCSV:
# add edges
G.add_edge(row[0],row[1])
self.G = G
def remove_node(self,nodes):
self.G.remove_nodes_from(nodes)
def optimal_route(self,source,target):
return nx.shortest_path(self.G, source, target)
def optimal_entry_route(self,target):
exits = ['Exit_4','Exit_3','Exit_2','Exit_1']
optimal_route = []
shortest_path_length = 0
for exit in exits:
try:
curr_path = nx.shortest_path(self.G, exit, target)
curr_length = len(curr_path)
if shortest_path_length == 0 or curr_length < shortest_path_length:
optimal_route = curr_path
shortest_path_length = curr_length
except:
msg = 'No paths found'
if shortest_path_length == 0:
return msg
return optimal_route
def optimal_exit_route(self,source):
exits = ['Exit_1','Exit_2','Exit_3','Exit_4']
optimal_route = []
shortest_path_length = 0
for exit in exits:
try:
curr_path = nx.shortest_path(self.G, source, exit)
curr_length = len(curr_path)
if shortest_path_length == 0 or curr_length < shortest_path_length:
optimal_route = curr_path
shortest_path_length = curr_length
except:
msg = 'No paths found'
if shortest_path_length == 0:
return msg
return optimal_route
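# Example usage (illustrative sketch; node names must exist in data/node_pairs.csv):
#   finder = RouteFinder()
#   finder.remove_node(['Node_12'])             # drop a blocked node
#   print(finder.optimal_exit_route('Node_7'))  # shortest path to the nearest exit
#   finder.reset_graph()                        # restore the full graph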
| 24.11236 | 83 | 0.547996 | 2,072 | 0.965517 | 0 | 0 | 0 | 0 | 0 | 0 | 166 | 0.077353 |
48bd4369d1643a3a728218455de1ea42bfd683e8 | 8,084 | py | Python | lectures/extensions/hyperbolic_discounting/replication_code/.mywaflib/waflib/extras/rst.py | loikein/ekw-lectures | a2f5436f10515ab26eab323fca8c37c91bdc5dcd | [
"MIT"
]
| 4 | 2019-11-15T15:21:27.000Z | 2020-07-08T15:04:30.000Z | lectures/extensions/hyperbolic_discounting/replication_code/.mywaflib/waflib/extras/rst.py | loikein/ekw-lectures | a2f5436f10515ab26eab323fca8c37c91bdc5dcd | [
"MIT"
]
| 9 | 2019-11-18T15:54:36.000Z | 2020-07-14T13:56:53.000Z | lectures/extensions/hyperbolic_discounting/replication_code/.mywaflib/waflib/extras/rst.py | loikein/ekw-lectures | a2f5436f10515ab26eab323fca8c37c91bdc5dcd | [
"MIT"
]
| 3 | 2021-01-25T15:41:30.000Z | 2021-09-21T08:51:36.000Z | #!/usr/bin/env python
# Jérôme Carretero, 2013 (zougloub)
"""
reStructuredText support (experimental)
Example::
def configure(conf):
conf.load('rst')
if not conf.env.RST2HTML:
conf.fatal('The program rst2html is required')
def build(bld):
bld(
features = 'rst',
type = 'rst2html', # rst2html, rst2pdf, ...
source = 'index.rst', # mandatory, the source
deps = 'image.png', # to give additional non-trivial dependencies
)
By default the tool looks for a set of programs in PATH.
The tools are defined in `rst_progs`.
To configure with a special program use::
$ RST2HTML=/path/to/rst2html waf configure
This tool is experimental; don't hesitate to contribute to it.
"""
import re
from waflib import Errors
from waflib import Logs
from waflib import Node
from waflib import Task
from waflib import Utils
from waflib.TaskGen import before_method
from waflib.TaskGen import feature
rst_progs = "rst2html rst2xetex rst2latex rst2xml rst2pdf rst2s5 rst2man rst2odt rst2rtf".split()
def parse_rst_node(task, node, nodes, names, seen, dirs=None):
# TODO add extensibility, to handle custom rst include tags...
if dirs is None:
dirs = (node.parent, node.get_bld().parent)
if node in seen:
return
seen.append(node)
code = node.read()
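    # Matches reST directives of the form ".. include:: file", ".. image:: file"
    # or ".. figure:: file", optionally preceded by a substitution label (|name|).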
re_rst = re.compile(
r"^\s*.. ((?P<subst>\|\S+\|) )?(?P<type>include|image|figure):: (?P<file>.*)$", re.M
)
for match in re_rst.finditer(code):
ipath = match.group("file")
itype = match.group("type")
Logs.debug("rst: visiting %s: %s", itype, ipath)
found = False
for d in dirs:
Logs.debug("rst: looking for %s in %s", ipath, d.abspath())
found = d.find_node(ipath)
if found:
Logs.debug("rst: found %s as %s", ipath, found.abspath())
nodes.append((itype, found))
if itype == "include":
parse_rst_node(task, found, nodes, names, seen)
break
if not found:
names.append((itype, ipath))
class docutils(Task.Task):
"""
Compile a rst file.
"""
def scan(self):
"""
A recursive regex-based scanner that finds rst dependencies.
"""
nodes = []
names = []
seen = []
node = self.inputs[0]
if not node:
return (nodes, names)
parse_rst_node(self, node, nodes, names, seen)
Logs.debug("rst: %r: found the following file deps: %r", self, nodes)
if names:
Logs.warn("rst: %r: could not find the following file deps: %r", self, names)
return ([v for (t, v) in nodes], [v for (t, v) in names])
def check_status(self, msg, retcode):
"""
Check an exit status and raise an error with a particular message
:param msg: message to display if the code is non-zero
:type msg: string
:param retcode: condition
:type retcode: boolean
"""
if retcode != 0:
raise Errors.WafError(f"{msg!r} command exit status {retcode!r}")
def run(self):
"""
Runs the rst compilation using docutils
"""
raise NotImplementedError()
class rst2html(docutils):
color = "BLUE"
def __init__(self, *args, **kw):
docutils.__init__(self, *args, **kw)
self.command = self.generator.env.RST2HTML
self.attributes = ["stylesheet"]
def scan(self):
nodes, names = docutils.scan(self)
for attribute in self.attributes:
stylesheet = getattr(self.generator, attribute, None)
if stylesheet is not None:
ssnode = self.generator.to_nodes(stylesheet)[0]
nodes.append(ssnode)
Logs.debug("rst: adding dep to %s %s", attribute, stylesheet)
return nodes, names
def run(self):
cwdn = self.outputs[0].parent
src = self.inputs[0].path_from(cwdn)
dst = self.outputs[0].path_from(cwdn)
cmd = self.command + [src, dst]
cmd += Utils.to_list(getattr(self.generator, "options", []))
for attribute in self.attributes:
stylesheet = getattr(self.generator, attribute, None)
if stylesheet is not None:
stylesheet = self.generator.to_nodes(stylesheet)[0]
cmd += ["--%s" % attribute, stylesheet.path_from(cwdn)]
return self.exec_command(cmd, cwd=cwdn.abspath())
class rst2s5(rst2html):
def __init__(self, *args, **kw):
rst2html.__init__(self, *args, **kw)
self.command = self.generator.env.RST2S5
self.attributes = ["stylesheet"]
class rst2latex(rst2html):
def __init__(self, *args, **kw):
rst2html.__init__(self, *args, **kw)
self.command = self.generator.env.RST2LATEX
self.attributes = ["stylesheet"]
class rst2xetex(rst2html):
def __init__(self, *args, **kw):
rst2html.__init__(self, *args, **kw)
self.command = self.generator.env.RST2XETEX
self.attributes = ["stylesheet"]
class rst2pdf(docutils):
color = "BLUE"
def run(self):
cwdn = self.outputs[0].parent
src = self.inputs[0].path_from(cwdn)
dst = self.outputs[0].path_from(cwdn)
cmd = self.generator.env.RST2PDF + [src, "-o", dst]
cmd += Utils.to_list(getattr(self.generator, "options", []))
return self.exec_command(cmd, cwd=cwdn.abspath())
@feature("rst")
@before_method("process_source")
def apply_rst(self):
"""
Create :py:class:`rst` or other rst-related task objects
"""
if self.target:
if isinstance(self.target, Node.Node):
tgt = self.target
elif isinstance(self.target, str):
tgt = self.path.get_bld().make_node(self.target)
else:
self.bld.fatal(
f"rst: Don't know how to build target name {self.target} which is not a string or Node for {self}"
)
else:
tgt = None
tsk_type = getattr(self, "type", None)
src = self.to_nodes(self.source)
assert len(src) == 1
src = src[0]
if tsk_type is not None and tgt is None:
if tsk_type.startswith("rst2"):
ext = tsk_type[4:]
else:
self.bld.fatal("rst: Could not detect the output file extension for %s" % self)
tgt = src.change_ext(".%s" % ext)
elif tsk_type is None and tgt is not None:
out = tgt.name
ext = out[out.rfind(".") + 1 :]
self.type = "rst2" + ext
elif tsk_type is not None and tgt is not None:
# the user knows what he wants
pass
else:
self.bld.fatal("rst: Need to indicate task type or target name for %s" % self)
deps_lst = []
if getattr(self, "deps", None):
deps = self.to_list(self.deps)
for filename in deps:
n = self.path.find_resource(filename)
if not n:
self.bld.fatal(f"Could not find {filename!r} for {self!r}")
if not n in deps_lst:
deps_lst.append(n)
try:
task = self.create_task(self.type, src, tgt)
except KeyError:
self.bld.fatal(f"rst: Task of type {self.type} not implemented (created by {self})")
task.env = self.env
# add the manual dependencies
if deps_lst:
try:
lst = self.bld.node_deps[task.uid()]
for n in deps_lst:
if not n in lst:
lst.append(n)
except KeyError:
self.bld.node_deps[task.uid()] = deps_lst
inst_to = getattr(self, "install_path", None)
if inst_to:
self.install_task = self.add_install_files(install_to=inst_to, install_from=task.outputs[:])
self.source = []
def configure(self):
"""
Try to find the rst programs.
Do not raise any error if they are not found.
You'll have to use additional code in configure() to die
if programs were not found.
"""
for p in rst_progs:
self.find_program(p, mandatory=False)
| 29.289855 | 114 | 0.593147 | 3,383 | 0.418377 | 0 | 0 | 2,305 | 0.285061 | 0 | 0 | 2,403 | 0.29718 |
48bf7ed2085cdea54fbe4837b4e8e76a67b7373c | 1,339 | py | Python | mofa/analytics/tests/test_participationAnalytics/test_quizParticipation.py | BoxInABoxICT/BoxPlugin | ad351978faa37ab867a86d2f4023a2b3e5a2ce19 | [
"Apache-2.0"
]
| null | null | null | mofa/analytics/tests/test_participationAnalytics/test_quizParticipation.py | BoxInABoxICT/BoxPlugin | ad351978faa37ab867a86d2f4023a2b3e5a2ce19 | [
"Apache-2.0"
]
| null | null | null | mofa/analytics/tests/test_participationAnalytics/test_quizParticipation.py | BoxInABoxICT/BoxPlugin | ad351978faa37ab867a86d2f4023a2b3e5a2ce19 | [
"Apache-2.0"
]
| null | null | null | import unittest
import json
import os
from unittest.mock import MagicMock, patch
from analytics.src.participationAnalytics import quizParticipation
class TestQuizParticipation(unittest.TestCase):
@patch("analytics.src.participationAnalytics.quizParticipation.runQuery")
def test_generateData(self, lrs_mock):
"""
Tests if the analysis is performed correctly
"""
# Setup mock for database query
d = os.path.dirname(os.path.realpath(__file__))
f = open(f'{d}/quizQuery.json')
lrs_mock.side_effect = [json.load(f)]
# Run the test
correct_result = {
"http://localhost/mod/quiz/view.php?id=1": 3,
"http://localhost/mod/quiz/view.php?id=2": 1,
"http://localhost/mod/quiz/view.php?id=5": 1
}
actual_result = quizParticipation.generateData(0)
self.assertEqual(correct_result, actual_result)
@patch("analytics.src.participationAnalytics.quizParticipation.runQuery")
def test_generateData_error(self, lrs_mock):
"""
Tests if an error is passed trough correctly
"""
# Setup mock for database query
error = {"error": "mock error"}
lrs_mock.side_effect = [error]
# Run test
self.assertEqual(error, quizParticipation.generateData(2))
| 32.658537 | 77 | 0.657207 | 1,187 | 0.886482 | 0 | 0 | 1,128 | 0.84242 | 0 | 0 | 515 | 0.384615 |
48bf9da5843cf6858ec4f1074f331bc92553a1cd | 1,171 | py | Python | visualize_cam.py | mhamdan91/Gradcam_eager | ee732ff65256ef1692caf94c8c0b4bdbe22d2d1d | [
"MIT"
]
| 2 | 2019-09-19T18:08:26.000Z | 2019-10-11T12:42:22.000Z | visualize_cam.py | mhamdan91/Gradcam_eager | ee732ff65256ef1692caf94c8c0b4bdbe22d2d1d | [
"MIT"
]
| null | null | null | visualize_cam.py | mhamdan91/Gradcam_eager | ee732ff65256ef1692caf94c8c0b4bdbe22d2d1d | [
"MIT"
]
| null | null | null | # from utils import Sample_main
import gradcam_main
import numpy as np
import tensorflow as tf
import argparse
import os
tf.logging.set_verbosity(tf.logging.ERROR) # disable to see tensorflow warnings
def cam(in_path='sample.bmp', out_path = 'sample.png',):
gradcam_main.cam_vis(in_path, out_path)
def main():
parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--input_image', default='sample.bmp', type=str, help='Full name of the input image -- default set to sample.bmp')
parser.add_argument('-o', '--output_image', default='sample.png', type=str, help='Full name of output image (should be .png) -- default set to '
'input_image.png')
args = parser.parse_args()
if args.input_image != 'sample.bmp' and args.output_image == 'sample.png':
out_name = args.input_image
out_name = out_name.replace('bmp', 'png')
else:
out_name = args.output_image
out_name = out_name.replace('bmp', 'png')
cam(args.input_image, out_name)
# In case referenced by other modules
if __name__ == '__main__':
main()
| 34.441176 | 150 | 0.64304 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 385 | 0.328779 |
48bfa6a9870aa2f95044df7a3145739de4a0dc15 | 1,681 | py | Python | tests/molecular/molecules/building_block/test_with_functional_groups.py | andrewtarzia/stk | 1ac2ecbb5c9940fe49ce04cbf5603fd7538c475a | [
"MIT"
]
| 21 | 2018-04-12T16:25:24.000Z | 2022-02-14T23:05:43.000Z | tests/molecular/molecules/building_block/test_with_functional_groups.py | JelfsMaterialsGroup/stk | 0d3e1b0207aa6fa4d4d5ee8dfe3a29561abb08a2 | [
"MIT"
]
| 8 | 2019-03-19T12:36:36.000Z | 2020-11-11T12:46:00.000Z | tests/molecular/molecules/building_block/test_with_functional_groups.py | supramolecular-toolkit/stk | 0d3e1b0207aa6fa4d4d5ee8dfe3a29561abb08a2 | [
"MIT"
]
| 5 | 2018-08-07T13:00:16.000Z | 2021-11-01T00:55:10.000Z | from ..utilities import (
has_same_structure,
is_equivalent_molecule,
is_equivalent_building_block,
are_equivalent_functional_groups,
)
def test_with_functional_groups(building_block, get_functional_groups):
"""
Test :meth:`.BuildingBlock.with_functional_groups`.
Parameters
----------
building_block : :class:`.BuildingBlock`
The building block to test.
get_functional_groups : :class:`callable`
Takes a single parameter, `building_block` and returns the
`functional_groups` parameter to use for this test.
Returns
-------
None : :class:`NoneType`
"""
# Save clone to check immutability.
clone = building_block.clone()
_test_with_functional_groups(
building_block=building_block,
functional_groups=tuple(get_functional_groups(building_block)),
)
is_equivalent_building_block(building_block, clone)
has_same_structure(building_block, clone)
def _test_with_functional_groups(building_block, functional_groups):
"""
Test :meth:`.BuildingBlock.with_functional_groups`.
Parameters
----------
building_block : :class:`.BuildingBlock`
The building block to test.
functional_groups : :class:`tuple` of :class:`.FunctionalGroup`
The functional groups the new building block should hold.
Returns
-------
None : :class:`NoneType`
"""
new = building_block.with_functional_groups(functional_groups)
are_equivalent_functional_groups(
new.get_functional_groups(),
functional_groups,
)
is_equivalent_molecule(building_block, new)
has_same_structure(building_block, new)
| 26.68254 | 71 | 0.702558 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 812 | 0.483046 |
48bfa7e063bfbe3193516ebfca7f4a3ae8dc8a0a | 9,512 | py | Python | tests/master/test_master.py | bk-mtg/piwheels | 67152dd1cfd5bd03ea90a8f0255103a9ee9c71d6 | [
"BSD-3-Clause"
]
| null | null | null | tests/master/test_master.py | bk-mtg/piwheels | 67152dd1cfd5bd03ea90a8f0255103a9ee9c71d6 | [
"BSD-3-Clause"
]
| null | null | null | tests/master/test_master.py | bk-mtg/piwheels | 67152dd1cfd5bd03ea90a8f0255103a9ee9c71d6 | [
"BSD-3-Clause"
]
| null | null | null | # The piwheels project
# Copyright (c) 2017 Ben Nuttall <https://github.com/bennuttall>
# Copyright (c) 2017 Dave Jones <[email protected]>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import os
from unittest import mock
from threading import Thread
import pytest
from conftest import find_message
from piwheels import __version__, protocols, transport
from piwheels.master import main, const
@pytest.fixture()
def mock_pypi(request):
with mock.patch('xmlrpc.client.ServerProxy') as proxy:
proxy().changelog_since_serial.return_value = []
yield proxy
@pytest.fixture()
def mock_signal(request):
with mock.patch('signal.signal') as signal:
yield signal
@pytest.fixture()
def mock_context(request, zmq_context):
with mock.patch('piwheels.transport.Context') as ctx_mock:
# Pass thru calls to Context.socket, but ignore everything else (in
# particular, destroy and term calls as we want the testing context to
# stick around)
ctx_mock().socket.side_effect = zmq_context.socket
yield ctx_mock
@pytest.fixture()
def master_thread(request, mock_pypi, mock_context, mock_systemd, mock_signal,
tmpdir, db_url, db, with_schema):
main_thread = None
def _master_thread(args=None):
nonlocal main_thread
if args is None:
args = []
main_thread = Thread(daemon=True, target=main, args=([
'--dsn', db_url,
'--output-path', str(tmpdir.join('output')),
'--status-queue', 'ipc://' + str(tmpdir.join('status-queue')),
'--control-queue', 'ipc://' + str(tmpdir.join('control-queue')),
'--slave-queue', 'ipc://' + str(tmpdir.join('slave-queue')),
'--file-queue', 'ipc://' + str(tmpdir.join('file-queue')),
'--import-queue', 'ipc://' + str(tmpdir.join('import-queue')),
'--log-queue', 'ipc://' + str(tmpdir.join('log-queue')),
] + list(args),))
return main_thread
yield _master_thread
if main_thread is not None and main_thread.is_alive():
with mock_context().socket(
transport.PUSH, protocol=reversed(protocols.master_control)) as control:
control.connect('ipc://' + str(tmpdir.join('control-queue')))
control.send_msg('QUIT')
main_thread.join(10)
assert not main_thread.is_alive()
@pytest.fixture()
def master_control(request, tmpdir, mock_context):
control = mock_context().socket(
transport.PUSH, protocol=reversed(protocols.master_control))
control.connect('ipc://' + str(tmpdir.join('control-queue')))
yield control
control.close()
def test_help(capsys):
with pytest.raises(SystemExit):
main(['--help'])
out, err = capsys.readouterr()
assert out.startswith('usage:')
assert '--pypi-xmlrpc' in out
def test_version(capsys):
with pytest.raises(SystemExit):
main(['--version'])
out, err = capsys.readouterr()
assert out.strip() == __version__
def test_no_root(caplog):
with mock.patch('os.geteuid') as geteuid:
geteuid.return_value = 0
assert main([]) != 0
assert find_message(caplog.records,
message='Master must not be run as root')
def test_quit_control(mock_systemd, master_thread, master_control):
thread = master_thread()
thread.start()
assert mock_systemd._ready.wait(10)
master_control.send_msg('QUIT')
thread.join(10)
assert not thread.is_alive()
def test_system_exit(mock_systemd, master_thread, caplog):
with mock.patch('piwheels.master.PiWheelsMaster.main_loop') as main_loop:
main_loop.side_effect = SystemExit(1)
thread = master_thread()
thread.start()
assert mock_systemd._ready.wait(10)
thread.join(10)
assert not thread.is_alive()
assert find_message(caplog.records, message='shutting down on SIGTERM')
def test_system_ctrl_c(mock_systemd, master_thread, caplog):
with mock.patch('piwheels.master.PiWheelsMaster.main_loop') as main_loop:
main_loop.side_effect = KeyboardInterrupt()
thread = master_thread()
thread.start()
assert mock_systemd._ready.wait(10)
thread.join(10)
assert not thread.is_alive()
assert find_message(caplog.records, message='shutting down on Ctrl+C')
def test_bad_control(mock_systemd, master_thread, master_control, caplog):
thread = master_thread()
thread.start()
assert mock_systemd._ready.wait(10)
master_control.send(b'FOO')
master_control.send_msg('QUIT')
thread.join(10)
assert not thread.is_alive()
assert find_message(caplog.records, message='unable to deserialize data')
def test_status_passthru(tmpdir, mock_context, mock_systemd, master_thread):
with mock_context().socket(transport.PUSH, protocol=protocols.monitor_stats) as int_status, \
mock_context().socket(transport.SUB, protocol=reversed(protocols.monitor_stats)) as ext_status:
ext_status.connect('ipc://' + str(tmpdir.join('status-queue')))
ext_status.subscribe('')
thread = master_thread()
thread.start()
assert mock_systemd._ready.wait(10)
# Wait for the first statistics message (from BigBrother) to get the
# SUB queue working
msg, data = ext_status.recv_msg()
assert msg == 'STATS'
data['builds_count'] = 12345
int_status.connect(const.INT_STATUS_QUEUE)
int_status.send_msg('STATS', data)
# Try several times to read the passed-thru message; other messages
# (like stats from BigBrother) will be sent to ext-status too
for i in range(3):
msg, copy = ext_status.recv_msg()
if msg == 'STATS':
assert copy == data
break
else:
assert False, "Didn't see modified STATS passed-thru"
def test_kill_control(mock_systemd, master_thread, master_control):
with mock.patch('piwheels.master.SlaveDriver.kill_slave') as kill_slave:
thread = master_thread()
thread.start()
assert mock_systemd._ready.wait(10)
master_control.send_msg('KILL', 1)
master_control.send_msg('QUIT')
thread.join(10)
assert not thread.is_alive()
assert kill_slave.call_args == mock.call(1)
def test_pause_resume(mock_systemd, master_thread, master_control, caplog):
thread = master_thread()
thread.start()
assert mock_systemd._ready.wait(10)
master_control.send_msg('PAUSE')
master_control.send_msg('RESUME')
master_control.send_msg('QUIT')
thread.join(10)
assert not thread.is_alive()
assert find_message(caplog.records, message='pausing operations')
assert find_message(caplog.records, message='resuming operations')
def test_new_monitor(mock_systemd, master_thread, master_control, caplog):
with mock.patch('piwheels.master.SlaveDriver.list_slaves') as list_slaves:
thread = master_thread()
thread.start()
assert mock_systemd._ready.wait(10)
master_control.send_msg('HELLO')
master_control.send_msg('QUIT')
thread.join(10)
assert not thread.is_alive()
assert find_message(caplog.records,
message='sending status to new monitor')
assert list_slaves.call_args == mock.call()
def test_debug(mock_systemd, master_thread, master_control, caplog):
thread = master_thread(args=['--debug', 'master.the_scribe',
'--debug', 'master.the_architect'])
thread.start()
assert mock_systemd._ready.wait(10)
master_control.send_msg('QUIT')
thread.join(10)
assert not thread.is_alive()
assert find_message(caplog.records, name='master.the_scribe',
levelname='DEBUG', message='<< QUIT None')
assert find_message(caplog.records, name='master.the_architect',
levelname='DEBUG', message='<< QUIT None')
| 38.983607 | 107 | 0.678616 | 0 | 0 | 2,184 | 0.229605 | 2,274 | 0.239066 | 0 | 0 | 3,080 | 0.323802 |
48c0042b454fab2f52a5d4277d95dcb8ccdc7da6 | 1,254 | py | Python | dibase/rpi/gpio/test/pinid-platformtests.py | ralph-mcardell/dibase-rpi-python | 724c18d1f3c6745b3dddf582ea2272ed4e2df8ac | [
"BSD-3-Clause"
]
| null | null | null | dibase/rpi/gpio/test/pinid-platformtests.py | ralph-mcardell/dibase-rpi-python | 724c18d1f3c6745b3dddf582ea2272ed4e2df8ac | [
"BSD-3-Clause"
]
| null | null | null | dibase/rpi/gpio/test/pinid-platformtests.py | ralph-mcardell/dibase-rpi-python | 724c18d1f3c6745b3dddf582ea2272ed4e2df8ac | [
"BSD-3-Clause"
]
| null | null | null | '''
Part of the dibase.rpi.gpio.test package.
GPIO pin id support classes' platform tests.
Underlying GPIO pin ids are those used by the Linux gpiolib and used
to identify a device's GPIO pins in the Linux sys filesystem GPIO
sub-tree.
Developed by R.E. McArdell / Dibase Limited.
Copyright (c) 2012 Dibase Limited
License: dual: GPL or BSD.
'''
import unittest
import sys
if __name__ == '__main__':
# Add path to directory containing the dibase package directory
sys.path.insert(0, './../../../..')
from dibase.rpi.gpio import pinid
class PinIdRPiPlatforrmTestCases(unittest.TestCase):
def test_0000_get_rpi_major_revision_index_returns_zero_or_positive_int(self):
returned_rev_index = pinid.PinId._get_rpi_major_revision_index()
self.assertIsNotNone(returned_rev_index)
self.assertIsInstance(returned_rev_index,int)
self.assertTrue(returned_rev_index>=0)
def test_0020_PinId_value_of_p1_sda_0_or_2(self):
rev_index = pinid.PinId._get_rpi_major_revision_index()
p1_sda_gpio_id = pinid.PinId.p1_sda()
self.assertTrue((rev_index==0 and p1_sda_gpio_id==0) or p1_sda_gpio_id==2)
if __name__ == '__main__':
unittest.main()
| 33.891892 | 83 | 0.719298 | 604 | 0.481659 | 0 | 0 | 0 | 0 | 0 | 0 | 489 | 0.389952 |
48c34cc81742111643982bd0d218ec0140e5a1a0 | 7,503 | py | Python | analysis.py | tj294/2.5D-RB | f72f79d349ff27a058f503ccca58d63babb298e2 | [
"MIT"
]
| null | null | null | analysis.py | tj294/2.5D-RB | f72f79d349ff27a058f503ccca58d63babb298e2 | [
"MIT"
]
| null | null | null | analysis.py | tj294/2.5D-RB | f72f79d349ff27a058f503ccca58d63babb298e2 | [
"MIT"
]
| null | null | null | """
Analysis code for plotting vertical flux transport and/or a gif of temperature,
velocity and KE from the merged output of a Dedalus Rayleigh-Bérnard code.
Author: Tom Joshi-Cale
"""
# ====================
# IMPORTS
# ====================
import numpy as np
import h5py
import argparse
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import pathlib
import os
import shutil
import time
import imageio
from dedalus import public as de
from dedalus.tools import post
# ====================
# CLA PARSING
# ====================
parser = argparse.ArgumentParser()
parser.add_argument(
"-i", "--input", help="Folder where the processing data is stored", required=True
)
parser.add_argument(
"-t", "--heatmap", help="Plot a gif of the temperature heatmap", action="store_true"
)
parser.add_argument(
"-f", "--flux", help="Plot the average flux contributions", action="store_true"
)
parser.add_argument(
"-k", "--KE", help="Plot the kinetic energy only", action="store_true"
)
args = parser.parse_args()
direc = os.path.normpath(args.input) + "/"
with h5py.File(direc + "run_params/run_params_s1.h5", "r") as f:
a = int(np.array(f["tasks"]["a"]))
y = de.Fourier("y", 256, interval=(0, a), dealias=3 / 2)
z = de.Chebyshev("z", 64, interval=(0, 1), dealias=3 / 2)
y = np.array(y.grid(1))
z = np.array(z.grid(1))
# ====================
# Plot Fluxes
# ====================
if args.flux:
avg_t_start = float(input("Start average at: "))
avg_t_stop = float(input("End average at: "))
with h5py.File(direc + "analysis/analysis_s1.h5", "r") as file:
L_cond_arr = np.array(file["tasks"]["L_cond"])[:, 0]
L_conv_arr = np.array(file["tasks"]["L_conv"])[:, 0]
KE = np.array(file["tasks"]["KE"])[:, 0]
snap_t = np.array(file["scales"]["sim_time"])
if (
(avg_t_start <= snap_t[0])
or (avg_t_start >= snap_t[-1])
or (avg_t_stop <= snap_t[0])
or (avg_t_stop >= snap_t[-1])
):
print(
"Average time period out of simulation range: {} -> {}".format(
snap_t[0], snap_t[-1]
)
)
pass
ASI = np.abs(snap_t - avg_t_start).argmin()
if np.isnan(avg_t_stop):
AEI = -1
else:
AEI = np.abs(snap_t - avg_t_stop).argmin()
avg_t_range = snap_t[AEI] - snap_t[ASI]
print("Averaging between {} and {}".format(snap_t[ASI], snap_t[AEI]))
mean_L_cond = np.mean(np.array(L_cond_arr[ASI:AEI]), axis=0)
mean_L_conv = np.mean(np.array(L_conv_arr[ASI:AEI]), axis=0)
mean_L_tot = mean_L_cond + mean_L_conv
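    # In a statistically steady state the total (conductive + convective) flux
    # should be 1 at every height, so del_L measures the departure from that.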
del_L = np.max(np.abs(1.0 - mean_L_tot))
print("max del_L = {}".format(del_L))
fig = plt.figure(figsize=(6, 6))
KE_ax = fig.add_subplot(311)
KE_ax.plot(snap_t, KE, "k", label="Kinetic Energy")
KE_ax.set_xlabel(r"time [$\tau_\kappa$]")
KE_ax.set_ylabel("KE")
KE_ax.axvspan(
snap_t[ASI], snap_t[AEI], color="r", alpha=0.5, label="Flux averaging"
)
L_ax = fig.add_subplot(212)
L_ax.plot(z, mean_L_cond, "r", linestyle="-", label=r"$L_{cond}$")
L_ax.plot(z, mean_L_conv, "g", linestyle="-", label=r"$L_{conv}$")
L_ax.plot(z, mean_L_tot, "k", ls="-", label=r"$L_{total}$")
L_ax.set_xlabel("z")
L_ax.set_ylabel("L")
L_ax.legend()
plt.savefig(direc + "fluxes.png")
plt.show()
plt.close()
# ====================
# Plot heatmap
# ====================
if args.heatmap:
filenames = []
os.makedirs(direc + "figure", exist_ok=True)
with h5py.File(direc + "analysis/analysis_s1.h5", "r") as file:
KE = np.array(file["tasks"]["KE"])[:, 0]
with h5py.File(direc + "snapshots/snapshots_s1.h5", "r") as file:
T = np.array(file["tasks"]["T"])
v = np.array(file["tasks"]["v"])
w = np.array(file["tasks"]["w"])
snap_t = np.array(file["scales"]["sim_time"])
snap_iter = np.array(file["scales"]["iteration"])
yy, zz = np.meshgrid(y, z)
maxT = np.max(T)
maxV = np.max(v)
maxW = np.max(w)
n_iter = len(T[:, 0:, 0])
start_time = time.time()
print("Plotting {} graphs".format(n_iter))
try:
for i in range(0, int(n_iter)):
fig = plt.figure(figsize=(8, 6))
gs = gridspec.GridSpec(ncols=2, nrows=3, figure=fig)
T_ax = fig.add_subplot(gs[0:2, 0])
v_ax = fig.add_subplot(gs[0, 1])
w_ax = fig.add_subplot(gs[1, 1])
KE_ax = fig.add_subplot(gs[2, :])
if (i % 50 == 0) and (i != 0):
sec_per_frame = (time.time() - start_time) / i
eta = sec_per_frame * (n_iter - i)
print(
"image {}/{} at {:.3f}ips \t| ETA in {}m {}s".format(
i, n_iter, sec_per_frame, int(eta // 60), int(eta % 60)
)
)
fig.suptitle(
"Iteration: {}\n".format(snap_iter[i])
+ r"Sim Time: {:.2f} $\tau_\kappa$".format(snap_t[i])
)
c1 = v_ax.contourf(
yy,
zz,
np.transpose(v[i, :, :]),
levels=np.linspace(np.min(v), maxV),
cmap="coolwarm",
)
c1_bar = fig.colorbar(c1, ax=v_ax)
c1_bar.set_label("v", rotation=0)
v_ax.set_ylabel("z")
v_ax.set_xlabel("y")
v_ax.invert_xaxis()
c2 = w_ax.contourf(
yy,
zz,
np.transpose(w[i, :, :]),
levels=np.linspace(np.min(w), maxW),
cmap="coolwarm",
)
c2_bar = fig.colorbar(c2, ax=w_ax)
c2_bar.set_label("w", rotation=0)
w_ax.set_ylabel("z")
w_ax.set_xlabel("y")
w_ax.invert_xaxis()
c3 = T_ax.contourf(
yy,
zz,
np.transpose(T[i, :, :]),
levels=np.linspace(0, maxT),
cmap="coolwarm",
)
c3_bar = fig.colorbar(c3, ax=T_ax)
c3_bar.set_label("T", rotation=0)
T_ax.set_ylabel("z")
T_ax.set_xlabel("y")
T_ax.invert_xaxis()
KE_ax.plot(snap_t[:i], KE[:i], "k")
KE_ax.set_xlabel(r"time [$\tau_\kappa$]")
KE_ax.set_ylabel("KE")
KE_ax.set_ylim([0, 1.1 * np.max(KE)])
KE_ax.set_xlim([0, np.max(snap_t)])
plt.tight_layout()
plt.savefig(direc + "figure/fig_{:03d}.png".format(i))
filenames.append(direc + "figure/fig_{:03d}.png".format(i))
plt.close()
plt.clf()
except KeyboardInterrupt:
print("ending loop")
print("completed in {:.2f} sec".format(time.time() - start_time))
print("Creating gif...")
with imageio.get_writer(direc + "info.gif", mode="I") as writer:
for filename in filenames:
image = imageio.imread(filename)
writer.append_data(image)
print("Removing raw image files...")
shutil.rmtree(direc + "figure")
if args.KE:
with h5py.File(direc + "analysis/analysis_s1.h5", "r") as f:
KE = np.array(f["tasks"]["KE"])[:, 0]
snap_t = np.array(f["scales"]["sim_time"])
fig = plt.figure(figsize=(6, 4))
ax = fig.add_subplot(111)
ax.plot(snap_t, KE, "k")
ax.set_xlabel(r"time [$\tau_\kappa$]")
ax.set_ylabel("KE")
plt.show()
plt.close()
print("done.")
| 31.52521 | 88 | 0.53432 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,655 | 0.220549 |
48c388d2a91f2301d0f59df1f50eb64349cced6a | 2,104 | py | Python | direct_gd_predict/hash-profile.py | wac/meshop | ea5703147006e5e85617af897e1d1488e6f29f32 | [
"0BSD"
]
| 1 | 2016-05-08T14:54:31.000Z | 2016-05-08T14:54:31.000Z | direct_gd_predict/hash-profile.py | wac/meshop | ea5703147006e5e85617af897e1d1488e6f29f32 | [
"0BSD"
]
| null | null | null | direct_gd_predict/hash-profile.py | wac/meshop | ea5703147006e5e85617af897e1d1488e6f29f32 | [
"0BSD"
]
| null | null | null | import sys
import heapq
import optparse
from bitcount2 import bitcount
hasher={}
profile={}
key_list=[]
key_col=0
def usage():
print sys.argv[0]," [profile_file]"
print " Load the profile lines from profile_file"
print " Hash function uses the features listed in profile_file"
print " and tests for p-value greater/less than or equal (0/1)"
print " Hash all the profiles from stdin"
exit(1)
def do_hash(hasher, p, key_list):
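    # Build a bit-string fingerprint: one bit per key in key_list, set to "1"
    # when the profile's value for that key compares less than the threshold
    # stored in hasher, "0" otherwise.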
hashval=""
# for k, v in hasher.iteritems():
for k in key_list:
v=hasher[k]
if k in p and p[k] < v:
hashval=hashval+"1"
else:
hashval=hashval+"0"
return hashval
sep='|'
key_col=0
#feature_col=1
#score_col=6
in_feature_col=0
in_score_col=1
process_feature_col=1
process_score_col=6
parser = optparse.OptionParser()
#parser.add_option("-n", dest="heapsize",
# default=50, action="store", type="int")
#parser.add_option("-R", "--random", dest="use_random",
# default=False, action="store_true")
(options, args) = parser.parse_args(sys.argv)
if (len(args) > 1):
profile_filename=args[1]
else:
usage()
for line in open(profile_filename):
if line[0]=='#':
continue
tuples=line.strip().split(sep)
key=tuples[in_feature_col]
key_list.append(key)
hasher[key]=tuples[in_score_col]
curr_profile={}
old_key=""
for line in sys.stdin:
line=line.strip()
if line[0]=='#':
print line
continue
tuples=line.split(sep)
curr_key=tuples[key_col]
if not old_key:
old_key=curr_key
if not old_key==curr_key:
hashval=do_hash(hasher, curr_profile, key_list)
hashval_int=int(hashval, 2)
print old_key+sep+hashval+sep+str(hashval_int)+sep+str(bitcount(hashval_int))
curr_profile={}
old_key=curr_key
curr_profile[tuples[process_feature_col]]=tuples[process_score_col]
hashval=do_hash(hasher, curr_profile, key_list)
hashval_int=int(hashval, 2)
print old_key+sep+hashval+sep+str(hashval_int)+sep+str(bitcount(hashval_int))
| 23.120879 | 85 | 0.65827 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 504 | 0.239544 |
48c5df022af8f3cc4834d472772e95a600e0b3cc | 3,804 | py | Python | sdk/eventgrid/azure-eventgrid/azure/eventgrid/aio/_publisher_client_async.py | conniey/azure-sdk-for-python | f779de8e53dbec033f98f976284e6d9491fd60b3 | [
"MIT"
]
| 2 | 2019-05-17T21:24:53.000Z | 2020-02-12T11:13:42.000Z | sdk/eventgrid/azure-eventgrid/azure/eventgrid/aio/_publisher_client_async.py | conniey/azure-sdk-for-python | f779de8e53dbec033f98f976284e6d9491fd60b3 | [
"MIT"
]
| null | null | null | sdk/eventgrid/azure-eventgrid/azure/eventgrid/aio/_publisher_client_async.py | conniey/azure-sdk-for-python | f779de8e53dbec033f98f976284e6d9491fd60b3 | [
"MIT"
]
| 2 | 2020-05-21T22:51:22.000Z | 2020-05-26T20:53:01.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, TYPE_CHECKING
from azure.core import AsyncPipelineClient
from msrest import Deserializer, Serializer
from .._models import CloudEvent, EventGridEvent, CustomEvent
from .._helpers import _get_topic_hostname_only_fqdn, _get_authentication_policy, _is_cloud_event
from azure.core.pipeline.policies import AzureKeyCredentialPolicy
from azure.core.credentials import AzureKeyCredential
from .._generated.aio import EventGridPublisherClient as EventGridPublisherClientAsync
from .. import _constants as constants
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Union, Dict, List
SendType = Union[
CloudEvent,
EventGridEvent,
CustomEvent,
Dict,
List[CloudEvent],
List[EventGridEvent],
List[CustomEvent],
List[Dict]
]
class EventGridPublisherClient(object):
"""Asynchronous EventGrid Python Publisher Client.
:param str topic_hostname: The topic endpoint to send the events to.
:param credential: The credential object used for authentication which implements SAS key authentication or SAS token authentication.
:type credential: Union[~azure.core.credentials.AzureKeyCredential, azure.eventgrid.EventGridSharedAccessSignatureCredential]
"""
def __init__(self, topic_hostname, credential, **kwargs):
# type: (str, Union[AzureKeyCredential, EventGridSharedAccessSignatureCredential], Any) -> None
auth_policy = _get_authentication_policy(credential)
self._client = EventGridPublisherClientAsync(authentication_policy=auth_policy, **kwargs)
topic_hostname = _get_topic_hostname_only_fqdn(topic_hostname)
self._topic_hostname = topic_hostname
async def send(self, events, **kwargs):
# type: (SendType) -> None
"""Sends event data to topic hostname specified during client initialization.
:param events: A list or an instance of CloudEvent/EventGridEvent/CustomEvent to be sent.
:type events: SendType
:keyword str content_type: The type of content to be used to send the events.
Has default value "application/json; charset=utf-8" for EventGridEvents, with "cloudevents-batch+json" for CloudEvents
:rtype: None
:raises: :class:`ValueError`, when events do not follow specified SendType.
"""
if not isinstance(events, list):
events = [events]
if all(isinstance(e, CloudEvent) for e in events) or all(_is_cloud_event(e) for e in events):
kwargs.setdefault("content_type", "application/cloudevents-batch+json; charset=utf-8")
await self._client.publish_cloud_event_events(self._topic_hostname, events, **kwargs)
elif all(isinstance(e, EventGridEvent) for e in events) or all(isinstance(e, dict) for e in events):
kwargs.setdefault("content_type", "application/json; charset=utf-8")
await self._client.publish_events(self._topic_hostname, events, **kwargs)
elif all(isinstance(e, CustomEvent) for e in events):
serialized_events = [dict(e) for e in events]
await self._client.publish_custom_event_events(self._topic_hostname, serialized_events, **kwargs)
else:
raise ValueError("Event schema is not correct.")
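# Example usage (illustrative sketch; the endpoint and key below are placeholders):
#   client = EventGridPublisherClient("<topic>.<region>.eventgrid.azure.net", AzureKeyCredential("<key>"))
#   await client.send(CloudEvent(source="/my/source", type="My.Sample.Event", data={"hello": "world"}))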
| 49.402597 | 137 | 0.701367 | 2,468 | 0.648791 | 0 | 0 | 0 | 0 | 1,574 | 0.413775 | 1,711 | 0.44979 |
48c82cf824c0f047f355b4e4cd11359596e54a76 | 5,037 | py | Python | multi_tool.py | zbigos/multi_project_tools | cb9996d0fea0c2c763054ad5f78e904a68b9c80e | [
"Apache-2.0"
]
| null | null | null | multi_tool.py | zbigos/multi_project_tools | cb9996d0fea0c2c763054ad5f78e904a68b9c80e | [
"Apache-2.0"
]
| null | null | null | multi_tool.py | zbigos/multi_project_tools | cb9996d0fea0c2c763054ad5f78e904a68b9c80e | [
"Apache-2.0"
]
| null | null | null | #!/usr/bin/env python3
import logging, sys, argparse
from collect import Collection
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="test a project repo")
parser.add_argument('--force-delete', help='instead of aborting on existing files, delete them', action='store_const', const=True)
subparsers = parser.add_subparsers(help='help for subcommand', dest="command")
parser.add_argument('--config', help="the config file listing all project directories", default='projects.yaml')
parser.add_argument('--local-config', help="the local environment config file", default='local.yaml')
parser.add_argument('--project', help="just run for a single project, supply project ID", type=int)
parser.add_argument('--test-module', help="run the module's test", action='store_const', const=True)
parser.add_argument('--prove-wrapper', help="check the wrapper proof", action='store_const', const=True)
parser.add_argument('--test-caravel', help="check the caravel test", action='store_const', const=True)
parser.add_argument('--test-gds', help="check the gds", action='store_const', const=True)
parser.add_argument('--test-lvs', help="check the gds against powered verilog", action='store_const', const=True)
parser.add_argument('--test-tristate-z', help="check outputs are z when not active", action='store_const', const=True)
parser.add_argument('--test-ports', help="check ports defined in yaml match the verilog", action='store_const', const=True)
parser.add_argument('--test-git', help="check gitsha on disk matches the config", action='store_const', const=True)
parser.add_argument('--test-all', help="run all the checks for each project", action='store_const', const=True)
parser.add_argument('--test-from', help="run all the checks for all projects with id equal or more than the given id", type=int)
parser.add_argument('--openram', help="use OpenRAM - instantiate the bridge, wrapper and do the wiring", action='store_const', const=True)
parser.add_argument('--clone-shared-repos', help="clone shared repos defined in projects.yaml", action='store_const', const=True)
parser.add_argument('--clone-repos', help="git clone the repo", action='store_const', const=True)
parser.add_argument('--create-openlane-config', help="create the OpenLane & caravel_user_project config", action='store_const', const=True)
parser.add_argument('--gate-level', help="create the caravel includes file with gate level includes", action='store_const', const=True)
parser.add_argument('--copy-project', help="copy project's RTL and tests to correct locations in caravel_user_project", action='store_const', const=True)
parser.add_argument('--copy-gds', help="copy the projects GDS and LEF files", action='store_const', const=True)
parser.add_argument('--generate-doc', help="generate a index.md file with information about each project", action='store_const', const=True)
parser.add_argument('--dump-hash', help="print current commit hash of each project along with author and title", action='store_const', const=True)
parser.add_argument('--fill', help="for testing, repeat the given projects this number of times", type=int)
parser.add_argument('--annotate-image', help="annotate the multi_macro.png image generated by klayout", action='store_const', const=True)
parser.add_argument('--dump-macro-position', help="use the macro.cfg + gds to create a list of positions and sizes", action='store_const', const=True)
parser.add_argument('--layout-tool', help="run the manual layout tool on current designs", action='store_const', const=True)
parser.add_argument('--layout-tool-downscale', help="scale factor for layout tool", type=int)
args = parser.parse_args()
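    # Example invocation (illustrative): ./multi_tool.py --config projects.yaml --test-all --copy-gds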
# setup log
log_format = logging.Formatter('%(asctime)s - %(module)-15s - %(levelname)-8s - %(message)s')
# configure the client logging
log = logging.getLogger('')
    # the level set on the root logger gates every handler, so set it here
log.setLevel(logging.INFO)
# create console handler and set level to info
ch = logging.StreamHandler(sys.stdout)
# create formatter for console
ch.setFormatter(log_format)
log.addHandler(ch)
collection = Collection(args)
# run any tests specified by arguments
collection.run_tests()
if args.layout_tool:
collection.launch_layout_tool(args.layout_tool_downscale)
# create all the OpenLane config for the user collection wrapper
if args.create_openlane_config:
collection.create_openlane_config()
# copy gds to correct place
if args.copy_gds:
collection.copy_all_gds()
if args.copy_project:
collection.copy_all_project_files_to_caravel()
# generate doc
if args.generate_doc:
collection.generate_docs()
# image
if args.annotate_image:
collection.annotate_image()
# dump macro pos - wip for assisted macro placement
if args.dump_macro_position:
collection.get_macro_pos()
| 59.258824 | 157 | 0.728608 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,548 | 0.505857 |
48c98054b3a6ea0035473ed0534ee80b41dcebb8 | 4,327 | py | Python | benchbuild/projects/benchbuild/bots.py | ognarb/benchbuild | ad93ae0666e3100fd36c697793c0db1ba52938d0 | [
"MIT"
]
| null | null | null | benchbuild/projects/benchbuild/bots.py | ognarb/benchbuild | ad93ae0666e3100fd36c697793c0db1ba52938d0 | [
"MIT"
]
| null | null | null | benchbuild/projects/benchbuild/bots.py | ognarb/benchbuild | ad93ae0666e3100fd36c697793c0db1ba52938d0 | [
"MIT"
]
| null | null | null | from plumbum import local
from benchbuild import project
from benchbuild.utils import compiler, download, run, wrapping
from benchbuild.utils.cmd import make, mkdir
@download.with_git("https://github.com/bsc-pm/bots", limit=5)
class BOTSGroup(project.Project):
"""
Barcelona OpenMP Task Suite.
Barcelona OpenMP Task Suite is a collection of applications that allow
to test OpenMP tasking implementations and compare its behaviour under
certain circumstances: task tiedness, throttle and cut-offs mechanisms,
single/multiple task generators, etc.
Alignment: Aligns sequences of proteins.
FFT: Computes a Fast Fourier Transformation.
Floorplan: Computes the optimal placement of cells in a floorplan.
Health: Simulates a country health system.
NQueens: Finds solutions of the N Queens problem.
Sort: Uses a mixture of sorting algorithms to sort a vector.
SparseLU: Computes the LU factorization of a sparse matrix.
Strassen: Computes a matrix multiply with Strassen's method.
"""
DOMAIN = 'bots'
GROUP = 'bots'
VERSION = 'HEAD'
path_dict = {
"alignment": "serial/alignment",
"fft": "serial/fft",
"fib": "serial/fib",
"floorplan": "serial/floorplan",
"health": "serial/health",
"knapsack": "serial/knapsack",
"nqueens": "serial/nqueens",
"sort": "serial/sort",
"sparselu": "serial/sparselu",
"strassen": "serial/strassen",
"uts": "serial/uts"
}
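    # Input files shipped with BOTS for the benchmarks that take a workload file.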
input_dict = {
"alignment": ["prot.100.aa", "prot.20.aa"],
"floorplan": ["input.15", "input.20", "input.5"],
"health": ["large.input", "medium.input", "small.input", "test.input"],
"knapsack": [
"knapsack-012.input", "knapsack-016.input", "knapsack-020.input",
"knapsack-024.input", "knapsack-032.input", "knapsack-036.input",
"knapsack-040.input", "knapsack-044.input", "knapsack-048.input",
"knapsack-064.input", "knapsack-096.input", "knapsack-128.input"
],
"uts": [
"huge.input", "large.input", "medium.input", "small.input",
"test.input", "tiny.input"
]
}
SRC_FILE = "bots.git"
def compile(self):
self.download()
makefile_config = local.path(self.src_file) / "config" / "make.config"
clang = compiler.cc(self)
with open(makefile_config, 'w') as config:
lines = [
"LABEL=benchbuild",
"ENABLE_OMPSS=",
"OMPSSC=",
"OMPC=",
"CC={cc}",
"OMPSSLINK=",
"OMPLINK={cc} -fopenmp",
"CLINK={cc}",
"OPT_FLAGS=",
"CC_FLAGS=",
"OMPC_FLAGS=",
"OMPSSC_FLAGS=",
"OMPC_FINAL_FLAGS=",
"OMPSSC_FINAL_FLAG=",
"CLINK_FLAGS=",
"OMPLINK_FLAGS=",
"OMPSSLINK_FLAGS=",
]
lines = [l.format(cc=clang) + "\n" for l in lines]
config.writelines(lines)
mkdir(local.path(self.src_file) / "bin")
with local.cwd(self.src_file):
run.run(make["-C", self.path_dict[self.name]])
def run_tests(self, runner):
binary_name = "{name}.benchbuild.serial".format(name=self.name)
binary_path = local.path(self.src_file) / "bin" / binary_name
exp = wrapping.wrap(binary_path, self)
if self.name in self.input_dict:
for test_input in self.input_dict[self.name]:
input_file = local.path(
self.src_file) / "inputs" / self.name / test_input
runner(exp["-f", input_file])
else:
runner(exp)
class Alignment(BOTSGroup):
NAME = 'alignment'
class FFT(BOTSGroup):
NAME = 'fft'
class Fib(BOTSGroup):
NAME = 'fib'
class FloorPlan(BOTSGroup):
NAME = 'floorplan'
class Health(BOTSGroup):
NAME = 'health'
class Knapsack(BOTSGroup):
NAME = 'knapsack'
class NQueens(BOTSGroup):
NAME = 'nqueens'
class Sort(BOTSGroup):
NAME = 'sort'
class SparseLU(BOTSGroup):
NAME = 'sparselu'
class Strassen(BOTSGroup):
NAME = 'strassen'
class UTS(BOTSGroup):
NAME = 'uts'
| 28.655629 | 79 | 0.580079 | 4,063 | 0.938988 | 0 | 0 | 3,637 | 0.840536 | 0 | 0 | 1,971 | 0.455512 |
48c9b882f54e25efdd1d54210cde93be6398663c | 668 | py | Python | clients/python/setup.py | timtadh/queued | 9c46a49a73103de9a929718c223326149cb9accd | [
"BSD-3-Clause"
]
| 4 | 2015-12-29T05:07:50.000Z | 2022-02-10T20:27:40.000Z | clients/python/setup.py | timtadh/queued | 9c46a49a73103de9a929718c223326149cb9accd | [
"BSD-3-Clause"
]
| 1 | 2015-04-16T15:56:26.000Z | 2015-04-16T15:56:26.000Z | clients/python/setup.py | timtadh/queued | 9c46a49a73103de9a929718c223326149cb9accd | [
"BSD-3-Clause"
]
| 8 | 2015-02-24T12:05:16.000Z | 2022-02-10T20:27:41.000Z | try:
from setuptools import setup
setup # quiet "redefinition of unused ..." warning from pyflakes
# arguments that distutils doesn't understand
setuptools_kwargs = {
'install_requires': [
],
'provides': ['queued'],
'zip_safe': False
}
except ImportError:
from distutils.core import setup
setuptools_kwargs = {}
setup(name='queued',
      version='1.1',
description=(
'A client for queued'
),
author='Tim Henderson',
author_email='[email protected]',
url='queued.org',
packages=['queued',],
platforms=['unix'],
scripts=[],
**setuptools_kwargs
)
| 23.034483 | 69 | 0.586826 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 234 | 0.350299 |
48ca7f075d0516343cadcc4c408fff80c48e1083 | 11,129 | py | Python | sym_executor.py | zhangzhenghsy/fiber | af1a8c8b01d4935849df73b01ccfeccbba742205 | [
"BSD-2-Clause"
]
| null | null | null | sym_executor.py | zhangzhenghsy/fiber | af1a8c8b01d4935849df73b01ccfeccbba742205 | [
"BSD-2-Clause"
]
| null | null | null | sym_executor.py | zhangzhenghsy/fiber | af1a8c8b01d4935849df73b01ccfeccbba742205 | [
"BSD-2-Clause"
]
| null | null | null | #!/usr/bin/python
import angr,simuvex
import sys,os
import time
from utils_sig import *
from sym_tracer import Sym_Tracer
from sig_recorder import Sig_Recorder
#This class is responsible for performing symbolic execution.
class Sym_Executor(object):
def __init__(self,options=None,dbg_out=False):
self.tracer = None
self.recorder = None
self.dbg_out = dbg_out
self._whitelist = set()
self._all_bbs = set()
self._num_find = 10
self.options = options
def _get_initial_state(self,proj,start,targetfunc=None):
if proj is None:
return None
st = proj.factory.blank_state(addr=start,symbolic_sp=True)
# print st.arch.registers.keys()
# We can customize the symbolic execution by setting various options in the state
# for a full list of available options:
# https://github.com/angr/simuvex/blob/master/simuvex/s_options.py
# E.g. st.options.add(simuvex.o.LAZY_SOLVES) ('options' is a set)
# CALLLESS to do intra-procedure analysis
st.options.add(simuvex.o.CALLLESS)
if targetfunc is not None:
st.options.add(str(hex(targetfunc)))
# To prevent the engine from discarding log history
st.options.add(simuvex.o.TRACK_ACTION_HISTORY)
if self.options.get('simplify_ast',True):
st.options.add(simuvex.o.SIMPLIFY_EXPRS)
st.options.add(simuvex.o.SIMPLIFY_MEMORY_READS)
st.options.add(simuvex.o.SIMPLIFY_MEMORY_WRITES)
st.options.add(simuvex.o.SIMPLIFY_EXIT_GUARD)
#TODO: Find a way to deal with function side-effect (i.e. a function call will output to a parameter, then the parameter will be used in a condition later)
st.options.add(simuvex.o.IGNORE_EXIT_GUARDS)
st.options.add(simuvex.o.IGNORE_MERGE_CONDITIONS)
st.options.add(simuvex.o.DONT_MERGE_UNCONSTRAINED)
#Use customized addr conc strategy
st.memory.read_strategies = [angr.concretization_strategies.SimConcretizationStrategyHZ(limit=3)]
st.memory.write_strategies = [angr.concretization_strategies.SimConcretizationStrategyHZ(limit=3)]
#print st.options
return st
#Include all the BBs along the path from start to ends in the cfg into the whitelist.
#The CFG here is CFGAcc.
def _prep_whitelist(self,cfg,cfg_bounds,ends,start=None,proj=None,sym_tab=None,cfg2=None,cfg_bounds2=None,ends2=None,start2=None,func_cfg=None):
#print "cfg:", [hex(n.addr) for n in cfg.nodes()]
#print cfg.functions[cfg_bounds[0]]
if cfg is None or cfg_bounds is None or len(cfg_bounds) < 2:
print '_prep_whitelist(): Incomplete CFG information'
return
#for addr in cfg2.functions:
# print cfg2.functions[addr]
if cfg2 is not None:
func_cfg2 = get_func_cfg(cfg2,cfg_bounds2[0],proj=proj,sym_tab=sym_tab)
if func_cfg is None:
print 'No func_cfg is available at %x' % cfg_bounds[0]
return
start = cfg_bounds[0]
self._all_bbs = set([x.addr for x in func_cfg.nodes()])
#print '_all_bbs: ' + str([hex(x) for x in list(self._all_bbs)])
#print '_all_bbs2: '+str([hex(x) for x in list(set([x.addr for x in func_cfg2.nodes()]))])
if cfg2 is not None:
self._all_bbs = self._all_bbs.union(set([x.addr for x in func_cfg2.nodes()]))
self._whitelist = get_node_addrs_between(func_cfg,start,ends,from_func_start=(start == cfg_bounds[0]))
if cfg2 is not None:
self._whitelist= self._whitelist.union(get_node_addrs_between(func_cfg2,start2,ends2,from_func_start=(start2 == cfg_bounds2[0])))
l = list(self._whitelist)
l.sort()
#print 'whitelist: ' + str([hex(x) for x in l])
l = list(self._all_bbs)
l.sort()
#print '_all_bbs: ' + str([hex(x) for x in l])
if self.dbg_out:
l = list(self._whitelist)
l.sort()
print 'whitelist: ' + str([hex(x) for x in l])
return
    #Why we use an always-'False' find_func here:
#(1)We rely on an accurate whitelist and all the nodes in the list should be explored, so we don't want
#to stop at a certain node.
#(2)With this find_func, basically we will have no states in the 'found' stash in the end, but that's OK
#because all the things we want to do will be done along the symbolic execution process.
def _find_func(self,p):
return False
def _avoid_func(self,p):
#print 'avoid_func: ' + str(hex(p.addr)) + ' ' + str(p.addr in whitelist)
#One problem is that, sometimes p.addr is in the middle of a certain BB, while in whitelist we only have start addresses of BBs.
#Currently for these cases, we will let it continue to execute because it will align to the BB starts later.
with open('testexplorenodes','a') as f:
f.write(str(hex(p.addr))+'\n')
return False if p.addr not in self._all_bbs else (not p.addr in self._whitelist)
#This is basically the 'hook_complete' used in 'explorer' technique, simply deciding whether num_find has been reached.
def _vt_terminator(self,smg):
return len(smg.stashes['found']) >= self._num_find
def _prep_veritesting_options(self,find=None,avoid=None,num_find=10):
if find is None:
find = self._find_func
if avoid is None:
avoid = self._avoid_func
#We need to construct an 'explorer' as an 'exploration_technique' used in the internal SimManager of Veritesting,
#which is basically the same one as used in normal DSE SimManager (by invoking 'explore()' method)
#NOTE that the Veritesting mode will use a separate SimManager, so we have to make TWO 'explorer'.
exp_tech = angr.exploration_techniques.Explorer(find=find,avoid=avoid,num_find=num_find)
veritesting_options = {}
        #NOTE: 'loop_unrolling_limit' is checked with '>=' instead of '>', which means that with a value of '1' no loop body is entered at all.
        #However, we want exactly ONE loop iteration, so we actually use '2' here.
veritesting_options['loop_unrolling_limit'] = 2
veritesting_options['tech'] = exp_tech
#NOTE that original 'explorer' technique will set a 'hook_complete' in SimManager, which will be passed from 'run()' to 'step()'
#as a 'until_func', however, Veritesting will not invoke 'run()', instead, it calls 'step()' directly, so this hook is basically
#invalidated. To deal with this, we provide a 'terminator' to Veritesting, which will terminate Veritesting when len(stashes[found]) > num_find
veritesting_options['terminator'] = self._vt_terminator
return veritesting_options
#Do the symbolic execution on the given CFG, from start to target, with Veritesting and Whitelist mechanisms.
#Params:
#proj: the angr project.
#states: if it's None, creates a default initial state@start, if start is None, then @cfg_bounds[0].
#cfg: cfg_accurate.
#cfg_bounds: a 2-element list, specifying the area of the target function (to be executed) in the cfg.
#start: Where to start the symbolic execution? Must be within the cfg_bounds.
#targets: Where to end the symbolic execution? Must be within the cfg_bounds. Can specify multiple targets in a list.
#Ret:
#The resulting SimManager.
def try_sym_exec(self,proj,cfg,cfg_bounds,targets,states=None,start=None,new_tracer=False,tracer=None,new_recorder=False,recorder=None,sym_tab=None,sigs=None,cfg2=None,cfg_bounds2=None,targets2=None, start2=None,func_cfg=None,num_find=10):
#print "start1: ", hex(start)
#print "start2: ", hex(start2)
if cfg is None or cfg_bounds is None or len(cfg_bounds) < 2:
print 'No CFG information available for sym exec.'
return None
#This is the start point of sym exec.
st = start if start is not None else cfg_bounds[0]
if start2 is not None:
st=start2
#Fill initial state.
#print 'hex(start)', hex(start)
#print 'str(hex(start))', str(hex(start))
if states is None:
if start2 is not None:
init_state = self._get_initial_state(proj,st,start)
#init_state = self._get_initial_state(proj,start)
else:
init_state = self._get_initial_state(proj,st)
states = [init_state]
#Whether we need to create a new Sym_Tracer to trace the symbolic execution
if new_tracer:
self.tracer = Sym_Tracer(symbol_table=sym_tab,dbg_out=self.dbg_out)
#for example:<class 'sym_tracer.Sym_Tracer'>: {'addr_collision': False, 'dbg_out': True, 'symbol_table': <sym_table.Sym_Table object at 0x7fffeba54890>, '_addr_conc_buf': [], '_sym_map': {}}
#Clear any remaining breakpoints
self.tracer.stop_trace(states)
self.tracer.trace(states)
else:
self.tracer = tracer
#Whether we need to create a new Sig_Recorder
if new_recorder:
if sigs is None:
print 'You must provide sigs if you want to use new recorder'
return
if self.tracer is None:
print 'You must provide tracer or specify new_tracer flag if you want to use new recorder'
return
            self.recorder = Sig_Recorder(sigs,self.tracer,dbg_out=self.dbg_out)
#Clear any remaining breakpoints
self.recorder.stop_record(states)
#Record structural information (nodes and their relationships) and semantic information of 'root'
#instructions with per-instruction breakpoint, the structural information has already been partly recorded in the initial signature.
self.recorder.record(states)
else:
self.recorder = recorder
        #Set the whitelist of basic blocks; we only want to include the BBs that lie along the paths from st to targets.
self._prep_whitelist(cfg,cfg_bounds,targets,start,proj=proj,sym_tab=sym_tab,cfg2=cfg2,cfg_bounds2=cfg_bounds2,ends2=targets2,start2=start2,func_cfg=func_cfg)
self._num_find = num_find
#Set the VeriTesting options
veritesting_options = self._prep_veritesting_options(num_find=self._num_find)
#Construct the simulation execution manager
smg = proj.factory.simgr(thing=states, veritesting=True, veritesting_options=veritesting_options)
#TODO: Do we still need to use loop limiter for the main DSE SimManager since Veritesting has already got a built-in loop limiter?
#limiter = angr.exploration_techniques.looplimiter.LoopLimiter(count=0, discard_stash='spinning')
#smg.use_technique(limiter)
t0 = time.time()
smg.explore(find=self._find_func, avoid=self._avoid_func, num_find=self._num_find)
print ['%s:%d ' % (name,len(stash)) for name, stash in smg.stashes.items()]
print 'Time elapsed: ' + str(time.time() - t0)
return smg
| 54.287805 | 243 | 0.673556 | 10,905 | 0.979872 | 0 | 0 | 0 | 0 | 0 | 0 | 5,151 | 0.462845 |
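To make the whitelist gating in _avoid_func above concrete, here is a tiny self-contained sketch; the addresses are invented for illustration. An address that does not start a known basic block is never avoided, and a known block is avoided only when it lies off the start-to-target paths.

def should_avoid(addr, all_bbs, whitelist):
    # Mirror of the _avoid_func logic: only known BB starts are checked against the whitelist.
    if addr not in all_bbs:
        return False
    return addr not in whitelist

all_bbs = {0x1000, 0x1010, 0x1020, 0x1030}   # hypothetical BB start addresses
whitelist = {0x1000, 0x1020}                 # hypothetical on-path blocks
assert should_avoid(0x1008, all_bbs, whitelist) is False  # mid-block address, keep executing
assert should_avoid(0x1010, all_bbs, whitelist) is True   # off-path block, avoid
assert should_avoid(0x1020, all_bbs, whitelist) is False  # on-path block, explore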
48ca956ecd40df08896e125936a630042abd2d96 | 2,228 | py | Python | apim-migration-testing-tool/Python/venv/lib/python3.6/site-packages/pymysql/constants/CR.py | tharindu1st/apim-migration-resources | dd68aa8c53cf310392bb72e699dd24c57b109cfb | [
"Apache-2.0"
]
| 1,573 | 2015-01-01T07:19:06.000Z | 2022-03-30T09:06:06.000Z | apim-migration-testing-tool/Python/venv/lib/python3.6/site-packages/pymysql/constants/CR.py | tharindu1st/apim-migration-resources | dd68aa8c53cf310392bb72e699dd24c57b109cfb | [
"Apache-2.0"
]
| 1,691 | 2015-01-03T11:03:23.000Z | 2022-03-30T07:27:28.000Z | apim-migration-testing-tool/Python/venv/lib/python3.6/site-packages/pymysql/constants/CR.py | tharindu1st/apim-migration-resources | dd68aa8c53cf310392bb72e699dd24c57b109cfb | [
"Apache-2.0"
]
| 895 | 2015-01-03T19:56:15.000Z | 2022-03-18T18:30:57.000Z | # flake8: noqa
# errmsg.h
CR_ERROR_FIRST = 2000
CR_UNKNOWN_ERROR = 2000
CR_SOCKET_CREATE_ERROR = 2001
CR_CONNECTION_ERROR = 2002
CR_CONN_HOST_ERROR = 2003
CR_IPSOCK_ERROR = 2004
CR_UNKNOWN_HOST = 2005
CR_SERVER_GONE_ERROR = 2006
CR_VERSION_ERROR = 2007
CR_OUT_OF_MEMORY = 2008
CR_WRONG_HOST_INFO = 2009
CR_LOCALHOST_CONNECTION = 2010
CR_TCP_CONNECTION = 2011
CR_SERVER_HANDSHAKE_ERR = 2012
CR_SERVER_LOST = 2013
CR_COMMANDS_OUT_OF_SYNC = 2014
CR_NAMEDPIPE_CONNECTION = 2015
CR_NAMEDPIPEWAIT_ERROR = 2016
CR_NAMEDPIPEOPEN_ERROR = 2017
CR_NAMEDPIPESETSTATE_ERROR = 2018
CR_CANT_READ_CHARSET = 2019
CR_NET_PACKET_TOO_LARGE = 2020
CR_EMBEDDED_CONNECTION = 2021
CR_PROBE_SLAVE_STATUS = 2022
CR_PROBE_SLAVE_HOSTS = 2023
CR_PROBE_SLAVE_CONNECT = 2024
CR_PROBE_MASTER_CONNECT = 2025
CR_SSL_CONNECTION_ERROR = 2026
CR_MALFORMED_PACKET = 2027
CR_WRONG_LICENSE = 2028
CR_NULL_POINTER = 2029
CR_NO_PREPARE_STMT = 2030
CR_PARAMS_NOT_BOUND = 2031
CR_DATA_TRUNCATED = 2032
CR_NO_PARAMETERS_EXISTS = 2033
CR_INVALID_PARAMETER_NO = 2034
CR_INVALID_BUFFER_USE = 2035
CR_UNSUPPORTED_PARAM_TYPE = 2036
CR_SHARED_MEMORY_CONNECTION = 2037
CR_SHARED_MEMORY_CONNECT_REQUEST_ERROR = 2038
CR_SHARED_MEMORY_CONNECT_ANSWER_ERROR = 2039
CR_SHARED_MEMORY_CONNECT_FILE_MAP_ERROR = 2040
CR_SHARED_MEMORY_CONNECT_MAP_ERROR = 2041
CR_SHARED_MEMORY_FILE_MAP_ERROR = 2042
CR_SHARED_MEMORY_MAP_ERROR = 2043
CR_SHARED_MEMORY_EVENT_ERROR = 2044
CR_SHARED_MEMORY_CONNECT_ABANDONED_ERROR = 2045
CR_SHARED_MEMORY_CONNECT_SET_ERROR = 2046
CR_CONN_UNKNOW_PROTOCOL = 2047
CR_INVALID_CONN_HANDLE = 2048
CR_SECURE_AUTH = 2049
CR_FETCH_CANCELED = 2050
CR_NO_DATA = 2051
CR_NO_STMT_METADATA = 2052
CR_NO_RESULT_SET = 2053
CR_NOT_IMPLEMENTED = 2054
CR_SERVER_LOST_EXTENDED = 2055
CR_STMT_CLOSED = 2056
CR_NEW_STMT_METADATA = 2057
CR_ALREADY_CONNECTED = 2058
CR_AUTH_PLUGIN_CANNOT_LOAD = 2059
CR_DUPLICATE_CONNECTION_ATTR = 2060
CR_AUTH_PLUGIN_ERR = 2061
CR_ERROR_LAST = 2061
| 32.289855 | 47 | 0.745063 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 24 | 0.010772 |
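As a usage sketch (not part of this module, and the reconnect pattern is an assumption about caller code, not something pymysql prescribes): callers typically compare the first element of an OperationalError's args against these CR codes, for example to reconnect after the server drops the link.

import pymysql
from pymysql.constants import CR

def fetch_with_reconnect(conn, sql):
    # Retry once if the connection was lost (CR_SERVER_GONE_ERROR / CR_SERVER_LOST).
    try:
        with conn.cursor() as cur:
            cur.execute(sql)
            return cur.fetchall()
    except pymysql.err.OperationalError as exc:
        if exc.args[0] not in (CR.CR_SERVER_GONE_ERROR, CR.CR_SERVER_LOST):
            raise
        conn.ping(reconnect=True)  # re-establish the connection, then retry once
        with conn.cursor() as cur:
            cur.execute(sql)
            return cur.fetchall()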
48cccf1158ee9bcd15cefd678338ca10d4234710 | 992 | py | Python | check-challenge.py | gjaiswal108/Check-if-Challenge-problem-added-in-codechef | 74b29725ad38bdf0dc210dbdb67fccf056ec6d8c | [
"Apache-2.0"
]
| null | null | null | check-challenge.py | gjaiswal108/Check-if-Challenge-problem-added-in-codechef | 74b29725ad38bdf0dc210dbdb67fccf056ec6d8c | [
"Apache-2.0"
]
| null | null | null | check-challenge.py | gjaiswal108/Check-if-Challenge-problem-added-in-codechef | 74b29725ad38bdf0dc210dbdb67fccf056ec6d8c | [
"Apache-2.0"
]
| null | null | null | import requests,smtplib,time
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
while(1):
    # Re-fetch the contest page on every iteration so newly added problems are detected.
    r = requests.get('https://www.codechef.com/JUNE19B/')
    if('(Challenge)' in r.text):
s = smtplib.SMTP('smtp.gmail.com', 587)
s.starttls()
s.login("sender_gmail_id", "password")
msg= MIMEMultipart("alternative")
msg["Subject"]="Challenge Problem added"
msg["From"]="sender_gmail_id"
msg["To"]="receiver_gmail_id"
text="I guess challenge problem is added in long challenge,check it on codechef."
html="<h4>I guess challenge problem is added in long challenge,check it on codechef.</h4><br/><a href='https://www.codechef.com/'>Click here to visit. </a>"
msg.attach(MIMEText(html, "html"))
s.sendmail("sender_gmail_id","receiver_gmail_id",msg.as_string())
s.quit()
print('sent')
break
print('Sleeping...')
time.sleep(3600)
print('Trying again...')
| 41.333333 | 164 | 0.647177 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 489 | 0.492944 |
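One design note on the script above: the Gmail credentials are hard-coded placeholders. A common alternative, sketched here with assumed environment-variable names, is to read them from the environment at run time:

import os
import smtplib

sender = os.environ["ALERT_SENDER"]      # assumed variable name
password = os.environ["ALERT_PASSWORD"]  # assumed variable name
with smtplib.SMTP("smtp.gmail.com", 587) as server:
    server.starttls()
    server.login(sender, password)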
48cd5f2495ad481cbf1d4200796edf478513850e | 2,840 | py | Python | applications/tensorflow/click_through_rate/din/test/test_attention_fcn.py | kew96/GraphcoreExamples | 22dc0d7e3755b0a7f16cdf694c6d10c0f91ee8eb | [
"MIT"
]
| null | null | null | applications/tensorflow/click_through_rate/din/test/test_attention_fcn.py | kew96/GraphcoreExamples | 22dc0d7e3755b0a7f16cdf694c6d10c0f91ee8eb | [
"MIT"
]
| null | null | null | applications/tensorflow/click_through_rate/din/test/test_attention_fcn.py | kew96/GraphcoreExamples | 22dc0d7e3755b0a7f16cdf694c6d10c0f91ee8eb | [
"MIT"
]
| null | null | null | # Copyright (c) 2020 Graphcore Ltd. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests covering attention used by the DIN model.
"""
import tensorflow as tf
import unittest
import pytest
import numpy as np
import sys
from pathlib import Path
# Add common module to path
common_path = Path(Path(__file__).absolute().parent.parent.parent)
sys.path.append(str(common_path))
from common.utils import din_attention
from din.din_model import DIN
seed = 3
tf.set_random_seed(seed)
@pytest.mark.category1
@pytest.mark.ipus(1)
class TestDINFCN(unittest.TestCase):
"""Testing att layer"""
@classmethod
def setUpClass(cls):
cls.model_dtype = tf.float32
cls.ATTENTION_SIZE = 1
def test_att_results(self):
# test attention layer output
query_value = np.ones([4, 2], np.float32)
query_value = query_value * 0.8
query_inp = tf.placeholder(shape=[4, 2], dtype='float32')
facts_value = np.ones([4, 8, 2], np.float32)
facts_value = facts_value * 0.5
facts_inp = tf.placeholder(shape=[4, 8, 2], dtype='float32')
mask_value = np.ones([4, 8], np.float32)
mask_value = mask_value * 0.2
mask_inp = tf.placeholder(shape=[4, 8], dtype='float32')
out = din_attention(query_inp, facts_inp, self.ATTENTION_SIZE, mask_inp)
with tf.compat.v1.Session() as sess:
sess.run(tf.global_variables_initializer())
output = sess.run(out, feed_dict={query_inp: query_value, facts_inp: facts_value, mask_inp: mask_value})
y0 = np.float32(0.5)
y1 = np.float32(0.5)
self.assertAlmostEqual(output[0, 0, 0], y0, delta = 0.01)
        self.assertAlmostEqual(output[0, 0, 1], y1, delta = 0.01)
def test_fcn_results(self):
# test fcn results
inputs_value = np.ones([2, 6, 2], np.float32)
inp = tf.placeholder(shape=[2, 6, 2], dtype='float32')
y_hat = DIN.build_fcn_net(self, inp)
with tf.compat.v1.Session() as sess:
sess.run(tf.global_variables_initializer())
y = sess.run(y_hat, feed_dict={inp: inputs_value})
y0 = np.float32(0.5225718)
y1 = np.float32(0.47742826)
self.assertAlmostEqual(y[0, 0, 0], y0, delta = 0.01)
self.assertAlmostEqual(y[0, 0, 1], y1, delta = 0.01)
| 34.216867 | 116 | 0.669366 | 1,791 | 0.630634 | 0 | 0 | 1,835 | 0.646127 | 0 | 0 | 778 | 0.273944 |
48cd84239fff9070a94f62f2913b39c9eded80ea | 204 | py | Python | shiva/constants.py | tooxie/shiva-server | 4d169aae8d4cb01133f62701b14610695e48c297 | [
"MIT"
]
| 70 | 2015-01-09T15:15:15.000Z | 2022-01-14T09:51:55.000Z | shiva/constants.py | tooxie/shiva-server | 4d169aae8d4cb01133f62701b14610695e48c297 | [
"MIT"
]
| 14 | 2015-01-04T10:08:26.000Z | 2021-12-13T19:35:07.000Z | shiva/constants.py | tooxie/shiva-server | 4d169aae8d4cb01133f62701b14610695e48c297 | [
"MIT"
]
| 19 | 2015-01-02T22:42:01.000Z | 2022-01-14T09:51:59.000Z | # -*- coding: utf-8 -*-
class HTTP:
BAD_REQUEST = 400
UNAUTHORIZED = 401
FORBIDDEN = 403
NOT_FOUND = 404
METHOD_NOT_ALLOWED = 405
CONFLICT = 409
UNSUPPORTED_MEDIA_TYPE = 415
| 17 | 32 | 0.632353 | 177 | 0.867647 | 0 | 0 | 0 | 0 | 0 | 0 | 23 | 0.112745 |
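A brief, hypothetical usage sketch for these status codes inside a Flask view; the resource and repository objects below are invented for illustration and are not Shiva's actual API:

from flask import jsonify
from shiva.constants import HTTP

def get_track(track_id, repository):
    track = repository.get(track_id)
    if track is None:
        return jsonify({'error': 'track not found'}), HTTP.NOT_FOUND
    return jsonify(track.to_dict()), 200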