max_stars_repo_path (string, 4-245 chars) | max_stars_repo_name (string, 7-115 chars) | max_stars_count (int64, 101-368k) | id (string, 2-8 chars) | content (string, 6-1.03M chars)
---|---|---|---|---|
Graphs/cc.py | Prince23598/cs-algorithms | 239 | 12640889 | #!/usr/bin/env python3
# Reference: Rosalind (http://rosalind.info/problems/cc/)
def cc(g):
"""
>>> graph = [[1, 4], [0], [3, 6, 7], [2, 7], [0, 8, 9], [], [2, 10], \
[2, 3, 10, 11], [4, 9], [4, 8], [6, 7, 11], [10, 7]]
>>> cc(graph)
3
"""
def dfs(g, t, seen):
for v in g[t]:
if v not in seen:
seen.add(v)
dfs(g, v, seen)
seen = set()
cnt = 0
for v in range(len(g)):
if v in seen:
continue
dfs(g, v, seen)
cnt += 1
return cnt
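# Illustrative alternative (a sketch, not part of the original Rosalind solution): the
# recursive dfs above can hit Python's default recursion limit on large graphs, so an
# equivalent iterative traversal is shown here for comparison.
def cc_iterative(g):
    """
    >>> cc_iterative([[1, 4], [0], [3, 6, 7], [2, 7], [0, 8, 9], [], [2, 10], [2, 3, 10, 11], [4, 9], [4, 8], [6, 7, 11], [10, 7]])
    3
    """
    seen = set()
    cnt = 0
    for v in range(len(g)):
        if v in seen:
            continue
        # explicit stack replaces the recursion
        stack = [v]
        seen.add(v)
        while stack:
            t = stack.pop()
            for w in g[t]:
                if w not in seen:
                    seen.add(w)
                    stack.append(w)
        cnt += 1
    return cnt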
if __name__ == '__main__':
import doctest
doctest.testmod()
with open('cc.txt') as f:
line = f.readline()
n, m = [int(x.strip()) for x in line.strip().split()]
graph = [[] for _ in range(n)]
for edge in range(m):
line = f.readline()
i, j = [int(x.strip()) for x in line.strip().split()]
graph[i-1].append(j-1)
graph[j-1].append(i-1)
value = cc(graph)
print(value)
|
model/carn.py | Gummary/denet | 343 | 12640905 | """
CutBlur
Copyright 2020-present NAVER corp.
MIT license
Referenced from PCARN-pytorch, https://github.com/nmhkahn/PCARN-pytorch
"""
import torch
import torch.nn as nn
from model import ops
class Group(nn.Module):
def __init__(self, num_channels, num_blocks, res_scale=1.0):
super().__init__()
for nb in range(num_blocks):
setattr(self,
"b{}".format(nb+1),
ops.ResBlock(num_channels, res_scale)
)
setattr(self,
"c{}".format(nb+1),
nn.Conv2d(num_channels*(nb+2), num_channels, 1, 1, 0)
)
self.num_blocks = num_blocks
def forward(self, x):
c = out = x
for nb in range(self.num_blocks):
unit_b = getattr(self, "b{}".format(nb+1))
unit_c = getattr(self, "c{}".format(nb+1))
b = unit_b(out)
c = torch.cat([c, b], dim=1)
out = unit_c(c)
return out
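# Illustrative shape note: with num_channels=C and num_blocks=N, the concatenated tensor
# `c` grows from C to C*(N+1) channels across the loop, and each 1x1 conv "c{i}" maps its
# C*(i+1) input channels back to C, so `out` keeps C channels throughout the Group.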
class Net(nn.Module):
def __init__(self, opt):
super().__init__()
self.sub_mean = ops.MeanShift(255)
self.add_mean = ops.MeanShift(255, sign=1)
head = [
ops.DownBlock(opt.scale),
nn.Conv2d(3*opt.scale**2, opt.num_channels, 3, 1, 1)
]
# define body module
for ng in range(opt.num_groups):
setattr(self,
"c{}".format(ng+1),
nn.Conv2d(opt.num_channels*(ng+2), opt.num_channels, 1, 1, 0)
)
setattr(self,
"b{}".format(ng+1),
Group(opt.num_channels, opt.num_blocks)
)
tail = [
ops.Upsampler(opt.num_channels, opt.scale),
nn.Conv2d(opt.num_channels, 3, 3, 1, 1)
]
self.head = nn.Sequential(*head)
self.tail = nn.Sequential(*tail)
self.opt = opt
def forward(self, x):
x = self.sub_mean(x)
x = self.head(x)
c = out = x
for ng in range(self.opt.num_groups):
group = getattr(self, "b{}".format(ng+1))
conv = getattr(self, "c{}".format(ng+1))
g = group(out)
c = torch.cat([c, g], dim=1)
out = conv(c)
res = out
res += x
x = self.tail(res)
x = self.add_mean(x)
return x
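# Illustrative usage sketch (assumptions, not from the original repo: the repo's
# `model.ops` module is importable and `opt` only needs scale, num_channels, num_groups
# and num_blocks; the values below are made up for demonstration).
if __name__ == "__main__":
    from types import SimpleNamespace

    opt = SimpleNamespace(scale=2, num_channels=64, num_groups=3, num_blocks=3)
    net = Net(opt)
    x = torch.randn(1, 3, 64, 64)  # input at the target (HR) resolution, divisible by scale
    y = net(x)  # DownBlock then Upsampler, so the output keeps the input's spatial size
    print(y.shape)  # expected: torch.Size([1, 3, 64, 64])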
|
evennia/contrib/ingame_python/scripts.py | Jaykingamez/evennia | 1,544 | 12640910 | <gh_stars>1000+
"""
Scripts for the in-game Python system.
"""
from datetime import datetime, timedelta
from queue import Queue
import re
import sys
import traceback
from django.conf import settings
from evennia import DefaultObject, DefaultScript, ChannelDB, ScriptDB
from evennia import logger, ObjectDB
from evennia.utils.ansi import raw
from evennia.utils.create import create_channel
from evennia.utils.dbserialize import dbserialize
from evennia.utils.utils import all_from_module, delay, pypath_to_realpath
from evennia.contrib.ingame_python.callbackhandler import CallbackHandler
from evennia.contrib.ingame_python.utils import get_next_wait, EVENTS, InterruptEvent
# Constants
RE_LINE_ERROR = re.compile(r'^ File "\<string\>", line (\d+)')
class EventHandler(DefaultScript):
"""
The event handler that contains all events in a global script.
This script shouldn't be created more than once. It contains
events (in a non-persistent attribute) and callbacks (in a
persistent attribute). The script's methods help with adding,
editing and deleting these events and callbacks.
"""
def at_script_creation(self):
"""Hook called when the script is created."""
self.key = "event_handler"
self.desc = "Global event handler"
self.persistent = True
# Permanent data to be stored
self.db.callbacks = {}
self.db.to_valid = []
self.db.locked = []
# Tasks
self.db.tasks = {}
def at_start(self):
"""Set up the event system when starting.
Note that this hook is called every time the server restarts
(including when it's reloaded). This hook performs the following
tasks:
- Create temporarily stored events.
- Generate locals (individual events' namespace).
- Load eventfuncs, including user-defined ones.
- Re-schedule delayed tasks whose timers were lost in the restart.
- Effectively connect the handler to the main script.
"""
self.ndb.events = {}
for typeclass, name, variables, help_text, custom_call, custom_add in EVENTS:
self.add_event(typeclass, name, variables, help_text, custom_call, custom_add)
# Generate locals
self.ndb.current_locals = {}
self.ndb.fresh_locals = {}
addresses = ["evennia.contrib.ingame_python.eventfuncs"]
addresses.extend(getattr(settings, "EVENTFUNCS_LOCATIONS", ["world.eventfuncs"]))
for address in addresses:
if pypath_to_realpath(address):
self.ndb.fresh_locals.update(all_from_module(address))
# Restart the delayed tasks
now = datetime.now()
for task_id, definition in tuple(self.db.tasks.items()):
future, obj, event_name, locals = definition
seconds = (future - now).total_seconds()
if seconds < 0:
seconds = 0
delay(seconds, complete_task, task_id)
# Place the script in the CallbackHandler
from evennia.contrib.ingame_python import typeclasses
CallbackHandler.script = self
DefaultObject.callbacks = typeclasses.EventObject.callbacks
# Create the channel if non-existent
try:
self.ndb.channel = ChannelDB.objects.get(db_key="everror")
except ChannelDB.DoesNotExist:
self.ndb.channel = create_channel(
"everror",
desc="Event errors",
locks="control:false();listen:perm(Builders);send:false()",
)
def get_events(self, obj):
"""
Return a dictionary of events on this object.
Args:
obj (Object or typeclass): the connected object or a general typeclass.
Returns:
A dictionary of the object's events.
Notes:
Events define what callbacks the object can have.
Note, however, that chained callbacks will not
appear in events and are handled separately.
You can also request the events of a typeclass, not a
connected object. This is useful to get the global list
of events for a typeclass that has no object yet.
"""
events = {}
all_events = self.ndb.events
classes = Queue()
if isinstance(obj, type):
classes.put(obj)
else:
classes.put(type(obj))
invalid = []
while not classes.empty():
typeclass = classes.get()
typeclass_name = typeclass.__module__ + "." + typeclass.__name__
for key, etype in all_events.get(typeclass_name, {}).items():
if key in invalid:
continue
if etype[0] is None: # Invalidate
invalid.append(key)
continue
if key not in events:
events[key] = etype
# Look for the parent classes
for parent in typeclass.__bases__:
classes.put(parent)
return events
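# Illustrative note: each value in the returned dictionary is the tuple registered through
# add_event(), i.e. (variables, help_text, custom_call, custom_add); an entry whose first
# element is None invalidates that event name for this lookup, so it is skipped even if a
# parent typeclass defines it.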
def get_variable(self, variable_name):
"""
Return the variable defined in the locals.
This is useful for checking the value of a variable that may have been modified in an event and whose value will then be used in code. This system allows additional customization.
Args:
variable_name (str): the name of the variable to return.
Returns:
The variable if found in the locals.
None if not found in the locals.
Note:
This will return the variable from the current locals.
Keep in mind that locals are shared between events. As
every event is called one by one, this doesn't pose
additional problems if you get the variable right after
an event has been executed. If, however, you defer,
there's no guarantee the variable will still be there or will
mean the same thing.
"""
return self.ndb.current_locals.get(variable_name)
def get_callbacks(self, obj):
"""
Return a dictionary of the object's callbacks.
Args:
obj (Object): the connected object.
Returns:
A dictionary of the object's callbacks.
Note:
This method can be useful to override in some contexts,
when several objects would share callbacks.
"""
obj_callbacks = self.db.callbacks.get(obj, {})
callbacks = {}
for callback_name, callback_list in obj_callbacks.items():
new_list = []
for i, callback in enumerate(callback_list):
callback = dict(callback)
callback["obj"] = obj
callback["name"] = callback_name
callback["number"] = i
new_list.append(callback)
if new_list:
callbacks[callback_name] = new_list
return callbacks
def add_callback(self, obj, callback_name, code, author=None, valid=False, parameters=""):
"""
Add the specified callback.
Args:
obj (Object): the Evennia typeclassed object to be extended.
callback_name (str): the name of the callback to add.
code (str): the Python code associated with this callback.
author (Character or Account, optional): the author of the callback.
valid (bool, optional): should the callback be connected?
parameters (str, optional): optional parameters.
Note:
This method doesn't check that the callback type exists.
"""
obj_callbacks = self.db.callbacks.get(obj, {})
if not obj_callbacks:
self.db.callbacks[obj] = {}
obj_callbacks = self.db.callbacks[obj]
callbacks = obj_callbacks.get(callback_name, [])
if not callbacks:
obj_callbacks[callback_name] = []
callbacks = obj_callbacks[callback_name]
# Add the callback in the list
callbacks.append(
{
"created_on": datetime.now(),
"author": author,
"valid": valid,
"code": code,
"parameters": parameters,
}
)
# If not valid, set it in 'to_valid'
if not valid:
self.db.to_valid.append((obj, callback_name, len(callbacks) - 1))
# Call the custom_add if needed
custom_add = self.get_events(obj).get(callback_name, [None, None, None, None])[3]
if custom_add:
custom_add(obj, callback_name, len(callbacks) - 1, parameters)
# Build the definition to return (a dictionary)
definition = dict(callbacks[-1])
definition["obj"] = obj
definition["name"] = callback_name
definition["number"] = len(callbacks) - 1
return definition
def edit_callback(self, obj, callback_name, number, code, author=None, valid=False):
"""
Edit the specified callback.
Args:
obj (Object): the Evennia typeclassed object to be edited.
callback_name (str): the name of the callback to edit.
number (int): the callback number to be changed.
code (str): the Python code associated with this callback.
author (Character or Account, optional): the author of the callback.
valid (bool, optional): should the callback be connected?
Raises:
RuntimeError if the callback is locked.
Note:
This method doesn't check that the callback type exists.
"""
obj_callbacks = self.db.callbacks.get(obj, {})
if not obj_callbacks:
self.db.callbacks[obj] = {}
obj_callbacks = self.db.callbacks[obj]
callbacks = obj_callbacks.get(callback_name, [])
if not callbacks:
obj_callbacks[callback_name] = []
callbacks = obj_callbacks[callback_name]
# If locked, don't edit it
if (obj, callback_name, number) in self.db.locked:
raise RuntimeError("this callback is locked.")
# Edit the callback
callbacks[number].update(
{"updated_on": datetime.now(), "updated_by": author, "valid": valid, "code": code}
)
# If not valid, set it in 'to_valid'
if not valid and (obj, callback_name, number) not in self.db.to_valid:
self.db.to_valid.append((obj, callback_name, number))
elif valid and (obj, callback_name, number) in self.db.to_valid:
self.db.to_valid.remove((obj, callback_name, number))
# Build the definition to return (a dictionary)
definition = dict(callbacks[number])
definition["obj"] = obj
definition["name"] = callback_name
definition["number"] = number
return definition
def del_callback(self, obj, callback_name, number):
"""
Delete the specified callback.
Args:
obj (Object): the typeclassed object containing the callback.
callback_name (str): the name of the callback to delete.
number (int): the number of the callback to delete.
Raises:
RuntimeError if the callback is locked.
"""
obj_callbacks = self.db.callbacks.get(obj, {})
callbacks = obj_callbacks.get(callback_name, [])
# If locked, don't edit it
if (obj, callback_name, number) in self.db.locked:
raise RuntimeError("this callback is locked.")
# Delete the callback itself
try:
code = callbacks[number]["code"]
except IndexError:
return
else:
logger.log_info(
"Deleting callback {} {} of {}:\n{}".format(callback_name, number, obj, code)
)
del callbacks[number]
# Change IDs of callbacks to be validated
i = 0
while i < len(self.db.to_valid):
t_obj, t_callback_name, t_number = self.db.to_valid[i]
if obj is t_obj and callback_name == t_callback_name:
if t_number == number:
# Strictly equal, delete the callback
del self.db.to_valid[i]
i -= 1
elif t_number > number:
# Change the ID for this callback
self.db.to_valid.insert(i, (t_obj, t_callback_name, t_number - 1))
del self.db.to_valid[i + 1]
i += 1
# Update locked callback
for i, line in enumerate(self.db.locked):
t_obj, t_callback_name, t_number = line
if obj is t_obj and callback_name == t_callback_name:
if number < t_number:
self.db.locked[i] = (t_obj, t_callback_name, t_number - 1)
# Delete time-related callbacks associated with this object
for script in obj.scripts.all():
if isinstance(script, TimeEventScript):
if script.obj is obj and script.db.callback_name == callback_name:
if script.db.number == number:
script.stop()
elif script.db.number > number:
script.db.number -= 1
def accept_callback(self, obj, callback_name, number):
"""
Validate a callback.
Args:
obj (Object): the object containing the callback.
callback_name (str): the name of the callback.
number (int): the number of the callback.
"""
obj_callbacks = self.db.callbacks.get(obj, {})
callbacks = obj_callbacks.get(callback_name, [])
# Accept and connect the callback
callbacks[number].update({"valid": True})
if (obj, callback_name, number) in self.db.to_valid:
self.db.to_valid.remove((obj, callback_name, number))
def call(self, obj, callback_name, *args, **kwargs):
"""
Call the connected callbacks.
Args:
obj (Object): the Evennia typeclassed object.
callback_name (str): the callback name to call.
*args: additional variables for this callback.
Keyword Args:
number (int, optional): call just a specific callback.
parameters (str, optional): call a callback with parameters.
locals (dict, optional): a locals replacement.
Returns:
True to report the callback was called without interruption,
False otherwise.
"""
# First, look for the callback type corresponding to this name
number = kwargs.get("number")
parameters = kwargs.get("parameters")
locals = kwargs.get("locals")
# Errors should not pass silently
allowed = ("number", "parameters", "locals")
if any(k for k in kwargs if k not in allowed):
raise TypeError(
"Unknown keyword arguments were specified " "to call callbacks: {}".format(kwargs)
)
event = self.get_events(obj).get(callback_name)
if locals is None and not event:
logger.log_err(
"The callback {} for the object {} (typeclass "
"{}) can't be found".format(callback_name, obj, type(obj))
)
return False
# Prepare the locals if necessary
if locals is None:
locals = self.ndb.fresh_locals.copy()
for i, variable in enumerate(event[0]):
try:
locals[variable] = args[i]
except IndexError:
logger.log_trace(
"callback {} of {} ({}): need variable "
"{} in position {}".format(callback_name, obj, type(obj), variable, i)
)
return False
else:
locals = {key: value for key, value in locals.items()}
callbacks = self.get_callbacks(obj).get(callback_name, [])
if event:
custom_call = event[2]
if custom_call:
callbacks = custom_call(callbacks, parameters)
# Now execute all the valid callbacks linked at this address
self.ndb.current_locals = locals
for i, callback in enumerate(callbacks):
if not callback["valid"]:
continue
if number is not None and callback["number"] != number:
continue
try:
exec(callback["code"], locals, locals)
except InterruptEvent:
return False
except Exception:
etype, evalue, tb = sys.exc_info()
trace = traceback.format_exception(etype, evalue, tb)
self.handle_error(callback, trace)
return True
def handle_error(self, callback, trace):
"""
Handle an error in a callback.
Args:
callback (dict): the callback representation.
trace (list): the traceback containing the exception.
Notes:
This method can be useful to override to change the default
handling of errors. By default, the error message is sent to
the character who last updated the callback, if connected.
If not, the error is displayed on the everror channel.
"""
callback_name = callback["name"]
number = callback["number"]
obj = callback["obj"]
oid = obj.id
logger.log_err(
"An error occurred during the callback {} of "
"{} (#{}), number {}\n{}".format(callback_name, obj, oid, number + 1, "\n".join(trace))
)
# Create the error message
line = "|runknown|n"
lineno = "|runknown|n"
for error in trace:
if error.startswith(' File "<string>", line '):
res = RE_LINE_ERROR.search(error)
if res:
lineno = int(res.group(1))
# Try to extract the line
try:
line = raw(callback["code"].splitlines()[lineno - 1])
except IndexError:
continue
else:
break
exc = raw(trace[-1].strip("\n").splitlines()[-1])
err_msg = "Error in {} of {} (#{})[{}], line {}:" " {}\n{}".format(
callback_name, obj, oid, number + 1, lineno, line, exc
)
# Inform the last updater if connected
updater = callback.get("updated_by")
if updater is None:
updater = callback["created_by"]
if updater and updater.sessions.all():
updater.msg(err_msg)
else:
err_msg = "Error in {} of {} (#{})[{}], line {}:" " {}\n {}".format(
callback_name, obj, oid, number + 1, lineno, line, exc
)
self.ndb.channel.msg(err_msg)
def add_event(self, typeclass, name, variables, help_text, custom_call, custom_add):
"""
Add a new event for a defined typeclass.
Args:
typeclass (str): the path leading to the typeclass.
name (str): the name of the event to add.
variables (list of str): list of variable names for this event.
help_text (str): the long help text of the event.
custom_call (callable or None): the function to be called
when the event fires.
custom_add (callable or None): the function to be called when
a callback is added.
"""
if typeclass not in self.ndb.events:
self.ndb.events[typeclass] = {}
events = self.ndb.events[typeclass]
if name not in events:
events[name] = (variables, help_text, custom_call, custom_add)
def set_task(self, seconds, obj, callback_name):
"""
Set and schedule a task to run.
Args:
seconds (int, float): the delay in seconds from now.
obj (Object): the typeclassed object connected to the event.
callback_name (str): the callback's name.
Notes:
This method allows scheduling a "persistent" task.
'utils.delay' is called, but a copy of the task is kept in
the event handler, and when the script restarts (after a reload),
the deferred delay is called again.
The dictionary of locals is frozen and will be available
again when the task runs. This feature, however, is limited
by the database: not all data can be saved. Lambda functions,
class methods, objects inside an instance and so on will
not be kept in the locals dictionary.
"""
now = datetime.now()
delta = timedelta(seconds=seconds)
# Choose a free task_id
used_ids = list(self.db.tasks.keys())
task_id = 1
while task_id in used_ids:
task_id += 1
# Collect and freeze current locals
locals = {}
for key, value in self.ndb.current_locals.items():
try:
dbserialize(value)
except TypeError:
continue
else:
locals[key] = value
self.db.tasks[task_id] = (now + delta, obj, callback_name, locals)
delay(seconds, complete_task, task_id)
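# Illustrative sketch (not part of Evennia itself): scheduling a persistent follow-up from
# game code could look roughly like this, where `obj` and the "greet" callback name are
# hypothetical placeholders.
#
#     from evennia import ScriptDB
#     handler = ScriptDB.objects.get(db_key="event_handler")
#     handler.set_task(60, obj, "greet")   # fire the "greet" callback again in 60 seconds
#
# After a server reload, at_start() walks self.db.tasks and calls delay() again, so the
# pending task survives the restart (minus any locals that could not be serialized).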
# Script to call time-related events
class TimeEventScript(DefaultScript):
"""Gametime-sensitive script."""
def at_script_creation(self):
"""The script is created."""
self.start_delay = True
self.persistent = True
# Script attributes
self.db.time_format = None
self.db.event_name = "time"
self.db.number = None
def at_repeat(self):
"""
Call the event and reset interval.
The script only needs to be restarted (to reset its interval)
at most twice after a reload. When the script has undergone
downtime, there's usually a slight shift in game time. Once
the script has restarted once, it will have set the usual time it
needs for all its future intervals and should not need to be
restarted again. In short, a script that is created shouldn't need
to restart more than once, and a script that is reloaded should
restart only twice.
"""
if self.db.time_format:
# If the 'usual' time is set, use it
seconds = self.ndb.usual
if seconds is None:
seconds, usual, details = get_next_wait(self.db.time_format)
self.ndb.usual = usual
if self.interval != seconds:
self.restart(interval=seconds)
if self.db.event_name and self.db.number is not None:
obj = self.obj
if not obj.callbacks:
return
event_name = self.db.event_name
number = self.db.number
obj.callbacks.call(event_name, obj, number=number)
# Functions to manipulate tasks
def complete_task(task_id):
"""
Mark the task in the event handler as complete.
Args:
task_id (int): the task ID.
Note:
This function should be called automatically for individual tasks.
"""
try:
script = ScriptDB.objects.get(db_key="event_handler")
except ScriptDB.DoesNotExist:
logger.log_trace("Can't get the event handler.")
return
if task_id not in script.db.tasks:
logger.log_err("The task #{} was scheduled, but it cannot be " "found".format(task_id))
return
delta, obj, callback_name, locals = script.db.tasks.pop(task_id)
script.call(obj, callback_name, locals=locals)
|
alipay/aop/api/response/AlipayMarketingCdpRecommendQueryResponse.py | snowxmas/alipay-sdk-python-all | 213 | 12640921 | <reponame>snowxmas/alipay-sdk-python-all
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayMarketingCdpRecommendQueryResponse(AlipayResponse):
def __init__(self):
super(AlipayMarketingCdpRecommendQueryResponse, self).__init__()
self._recommend_id = None
self._shop_info = None
@property
def recommend_id(self):
return self._recommend_id
@recommend_id.setter
def recommend_id(self, value):
self._recommend_id = value
@property
def shop_info(self):
return self._shop_info
@shop_info.setter
def shop_info(self, value):
self._shop_info = value
def parse_response_content(self, response_content):
response = super(AlipayMarketingCdpRecommendQueryResponse, self).parse_response_content(response_content)
if 'recommend_id' in response:
self.recommend_id = response['recommend_id']
if 'shop_info' in response:
self.shop_info = response['shop_info']
|
descarteslabs/scenes/tests/test_search.py | descarteslabs/descarteslabs-python | 167 | 12640928 | import unittest
import datetime
from descarteslabs.scenes import geocontext, search
from shapely.geometry import shape
import mock
from .mock_data import _metadata_search, _cached_bands_by_product
class TestScenesSearch(unittest.TestCase):
geom = {
"coordinates": (
(
(-95.836498, 39.278486),
(-92.068696, 39.278486),
(-92.068696, 42.799988),
(-95.836498, 42.799988),
(-95.836498, 39.278486),
),
),
"type": "Polygon",
}
@mock.patch("descarteslabs.scenes._search.Metadata.search", _metadata_search)
@mock.patch(
"descarteslabs.scenes._search.cached_bands_by_product",
_cached_bands_by_product,
)
def test_search_geom(self):
sc, ctx = search(self.geom, products="landsat:LC08:PRE:TOAR", limit=4)
assert len(sc) > 0
assert len(sc) <= 4 # test client only has 2 scenes available
assert isinstance(ctx, geocontext.AOI)
assert ctx.__geo_interface__ == self.geom
assert ctx.resolution == 15
assert ctx.crs == "EPSG:32615"
for scene in sc:
# allow for changes in publicly available data
assert abs(len(scene.properties.bands) - 24) < 4
assert "derived:ndvi" in scene.properties.bands
@mock.patch("descarteslabs.scenes._search.Metadata.search", _metadata_search)
@mock.patch(
"descarteslabs.scenes._search.cached_bands_by_product",
_cached_bands_by_product,
)
def test_search_shapely(self):
sc, ctx = search(shape(self.geom), products="landsat:LC08:PRE:TOAR", limit=4)
assert len(sc) == 2
assert isinstance(ctx, geocontext.AOI)
assert ctx.__geo_interface__ == self.geom
assert ctx.resolution == 15
assert ctx.crs == "EPSG:32615"
for scene in sc:
# allow for changes in publicly available data
assert abs(len(scene.properties.bands) - 24) < 4
assert "derived:ndvi" in scene.properties.bands
@mock.patch("descarteslabs.scenes._search.Metadata.search", _metadata_search)
@mock.patch(
"descarteslabs.scenes._search.cached_bands_by_product",
_cached_bands_by_product,
)
def test_search_AOI(self):
aoi = geocontext.AOI(self.geom, resolution=5)
sc, ctx = search(aoi, products="landsat:LC08:PRE:TOAR", limit=4)
assert len(sc) > 0
assert len(sc) <= 4 # test client only has 2 scenes available
assert ctx.resolution == 5
assert ctx.crs == "EPSG:32615"
@mock.patch("descarteslabs.scenes._search.Metadata.search", _metadata_search)
@mock.patch(
"descarteslabs.scenes._search.cached_bands_by_product",
_cached_bands_by_product,
)
def test_search_AOI_with_shape(self):
aoi = geocontext.AOI(self.geom, shape=(100, 100))
sc, ctx = search(aoi, products="landsat:LC08:PRE:TOAR", limit=4)
assert len(sc) > 0
assert len(sc) <= 4 # test client only has 2 scenes available
assert ctx.resolution is None
assert ctx.shape == aoi.shape
assert ctx.crs == "EPSG:32615"
@mock.patch("descarteslabs.scenes._search.Metadata.search", _metadata_search)
@mock.patch(
"descarteslabs.scenes._search.cached_bands_by_product",
_cached_bands_by_product,
)
def test_search_dltile(self):
tile = geocontext.DLTile(
{
"geometry": {
"coordinates": [
[
[-94.50970627780103, 40.460817879515986],
[-93.75494640538922, 40.468212507270195],
[-93.76149667591069, 41.04471363474632],
[-94.5228005945451, 41.03716803374444],
[-94.50970627780103, 40.460817879515986],
]
],
"type": "Polygon",
},
"properties": {
"cs_code": "EPSG:32615",
"key": "64:0:1000.0:15:-2:70",
"outputBounds": [372000.0, 4480000.0, 436000.0, 4544000.0],
"pad": 0,
"resolution": 1000.0,
"ti": -2,
"tilesize": 64,
"tj": 70,
"zone": 15,
},
}
)
sc, ctx = search(tile, products="landsat:LC08:PRE:TOAR", limit=4)
assert len(sc) > 0
assert len(sc) <= 4 # test client only has 2 scenes available
assert ctx == tile
@mock.patch("descarteslabs.scenes._search.Metadata.search", _metadata_search)
@mock.patch(
"descarteslabs.scenes._search.cached_bands_by_product",
_cached_bands_by_product,
)
def test_search_no_products(self):
sc, ctx = search(self.geom, limit=4)
assert len(sc) > 0
assert len(sc) <= 4 # test client only has 2 scenes available
@mock.patch("descarteslabs.scenes._search.Metadata.search", _metadata_search)
@mock.patch(
"descarteslabs.scenes._search.cached_bands_by_product",
_cached_bands_by_product,
)
def test_search_datetime(self):
start_datetime = datetime.datetime(2016, 7, 6)
end_datetime = datetime.datetime(2016, 7, 15)
sc, ctx = search(
self.geom,
products="landsat:LC08:PRE:TOAR",
start_datetime=start_datetime,
end_datetime=end_datetime,
limit=4,
)
assert len(sc) > 0
assert len(sc) <= 4 # test client only has 2 scenes available
for scene in sc:
assert scene.properties["date"] >= start_datetime
assert scene.properties["date"] <= end_datetime
|
snips_nlu/slot_filler/crf_utils.py | CharlyBlavier/snips-nlu-Copy | 3,764 | 12640930 | <gh_stars>1000+
from __future__ import unicode_literals
from builtins import range
from enum import Enum, unique
from snips_nlu.constants import END, SLOT_NAME, START, TEXT
from snips_nlu.preprocessing import Token, tokenize
from snips_nlu.result import unresolved_slot
BEGINNING_PREFIX = "B-"
INSIDE_PREFIX = "I-"
LAST_PREFIX = "L-"
UNIT_PREFIX = "U-"
OUTSIDE = "O"
RANGE = "range"
TAGS = "tags"
TOKENS = "tokens"
@unique
class TaggingScheme(Enum):
"""CRF Coding Scheme"""
IO = 0
"""Inside-Outside scheme"""
BIO = 1
"""Beginning-Inside-Outside scheme"""
BILOU = 2
"""Beginning-Inside-Last-Outside-Unit scheme, sometimes referred as
BWEMO"""
def tag_name_to_slot_name(tag):
return tag[2:]
def start_of_io_slot(tags, i):
if i == 0:
return tags[i] != OUTSIDE
if tags[i] == OUTSIDE:
return False
return tags[i - 1] == OUTSIDE
def end_of_io_slot(tags, i):
if i + 1 == len(tags):
return tags[i] != OUTSIDE
if tags[i] == OUTSIDE:
return False
return tags[i + 1] == OUTSIDE
def start_of_bio_slot(tags, i):
if i == 0:
return tags[i] != OUTSIDE
if tags[i] == OUTSIDE:
return False
if tags[i].startswith(BEGINNING_PREFIX):
return True
if tags[i - 1] != OUTSIDE:
return False
return True
def end_of_bio_slot(tags, i):
if i + 1 == len(tags):
return tags[i] != OUTSIDE
if tags[i] == OUTSIDE:
return False
if tags[i + 1].startswith(INSIDE_PREFIX):
return False
return True
def start_of_bilou_slot(tags, i):
if i == 0:
return tags[i] != OUTSIDE
if tags[i] == OUTSIDE:
return False
if tags[i].startswith(BEGINNING_PREFIX):
return True
if tags[i].startswith(UNIT_PREFIX):
return True
if tags[i - 1].startswith(UNIT_PREFIX):
return True
if tags[i - 1].startswith(LAST_PREFIX):
return True
if tags[i - 1] != OUTSIDE:
return False
return True
def end_of_bilou_slot(tags, i):
if i + 1 == len(tags):
return tags[i] != OUTSIDE
if tags[i] == OUTSIDE:
return False
if tags[i + 1] == OUTSIDE:
return True
if tags[i].startswith(LAST_PREFIX):
return True
if tags[i].startswith(UNIT_PREFIX):
return True
if tags[i + 1].startswith(BEGINNING_PREFIX):
return True
if tags[i + 1].startswith(UNIT_PREFIX):
return True
return False
def _tags_to_preslots(tags, tokens, is_start_of_slot, is_end_of_slot):
slots = []
current_slot_start = 0
for i, tag in enumerate(tags):
if is_start_of_slot(tags, i):
current_slot_start = i
if is_end_of_slot(tags, i):
slots.append({
RANGE: {
START: tokens[current_slot_start].start,
END: tokens[i].end
},
SLOT_NAME: tag_name_to_slot_name(tag)
})
current_slot_start = i
return slots
def tags_to_preslots(tokens, tags, tagging_scheme):
if tagging_scheme == TaggingScheme.IO:
slots = _tags_to_preslots(tags, tokens, start_of_io_slot,
end_of_io_slot)
elif tagging_scheme == TaggingScheme.BIO:
slots = _tags_to_preslots(tags, tokens, start_of_bio_slot,
end_of_bio_slot)
elif tagging_scheme == TaggingScheme.BILOU:
slots = _tags_to_preslots(tags, tokens, start_of_bilou_slot,
end_of_bilou_slot)
else:
raise ValueError("Unknown tagging scheme %s" % tagging_scheme)
return slots
def tags_to_slots(text, tokens, tags, tagging_scheme, intent_slots_mapping):
slots = tags_to_preslots(tokens, tags, tagging_scheme)
return [
unresolved_slot(match_range=slot[RANGE],
value=text[slot[RANGE][START]:slot[RANGE][END]],
entity=intent_slots_mapping[slot[SLOT_NAME]],
slot_name=slot[SLOT_NAME])
for slot in slots
]
def positive_tagging(tagging_scheme, slot_name, slot_size):
if slot_name == OUTSIDE:
return [OUTSIDE for _ in range(slot_size)]
if tagging_scheme == TaggingScheme.IO:
tags = [INSIDE_PREFIX + slot_name for _ in range(slot_size)]
elif tagging_scheme == TaggingScheme.BIO:
if slot_size > 0:
tags = [BEGINNING_PREFIX + slot_name]
tags += [INSIDE_PREFIX + slot_name for _ in range(1, slot_size)]
else:
tags = []
elif tagging_scheme == TaggingScheme.BILOU:
if slot_size == 0:
tags = []
elif slot_size == 1:
tags = [UNIT_PREFIX + slot_name]
else:
tags = [BEGINNING_PREFIX + slot_name]
tags += [INSIDE_PREFIX + slot_name
for _ in range(1, slot_size - 1)]
tags.append(LAST_PREFIX + slot_name)
else:
raise ValueError("Invalid tagging scheme %s" % tagging_scheme)
return tags
def negative_tagging(size):
return [OUTSIDE for _ in range(size)]
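# Illustrative examples of the tagging helpers above (a sketch, not part of the library):
#
#     positive_tagging(TaggingScheme.BILOU, "datetime", 3)
#     # -> ['B-datetime', 'I-datetime', 'L-datetime']
#     positive_tagging(TaggingScheme.BIO, "datetime", 3)
#     # -> ['B-datetime', 'I-datetime', 'I-datetime']
#     negative_tagging(2)
#     # -> ['O', 'O']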
def utterance_to_sample(query_data, tagging_scheme, language):
tokens, tags = [], []
current_length = 0
for chunk in query_data:
chunk_tokens = tokenize(chunk[TEXT], language)
tokens += [Token(t.value, current_length + t.start,
current_length + t.end) for t in chunk_tokens]
current_length += len(chunk[TEXT])
if SLOT_NAME not in chunk:
tags += negative_tagging(len(chunk_tokens))
else:
tags += positive_tagging(tagging_scheme, chunk[SLOT_NAME],
len(chunk_tokens))
return {TOKENS: tokens, TAGS: tags}
def get_scheme_prefix(index, indexes, tagging_scheme):
if tagging_scheme == TaggingScheme.IO:
return INSIDE_PREFIX
elif tagging_scheme == TaggingScheme.BIO:
if index == indexes[0]:
return BEGINNING_PREFIX
return INSIDE_PREFIX
elif tagging_scheme == TaggingScheme.BILOU:
if len(indexes) == 1:
return UNIT_PREFIX
if index == indexes[0]:
return BEGINNING_PREFIX
if index == indexes[-1]:
return LAST_PREFIX
return INSIDE_PREFIX
else:
raise ValueError("Invalid tagging scheme %s" % tagging_scheme)
|
tests/test_check_www_redirect.py | Prodject/yawast | 200 | 12640973 | # Copyright (c) 2013 - 2020 <NAME> and Contributors.
# This file is part of YAWAST which is released under the MIT license.
# See the LICENSE file or go to https://yawast.org/license/ for full license details.
from unittest import TestCase
from yawast.shared import network
class TestCheckWwwRedirect(TestCase):
def test_check_www_redirect_valid(self):
self.assertEqual(
"https://adamcaudill.com/",
network.check_www_redirect("https://www.adamcaudill.com/"),
)
def test_check_www_redirect_none(self):
self.assertEqual(
"https://adamcaudill.com/",
network.check_www_redirect("https://adamcaudill.com/"),
)
def test_check_www_redirect_www(self):
self.assertEqual(
"https://www.apple.com/", network.check_www_redirect("https://apple.com/")
)
|
tests/buildah_test.py | msaladna/mitogen | 1,526 | 12640990 | import os
import mitogen
import unittest2
import testlib
class ConstructorTest(testlib.RouterMixin, testlib.TestCase):
def test_okay(self):
buildah_path = testlib.data_path('stubs/stub-buildah.py')
context = self.router.buildah(
container='container_name',
buildah_path=buildah_path,
)
stream = self.router.stream_by_id(context.context_id)
argv = eval(context.call(os.getenv, 'ORIGINAL_ARGV'))
self.assertEquals(argv[0], buildah_path)
self.assertEquals(argv[1], 'run')
self.assertEquals(argv[2], '--')
self.assertEquals(argv[3], 'container_name')
self.assertEquals(argv[4], stream.conn.options.python_path)
if __name__ == '__main__':
unittest2.main()
|
auctioning_platform/shipping_infrastructure/shipping_infrastructure/repositories/__init__.py | nhdinh/smp-modulith | 299 | 12640997 | __all__ = ["FakeAddressRepository"]
from shipping_infrastructure.repositories.address import FakeAddressRepository
|
scripts/androaxml.py | jamjven/ATX | 1,132 | 12641015 | <reponame>jamjven/ATX
#!/usr/bin/env python
# This file is part of Androguard.
#
# Copyright (C) 2012, <NAME> <desnos at t0t0.fr>
# All rights reserved.
#
# Androguard is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Androguard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Androguard. If not, see <http://www.gnu.org/licenses/>.
import os
__dir__ = os.path.relpath(os.path.dirname(os.path.abspath(__file__)))
import sys
sys.path.append(os.path.join(__dir__, "androguard.zip"))
from optparse import OptionParser
from xml.dom import minidom
import codecs
import string
from androguard.core import androconf
from androguard.core.bytecodes import apk
#option_0 = { 'name' : ('-i', '--input'), 'help' : 'filename input (APK or android\'s binary xml)', 'nargs' : 1 }
option_1 = {'name' : ('-f', '--format'),
'help': 'output format',
'nargs': 1,
'default': '$package'
}
option_2 = {
'name': ('-v', '--version'),
'help':'version of the API',
'action': 'count'
}
options = [option_1, option_2]
def xml2parse(dom, strformat='$package/$activity'):
root = dom.getElementsByTagName("manifest")[0]
package = root.getAttribute('package')
activity = ''
for e in root.getElementsByTagName('activity'):
name = e.getAttribute('android:name')
t = e.getElementsByTagName('intent-filter')
if t:
activity = name
print string.Template(strformat).safe_substitute(
package=package, activity = activity)
def main(options, filename) :
if filename != None :
buff = ""
ret_type = androconf.is_android(filename)
if ret_type == "APK":
a = apk.APK(filename)
dom = a.get_android_manifest_xml()
#buff = a.get_android_manifest_xml().toprettyxml(encoding="utf-8")
#a.get_activities()
xml2parse(dom)
elif ".xml" in filename:
ap = apk.AXMLPrinter(open(filename, "rb").read())
buff = minidom.parseString(ap.get_buff()).toprettyxml(encoding="utf-8")
else:
print "Unknown file type"
return
#if options.output != None :
# fd = codecs.open(options.output, "w", "utf-8")
# fd.write( buff )
# fd.close()
#else :
# print buff
elif options.version != None :
print "Androaxml version %s" % androconf.ANDROGUARD_VERSION
if __name__ == "__main__" :
parser = OptionParser()
for option in options :
param = option['name']
del option['name']
parser.add_option(*param, **option)
options, arguments = parser.parse_args()
if len(arguments) == 0:
sys.exit('use --help for more help')
sys.argv[:] = arguments
main(options, arguments[0])
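# Example invocation (illustrative):
#     python androaxml.py path/to/app.apk
# which prints the package name and the activity that declares an intent-filter,
# in '$package/$activity' form.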
|
python/veles/tests/schema/test_polymodel.py | pombredanne/veles | 918 | 12641018 | # Copyright 2017 CodiLime
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
import unittest
import six
from veles.schema import fields
from veles.schema.model import PolymorphicModel
from veles.proto.exceptions import SchemaError
class BaseNieZlew(PolymorphicModel):
pass
class NieZlew(BaseNieZlew):
object_type = 'nie_zlew'
pole = fields.String(optional=True)
class BaseZlew(PolymorphicModel):
imie = fields.String(optional=True)
class Zlew(BaseZlew):
object_type = 'zlew'
odplyw = fields.String()
class TurboZlew(Zlew):
object_type = 'turbozlew'
dopalacz = fields.Binary()
class WieloZlew(BaseZlew):
przeplyw = fields.Integer(default=13)
class DwuZlew(WieloZlew):
object_type = 'dwuzlew'
lewy = fields.Object(Zlew)
prawy = fields.Object(Zlew)
class PietroZlew(WieloZlew):
object_type = 'pietrozlew'
pietra = fields.List(fields.Object(BaseZlew))
class TestModel(unittest.TestCase):
def test_fields(self):
self.assertEqual(set(BaseZlew.fields), {
BaseZlew.imie
})
self.assertEqual(set(Zlew.fields), {
BaseZlew.imie, Zlew.odplyw
})
self.assertEqual(set(TurboZlew.fields), {
BaseZlew.imie, Zlew.odplyw, TurboZlew.dopalacz
})
self.assertEqual(set(WieloZlew.fields), {
BaseZlew.imie, WieloZlew.przeplyw
})
self.assertEqual(set(DwuZlew.fields), {
BaseZlew.imie, WieloZlew.przeplyw, DwuZlew.lewy, DwuZlew.prawy
})
self.assertEqual(set(PietroZlew.fields), {
BaseZlew.imie, WieloZlew.przeplyw, PietroZlew.pietra
})
def test_object_types(self):
self.assertEqual(set(BaseZlew.object_types), {
'zlew', 'turbozlew', 'dwuzlew', 'pietrozlew',
})
for x in BaseZlew.object_types:
self.assertIsInstance(x, six.text_type)
class BaseAbc(PolymorphicModel):
pass
with self.assertRaises(TypeError):
class Abc(BaseAbc):
object_type = b'abc'
with self.assertRaises(TypeError):
class Def(BaseAbc):
object_type = 1234
def test_init(self):
a = Zlew(odplyw='o')
b = TurboZlew(odplyw='wzium', dopalacz=b'\xf3\x90')
c = DwuZlew(lewy=a, prawy=b)
d = PietroZlew(imie='Jasiu', pietra=[c], przeplyw=1)
with self.assertRaises(TypeError):
BaseZlew(imie='Sid')
with self.assertRaises(TypeError):
WieloZlew(imie='Legion')
with self.assertRaises(SchemaError):
DwuZlew(lewy=a, prawy=d)
def test_dump(self):
a = Zlew(odplyw='o')
b = TurboZlew(odplyw='wzium', dopalacz=b'\xf3\x90')
c = DwuZlew(lewy=a, prawy=b)
d = PietroZlew(imie='Jasiu', pietra=[c], przeplyw=1)
da = a.dump()
db = b.dump()
dc = c.dump()
dd = d.dump()
for x in da:
self.assertIsInstance(x, six.text_type)
for x in db:
self.assertIsInstance(x, six.text_type)
for x in dc:
self.assertIsInstance(x, six.text_type)
for x in dd:
self.assertIsInstance(x, six.text_type)
self.assertEqual(da, {
'object_type': 'zlew',
'imie': None,
'odplyw': 'o',
})
self.assertEqual(db, {
'object_type': 'turbozlew',
'imie': None,
'odplyw': 'wzium',
'dopalacz': b'\xf3\x90',
})
self.assertEqual(dc, {
'object_type': 'dwuzlew',
'imie': None,
'lewy': da,
'prawy': db,
'przeplyw': 13,
})
self.assertEqual(dd, {
'object_type': 'pietrozlew',
'imie': 'Jasiu',
'pietra': [dc],
'przeplyw': 1,
})
def test_load(self):
a = Zlew(odplyw='o')
b = TurboZlew(odplyw='wzium', dopalacz=b'\xf3\x90')
c = DwuZlew(lewy=a, prawy=b)
d = PietroZlew(imie='Jasiu', pietra=[c], przeplyw=1)
da = a.dump()
db = b.dump()
dc = c.dump()
dd = d.dump()
self.assertEqual(BaseZlew.load(da), a)
self.assertEqual(Zlew.load(da), a)
with self.assertRaises(SchemaError):
TurboZlew.load(da)
with self.assertRaises(SchemaError):
WieloZlew.load(da)
with self.assertRaises(SchemaError):
DwuZlew.load(da)
with self.assertRaises(SchemaError):
PietroZlew.load(da)
with self.assertRaises(SchemaError):
NieZlew.load(da)
with self.assertRaises(SchemaError):
BaseNieZlew.load(da)
self.assertEqual(BaseZlew.load(db), b)
self.assertEqual(Zlew.load(db), b)
self.assertEqual(TurboZlew.load(db), b)
with self.assertRaises(SchemaError):
WieloZlew.load(db)
with self.assertRaises(SchemaError):
DwuZlew.load(db)
with self.assertRaises(SchemaError):
PietroZlew.load(db)
with self.assertRaises(SchemaError):
NieZlew.load(db)
with self.assertRaises(SchemaError):
BaseNieZlew.load(db)
self.assertEqual(BaseZlew.load(dc), c)
self.assertEqual(WieloZlew.load(dc), c)
self.assertEqual(DwuZlew.load(dc), c)
with self.assertRaises(SchemaError):
Zlew.load(dc)
with self.assertRaises(SchemaError):
TurboZlew.load(dc)
with self.assertRaises(SchemaError):
PietroZlew.load(dc)
with self.assertRaises(SchemaError):
NieZlew.load(dc)
with self.assertRaises(SchemaError):
BaseNieZlew.load(dc)
self.assertEqual(BaseZlew.load(dd), d)
self.assertEqual(WieloZlew.load(dd), d)
self.assertEqual(PietroZlew.load(dd), d)
with self.assertRaises(SchemaError):
Zlew.load(dd)
with self.assertRaises(SchemaError):
TurboZlew.load(dd)
with self.assertRaises(SchemaError):
DwuZlew.load(dd)
with self.assertRaises(SchemaError):
NieZlew.load(dd)
with self.assertRaises(SchemaError):
BaseNieZlew.load(dd)
with self.assertRaises(SchemaError):
BaseZlew.load({})
with self.assertRaises(SchemaError):
BaseZlew.load({'object_type': 'nie_zlew'})
|
chrome/updater/run_updater_tests.py | Ron423c/chromium | 575 | 12641025 | # Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import logging
import os
import shutil
import sys
from test.integration_tests.common import path_finder
path_finder.add_typ_dir_to_sys_path()
import typ
class Context(object):
def __init__(self, build_dir):
self.build_dir = build_dir
def copy_file(source, destination):
shutil.copyfile(source, destination)
def main():
parser = typ.ArgumentParser()
parser.add_argument('--build-dir',
help='Specifies chromium build directory.')
parser.add_argument('--target-gen-dir')
runner = typ.Runner()
# Set this when using context that will be passed to tests.
runner.win_multiprocessing = typ.WinMultiprocessing.importable
runner.parse_args(parser,
argv=None,
tests=[path_finder.get_integration_tests_dir()])
# setup logging level
if runner.args.verbose > 1:
level = logging.DEBUG
else:
level = logging.INFO
logging.basicConfig(level=level)
# copy dynamically generated updater version_info.py from
# target gen directory to
# //chrome/updater/test/integration_tests/updater so that
# it can be imported as a module during test runs.
target_gen_dir_abs_path = os.path.abspath(runner.args.target_gen_dir)
version_file_path = os.path.join(target_gen_dir_abs_path, 'gen',
'chrome', 'updater', 'version_info.py')
if os.path.exists(version_file_path):
dest = os.path.join(path_finder.get_integration_tests_dir(),
'updater', 'version_info.py')
copy_file(version_file_path, dest)
else:
logging.info('File not found: %s' % version_file_path)
return -1
# copy dynamically generated updater branding_info.py from
# target gen directory to
# //chrome/updater/test/integration_tests/updater so that
# it can be imported as a module during test runs.
branding_file_path = os.path.join(target_gen_dir_abs_path, 'gen',
'chrome', 'updater', 'branding_info.py')
if os.path.exists(branding_file_path):
dest = os.path.join(path_finder.get_integration_tests_dir(),
'updater', 'branding_info.py')
copy_file(branding_file_path, dest)
else:
logging.info('File not found: %s' % branding_file_path)
return -2
runner.context = Context(runner.args.build_dir)
return runner.run()[0]
if __name__ == "__main__":
sys.exit(main())
|
Packs/IntegrationsAndIncidentsHealthCheck/Scripts/GetFailedTasks/test_data/constants.py | diCagri/content | 799 | 12641029 | <reponame>diCagri/content
INCIDENTS_RESULT = [
{'ModuleName': 'InnerServicesModule', 'Brand': 'Builtin', 'Category': 'Builtin', 'ID': '', 'Version': 0, 'Type': 1,
'Contents': {
'ErrorsPrivateDoNotUse': None, 'data': [
{
'CustomFields': {'dbotpredictionprobability': 0,
'detectionsla': {'accumulatedPause': 0,
'breachTriggered': False,
'dueDate': '0001-01-01T00:00:00Z',
'endDate': '0001-01-01T00:00:00Z',
'lastPauseDate': '0001-01-01T00:00:00Z',
'runStatus': 'idle',
'sla': 20, 'slaStatus': -1,
'startDate': '0001-01-01T00:00:00Z',
'totalDuration': 0},
'remediationsla': {'accumulatedPause': 0,
'breachTriggered': False,
'dueDate': '0001-01-01T00:00:00Z',
'endDate': '0001-01-01T00:00:00Z',
'lastPauseDate': '0001-01-01T00:00:00Z',
'runStatus': 'idle',
'sla': 7200,
'slaStatus': -1,
'startDate': '0001-01-01T00:00:00Z',
'totalDuration': 0},
'timetoassignment': {'accumulatedPause': 0,
'breachTriggered': False,
'dueDate': '0001-01-01T00:00:00Z',
'endDate': '0001-01-01T00:00:00Z',
'lastPauseDate': '0001-01-01T00:00:00Z',
'runStatus': 'idle',
'sla': 0,
'slaStatus': -1,
'startDate': '0001-01-01T00:00:00Z',
'totalDuration': 0},
'urlsslverification': []}, 'ShardID': 0,
'account': '', 'activated': '0001-01-01T00:00:00Z',
'allRead': False, 'allReadWrite': False, 'attachment': None,
'autime': 1601398110261438200, 'canvases': None,
'category': '', 'closeNotes': '', 'closeReason': '',
'closed': '0001-01-01T00:00:00Z', 'closingUserId': '',
'created': '2020-09-29T16:48:30.261438285Z',
'dbotCreatedBy': 'admin', 'dbotCurrentDirtyFields': None,
'dbotDirtyFields': None, 'dbotMirrorDirection': '',
'dbotMirrorId': '', 'dbotMirrorInstance': '',
'dbotMirrorLastSync': '0001-01-01T00:00:00Z',
'dbotMirrorTags': None, 'details': '', 'droppedCount': 0,
'dueDate': '2020-10-09T16:48:30.261438285Z',
'feedBased': False, 'hasRole': False, 'id': '7',
'investigationId': '7', 'isPlayground': False,
'labels': [{'type': 'Instance', 'value': 'admin'},
{'type': 'Brand', 'value': 'Manual'}],
'lastJobRunTime': '0001-01-01T00:00:00Z',
'lastOpen': '2020-09-30T15:40:11.737120193Z',
'linkedCount': 0, 'linkedIncidents': None,
'modified': '2020-09-30T15:40:36.604919119Z',
'name': 'errors',
'notifyTime': '2020-09-29T16:48:30.436371249Z',
'occurred': '2020-09-29T16:48:30.261438058Z',
'openDuration': 62265, 'owner': '', 'parent': '',
'phase': '', 'playbookId': 'AutoFocusPolling',
'previousAllRead': False, 'previousAllReadWrite': False,
'previousRoles': None, 'rawCategory': '',
'rawCloseReason': '', 'rawJSON': '', 'rawName': 'errors',
'rawPhase': '', 'rawType': 'Unclassified', 'reason': '',
'reminder': '0001-01-01T00:00:00Z', 'roles': None,
'runStatus': 'error', 'severity': 0, 'sla': 0,
'sortValues': ['_score'], 'sourceBrand': 'Manual',
'sourceInstance': 'admin', 'status': 1,
'type': 'Unclassified', 'version': 8}, {
'CustomFields': {'dbotpredictionprobability': 0,
'detectionsla': {'accumulatedPause': 0,
'breachTriggered': False,
'dueDate': '0001-01-01T00:00:00Z',
'endDate': '0001-01-01T00:00:00Z',
'lastPauseDate': '0001-01-01T00:00:00Z',
'runStatus': 'idle',
'sla': 20,
'slaStatus': -1,
'startDate': '0001-01-01T00:00:00Z',
'totalDuration': 0},
'integrationscategories': ['Utilities',
'Utilities',
'Utilities',
'Utilities',
'Endpoint',
'Messaging',
'Data Enrichment & Threat Intelligence'],
'integrationsfailedcategories': [
'Data Enrichment & Threat Intelligence',
'Endpoint'],
'numberofentriesiderrors': 0,
'numberoffailedincidents': 0,
'remediationsla': {'accumulatedPause': 0,
'breachTriggered': False,
'dueDate': '0001-01-01T00:00:00Z',
'endDate': '0001-01-01T00:00:00Z',
'lastPauseDate': '0001-01-01T00:00:00Z',
'runStatus': 'idle',
'sla': 7200,
'slaStatus': -1,
'startDate': '0001-01-01T00:00:00Z',
'totalDuration': 0},
'timetoassignment': {
'accumulatedPause': 0,
'breachTriggered': False,
'dueDate': '0001-01-01T00:00:00Z',
'endDate': '0001-01-01T00:00:00Z',
'lastPauseDate': '0001-01-01T00:00:00Z',
'runStatus': 'idle', 'sla': 0,
'slaStatus': -1,
'startDate': '0001-01-01T00:00:00Z',
'totalDuration': 0},
'totalfailedinstances': 2,
'totalgoodinstances': 7,
'totalinstances': 9,
'unassignedincidents': [],
'urlsslverification': []}, 'ShardID': 0,
'account': '', 'activated': '0001-01-01T00:00:00Z',
'allRead': False, 'allReadWrite': False,
'attachment': None, 'autime': 1601388165826470700,
'canvases': None, 'category': '',
'closeNotes': 'Created a new incident type.',
'closeReason': '', 'closed': '0001-01-01T00:00:00Z',
'closingUserId': '',
'created': '2020-09-29T14:02:45.82647067Z',
'dbotCreatedBy': 'admin', 'dbotCurrentDirtyFields': None,
'dbotDirtyFields': None, 'dbotMirrorDirection': '',
'dbotMirrorId': '', 'dbotMirrorInstance': '',
'dbotMirrorLastSync': '0001-01-01T00:00:00Z',
'dbotMirrorTags': None, 'details': '', 'droppedCount': 0,
'dueDate': '0001-01-01T00:00:00Z', 'feedBased': False,
'hasRole': False, 'id': '3', 'investigationId': '3',
'isPlayground': False,
'labels': [{'type': 'Instance', 'value': 'admin'},
{'type': 'Brand', 'value': 'Manual'}],
'lastJobRunTime': '0001-01-01T00:00:00Z',
'lastOpen': '2020-09-30T15:40:48.618174584Z',
'linkedCount': 0, 'linkedIncidents': None,
'modified': '2020-09-30T15:41:15.184226213Z',
'name': 'Incident with error',
'notifyTime': '2020-09-29T14:09:06.048819578Z',
'occurred': '2020-09-29T14:02:45.826470478Z',
'openDuration': 686, 'owner': 'admin', 'parent': '',
'phase': '',
'playbookId': 'JOB - Integrations and Playbooks Health Check',
'previousAllRead': False, 'previousAllReadWrite': False,
'previousRoles': None, 'rawCategory': '',
'rawCloseReason': '', 'rawJSON': '',
'rawName': 'Incident with error', 'rawPhase': '',
'rawType': 'testing', 'reason': '',
'reminder': '0001-01-01T00:00:00Z', 'roles': None,
'runStatus': 'error', 'severity': 0, 'sla': 0,
'sortValues': ['_score'], 'sourceBrand': 'Manual',
'sourceInstance': 'admin', 'status': 1, 'type': 'testing',
'version': 13}, {
'CustomFields': {'dbotpredictionprobability': 0,
'detectionsla': {'accumulatedPause': 0,
'breachTriggered': False,
'dueDate': '0001-01-01T00:00:00Z',
'endDate': '0001-01-01T00:00:00Z',
'lastPauseDate': '0001-01-01T00:00:00Z',
'runStatus': 'idle',
'sla': 20,
'slaStatus': -1,
'startDate': '0001-01-01T00:00:00Z',
'totalDuration': 0},
'remediationsla': {'accumulatedPause': 0,
'breachTriggered': False,
'dueDate': '0001-01-01T00:00:00Z',
'endDate': '0001-01-01T00:00:00Z',
'lastPauseDate': '0001-01-01T00:00:00Z',
'runStatus': 'idle',
'sla': 7200,
'slaStatus': -1,
'startDate': '0001-01-01T00:00:00Z',
'totalDuration': 0},
'sourceusername': 'JohnJoe',
'timetoassignment': {
'accumulatedPause': 0,
'breachTriggered': False,
'dueDate': '0001-01-01T00:00:00Z',
'endDate': '0001-01-01T00:00:00Z',
'lastPauseDate': '0001-01-01T00:00:00Z',
'runStatus': 'idle', 'sla': 0,
'slaStatus': -1,
'startDate': '0001-01-01T00:00:00Z',
'totalDuration': 0},
'urlsslverification': []}, 'ShardID': 0,
'account': '', 'activated': '0001-01-01T00:00:00Z',
'allRead': False, 'allReadWrite': False,
'attachment': None, 'autime': 1601480646930752000,
'canvases': None, 'category': '', 'closeNotes': '',
'closeReason': '', 'closed': '0001-01-01T00:00:00Z',
'closingUserId': '',
'created': '2020-09-30T15:44:06.930751906Z',
'dbotCreatedBy': 'admin', 'dbotCurrentDirtyFields': None,
'dbotDirtyFields': None, 'dbotMirrorDirection': '',
'dbotMirrorId': '', 'dbotMirrorInstance': '',
'dbotMirrorLastSync': '0001-01-01T00:00:00Z',
'dbotMirrorTags': None, 'details': '', 'droppedCount': 0,
'dueDate': '2020-10-10T15:44:06.930751906Z',
'feedBased': False, 'hasRole': False, 'id': '48',
'investigationId': '48', 'isPlayground': False,
'labels': [{'type': 'Instance', 'value': 'admin'},
{'type': 'Brand', 'value': 'Manual'}],
'lastJobRunTime': '0001-01-01T00:00:00Z',
'lastOpen': '0001-01-01T00:00:00Z', 'linkedCount': 0,
'linkedIncidents': None,
'modified': '2020-09-30T15:46:35.843037049Z',
'name': 'Multiple Failed Logins',
'notifyTime': '2020-09-30T15:46:35.836929058Z',
'occurred': '2020-09-30T15:44:06.930751702Z',
'openDuration': 0, 'owner': 'admin', 'parent': '',
'phase': '',
'playbookId': 'Account Enrichment - Generic v2.1',
'previousAllRead': False, 'previousAllReadWrite': False,
'previousRoles': None, 'rawCategory': '',
'rawCloseReason': '', 'rawJSON': '',
'rawName': 'Multiple Failed Logins', 'rawPhase': '',
'rawType': 'Unclassified', 'reason': '',
'reminder': '0001-01-01T00:00:00Z', 'roles': None,
'runStatus': 'error', 'severity': 1, 'sla': 0,
'sortValues': ['_score'], 'sourceBrand': 'Manual',
'sourceInstance': 'admin', 'status': 1,
'type': 'Unclassified', 'version': 10}], 'total': 3},
'HumanReadable': None, 'ImportantEntryContext': None, 'EntryContext': None, 'IgnoreAutoExtract': False,
'ReadableContentsFormat': '', 'ContentsFormat': 'json', 'File': '', 'FileID': '', 'FileMetadata': None,
'System': '', 'Note': False, 'Evidence': False, 'EvidenceID': '', 'Tags': None,
'Metadata': {'id': '', 'version': 0, 'modified': '0001-01-01T00:00:00Z', 'sortValues': None, 'roles': None,
'allRead': False, 'allReadWrite': False, 'previousRoles': None, 'previousAllRead': False,
'previousAllReadWrite': False, 'hasRole': False, 'dbotCreatedBy': '', 'ShardID': 0, 'type': 1,
'created': '2020-10-03T12:39:59.908094336Z', 'retryTime': '0001-01-01T00:00:00Z', 'user': '',
'errorSource': '', 'contents': '', 'format': 'json', 'investigationId': '51', 'file': '',
'fileID': '', 'parentId': '156@51', 'pinned': False, 'fileMetadata': None,
'parentContent': '!getIncidents page="0" query="-status:closed and runStatus:error"',
'parentEntryTruncated': False, 'system': '', 'reputations': None, 'category': '', 'note': False,
'isTodo': False, 'tags': None, 'tagsRaw': None, 'startDate': '0001-01-01T00:00:00Z', 'times': 0,
'recurrent': False, 'endingDate': '0001-01-01T00:00:00Z', 'timezoneOffset': 0, 'cronView': False,
'scheduled': False, 'entryTask': None, 'taskId': '', 'playbookId': '', 'reputationSize': 0,
'contentsSize': 0, 'brand': 'Builtin', 'instance': 'Builtin', 'IndicatorTimeline': None,
'mirrored': False}, 'IndicatorTimeline': None}]
TASKS_RESULT = [
{'ModuleName': 'Demisto REST API_instance_1', 'Brand': 'Demisto REST API', 'Category': 'Utilities', 'ID': '',
'Version': 0, 'Type': 1, 'Contents': {'response': [{'ancestors': ['AutoFocusPolling'],
'arguments': {'additionalPollingCommandArgNames': '',
'additionalPollingCommandArgValues': '',
'ids': '', 'pollingCommand': '',
'pollingCommandArgName': 'ids'},
'comments': False, 'completedBy': 'DBot',
'completedDate': '2020-09-29T16:48:30.427891714Z',
'doNotSaveTaskHistory': True,
'dueDate': '0001-01-01T00:00:00Z', 'dueDateDuration': 0,
'entries': ['4@7', '5@7'],
'evidenceData': {'description': None, 'occurred': None,
'tags': None}, 'forEachIndex': 0,
'forEachInputs': None, 'id': '3', 'indent': 0,
'nextTasks': {'#none#': ['1']}, 'note': False, 'outputs': {},
'playbookInputs': None, 'previousTasks': {'#none#': ['0']},
'quietMode': 2, 'reputationCalc': 0,
'restrictedCompletion': False, 'scriptArguments': {
'additionalPollingCommandArgNames': {'complex': None,
'simple': '${inputs.AdditionalPollingCommandArgNames}'},
'additionalPollingCommandArgValues': {'complex': None,
'simple': '${inputs.AdditionalPollingCommandArgValues}'},
'ids': {'complex': None, 'simple': '${inputs.Ids}'},
'pollingCommand': {'complex': None, 'simple': '${inputs.PollingCommandName}'},
'pollingCommandArgName': {'complex': None, 'simple': '${inputs.PollingCommandArgName}'}},
'separateContext': False,
'startDate': '2020-09-29T16:48:30.324811804Z',
'state': 'Error', 'task': {
'brand': '', 'conditions': None,
'description': 'RunPollingCommand',
'id': 'c6a3af0a-cc78-4323-80c1-93d686010d86',
'isCommand': False,
'isLocked': False,
'modified': '2020-09-29T08:23:25.596407031Z',
'name': 'RunPollingCommand',
'playbookName': '',
'scriptId': 'RunPollingCommand',
'sortValues': None,
'type': 'regular', 'version': 1},
'taskCompleteData': [],
'taskId': 'c6a3af0a-cc78-4323-80c1-93d686010d86',
'type': 'regular',
'view': {'position': {'x': 50, 'y': 195}}}]},
'HumanReadable': None, 'ImportantEntryContext': None, 'EntryContext': None, 'IgnoreAutoExtract': False,
'ReadableContentsFormat': '', 'ContentsFormat': 'json', 'File': '', 'FileID': '', 'FileMetadata': None,
'System': '', 'Note': False, 'Evidence': False, 'EvidenceID': '', 'Tags': None,
'Metadata': {
'id': '', 'version': 0, 'modified': '0001-01-01T00:00:00Z', 'sortValues': None, 'roles': None,
'allRead': False, 'allReadWrite': False, 'previousRoles': None, 'previousAllRead': False,
'previousAllReadWrite': False, 'hasRole': False, 'dbotCreatedBy': '', 'ShardID': 0, 'type': 1,
'created': '2020-10-03T12:43:23.006018275Z', 'retryTime': '0001-01-01T00:00:00Z', 'user': '',
'errorSource': '', 'contents': '', 'format': 'json', 'investigationId': '51', 'file': '',
'fileID': '', 'parentId': '158@51', 'pinned': False, 'fileMetadata': None,
'parentContent': '!demisto-api-post uri="investigation/7/workplan/tasks" body='
'"{\\"states\\":[\\"Error\\"],\\"types\\":[\\"regular\\",\\"condition\\",\\"collection\\"]}"',
'parentEntryTruncated': False, 'system': '', 'reputations': None, 'category': '', 'note': False,
'isTodo': False, 'tags': None, 'tagsRaw': None, 'startDate': '0001-01-01T00:00:00Z', 'times': 0,
'recurrent': False, 'endingDate': '0001-01-01T00:00:00Z', 'timezoneOffset': 0, 'cronView': False,
'scheduled': False, 'entryTask': None, 'taskId': '', 'playbookId': '', 'reputationSize': 0,
'contentsSize': 0, 'brand': 'Demisto REST API', 'instance': 'Demisto REST API_instance_1',
'IndicatorTimeline': None, 'mirrored': False}, 'IndicatorTimeline': None}]
SERVER_URL = [{'ModuleName': 'CustomScripts',
'Brand': 'Scripts',
'Category': 'automation',
'ID': '', 'Version': 0,
'Type': 1,
'Contents': 'https://ec2-11-123-11-22.eu-west-1.compute.amazonaws.com//acc_test',
'HumanReadable': 'https://ec2-11-123-11-22.eu-west-1.compute.amazonaws.com//acc_test'}]
|
FWCore/Services/test/test_zombiekiller_fail_cfg.py | ckamtsikis/cmssw | 852 | 12641035 | <filename>FWCore/Services/test/test_zombiekiller_fail_cfg.py
import FWCore.ParameterSet.Config as cms
process = cms.Process("TEST")
process.source = cms.Source("EmptySource")
process.maxEvents = cms.untracked.PSet(input = cms.untracked.int32(-1))
process.stuck = cms.EDAnalyzer("StuckAnalyzer")
process.p = cms.Path(process.stuck)
process.add_(cms.Service("ZombieKillerService",
secondsBetweenChecks = cms.untracked.uint32(10),
numberOfAllowedFailedChecksInARow = cms.untracked.uint32(2)))
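# Informal note (not part of the original cfg): with the StuckAnalyzer blocking the
# path, the ZombieKillerService is expected to abort the job after roughly
# 2 failed checks x 10 s each -- hence the "fail" in this configuration's file name.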
|
code/utils/effective_loss_function.py | ricklentz/2dimageto3dmodel | 150 | 12641037 | # author: <NAME>
import torch
import torch.nn as nn
from ..camera.coordinate_system_transformation import CameraUtilities
from trilinear_interpolation import TrilinearInterpolation
from smooth_voxels import VoxelsSmooth
class EffectiveLossFunction(nn.Module):
    # Replaces the need for a differentiable point cloud projection PI(P, c): the loss projects the point cloud onto the image plane without explicit rendering.
def __init__(self, voxel_size=64, kernel_size=21, smooth_sigma=3.0):
super(EffectiveLossFunction, self).__init__()
self.voxel_size = voxel_size
self.kernel_size = kernel_size
self.register_buffer("sigma", torch.tensor(smooth_sigma))
def termination_probs(self, voxels, epsilon=1e-5):
"""
        :param voxels: smoothed voxel occupancy grid
        :param epsilon: small constant used to clamp occupancies away from 0 and 1
        :return: ray termination probabilities, including an extra background cell along the ray (depth) dimension
"""
"""
Before projecting the resulting volume to a plane, we need to ensure that the signal from the occluded points
does not interfere with the foreground points. To this end, we perform occlusion reasoning, similar to
Tulsiani et al. [20]. We convert the occupancies o to ray termination probabilities.
"""
# The occupancy function of the point cloud is a clipped sum of the individual per-point functions
per_point_functions = voxels.permute(1, 0, 2, 3)
occupancy_function = per_point_functions.clamp(epsilon, 1.0 - epsilon)
x = torch.log(1 - occupancy_function)
x_prim = torch.log(occupancy_function)
ray_termination_probs = torch.cumsum(x, dim=0)
zeros_matrix = voxels.new(1, occupancy_function.size(1), occupancy_function.size(2),
occupancy_function.size(3)).fill_(epsilon)
"""
Intuitively, a cell has high termination probability if its occupancy value is high and all previous occupancy
values are low. The additional background cell serves to ensure that the termination probabilities sum to 1.
"""
# Concatenates the given sequence of seq tensors in the given dimension
r1 = torch.cat([zeros_matrix, ray_termination_probs], dim=0)
# All tensors must either have the same shape (except in the concatenating dimension) or be empty.
r2 = torch.cat([x_prim, zeros_matrix], dim=0)
project_volume_to_the_plane = r1 + r2
return torch.exp(project_volume_to_the_plane).permute(1, 0, 2, 3)
def forward(self, point_cloud, rotation, scale=None):
"""
Projection based loss.
:param point_cloud: point cloud of interest
        :param rotation: camera rotation used to transform the point cloud into camera coordinates
        :param scale: optional scale factor applied during voxel smoothing; if None, the object is not rescaled
        :return: projection of the point cloud (ray termination probabilities summed along the depth dimension, background cell excluded)
"""
camera_utilities = CameraUtilities()
point_cloud = camera_utilities.transformation_3d_coord_to_camera_coord(point_cloud=point_cloud,
rotation=rotation, field_of_view=1.875,
camera_view_distance=2.0)
interpolation = TrilinearInterpolation()
voxels = interpolation.trilinear_interpolation(point_cloud=point_cloud)
voxels_smoothing = VoxelsSmooth()
smoothed_voxels = voxels_smoothing.smooth(voxels=voxels, kernels=(), scale=scale)
probs = self.termination_probs(smoothed_voxels)
return probs[:, :-1].sum(1).flip(1)
|
add_gitutf16.py | mayl8822/CommandTrayHost | 353 | 12641040 | # /usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import re
import sys
import codecs
import traceback
UTF16_GITCONFIG = '''[filter "utf16"]
clean = iconv -f utf-16le -t utf-8
smudge = iconv -f utf-8 -t utf-16le
required
'''
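# Usage note (assumption, not part of the original script): the [filter "utf16"]
# section only takes effect for paths that a repository maps to the filter via its
# .gitattributes file, e.g. a line such as
#   *.rc filter=utf16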
def main():
git_config_path = os.path.expandvars(r'%USERPROFILE%\.gitconfig')
if os.path.isfile(git_config_path):
with open(git_config_path, "rb") as f:
content = f.read().decode('utf-8').replace('\r\n', '\n')
else:
print(git_config_path, "not exist")
content = ''
if UTF16_GITCONFIG not in content:
print(f"No UTF16_GITCONFIG in {git_config_path}")
content = content + '\n' + UTF16_GITCONFIG if content else UTF16_GITCONFIG
content = content.replace('\n', '\r\n').encode('utf-8')
with open(git_config_path, "wb") as f:
f.write(content)
print("changed .gitconfig\n", content.decode('utf-8'))
else:
print('.gitconfig already includes [filter "utf16"]')
if __name__ == '__main__':
print(sys.version_info)
if (sys.version_info < (3, 0)):
sys.exit(2)
main()
|
src/obfuscapk/obfuscators/method_overload/method_overload.py | Elyorbe/Obfuscapk | 688 | 12641043 | <gh_stars>100-1000
#!/usr/bin/env python3
import logging
import random
from typing import List, Set
from obfuscapk import obfuscator_category
from obfuscapk import util
from obfuscapk.obfuscation import Obfuscation
class MethodOverload(obfuscator_category.ICodeObfuscator):
def __init__(self):
self.logger = logging.getLogger(
"{0}.{1}".format(__name__, self.__class__.__name__)
)
super().__init__()
self.is_adding_methods = True
self.param_types = ["Ljava/lang/String;", "Z", "B", "S", "C", "I", "F"]
def add_method_overloads_to_file(
self,
smali_file: str,
overloaded_method_body: str,
class_names_to_ignore: Set[str],
) -> int:
new_methods_num: int = 0
with util.inplace_edit_file(smali_file) as (in_file, out_file):
skip_remaining_lines = False
class_name = None
for line in in_file:
if skip_remaining_lines:
out_file.write(line)
continue
if not class_name:
class_match = util.class_pattern.match(line)
# If this is an enum class, skip it.
if " enum " in line:
skip_remaining_lines = True
out_file.write(line)
continue
elif class_match:
class_name = class_match.group("class_name")
if class_name in class_names_to_ignore:
# The methods of this class should be ignored when
# renaming, so proceed with the next class.
skip_remaining_lines = True
out_file.write(line)
continue
# Skip virtual methods, consider only the direct methods defined
# earlier in the file.
if line.startswith("# virtual methods"):
skip_remaining_lines = True
out_file.write(line)
continue
# Method declared in class.
method_match = util.method_pattern.match(line)
# Avoid constructors, native and abstract methods.
if (
method_match
and "<init>" not in line
and "<clinit>" not in line
and " native " not in line
and " abstract " not in line
):
# Create lists with random parameters to be added to the method
# signature. Add 3 overloads for each method and for each overload
# use 4 random params.
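                    # Illustrative example (the exact parameters are random): a direct
                    # method declared as
                    #   .method private doWork(I)I
                    # may gain overloads such as
                    #   .method private doWork(ILjava/lang/String;ZBS)V
                    # i.e. extra parameter types are appended and the overload's
                    # return type is forced to void.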
for params in util.get_random_list_permutations(
random.sample(self.param_types, 4)
)[:3]:
new_param = "".join(params)
# Update parameter list and add void return type.
overloaded_signature = line.replace(
"({0}){1}".format(
method_match.group("method_param"),
method_match.group("method_return"),
),
"({0}{1})V".format(
method_match.group("method_param"), new_param
),
)
out_file.write(overloaded_signature)
out_file.write(overloaded_method_body)
new_methods_num += 1
# Print original method.
out_file.write(line)
else:
out_file.write(line)
return new_methods_num
def add_method_overloads(
self,
smali_files: List[str],
class_names_to_ignore: Set[str],
max_methods_to_add: int,
interactive: bool = False,
):
overloaded_method_body = util.get_smali_method_overload()
added_methods = 0
for smali_file in util.show_list_progress(
smali_files,
interactive=interactive,
description="Inserting method overloads in smali files",
):
self.logger.debug(
'Inserting method overloads in file "{0}"'.format(smali_file)
)
if added_methods < max_methods_to_add:
added_methods += self.add_method_overloads_to_file(
smali_file, overloaded_method_body, class_names_to_ignore
)
else:
break
self.logger.debug("{0} new overloaded methods were added".format(added_methods))
def obfuscate(self, obfuscation_info: Obfuscation):
self.logger.info('Running "{0}" obfuscator'.format(self.__class__.__name__))
try:
# NOTE: only direct methods (methods that are by nature non-overridable,
# namely private instance methods, constructors and static methods) will be
# overloaded.
android_class_names: Set[str] = set(util.get_android_class_names())
# There is a method limit for dex files.
max_methods_to_add = obfuscation_info.get_remaining_methods_per_obfuscator()
if obfuscation_info.is_multidex():
for index, dex_smali_files in enumerate(
util.show_list_progress(
obfuscation_info.get_multidex_smali_files(),
interactive=obfuscation_info.interactive,
unit="dex",
description="Processing multidex",
)
):
max_methods_to_add = (
obfuscation_info.get_remaining_methods_per_obfuscator()[index]
)
self.add_method_overloads(
dex_smali_files,
android_class_names,
max_methods_to_add,
obfuscation_info.interactive,
)
else:
self.add_method_overloads(
obfuscation_info.get_smali_files(),
android_class_names,
max_methods_to_add,
obfuscation_info.interactive,
)
except Exception as e:
self.logger.error(
'Error during execution of "{0}" obfuscator: {1}'.format(
self.__class__.__name__, e
)
)
raise
finally:
obfuscation_info.used_obfuscators.append(self.__class__.__name__)
|
test/unit/test_model_optimizer.py | Zhiyuan-w/DeepReg | 379 | 12641046 | <reponame>Zhiyuan-w/DeepReg
# coding=utf-8
"""
Tests for deepreg/model/optimizer.py
pytest style
"""
import tensorflow as tf
import deepreg.model.optimizer as optimizer
class TestBuildOptimizer:
def test_build_optimizer_adam(self):
"""Build an Adam optimizer"""
opt_config = {"name": "Adam", "learning_rate": 1.0e-5}
opt_get = optimizer.build_optimizer(opt_config)
assert isinstance(opt_get, tf.keras.optimizers.Adam)
def test_build_optimizer_sgd(self):
"""Build an SGD optimizer"""
opt_config = {"name": "SGD"}
opt_get = optimizer.build_optimizer(opt_config)
assert isinstance(opt_get, tf.keras.optimizers.SGD)
|
cleanerversion/settings/sqlite.py | DocTocToc/cleanerversion | 121 | 12641077 | """
Django settings for CleanerVersion project.
"""
from .base import *
# Database
# https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'sqlite.db',
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
},
}
|
base/models/base_module.py | CJWBW/TediGAN | 219 | 12641127 | <reponame>CJWBW/TediGAN<filename>base/models/base_module.py
# python 3.7
"""Contains the base class for modules in a GAN model.
Commonly, GAN consists of two components, i.e., generator and discriminator.
In practice, however, more modules can be added, such as encoder.
"""
import os.path
import sys
import logging
import numpy as np
import torch
from . import model_settings
__all__ = ['BaseModule']
DTYPE_NAME_TO_TORCH_TENSOR_TYPE = {
'float16': torch.HalfTensor,
'float32': torch.FloatTensor,
'float64': torch.DoubleTensor,
'int8': torch.CharTensor,
'int16': torch.ShortTensor,
'int32': torch.IntTensor,
'int64': torch.LongTensor,
'uint8': torch.ByteTensor,
'bool': torch.BoolTensor,
}
def get_temp_logger(logger_name='logger'):
"""Gets a temporary logger.
This logger will print all levels of messages onto the screen.
Args:
logger_name: Name of the logger.
Returns:
A `logging.Logger`.
Raises:
ValueError: If the input `logger_name` is empty.
"""
if not logger_name:
raise ValueError(f'Input `logger_name` should not be empty!')
logger = logging.getLogger(logger_name)
if not logger.hasHandlers():
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('[%(asctime)s][%(levelname)s] %(message)s')
sh = logging.StreamHandler(stream=sys.stdout)
sh.setLevel(logging.DEBUG)
sh.setFormatter(formatter)
logger.addHandler(sh)
return logger
class BaseModule(object):
"""Base class for modules in GANs, like generator and discriminator.
NOTE: The module should be defined with pytorch, and used for inference only.
"""
def __init__(self, model_name, module_name, logger=None):
"""Initializes with specific settings.
The GAN model should be first registered in `model_settings.py` with proper
settings. Among them, some attributes are necessary, including:
(1) resolution: Resolution of the synthesis.
(2) image_channels: Number of channels of the synthesis. (default: 3)
(3) channel_order: Channel order of the raw synthesis. (default: `RGB`)
(4) min_val: Minimum value of the raw synthesis. (default -1.0)
(5) max_val: Maximum value of the raw synthesis. (default 1.0)
Args:
model_name: Name with which the GAN model is registered.
module_name: Name of the module, like `generator` or `discriminator`.
logger: Logger for recording log messages. If set as `None`, a default
logger, which prints messages from all levels onto the screen, will be
created. (default: None)
Raises:
AttributeError: If some necessary attributes are missing.
"""
self.model_name = model_name
self.module_name = module_name
self.logger = logger or get_temp_logger(model_name)
# Parse settings.
for key, val in model_settings.MODEL_POOL[model_name].items():
setattr(self, key, val)
self.use_cuda = model_settings.USE_CUDA and torch.cuda.is_available()
self.batch_size = model_settings.MAX_IMAGES_ON_DEVICE
self.ram_size = model_settings.MAX_IMAGES_ON_RAM
self.net = None
self.run_device = 'cuda' if self.use_cuda else 'cpu'
self.cpu_device = 'cpu'
# Check necessary settings.
self.check_attr('gan_type') # Should be specified in derived classes.
self.check_attr('resolution')
self.image_channels = getattr(self, 'image_channels', 3)
assert self.image_channels in [1, 3]
self.channel_order = getattr(self, 'channel_order', 'RGB').upper()
assert self.channel_order in ['RGB', 'BGR']
self.min_val = getattr(self, 'min_val', -1.0)
self.max_val = getattr(self, 'max_val', 1.0)
# Get paths.
self.weight_path = model_settings.get_weight_path(
f'{model_name}_{module_name}')
# Build graph and load pre-trained weights.
self.logger.info(f'Build network for module `{self.module_name}` in '
f'model `{self.model_name}`.')
self.model_specific_vars = []
self.build()
if os.path.isfile(self.weight_path):
self.load()
else:
self.logger.warning(f'No pre-trained weights will be loaded!')
# Change to inference mode and GPU mode if needed.
assert self.net
self.net.eval().to(self.run_device)
def check_attr(self, attr_name):
"""Checks the existence of a particular attribute.
Args:
attr_name: Name of the attribute to check.
Raises:
AttributeError: If the target attribute is missing.
"""
if not hasattr(self, attr_name):
raise AttributeError(f'Field `{attr_name}` is missing for '
f'module `{self.module_name}` in '
f'model `{self.model_name}`!')
def build(self):
"""Builds the graph."""
raise NotImplementedError(f'Should be implemented in derived class!')
def load(self):
"""Loads pre-trained weights."""
self.logger.info(f'Loading pytorch weights from `{self.weight_path}`.')
state_dict = torch.load(self.weight_path)
for var_name in self.model_specific_vars:
state_dict[var_name] = self.net.state_dict()[var_name]
self.net.load_state_dict(state_dict)
self.logger.info(f'Successfully loaded!')
def to_tensor(self, array):
"""Converts a `numpy.ndarray` to `torch.Tensor` on running device.
Args:
array: The input array to convert.
Returns:
A `torch.Tensor` whose dtype is determined by that of the input array.
Raises:
ValueError: If the array is with neither `torch.Tensor` type nor
`numpy.ndarray` type.
"""
dtype = type(array)
if isinstance(array, torch.Tensor):
tensor = array
elif isinstance(array, np.ndarray):
tensor_type = DTYPE_NAME_TO_TORCH_TENSOR_TYPE[array.dtype.name]
tensor = torch.from_numpy(array).type(tensor_type)
else:
raise ValueError(f'Unsupported input type `{dtype}`!')
tensor = tensor.to(self.run_device)
return tensor
def get_value(self, tensor):
"""Gets value of a `torch.Tensor`.
Args:
tensor: The input tensor to get value from.
Returns:
A `numpy.ndarray`.
Raises:
ValueError: If the tensor is with neither `torch.Tensor` type nor
`numpy.ndarray` type.
"""
dtype = type(tensor)
if isinstance(tensor, np.ndarray):
return tensor
if isinstance(tensor, torch.Tensor):
return tensor.to(self.cpu_device).detach().numpy()
raise ValueError(f'Unsupported input type `{dtype}`!')
def get_ont_hot_labels(self, num, labels=None):
"""Gets ont-hot labels for conditional generation.
Args:
num: Number of labels to generate.
labels: Input labels as reference to generate one-hot labels. If set as
`None`, label `0` will be used by default. (default: None)
Returns:
Returns `None` if `self.label_size` is 0, otherwise, a `numpy.ndarray`
      with shape [num, self.label_size] and dtype `np.int32`.
"""
self.check_attr('label_size')
if self.label_size == 0:
return None
if labels is None:
labels = 0
labels = np.array(labels).reshape(-1)
if labels.size == 1:
labels = np.tile(labels, (num,))
assert labels.shape == (num,)
for label in labels:
if label >= self.label_size or label < 0:
raise ValueError(f'Label should be smaller than {self.label_size}, '
f'but {label} is received!')
one_hot = np.zeros((num, self.label_size), dtype=np.int32)
one_hot[np.arange(num), labels] = 1
return one_hot
def get_batch_inputs(self, inputs, batch_size=None):
"""Gets inputs within mini-batch.
This function yields at most `self.batch_size` inputs at a time.
Args:
inputs: Input data to form mini-batch.
batch_size: Batch size. If not specified, `self.batch_size` will be used.
(default: None)
"""
total_num = inputs.shape[0]
batch_size = batch_size or self.batch_size
for i in range(0, total_num, batch_size):
yield inputs[i:i + batch_size]
def batch_run(self, inputs, run_fn):
"""Runs model with mini-batch.
This function splits the inputs into mini-batches, run the model with each
mini-batch, and then concatenate the outputs from all mini-batches together.
NOTE: The output of `run_fn` can only be `numpy.ndarray` or a dictionary
whose values are all `numpy.ndarray`.
Args:
inputs: The input samples to run with.
run_fn: A callable function.
Returns:
Same type as the output of `run_fn`.
Raises:
ValueError: If the output type of `run_fn` is not supported.
"""
if inputs.shape[0] > self.ram_size:
self.logger.warning(f'Number of inputs on RAM is larger than '
f'{self.ram_size}. Please use '
f'`self.get_batch_inputs()` to split the inputs! '
f'Otherwise, it may encounter OOM problem!')
results = {}
temp_key = '__temp_key__'
for batch_inputs in self.get_batch_inputs(inputs):
batch_outputs = run_fn(batch_inputs)
if isinstance(batch_outputs, dict):
for key, val in batch_outputs.items():
if not isinstance(val, np.ndarray):
raise ValueError(f'Each item of the model output should be with '
f'type `numpy.ndarray`, but type `{type(val)}` is '
f'received for key `{key}`!')
if key not in results:
results[key] = [val]
else:
results[key].append(val)
elif isinstance(batch_outputs, np.ndarray):
if temp_key not in results:
results[temp_key] = [batch_outputs]
else:
results[temp_key].append(batch_outputs)
else:
raise ValueError(f'The model output can only be with type '
f'`numpy.ndarray`, or a dictionary of '
f'`numpy.ndarray`, but type `{type(batch_outputs)}` '
f'is received!')
for key, val in results.items():
results[key] = np.concatenate(val, axis=0)
return results if temp_key not in results else results[temp_key]
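
  # Usage sketch (illustrative; `module` stands for an instance of a derived class
  # and `codes` for a NumPy array of inputs -- both are assumptions, not part of
  # this file):
  #   outputs = module.batch_run(
  #       codes, lambda x: module.get_value(module.net(module.to_tensor(x))))
  # batch_run() slices `codes` into chunks of `self.batch_size`, applies the callable
  # to each chunk, and concatenates the per-chunk arrays along axis 0.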
|
geoq/settings.py | kaydoh/geoq | 471 | 12641154 | <reponame>kaydoh/geoq
# -*- coding: utf-8 -*-
# This technical data was produced for the U. S. Government under Contract No. W15P7T-13-C-F600, and
# is subject to the Rights in Technical Data-Noncommercial Items clause at DFARS 252.227-7013 (FEB 2012)
import os
SITE_ROOT = os.path.dirname(os.path.realpath(__file__))
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '<KEY>'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ['localhost','127.0.0.1']
ADMINS = (
('Admin User', '<EMAIL>'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.contrib.gis.db.backends.postgis', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'geoq', # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': 'geoq',
'PASSWORD': '<PASSWORD>',
'HOST': 'localhost', # Empty for local through domain sockets or '127.0.0.1' for local through TCP.
'PORT': '5432', # Set to empty string for default.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/New_York'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = '/usr/local/src/geoq'
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = '/images/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_URL_FOLDER = '' # Can be set to something like 'geoq-test/' if the app is not run at root level
STATIC_ROOT = '{0}{1}'.format('/Users/srjones/www/static/', STATIC_URL_FOLDER)
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '{0}{1}'.format('/static/', STATIC_URL_FOLDER)
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(SITE_ROOT, 'static'),
# TODO: Should we add this static location back in?
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
#'django.contrib.staticfiles.finders.DefaultStorageFinder',
'compressor.finders.CompressorFinder',
)
#Change back to True after finishing development to verify it still works
COMPRESS_ENABLED = False
COMPRESS_PRECOMPILERS = (
('text/less', 'lessc {infile} {outfile}'),
)
LEAFLET_CSS = [
STATIC_URL + 'leaflet/leaflet-draw/leaflet.draw.css',
os.path.join(STATIC_ROOT, '/static/leaflet/leaflet-draw/leaflet.draw.css')
]
LEAFLET_CONFIG = {
'RESET_VIEW' : False,
'MAX_ZOOM' : 18,
'PLUGINS': {
'proj4js': {
'css': [],
'js': [STATIC_URL + 'leaflet/proj4-src.js', STATIC_URL + 'leaflet/proj4defs.js', STATIC_URL + 'leaflet/proj4leaflet.js'],
'repo': 'https://github.com/proj4js'
},
'draw': {
'css': LEAFLET_CSS,
'js': STATIC_URL + 'leaflet/leaflet-draw/leaflet.draw-src.js',
'repo': 'https://github.com/Leaflet/Leaflet.draw'
},
'esri': {
'css': [],
'js': [STATIC_URL + 'leaflet/esri-leaflet-src.js'],
'repo': 'https://github.com/Esri/esri-leaflet'
},
'esriCluster': {
'css': [STATIC_URL + 'leaflet/MarkerCluster.css'],
'js': [STATIC_URL + 'leaflet/ClusteredFeatureLayer.js', STATIC_URL + 'leaflet/leaflet.markercluster.js'],
'repo': 'https://github.com/Esri/esri-leaflet'
},
'MakiMarkers': {
'css': [],
'js': [STATIC_URL + 'leaflet/Leaflet.MakiMarkers.js'],
'repo': 'https://github.com/jseppi/Leaflet.MakiMarkers'
},
'MediaQ': {
'css': [],
'js': [STATIC_URL + 'leaflet/Leaflet.MediaQ.js'],
'repo': 'https://github.com/stephenrjones/Leaflet.MediaQ'
},
'AutoResizeSVG': {
'css': [],
'js': [STATIC_URL + 'leaflet/marker-resize-svg.js'],
'repo': 'https://github.com/john-kilgo/L.Marker.AutoResizeSVG'
},
'NWSIcons': {
'css': [],
'js': [STATIC_URL + 'leaflet/nws-leaflet.js'],
'repo': 'https://github.com/john-kilgo/L.Marker.NWS'
},
'OpenSensorHub': {
'css': [],
'js': [STATIC_URL + 'leaflet/Leaflet.SOS.min.js'],
'repo': 'https://github.com/opensensorhub/osh-js'
},
'WCS': {
'css': [],
'js': [STATIC_URL + 'leaflet/NonTiledLayer.WCS.js'],
'repo': 'https://github.com/stuartmatthews/Leaflet.NonTiledLayer.WCS'
},
'WMSHeader': {
'css': [],
'js': [STATIC_URL + 'leaflet/leaflet-plugins/layer/tile/leaflet-wms-header.js'],
            'repo': 'https://github.com/ticinum-aerospace/leaflet-wms-header'
}
}
}
# List of callables that know how to import templates from various sources
# Location of template files
#TEMPLATE_DIRS = (
# os.path.join(SITE_ROOT, 'templates'),
# SITE_ROOT,
#)
#.
#TEMPLATE_LOADERS = (
# 'django.template.loaders.filesystem.Loader',
# 'django.template.loaders.app_directories.Loader',
# #'django.template.loaders.eggs.Loader',
#)
#
#TEMPLATE_CONTEXT_PROCESSORS = (
## 'django.contrib.auth.context_processors.auth',
# 'django.core.context_processors.request',
# 'django.core.context_processors.static',
# 'django.contrib.messages.context_processors.messages',
# 'geoq.core.contextprocessors.app_settings',
#)
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [ os.path.join(SITE_ROOT, 'templates'),
SITE_ROOT ],
'OPTIONS': {
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
#'django.template.loaders.eggs.Loader'
],
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'geoq.core.contextprocessors.app_settings'
]
}
}
]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
'django.contrib.gis',
'django.contrib.humanize',
'django_select2',
'reversion',
'easy_thumbnails',
'userena',
'guardian',
'compressor',
'geoexplorer',
'bootstrap_toolkit',
'leaflet',
'jsonfield',
'crispy_forms',
'django_extensions',
'debug_toolbar',
#'httpproxy',
'bootstrap3',
#'feedgen',
'geoq.feedback.apps.FeedbackConfig',
'geoq.accounts.apps.AccountsConfig',
'geoq.locations.apps.LocationsConfig',
'geoq.mage.apps.MageConfig',
'geoq.mgrs.apps.MgrsConfig',
'geoq.proxy.apps.ProxyConfig',
'geoq.training.apps.TrainingConfig',
'geoq.core.apps.CoreConfig',
'geoq.maps.apps.MapsConfig',
'geoq.workflow.apps.WorkflowConfig',
'geoq.ontology.apps.OntologyConfig'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'debug_toolbar.middleware.DebugToolbarMiddleware'
]
# removed middleware
# 'geoq.core.middleware.UserPermsMiddleware',
# 'geoq.core.middleware.Http403Middleware',
# 'geoq.core.middleware.UpdateLastActivityMiddleware',
# auth setup
AUTHENTICATION_BACKENDS = (
'userena.backends.UserenaAuthenticationBackend',
'guardian.backends.ObjectPermissionBackend',
'django.contrib.auth.backends.ModelBackend', # default
)
SITE_ID = 1
ANONYMOUS_USER_NAME = "ANONYMOUS_USER_NAME"
AUTH_PROFILE_MODULE = 'accounts.UserProfile'
LOGIN_REDIRECT_URL = '/accounts/%(username)s/' #'/geoq/' #
LOGIN_URL = '/accounts/signin/'
LOGOUT_URL = '/geoq'
EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend'
USERENA_ACTIVATION_DAYS = 3
USERENA_MUGSHOT_DEFAULT = 'identicon'
USERENA_HIDE_EMAIL = True
USERENA_HTML_EMAIL = False
ROOT_URLCONF = 'geoq.urls'
WSGI_APPLICATION = 'geoq.wsgi.application'
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING_CONFIG = None
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
# take out later
REST_FRAMEWORK = {
'UNAUTHENTICATED_USER': None,
}
# Set default login location
#LOGIN_REDIRECT_URL = '/'
# Gamification variables
GAMIFICATION_SERVER = ''
GAMIFICATION_PROJECT = 'geoq'
#GeoServer
GEOSERVER_WFS_JOB_LAYER = None
# For Django Debug Toolbar - need to set this to resolve some errors
DEBUG_TOOLBAR_PATCH_SETTINGS = False
# Able to vary what we call workcells
GEOQ_LEXICON = {
'WORKCELL_NAME': 'Target'
}
# Bootstrap variables to work with django-bootstrap-toolkit
# Comment these out to use cdnjs.cloudflare.com versions of Bootstrap
BOOTSTRAP_BASE_URL = STATIC_URL
BOOTSTRAP_JS_BASE_URL = BOOTSTRAP_BASE_URL + 'bootstrap/js/'
BOOTSTRAP_JS_URL = BOOTSTRAP_JS_BASE_URL + 'bootstrap.min.js'
BOOTSTRAP_CSS_BASE_URL = BOOTSTRAP_BASE_URL + 'bootstrap/css/'
BOOTSTRAP_CSS_URL = BOOTSTRAP_CSS_BASE_URL + 'bootstrap.css'
#Time to check if users online (in milliseconds)
ONLINE_TIME = 10 * 60 * 1000
########## Select2 Settings
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'default-cache',
},
'select2': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'select2-cache',
}
}
SELECT2_CACHE_BACKEND = 'select2'
########## MAGE Settings
MAGE_USERNAME = 'username'
MAGE_UID = '12345'
MAGE_PASSWORD = 'password'
MAGE_URL = 'https://mage.server.com/api'
########## End MAGE Settings
########## DEBUG TOOLBAR CONFIGURATION
DEBUG_TOOLBAR_PATCH_SETTINGS = False
DEBUG_TOOLBAR_PANELS = [
'debug_toolbar.panels.request.RequestPanel',
'debug_toolbar.panels.headers.HeadersPanel',
'debug_toolbar.panels.staticfiles.StaticFilesPanel',
'debug_toolbar.panels.templates.TemplatesPanel',
]
INTERNAL_IPS = ['127.0.0.1']
########## COMPRESSION CONFIGURATION
# COMPRESS_ENABLED = True
# Default : the opposite of DEBUG
# see https://github.com/jezdez/django_compressor/issues/226
COMPRESS_OUTPUT_DIR = 'STATIC_CACHE'
# See: http://django_compressor.readthedocs.org/en/latest/settings/#django.conf.settings.COMPRESS_OFFLINE
COMPRESS_OFFLINE = False
# See: http://django_compressor.readthedocs.org/en/latest/settings/#django.conf.settings.COMPRESS_STORAGE
COMPRESS_STORAGE = 'compressor.storage.CompressorFileStorage'
# See: http://django_compressor.readthedocs.org/en/latest/settings/#django.conf.settings.COMPRESS_CSS_FILTERS
COMPRESS_CSS_FILTERS = [
'compressor.filters.cssmin.CSSMinFilter',
]
# See: http://django_compressor.readthedocs.org/en/latest/settings/#django.conf.settings.COMPRESS_JS_FILTERS
COMPRESS_JS_FILTERS = [
'compressor.filters.jsmin.JSMinFilter',
]
COMPRESS_DEBUG_TOGGLE = 'nocompress'
COMPRESS_JS_COMPRESSOR = 'compressor.js.JsCompressor'
COMPRESS_CSS_COMPRESSOR = 'compressor.css.CssCompressor'
COMPRESS_PARSER = 'compressor.parser.AutoSelectParser'
COMPRESS_URL = STATIC_URL
COMPRESS_ROOT = STATIC_ROOT
COMPRESS_VERBOSE = False
COMPRESS_CACHEABLE_PRECOMPILERS = (
'text/coffeescript',
)
########## END COMPRESSION CONFIGURATION
########## BOOTSTRAP 3 CONFIGURATION
# Default settings
BOOTSTRAP3 = {
# The URL to the jQuery JavaScript file
'jquery_url': STATIC_URL + 'jquery/jquery.min.js',
# The Bootstrap base URL
'base_url': STATIC_URL + 'bootstrap/',
# The complete URL to the Bootstrap CSS file (None means derive it from base_url)
'css_url': STATIC_URL + 'bootstrap/css/bootstrap.css',
# The complete URL to the Bootstrap CSS file (None means no theme)
'theme_url': STATIC_URL + 'bootstrap/css/bootstrap-theme.css',
# The complete URL to the Bootstrap JavaScript file (None means derive it from base_url)
'javascript_url': STATIC_URL + 'bootstrap/js/bootstrap.min.js',
# Put JavaScript in the HEAD section of the HTML document (only relevant if you use bootstrap3.html)
'javascript_in_head': False,
# Include jQuery with Bootstrap JavaScript (affects django-bootstrap3 template tags)
'include_jquery': False,
# Label class to use in horizontal forms
'horizontal_label_class': 'col-md-3',
# Field class to use in horizontal forms
'horizontal_field_class': 'col-md-9',
# Set HTML required attribute on required fields, for Django <= 1.8 only
'set_required': True,
# Set HTML disabled attribute on disabled fields, for Django <= 1.8 only
'set_disabled': False,
# Set placeholder attributes to label if no placeholder is provided
'set_placeholder': True,
# Class to indicate required (better to set this in your Django form)
'required_css_class': '',
# Class to indicate error (better to set this in your Django form)
'error_css_class': 'has-error',
# Class to indicate success, meaning the field has valid input (better to set this in your Django form)
'success_css_class': 'has-success',
# Renderers (only set these if you have studied the source and understand the inner workings)
'formset_renderers':{
'default': 'bootstrap3.renderers.FormsetRenderer',
},
'form_renderers': {
'default': 'bootstrap3.renderers.FormRenderer',
},
'field_renderers': {
'default': 'bootstrap3.renderers.FieldRenderer',
'inline': 'bootstrap3.renderers.InlineFieldRenderer',
},
}
########## END BOOTSTRAP 3 CONFIGURATION
# Special case
IMAGE_TRACKING = False
# For KML uploads
KML_REPOSITORY_ROOT = 'kml/'
# initialize apps
#django.setup()
# Override production settings with local settings if they exist
#try:
# from local_settings import *
#
#except ImportError, e:
# # local_settings does not exist
# pass
|
graphs/models/custom_layers/denseblock.py | algocompretto/template-deep-learning | 696 | 12641182 | <reponame>algocompretto/template-deep-learning
"""
Definitions of the custom blocks used in the CondenseNet model.
"""
import torch
import torch.nn as nn
from graphs.models.custom_layers.learnedgroupconv import LearnedGroupConv
class DenseBlock(nn.Sequential):
def __init__(self, num_layers, in_channels, growth_rate, config):
super().__init__()
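        # Channel bookkeeping: every DenseLayer concatenates `growth_rate` new feature
        # maps onto its input, so the block as a whole outputs
        # in_channels + num_layers * growth_rate channels.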
for layer_id in range(num_layers):
layer = DenseLayer(in_channels=in_channels + (layer_id * growth_rate), growth_rate=growth_rate, config=config)
self.add_module('dense_layer_%d' % (layer_id + 1), layer)
class DenseLayer(nn.Module):
def __init__(self, in_channels, growth_rate, config):
super().__init__()
self.config = config
self.conv_bottleneck = self.config.conv_bottleneck
self.group1x1 = self.config.group1x1
self.group3x3 = self.config.group3x3
self.condense_factor = self.config.condense_factor
self.dropout_rate = self.config.dropout_rate
# 1x1 conv in_channels --> bottleneck*growth_rate
self.conv_1 = LearnedGroupConv(in_channels=in_channels, out_channels=self.conv_bottleneck * growth_rate, kernel_size=1,
groups=self.group1x1, condense_factor=self.condense_factor, dropout_rate=self.dropout_rate)
self.batch_norm = nn.BatchNorm2d(self.conv_bottleneck * growth_rate)
self.relu = nn.ReLU(inplace=True)
# 3x3 conv bottleneck*growth_rate --> growth_rate
self.conv_2 = nn.Conv2d(in_channels=self.conv_bottleneck * growth_rate, out_channels=growth_rate, kernel_size=3, padding=1, stride=1, groups=self.group3x3, bias=False)
def forward(self, x):
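        # Shape walk-through (illustrative): with C input channels, conv_1 produces
        # conv_bottleneck * growth_rate channels, conv_2 reduces them to growth_rate
        # channels, and the final concatenation returns C + growth_rate channels.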
out = self.conv_1(x)
out = self.batch_norm(out)
out = self.relu(out)
out = self.conv_2(out)
return torch.cat([x, out], 1)
|
samples/vsphere/oauth/grant_types/list_vms_client_credentials.py | CypherLegacy/anamorphicBI | 589 | 12641193 | #!/usr/bin/env python
"""
* *******************************************************
* Copyright (c) VMware, Inc. 2020. All Rights Reserved.
* SPDX-License-Identifier: MIT
* *******************************************************
*
* DISCLAIMER. THIS PROGRAM IS PROVIDED TO YOU "AS IS" WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, WHETHER ORAL OR WRITTEN,
* EXPRESS OR IMPLIED. THE AUTHOR SPECIFICALLY DISCLAIMS ANY IMPLIED
* WARRANTIES OR CONDITIONS OF MERCHANTABILITY, SATISFACTORY QUALITY,
* NON-INFRINGEMENT AND FITNESS FOR A PARTICULAR PURPOSE.
"""
from vmware.vapi.vsphere.client import create_vsphere_client
from samples.vsphere.common import sample_cli
from samples.vsphere.common import sample_util
from samples.vsphere.common.ssl_helper import get_unverified_session
from samples.vsphere.oauth.grant_types.oauth_utility \
import login_using_client_credentials
import argparse
__author__ = 'VMware, Inc.'
__copyright__ = 'Copyright 2020 VMware, Inc. All rights reserved.'
__vcenter_version__ = '7.0+'
"""
To run this sample,
$ python list_vms_client_credentials.py --server <VC_IP> \
-- client_id <client_id> --client_secret <client_secret> --skipverification
"""
parser = argparse.ArgumentParser()
parser.add_argument("--server",
help="VC IP or hostname")
parser.add_argument("--client_id",
help="Client/Application ID of the server to server app")
parser.add_argument("--client_secret",
help="Client/Application secret \
of the server to server app")
parser.add_argument('--skipverification',
action='store_true',
                    help='Skip server certificate verification when connecting to vc.')
args = parser.parse_args()
session = get_unverified_session() if args.skipverification else None
saml_assertion = login_using_client_credentials(
args.server,
session,
args.client_id,
args.client_secret)
client = create_vsphere_client(
server=args.server,
bearer_token=saml_assertion,
session=session)
vms = client.vcenter.VM.list()
print(vms)
|
cli/tests/pcluster3_config_converter/test_pcluster3_config_converter.py | enrico-usai/cfncluster | 415 | 12641217 | <filename>cli/tests/pcluster3_config_converter/test_pcluster3_config_converter.py
# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
# with the License. A copy of the License is located at http://aws.amazon.com/apache2.0/
# or in the "LICENSE.txt" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
import pytest
import yaml
from assertpy import assert_that
from pcluster3_config_converter.pcluster3_config_converter import Pcluster3ConfigConverter
from tests.pcluster3_config_converter import test_data
@pytest.mark.parametrize(
"expected_input, expected_output, warn",
[
(
"pcluster.config.ini",
"pcluster.config.yaml",
[
"Note: Volume encrypted defaults to True in AWS ParallelCluster version 3 while it defaults to "
"False in AWS ParallelCluster version 2.",
"Note: In AWS ParallelCluster version 3, access to the Instance Metadata Service(IMDS) on the head "
"node is restricted to the cluster administrator. If additional users required access to IMDS, you "
"can set HeadNode/Imds/Secured to False.",
"Warning: Parameter vpc_id = vpc-12345678 is no longer supported. Ignoring it during conversion.",
"Warning: Parameter update_check = true is no longer supported. Ignoring it during conversion.",
"Warning: Parameter ssh = ssh {CFN_USER}@{MASTER_IP} {ARGS} is no longer supported. Ignoring it "
"during conversion.",
"Warning: Parameter encrypted_ephemeral = true is no longer supported. Ignoring it during conversion.",
"Warning: additional_iam_policies = arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess is added to "
"both headnode and scheduling sections. Please review the configuration file after conversion "
"and decide whether to further trim down the permissions and specialize.",
"Warning: pre_install = s3://testbucket/scripts/pre_install.sh is added to both headnode and "
"scheduling sections. Please review the configuration file after conversion and decide whether to "
"further trim down the permissions and specialize.",
"Warning: post_install = s3://testbucekt/scripts/post_install.sh is added to both headnode and "
"scheduling sections. Please review the configuration file after conversion and decide whether "
"to further trim down the permissions and specialize.",
],
),
],
)
def test_pcluster3_config_converter_command(test_datadir, tmpdir, expected_input, expected_output, warn):
config_file_path = os.path.join(str(test_datadir), expected_input)
args = [
"pcluster3-config-converter",
"--config-file",
config_file_path,
"--output-file",
tmpdir / "pcluster.config.yaml",
]
result = subprocess.run(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding="utf-8")
_assert_files_are_equal(
tmpdir / "pcluster.config.yaml",
test_datadir / expected_output,
)
for message in warn:
assert_that(result.stdout).contains(message)
@pytest.mark.parametrize(
"expected_input, expected_output, warn, error, force_convert, cluster_label",
[
(
"slurm_full.ini",
"slurm_full.yaml",
[
"Note: Volume encrypted defaults to True in AWS ParallelCluster version 3 while it defaults to False "
"in AWS ParallelCluster version 2.",
"Note: In AWS ParallelCluster version 3, access to the Instance Metadata Service(IMDS) on the head "
"node is restricted to the cluster administrator. If additional users required access to IMDS, you "
"can set HeadNode/Imds/Secured to False.",
"Warning: Parameter vpc_id = vpc-0e0f223cc35256b9a is no longer supported. Ignoring it "
"during conversion.",
"Warning: Parameter update_check = true is no longer supported. Ignoring it during conversion.",
"Warning: Parameter ssh = ssh {CFN_USER}@{MASTER_IP} {ARGS} is no longer supported. Ignoring it "
"during conversion.",
"Warning: Parameter encrypted_ephemeral = true is no longer supported. Ignoring it during conversion.",
"Warning: additional_iam_policies = arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess is added to both "
"headnode and scheduling sections. Please review the configuration file after conversion and decide "
"whether to further trim down the permissions and specialize.",
"Warning: s3_read_write_resource = arn:aws:s3:::test/hello/* is added to both headnode and scheduling "
"sections. Please review the configuration file after conversion and decide whether to further trim "
"down the permissions and specialize.",
"Warning: s3_read_resource = arn:aws:s3:::testbucket/* is added to both headnode and scheduling "
"sections. Please review the configuration file after conversion and decide whether to further trim "
"down the permissions and specialize.",
"Warning: pre_install = s3://testbucket/pre_install.sh is added to both headnode and scheduling "
"sections. Please review the configuration file after conversion and decide whether to further trim "
"down the permissions and specialize.",
"Warning: post_install = s3://testbucket/post_install.sh is added to both headnode and scheduling "
"sections. Please review the configuration file after conversion and decide whether to further trim "
"down the permissions and specialize.",
"Warning: proxy_server = https://x.x.x.x:8080 is added to both headnode and scheduling sections. "
"Please review the configuration file after conversion and decide whether to further trim down the "
"permissions and specialize.",
"Warning: additional_sg = sg-xxxxxx is added to both headnode and scheduling sections. Please review "
"the configuration file after conversion and decide whether to further trim down the permissions and "
"specialize.",
"Warning: vpc_security_group_id = sg-xxxxxx is added to both headnode and scheduling sections. Please "
"review the configuration file after conversion and decide whether to further trim down the "
"permissions and specialize.",
"Warning: Parameters ['extra_json', 'custom_chef_cookbook', 'template_url', 'instance_types_data'] "
"are not officially supported and not recommended.",
"Warning: Duplicate names 'custom1' are not allowed in the SharedStorage section. Please change them "
"before cluster creation.",
"Warning: '_' is not allowed in the name of 'compute_resource ondemand_i1'. Please rename it before "
"cluster creation.",
"Warning: '_' is not allowed in the name of 'compute_resource ondemand_i3'. Please rename it before "
"cluster creation.",
"Warning: Parameter initial_count = 2 is no longer supported. Ignoring it during conversion.",
"Warning: '_' is not allowed in the name of 'compute_resource ondemand_i2'. Please rename it before "
"cluster creation.",
],
None,
True,
"default",
),
(
"slurm_required.ini",
"slurm_required.yaml",
[
"Note: Volume encrypted defaults to True in AWS ParallelCluster version 3 while it defaults to False "
"in AWS ParallelCluster version 2.",
"Note: In AWS ParallelCluster version 3, access to the Instance Metadata Service(IMDS) on the head "
"node is restricted to the cluster administrator. If additional users required access to IMDS, you "
"can set HeadNode/Imds/Secured to False.",
"Warning: Parameter vpc_id = vpc-123 is no longer supported. Ignoring it during conversion.",
"Warning: Parameter update_check = true is no longer supported. Ignoring it during conversion.",
"Warning: Parameter ssh = ssh {CFN_USER}@{MASTER_IP} {ARGS} is no longer supported. Ignoring it "
"during conversion.",
],
None,
False,
"cluster_label1",
),
(
"awsbatch_required.ini",
"awsbatch_required.yaml",
[
"Note: Volume encrypted defaults to True in AWS ParallelCluster version 3 while it defaults to False "
"in AWS ParallelCluster version 2.",
"Warning: Parameter vpc_id = vpc-0e0f223cc35256b9a is no longer supported. Ignoring it "
"during conversion.",
"Warning: Parameter update_check = true is no longer supported. Ignoring it during conversion.",
"Warning: Parameter ssh = ssh {CFN_USER}@{MASTER_IP} {ARGS} is no longer supported. Ignoring it "
"during conversion.",
"Warning: Parameter sanity_check = false is no longer supported, please specify "
"`--suppress-validators ALL` during cluster creation.",
],
None,
False,
None,
),
(
"awsbatch_full.ini",
"awsbatch_full.yaml",
[
"Note: Volume encrypted defaults to True in AWS ParallelCluster version 3 while it defaults to False "
"in AWS ParallelCluster version 2.",
"Warning: Parameter vpc_id = vpc-0e0f223cc35256b9a is no longer supported. Ignoring it "
"during conversion.",
"Warning: Parameter update_check = true is no longer supported. Ignoring it during conversion.",
"Warning: Parameter ssh = ssh {CFN_USER}@{MASTER_IP} {ARGS} is no longer supported. Ignoring it "
"during conversion.",
"Warning: Parameter encrypted_ephemeral = true is no longer supported. Ignoring it during conversion.",
"Warning: Parameter sanity_check = false is no longer supported, please specify "
"`--suppress-validators ALL` during cluster creation.",
"Warning: s3_read_resource = arn:aws:s3:::testbucket/* is added to both headnode and scheduling "
"sections. Please review the configuration file after conversion and decide whether to further trim "
"down the permissions and specialize.",
"Warning: disable_hyperthreading = true is added to both headnode and scheduling sections. Please "
"review the configuration file after conversion and decide whether to further trim down the "
"permissions and specialize.",
"Warning: pre_install = s3://testbucket/pre_install.sh is added to both headnode and scheduling "
"sections. Please review the configuration file after conversion and decide whether to further trim "
"down the permissions and specialize.",
"Warning: post_install = s3://testbucket/post_install.sh is added to both headnode and scheduling "
"sections. Please review the configuration file after conversion and decide whether to further trim "
"down the permissions and specialize.",
"Warning: proxy_server = https://x.x.x.x:8080 is added to both headnode and scheduling sections. "
"Please review the configuration file after conversion and decide whether to further trim down the "
"permissions and specialize.",
"Warning: additional_sg = sg-xxxxxx is added to both headnode and scheduling sections. Please review "
"the configuration file after conversion and decide whether to further trim down the permissions and "
"specialize.",
"Warning: vpc_security_group_id = sg-xxxxxx is added to both headnode and scheduling sections. Please "
"review the configuration file after conversion and decide whether to further trim down the "
"permissions and specialize.",
"Warning: Parameters ['extra_json'] are not officially supported and not recommended.",
"Warning: Duplicate names 'custom1' are not allowed in the SharedStorage section. Please change them "
"before cluster creation.",
],
None,
True,
"default",
),
(
"slurm_full.ini",
None,
[
"Note: Volume encrypted defaults to True in AWS ParallelCluster version 3 while it defaults to False "
"in AWS ParallelCluster version 2.",
"Note: In AWS ParallelCluster version 3, access to the Instance Metadata Service(IMDS) on the head "
"node is restricted to the cluster administrator. If additional users required access to IMDS, you "
"can set HeadNode/Imds/Secured to False.",
"Warning: Parameter vpc_id = vpc-0e0f223cc35256b9a is no longer supported. Ignoring it "
"during conversion.",
"Warning: Parameter update_check = true is no longer supported. Ignoring it during conversion.",
"Warning: Parameter ssh = ssh {CFN_USER}@{MASTER_IP} {ARGS} is no longer supported. Ignoring it during "
"conversion.",
"Warning: Parameter encrypted_ephemeral = true is no longer supported. Ignoring it during conversion.",
"Warning: additional_iam_policies = arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess is added to both "
"headnode and scheduling sections. Please review the configuration file after conversion and decide "
"whether to further trim down the permissions and specialize.",
"Warning: s3_read_write_resource = arn:aws:s3:::test/hello/* is added to both headnode and scheduling "
"sections. Please review the configuration file after conversion and decide whether to further trim "
"down the permissions and specialize.",
"Warning: s3_read_resource = arn:aws:s3:::testbucket/* is added to both headnode and scheduling "
"sections. Please review the configuration file after conversion and decide whether to further trim "
"down the permissions and specialize.",
"Warning: pre_install = s3://testbucket/pre_install.sh is added to both headnode and scheduling "
"sections. Please review the configuration file after conversion and decide whether to further trim "
"down the permissions and specialize.",
"Warning: post_install = s3://testbucket/post_install.sh is added to both headnode and scheduling "
"sections. Please review the configuration file after conversion and decide whether to further trim "
"down the permissions and specialize.",
"Warning: proxy_server = https://x.x.x.x:8080 is added to both headnode and scheduling sections. "
"Please review the configuration file after conversion and decide whether to further trim down the "
"permissions and specialize.",
"Warning: additional_sg = sg-xxxxxx is added to both headnode and scheduling sections. Please review "
"the configuration file after conversion and decide whether to further trim down the permissions and "
"specialize.",
"Warning: vpc_security_group_id = sg-xxxxxx is added to both headnode and scheduling sections. Please "
"review the configuration file after conversion and decide whether to further trim down the "
"permissions and specialize.",
],
"ERROR: ['extra_json', 'custom_chef_cookbook', 'template_url', 'instance_types_data'] are not officially "
"supported and not recommended. If you want to proceed with conversion, please specify `--force-convert` "
"and rerun the command.",
False,
None,
),
(
"compute_subnet_cidr.ini",
None,
None,
"ERROR: Parameter compute_subnet_cidr = 0.0.0.0/16 is no longer supported. Please remove it and run the "
"converter again.",
False,
None,
),
(
"missing_vpc.ini",
None,
None,
"Missing vpc_settings in the configuration file",
False,
None,
),
(
"slurm_full.ini",
None,
None,
"The specified cluster section is not in the configuration.",
False,
"invalid_cluster_label",
),
(
"slurm_requred.ini",
None,
None,
"Can not find a valid cluster section.",
False,
None,
),
(
"sit_base.ini",
"sit_base.yaml",
[
"Note: Volume encrypted defaults to True in AWS ParallelCluster version 3 while it defaults to False "
"in AWS ParallelCluster version 2.",
"Note: In AWS ParallelCluster version 3, access to the Instance Metadata Service(IMDS) on the head "
"node is restricted to the cluster administrator. If additional users required access to IMDS, you "
"can set HeadNode/Imds/Secured to False.",
"Warning: Parameter vpc_id = vpc-12345678 is no longer supported. Ignoring it during conversion.",
"Warning: Parameter update_check = false is no longer supported. Ignoring it during conversion.",
"Warning: Parameter ssh = ssh {CFN_USER}@{MASTER_IP} {ARGS} is no longer supported. Ignoring it during "
"conversion.",
],
None,
False,
None,
),
(
"sit_full.ini",
"sit_full.yaml",
[
"Note: Volume encrypted defaults to True in AWS ParallelCluster version 3 while it defaults to False "
"in AWS ParallelCluster version 2.",
"Note: In AWS ParallelCluster version 3, access to the Instance Metadata Service(IMDS) on the head "
"node is restricted to the cluster administrator. If additional users required access to IMDS, you "
"can set HeadNode/Imds/Secured to False.",
"Warning: Parameter vpc_id = vpc-12345678 is no longer supported. Ignoring it during conversion.",
"Warning: Parameter update_check = false is no longer supported. Ignoring it during conversion.",
"Warning: Parameter ssh = ssh {CFN_USER}@{MASTER_IP} {ARGS} is no longer supported. Ignoring it during "
"conversion.",
"Warning: s3_read_write_resource = arn:aws:s3:::test/hello/* is added to both headnode and scheduling "
"sections. Please review the configuration file after conversion and decide whether to further trim "
"down the permissions and specialize.",
"Warning: s3_read_resource = arn:aws:s3:::testbucket/* is added to both headnode and scheduling "
"sections. Please review the configuration file after conversion and decide whether to further trim "
"down the permissions and specialize.",
"Warning: disable_hyperthreading = false is added to both headnode and scheduling sections. Please "
"review the configuration file after conversion and decide whether to further trim down the "
"permissions and specialize.",
"Warning: pre_install = s3://testbucket/pre_install.sh is added to both headnode and scheduling "
"sections. Please review the configuration file after conversion and decide whether to further trim "
"down the permissions and specialize.",
"Warning: post_install = s3://testbucket/post_install.sh is added to both headnode and scheduling "
"sections. Please review the configuration file after conversion and decide whether to further trim "
"down the permissions and specialize.",
"Warning: Parameter initial_queue_size = 2 is no longer supported. Ignoring it during conversion.",
],
None,
False,
None,
),
],
)
def test_pcluster3_config_converter(
test_datadir, tmpdir, expected_input, expected_output, mocker, warn, error, force_convert, capsys, cluster_label
):
mocker.patch(
"pcluster3_config_converter.pcluster3_config_converter.Pcluster3ConfigConverter.get_region",
return_value="us-west-1",
)
mocker.patch(
"pcluster3_config_converter.pcluster3_config_converter._get_account_id",
return_value="1234567",
)
converter = Pcluster3ConfigConverter(
test_datadir / expected_input, cluster_label, tmpdir / "output_yaml", False, force_convert
)
try:
converter.validate()
converter.convert_to_pcluster3_config()
converter.write_configuration_file()
_assert_files_are_equal(
tmpdir / "output_yaml",
test_datadir / expected_output,
)
except SystemExit as e:
print(e)
assert_that(e.args[0]).contains(error)
if warn:
readouterr = capsys.readouterr()
for message in warn:
assert_that(readouterr.out).contains(message)
@pytest.mark.parametrize(
"test_case",
test_data.region_test,
)
def test_convert_region(test_case):
user_input, expected_output = test_case[0], test_case[1]
expected_output_data = yaml.safe_load(expected_output)
converter = Pcluster3ConfigConverter(
config_file=user_input, cluster_template="default", output_file="dummy_output", input_as_string=True
)
converter.convert_region("Region")
assert_that(converter.pcluster3_configuration).is_equal_to(expected_output_data)
@pytest.mark.parametrize(
"test_case",
test_data.image_test,
)
def test_convert_image(test_case):
user_input, expected_output = test_case[0], test_case[1]
expected_output_data = yaml.safe_load(expected_output)
converter = Pcluster3ConfigConverter(
config_file=user_input, cluster_template="default", output_file="dummy_output", input_as_string=True
)
converter.validate_cluster_section_name()
converter.convert_image("Image")
assert_that(converter.pcluster3_configuration).is_equal_to(expected_output_data)
@pytest.mark.parametrize(
"test_case",
test_data.iam_test,
)
def test_convert_iam(test_case, mocker):
mocker.patch(
"pcluster3_config_converter.pcluster3_config_converter.Pcluster3ConfigConverter.get_region",
return_value="us-west-1",
)
mocker.patch(
"pcluster3_config_converter.pcluster3_config_converter._get_account_id",
return_value="1234567",
)
user_input, expected_output = test_case[0], test_case[1]
expected_output_data = yaml.safe_load(expected_output)
converter = Pcluster3ConfigConverter(
config_file=user_input, cluster_template="default", output_file="dummy_output", input_as_string=True
)
converter.validate_cluster_section_name()
converter.convert_iam("Iam")
assert_that(converter.pcluster3_configuration).is_equal_to(expected_output_data)
@pytest.mark.parametrize(
"test_case",
test_data.additional_packages_test,
)
def test_convert_additional_packages(test_case):
user_input, expected_output = test_case[0], test_case[1]
expected_output_data = yaml.safe_load(expected_output)
converter = Pcluster3ConfigConverter(
config_file=user_input, cluster_template="default", output_file="dummy_output", input_as_string=True
)
converter.validate_cluster_section_name()
converter.convert_additional_packages("AdditionalPackages")
assert_that(converter.pcluster3_configuration).is_equal_to(expected_output_data)
@pytest.mark.parametrize(
"test_case",
test_data.tags_test,
)
def test_convert_tags(test_case):
user_input, expected_output, error_message = test_case[0], test_case[1], test_case[2]
expected_output_data = yaml.safe_load(expected_output)
converter = Pcluster3ConfigConverter(
config_file=user_input, cluster_template="default", output_file="dummy_output", input_as_string=True
)
converter.validate_cluster_section_name()
try:
converter.convert_tags("Tags")
assert_that(converter.pcluster3_configuration).is_equal_to(expected_output_data)
except SystemExit as e:
assert_that(e.args[0]).contains(error_message)
@pytest.mark.parametrize(
"test_case",
test_data.monitoring_test,
)
def test_convert_monitoring(test_case):
user_input, expected_output = test_case[0], test_case[1]
expected_output_data = yaml.safe_load(expected_output)
converter = Pcluster3ConfigConverter(
config_file=user_input, cluster_template="default", output_file="dummy_output", input_as_string=True
)
converter.validate_cluster_section_name()
converter.convert_monitoring("Monitoring")
assert_that(converter.pcluster3_configuration).is_equal_to(expected_output_data)
@pytest.mark.parametrize(
"test_case",
test_data.convert_custom_s3_bucket_test,
)
def test_convert_custom_s3_bucket(test_case):
user_input, expected_output = test_case[0], test_case[1]
expected_output_data = yaml.safe_load(expected_output)
converter = Pcluster3ConfigConverter(
config_file=user_input, cluster_template="default", output_file="dummy_output", input_as_string=True
)
converter.validate_cluster_section_name()
converter.convert_custom_s3_bucket("CustomS3Bucket")
assert_that(converter.pcluster3_configuration).is_equal_to(expected_output_data)
@pytest.mark.parametrize(
"test_case",
test_data.convert_dev_settings_test,
)
def test_convert_dev_settings(test_case):
user_input, expected_output = test_case[0], test_case[1]
expected_output_data = yaml.safe_load(expected_output)
converter = Pcluster3ConfigConverter(
config_file=user_input, cluster_template="default", output_file="dummy_output", input_as_string=True
)
converter.validate_cluster_section_name()
converter.convert_dev_settings("DevSettings")
assert_that(converter.pcluster3_configuration).is_equal_to(expected_output_data)
@pytest.mark.parametrize(
"test_case",
test_data.convert_additional_resources_test,
)
def test_convert_additional_resources(test_case):
user_input, expected_output = test_case[0], test_case[1]
expected_output_data = yaml.safe_load(expected_output)
converter = Pcluster3ConfigConverter(
config_file=user_input, cluster_template="default", output_file="dummy_output", input_as_string=True
)
converter.validate_cluster_section_name()
converter.convert_additional_resources("AdditionalResources")
assert_that(converter.pcluster3_configuration).is_equal_to(expected_output_data)
@pytest.mark.parametrize(
"test_case",
test_data.shared_storage_test,
)
def test_convert_shared_storage(test_case):
user_input, expected_output = test_case[0], test_case[1]
expected_output_data = yaml.safe_load(expected_output)
converter = Pcluster3ConfigConverter(
config_file=user_input, cluster_template="default", output_file="dummy_output", input_as_string=True
)
converter.validate_cluster_section_name()
converter.convert_shared_storage("SharedStorage")
assert_that(converter.pcluster3_configuration).is_equal_to(expected_output_data)
@pytest.mark.parametrize(
"test_case",
test_data.headnode_test,
)
def test_convert_headnode(test_case, mocker):
mocker.patch(
"pcluster3_config_converter.pcluster3_config_converter.Pcluster3ConfigConverter.get_region",
return_value="us-west-1",
)
mocker.patch(
"pcluster3_config_converter.pcluster3_config_converter._get_account_id",
return_value="1234567",
)
user_input, expected_output = test_case[0], test_case[1]
expected_output_data = yaml.safe_load(expected_output)
converter = Pcluster3ConfigConverter(
config_file=user_input, cluster_template="default", output_file="dummy_output", input_as_string=True
)
converter.validate_cluster_section_name()
converter.validate_vpc_settings()
converter.convert_headnode("HeadNode")
assert_that(converter.pcluster3_configuration).is_equal_to(expected_output_data)
@pytest.mark.parametrize(
"test_case",
test_data.scheduling_test,
)
def test_convert_scheduling(test_case, mocker):
mocker.patch(
"pcluster3_config_converter.pcluster3_config_converter.Pcluster3ConfigConverter.get_region",
return_value="us-west-1",
)
mocker.patch(
"pcluster3_config_converter.pcluster3_config_converter._get_account_id",
return_value="1234567",
)
user_input, expected_output, warn = test_case[0], test_case[1], test_case[2]
expected_output_data = yaml.safe_load(expected_output)
converter = Pcluster3ConfigConverter(
config_file=user_input, cluster_template="default", output_file="dummy_output", input_as_string=True
)
converter.validate_cluster_section_name()
converter.validate_vpc_settings()
converter.convert_scheduling("Scheduling")
assert_that(converter.pcluster3_configuration).is_equal_to(expected_output_data)
if warn:
assert_that(converter.comments).contains(warn)
@pytest.mark.parametrize(
"pcluster2_field, value, pcluster3_field, method, error_message",
[
("proxy_server", "https://x.x.x.x:8080", "HttpProxyAddress", None, None),
("disable_hyperthreading", True, "DisableSimultaneousMultithreading", "getboolean", None),
("master_root_volume_size", 30, "Size", "getint", None),
(
"master_root_volume_size",
True,
"Size",
"getint",
"Wrong type for master_root_volume_size in dummy-section section: invalid literal for int() with base 10: "
"'True'",
),
("spot_price", 20.99, "SpotPrice", "getfloat", None),
],
)
def test_convert_single_field(
test_datadir, tmpdir, pcluster2_field, value, pcluster3_field, method, error_message, caplog, capsys
):
converter = Pcluster3ConfigConverter(
config_file="dummy_input", cluster_template="default", output_file="dummy_output"
)
converter.config_parser.read_dict({"dummy-section": {pcluster2_field: value}})
pcluster3_model = {}
try:
converter.convert_single_field("dummy-section", pcluster2_field, pcluster3_model, pcluster3_field, method)
assert_that(pcluster3_model).is_equal_to({pcluster3_field: value})
except SystemExit as e:
assert_that(e.args[0]).contains(error_message)
def _assert_files_are_equal(file, expected_file):
with open(file, "r") as f, open(expected_file, "r") as exp_f:
expected_file_content = exp_f.read()
expected_file_content = expected_file_content.replace("<DIR>", os.path.dirname(file))
assert_that(f.read()).is_equal_to(expected_file_content)
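# Note: the expected-output fixtures may contain a literal "<DIR>" token, which
# is substituted with the directory of the generated file before comparison (see
# the replace() call above). The fixture file names used throughout these tests
# refer to data files assumed to live under each test's data directory.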
|
exp.voc/voc8.res50v3+.CCT/train.py | Yongjin-colin-choi/TorchSemiSeg | 268 | 12641242 | <filename>exp.voc/voc8.res50v3+.CCT/train.py
from __future__ import division
import os.path as osp
import os
import sys
import time
import argparse
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.distributed as dist
import torch.backends.cudnn as cudnn
from config import config
from dataloader import get_train_loader
from network import Network
from dataloader import VOC
from utils.init_func import init_weight, group_weight
from engine.lr_policy import WarmUpPolyLR
from engine.engine import Engine
from seg_opr.loss_opr import SigmoidFocalLoss, ProbOhemCrossEntropy2d
# from seg_opr.sync_bn import DataParallelModel, Reduce, BatchNorm2d
from tensorboardX import SummaryWriter
from unsupervised_head import *
try:
from apex.parallel import DistributedDataParallel, SyncBatchNorm
except ImportError:
raise ImportError(
"Please install apex from https://www.github.com/nvidia/apex .")
try:
from azureml.core import Run
azure = True
run = Run.get_context()
except:
azure = False
parser = argparse.ArgumentParser()
os.environ['MASTER_PORT'] = '16971'  # must be a valid TCP port; the original value 169711 exceeds the 65535 limit
with Engine(custom_parser=parser) as engine:
args = parser.parse_args()
cudnn.benchmark = True
seed = config.seed
if engine.distributed:
seed = engine.local_rank
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(seed)
# data loader + unsupervised data loader
train_loader, train_sampler = get_train_loader(engine, VOC, train_source=config.train_source, \
unsupervised=False)
unsupervised_train_loader, unsupervised_train_sampler = get_train_loader(engine, VOC, \
train_source=config.unsup_source, unsupervised=True)
if engine.distributed and (engine.local_rank == 0):
tb_dir = config.tb_dir + '/{}'.format(time.strftime("%b%d_%d-%H-%M", time.localtime()))
generate_tb_dir = config.tb_dir + '/tb'
logger = SummaryWriter(log_dir=tb_dir)
engine.link_tb(tb_dir, generate_tb_dir)
# config network and criterion
criterion = nn.CrossEntropyLoss(reduction='mean', ignore_index=255)
criterion = abCE_loss(iters_per_epoch=config.niters_per_epoch, epochs=config.nepochs,
num_classes=config.num_classes)
if engine.distributed:
BatchNorm2d = SyncBatchNorm
model = Network(config.num_classes, criterion=criterion,
pretrained_model=config.pretrained_model,
norm_layer=BatchNorm2d)
init_weight(model.business_layer, nn.init.kaiming_normal_,
BatchNorm2d, config.bn_eps, config.bn_momentum,
mode='fan_in', nonlinearity='relu')
# group weight and config optimizer
base_lr = config.lr
if engine.distributed:
base_lr = config.lr * engine.world_size
params_list = []
params_list = group_weight(params_list, model.backbone,
BatchNorm2d, base_lr)
for module in model.business_layer:
params_list = group_weight(params_list, module, BatchNorm2d,
base_lr)
optimizer = torch.optim.SGD(params_list,
lr=base_lr,
momentum=config.momentum,
weight_decay=config.weight_decay)
# config lr policy
total_iteration = config.nepochs * config.niters_per_epoch
lr_policy = WarmUpPolyLR(base_lr, config.lr_power, total_iteration, config.niters_per_epoch * config.warm_up_epoch)
if engine.distributed:
print('distributed !!')
if torch.cuda.is_available():
model.cuda()
model = DistributedDataParallel(model)
else:
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        # DataParallelModel comes from seg_opr.sync_bn, whose import is commented out above;
        # fall back to the built-in nn.DataParallel for the non-distributed case.
        model = nn.DataParallel(model, device_ids=engine.devices)
model.to(device)
engine.register_state(dataloader=train_loader, model=model,
optimizer=optimizer)
if engine.continue_state_object:
engine.restore_checkpoint()
model.train()
print('begin train')
for epoch in range(engine.state.epoch, config.nepochs):
if engine.distributed:
train_sampler.set_epoch(epoch)
bar_format = '{desc}[{elapsed}<{remaining},{rate_fmt}]'
pbar = tqdm(range(config.niters_per_epoch), file=sys.stdout,
bar_format=bar_format)
dataloader = iter(train_loader)
unsupervised_dataloader = iter(unsupervised_train_loader)
sum_loss = 0
sum_loss_unsup = 0
for idx in pbar:
optimizer.zero_grad()
engine.update_iteration(epoch, idx)
            minibatch = next(dataloader)
            unsupervised_minibatch = next(unsupervised_dataloader)
imgs = minibatch['data']
gts = minibatch['label']
unsupervised_imgs = unsupervised_minibatch['data']
imgs = imgs.cuda(non_blocking=True)
gts = gts.cuda(non_blocking=True)
unsupervised_imgs = unsupervised_imgs.cuda(non_blocking=True)
loss_sup, loss_unsup = model(imgs, unsupervised_imgs, gts, curr_iter=idx+1, epoch=epoch)
# reduce the whole loss over multi-gpu
dist.all_reduce(loss_sup, dist.ReduceOp.SUM)
loss_sup = loss_sup / engine.world_size
dist.all_reduce(loss_unsup, dist.ReduceOp.SUM)
loss_unsup = loss_unsup / engine.world_size
current_idx = epoch * config.niters_per_epoch + idx
lr = lr_policy.get_lr(current_idx)
optimizer.param_groups[0]['lr'] = lr
optimizer.param_groups[1]['lr'] = lr
for i in range(2, len(optimizer.param_groups)):
optimizer.param_groups[i]['lr'] = lr
unsup_weight = config.unsup_weight
loss_unsup = loss_unsup * unsup_weight
tot_loss = loss_sup + loss_unsup # sup loss + unsup loss
tot_loss.backward()
optimizer.step()
print_str = 'Epoch{}/{}'.format(epoch, config.nepochs) \
+ ' Iter{}/{}:'.format(idx + 1, config.niters_per_epoch) \
+ ' lr=%.2e' % lr \
+ ' loss=%.2f' % tot_loss.item() \
+ ' loss_unsup=%.2f' % loss_unsup.item()
sum_loss += tot_loss.item()
sum_loss_unsup += loss_unsup.item()
pbar.set_description(print_str, refresh=False)
if engine.distributed and (engine.local_rank == 0):
logger.add_scalar('train_loss', sum_loss / len(pbar), epoch)
logger.add_scalar('train_loss_unsup', sum_loss_unsup / len(pbar), epoch)
if azure:
run.log(name='Training Loss', value=sum_loss / len(pbar))
run.log(name='Unsupervised Training Loss', value=sum_loss_unsup / len(pbar))
if (epoch > config.nepochs // 6) and (epoch % config.snapshot_iter == 0) or (epoch == config.nepochs - 1):
if engine.distributed and (engine.local_rank == 0):
engine.save_and_link_checkpoint(config.snapshot_dir,
config.log_dir,
config.log_dir_link)
elif not engine.distributed:
engine.save_and_link_checkpoint(config.snapshot_dir,
config.log_dir,
config.log_dir_link)
|
var/spack/repos/builtin/packages/tippecanoe/package.py | kkauder/spack | 2,360 | 12641257 | <filename>var/spack/repos/builtin/packages/tippecanoe/package.py<gh_stars>1000+
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Tippecanoe(MakefilePackage):
"""Build vector tilesets from large collections of GeoJSON features."""
homepage = "https://github.com/mapbox/tippecanoe"
url = "https://github.com/mapbox/tippecanoe/archive/1.34.3.tar.gz"
version('1.34.3', sha256='7a2dd2376a93d66a82c8253a46dbfcab3eaaaaca7bf503388167b9ee251bee54')
depends_on('sqlite')
depends_on('zlib')
def edit(self, spec, prefix):
makefile = FileFilter('Makefile')
makefile.filter(r'PREFIX \?= /usr/local', 'PREFIX = ' + self.prefix)
|
tests/integration/pybaseball/datasources/test_fg_pitching_data.py | reddigari/pybaseball | 650 | 12641271 | <reponame>reddigari/pybaseball<filename>tests/integration/pybaseball/datasources/test_fg_pitching_data.py
from typing import Callable
import pandas as pd
import pytest
from pybaseball.datasources.fangraphs import fg_pitching_data
from pybaseball.enums.fangraphs.pitching_data_enum import FangraphsPitchingStats
from tests.conftest import _DataFrameComparer
class TestFGPitchingData:
ALL_DATA_COLUMNS_COUNT = len(FangraphsPitchingStats.ALL()) + 2 # All columns + name and team
DEFAULT_MAX_RESULTS = 10
def test_fg_pitching_data(self) -> None:
season = 2019
data = fg_pitching_data(season, max_results=self.DEFAULT_MAX_RESULTS)
assert data is not None
assert not data.empty
assert len(data.columns) == self.ALL_DATA_COLUMNS_COUNT
assert len(data.index) == self.DEFAULT_MAX_RESULTS
seasons = list(set(data['Season']))
assert len(seasons) == 1
assert seasons[0] == season
def test_fg_pitching_data_future_season(self) -> None:
season = 3000
with pytest.raises(ValueError):
fg_pitching_data(season, max_results=self.DEFAULT_MAX_RESULTS)
def test_fg_pitching_data_end_season(self) -> None:
data = fg_pitching_data(2018, end_season=2019, max_results=self.DEFAULT_MAX_RESULTS)
assert data is not None
assert not data.empty
assert len(data.columns) == self.ALL_DATA_COLUMNS_COUNT
assert len(data.index) == self.DEFAULT_MAX_RESULTS
def test_fg_pitching_data_end_season_no_split_season(self) -> None:
data = fg_pitching_data(2018, end_season=2019, split_seasons=False, max_results=self.DEFAULT_MAX_RESULTS)
assert data is not None
assert not data.empty
assert len(data.columns) == self.ALL_DATA_COLUMNS_COUNT - 1
assert 'Season' not in data.columns
assert len(data.index) == self.DEFAULT_MAX_RESULTS
def test_fg_pitching_data_single_stat_columns(self) -> None:
data = fg_pitching_data(2019, stat_columns='ERA', max_results=self.DEFAULT_MAX_RESULTS)
assert data is not None
assert not data.empty
assert len(data.columns) == 5
assert len(data.index) == self.DEFAULT_MAX_RESULTS
def test_fg_pitching_data_multiple_stat_columns(self) -> None:
data = fg_pitching_data(2019, stat_columns=['ERA', 'BB'], max_results=self.DEFAULT_MAX_RESULTS)
assert data is not None
assert not data.empty
assert len(data.columns) == 6
assert len(data.index) == self.DEFAULT_MAX_RESULTS
def test_fg_pitching_data_league(self, assert_frame_not_equal: _DataFrameComparer) -> None:
data_al = fg_pitching_data(2019, league='AL', max_results=self.DEFAULT_MAX_RESULTS)
assert data_al is not None
assert not data_al.empty
assert len(data_al.columns) == self.ALL_DATA_COLUMNS_COUNT
assert len(data_al.index) == self.DEFAULT_MAX_RESULTS
data_nl = fg_pitching_data(2019, league='NL', max_results=self.DEFAULT_MAX_RESULTS)
assert data_nl is not None
assert not data_nl.empty
assert len(data_nl.columns) == self.ALL_DATA_COLUMNS_COUNT
assert len(data_nl.index) == self.DEFAULT_MAX_RESULTS
assert assert_frame_not_equal(data_al, data_nl)
def test_fg_pitching_data_qual(self) -> None:
data = fg_pitching_data(2019, qual=210, max_results=self.DEFAULT_MAX_RESULTS)
assert data is not None
assert not data.empty
assert len(data.columns) == self.ALL_DATA_COLUMNS_COUNT
assert len(data.index) == 4
def test_fg_pitching_data_on_active_roster(self, assert_frame_not_equal: _DataFrameComparer) -> None:
data = fg_pitching_data(2018, max_results=self.DEFAULT_MAX_RESULTS)
assert data is not None
assert not data.empty
assert len(data.columns) == self.ALL_DATA_COLUMNS_COUNT
assert len(data.index) == self.DEFAULT_MAX_RESULTS
oar_data = fg_pitching_data(2018, on_active_roster=True, max_results=self.DEFAULT_MAX_RESULTS)
assert oar_data is not None
assert not oar_data.empty
assert len(oar_data.columns) == self.ALL_DATA_COLUMNS_COUNT
assert len(oar_data.index) == self.DEFAULT_MAX_RESULTS
assert_frame_not_equal(data, oar_data)
def test_fg_pitching_minimum_age(self) -> None:
data = fg_pitching_data(2019, minimum_age=37, max_results=self.DEFAULT_MAX_RESULTS)
assert data is not None
assert not data.empty
assert len(data.columns) == self.ALL_DATA_COLUMNS_COUNT
assert len(data.index) == 1
def test_fg_pitching_maximum_age(self) -> None:
data = fg_pitching_data(2019, maximum_age=21, max_results=self.DEFAULT_MAX_RESULTS)
assert data is not None
assert not data.empty
assert len(data.columns) == self.ALL_DATA_COLUMNS_COUNT
assert len(data.index) == 1
def test_fg_pitching_team(self, assert_frame_not_equal: _DataFrameComparer) -> None:
data_1 = fg_pitching_data(2019, team='3', max_results=self.DEFAULT_MAX_RESULTS)
assert data_1 is not None
assert not data_1.empty
assert len(data_1.columns) == self.ALL_DATA_COLUMNS_COUNT - 1
assert 'Team' not in data_1.columns
assert len(data_1.index) == 2
data_2 = fg_pitching_data(2019, team='4', max_results=self.DEFAULT_MAX_RESULTS)
assert data_2 is not None
assert not data_2.empty
assert len(data_2.columns) == self.ALL_DATA_COLUMNS_COUNT - 1
assert 'Team' not in data_2.columns
assert len(data_2.index) == 3
assert_frame_not_equal(data_1, data_2)
def test_fg_pitching_data_max_results(self) -> None:
season = 2019
data = fg_pitching_data(season)
assert data is not None
assert not data.empty
assert len(data.columns) == self.ALL_DATA_COLUMNS_COUNT
assert len(data.index) == 61
|
tests/__init__.py | steve1aa/microdot | 173 | 12641275 | <reponame>steve1aa/microdot
from tests.microdot.test_multidict import TestMultiDict
from tests.microdot.test_request import TestRequest
from tests.microdot.test_response import TestResponse
from tests.microdot.test_url_pattern import TestURLPattern
from tests.microdot.test_microdot import TestMicrodot
from tests.microdot_asyncio.test_request_asyncio import TestRequestAsync
from tests.microdot_asyncio.test_response_asyncio import TestResponseAsync
from tests.microdot_asyncio.test_microdot_asyncio import TestMicrodotAsync
|
tests/ae_example/models/autoencoder.py | agr17/pytorch-msssim | 461 | 12641279 | import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from .gdn import GDN
# https://arxiv.org/pdf/1611.01704.pdf
# A simplified version without quantization
class AutoEncoder(nn.Module):
def __init__(self, C=128, M=128, in_chan=3, out_chan=3):
super(AutoEncoder, self).__init__()
self.encoder = Encoder(C=C, M=M, in_chan=in_chan)
self.decoder = Decoder(C=C, M=M, out_chan=out_chan)
def forward(self, x, **kargs):
code = self.encoder(x)
out = self.decoder(code)
return out
class Encoder(nn.Module):
""" Encoder
"""
def __init__(self, C=32, M=128, in_chan=3):
super(Encoder, self).__init__()
self.enc = nn.Sequential(
nn.Conv2d(in_channels=in_chan, out_channels=M, kernel_size=5, stride=2, padding=2, bias=False),
GDN(M),
nn.Conv2d(in_channels=M, out_channels=M, kernel_size=5, stride=2, padding=2, bias=False),
GDN(M),
nn.Conv2d(in_channels=M, out_channels=M, kernel_size=5, stride=2, padding=2, bias=False),
GDN(M),
nn.Conv2d(in_channels=M, out_channels=C, kernel_size=5, stride=2, padding=2, bias=False)
)
def forward(self, x):
return self.enc(x)
class Decoder(nn.Module):
""" Decoder
"""
def __init__(self, C=32, M=128, out_chan=3):
super(Decoder, self).__init__()
self.dec = nn.Sequential(
nn.ConvTranspose2d(in_channels=C, out_channels=M, kernel_size=5, stride=2, padding=2, output_padding=1, bias=False),
GDN(M, inverse=True),
nn.ConvTranspose2d(in_channels=M, out_channels=M, kernel_size=5, stride=2, padding=2, output_padding=1, bias=False),
GDN(M, inverse=True),
nn.ConvTranspose2d(in_channels=M, out_channels=M, kernel_size=5, stride=2, padding=2, output_padding=1, bias=False),
GDN(M, inverse=True),
nn.ConvTranspose2d(in_channels=M, out_channels=out_chan, kernel_size=5, stride=2, padding=2, output_padding=1, bias=False),
)
def forward(self, q):
return torch.sigmoid( self.dec(q) )
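# Usage sketch (illustrative only; the input resolution below is an assumption,
# not a value taken from any particular training setup):
#
#   model = AutoEncoder(C=128, M=128, in_chan=3, out_chan=3)
#   x = torch.randn(1, 3, 256, 256)   # four stride-2 convs downsample 16x
#   code = model.encoder(x)           # -> (1, 128, 16, 16)
#   recon = model(x)                  # -> (1, 3, 256, 256), values in (0, 1) after the sigmoid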
|
stonesoup/types/tests/test_hypothesis.py | Red-Portal/Stone-Soup-1 | 157 | 12641280 | # -*- coding: utf-8 -*-
import numpy as np
import pytest
from ..prediction import StatePrediction, StateMeasurementPrediction
from ..detection import Detection
from ..track import Track
from ..hypothesis import (
SingleHypothesis,
SingleDistanceHypothesis,
SingleProbabilityHypothesis,
JointHypothesis,
ProbabilityJointHypothesis,
DistanceJointHypothesis)
prediction = StatePrediction(np.array([[1], [0]]))
measurement_prediction = StateMeasurementPrediction(np.array([[1], [0]]))
detection = Detection(np.array([[1], [0]]))
distance = float(1)
def test_single_hypothesis():
"""Single Measurement Hypothesis type test"""
hypothesis = SingleHypothesis(prediction, detection)
assert hypothesis.prediction is prediction
assert hypothesis.measurement is detection
assert hypothesis.measurement_prediction is None
assert hypothesis
hypothesis = SingleHypothesis(prediction, detection,
measurement_prediction)
assert hypothesis.prediction is prediction
assert hypothesis.measurement is detection
assert hypothesis.measurement_prediction is measurement_prediction
assert hypothesis
hypothesis = SingleHypothesis(prediction, None)
assert hypothesis.prediction is prediction
assert hypothesis.measurement is None
assert hypothesis.measurement_prediction is None
assert not hypothesis
def test_single_distance_hypothesis():
"""Single Measurement Distance Hypothesis type test"""
hypothesis = SingleDistanceHypothesis(
prediction, detection, distance, measurement_prediction)
assert hypothesis.prediction is prediction
assert hypothesis.measurement is detection
assert hypothesis.distance is distance
assert hypothesis.measurement_prediction is measurement_prediction
assert hypothesis.weight == 1/distance
hypothesis.distance = 0
assert hypothesis.weight == float('inf')
def test_single_distance_hypothesis_comparison():
"""Single Measurement Distance Hypothesis comparison test"""
h1 = SingleDistanceHypothesis(
prediction, detection, distance, measurement_prediction)
h2 = SingleDistanceHypothesis(
prediction, detection, distance + 1, measurement_prediction)
assert h1 > h2
assert h2 < h1
assert h1 <= h1
assert h1 >= h1
assert h1 == h1
def test_single_probability_hypothesis_comparison():
"""Single Measurement Probability Hypothesis comparison test"""
h1 = SingleProbabilityHypothesis(
prediction, detection, 0.9, measurement_prediction)
h2 = SingleProbabilityHypothesis(
prediction, detection, 0.1, measurement_prediction)
assert h1 > h2
assert h2 < h1
assert h1 <= h1
assert h1 >= h1
assert h1 == h1
def test_probability_joint_hypothesis():
"""Probability Joint Hypothesis type test"""
t1 = Track()
t2 = Track()
h1 = SingleProbabilityHypothesis(
prediction, detection, 0.9, measurement_prediction)
h2 = SingleProbabilityHypothesis(
prediction, detection, 0.1, measurement_prediction)
hypotheses = {t1: h1, t2: h2}
joint_hypothesis = JointHypothesis(hypotheses)
assert isinstance(joint_hypothesis,
ProbabilityJointHypothesis)
assert joint_hypothesis[t1] is h1
assert joint_hypothesis[t2] is h2
assert joint_hypothesis.probability == h1.probability * h2.probability
def test_probability_joint_hypothesis_comparison():
"""Probability Joint Hypothesis comparison test"""
t1 = Track()
t2 = Track()
h1 = SingleProbabilityHypothesis(
prediction, detection, 0.75, measurement_prediction)
h2 = SingleProbabilityHypothesis(
prediction, detection, 0.75, measurement_prediction)
h3 = SingleProbabilityHypothesis(
prediction, detection, 0.25, measurement_prediction)
hypotheses1 = {t1: h1, t2: h2}
hypotheses2 = {t1: h1, t2: h3}
j1 = JointHypothesis(hypotheses1)
j1.normalise()
j2 = JointHypothesis(hypotheses2)
j2.normalise()
assert j1 > j2
assert j2 < j1
assert j1 <= j1
assert j1 >= j1
assert j1 == j1
def test_distance_joint_hypothesis():
"""Distance Joint Hypothesis type test"""
t1 = Track()
t2 = Track()
h1 = SingleDistanceHypothesis(
prediction, detection, distance, measurement_prediction)
h2 = SingleDistanceHypothesis(
prediction, detection, distance, measurement_prediction)
hypotheses = {t1: h1, t2: h2}
joint_hypothesis = JointHypothesis(hypotheses)
assert isinstance(joint_hypothesis,
DistanceJointHypothesis)
assert joint_hypothesis[t1] is h1
assert joint_hypothesis[t2] is h2
assert joint_hypothesis.distance == distance * 2
def test_distance_joint_hypothesis_comparison():
"""Distance Joint Hypothesis comparison test"""
t1 = Track()
t2 = Track()
h1 = SingleDistanceHypothesis(
prediction, detection, distance, measurement_prediction)
h2 = SingleDistanceHypothesis(
prediction, detection, distance, measurement_prediction)
h3 = SingleDistanceHypothesis(
prediction, detection, distance + 1, measurement_prediction)
hypotheses1 = {t1: h1, t2: h2}
hypotheses2 = {t1: h1, t2: h3}
j1 = JointHypothesis(hypotheses1)
j2 = JointHypothesis(hypotheses2)
assert j1 > j2
assert j2 < j1
assert j1 <= j1
assert j1 >= j1
assert j1 == j1
def test_invalid_single_joint_hypothesis():
"""Invalid Single Measurement Joint Hypothesis test"""
t1 = Track()
t2 = Track()
h1 = object()
h2 = object()
hypotheses = {t1: h1, t2: h2}
with pytest.raises(NotImplementedError):
JointHypothesis(hypotheses)
|
bin/deepstate/executors/fuzz/libfuzzer.py | acpaquette/deepstate | 684 | 12641325 | #!/usr/bin/env python3.6
# Copyright (c) 2019 Trail of Bits, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import logging
import argparse
from typing import List
from deepstate.core import FuzzerFrontend, FuzzFrontendError
L = logging.getLogger(__name__)
class LibFuzzer(FuzzerFrontend):
NAME = "libFuzzer"
EXECUTABLES = {"FUZZER": "clang++", # placeholder
"COMPILER": "clang++"
}
ENVVAR = "LIBFUZZER_HOME"
REQUIRE_SEEDS = False
PUSH_DIR = os.path.join("sync_dir", "queue")
PULL_DIR = os.path.join("sync_dir", "queue")
CRASH_DIR = os.path.join("the_fuzzer", "crashes")
@classmethod
def parse_args(cls) -> None:
parser: argparse.ArgumentParser = argparse.ArgumentParser(
description="Use libFuzzer as a backend for DeepState")
cls.parser = parser
super(LibFuzzer, cls).parse_args()
def compile(self) -> None: # type: ignore
lib_path: str = "/usr/local/lib/libdeepstate_LF.a"
flags: List[str] = ["-ldeepstate_LF", "-fsanitize=fuzzer,undefined"]
if self.compiler_args:
flags += [arg for arg in self.compiler_args.split(" ")]
super().compile(lib_path, flags, self.out_test_name)
def pre_exec(self) -> None:
"""
Perform argparse and environment-related sanity checks.
"""
# first, redefine and override fuzzer as harness executable
if self.binary:
self.binary = os.path.abspath(self.binary)
self.fuzzer_exe = self.binary # type: ignore
super().pre_exec()
        # check again, because the compiler may have been run above
if not self.binary:
raise FuzzFrontendError("Binary not set.")
self.binary = os.path.abspath(self.binary)
self.fuzzer_exe = self.binary # type: ignore
if self.blackbox is True:
raise FuzzFrontendError("Blackbox fuzzing is not supported by libFuzzer.")
# resuming fuzzing
if len(os.listdir(self.output_test_dir)) > 0:
self.check_required_directories([self.push_dir, self.pull_dir, self.crash_dir])
self.input_seeds = None
L.info(f"Resuming fuzzing using seeds from {self.push_dir} (skipping --input_seeds option).")
else:
self.setup_new_session([self.pull_dir, self.crash_dir])
@property
def cmd(self):
"""
Initializes a command for an in-process libFuzzer instance that runs
indefinitely until an interrupt.
"""
cmd_list: List[str] = list()
# guaranteed arguments
cmd_list.extend([
"-rss_limit_mb={}".format(self.mem_limit),
"-max_len={}".format(self.max_input_size),
"-artifact_prefix={}".format(self.crash_dir + "/"),
# "-jobs={}".format(0),
# "-workers={}".format(1),
# "-fork=1",
"-reload=1",
"-runs=-1",
"-print_final_stats=1"
])
for key, val in self.fuzzer_args:
if val is not None:
cmd_list.append('-{}={}'.format(key, val))
else:
cmd_list.append('-{}'.format(key))
# optional arguments:
if self.dictionary:
cmd_list.append("-dict={}".format(self.dictionary))
if self.exec_timeout:
cmd_list.append("-timeout={}".format(self.exec_timeout / 1000))
        # must come last: these are positional arguments
        cmd_list.append(self.push_dir)  # no auto-create, reusable
        # optional: if provided, it is not auto-created and does not need to contain any files
if self.input_seeds:
cmd_list.append(self.input_seeds)
return cmd_list
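    # For illustration only (all values below are hypothetical), the returned
    # argument list looks roughly like:
    #   ['-rss_limit_mb=1024', '-max_len=8192', '-artifact_prefix=/out/the_fuzzer/crashes/',
    #    '-reload=1', '-runs=-1', '-print_final_stats=1', '-dict=fuzz.dict',
    #    '-timeout=5.0', '/out/sync_dir/queue', 'seeds/']
    # i.e. libFuzzer flags first, then the corpus directory and, optionally, the seed directory.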
def populate_stats(self):
super().populate_stats()
if not os.path.isfile(self.output_file):
return
with open(self.output_file, "rb") as f:
for line in f:
                # libFuzzer output under DeepState is broken up
                # into multiple lines, each preceded with "EXTERNAL:"
if line.startswith(b"EXTERNAL: "):
line = line.split(b":", 1)[1].strip()
if line.startswith(b"#"):
# new event code
self.stats["execs_done"] = line.split()[0].strip(b"#").decode()
elif b":" in line:
line = line.split(b":", 1)[1].strip()
if b":" in line:
key, value = line.split(b":", 1)
if key == b"exec/s":
self.stats["execs_per_sec"] = value.strip().decode()
elif key == b"units":
self.stats["paths_total"] = value.strip().decode()
elif key == b"cov":
self.stats["bitmap_cvg"] = value.strip().decode()
    def _sync_seeds(self, src, dest, excludes=None) -> None:
        # avoid a mutable default argument: `+=` would otherwise keep growing the shared default list
        excludes = list(excludes) if excludes else []
        excludes += ["*.cur_input", ".state"]
super()._sync_seeds(src, dest, excludes=excludes)
def post_exec(self):
# TODO: remove crashes from seeds dir and from sync_dir
pass
def main():
fuzzer = LibFuzzer()
return fuzzer.main()
if __name__ == "__main__":
exit(main())
|
tests/resolution/evaluation/test_evaluate_resolved.py | mib1185/homeassistant-supervisor | 597 | 12641372 | <reponame>mib1185/homeassistant-supervisor<filename>tests/resolution/evaluation/test_evaluate_resolved.py
"""Test evaluate systemd-resolved."""
from unittest.mock import PropertyMock, patch
from supervisor.const import CoreState
from supervisor.coresys import CoreSys
from supervisor.resolution.evaluations.resolved import EvaluateResolved
async def test_evaluation(coresys: CoreSys):
"""Test evaluation."""
resolved = EvaluateResolved(coresys)
coresys.core.state = CoreState.SETUP
assert resolved.reason not in coresys.resolution.unsupported
with patch.object(
type(coresys.dbus.resolved), "is_connected", PropertyMock(return_value=False)
):
await resolved()
assert resolved.reason in coresys.resolution.unsupported
await resolved()
assert resolved.reason not in coresys.resolution.unsupported
async def test_did_run(coresys: CoreSys):
"""Test that the evaluation ran as expected."""
resolved = EvaluateResolved(coresys)
should_run = resolved.states
should_not_run = [state for state in CoreState if state not in should_run]
assert len(should_run) != 0
assert len(should_not_run) != 0
with patch(
"supervisor.resolution.evaluations.resolved.EvaluateResolved.evaluate",
return_value=None,
) as evaluate:
for state in should_run:
coresys.core.state = state
await resolved()
evaluate.assert_called_once()
evaluate.reset_mock()
for state in should_not_run:
coresys.core.state = state
await resolved()
evaluate.assert_not_called()
evaluate.reset_mock()
|
wren/resources/constants.py | tzano/wren | 245 | 12641374 | <gh_stars>100-1000
# config files
CONFIG_DIR = "config"
# Link to DB Configuration
DB_CONFIG_FILE = "db.yml"
# Name of collection
COLLECTION_NAME = 'collection_name'
CONFIG_DIR = "config"
CONFIG_FNAME = "rss_feeds.yml"
PARAM_CONFIG_FILE = "services.yml"
APP_KEYS_FILE = "keys.yml"
NLU_CONFIG = "nlu_config.yml"
MESSAGING_FILE = "messaging_platforms.yml"
SOCIALMEDIA_FILE = "socialmedia.yml"
EMAIL_FILE = "email.yml"
PAR_DIR = ".."
# News Organizations
ALJAZEERA = "AlJazeera"
BBC = "BBC"
CNN = "CNN"
HUFFINGTONPOST = "HuffingtonPost"
NYPOST = "NYPost"
NYTIMES = "NYTimes"
REUTERS = "Reuters"
TELEGRAPH = "Telegraph"
THEGLOBAEANDMAIL = "TheGlobeAndMail"
GUARDIAN = "Guardian"
USTODAY = "USAToday"
VICE = "Vice"
WSJ = "WSJ"
# name of collections
COLLECTION_ARTICLES = 'articles'
COLLECTION_PODCASTS = 'podcasts'
COLLECTION_VIDEOS = 'videos'
COLLECTION_LISTS = 'list'
COLLECTION_BOOKMARS = 'bookmarks'
# Schedulers' names
SCHEDULER_NEWS_ARTICLES = "Scheduler-NewsArticles"
SCHEDULER_PODCASTS = "Scheduler-Podcasts"
SCHEDULER_VIDEOS = "Scheduler-Videos"
SECONDS = 60
# Empty string
EMPTY_STR = ""
EMPTY_LIST = []
EMPTY_DICT = {}
# media types
ARTICLES = "Articles"
PODCASTS = "Podcasts"
VIDEOS = "Videos"
ARTICLE = "Article"
PODCAST = "Podcast"
VIDEO = "Video"
MEDIA_TYPE_ARTICLES = {ARTICLES: ARTICLE}
MEDIA_TYPE_PODCASTS = {PODCASTS: PODCAST}
MEDIA_TYPE_VIDEOS = {VIDEOS: VIDEO}
# Scheduler
STOPPED = "stopped"
RUNNING = "running"
# Languages
EN_LANG = "English"
AR_LANG = "Arabic"
# Keys for webservices
CALAIS_KEY = "calais_key"
DBPEDIA_KEY = "dbpedia_key"
FREEBASE_KEY = "freebase_key"
YAHOO_KEY = "yahoo_key"
ZEMANTA_KEY = "zemanta_key"
VONA_KEY = "vona_key"
VONA_USERNAME = "vona_username"
SLACK_API_KEY = "api_key"
SLACK_BOT_NAME = "bot_name"
SLACK_CHANNEL_NAME = "channel_name"
SLACK_SERVICE = "slack"
# Voice TTs
TTS_PERONA = 'Emma'
# Connectors
MONGODB = "mongodb"
KAFKA = "kafka"
# Host/Port
HOST = "host"
PORT = "port"
SENTISTRENGHT_JAR = "../resources/SentiStrength.jar"
SENTISTRENGHT_DIR = "../resources/sentstrength_data/"
# NLU Server
NLU_SERVER = "http://localhost:5000"
# NLU Parser parameters
MIN_THRESHOLD = 0.30
# List of intents
INTENT_FINDNEWSCONTENT = 'findNewsContent'
INTENT_ADDCONTENTITEMTOCOLLECTION = 'addContentItemToCollection'
INTENT_BOOKMARKCONTENTITEM = 'bookmarkContentItem'
INTENT_EMAILCONTENTITEM = 'emailContentItem'
INTENT_FAVORITECONTENTITEM = 'favoriteContentItem'
INTENT_GETCONTENTINFO = 'getContentInfo'
INTENT_LISTENPODCAST = 'listenPodcast'
INTENT_RATECONTENTITEM = 'rateContentItem'
INTENT_READARTICLE = 'readArticle'
INTENT_SHARECONTENTITEM = 'shareContentItem'
INTENT_WATCHVIDEO = 'watchVideo'
# List of entities
ENTITY_FINDNEWSCONTENT_AUTHORNAME = 'findnewscontent_authorname'
ENTITY_FINDNEWSCONTENT_CONTENTITEMNAME = 'findnewscontent_contentitemname'
ENTITY_FINDNEWSCONTENT_CONTENTTYPE = 'findnewscontent_contenttype'
ENTITY_FINDNEWSCONTENT_EVENTNAME = 'findnewscontent_eventname'
ENTITY_FINDNEWSCONTENT_LOCATIONNAME = 'findnewscontent_locationname'
ENTITY_FINDNEWSCONTENT_ORGNAME = 'findnewscontent_orgname'
ENTITY_FINDNEWSCONTENT_PERSONNAME = 'findnewscontent_personname'
ENTITY_FINDNEWSCONTENT_SPATIALRELATION = 'findnewscontent_spatialrelation'
ENTITY_FINDNEWSCONTENT_TIMEFRAME = 'findnewscontent_timeframe'
ENTITY_FINDNEWSCONTENT_TOPICNAME = 'findnewscontent_topicname'
ENTITY_ADDCONTENTITEMTOCOLLECTION_COLLECTIONNAME = 'addcontentitemtocollection_collectionname'
ENTITY_ADDCONTENTITEMTOCOLLECTION_CONTENTITEMNAME = 'addcontentitemtocollection_contentitemname'
ENTITY_ADDCONTENTITEMTOCOLLECTION_CONTENTTYPE = 'addcontentitemtocollection_contenttype'
ENTITY_BOOKMARKCONTENTITEM_CONTENTITEMNAME = 'bookmarkcontentitem_contentitemname'
ENTITY_BOOKMARKCONTENTITEM_CONTENTTYPE = 'bookmarkcontentitem_contenttype'
ENTITY_BOOKMARKCONTENTITEM_SELECTCRITERIA = 'bookmarkcontentitem_selectcriteria'
ENTITY_EMAILCONTENTITEM_CONTENTITEMNAME = 'emailcontentitem_contentitemname'
ENTITY_EMAILCONTENTITEM_CONTENTTYPE = 'emailcontentitem_contenttype'
ENTITY_EMAILCONTENTITEM_RECEIPENT = 'emailcontentitem_receipent'
ENTITY_EMAILCONTENTITEM_SELECTCRITERIA = 'emailcontentitem_selectcriteria'
ENTITY_FAVORITECONTENTITEM_CONTENTITEMNAME = 'favoritecontentitem_contentitemname'
ENTITY_FAVORITECONTENTITEM_CONTENTTYPE = 'favoritecontentitem_contenttype'
ENTITY_FAVORITECONTENTITEM_SELECTCRITERIA = 'favoritecontentitem_selectcriteria'
ENTITY_GETCONTENTINFO_CONTENTTYPE = 'getcontentinfo_contenttype'
ENTITY_GETCONTENTINFO_SELECTCRITERIA = 'getcontentinfo_selectcriteria'
ENTITY_LISTENPODCAST_COMMAND = 'listenpodcast_command'
ENTITY_LISTENPODCAST_CONTENTITEMNAME = 'listenpodcast_contentitemname'
ENTITY_LISTENPODCAST_CONTENTTYPE = 'listenpodcast_contenttype'
ENTITY_LISTENPODCAST_SELECTCRITERIA = 'listenpodcast_selectcriteria'
ENTITY_RATECONTENTITEM_CONTENTITEMNAME = 'ratecontentitem_contentitemname'
ENTITY_RATECONTENTITEM_CONTENTTYPE = 'ratecontentitem_contenttype'
ENTITY_RATECONTENTITEM_RATINGVALUE = 'ratecontentitem_ratingvalue'
ENTITY_READARTICLE_COMMAND = 'readarticle_command'
ENTITY_READARTICLE_CONTENTITEMNAME = 'readarticle_contentitemname'
ENTITY_READARTICLE_CONTENTTYPE = 'readarticle_contenttype'
ENTITY_READARTICLE_SELECTCRITERIA = 'readarticle_selectcriteria'
ENTITY_SHARECONTENTITEM_CONTENTITEMNAME = 'sharecontentitem_contentitemname'
ENTITY_SHARECONTENTITEM_CONTENTTYPE = 'sharecontentitem_contenttype'
ENTITY_SHARECONTENTITEM_SOCIALNETWORK = 'sharecontentitem_socialnetwork'
ENTITY_WATCHVIDEO_COMMAND = 'watchvideo_command'
ENTITY_WATCHVIDEO_CONTENTITEMNAME = 'watchvideo_contentitemname'
ENTITY_WATCHVIDEO_CONTENTTYPE = 'watchvideo_contenttype'
ENTITY_WATCHVIDEO_SELECTCRITERIA = 'watchvideo_selectcriteria'
|
utime/utils/scriptutils/predict.py | learning310/U-Time | 138 | 12641400 | """
A set of functions for running prediction in various settings
"""
import numpy as np
def predict_on_generator(model, generator, argmax=False):
"""
Takes a tf.keras model and uses it to predict on all batches in a generator
Stacks the predictions over all batches on axis 0 (vstack)
Args:
model: A tf.keras module instance. Should accept batches as output
from 'generator'
generator: A generator object yielding one or more batches of data to
predict on
argmax: Whether to return argmax values or model output values
Returns:
If argmax is true, returns integer predictions of shape [-1, 1].
Otherwise, returns floating values of shape [-1, n_classes]
"""
pred = []
end_of_data = False
while not end_of_data:
try:
X_batch, _ = next(generator)
except StopIteration:
end_of_data = True
else:
# Predict
pred_batch = model.predict_on_batch(X_batch)
if argmax:
pred_batch = pred_batch.argmax(-1).reshape(-1, 1)
pred.append(pred_batch)
return np.vstack(pred)
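# Minimal usage sketch (model and sequencer are assumed to exist and to be
# compatible; a fresh generator is needed per call since it is consumed):
#
#   gen = sequencer.to_batch_generator(study_id="some_id")
#   probs = predict_on_generator(model, gen)   # (N, n_classes) float outputs
#   # or, with argmax=True, integer predictions of shape (N, 1)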
def predict_by_id(model, sequencer, study_id, argmax=False):
"""
Takes a tf.keras model and predicts on all batches of data in a SleepStudy
object.
Args:
model: A tf.keras model instance. Should accept batches of data
as output by the 'sequence' Sequence object.
sequencer: A Sequence object which stores at least the passed
SleepStudy object of 'sleep_study'.
study_id: The identifier string of a SleepStudy object in 'sequence'.
argmax: See predict_on_generator docstring.
Returns:
Predictions of 'model' on all batches of data in a SleepStudy
Please refer to the 'predict_on_generator' docstring.
"""
# Get generator
gen = sequencer.to_batch_generator(study_id=study_id)
return predict_on_generator(model, gen, argmax)
def sequence_predict_generator(model, total_seq_length, generator,
argmax=False, overlapping=True, verbose=True):
"""
Takes a tf.keras model and predicts on segments of data from a generator.
This function takes a few additional values needed to derive an
understanding of the data produced by 'generator', see below:
Args:
model: A tf.keras model to predict with. Should accept data
as output by the generator.
total_seq_length: The total number of 'segments/epochs/stages' in the
generator. This is needed to initialize the
predictions array.
generator: A generator which produces batches of data
argmax: Whether to return argmax values or model output values
        overlapping:       Specifies whether the sequences output by 'generator'
                           represent overlapping segments or contiguous data.
        verbose:           If True, prints the prediction progress to screen.
Returns:
An array of shape [total_seq_length, n_classes] or
[total_seq_length, -1, n_classes] if data_per_prediction != input_dims.
If argmax = True axis -1 (now shape 1) is squeezed.
"""
n_classes = model.outputs[0].get_shape()[-1]
s = model.outputs[0].get_shape().as_list()
pred = np.zeros(shape=[total_seq_length] + s[2:], dtype=np.float64)
cur_pos = 0
for X, _, _ in generator:
if verbose:
print(" pos: {}/{}".format(cur_pos+1, total_seq_length),
end="\r", flush=True)
batch_pred = model.predict_on_batch(X)
if overlapping:
for p in batch_pred:
pred[cur_pos:cur_pos+p.shape[0]] += p
cur_pos += 1
else:
batch_pred = batch_pred.reshape(-1, n_classes)
n_vals = batch_pred.shape[0]
pred[cur_pos:cur_pos+n_vals] += batch_pred
cur_pos += n_vals
if argmax:
pred = pred.argmax(-1)
print()
return pred
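# Example sketch for the overlapping case (numbers are hypothetical): if the
# model scores fixed-length windows of epochs, each epoch is covered by several
# overlapping windows, so pred[i] accumulates the summed scores before the
# optional argmax:
#
#   pred = sequence_predict_generator(model, 1000, gen, argmax=True, overlapping=True)
#   # -> shape (1000,) when the model emits one score vector per epoch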
|
api/accounts_api.py | liamnewmarch/chromium-dashboard | 450 | 12641434 | # -*- coding: utf-8 -*-
# Copyright 2021 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from google.cloud import ndb
from framework import basehandlers
from framework import permissions
from framework import ramcache
from internals import models
class AccountsAPI(basehandlers.APIHandler):
"""User accounts store info on registered users."""
# TODO(jrobbins): do_get
@permissions.require_admin_site
def do_post(self):
"""Process a request to create an account."""
email = self.get_param('email', required=True)
is_admin = self.get_bool_param('isAdmin')
user = self.create_account(email, is_admin)
response_json = user.format_for_template()
return response_json
def create_account(self, email, is_admin):
"""Create and store a new account entity."""
# Don't add a duplicate email address.
user = models.AppUser.query(
models.AppUser.email == email).get(keys_only=True)
if not user:
user = models.AppUser(email=str(email))
user.is_admin = is_admin
user.put()
return user
else:
self.abort(400, 'User already exists')
# TODO(jrobbins): do_patch
@permissions.require_admin_site
def do_delete(self, account_id):
"""Process a request to delete the specified account."""
if account_id:
self.delete_account(account_id)
return {'message': 'Done'}
else:
self.abort(400, msg='Account ID not specified')
def delete_account(self, account_id):
"""Delete the specified account."""
if account_id:
found_user = models.AppUser.get_by_id(int(account_id))
if found_user:
found_user.key.delete()
ramcache.flush_all()
else:
self.abort(404, msg='Specified account ID not found')
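# Request/response sketch (paths and payloads are illustrative; field names
# follow the handlers above):
#
#   POST   .../accounts               {"email": "user@example.com", "isAdmin": false}
#     -> JSON for the created AppUser (format_for_template())
#   DELETE .../accounts/<account_id>
#     -> {"message": "Done"}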
|
ckan/cli/dataset.py | gg2/ckan | 2,805 | 12641453 | # encoding: utf-8
import logging
import pprint
import click
import ckan.logic as logic
import ckan.model as model
log = logging.getLogger(__name__)
@click.group(short_help=u"Manage datasets")
def dataset():
"""Manage datasets.
"""
pass
@dataset.command()
@click.argument(u'package')
def show(package):
u'''Shows dataset properties.
'''
dataset = _get_dataset(package)
click.echo(pprint.pformat(dataset.as_dict()))
@dataset.command()
def list():
u'''Lists datasets.
'''
click.echo(u'Datasets:')
datasets = model.Session.query(model.Package)
click.echo(u'count = %i' % datasets.count())
for dataset in datasets:
state = (
u'(%s)' % dataset.state
) if dataset.state != u'active' else u''
click.echo(
u'%s %s %s' %
(click.style(dataset.id, bold=True), dataset.name, state)
)
@dataset.command()
@click.argument(u'package')
def delete(package):
u'''Changes dataset state to 'deleted'.
'''
dataset = _get_dataset(package)
old_state = dataset.state
dataset.delete()
model.repo.commit_and_remove()
dataset = _get_dataset(package)
click.echo(
u'%s %s -> %s' % (
dataset.name, click.style(old_state, fg=u'red'),
click.style(dataset.state, fg=u'green')
)
)
@dataset.command()
@click.argument(u'package')
def purge(package):
u'''Removes dataset from db entirely.
'''
dataset = _get_dataset(package)
name = dataset.name
site_user = logic.get_action(u'get_site_user')({u'ignore_auth': True}, {})
context = {u'user': site_user[u'name'], u'ignore_auth': True}
logic.get_action(u'dataset_purge')(context, {u'id': package})
click.echo(u'%s purged' % name)
def _get_dataset(package):
dataset = model.Package.get(str(package))
assert dataset, u'Could not find dataset matching reference: {}'.format(
package
)
return dataset
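# CLI usage sketch (the exact invocation depends on how the `ckan` entry point
# and config file are wired up; treat these as illustrative):
#
#   ckan -c ckan.ini dataset list
#   ckan -c ckan.ini dataset show <id-or-name>
#   ckan -c ckan.ini dataset delete <id-or-name>   # sets state to 'deleted'
#   ckan -c ckan.ini dataset purge <id-or-name>    # removes it from the DB entirely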
|
hw3/plot_part1.py | whoiszyc/cs294-112_hws | 102 | 12641464 | import pandas as pd
import os
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
def read_result(exp_name):
path = os.path.join('data', exp_name, 'log.txt')
return pd.read_csv(path, sep='\t')
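# The log file is assumed to be tab-separated with at least the columns used
# below, e.g. (values are made up):
#
#   Timestep    MeanReward100Episodes    BestMeanReward
#   10000       -20.3                    -19.8
#   20000       -18.7                    -17.2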
def add_plot(data, var_name, label=''):
sns.set(style="darkgrid", font_scale=1.5)
plt.plot(data['Timestep'], data[var_name], \
label=label, alpha=0.8)
def plot_11(data):
r1, r2, r3, r4 = data
plt.figure()
add_plot(r1, 'MeanReward100Episodes', 'MeanReward100Episodes');
add_plot(r1, 'BestMeanReward', 'BestMeanReward');
plt.xlabel('Time step');
plt.ylabel('Reward');
plt.legend();
plt.savefig(
os.path.join('results', 'p11.png'),
bbox_inches='tight',
transparent=True,
pad_inches=0.1
)
def plot_12(data):
r1, r2, r3, r4 = data
plt.figure()
add_plot(r1, 'MeanReward100Episodes');
add_plot(r1, 'BestMeanReward', 'vanilla DQN');
add_plot(r2, 'MeanReward100Episodes');
add_plot(r2, 'BestMeanReward', 'double DQN');
plt.xlabel('Time step');
plt.ylabel('Reward');
plt.legend();
plt.savefig(
os.path.join('results', 'p12.png'),
bbox_inches='tight',
transparent=True,
pad_inches=0.1
)
def plot_13(data):
r1, r2, r3, r4 = data
plt.figure()
add_plot(r3, 'MeanReward100Episodes');
add_plot(r3, 'BestMeanReward', 'gamma = 0.9');
add_plot(r2, 'MeanReward100Episodes');
add_plot(r2, 'BestMeanReward', 'gamma = 0.99');
add_plot(r4, 'MeanReward100Episodes');
add_plot(r4, 'BestMeanReward', 'gamma = 0.999');
plt.legend();
plt.xlabel('Time step');
plt.ylabel('Reward');
plt.savefig(
os.path.join('results', 'p13.png'),
bbox_inches='tight',
transparent=True,
pad_inches=0.1
)
def main():
if not os.path.exists('results'):
os.makedirs('results')
r1 = read_result('PongNoFrameskip-v4_bq')
r2 = read_result('PongNoFrameskip-v4_dq')
r3 = read_result('PongNoFrameskip-v4_dq_gamma-0_9')
r4 = read_result('PongNoFrameskip-v4_dq_gamma-0_999')
data = (r1, r2, r3, r4)
plot_11(data)
plot_12(data)
plot_13(data)
if __name__ == '__main__':
main() |
quantecon/optimize/pivoting.py | Smit-create/QuantEcon.py | 1,462 | 12641469 | <reponame>Smit-create/QuantEcon.py
"""
Contain pivoting routines commonly used in the Simplex Algorithm and
Lemke-Howson Algorithm routines.
"""
import numpy as np
from numba import jit
TOL_PIV = 1e-10
TOL_RATIO_DIFF = 1e-15
@jit(nopython=True, cache=True)
def _pivoting(tableau, pivot_col, pivot_row):
"""
Perform a pivoting step. Modify `tableau` in place.
Parameters
----------
tableau : ndarray(float, ndim=2)
Array containing the tableau.
pivot_col : scalar(int)
Pivot column index.
pivot_row : scalar(int)
Pivot row index.
Returns
-------
tableau : ndarray(float, ndim=2)
View to `tableau`.
"""
nrows, ncols = tableau.shape
pivot_elt = tableau[pivot_row, pivot_col]
for j in range(ncols):
tableau[pivot_row, j] /= pivot_elt
for i in range(nrows):
if i == pivot_row:
continue
multiplier = tableau[i, pivot_col]
if multiplier == 0:
continue
for j in range(ncols):
tableau[i, j] -= tableau[pivot_row, j] * multiplier
return tableau
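# Worked example (values chosen by hand): pivoting on row 0, column 0 scales the
# pivot row by 1/2 and eliminates the pivot column from the other row, so
#
#   T = np.array([[2., 1., 4.],
#                 [1., 3., 6.]])
#   _pivoting(T, 0, 0)   # T becomes [[1., 0.5, 2.], [0., 2.5, 4.]] (modified in place)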
@jit(nopython=True, cache=True)
def _min_ratio_test_no_tie_breaking(tableau, pivot, test_col,
argmins, num_candidates,
tol_piv, tol_ratio_diff):
"""
Perform the minimum ratio test, without tie breaking, for the
candidate rows in `argmins[:num_candidates]`. Return the number
    `num_argmins` of the rows minimizing the ratio and store their
indices in `argmins[:num_argmins]`.
Parameters
----------
tableau : ndarray(float, ndim=2)
Array containing the tableau.
pivot : scalar(int)
Pivot.
test_col : scalar(int)
Index of the column used in the test.
argmins : ndarray(int, ndim=1)
Array containing the indices of the candidate rows. Modified in
place to store the indices of minimizing rows.
num_candidates : scalar(int)
Number of candidate rows in `argmins`.
tol_piv : scalar(float)
Pivot tolerance below which a number is considered to be
nonpositive.
tol_ratio_diff : scalar(float)
Tolerance to determine a tie between ratio values.
Returns
-------
num_argmins : scalar(int)
Number of minimizing rows.
"""
ratio_min = np.inf
num_argmins = 0
for k in range(num_candidates):
i = argmins[k]
if tableau[i, pivot] <= tol_piv: # Treated as nonpositive
continue
ratio = tableau[i, test_col] / tableau[i, pivot]
if ratio > ratio_min + tol_ratio_diff: # Ratio large for i
continue
elif ratio < ratio_min - tol_ratio_diff: # Ratio smaller for i
ratio_min = ratio
num_argmins = 1
else: # Ratio equal
num_argmins += 1
argmins[num_argmins-1] = i
return num_argmins
@jit(nopython=True, cache=True)
def _lex_min_ratio_test(tableau, pivot, slack_start, argmins,
tol_piv=TOL_PIV, tol_ratio_diff=TOL_RATIO_DIFF):
"""
Perform the lexico-minimum ratio test.
Parameters
----------
tableau : ndarray(float, ndim=2)
Array containing the tableau.
pivot : scalar(int)
Pivot.
slack_start : scalar(int)
First index for the slack variables.
argmins : ndarray(int, ndim=1)
Empty array used to store the row indices. Its length must be no
smaller than the number of the rows of `tableau`.
tol_piv : scalar(float), optional
Pivot tolerance below which a number is considered to be
nonpositive. Default value is {TOL_PIV}.
tol_ratio_diff : scalar(float), optional
Tolerance to determine a tie between ratio values. Default value
is {TOL_RATIO_DIFF}.
Returns
-------
found : bool
False if there is no positive entry in the pivot column.
row_min : scalar(int)
Index of the row with the lexico-minimum ratio.
"""
nrows = tableau.shape[0]
num_candidates = nrows
found = False
# Initialize `argmins`
for i in range(nrows):
argmins[i] = i
num_argmins = _min_ratio_test_no_tie_breaking(
tableau, pivot, -1, argmins, num_candidates, tol_piv, tol_ratio_diff
)
if num_argmins == 1:
found = True
elif num_argmins >= 2:
for j in range(slack_start, slack_start+nrows):
if j == pivot:
continue
num_argmins = _min_ratio_test_no_tie_breaking(
tableau, pivot, j, argmins, num_argmins,
tol_piv, tol_ratio_diff
)
if num_argmins == 1:
found = True
break
return found, argmins[0]
_lex_min_ratio_test.__doc__ = _lex_min_ratio_test.__doc__.format(
TOL_PIV=TOL_PIV, TOL_RATIO_DIFF=TOL_RATIO_DIFF
)
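# Usage sketch for the lexicographic ratio test (illustrative; `tableau` is
# assumed to carry the slack columns starting at `slack_start` and the
# right-hand side in its last column, as in this package's callers):
#
#   argmins = np.empty(tableau.shape[0], dtype=np.int_)
#   found, row = _lex_min_ratio_test(tableau, pivot, slack_start, argmins)
#   if found:
#       _pivoting(tableau, pivot, row)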
|
tests/tasks/twitter/test_twitter.py | concreted/prefect | 8,633 | 12641470 | import pytest
import prefect
from prefect.tasks.twitter import LoadTweetReplies
from prefect.utilities.configuration import set_temporary_config
class TestLoadTweetReplies:
def test_initialize_with_nothing_sets_defaults(self):
task = LoadTweetReplies()
assert task.user is None
assert task.tweet_id is None
def test_initialize_kwargs_are_processed(self):
task = LoadTweetReplies(checkpoint=True, name="test")
assert task.name == "test"
assert task.checkpoint is True
@pytest.mark.parametrize("attr", ["user", "tweet_id"])
def test_initializes_attr_from_kwargs(self, attr):
task = LoadTweetReplies(**{attr: "my-value"})
assert getattr(task, attr) == "my-value"
|
mpire/dashboard/__init__.py | synapticarbors/mpire | 505 | 12641525 | <reponame>synapticarbors/mpire<gh_stars>100-1000
try:
from mpire.dashboard.dashboard import connect_to_dashboard, start_dashboard
except (ImportError, ModuleNotFoundError):
def connect_to_dashboard(*_, **__):
raise NotImplementedError("Install the dashboard dependencies to enable the dashboard")
def start_dashboard(*_, **__):
raise NotImplementedError("Install the dashboard dependencies to enable the dashboard")
|
labs/03_neural_recsys/keras_fixes.py | soufiomario/labs-Deep-learning | 1,398 | 12641535 | """Temporary workaround for keras bugs in merge modes.
merge([...], mode='dot') and merge([...], mode='cos') do not return
the correct output on 2D inputs.
Those fixes only work with the TF backend.
More details:
https://github.com/fchollet/keras/issues/2626
"""
import tensorflow as tf
def dot_mode(inputs):
"""Work around for Keras bug with merge([...], mode='dot').
https://github.com/fchollet/keras/issues/2626
The dot product of 2 embeddings can be used as an unnormalized
approximation to the cosine similarity.
"""
latent_codes_1, latent_codes_2 = inputs
return tf.reduce_sum(latent_codes_1 * latent_codes_2, axis=-1)
def cos_mode(inputs):
"""Work around for Keras bug with merge([...], mode='cos').
Compute the cosine similarity of two unormalized embeddings.
"""
latent_codes_1, latent_codes_2 = inputs
sq_norm_1 = tf.reduce_sum(latent_codes_1 ** 2, axis=-1)
sq_norm_2 = tf.reduce_sum(latent_codes_2 ** 2, axis=-1)
dot = tf.reduce_sum(latent_codes_1 * latent_codes_2, axis=-1)
return dot / tf.sqrt(sq_norm_1 * sq_norm_2)
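# --- Editor's sketch (not part of the original file) ---
# Illustrative check of the two merge-mode workarounds on toy embeddings.
# Assumes eager execution (TF2) so the tensors print directly; under the
# original TF1 backend you would evaluate them in a session instead.
if __name__ == "__main__":
    a = tf.constant([[1.0, 0.0], [1.0, 1.0]])
    b = tf.constant([[1.0, 0.0], [0.0, 1.0]])
    print(dot_mode([a, b]))  # unnormalized similarities: [1.0, 1.0]
    print(cos_mode([a, b]))  # cosine similarities: [1.0, ~0.707]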
|
plugins/public/cinq-collector-dns/cinq_collector_dns/__init__.py | gibbsie/cloud-inquisitor | 462 | 12641536 |
from collections import defaultdict
import requests
from cloud_inquisitor.config import dbconfig, ConfigOption
from cloud_inquisitor.database import db
from cloud_inquisitor.exceptions import CloudFlareError
from cloud_inquisitor.plugins import BaseCollector, CollectorType
from cloud_inquisitor.plugins.types.accounts import AXFRAccount, CloudFlareAccount
from cloud_inquisitor.plugins.types.resources import DNSZone, DNSRecord
from cloud_inquisitor.utils import get_resource_id
from cloud_inquisitor.wrappers import retry
from dns import zone as dns_zone, query
from dns.rdatatype import to_text as type_to_text
class DNSCollector(BaseCollector):
name = 'DNS'
ns = 'collector_dns'
type = CollectorType.GLOBAL
interval = dbconfig.get('interval', ns, 15)
options = (
ConfigOption('enabled', False, 'bool', 'Enable the DNS collector plugin'),
ConfigOption('interval', 15, 'int', 'Run frequency in minutes'),
ConfigOption('cloudflare_enabled', False, 'bool', 'Enable CloudFlare as a source for DNS records'),
ConfigOption('axfr_enabled', False, 'bool', 'Enable using DNS Zone Transfers for records')
)
def __init__(self):
super().__init__()
self.axfr_enabled = self.dbconfig.get('axfr_enabled', self.ns, False)
self.cloudflare_enabled = self.dbconfig.get('cloudflare_enabled', self.ns, False)
self.axfr_accounts = list(AXFRAccount.get_all().values())
self.cf_accounts = list(CloudFlareAccount.get_all().values())
self.cloudflare_initialized = defaultdict(lambda: False)
self.cloudflare_session = {}
def run(self):
if self.axfr_enabled:
try:
for account in self.axfr_accounts:
records = self.get_axfr_records(account.server, account.domains)
self.process_zones(records, account)
except:
self.log.exception('Failed processing domains via AXFR')
if self.cloudflare_enabled:
try:
for account in self.cf_accounts:
records = self.get_cloudflare_records(account=account)
self.process_zones(records, account)
except:
self.log.exception('Failed processing domains via CloudFlare')
def process_zones(self, zones, account):
self.log.info('Processing DNS records for {}'.format(account.account_name))
# region Update zones
existing_zones = DNSZone.get_all(account)
for data in zones:
if data['zone_id'] in existing_zones:
zone = DNSZone.get(data['zone_id'])
if zone.update(data):
self.log.debug('Change detected for DNS zone {}/{}'.format(
account.account_name,
zone.name
))
db.session.add(zone.resource)
else:
DNSZone.create(
data['zone_id'],
account_id=account.account_id,
properties={k: v for k, v in data.items() if k not in ('records', 'zone_id', 'tags')},
tags=data['tags']
)
self.log.debug('Added DNS zone {}/{}'.format(
account.account_name,
data['name']
))
db.session.commit()
zk = set(x['zone_id'] for x in zones)
ezk = set(existing_zones.keys())
for resource_id in ezk - zk:
zone = existing_zones[resource_id]
# Delete all the records for the zone
for record in zone.records:
db.session.delete(record.resource)
db.session.delete(zone.resource)
self.log.debug('Deleted DNS zone {}/{}'.format(
account.account_name,
zone.name.value
))
db.session.commit()
# endregion
# region Update resource records
for zone in zones:
try:
existing_zone = DNSZone.get(zone['zone_id'])
existing_records = {rec.id: rec for rec in existing_zone.records}
for data in zone['records']:
if data['id'] in existing_records:
record = existing_records[data['id']]
if record.update(data):
                            self.log.debug('Change detected for DNSRecord {}/{}/{}'.format(
                                account.account_name,
                                zone['name'],
data['name']
))
db.session.add(record.resource)
else:
record = DNSRecord.create(
data['id'],
account_id=account.account_id,
properties={k: v for k, v in data.items() if k not in ('records', 'zone_id')},
tags={}
)
self.log.debug('Added new DNSRecord {}/{}/{}'.format(
account.account_name,
zone['name'],
data['name']
))
existing_zone.add_record(record)
db.session.commit()
rk = set(x['id'] for x in zone['records'])
erk = set(existing_records.keys())
for resource_id in erk - rk:
record = existing_records[resource_id]
db.session.delete(record.resource)
self.log.debug('Deleted DNSRecord {}/{}/{}'.format(
account.account_name,
zone['zone_id'],
record.name
))
db.session.commit()
except:
self.log.exception('Error while attempting to update records for {}/{}'.format(
account.account_name,
zone['zone_id'],
))
db.session.rollback()
# endregion
@retry
def get_axfr_records(self, server, domains):
"""Return a `list` of `dict`s containing the zones and their records, obtained from the DNS server
Returns:
:obj:`list` of `dict`
"""
zones = []
for zoneName in domains:
try:
zone = {
'zone_id': get_resource_id('axfrz', zoneName),
'name': zoneName,
'source': 'AXFR',
'comment': None,
'tags': {},
'records': []
}
z = dns_zone.from_xfr(query.xfr(server, zoneName))
rdata_fields = ('name', 'ttl', 'rdata')
for rr in [dict(zip(rdata_fields, x)) for x in z.iterate_rdatas()]:
record_name = rr['name'].derelativize(z.origin).to_text()
zone['records'].append(
{
'id': get_resource_id('axfrr', record_name, ['{}={}'.format(k, str(v)) for k, v in rr.items()]),
'zone_id': zone['zone_id'],
'name': record_name,
'value': sorted([rr['rdata'].to_text()]),
'type': type_to_text(rr['rdata'].rdtype)
})
if len(zone['records']) > 0:
zones.append(zone)
except Exception as ex:
self.log.exception('Failed fetching DNS zone information for {}: {}'.format(zoneName, ex))
raise
return zones
def get_cloudflare_records(self, *, account):
"""Return a `list` of `dict`s containing the zones and their records, obtained from the CloudFlare API
        Args:
            account (:obj:`CloudFlareAccount`): A CloudFlare Account object
        Returns:
            :obj:`list` of `dict`
"""
zones = []
for zobj in self.__cloudflare_list_zones(account=account):
try:
self.log.debug('Processing DNS zone CloudFlare/{}'.format(zobj['name']))
zone = {
'zone_id': get_resource_id('cfz', zobj['name']),
'name': zobj['name'],
'source': 'CloudFlare',
'comment': None,
'tags': {},
'records': []
}
for record in self.__cloudflare_list_zone_records(account=account, zoneID=zobj['id']):
zone['records'].append({
'id': get_resource_id('cfr', zobj['id'], ['{}={}'.format(k, v) for k, v in record.items()]),
'zone_id': zone['zone_id'],
'name': record['name'],
'value': record['value'],
'type': record['type']
})
if len(zone['records']) > 0:
zones.append(zone)
except CloudFlareError:
self.log.exception('Failed getting records for CloudFlare zone {}'.format(zobj['name']))
return zones
# region Helper functions for CloudFlare
def __cloudflare_request(self, *, account, path, args=None):
"""Helper function to interact with the CloudFlare API.
Args:
account (:obj:`CloudFlareAccount`): CloudFlare Account object
path (`str`): URL endpoint to communicate with
args (:obj:`dict` of `str`: `str`): A dictionary of arguments for the endpoint to consume
Returns:
`dict`
"""
if not args:
args = {}
if not self.cloudflare_initialized[account.account_id]:
self.cloudflare_session[account.account_id] = requests.Session()
self.cloudflare_session[account.account_id].headers.update({
'X-Auth-Email': account.email,
'X-Auth-Key': account.api_key,
'Content-Type': 'application/json'
})
self.cloudflare_initialized[account.account_id] = True
if 'per_page' not in args:
args['per_page'] = 100
response = self.cloudflare_session[account.account_id].get(account.endpoint + path, params=args)
if response.status_code != 200:
raise CloudFlareError('Request failed: {}'.format(response.text))
return response.json()
def __cloudflare_list_zones(self, *, account, **kwargs):
"""Helper function to list all zones registered in the CloudFlare system. Returns a `list` of the zones
Args:
account (:obj:`CloudFlareAccount`): A CloudFlare Account object
**kwargs (`dict`): Extra arguments to pass to the API endpoint
Returns:
`list` of `dict`
"""
done = False
zones = []
page = 1
while not done:
kwargs['page'] = page
response = self.__cloudflare_request(account=account, path='/zones', args=kwargs)
info = response['result_info']
            if 'total_pages' not in info or page >= info['total_pages']:
done = True
else:
page += 1
zones += response['result']
return zones
def __cloudflare_list_zone_records(self, *, account, zoneID, **kwargs):
"""Helper function to list all records on a CloudFlare DNS Zone. Returns a `dict` containing the records and
their information.
Args:
account (:obj:`CloudFlareAccount`): A CloudFlare Account object
zoneID (`int`): Internal CloudFlare ID of the DNS zone
**kwargs (`dict`): Additional arguments to be consumed by the API endpoint
Returns:
            :obj:`list` of `dict`
"""
done = False
records = {}
page = 1
while not done:
kwargs['page'] = page
response = self.__cloudflare_request(
account=account,
path='/zones/{}/dns_records'.format(zoneID),
args=kwargs
)
info = response['result_info']
# Check if we have received all records, and if not iterate over the result set
if 'total_pages' not in info or page >= info['total_pages']:
done = True
else:
page += 1
for record in response['result']:
if record['name'] in records:
records[record['name']]['value'] = sorted(records[record['name']]['value'] + [record['content']])
else:
records[record['name']] = {
'name': record['name'],
'value': sorted([record['content']]),
'type': record['type']
}
return list(records.values())
# endregion
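# --- Editor's sketch: the pagination pattern used by the CloudFlare helpers above ---
# Standalone, hypothetical illustration of the page/total_pages loop, using an
# in-memory fake instead of the real CloudFlare API. It is not called anywhere
# and exists only to document the control flow.
def _example_paginated_fetch():
    fake_pages = {
        1: {'result': ['a', 'b'], 'result_info': {'total_pages': 3}},
        2: {'result': ['c'], 'result_info': {'total_pages': 3}},
        3: {'result': ['d'], 'result_info': {'total_pages': 3}},
    }
    items, page, done = [], 1, False
    while not done:
        response = fake_pages[page]
        info = response['result_info']
        if 'total_pages' not in info or page >= info['total_pages']:
            done = True
        else:
            page += 1
        items += response['result']
    return items  # ['a', 'b', 'c', 'd']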
|
src/holodeck/exceptions.py | LaudateCorpus1/holodeck | 518 | 12641544 |
"""Holodeck Exceptions"""
class HolodeckException(Exception):
"""Base class for a generic exception in Holodeck."""
class HolodeckConfigurationException(HolodeckException):
"""The user provided an invalid configuration for Holodeck"""
class TimeoutException(HolodeckException):
"""Exception raised when communicating with the engine timed out."""
class NotFoundException(HolodeckException):
"""Raised when a package cannot be found"""
|
tensorlayer/layers/convolution/quan_conv_bn.py | Howdy-Personally/tensorlayer-master | 4,484 | 12641569 | #! /usr/bin/python
# -*- coding: utf-8 -*-
import numpy as np
import tensorflow as tf
from tensorflow.python.training import moving_averages
import tensorlayer as tl
from tensorlayer import logging
from tensorlayer.layers.core import Layer
from tensorlayer.layers.utils import (quantize_active_overflow, quantize_weight_overflow)
# from tensorlayer.layers.core import LayersConfig
__all__ = ['QuanConv2dWithBN']
class QuanConv2dWithBN(Layer):
"""The :class:`QuanConv2dWithBN` class is a quantized convolutional layer with BN, which weights are 'bitW' bits and the output of the previous layer
are 'bitA' bits while inferencing.
Note that, the bias vector would keep the same.
Parameters
----------
n_filter : int
The number of filters.
filter_size : tuple of int
The filter size (height, width).
strides : tuple of int
The sliding window strides of corresponding input dimensions.
It must be in the same order as the ``shape`` parameter.
padding : str
The padding algorithm type: "SAME" or "VALID".
act : activation function
The activation function of this layer.
decay : float
A decay factor for `ExponentialMovingAverage`.
Suggest to use a large value for large dataset.
epsilon : float
Eplison.
is_train : boolean
Is being used for training or inference.
beta_init : initializer or None
The initializer for initializing beta, if None, skip beta.
Usually you should not skip beta unless you know what happened.
gamma_init : initializer or None
The initializer for initializing gamma, if None, skip gamma.
bitW : int
The bits of this layer's parameter
bitA : int
The bits of the output of previous layer
use_gemm : boolean
If True, use gemm instead of ``tf.matmul`` for inferencing. (TODO).
W_init : initializer
The initializer for the the weight matrix.
W_init_args : dictionary
The arguments for the weight matrix initializer.
data_format : str
"NHWC" or "NCHW", default is "NHWC".
dilation_rate : tuple of int
Specifying the dilation rate to use for dilated convolution.
in_channels : int
The number of in channels.
name : str
A unique layer name.
Examples
---------
>>> import tensorlayer as tl
>>> net = tl.layers.Input([50, 256, 256, 3])
>>> layer = tl.layers.QuanConv2dWithBN(n_filter=64, filter_size=(5,5),strides=(1,1),padding='SAME',name='qcnnbn1')
>>> print(layer)
>>> net = tl.layers.QuanConv2dWithBN(n_filter=64, filter_size=(5,5),strides=(1,1),padding='SAME',name='qcnnbn1')(net)
>>> print(net)
"""
def __init__(
self,
n_filter=32,
filter_size=(3, 3),
strides=(1, 1),
padding='SAME',
act=None,
decay=0.9,
epsilon=1e-5,
is_train=False,
gamma_init=tl.initializers.truncated_normal(stddev=0.02),
beta_init=tl.initializers.truncated_normal(stddev=0.02),
bitW=8,
bitA=8,
use_gemm=False,
W_init=tl.initializers.truncated_normal(stddev=0.02),
W_init_args=None,
data_format="channels_last",
dilation_rate=(1, 1),
in_channels=None,
name='quan_cnn2d_bn',
):
super(QuanConv2dWithBN, self).__init__(act=act, name=name)
self.n_filter = n_filter
self.filter_size = filter_size
self.strides = strides
self.padding = padding
self.decay = decay
self.epsilon = epsilon
self.is_train = is_train
self.gamma_init = gamma_init
self.beta_init = beta_init
self.bitW = bitW
self.bitA = bitA
self.use_gemm = use_gemm
self.W_init = W_init
self.W_init_args = W_init_args
self.data_format = data_format
self.dilation_rate = dilation_rate
self.in_channels = in_channels
logging.info(
"QuanConv2dWithBN %s: n_filter: %d filter_size: %s strides: %s pad: %s act: %s " % (
self.name, n_filter, filter_size, str(strides), padding,
self.act.__name__ if self.act is not None else 'No Activation'
)
)
if self.in_channels:
self.build(None)
self._built = True
if use_gemm:
raise Exception("TODO. The current version use tf.matmul for inferencing.")
if len(strides) != 2:
raise ValueError("len(strides) should be 2.")
def __repr__(self):
actstr = self.act.__name__ if self.act is not None else 'No Activation'
s = (
'{classname}(in_channels={in_channels}, out_channels={n_filter}, kernel_size={filter_size}'
', strides={strides}, padding={padding}' + actstr
)
if self.dilation_rate != (1, ) * len(self.dilation_rate):
s += ', dilation={dilation_rate}'
if self.name is not None:
s += ', name=\'{name}\''
s += ')'
return s.format(classname=self.__class__.__name__, **self.__dict__)
def build(self, inputs_shape):
if self.data_format == 'channels_last':
self.data_format = 'NHWC'
if self.in_channels is None:
self.in_channels = inputs_shape[-1]
self._strides = [1, self.strides[0], self.strides[1], 1]
self._dilation_rate = [1, self.dilation_rate[0], self.dilation_rate[1], 1]
elif self.data_format == 'channels_first':
self.data_format = 'NCHW'
if self.in_channels is None:
self.in_channels = inputs_shape[1]
self._strides = [1, 1, self.strides[0], self.strides[1]]
self._dilation_rate = [1, 1, self.dilation_rate[0], self.dilation_rate[1]]
else:
raise Exception("data_format should be either channels_last or channels_first")
self.filter_shape = (self.filter_size[0], self.filter_size[1], self.in_channels, self.n_filter)
self.W = self._get_weights("filters", shape=self.filter_shape, init=self.W_init)
para_bn_shape = (self.n_filter, )
if self.gamma_init:
self.scale_para = self._get_weights(
"scale_para", shape=para_bn_shape, init=self.gamma_init, trainable=self.is_train
)
else:
self.scale_para = None
if self.beta_init:
self.offset_para = self._get_weights(
"offset_para", shape=para_bn_shape, init=self.beta_init, trainable=self.is_train
)
else:
self.offset_para = None
self.moving_mean = self._get_weights(
"moving_mean", shape=para_bn_shape, init=tl.initializers.constant(1.0), trainable=False
)
self.moving_variance = self._get_weights(
"moving_variance", shape=para_bn_shape, init=tl.initializers.constant(1.0), trainable=False
)
def forward(self, inputs):
x = inputs
inputs = quantize_active_overflow(inputs, self.bitA) # Do not remove
outputs = tf.nn.conv2d(
input=x, filters=self.W, strides=self._strides, padding=self.padding, data_format=self.data_format,
dilations=self._dilation_rate, name=self.name
)
mean, variance = tf.nn.moments(outputs, axes=list(range(len(outputs.get_shape()) - 1)))
update_moving_mean = moving_averages.assign_moving_average(
self.moving_mean, mean, self.decay, zero_debias=False
) # if zero_debias=True, has bias
update_moving_variance = moving_averages.assign_moving_average(
            self.moving_variance, variance, self.decay, zero_debias=False
) # if zero_debias=True, has bias
if self.is_train:
mean, var = self.mean_var_with_update(update_moving_mean, update_moving_variance, mean, variance)
else:
mean, var = self.moving_mean, self.moving_variance
w_fold = self._w_fold(self.W, self.scale_para, var, self.epsilon)
W_ = quantize_weight_overflow(w_fold, self.bitW)
conv_fold = tf.nn.conv2d(inputs, W_, strides=self.strides, padding=self.padding, data_format=self.data_format)
if self.beta_init:
bias_fold = self._bias_fold(self.offset_para, self.scale_para, mean, var, self.epsilon)
conv_fold = tf.nn.bias_add(conv_fold, bias_fold, name='bn_bias_add')
if self.act:
conv_fold = self.act(conv_fold)
return conv_fold
def mean_var_with_update(self, update_moving_mean, update_moving_variance, mean, variance):
with tf.control_dependencies([update_moving_mean, update_moving_variance]):
return tf.identity(mean), tf.identity(variance)
def _w_fold(self, w, gama, var, epsilon):
return tf.compat.v1.div(tf.multiply(gama, w), tf.sqrt(var + epsilon))
def _bias_fold(self, beta, gama, mean, var, epsilon):
return tf.subtract(beta, tf.compat.v1.div(tf.multiply(gama, mean), tf.sqrt(var + epsilon)))
|
demos/common/python/openvino/model_zoo/model_api/models/model.py | kblaszczak-intel/open_model_zoo | 2,201 | 12641596 |
"""
Copyright (C) 2020-2022 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging as log
class WrapperError(RuntimeError):
'''Special class for errors occurred in Model API wrappers'''
def __init__(self, wrapper_name, message):
super().__init__(f"{wrapper_name}: {message}")
class Model:
'''An abstract model wrapper
The abstract model wrapper is free from any executor dependencies.
It sets the `ModelAdapter` instance with the provided model
and defines model inputs/outputs.
Next, it loads the provided configuration variables and sets it as wrapper attributes.
The keys of the configuration dictionary should be presented in the `parameters` method.
Also, it decorates the following adapter interface:
- Loading the model to the device
- The model reshaping
- Synchronous model inference
- Asynchronous model inference
The `preprocess` and `postprocess` methods must be implemented in a specific inherited wrapper.
Attributes:
logger (Logger): instance of the Logger
model_adapter (ModelAdapter): allows working with the specified executor
inputs (dict): keeps the model inputs names and `Metadata` structure for each one
outputs (dict): keeps the model outputs names and `Metadata` structure for each one
model_loaded (bool): a flag whether the model is loaded to device
'''
__model__ = None # Abstract wrapper has no name
def __init__(self, model_adapter, configuration=None, preload=False):
'''Model constructor
Args:
model_adapter (ModelAdapter): allows working with the specified executor
configuration (dict, optional): it contains values for parameters accepted by specific
wrapper (`confidence_threshold`, `labels` etc.) which are set as data attributes
preload (bool, optional): a flag whether the model is loaded to device while
initialization. If `preload=False`, the model must be loaded via `load` method before inference
Raises:
WrapperError: if the wrapper configuration is incorrect
'''
self.logger = log.getLogger()
self.model_adapter = model_adapter
self.inputs = self.model_adapter.get_input_layers()
self.outputs = self.model_adapter.get_output_layers()
for name, parameter in self.parameters().items():
self.__setattr__(name, parameter.default_value)
self._load_config(configuration)
self.model_loaded = False
if preload:
self.load()
@classmethod
def get_model(cls, name):
subclasses = [subclass for subclass in cls.get_subclasses() if subclass.__model__]
if cls.__model__:
subclasses.append(cls)
for subclass in subclasses:
if name.lower() == subclass.__model__.lower():
return subclass
        raise WrapperError(cls.__name__, 'There is no model with name "{}" in list: {}'.format(
            name, ', '.join([subclass.__model__ for subclass in subclasses])))
@classmethod
def create_model(cls, name, model_adapter, configuration=None, preload=False):
Model = cls.get_model(name)
return Model(model_adapter, configuration, preload)
@classmethod
def get_subclasses(cls):
all_subclasses = []
for subclass in cls.__subclasses__():
all_subclasses.append(subclass)
all_subclasses.extend(subclass.get_subclasses())
return all_subclasses
@classmethod
def available_wrappers(cls):
available_classes = [cls] if cls.__model__ else []
available_classes.extend(cls.get_subclasses())
return [subclass.__model__ for subclass in available_classes if subclass.__model__]
@classmethod
def parameters(cls):
'''Defines the description and type of configurable data parameters for the wrapper.
See `types.py` to find available types of the data parameter. For each parameter
the type, default value and description must be provided.
The example of possible data parameter:
'confidence_threshold': NumericalValue(
default_value=0.5, description="Threshold value for detection box confidence"
)
The method must be implemented in each specific inherited wrapper.
Returns:
- the dictionary with defined wrapper data parameters
'''
parameters = {}
return parameters
def _load_config(self, config):
'''Reads the configuration and creates data attributes
by setting the wrapper parameters with values from configuration.
Args:
config (dict): the dictionary with keys to be set as data attributes
and its values. The example of the config is the following:
{
'confidence_threshold': 0.5,
'resize_type': 'fit_to_window',
}
Note:
The config keys should be provided in `parameters` method for each wrapper,
then the default value of the parameter will be updated. If some key presented
in the config is not introduced in `parameters`, it will be omitted.
Raises:
WrapperError: if the configuration is incorrect
'''
if config is None: return
parameters = self.parameters()
for name, value in config.items():
if name in parameters:
errors = parameters[name].validate(value)
if errors:
self.logger.error(f'Error with "{name}" parameter:')
for error in errors:
self.logger.error(f"\t{error}")
self.raise_error('Incorrect user configuration')
value = parameters[name].get_value(value)
self.__setattr__(name, value)
else:
self.logger.warning(f'The parameter "{name}" not found in {self.__model__} wrapper, will be omitted')
def raise_error(self, message):
'''Raises the WrapperError.
Args:
message (str): error message to be shown in the following format:
"WrapperName: message"
'''
raise WrapperError(self.__model__, message)
def preprocess(self, inputs):
'''Interface for preprocess method.
Args:
inputs: raw input data, the data type is defined by wrapper
Returns:
- the preprocessed data which is submitted to the model for inference
and has the following format:
{
'input_layer_name_1': data_1,
'input_layer_name_2': data_2,
...
}
- the input metadata, which might be used in `postprocess` method
'''
raise NotImplementedError
def postprocess(self, outputs, meta):
'''Interface for postprocess method.
Args:
outputs (dict): model raw output in the following format:
{
'output_layer_name_1': raw_result_1,
'output_layer_name_2': raw_result_2,
...
}
meta (dict): the input metadata obtained from `preprocess` method
Returns:
- postprocessed data in the format defined by wrapper
'''
raise NotImplementedError
def _check_io_number(self, number_of_inputs, number_of_outputs):
'''Checks whether the number of model inputs/outputs is supported.
Args:
number_of_inputs (int, Tuple(int)): number of inputs supported by wrapper.
Use -1 to omit the check
number_of_outputs (int, Tuple(int)): number of outputs supported by wrapper.
Use -1 to omit the check
Raises:
WrapperError: if the model has unsupported number of inputs/outputs
'''
if not isinstance(number_of_inputs, tuple):
if len(self.inputs) != number_of_inputs and number_of_inputs != -1:
self.raise_error("Expected {} input blob{}, but {} found: {}".format(
number_of_inputs, 's' if number_of_inputs !=1 else '',
len(self.inputs), ', '.join(self.inputs)
))
else:
if not len(self.inputs) in number_of_inputs:
self.raise_error("Expected {} or {} input blobs, but {} found: {}".format(
', '.join(str(n) for n in number_of_inputs[:-1]), int(number_of_inputs[-1]),
len(self.inputs), ', '.join(self.inputs)
))
if not isinstance(number_of_outputs, tuple):
if len(self.outputs) != number_of_outputs and number_of_outputs != -1:
self.raise_error("Expected {} output blob{}, but {} found: {}".format(
number_of_outputs, 's' if number_of_outputs !=1 else '',
len(self.outputs), ', '.join(self.outputs)
))
else:
if not len(self.outputs) in number_of_outputs:
self.raise_error("Expected {} or {} output blobs, but {} found: {}".format(
', '.join(str(n) for n in number_of_outputs[:-1]), int(number_of_outputs[-1]),
len(self.outputs), ', '.join(self.outputs)
))
def __call__(self, inputs):
'''
Applies preprocessing, synchronous inference, postprocessing routines while one call.
Args:
inputs: raw input data, the data type is defined by wrapper
Returns:
- postprocessed data in the format defined by wrapper
- the input metadata obtained from `preprocess` method
'''
dict_data, input_meta = self.preprocess(inputs)
raw_result = self.infer_sync(dict_data)
return self.postprocess(raw_result, input_meta), input_meta
def load(self, force=False):
if not self.model_loaded or force:
self.model_loaded = True
self.model_adapter.load_model()
def reshape(self, new_shape):
if self.model_loaded:
            self.logger.warning(f'{self.__model__}: the model is already loaded to the device, '
                                'it should be reloaded after reshaping.')
self.model_loaded = False
self.model_adapter.reshape_model(new_shape)
self.inputs = self.model_adapter.get_input_layers()
self.outputs = self.model_adapter.get_output_layers()
def infer_sync(self, dict_data):
if not self.model_loaded:
self.raise_error("The model is not loaded to the device. Please, create the wrapper "
"with preload=True option or call load() method before infer_sync()")
return self.model_adapter.infer_sync(dict_data)
def infer_async(self, dict_data, callback_data):
if not self.model_loaded:
self.raise_error("The model is not loaded to the device. Please, create the wrapper "
"with preload=True option or call load() method before infer_async()")
self.model_adapter.infer_async(dict_data, callback_data)
def is_ready(self):
return self.model_adapter.is_ready()
def await_all(self):
self.model_adapter.await_all()
def await_any(self):
self.model_adapter.await_any()
def log_layers_info(self):
'''Prints the shape, precision and layout for all model inputs/outputs.
'''
for name, metadata in self.inputs.items():
self.logger.info('\tInput layer: {}, shape: {}, precision: {}, layout: {}'.format(
name, metadata.shape, metadata.precision, metadata.layout))
for name, metadata in self.outputs.items():
self.logger.info('\tOutput layer: {}, shape: {}, precision: {}, layout: {}'.format(
name, metadata.shape, metadata.precision, metadata.layout))
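# --- Editor's sketch (not part of the original wrapper API) ---
# A minimal, hypothetical subclass plus a dummy adapter showing how the
# preprocess/infer/postprocess contract described above fits together. The
# adapter below is a stand-in that mirrors only the calls used by this base
# class; it is not the real OpenVINO adapter.
if __name__ == "__main__":
    class _DummyAdapter:
        def get_input_layers(self):
            return {'input': None}
        def get_output_layers(self):
            return {'output': None}
        def load_model(self):
            pass
        def infer_sync(self, dict_data):
            return {'output': dict_data['input']}
    class _EchoModel(Model):
        __model__ = 'Echo'
        def preprocess(self, inputs):
            return {'input': inputs}, {'original': inputs}
        def postprocess(self, outputs, meta):
            return outputs['output']
    model = _EchoModel(_DummyAdapter(), preload=True)
    result, meta = model([1, 2, 3])
    print(result)  # [1, 2, 3]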
|
demos/text_to_speech_demo/python/utils/embeddings_processing.py | APrigarina/open_model_zoo | 2,201 | 12641601 |
import numpy as np
class PCA:
def __init__(self, n_components=1):
self.mean = None
self.eig_vectors = None
self.n_components = n_components
def build(self, x):
m = np.mean(x, axis=0)
xm = x - m
cov_mat = np.cov(xm.T)
eig_values, eig_vectors = np.linalg.eig(cov_mat)
idx = np.argsort(eig_values)[::-1]
eig_vectors = eig_vectors[:, idx]
v = eig_vectors[:, :self.n_components]
projection = xm.dot(v)
self.eig_vectors = eig_vectors
self.mean = m
return projection
def project(self, x):
xm = x - self.mean
v = self.eig_vectors[:, :self.n_components]
return xm.dot(v)
def iproject(self, z):
v = self.eig_vectors[:, :self.n_components]
x = z * v.T + self.mean
return x
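# --- Editor's sketch: illustrative usage of the PCA helper above ---
# Random data only; the shapes and seed are arbitrary.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    x = rng.randn(100, 3)
    pca = PCA(n_components=1)
    z = pca.build(x)         # fit on x and return its projection, shape (100, 1)
    z2 = pca.project(x)      # project data with the fitted components, shape (100, 1)
    x_rec = pca.iproject(z)  # approximate reconstruction in input space, shape (100, 3)
    print(z.shape, z2.shape, x_rec.shape)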
|
hypergbm/tests/experiment_test.py | oaksharks/HyperGBM | 687 | 12641607 |
# -*- coding:utf-8 -*-
__author__ = 'yangjian'
"""
"""
from datetime import datetime
from sklearn.metrics import get_scorer
from sklearn.model_selection import train_test_split
from hypergbm import HyperGBM, CompeteExperiment, make_experiment
from hypergbm.search_space import search_space_general
from hypernets.core import OptimizeDirection, EarlyStoppingCallback
from hypernets.experiment import GeneralExperiment, ExperimentCallback, ConsoleCallback, StepNames
from hypernets.searchers import RandomSearcher
from hypernets.tabular.datasets import dsutils
class LogCallback(ExperimentCallback):
def __init__(self, output_elapsed=False):
self.logs = []
self.experiment_elapsed = None
self.output_elapsed = output_elapsed
def experiment_start(self, exp):
self.logs.append('experiment start')
def experiment_end(self, exp, elapsed):
self.logs.append(f'experiment end')
if self.output_elapsed:
self.logs.append(f' elapsed:{elapsed}')
self.experiment_elapsed = elapsed
def experiment_break(self, exp, error):
self.logs.append(f'experiment break, error:{error}')
def step_start(self, exp, step):
self.logs.append(f' step start, step:{step}')
def step_progress(self, exp, step, progress, elapsed, eta=None):
self.logs.append(f' progress:{progress}')
if self.output_elapsed:
self.logs.append(f' elapsed:{elapsed}')
def step_end(self, exp, step, output, elapsed):
self.logs.append(f' step end, step:{step}, output:{output.keys() if output is not None else ""}')
if self.output_elapsed:
self.logs.append(f' elapsed:{elapsed}')
def step_break(self, exp, step, error):
self.logs.append(f'step break, step:{step}, error:{error}')
class Test_Experiment():
def test_regression_cv(self):
self.run_regression(cv=True)
def test_regression_feature_reselection(self):
self.run_regression(feature_reselection=True)
def test_regression_pseudo_labeling(self):
self.run_regression(pseudo_labeling=True)
def test_regression_adversarial_validation(self):
self.run_regression(train_test_split_strategy='adversarial_validation')
def test_regression_cross_validator(self):
from hypernets.tabular.lifelong_learning import PrequentialSplit
preq_split = PrequentialSplit(PrequentialSplit.STRATEGY_PREQ_BLS, n_splits=3)
self.run_regression(cv=True, cross_validator=preq_split)
def run_regression(self, train_test_split_strategy=None, cv=False, feature_reselection=False, pseudo_labeling=False,
collinearity_detection=False, drift_detection=True, max_trials=3, cross_validator=None):
df = dsutils.load_Bike_Sharing()
y = df.pop('count')
X_train, X_test, y_train, y_test = train_test_split(df, y, test_size=0.3, random_state=9527)
log_callback = LogCallback(output_elapsed=True)
rs = RandomSearcher(lambda: search_space_general(early_stopping_rounds=5, ),
optimize_direction='min')
hk = HyperGBM(rs, task='regression', reward_metric='mse', callbacks=[])
experiment = CompeteExperiment(hk, X_train, y_train, X_test=X_test,
callbacks=[log_callback],
train_test_split_strategy=train_test_split_strategy,
cv=cv, num_folds=3,
pseudo_labeling=pseudo_labeling,
scorer=get_scorer('neg_root_mean_squared_error'),
collinearity_detection=collinearity_detection,
drift_detection=drift_detection,
feature_reselection=feature_reselection,
feature_reselection_estimator_size=5,
feature_reselection_threshold=1e-5,
ensemble_size=10,
cross_validator=cross_validator,
random_state=12345,
)
pipeline = experiment.run(max_trials=max_trials)
rmse_scorer = get_scorer('neg_root_mean_squared_error')
rmse = rmse_scorer(pipeline, X_test, y_test)
assert rmse
def test_multiclass_cv(self):
self.run_multiclass(cv=True)
def test_multiclass_pseudo_labeling(self):
self.run_multiclass(pseudo_labeling=True)
def test_multiclass_feature_reselection(self):
self.run_multiclass(feature_reselection=True)
def test_multiclass_adversarial_validation(self):
self.run_multiclass(train_test_split_strategy='adversarial_validation')
def test_multiclass_cross_validator(self):
from hypernets.tabular.lifelong_learning import PrequentialSplit
preq_split = PrequentialSplit(PrequentialSplit.STRATEGY_PREQ_BLS, n_splits=3)
self.run_multiclass(cv=True, cross_validator=preq_split)
def run_multiclass(self, train_test_split_strategy=None, cv=False, feature_reselection=False, pseudo_labeling=False,
collinearity_detection=False, drift_detection=True, max_trials=3, cross_validator=None):
df = dsutils.load_glass_uci()
df.columns = [f'x_{c}' for c in df.columns.to_list()]
df.pop('x_0')
y = df.pop('x_10')
X_train, X_test, y_train, y_test = train_test_split(df, y, test_size=0.3, random_state=1, stratify=y)
rs = RandomSearcher(lambda: search_space_general(early_stopping_rounds=20, verbose=0),
optimize_direction=OptimizeDirection.Maximize)
es = EarlyStoppingCallback(20, 'max')
hk = HyperGBM(rs, reward_metric='auc', callbacks=[es])
log_callback = ConsoleCallback()
experiment = CompeteExperiment(hk, X_train, y_train, X_test=X_test,
callbacks=[log_callback],
train_test_split_strategy=train_test_split_strategy,
cv=cv, num_folds=3,
pseudo_labeling=pseudo_labeling,
scorer=get_scorer('roc_auc_ovr'),
collinearity_detection=collinearity_detection,
drift_detection=drift_detection,
feature_reselection=feature_reselection,
feature_reselection_estimator_size=5,
feature_reselection_threshold=1e-5,
ensemble_size=10,
cross_validator=cross_validator,
random_state=2345
)
pipeline = experiment.run(max_trials=max_trials)
acc_scorer = get_scorer('accuracy')
acc = acc_scorer(pipeline, X_test, y_test)
assert acc
auc_scorer = get_scorer('roc_auc_ovo')
auc = auc_scorer(pipeline, X_test, y_test)
assert auc
def test_general_exp(self):
rs = RandomSearcher(search_space_general, optimize_direction=OptimizeDirection.Maximize)
hk = HyperGBM(rs, reward_metric='accuracy', callbacks=[])
df = dsutils.load_bank().head(1000)
df.drop(['id'], axis=1, inplace=True)
y = df.pop('y')
X_train, X_test, y_train, y_test = train_test_split(df, y, test_size=0.3, random_state=9527)
log_callback = LogCallback()
experiment = GeneralExperiment(hk, X_train, y_train, X_test=X_test, callbacks=[log_callback])
experiment.run(max_trials=5)
assert log_callback.logs == ['experiment start',
' step start, step:data split',
" step end, step:data split, output:dict_keys(['X_train.shape', "
"'y_train.shape', 'X_eval.shape', 'y_eval.shape', 'X_test.shape'])",
' step start, step:search',
" step end, step:search, output:dict_keys(['best_trial'])",
' step start, step:load estimator',
" step end, step:load estimator, output:dict_keys(['estimator'])",
'experiment end']
def run_binary(self, train_test_split_strategy=None, cv=False, pseudo_labeling=False,
feature_reselection=False,
collinearity_detection=False, drift_detection=True, max_trials=3, scoring='roc_auc_ovr',
cross_validator=None):
rs = RandomSearcher(lambda: search_space_general(early_stopping_rounds=20, verbose=0),
optimize_direction=OptimizeDirection.Maximize)
hk = HyperGBM(rs, reward_metric='auc', callbacks=[])
df = dsutils.load_bank().head(1000)
df.drop(['id'], axis=1, inplace=True)
y = df.pop('y')
X_train, X_test, y_train, y_test = train_test_split(df, y, test_size=0.3, random_state=9527)
log_callback = LogCallback(output_elapsed=True)
experiment = CompeteExperiment(hk, X_train, y_train, X_test=X_test,
train_test_split_strategy=train_test_split_strategy,
callbacks=[log_callback],
scorer=get_scorer(scoring),
collinearity_detection=collinearity_detection,
drift_detection=drift_detection,
cv=cv,
pseudo_labeling=pseudo_labeling,
feature_reselection=feature_reselection,
feature_reselection_estimator_size=5,
feature_reselection_threshold=1e-5,
ensemble_size=5,
cross_validator=cross_validator,
random_state=12345,
)
pipeline = experiment.run(max_trials=max_trials)
auc_scorer = get_scorer('roc_auc_ovo')
acc_scorer = get_scorer('accuracy')
auc = auc_scorer(pipeline, X_test, y_test)
acc = acc_scorer(pipeline, X_test, y_test)
assert auc
assert acc
def test_binary_cv(self):
self.run_binary(cv=True)
def test_binary_pseudo_labeling(self):
self.run_binary(pseudo_labeling=True)
def test_binary_importance_selection(self):
self.run_binary(feature_reselection=True, cv=True, scoring='accuracy')
def test_binary_adversarial_validation(self):
self.run_binary(train_test_split_strategy='adversarial_validation')
def test_binary_cross_validator(self):
from hypernets.tabular.lifelong_learning import PrequentialSplit
preq_split = PrequentialSplit(PrequentialSplit.STRATEGY_PREQ_BLS, n_splits=3)
self.run_binary(cv=True, cross_validator=preq_split)
def test_feature_generation(self):
from hypernets.tabular.cfg import TabularCfg as tcfg
tcfg.tfidf_primitive_output_feature_count = 5
df = dsutils.load_movielens()
df['genres'] = df['genres'].apply(lambda s: s.replace('|', ' '))
df['timestamp'] = df['timestamp'].apply(datetime.fromtimestamp)
experiment = make_experiment(df, target='rating', cv=False, ensemble_size=0,
feature_generation=True,
feature_generation_text_cols=['title', 'genres'],
random_state=2345
)
assert isinstance(experiment, CompeteExperiment)
estimator = experiment.run(max_trials=3)
assert estimator is not None
step = experiment.get_step(StepNames.FEATURE_GENERATION)
assert step is not None
feature_names = step.get_fitted_params()['output_feature_names']
assert all([c in feature_names for c in ['TFIDF__title____0__', 'TFIDF__genres____0__', 'DAY__timestamp__']])
|
corehq/apps/notifications/urls.py | dimagilg/commcare-hq | 471 | 12641632 | from django.conf.urls import url
from corehq.apps.notifications.views import (
ManageNotificationView,
NotificationsServiceRMIView,
)
urlpatterns = [
url(r"^service/$",
NotificationsServiceRMIView.as_view(),
name=NotificationsServiceRMIView.urlname),
url(r"^manage/$",
ManageNotificationView.as_view(),
name=ManageNotificationView.urlname),
]
|
tests/functional/python_tests/hived/replay_based_tests/vop_pagnation.py | drov0/hive | 283 | 12641648 | #!/usr/bin/python3
import sys
import os
import tempfile
import argparse
from threading import Thread
sys.path.append("../../../")
import hive_utils
from hive_utils.resources.configini import config as configuration
from hive_utils.resources.configini import validate_address
# https://developers.hive.io/tutorials-recipes/paginated-api-methods.html#account_history_apiget_account_history
MAX_AT_ONCE = 10000
parser = argparse.ArgumentParser()
parser.add_argument("--run-hived", dest="hived", help = "IP address to replayed node", required=True, type=str)
parser.add_argument("--path-to-config", dest="config_path", help = "Path to node config file", required=True, type=str, default=None)
parser.add_argument("--blocks", dest="blocks", help = "Blocks to replay", required=False, type=int, default=1000000)
args = parser.parse_args()
node = None
assert int(args.blocks) >= 1000000, "replay has to be done for more than 1 million blocks"
# config
config = configuration()
config.load(args.config_path)
# check existence of required plugins
plugins = config.plugin.split(' ')
assert "account_history_rocksdb" in plugins
assert "account_history_api" in plugins
# class that compressing vop
class compressed_vop:
def __init__(self, vop):
from hashlib import sha512
from random import randint
from json import dumps
self.id = "{}_{}_{}".format( (~0x8000000000000000) & int(vop["operation_id"]), vop["block"], vop["trx_in_block"])
self.checksum = sha512( dumps(vop).encode() ).hexdigest()
# self.content = vop
def get(self):
return self.__dict__
# return compressed data from api call
def compress_vops(data : list) -> list:
ret = []
for vop in data:
ret.append(compressed_vop(vop).get())
return ret
# this function performs the call to the API
def get_vops(range_begin : int, range_end : int, start_from_id : int, limit : int) -> dict:
global config
from requests import post
from json import dumps
# from time import sleep
# sleep(0.25)
data = {
"jsonrpc":"2.0",
"method":"call",
"params":[
"account_history_api",
"enum_virtual_ops",
{
"block_range_begin":range_begin,
"block_range_end":range_end,
"operation_begin":start_from_id,
"limit":limit
}
],
"id":1
}
ret = post(f"http://{config.webserver_http_endpoint}", data=dumps(data))
if ret.status_code == 200:
return ret.json()['result']
else:
raise Exception("bad request")
# checks whether there is anything more to get
def paginated(data : dict, range_end : int) -> bool:
return not ( data['next_operation_begin'] == 0 and ( data['next_block_range_begin'] == range_end or data['next_block_range_begin'] == 0 ) )
# do one huge call
def get_vops_at_once(range_begin : int, range_end : int) -> list:
tmp = get_vops(range_begin, range_end, 0, MAX_AT_ONCE)
assert not paginated(tmp, range_end)
return compress_vops(tmp['ops'])
# generator that gets page by page in steps of the given limit
def get_vops_paginated(range_begin : int, range_end : int, limit : int):
ret = get_vops(range_begin, range_end, 0, limit)
yield compress_vops(ret['ops'])
if not paginated(ret, range_end):
ret = None
while ret is not None:
ret = get_vops(ret['next_block_range_begin'], range_end, ret['next_operation_begin'], limit)
yield compress_vops(ret['ops'])
if not paginated(ret, range_end):
ret = None
yield None
# wrapper over the generator that aggregates paginated output
def get_vops_with_step(range_begin : int, range_end : int, limit : int) -> list:
next_object = get_vops_paginated(range_begin, range_end, limit)
ret = []
value = next(next_object)
while value is not None:
ret.extend(value)
value = next(next_object)
return ret
# proxy, to get_vops_with_step with limit set as 1
def get_vops_one_by_one(range_begin : int, range_end : int) -> list:
return get_vops_with_step(range_begin, range_end, 1)
# get the same data in a given range with different steps
def check_range(range_begin : int, blocks : int):
from operator import itemgetter
from json import dump
range_end = range_begin + blocks + 1
print(f"gathering blocks in range [ {range_begin} ; {range_end} )")
all_at_once = get_vops_at_once(range_begin, range_end)
paginated_by_1 = get_vops_one_by_one(range_begin, range_end)
paginated_by_2 = get_vops_with_step(range_begin, range_end, 2)
paginated_by_5 = get_vops_with_step(range_begin, range_end, 5)
paginated_by_10 = get_vops_with_step(range_begin, range_end, 10)
# dump(all_at_once, open("all_at_once.json", 'w'))
# dump(paginated_by_1, open("paginated_by_1.json", 'w'))
# dump(paginated_by_2, open("paginated_by_2.json", 'w'))
# dump(paginated_by_5, open("paginated_by_5.json", 'w'))
# dump(paginated_by_10, open("paginated_by_10.json", 'w'))
assert all_at_once == paginated_by_1
print(f"[OK] all == paginated by 1 [ {range_begin} ; {range_end} )")
assert all_at_once == paginated_by_2
print(f"[OK] all == paginated by 2 [ {range_begin} ; {range_end} )")
assert all_at_once == paginated_by_5
print(f"[OK] all == paginated by 5 [ {range_begin} ; {range_end} )")
assert all_at_once == paginated_by_10
print(f"[OK] all == paginated by 10 [ {range_begin} ; {range_end} )")
return True
threads = []
STEP = 100
# start tests in different threads
for i in range(args.blocks - (STEP * 4), args.blocks, STEP):
th = Thread(target=check_range, args=(i, STEP))
th.start()
threads.append( th )
for job in threads:
job.join()
print("success")
exit(0)
|
terrascript/vault/d.py | mjuenema/python-terrascript | 507 | 12641710 |
# terrascript/vault/d.py
# Automatically generated by tools/makecode.py ()
import warnings
warnings.warn(
"using the 'legacy layout' is deprecated", DeprecationWarning, stacklevel=2
)
import terrascript
class vault_approle_auth_backend_role_id(terrascript.Data):
pass
class vault_auth_backend(terrascript.Data):
pass
class vault_aws_access_credentials(terrascript.Data):
pass
class vault_azure_access_credentials(terrascript.Data):
pass
class vault_generic_secret(terrascript.Data):
pass
class vault_identity_entity(terrascript.Data):
pass
class vault_identity_group(terrascript.Data):
pass
class vault_kubernetes_auth_backend_config(terrascript.Data):
pass
class vault_kubernetes_auth_backend_role(terrascript.Data):
pass
class vault_policy_document(terrascript.Data):
pass
|
demos/common/python/visualizers/instance_segmentation.py | alpkn/open_model_zoo | 2,201 | 12641720 | <reponame>alpkn/open_model_zoo
"""
Copyright (c) 2022 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import cv2
import numpy as np
from .drawing_utils import ColorPalette
class InstanceSegmentationVisualizer:
def __init__(self, labels=None, show_boxes=False, show_scores=False):
colors_num = len(labels) if labels else 80
self.labels = labels
self.palette = ColorPalette(colors_num)
self.show_boxes = show_boxes
self.show_scores = show_scores
def __call__(self, image, boxes, classes, scores, masks=None, ids=None, texts=None):
result = image.copy()
if masks is not None:
result = self.overlay_masks(result, masks, ids)
if self.show_boxes:
result = self.overlay_boxes(result, boxes, classes)
result = self.overlay_labels(result, boxes, classes, scores, texts)
return result
def overlay_masks(self, image, masks, ids=None):
segments_image = image.copy()
aggregated_mask = np.zeros(image.shape[:2], dtype=np.uint8)
aggregated_colored_mask = np.zeros(image.shape, dtype=np.uint8)
all_contours = []
for i, mask in enumerate(masks):
contours = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2]
if contours:
all_contours.append(contours[0])
mask_color = self.palette[i if ids is None else ids[i]]
cv2.bitwise_or(aggregated_mask, mask, dst=aggregated_mask)
cv2.bitwise_or(aggregated_colored_mask, mask_color, dst=aggregated_colored_mask, mask=mask)
# Fill the area occupied by all instances with a colored instances mask image
cv2.bitwise_and(segments_image, (0, 0, 0), dst=segments_image, mask=aggregated_mask)
cv2.bitwise_or(segments_image, aggregated_colored_mask, dst=segments_image, mask=aggregated_mask)
cv2.addWeighted(image, 0.5, segments_image, 0.5, 0, dst=image)
cv2.drawContours(image, all_contours, -1, (0, 0, 0))
return image
def overlay_boxes(self, image, boxes, classes):
for box, class_id in zip(boxes, classes):
color = self.palette[class_id]
box = box.astype(int)
top_left, bottom_right = box[:2], box[2:]
image = cv2.rectangle(image, top_left, bottom_right, color, 2)
return image
def overlay_labels(self, image, boxes, classes, scores, texts=None):
if texts:
labels = texts
elif self.labels:
labels = (self.labels[class_id] for class_id in classes)
else:
raise RuntimeError('InstanceSegmentationVisualizer must contain either labels or texts to display')
template = '{}: {:.2f}' if self.show_scores else '{}'
for box, score, label in zip(boxes, scores, labels):
text = template.format(label, score)
textsize = cv2.getTextSize(text, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1)[0]
position = ((box[:2] + box[2:] - textsize) / 2).astype(np.int32)
cv2.putText(image, text, position, cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)
return image
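# --- Editor's sketch: illustrative call on dummy data (defined, not executed) ---
# A 64x64 black image with one square instance mask; all values are arbitrary
# and only show how the visualizer above is meant to be invoked.
def _demo_instance_visualizer():
    image = np.zeros((64, 64, 3), dtype=np.uint8)
    mask = np.zeros((64, 64), dtype=np.uint8)
    mask[10:30, 10:30] = 255
    boxes = np.array([[10, 10, 30, 30]], dtype=np.float32)
    visualizer = InstanceSegmentationVisualizer(labels=['object'], show_boxes=True, show_scores=True)
    return visualizer(image, boxes, classes=[0], scores=[0.9], masks=[mask])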
|
codes/labs_lecture19/lab02_graph_clustering/util/graph_generator.py | sanjaysaha1311/Deep_Learning_CS7454_2018_NTU | 221 | 12641722 |
import numpy as np
import block
import torch
import scipy.sparse as sp
default_type='torch.cuda.FloatTensor'
default_type='torch.FloatTensor'
class variable_size_graph():
def __init__(self, task_parameters):
# parameters
vocab_size = task_parameters['Voc']
nb_of_clust = task_parameters['nb_clusters_target']
clust_size_min = task_parameters['size_min']
clust_size_max = task_parameters['size_max']
p = task_parameters['p']
q = task_parameters['q']
self_loop = True
W0 = task_parameters['W0']
u0 = task_parameters['u0']
# create block model graph and put random signal on it
W,c=block.unbalanced_block_model(nb_of_clust,clust_size_min,clust_size_max,p,q)
u=np.random.randint(vocab_size,size=W.shape[0])
# add the subgraph to be detected
W,c=block.add_a_block(W0,W,c,nb_of_clust,q)
u=np.concatenate((u,u0),axis=0)
# shuffle
W,c,idx=block.schuffle(W,c)
u=u[idx]
u=torch.from_numpy(u)
u=u.long()
# add self loop
if self_loop:
for i in range(W.shape[0]):
W[i,i]=1
# create the target
target= (c==nb_of_clust).astype(float)
target=torch.from_numpy(target)
target=target.long()
# mapping matrices
W_coo=sp.coo_matrix(W)
nb_edges=W_coo.nnz
nb_vertices=W.shape[0]
edge_to_starting_vertex=sp.coo_matrix( ( np.ones(nb_edges) ,(np.arange(nb_edges), W_coo.row) ),
shape=(nb_edges, nb_vertices) )
edge_to_ending_vertex=sp.coo_matrix( ( np.ones(nb_edges) ,(np.arange(nb_edges), W_coo.col) ),
shape=(nb_edges, nb_vertices) )
# attribute
#self.adj_matrix=torch.from_numpy(W).type(default_type)
#self.edge_to_starting_vertex=torch.from_numpy(edge_to_starting_vertex.toarray()).type(default_type)
#self.edge_to_ending_vertex=torch.from_numpy(edge_to_ending_vertex.toarray()).type(default_type)
self.adj_matrix=W
self.edge_to_starting_vertex=edge_to_starting_vertex
self.edge_to_ending_vertex=edge_to_ending_vertex
self.signal=u
self.target=target
class graph_semi_super_clu():
def __init__(self, task_parameters):
# parameters
vocab_size = task_parameters['Voc']
nb_of_clust = task_parameters['nb_clusters_target']
clust_size_min = task_parameters['size_min']
clust_size_max = task_parameters['size_max']
p = task_parameters['p']
q = task_parameters['q']
self_loop = True
# block model
W, c = block.unbalanced_block_model(nb_of_clust, clust_size_min, clust_size_max, p, q)
# add self loop
if self_loop:
for i in range(W.shape[0]):
W[i,i]=1
# shuffle
W,c,idx = block.schuffle(W,c)
# signal on block model
u = np.zeros(c.shape[0])
for r in range(nb_of_clust):
cluster = np.where(c==r)[0]
s = cluster[np.random.randint(cluster.shape[0])]
u[s] = r+1
# target
target = c
# convert to pytorch
u = torch.from_numpy(u)
u = u.long()
target = torch.from_numpy(target)
target = target.long()
# mapping matrices
W_coo=sp.coo_matrix(W)
nb_edges=W_coo.nnz
nb_vertices=W.shape[0]
edge_to_starting_vertex=sp.coo_matrix( ( np.ones(nb_edges) ,(np.arange(nb_edges), W_coo.row) ),
shape=(nb_edges, nb_vertices) )
edge_to_ending_vertex=sp.coo_matrix( ( np.ones(nb_edges) ,(np.arange(nb_edges), W_coo.col) ),
shape=(nb_edges, nb_vertices) )
# attribute
#self.adj_matrix=torch.from_numpy(W).type(default_type)
#self.edge_to_starting_vertex=torch.from_numpy(edge_to_starting_vertex.toarray()).type(default_type)
#self.edge_to_ending_vertex=torch.from_numpy(edge_to_ending_vertex.toarray()).type(default_type)
self.adj_matrix=W
self.edge_to_starting_vertex=edge_to_starting_vertex
self.edge_to_ending_vertex=edge_to_ending_vertex
self.signal=u
self.target=target
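# --- Editor's sketch: the edge-to-vertex incidence construction used above ---
# Standalone illustration on a tiny 3-node graph; not used by the classes.
def _incidence_demo():
    import numpy as np
    import scipy.sparse as sp
    W = np.array([[0, 1, 1],
                  [1, 0, 0],
                  [1, 0, 0]])
    W_coo = sp.coo_matrix(W)
    nb_edges, nb_vertices = W_coo.nnz, W.shape[0]
    edge_to_start = sp.coo_matrix((np.ones(nb_edges), (np.arange(nb_edges), W_coo.row)),
                                  shape=(nb_edges, nb_vertices))
    edge_to_end = sp.coo_matrix((np.ones(nb_edges), (np.arange(nb_edges), W_coo.col)),
                                shape=(nb_edges, nb_vertices))
    # each row selects the starting (resp. ending) vertex of one edge
    return edge_to_start.toarray(), edge_to_end.toarray()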
|
pywick/datasets/MultiFolderDataset.py | achaiah/pywick | 408 | 12641735 |
import itertools
import os
from PIL import Image
from .FolderDataset import FolderDataset, npy_loader, pil_loader, rgb_image_loader, rgba_image_loader, _find_classes, _finds_inputs_and_targets
class MultiFolderDataset(FolderDataset):
"""
    This class extends the FolderDataset with the ability to supply multiple root directories. The ``rel_target_root`` must exist
    relative to each root directory. For a complete description of the functionality see ``FolderDataset``
:param roots: (list):
list of root directories to traverse\n
:param class_mode: (string in `{'label', 'image', 'path'}):`
type of target sample to look for and return\n
`label` = return class folder as target\n
`image` = return another image as target (determined by optional target_prefix/postfix)\n
NOTE: if class_mode == 'image', in addition to input, you must also provide rel_target_root,
target_prefix or target_postfix (in any combination).
`path` = determines paths for inputs and targets and applies the respective loaders to the path
:param class_to_idx: (dict):
If specified, the given class_to_idx map will be used. Otherwise one will be derived from the directory structure.
:param input_regex: (string `(default is any valid image file)`):
regular expression to find input images\n
e.g. if all your inputs have the word 'input',
you'd enter something like input_regex='*input*'
:param rel_target_root: (string `(default is Nothing)`):
root of directory where to look for target images RELATIVE to the root dir (first arg)
:param target_prefix: (string `(default is Nothing)`):
prefix to use (if any) when trying to locate the matching target
:param target_postfix: (string):
postfix to use (if any) when trying to locate the matching target
:param transform: (torch transform):
transform to apply to input sample individually
:param target_transform: (torch transform):
transform to apply to target sample individually
:param co_transform: (torch transform):
transform to apply to both the input and the target
:param apply_co_transform_first: (bool):
whether to apply the co-transform before or after individual transforms (default: True = before)
:param default_loader: (string in `{'npy', 'pil'}` or function `(default: pil)`):
defines how to load samples from file. Will be applied to both input and target unless a separate target_loader is defined.\n
if a function is provided, it should take in a file path as input and return the loaded sample.
:param target_loader: (string in `{'npy', 'pil'}` or function `(default: pil)`):
defines how to load target samples from file\n
if a function is provided, it should take in a file path as input and return the loaded sample.
:param exclusion_file: (string):
list of files to exclude when enumerating all files.
        Each entry must be a full path relative to the root parameter
:param target_index_map: (dict `(defaults to binary mask: {255:1})):
a dictionary that maps pixel values in the image to classes to be recognized.\n
Used in conjunction with 'image' class_mode to produce a label for semantic segmentation
For semantic segmentation this is required so the default is a binary mask. However, if you want to turn off
this feature then specify target_index_map=None
"""
def __init__(self,
roots,
class_mode='label',
class_to_idx=None,
input_regex='*',
rel_target_root='',
target_prefix='',
target_postfix='',
target_extension='png',
transform=None,
target_transform=None,
co_transform=None,
apply_co_transform_first=True,
default_loader='pil',
target_loader=None,
exclusion_file=None,
target_index_map=None):
# call the super constructor first, then set our own parameters
# super().__init__()
self.num_inputs = 1 # these are hardcoded for the fit module to work
self.num_targets = 1 # these are hardcoded for the fit module to work
if default_loader == 'npy':
default_loader = npy_loader
elif default_loader == 'pil':
default_loader = pil_loader
self.default_loader = default_loader
# separate loading for targets (e.g. for black/white masks)
self.target_loader = target_loader
if class_to_idx:
self.classes = class_to_idx.keys()
self.class_to_idx = class_to_idx
else:
self.classes, self.class_to_idx = _find_classes(roots)
data_list = []
for root in roots:
datai, _ = _finds_inputs_and_targets(root, class_mode=class_mode, class_to_idx=self.class_to_idx, input_regex=input_regex,
rel_target_root=rel_target_root, target_prefix=target_prefix, target_postfix=target_postfix,
target_extension=target_extension, exclusion_file=exclusion_file)
data_list.append(datai)
self.data = list(itertools.chain.from_iterable(data_list))
if len(self.data) == 0:
raise (RuntimeError('Found 0 data items in subfolders of: {}'.format(roots)))
print('Found %i data items' % len(self.data))
self.roots = [os.path.expanduser(x) for x in roots]
self.transform = transform
self.target_transform = target_transform
self.co_transform = co_transform
self.apply_co_transform_first = apply_co_transform_first
self.target_index_map = target_index_map
self.class_mode = class_mode
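# --- Illustrative usage sketch (not part of the original source) ---
# A hedged example of constructing the dataset class defined above for a
# semantic-segmentation layout. The class name `FolderDataset` is a stand-in
# (the class statement sits above this excerpt) and the paths/filename pattern
# are assumptions made purely for illustration:
#
#   dataset = FolderDataset(
#       roots=['/data/seg/train'],
#       class_mode='image',
#       input_regex='*input*',
#       target_postfix='_mask',
#       target_extension='png',
#       default_loader='pil',
#       target_index_map={255: 1})
#   inputs, target = dataset[0]   # assumes __getitem__ is defined elsewhere in the class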
|
haskell/private/java.bzl | guibou/rules_haskell | 222 | 12641752 | """Interop with Java."""
load("@bazel_skylib//lib:collections.bzl", "collections")
JavaInteropInfo = provider(
doc = "Information needed for interop with Java rules.",
fields = {
"inputs": "Files needed during build.",
"env": "Dict with env variables that should be set during build.",
},
)
def java_interop_info(deps):
"""Gather information from any Java dependencies.
Args:
deps: Dependencies of a target that might include Java artifacts.
Returns:
JavaInteropInfo: Information needed for Java interop.
"""
inputs = depset(
transitive = [
# We only expose direct dependencies, though we could
# expose transitive ones as well. Only exposing the direct
# ones corresponds to Bazel's "strict Java dependencies"
# mode. See
# https://github.com/tweag/rules_haskell/issues/96.
dep[JavaInfo].compile_jars
for dep in deps
if JavaInfo in dep
],
)
env_dict = dict()
uniq_classpath = collections.uniq([
f.path
for f in inputs.to_list()
])
if len(uniq_classpath) > 0:
env_dict["CLASSPATH"] = ":".join(uniq_classpath)
return JavaInteropInfo(
inputs = inputs,
env = env_dict,
)
|
src/aptsources_cleanup/util/import_check.py | DazEB/aptsources-cleanup | 461 | 12641774 | # -*- coding: utf-8
__all__ = ('import_check',)
from . import pkg
from .gettext import _
from .terminal import termwrap
from .filesystem import samefile
import sys
import os.path
def import_check(module_name, apt_pkg_suffix, import_error=None, debug_fail=0):
"""Check for possible issues during the import of the given module
...and print warnings as appropriate.
"""
if import_error is None or debug_fail > 0:
try:
module = __import__(module_name)
if debug_fail > 0:
import __nonexistant_module__ as module
raise AssertionError
except ImportError as ex:
import_error = ex
else:
return module
python_name = 'python'
if sys.version_info.major >= 3:
python_name += str(sys.version_info.major)
python_exe = os.path.join("/usr/bin", python_name)
python_pkg = python_name + '-minimal'
apt_pkg = '-'.join((python_name, apt_pkg_suffix))
paragraphs = [
'{:s}: {!s}. {:s} sudo apt-get install {:s}'.format(
type(import_error).__name__, import_error,
_("Do you have the '{package:s}' package installed? You can do so with:")
.format(package=apt_pkg),
apt_pkg)
]
if not samefile(python_exe, sys.executable) or debug_fail:
questionable_interpreter_msg = len(paragraphs)
paragraphs.append(': '.join((
_('Warning'),
_("The current Python interpreter is '{py_exe:s}'. Please use the "
"default '{py_exe_default:s}' if you encounter issues with the "
"import of the '{module:s}' module.")
.format(py_exe=sys.executable, py_exe_default=python_exe,
module=module_name))))
else:
questionable_interpreter_msg = None
if not pkg.check_integrity(python_pkg, paragraphs, debug_fail):
msg = (
_("Please make sure that the '{package:s}' package wasn't corrupted and "
"that '{py_exe:s}' refers to the Python interpreter from the same "
"package.")
.format(package=python_pkg, py_exe=python_exe))
if questionable_interpreter_msg is not None:
paragraphs[questionable_interpreter_msg] = ' '.join((
paragraphs[questionable_interpreter_msg], msg))
else:
paragraphs.append(': '.join((_('Warning'), msg)))
try:
stderr = termwrap.get(sys.stderr, ignore_errors=False)
except EnvironmentError as ex:
print(_('Warning'),
			_('Cannot wrap text output due to a failure to get the terminal size'),
ex, sep=': ', end='\n\n', file=sys.stderr)
stderr.print_all(paragraphs)
sys.exit(127)
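# --- Illustrative usage sketch (not from the original source) ---
# A typical call; the module and package names here are chosen only for
# illustration and are not taken from this file:
#
#   apt_pkg = import_check('apt_pkg', 'apt')
#
# On success the imported module is returned; on failure the helper prints the
# wrapped warnings assembled above and exits with status 127.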
|
autoimpute/imputations/series/ffill.py | gjdv/autoimpute | 191 | 12641830 | <reponame>gjdv/autoimpute<gh_stars>100-1000
"""This module implements forward & backward imputation via two Imputers.
The LOCFImputer carries the last observation forward (locf) to impute missing
data in a time series. NOCBImputer carries the next observation backward (nocb)
to impute missing data in a time series. Dataframe imputers utilize these
classes when each's strategy is requested. Use SingleImputer or MultipleImputer
with strategy = `locf` or `nocb` to broadcast either strategy across all the
columns in a dataframe, or specify either strategy for a given column.
"""
import pandas as pd
from sklearn.utils.validation import check_is_fitted
from autoimpute.imputations import method_names
from .base import ISeriesImputer
methods = method_names
# pylint:disable=attribute-defined-outside-init
# pylint:disable=unnecessary-pass
# pylint:disable=unused-argument
class LOCFImputer(ISeriesImputer):
"""Impute missing values by carrying the last observation forward.
LOCFImputer carries the last observation forward to impute missing data.
The imputer can be used directly, but such behavior is discouraged.
LOCFImputer does not have the flexibility / robustness of dataframe
imputers, nor is its behavior identical. Preferred use is
MultipleImputer(strategy="locf").
"""
# class variables
strategy = methods.LOCF
def __init__(self, start=None):
"""Create an instance of the LOCFImputer class.
Args:
start (any, optional): can be any value to impute first if first
is missing. Default is None, which ends up taking first
observed value found. Can also use "mean" to start with mean
of the series.
Returns:
self. Instance of class.
"""
self.start = start
def _handle_start(self, v, X):
"private method to handle start values."
if v is None:
v = X.loc[X.first_valid_index()]
if v == "mean":
v = X.mean()
return v
def fit(self, X, y=None):
"""Fit the Imputer to the dataset.
Args:
X (pd.Series): Dataset to fit the imputer.
y (None): ignored, None to meet requirements of base class
Returns:
self. Instance of the class.
"""
self.statistics_ = {"param": None, "strategy": self.strategy}
return self
def impute(self, X):
"""Perform imputations using the statistics generated from fit.
The impute method handles the actual imputation. Missing values
in a given dataset are replaced with the last observation carried
forward.
Args:
X (pd.Series): Dataset to impute missing data from fit.
Returns:
np.array -- imputed dataset.
"""
# check if fitted then impute with mean if first value
# or impute with observation carried forward otherwise
check_is_fitted(self, "statistics_")
# handle start...
if pd.isnull(X.iloc[0]):
ix = X.head(1).index[0]
X.fillna(
{ix: self._handle_start(self.start, X)}, inplace=True
)
return X.fillna(method="ffill", inplace=False)
def fit_impute(self, X, y=None):
"""Convenience method to perform fit and imputation in one go."""
return self.fit(X, y).impute(X)
class NOCBImputer(ISeriesImputer):
"""Impute missing data by carrying the next observation backward.
NOCBImputer carries the next observation backward to impute missing data.
The imputer can be used directly, but such behavior is discouraged.
NOCBImputer does not have the flexibility / robustness of dataframe
imputers, nor is its behavior identical. Preferred use is
MultipleImputer(strategy="nocb").
"""
# class variables
strategy = methods.NOCB
def __init__(self, end=None):
"""Create an instance of the NOCBImputer class.
Args:
end (any, optional): can be any value to impute end if end
is missing. Default is None, which ends up taking last
observed value found. Can also use "mean" to end with
mean of the series.
Returns:
self. Instance of class.
"""
self.end = end
def _handle_end(self, v, X):
"private method to handle end values."
if v is None:
v = X.loc[X.last_valid_index()]
if v == "mean":
v = X.mean()
return v
def fit(self, X, y=None):
"""Fit the Imputer to the dataset and calculate the mean.
Args:
X (pd.Series): Dataset to fit the imputer
y (None): ignored, None to meet requirements of base class
Returns:
self. Instance of the class.
"""
self.statistics_ = {"param": None, "strategy": self.strategy}
return self
def impute(self, X):
"""Perform imputations using the statistics generated from fit.
The impute method handles the actual imputation. Missing values
in a given dataset are replaced with the next observation carried
backward.
Args:
X (pd.Series): Dataset to impute missing data from fit.
Returns:
np.array -- imputed dataset.
"""
# check if fitted then impute with mean if first value
# or impute with observation carried backward otherwise
check_is_fitted(self, "statistics_")
# handle end...
if pd.isnull(X.iloc[-1]):
ix = X.tail(1).index[0]
X.fillna(
{ix: self._handle_end(self.end, X)}, inplace=True
)
return X.fillna(method="bfill", inplace=False)
def fit_impute(self, X, y=None):
"""Convenience method to perform fit and imputation in one go."""
return self.fit(X, y).impute(X)
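# --- Illustrative usage sketch (not from the original source) ---
# A minimal, hedged example of the two imputers on a toy series; the sample
# data is invented for illustration only.
#
#   import numpy as np
#   import pandas as pd
#   s = pd.Series([np.nan, 2.0, np.nan, 4.0, np.nan])
#   LOCFImputer().fit_impute(s.copy())            # leading NaN falls back to the first observed value, then forward-fill
#   NOCBImputer(end="mean").fit_impute(s.copy())  # trailing NaN is filled with the series mean, then back-fill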
|
panoptic_mapping_utils/src/detectron2/detectron_player.py | YuePanEdward/panoptic_mapping | 101 | 12641831 | #!/usr/bin/env python3
import os
import json
import csv
import rospy
from sensor_msgs.msg import Image
from cv_bridge import CvBridge
import cv2
from PIL import Image as PilImage
import numpy as np
import tf
from panoptic_mapping_msgs.msg import DetectronLabel, DetectronLabels
class DetectronPlayer(object):
def __init__(self):
""" Initialize ros node and read params """
# params
self.data_path = rospy.get_param(
'~data_path', '/home/lukas/Documents/Datasets/flat_dataset/run1')
self.global_frame_name = rospy.get_param('~global_frame_name', 'world')
# ROS
self.img_pub = rospy.Publisher("~predicted_image",
Image,
queue_size=10)
self.depth_pub = rospy.Publisher("~depth_image", Image, queue_size=10)
self.label_pub = rospy.Publisher("~labels",
DetectronLabels,
queue_size=10)
self.img_sub = rospy.Subscriber("~id_image_in",
Image,
self.img_callback,
queue_size=10)
self.tf_broadcaster = tf.TransformBroadcaster()
# setup
self.cv_bridge = CvBridge()
stamps_file = os.path.join(self.data_path, 'timestamps.csv')
self.stamp_to_id = {}
if not os.path.isfile(stamps_file):
rospy.logfatal("No timestamp file '%s' found." % stamps_file)
with open(stamps_file, 'r') as read_obj:
csv_reader = csv.reader(read_obj)
for row in csv_reader:
if row[0] == "ImageID":
continue
self.stamp_to_id[str(row[1])] = str(row[0])
def img_callback(self, id_img):
        # Verify lookups for required datasets.
timestamp = str(
id_img.header.stamp.secs) + "%09d" % id_img.header.stamp.nsecs
if timestamp not in self.stamp_to_id:
rospy.logwarn(
"No prediction for message with timestamp '%s' found,"
" skipping image." % timestamp)
return
prediction_file = os.path.join(
self.data_path, self.stamp_to_id[timestamp] + "_predicted.png")
if not os.path.isfile(prediction_file):
rospy.logwarn("Could not find file '%s', skipping image." %
prediction_file)
return
labels_file = os.path.join(
self.data_path, self.stamp_to_id[timestamp] + "_labels.json")
if not os.path.isfile(labels_file):
rospy.logwarn("Could not find file '%s', skipping image." %
labels_file)
return
# Load and publish image.
cv_img = cv2.imread(prediction_file)
img_msg = self.cv_bridge.cv2_to_imgmsg(cv_img[:, :, 0], "8UC1")
img_msg.header.stamp = id_img.header.stamp
img_msg.header.frame_id = id_img.header.frame_id
self.img_pub.publish(img_msg)
# Load and publish labels.
label_msg = DetectronLabels()
label_msg.header.stamp = img_msg.header.stamp
with open(labels_file) as json_file:
data = json.load(json_file)
for d in data:
if 'instance_id' not in d:
d['instance_id'] = 0
if 'score' not in d:
d['score'] = 0
label = DetectronLabel()
label.id = d['id']
label.instance_id = d['instance_id']
label.is_thing = d['isthing']
label.category_id = d['category_id']
label.score = d['score']
label_msg.labels.append(label)
self.label_pub.publish(label_msg)
# Load and publish depth image. These are optional.
depth_file = os.path.join(self.data_path,
self.stamp_to_id[timestamp] + "_depth.tiff")
if os.path.isfile(depth_file):
cv_img = PilImage.open(depth_file)
img_msg = self.cv_bridge.cv2_to_imgmsg(np.array(cv_img), "32FC1")
img_msg.header.stamp = id_img.header.stamp
# img_msg.header.frame_id = id_img.header.frame_id
img_msg.header.frame_id = "test_frame"
self.depth_pub.publish(img_msg)
# Load and publish transform.
pose_file = os.path.join(self.data_path,
self.stamp_to_id[timestamp] + "_pose.txt")
if os.path.isfile(pose_file):
pose_data = [float(x) for x in open(pose_file, 'r').read().split()]
transform = np.eye(4)
for row in range(4):
for col in range(4):
transform[row, col] = pose_data[row * 4 + col]
rotation = tf.transformations.quaternion_from_matrix(transform)
self.tf_broadcaster.sendTransform(
(transform[0, 3], transform[1, 3], transform[2, 3]), rotation,
id_img.header.stamp, "test_frame", self.global_frame_name)
if __name__ == '__main__':
rospy.init_node('detectron_player', anonymous=True)
detectron_player = DetectronPlayer()
rospy.spin()
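# --- Expected data layout (summarized from the code above; <data_path> and <id> are placeholders) ---
#   <data_path>/timestamps.csv           CSV with an "ImageID" header row; column 0 is the image id, column 1 the timestamp
#   <data_path>/<id>_predicted.png       id image published on ~predicted_image
#   <data_path>/<id>_labels.json         list of dicts with id / isthing / category_id (instance_id and score optional)
#   <data_path>/<id>_depth.tiff          optional float depth image published on ~depth_image
#   <data_path>/<id>_pose.txt            optional 16 floats forming a row-major 4x4 transform, broadcast via tf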
|
test_project/select2_djhacker_formfield/urls.py | robertispas/django-autocomplete-light | 1,368 | 12641852 | <gh_stars>1000+
from dal import autocomplete
from django.conf.urls import url
from .models import TModel
urlpatterns = [
url(
'test-autocomplete/$',
autocomplete.Select2QuerySetView.as_view(model=TModel),
name='select2_djhacker_formfield',
),
]
import djhacker
from django import forms
djhacker.formfield(
TModel.test,
forms.ModelChoiceField,
widget=autocomplete.ModelSelect2(url='select2_djhacker_formfield')
)
|
examples/issues/issue138.py | tgolsson/appJar | 666 | 12641866 | import sys
sys.path.append("../../")
from appJar import gui
def showDate(btn):
print(app.getDatePicker("dp"))
app=gui()
app.startToggleFrame("Birthday")
app.addDatePicker("dp")
app.addButton("GET", showDate)
app.stopToggleFrame()
app.setDatePickerRange("dp", 1900, 2100)
app.setDatePicker("dp")
app.setDatePickerChangeFunction("dp", showDate)
app.go()
|
RecoEgamma/PhotonIdentification/python/egmPhotonIDs_cfi.py | ckamtsikis/cmssw | 852 | 12641889 | import FWCore.ParameterSet.Config as cms
egmPhotonIDs = cms.EDProducer(
"VersionedPhotonIdProducer",
physicsObjectSrc = cms.InputTag('gedPhotons'),
physicsObjectIDs = cms.VPSet( )
)
|
Clients/ParaView/Testing/XML/AnimatedExportSceneVerify.py | xj361685640/ParaView | 815 | 12641893 | import argparse
import json
import os
import sys
import zipfile
parser = argparse.ArgumentParser(description='Verify AnimatedExportScene output.')
parser.add_argument('-T', help='Test output directory')
parser.add_argument('-N', help='Test name')
args = parser.parse_args(sys.argv[1:])
# Open the vtkjs archive written by the XML test
test_output_dir = args.T
test_name = args.N
archive_file_name = test_output_dir + os.sep + test_name + '.vtkjs'
with zipfile.ZipFile(archive_file_name, mode='r') as archive:
assert("index.json" in archive.namelist())
with archive.open("index.json", mode='r') as index:
indexStr = index.read()
indexObj = json.loads(indexStr)
# Check if we have everything for a basic scene
assert("version" in indexObj)
assert("background" in indexObj)
assert("lookupTables" in indexObj)
assert("centerOfRotation" in indexObj)
assert("scene" in indexObj)
assert("camera" in indexObj)
assert("focalPoint" in indexObj["camera"])
assert("position" in indexObj["camera"])
assert("viewUp" in indexObj["camera"])
# Check if scene is correct
assert(len(indexObj["scene"]) == 1)
source = indexObj["scene"][0]
sourceType = source["type"]
assert(sourceType == "vtkHttpDataSetSeriesReader")
assert("actor" in source)
assert("actorRotation" in source)
assert("mapper" in source)
assert("property" in source)
# Check that animation is correct
assert("animation" in indexObj)
assert(indexObj["animation"]["type"] == "vtkTimeStepBasedAnimationHandler")
assert(len(indexObj["animation"]["timeSteps"]) == 10)
for step in indexObj["animation"]["timeSteps"]:
assert("time" in step)
# Check if the folder for the source is here and correct
url = source[sourceType]["url"] + "/"
assert(url + "index.json" in archive.namelist())
with archive.open(url + "index.json", mode='r') as sourceIndex:
sourceIndexObj = json.loads(sourceIndex.read())
assert(len(sourceIndexObj["series"]) == 10)
for step in sourceIndexObj["series"]:
assert("timeStep" in step)
indexStepPath = url + step["url"] + "/index.json"
assert(indexStepPath in archive.namelist())
    # Check that there are exactly 26 data arrays
assert(sum(map(lambda x : x.startswith("data/"), archive.namelist())) == 26)
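# --- Minimal shape of the index.json this test expects ---
# Reconstructed from the assertions above; field values shown as "..." are illustrative only.
# {
#   "version": ..., "background": ..., "lookupTables": ..., "centerOfRotation": ...,
#   "camera": {"focalPoint": ..., "position": ..., "viewUp": ...},
#   "scene": [{"type": "vtkHttpDataSetSeriesReader", "actor": ..., "actorRotation": ...,
#              "mapper": ..., "property": ..., "vtkHttpDataSetSeriesReader": {"url": "..."}}],
#   "animation": {"type": "vtkTimeStepBasedAnimationHandler", "timeSteps": [{"time": ...}, ... 10 entries]}
# }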
|
tests/components/zha/test_logbook.py | mib1185/core | 30,023 | 12641898 | <gh_stars>1000+
"""ZHA logbook describe events tests."""
from unittest.mock import patch
import pytest
import zigpy.profiles.zha
import zigpy.zcl.clusters.general as general
from homeassistant.components.zha.core.const import ZHA_EVENT
from homeassistant.const import CONF_DEVICE_ID, CONF_UNIQUE_ID, Platform
from homeassistant.helpers import device_registry as dr
from homeassistant.setup import async_setup_component
from .conftest import SIG_EP_INPUT, SIG_EP_OUTPUT, SIG_EP_PROFILE, SIG_EP_TYPE
from tests.components.logbook.common import MockRow, mock_humanify
ON = 1
OFF = 0
SHAKEN = "device_shaken"
COMMAND = "command"
COMMAND_SHAKE = "shake"
COMMAND_HOLD = "hold"
COMMAND_SINGLE = "single"
COMMAND_DOUBLE = "double"
DOUBLE_PRESS = "remote_button_double_press"
SHORT_PRESS = "remote_button_short_press"
LONG_PRESS = "remote_button_long_press"
LONG_RELEASE = "remote_button_long_release"
UP = "up"
DOWN = "down"
@pytest.fixture(autouse=True)
def sensor_platform_only():
"""Only setup the sensor and required base platforms to speed up tests."""
with patch("homeassistant.components.zha.PLATFORMS", (Platform.SENSOR,)):
yield
@pytest.fixture
async def mock_devices(hass, zigpy_device_mock, zha_device_joined):
"""IAS device fixture."""
zigpy_device = zigpy_device_mock(
{
1: {
SIG_EP_INPUT: [general.Basic.cluster_id],
SIG_EP_OUTPUT: [general.OnOff.cluster_id],
SIG_EP_TYPE: zigpy.profiles.zha.DeviceType.ON_OFF_SWITCH,
SIG_EP_PROFILE: zigpy.profiles.zha.PROFILE_ID,
}
}
)
zha_device = await zha_device_joined(zigpy_device)
zha_device.update_available(True)
await hass.async_block_till_done()
return zigpy_device, zha_device
async def test_zha_logbook_event_device_with_triggers(hass, mock_devices):
"""Test zha logbook events with device and triggers."""
zigpy_device, zha_device = mock_devices
zigpy_device.device_automation_triggers = {
(SHAKEN, SHAKEN): {COMMAND: COMMAND_SHAKE},
(UP, DOUBLE_PRESS): {COMMAND: COMMAND_DOUBLE, "endpoint_id": 1},
(DOWN, DOUBLE_PRESS): {COMMAND: COMMAND_DOUBLE, "endpoint_id": 2},
(SHORT_PRESS, SHORT_PRESS): {COMMAND: COMMAND_SINGLE},
(LONG_PRESS, LONG_PRESS): {COMMAND: COMMAND_HOLD},
(LONG_RELEASE, LONG_RELEASE): {COMMAND: COMMAND_HOLD},
}
ieee_address = str(zha_device.ieee)
ha_device_registry = dr.async_get(hass)
reg_device = ha_device_registry.async_get_device({("zha", ieee_address)})
hass.config.components.add("recorder")
assert await async_setup_component(hass, "logbook", {})
events = mock_humanify(
hass,
[
MockRow(
ZHA_EVENT,
{
CONF_DEVICE_ID: reg_device.id,
COMMAND: COMMAND_SHAKE,
"device_ieee": str(ieee_address),
CONF_UNIQUE_ID: f"{str(ieee_address)}:1:0x0006",
"endpoint_id": 1,
"cluster_id": 6,
"params": {
"test": "test",
},
},
),
MockRow(
ZHA_EVENT,
{
CONF_DEVICE_ID: reg_device.id,
COMMAND: COMMAND_DOUBLE,
"device_ieee": str(ieee_address),
CONF_UNIQUE_ID: f"{str(ieee_address)}:1:0x0006",
"endpoint_id": 1,
"cluster_id": 6,
"params": {
"test": "test",
},
},
),
MockRow(
ZHA_EVENT,
{
CONF_DEVICE_ID: reg_device.id,
COMMAND: COMMAND_DOUBLE,
"device_ieee": str(ieee_address),
CONF_UNIQUE_ID: f"{str(ieee_address)}:1:0x0006",
"endpoint_id": 2,
"cluster_id": 6,
"params": {
"test": "test",
},
},
),
],
)
assert events[0]["name"] == "FakeManufacturer FakeModel"
assert events[0]["domain"] == "zha"
assert (
events[0]["message"]
== "Device Shaken event was fired with parameters: {'test': 'test'}"
)
assert events[1]["name"] == "FakeManufacturer FakeModel"
assert events[1]["domain"] == "zha"
assert (
events[1]["message"]
== "Up - Remote Button Double Press event was fired with parameters: {'test': 'test'}"
)
async def test_zha_logbook_event_device_no_triggers(hass, mock_devices):
"""Test zha logbook events with device and without triggers."""
zigpy_device, zha_device = mock_devices
ieee_address = str(zha_device.ieee)
ha_device_registry = dr.async_get(hass)
reg_device = ha_device_registry.async_get_device({("zha", ieee_address)})
hass.config.components.add("recorder")
assert await async_setup_component(hass, "logbook", {})
events = mock_humanify(
hass,
[
MockRow(
ZHA_EVENT,
{
CONF_DEVICE_ID: reg_device.id,
COMMAND: COMMAND_SHAKE,
"device_ieee": str(ieee_address),
CONF_UNIQUE_ID: f"{str(ieee_address)}:1:0x0006",
"endpoint_id": 1,
"cluster_id": 6,
"params": {
"test": "test",
},
},
),
],
)
assert events[0]["name"] == "FakeManufacturer FakeModel"
assert events[0]["domain"] == "zha"
assert (
events[0]["message"]
== "Shake event was fired with parameters: {'test': 'test'}"
)
async def test_zha_logbook_event_device_no_device(hass, mock_devices):
"""Test zha logbook events without device and without triggers."""
hass.config.components.add("recorder")
assert await async_setup_component(hass, "logbook", {})
events = mock_humanify(
hass,
[
MockRow(
ZHA_EVENT,
{
CONF_DEVICE_ID: "non-existing-device",
COMMAND: COMMAND_SHAKE,
"device_ieee": "fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b",
CONF_UNIQUE_ID: "90:fd:9f:ff:fe:fe:d8:a1:1:0x0006",
"endpoint_id": 1,
"cluster_id": 6,
"params": {
"test": "test",
},
},
),
],
)
assert events[0]["name"] == "Unknown device"
assert events[0]["domain"] == "zha"
assert (
events[0]["message"]
== "Shake event was fired with parameters: {'test': 'test'}"
)
|
demo_a3c_ale.py | wuyx/Asynchronous-Methods-for-Deep-Reinforcement-Learning | 443 | 12641908 | <reponame>wuyx/Asynchronous-Methods-for-Deep-Reinforcement-Learning<gh_stars>100-1000
import argparse
import os
import numpy as np
import chainer
from chainer import serializers
import ale
import random_seed
from dqn_phi import dqn_phi
from a3c_ale import A3CFF
from a3c_ale import A3CLSTM
def eval_performance(rom, model, deterministic=False, use_sdl=False,
record_screen_dir=None):
env = ale.ALE(rom, treat_life_lost_as_terminal=False, use_sdl=use_sdl,
record_screen_dir=record_screen_dir)
model.reset_state()
test_r = 0
while not env.is_terminal:
s = chainer.Variable(np.expand_dims(dqn_phi(env.state), 0))
pout = model.pi_and_v(s)[0]
model.unchain_backward()
if deterministic:
a = pout.most_probable_actions[0]
else:
a = pout.action_indices[0]
test_r += env.receive_action(a)
return test_r
def main():
import logging
logging.basicConfig(level=logging.DEBUG)
parser = argparse.ArgumentParser()
parser.add_argument('rom', type=str)
parser.add_argument('model', type=str)
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--use-sdl', action='store_true')
parser.add_argument('--n-runs', type=int, default=10)
parser.add_argument('--deterministic', action='store_true')
parser.add_argument('--record-screen-dir', type=str, default=None)
parser.add_argument('--use-lstm', action='store_true')
parser.set_defaults(use_sdl=False)
parser.set_defaults(use_lstm=False)
parser.set_defaults(deterministic=False)
args = parser.parse_args()
random_seed.set_random_seed(args.seed)
n_actions = ale.ALE(args.rom).number_of_actions
    # Load a trained A3C model (feed-forward or LSTM)
if args.use_lstm:
model = A3CLSTM(n_actions)
else:
model = A3CFF(n_actions)
serializers.load_hdf5(args.model, model)
scores = []
for i in range(args.n_runs):
episode_record_dir = None
if args.record_screen_dir is not None:
episode_record_dir = os.path.join(args.record_screen_dir, str(i))
os.makedirs(episode_record_dir)
score = eval_performance(
args.rom, model, deterministic=args.deterministic,
use_sdl=args.use_sdl, record_screen_dir=episode_record_dir)
print('Run {}: {}'.format(i, score))
scores.append(score)
print('Average: {}'.format(sum(scores) / args.n_runs))
if __name__ == '__main__':
main()
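# --- Illustrative invocation (not from the original source; the ROM and model paths are placeholders) ---
#
#   python demo_a3c_ale.py breakout.bin trained_model.h5 --n-runs 10 --use-lstm
#
# Pass --deterministic to always take the most probable action instead of sampling from the policy.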
|
sampyl/tests/test_state.py | wilsonify/sampyl | 308 | 12641912 | from ..core import np
from ..state import State
state1 = State([('x', 1)])
state2 = State([('x', np.array([1, 2, 3]))])
state3 = State([('x', np.array([1,2,3])), ('y', 1)])
state4 = State([('x', np.array([2.]))])
state5 = State([('x', np.array([2,1,4])), ('y', 2)])
def test_add_states():
new = state3 + state5
print(new)
assert(type(new) == State)
assert(np.all(new['x'] == np.array([3, 3, 7])))
assert(new['y'] == 3)
def test_add_list():
new = state3 + [np.array([2, 3, 4]), 2]
assert(type(new) == State)
assert(np.all(new['x'] == np.array([3, 5, 7])))
assert(new['y'] == 3)
def test_add_multi_array():
new = state2 + np.array([2, 3, 4])
assert(type(new) == State)
assert(np.all(new['x'] == np.array([3, 5, 7])))
def test_add_single_array():
new = state4 + np.array([1.])
assert(type(new) == State)
assert(len(new) == 1)
assert(new['x'] == np.array([3.]))
def test_add_int():
new = state1 + 1
assert(type(new)==State)
assert(new['x'] == 2)
def test_add_float():
new = state1 + 1.
assert(type(new)==State)
assert(new['x'] == 2.)
def test_radd_int():
new = 1 + state1
assert(type(new)==State)
assert(new['x'] == 2)
def test_radd_float():
new = 1. + state1
assert(type(new)==State)
assert(new['x'] == 2.)
def test_mul_int():
new = state1 * 2
assert(type(new)==State)
assert(new['x'] == 2)
def test_mul_float():
new = state1 * 2.
assert(type(new)==State)
assert(new['x'] == 2.)
def test_rmul_int():
new = 2 * state1
assert(type(new)==State)
assert(new['x'] == 2)
def test_rmul_float():
new = 2. * state1
assert(type(new)==State)
    assert(new['x'] == 2.)
|
test_celluloid.py | maochongyuan/celluloid_animationPython | 1,086 | 12641914 | """Test animations."""
# pylint: disable=wrong-import-position
import numpy as np
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
from celluloid import Camera
def test_single():
"""Test plt.figure()"""
fig = plt.figure()
camera = Camera(fig)
for _ in range(10):
plt.plot(range(5))
plt.plot(-np.arange(5))
artists = camera.snap()
assert len(artists) == 2
# pylint: disable=protected-access
assert sum(len(x) for x in camera._photos) == 2 * 10
anim = camera.animate()
assert len(list(anim.frame_seq)) == 10
def test_two_axes():
"""Test subplots."""
fig, axes = plt.subplots(2)
camera = Camera(fig)
axes[0].plot(np.zeros(100))
axes[1].plot(np.zeros(100))
artists = camera.snap()
assert len(artists) == 2
axes[0].plot(np.ones(100))
axes[1].plot(np.ones(100))
artists = camera.snap()
# pylint: disable=protected-access
assert sum(len(x) for x in camera._photos) == 4
anim = camera.animate()
assert len(list(anim.frame_seq)) == 2
def test_legends():
"""Test subplots."""
camera = Camera(plt.figure())
plt.legend(plt.plot(range(5)), ['hello'])
artists = camera.snap()
assert len(artists) == 2
plt.legend(plt.plot(range(5)), ['world'])
artists = camera.snap()
assert len(artists) == 2
# pylint: disable=protected-access
assert camera._photos[0][1].texts[0]._text == 'hello'
assert camera._photos[1][1].texts[0]._text == 'world'
# pylint: disable=protected-access
assert sum(len(x) for x in camera._photos) == 4
anim = camera.animate()
assert len(list(anim.frame_seq)) == 2
def test_images():
"""Test subplots."""
camera = Camera(plt.figure())
plt.imshow(np.ones((5, 5)))
artists = camera.snap()
assert len(artists) == 1
plt.imshow(np.zeros((5, 5)))
artists = camera.snap()
assert len(artists) == 1
# pylint: disable=protected-access
assert sum(len(x) for x in camera._photos) == 2
anim = camera.animate()
assert len(list(anim.frame_seq)) == 2
|
build/toolchain/gcc_compile_wrapper.py | fantasialin/cpp_sandbox | 2,151 | 12641917 | <reponame>fantasialin/cpp_sandbox<filename>build/toolchain/gcc_compile_wrapper.py<gh_stars>1000+
#!/usr/bin/env python
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs a compilation command.
This script exists to avoid using complex shell commands in
gcc_toolchain.gni's tool("cxx") and tool("cc") in case the host running the
compiler does not have a POSIX-like shell (e.g. Windows).
"""
import argparse
import sys
import wrapper_utils
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--resource-whitelist',
help='Generate a resource whitelist for this target.',
metavar='PATH')
parser.add_argument('command', nargs=argparse.REMAINDER,
help='Compilation command')
args = parser.parse_args()
returncode, stderr = wrapper_utils.CaptureCommandStderr(
wrapper_utils.CommandToRun(args.command))
used_resources = wrapper_utils.ExtractResourceIdsFromPragmaWarnings(stderr)
sys.stderr.write(stderr)
if args.resource_whitelist:
with open(args.resource_whitelist, 'w') as f:
if used_resources:
f.write('\n'.join(str(resource) for resource in used_resources))
f.write('\n')
return returncode
if __name__ == "__main__":
sys.exit(main())
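# --- Illustrative invocation (not from the original source; compiler and flags are placeholders) ---
#
#   gcc_compile_wrapper.py --resource-whitelist obj/foo.whitelist g++ -c foo.cc -o obj/foo.o
#
# The wrapped compiler's stderr is scanned for resource-id pragma warnings and the collected
# ids are written, one per line, to the whitelist file.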
|
usaspending_api/download/tests/unit/test_base_download_helpers.py | ststuck/usaspending-api | 217 | 12641943 | <reponame>ststuck/usaspending-api<filename>usaspending_api/download/tests/unit/test_base_download_helpers.py<gh_stars>100-1000
import json
import pytest
from datetime import datetime, timezone
from model_mommy import mommy
from unittest.mock import patch
from usaspending_api.broker.lookups import EXTERNAL_DATA_TYPE_DICT
from usaspending_api.download.lookups import JOB_STATUS
from usaspending_api.download.v2.base_download_viewset import BaseDownloadViewSet
JSON_REQUEST = {"dummy_key": "dummy_value"}
@pytest.fixture
def common_test_data(db):
for js in JOB_STATUS:
mommy.make("download.JobStatus", job_status_id=js.id, name=js.name, description=js.desc)
download_jobs = [
{
"download_job_id": 1,
"file_name": "oldest_job.zip",
"job_status_id": 1,
"json_request": json.dumps(JSON_REQUEST),
"update_date": datetime(2021, 1, 15, 12, 0, 0, 0, timezone.utc),
},
{
"download_job_id": 2,
"file_name": "yesterday.zip",
"job_status_id": 1,
"json_request": json.dumps(JSON_REQUEST),
"update_date": datetime(2021, 1, 16, 12, 0, 0, 0, timezone.utc),
},
]
for job in download_jobs:
with patch("django.utils.timezone.now") as mock_now:
mock_now.return_value = job["update_date"]
mommy.make("download.DownloadJob", **job)
mommy.make(
"submissions.DABSSubmissionWindowSchedule",
submission_reveal_date=datetime(2021, 1, 1, 12, 0, 0, 0, timezone.utc),
)
def test_elasticsearch_download_cached(common_test_data):
external_load_dates = [
# FABS and FPDS dates are much newer to show they aren't used for ES downloads
{
"external_data_type__external_data_type_id": EXTERNAL_DATA_TYPE_DICT["fabs"],
"last_load_date": datetime(2021, 1, 30, 12, 0, 0, 0, timezone.utc),
},
{
"external_data_type__external_data_type_id": EXTERNAL_DATA_TYPE_DICT["fpds"],
"last_load_date": datetime(2021, 1, 30, 12, 0, 0, 0, timezone.utc),
},
{
"external_data_type__external_data_type_id": EXTERNAL_DATA_TYPE_DICT["es_transactions"],
"last_load_date": datetime(2021, 1, 17, 12, 0, 0, 0, timezone.utc),
},
{
"external_data_type__external_data_type_id": EXTERNAL_DATA_TYPE_DICT["es_awards"],
"last_load_date": datetime(2021, 1, 17, 16, 0, 0, 0, timezone.utc),
},
]
for load_date in external_load_dates:
mommy.make("broker.ExternalDataLoadDate", **load_date)
es_transaction_request = {**JSON_REQUEST, "download_types": ["elasticsearch_transactions", "sub_awards"]}
es_award_request = {**JSON_REQUEST, "download_types": ["elasticsearch_awards", "sub_awards"]}
download_jobs = [
{
"download_job_id": 10,
"file_name": "es_transaction_job_wrong.zip",
"job_status_id": 1,
"json_request": json.dumps(es_transaction_request),
"update_date": datetime(2021, 1, 17, 10, 0, 0, 0, timezone.utc),
},
{
"download_job_id": 11,
"file_name": "es_transaction_job_right.zip",
"job_status_id": 1,
"json_request": json.dumps(es_transaction_request),
"update_date": datetime(2021, 1, 17, 12, 30, 0, 0, timezone.utc),
},
{
"download_job_id": 20,
"file_name": "es_award_job_wrong.zip",
"job_status_id": 1,
"json_request": json.dumps(es_award_request),
"update_date": datetime(2021, 1, 17, 13, 0, 0, 0, timezone.utc),
},
{
"download_job_id": 21,
"file_name": "es_award_job_right.zip",
"job_status_id": 1,
"json_request": json.dumps(es_award_request),
"update_date": datetime(2021, 1, 17, 17, 0, 0, 0, timezone.utc),
},
]
for job in download_jobs:
with patch("django.utils.timezone.now") as mock_now:
mock_now.return_value = job["update_date"]
mommy.make("download.DownloadJob", **job)
result = BaseDownloadViewSet._get_cached_download(
json.dumps(es_transaction_request), es_transaction_request["download_types"]
)
assert result == {"download_job_id": 11, "file_name": "es_transaction_job_right.zip"}
result = BaseDownloadViewSet._get_cached_download(json.dumps(es_award_request), es_award_request["download_types"])
assert result == {"download_job_id": 21, "file_name": "es_award_job_right.zip"}
def test_elasticsearch_cached_download_not_found(common_test_data):
external_load_dates = [
# FABS and FPDS dates are much newer to show they aren't used for ES downloads
{
"external_data_type__external_data_type_id": EXTERNAL_DATA_TYPE_DICT["fabs"],
"last_load_date": datetime(2021, 1, 30, 12, 0, 0, 0, timezone.utc),
},
{
"external_data_type__external_data_type_id": EXTERNAL_DATA_TYPE_DICT["fpds"],
"last_load_date": datetime(2021, 1, 30, 12, 0, 0, 0, timezone.utc),
},
{
"external_data_type__external_data_type_id": EXTERNAL_DATA_TYPE_DICT["es_transactions"],
"last_load_date": datetime(2021, 1, 17, 12, 0, 0, 0, timezone.utc),
},
{
"external_data_type__external_data_type_id": EXTERNAL_DATA_TYPE_DICT["es_awards"],
"last_load_date": datetime(2021, 1, 17, 16, 0, 0, 0, timezone.utc),
},
]
for load_date in external_load_dates:
mommy.make("broker.ExternalDataLoadDate", **load_date)
result = BaseDownloadViewSet._get_cached_download(
json.dumps(JSON_REQUEST), ["elasticsearch_transactions", "sub_awards"]
)
assert result is None
result = BaseDownloadViewSet._get_cached_download(json.dumps(JSON_REQUEST), ["elasticsearch_awards", "sub_awards"])
assert result is None
def test_non_elasticsearch_download_cached(common_test_data):
external_load_dates = [
{
"external_data_type__external_data_type_id": EXTERNAL_DATA_TYPE_DICT["fabs"],
"last_load_date": datetime(2021, 1, 17, 12, 0, 0, 0, timezone.utc),
},
{
"external_data_type__external_data_type_id": EXTERNAL_DATA_TYPE_DICT["fpds"],
"last_load_date": datetime(2021, 1, 17, 13, 0, 0, 0, timezone.utc),
},
{
"external_data_type__external_data_type_id": EXTERNAL_DATA_TYPE_DICT["es_transactions"],
"last_load_date": datetime(2021, 1, 17, 14, 0, 0, 0, timezone.utc),
},
{
"external_data_type__external_data_type_id": EXTERNAL_DATA_TYPE_DICT["es_awards"],
"last_load_date": datetime(2021, 1, 17, 16, 0, 0, 0, timezone.utc),
},
]
for load_date in external_load_dates:
mommy.make("broker.ExternalDataLoadDate", **load_date)
download_jobs = [
{
"download_job_id": 10,
"file_name": "10_download_job.zip",
"job_status_id": 1,
"json_request": json.dumps(JSON_REQUEST),
"update_date": datetime(2021, 1, 17, 10, 0, 0, 0, timezone.utc),
},
{
"download_job_id": 11,
"file_name": "11_download_job.zip",
"job_status_id": 1,
"json_request": json.dumps(JSON_REQUEST),
"update_date": datetime(2021, 1, 17, 12, 30, 0, 0, timezone.utc),
},
{
"download_job_id": 20,
"file_name": "20_download_job.zip",
"job_status_id": 1,
"json_request": json.dumps(JSON_REQUEST),
"update_date": datetime(2021, 1, 17, 13, 0, 0, 0, timezone.utc),
},
{
"download_job_id": 21,
"file_name": "21_download_job.zip",
"job_status_id": 1,
"json_request": json.dumps(JSON_REQUEST),
"update_date": datetime(2021, 1, 17, 17, 0, 0, 0, timezone.utc),
},
]
for job in download_jobs:
with patch("django.utils.timezone.now") as mock_now:
mock_now.return_value = job["update_date"]
mommy.make("download.DownloadJob", **job)
# Grab latest valid download
result = BaseDownloadViewSet._get_cached_download(json.dumps(JSON_REQUEST))
assert result == {"download_job_id": 21, "file_name": "21_download_job.zip"}
# FABS date updated; download no longer cached
mommy.make(
"broker.ExternalDataLoadDate",
external_data_type__external_data_type_id=EXTERNAL_DATA_TYPE_DICT["fabs"],
last_load_date=datetime(2021, 1, 18, 12, 0, 0, 0, timezone.utc),
)
result = BaseDownloadViewSet._get_cached_download(json.dumps(JSON_REQUEST))
assert result is None
# New download comes through and is cached
with patch("django.utils.timezone.now") as mock_now:
job = {
"download_job_id": 30,
"file_name": "30_download_job.zip",
"job_status_id": 1,
"json_request": json.dumps(JSON_REQUEST),
"update_date": datetime(2021, 1, 18, 13, 0, 0, 0, timezone.utc),
}
mock_now.return_value = job["update_date"]
mommy.make("download.DownloadJob", **job)
result = BaseDownloadViewSet._get_cached_download(json.dumps(JSON_REQUEST))
assert result == {"download_job_id": 30, "file_name": "30_download_job.zip"}
# New submission_reveal_date is set in DABSSubmissionWindowSchedule; clears the cache
mommy.make(
"submissions.DABSSubmissionWindowSchedule",
submission_reveal_date=datetime(2021, 1, 19, 6, 0, 0, 0, timezone.utc),
)
result = BaseDownloadViewSet._get_cached_download(json.dumps(JSON_REQUEST))
assert result is None
# Download after the new submission_reveal_date is cached
with patch("django.utils.timezone.now") as mock_now:
job = {
"download_job_id": 31,
"file_name": "31_download_job.zip",
"job_status_id": 1,
"json_request": json.dumps(JSON_REQUEST),
"update_date": datetime(2021, 1, 19, 6, 15, 0, 0, timezone.utc),
}
mock_now.return_value = job["update_date"]
mommy.make("download.DownloadJob", **job)
result = BaseDownloadViewSet._get_cached_download(json.dumps(JSON_REQUEST))
assert result == {"download_job_id": 31, "file_name": "31_download_job.zip"}
def test_non_elasticsearch_cached_download_not_found(common_test_data):
external_load_dates = [
# FABS and FPDS dates are much newer to show they aren't used for ES downloads
{
"external_data_type__external_data_type_id": EXTERNAL_DATA_TYPE_DICT["fabs"],
"last_load_date": datetime(2021, 1, 30, 12, 0, 0, 0, timezone.utc),
},
{
"external_data_type__external_data_type_id": EXTERNAL_DATA_TYPE_DICT["fpds"],
"last_load_date": datetime(2021, 1, 30, 12, 0, 0, 0, timezone.utc),
},
{
"external_data_type__external_data_type_id": EXTERNAL_DATA_TYPE_DICT["es_transactions"],
"last_load_date": datetime(2021, 1, 17, 12, 0, 0, 0, timezone.utc),
},
{
"external_data_type__external_data_type_id": EXTERNAL_DATA_TYPE_DICT["es_awards"],
"last_load_date": datetime(2021, 1, 17, 16, 0, 0, 0, timezone.utc),
},
]
for load_date in external_load_dates:
mommy.make("broker.ExternalDataLoadDate", **load_date)
result = BaseDownloadViewSet._get_cached_download(json.dumps(JSON_REQUEST))
assert result is None
|
ocr/tess/mark_wide_boxes.py | susannahsoon/oldperth | 302 | 12641969 | <reponame>susannahsoon/oldperth
#!/usr/bin/env python
"""Draw white lines where wide boxes would be split.
This encourages Tesseract to split these letter itself, by forcing them into
separate connected components.
"""
import os.path
import sys
from PIL import Image, ImageDraw
import numpy as np
from split_wide_boxes import split_box
from box import BoxLine, load_box_file
if __name__ == '__main__':
_, box_path, image_path = sys.argv
im = Image.open(image_path)
w, h = im.size
boxes = load_box_file(box_path)
draw = ImageDraw.Draw(im)
px = np.asarray(im)
line_count = 0
for box in boxes:
y1 = h - box.top
y2 = h - box.bottom
x1 = box.left
x2 = box.right
# reinforce existing vertical splits
# draw.line((x1, y2, x2, y2), fill='white')
splits = split_box(box)
if len(splits) == 1:
continue
# Draw white lines in all the boundary pixels.
for subbox in splits[1:]:
x1 = subbox.left
x2 = subbox.right
# TODO: Optimization: draw the line @ the least dark x-value
counts = [(-np.sum(px[y1:y2+1, x1 + dx]), x1 + dx) for dx in (-2, -1, 0, 1, 2)]
#print '%d, %d, %d %r' % (x, y1, y2, counts)
counts.sort()
x = counts[0][1]
draw.line((x, y2, x, y1), fill='rgb(246,246,246)')
line_count += 1
base, ext = os.path.splitext(image_path)
out_path = base + '.separated' + ext
im.save(out_path)
print 'Drew %d separating lines in %s' % (line_count, out_path)
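# --- Illustrative invocation (not from the original source; file names are placeholders) ---
#
#   python mark_wide_boxes.py page.box page.png
#
# writes page.separated.png with near-white vertical lines drawn at the proposed split points.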
|
Backtracking/017. Letter Combinations of a Phone Number.py | beckswu/Leetcode | 138 | 12642000 |
"""
17. Letter Combinations of a Phone Number
Given a digit string, return all possible letter combinations that the number could represent.
A mapping of digit to letters (just like on the telephone buttons) is given below.
Input:Digit string "23"
Output: ["ad", "ae", "af", "bd", "be", "bf", "cd", "ce", "cf"].
Note:
Although the above answer is in lexicographical order, your answer could be in any order you want.
"""
class Solution:
def letterCombinations(self, digits):
"""
:type digits: str
:rtype: List[str]
"""
if len(digits) == 0:
return []
lookup, result = ["", "", "abc", "def", "ghi", "jkl", "mno", \
"pqrs", "tuv", "wxyz"], []
self.backtracking(digits,lookup,result,"",0)
return result
def backtracking(self, digits, lookup,result, cur, index):
if index == len(digits):
result.append(cur)
return
for i in lookup[int(digits[index])]:
self.backtracking(digits,lookup,result,cur+i, index+1)
import functools
class Solution:
def letterCombinations(self, digits: str) -> List[str]:
if not digits: return []
kp = {'2':'abc', '3':'def','4':'ghi','5':'jkl','6':'mno','7':'pqrs','8':'tuv','9':'wxyz'}
return functools.reduce(lambda acc, digit: [x+y for x in acc for y in kp[digit]],digits, [''])
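# Illustrative trace of the reduce-based solution above (not part of the original file):
# for digits = "23" the accumulator evolves as
#   [''] -> ['a', 'b', 'c'] -> ['ad', 'ae', 'af', 'bd', 'be', 'bf', 'cd', 'ce', 'cf']
# i.e. each digit expands every partial combination by its mapped letters.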
|
web/dbpatterns/terrain.py | fatiherikli/dbpatterns | 133 | 12642058 | <gh_stars>100-1000
import logging
import os
import sys
from django.conf import settings
from django.core.management import call_command
from django.test.simple import DjangoTestSuiteRunner
from lettuce import *
from documents.models import Document
from newsfeed.models import Entry
from notifications.models import Notification
@before.all
def switch_to_test_database():
"""
Switching to the test database
"""
logging.info("Setting up a test database ...\n")
try:
from south.management.commands import patch_for_test_db_setup
patch_for_test_db_setup()
except ImportError:
pass
world.test_runner = DjangoTestSuiteRunner(interactive=False)
world.test_runner.setup_test_environment()
world.test_db = world.test_runner.setup_databases()
call_command('syncdb', **{
'settings': settings.SETTINGS_MODULE,
'interactive': False,
'verbosity': 0})
# Reload mongodb database
settings.MONGODB_DATABASE = settings.MONGODB_TEST_DATABASE
for model in [Document, Entry, Notification]:
model.objects.load()
model.objects.collection.remove()
@after.all
def after_all(total):
logging.info("Destroy test database ...\n")
# Destroy database.
world.test_runner.teardown_databases(world.test_db)
# Tear Down the test environment.
world.test_runner.teardown_test_environment()
@after.each_scenario
def before_each_feature(scenario):
logging.info("Flusing db ... \n")
call_command('flush', **{
'settings': settings.SETTINGS_MODULE,
'interactive': False})
def setup_test_directory():
sys.path.append(os.path.join(os.path.dirname(__file__), "../../tests"))
__import__("steps")
setup_test_directory()
|
tests/v2/stop.py | tombry/virlutils | 133 | 12642086 | from . import BaseCMLTest
from click.testing import CliRunner
import requests_mock
import os
class CMLStopTests(BaseCMLTest):
def test_cml_stop(self):
with requests_mock.Mocker() as m:
m.put(self.get_api_path("labs/{}/nodes/n1/state/stop".format(self.get_test_id())), json=None)
self.setup_mocks(m)
virl = self.get_virl()
runner = CliRunner()
result = runner.invoke(virl, ["stop", "rtr-1"])
self.assertEqual(0, result.exit_code)
self.assertNotIn("Node rtr-1 is already stopped", result.output)
def test_cml_stop_already_stopped(self):
with requests_mock.Mocker() as m:
# Mock the request to return what we expect from the API.
self.setup_mocks(m)
virl = self.get_virl()
runner = CliRunner()
result = runner.invoke(virl, ["stop", "rtr-2"])
self.assertEqual(0, result.exit_code)
self.assertIn("Node rtr-2 is already stopped", result.output)
def test_cml_stop_bogus_node(self):
with requests_mock.Mocker() as m:
# Mock the request to return what we expect from the API.
self.setup_mocks(m)
virl = self.get_virl()
runner = CliRunner()
result = runner.invoke(virl, ["stop", "rtr-3"])
self.assertEqual(1, result.exit_code)
self.assertIn("Node rtr-3 was not found in lab {}".format(self.get_test_id()), result.output)
def test_cml_stop_bogus_lab(self):
src_dir = os.path.realpath(".virl")
try:
os.remove(".virl/current_cml_lab")
except OSError:
pass
with open(".virl/cached_cml_labs/123456", "w") as fd:
fd.write("lab: bogus\n")
os.symlink("{}/cached_cml_labs/123456".format(src_dir), "{}/current_cml_lab".format(src_dir))
with requests_mock.Mocker() as m:
# Mock the request to return what we expect from the API.
self.setup_mocks(m)
virl = self.get_virl()
runner = CliRunner()
result = runner.invoke(virl, ["stop", "rtr-1"])
os.remove(".virl/current_cml_lab")
os.remove(".virl/cached_cml_labs/123456")
self.assertEqual(1, result.exit_code)
self.assertIn("Unable to find lab 123456", result.output)
def test_cml_stop_no_current_lab(self):
try:
os.remove(".virl/current_cml_lab")
except OSError:
pass
with requests_mock.Mocker() as m:
# Mock the request to return what we expect from the API.
self.setup_mocks(m)
virl = self.get_virl()
runner = CliRunner()
result = runner.invoke(virl, ["stop", "rtr-1"])
self.assertEqual(1, result.exit_code)
self.assertIn("No current lab set", result.output)
|
AutotestWebD/apps/common/model/ExcelRead.py | yangjourney/sosotest | 422 | 12642093 | import xlrd
import os
import sys,random
import logging
from copy import deepcopy
rootpath = os.path.dirname(os.path.realpath(__file__)).replace("\\","/")
rootpath = rootpath.split("/apps")[0]
syspath=sys.path
sys.path=[]
sys.path.append(rootpath) # add the project root (absolute path) to the module search path
sys.path.extend([rootpath+i for i in os.listdir( rootpath) if i[0]!="."]) # add the first-level directories under the project root to the python search path
sys.path.extend(syspath)
from apps.common.func.InitDjango import *
from all_models.models import TbUiTask,TbUiTaskExecute
from all_models.models import TbUiTestCase,TbUiTestCaseStep
from all_models.models import TbUiGlobalText,TbUiGlobalVars,TbUiPageObject,TbUiPageObjectElements,TbUiFunctions,TbUiFunctionsTestcase,TbUiFunctionsTestcaseStep
from all_models.models import TbBusinessLine,TbModules,TbSource,TbConfigHttp
from apps.common.func.CommonFunc import *
class ExcelProcesser(object):
def __init__(self,excelFilePath):
self.excelFilePath = excelFilePath
        self.businessLineDict = {} # key is the name, value is the id
        self.moduleDict = {} # key is the name, value is the id
        self.sourceDict = {} # key is the name, value is the id
self.httpConfKeyList = []
businessLineSets = TbBusinessLine.objects.filter(state=1).all()
for tmpObj in businessLineSets:
self.businessLineDict[tmpObj.bussinessLineName] = tmpObj.id
moduleSets = TbModules.objects.filter(state=1).all()
for tmpObj in moduleSets:
self.moduleDict[tmpObj.moduleName] = tmpObj.id
sourceSets = TbSource.objects.filter(state=1).all()
for tmpObj in sourceSets:
self.sourceDict[tmpObj.sourceName] = tmpObj.id
configHttpSets = TbConfigHttp.objects.filter(state=1).all()
for tmpObj in configHttpSets:
self.httpConfKeyList.append(tmpObj.httpConfKey)
        self.specialTagList = ["#",# comment marker
]
def getAllDatas(self):
textDict = {}
gvarDict = {}
pageObjectDict = {}
commonCaseDict = {}
caseList = []
        #step1 get textDict
retBl,retReason,textDict = self.getTEXTDict()
if retBl == False:
retReason = "获取全局文本失败,请检查。原因:%s" % retReason
logging.error(retReason)
return False,retReason,textDict,gvarDict,pageObjectDict,commonCaseDict,caseList
#step2 获取gvarDict
retBl, retReason,gvarDict = self.getGVARDict()
if retBl == False:
retReason = "获取全局变量失败,请检查。原因:%s" % retReason
logging.error(retReason)
return False,retReason,textDict,gvarDict,pageObjectDict,commonCaseDict,caseList
allSheetNameList = self.getAllSheetNameList()
        #step3 get pageObjectDict
        # build the pageObject dict
pageObjectDict = {}
for tmpSheetName in allSheetNameList:
if tmpSheetName.startswith("PageObject_") :
tmpPODict = self.getPageObjectDictBySheetName(tmpSheetName)
pageObjectDict.update(tmpPODict)
        #step4 get commonCaseDict
commonCaseDict = {}
        # build the common functions that can be called from the case sheets
for tmpSheetName in allSheetNameList:
if tmpSheetName.startswith("Function_"):
retBl,retReason,tmpCommonCaseDict = self.getCommonCaseDictBySheetName(tmpSheetName)
if retBl:
commonCaseDict[tmpSheetName.split("Function_")[1]] = tmpCommonCaseDict
else:
retReason = "生成Function用例时发生错误,原因:%s" % retReason
logging.error(retReason)
return False, retReason, textDict, gvarDict, pageObjectDict, commonCaseDict, caseList
        # step5 get caseList
        # build the list of cases to execute
        caseList = []
        # if sheetName is empty, use every sheet whose name contains "Case"
for tmpCaseSheetName in allSheetNameList:
if tmpCaseSheetName.startswith("Testcase_"):
retBl,retReason,retTmpCaseList = self.getCaseListBySheetName(tmpCaseSheetName)
if retBl:
caseList += retTmpCaseList
else:
retReason = "生成用例时发生错误,原因:%s" % retReason
logging.error(retReason)
return False, retReason, textDict, gvarDict, pageObjectDict, commonCaseDict, caseList
return True, "", textDict, gvarDict, pageObjectDict, commonCaseDict, caseList
def getDataListBySheetName(self,sheetName):
if ((os.path.exists(self.excelFilePath)) == False):
logging.info("不存在文件[%s],请检查!" % self.excelFilePath)
return []
else:
data = xlrd.open_workbook(self.excelFilePath)
if sheetName not in self.getAllSheetNameList():
return []
table = data.sheet_by_name(sheetName)
maxRowsNum = table.nrows
maxColsNum = table.ncols
allDataList = []
for rowIndex in range(0,maxRowsNum):
tmpRowValueList = []
for colIndex in range(0,maxColsNum):
# tmpRowValueList.append(table.cell(rowIndex,colIndex).value)
tmpRowValueList.append(str(table.cell(rowIndex,colIndex).value).strip())
allDataList.append(tmpRowValueList)
return allDataList
def getTEXTDict(self,sheetName = "$TEXT"):
commonEnvStartColNum = 2
        #return: bool result, string reason, dict data
dataList = self.getDataListBySheetName(sheetName)
print("----------------------------------")
print(dataList)
if len(dataList) == 0:
return True,"",{}
envDict = {}
envList = dataList[0]
for envIndex in range(commonEnvStartColNum,len(envList)):
if (envList[envIndex] not in self.httpConfKeyList) and envList[envIndex] != "common":
return False,"环境key[%s]不在系统中" % envList[envIndex],{}
else:
envDict[str(envIndex)] = envList[envIndex]
retTextDict = {}
for i in range(1,len(dataList)):
tmpTextStr = ""
tmpEnvData = dataList[i]
tmpTextObj = TbUiGlobalText()
tmpTextObj.textDesc = tmpEnvData[0]
tmpTextObj.textKey = tmpEnvData[1]
for env2Index in range(commonEnvStartColNum,len(tmpEnvData)):
tmpTextStr += "[CONF=%s]%s[ENDCONF]" % (envDict[str(env2Index)],tmpEnvData[env2Index])
tmpTextObj.textValue = tmpTextStr
retTextDict[tmpTextObj.textKey] = tmpTextObj
return True,"",retTextDict
def getGVARDict(self,sheetName = "$GVAR"):
commonEnvStartColNum = 2
        # return: bool result, string reason, dict data
dataList = self.getDataListBySheetName(sheetName)
if len(dataList) == 0:
return True, "", {}
envDict = {}
envList = dataList[0]
for envIndex in range(commonEnvStartColNum, len(envList)):
if (envList[envIndex] not in self.httpConfKeyList) and envList[envIndex] != "common":
return False, "环境key[%s]不在系统中" % envList[envIndex], {}
else:
envDict[str(envIndex)] = envList[envIndex]
retTextDict = {}
for i in range(1, len(dataList)):
tmpTextStr = ""
tmpEnvData = dataList[i]
tmpTextObj = TbUiGlobalVars()
tmpTextObj.varDesc = tmpEnvData[0]
tmpTextObj.varKey = tmpEnvData[1]
for env2Index in range(commonEnvStartColNum, len(tmpEnvData)):
tmpTextStr += "[CONF=%s]%s[ENDCONF]" % (envDict[str(env2Index)], tmpEnvData[env2Index])
tmpTextObj.varValue = tmpTextStr
retTextDict[tmpTextObj.varKey] = tmpTextObj
return True, "", retTextDict
def getCaseListBySheetName(self,sheetName,execCaseId = ""):
if sheetName.startswith("Testcase_") == False:
return False, "sheetName必须是Testcase_开头!", [] # 业务线错误,中止,返回空
dataList = self.getDataListBySheetName(sheetName)
caseList = []
execCaseIdList = []
if execCaseId.strip() != "":
execCaseIdList = execCaseId.split(",")
tmpUITestCaseDict = {}
tmpUITestCaseDict['case'] = TbUiTestCase()
tmpUITestCaseDict['caseSteps'] = []
for i in range(1,len(dataList)):
tmpRow = dataList[i]
stepNumIndex = 0
if tmpRow[2].strip()!="" and tmpRow[2] not in self.specialTagList:
if tmpUITestCaseDict['case'].caseId != "":
if len(execCaseIdList) == 0 or (len(execCaseIdList) >0 and tmpUITestCaseDict['case'].caseId in execCaseIdList):
caseList.append(tmpUITestCaseDict) #添加上一个用例
stepNumIndex = 0 #重置步骤编号
#找到一个新用例
tmpUITestCase = TbUiTestCase() #清空上一个用例信息
businessLineName = tmpRow[0].strip()
if businessLineName not in self.businessLineDict.keys():
retReason = "Case[%s]的业务线[%s]错误,请检查!" % (tmpRow[2],businessLineName)
logging.error(retReason)
return False,retReason,[] #业务线错误,中止,返回空
tmpUITestCase.businessLineId = self.businessLineDict[businessLineName]
moduleName = tmpRow[1].strip()
if moduleName not in self.moduleDict.keys():
retReason = "Case[%s]的业务线[%s]错误,请检查!" % (tmpRow[2],moduleName)
logging.error(retReason)
return False,retReason,[] #模块错误,中止
tmpUITestCase.moduleName = self.moduleDict[moduleName]
tmpUITestCase.caseId = tmpRow[2]
tmpUITestCase.title = tmpRow[3]
tmpUITestCase.casedesc = tmpRow[4]
tmpUITestCaseDict = {}
tmpUITestCaseDict['case'] = tmpUITestCase
tmpUITestCaseDict['caseSteps'] = []
else:
#找到的行不是用例,那肯定就是步骤
tmpUITestcaseStep = TbUiTestCaseStep()
stepNumIndex += 1
tmpUITestcaseStep.caseId = tmpUITestCaseDict['case'].caseId
tmpUITestcaseStep.stepNum = stepNumIndex
tmpUITestcaseStep.specialTag = tmpRow[2]
tmpUITestcaseStep.stepTitle = tmpRow[3]
tmpUITestcaseStep.stepDesc = tmpRow[4]
tmpUITestcaseStep.operate = tmpRow[5]
tmpUITestcaseStep.params = tmpRow[6]
if tmpUITestcaseStep.operate.strip() != "" and tmpUITestcaseStep.params.strip() != "":
#关键字和参数不能为空才是真的有效的。
tmpUITestCaseDict['caseSteps'].append(tmpUITestcaseStep)
if i == len(dataList)-1 and tmpUITestCaseDict['case'].caseId != "":
#执行完最后一步的时候,判断是否要把最后一个case加入到caseList
if len(execCaseIdList) == 0 or (len(execCaseIdList) >0 and tmpUITestCaseDict['case'].caseId in execCaseIdList):
caseList.append(tmpUITestCaseDict) #把最后一个用例加进来
return True,"",caseList
def getCommonCaseDictBySheetName(self,sheetName):
if sheetName.startswith("Function_") == False:
return False, "sheetName必须是Function_开头!", [] # 业务线错误,中止,返回空
dataList = self.getDataListBySheetName(sheetName)
caseList = []
commonFuncKey = sheetName.split("Function_")[1]
tmpUITestCaseDict = {}
tmpUITestCaseDict['function'] = TbUiFunctionsTestcase()
tmpUITestCaseDict['functionSteps'] = []
stepNumIndex = 0
for i in range(1, len(dataList)):
tmpRow = dataList[i]
if tmpRow[2].strip()!="" and tmpRow[2] not in self.specialTagList:
#找到一个新的用例:如果上一个的name不是空,那么就加入到case列表
if tmpUITestCaseDict['function'].functionName != "":
caseList.append(tmpUITestCaseDict) # 添加上一个用例
stepNumIndex = 0 # 重置步骤编号
# 找到一个新用例
tmpUITestCase = TbUiFunctionsTestcase() # 清空上一个用例信息
businessLineName = tmpRow[0].strip()
if businessLineName not in self.businessLineDict.keys():
retReason = "Function[%s]的业务线[%s]错误,请检查!" % (tmpRow[2], businessLineName)
logging.error(retReason)
return False, retReason, [] # 业务线错误,中止,返回空
tmpUITestCase.businessLineId = self.businessLineDict[businessLineName]
moduleName = tmpRow[1].strip()
if moduleName not in self.moduleDict.keys():
retReason = "Function[%s]的业务线[%s]错误,请检查!" % (tmpRow[2], moduleName)
logging.error(retReason)
return False, retReason, [] # 模块错误,中止
tmpUITestCase.moduleName = self.moduleDict[moduleName]
tmpUITestCase.commonFuncKey = commonFuncKey
tmpUITestCase.functionName = tmpRow[2]
tmpUITestCase.title = tmpRow[3]
tmpUITestCase.casedesc = tmpRow[4]
tmpUITestCaseDict = {}
tmpUITestCaseDict['function'] = tmpUITestCase
tmpUITestCaseDict['functionSteps'] = []
else:
# 找到的行不是用例,那肯定就是步骤
tmpUITestcaseStep = TbUiFunctionsTestcaseStep()
stepNumIndex += 1
tmpUITestcaseStep.commonFuncKey = commonFuncKey
tmpUITestcaseStep.functionName = tmpUITestCaseDict['function'].functionName
tmpUITestcaseStep.stepNum = stepNumIndex
tmpUITestcaseStep.specialTag = tmpRow[2]
tmpUITestcaseStep.stepTitle = tmpRow[3]
tmpUITestcaseStep.stepDesc = tmpRow[4]
tmpUITestcaseStep.operate = tmpRow[5]
tmpUITestcaseStep.params = tmpRow[6]
if tmpUITestcaseStep.operate.strip() != "" and tmpUITestcaseStep.params.strip() != "":
# 关键字和参数不能为空才是真的有效的。
tmpUITestCaseDict['functionSteps'].append(tmpUITestcaseStep)
if i == len(dataList) - 1 and tmpUITestCaseDict['function'].functionName != "":
# 执行完最后一步的时候,判断是否要把最后一个case加入到caseList
caseList.append(tmpUITestCaseDict) # 把最后一个用例加进来
#跟获取用例一样
caseDict = {}
for tmpCase in caseList:
caseDict[tmpCase['function'].functionName] = tmpCase
return True,"",caseDict
def getPageObjectDictBySheetName(self,sheetName = ""):
dataList = self.getDataListBySheetName(sheetName)
poDict = {}
currentPageObjName = ""
for i in range(1, len(dataList)):
tmpRow = dataList[i]
if tmpRow[0].strip() != "" :
#这是一个新的pageObject,生成对应的对象 dict
tmpPageObjectModel = TbUiPageObject()
tmpPageObjectModel.poKey = tmpRow[0].strip()
tmpPageObjectModel.poTitle = tmpRow[1].strip()
tmpPageObjectModel.poDesc = tmpRow[2].strip()
currentPageObjName = tmpRow[0].strip()
if currentPageObjName not in poDict.keys():
poDict[currentPageObjName] = {}
poDict[currentPageObjName]['po'] = tmpPageObjectModel
poDict[currentPageObjName]['elements'] = []
else:
                #if the first column is empty, the row is an element entry
tmpElementModel = TbUiPageObjectElements()
tmpElementModel.poKey = poDict[currentPageObjName]['po'].poKey
tmpElementModel.elementTitle = tmpRow[1]
tmpElementModel.elementDesc = tmpRow[2]
tmpElementModel.elementKey = tmpRow[3]
tmpElementModel.elementType = tmpRow[4]
tmpElementModel.elementValue = tmpRow[5]
if currentPageObjName != "" and currentPageObjName in poDict.keys():
poDict[currentPageObjName]['elements'].append(tmpElementModel)
return poDict
def getAllSheetNameList(self):
casefile = self.excelFilePath
if ((os.path.exists(casefile)) == False):
logging.info("不存在文件[%s],请检查!" % casefile)
return []
else:
data = xlrd.open_workbook(casefile)
allSheetsObj = data.sheets()
allName = []
for tmpSheetObj in allSheetsObj:
allName.append(tmpSheetObj.name)
return allName
class ExcelDataUpdateDB(object):
def __init__(self,sessionName,textDict = {},gvarDict = {},poDict = {},functionDict = {},caseList = [] ,upType = "add"):
self.sessionName = sessionName
self.textDict = textDict
self.gvarDict = gvarDict
self.poDict = poDict
self.functionDict = functionDict
self.caseList = caseList
self.upType = upType
def textUpdate(self):
msg = ""
result = True
for k,v in self.textDict.items():
dbV = TbUiGlobalText.objects.filter(textKey=k).all()
            # look up this text; if it exists and was added by the current user, update it, otherwise record it and report at the end
if dbV:
# print(dbModelToDict(v))
if dbV[0].addBy != self.sessionName:
msg += "\n[%s]text不是当前用户添加,text未更新" % dbV[0].textKey
result = False
continue
v.id = dbV[0].id
v.addTime = dbV[0].addTime
v.save(force_update=True)
# print("#找到这个text")
else:
# print("#没有找到这个text")
# print(v)
v.addBy = self.sessionName
v.save(force_insert=True)
return result,msg
def gvarUpdate(self):
msg = ""
result = True
for k,v in self.gvarDict.items():
dbV = TbUiGlobalVars.objects.filter(varKey = k).all()
if dbV:
if dbV[0].addBy != self.sessionName:
msg += "\n[%s]gvar不是当前用户添加,gvar未更新" % dbV[0].varKey
result = False
continue
v.id = dbV[0].id
v.addTime = dbV[0].addTime
v.save(force_update=True)
else:
v.addBy = self.sessionName
v.save(force_insert=True)
return result, msg
def poAndElementUpdate(self):
poMsg = ""
poResult= True
elemetMsg = ""
elementResult = True
for k,v in self.poDict.items():
poKey = k
po = v['po']
poFromDb = TbUiPageObject.objects.filter(poKey = poKey).all()
if poFromDb:
if poFromDb[0].addBy != self.sessionName:
poMsg += "\n[%s]object page不是当前用户添加,object page未更新" % poFromDb[0].poKey
poResult = False
else:
po.id = poFromDb[0].id
po.addTime = poFromDb[0].addTime
po.save(force_update=True)
else:
po.addBy = self.sessionName
po.save(force_insert=True)
elementsList = v['elements']
for tmpElement in elementsList:
tmpEleFromDb = TbUiPageObjectElements.objects.filter(poKey=poKey,elementKey = tmpElement.elementKey).all()
if tmpEleFromDb:
if tmpEleFromDb[0].addBy != self.sessionName:
elemetMsg += "\n[%s]element不是当前用户添加,element未更新" % tmpEleFromDb[0].elementKey
elementResult = False
continue
tmpElement.id = tmpEleFromDb[0].id
tmpElement.addTime = tmpEleFromDb[0].addTime
tmpElement.save(force_update=True)
else:
tmpElement.addBy = self.sessionName
tmpElement.save(force_insert=True)
return poResult,poMsg,elementResult,elemetMsg
def funcUpdate(self):
funcMsg = ""
funcResult = True
for k,v in self.functionDict.items():
# ui functions #######################################################################################
tmpUiFunction = TbUiFunctions.objects.filter(commonFuncKey = k).all()
if tmpUiFunction:
                #it already exists, update it directly:
tmpUiFunction = tmpUiFunction[0]
tmpUiFunction.save(force_update=True)
else:
                #no matching func exists, create one
tmpUiFunction = TbUiFunctions()
tmpUiFunction.commonFuncKey = k
tmpUiFunction.commonFuncTitle = "默认值,请修改"
tmpUiFunction.commonFuncDesc = "默认值,请修改"
tmpUiFunction.addBy = self.sessionName
tmpUiFunction.save(force_insert=True)
for k2,v2 in v.items():
                #iterate over the function testcases
tmpUiFuncTestcase = TbUiFunctionsTestcase.objects.filter(commonFuncKey = k,functionName = k2).all()
if tmpUiFuncTestcase:
                    #it exists, so update it.
tmpFuntion = v2['function']
if tmpUiFuncTestcase[0].addBy != self.sessionName:
funcMsg += "\n[%s] 的创建人不是当前用户,function更新失败" % tmpFuntion.functionName
funcResult = False
continue
tmpFuntion.id = tmpUiFuncTestcase[0].id
tmpFuntion.addTime = tmpUiFuncTestcase[0].addTime
tmpFuntion.modBy = self.sessionName
tmpFuntion.save(force_update=True)
else:
tmpFuntion = v2['function']
tmpFuntion.addBy = self.sessionName
tmpFuntion.save(force_insert=True)
                #start saving the steps.
funcSteps = v2['functionSteps']
funcStepsInDb = TbUiFunctionsTestcaseStep.objects.filter(commonFuncKey = k,functionName = k2).all()
if len(funcSteps) <= len(funcStepsInDb):
                    #if there are no more real steps than DB rows, update the real ones into the DB and set the surplus DB rows' state to 0
for dbFuncStepi in range(0,len(funcStepsInDb)):
                        #TODO: decide between insert and update based on the number of steps.
if dbFuncStepi < len(funcSteps):
                            #update the step in the DB
funcSteps[dbFuncStepi].id = funcStepsInDb[dbFuncStepi].id
funcSteps[dbFuncStepi].state = 1
funcSteps[dbFuncStepi].addTime = funcStepsInDb[dbFuncStepi].addTime
funcSteps[dbFuncStepi].modBy = self.sessionName
funcSteps[dbFuncStepi].save(force_update=True)
else:
                            #set the surplus DB step's state to 0
funcStepsInDb[dbFuncStepi].state = 0
funcStepsInDb[dbFuncStepi].modBy = self.sessionName
funcStepsInDb[dbFuncStepi].save(force_update=True)
else:
                    #if there are more real steps than DB rows
for realFuncStepi in range(0,len(funcSteps)):
if realFuncStepi < len(funcStepsInDb):
                            #update into the DB
funcSteps[realFuncStepi].id = funcStepsInDb[realFuncStepi].id
funcSteps[realFuncStepi].addTime = funcStepsInDb[realFuncStepi].addTime
funcSteps[realFuncStepi].state = 1
funcSteps[realFuncStepi].modBy = self.sessionName
funcSteps[realFuncStepi].save(force_update=True)
else:
                            #insert into the DB
funcSteps[realFuncStepi].addBy = self.sessionName
funcSteps[realFuncStepi].save(force_insert=True)
return funcResult,funcMsg
def caseUpdate(self):
caseMsg = ""
caseResult = True
for tmpCaseDict in self.caseList:
tmpCase = tmpCaseDict['case']
tmpCaseSteps = tmpCaseDict['caseSteps']
if self.upType == "add":
tmpCase.caseId = "TMPCASEID_%s" % random.randint(0,10000000)
tmpCase.save(force_insert=True)
tmpCase.caseId = "TC_UI_%d" % tmpCase.id
tmpCase.addBy = self.sessionName
tmpCase.save(force_update=True)
else:
tmpUiTestcase = TbUiTestCase.objects.filter(caseId = tmpCase.caseId).all()
if tmpUiTestcase:
                    # it exists, so update it.
tmpCase.id = tmpUiTestcase[0].id
if tmpUiTestcase[0].addBy != self.sessionName:
print(tmpUiTestcase[0].addBy)
print("+++++++++++++++++++++++++++++++++")
caseMsg += "\n [%s]的创建人不是当前用户,跳过更新" % tmpUiTestcase[0].caseId
continue
else:
tmpCase.addBy = self.sessionName
tmpCase.addTime = tmpUiTestcase[0].addTime
tmpCase.save(force_update=True)
else:
print("不存在%s" % tmpCase.caseId)
continue
            # start saving the steps.
caseSteps = tmpCaseSteps
caseStepsInDb = TbUiTestCaseStep.objects.filter(caseId = tmpCase.caseId).all()
if len(caseSteps) <= len(caseStepsInDb):
                # if there are no more real steps than DB rows, update the real ones into the DB and set the surplus DB rows' state to 0
for dbFuncStepi in range(0, len(caseStepsInDb)):
                    # TODO: decide between insert and update based on the number of steps.
if dbFuncStepi < len(caseSteps):
                        # update the step in the DB
caseSteps[dbFuncStepi].id = caseStepsInDb[dbFuncStepi].id
caseSteps[dbFuncStepi].caseId = tmpCase.caseId
caseSteps[dbFuncStepi].state = 1
caseSteps[dbFuncStepi].addTime = caseStepsInDb[dbFuncStepi].addTime
caseSteps[dbFuncStepi].save(force_update=True)
else:
                        # set the surplus DB step's state to 0
caseStepsInDb[dbFuncStepi].state = 0
caseStepsInDb[dbFuncStepi].caseId = tmpCase.caseId
caseStepsInDb[dbFuncStepi].save(force_update=True)
else:
                # if there are more real steps than DB rows
for realFuncStepi in range(0, len(caseSteps)):
if realFuncStepi < len(caseStepsInDb):
                        # update into the DB
caseSteps[realFuncStepi].id = caseStepsInDb[realFuncStepi].id
caseSteps[realFuncStepi].caseId = tmpCase.caseId
caseSteps[realFuncStepi].addTime = caseStepsInDb[realFuncStepi].addTime
caseSteps[realFuncStepi].state = 1
caseSteps[realFuncStepi].save(force_update=True)
else:
                        # insert into the DB
caseSteps[realFuncStepi].addBy = self.sessionName
caseSteps[realFuncStepi].caseId = tmpCase.caseId
caseSteps[realFuncStepi].save(force_insert=True)
return caseResult,caseMsg
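#Illustrative sketch (not part of the original module): the step-sync pattern shared by
#funcUpdate/caseUpdate above, written generically. realSteps/dbSteps are assumed to be
#Django-style model instances exposing save(); the helper name itself is hypothetical.
def syncStepsSketch(realSteps, dbSteps, sessionName):
    #overlapping rows are updated in place, surplus DB rows are soft-deleted (state=0),
    #and surplus parsed steps are inserted as new rows
    for i in range(max(len(realSteps), len(dbSteps))):
        if i < len(realSteps) and i < len(dbSteps):
            realSteps[i].id = dbSteps[i].id
            realSteps[i].addTime = dbSteps[i].addTime
            realSteps[i].state = 1
            realSteps[i].modBy = sessionName
            realSteps[i].save(force_update=True)
        elif i < len(dbSteps):
            dbSteps[i].state = 0
            dbSteps[i].modBy = sessionName
            dbSteps[i].save(force_update=True)
        else:
            realSteps[i].addBy = sessionName
            realSteps[i].save(force_insert=True)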
if __name__ == '__main__':
excelFilePath = "D:/autotest/AutotestPlatform/RobotUiTest/testData/StandardTemplate.xls"
sheetName = "Testcase_OnlyWeb"
excelObj = ExcelProcesser(excelFilePath)
retBl,retReason,textDict,gvarDict,poDict,functionDict,caseList = excelObj.getAllDatas()
# print(retBl)
# print(retReason)
# print(textDict)
# print(gvarDict)
# print(poDict)
print("----------------------------------------------------")
print(functionDict)
# print(caseList)
updateDb = ExcelDataUpdateDB(sessionName="1",textDict=textDict,gvarDict=gvarDict,poDict=poDict,functionDict=functionDict,caseList=caseList,upType="update")
#saveText
textResult,textMsg = updateDb.textUpdate()
gvarResult,gvarMsg = updateDb.gvarUpdate()
#poDict
poResult,poMsg,elementResult,elementMsg = updateDb.poAndElementUpdate()
funcResult,funcMsg = updateDb.funcUpdate()
caseResult,caseMsg = updateDb.caseUpdate()
print(caseResult)
print(caseMsg)
if textResult:
textMsg = "text更新成功"
textMsg = "textMsg:%s" % textMsg
#testSaveText
# for k,v in textDict.items():
# dbV = TbUiGlobalText.objects.filter(textKey = k).all()
# #查找这个text,找到后判断这个text是不是自己添加的,如果是自己添加的就更新,如果不是就记录下来 最后给提示
# if dbV:
# v.id = dbV[0].id
# v.addTime = dbV[0].addTime
# v.save(force_update=True)
# print("#找到这个text")
# from apps.common.func.CommonFunc import *
# print(dbModelToDict(v))
# else:
# print("#没有找到这个text")
# print(v)
# v.save(force_insert=True)
#
# #testSaveGvar
# for k,v in gvarDict.items():
# dbV = TbUiGlobalVars.objects.filter(varKey = k).all()
# if dbV:
# v.id = dbV[0].id
# v.addTime = dbV[0].addTime
# v.save(force_update=True)
# else:
# v.save(force_insert=True)
#
# #testSavePageObjectandElemnts#######################################################################################
# for k,v in poDict.items():
# poKey = k
# po = v['po']
# poFromDb = TbUiPageObject.objects.filter(poKey = poKey).all()
# if poFromDb:
# po.id = poFromDb[0].id
# po.addTime = poFromDb[0].addTime
# po.save(force_update=True)
# else:
# po.save(force_insert=True)
#
# elementsList = v['elements']
# for tmpElement in elementsList:
# tmpEleFromDb = TbUiPageObjectElements.objects.filter(poKey=poKey,elementKey = tmpElement.elementKey).all()
# if tmpEleFromDb:
# tmpElement.id = tmpEleFromDb[0].id
# tmpElement.addTime = tmpEleFromDb[0].addTime
# tmpElement.save(force_update=True)
# else:
# tmpElement.save(force_insert=True)
#
# for k,v in functionDict.items():
# # ui functions #######################################################################################
# tmpUiFunction = TbUiFunctions.objects.filter(commonFuncKey = k).all()
# if tmpUiFunction:
# #有直接进行更新:
# tmpUiFunction = tmpUiFunction[0]
# else:
# #没有对应的func,创建func
# tmpUiFunction = TbUiFunctions()
# tmpUiFunction.commonFuncKey = k
# tmpUiFunction.commonFuncTitle = "默认值,请修改"
# tmpUiFunction.commonFuncDesc = "默认值,请修改"
# tmpUiFunction.save(force_insert=True)
#
# for k2,v2 in v.items():
# #对function testcase进行便利
# tmpUiFuncTestcase = TbUiFunctionsTestcase.objects.filter(commonFuncKey = k,functionName = k2).all()
# if tmpUiFuncTestcase:
# #存在,那么就更新。
# tmpFuntion = v2['function']
# tmpFuntion.id = tmpUiFuncTestcase[0].id
# tmpFuntion.addTime = tmpUiFuncTestcase[0].addTime
# tmpFuntion.save(force_update=True)
# else:
# tmpFuntion = v2['function']
# tmpFuntion.save(force_insert=True)
# #开始保存步骤。
# funcSteps = v2['functionSteps']
# funcStepsInDb = TbUiFunctionsTestcaseStep.objects.filter(commonFuncKey = k,functionName = k2).all()
# if len(funcSteps) <= len(funcStepsInDb):
# #如果实际的小于数据库中的,直接更新实际的到数据库中,多余的步骤为0
# for dbFuncStepi in range(0,len(funcStepsInDb)):
# #TODO pass根据步骤的长度决定是插入还是更新。
# if dbFuncStepi < len(funcSteps):
# #更新步骤到db中
# funcSteps[dbFuncStepi].id = funcStepsInDb[dbFuncStepi].id
# funcSteps[dbFuncStepi].state = 1
# funcSteps[dbFuncStepi].addTime = funcStepsInDb[dbFuncStepi].addTime
# funcSteps[dbFuncStepi].save(force_update=True)
# else:
# #更新数据库的步骤的state为0
# funcStepsInDb[dbFuncStepi].state = 0
# funcStepsInDb[dbFuncStepi].save(force_update=True)
# else:
# #如果实际的大于数据库中的
# for realFuncStepi in range(0,len(funcSteps)):
# if realFuncStepi < len(funcStepsInDb):
# #更新到db中
# funcSteps[realFuncStepi].id = funcStepsInDb[realFuncStepi].id
# funcSteps[realFuncStepi].addTime = funcStepsInDb[realFuncStepi].addTime
# funcSteps[realFuncStepi].state = 1
# funcSteps[realFuncStepi].save(force_update=True)
# else:
# #插入到db中
# funcSteps[realFuncStepi].save(force_insert=True)
#
# # ui functions #######################################################################################
#
# #testcases #######################################################################################
# upType = "update" # update
# for tmpCaseDict in caseList:
# tmpCase = tmpCaseDict['case']
# tmpCaseSteps = tmpCaseDict['caseSteps']
# if upType == "add":
# tmpCase.caseId = "TMPCASEID_%s" % random.randint(0,10000000)
# tmpCase.save(force_insert=True)
# tmpCase.caseId = "TC_UI_%d" % tmpCase.id
# tmpCase.save(force_update=True)
# else:
# tmpUiTestcase = TbUiTestCase.objects.filter(caseId = tmpCase.caseId).all()
# if tmpUiTestcase:
# # 存在,那么就更新。
# tmpCase.id = tmpUiTestcase[0].id
# tmpCase.addTime = tmpUiTestcase[0].addTime
# tmpCase.save(force_update=True)
# else:
# print("不存在%s" % tmpCase.caseId)
# continue
# # 开始保存步骤。
# caseSteps = tmpCaseSteps
# caseStepsInDb = TbUiTestCaseStep.objects.filter(caseId = tmpCase.caseId).all()
# if len(caseSteps) <= len(caseStepsInDb):
# # 如果实际的小于数据库中的,直接更新实际的到数据库中,多余的步骤为0
# for dbFuncStepi in range(0, len(caseStepsInDb)):
# # TODO pass根据步骤的长度决定是插入还是更新。
# if dbFuncStepi < len(caseSteps):
# # 更新步骤到db中
# caseSteps[dbFuncStepi].id = caseStepsInDb[dbFuncStepi].id
# caseSteps[dbFuncStepi].caseId = tmpCase.caseId
# caseSteps[dbFuncStepi].state = 1
# caseSteps[dbFuncStepi].addTime = caseStepsInDb[dbFuncStepi].addTime
# caseSteps[dbFuncStepi].save(force_update=True)
# else:
# # 更新数据库的步骤的state为0
# caseStepsInDb[dbFuncStepi].state = 0
# caseStepsInDb[dbFuncStepi].caseId = tmpCase.caseId
# caseStepsInDb[dbFuncStepi].save(force_update=True)
# else:
# # 如果实际的大于数据库中的
# for realFuncStepi in range(0, len(caseSteps)):
# if realFuncStepi < len(caseStepsInDb):
# # 更新到db中
# caseSteps[realFuncStepi].id = caseStepsInDb[realFuncStepi].id
# caseSteps[realFuncStepi].caseId = tmpCase.caseId
# caseSteps[realFuncStepi].addTime = caseStepsInDb[realFuncStepi].addTime
# caseSteps[realFuncStepi].state = 1
# caseSteps[realFuncStepi].save(force_update=True)
# else:
# # 插入到db中
# caseSteps[realFuncStepi].caseId = tmpCase.caseId
# caseSteps[realFuncStepi].save(force_insert=True)
#testcases #######################################################################################
|
macropodus/summarize/feature_base/text_pronouns.py | leileixiao/Macropodus | 485 | 12642125 | # -*- coding: UTF-8 -*-
# !/usr/bin/python
# @time :2019/8/25 20:51
# @author :Mo
# @paper :Sentence Extraction Based Single Document Summarization(2005)
# @function :feature-based text summarization
# @evaluate :poor; the method targets English, and the paper's formula is not clearly explained
from macropodus.preprocess.tools_ml import macropodus_cut, jieba_tag_cut
from macropodus.data.words_common.stop_words import stop_words
from macropodus.preprocess.tools_ml import extract_chinese
from macropodus.preprocess.tools_ml import cut_sentence
from macropodus.preprocess.tools_ml import get_ngrams
from collections import Counter
CHAR_PUMCTUATION = ',.:;?!`\'"[]{}<>。?!,、;:“” ‘’「」『』《》()[]〔〕【】——……—-~·《》〈〉﹏﹏.___'
CHAR_ENGLISH = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
CHAR_NUMBER = "0123456789零一二两三四五六七八九"
CHAR_CHINESE = "\u4e00-\u9fa5"
ES_MIN = 1e-9
class TextPronounsSum:
def __init__(self):
self.algorithm = 'text_pronouns'
self.stop_words = stop_words.values()
        self.len_ideal = 18 # ideal (central) sentence length, default
def score_position(self):
"""
        Sentence position score.
:param sentence:
:return:
"""
score_position = []
for i, _ in enumerate(self.sentences):
score_standard = i / (len(self.sentences))
if score_standard >= 0 and score_standard <= 0.1:
score_position.append(0.17)
elif score_standard > 0.1 and score_standard <= 0.2:
score_position.append(0.23)
elif score_standard > 0.2 and score_standard <= 0.3:
score_position.append(0.14)
elif score_standard > 0.3 and score_standard <= 0.4:
score_position.append(0.08)
elif score_standard > 0.4 and score_standard <= 0.5:
score_position.append(0.05)
elif score_standard > 0.5 and score_standard <= 0.6:
score_position.append(0.04)
elif score_standard > 0.6 and score_standard <= 0.7:
score_position.append(0.06)
elif score_standard > 0.7 and score_standard <= 0.8:
score_position.append(0.04)
elif score_standard > 0.8 and score_standard <= 0.9:
score_position.append(0.04)
elif score_standard > 0.9 and score_standard <= 1.0:
score_position.append(0.15)
else:
score_position.append(0)
return score_position
def score_length(self):
"""
        Sentence length score.
:param sentence:
:return:
"""
score_length = []
for i, sentence in enumerate(self.sentences):
score_len = 1 - abs(self.len_ideal - len(sentence)) / self.len_ideal
score_length.append(score_len)
return score_length
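    # Worked example of the length score above (illustrative, not from the original code):
    # with len_ideal = 18, a 10-character sentence scores 1 - |18 - 10| / 18 ≈ 0.556,
    # an 18-character sentence scores 1.0, and a 40-character sentence scores
    # 1 - 22 / 18 ≈ -0.222, so overly long sentences are pushed below zero.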
def score_tag(self):
"""
        Part-of-speech score over nouns, verbs and pronouns (n, v, r).
:return:
"""
score_tag = []
for i, sen_tag_score in enumerate(self.sentences_tag_cut):
sen_tag = sen_tag_score.values()
tag_dict = dict(Counter(sen_tag))
tag_n = tag_dict.get('n', 0) + tag_dict.get('nr', 0) + tag_dict.get('ns', 0) + \
tag_dict.get('nt', 0) + tag_dict.get('nz', 0) + tag_dict.get('ng', 0)
tag_v = tag_dict.get('v', 0) + tag_dict.get('vd', 0) + tag_dict.get('vn', 0) + tag_dict.get('vg', 0)
tag_p = tag_dict.get('r', 0)
score_sen_tag = (1.2 * tag_n + 1.0 * tag_v + 0.8 * tag_p)/(len(sen_tag_score) + 1)
score_tag.append(score_sen_tag)
return score_tag
def score_title(self, words):
"""
        Score based on words overlapping with the title.
:param words:
:return:
"""
mix_word = [word for word in words if word in self.title]
len_mix_word = len(mix_word)
len_title_word = len(self.title)
return (len_mix_word + 1.0) / (len_mix_word + 2.0) / len_title_word
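    # Worked example (illustrative): with a 4-word title and 2 overlapping words,
    # the score is (2 + 1) / (2 + 2) / 4 = 0.1875; more overlap pushes the first
    # factor toward 1, while longer titles scale the score down.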
def summarize(self, text, num=320, title=None):
"""
        Rank the sentences of the text.
:param docs: list
:return: list
"""
        # split into sentences
if type(text) == str:
self.sentences = cut_sentence(text)
elif type(text) == list:
self.sentences = text
else:
raise RuntimeError("text type must be list or str")
self.title = title
if self.title:
self.title = macropodus_cut(title)
        # tokenize, with POS tags
self.sentences_tag_cut = [jieba_tag_cut(extract_chinese(sentence)) for sentence in self.sentences]
        # words only, without POS tags
sentences_cut = [[jc for jc in jtc.keys() ] for jtc in self.sentences_tag_cut]
        # remove stop words etc.
self.sentences_cut = [list(filter(lambda x: x not in self.stop_words, sc)) for sc in sentences_cut]
        # word frequency counts
self.words = []
for sen in self.sentences_cut:
self.words = self.words + sen
self.word_count = dict(Counter(self.words))
        # score words by frequency, giving self.word_freq=[{'word':, 'freq':, 'score':}]
self.word_freqs = {}
self.len_words = len(self.words)
for k, v in self.word_count.items():
self.word_freqs[k] = v * 0.5 / self.len_words
        # uni/bi/tri-gram features
[gram_uni, gram_bi, gram_tri] = get_ngrams("".join(self.sentences), ns=[1, 2, 3])
ngrams = gram_uni + gram_bi + gram_tri
self.ngrams_count = dict(Counter(ngrams))
        # sentence position scores
scores_posi = self.score_position()
        # sentence length scores
scores_length = self.score_length()
        # sentence POS scores: nouns (1.2), pronouns (0.8), verbs (1.0)
scores_tag = self.score_tag()
res_rank = {}
self.res_score = []
for i in range(len(sentences_cut)):
            sen_cut = self.sentences_cut[i] # words of this sentence
            # ngram score
[gram_uni_, gram_bi_, gram_tri_] = get_ngrams(self.sentences[i], ns=[1, 2, 3]) # gram_uni_bi_tri(self.sentences[i])
n_gram_s = gram_uni_ + gram_bi_ + gram_tri_
score_ngram = sum([self.ngrams_count[ngs] if ngs in self.ngrams_count else 0 for ngs in n_gram_s]) / (len(n_gram_s) + 1)
            # average word length within the sentence
score_word_length_avg = sum([len(sc) for sc in sen_cut])/(len(sen_cut)+1)
score_posi = scores_posi[i]
score_length = scores_length[i]
score_tag = scores_tag[i]
            if self.title: # merge the scores when a title is available
score_title = self.score_title(sen_cut)
score_total = (score_title * 0.5 + score_ngram * 2.0 + score_word_length_avg * 0.5 +
score_length * 0.5 + score_posi * 1.0 + score_tag * 0.6) / 6.0
                # keep the per-component scores for inspection
self.res_score.append(["score_title", "score_ngram", "score_word_length_avg",
"score_length", "score_posi", "score_tag"])
self.res_score.append([score_title, score_ngram, score_word_length_avg,
score_length, score_posi, score_tag, self.sentences[i]])
            else: # merge the scores when no title is given
score_total = (score_ngram * 2.0 + score_word_length_avg * 0.5 + score_length * 0.5 +
score_posi * 1.0 + score_tag * 0.6) / 5.0
                # keep the per-component scores for inspection
self.res_score.append(["score_ngram", "score_word_length_avg",
"score_length", "score_posi", "score_tag"])
self.res_score.append([score_ngram, score_word_length_avg,
score_length, score_posi, score_tag, self.sentences[i]])
res_rank[self.sentences[i].strip()] = score_total
        # minimum number of sentences to return
num_min = min(num, int(len(self.word_count) * 0.6))
res_rank_sort = sorted(res_rank.items(), key=lambda rr: rr[1], reverse=True)
res_rank_sort_reverse = [(rrs[1], rrs[0]) for rrs in res_rank_sort][0:num_min]
return res_rank_sort_reverse
if __name__ == '__main__':
sen = "自然语言理解(NLU,Natural Language Understanding): 使计算机理解自然语言(人类语言文字)等,重在理解。"
tp = TextPronounsSum()
docs ="和投票目标的等级来决定新的等级.简单的说。" \
"是上世纪90年代末提出的一种计算网页权重的算法! " \
"当时,互联网技术突飞猛进,各种网页网站爆炸式增长。" \
"业界急需一种相对比较准确的网页重要性计算方法。" \
"是人们能够从海量互联网世界中找出自己需要的信息。" \
"百度百科如是介绍他的思想:PageRank通过网络浩瀚的超链接关系来确定一个页面的等级。" \
"Google把从A页面到B页面的链接解释为A页面给B页面投票。" \
"Google根据投票来源甚至来源的来源,即链接到A页面的页面。" \
"一个高等级的页面可以使其他低等级页面的等级提升。" \
"具体说来就是,PageRank有两个基本思想,也可以说是假设。" \
"即数量假设:一个网页被越多的其他页面链接,就越重)。" \
"质量假设:一个网页越是被高质量的网页链接,就越重要。" \
"总的来说就是一句话,从全局角度考虑,获取重要的信。"
docs1 = "/article/details/98530760。" \
"CSDN\n。" \
"文本生成NLG,不同于文本理解NLU(例如分词、词向量、分类、实体提取。" \
"是重在文本生成的另一种关键技术(常用的有翻译、摘要、同义句生成等)。" \
"传统的文本生成NLG任务主要是抽取式的,生成式的方法看起来到现在使用也没有那么普遍。" \
"现在,我记录的是textrank,一种使用比较广泛的抽取式关键句提取算法。" \
"版权声明:本文为CSDN博主「大漠帝国」的原创文章,遵循CC 4.0 by-sa版权协议," \
"转载请附上原文出处链接及本声明。原文链接:https://blog.csdn.net/rensihui" \
"CSDN是神"
sums = tp.summarize(docs)
for sum_ in sums:
print(sum_)
# ran_20 = range(20)
# print(type(ran_20))
# print(ran_20)
# idx = [1,2,3]
# idx.pop(1)
# print(idx)
# print(max([1,2,3,4]))
|
testing/test_setupplan.py | markshao/pytest | 9,225 | 12642154 | from _pytest.pytester import Pytester
def test_show_fixtures_and_test(
pytester: Pytester, dummy_yaml_custom_test: None
) -> None:
"""Verify that fixtures are not executed."""
pytester.makepyfile(
"""
import pytest
@pytest.fixture
def arg():
assert False
def test_arg(arg):
assert False
"""
)
result = pytester.runpytest("--setup-plan")
assert result.ret == 0
result.stdout.fnmatch_lines(
["*SETUP F arg*", "*test_arg (fixtures used: arg)", "*TEARDOWN F arg*"]
)
def test_show_multi_test_fixture_setup_and_teardown_correctly_simple(
pytester: Pytester,
) -> None:
"""Verify that when a fixture lives for longer than a single test, --setup-plan
correctly displays the SETUP/TEARDOWN indicators the right number of times.
As reported in https://github.com/pytest-dev/pytest/issues/2049
--setup-plan was showing SETUP/TEARDOWN on every test, even when the fixture
should persist through multiple tests.
(Note that this bug never affected actual test execution, which used the
correct fixture lifetimes. It was purely a display bug for --setup-plan, and
did not affect the related --setup-show or --setup-only.)
"""
pytester.makepyfile(
"""
import pytest
@pytest.fixture(scope = 'class')
def fix():
return object()
class TestClass:
def test_one(self, fix):
assert False
def test_two(self, fix):
assert False
"""
)
result = pytester.runpytest("--setup-plan")
assert result.ret == 0
setup_fragment = "SETUP C fix"
setup_count = 0
teardown_fragment = "TEARDOWN C fix"
teardown_count = 0
for line in result.stdout.lines:
if setup_fragment in line:
setup_count += 1
if teardown_fragment in line:
teardown_count += 1
# before the fix this tests, there would have been a setup/teardown
# message for each test, so the counts would each have been 2
assert setup_count == 1
assert teardown_count == 1
def test_show_multi_test_fixture_setup_and_teardown_same_as_setup_show(
pytester: Pytester,
) -> None:
"""Verify that SETUP/TEARDOWN messages match what comes out of --setup-show."""
pytester.makepyfile(
"""
import pytest
@pytest.fixture(scope = 'session')
def sess():
return True
@pytest.fixture(scope = 'module')
def mod():
return True
@pytest.fixture(scope = 'class')
def cls():
return True
@pytest.fixture(scope = 'function')
def func():
return True
def test_outside(sess, mod, cls, func):
assert True
class TestCls:
def test_one(self, sess, mod, cls, func):
assert True
def test_two(self, sess, mod, cls, func):
assert True
"""
)
plan_result = pytester.runpytest("--setup-plan")
show_result = pytester.runpytest("--setup-show")
# the number and text of these lines should be identical
plan_lines = [
line
for line in plan_result.stdout.lines
if "SETUP" in line or "TEARDOWN" in line
]
show_lines = [
line
for line in show_result.stdout.lines
if "SETUP" in line or "TEARDOWN" in line
]
assert plan_lines == show_lines
|
test/test_cli.py | KOLANICH-libs/hyper | 831 | 12642201 | # -*- coding: utf-8 -*-
import json
import pytest
from hyper.cli import KeyValue
from hyper.cli import get_content_type_and_charset, main, parse_argument
from hyper.cli import set_request_data, set_url_info
from hyper.common.headers import HTTPHeaderMap
# mock for testing
class DummyUrlInfo(object):
def __init__(self):
self.path = '/'
class DummyNamespace(object):
def __init__(self, attrs):
self.body = {}
self.headers = HTTPHeaderMap()
self.items = []
self.method = None
self._url = ''
self.url = DummyUrlInfo()
for key, value in attrs.items():
setattr(self, key, value)
class DummyResponse(object):
def __init__(self, headers):
self.headers = HTTPHeaderMap(headers.items())
def read(self):
ctype = self.headers.get('content-type')
if ctype is not None:
if 'json' in ctype[0].decode('utf-8'):
return b'{"data": "dummy"}'
return b'<html>dummy</html>'
def getheader(self, name):
return self.headers.get(name)
def getheaders(self):
return self.headers
class DummyConnection(object):
def __init__(self, host, port, secure=False):
self.host = host
self.port = port
self.response = DummyResponse({'content-type': 'application/json'})
self.secure = secure
def request(self, method, path, body, headers):
return method, path, body, headers
def get_response(self):
return self.response
def _get_value(obj, key):
if '.' in key:
attr1, attr2 = key.split('.')
return _get_value(getattr(obj, attr1), attr2)
else:
return getattr(obj, key)
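# Example of how _get_value resolves dotted attribute paths (illustrative):
# _get_value(DummyNamespace({'_url': ''}), 'url.path') walks .url then .path and
# returns '/', mirroring the 'url.path' keys used in the expected dicts below.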
@pytest.mark.parametrize('argv', [
['example.com'],
['example.com/'],
['http://example.com'],
['https://example.com'],
['https://example.com/'],
['https://example.com/httpbin/get'],
], ids=[
'specified host only',
'specified host and path',
'specified host with url scheme http://',
'specified host with url scheme https://',
'specified host with url scheme https:// and root',
'specified host with url scheme https:// and path',
])
def test_cli_normal(monkeypatch, argv):
monkeypatch.setattr('hyper.cli.HTTPConnection', DummyConnection)
main(argv)
assert True
@pytest.mark.parametrize('argv', [
[],
['-h'],
['--version'],
], ids=[
'specified no argument',
'specified "-h" option',
'specified "--version" option',
])
def test_cli_with_system_exit(argv):
with pytest.raises(SystemExit):
main(argv)
@pytest.mark.parametrize(('argv', 'expected'), [
(['--debug', 'example.com'], {'debug': True}),
(['get', 'example.com'], {'method': 'GET'}),
(['GET', 'example.com', 'x-test:header'],
{'method': 'GET', 'headers': {'x-test': 'header'}}),
(['GET', 'example.com', 'param==test'],
{'method': 'GET', 'url.path': '/?param=test'}),
(['POST', 'example.com', 'data=test'],
{'method': 'POST', 'body': '{"data": "test"}'}),
(['GET', 'example.com', ':authority:example.org'],
{'method': 'GET', 'headers': {
':authority': 'example.org'}}),
(['GET', 'example.com', ':authority:example.org', 'x-test:header'],
{'method': 'GET', 'headers': {
':authority': 'example.org',
'x-test': 'header'}}),
], ids=[
'specified "--debug" option',
'specify host with lower get method',
'specified host and additional header',
'specified host and get parameter',
'specified host and post data',
'specified host and override default header',
'specified host and override default header and additional header',
])
def test_parse_argument(argv, expected):
args = parse_argument(argv)
for key, value in expected.items():
assert value == _get_value(args, key)
@pytest.mark.parametrize(('response', 'expected'), [
(DummyResponse({}), ('unknown', 'utf-8')),
(DummyResponse({'content-type': 'text/html; charset=latin-1'}),
('text/html', 'latin-1')),
(DummyResponse({'content-type': 'application/json'}),
('application/json', 'utf-8')),
], ids=[
    'unknown content type and default charset',
'text/html and charset=latin-1',
'application/json and default charset',
])
def test_get_content_type_and_charset(response, expected):
ctype, charset = get_content_type_and_charset(response)
assert expected == (ctype, charset)
@pytest.mark.parametrize(('args', 'expected'), [
(DummyNamespace({}), {'headers': {}, 'method': 'GET'}),
(
DummyNamespace(
{'items': [
KeyValue('x-header', 'header', ':', ''),
KeyValue('param', 'test', '==', ''),
]}
),
{'headers': {'x-header': 'header'},
'method': 'GET',
'url.path': '/?param=test',
}
),
(
DummyNamespace(
{'items': [
KeyValue('data1', 'test1', '=', ''),
KeyValue('data2', 'test2', '=', ''),
]}
),
{'headers': {'content-type': 'application/json'},
'method': 'POST',
'body': json.dumps({'data1': 'test1', 'data2': 'test2'}),
}
),
], ids=[
'set no request data',
'set header and GET parameters',
'set header and POST data',
])
def test_set_request_data(args, expected):
set_request_data(args)
for key, value in expected.items():
assert value == _get_value(args, key)
@pytest.mark.parametrize(('args', 'expected'), [
(DummyNamespace({'_url': ''}),
{'query': None, 'host': 'localhost', 'fragment': None,
'port': 443, 'netloc': None, 'scheme': 'https', 'path': '/',
'secure': True}),
(DummyNamespace({'_url': 'example.com'}),
{'host': 'example.com', 'port': 443, 'path': '/', 'secure': True}),
(DummyNamespace({'_url': 'example.com/httpbin/get'}),
{'host': 'example.com', 'port': 443, 'path': '/httpbin/get',
'secure': True}),
(DummyNamespace({'_url': 'example.com:80'}),
{'host': 'example.com', 'port': 80, 'path': '/', 'secure': True}),
(DummyNamespace({'_url': 'http://example.com'}),
{'host': 'example.com', 'port': 80, 'path': '/', 'scheme': 'http',
'secure': False}),
(DummyNamespace({'_url': 'http://example.com/'}),
{'host': 'example.com', 'port': 80, 'path': '/', 'scheme': 'http',
'secure': False}),
(DummyNamespace({'_url': 'http://example.com:8080'}),
{'host': 'example.com', 'port': 8080, 'path': '/', 'scheme': 'http',
'secure': False}),
(DummyNamespace({'_url': 'https://example.com'}),
{'host': 'example.com', 'port': 443, 'path': '/', 'scheme': 'https',
'secure': True}),
(DummyNamespace({'_url': 'https://example.com/httpbin/get'}),
{'host': 'example.com', 'port': 443, 'path': '/httpbin/get',
'scheme': 'https', 'secure': True}),
(DummyNamespace({'_url': 'https://example.com:8443/httpbin/get'}),
{'host': 'example.com', 'port': 8443, 'path': '/httpbin/get',
'scheme': 'https', 'secure': True}),
], ids=[
'set no url (it means default settings)',
'set only hostname',
'set hostname with path',
'set hostname with port number',
'set url with http://',
'set url + "/" with http://',
'set url with http:// and port number',
'set url with https://',
'set url with path',
'set url with port number and path',
])
def test_set_url_info(args, expected):
set_url_info(args)
for key, value in expected.items():
assert value == getattr(args.url, key)
|
tfx/orchestration/experimental/kubernetes/node_wrapper.py | avelez93/tfx | 1,813 | 12642242 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A wrapper to pass a node without its type information."""
from typing import Any, Dict
from tfx.dsl.components.base import base_node
class NodeWrapper(base_node.BaseNode):
"""Wrapper of a node.
  The wrapper is needed so that the container entrypoint can deserialize a
  component without knowing its original Python class. This enables users
  to use container-based components without re-compiling the tfx base image
  every time they change the component and spec definitions.
"""
def __init__(self, node: base_node.BaseNode):
self.executor_spec = node.executor_spec
self.driver_class = node.driver_class
self._type = node.type
self._id = node.id
self._inputs = node.inputs
self._outputs = node.outputs
self._exec_properties = node.exec_properties
@property
def type(self) -> str:
return self._type
@property
def id(self) -> str:
return self._id
@property
def inputs(self) -> Dict[str, Any]:
return self._inputs
@property
def outputs(self) -> Dict[str, Any]:
return self._outputs
@property
def exec_properties(self) -> Dict[str, Any]:
return self._exec_properties
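# Minimal usage sketch (not part of the original module; `my_component` is a
# hypothetical BaseNode instance and json_utils is only one possible serializer):
#
#   from tfx.utils import json_utils
#   wrapped = NodeWrapper(my_component)
#   payload = json_utils.dumps(wrapped)   # shipped to the container entrypoint
#   restored = json_utils.loads(payload)  # no import of the original class needed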
|
06d-tools-cnn-cv/google-ml/Google Cloud API/cio-connect-api-example/visionex/visionex.py | jugalh/data-x-plaksha | 117 | 12642245 | import io
from PIL import Image
from google.cloud import vision
from google.cloud.vision import types
# instantiate a client
vision_client = vision.ImageAnnotatorClient()
#name of image file to annotate
file_name ='480w_s.jpg'
#file_name = 'ikhlaq-sidhu-2015.png?itok=nMiTIQXV'
#file_name = 'pupr.png'
img = Image.open(file_name)
img.show()
# load image into memory
with io.open(file_name, 'rb') as image_file:
content = image_file.read()
image = types.Image(content=content)
# perform label detection on image file
response = vision_client.label_detection(image=image)
labels = response.label_annotations
# what does google api see?
print('\nLABELS:\n')
for label in labels: # iterating over object
print(label.description)
print('\n\n HOW COOL IS THAT!!!!!!\n\n')
# other attributes?
#print(dir(labels) + '\n\n')
#print(dir(label) + '\n\n')
|
custom_components/ble_monitor/test/test_inode_parser.py | ManuelLR/ble_monitor | 820 | 12642253 | """The tests for the iNode ble_parser."""
from ble_monitor.ble_parser import BleParser
class TestInode:
def test_inode_energy_meter(self):
"""Test inode parser for iNode Energy Monitor."""
data_string = "043E2102010000473A6D6F1200150201060EFF90820400CFE40000DC05B0ED10020A08A5"
data = bytes(bytearray.fromhex(data_string))
# pylint: disable=unused-variable
ble_parser = BleParser()
sensor_msg, tracker_msg = ble_parser.parse_data(data)
assert sensor_msg["firmware"] == "iNode"
assert sensor_msg["type"] == "iNode Energy Meter"
assert sensor_msg["mac"] == "00126F6D3A47"
assert sensor_msg["packet"] == "0400cfe40000dc05b0ed10"
assert sensor_msg["data"]
assert sensor_msg["energy"] == 39.05
assert sensor_msg["energy unit"] == "kWh"
assert sensor_msg["power"] == 160.0
assert sensor_msg["power unit"] == "W"
assert sensor_msg["constant"] == 1500
assert sensor_msg["battery"] == 100
assert sensor_msg["voltage"] == 2.88
assert sensor_msg["light level"] == 0.0
assert sensor_msg["week day"] == 0
assert sensor_msg["week day total"] == 4333
assert sensor_msg["rssi"] == -91
|
Pyto/Samples/Turtle/polygon.py | snazari/Pyto | 701 | 12642319 | <reponame>snazari/Pyto<filename>Pyto/Samples/Turtle/polygon.py
# Taken from https://michael0x2a.com/blog/turtle-examples
from turtle import *
num_sides = 6
side_length = 70
angle = 360.0 / num_sides
for i in range(num_sides):
fd(side_length)
rt(angle)
done()
|
pyearth/test/test_knot_search.py | ktanishqk/py-earth | 360 | 12642332 | <filename>pyearth/test/test_knot_search.py
from pyearth._knot_search import (MultipleOutcomeDependentData,
KnotSearchWorkingData,
PredictorDependentData,
KnotSearchReadOnlyData,
KnotSearchData,
knot_search,
SingleWeightDependentData,
SingleOutcomeDependentData)
from nose.tools import assert_equal
import numpy as np
from numpy.testing.utils import assert_almost_equal, assert_array_equal
from scipy.linalg import qr
def test_outcome_dependent_data():
np.random.seed(10)
m = 1000
max_terms = 100
y = np.random.normal(size=m)
w = np.random.normal(size=m) ** 2
weight = SingleWeightDependentData.alloc(w, m, max_terms, 1e-16)
data = SingleOutcomeDependentData.alloc(y, weight, m, max_terms)
# Test updating
B = np.empty(shape=(m, max_terms))
for k in range(max_terms):
b = np.random.normal(size=m)
B[:, k] = b
code = weight.update_from_array(b)
if k >= 99:
1 + 1
data.update()
assert_equal(code, 0)
assert_almost_equal(
np.dot(weight.Q_t[:k + 1, :], np.transpose(weight.Q_t[:k + 1, :])),
np.eye(k + 1))
assert_equal(weight.update_from_array(b), -1)
# data.update(1e-16)
# Test downdating
q = np.array(weight.Q_t).copy()
theta = np.array(data.theta[:max_terms]).copy()
weight.downdate()
data.downdate()
weight.update_from_array(b)
data.update()
assert_almost_equal(q, np.array(weight.Q_t))
assert_almost_equal(theta, np.array(data.theta[:max_terms]))
assert_almost_equal(
np.array(data.theta[:max_terms]), np.dot(weight.Q_t, w * y))
wB = B * w[:, None]
Q, _ = qr(wB, pivoting=False, mode='economic')
assert_almost_equal(np.abs(np.dot(weight.Q_t, Q)), np.eye(max_terms))
# Test that reweighting works
assert_equal(data.k, max_terms)
w2 = np.random.normal(size=m) ** 2
weight.reweight(w2, B, max_terms)
data.synchronize()
assert_equal(data.k, max_terms)
w2B = B * w2[:, None]
Q2, _ = qr(w2B, pivoting=False, mode='economic')
assert_almost_equal(np.abs(np.dot(weight.Q_t, Q2)), np.eye(max_terms))
assert_almost_equal(
np.array(data.theta[:max_terms]), np.dot(weight.Q_t, w2 * y))
def test_knot_candidates():
np.random.seed(10)
m = 1000
x = np.random.normal(size=m)
p = np.random.normal(size=m)
p[np.random.binomial(p=.1, n=1, size=m) == 1] = 0.
x[np.random.binomial(p=.1, n=1, size=m) == 1] = 0.
predictor = PredictorDependentData.alloc(x)
candidates, candidates_idx = predictor.knot_candidates(
p, 5, 10, 0, 0, set())
assert_array_equal(candidates, x[candidates_idx])
assert_equal(len(candidates), len(set(candidates)))
# print candidates, np.sum(x==0)
# print candidates_idx
def slow_knot_search(p, x, B, candidates, outcomes):
# Brute force, utterly un-optimized knot search with no fast update.
# Use only for testing the actual knot search function.
    # This version allows for multiple outcome columns.
best_e = float('inf')
best_k = 0
best_knot = float('inf')
for k, knot in enumerate(candidates):
# Formulate the linear system for this candidate
X = np.concatenate(
[B, (p * np.maximum(x - knot, 0.0))[:, None]], axis=1)
# Solve the system for each y and w
e_squared = 0.0
for y, w in outcomes:
# Solve the system
beta = np.linalg.lstsq(w[:, None] * X, w * y)[0]
# Compute the error
r = w * (y - np.dot(X, beta))
e_squared += np.dot(r, r)
# Compute loss
e = e_squared # / np.sum(w ** 2)
# Compare to the best error
if e < best_e:
best_e = e
best_k = k
best_knot = knot
return best_knot, best_k, best_e
def generate_problem(m, q, r, n_outcomes, shared_weight):
# Generate some problem data
x = np.random.normal(size=m)
B = np.random.normal(size=(m, q))
p = B[:, 1]
knot = x[int(m / 2)]
candidates = np.array(sorted(
[knot] +
list(x[np.random.randint(low=0, high=m, size=r - 1)])))[::-1]
# These data need to be generated for each outcome
outcomes = []
if shared_weight:
w = np.random.normal(size=m) ** 2
# w = w * 0. + 1.
for _ in range(n_outcomes):
beta = np.random.normal(size=q + 1)
y = (np.dot(
np.concatenate([B, (p * np.maximum(x - knot, 0.0))[:, None]],
axis=1),
beta) + 0.01 * np.random.normal(size=m))
if not shared_weight:
w = np.random.normal(size=m) ** 2
# w = w * 0. + 1.
outcomes.append((y, w))
return x, B, p, knot, candidates, outcomes
def form_inputs(x, B, p, knot, candidates, y, w):
# Formulate the inputs for the fast version
m, q = B.shape
max_terms = q + 2
workings = []
n_outcomes = w.shape[1]
for _ in range(n_outcomes):
working = KnotSearchWorkingData.alloc(max_terms)
workings.append(working)
outcome = MultipleOutcomeDependentData.alloc(
y, w, m, n_outcomes, max_terms, 1e-16)
for j in range(B.shape[1]):
outcome.update_from_array(B[:, j])
predictor = PredictorDependentData.alloc(x)
constant = KnotSearchReadOnlyData(predictor, outcome)
return KnotSearchData(constant, workings, q)
def test_knot_search():
seed = 10
np.random.seed(seed)
m = 100
q = 5
r = 10
n_outcomes = 3
# Generate some problem data
x, B, p, knot, candidates, outcomes = generate_problem(
m, q, r, n_outcomes, False)
y = np.concatenate([y_[:, None] for y_, _ in outcomes], axis=1)
w = np.concatenate([w_[:, None] for _, w_ in outcomes], axis=1)
# Formulate the inputs for the fast version
data = form_inputs(x, B, p, knot, candidates, y, w)
# Get the answer using the slow version
best_knot, best_k, best_e = slow_knot_search(p, x, B, candidates, outcomes)
# Test the test
assert_almost_equal(best_knot, knot)
assert_equal(r, len(candidates))
assert_equal(m, B.shape[0])
assert_equal(q, B.shape[1])
assert_equal(len(outcomes), n_outcomes)
# Run fast knot search and compare results to slow knot search
fast_best_knot, fast_best_k, fast_best_e = knot_search(data, candidates,
p, q, m, r,
len(outcomes), 0)
assert_almost_equal(fast_best_knot, best_knot)
assert_equal(candidates[fast_best_k], candidates[best_k])
assert_almost_equal(fast_best_e, best_e)
|
tests/test_multiple_outputs.py | datagutt/brave | 572 | 12642357 | import time, pytest, inspect
from utils import *
def start_with_multiple_outputs(run_brave, create_config_file, output_image_location1, output_image_location2):
config = {
'mixers': [
{'pattern': 4}, # 4 is red
{'pattern': 5} # 5 is green
],
'outputs': [
{'type': 'image', 'source': 'mixer2', 'location': output_image_location1 },
{'type': 'image', 'location': output_image_location2 }
# ,{'type': 'local'}
]
}
config_file = create_config_file(config)
run_brave(config_file.name)
time.sleep(2)
check_brave_is_running()
def test_multiple_outputs_at_startup(run_brave, create_config_file):
output_image_location1 = create_output_image_location()
output_image_location2 = create_output_image_location()
start_with_multiple_outputs(run_brave, create_config_file, output_image_location1, output_image_location2)
assert_outputs([
{'type': 'image', 'source': 'mixer2', 'location': output_image_location1 },
{'type': 'image', 'source': 'mixer1', 'location': output_image_location2 }
])
assert_mixers([
{'id': 1, 'pattern': 4},
{'id': 2, 'pattern': 5}
])
# If they've linked right, one will be red and the other will be green
time.sleep(2)
assert_image_file_color(output_image_location1, (0,255,0))
assert_image_file_color(output_image_location2, (255,0,0))
def test_output_at_startup_to_missing_mixer(run_brave, create_config_file):
config = {
'outputs': [
{'type': 'image', 'source': 'mixer2'},
]
}
config_file = create_config_file(config)
run_brave(config_file.name)
time.sleep(1)
check_return_value(1)
def test_multiple_outputs_at_runtime(run_brave):
run_brave()
time.sleep(1)
# Mixer ID 1 exists:
add_output({'type': 'image', 'source': 'mixer1'})
# Mixer ID 2 does not exist:
response = add_output({'type': 'image', 'source': 'mixer2'}, 400)
assert 'does not exist' in response['error']
time.sleep(0.5)
assert_outputs([{'type': 'image', 'id': 1, 'source': 'mixer1'}])
add_mixer({})
# Now we have a second mixer, this will work:
add_output({'type': 'image', 'source': 'mixer2'})
# Do it again to prove we can have multiple outputs on the same mixer
add_output({'type': 'image', 'source': 'mixer2'})
time.sleep(1)
assert_outputs([{'type': 'image', 'source': 'mixer1'},
{'type': 'image', 'source': 'mixer2'},
{'type': 'image', 'source': 'mixer2'}])
|
content/test/gpu/run_gpu_integration_test.py | google-ar/chromium | 777 | 12642360 | <filename>content/test/gpu/run_gpu_integration_test.py
#!/usr/bin/env python
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import json
import sys
from gpu_tests import path_util
import gpu_project_config
path_util.SetupTelemetryPaths()
from telemetry.testing import browser_test_runner
def PostprocessJSON(file_name):
def TrimPrefix(s):
return s[1 + s.rfind('.'):]
with open(file_name) as f:
test_result = json.load(f)
test_result['successes'] = map(TrimPrefix, test_result['successes'])
test_result['failures'] = map(TrimPrefix, test_result['failures'])
with open(file_name, 'w') as f:
json.dump(test_result, f)
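# For example (hypothetical test id), a fully qualified name such as
# 'gpu_tests.webgl_conformance_integration_test.WebGLConformance.Foo' is trimmed
# by TrimPrefix to just 'Foo' before the JSON is written back out.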
def main():
options = browser_test_runner.TestRunOptions()
rest_args = sys.argv[1:]
retval = browser_test_runner.Run(
gpu_project_config.CONFIG, options, rest_args)
# Postprocess the outputted JSON to trim all of the prefixes from
# the test names, to keep them as similar to the old form as
# possible -- and keep them from getting crazily long.
parser = argparse.ArgumentParser(description='Temporary argument parser')
parser.add_argument(
'--write-abbreviated-json-results-to', metavar='FILENAME',
action='store',
help=('Full path for json results'))
option, _ = parser.parse_known_args(rest_args)
if option.write_abbreviated_json_results_to:
PostprocessJSON(option.write_abbreviated_json_results_to)
return retval
if __name__ == '__main__':
sys.exit(main())
|
vendor/node-semver/semver/tests/test_range.py | johnwen84/my-php-buildpack | 133 | 12642397 | # -*- coding:utf-8 -*-
import pytest
# node-semver/test/index.js
# import logging
# logging.basicConfig(level=logging.DEBUG, format="%(message)s")
cands = [
['1.0.0 - 2.0.0', '1.2.3', False],
['1.0.0', '1.0.0', False],
['>=*', '0.2.4', False],
['', '1.0.0', False],
['*', '1.2.3', False],
['*', 'v1.2.3-foo', True],
['>=1.0.0', '1.0.0', False],
['>=1.0.0', '1.0.1', False],
['>=1.0.0', '1.1.0', False],
['>1.0.0', '1.0.1', False],
['>1.0.0', '1.1.0', False],
['<=2.0.0', '2.0.0', False],
['<=2.0.0', '1.9999.9999', False],
['<=2.0.0', '0.2.9', False],
['<2.0.0', '1.9999.9999', False],
['<2.0.0', '0.2.9', False],
['>= 1.0.0', '1.0.0', False],
['>= 1.0.0', '1.0.1', False],
['>= 1.0.0', '1.1.0', False],
['> 1.0.0', '1.0.1', False],
['> 1.0.0', '1.1.0', False],
['<= 2.0.0', '2.0.0', False],
['<= 2.0.0', '1.9999.9999', False],
['<= 2.0.0', '0.2.9', False],
['< 2.0.0', '1.9999.9999', False],
['<\t2.0.0', '0.2.9', False],
['>=0.1.97', 'v0.1.97', True],
['>=0.1.97', '0.1.97', False],
['0.1.20 || 1.2.4', '1.2.4', False],
['>=0.2.3 || <0.0.1', '0.0.0', False],
['>=0.2.3 || <0.0.1', '0.2.3', False],
['>=0.2.3 || <0.0.1', '0.2.4', False],
['||', '1.3.4', False],
['2.x.x', '2.1.3', False],
['1.2.x', '1.2.3', False],
['1.2.x || 2.x', '2.1.3', False],
['1.2.x || 2.x', '1.2.3', False],
['x', '1.2.3', False],
['2.*.*', '2.1.3', False],
['1.2.*', '1.2.3', False],
['1.2.* || 2.*', '2.1.3', False],
['1.2.* || 2.*', '1.2.3', False],
['*', '1.2.3', False],
['2', '2.1.2', False],
['2.3', '2.3.1', False],
['~2.4', '2.4.0', False], # >=2.4.0 <2.5.0
['~2.4', '2.4.5', False],
['~>3.2.1', '3.2.2', False], # >=3.2.1 <3.3.0,
['~1', '1.2.3', False], # >=1.0.0 <2.0.0
['~>1', '1.2.3', False],
['~> 1', '1.2.3', False],
['~1.0', '1.0.2', False], # >=1.0.0 <1.1.0,
['~ 1.0', '1.0.2', False],
['~ 1.0.3', '1.0.12', False],
['>=1', '1.0.0', False],
['>= 1', '1.0.0', False],
['<1.2', '1.1.1', False],
['< 1.2', '1.1.1', False],
['1', '1.0.0beta', True],
['~v0.5.4-pre', '0.5.5', False],
['~v0.5.4-pre', '0.5.4', False],
['=0.7.x', '0.7.2', False],
['>=0.7.x', '0.7.2', False],
['=0.7.x', '0.7.0-asdf', False],
['>=0.7.x', '0.7.0-asdf', False],
['<=0.7.x', '0.6.2', False],
['~1.2.1 >=1.2.3', '1.2.3', False],
['~1.2.1 =1.2.3', '1.2.3', False],
['~1.2.1 1.2.3', '1.2.3', False],
['~1.2.1 >=1.2.3 1.2.3', '1.2.3', False],
['~1.2.1 1.2.3 >=1.2.3', '1.2.3', False],
['~1.2.1 1.2.3', '1.2.3', False],
['>=1.2.1 1.2.3', '1.2.3', False],
['1.2.3 >=1.2.1', '1.2.3', False],
['>=1.2.3 >=1.2.1', '1.2.3', False],
['>=1.2.1 >=1.2.3', '1.2.3', False],
['<=1.2.3', '1.2.3-beta', False],
['>1.2', '1.3.0-beta', False],
['>=1.2', '1.2.8', False],
['^1.2.3', '1.8.1', False],
['^1.2.3', '1.2.3-beta', False],
['^0.1.2', '0.1.2', False],
['^0.1', '0.1.2', False],
['^1.2', '1.4.2', False],
['^1.2 ^1', '1.4.2', False],
['^1.2', '1.2.0-pre', False],
['^1.2.3', '1.2.3-pre', False]
]
# cands = [
# ['^1.2', '1.4.2', False],
# ]
@pytest.mark.parametrize("range_, version, loose", cands)
def test_it(range_, version, loose):
from semver import satisfies
assert satisfies(version, range_, loose) is True
|
tests/testapp/tests/test_modelcustompk.py | dralley/django-lifecycle | 902 | 12642404 | <filename>tests/testapp/tests/test_modelcustompk.py
import datetime
from django.test import TestCase
from tests.testapp.models import ModelCustomPK
class ModelCustomPKTestCase(TestCase):
def test_update_created_at_before_create(self):
instance = ModelCustomPK.objects.create()
instance.refresh_from_db()
self.assertTrue(isinstance(instance.created_at, datetime.datetime))
def test_update_answer_after_create(self):
instance = ModelCustomPK.objects.create()
self.assertEqual(instance.answer, 42)
|
Plugins/Aspose_Words_Java_for_Jython/asposewords/programming_documents/SplitTables.py | freedomloveme/Aspose.Words-for-Java | 274 | 12642470 | from asposewords import Settings
from com.aspose.words import Document
from com.aspose.words import NodeType
from com.aspose.words import Paragraph
class SplitTables:
def __init__(self):
dataDir = Settings.dataDir + 'programming_documents/'
# Load the document.
doc = Document(dataDir + "tableDoc.doc")
# Get the first table in the document.
firstTable = doc.getChild(NodeType.TABLE, 0, True)
# We will split the table at the third row (inclusive).
row = firstTable.getRows().get(2)
# Create a new container for the split table.
table = firstTable.deepClone(False)
# Insert the container after the original.
firstTable.getParentNode().insertAfter(table, firstTable)
# Add a buffer paragraph to ensure the tables stay apart.
firstTable.getParentNode().insertAfter(Paragraph(doc), firstTable)
        currentRow = ''
        while currentRow != row:
            currentRow = firstTable.getLastRow()
            table.prependChild(currentRow)
doc.save(dataDir + "SplitTable.doc")
print "Done."
if __name__ == '__main__':
SplitTables() |
examples/callback_multiplexer_clientside.py | stlehmann/dash-extensions | 250 | 12642484 | import time
import dash_html_components as html
import dash_core_components as dcc
from dash_extensions.enrich import Output, DashProxy, Input, MultiplexerTransform
# Small example app.
proxy_container = dcc.Loading()
app = DashProxy(transforms=[MultiplexerTransform(proxy_location=proxy_container)])
app.layout = html.Div([html.Button("left", id="left", n_clicks=0),
html.Button("right", id="right", n_clicks=0),
html.Div("Initial value", id="log"), proxy_container])
# Client side function.
f = "function(n_clicks){return 'left (' + n_clicks + ')';}"
app.clientside_callback(f, Output("log", "children"), Input("left", "n_clicks"), prevent_initial_call=True)
@app.callback(Output("log", "children"), Input("right", "n_clicks"), prevent_initial_call=True)
def right(n_clicks):
time.sleep(2)
return f"right ({n_clicks})"
if __name__ == '__main__':
app.run_server(port=7777, debug=True)
|
torchmeta/utils/metrics.py | yusufraji/siren | 1,704 | 12642499 | import torch
import torch.nn.functional as F
from torchmeta.utils.prototype import get_prototypes
__all__ = ['hardness_metric']
def _pad_images(inputs, size=(224, 224), **kwargs):
height, width = inputs.shape[-2:]
pad_height, pad_width = (size[0] - height) // 2, (size[1] - width) // 2
padding = (pad_width, size[1] - width - pad_width,
pad_height, size[0] - height - pad_height)
return F.pad(inputs, padding, **kwargs)
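# For example (illustrative): an 84x84 image padded to (224, 224) gets
# (224 - 84) // 2 = 70 zero pixels added on every side before being fed to the
# pre-trained ResNet below.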
def hardness_metric(batch, num_classes):
"""Hardness metric of an episode, as defined in [1].
Parameters
----------
batch : dict
The batch of tasks over which the metric is computed. The batch of tasks
is a dictionary containing the keys `train` (or `support`) and `test`
(or `query`). This is typically the output of `BatchMetaDataLoader`.
num_classes : int
The number of classes in the classification task. This corresponds to
the number of ways in an `N`-way classification problem.
Returns
-------
metric : `torch.FloatTensor` instance
Values of the hardness metric for each task in the batch.
References
----------
.. [1] <NAME>., <NAME>., <NAME>. and <NAME>. (2019).
A Baseline for Few-Shot Image Classification. (https://arxiv.org/abs/1909.02729)
"""
if ('train' not in batch) and ('support' not in batch):
raise ValueError('The tasks do not contain any training/support set. '
'Make sure the tasks contain either the "train" or the '
'"support" key.')
if ('test' not in batch) and ('query' not in batch):
raise ValueError('The tasks do not contain any test/query set. Make '
'sure the tasks contain either the "test" of the '
'"query" key.')
train = 'train' if ('train' in batch) else 'support'
test = 'test' if ('test' in batch) else 'query'
with torch.no_grad():
# Load a pre-trained backbone Resnet-152 model from PyTorch Hub
backbone = torch.hub.load('pytorch/vision:v0.5.0',
'resnet152',
pretrained=True,
verbose=False)
backbone.eval()
train_inputs, train_targets = batch[train]
test_inputs, test_targets = batch[test]
batch_size, num_images, num_channels = train_inputs.shape[:3]
num_test_images = test_inputs.size(1)
backbone.to(device=train_inputs.device)
if num_channels != 3:
raise ValueError('The images must be RGB images.')
# Pad the images so that they are compatible with the pre-trained model
padded_train_inputs = _pad_images(train_inputs,
size=(224, 224), mode='constant', value=0.)
padded_test_inputs = _pad_images(test_inputs,
size=(224, 224), mode='constant', value=0.)
# Compute the features from the logits returned by the pre-trained
# model on the train/support examples. These features are z(x, theta)_+,
# averaged for each class
train_logits = backbone(padded_train_inputs.view(-1, 3, 224, 224))
train_logits = F.relu(train_logits.view(batch_size, num_images, -1))
train_features = get_prototypes(train_logits, train_targets, num_classes)
# Get the weights by normalizing the features
weights = F.normalize(train_features, p=2, dim=2)
# Compute and normalize the logits of the test/query examples
test_logits = backbone(padded_test_inputs.view(-1, 3, 224, 224))
test_logits = test_logits.view(batch_size, num_test_images, -1)
test_logits = F.normalize(test_logits, p=2, dim=2)
# Compute the log probabilities of the test/query examples
test_logits = torch.bmm(weights, test_logits.transpose(1, 2))
test_log_probas = -F.cross_entropy(test_logits, test_targets,
reduction='none')
# Compute the log-odds ratios for each image of the test/query set
log_odds_ratios = torch.log1p(-test_log_probas.exp()) - test_log_probas
return torch.mean(log_odds_ratios, dim=1)
|
pixiedust/apps/connectionWidget.py | elgalu/pixiedust | 598 | 12642507 | <filename>pixiedust/apps/connectionWidget.py
# -------------------------------------------------------------------------------
# Copyright IBM Corp. 2017
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------------
from .cfBrowser import CFBrowser
from pixiedust.display.app import *
from pixiedust.services.serviceManager import *
from pixiedust.utils import Logger
@Logger()
class ConnectionWidget(CFBrowser):
def getConnections(self):
return getConnections("cloudant")
def selectBluemixCredentials(self, service_name, credentials_str):
credentials = json.loads(credentials_str)
payload = {'name': service_name, 'credentials': credentials}
addConnection('cloudant', payload)
self.selectedConnection = payload['name']
#return self.dataSourcesList()
return """
<script>
pixiedust.executeDisplay({{pd_controls}}, {
'targetDivId': "dummy",
'script': "import json\\nprint(json.dumps( self.getConnections()))",
'onError': function(error){
alert(error);
},
'onSuccess': function(results){
var options = []
$.each(JSON.parse(results), function(key, value){
var selected = (value.name=="{{this.selectedConnection}}") ? 'selected="selected"' : "";
options.push('<option ' + selected + ' value="'+ value.name +'">'+ value.name +'</option>');
});
$("#connection{{prefix}}").html(options.join(''));
}
});
</script>
"""
@route(selectedConnection="*", editConnection="*")
def _editConnection(self):
jsOnLoad = """
var CodeMirror = require('codemirror/lib/codemirror');
function createEditor(content){
$('#connTextArea{{prefix}}').html('<textarea id="connection-info" rows=13 cols=80 name="connection-info"/>');
global.editor = CodeMirror.fromTextArea($("#connection-info")[0], {
lineNumbers: true,
matchBrackets: true,
indentUnit: 2,
autoIndent: true,
mode: 'application/json'
});
global.editor.setSize("100%", 300);
global.editor.setValue(content);
}
var connectionName = $("#connection{{prefix}}").val();
if ( connectionName ){
function toJson(v){
if (!v){
return ""
}
v = JSON.parse(v);
return JSON.stringify(v, null, '\t');
}
var script = "from pixiedust.services.serviceManager import getConnection\\n"+
"import json\\n"+
"print( json.dumps( getConnection(\\\"cloudant\\\",\\\"" + connectionName + "\\\", raw=False)))";
pixiedust.executeDisplay({{pd_controls}}, {
'targetDivId': "connTextArea{{prefix}}",
'script': script,
'onError': function(error){
$("#connection-error").text(error);
},
'onSuccess': function(results){
createEditor(toJson(results));
}
})
}else{
createEditor('{\\n\\
"name": "<<cloudant-connection name>>",\\n\\
"credentials": {\\n\\
"username": "<<userIdentifier>>-bluemix",\\n\\
"password": "<<password>>",\\n\\
"host": "<<hostIdentifier>>-bluemix.cloudant.com",\\n\\
"port": 443,\\n\\
"url": "https://<<userIdentifier>>-bluemix:<<password>>@<<hostIdentifier>>-bluemix.cloudant.com"\\n\\
}\\n\\
}');
}
"""
jsOK = """
try {
var connInfo = JSON.parse(global.editor.getValue());
var script = "from pixiedust.services.serviceManager import addConnection\\n"+
"print(addConnection(\\"cloudant\\"," + global.editor.getValue().replace(/[\\n\\r]+/g,"") + "))";
pixiedust.executeDisplay({{pd_controls}}, {
'targetDivId': "dummy",
'script': script,
'onError': function(error){
$("#connection-error").text(error);
},
'onSuccess': function(results){
if (typeof editConnCallback != "undefined"){
editConnCallback(results);
}
modal_obj.modal('hide')
}
});
return false;
} catch(e) {
console.log(e);
$("#connection-error").text('System error: ' + e.message);
return false;
}
"""
body = """
<div class="well">
Please enter your connection information as JSON in the text area below.
For a new connection, you can for example copy and paste the service credentials from Bluemix.
</div>
<div id="connection-error" style="color:red"/>
<div id="connTextArea{{prefix}}"/>
"""
return {"body":body, "jsOnLoad": jsOnLoad, "jsOK":jsOK}
@route(selectedConnection="*", deleteConnection="true")
def _deleteConnection(self):
deleteConnection("cloudant", self.selectedConnection)
self.deleteConnection = "false"
return self.dataSourcesList()
@route(selectedConnection="*", browseBMConnection="true")
def _browseBMConnection(self):
return self.startBrowsingBM()
@route(selectedConnection="*", newConnection="true")
def _newConnection(self):
body = """
<div class="well">
New Connection
</div>
<div class="container" id="newConnectionContainer{{prefix}}">
<div class="col-sm-2"/>
<div class="col-sm-4" style="border: 1px solid lightblue;margin: 10px;border-radius: 25px;cursor:pointer;
min-height: 150px;text-align: center;background-color:#e6eeff;display: flex;align-items: center;"
pd_options="manualConnection=true">
<h2>Enter the connection manually</h2>
</div>
<div class="col-sm-4" style="border: 1px solid lightblue;margin: 10px;border-radius: 25px;cursor:pointer;
min-height: 150px;text-align: center;background-color:#e6eeff;;display: flex;align-items: center;"
pd_options="browseBMConnection=true">
<h2>Browse your services on Bluemix</h2>
</div>
<div class="col-sm-2"/>
</div>
"""
return {"body":body, "dialogRoot": "newConnectionContainer{{prefix}}"}
@route(widget="DataSourcesList")
def dataSourcesList(self):
num_connections = len(self.getConnections())
        self.debug('num connections = {}'.format(num_connections))
select_conn_script = ' pd_script="self.selectedConnection ='
if num_connections > 0:
select_conn_script += "'$val(connection{{prefix}})'\""
else:
select_conn_script += '\'none\'"'
output = """
<div>
<div class="form-group">
<label for="connection{{prefix}}" class="control-label col-sm-2">Select a cloudant connection:</label>
<div class="col-sm-7">
<select class="form-control" id="connection{{prefix}}">
{%for conn in this.getConnections() %}
{%set selected=(this.selectedConnection==conn.name)%}
<option {%if selected%}selected="selected"{%endif%} value="{{conn.name|escape}}">{{conn.name|escape}}</option>
{%endfor%}
</select>
</div>
<div class="col-sm-2 btn-toolbar" role="toolbar">
<div class="btn-group" role="group\"""" + select_conn_script + """>
"""
if num_connections > 0:
output += """
            <button type="button" class="btn btn-default" pd_refresh>Go</button>
<button type="button" class="btn btn-default" pd_options="dialog=true;editConnection=true">
<i class="fa fa-pencil-square-o"/>
</button>"""
output += """
<button type="button" class="btn btn-default" pd_options="dialog=true;newConnection=true">
<i class="fa fa-plus"/>
</button>"""
if num_connections > 0:
output += """
<button type="button" class="btn btn-default" pd_refresh>
<pd_script type="preRun">
return confirm("Delete " + $("#connection{{prefix}}").val() + "?");
</pd_script>
<pd_script>self.deleteConnection="true"</pd_script>
<i class="fa fa-trash"/>
</button>
</div>
</div>
</div>
</div>
"""
return output |
weasyl/followuser.py | kfkitsune/weasyl | 111 | 12642511 | <reponame>kfkitsune/weasyl
from libweasyl import ratings
from weasyl import define as d
from weasyl import ignoreuser
from weasyl import macro as m
from weasyl import media
from weasyl.configuration_builder import create_configuration, BoolOption
from weasyl.error import WeasylError
WatchSettings = create_configuration([
BoolOption("submit", "s"),
BoolOption("collect", "c"),
BoolOption("char", "f"),
BoolOption("stream", "t"),
BoolOption("journal", "j"),
])
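# Each BoolOption above maps a watch category to the one-letter code stored in
# watchuser.settings; list_followed() matches those codes with a regex and
# insert() seeds them from the watcher's own config via WatchSettings.from_code.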
def list_followed(userid, settings, rating=ratings.GENERAL.code, friends=False):
"""
    Returns the ids of users who are watching the specified user and whose
    watch settings match ``settings``, optionally restricted to watchers who
    are friends with that user and whose rating preferences allow ``rating``.
"""
statement = [
"SELECT wu.userid FROM watchuser wu JOIN profile pr ON wu.userid = pr.userid WHERE wu.otherid = %(user)s"
" AND wu.settings ~ %(setting)s"
]
if friends:
statement.append("""
AND (
wu.userid IN (SELECT fu.userid FROM frienduser fu WHERE fu.otherid = wu.otherid AND fu.settings !~ 'p')
OR wu.userid IN (SELECT fu.otherid FROM frienduser fu WHERE fu.userid = wu.otherid AND fu.settings !~ 'p'))
""")
if rating == ratings.EXPLICIT.code:
# Only notify users who view explicit
statement.append(" AND pr.config ~ 'p'")
elif rating == ratings.MATURE.code:
# Only notify users who view mature or explicit
statement.append(" AND pr.config ~ '[ap]'")
return d.column(d.engine.execute("".join(statement), user=userid, setting=settings))
def select_settings(userid, otherid):
settings = d.engine.scalar(
"SELECT settings FROM watchuser WHERE (userid, otherid) = (%(user)s, %(other)s)",
user=userid,
other=otherid,
)
if settings is None:
raise WeasylError("watchuserRecordMissing")
return settings
def select_followed(userid, otherid, limit=None, backid=None, nextid=None, following=False):
"""
Returns the users who are following the specified user; note that
``following`` need never be passed explicitly.
"""
if following:
statement = ["SELECT wu.otherid, pr.username, pr.config FROM watchuser wu"
" INNER JOIN profile pr ON wu.otherid = pr.userid"
" WHERE wu.userid = %i" % (otherid,)]
else:
statement = ["SELECT wu.userid, pr.username, pr.config FROM watchuser wu"
" INNER JOIN profile pr ON wu.userid = pr.userid"
" WHERE wu.otherid = %i" % (otherid,)]
if userid:
statement.append(m.MACRO_IGNOREUSER % (userid, "pr"))
if backid:
statement.append(" AND pr.username < (SELECT username FROM profile WHERE userid = %i)" % (backid,))
elif nextid:
statement.append(" AND pr.username > (SELECT username FROM profile WHERE userid = %i)" % (nextid,))
statement.append(" ORDER BY pr.username%s LIMIT %i" % (" DESC" if backid else "", limit))
query = [{
"userid": i[0],
"username": i[1],
} for i in d.execute("".join(statement))]
media.populate_with_user_media(query)
return query[::-1] if backid else query
def select_following(userid, otherid, limit=None, backid=None, nextid=None):
"""
Returns the users whom the specified user is following.
"""
return select_followed(userid, otherid, limit, backid, nextid, following=True)
def manage_following(userid, limit, backid=None, nextid=None):
state = [
"SELECT pr.userid, pr.username, pr.config FROM watchuser wu"
" JOIN profile pr ON wu.otherid = pr.userid"
" WHERE wu.userid = %i" % (userid,)]
if backid:
state.append(" AND pr.username < (SELECT username FROM profile WHERE userid = %i)" % backid)
elif nextid:
state.append(" AND pr.username > (SELECT username FROM profile WHERE userid = %i)" % nextid)
state.append(" ORDER BY pr.username")
if backid:
state.append(" DESC")
state.append(" LIMIT %i" % limit)
query = [{
"userid": i[0],
"username": i[1],
} for i in d.execute("".join(state))]
media.populate_with_user_media(query)
return query[::-1] if backid else query
def insert(userid, otherid):
if ignoreuser.check(otherid, userid):
raise WeasylError("IgnoredYou")
elif ignoreuser.check(userid, otherid):
raise WeasylError("YouIgnored")
d.engine.execute(
'INSERT INTO watchuser (userid, otherid, settings) VALUES (%(user)s, %(other)s, %(settings)s) '
'ON CONFLICT DO NOTHING',
user=userid, other=otherid, settings=WatchSettings.from_code(d.get_config(userid)).to_code())
from weasyl import welcome
welcome.followuser_remove(userid, otherid)
welcome.followuser_insert(userid, otherid)
def update(userid, otherid, watch_settings):
d.engine.execute(
"UPDATE watchuser SET settings = %(settings)s WHERE (userid, otherid) = (%(user)s, %(other)s)",
settings=watch_settings.to_code(),
user=userid,
other=otherid,
)
def remove(userid, otherid):
d.execute("DELETE FROM watchuser WHERE (userid, otherid) = (%i, %i)", [userid, otherid])
from weasyl import welcome
welcome.followuser_remove(userid, otherid)
|
wetectron/utils/__init__.py | akobiisr/wetectron | 332 | 12642514 | <reponame>akobiisr/wetectron
# --------------------------------------------------------
# Copyright (C) 2020 NVIDIA Corporation. All rights reserved.
# Nvidia Source Code License-NC
# -------------------------------------------------------- |
cflearn/api/cv/models/dino.py | carefree0910/carefree-learn | 400 | 12642526 | <reponame>carefree0910/carefree-learn
import torch
from PIL import Image
from tqdm import tqdm
from torch import Tensor
from typing import List
from typing import Tuple
from ..data import SSLTestTransform
from ..data import InferenceImageFolderData
from ..pipeline import SimplePipeline
from ..interface import predict_folder
from ....constants import LATENT_KEY
from ....misc.toolkit import to_torch
from ....misc.toolkit import to_device
from ....misc.toolkit import eval_context
class DINOPredictor:
def __init__(self, m: SimplePipeline, img_size: int, *, to_gray: bool = False):
self.m = m
self.dino = m.model
self.transform = SSLTestTransform(img_size, to_gray)
@property
def device(self) -> torch.device:
return self.dino.device
def get_latent(self, src_path: str) -> Tensor:
src = Image.open(src_path).convert("RGB")
net = self.transform(src)[None, ...].to(self.device)
with eval_context(self.dino):
return self.dino.get_latent(net)
def get_logits(self, src_path: str) -> Tensor:
src = Image.open(src_path).convert("RGB")
net = self.transform(src)[None, ...].to(self.device)
with eval_context(self.dino):
return self.dino.get_logits(net)
def get_folder_latent(
self,
src_folder: str,
*,
batch_size: int,
num_workers: int = 0,
use_tqdm: bool = True,
) -> Tuple[Tensor, List[str]]:
results = predict_folder(
self.m,
src_folder,
batch_size=batch_size,
num_workers=num_workers,
transform=self.transform,
use_tqdm=use_tqdm,
)
latent = to_torch(results.outputs[LATENT_KEY])
return latent, results.img_paths
def get_folder_logits(
self,
src_folder: str,
*,
batch_size: int,
num_workers: int = 0,
use_tqdm: bool = True,
) -> Tuple[Tensor, List[str]]:
data = InferenceImageFolderData(
src_folder,
batch_size=batch_size,
num_workers=num_workers,
transform=self.transform,
)
outputs = []
iterator = data.initialize()[0]
if use_tqdm:
iterator = tqdm(iterator, total=len(iterator))
with eval_context(self.dino):
for i, batch in enumerate(iterator):
batch = to_device(batch, self.device)
outputs.append(self.dino.student(i, batch).cpu())
return torch.cat(outputs, dim=0), data.dataset.img_paths
__all__ = [
"DINOPredictor",
]
|
web-bundle/resources/check-cookie-and-return-bundle.py | meyerweb/wpt | 14,668 | 12642566 | import os
def main(request, response):
origin = request.headers.get(b"origin")
if origin is not None:
response.headers.set(b"Access-Control-Allow-Origin", origin)
response.headers.set(b"Access-Control-Allow-Methods", b"GET")
response.headers.set(b"Access-Control-Allow-Credentials", b"true")
headers = [
(b"Content-Type", b"application/webbundle"),
(b"X-Content-Type-Options", b"nosniff"),
]
cookie = request.cookies.first(b"milk", None)
if (cookie is not None) and cookie.value == b"1":
if request.GET.get(b"bundle", None) == b"cross-origin":
bundle = "./wbn/simple-cross-origin.wbn"
else:
bundle = "./wbn/subresource.wbn"
with open(
os.path.join(os.path.dirname(__file__), bundle),
"rb",
) as f:
return (200, headers, f.read())
else:
return (400, [], "")
|
tools/virtuoso.py | datha88/kbqa-ar-smcnn | 139 | 12642600 | <reponame>datha88/kbqa-ar-smcnn<filename>tools/virtuoso.py
#!/usr/bin/python
import sys, json
from urllib import parse, request
# Setting global variables
data_source = 'fb2m:'
query_url = 'http://localhost:8890/sparql/'
# The HTTP request parameters are built so that the endpoint returns query results as JSON.
def sparql_query(query, URL, format='application/json'):
params={
'default-graph': '',
'should-sponge': 'soft',
'query': query.encode('utf8'),
'debug': 'on',
'timeout': '',
'format': format,
'save': 'display',
'fname': ''
}
encoded_query = parse.urlencode(params).encode('utf-8')
http_response = request.urlopen(URL, encoded_query).read()
try:
json_response = json.loads(http_response.decode('utf-8'))
return json_response
except:
        print('json load error', file=sys.stderr)
        print(http_response, file=sys.stderr)
return None
# Using freebase mid to query its types
def id_query_type(node_id):
query = '''
SELECT ?type FROM <%s> WHERE {<%s> <fb:type.object.type> ?type}
''' % (data_source, node_id)
json_response = sparql_query(query, query_url)
try:
type_list = [item['type']['value'] for item in json_response['results']['bindings']]
return list(set(type_list))
except:
return []
# Using freebase mid to query its original cased name
def id_query_en_name(node_id):
query = '''
SELECT ?name FROM <%s> WHERE {<%s> <fb:type.object.en_name> ?name}
''' % (data_source, node_id)
json_response = sparql_query(query, query_url)
try:
name_list = [item['name']['value'] for item in json_response['results']['bindings']]
return list(set(name_list))
except:
return []
# Using freebase mid to query its original cased alias
def id_query_en_alias(node_id):
query = '''
SELECT ?alias FROM <%s> WHERE {<%s> <fb:common.topic.en_alias> ?alias}
''' % (data_source, node_id)
json_response = sparql_query(query, query_url)
try:
alias_list = [item['alias']['value'] for item in json_response['results']['bindings']]
return list(set(alias_list))
except:
return []
# Using freebase mid to query its processed & tokenized name
def id_query_name(node_id):
query = '''
SELECT ?name FROM <%s> WHERE {<%s> <fb:type.object.name> ?name}
''' % (data_source, node_id)
json_response = sparql_query(query, query_url)
try:
name_list = [item['name']['value'] for item in json_response['results']['bindings']]
return list(set(name_list))
except:
return []
# Using freebase mid to query its processed & tokenized alias
def id_query_alias(node_id):
query = '''
SELECT ?alias FROM <%s> WHERE {<%s> <fb:common.topic.alias> ?alias}
''' % (data_source, node_id)
json_response = sparql_query(query, query_url)
try:
alias_list = [item['alias']['value'] for item in json_response['results']['bindings']]
return list(set(alias_list))
except:
return []
# Using freebase mid to query its processed & tokenized name & alias
def id_query_str(node_id):
query = '''
SELECT ?str FROM <%s> WHERE { {<%s> <fb:type.object.name> ?str} UNION {<%s> <fb:common.topic.alias> ?str} }
''' % (data_source, node_id, node_id)
json_response = sparql_query(query, query_url)
try:
name_list = [item['str']['value'] for item in json_response['results']['bindings']]
return list(set(name_list))
except:
return []
# Using freebase mid to query all relations coming out of the entity
def id_query_out_rel(node_id, unique = True):
query = '''
SELECT ?relation FROM <%s> WHERE {<%s> ?relation ?object}
''' % (data_source, node_id)
json_response = sparql_query(query, query_url)
try:
relations = [str(item['relation']['value']) for item in json_response['results']['bindings']]
return list(set(relations))
except:
return []
# Using freebase mid to query all relations coming into the entity
def id_query_in_rel(node_id, unique = True):
query = '''
SELECT ?relation FROM <%s> WHERE {?subject ?relation <%s>}
''' % (data_source, node_id)
json_response = sparql_query(query, query_url)
try:
relations = [str(item['relation']['value']) for item in json_response['results']['bindings']]
return list(set(relations))
except:
return []
# Using the name of an entity to query its freebase mid
def name_query_id(name):
query = '''
SELECT ?node_id FROM <%s> WHERE {?node_id <fb:type.object.name> "%s"}
''' % (data_source, name)
json_response = sparql_query(query, query_url)
try:
node_id_list = [str(item['node_id']['value']) for item in json_response['results']['bindings']]
return list(set(node_id_list))
except:
return []
# Using the alias of an entity to query its freebase mid
def alias_query_id(alias):
query = '''
SELECT ?node_id FROM <%s> WHERE {?node_id <fb:common.topic.alias> "%s"}
''' % (data_source, alias)
json_response = sparql_query(query, query_url)
try:
node_id_list = [str(item['node_id']['value']) for item in json_response['results']['bindings']]
return list(set(node_id_list))
except:
return []
# Using the alias/name of an entity to query its freebase mid
def str_query_id(string):
query = '''
SELECT ?node_id FROM <%s> WHERE { {?node_id <fb:common.topic.alias> "%s"} UNION {?node_id <fb:type.object.name> "%s"} }
''' % (data_source, string, string)
json_response = sparql_query(query, query_url)
try:
node_id_list = [str(item['node_id']['value']) for item in json_response['results']['bindings']]
return list(set(node_id_list))
except:
return []
# Using freebase mid to query all subjects pointing into the entity
def id_query_in_entity(node_id, unique = True):
query = '''
SELECT ?subject FROM <%s> WHERE {?subject ?relation <%s>}
''' % (data_source, node_id)
json_response = sparql_query(query, query_url)
try:
subjects = [str(item['subject']['value']) for item in json_response['results']['bindings']]
return list(set(subjects))
except:
return []
# Using freebase mid to query all objects coming out of the entity
def id_query_out_entity(node_id, unique = True):
query = '''
SELECT ?object FROM <%s> WHERE {<%s> ?relation ?object}
''' % (data_source, node_id)
json_response = sparql_query(query, query_url)
try:
objects = [str(item['object']['value']) for item in json_response['results']['bindings']]
return list(set(objects))
except:
return []
# Using the subject and relation to query the corresponding object
def query_object(subject, relation):
query = '''
SELECT ?object FROM <%s> WHERE {<%s> <%s> ?object}
''' % (data_source, subject, relation)
json_response = sparql_query(query, query_url)
try:
return [str(item['object']['value']) for item in json_response['results']['bindings']]
except:
return []
# Using the object and relation to query the corresponding subject
def query_subject(obj, relation):
query = '''
SELECT ?subject FROM <%s> WHERE {?subject <%s> <%s>}
''' % (data_source, relation, obj)
json_response = sparql_query(query, query_url)
try:
return [str(item['subject']['value']) for item in json_response['results']['bindings']]
except:
return []
# Using the subject and object to query the corresponding relation
def query_relation(sub, obj):
query = '''
SELECT ?relation FROM <%s> WHERE {<%s> ?relation <%s>}
''' % (data_source, sub, obj)
json_response = sparql_query(query, query_url)
try:
objects = [str(item['relation']['value']) for item in json_response['results']['bindings']]
return list(set(objects))
except:
return []
def relation_query_subject(relation):
query = '''
SELECT ?subject FROM <%s> WHERE {?subject <%s> ?object}
'''% (data_source, relation)
json_response = sparql_query(query, query_url)
try:
return [str(item['subject']['value']) for item in json_response['results']['bindings']]
except:
return []
# Check whether a node is a CVT node
def check_cvt(node_id):
query = '''
SELECT ?tag FROM <%s> WHERE {<%s> <fb:cvt_node_identifier> ?tag}
''' % (data_source, node_id)
json_response = sparql_query(query, query_url)
ret = [str(item['tag']['value']) for item in json_response['results']['bindings']]
if len(ret) == 1 and ret[0] == 'true':
return True
else:
return False
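# Minimal usage sketch (assumes a local Virtuoso endpoint at query_url loaded
# with the FB2M graph named by data_source; the name string below is a
# hypothetical example and may not exist in the graph).
if __name__ == '__main__':
    for node_id in str_query_id('barack obama'):
        print(node_id, id_query_type(node_id), id_query_out_rel(node_id))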
|
python/example_code/rekognition/test/test_rekognition_image_detection.py | iconara/aws-doc-sdk-examples | 5,166 | 12642613 | <filename>python/example_code/rekognition/test/test_rekognition_image_detection.py
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""
Purpose
Unit tests for rekognition_image_detection.py.
"""
import boto3
from botocore.exceptions import ClientError
import pytest
from rekognition_image_detection import RekognitionImage
from rekognition_objects import (
RekognitionFace, RekognitionCelebrity, RekognitionLabel,
RekognitionModerationLabel, RekognitionText)
TEST_IMAGE = {'Bytes': b'just some bytes'}
@pytest.mark.parametrize('error_code', [None, 'TestException'])
def test_detect_faces(make_stubber, make_faces, error_code):
rekognition_client = boto3.client('rekognition')
rekognition_stubber = make_stubber(rekognition_client)
image = RekognitionImage(TEST_IMAGE, 'test-image', rekognition_client)
faces = [RekognitionFace(face) for face in make_faces(3, True)]
rekognition_stubber.stub_detect_faces(image.image, faces, error_code=error_code)
if error_code is None:
got_faces = image.detect_faces()
assert (
[face.to_dict() for face in faces] ==
[face.to_dict() for face in got_faces]
)
else:
with pytest.raises(ClientError) as exc_info:
image.detect_faces()
assert exc_info.value.response['Error']['Code'] == error_code
@pytest.mark.parametrize('error_code', [None, 'TestException'])
def test_compare_faces(make_stubber, make_faces, error_code):
rekognition_client = boto3.client('rekognition')
rekognition_stubber = make_stubber(rekognition_client)
source_image = RekognitionImage(TEST_IMAGE, 'source-image', rekognition_client)
target_image = RekognitionImage(TEST_IMAGE, 'target-image', rekognition_client)
matches = [RekognitionFace(face) for face in make_faces(1)]
unmatches = [RekognitionFace(face) for face in make_faces(2)]
similarity = 80
rekognition_stubber.stub_compare_faces(
source_image.image, target_image.image, similarity, matches, unmatches,
error_code=error_code)
if error_code is None:
got_matches, got_unmatches = source_image.compare_faces(
target_image, similarity)
assert (
[face.to_dict() for face in matches] ==
[face.to_dict() for face in got_matches]
)
assert (
[face.to_dict() for face in unmatches] ==
[face.to_dict() for face in got_unmatches]
)
else:
with pytest.raises(ClientError) as exc_info:
source_image.compare_faces(target_image, similarity)
assert exc_info.value.response['Error']['Code'] == error_code
@pytest.mark.parametrize('error_code', [None, 'TestException'])
def test_detect_labels(make_stubber, make_labels, error_code):
rekognition_client = boto3.client('rekognition')
rekognition_stubber = make_stubber(rekognition_client)
image = RekognitionImage(TEST_IMAGE, 'test-image', rekognition_client)
labels = [RekognitionLabel(label) for label in make_labels(3)]
max_labels = 3
rekognition_stubber.stub_detect_labels(
image.image, max_labels, labels, error_code=error_code)
if error_code is None:
got_labels = image.detect_labels(max_labels)
assert (
[label.to_dict() for label in labels] ==
[label.to_dict() for label in got_labels])
else:
with pytest.raises(ClientError) as exc_info:
image.detect_labels(max_labels)
assert exc_info.value.response['Error']['Code'] == error_code
@pytest.mark.parametrize('error_code', [None, 'TestException'])
def test_detect_moderation_labels(make_stubber, make_moderation_labels, error_code):
rekognition_client = boto3.client('rekognition')
rekognition_stubber = make_stubber(rekognition_client)
image = RekognitionImage(TEST_IMAGE, 'test-image', rekognition_client)
labels = [
RekognitionModerationLabel(label) for label in make_moderation_labels(3)]
rekognition_stubber.stub_detect_moderation_labels(
image.image, labels, error_code=error_code)
if error_code is None:
got_labels = image.detect_moderation_labels()
assert (
[label.to_dict() for label in labels] ==
[label.to_dict() for label in got_labels])
else:
with pytest.raises(ClientError) as exc_info:
image.detect_moderation_labels()
assert exc_info.value.response['Error']['Code'] == error_code
@pytest.mark.parametrize('error_code', [None, 'TestException'])
def test_detect_text(make_stubber, make_texts, error_code):
rekognition_client = boto3.client('rekognition')
rekognition_stubber = make_stubber(rekognition_client)
image = RekognitionImage(TEST_IMAGE, 'test-image', rekognition_client)
texts = [RekognitionText(text) for text in make_texts(3)]
rekognition_stubber.stub_detect_text(image.image, texts, error_code=error_code)
if error_code is None:
got_texts = image.detect_text()
assert (
[text.to_dict() for text in texts] ==
[text.to_dict() for text in got_texts])
else:
with pytest.raises(ClientError) as exc_info:
image.detect_text()
assert exc_info.value.response['Error']['Code'] == error_code
@pytest.mark.parametrize('error_code', [None, 'TestException'])
def test_recognize_celebrities(make_stubber, make_faces, error_code):
rekognition_client = boto3.client('rekognition')
rekognition_stubber = make_stubber(rekognition_client)
image = RekognitionImage(TEST_IMAGE, 'test-image', rekognition_client)
celebrities = [RekognitionCelebrity(face)
for face in make_faces(3, is_celebrity=True)]
normals = [RekognitionFace(face) for face in make_faces(2)]
rekognition_stubber.stub_recognize_celebrities(
image.image, celebrities, normals, error_code=error_code)
if error_code is None:
got_celebrities, got_normals = image.recognize_celebrities()
assert (
[celeb.to_dict() for celeb in celebrities] ==
[celeb.to_dict() for celeb in got_celebrities])
assert (
[normal.to_dict() for normal in normals] ==
[normal.to_dict() for normal in got_normals])
else:
with pytest.raises(ClientError) as exc_info:
image.recognize_celebrities()
assert exc_info.value.response['Error']['Code'] == error_code
|
eeauditor/auditors/aws/Amazon_EFS_Auditor.py | kbhagi/ElectricEye | 442 | 12642637 | <filename>eeauditor/auditors/aws/Amazon_EFS_Auditor.py
#This file is part of ElectricEye.
#SPDX-License-Identifier: Apache-2.0
#Licensed to the Apache Software Foundation (ASF) under one
#or more contributor license agreements. See the NOTICE file
#distributed with this work for additional information
#regarding copyright ownership. The ASF licenses this file
#to you under the Apache License, Version 2.0 (the
#"License"); you may not use this file except in compliance
#with the License. You may obtain a copy of the License at
#http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing,
#software distributed under the License is distributed on an
#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
#KIND, either express or implied. See the License for the
#specific language governing permissions and limitations
#under the License.
import boto3
import datetime
from check_register import CheckRegister
registry = CheckRegister()
# import boto3 clients
efs = boto3.client("efs")
# loop through EFS file systems
def describe_file_systems(cache):
response = cache.get("describe_file_systems")
if response:
return response
cache["describe_file_systems"] = efs.describe_file_systems()
return cache["describe_file_systems"]
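# Each check below is registered through the CheckRegister decorator and shares
# a single DescribeFileSystems response via the cache dict handled above.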
@registry.register_check("efs")
def efs_filesys_encryption_check(cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str) -> dict:
"""[EFS.1] EFS File Systems should have encryption enabled"""
response = describe_file_systems(cache)
myFileSys = response["FileSystems"]
for filesys in myFileSys:
encryptionCheck = str(filesys["Encrypted"])
fileSysId = str(filesys["FileSystemId"])
fileSysArn = f"arn:{awsPartition}:elasticfilesystem:{awsRegion}:{awsAccountId}:file-system/{fileSysId}"
# ISO Time
iso8601Time = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat()
if encryptionCheck == "False":
finding = {
"SchemaVersion": "2018-10-08",
"Id": fileSysArn + "/efs-encryption-check",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": fileSysArn,
"AwsAccountId": awsAccountId,
"Types": [
"Software and Configuration Checks/AWS Security Best Practices",
"Effects/Data Exposure",
],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "HIGH"},
"Confidence": 99,
"Title": "[EFS.1] EFS File Systems should have encryption enabled",
"Description": "EFS file system "
+ fileSysId
+ " does not have encryption enabled. EFS file systems cannot be encrypted after creation, consider backing up data and creating a new encrypted file system.",
"Remediation": {
"Recommendation": {
"Text": "For EFS encryption information refer to the Data Encryption in EFS section of the Amazon Elastic File System User Guide",
"Url": "https://docs.aws.amazon.com/efs/latest/ug/encryption.html",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsElasticFileSystem",
"Id": fileSysArn,
"Partition": awsPartition,
"Region": awsRegion,
"Details": {"Other": {"fileSystemId": fileSysId}},
}
],
"Compliance": {
"Status": "FAILED",
"RelatedRequirements": [
"NIST CSF PR.DS-1",
"NIST SP 800-53 MP-8",
"NIST SP 800-53 SC-12",
"NIST SP 800-53 SC-28",
"AICPA TSC CC6.1",
"ISO 27001:2013 A.8.2.3",
],
},
"Workflow": {"Status": "NEW"},
"RecordState": "ACTIVE",
}
yield finding
else:
finding = {
"SchemaVersion": "2018-10-08",
"Id": fileSysArn + "/efs-encryption-check",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": fileSysArn,
"AwsAccountId": awsAccountId,
"Types": [
"Software and Configuration Checks/AWS Security Best Practices",
"Effects/Data Exposure",
],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "INFORMATIONAL"},
"Confidence": 99,
"Title": "[EFS.1] EFS File Systems should have encryption enabled",
"Description": "EFS file system " + fileSysId + " has encryption enabled.",
"Remediation": {
"Recommendation": {
"Text": "For EFS encryption information refer to the Data Encryption in EFS section of the Amazon Elastic File System User Guide",
"Url": "https://docs.aws.amazon.com/efs/latest/ug/encryption.html",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsElasticFileSystem",
"Id": fileSysArn,
"Partition": awsPartition,
"Region": awsRegion,
"Details": {"Other": {"fileSystemId": fileSysId}},
}
],
"Compliance": {
"Status": "PASSED",
"RelatedRequirements": [
"NIST CSF PR.DS-1",
"NIST SP 800-53 MP-8",
"NIST SP 800-53 SC-12",
"NIST SP 800-53 SC-28",
"AICPA TSC CC6.1",
"ISO 27001:2013 A.8.2.3",
],
},
"Workflow": {"Status": "RESOLVED"},
"RecordState": "ARCHIVED",
}
yield finding
@registry.register_check("efs")
def efs_filesys_policy_check(cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str) -> dict:
"""[EFS.2] EFS File Systems should not use the default file system policy"""
response = describe_file_systems(cache)
myFileSys = response["FileSystems"]
for filesys in myFileSys:
fileSysId = str(filesys["FileSystemId"])
fileSysArn = f"arn:{awsPartition}:elasticfilesystem:{awsRegion}:{awsAccountId}:file-system/{fileSysId}"
# ISO Time
iso8601Time = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat()
try:
response = efs.describe_file_system_policy(
FileSystemId=fileSysId
)
finding = {
"SchemaVersion": "2018-10-08",
"Id": fileSysArn + "/efs-policy-check",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": fileSysArn,
"AwsAccountId": awsAccountId,
"Types": [
"Software and Configuration Checks/AWS Security Best Practices",
"Effects/Data Exposure",
],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "INFORMATIONAL"},
"Confidence": 99,
"Title": "[EFS.2] EFS File Systems should not use the default file system policy",
"Description": "EFS file system " + fileSysId + " is not using the default file system policy.",
"Remediation": {
"Recommendation": {
"Text": "For EFS policies information refer to the Identity and Access Management in EFS section of the Amazon Elastic File System User Guide",
"Url": "https://docs.aws.amazon.com/efs/latest/ug/iam-access-control-nfs-efs.html",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsElasticFileSystem",
"Id": fileSysArn,
"Partition": awsPartition,
"Region": awsRegion,
"Details": {"Other": {"fileSystemId": fileSysId}},
}
],
"Compliance": {
"Status": "PASSED",
"RelatedRequirements": [
"NIST CSF PR.DS-1",
"NIST CSF PR.AC-1",
"NIST CSF PR.AC-4",
"NIST SP 800-53 IA-1",
"NIST SP 800-53 IA-2",
"NIST SP 800-53 IA-5",
"AICPA TSC CC6.1",
"AICPA TSC CC6.3",
"ISO 27001:2013 A.9.1.1",
"ISO 27001:2013 A.9.4.1",
],
},
"Workflow": {"Status": "RESOLVED"},
"RecordState": "ARCHIVED",
}
yield finding
        except efs.exceptions.PolicyNotFound:  # no policy set, so the default file system policy is in effect
finding = {
"SchemaVersion": "2018-10-08",
"Id": fileSysArn + "/efs-policy-check",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": fileSysArn,
"AwsAccountId": awsAccountId,
"Types": [
"Software and Configuration Checks/AWS Security Best Practices",
"Effects/Data Exposure",
],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "MEDIUM"},
"Confidence": 99,
"Title": "[EFS.2] EFS File Systems should not use the default file system policy",
"Description": "EFS file system " + fileSysId + " is using a default file system policy.",
"Remediation": {
"Recommendation": {
"Text": "For EFS policies information refer to the Identity and Access Management in EFS section of the Amazon Elastic File System User Guide",
"Url": "https://docs.aws.amazon.com/efs/latest/ug/iam-access-control-nfs-efs.html",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsElasticFileSystem",
"Id": fileSysArn,
"Partition": awsPartition,
"Region": awsRegion,
"Details": {"Other": {"fileSystemId": fileSysId}},
}
                ],
                "Compliance": {
                    "Status": "FAILED",
"RelatedRequirements": [
"NIST CSF PR.DS-1",
"NIST CSF PR.AC-1",
"NIST CSF PR.AC-4",
"NIST SP 800-53 IA-1",
"NIST SP 800-53 IA-2",
"NIST SP 800-53 IA-5",
"AICPA TSC CC6.1",
"AICPA TSC CC6.3",
"ISO 27001:2013 A.9.1.1",
"ISO 27001:2013 A.9.4.1",
],
},
"Workflow": {"Status": "NEW"},
"RecordState": "ACTIVE",
}
yield finding
except:
pass |
bfxapi/examples/ws/subscribe_tickers.py | uggel/bitfinex-api-py | 162 | 12642641 | <filename>bfxapi/examples/ws/subscribe_tickers.py
import os
import sys
sys.path.append('../../../')
from bfxapi import Client
bfx = Client(
logLevel='DEBUG'
)
@bfx.ws.on('error')
def log_error(err):
print ("Error: {}".format(err))
@bfx.ws.on('new_funding_ticker')
def log_ticker(ticker):
print ("New ticker: {}".format(ticker))
async def start():
await bfx.ws.subscribe('ticker', 'fUSD')
bfx.ws.on('connected', start)
bfx.ws.run()
|
prohmr/models/heads/fc_head.py | akashsengupta1997/ProHMR | 120 | 12642643 | import torch
import torch.nn as nn
import numpy as np
from typing import Dict, Tuple
from yacs.config import CfgNode
class FCHead(nn.Module):
def __init__(self, cfg: CfgNode):
"""
Fully connected head for camera and betas regression.
Args:
cfg (CfgNode): Model config as yacs CfgNode.
"""
super(FCHead, self).__init__()
self.cfg = cfg
self.npose = 6 * (cfg.SMPL.NUM_BODY_JOINTS + 1)
self.layers = nn.Sequential(nn.Linear(cfg.MODEL.FLOW.CONTEXT_FEATURES,
cfg.MODEL.FC_HEAD.NUM_FEATURES),
nn.ReLU(inplace=False),
nn.Linear(cfg.MODEL.FC_HEAD.NUM_FEATURES, 13))
nn.init.xavier_uniform_(self.layers[2].weight, gain=0.02)
mean_params = np.load(cfg.SMPL.MEAN_PARAMS)
init_cam = torch.from_numpy(mean_params['cam'].astype(np.float32))[None, None]
init_betas = torch.from_numpy(mean_params['shape'].astype(np.float32))[None, None]
self.register_buffer('init_cam', init_cam)
self.register_buffer('init_betas', init_betas)
def forward(self, smpl_params: Dict, feats: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Run forward pass.
Args:
smpl_params (Dict): Dictionary containing predicted SMPL parameters.
feats (torch.Tensor): Tensor of shape (N, C) containing the features computed by the backbone.
Returns:
pred_betas (torch.Tensor): Predicted SMPL betas.
pred_cam (torch.Tensor): Predicted camera parameters.
"""
batch_size = feats.shape[0]
num_samples = smpl_params['body_pose'].shape[1]
offset = self.layers(feats).reshape(batch_size, 1, 13).repeat(1, num_samples, 1)
betas_offset = offset[:, :, :10]
cam_offset = offset[:, :, 10:]
pred_cam = cam_offset + self.init_cam
pred_betas = betas_offset + self.init_betas
return pred_betas, pred_cam
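# Shape sketch (illustrative): given feats of shape (N, CONTEXT_FEATURES) and
# smpl_params['body_pose'] of shape (N, num_samples, ...), the offsets are
# reshaped and repeated to (N, num_samples, 13), then split into 10 betas and
# 3 camera parameters, so forward() returns (N, num_samples, 10) and
# (N, num_samples, 3) tensors.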
|
src/simian/mac/admin/groups.py | tristansgray/simian | 326 | 12642650 | #!/usr/bin/env python
#
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Groups admin handler."""
import httplib
import urllib
from simian.mac import admin
from simian.mac import models
class Groups(admin.AdminHandler):
"""Handler for /admin/groups."""
def get(self):
"""GET handler."""
groups = models.Group.all()
groups = sorted(groups, key=lambda t: unicode.lower(t.key().name()))
d = {'groups': groups, 'can_mod_groups': self.IsAdminUser(),
'report_type': 'groups'}
self.Render('groups.html', d)
@admin.AdminHandler.XsrfProtected('groups')
def post(self):
"""POST handler."""
if not self.IsAdminUser():
self.error(httplib.FORBIDDEN)
return
group_name = urllib.unquote(self.request.get('group').strip())
action = self.request.get('action')
if action == 'create':
group = models.Group(key_name=group_name)
users = self.request.get_all('user')
if users:
group.users = users
group.put()
msg = 'Group successfully saved.'
elif action == 'delete':
group_manifest_mods = models.GroupManifestModification.all().filter(
'group_key_name =', group_name).get()
if group_manifest_mods:
msg = "Group not deleted as it's being used for Manifest Modifications."
else:
group = models.Group.get_by_key_name(group_name)
if group:
group.delete()
else:
self.error(httplib.NOT_FOUND)
return
msg = 'Group successfully deleted.'
elif action == 'change':
users = self.request.get_all('user')
add_group = self.request.get('add') == '1'
group = models.Group.get_by_key_name(group_name)
if not group:
self.error(httplib.NOT_FOUND)
return
if add_group:
group.users += users
else:
group.users = [u for u in group.users if u not in users]
group.put()
msg = 'Group successfully modified.'
self.redirect('/admin/groups?msg=%s' % msg)
|
test/issues/test_008.py | ajnelson-nist/pySHACL | 167 | 12642664 | <reponame>ajnelson-nist/pySHACL<filename>test/issues/test_008.py
# -*- coding: utf-8 -*-
#
"""
https://github.com/RDFLib/pySHACL/issues/8
"""
from pyshacl import validate
mixed_file_text = '''
# baseURI: http://semanticprocess.x10host.com/Ontology/Testsparql
# imports: http://datashapes.org/dash
# prefix: Testsparql
@prefix Testsparql: <http://semanticprocess.x10host.com/Ontology/Testsparql#> .
@prefix owl: <http://www.w3.org/2002/07/owl#> .
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
@prefix sh: <http://www.w3.org/ns/shacl#> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
<http://semanticprocess.x10host.com/Ontology/Testsparql>
rdf:type owl:Ontology ;
owl:imports <http://datashapes.org/dash> ;
owl:versionInfo "Created with TopBraid Composer" ;
sh:declare Testsparql:PrefixDeclaration ;
.
Testsparql:Crane
rdf:type rdfs:Class ;
rdfs:subClassOf owl:Class ;
.
Testsparql:Crane_1
rdf:type Testsparql:Crane ;
Testsparql:Cranecapacity "500"^^xsd:decimal ;
.
Testsparql:Crane_2
rdf:type Testsparql:Crane ;
Testsparql:Cranecapacity "5000"^^xsd:decimal ;
.
Testsparql:Cranecapacity
rdf:type owl:DatatypeProperty ;
rdfs:domain Testsparql:Crane ;
rdfs:range xsd:decimal ;
rdfs:subPropertyOf owl:topDataProperty ;
.
Testsparql:Module
rdf:type rdfs:Class ;
rdfs:subClassOf owl:Class ;
.
Testsparql:Module_1
rdf:type Testsparql:Module ;
Testsparql:Moduleweight "800"^^xsd:decimal ;
.
Testsparql:Moduleweight
rdf:type owl:DatatypeProperty ;
rdfs:domain Testsparql:Module ;
rdfs:range xsd:decimal ;
rdfs:subPropertyOf owl:topDataProperty ;
.
Testsparql:PrefixDeclaration
rdf:type sh:PrefixDeclaration ;
sh:namespace "http://semanticprocess.x10host.com/Ontology/Testsparql#"^^xsd:anyURI ;
sh:prefix "Testsparql" ;
.
Testsparql:Process
rdf:type rdfs:Class ;
rdf:type sh:NodeShape ;
rdfs:subClassOf owl:Class ;
sh:sparql [
sh:message "Invalid process" ;
sh:prefixes <http://semanticprocess.x10host.com/Ontology/Testsparql> ;
sh:select """SELECT $this
WHERE {
$this rdf:type Testsparql:Process.
$this Testsparql:hasResource ?crane.
$this Testsparql:hasAssociation ?module.
?crane Testsparql:Cranecapacity ?cc.
?module Testsparql:Moduleweight ?mw.
FILTER (?cc <= ?mw).
}""" ;
] ;
.
Testsparql:ProcessID
rdf:type owl:DatatypeProperty ;
rdfs:domain Testsparql:Process ;
rdfs:range xsd:string ;
rdfs:subPropertyOf owl:topDataProperty ;
.
Testsparql:Process_1
rdf:type Testsparql:Process ;
Testsparql:ProcessID "P1" ;
Testsparql:hasAssociation Testsparql:Module_1 ;
Testsparql:hasResource Testsparql:Crane_1 ;
.
Testsparql:Process_2
rdf:type Testsparql:Process ;
Testsparql:ProcessID "P2" ;
Testsparql:hasAssociation Testsparql:Module_1 ;
Testsparql:hasResource Testsparql:Crane_2 ;
.
Testsparql:hasAssociation
rdf:type owl:ObjectProperty ;
rdfs:domain Testsparql:Process ;
rdfs:range Testsparql:Module ;
rdfs:subPropertyOf owl:topObjectProperty ;
.
Testsparql:hasResource
rdf:type owl:ObjectProperty ;
rdfs:domain Testsparql:Process ;
rdfs:range Testsparql:Crane ;
rdfs:subPropertyOf owl:topObjectProperty ;
.
'''
shacl_file_text = '''
@prefix Testsparql: <http://semanticprocess.x10host.com/Ontology/Testsparql#> .
@prefix owl: <http://www.w3.org/2002/07/owl#> .
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
@prefix sh: <http://www.w3.org/ns/shacl#> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
<http://semanticprocess.x10host.com/Ontology/Testsparql>
rdf:type owl:Ontology ;
owl:imports <http://datashapes.org/dash> ;
owl:versionInfo "Created with TopBraid Composer" ;
sh:declare Testsparql:PrefixDeclaration ;
.
Testsparql:PrefixDeclaration
rdf:type sh:PrefixDeclaration ;
sh:namespace "http://semanticprocess.x10host.com/Ontology/Testsparql#"^^xsd:anyURI ;
sh:prefix "Testsparql" ;
.
Testsparql:Process
rdf:type rdfs:Class ;
rdf:type sh:NodeShape ;
rdfs:subClassOf owl:Class ;
sh:sparql [
sh:message "Invalid process" ;
sh:prefixes <http://semanticprocess.x10host.com/Ontology/Testsparql> ;
sh:select """SELECT $this
WHERE {
$this rdf:type Testsparql:Process.
$this Testsparql:hasResource ?crane.
$this Testsparql:hasAssociation ?module.
?crane Testsparql:Cranecapacity ?cc.
?module Testsparql:Moduleweight ?mw.
FILTER (?cc <= ?mw).
}""" ;
] ;
.
'''
data_file_text = '''
@prefix Testsparql: <http://semanticprocess.x10host.com/Ontology/Testsparql#> .
@prefix owl: <http://www.w3.org/2002/07/owl#> .
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
@prefix sh: <http://www.w3.org/ns/shacl#> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
Testsparql:Crane
rdf:type rdfs:Class ;
rdfs:subClassOf owl:Class ;
.
Testsparql:Crane_1
rdf:type Testsparql:Crane ;
Testsparql:Cranecapacity "500"^^xsd:decimal ;
.
Testsparql:Crane_2
rdf:type Testsparql:Crane ;
Testsparql:Cranecapacity "5000"^^xsd:decimal ;
.
Testsparql:Cranecapacity
rdf:type owl:DatatypeProperty ;
rdfs:domain Testsparql:Crane ;
rdfs:range xsd:decimal ;
rdfs:subPropertyOf owl:topDataProperty ;
.
Testsparql:Module
rdf:type rdfs:Class ;
rdfs:subClassOf owl:Class ;
.
Testsparql:Module_1
rdf:type Testsparql:Module ;
Testsparql:Moduleweight "800"^^xsd:decimal ;
.
Testsparql:Moduleweight
rdf:type owl:DatatypeProperty ;
rdfs:domain Testsparql:Module ;
rdfs:range xsd:decimal ;
rdfs:subPropertyOf owl:topDataProperty ;
.
Testsparql:Process
rdf:type rdfs:Class ;
rdfs:subClassOf owl:Class ;
.
Testsparql:ProcessID
rdf:type owl:DatatypeProperty ;
rdfs:domain Testsparql:Process ;
rdfs:range xsd:string ;
rdfs:subPropertyOf owl:topDataProperty ;
.
Testsparql:Process_1
rdf:type Testsparql:Process ;
Testsparql:ProcessID "P1" ;
Testsparql:hasAssociation Testsparql:Module_1 ;
Testsparql:hasResource Testsparql:Crane_1 ;
.
Testsparql:Process_2
rdf:type Testsparql:Process ;
Testsparql:ProcessID "P2" ;
Testsparql:hasAssociation Testsparql:Module_1 ;
Testsparql:hasResource Testsparql:Crane_2 ;
.
Testsparql:hasAssociation
rdf:type owl:ObjectProperty ;
rdfs:domain Testsparql:Process ;
rdfs:range Testsparql:Module ;
rdfs:subPropertyOf owl:topObjectProperty ;
.
Testsparql:hasResource
rdf:type owl:ObjectProperty ;
rdfs:domain Testsparql:Process ;
rdfs:range Testsparql:Crane ;
rdfs:subPropertyOf owl:topObjectProperty ;
.
'''
def test_008():
res1 = validate(mixed_file_text, data_graph_format='turtle', shacl_graph_format='turtle', inference='both', debug=True)
conforms, graph, string = res1
assert not conforms
res2 = validate(data_file_text, shacl_graph=shacl_file_text, data_graph_format='turtle', shacl_graph_format='turtle', inference='both', debug=True)
conforms, graph, string = res2
assert not conforms
|
tests/test_data_utils/test_fastai_transforms.py | rajshah4/pytorch-widedeep | 692 | 12642681 | <reponame>rajshah4/pytorch-widedeep
"""
Given the fact that the module fastai_transforms is mostly a copy and paste
from the fastai v1 text's transforms library, the tests here are also copied
from that library to ensure adequate coverage
Credit for the code here to <NAME> and the fastai team
"""
from pytorch_widedeep.utils.fastai_transforms import * # noqa: F403
###############################################################################
# Test simple rules
###############################################################################
def test_rules():
    assert fix_html("Some HTML&nbsp;text<br />") == "Some HTML& text\n"
assert replace_rep("I'm so excited!!!!!!!!") == "I'm so excited xxrep 8 ! "
assert (
replace_wrep("I've never ever ever ever ever ever ever ever done this.")
== "I've never xxwrep 7 ever done this."
)
assert (
rm_useless_spaces("Inconsistent use of spaces.")
== "Inconsistent use of spaces."
)
assert (
spec_add_spaces("I #like to #put #hashtags #everywhere!")
== "I # like to # put # hashtags # everywhere!"
)
assert replace_all_caps(["Mark", "CAPITALIZED", "Only"]) == [
"Mark",
"xxup",
"capitalized",
"Only",
]
assert deal_caps(["Mark", "Capitalized", "lower", "All"]) == [
"xxmaj",
"mark",
"xxmaj",
"capitalized",
"lower",
"xxmaj",
"all",
]
###############################################################################
# Test Tokenizer
###############################################################################
def test_tokenize():
texts = [
"one two three four",
"Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.",
"I'm suddenly SHOUTING FOR NO REASON",
]
tokenizer = Tokenizer(BaseTokenizer)
toks = tokenizer.process_all(texts)
assert toks[0] == ["one", "two", "three", "four"]
assert toks[1][:6] == ["xxmaj", "lorem", "ipsum", "dolor", "sit", "amet,"]
assert (
" ".join(toks[2])
== "xxmaj i'm suddenly xxup shouting xxup for xxup no xxup reason"
)
def test_tokenize_handles_empty_lines():
texts = ["= Markdown Title =\n\nMakrdown Title does not have spaces around"]
tokenizer = Tokenizer(BaseTokenizer)
toks = tokenizer.process_all(texts)
assert toks[0] == [
"=",
"xxmaj",
"markdown",
"xxmaj",
"title",
"=",
"\n",
"\n",
"xxmaj",
"makrdown",
"xxmaj",
"title",
"does",
"not",
"have",
"spaces",
"around",
]
def test_tokenize_ignores_extraneous_space():
texts = ["test "]
tokenizer = Tokenizer(BaseTokenizer)
toks = tokenizer.process_all(texts)
assert toks[0] == ["test"]
def test_numericalize_and_textify():
toks = [
["ok", "!", "xxmaj", "nice", "!", "anti", "-", "virus"],
["!", "xxmaj", "meg", "xxmaj", "nice", "meg"],
]
vocab = Vocab.create(toks, max_vocab=20, min_freq=2)
assert vocab.numericalize(toks[0]) == [0, 9, 5, 10, 9, 0, 0, 0]
assert vocab.textify([0, 3, 10, 11, 9]) == "xxunk xxeos nice meg !"
|