# Source: manticorp/i3dv-renderer / render.py (license: apache-2.0)
import bpy
import sys, os, configparser
import math, time, json, pprint
# Because this is being run inside blender,
# the current folder is not in include path.
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
from render_functions import *
global ob
global settings
settings = {}
model_id = sys.argv[-1]
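# Typical invocation (illustrative): this script is meant to run inside
# Blender, with the model id passed after Blender's argument separator, e.g.
#   blender scene.blend --background --python render.py -- <model_id>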
start = check = time.time()
# scene
scene = bpy.data.scenes["Scene"]
# Parse the config file
defaultSettings = parseConfig()
# Get the arguments from the json file
configFile = defaultSettings["render settings"]["stl_folder"] + "/" + model_id + "/options.json"
settings = getRenderOptions(defaultSettings,configFile)
# Check for the existence of the 3D file
# The file is checked first here, and loaded later
# This is because loading the 3D file takes a while,
# so we want all available errors to pop up before
# we do the lengthy STL loading.
file = (settings["render settings"]["stl_folder"] + "/" + model_id + "/" + model_id + "." + settings["render settings"]["input_filetype"])
if not os.path.exists(file):
    sys.exit("File doesn't exist")
# Setting the image resolution
if(is_numeric(settings["render settings"]["size"])):
size = float(settings["render settings"]["size"])
else:
try:
settings["image size settings"][settings["render settings"]["size"].lower()]
except KeyError:
size = defaultSettings["image size settings"][defaultSettings["render settings"]["size"].lower()]
else:
size = settings["image size settings"][settings["render settings"]["size"].lower()]
filetype = settings["render settings"]["output_filetype"].upper()
# Setting the device / engine, etc.
bpy.context.scene.cycles.samples = int(size)  # Number of samples; higher = better quality, less noise
bpy.context.scene.cycles.device = settings["render settings"]["render_device"]
bpy.context.scene.render.antialiasing_samples = '8' # antialiasing_samples
if settings["render settings"]["render_engine"] == "BLENDER_RENDER":
bpy.context.scene.render.engine = settings["render settings"]["render_engine"]
# Smaller tile size for a CPU render
scene.render.tile_x = 16
scene.render.tile_y = 16
# Finally...make the material...
material = makeMaterial(
'Apply',
settings["render settings"]["diffuse_RGBi"][0:3],
settings["render settings"]["specular_RGBi"][0:3],
1,
settings["render settings"]["diffuse_RGBi"][3],
settings["render settings"]["specular_RGBi"][3]
)
elif settings["render settings"]["render_engine"] == "CYCLES":
bpy.context.scene.render.engine = settings["render settings"]["render_engine"]
else:
try:
raise InputError(settings["render settings"]["render_engine"],'Invalid Render Engine Given.')
except:
bpy.context.scene.render.engine = defaultSettings["render settings"]["render_engine"]
finally:
bpy.context.scene.render.engine = defaultSettings["render settings"]["render_engine"]
if bpy.context.scene.render.engine == "CYCLES":
if bpy.context.scene.cycles.device == "GPU":
scene.render.tile_x = 256
scene.render.tile_y = 256
else:
scene.render.tile_x = 16
scene.render.tile_y = 16
material = bpy.data.materials['Custom']
material.use_nodes = True
Mix_Shader = material.node_tree.nodes.new("MIX_SHADER")
Mix_Shader.inputs[0].default_value = 0.05
material.node_tree.nodes['Diffuse BSDF'].inputs[0].default_value = settings["render settings"]["diffuse_RGBi"]
material.node_tree.nodes['Glossy BSDF'].inputs[0].default_value = settings["render settings"]["specular_RGBi"]
# Camera levels and degrees
scene.levels = settings["render settings"]["levels"]
scene.degrees = settings["render settings"]["degrees"]
bpy.ops.my.button() ## this function creates the bubble and changes other necessary settings
# Deleting the plane
if(settings["render settings"]["plane"] == 0):
try:
bpy.data.objects['Plane'].select = True
bpy.ops.object.delete()
bpy.ops.object.select_all(action='DESELECT')
    except Exception:
        # There was no plane to delete.
        pass
# Setting the background color
world = bpy.data.worlds["World"]
world.horizon_color = settings["render settings"]["background_color"]
try:
settings["render settings"]["zenith_color"]
except KeyError:
    ## No zenith color given, so use a plain background
world.use_sky_blend = False
else:
world.use_sky_blend = True
world.zenith_color = settings["render settings"]["zenith_color"]
# Transparent background or not?
if bool(settings["render settings"]["transparent"]):
bpy.context.scene.cycles.film_transparent = True
else:
bpy.context.scene.cycles.film_transparent = False
# Whether the camera should appear stationary
if bool(settings["render settings"]["stationary_camera"]):
world.use_sky_paper = True
else:
world.use_sky_paper = False
# Amount of images
total_frames = bpy.context.scene.frame_end
# Setting the image name
imageName = settings["output settings"]["output_folder"] + "/" + model_id + "/" + settings["render settings"]["output"] + "/images/" + settings["render settings"]["output"]
thumbName = settings["output settings"]["output_folder"] + "/" + model_id + "/" + settings["render settings"]["output"] + "/images/thumb.png"
# Load the STL file returning ob
ob = loadStl(file)
# Setting the scale of the model
scale = 4.0/max(ob.dimensions[0], ob.dimensions[1], ob.dimensions[2])
for axis in range(0,3):
ob.scale[axis] = scale
# Set object material
setMaterial(ob,material)
renderTimes = []
# Render the thumbnail
setResolution(
x = settings["output settings"]["thumb_size"],
y = settings["output settings"]["thumb_size"],
percentage = 100,
quality = settings["output settings"]["thumb_quality"],
filetype = settings["output settings"]["thumb_filetype"]
)
if bpy.context.scene.render.engine == "CYCLES":
bpy.context.scene.cycles.samples = settings["output settings"]["thumb_samples"]
bpy.context.scene.frame_set(total_frames // 2)  # frame numbers must be integers
renderThumb(image=thumbName, anim=False)
renderTimes.append(time.time() - start)
start = time.time()
if bpy.context.scene.render.engine == "CYCLES":
    bpy.context.scene.cycles.samples = int(size / 2)
setResolution(
x=size,
y=size,
percentage=100,
quality=settings["render settings"]["jpeg_quality"],
filetype=filetype
)
renderThumb(image=imageName, anim=True)
# @TODO: exec(montage outputfilename*.jpg -tile levelsxlevels -geometry +0+0 sprite.jpg)
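# A hedged sketch for the TODO above: a helper (defined but never called here)
# that would tile the rendered frames into a single sprite sheet. It assumes
# ImageMagick's `montage` binary is installed and on PATH.
def make_sprite_sheet(prefix, tiles, ext="jpg"):
    import subprocess
    subprocess.check_call([
        "montage", "%s*.%s" % (prefix, ext),   # e.g. output0001.jpg, ...
        "-tile", "%dx%d" % (tiles, tiles),     # levels x levels grid
        "-geometry", "+0+0",                   # no padding between tiles
        "%s_sprite.%s" % (prefix, ext),
    ])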
renderTimes.append(time.time() - start)
start = time.time()
for elapsed in renderTimes:  # renamed from `time` to avoid shadowing the time module
    print("\nELAPSED TIME:\t\t%.03f secs\n" % elapsed)

# Source: openstack/horizon / openstack_dashboard/dashboards/admin/networks/ports/views.py (license: apache-2.0)
# Copyright 2012 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
from openstack_dashboard.dashboards.admin.networks.ports \
import tables as ports_tables
from openstack_dashboard.dashboards.admin.networks.ports \
import tabs as ports_tabs
from openstack_dashboard.dashboards.admin.networks.ports \
import workflows as admin_workflows
from openstack_dashboard.dashboards.project.networks.ports \
import views as project_views
class CreateView(project_views.CreateView):
workflow_class = admin_workflows.CreatePort
failure_url = 'horizon:admin:networks:detail'
def get_initial(self):
network = self.get_network()
return {"network_id": self.kwargs['network_id'],
"network_name": network.name,
"target_tenant_id": network.tenant_id}
class DetailView(project_views.DetailView):
tab_group_class = ports_tabs.PortDetailTabs
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
port = context["port"]
network_url = "horizon:admin:networks:detail"
subnet_url = "horizon:admin:networks:subnets:detail"
port.network_url = reverse(network_url, args=[port.network_id])
for ip in port.fixed_ips:
ip['subnet_url'] = reverse(subnet_url, args=[ip['subnet_id']])
table = ports_tables.PortsTable(self.request,
network_id=port.network_id)
# TODO(robcresswell) Add URL for "Ports" crumb after bug/1416838
breadcrumb = [
((port.network_name or port.network_id), port.network_url),
(_("Ports"), None)
]
context["custom_breadcrumb"] = breadcrumb
context["url"] = \
reverse('horizon:admin:networks:ports_tab', args=[port.network_id])
context["actions"] = table.render_row_actions(port)
return context
@staticmethod
def get_redirect_url():
return reverse('horizon:admin:networks:index')
class UpdateView(project_views.UpdateView):
workflow_class = admin_workflows.UpdatePort
failure_url = 'horizon:admin:networks:detail'
def get_initial(self):
initial = super().get_initial()
port = self._get_object()
if 'binding__host_id' in port:
initial['binding__host_id'] = port['binding__host_id']
initial['device_id'] = port['device_id']
initial['device_owner'] = port['device_owner']
return initial

#! /usr/bin/env python
# Source: jesux/FruityWifi / FruityWifi/www/modules/responder/includes/Responder-master/LDAPPackets.py (license: gpl-3.0)
# NBT-NS/LLMNR Responder
# Created by Laurent Gaffie
# Copyright (C) 2014 Trustwave Holdings, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import struct
from odict import OrderedDict
class Packet():
fields = OrderedDict([
("data", ""),
])
def __init__(self, **kw):
self.fields = OrderedDict(self.__class__.fields)
for k,v in kw.items():
if callable(v):
self.fields[k] = v(self.fields[k])
else:
self.fields[k] = v
def __str__(self):
return "".join(map(str, self.fields.values()))
class LDAPSearchDefaultPacket(Packet):
fields = OrderedDict([
("ParserHeadASNID", "\x30"),
("ParserHeadASNLen", "\x0c"),
("MessageIDASNID", "\x02"),
("MessageIDASNLen", "\x01"),
("MessageIDASNStr", "\x0f"),
("OpHeadASNID", "\x65"),
("OpHeadASNIDLen", "\x07"),
("SearchDoneSuccess", "\x0A\x01\x00\x04\x00\x04\x00"),#No Results.
])
class LDAPSearchSupportedCapabilitiesPacket(Packet):
fields = OrderedDict([
("ParserHeadASNID", "\x30"),
("ParserHeadASNLenOfLen", "\x84"),
("ParserHeadASNLen", "\x00\x00\x00\x7e"),#126
("MessageIDASNID", "\x02"),
("MessageIDASNLen", "\x01"),
("MessageIDASNStr", "\x02"),
("OpHeadASNID", "\x64"),
("OpHeadASNIDLenOfLen", "\x84"),
("OpHeadASNIDLen", "\x00\x00\x00\x75"),#117
("ObjectName", "\x04\x00"),
("SearchAttribASNID", "\x30"),
("SearchAttribASNLenOfLen", "\x84"),
("SearchAttribASNLen", "\x00\x00\x00\x6d"),#109
("SearchAttribASNID1", "\x30"),
("SearchAttribASN1LenOfLen", "\x84"),
("SearchAttribASN1Len", "\x00\x00\x00\x67"),#103
("SearchAttribASN2ID", "\x04"),
("SearchAttribASN2Len", "\x15"),#21
("SearchAttribASN2Str", "supportedCapabilities"),
("SearchAttribASN3ID", "\x31"),
("SearchAttribASN3LenOfLen", "\x84"),
("SearchAttribASN3Len", "\x00\x00\x00\x4a"),
("SearchAttrib1ASNID", "\x04"),
("SearchAttrib1ASNLen", "\x16"),#22
("SearchAttrib1ASNStr", "1.2.840.113556.1.4.800"),
("SearchAttrib2ASNID", "\x04"),
("SearchAttrib2ASNLen", "\x17"),#23
("SearchAttrib2ASNStr", "1.2.840.113556.1.4.1670"),
("SearchAttrib3ASNID", "\x04"),
("SearchAttrib3ASNLen", "\x17"),#23
("SearchAttrib3ASNStr", "1.2.840.113556.1.4.1791"),
("SearchDoneASNID", "\x30"),
("SearchDoneASNLenOfLen", "\x84"),
("SearchDoneASNLen", "\x00\x00\x00\x10"),#16
("MessageIDASN2ID", "\x02"),
("MessageIDASN2Len", "\x01"),
("MessageIDASN2Str", "\x02"),
("SearchDoneStr", "\x65\x84\x00\x00\x00\x07\x0a\x01\x00\x04\x00\x04\x00"),
## No need to calculate anything this time, this packet is generic.
])
class LDAPSearchSupportedMechanismsPacket(Packet):
fields = OrderedDict([
("ParserHeadASNID", "\x30"),
("ParserHeadASNLenOfLen", "\x84"),
("ParserHeadASNLen", "\x00\x00\x00\x60"),#96
("MessageIDASNID", "\x02"),
("MessageIDASNLen", "\x01"),
("MessageIDASNStr", "\x02"),
("OpHeadASNID", "\x64"),
("OpHeadASNIDLenOfLen", "\x84"),
("OpHeadASNIDLen", "\x00\x00\x00\x57"),#87
("ObjectName", "\x04\x00"),
("SearchAttribASNID", "\x30"),
("SearchAttribASNLenOfLen", "\x84"),
("SearchAttribASNLen", "\x00\x00\x00\x4f"),#79
("SearchAttribASNID1", "\x30"),
("SearchAttribASN1LenOfLen", "\x84"),
("SearchAttribASN1Len", "\x00\x00\x00\x49"),#73
("SearchAttribASN2ID", "\x04"),
("SearchAttribASN2Len", "\x17"),#23
("SearchAttribASN2Str", "supportedSASLMechanisms"),
("SearchAttribASN3ID", "\x31"),
("SearchAttribASN3LenOfLen", "\x84"),
("SearchAttribASN3Len", "\x00\x00\x00\x2a"),#42
("SearchAttrib1ASNID", "\x04"),
("SearchAttrib1ASNLen", "\x06"),#6
("SearchAttrib1ASNStr", "GSSAPI"),
("SearchAttrib2ASNID", "\x04"),
("SearchAttrib2ASNLen", "\x0a"),#10
("SearchAttrib2ASNStr", "GSS-SPNEGO"),
("SearchAttrib3ASNID", "\x04"),
("SearchAttrib3ASNLen", "\x08"),#8
("SearchAttrib3ASNStr", "EXTERNAL"),
("SearchAttrib4ASNID", "\x04"),
("SearchAttrib4ASNLen", "\x0a"),#10
("SearchAttrib4ASNStr", "DIGEST-MD5"),
("SearchDoneASNID", "\x30"),
("SearchDoneASNLenOfLen", "\x84"),
("SearchDoneASNLen", "\x00\x00\x00\x10"),#16
("MessageIDASN2ID", "\x02"),
("MessageIDASN2Len", "\x01"),
("MessageIDASN2Str", "\x02"),
("SearchDoneStr", "\x65\x84\x00\x00\x00\x07\x0a\x01\x00\x04\x00\x04\x00"),
## No need to calculate anything this time, this packet is generic.
])
class LDAPNTLMChallenge(Packet):
fields = OrderedDict([
("ParserHeadASNID", "\x30"),
("ParserHeadASNLenOfLen", "\x84"),
("ParserHeadASNLen", "\x00\x00\x00\xD0"),#208
("MessageIDASNID", "\x02"),
("MessageIDASNLen", "\x01"),
("MessageIDASNStr", "\x02"),
("OpHeadASNID", "\x61"),
("OpHeadASNIDLenOfLen", "\x84"),
("OpHeadASNIDLen", "\x00\x00\x00\xc7"),#199
("Status", "\x0A"),
("StatusASNLen", "\x01"),
("StatusASNStr", "\x0e"), #In Progress.
("MatchedDN", "\x04\x00"), #Null
("ErrorMessage", "\x04\x00"), #Null
("SequenceHeader", "\x87"),
("SequenceHeaderLenOfLen", "\x81"),
("SequenceHeaderLen", "\x82"), #188
("NTLMSSPSignature", "NTLMSSP"),
("NTLMSSPSignatureNull", "\x00"),
("NTLMSSPMessageType", "\x02\x00\x00\x00"),
("NTLMSSPNtWorkstationLen","\x1e\x00"),
("NTLMSSPNtWorkstationMaxLen","\x1e\x00"),
("NTLMSSPNtWorkstationBuffOffset","\x38\x00\x00\x00"),
("NTLMSSPNtNegotiateFlags","\x15\x82\x89\xe2"),
("NTLMSSPNtServerChallenge","\x81\x22\x33\x34\x55\x46\xe7\x88"),
("NTLMSSPNtReserved","\x00\x00\x00\x00\x00\x00\x00\x00"),
("NTLMSSPNtTargetInfoLen","\x94\x00"),
("NTLMSSPNtTargetInfoMaxLen","\x94\x00"),
("NTLMSSPNtTargetInfoBuffOffset","\x56\x00\x00\x00"),
("NegTokenInitSeqMechMessageVersionHigh","\x05"),
("NegTokenInitSeqMechMessageVersionLow","\x02"),
("NegTokenInitSeqMechMessageVersionBuilt","\xce\x0e"),
("NegTokenInitSeqMechMessageVersionReserved","\x00\x00\x00"),
("NegTokenInitSeqMechMessageVersionNTLMType","\x0f"),
("NTLMSSPNtWorkstationName","SMB12"),
("NTLMSSPNTLMChallengeAVPairsId","\x02\x00"),
("NTLMSSPNTLMChallengeAVPairsLen","\x0a\x00"),
("NTLMSSPNTLMChallengeAVPairsUnicodeStr","smb12"),
("NTLMSSPNTLMChallengeAVPairs1Id","\x01\x00"),
("NTLMSSPNTLMChallengeAVPairs1Len","\x1e\x00"),
("NTLMSSPNTLMChallengeAVPairs1UnicodeStr","SERVER2008"),
("NTLMSSPNTLMChallengeAVPairs2Id","\x04\x00"),
("NTLMSSPNTLMChallengeAVPairs2Len","\x1e\x00"),
("NTLMSSPNTLMChallengeAVPairs2UnicodeStr","smb12.local"),
("NTLMSSPNTLMChallengeAVPairs3Id","\x03\x00"),
("NTLMSSPNTLMChallengeAVPairs3Len","\x1e\x00"),
("NTLMSSPNTLMChallengeAVPairs3UnicodeStr","SERVER2008.smb12.local"),
("NTLMSSPNTLMChallengeAVPairs5Id","\x05\x00"),
("NTLMSSPNTLMChallengeAVPairs5Len","\x04\x00"),
("NTLMSSPNTLMChallengeAVPairs5UnicodeStr","smb12.local"),
("NTLMSSPNTLMChallengeAVPairs6Id","\x00\x00"),
("NTLMSSPNTLMChallengeAVPairs6Len","\x00\x00"),
])
def calculate(self):
##Convert strings to Unicode first...
self.fields["NTLMSSPNtWorkstationName"] = self.fields["NTLMSSPNtWorkstationName"].encode('utf-16le')
self.fields["NTLMSSPNTLMChallengeAVPairsUnicodeStr"] = self.fields["NTLMSSPNTLMChallengeAVPairsUnicodeStr"].encode('utf-16le')
self.fields["NTLMSSPNTLMChallengeAVPairs1UnicodeStr"] = self.fields["NTLMSSPNTLMChallengeAVPairs1UnicodeStr"].encode('utf-16le')
self.fields["NTLMSSPNTLMChallengeAVPairs2UnicodeStr"] = self.fields["NTLMSSPNTLMChallengeAVPairs2UnicodeStr"].encode('utf-16le')
self.fields["NTLMSSPNTLMChallengeAVPairs3UnicodeStr"] = self.fields["NTLMSSPNTLMChallengeAVPairs3UnicodeStr"].encode('utf-16le')
self.fields["NTLMSSPNTLMChallengeAVPairs5UnicodeStr"] = self.fields["NTLMSSPNTLMChallengeAVPairs5UnicodeStr"].encode('utf-16le')
###### Workstation Offset
CalculateOffsetWorkstation = str(self.fields["NTLMSSPSignature"])+str(self.fields["NTLMSSPSignatureNull"])+str(self.fields["NTLMSSPMessageType"])+str(self.fields["NTLMSSPNtWorkstationLen"])+str(self.fields["NTLMSSPNtWorkstationMaxLen"])+str(self.fields["NTLMSSPNtWorkstationBuffOffset"])+str(self.fields["NTLMSSPNtNegotiateFlags"])+str(self.fields["NTLMSSPNtServerChallenge"])+str(self.fields["NTLMSSPNtReserved"])+str(self.fields["NTLMSSPNtTargetInfoLen"])+str(self.fields["NTLMSSPNtTargetInfoMaxLen"])+str(self.fields["NTLMSSPNtTargetInfoBuffOffset"])+str(self.fields["NegTokenInitSeqMechMessageVersionHigh"])+str(self.fields["NegTokenInitSeqMechMessageVersionLow"])+str(self.fields["NegTokenInitSeqMechMessageVersionBuilt"])+str(self.fields["NegTokenInitSeqMechMessageVersionReserved"])+str(self.fields["NegTokenInitSeqMechMessageVersionNTLMType"])
###### AvPairs Offset
CalculateLenAvpairs = str(self.fields["NTLMSSPNTLMChallengeAVPairsId"])+str(self.fields["NTLMSSPNTLMChallengeAVPairsLen"])+str(self.fields["NTLMSSPNTLMChallengeAVPairsUnicodeStr"])+str(self.fields["NTLMSSPNTLMChallengeAVPairs1Id"])+str(self.fields["NTLMSSPNTLMChallengeAVPairs1Len"])+str(self.fields["NTLMSSPNTLMChallengeAVPairs1UnicodeStr"])+(self.fields["NTLMSSPNTLMChallengeAVPairs2Id"])+str(self.fields["NTLMSSPNTLMChallengeAVPairs2Len"])+str(self.fields["NTLMSSPNTLMChallengeAVPairs2UnicodeStr"])+(self.fields["NTLMSSPNTLMChallengeAVPairs3Id"])+str(self.fields["NTLMSSPNTLMChallengeAVPairs3Len"])+str(self.fields["NTLMSSPNTLMChallengeAVPairs3UnicodeStr"])+(self.fields["NTLMSSPNTLMChallengeAVPairs5Id"])+str(self.fields["NTLMSSPNTLMChallengeAVPairs5Len"])+str(self.fields["NTLMSSPNTLMChallengeAVPairs5UnicodeStr"])+(self.fields["NTLMSSPNTLMChallengeAVPairs6Id"])+str(self.fields["NTLMSSPNTLMChallengeAVPairs6Len"])
###### LDAP Packet Len
CalculatePacketLen = str(self.fields["MessageIDASNID"])+str(self.fields["MessageIDASNLen"])+str(self.fields["MessageIDASNStr"])+str(self.fields["OpHeadASNID"])+str(self.fields["OpHeadASNIDLenOfLen"])+str(self.fields["OpHeadASNIDLen"])+str(self.fields["Status"])+str(self.fields["StatusASNLen"])+str(self.fields["StatusASNStr"])+str(self.fields["MatchedDN"])+str(self.fields["ErrorMessage"])+str(self.fields["SequenceHeader"])+str(self.fields["SequenceHeaderLen"])+str(self.fields["SequenceHeaderLenOfLen"])+CalculateOffsetWorkstation+str(self.fields["NTLMSSPNtWorkstationName"])+CalculateLenAvpairs
OperationPacketLen = str(self.fields["Status"])+str(self.fields["StatusASNLen"])+str(self.fields["StatusASNStr"])+str(self.fields["MatchedDN"])+str(self.fields["ErrorMessage"])+str(self.fields["SequenceHeader"])+str(self.fields["SequenceHeaderLen"])+str(self.fields["SequenceHeaderLenOfLen"])+CalculateOffsetWorkstation+str(self.fields["NTLMSSPNtWorkstationName"])+CalculateLenAvpairs
NTLMMessageLen = CalculateOffsetWorkstation+str(self.fields["NTLMSSPNtWorkstationName"])+CalculateLenAvpairs
##### LDAP Len Calculation:
self.fields["ParserHeadASNLen"] = struct.pack(">i", len(CalculatePacketLen))
self.fields["OpHeadASNIDLen"] = struct.pack(">i", len(OperationPacketLen))
self.fields["SequenceHeaderLen"] = struct.pack(">B", len(NTLMMessageLen))
##### Workstation Offset Calculation:
self.fields["NTLMSSPNtWorkstationBuffOffset"] = struct.pack("<i", len(CalculateOffsetWorkstation))
self.fields["NTLMSSPNtWorkstationLen"] = struct.pack("<h", len(str(self.fields["NTLMSSPNtWorkstationName"])))
self.fields["NTLMSSPNtWorkstationMaxLen"] = struct.pack("<h", len(str(self.fields["NTLMSSPNtWorkstationName"])))
        ##### AvPairs Offset Calculation:
self.fields["NTLMSSPNtTargetInfoBuffOffset"] = struct.pack("<i", len(CalculateOffsetWorkstation+str(self.fields["NTLMSSPNtWorkstationName"])))
self.fields["NTLMSSPNtTargetInfoLen"] = struct.pack("<h", len(CalculateLenAvpairs))
self.fields["NTLMSSPNtTargetInfoMaxLen"] = struct.pack("<h", len(CalculateLenAvpairs))
        ##### AvPair Length Calculation:
self.fields["NTLMSSPNTLMChallengeAVPairs5Len"] = struct.pack("<h", len(str(self.fields["NTLMSSPNTLMChallengeAVPairs5UnicodeStr"])))
self.fields["NTLMSSPNTLMChallengeAVPairs3Len"] = struct.pack("<h", len(str(self.fields["NTLMSSPNTLMChallengeAVPairs3UnicodeStr"])))
self.fields["NTLMSSPNTLMChallengeAVPairs2Len"] = struct.pack("<h", len(str(self.fields["NTLMSSPNTLMChallengeAVPairs2UnicodeStr"])))
self.fields["NTLMSSPNTLMChallengeAVPairs1Len"] = struct.pack("<h", len(str(self.fields["NTLMSSPNTLMChallengeAVPairs1UnicodeStr"])))
self.fields["NTLMSSPNTLMChallengeAVPairsLen"] = struct.pack("<h", len(str(self.fields["NTLMSSPNTLMChallengeAVPairsUnicodeStr"])))

#!/usr/bin/python
# Source: lmaycotte/quark / quark/tools/redis_sg_tool.py (license: apache-2.0)
# Copyright 2014 Openstack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Quark Redis Security Groups CLI tool.
Usage: redis_sg_tool [-h] [--config-file=PATH] [--retries=<retries>]
[--retry-delay=<delay>] <command> [--yarly]
Options:
-h --help Show this screen.
--version Show version.
--config-file=PATH Use a different config file path
--retries=<retries> Number of times to re-attempt some operations
--retry-delay=<delay> Amount of time to wait between retries
Available commands are:
redis_sg_tool test-connection
redis_sg_tool vifs-in-redis
redis_sg_tool num-groups
redis_sg_tool ports-with-groups
redis_sg_tool purge-orphans [--yarly]
redis_sg_tool write-groups [--yarly]
redis_sg_tool -h | --help
redis_sg_tool --version
"""
from __future__ import print_function

import sys
import time
import docopt
import netaddr
from neutron.common import config
import neutron.context
from oslo_config import cfg
from quark.cache import security_groups_client as sg_client
from quark.db import api as db_api
from quark import exceptions as q_exc
VERSION = 0.1
RETRIES = 5
RETRY_DELAY = 1
class QuarkRedisTool(object):
def __init__(self, arguments):
self._args = arguments
self._retries = RETRIES
self._retry_delay = RETRY_DELAY
if self._args.get("--retries"):
self._retries = int(self._args["--retries"])
if self._args.get("--retry-delay"):
self._retry_delay = int(self._args["--retry-delay"])
config_args = []
if self._args.get("--config-file"):
config_args.append("--config-file=%s" %
self._args.pop("--config-file"))
self._dryrun = not self._args.get("--yarly")
config.init(config_args)
        if not cfg.CONF.config_file:
            sys.exit("ERROR: Unable to find configuration file via the "
                     "default search paths (~/.neutron/, ~/, /etc/neutron/, "
                     "/etc/) and the '--config-file' option!")
def dispatch(self):
command = self._args.get("<command>")
if command == "test-connection":
self.test_connection()
elif command == "vifs-in-redis":
self.vif_count()
elif command == "num-groups":
self.num_groups()
elif command == "ports-with-groups":
self.ports_with_groups()
elif command == "purge-orphans":
self.purge_orphans(self._dryrun)
elif command == "write-groups":
self.write_groups(self._dryrun)
else:
print("Redis security groups tool. Re-run with -h/--help for "
"options")
def _get_connection(self, giveup=True):
client = sg_client.SecurityGroupsClient()
try:
if client.ping():
return client
except Exception as e:
print(e)
if giveup:
print("Giving up...")
sys.exit(1)
def test_connection(self):
client = self._get_connection()
if client:
print("Connected Successfully")
return True
else:
print("Could not connect to Redis")
return False
def vif_count(self):
client = self._get_connection()
print(len(client.vif_keys(field=sg_client.SECURITY_GROUP_HASH_ATTR)))
def num_groups(self):
ctx = neutron.context.get_admin_context()
print(db_api.security_group_count(ctx))
def ports_with_groups(self):
ctx = neutron.context.get_admin_context()
print(db_api.ports_with_security_groups_count(ctx))
def purge_orphans(self, dryrun=False):
client = self._get_connection()
ctx = neutron.context.get_admin_context()
ports_with_groups = db_api.ports_with_security_groups_find(ctx).all()
if dryrun:
print()
print("Purging orphans in dry run mode. Existing rules in Redis "
"will be checked against those in the database. If any "
"are found in Redis but lack matching database rules, "
"they'll be deleted from the database.\n\nTo actually "
"apply the groups, re-run with the --yarly flag.")
print()
print("Found %s ports with security groups" %
len(ports_with_groups))
# Pre-spin the list of orphans
vifs = {}
for vif in client.vif_keys():
vifs[vif] = False
if dryrun:
print("Found %d VIFs in Redis" % len(vifs))
# Pop off the ones we find in the database
for port in ports_with_groups:
vif_key = client.vif_key(port["device_id"], port["mac_address"])
vifs.pop(vif_key, None)
if dryrun:
print("Found %d orphaned VIF rule sets" % len(vifs))
print('=' * 80)
for orphan in vifs.keys():
if dryrun:
print("VIF %s is orphaned" % orphan)
else:
for retry in xrange(self._retries):
try:
client.delete_key(orphan)
break
except q_exc.RedisConnectionFailure:
time.sleep(self._retry_delay)
client = self._get_connection(giveup=False)
if dryrun:
print('=' * 80)
print()
print("Re-run with --yarly to apply changes")
print("Done!")
def write_groups(self, dryrun=False):
client = self._get_connection()
ctx = neutron.context.get_admin_context()
ports_with_groups = db_api.ports_with_security_groups_find(ctx).all()
if dryrun:
print()
print("Writing groups in dry run mode. Existing rules in Redis "
"will be checked against those in the database, with a "
"running report generated of all those that will be "
"overwritten.\n\nTo actually apply the groups, re-run "
"with the --yarly flag.")
print()
print("Found %s ports with security groups" %
len(ports_with_groups))
if dryrun:
vifs = len(client.vif_keys())
if vifs > 0:
print("There are %d VIFs with rules in Redis, some of which "
"may be overwritten!" % vifs)
print()
overwrite_count = 0
for port in ports_with_groups:
mac = netaddr.EUI(port["mac_address"])
# Rather than loading everything in one giant chunk, we'll make
# trips per port.
group_ids = [g["id"] for g in port.security_groups]
rules = db_api.security_group_rule_find(ctx, group_id=group_ids,
scope=db_api.ALL)
if dryrun:
existing_rules = client.get_rules_for_port(port["device_id"],
port["mac_address"])
if existing_rules:
overwrite_count += 1
db_len = len(rules)
existing_len = len(existing_rules["rules"])
print("== Port ID:%s - MAC:%s - Device ID:%s - "
"Redis Rules:%d - DB Rules:%d" %
(port["id"], mac, port["device_id"], existing_len,
db_len))
if not dryrun:
for retry in xrange(self._retries):
try:
payload = client.serialize_rules(rules)
client.apply_rules(
port["device_id"], port["mac_address"], payload)
break
except q_exc.RedisConnectionFailure:
time.sleep(self._retry_delay)
client = self._get_connection(giveup=False)
if dryrun:
print()
print("Total number of VIFs to overwrite/were overwritten: %s" %
overwrite_count)
diff = vifs - overwrite_count
if diff > 0:
print("Orphaned VIFs in Redis:", diff)
print("Run purge-orphans to clean then up")
if dryrun:
print("Total number of VIFs to write: %d" %
len(ports_with_groups))
if dryrun:
print('=' * 80)
print("Re-run with --yarly to apply changes")
print("Done!")
def main():
arguments = docopt.docopt(__doc__,
version="Quark Redis CLI %.2f" % VERSION)
redis_tool = QuarkRedisTool(arguments)
redis_tool.dispatch()
if __name__ == "__main__":
main()

# Source: bovee/planknn / pyplanknn/convnetwork.py (license: mit)
import json
import numpy as np
from pyplanknn.layer import Layer
from pyplanknn.convlayer import ConvPoolLayer
class ConvNetwork:
"""
A neural network comprised of several layers interconnected.
"""
def __init__(self, n_convs=10, hidden=4, n_outs=121):
"""
Create a series of layers.
"""
if isinstance(hidden, int):
args = hidden * [400] + [n_outs]
else:
args = hidden + [n_outs]
args = iter(args)
prev = next(args)
self.layers = [ConvPoolLayer(n_convs)]
self.layers.append(Layer(25 * n_convs, prev, 'tanh'))
for i in args:
self.layers.append(Layer(prev, i, 'logit'))
prev = i
        # add a softmax layer at the end; use `prev` (== n_outs here), which is
        # defined even when no hidden sizes were given
        self.layers.append(Layer(prev, prev, 'softmax'))
def __call__(self, vector, expected=None, learning_rate=0.1):
"""
If only one argument is passed in, return the results of
running the network on that vector.
If a second argument is received, train the network to return
that result given the first argument.
"""
# run through the network in the forward direction
for layer in self.layers:
vector = layer(vector)
if expected is not None:
# back propogate errors and train layers
error_out = vector - expected
for layer in reversed(self.layers):
new_error_out = layer.error_in(error_out)
layer.train(error_out, learning_rate)
error_out = new_error_out
return vector
def save(self, filename):
"""
        Save all layer weights to the given filename as JSON.
"""
weights = {}
for i, l in enumerate(self.layers):
weights[i] = l.weights.tolist()
open(filename, 'w').write(json.dumps(weights))
def load(filename):
weights = json.loads(open(filename, 'r').read())
n_layers = max(int(i) for i in weights.keys())
n_convs = np.array(weights['0']).shape[0]
n_outs = np.array(weights[str(n_layers)]).shape[1]
hidden = [np.array(weights[str(w)]).shape[1] for w \
in range(1, n_layers - 1)]
network = ConvNetwork(n_convs, hidden, n_outs)
for i in range(n_layers + 1):
network.layers[i].weights = np.array(weights[str(i)])
return network
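
# Minimal usage sketch; the expected input shape is an assumption inferred from
# the 25 * n_convs fan-in of the first dense layer, not documented here:
#   net = ConvNetwork(n_convs=10, hidden=4, n_outs=121)
#   probs = net(patch)                                   # forward pass only
#   net(patch, expected=one_hot, learning_rate=0.1)      # one training step
#   net.save('weights.json')
#   net = load('weights.json')                           # restore weights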

# Source: thenetcircle/dino / dino/hooks/set_acl.py (license: apache-2.0)
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dino import environ
__author__ = 'Oscar Eriksson <[email protected]>'
class OnSetAclHooks(object):
@staticmethod
def set_acl(arg: tuple) -> None:
data, activity = arg
target_id = activity.target.id
is_for_channel = activity.target.object_type == 'channel'
acl_dict = dict()
for acl in activity.object.attachments:
# if the content is None, it means we're removing this ACL
if acl.content is None:
if is_for_channel:
environ.env.db.delete_acl_in_channel_for_action(target_id, acl.object_type, acl.summary)
else:
environ.env.db.delete_acl_in_room_for_action(target_id, acl.object_type, acl.summary)
continue
if acl.summary not in acl_dict:
acl_dict[acl.summary] = dict()
acl_dict[acl.summary][acl.object_type] = acl.content
# might have only removed acls, so could be size 0
if len(acl_dict) > 0:
for api_action, acls in acl_dict.items():
if is_for_channel:
environ.env.db.add_acls_in_channel_for_action(target_id, api_action, acls)
else:
environ.env.db.add_acls_in_room_for_action(target_id, api_action, acls)
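
# Shape of the incoming activity this hook consumes (field meanings inferred
# from the attribute accesses above; values illustrative):
#   activity.target.id           -> room or channel UUID
#   activity.target.object_type  -> 'channel' or 'room'
#   activity.object.attachments  -> list of acl(object_type, summary, content)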
@environ.env.observer.on('on_set_acl')
def _on_set_acl_set_acl(arg: tuple) -> None:
OnSetAclHooks.set_acl(arg)

# Source: paramiko/paramiko / paramiko/rsakey.py (license: lgpl-2.1)
# Copyright (C) 2003-2007 Robey Pointer <[email protected]>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
"""
RSA keys.
"""
from cryptography.exceptions import InvalidSignature
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import rsa, padding
from paramiko.message import Message
from paramiko.pkey import PKey
from paramiko.py3compat import PY2
from paramiko.ssh_exception import SSHException
class RSAKey(PKey):
"""
Representation of an RSA key which can be used to sign and verify SSH2
data.
"""
def __init__(
self,
msg=None,
data=None,
filename=None,
password=None,
key=None,
file_obj=None,
):
self.key = None
self.public_blob = None
if file_obj is not None:
self._from_private_key(file_obj, password)
return
if filename is not None:
self._from_private_key_file(filename, password)
return
if (msg is None) and (data is not None):
msg = Message(data)
if key is not None:
self.key = key
else:
self._check_type_and_load_cert(
msg=msg,
key_type="ssh-rsa",
cert_type="[email protected]",
)
self.key = rsa.RSAPublicNumbers(
e=msg.get_mpint(), n=msg.get_mpint()
).public_key(default_backend())
@property
def size(self):
return self.key.key_size
@property
def public_numbers(self):
if isinstance(self.key, rsa.RSAPrivateKey):
return self.key.private_numbers().public_numbers
else:
return self.key.public_numbers()
def asbytes(self):
m = Message()
m.add_string("ssh-rsa")
m.add_mpint(self.public_numbers.e)
m.add_mpint(self.public_numbers.n)
return m.asbytes()
def __str__(self):
# NOTE: as per inane commentary in #853, this appears to be the least
# crummy way to get a representation that prints identical to Python
# 2's previous behavior, on both interpreters.
# TODO: replace with a nice clean fingerprint display or something
if PY2:
# Can't just return the .decode below for Py2 because stuff still
# tries stuffing it into ASCII for whatever godforsaken reason
return self.asbytes()
else:
return self.asbytes().decode("utf8", errors="ignore")
def __hash__(self):
return hash(
(self.get_name(), self.public_numbers.e, self.public_numbers.n)
)
def get_name(self):
return "ssh-rsa"
def get_bits(self):
return self.size
def can_sign(self):
return isinstance(self.key, rsa.RSAPrivateKey)
def sign_ssh_data(self, data):
sig = self.key.sign(
data, padding=padding.PKCS1v15(), algorithm=hashes.SHA1()
)
m = Message()
m.add_string("ssh-rsa")
m.add_string(sig)
return m
def verify_ssh_sig(self, data, msg):
if msg.get_text() != "ssh-rsa":
return False
key = self.key
if isinstance(key, rsa.RSAPrivateKey):
key = key.public_key()
try:
key.verify(
msg.get_binary(), data, padding.PKCS1v15(), hashes.SHA1()
)
except InvalidSignature:
return False
else:
return True
def write_private_key_file(self, filename, password=None):
self._write_private_key_file(
filename,
self.key,
serialization.PrivateFormat.TraditionalOpenSSL,
password=password,
)
def write_private_key(self, file_obj, password=None):
self._write_private_key(
file_obj,
self.key,
serialization.PrivateFormat.TraditionalOpenSSL,
password=password,
)
@staticmethod
def generate(bits, progress_func=None):
"""
Generate a new private RSA key. This factory function can be used to
generate a new host key or authentication key.
:param int bits: number of bits the generated key should be.
:param progress_func: Unused
:return: new `.RSAKey` private key
"""
key = rsa.generate_private_key(
public_exponent=65537, key_size=bits, backend=default_backend()
)
return RSAKey(key=key)
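
    # Usage sketch (paths illustrative):
    #   key = RSAKey.generate(2048)
    #   key.write_private_key_file('/tmp/test_rsa.key')
    #   pub_line = "%s %s" % (key.get_name(), key.get_base64())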
# ...internals...
def _from_private_key_file(self, filename, password):
data = self._read_private_key_file("RSA", filename, password)
self._decode_key(data)
def _from_private_key(self, file_obj, password):
data = self._read_private_key("RSA", file_obj, password)
self._decode_key(data)
def _decode_key(self, data):
pkformat, data = data
if pkformat == self._PRIVATE_KEY_FORMAT_ORIGINAL:
try:
key = serialization.load_der_private_key(
data, password=None, backend=default_backend()
)
except ValueError as e:
raise SSHException(str(e))
elif pkformat == self._PRIVATE_KEY_FORMAT_OPENSSH:
n, e, d, iqmp, p, q = self._uint32_cstruct_unpack(data, "iiiiii")
public_numbers = rsa.RSAPublicNumbers(e=e, n=n)
key = rsa.RSAPrivateNumbers(
p=p,
q=q,
d=d,
dmp1=d % (p - 1),
dmq1=d % (q - 1),
iqmp=iqmp,
public_numbers=public_numbers,
).private_key(default_backend())
else:
self._got_bad_key_format_id(pkformat)
assert isinstance(key, rsa.RSAPrivateKey)
self.key = key

# Source: rowanv/ndt-e2e-clientworker / client_wrapper/names.py (license: apache-2.0)
# Copyright 2016 Measurement Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines string constant for different names used throughout the system.
This module defines the constant names of different strings used throughout the
testing system to identify NDT clients, browsers, and operating systems.
"""
# NDT client shortnames
NDT_HTML5 = 'ndt_js' # Official NDT HTML5 reference client
# Browser name constants
FIREFOX = 'firefox'
CHROME = 'chrome'
EDGE = 'edge'
SAFARI = 'safari'
# OS shortnames
WINDOWS_10 = 'win10'
UBUNTU_14 = 'ubuntu14.04'
OSX_10_11 = 'osx10.11'

#!/usr/bin/python
# Source: arenadata/ambari / ambari-server/src/main/resources/stacks/ADH/1.6/services/SPARK2/package/scripts/spark_client.py (license: apache-2.0)
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
from resource_management import *
from resource_management.libraries.functions import conf_select
from resource_management.libraries.functions import stack_select
from resource_management.libraries.functions.stack_features import check_stack_feature
from resource_management.libraries.functions import StackFeature
from resource_management.core.exceptions import ClientComponentHasNoStatus
from resource_management.core.resources.system import Execute
from resource_management.core.logger import Logger
from resource_management.core import shell
from setup_spark import setup_spark
class SparkClient(Script):
def install(self, env):
self.install_packages(env)
self.configure(env)
Execute(('ln','-sf', format('/usr/lib/hadoop-hdfs/hadoop-hdfs-client.jar'),'/usr/lib/spark/jars/hadoop-hdfs-client.jar'),
not_if=format("ls /usr/lib/spark/jars/hadoop-hdfs-client.jar"),
only_if=format("ls /usr/lib/hadoop-hdfs/hadoop-hdfs-client.jar"),
sudo=True)
def configure(self, env, upgrade_type=None, config_dir=None):
import params
env.set_params(params)
setup_spark(env, 'client', upgrade_type=upgrade_type, action = 'config')
def status(self, env):
raise ClientComponentHasNoStatus()
def get_component_name(self):
return "spark2-client"
def pre_upgrade_restart(self, env, upgrade_type=None):
import params
env.set_params(params)
if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
Logger.info("Executing Spark2 Client Stack Upgrade pre-restart")
conf_select.select(params.stack_name, "spark", params.version)
stack_select.select("spark2-client", params.version)
if __name__ == "__main__":
SparkClient().execute()

# Source: ewels/MultiQC / multiqc/modules/hops/hops.py (license: gpl-3.0)
""" MultiQC module to parse output from HOPS postprocessing script """
from __future__ import print_function
from collections import OrderedDict
import logging
import json
from multiqc.plots import heatmap
from multiqc.utils import config
from multiqc.modules.base_module import BaseMultiqcModule
log = logging.getLogger(__name__)
class MultiqcModule(BaseMultiqcModule):
def __init__(self):
# Initialise the parent object
super(MultiqcModule, self).__init__(
name="HOPS",
anchor="hops",
href="https://www.https://github.com/rhuebler/HOPS/",
info="is an ancient DNA characteristics screening tool of output from the metagenomic aligner MALT.",
)
# Find and load any HOPS post-processing JSONs
self.hops_data = dict()
for f in self.find_log_files("hops", filehandles=True):
try:
self.parseJSON(f)
except KeyError:
logging.warning("Error loading file {}".format(f["fn"]))
self.hops_data = self.ignore_samples(self.hops_data)
if len(self.hops_data) == 0:
raise UserWarning
log.info("Found {} samples".format(len(self.hops_data)))
# This type of data isn't 'summarise-able' for general stats, so
# skipping straight to heatmap. We also won't write data file to the
# multiqc_data directory because it would be exactly same as input JSON.
self.hops_heatmap()
def parseJSON(self, f):
"""Parse the JSON output from HOPS and save the summary statistics"""
try:
parsed_json = json.load(f["f"])
        except ValueError as e:  # json.JSONDecodeError subclasses ValueError
log.debug("Could not parse HOPS JSON: '{}'".format(f["fn"]))
log.debug(e)
return None
# Convert JSON to dict for easier manipulation
for s in parsed_json:
s_name = self.clean_s_name(s, f)
if s_name in self.hops_data:
log.debug("Duplicate sample name found! Overwriting: {}".format(s_name))
self.add_data_source(f, s_name=s_name)
self.hops_data[s_name] = {}
for t in parsed_json[s]:
self.hops_data[s_name][t] = parsed_json[s][t]
def hops_heatmap(self):
"""Heatmap showing all statuses for every sample"""
heatmap_numbers = {"none": 1, "edit_only": 2, "damage_only": 3, "edit_and_damage": 4}
samples = []
for s in self.hops_data:
samples.append(s)
# As all samples always have same taxa, will take from the first sample
taxa = []
for t in self.hops_data[samples[0]]:
taxa.append(t.replace("_", " "))
# Get values from named list into a list of lists required for heatmap
levels = []
for s in samples:
levels.append(self.hops_data[s].values())
pconfig = {
"id": "hops-heatmap",
"title": "HOPS: Potential Candidates",
"xTitle": "Node",
"yTitle": "Sample",
"min": 0,
"max": 1,
"square": False,
"colstops": [
[1, "#ededed"],
[2, "#FFFFC5"],
[3, "#F2B26C"],
[4, "#AD2A2B"],
],
"decimalPlaces": 0,
"legend": False,
"datalabels": False,
"xcats_samples": False,
}
extra_warning = ""
if len(self.hops_data) > 20:
extra_warning = """
<div class="alert alert-warning">
Large numbers of samples can result in Y-axis labels
overlapping. Drag the handle at the bottom of the plot down
to expand and see all samples names.
</div>
"""
self.add_section(
name="Potential Candidates",
anchor="hops_heatmap",
description="""
Heatmap of candidate taxa for downstream aDNA analysis, with
intensity representing additive categories of possible 'positive'
hits.
"""
+ extra_warning,
helptext="""
HOPS assigns a category based on how many ancient DNA
characteristics a given node (i.e. taxon) in a sample has.
The colours indicate the following:
* <span style="background-color: #ededed; padding:0.2rem 1rem;">**Grey**</span> - No characteristics detected
* <span style="background-color: #FFFFC5; padding:0.2rem 1rem;">**Yellow**</span> - Small edit distance from reference
* <span style="background-color: #F2B26C; padding:0.2rem 1rem;">**Orange**</span> - Typical aDNA damage pattern
* <span style="background-color: #AD2a2B; padding:0.2rem 1rem;">**Red**</span> - Small edit distance _and_ aDNA damage pattern
A red category typically indicates a good candidate for further investigation
in downstream analysis.
""",
plot=heatmap.plot(levels, xcats=taxa, ycats=samples, pconfig=pconfig),
)

# Source: hydratk/hydratk-ext-yoda / src/hydratk/extensions/yoda/testresults/handlers/xjunit/testreport.py (license: bsd-3-clause)
import xml.etree.ElementTree as ET
import xml.dom.minidom as MD
from .testsuite import TestSuite
from .testCase import TestCase
from .utils import forceUnicode, cleanIllegalXmlChars
class TestReport(object):
class XmlDecodingFailure(Exception):
pass
class MergeFailure(Exception):
pass
def __init__(self, testSuites=None, **kwargs):
self.params = {
'time': None,
'name': None,
'tests': None,
'failures': None,
'errors': None,
'disabled': None,
'testSuites': [],
'timeAggregate': sum,
}
self.attributeNames = [
'time',
'name',
'tests',
'failures',
'errors',
'disabled',
]
if 'timeAggregate' in kwargs and kwargs['timeAggregate'] is not None:
self.params['timeAggregate'] = kwargs['timeAggregate']
if testSuites is not None and not isinstance(testSuites, list):
testSuites = [testSuites]
if testSuites is not None:
self.params['testSuites'] = testSuites
self._recalculateParams()
self.params.update(kwargs)
def toRawData(self):
testReportData = {
'testSuites': [],
}
for testSuite in self.params['testSuites']:
testSuiteData = {
'testCases': [],
}
for testCase in testSuite.params['testCases']:
testSuiteData['testCases'].append(testCase.params)
testSuiteData.update(dict([(k, v) for k, v in testSuite.params.items() if
k in testSuite.attributeNames]))
testReportData['testSuites'].append(testSuiteData)
testReportData.update(dict([(k, v) for k, v in self.params.items() if k in self.attributeNames]))
return testReportData
def toXml(self, prettyPrint=False, encoding=None):
testsuitesAttrib = dict([(key, forceUnicode(val, encoding)) for key, val in self.params.items() if
key in self.attributeNames and
val is not None])
testsuitesNode = ET.Element('testsuites', attrib=testsuitesAttrib)
for testSuite in self.params['testSuites']:
testsuiteAttrib = dict([(key, forceUnicode(val, encoding)) for key, val in testSuite.params.items() if
key in testSuite.attributeNames and
val is not None])
testsuiteNode = ET.SubElement(testsuitesNode, 'testsuite', attrib=testsuiteAttrib)
for testCase in testSuite.params['testCases']:
testcaseAttrib = dict([(key, forceUnicode(val, encoding)) for key, val in testCase.params.items() if
key in testCase.attributeNames and
val is not None])
testcaseNode = ET.SubElement(testsuiteNode, 'testcase', attrib=testcaseAttrib)
for childName in testCase.childNames.keys():
childAttrib = dict([(key.split('_')[1], forceUnicode(val, encoding)) for key, val in
testCase.params.items() if
key.startswith('%s_' % childName) and
val is not None])
if testCase.params[childName] is not None or len(childAttrib.items()) > 0:
childNode = ET.SubElement(testcaseNode, testCase.childNames[childName], attrib=childAttrib)
childNode.text = forceUnicode((testCase.params[childName]), encoding)
uglyXml = ET.tostring(testsuitesNode, encoding=encoding)
uglyXml = uglyXml.decode(encoding or 'utf-8')
uglyXml = cleanIllegalXmlChars(uglyXml)
if prettyPrint:
uglyXml = uglyXml.encode(encoding or 'utf-8')
xml = MD.parseString(uglyXml)
xml = xml.toprettyxml(encoding=encoding)
if encoding:
xml = xml.decode(encoding or 'utf-8')
return xml
return uglyXml
def fromXml(self, xmlStr, encoding=None):
self._clearAttributes()
xmlStr = xmlStr.encode(encoding or 'utf-8')
root = ET.fromstring(xmlStr)
if root.tag != 'testsuites':
raise self.XmlDecodingFailure
self._fillAttributes(root.attrib)
self.params['testSuites'] = []
for child in root:
if child.tag == 'testsuite':
testSuite = TestSuite()
testSuite._fillAttributes(child.attrib)
for subchild in child:
if subchild.tag == 'testcase':
testCase = TestCase()
testCase._fillAttributes(subchild.attrib)
for subsubchild in subchild:
if subsubchild.tag in testCase.childNames.values():
childNamesToParamNames = dict([(v, k) for k, v in testCase.childNames.items()])
paramName = childNamesToParamNames[subsubchild.tag]
testCase.params[paramName] = subsubchild.text
for attributeName, attributeValue in subsubchild.attrib.items():
testCase.params['%s_%s' % (paramName, attributeName)] = attributeValue
testSuite.params['testCases'].append(testCase)
testSuite._recalculateParams()
self.params['testSuites'].append(testSuite)
self._recalculateParams()
def merge(self, testReport, recalculate=True):
testSuiteNames = [ts.params['name'] for ts in self.params['testSuites'] if ts.params['name'] is not None]
testSuitesToAdd = [ts for ts in testReport.params['testSuites'] if ts.params['name'] not in testSuiteNames]
testSuitesToMerge = [ts for ts in testReport.params['testSuites'] if ts.params['name'] in testSuiteNames]
self.params['testSuites'] += testSuitesToAdd
[intTs.merge(extTs, recalculate) for intTs in self.params['testSuites'] for extTs in testSuitesToMerge if
intTs.params['name'] == extTs.params['name']]
if recalculate:
self._recalculateParams()
def __str__(self):
return str(self.params)
def _clearAttributes(self):
for attributeName in self.attributeNames:
self.params[attributeName] = None
def _fillAttributes(self, attributes):
for attributeName in self.attributeNames:
if attributeName in attributes:
self.params[attributeName] = attributes[attributeName]
def _recalculateParams(self):
def anything2int(anything):
try:
return int(anything)
except:
return None
def anything2float(anything):
try:
return float(anything)
except:
return None
timesInSuites = [anything2float(ts.params['time']) for ts in self.params['testSuites']]
timesInSuites = [time for time in timesInSuites if time is not None]
self.params['time'] = self.params['timeAggregate'](timesInSuites)
testsInSuites = [anything2int(ts.params['tests']) for ts in self.params['testSuites']]
testsInSuites = [tests for tests in testsInSuites if tests is not None]
self.params['tests'] = sum(testsInSuites)
failuresInSuites = [anything2int(ts.params['failures']) for ts in self.params['testSuites']]
failuresInSuites = [failures for failures in failuresInSuites if failures is not None]
self.params['failures'] = sum(failuresInSuites)
errorsInSuites = [anything2int(ts.params['errors']) for ts in self.params['testSuites']]
errorsInSuites = [errors for errors in errorsInSuites if errors is not None]
self.params['errors'] = sum(errorsInSuites)
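
# Round-trip usage sketch (file names illustrative):
#   report = TestReport()
#   report.fromXml(open('junit_a.xml').read())
#   other = TestReport()
#   other.fromXml(open('junit_b.xml').read())
#   report.merge(other)                      # combines suites by name
#   print(report.toXml(prettyPrint=True))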

# Source: ramineni/my_congress / congress/datasources/cinder_driver.py (license: apache-2.0)
# Copyright (c) 2014 Montavista Software, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import cinderclient.client
from congress.datasources import constants
from congress.datasources import datasource_driver
from congress.datasources import datasource_utils as ds_utils
class CinderDriver(datasource_driver.PollingDataSourceDriver,
datasource_driver.ExecutionDriver):
VOLUMES = "volumes"
SNAPSHOTS = "snapshots"
SERVICES = "services"
# This is the most common per-value translator, so define it once here.
value_trans = {'translation-type': 'VALUE'}
volumes_translator = {
'translation-type': 'HDICT',
'table-name': VOLUMES,
'selector-type': 'DOT_SELECTOR',
'field-translators':
({'fieldname': 'id', 'translator': value_trans},
{'fieldname': 'size', 'translator': value_trans},
{'fieldname': 'user_id', 'translator': value_trans},
{'fieldname': 'status', 'translator': value_trans},
{'fieldname': 'description', 'translator': value_trans},
{'fieldname': 'name', 'translator': value_trans},
{'fieldname': 'bootable', 'translator': value_trans},
{'fieldname': 'created_at', 'translator': value_trans},
{'fieldname': 'volume_type', 'translator': value_trans})}
snapshots_translator = {
'translation-type': 'HDICT',
'table-name': SNAPSHOTS,
'selector-type': 'DOT_SELECTOR',
'field-translators':
({'fieldname': 'id', 'translator': value_trans},
{'fieldname': 'size', 'translator': value_trans},
{'fieldname': 'status', 'translator': value_trans},
{'fieldname': 'volume_id', 'translator': value_trans},
{'fieldname': 'name', 'translator': value_trans},
{'fieldname': 'created_at', 'translator': value_trans})}
services_translator = {
'translation-type': 'HDICT',
'table-name': SERVICES,
'selector-type': 'DOT_SELECTOR',
'field-translators':
({'fieldname': 'status', 'translator': value_trans},
{'fieldname': 'binary', 'translator': value_trans},
{'fieldname': 'zone', 'translator': value_trans},
{'fieldname': 'state', 'translator': value_trans},
{'fieldname': 'updated_at', 'translator': value_trans},
{'fieldname': 'host', 'translator': value_trans},
{'fieldname': 'disabled_reason', 'translator': value_trans})}
TRANSLATORS = [volumes_translator, snapshots_translator,
services_translator]
def __init__(self, name='', args=None):
super(CinderDriver, self).__init__(name, args=args)
datasource_driver.ExecutionDriver.__init__(self)
session = ds_utils.get_keystone_session(args)
self.cinder_client = cinderclient.client.Client(version='2',
session=session)
self.add_executable_client_methods(self.cinder_client,
'cinderclient.v2.')
self.initialize_update_method()
self._init_end_start_poll()
@staticmethod
def get_datasource_info():
result = {}
result['id'] = 'cinder'
result['description'] = ('Datasource driver that interfaces with '
'OpenStack cinder.')
result['config'] = ds_utils.get_openstack_required_config()
result['config']['lazy_tables'] = constants.OPTIONAL
result['secret'] = ['password']
return result
def initialize_update_method(self):
volumes_method = lambda: self._translate_volumes(
self.cinder_client.volumes.list(detailed=True,
search_opts={'all_tenants': 1}))
self.add_update_method(volumes_method, self.volumes_translator)
snapshots_method = lambda: self._translate_snapshots(
self.cinder_client.volume_snapshots.list(
detailed=True, search_opts={'all_tenants': 1}))
self.add_update_method(snapshots_method, self.snapshots_translator)
services_method = lambda: self._translate_services(
self.cinder_client.services.list(host=None, binary=None))
self.add_update_method(services_method, self.services_translator)
@ds_utils.update_state_on_changed(VOLUMES)
def _translate_volumes(self, obj):
row_data = CinderDriver.convert_objs(obj, self.volumes_translator)
return row_data
@ds_utils.update_state_on_changed(SNAPSHOTS)
def _translate_snapshots(self, obj):
row_data = CinderDriver.convert_objs(obj, self.snapshots_translator)
return row_data
@ds_utils.update_state_on_changed(SERVICES)
def _translate_services(self, obj):
row_data = CinderDriver.convert_objs(obj, self.services_translator)
return row_data
def execute(self, action, action_args):
"""Overwrite ExecutionDriver.execute()."""
# action can be written as a method or an API call.
func = getattr(self, action, None)
if func and self.is_executable(func):
func(action_args)
else:
self._execute_api(self.cinder_client, action, action_args)
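# Illustrative note, not part of the original driver: each HDICT translator
# above defines one Congress table whose columns follow the order of its
# field-translators. A hypothetical volume object with
#
#   vol.id = 'abc', vol.size = 10, vol.user_id = 'u1', vol.status = 'available'
#
# would therefore be flattened by CinderDriver.convert_objs() into a row of
# the 'volumes' table beginning ('abc', 10, 'u1', 'available', ...).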
| apache-2.0 | -2,842,564,202,007,806,000 | 42.282609 | 78 | 0.615269 | false |
italomaia/turtle-linux | games/Evolution/lib/trees.py | 1 | 6760 | import random
from OpenGL.GL import *
from euclid import *
def draw_trunk(bl, tr):
'''
- bottom & top control points at same point
- may vary from close to bottom to half way
- may vary from either side to other side
'''
x1, y1 = bl
y1 -= 64 # XXX OMG HAX HAX OMG SUXX0R HAX!!!1111111!1one1!!1
x2, y2 = tr
hy = (y2-y1)/2.
lx = x1 + (x2-x1) * (random.random()-.5)
rx = x2 + (x2-x1) * (random.random()-.5)
lhy = hy + hy * (random.random()-.5)
rhy = hy + hy * (random.random()-.5)
points = [
[(x1, y1, 0), (lx, y1+lhy, 0), (lx, y1+lhy, 0), (x1, y2, 0)],
[(x2, y1, 0), (rx, y1+rhy, 0), (rx, y1+rhy, 0), (x2, y2, 0)],
]
glColor(151/255., 123/255., 49/255., 1)
glMap2f(GL_MAP2_VERTEX_3, 0, 1, 0, 1, points)
glMapGrid2f(12, 0.0, 1.0, 12, 0.0, 1.0)
glEvalMesh2(GL_FILL, 0, 12, 0, 12)
def draw_pointy(bl, tr):
'''
- triangle with base 1/3 width of height
- control points in direction of opposite points, half length or less,
varying either way from base line by up to +/- half point inside
angle
'''
x1, y1 = bl
x2, y2 = tr
hx = (x2-x1)/2.
fy = (y2-y1)/3.
draw_trunk((x1 + hx - hx/4, y1), (x1 + hx + hx/4, y1 + fy))
y1 += fy/2
# re-calc
x1 += hx * random.random() *.3
x2 -= hx * random.random() *.3
hx = (x2-x1)/2.
hy = (y2-y1)/2.
p1 = Point2(x1, y1)
p2 = Point2(x2, y1)
p3 = Point2(x1 + hx, y2)
# left side
mp = Point2(x1+hx/2, y1+hy)
v1 = p3 - p1
r = Matrix3.new_rotate(math.pi/2)
vp = r * v1.normalized() * ((random.random()-.5) * hx)
mp1 = mp + vp
points = [
[(p1.x, p1.y, 0), (mp1.x, mp1.y, 0), (mp1.x, mp1.y, 0), (p3.x, p3.y, 0)],
[(p1.x, p1.y, 0), (p1.x, p1.y, 0), (x1+hx, y1+hy, 0), (x1+hx, y1+hy, 0)],
]
glColor(123/255., 191/255., 49/255., 1)
glMap2f(GL_MAP2_VERTEX_3, 0, 1, 0, 1, points)
glMapGrid2f(12, 0.0, 1.0, 12, 0.0, 1.0)
glEvalMesh2(GL_FILL, 0, 12, 0, 12)
# right side
mp = Point2(x2-hx/2, y1+hy)
v1 = p3 - p1
r = Matrix3.new_rotate(math.pi/2)
vp = r * v1.normalized() * ((random.random()-.5) * hx)
mp1 = mp + vp
points = [
[(p2.x, p2.y, 0), (mp1.x, mp1.y, 0), (mp1.x, mp1.y, 0), (p3.x, p3.y, 0)],
[(p2.x, p2.y, 0), (p2.x, p2.y, 0), (x1+hx, y1+hy, 0), (x1+hx, y1+hy, 0)],
]
glColor(123/255., 191/255., 49/255., 1)
glMap2f(GL_MAP2_VERTEX_3, 0, 1, 0, 1, points)
glMapGrid2f(12, 0.0, 1.0, 12, 0.0, 1.0)
glEvalMesh2(GL_FILL, 0, 12, 0, 12)
# bottom side
mp = Point2(x1+hx, y1)
v1 = Vector2(0, 1) * ((random.random()-.5) * hy/2)
mp1 = mp + v1
points = [
[(p1.x, p1.y, 0), (mp1.x, mp1.y, 0), (mp1.x, mp1.y, 0), (p2.x, p2.y, 0)],
[(p1.x, p1.y, 0), (p1.x, p1.y, 0), (x1+hx, y1+hy, 0), (x1+hx, y1+hy, 0)],
]
glColor(123/255., 191/255., 49/255., 1)
glMap2f(GL_MAP2_VERTEX_3, 0, 1, 0, 1, points)
glMapGrid2f(12, 0.0, 1.0, 12, 0.0, 1.0)
glEvalMesh2(GL_FILL, 0, 12, 0, 12)
def draw_fluffy(bl, tr):
'''
- N points around circle
- control points at up to 45 degrees from normal of point from circle
- length of control vector between 1/2R to R
'''
x1, y1 = bl
x2, y2 = tr
hx = (x2-x1)/2.
hy = (y2-y1)/2.
draw_trunk((x1 + hx - hx/4, y1), (x1 + hx + hx/4, y1 + hy))
y1 += hy/2
# re-calc
hx = (x2-x1)/2.
hy = (y2-y1)/2.
v = Vector2((x2-x1)/2., 0)
m = Point2(x1 + hx, y1+ hy)
NUM = random.choice((3, 4, 5))
angles = []
points = []
for i in range(NUM):
angle = math.pi * 2 * i / NUM
angle += math.pi * (random.random()-.5) * .1
angles.append(angle)
r = Matrix3.new_rotate(angle)
points.append(m + r * v)
glColor(123/255., 191/255., 49/255., 1)
glBegin(GL_POLYGON)
for i in range(NUM):
glVertex2f(*points[i])
glEnd()
# now figure control points for sides
for i in range(NUM):
if i == NUM-1: p1, p2 = points[i], points[0]
else: p1, p2 = points[i], points[i+1]
if i == NUM-1: a1, a2 = angles[i], angles[0] + math.pi*2
else: a1, a2 = angles[i], angles[i+1]
da = abs(a2-a1) / 2
a1 += math.pi/(NUM*2) + da * random.random()
a2 -= math.pi/(NUM*2) + da * random.random()
l1 = hx + hx * random.random()
l2 = hx + hx * random.random()
mp1 = p1 + Matrix3.new_rotate(a1) * Vector2(1, 0) * l1
mp2 = p2 + Matrix3.new_rotate(a2) * Vector2(1, 0) * l2
p = [
[(p1.x, p1.y, 0), (mp1.x, mp1.y, 0), (mp2.x, mp2.y, 0),
(p2.x, p2.y, 0)],
[(p1.x, p1.y, 0), (p1.x, p1.y, 0), (p2.x, p2.y, 0), (p2.x, p2.y, 0)],
]
glColor(123/255., 191/255., 49/255., 1)
glMap2f(GL_MAP2_VERTEX_3, 0, 1, 0, 1, p)
glMapGrid2f(12, 0.0, 1.0, 12, 0.0, 1.0)
glEvalMesh2(GL_FILL, 0, 12, 0, 12)
'''
glPointSize(5)
colors = (
(255, 0, 0),
(255, 255, 0),
(255, 0, 255),
(0, 0, 255),
)
glBegin(GL_LINES)
glColor(*colors[i])
glVertex2f(*m)
glVertex2f(*p1)
glVertex2f(*m)
glVertex2f(*p2)
glVertex3f(*p[0][0])
glVertex3f(*p[0][1])
glVertex3f(*p[0][2])
glVertex3f(*p[0][3])
glEnd()
'''
if __name__ == '__main__':
import sys
import pygame
from pygame.locals import *
pygame.init()
vw, vh = viewport = (1024, 768)
screen = pygame.display.set_mode(viewport, OPENGL | DOUBLEBUF)
clock = pygame.time.Clock()
# set up 2d mode
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
glViewport(0, 0, vw, vh)
glOrtho(0, vw, 0, vh, -50, 50)
glMatrixMode(GL_MODELVIEW)
glClearColor(.3, .3, .3, 1)
glLoadIdentity()
glDisable(GL_LIGHTING)
glDisable(GL_DEPTH_TEST)
glEnable(GL_MAP2_VERTEX_3)
while 1:
ts = clock.tick(60)
for e in pygame.event.get():
if e.type == QUIT: sys.exit()
if e.type == KEYDOWN and e.key == K_ESCAPE: sys.exit()
if e.type == KEYDOWN and e.key == K_SPACE:
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
glBegin(GL_LINES)
glColor(1, 1, 1, 1)
glVertex2f(0, 100)
glVertex2f(1024, 100)
glEnd()
glPushMatrix()
glTranslate(0, 100, 0)
draw_pointy((0, 0), (256, 512))
glTranslate(256, 0, 0)
draw_fluffy((0, 0), (256, 512))
glPopMatrix()
pygame.display.flip()
| gpl-3.0 | -2,740,534,610,567,499,300 | 28.264069 | 80 | 0.48713 | false |
CMU-dFabLab/dfab | python/dfab/gui/gl_drawing.py | 1 | 8505 | """Basic drawing functions that only depend on plain OpenGL. These are higher
level than raw GL and similar in purpose to the GLU functions.
gl_drawing.py, Copyright (c) 2001-2014, Garth Zeglin. All rights
reserved. Licensed under the terms of the BSD 3-clause license as included in
LICENSE.
"""
from OpenGL.GL import *
from OpenGL.GLU import *
import math
def RADIAN(degrees): return degrees * math.pi / 180.0
def DEG(radians): return radians * 180.0 / math.pi
#================================================================
def gl_draw_checkerboard_ground_plane( xmin = -3.0, ymin = -3.0, xmax = 3.0, ymax = 3.0, fore = 0.8, back = 0.2, gnd_z = 0.0 ):
"""Draw a ground plane as white and gray unit squares within a rectangle."""
glBegin( GL_QUADS )
x0 = math.floor(xmin)
y0 = math.floor(ymin)
xcount = int(math.ceil(xmax - xmin))
ycount = int(math.ceil(ymax - ymin))
for i in range(xcount):
x = x0 + i
for j in range(ycount):
y = y0 + j
intensity = fore if ((i&1)^(j&1)) == 0 else back
glColor3f( intensity, intensity, intensity )
glNormal3f(0.0, 0.0, 1.0)
glVertex3f( x , y , gnd_z )
glVertex3f( x + 1.0, y , gnd_z )
glVertex3f( x + 1.0, y + 1.0, gnd_z )
glVertex3f( x , y + 1.0, gnd_z )
glEnd()
return
#================================================================
def gl_init_default_lighting():
"""Define some kind of basic OpenGL lighting so things are visible."""
# With W==0, specify the light as directional and located at these
# coordinates in the current modelview. With a directional light,
# diffuse and specular components will take direction into account
# but not position, so this produces a "solar" field of parallel
# uniform rays.
light_position = [ 5.0, -5.0, 5.0, 0.0 ]
# With W==1, specify the light as positional and located at these
# coordinates in the current modelview.
# light_position = [ 5.0, -5.0, 5.0, 1.0 ]
light_specular = [ 1.0, 1.0, 1.0, 1.0 ] # specular RGBA intensity
light_ambient = [ 0.1, 0.1, 0.1, 1.0 ] # ambient RGBA intensity
light_diffuse = [ 0.9, 0.9, 0.9, 1.0 ] # diffuse RGBA intensity
glLightfv(GL_LIGHT0, GL_POSITION, light_position)
glLightfv(GL_LIGHT0, GL_SPECULAR, light_specular)
glLightfv(GL_LIGHT0, GL_AMBIENT, light_ambient)
glLightfv(GL_LIGHT0, GL_DIFFUSE, light_diffuse)
# The attenuation for positional lights (not directional) is
# defined by distance D between a vertex and a light as follows:
# factor = 1.0 / ( constant + D*linear + D*D*quadratic ).
# The default parameters are (constant == 1, linear == 0, quadratic == 0).
# glLightf(GL_LIGHT0, GL_LINEAR_ATTENUATION, 0.1 )
glEnable(GL_LIGHTING)
glEnable(GL_LIGHT0)
glDepthFunc(GL_LEQUAL)
glEnable(GL_DEPTH_TEST)
# Set the default material specularity.
glMaterialf(GL_FRONT, GL_SHININESS, 5.0 )
specularity = [ 0.3, 0.3, 0.3, 1.0 ]
glMaterialfv(GL_FRONT, GL_SPECULAR, specularity)
# By default, have material parameters track the current color, so
# the material color will can be changed just by calls to glColor.
# The default glColorMaterial mode is GL_FRONT_AND_BACK and
# GL_AMBIENT_AND_DIFFUSE.
glEnable(GL_COLOR_MATERIAL)
return
#================================================================
def set_camera_xyzypr( x, y, z, # camera position
yaw, pitch, roll, # camera orientation
fov, # field of view
far ): # view distance
"""Set the view using general camera parameters. The camera has a
    location in space, an orientation specified as yaw, pitch, and roll angles (in radians), a
field of view specified in degrees, and a camera view range
(clipping depth).
Arguments:
x, y, z -- camera position
    yaw, pitch, roll -- camera orientation angles, in radians
fov -- field of view in degrees
far -- view distance
"""
view_x, view_y, winWidth, winHeight = glGetIntegerv(GL_VIEWPORT)
if winWidth < winHeight:
        hwidth = math.tan(0.5 * RADIAN(fov))
hheight = hwidth * (float(winHeight) / winWidth)
else:
hheight = math.tan(0.5 * RADIAN(fov))
hwidth = hheight * (float(winWidth) / winHeight)
# set projection
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
glFrustum( -hwidth, hwidth, -hheight, hheight, 1.0, far )
# set camera transform
glMatrixMode(GL_MODELVIEW)
glLoadIdentity() # start as right handed system looking along -Z
# rotate to look along +X with +Z up
glRotatef( 90.0, 0.0, 0.0, 1.0 )
glRotatef( 90.0, 0.0, 1.0, 0.0 )
# position the focal plane
    glRotatef(DEG(roll), -1.0, 0.0, 0.0)
    glRotatef(DEG(pitch), 0.0, -1.0, 0.0)
    glRotatef(DEG(yaw), 0.0, 0.0, -1.0)
glTranslatef( -x, -y, -z )
return
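# Illustrative call (assumes an active OpenGL context and viewport): place
# the camera five units down -X and one unit up, level, with a 60 degree
# field of view and a 100 unit clip range. Note the mixed units: fov is in
# degrees, while yaw/pitch/roll are radians (they pass through DEG() above).
#
#   set_camera_xyzypr( -5.0, 0.0, 1.0,   # x, y, z
#                      0.0, 0.0, 0.0,    # yaw, pitch, roll (radians)
#                      60.0,             # fov (degrees)
#                      100.0 )           # far clip distance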
#================================================================
# FIXME: This is translated from C and could probably be made faster for Python.
# 24x3 vertex data for a rectangular solid.
cube = \
[
[-1,-1, 1], [ 1,-1, 1], [ 1, 1, 1], [-1, 1, 1], # top (Z == 1)
[-1,-1,-1], [-1, 1,-1], [ 1, 1,-1], [ 1,-1,-1], # bottom (Z == -1)
[-1, 1, 1], [ 1, 1, 1], [ 1, 1,-1], [-1, 1,-1], # left (Y == 1)
[-1,-1, 1], [-1,-1,-1], [ 1,-1,-1], [ 1,-1, 1], # right (Y == -1)
[ 1,-1,-1], [ 1, 1,-1], [ 1, 1, 1], [ 1,-1, 1], # front (X == 1)
[-1,-1,-1], [-1,-1, 1], [-1, 1, 1], [-1, 1,-1] ] # back (X == -1)
# 6x3 list for normal vectors
cubenormal = \
[
[ 0.0, 0.0, 1.0 ], # top
[ 0.0, 0.0, -1.0 ], # bottom
[ 0.0, 1.0, 0.0 ], # left
[ 0.0, -1.0, 0.0 ], # right
[ 1.0, 0.0, 0.0 ], # front
[ -1.0, 0.0, 0.0 ]] # back
def gl_draw_box( x, y, z ):
"""Draw a simple box centered on the origin.
Arguments:
x, y, z -- dimensions along the axes
"""
    hx = 0.5 * x
    hy = 0.5 * y
    hz = 0.5 * z
glBegin (GL_QUADS)
i = 0
for face in range(6):
glNormal3fv ( cubenormal[face] )
for j in range(4):
glVertex3f ( hx if (cube[i][0] > 0) else -hx,
hy if (cube[i][1] > 0) else -hy,
hz if (cube[i][2] > 0) else -hz )
i += 1
glEnd()
return
#================================================================
def gl_draw_arrow( length, quad ):
"""Draw an arrow along +Z in OpenGL using a line and a GLU cone (i.e. tapered cylinder).
Arguments:
length -- the length of each axis
quadric -- a GLU quadric object to use for drawing
"""
# draw the stem
glBegin( GL_LINES )
glVertex3f( 0.0, 0.0, 0.0 )
glVertex3f( 0.0, 0.0, 0.75*length )
glEnd()
# draw the conical tip
glPushMatrix()
glTranslatef( 0.0, 0.0, 0.75*length )
gluCylinder( quad, 0.1*length, 0.0, 0.25*length, 9, 1 )
glPopMatrix()
return
def gl_draw_frame( length = 1.0, quadric = None):
"""Illustrate the current OpenGL transformation matrix with red, green, and blue arrows aligned with the unit axes.
Optional arguments:
length -- the length of each axis (default = 1.0 unit)
quadric -- a GLU quadric object to use for drawing (default is to create a temporary one)
"""
# create a new quadric state if none was provided
quad = gluNewQuadric() if quadric is None else quadric
glColor3f( 1.0, 0.0, 0.0 )
glPushMatrix()
glRotatef( 90.0, 0.0, 1.0, 0.0 )
gl_draw_arrow( length, quad ) # +X
glRotatef( -90.0, 1.0, 0.0, 0.0 )
glColor3f( 0.0, 1.0, 0.0 )
gl_draw_arrow( length, quad ) # +Y
glPopMatrix()
glColor3f( 0.0, 0.0, 1.0 )
gl_draw_arrow( length, quad ) # +Z
if quadric is None:
gluDeleteQuadric( quad ) # free the temporary quadric
return
def gl_draw_transform( matrix, **kwargs ):
"""Draw a red-green-blue X-Y-Z coordinate frame representing the given homogeneous transform matrix."""
glPushMatrix()
glMultMatrixd( matrix.transpose() ) # OpenGL expects a different matrix layout
gl_draw_frame( **kwargs )
glPopMatrix()
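# Illustrative usage (assumes a numpy-style 4x4 homogeneous transform; the
# function only needs an object with .transpose(), as used above):
#
#   import numpy as np
#   T = np.eye(4)
#   T[0:3, 3] = (1.0, 0.0, 0.5)       # translate the frame
#   gl_draw_transform(T, length=0.5)  # draws the RGB axes at that pose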
#================================================================
| bsd-3-clause | -6,041,161,415,964,834,000 | 34.4375 | 127 | 0.546972 | false |
becm/meson | mesonbuild/modules/__init__.py | 1 | 3243 | # Copyright 2019 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file contains the base ExtensionModule class and shared helper
# functions used by Meson's importable modules.
import os
from .. import build
from ..mesonlib import unholder
class ExtensionModule:
def __init__(self, interpreter):
self.interpreter = interpreter
self.snippets = set() # List of methods that operate only on the interpreter.
def is_snippet(self, funcname):
return funcname in self.snippets
def get_include_args(include_dirs, prefix='-I'):
'''
Expand include arguments to refer to the source and build dirs
by using @SOURCE_ROOT@ and @BUILD_ROOT@ for later substitution
'''
if not include_dirs:
return []
dirs_str = []
for dirs in unholder(include_dirs):
if isinstance(dirs, str):
dirs_str += ['%s%s' % (prefix, dirs)]
continue
# Should be build.IncludeDirs object.
basedir = dirs.get_curdir()
for d in dirs.get_incdirs():
expdir = os.path.join(basedir, d)
srctreedir = os.path.join('@SOURCE_ROOT@', expdir)
buildtreedir = os.path.join('@BUILD_ROOT@', expdir)
dirs_str += ['%s%s' % (prefix, buildtreedir),
'%s%s' % (prefix, srctreedir)]
for d in dirs.get_extra_build_dirs():
dirs_str += ['%s%s' % (prefix, d)]
return dirs_str
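# Illustrative behaviour (example values, not from the original file): a
# plain string is prefixed directly,
#
#   get_include_args(['/usr/include/glib-2.0'])  # -> ['-I/usr/include/glib-2.0']
#
# while build.IncludeDirs entries expand each include dir into a
# '@BUILD_ROOT@' and a '@SOURCE_ROOT@' variant so the real roots can be
# substituted later.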
def is_module_library(fname):
'''
Check if the file is a library-like file generated by a module-specific
target, such as GirTarget or TypelibTarget
'''
if hasattr(fname, 'fname'):
fname = fname.fname
suffix = fname.split('.')[-1]
return suffix in ('gir', 'typelib')
class ModuleReturnValue:
def __init__(self, return_value, new_objects):
self.return_value = return_value
assert(isinstance(new_objects, list))
self.new_objects = new_objects
class GResourceTarget(build.CustomTarget):
def __init__(self, name, subdir, subproject, kwargs):
super().__init__(name, subdir, subproject, kwargs)
class GResourceHeaderTarget(build.CustomTarget):
def __init__(self, name, subdir, subproject, kwargs):
super().__init__(name, subdir, subproject, kwargs)
class GirTarget(build.CustomTarget):
def __init__(self, name, subdir, subproject, kwargs):
super().__init__(name, subdir, subproject, kwargs)
class TypelibTarget(build.CustomTarget):
def __init__(self, name, subdir, subproject, kwargs):
super().__init__(name, subdir, subproject, kwargs)
class VapiTarget(build.CustomTarget):
def __init__(self, name, subdir, subproject, kwargs):
super().__init__(name, subdir, subproject, kwargs)
| apache-2.0 | -1,228,796,452,171,816,400 | 33.136842 | 85 | 0.654641 | false |
Ecogenomics/GtdbTk | tests/test_classify.py | 1 | 6867 | ###############################################################################
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
###############################################################################
import os
import shutil
import tempfile
import unittest
import dendropy
import gtdbtk.config.config as Config
from gtdbtk.biolib_lite.taxonomy import Taxonomy
from gtdbtk.classify import Classify
from gtdbtk.config.output import *
from gtdbtk.io.pplacer_classification import PplacerClassifyFileAR122
class TestClassify(unittest.TestCase):
def setUp(self):
self.classify = Classify()
self.out_dir = tempfile.mkdtemp(prefix='gtdbtk_tmp_')
self.prefix = 'gtdbtk'
self.pplacer_dir_reference = 'tests/data/pplacer_dir_reference'
self.aln_dir_ref = 'tests/data/align_dir_reference/align'
self.user_msa_file = os.path.join(self.aln_dir_ref, 'gtdbtk.ar122.user_msa.fasta')
self.taxonomy_file = Config.TAXONOMY_FILE
self.gtdb_taxonomy = Taxonomy().read(self.taxonomy_file)
def tearDown(self):
shutil.rmtree(self.out_dir)
def test_standardise_taxonomy(self):
taxstring = 'p__phylum1;c_class1'
marker_set = 'bac120'
new_taxstring = self.classify.standardise_taxonomy(
taxstring, marker_set)
self.assertEqual(
new_taxstring, 'd__Bacteria;p__phylum1;c_class1;o__;f__;g__;s__')
# Test that the correct domain is returned.
self.assertEqual(self.classify.standardise_taxonomy('p__P;c__C;o__O;f__F;g__G;s__S', 'bac120'),
'd__Bacteria;p__P;c__C;o__O;f__F;g__G;s__S')
self.assertEqual(self.classify.standardise_taxonomy('p__P;c__C;o__O;f__F;g__G;s__S', 'ar122'),
'd__Archaea;p__P;c__C;o__O;f__F;g__G;s__S')
# Remove ranks and check
rank_order = {'p': 0, 'c': 1, 'o': 2, 'f': 3, 'g': 4, 's': 5}
rank_lst = ['p__P', 'c__C', 'o__O', 'f__F', 'g__G', 's__S']
ranks = {'p': 'P', 'c': 'C', 'o': 'O', 'f': 'F', 'g': 'G', 's': 'S'}
dom_info = {'d__Bacteria': 'bac120', 'd__Archaea': 'ar122'}
for k in range(1, len(ranks) - 1):
for cur_domain in ('d__Bacteria', 'd__Archaea'):
ranks_selected = rank_lst[0:-k]
expected = list()
test_lst = list()
                for cur_rank, _ in sorted(rank_order.items(), key=lambda x: x[1]):
if cur_rank in ranks_selected:
test_lst.append(f'{cur_rank}__{ranks[cur_rank]}')
expected.append(f'{cur_rank}__{ranks[cur_rank]}')
else:
expected.append(f'{cur_rank}__')
expected_str = f'{cur_domain};{";".join(expected)}'
test_str = ";".join(test_lst)
cur_dom = dom_info[cur_domain]
test_value = self.classify.standardise_taxonomy(test_str, cur_dom)
self.assertEqual(expected_str, test_value)
def test_get_pplacer_taxonomy(self):
if not os.path.exists(self.out_dir):
os.makedirs(self.out_dir)
tree = dendropy.Tree.get_from_path(os.path.join(os.getcwd(), self.pplacer_dir_reference,
'gtdbtk.ar122.classify.tree'),
schema='newick',
rooting='force-rooted',
preserve_underscores=True)
pplacer_classify_file = PplacerClassifyFileAR122(self.out_dir, self.prefix)
self.classify._get_pplacer_taxonomy(pplacer_classify_file, 'ar122', self.user_msa_file, tree)
results = {}
with open(os.path.join(self.out_dir, PATH_AR122_PPLACER_CLASS.format(prefix=self.prefix)), 'r') as f:
for line in f:
infos = line.strip().split('\t')
results[infos[0]] = infos[1]
self.assertTrue(len(results) == 3)
self.assertTrue('genome_1' in results)
self.assertTrue('genome_2' in results)
self.assertTrue('genome_3' in results)
self.assertEqual(results.get(
'genome_1'), 'd__Archaea;p__Euryarchaeota;c__Methanobacteria;o__Methanobacteriales;f__Methanobacteriaceae;g__Methanobrevibacter;s__')
def test_place_genomes(self):
tree_file = self.classify.place_genomes(
self.user_msa_file, 'ar122', self.out_dir, self.prefix)
with open(tree_file, 'r') as f:
lines = f.read().splitlines()
last_line = lines[-1]
self.assertTrue(last_line.startswith('('))
self.assertTrue(last_line.endswith('d__Archaea;'))
def test_formatnote(self):
first3genomes = list(self.gtdb_taxonomy.keys())[:3]
sorted_dict = ((first3genomes[0], {'ani': 98.5, 'af': 1.0}), (first3genomes[1], {
'ani': 92.6, 'af': 1.0}), (first3genomes[2], {'ani': 90.3, 'af': 1.3}))
labels = [first3genomes[0]]
note_list = self.classify._formatnote(sorted_dict, labels)
self.assertTrue(first3genomes[1] in note_list[0])
self.assertTrue(first3genomes[2] in note_list[1])
self.assertTrue(note_list[0].endswith(', 92.6, 1.0'))
self.assertTrue(note_list[1].endswith(', 90.3, 1.3'))
def test_calculate_red_distances(self):
tree = os.path.join(self.pplacer_dir_reference,
'gtdbtk.ar122.classify.tree')
result_tree = self.classify._calculate_red_distances(
tree, self.out_dir)
egs2 = [eg.length for eg in result_tree.postorder_edge_iter()
if eg.length is not None]
self.assertTrue(sum(egs2) / len(egs2) < 0.1)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | -94,636,182,859,364,800 | 47.702128 | 145 | 0.528033 | false |
rlaverde/scorecard_cps | performance_indicators_project/perspectives/forms.py | 1 | 4634 | from django.forms import ModelForm, Textarea
from django.forms.models import inlineformset_factory
import re
from crispy_forms.helper import FormHelper
from .models import Target, Initiative, Perspective, Resource, InCharge, Committee
from indicators.models import Indicator, MainIndicator, Parameter
class BasicFormMixin(object):
def __init__(self, *args, **kwargs):
super(BasicFormMixin, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.label_class = 'col-sm-2'
self.helper.field_class = 'col-sm-10'
self.helper.form_tag = False
class PerspectiveForm(BasicFormMixin, ModelForm):
class Meta:
model = Perspective
fields = ['name']
labels = {'name': 'Nombre'}
class ResourceForm(BasicFormMixin, ModelForm):
class Meta:
model = Resource
fields = ['name']
labels = {'name': 'Nombre'}
class InChargeForm(BasicFormMixin, ModelForm):
class Meta:
model = InCharge
fields = ['name', 'email']
labels = {'name': 'Nombre',
'email': 'Correo electronico'
}
class CommitteeForm(BasicFormMixin, ModelForm):
class Meta:
model = Committee
fields = ['name', 'members']
labels = {'name': 'Nombre', 'members':'Miembros'}
class TargetForm(BasicFormMixin, ModelForm):
class Meta:
model = Target
fields = ['name', 'perspective', 'committees', 'resources']
labels = {'name':'Nombre', 'perspective':'Perspectiva', 'committees':'Comites', 'resources':'Recursos'}
class InitiativeForm(ModelForm):
class Meta:
model = Initiative
fields = ['name', 'target']
labels = {'name': 'Nombre'}
class IndicatorForm(ModelForm):
class Meta:
model = Indicator
fields = ['name', 'formula', 'target']
exclude = ['IsMain']
labels = {'name': 'Nombre', 'formula': 'Formula', 'target':'Objetivo'}
widgets = {
'formula': Textarea(attrs={'class': 'formula'}),
}
def __init__(self, *args, **kwargs):
super(IndicatorForm, self).__init__(*args, **kwargs)
if 'formula' in self.initial:
formula = self.initial.get('formula')
def re_function(match):
pk = int(match.group(1))
return '@[{}](id:{})'.format(Parameter.objects.get(id=pk).name, pk)
            self.initial['formula'] = re.sub(r'p\[([0-9]+)\]', re_function, formula)
def clean_formula(self):
formula_data = self.cleaned_data.get('formula')
        clean_formula = re.sub(r'@\[.*?\]\(id:([0-9]+)\)', r'p[\1]', formula_data)
return clean_formula
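# Illustrative round trip (the parameter names are hypothetical): the mention
# widget stores formulas such as '@[Sales](id:3) / @[Staff](id:7)';
# clean_formula() normalises that to 'p[3] / p[7]' for storage, and
# __init__() applies the inverse substitution so a stored formula is shown
# back in mention form when the form is edited again.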
class MainIndicatorForm(ModelForm):
class Meta:
model = MainIndicator
fields = ['name', 'formula', 'target']
exclude = ['IsMain']
labels = {'name': 'Nombre', 'formula': 'Formula', 'target':'Objetivo'}
widgets = {
'formula': Textarea(attrs={'class': 'formula'}),
}
def __init__(self, *args, **kwargs):
super(MainIndicatorForm, self).__init__(*args, **kwargs)
if 'formula' in self.initial:
formula = self.initial.get('formula')
def re_function(match):
pk = int(match.group(1))
return '@[{}](id:{})'.format(Parameter.objects.get(id=pk).name, pk)
            self.initial['formula'] = re.sub(r'p\[([0-9]+)\]', re_function, formula)
def clean_formula(self):
formula_data = self.cleaned_data.get('formula')
        clean_formula = re.sub(r'@\[.*?\]\(id:([0-9]+)\)', r'p[\1]', formula_data)
return clean_formula
class InitiativeFormSetHelper(FormHelper):
def __init__(self, *args, **kwargs):
super(InitiativeFormSetHelper, self).__init__(*args, **kwargs)
self.label_class = 'col-sm-2'
self.field_class = 'col-sm-10'
self.form_tag = False
class IndicatorFormSetHelper(FormHelper):
def __init__(self, *args, **kwargs):
super(IndicatorFormSetHelper, self).__init__(*args, **kwargs)
self.label_class = 'col-sm-2'
self.field_class = 'col-sm-10'
self.form_tag = False
InitiativeFormSet = inlineformset_factory(Target, Initiative, extra=2, can_delete=False, form=InitiativeForm)
IndicatorFormSet = inlineformset_factory(Target, Indicator, extra=5, can_delete=False, form=IndicatorForm)
MainIndicatorFormSet = inlineformset_factory(Target, MainIndicator, extra=1, max_num=1, can_delete=False, form=MainIndicatorForm)
| gpl-3.0 | 6,310,318,416,087,374,000 | 31.180556 | 129 | 0.587829 | false |
turbulent/substance | substance/command/status.py | 1 | 1204 | from substance.monads import *
from substance.logs import *
from substance import (Command, Engine)
from tabulate import tabulate
from substance.constants import (EngineStates)
from substance.exceptions import (EngineNotRunning)
logger = logging.getLogger(__name__)
class Status(Command):
def getShellOptions(self, optparser):
return optparser
def getUsage(self):
return "substance status [options]"
def getHelpTitle(self):
return "Show substance engine and env status"
def main(self):
return self.core.loadCurrentEngine(name=self.parent.getOption('engine')) \
.bind(Engine.envLoadCurrent) \
.bind(Engine.envStatus, full=True) \
.bind(self.printStatus) \
.catch(self.exitError)
def printStatus(self, envStatus):
engine = envStatus.get('engine')
containers = envStatus.get('containers')
headers = ["ENGINE", "SUBENV"]
cols = [[engine.name, engine.currentEnv]]
table = tabulate(cols, headers=headers, tablefmt="plain")
logger.info(table)
if containers:
logger.info("")
logger.info(containers)
return OK(None)
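  # Example output of printStatus (illustrative; the engine and env names
  # are hypothetical):
  #
  #   ENGINE    SUBENV
  #   myengine  dev
  #
  # followed by the container status block when containers are present.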
| apache-2.0 | -85,619,144,083,130,860 | 29.1 | 82 | 0.651163 | false |
DataDog/integrations-core | datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/base.py | 1 | 2512 | # (C) Datadog, Inc. 2020-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
# TODO: remove ignore when we stop invoking Mypy with --py2
# type: ignore
from collections import ChainMap
from contextlib import contextmanager
from ....errors import ConfigurationError
from ... import AgentCheck
from .scraper import OpenMetricsScraper
class OpenMetricsBaseCheckV2(AgentCheck):
DEFAULT_METRIC_LIMIT = 2000
def __init__(self, name, init_config, instances):
super(OpenMetricsBaseCheckV2, self).__init__(name, init_config, instances)
# All desired scraper configurations, which subclasses can override as needed
self.scraper_configs = [self.instance]
# All configured scrapers keyed by the endpoint
self.scrapers = {}
self.check_initializations.append(self.configure_scrapers)
def check(self, _):
self.refresh_scrapers()
for endpoint, scraper in self.scrapers.items():
self.log.info('Scraping OpenMetrics endpoint: %s', endpoint)
with self.adopt_namespace(scraper.namespace):
scraper.scrape()
def configure_scrapers(self):
scrapers = {}
for config in self.scraper_configs:
endpoint = config.get('openmetrics_endpoint', '')
if not isinstance(endpoint, str):
raise ConfigurationError('The setting `openmetrics_endpoint` must be a string')
elif not endpoint:
raise ConfigurationError('The setting `openmetrics_endpoint` is required')
scrapers[endpoint] = self.create_scraper(config)
self.scrapers.clear()
self.scrapers.update(scrapers)
def create_scraper(self, config):
# Subclasses can override to return a custom scraper based on configuration
return OpenMetricsScraper(self, self.get_config_with_defaults(config))
def set_dynamic_tags(self, *tags):
for scraper in self.scrapers.values():
scraper.set_dynamic_tags(*tags)
def get_config_with_defaults(self, config):
return ChainMap(config, self.get_default_config())
def get_default_config(self):
return {}
def refresh_scrapers(self):
pass
@contextmanager
def adopt_namespace(self, namespace):
old_namespace = self.__NAMESPACE__
try:
self.__NAMESPACE__ = namespace or old_namespace
yield
finally:
self.__NAMESPACE__ = old_namespace
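# Minimal subclass sketch (illustrative; kept as a comment so importing this
# module stays side-effect free -- 'mycheck' and the metric are assumptions):
#
#   class MyCheck(OpenMetricsBaseCheckV2):
#       __NAMESPACE__ = 'mycheck'
#
#       def get_default_config(self):
#           # merged under each instance by get_config_with_defaults()
#           return {'metrics': ['go_goroutines']}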
| bsd-3-clause | 3,899,255,253,901,641,000 | 31.623377 | 95 | 0.658838 | false |
vribeiro1/plainsboro_221 | plainsboro/appointments/migrations/0001_initial.py | 1 | 1047 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-05-08 04:01
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('doctor_subscriptions', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Appointment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('pacient_email', models.CharField(max_length=255, verbose_name='email')),
('datetime', models.CharField(max_length=255, verbose_name='datetime')),
('doctor', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='doctor_subscriptions.Doctor', verbose_name='médico')),
],
options={
'verbose_name': 'agendamento',
'verbose_name_plural': 'agendamentos',
},
),
]
| mit | -8,226,026,083,457,915,000 | 32.741935 | 148 | 0.590822 | false |
wdv4758h/ZipPy | edu.uci.python.benchmark/src/benchmarks/sympy/examples/advanced/gibbs_phenomenon.py | 2 | 3694 | #!/usr/bin/env python
"""
This example illustrates the Gibbs phenomenon.
It also calculates the Wilbraham-Gibbs constant by two approaches:
1) calculating the Fourier series of the step function and determining the
first maximum.
2) evaluating the integral for si(pi).
See:
* http://en.wikipedia.org/wiki/Gibbs_phenomena
"""
from sympy import var, sqrt, integrate, conjugate, seterr, Abs, pprint, I, pi,\
sin, cos, sign, Plot, lambdify, Integral, S
# seterr(True)
x = var("x", real=True)
def l2_norm(f, lim):
"""
Calculates L2 norm of the function "f", over the domain lim=(x, a, b).
x ...... the independent variable in f over which to integrate
a, b ... the limits of the interval
Example:
>>> from sympy import Symbol
>>> from gibbs_phenomenon import l2_norm
>>> x = Symbol('x', real=True)
>>> l2_norm(1, (x, -1, 1))
sqrt(2)
>>> l2_norm(x, (x, -1, 1))
sqrt(6)/3
"""
return sqrt(integrate(Abs(f)**2, lim))
def l2_inner_product(a, b, lim):
"""
Calculates the L2 inner product (a, b) over the domain lim.
"""
return integrate(conjugate(a)*b, lim)
def l2_projection(f, basis, lim):
"""
L2 projects the function f on the basis over the domain lim.
"""
r = 0
for b in basis:
r += l2_inner_product(f, b, lim) * b
return r
def l2_gram_schmidt(list, lim):
"""
Orthonormalizes the "list" of functions using the Gram-Schmidt process.
Example:
>>> from sympy import Symbol
>>> from gibbs_phenomenon import l2_gram_schmidt
>>> x = Symbol('x', real=True) # perform computations over reals to save time
>>> l2_gram_schmidt([1, x, x**2], (x, -1, 1))
[sqrt(2)/2, sqrt(6)*x/2, 3*sqrt(10)*(x**2 - 1/3)/4]
"""
r = []
for a in list:
if r == []:
v = a
else:
v = a - l2_projection(a, r, lim)
v_norm = l2_norm(v, lim)
if v_norm == 0:
raise ValueError("The sequence is not linearly independent.")
r.append(v/v_norm)
return r
def integ(f):
return integrate(f, (x, -pi, 0)) + integrate(-f, (x, 0, pi))
def series(L):
"""
    Computes the Fourier series of the step function in the orthonormal basis L.
"""
r = 0
for b in L:
r += integ(b)*b
return r
def msolve(f, x):
"""
Finds the first root of f(x) to the left of 0.
    The x0 and dx below are tailored to get the correct result for our
particular function --- the general solver often overshoots the first
solution.
"""
f = lambdify(x, f)
x0 = -0.001
dx = 0.001
while f(x0 - dx) * f(x0) > 0:
x0 = x0 - dx
x_max = x0 - dx
x_min = x0
assert f(x_max) > 0
assert f(x_min) < 0
for n in range(100):
x0 = (x_max + x_min)/2
if f(x0) > 0:
x_max = x0
else:
x_min = x0
return x0
def main():
# L = l2_gram_schmidt([1, cos(x), sin(x), cos(2*x), sin(2*x)], (x, -pi, pi))
# L = l2_gram_schmidt([1, cos(x), sin(x)], (x, -pi, pi))
# the code below is equivalen to l2_gram_schmidt(), but faster:
L = [1/sqrt(2)]
for i in range(1, 100):
L.append(cos(i*x))
L.append(sin(i*x))
L = [f/sqrt(pi) for f in L]
f = series(L)
print("Fourier series of the step function")
pprint(f)
# Plot(f.diff(x), [x, -5, 5, 3000])
x0 = msolve(f.diff(x), x)
print("x-value of the maximum:", x0)
max = f.subs(x, x0).evalf()
print("y-value of the maximum:", max)
g = max*pi/2
print("Wilbraham-Gibbs constant :", g.evalf())
print("Wilbraham-Gibbs constant (exact):", \
Integral(sin(x)/x, (x, 0, pi)).evalf())
if __name__ == "__main__":
main()
| bsd-3-clause | -4,386,274,726,224,543,000 | 22.987013 | 84 | 0.554954 | false |
ogenstad/ansible | lib/ansible/modules/monitoring/grafana_dashboard.py | 1 | 11505 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Thierry Sallé (@seuf)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
ANSIBLE_METADATA = {
'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'
}
DOCUMENTATION = '''
---
module: grafana_dashboard
author:
- Thierry Sallé (@tsalle)
version_added: "2.5"
short_description: Manage Grafana dashboards
description:
- Create, update, delete, export Grafana dashboards via API.
options:
grafana_url:
description:
- The Grafana URL.
required: true
grafana_user:
description:
- The Grafana API user.
default: admin
grafana_password:
description:
- The Grafana API password.
default: admin
grafana_api_key:
description:
- The Grafana API key.
- If set, I(grafana_user) and I(grafana_password) will be ignored.
org_id:
description:
- The Grafana Organisation ID where the dashboard will be imported / exported.
- Not used when I(grafana_api_key) is set, because the grafana_api_key only belongs to one organisation..
default: 1
state:
description:
- State of the dashboard.
required: true
choices: [ absent, export, present ]
default: present
slug:
description:
- slug of the dashboard. It's the friendly url name of the dashboard.
- When C(state) is C(present), this parameter can override the slug in the meta section of the json file.
- If you want to import a json dashboard exported directly from the interface (not from the api),
you have to specify the slug parameter because there is no meta section in the exported json.
path:
description:
- The path to the json file containing the Grafana dashboard to import or export.
overwrite:
description:
- Override existing dashboard when state is present.
type: bool
default: 'no'
message:
description:
- Set a commit message for the version history.
- Only used when C(state) is C(present).
validate_certs:
description:
- If C(no), SSL certificates will not be validated.
- This should only be used on personally controlled sites using self-signed certificates.
type: bool
default: 'yes'
'''
EXAMPLES = '''
- name: Import Grafana dashboard foo
grafana_dashboard:
grafana_url: http://grafana.company.com
grafana_api_key: XXXXXXXXXXXX
state: present
message: Updated by ansible
overwrite: yes
path: /path/to/dashboards/foo.json
- name: Export dashboard
grafana_dashboard:
grafana_url: http://grafana.company.com
grafana_api_key: XXXXXXXXXXXX
state: export
slug: foo
path: /path/to/dashboards/foo.json
'''
RETURN = '''
---
slug:
description: slug of the created / deleted / exported dashboard.
returned: success
type: string
sample: foo
'''
import base64
import json
import os
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url
__metaclass__ = type
class GrafanaAPIException(Exception):
pass
class GrafanaMalformedJson(Exception):
pass
class GrafanaExportException(Exception):
pass
def grafana_switch_organisation(module, grafana_url, org_id, headers):
r, info = fetch_url(module, '%s/api/user/using/%s' % (grafana_url, org_id), headers=headers, method='POST')
if info['status'] != 200:
raise GrafanaAPIException('Unable to switch to organization %s : %s' % (org_id, info))
def grafana_dashboard_exists(module, grafana_url, slug, headers):
dashboard_exists = False
dashboard = {}
r, info = fetch_url(module, '%s/api/dashboards/db/%s' % (grafana_url, slug), headers=headers, method='GET')
if info['status'] == 200:
dashboard_exists = True
try:
dashboard = json.loads(r.read())
except Exception as e:
raise GrafanaMalformedJson(e)
elif info['status'] == 404:
dashboard_exists = False
else:
raise GrafanaAPIException('Unable to get dashboard %s : %s' % (slug, info))
return dashboard_exists, dashboard
def grafana_create_dashboard(module, data):
# define data payload for grafana API
try:
with open(data['path'], 'r') as json_file:
payload = json.load(json_file)
except Exception as e:
raise GrafanaMalformedJson("Can't load json file %s" % str(e))
# define http header
headers = {'content-type': 'application/json; charset=utf8'}
if 'grafana_api_key' in data and data['grafana_api_key']:
headers['Authorization'] = "Bearer %s" % data['grafana_api_key']
else:
auth = base64.encodestring('%s:%s' % (data['grafana_user'], data['grafana_password'])).replace('\n', '')
headers['Authorization'] = 'Basic %s' % auth
grafana_switch_organisation(module, data['grafana_url'], data['org_id'], headers)
if data.get('slug'):
slug = data['slug']
elif 'meta' in payload and 'slug' in payload['meta']:
slug = payload['meta']['slug']
else:
raise GrafanaMalformedJson('No slug found in json')
# test if dashboard already exists
dashboard_exists, dashboard = grafana_dashboard_exists(module, data['grafana_url'], slug, headers=headers)
result = {}
if dashboard_exists is True:
if dashboard == payload:
# unchanged
result['slug'] = data['slug']
result['msg'] = "Dashboard %s unchanged." % data['slug']
result['changed'] = False
else:
# update
if 'overwrite' in data and data['overwrite']:
payload['overwrite'] = True
if 'message' in data and data['message']:
payload['message'] = data['message']
r, info = fetch_url(module, '%s/api/dashboards/db' % data['grafana_url'], data=json.dumps(payload), headers=headers, method='POST')
if info['status'] == 200:
result['slug'] = slug
result['msg'] = "Dashboard %s updated" % slug
result['changed'] = True
else:
body = json.loads(info['body'])
raise GrafanaAPIException('Unable to update the dashboard %s : %s' % (slug, body['message']))
else:
# create
if 'dashboard' not in payload:
payload = {'dashboard': payload}
r, info = fetch_url(module, '%s/api/dashboards/db' % data['grafana_url'], data=json.dumps(payload), headers=headers, method='POST')
if info['status'] == 200:
result['msg'] = "Dashboard %s created" % slug
result['changed'] = True
result['slug'] = slug
else:
raise GrafanaAPIException('Unable to create the new dashboard %s : %s - %s.' % (slug, info['status'], info))
return result
def grafana_delete_dashboard(module, data):
# define http headers
headers = {'content-type': 'application/json'}
if 'grafana_api_key' in data and data['grafana_api_key']:
headers['Authorization'] = "Bearer %s" % data['grafana_api_key']
else:
auth = base64.encodestring('%s:%s' % (data['grafana_user'], data['grafana_password'])).replace('\n', '')
headers['Authorization'] = 'Basic %s' % auth
grafana_switch_organisation(module, data['grafana_url'], data['org_id'], headers)
# test if dashboard already exists
dashboard_exists, dashboard = grafana_dashboard_exists(module, data['grafana_url'], data['slug'], headers=headers)
result = {}
if dashboard_exists is True:
# delete
r, info = fetch_url(module, '%s/api/dashboards/db/%s' % (data['grafana_url'], data['slug']), headers=headers, method='DELETE')
if info['status'] == 200:
result['msg'] = "Dashboard %s deleted" % data['slug']
result['changed'] = True
result['slug'] = data['slug']
else:
            raise GrafanaAPIException('Unable to delete the dashboard %s : %s' % (data['slug'], info))
else:
# dashboard does not exist, do nothing
result = {'msg': "Dashboard %s does not exist." % data['slug'],
'changed': False,
'slug': data['slug']}
return result
def grafana_export_dashboard(module, data):
# define http headers
headers = {'content-type': 'application/json'}
if 'grafana_api_key' in data and data['grafana_api_key']:
headers['Authorization'] = "Bearer %s" % data['grafana_api_key']
else:
auth = base64.encodestring('%s:%s' % (data['grafana_user'], data['grafana_password'])).replace('\n', '')
headers['Authorization'] = 'Basic %s' % auth
grafana_switch_organisation(module, data['grafana_url'], data['org_id'], headers)
# test if dashboard already exists
dashboard_exists, dashboard = grafana_dashboard_exists(module, data['grafana_url'], data['slug'], headers=headers)
if dashboard_exists is True:
try:
with open(data['path'], 'w') as f:
f.write(json.dumps(dashboard))
except Exception as e:
raise GrafanaExportException("Can't write json file : %s" % str(e))
result = {'msg': "Dashboard %s exported to %s" % (data['slug'], data['path']),
'slug': data['slug'],
'changed': True}
else:
result = {'msg': "Dashboard %s does not exist." % data['slug'],
'slug': data['slug'],
'changed': False}
return result
def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(choices=['present', 'absent', 'export'],
default='present'),
grafana_url=dict(required=True),
grafana_user=dict(default='admin'),
grafana_password=dict(default='admin', no_log=True),
grafana_api_key=dict(type='str', no_log=True),
org_id=dict(default=1, type='int'),
slug=dict(type='str'),
path=dict(type='str'),
overwrite=dict(type='bool', default=False),
message=dict(type='str'),
validate_certs=dict(type='bool', default=True)
),
supports_check_mode=False,
required_together=[['grafana_user', 'grafana_password', 'org_id']],
mutually_exclusive=[['grafana_user', 'grafana_api_key']],
)
try:
if module.params['state'] == 'present':
result = grafana_create_dashboard(module, module.params)
elif module.params['state'] == 'absent':
result = grafana_delete_dashboard(module, module.params)
else:
result = grafana_export_dashboard(module, module.params)
except GrafanaAPIException as e:
module.fail_json(
failed=True,
msg="error : %s" % e
)
return
except GrafanaMalformedJson as e:
module.fail_json(
failed=True,
msg="error : json file does not contain a meta section with a slug parameter, or you did'nt specify the slug parameter"
)
return
except GrafanaExportException as e:
module.fail_json(
failed=True,
msg="error : json file cannot be written : %s" % str(e)
)
return
module.exit_json(
failed=False,
**result
)
return
if __name__ == '__main__':
main()
| gpl-3.0 | -3,438,470,058,666,596,000 | 33.44012 | 143 | 0.608363 | false |
mmdg-oxford/papers | Schlipf-PRL-2018/model/epw_mass.py | 1 | 1237 | from __future__ import print_function
from bose_einstein import bose_einstein
from constant import htr_to_meV
import argparser
import mass_factor
import numpy as np
args = argparser.read_argument('Renormalize EPW calculation')
if args.vb: offset = -8.75333295715961e-03
else: offset = 8.53193322468371e-03
if args.vb: band_str = '36'
else: band_str = '37'
temp_str = '%03dK' % args.temp
if args.acoustic:
rng_qpt = range(8000, 10001, 500)
elif args.temp == 1:
rng_qpt = range(40000, 50001, 1000)
elif args.temp == 150:
rng_qpt = range(80000, 100001, 5000)
elif args.temp == 300:
rng_qpt = range(80000, 100001, 5000)
else:
print("temperature " + str(args.temp) + " not available")
exit()
dir_str = 'gx'
for qpt in rng_qpt:
qpt_str = '%06d' % qpt
if args.acoustic:
temp_str = '%dK' % args.temp
qpt_str = str(qpt)
filename = 'data/epw_all_28424_'+temp_str+'_5meV_acoustic_only/data_'+dir_str+'_'+band_str+'_'+qpt_str+'.dat'
else:
filename = 'data/res_'+temp_str+'_1meV/data_'+dir_str+'_'+band_str+'_'+qpt_str+'.dat'
file_epw = open(filename, 'r')
line = file_epw.readline()
data = line.split()
lam = np.float(data[4])
mf = mass_factor.eval(lam, args.method)
print(args.temp, mf, lam)
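# Assumed layout of the data files (only column 5 is actually read above):
# each line of data_<dir>_<band>_<qpt>.dat is whitespace-separated and
# data[4] holds the electron-phonon coupling lambda passed to
# mass_factor.eval(); the remaining columns are ignored by this script.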
| gpl-3.0 | 8,698,819,622,413,661,000 | 25.319149 | 113 | 0.660469 | false |
apbard/scipy | scipy/ndimage/tests/test_io.py | 2 | 1049 | from __future__ import division, print_function, absolute_import
import pytest
from numpy.testing import assert_array_equal
from scipy._lib._numpy_compat import suppress_warnings
import scipy.ndimage as ndi
import os
try:
from PIL import Image
pil_missing = False
except ImportError:
pil_missing = True
@pytest.mark.skipif(pil_missing, reason="The Python Image Library could not be found.")
def test_imread():
lp = os.path.join(os.path.dirname(__file__), 'dots.png')
with suppress_warnings() as sup:
# PIL causes a Py3k ResourceWarning
sup.filter(message="unclosed file")
img = ndi.imread(lp, mode="RGB")
assert_array_equal(img.shape, (300, 420, 3))
with suppress_warnings() as sup:
# PIL causes a Py3k ResourceWarning
sup.filter(message="unclosed file")
img = ndi.imread(lp, flatten=True)
assert_array_equal(img.shape, (300, 420))
with open(lp, 'rb') as fobj:
img = ndi.imread(fobj, mode="RGB")
assert_array_equal(img.shape, (300, 420, 3))
| bsd-3-clause | -3,530,541,556,877,071,000 | 28.971429 | 87 | 0.671115 | false |
tjtrebat/algorithms | algorithms/graph_algorithms/readgraph.py | 1 | 2178 | """
readgraph.py -- Used for reading graph node and edge information from an adjacency matrix contained within a file.
"""
__author__ = 'Tom'
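# Illustrative usage (file format inferred from read_graph below): a
# tab-separated adjacency matrix in which any non-numeric cell means
# "no edge", e.g. for three nodes:
#
#   0.0 <TAB> 2.5 <TAB> x
#   x   <TAB> 0.0 <TAB> 1.0
#   4.0 <TAB> x   <TAB> 0.0
#
#   g = Graph()
#   g.read_graph("graph.txt")         # "graph.txt" is a hypothetical path
#   g.has_edge(0, 1)                  # -> True (weight 2.5)
#   g.compute_transpose()[(1, 0)]     # -> 2.5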
class Graph:
def __init__(self):
self.num_nodes = 0
self.edge_weights = dict()
def read_graph(self, infile):
"""
Reads a file to populate the nodes and edges in a Graph.
        Arguments:
            infile -- a filename or open file object containing edge weights
"""
if isinstance(infile, str):
infile = open(infile, 'r')
text_unicode = unicode(infile.read(), "utf-8").strip() # reads in unicode text
# Split the text on the newline character.
lines = text_unicode.split("\n")
self.num_nodes = len(lines)
self.edge_weights = dict()
for i, line in enumerate(lines):
for j, edge_weight in enumerate(line.split("\t")):
if j < self.num_nodes:
if Graph.is_float(edge_weight): # an edge exists between the two nodes
self.edge_weights[(i, j)] = float(edge_weight)
def compute_transpose(self):
t = dict()
for i, j in self.edge_weights.keys():
t[(j, i)] = self.edge_weights[(i, j)]
return t
@staticmethod
def is_float(weight):
""" Returns True if weight is a valid floating-point number
        Arguments:
weight -- an edge weight
"""
try:
float(weight)
return True
except ValueError:
pass
return False
def has_edge(self, u, v):
try:
edge = self.edge_weights[(u, v)]
except KeyError:
return False
return True
def __unicode__(self):
"""
Prints the graph with a non-ascii character for positive infinity
"""
s = ""
        for i in range(self.num_nodes):
            for j in range(self.num_nodes):
if self.has_edge(i, j):
s += "%04.1f" % self.edge_weights[(i, j)]
else:
s += "%*s%s%*s" % (1, " ", u"\u221E", 3, " ",)
s += "\t"
s += "\n"
return s | gpl-2.0 | -1,201,915,395,122,980,400 | 28.445946 | 115 | 0.502755 | false |
ksmit799/Toontown-Source | toontown/catalog/CatalogItem.py | 1 | 14890 | from direct.directnotify import DirectNotifyGlobal
from pandac.PandaModules import *
from toontown.toonbase import TTLocalizer
from toontown.toonbase import ToontownGlobals
from direct.interval.IntervalGlobal import *
from direct.distributed.PyDatagram import PyDatagram
from direct.distributed.PyDatagramIterator import PyDatagramIterator
import types
import sys
CatalogReverseType = None
CatalogItemVersion = 8
CatalogBackorderMarkup = 1.2
CatalogSaleMarkdown = 0.75
Customization = 1
DeliveryDate = 2
Location = 4
WindowPlacement = 8
GiftTag = 16
CatalogTypeUnspecified = 0
CatalogTypeWeekly = 1
CatalogTypeBackorder = 2
CatalogTypeMonthly = 3
CatalogTypeLoyalty = 4
class CatalogItem:
notify = DirectNotifyGlobal.directNotify.newCategory('CatalogItem')
def __init__(self, *args, **kw):
self.saleItem = 0
self.deliveryDate = None
self.posHpr = None
self.giftTag = None
self.giftCode = 0
self.hasPicture = False
self.volume = 0
self.specialEventId = 0
if len(args) >= 1 and isinstance(args[0], DatagramIterator):
self.decodeDatagram(*args, **kw)
else:
self.makeNewItem(*args, **kw)
return
def isAward(self):
result = self.specialEventId != 0
return result
def makeNewItem(self):
pass
def needsCustomize(self):
return 0
def saveHistory(self):
return 0
def getBackSticky(self):
itemType = 0
numSticky = 0
return (itemType, numSticky)
def putInBackCatalog(self, backCatalog, lastBackCatalog):
if self.saveHistory() and not self.isSaleItem():
if self not in backCatalog:
if self in lastBackCatalog:
lastBackCatalog.remove(self)
backCatalog.append(self)
def replacesExisting(self):
return 0
def hasExisting(self):
return 0
def getYourOldDesc(self):
return None
def storedInCloset(self):
return 0
def storedInTrunk(self):
return 0
def storedInAttic(self):
return 0
def notOfferedTo(self, avatar):
return 0
def getPurchaseLimit(self):
return 0
def reachedPurchaseLimit(self, avatar):
return 0
def hasBeenGifted(self, avatar):
if avatar.onGiftOrder.count(self) != 0:
return 1
return 0
def getTypeName(self):
return 'Unknown Type Item'
def getName(self):
return 'Unnamed Item'
def getDisplayName(self):
return self.getName()
def recordPurchase(self, avatar, optional):
self.notify.warning('%s has no purchase method.' % self)
return ToontownGlobals.P_NoPurchaseMethod
def isSaleItem(self):
return self.saleItem
def isGift(self):
if self.getEmblemPrices():
return 0
return 1
def isRental(self):
return 0
def forBoysOnly(self):
return 0
def forGirlsOnly(self):
return 0
def setLoyaltyRequirement(self, days):
self.loyaltyDays = days
def loyaltyRequirement(self):
if not hasattr(self, 'loyaltyDays'):
return 0
else:
return self.loyaltyDays
def getPrice(self, catalogType):
if catalogType == CatalogTypeBackorder:
return self.getBackPrice()
elif self.isSaleItem():
return self.getSalePrice()
else:
return self.getCurrentPrice()
def getCurrentPrice(self):
return int(self.getBasePrice())
def getBackPrice(self):
return int(self.getBasePrice() * CatalogBackorderMarkup)
def getSalePrice(self):
return int(self.getBasePrice() * CatalogSaleMarkdown)
def getDeliveryTime(self):
return 0
def getPicture(self, avatar):
self.hasPicture = True
return (None, None)
def cleanupPicture(self):
self.hasPicture = False
def requestPurchase(self, phone, callback, optional = -1):
phone.requestPurchase(self, callback, optional)
def requestGiftPurchase(self, phone, targetDoID, callback, optional = -1):
phone.requestGiftPurchase(self, targetDoID, callback, optional)
def requestPurchaseCleanup(self):
pass
def getRequestPurchaseErrorText(self, retcode):
if retcode == ToontownGlobals.P_ItemAvailable:
return TTLocalizer.CatalogPurchaseItemAvailable
elif retcode == ToontownGlobals.P_ItemOnOrder:
return TTLocalizer.CatalogPurchaseItemOnOrder
elif retcode == ToontownGlobals.P_MailboxFull:
return TTLocalizer.CatalogPurchaseMailboxFull
elif retcode == ToontownGlobals.P_OnOrderListFull:
return TTLocalizer.CatalogPurchaseOnOrderListFull
else:
return TTLocalizer.CatalogPurchaseGeneralError % retcode
def getRequestGiftPurchaseErrorText(self, retcode):
if retcode == ToontownGlobals.P_ItemAvailable:
return TTLocalizer.CatalogPurchaseGiftItemAvailable
elif retcode == ToontownGlobals.P_ItemOnOrder:
return TTLocalizer.CatalogPurchaseGiftItemOnOrder
elif retcode == ToontownGlobals.P_MailboxFull:
return TTLocalizer.CatalogPurchaseGiftMailboxFull
elif retcode == ToontownGlobals.P_OnOrderListFull:
return TTLocalizer.CatalogPurchaseGiftOnOrderListFull
elif retcode == ToontownGlobals.P_NotAGift:
return TTLocalizer.CatalogPurchaseGiftNotAGift
elif retcode == ToontownGlobals.P_WillNotFit:
return TTLocalizer.CatalogPurchaseGiftWillNotFit
elif retcode == ToontownGlobals.P_ReachedPurchaseLimit:
return TTLocalizer.CatalogPurchaseGiftLimitReached
elif retcode == ToontownGlobals.P_NotEnoughMoney:
return TTLocalizer.CatalogPurchaseGiftNotEnoughMoney
else:
return TTLocalizer.CatalogPurchaseGiftGeneralError % {'friend': '%s',
'error': retcode}
def acceptItem(self, mailbox, index, callback):
mailbox.acceptItem(self, index, callback)
def discardItem(self, mailbox, index, callback):
print 'Item discardItem'
mailbox.discardItem(self, index, callback)
def acceptItemCleanup(self):
pass
def getAcceptItemErrorText(self, retcode):
if retcode == ToontownGlobals.P_NoRoomForItem:
return TTLocalizer.CatalogAcceptRoomError
elif retcode == ToontownGlobals.P_ReachedPurchaseLimit:
return TTLocalizer.CatalogAcceptLimitError
elif retcode == ToontownGlobals.P_WillNotFit:
return TTLocalizer.CatalogAcceptFitError
elif retcode == ToontownGlobals.P_InvalidIndex:
return TTLocalizer.CatalogAcceptInvalidError
else:
return TTLocalizer.CatalogAcceptGeneralError % retcode
def output(self, store = -1):
return 'CatalogItem'
def getFilename(self):
return ''
def getColor(self):
return None
def formatOptionalData(self, store = -1):
result = ''
if store & Location and self.posHpr != None:
result += ', posHpr = (%s, %s, %s, %s, %s, %s)' % self.posHpr
return result
def __str__(self):
return self.output()
def __repr__(self):
return self.output()
def compareTo(self, other):
return 0
def getHashContents(self):
return None
def __cmp__(self, other):
c = cmp(self.__class__, other.__class__)
if c != 0:
return c
return self.compareTo(other)
def __hash__(self):
return hash((self.__class__, self.getHashContents()))
def getBasePrice(self):
return 0
def getEmblemPrices(self):
return ()
def loadModel(self):
return None
def decodeDatagram(self, di, versionNumber, store):
if store & DeliveryDate:
self.deliveryDate = di.getUint32()
if store & Location:
x = di.getArg(STInt16, 10)
y = di.getArg(STInt16, 10)
z = di.getArg(STInt16, 100)
if versionNumber < 2:
h = di.getArg(STInt16, 10)
p = 0.0
r = 0.0
elif versionNumber < 5:
h = di.getArg(STInt8, 256.0 / 360.0)
p = di.getArg(STInt8, 256.0 / 360.0)
r = di.getArg(STInt8, 256.0 / 360.0)
hpr = oldToNewHpr(VBase3(h, p, r))
h = hpr[0]
p = hpr[1]
r = hpr[2]
else:
h = di.getArg(STInt8, 256.0 / 360.0)
p = di.getArg(STInt8, 256.0 / 360.0)
r = di.getArg(STInt8, 256.0 / 360.0)
self.posHpr = (x,
y,
z,
h,
p,
r)
if store & GiftTag:
self.giftTag = di.getString()
if versionNumber >= 8:
self.specialEventId = di.getUint8()
else:
self.specialEventId = 0
def encodeDatagram(self, dg, store):
if store & DeliveryDate:
dg.addUint32(self.deliveryDate)
if store & Location:
dg.putArg(self.posHpr[0], STInt16, 10)
dg.putArg(self.posHpr[1], STInt16, 10)
dg.putArg(self.posHpr[2], STInt16, 100)
dg.putArg(self.posHpr[3], STInt8, 256.0 / 360.0)
dg.putArg(self.posHpr[4], STInt8, 256.0 / 360.0)
dg.putArg(self.posHpr[5], STInt8, 256.0 / 360.0)
if store & GiftTag:
dg.addString(self.giftTag)
dg.addUint8(self.specialEventId)
def getTypeCode(self):
import CatalogItemTypes
return CatalogItemTypes.CatalogItemTypes[self.__class__]
def applyColor(self, model, colorDesc):
if model == None or colorDesc == None:
return
for partName, color in colorDesc:
matches = model.findAllMatches(partName)
if color == None:
matches.hide()
elif isinstance(color, types.StringType):
tex = loader.loadTexture(color)
tex.setMinfilter(Texture.FTLinearMipmapLinear)
tex.setMagfilter(Texture.FTLinear)
for i in range(matches.getNumPaths()):
matches.getPath(i).setTexture(tex, 1)
else:
needsAlpha = color[3] != 1
color = VBase4(color[0], color[1], color[2], color[3])
for i in range(matches.getNumPaths()):
matches.getPath(i).setColorScale(color, 1)
if needsAlpha:
matches.getPath(i).setTransparency(1)
return
def makeFrame(self):
from direct.gui.DirectGui import DirectFrame
frame = DirectFrame(parent=hidden, frameSize=(-1.0, 1.0, -1.0, 1.0), relief=None)
return frame
def makeFrameModel(self, model, spin = 1):
frame = self.makeFrame()
ival = None
if model:
model.setDepthTest(1)
model.setDepthWrite(1)
if spin:
pitch = frame.attachNewNode('pitch')
rotate = pitch.attachNewNode('rotate')
scale = rotate.attachNewNode('scale')
model.reparentTo(scale)
bMin, bMax = model.getTightBounds()
center = (bMin + bMax) / 2.0
model.setPos(-center[0], -center[1], -center[2])
pitch.setP(20)
bMin, bMax = pitch.getTightBounds()
center = (bMin + bMax) / 2.0
corner = Vec3(bMax - center)
scale.setScale(1.0 / max(corner[0], corner[1], corner[2]))
pitch.setY(2)
ival = LerpHprInterval(rotate, 10, VBase3(-270, 0, 0), startHpr=VBase3(90, 0, 0))
else:
scale = frame.attachNewNode('scale')
model.reparentTo(scale)
bMin, bMax = model.getTightBounds()
center = (bMin + bMax) / 2.0
model.setPos(-center[0], 2, -center[2])
corner = Vec3(bMax - center)
scale.setScale(1.0 / max(corner[0], corner[1], corner[2]))
return (frame, ival)
def getBlob(self, store = 0):
dg = PyDatagram()
dg.addUint8(CatalogItemVersion)
encodeCatalogItem(dg, self, store)
return dg.getMessage()
def getRequestPurchaseErrorTextTimeout(self):
return 6
def getDaysToGo(self, avatar):
accountDays = avatar.getAccountDays()
daysToGo = self.loyaltyRequirement() - accountDays
if daysToGo < 0:
daysToGo = 0
return int(daysToGo)
def encodeCatalogItem(dg, item, store):
import CatalogItemTypes
flags = item.getTypeCode()
if item.isSaleItem():
flags |= CatalogItemTypes.CatalogItemSaleFlag
if item.giftTag != None:
flags |= CatalogItemTypes.CatalogItemGiftTag
dg.addUint8(flags)
if item.giftTag != None:
dg.addUint32(item.giftTag)
if not item.giftCode:
item.giftCode = 0
dg.addUint8(item.giftCode)
item.encodeDatagram(dg, store)
return
def decodeCatalogItem(di, versionNumber, store):
global CatalogReverseType
import CatalogItemTypes
if CatalogReverseType == None:
CatalogReverseType = {}
for itemClass, index in CatalogItemTypes.CatalogItemTypes.items():
CatalogReverseType[index] = itemClass
startIndex = di.getCurrentIndex()
try:
flags = di.getUint8()
typeIndex = flags & CatalogItemTypes.CatalogItemTypeMask
gift = None
code = None
if flags & CatalogItemTypes.CatalogItemGiftTag:
gift = di.getUint32()
code = di.getUint8()
itemClass = CatalogReverseType[typeIndex]
item = itemClass(di, versionNumber, store=store)
except Exception, e:
CatalogItem.notify.warning('Invalid catalog item in stream: %s, %s' % (sys.exc_info()[0], e))
d = Datagram(di.getDatagram().getMessage()[startIndex:])
d.dumpHex(Notify.out())
import CatalogInvalidItem
return CatalogInvalidItem.CatalogInvalidItem()
if flags & CatalogItemTypes.CatalogItemSaleFlag:
item.saleItem = 1
item.giftTag = gift
item.giftCode = code
return item
def getItem(blob, store = 0):
dg = PyDatagram(blob)
di = PyDatagramIterator(dg)
try:
versionNumber = di.getUint8()
return decodeCatalogItem(di, versionNumber, store)
except Exception, e:
CatalogItem.notify.warning('Invalid catalog item: %s, %s' % (sys.exc_info()[0], e))
dg.dumpHex(Notify.out())
import CatalogInvalidItem
return CatalogInvalidItem.CatalogInvalidItem()
| mit | 7,182,130,796,234,648,000 | 30.95279 | 101 | 0.605776 | false |
AlexKordic/sublime-collaboration | collab/client.py | 1 | 3626 | import logging, doc, connection
class CollabClient:
def __init__(self, host, port):
self.docs = {}
self.state = 'connecting'
self.waiting_for_docs = []
self.connected = False
self.id = None
self.socket = connection.ClientSocket(host, port)
self.socket.on('message', self.socket_message)
self.socket.on('error', self.socket_error)
self.socket.on('open', self.socket_open)
self.socket.on('close', self.socket_close)
self.socket.start()
self._events = {}
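        # _events implements a minimal event-emitter: on() subscribes a
        # callback, removeListener() removes it, emit() invokes them all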
def on(self, event, fct):
if event not in self._events: self._events[event] = []
self._events[event].append(fct)
return self
def removeListener(self, event, fct):
if event not in self._events: return self
self._events[event].remove(fct)
return self
def emit(self, event, *args):
if event not in self._events: return self
for callback in self._events[event]:
callback(*args)
return self
def socket_open(self):
self.set_state('handshaking')
def socket_close(self, reason=''):
self.set_state('closed', reason)
self.socket = None
def socket_error(self, error):
self.emit('error', error)
def socket_message(self, msg):
if 'auth' in msg:
if msg['auth'] is None or msg['auth'] == '':
logging.warning('Authentication failed: {0}'.format(msg['error']))
self.disconnect()
else:
self.id = msg['auth']
self.set_state('ok')
return
if 'docs' in msg:
if 'error' in msg:
for callback in self.waiting_for_docs:
callback(msg['error'], None)
else:
for callback in self.waiting_for_docs:
callback(None, msg['docs'])
self.waiting_for_docs = []
return
if 'doc' in msg and msg['doc'] in self.docs:
self.docs[msg['doc']].on_message(msg)
else:
logging.error('Unhandled message {0}'.format(msg))
def set_state(self, state, data=None):
        if self.state == state: return
        self.state = state
        if state == 'closed':
self.id = None
self.emit(state, data)
def send(self, data):
if self.state is not "closed":
self.socket.send(data)
def disconnect(self):
if self.state is not "closed":
self.socket.close()
def get_docs(self, callback):
        if self.state == 'closed':
            return callback('connection closed', None)
        if self.state == 'connecting':
            return self.on('ok', lambda x: self.get_docs(callback))
if not self.waiting_for_docs:
self.send({"docs":None})
self.waiting_for_docs.append(callback)
def open(self, name, callback, **kwargs):
        if self.state == 'closed':
            return callback('connection closed', None)
        if self.state == 'connecting':
            return self.on('ok', lambda x: self.open(name, callback, **kwargs))
if name in self.docs:
return callback("doc {0} already open".format(name), None)
newdoc = doc.CollabDoc(self, name, kwargs.get('snapshot', None))
self.docs[name] = newdoc
newdoc.open(lambda error, doc: callback(error, doc if not error else None))
def closed(self, name):
del self.docs[name]
| unlicense | -8,670,789,396,532,765,000 | 28.991453 | 83 | 0.538058 | false |
MyRobotLab/pyrobotlab | home/moz4r/deprecated/Inmoov/InmoovScript_InmoovAI/INMOOV-AI_games.py | 1 | 7481 | # -*- coding: utf-8 -*-
################################################################################
# LOTO
################################################################################
def loto(phrase,the,chance,fin):
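    # draws 5 unique numbers in 1..49 plus a bonus "chance" number in 1..9
    # (French-lotto style) and speaks them; phrase/the/chance/fin are the
    # spoken intro, per-number prefix, bonus prefix and closing phrase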
table1 = [(random.randint(1,49)), (random.randint(1,49)), (random.randint(1,49)), (random.randint(1,49)),(random.randint(1,49))]
    tablefin = []
    doublon = []
    for i in table1:
        if i not in tablefin:
            tablefin.append(i) # keep unique values
        else:
            doublon.append(i) # collect duplicates
    d = len(doublon)
    while d > 0:
        # new draw
        doublon = []
        tablefin = [] # reset, otherwise values from the previous draw are counted as duplicates
        table1 = [(random.randint(1,49)), (random.randint(1,49)), (random.randint(1,49)), (random.randint(1,49)), (random.randint(1,49))]
        # look for duplicates in the new draw
        for i in table1:
            if i not in tablefin:
                tablefin.append(i) # keep unique values
            else:
                doublon.append(i) # collect duplicates
        # if the draw contains a duplicate, announce it and draw again
        if doublon:
            talkBlocking("j ai trouver un doublon , je refais un tirage")
            d = d + 1
            doublon = []
        else:
            d = 0
            break
    # sort the numbers before speaking them
table1.sort()
talkBlocking(phrase)
talkBlocking(the+str(table1[0]))
talkBlocking(the+str(table1[1]))
talkBlocking(the+str(table1[2]))
talkBlocking(the+str(table1[3]))
talkBlocking(the+str(table1[4]))
talkBlocking(chance+str(random.randint(1,9)))
talkBlocking(fin)
################################################################################
# THE BOT REPEATS WORDS
################################################################################
def ParrotModFunc(ParrotModVal):
global ParrotMod
ParrotMod=ParrotModVal
chatBot.getResponse("SYSTEM PARROT " + str(ParrotModVal))
################################################################################
# JOUER AUX MOTS - PLAY WITH WORDS
################################################################################
def PlayWithWords(word):
FindImage(word)
talkBlocking(word)
for i in word.decode( "utf8" ):
if i.isalpha():
#print "SAY "+i
TimeNoSpeak="OFF"
folderLetterPic="pictures\\games\\alphabet\\"
print folderLetterPic+i+".jpg"
try:
r=image.displayFullScreen(folderLetterPic+i+".jpg",1)
except:
pass
talk(i)
sleep(2)
FindImage(word)
sleep(1)
image.exitFS()
image.closeAll()
TimeNoSpeak="ON"
################################################################################
#1. 2. 3. SUN !!! ( Grandmother's footsteps )
# SETUP :
################################################################################
global FourMeters
FourMeters=0.08
global InFrontOfMe
InFrontOfMe=0.28
################################################################################
ReculeTimer = Runtime.start("ReculeTimer","Clock")
ReculeTimer.setInterval(15000)
def ReculeTimerFunc(timedata):
#little fix to avoid speak loop
print openCvModule
global FaceDetected
global MoveHeadRandom
ear.pauseListening()
if FaceDetected==1:
if random.randint(1,2)==2:
RightArmAheadBehind()
#talk("recule")
chatBot.getResponse("SYSTEM YOU ARE TOO NEAR OF ME")
else:
RightArmAheadBehind()
#talk("recule")
chatBot.getResponse("SYSTEM YOU ARE TOO NEAR OF ME2")
else:
if random.randint(1,2)==2:
#talk("no visage")
chatBot.getResponse("SYSTEM I DONT SEE YOU")
else:
#talk("no visage")
chatBot.getResponse("SYSTEM I DONT SEE YOU2")
#WebkitSpeachReconitionFix.stopClock()
ReculeTimer.addListener("pulse", python.name, "ReculeTimerFunc")
########################################
#we ceate a separated thread : it is better to prevent slow down because of loops and sleep and opencv thread
########################################
class soleilEtape1(threading.Thread):
def __init__(self):
super(soleilEtape1, self).__init__()
print "Here we are"
self.running = False
def run(self):
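        # state machine on 'etape': 0 = initialize distance measuring,
        # 1 = wait until the player's face is about 4 m away, 2 = play
        # "1,2,3 soleil" rounds with motion detection, 3 = stop and exit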
global TimoutVar
global openCvModule
global FaceHadMoved
global DistanceOfTheFace
global Ispeak
global IcanMoveHeadRandom
global IcanMoveEyelids
global FourMeters
global InFrontOfMe
global etape
IcanMoveEyelids=0
sleep(3)
etape=0
ReculeTimerIsStarted=0
self.running = True
openCvModule="CalcDistance"
while self.running:
#print "dbg: MoveHeadRandom :",MoveHeadRandom
Ispeak=1
if etape==0:
print "opencv thread starting"
TimoutVar=-1
TimoutTimer.setInterval(60000)
WebkitSpeachReconitionFix.stopClock()
ear.pauseListening()
etape=1
TimoutTimer.startClock()
while etape==1 and DistanceOfTheFace!=10 and DistanceOfTheFace>FourMeters:
Ispeak=1
if ReculeTimerIsStarted==0:
ReculeTimer.startClock()
ReculeTimerIsStarted=1
if TimoutVar>=1:
chatBot.getResponse("SYSTEM TIMEOUT 123")
ReculeTimerIsStarted=0
ReculeTimer.stopClock()
Ispeak=0
sleep(10)
TimoutVar=-1
break
if etape==1 and DistanceOfTheFace!=0 and DistanceOfTheFace<=FourMeters:
talk("Ok tu es à 4 mètres environ")
ear.pauseListening()
talk("C'est parti!")
IcanMoveHeadRandom=0
ReculeTimer.stopClock()
TimoutTimer.stopClock()
sleep(7)
WebkitSpeachReconitionFix.stopClock()
ear.pauseListening()
openCvModule="123"
TimoutVar=1
etape=2
TimoutTimer.setInterval(6000)
TimoutTimer.startClock()
if etape==2 and (FaceHadMoved[0]!=0 or FaceHadMoved[1]!=0 or FaceHadMoved[2]!=0):
CauseMove=""
if FaceHadMoved[3]!=0:
CauseMove="De gauche à droite" # From lelt to right
if FaceHadMoved[4]!=0:
CauseMove="De haut en bas" # From up to bootm
if FaceHadMoved[5]!=0:
CauseMove="Basculé d'avant en arrière" # From ahead to behind
chatBot.getResponse("SYSTEM YOU LOSE BECAUSE " + CauseMove)
TimoutTimer.stopClock()
TimoutVar=-1
etape=1
ReculeTimerIsStarted=0
sleep(5)
openCvModule="CalcDistance"
DistanceOfTheFace=10
FaceHadMoved=[0,0,0,0,0,0]
if etape==2 and TimoutVar>0 and (FaceHadMoved[0]==0 and FaceHadMoved[1]==0 and FaceHadMoved[2]==0):
openCvModule="Nothing"
chatBot.getResponse("CACHE TES YEUX")
#talk("yeux")
sleep(4)
rest()
chatBot.getResponse("SYSTEM SOLEIL")
#talk("soleil")
WebkitSpeachReconitionFix.stopClock()
ear.pauseListening()
TimoutVar=-1
TimoutTimer.startClock()
openCvModule="123"
sleep(1)
FaceHadMoved=[0,0,0,0,0,0]
if etape==2 and DistanceOfTheFace>InFrontOfMe and DistanceOfTheFace!=10:
chatBot.getResponse("YOU WIN")
SuperThumb()
etape=3
if etape==3:
ReculeTimer.stopClock()
TimoutTimer.stopClock()
openCvModule="nothing"
etape=-1
self.running = False
break
#self.running = False
print "Stopped"
########################################
#end of the opencv thread
########################################
soleilEtape1 = soleilEtape1()
def soleil():
openCvInit()
sleep(2)
global MoveEyesRandom
global openCvModule
MoveEyesRandom=0
openCvModule="CalcDistance"
sleep(15)
if IsInmoovArduino==1:
head.rest()
soleilEtape1.start()
def stopJeux():
global etape
etape=3
try:
ReculeTimer.stopClock()
TimoutTimer.stopClock()
soleilEtape1.running = False
soleilEtape1.join()
except:
print "thread stop error"
| apache-2.0 | -1,827,207,560,315,940,000 | 24.260135 | 132 | 0.596174 | false |
giuspugl/COSMOMAP2 | tests/test_coarse_operator.py | 1 | 1345 | import scipy.linalg as la
from interfaces import *
from utilities import *
import numpy as np
def test_coarse_operator():
"""
Build and test the :class:`CoarseLO`.
"""
nt,npix,nb= 400,20,1
blocksize=nt/nb
d,pairs,phi,t,diag=system_setup(nt,npix,nb)
c=bash_colors()
runcase={'I':1,'QU':2,'IQU':3}
N=BlockLO(blocksize,t,offdiag=True)
diagN=lp.DiagonalOperator(diag*nt)
for pol in runcase.values():
npix = 20
processd = ProcessTimeSamples(pairs,npix,pol=pol ,phi=phi)
npix= processd.get_new_pixel[0]
P = SparseLO(npix,nt,pairs,pol=pol,angle_processed=processd)
Mbd = BlockDiagonalPreconditionerLO(processd ,npix,pol=pol)
B = BlockDiagonalLO(processd,npix,pol=pol)
x0=np.zeros(pol*npix)
b=P.T*N*d
A=P.T*N*P
tol=1.e-5
eigv ,Z=spla.eigsh(A,M=B,Minv=Mbd,k=5,which='SM',ncv=15,tol=tol)
r=Z.shape[1]
Zd=DeflationLO(Z)
# Build Coarse operator
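        # E is the r-by-r coarse (deflation) operator, essentially Z^T A Z;
        # CoarseLO applies its inverse through an eigendecomposition and is
        # checked against a dense solve of E below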
Az=Z*0.
for i in xrange(r):
Az[:,i]=A*Z[:,i]
invE=CoarseLO(Z,Az,r,apply='eig')
E=dgemm(Z,Az.T)
v=np.ones(r)
y=invE*v
v2=np.dot(E,invE*v)
y2=la.solve(E,v)
assert np.allclose(v,v2) and np.allclose(y2,y)
filter_warnings("ignore")
#test_coarse_operator()
| gpl-3.0 | -1,678,375,014,878,100,500 | 28.888889 | 72 | 0.5829 | false |
lmazuel/azure-sdk-for-python | azure-mgmt-compute/azure/mgmt/compute/v2015_06_15/models/key_vault_key_reference_py3.py | 1 | 1433 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class KeyVaultKeyReference(Model):
"""Describes a reference to Key Vault Key.
All required parameters must be populated in order to send to Azure.
:param key_url: Required. The URL referencing a key encryption key in Key
Vault.
:type key_url: str
:param source_vault: Required. The relative URL of the Key Vault
containing the key.
:type source_vault: ~azure.mgmt.compute.v2015_06_15.models.SubResource
"""
_validation = {
'key_url': {'required': True},
'source_vault': {'required': True},
}
_attribute_map = {
'key_url': {'key': 'keyUrl', 'type': 'str'},
'source_vault': {'key': 'sourceVault', 'type': 'SubResource'},
}
def __init__(self, *, key_url: str, source_vault, **kwargs) -> None:
super(KeyVaultKeyReference, self).__init__(**kwargs)
self.key_url = key_url
self.source_vault = source_vault
| mit | -2,373,742,063,998,240,300 | 33.95122 | 77 | 0.589672 | false |
aparo/django-elasticsearch | tests/testproj/myapp/tests.py | 1 | 7873 | """
Test suite for django-elasticsearch.
"""
from django.test import TestCase
from testproj.myapp.models import Entry, Blog, StandardAutoFieldModel, Person, TestFieldModel, EModel
import datetime
import time
class DjangoESTest(TestCase):
# multi_db = True
# def test_add_and_delete_blog(self):
# blog1 = Blog(title="blog1")
# blog1.save()
# self.assertEqual(Blog.objects.count(), 1)
# blog2 = Blog(title="blog2")
# self.assertEqual(blog2.pk, None)
# blog2.save()
# self.assertNotEqual(blog2.pk, None)
# self.assertEqual(Blog.objects.count(), 2)
# blog2.delete()
# self.assertEqual(Blog.objects.count(), 1)
# blog1.delete()
# self.assertEqual(Blog.objects.count(), 0)
def test_simple_get(self):
blog1 = Blog(title="blog1")
blog1.save()
blog2 = Blog(title="blog2")
blog2.save()
self.assertEqual(Blog.objects.count(), 2)
self.assertEqual(
Blog.objects.get(title="blog2"),
blog2
)
self.assertEqual(
Blog.objects.get(title="blog1"),
blog1
)
def test_simple_filter(self):
blog1 = Blog(title="same title")
blog1.save()
blog2 = Blog(title="same title")
blog2.save()
blog3 = Blog(title="another title")
blog3.save()
self.assertEqual(Blog.objects.count(), 3)
blog4 = Blog.objects.get(pk=blog1.pk)
self.assertEqual(blog4, blog1)
self.assertEqual(
Blog.objects.filter(title="same title").count(),
2
)
self.assertEqual(
Blog.objects.filter(title="same title", pk=blog1.pk).count(),
1
)
self.assertEqual(
Blog.objects.filter(title__startswith="same").count(),
2
)
self.assertEqual(
Blog.objects.filter(title__istartswith="SAME").count(),
2
)
self.assertEqual(
Blog.objects.filter(title__endswith="title").count(),
3
)
self.assertEqual(
Blog.objects.filter(title__iendswith="Title").count(),
3
)
self.assertEqual(
Blog.objects.filter(title__icontains="same").count(),
2
)
self.assertEqual(
Blog.objects.filter(title__contains="same").count(),
2
)
self.assertEqual(
Blog.objects.filter(title__iexact="same Title").count(),
2
)
self.assertEqual(
Blog.objects.filter(title__regex="s.me.*").count(),
2
)
self.assertEqual(
Blog.objects.filter(title__iregex="S.me.*").count(),
2
)
def test_change_model(self):
blog1 = Blog(title="blog 1")
blog1.save()
self.assertEqual(Blog.objects.count(), 1)
blog1.title = "new title"
blog1.save()
self.assertEqual(Blog.objects.count(), 1)
bl = Blog.objects.all()[0]
self.assertEqual(blog1.title, bl.title)
bl.delete()
# def test_dates_ordering(self):
# now = datetime.datetime.now()
# before = now - datetime.timedelta(days=1)
#
# entry1 = Entry(title="entry 1", date_published=now)
# entry1.save()
#
# entry2 = Entry(title="entry 2", date_published=before)
# entry2.save()
#
# self.assertEqual(
# list(Entry.objects.order_by('-date_published')),
# [entry1, entry2]
# )
#
## self.assertEqual(
## list(Entry.objects.order_by('date_published')),
## [entry2, entry1]
## )
#
#
## def test_dates_less_and_more_than(self):
## now = datetime.datetime.now()
## before = now + datetime.timedelta(days=1)
## after = now - datetime.timedelta(days=1)
##
## entry1 = Entry(title="entry 1", date_published=now)
## entry1.save()
##
## entry2 = Entry(title="entry 2", date_published=before)
## entry2.save()
##
## entry3 = Entry(title="entry 3", date_published=after)
## entry3.save()
##
## a = list(Entry.objects.filter(date_published=now))
## self.assertEqual(
## list(Entry.objects.filter(date_published=now)),
## [entry1]
## )
## self.assertEqual(
## list(Entry.objects.filter(date_published__lt=now)),
## [entry3]
## )
## self.assertEqual(
## list(Entry.objects.filter(date_published__gt=now)),
## [entry2]
## )
#
# def test_complex_queries(self):
# p1 = Person(name="igor", surname="duck", age=39)
# p1.save()
# p2 = Person(name="andrea", surname="duck", age=29)
# p2.save()
# self.assertEqual(
# Person.objects.filter(name="igor", surname="duck").count(),
# 1
# )
# self.assertEqual(
# Person.objects.filter(age__gte=20, surname="duck").count(),
# 2
# )
#
# def test_fields(self):
# t1 = TestFieldModel(title="p1",
# mlist=["ab", "bc"],
# mdict = {'a':23, "b":True },
# )
# t1.save()
#
# t = TestFieldModel.objects.get(id=t1.id)
# self.assertEqual(t.mlist, ["ab", "bc"])
# self.assertEqual(t.mlist_default, ["a", "b"])
# self.assertEqual(t.mdict, {'a':23, "b":True })
# self.assertEqual(t.mdict_default, {"a": "a", 'b':1})
#
#
# def test_embedded_model(self):
# em = EModel(title="1", pos = 1)
# em2 = EModel(title="2", pos = 2)
# t1 = TestFieldModel(title="p1",
# mlist=[em, em2],
# mdict = {'a':em, "b":em2 },
# )
# t1.save()
#
# t = TestFieldModel.objects.get(id=t1.id)
# self.assertEqual(len(t.mlist), 2)
# self.assertEqual(t.mlist[0].test_func(), 1)
# self.assertEqual(t.mlist[1].test_func(), 2)
#
# def test_simple_foreign_keys(self):
# now = datetime.datetime.now()
#
# blog1 = Blog(title="Blog")
# blog1.save()
# entry1 = Entry(title="entry 1", blog=blog1)
# entry1.save()
# entry2 = Entry(title="entry 2", blog=blog1)
# entry2.save()
# self.assertEqual(Entry.objects.count(), 2)
#
# for entry in Entry.objects.all():
# self.assertEqual(
# blog1,
# entry.blog
# )
#
# blog2 = Blog(title="Blog")
# blog2.save()
# entry3 = Entry(title="entry 3", blog=blog2)
# entry3.save()
# self.assertEqual(
# # it's' necessary to explicitly state the pk here
# len( list(Entry.objects.filter(blog=blog1.pk))),
# len([entry1, entry2])
# )
#
#
## def test_foreign_keys_bug(self):
## blog1 = Blog(title="Blog")
## blog1.save()
## entry1 = Entry(title="entry 1", blog=blog1)
## entry1.save()
## self.assertEqual(
## # this should work too
## list(Entry.objects.filter(blog=blog1)),
## [entry1]
## )
#
## def test_standard_autofield(self):
##
## sam1 = StandardAutoFieldModel(title="title 1")
## sam1.save()
## sam2 = StandardAutoFieldModel(title="title 2")
## sam2.save()
##
## self.assertEqual(
## StandardAutoFieldModel.objects.count(),
## 2
## )
##
## sam1_query = StandardAutoFieldModel.objects.get(title="title 1")
## self.assertEqual(
## sam1_query.pk,
## sam1.pk
## )
##
## sam1_query = StandardAutoFieldModel.objects.get(pk=sam1.pk)
##
#
| bsd-3-clause | 247,389,388,771,741,630 | 29.397683 | 101 | 0.513019 | false |
dyf102/Gomoku-online | server/service/chat_service.py | 1 | 2797 | import logging
import time
from baseservice import BaseService, handler, register
LOBBY_CHAT_ID = 0
class ChatService(BaseService):
# TODO: Add FIFO policy to the dict to limit the msg in memory
def __init__(self):
BaseService.__init__(self, 'ChatService')
# init
self.chat_room_list = []
self.chat_room = {}
self.chat_root_content = {}
# next cid
self.next_cid = LOBBY_CHAT_ID
# init lobby
self.make_new_chat_room()
self.load_handlers()
def make_new_chat_room(self, uid=None):
cid = self.next_cid
self.next_cid += 1
self.chat_room_list.append(cid)
self.chat_room[cid] = [] if uid is None else [uid]
self.chat_root_content[cid] = []
def load_handlers(self):
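        # each nested function below is wrapped by @handler and registered on
        # this service via @register(self) (both from baseservice), which
        # appears to expose it as a named service endpoint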
@register(self)
@handler
def join_chat_room(uid, cid):
if cid in self.chat_room_list:
self.chat_room[cid].append(uid)
return {'code': 200, 'uid': uid, 'cid': cid}
else:
return {'code': 404}
self.add_handler(join_chat_room)
@register(self)
@handler
def send_msg(uid, username, cid, msg):
if cid not in self.chat_root_content:
                return {'code': 404, 'msg': 'cannot send msg: room does not exist'}
            if uid not in self.chat_room[cid]:
                return {'code': 404, 'msg': 'cannot send msg: user is not in the room'}
if len(msg) >= 100:
return {'code': 404, 'msg': 'msg is too long(less 100 characters)'}
self.chat_root_content[cid].append(
{'time': time.strftime("%Y-%m-%d %H:%M"), 'username': username, 'msg': msg})
return {'code': 200, 'msg': ''}
@register(self)
@handler
def get_msg(cid, uid):
if cid not in self.chat_root_content:
                return {'code': 404, 'msg': 'cannot get msg: room does not exist'}
            if uid not in self.chat_room[cid]:
                return {'code': 404, 'msg': 'uid {} is not in the room cid: {}'.format(uid, cid)}
content_list = self.chat_root_content[cid]
# size = min(len(content_list), 20) # avoid size exceed
msgs = content_list # self.chat_root_content[cid][-size:]
return {'code': 200, 'cid': cid, 'data': msgs, 'token': str(hash(str(msgs)))}
@register(self)
@handler
def get_room_msg_list_hash(cid):
if cid not in self.chat_root_content:
                return {'code': 404, 'msg': 'room does not exist'}
return {'code': 200, 'token': hash(str(self.chat_root_content[cid]))}
| apache-2.0 | 91,080,623,419,363,280 | 37.315068 | 103 | 0.545585 | false |
gautamk/confidant | setup.py | 1 | 1491 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
try:
from setuptools import setup, find_packages
except ImportError:
from distutils.core import setup
with open('README.md') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read().replace('.. :changelog:', '')
requirements = [
'boto == 2.38.0',
'Beaker==1.7.0',
'six==1.9.0'
]
test_requirements = requirements + [
'mock==1.0.1',
'doublex==1.8.2'
]
setup(
name='confidant',
version='0.1',
description="Simple configuration management",
long_description=readme + '\n\n' + history,
author="Gautam Kumar",
author_email='[email protected]',
url='https://github.com/gautamk/confidant',
packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
include_package_data=True,
install_requires=requirements,
license="MIT",
zip_safe=False,
keywords='confidant',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
test_suite='confidant.tests',
tests_require=test_requirements,
)
| mit | 1,650,337,808,129,519,400 | 26.109091 | 81 | 0.612341 | false |
adamnovak/hgvm-builder | scripts/histogram.py | 1 | 27911 | #!/usr/bin/env python2.7
"""
histogram: plot a histogram of a file of numbers. Numbers can be floats, one per
line. Lines with two numbers are interpreted as pre-counted, with the number of
repeats of the first being given by the second.
Multiple instances of the same value in a category will be merged by adding
weights.
Re-uses sample code and documentation from
<http://users.soe.ucsc.edu/~karplus/bme205/f12/Scaffold.html>
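
A hypothetical pre-counted input file:

    1.5 10
    2.5 4

gives the value 1.5 a weight of 10 and 2.5 a weight of 4. A typical
invocation would be: histogram.py data.txt --bins 20 --title "Example"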
"""
import argparse, sys, os, itertools, math, numpy, collections
import matplotlib, matplotlib.ticker
def intify(x):
"""
Turn an integral float into an int, if applicable.
"""
if isinstance(x, float) and x.is_integer():
return int(x)
return x
def draw_labels(bin_counts, bar_patches, size=None):
"""
Put the given count labels on the given bar patches, on the current axes.
Takes an optional font size.
"""
from matplotlib import pyplot
# Grab the axes
axes = pyplot.gca()
for bin_count, bar_patch in itertools.izip(bin_counts, bar_patches):
if(bin_count.is_integer()):
# Intify if applicable
bin_count = int(bin_count)
# Label each bar
if bin_count == 0:
# Except those for empty bins
continue
# Find the center of the bar
bar_center_x = bar_patch.get_x() + bar_patch.get_width() / float(2)
# And its height
bar_height = bar_patch.get_height()
# Label the bar
axes.annotate("{:,}".format(bin_count), (bar_center_x, bar_height),
ha="center", va="bottom", rotation=45, xytext=(0, 5),
textcoords="offset points", size=size)
def parse_args(args):
"""
Takes in the command-line arguments list (args), and returns a nice argparse
result with fields for all the options.
Borrows heavily from the argparse documentation examples:
<http://docs.python.org/library/argparse.html>
"""
# The command line arguments start with the program name, which we don't
# want to treat as an argument for argparse. So we remove it.
args = args[1:]
# Construct the parser (which is stored in parser)
# Module docstring lives in __doc__
# See http://python-forum.com/pythonforum/viewtopic.php?f=3&t=36847
# And a formatter class so our examples in the docstring look good. Isn't it
# convenient how we already wrapped it to 80 characters?
# See http://docs.python.org/library/argparse.html#formatter-class
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
# Now add all the options to it
parser.add_argument("data", nargs="+",
help="the file to read")
parser.add_argument("--redPortion", type=float, action="append", default=[],
help="portion of each bin to color red")
parser.add_argument("--redWeight", type=float, action="append", default=[],
help="value to plot in red in each bin")
parser.add_argument("--title", default="Histogram",
help="the plot title")
parser.add_argument("--x_label", default="Value",
help="the plot title")
parser.add_argument("--y_label", default="Number of Items (count)",
help="the plot title")
parser.add_argument("--bins", type=int, default=10,
help="the number of histogram bins")
parser.add_argument("--x_min", "--min", type=float, default=None,
help="minimum value allowed")
parser.add_argument("--x_max", "--max", type=float, default=None,
help="maximum value allowed")
parser.add_argument("--y_min", type=float, default=None,
help="minimum count on plot")
parser.add_argument("--y_max", type=float, default=None,
help="maximum count on plot")
parser.add_argument("--cutoff", type=float, default=None,
help="note portion above and below a value, and draw a vertical line")
parser.add_argument("--font_size", type=int, default=12,
help="the font size for text")
parser.add_argument("--categories", nargs="+", default=None,
help="categories to plot, in order")
parser.add_argument("--category_labels", "--labels", nargs="+",
default=[],
help="labels for all categories or data files, in order")
parser.add_argument("--colors", nargs="+", default=[],
help="use the specified Matplotlib colors per category or file")
parser.add_argument("--styles", nargs="+", default=[],
help="use the specified line styles per category or file")
parser.add_argument("--cumulative", action="store_true",
help="plot cumulatively")
parser.add_argument("--log", action="store_true",
help="take the base-10 logarithm of values before plotting histogram")
parser.add_argument("--log_counts", "--logCounts", action="store_true",
help="take the logarithm of counts before plotting histogram")
parser.add_argument("--fake_zero", action="store_true",
help="split lines where points would be 0")
parser.add_argument("--split_at_zero", action="store_true",
help="split lines between positive and negative")
parser.add_argument("--stats", action="store_true",
help="print data stats")
parser.add_argument("--save",
help="save figure to the given filename instead of showing it")
parser.add_argument("--dpi", type=int, default=300,
help="save the figure with the specified DPI, if applicable")
parser.add_argument("--sparse_ticks", action="store_true",
help="use sparse tick marks on both axes")
parser.add_argument("--sparse_x", action="store_true",
help="use sparse tick marks on X axis")
parser.add_argument("--sparse_y", action="store_true",
help="use sparse tick marks on Y axis")
parser.add_argument("--ticks", nargs="+", default=None,
help="use particular X tick locations")
parser.add_argument("--scientific_x", action="store_true",
help="use scientific notation on the X axis")
parser.add_argument("--scientific_y", action="store_true",
help="use scientific notation on the Y axis")
parser.add_argument("--label", action="store_true",
help="label bins with counts")
parser.add_argument("--label_size", type=float,
help="bin count label font size")
parser.add_argument("--no_n", dest="show_n", action="store_false",
help="don't add n value to title")
parser.add_argument("--normalize", action="store_true",
help="normalize to total weight of 1")
parser.add_argument("--line", action="store_true",
help="draw a line instead of a barchart")
parser.add_argument("--no_zero_ends", dest="zero_ends", default=True,
action="store_false",
help="don't force line ends to zero")
parser.add_argument("--legend_overlay", default=None,
help="display the legend overlayed on the graph at this location")
parser.add_argument("--no_legend", action="store_true",
help="don't display a legend when one would otherwise be dispalyed")
parser.add_argument("--points", action="store_true",
help="draw points instead of a barchart")
parser.add_argument("--width", type=float, default=8,
help="plot width in inches")
parser.add_argument("--height", type=float, default=6,
help="plot height in inches")
return parser.parse_args(args)
def filter2(criterion, key_list, other_list):
"""
Filter two lists of corresponding items based on some function of the first
list.
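
    For example, filter2(lambda k: k > 0, [1, -2, 3], ['a', 'b', 'c'])
    returns ([1, 3], ['a', 'c']).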
"""
# Make the output lists
out1 = []
out2 = []
for key_val, other_val in itertools.izip(key_list, other_list):
# Pair up the items
if criterion(key_val):
# The key passed the filter, so take both.
out1.append(key_val)
out2.append(other_val)
return out1, out2
def filter_n(*args):
"""
Filter any number of lists of corresponding items based on some function of
the first list.
"""
filter_function = args[0]
to_filter = args[1:]
to_return = [list() for _ in to_filter]
for i in xrange(len(to_filter[0])):
# For each run of entries
if filter_function(to_filter[0][i]):
# If the key passes the filter
for j in xrange(len(to_filter)):
# Keep the whole row
if i < len(to_filter[j]):
to_return[j].append(to_filter[j][i])
# return all the lists as a tuple, which unpacks as multiple return values
return tuple(to_return)
def main(args):
"""
Parses command line arguments, and plots a histogram.
"args" specifies the program arguments, with args[0] being the executable
name. The return value should be used as the program's exit code.
"""
options = parse_args(args) # This holds the nicely-parsed options object
if options.save is not None:
# Set up plot for use in headless mode if we just want to save. See
# <http://stackoverflow.com/a/2766194/402891>. We need to do this before
# we grab pyplot.
matplotlib.use('Agg')
from matplotlib import pyplot
# Make the figure with the appropriate size and DPI.
pyplot.figure(figsize=(options.width, options.height), dpi=options.dpi)
# This will hold a dict of dicts from data value to weight, by category or
# file name. Later gets converted to a dict of lists of (value, weight)
# pairs, aggregated by value.
all_data = collections.defaultdict(lambda: collections.defaultdict(float))
for data_filename in options.data:
for line_number, line in enumerate(open(data_filename)):
# Split each line
parts = line.split()
if len(parts) == 1:
# This is one instance of a value
all_data[data_filename][float(parts[0])] += 1.0
elif len(parts) == 2:
if len(options.data) > 1:
# This is multiple instances of a value, and we are doing
# categories by filename.
all_data[data_filename][float(parts[0])] += float(parts[1])
else:
try:
value = float(parts[0])
# If the first column is a number, this is value, weight
# data.
all_data[data_filename][value] += float(parts[1])
except ValueError:
# This is category, instance data, since first column
# isn't a number.
all_data[parts[0]][float(parts[1])] += 1.0
elif len(parts) == 3:
# This is category, instance, weight data
all_data[parts[0]][float(parts[1])] += float(parts[2])
else:
raise Exception("Wrong number of fields on {} line {}".format(
data_filename, line_number + 1))
for category in all_data.iterkeys():
# Strip NaNs and Infs and weight-0 entries, and convert to a dict of
# lists of tuples.
all_data[category] = [(value, weight) for (value, weight)
in all_data[category].iteritems() if
value < float("+inf") and value > float("-inf") and weight > 0]
# Calculate our own bins, over all the data. First we need the largest and
    # smallest observed values. (The nested fors in the comprehension must be
    # written in normal, outer-loop-first order.)
bin_min = options.x_min if options.x_min is not None else min((pair[0]
for pair_list in all_data.itervalues() for pair in pair_list))
bin_max = options.x_max if options.x_max is not None else max((pair[0]
for pair_list in all_data.itervalues() for pair in pair_list))
if options.log:
# Do our bins in log space, so they look evenly spaced on the plot.
bin_max = math.log10(bin_max)
bin_min = math.log10(bin_min)
# Work out what step we should use between bin edges
bin_step = (bin_max - bin_min) / float(options.bins)
# Work out where the bin edges should be
bins = [bin_min + bin_step * i for i in xrange(options.bins + 1)]
# Work out where the bin centers should be
bin_centers = [left_edge + bin_step / 2.0 for left_edge in bins[:-1]]
if options.log:
# Bring bins back into data space
bins = [math.pow(10, x) for x in bins]
bin_centers = [math.pow(10, x) for x in bin_centers]
if options.categories is not None:
# Order data by category order
ordered_data = [(category, all_data[category]) for category in
options.categories]
elif len(options.data) > 1:
# Order data by file order
ordered_data = [(filename, all_data[filename]) for filename in
options.data]
else:
# Order arbitrarily
ordered_data = list(all_data.iteritems())
for (category, data_and_weights), label, color, line_style, marker in \
itertools.izip(ordered_data,
itertools.chain(options.category_labels, itertools.repeat(None)),
itertools.chain(options.colors, itertools.cycle(
['b', 'g', 'r', 'c', 'm', 'y', 'k'])),
itertools.chain(options.styles, itertools.cycle(
['-', '--', ':', '-.'])),
itertools.cycle(
['o', 'v', '^', '<', '>', 's', '+', 'x', 'D', '|', '_'])):
# For every category and its display properties...
if len(data_and_weights) == 0:
# Skip categories with no data
continue
# Split out the data and the weights for this category/file
data = [pair[0] for pair in data_and_weights]
weights = [pair[1] for pair in data_and_weights]
# For each set of data and weights that we want to plot, and the label
# it needs (or None)...
# We may want to normalize by total weight
# We need a float here so we don't get int division later.
total_weight_overall = float(0)
for value, weight in itertools.izip(data, weights):
# Sum up the weights overall
total_weight_overall += weight
if options.normalize and total_weight_overall > 0:
# Normalize all the weight to 1.0 total weight.
weights = [w / total_weight_overall for w in weights]
# Apply the limits after normalization
if options.x_min is not None:
data, weights = filter2(lambda x: x >= options.x_min, data, weights)
if options.x_max is not None:
data, weights = filter2(lambda x: x <= options.x_max, data, weights)
# Work out how many samples there are left within the chart area
samples = intify(sum(weights))
if options.stats:
# Compute and report some stats
data_min = numpy.min(data)
data_min_count = weights[numpy.argmin(data)]
data_max = numpy.max(data)
data_max_count = weights[numpy.argmax(data)]
# The mode is the data item with maximal count
data_mode = data[numpy.argmax(weights)]
data_mode_count = numpy.max(weights)
# Intify floats pretending to be ints
data_min = intify(data_min)
data_min_count = intify(data_min_count)
data_max = intify(data_max)
data_max_count = intify(data_max_count)
data_mode = intify(data_mode)
data_mode_count = intify(data_mode_count)
# TODO: median, mean
print("Min: {} occurs {} times".format(data_min, data_min_count))
print("Mode: {} occurs {} times".format(data_mode, data_mode_count))
print("Max: {} occurs {} times".format(data_max, data_max_count))
if options.cutoff is not None:
# Work out how much weight is above and below the cutoff
above = 0
below = 0
for value, weight in itertools.izip(data, weights):
if value > options.cutoff:
above += weight
else:
below += weight
# Report the results wrt the cutoff.
print "{} above {}, {} below".format(
above / total_weight_overall, options.cutoff,
below / total_weight_overall)
if options.line or options.points:
# Do histogram binning manually
# Do the binning
bin_values, _ = numpy.histogram(data, bins=bins, weights=weights)
if options.cumulative:
# Calculate cumulative weights for each data point
bin_values = numpy.cumsum(bin_values)
if options.zero_ends:
if options.cumulative:
# Pin things to 0 on the low end and max on the high
all_bin_centers = [bins[0]] + list(bin_centers) + [bins[-1]]
all_bin_values = [0] + list(bin_values) + [sum(weights)]
else:
# Pin things to 0 on the end
all_bin_centers = [bins[0]] + list(bin_centers) + [bins[-1]]
all_bin_values = [0] + list(bin_values) + [0]
else:
all_bin_centers = bin_centers
all_bin_values = bin_values
            # Now we make a bunch of series for each line, potentially. This
# holds pairs of (centers, values) lists.
series = []
if options.fake_zero or options.split_at_zero:
# We need to split into multiple series, potentially.
# This holds the series we are working on.
this_series = ([], [])
# What was the last bin we saw?
last_bin = 0
for center, value in itertools.izip(all_bin_centers,
all_bin_values):
# For every point on the line, see if we need to break here
# because it's zero.
# This logic gets complicated so we do some flags.
# Do we keep this point?
includeSample = True
# Do we split the line?
breakSeries = False
if options.fake_zero and value == 0:
# We don't want this sample, and we need to break the
# series
includeSample = False
breakSeries = True
if options.split_at_zero and last_bin < 0 and center > 0:
# We crossed the y axis, or we went down to the x axis.
# We can maybe keep the sample, and we need to break the
# series
breakSeries = True
if breakSeries and len(this_series[0]) > 0:
# Finish the series and start another
series.append(this_series)
this_series = ([], [])
if includeSample:
# Stick this point in the series
this_series[0].append(center)
this_series[1].append(value)
last_bin = center
if len(this_series[0]) > 0:
# Finish the last series
series.append(this_series)
else:
# Just do one series
series.append((all_bin_centers, all_bin_values))
# We only want to label the first series in the legend, so we'll
# none this out after we use it.
label_to_use = label
for series_centers, series_values in series:
# Plot every series
if options.line and options.points:
# Do the plots as lines with points
pyplot.plot(series_centers, series_values,
label=label_to_use, linestyle=line_style, color=color,
marker=marker)
label_to_use = None
elif options.line:
# Do the plots as lines only
pyplot.plot(series_centers, series_values,
label=label_to_use, linestyle=line_style, color=color)
label_to_use= None
elif options.points:
# Do the plot as points.
pyplot.scatter(series_centers, series_values,
label=label_to_use, color=color, marker=marker)
label_to_use = None
if options.log_counts:
# Log the Y axis
pyplot.yscale('log')
if options.split_at_zero:
# Put a big vertical line.
pyplot.axvline(linewidth=2, color="k")
else:
# Do the plot. Do cumulative, or logarithmic Y axis, optionally.
# Keep the bin total counts and the bar patches.
bin_counts, _, bar_patches = pyplot.hist(data, bins,
cumulative=options.cumulative, log=options.log_counts,
weights=weights, alpha=0.5 if len(options.data) > 1 else 1.0,
label=label)
if options.cutoff is not None:
# Put a vertical line at the cutoff.
pyplot.axvline(x=options.cutoff, color="r")
if len(options.redPortion) > 0:
# Plot a red histogram over that one, modified by redPortion.
red_data = []
red_weights = []
for item, weight in itertools.izip(data, weights):
# For each item, what bin is it in?
bin_number = int(item / bin_step)
if bin_number < len(options.redPortion):
# We have a potentially nonzero scaling factor. Apply that.
weight *= options.redPortion[bin_number]
# Keep this item.
red_data.append(item)
red_weights.append(weight)
# Plot the re-weighted data with the same bins, in red
red_counts, _, red_patches = pyplot.hist(red_data, bins,
cumulative=options.cumulative, log=options.log_counts,
weights=red_weights, color='#FF9696', hatch='/'*6)
if options.label:
# Label all the red portion-based bars
draw_labels(red_counts, red_patches, size=options.label_size)
if len(options.redWeight) > 0:
# Plot a red histogram over that one, modified by redPortion.
# Grab an item in each bin
items = bins[0:len(options.redWeight)]
# Plot the re-weighted data with the same bins, in red
red_counts, _, red_patches = pyplot.hist(items, bins,
cumulative=options.cumulative, log=options.log_counts,
weights=options.redWeight, color='#FF9696', hatch='/'*6)
if options.label:
# Label all the red weight-based bars
draw_labels(red_counts, red_patches, size=options.label_size)
# StackOverflow provides us with font sizing. See
# <http://stackoverflow.com/q/3899980/402891>
matplotlib.rcParams.update({"font.size": options.font_size})
if options.show_n:
# Add an n value to the title
options.title += " (n = {:,})".format(samples)
pyplot.title(options.title)
pyplot.xlabel(options.x_label)
pyplot.ylabel(options.y_label)
if options.log:
# Set the X axis to log mode
pyplot.xscale('log')
if options.x_min is not None:
# Set only the lower x limit
pyplot.xlim((options.x_min, pyplot.xlim()[1]))
if options.x_max is not None:
# Set only the upper x limit
pyplot.xlim((pyplot.xlim()[0], options.x_max))
if options.y_min is not None:
# Set only the lower y limit
pyplot.ylim((options.y_min, pyplot.ylim()[1]))
elif options.log_counts:
# Make sure the default lower Y limit is 1 on log plots.
pyplot.ylim((1, pyplot.ylim()[1]))
if options.y_max is not None:
# Set only the upper y limit
pyplot.ylim((pyplot.ylim()[0], options.y_max))
if options.sparse_ticks or options.sparse_x:
# Set up X tickmarks to have only 2 per axis, at the ends
pyplot.gca().xaxis.set_major_locator(
matplotlib.ticker.FixedLocator(pyplot.xlim()))
if options.sparse_ticks or options.sparse_y:
# Same for the Y axis
pyplot.gca().yaxis.set_major_locator(
matplotlib.ticker.FixedLocator(pyplot.ylim()))
if options.ticks is not None:
# Use these particular X ticks instead
pyplot.gca().xaxis.set_major_locator(
matplotlib.ticker.FixedLocator(
[float(pos) for pos in options.ticks]))
# Make sure tick labels don't overlap. See
# <http://stackoverflow.com/a/20599129/402891>
pyplot.gca().tick_params(axis="x", pad=0.5 * options.font_size)
# Make our own scientific notation formatter since set_scientific is not
# working
sci_formatter = matplotlib.ticker.FormatStrFormatter("%1.2e")
if options.scientific_x:
# Force scientific notation on X axis
pyplot.gca().xaxis.set_major_formatter(sci_formatter)
if options.scientific_y:
# Force scientific notation on Y axis
pyplot.gca().yaxis.set_major_formatter(sci_formatter)
if options.label:
# Label all the normal bars
draw_labels(bin_counts, bar_patches, size=options.label_size)
# Make everything fit
pyplot.tight_layout()
if len(options.category_labels) > 0 and not options.no_legend:
# We need a legend
if options.legend_overlay is None:
# We want the default legend, off to the right of the plot.
# First shrink the plot to make room for it.
# TODO: automatically actually work out how big it will be.
bounds = pyplot.gca().get_position()
pyplot.gca().set_position([bounds.x0, bounds.y0,
bounds.width * 0.5, bounds.height])
# Make the legend
pyplot.legend(loc="center left", bbox_to_anchor=(1.05, 0.5))
else:
# We want the legend on top of the plot at the user-specified
# location, and we want the plot to be full width.
pyplot.legend(loc=options.legend_overlay)
if options.save is not None:
# Save the figure to a file
pyplot.savefig(options.save, dpi=options.dpi)
else:
# Show the figure to the user
pyplot.show()
return 0
if __name__ == "__main__" :
sys.exit(main(sys.argv))
| apache-2.0 | -3,313,469,020,805,510,000 | 40.720478 | 80 | 0.561929 | false |
meco-group/omg-tools | omgtools/problems/gcodeschedulerproblem.py | 1 | 66507 | # This file is part of OMG-tools.
#
# OMG-tools -- Optimal Motion Generation-tools
# Copyright (C) 2016 Ruben Van Parys & Tim Mercy, KU Leuven.
# All rights reserved.
#
# OMG-tools is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
from __future__ import print_function
from .problem import Problem
from .gcodeproblem import GCodeProblem
from ..basics.shape import Rectangle, Square, Circle
from ..environment.environment import Environment
from ..basics.shape import Rectangle, Ring
from ..basics.geometry import distance_between_points, point_in_polyhedron
from ..basics.spline import BSplineBasis, BSpline
from ..basics.spline_extra import concat_splines, running_integral, definite_integral
from casadi import MX, Function, nlpsol, vertcat
from scipy.interpolate import interp1d
import scipy.linalg as la
import numpy as np
import pickle
import time
import warnings
class GCodeSchedulerProblem(Problem):
def __init__(self, tool, GCode, options=None, **kwargs):
options = options or {}
# split large circle segments in multiple segments
self.split_circle = kwargs['split_circle'] if 'split_circle' in kwargs else False
# use tight tolerance in the middle of a segment and a wider one at the borders
self.variable_tolerance = kwargs['variable_tolerance'] if 'variable_tolerance' in kwargs else False
# minimal required length of the segment to split it in three parts
self.split_length = kwargs['split_length'] if 'split_length' in kwargs else 0.
# % of total segment length for the length of the start and end part, when using variable tolerance
self.split_small = kwargs['split_small'] if 'split_small' in kwargs else 0.1
# amount of segments to combine
self.n_segments = kwargs['n_segments'] if 'n_segments' in kwargs else 1
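        # hypothetical usage:
        #   GCodeSchedulerProblem(tool, GCode, variable_tolerance=True,
        #                         split_length=5., n_segments=2)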
self._n_segments = self.n_segments # save original value (for plotting)
environment = self.get_environment(GCode, tool)
# pass on environment and tool to Problem constructor,
# generates self.vehicles and self.environment
# self.vehicles[0] = tool
Problem.__init__(self, tool, environment, options, label='schedulerproblem')
self.curr_state = self.vehicles[0].prediction['state'] # initial vehicle position
self.goal_state = self.vehicles[0].poseT # overall goal
self.problem_options = options # e.g. selection of problem type (freeT, fixedT)
self.problem_options['freeT'] = True # only this one is available
self.n_current_block = 0 # number of the block that the tool will follow next/now
self.start_time = 0.
self.update_times=[]
self.motion_time_log = [] # save the required motion times
self.segments = []
if not isinstance(self.vehicles[0].shapes[0], Circle):
raise RuntimeError('Vehicle shape can only be a Circle when solving a GCodeSchedulerProblem')
def init(self):
# otherwise the init of Problem is called, which is not desirable
pass
def initialize(self, current_time):
self.local_problem.initialize(current_time)
def reinitialize(self):
# this function is called at the start and creates the first local problem
self.segments = []
# select the next blocks of GCode that will be handled
# if less than self.n_segments are left, only the remaining blocks
# will be selected
self.segments = self.environment.room[
self.n_current_block:self.n_current_block+self.n_segments]
# if there is only one segment, save the next one to check when the tool enters the next segment
if self.n_segments == 1:
if len(self.environment.room) > 1:
self.next_segment = self.environment.room[self.n_current_block+1]
# total number of considered segments in the provided GCode
self.cnt = len(self.environment.room)-1
# get initial guess for trajectories (based on central line, with bang-bang jerk)
# and motion times, for all segments
init_guess, self.motion_times = self.get_init_guess()
# get a problem representation of the combination of segments
# the gcodeschedulerproblem (self) has a local_problem (gcodeproblem) at each moment
self.local_problem = self.generate_problem()
# pass on init_guess
self.local_problem.reset_init_guess(init_guess)
def solve(self, current_time, update_time):
# solve the local problem with a receding horizon,
# and update segments if necessary
# update current state
if not hasattr(self.vehicles[0], 'signals'):
# first iteration
self.curr_state = self.vehicles[0].prediction['state']
else:
# all other iterations
self.curr_state = self.vehicles[0].signals['state'][:,-1]
# did we move far enough over the current segment yet?
print('Current GCode block: ', self.n_current_block)
segments_valid = self.check_segments()
if not segments_valid:
# add new segment and remove first one
if hasattr(self, 'no_update') and self.no_update:
# don't update number or segments, because the deployer wants to
# re-compute the same segment, that e.g. was infeasible
# this boolean is set by the deployer in deployer.update_segment()
# self.local_problem = self.generate_problem()
pass
else:
self.n_current_block += 1
self.update_segments()
# transform segments into local_problem
self.local_problem = self.generate_problem()
# self.init_guess is filled in by update_segments()
# this also updates self.motion_time
self.local_problem.reset_init_guess(self.init_guess)
# solve local problem
self.local_problem.solve(current_time, update_time)
# update motion time variables (remaining time)
for k in range(self.n_segments):
self.motion_times[k] = self.local_problem.father.get_variables(
self.local_problem, 'T'+str(k),)[0][0]
# save motion time for current segment
self.motion_time_log.append(self.motion_times[0])
# save solving time
self.update_times.append(self.local_problem.update_times[-1])
# ========================================================================
# Simulation related functions
# ========================================================================
def store(self, current_time, update_time, sample_time):
# call store of local problem
self.local_problem.store(current_time, update_time, sample_time)
def _add_to_memory(self, memory, data_to_add, repeat=1):
memory.extend([data_to_add for k in range(repeat)])
def stop_criterium(self, current_time, update_time):
# check if the current segment is the last one
if self.segments[0]['end'] == self.goal_state:
# if we now reach the goal, the tool has arrived
if self.local_problem.stop_criterium(current_time, update_time):
return True
else:
return False
def final(self):
print('The tool has reached its goal!')
print(self.cnt, ' GCode commands were executed.')
# print 'Total machining time when considering standstill-standstill segments: ', np.round(self.get_init_guess_total_motion_time(),3), ' s'
print('Total machining time for computed trajectories: ', np.round(sum(self.motion_time_log),3), ' s')
if self.options['verbose'] >= 1:
print('%-18s %6g ms' % ('Max update time:',
max(self.update_times)*1000.))
print('%-18s %6g ms' % ('Av update time:',
(sum(self.update_times)*1000. /
len(self.update_times))))
# ========================================================================
# Export related functions
# ========================================================================
def export(self, options=None):
raise NotImplementedError('Please implement this method!')
# ========================================================================
# Plot related functions
# ========================================================================
def init_plot(self, argument, **kwargs):
# initialize environment plot
info = Problem.init_plot(self, argument)
gray = [60./255., 61./255., 64./255.]
if info is not None:
for k in range(self._n_segments):
# initialize segment plot, always use segments[0]
pose_2d = self.segments[0]['pose'][:2] + [0.] # shape was already rotated
# Todo: generalize to 3d later
s, l = self.segments[0]['shape'].draw(pose_2d)
surfaces = [{'facecolor': 'none', 'edgecolor': 'red', 'linestyle' : '--', 'linewidth': 1.2} for _ in s]
info[0][0]['surfaces'] += surfaces
# initialize global path plot
info[0][0]['lines'] += [{'color': 'red', 'linestyle' : '--', 'linewidth': 1.2}]
return info
def update_plot(self, argument, t, **kwargs):
# plot environment
data = Problem.update_plot(self, argument, t)
if data is not None:
for k in range(len(self.segment_storage[t])):
# for every frame at this point in time
# plot frame border
# Todo: generalize to 3d later
pose_2d = self.segment_storage[t][k]['pose'][:2] + [0.] # shape was already rotated
s, l = self.segment_storage[t][k]['shape'].draw(pose_2d)
data[0][0]['surfaces'] += s
return data
# ========================================================================
# GCodeSchedulerProblem specific functions
# ========================================================================
def get_environment(self, GCode, tool):
# convert the list of GCode blocks into an environment object
# each GCode block is represented as a room in which the trajectory
# has to stay
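        # G00/G01 (straight moves) become rotated Rectangles around the line
        # segment; G02/G03 (arcs) become Ring sections bounded by the arc
        # radius plus/minus the tolerance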
number = 0 # each room has a number
room = []
tolerance = tool.tolerance
if (self.variable_tolerance and tool.tolerance_small == 0):
raise RuntimeError('Using variable tolerance, but no small tolerance provided,'+
' add this to the vehicle you constructed.')
for block in GCode:
# convert block to room
if block.type in ['G00', 'G01']:
# not using variable tolerance, or segment is too short to split
if (not self.variable_tolerance or distance_between_points(block.start, block.end) < self.split_length):
# add tolerance to width to obtain the complete reachable region
width = distance_between_points(block.start, block.end) + 2*tolerance
height = 2*tolerance
orientation = np.arctan2(block.end[1]-block.start[1], block.end[0]-block.start[0])
shape = Rectangle(width = width, height = height, orientation = orientation)
pose = [block.start[0] + (block.end[0]-block.start[0])*0.5,
block.start[1] + (block.end[1]-block.start[1])*0.5,
block.start[2] + (block.end[2]-block.start[2])*0.5,
orientation,0.,0.]
# Todo: for now orientation is only taken into account as if it were a 2D segment
new_room = [{'shape': shape, 'pose': pose, 'position': pose[:2], 'draw':True,
'start': block.start, 'end': block.end, 'number':number}]
else:
# divide segment in three parts, with variable tolerance:
# large tolerances in the first and last parts, tight tolerance in the middle part
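                    # illustrative sketch of the split (not to scale):
                    #   |<- l1 ->|<-------- l2 -------->|<- l3 ->|
                    #   [big tol][   tolerance_small    ][big tol]
                    #   start                                  end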
orientation = np.arctan2(block.end[1]-block.start[1], block.end[0]-block.start[0])
width = distance_between_points(block.start, block.end) # default width
## part 1
l1 = self.split_small*width
width1 = l1 + 2*tolerance
height1 = 2*tolerance
shape1 = Rectangle(width = width1, height = height1, orientation = orientation)
pose1 = [block.start[0] + 0.5*l1*np.cos(orientation),
block.start[1] + 0.5*l1*np.sin(orientation),
block.start[2] + (block.end[2]-block.start[2])*0.5,
orientation,0.,0.]
end1 = [block.start[0] + l1*np.cos(orientation),
block.start[1] + l1*np.sin(orientation),
block.start[2] + (block.end[2]-block.start[2])]
## part 2, tolerance = tolerance_small
l2 = (1-2*self.split_small)*width
width2 = l2 + 2*tool.tolerance_small
height2 = 2*tool.tolerance_small
shape2 = Rectangle(width = width2, height = height2, orientation = orientation)
pose2 = [block.start[0] + (l1+0.5*l2)*np.cos(orientation),
block.start[1] + (l1+0.5*l2)*np.sin(orientation),
block.start[2] + (block.end[2]-block.start[2])*0.5,
orientation,0.,0.]
start2 = end1
end2 = [block.start[0] + (l1+l2)*np.cos(orientation),
block.start[1] + (l1+l2)*np.sin(orientation),
block.start[2] + (block.end[2]-block.start[2])]
## part 3
l3 = l1
width3 = width1
height3 = height1
shape3 = Rectangle(width = width3, height = height3, orientation = orientation)
pose3 = [block.end[0] - 0.5*l3*np.cos(orientation),
block.end[1] - 0.5*l3*np.sin(orientation),
block.start[2] + (block.end[2]-block.start[2])*0.5,
orientation,0.,0.]
start3 = end2
new_room = [{'shape': shape1, 'pose': pose1, 'position': pose1[:2], 'draw':True,
'start': block.start, 'end': end1, 'number':number},
{'shape': shape2, 'pose': pose2, 'position': pose2[:2], 'draw':True,
'start': start2 , 'end': end2, 'number':number+1},
{'shape': shape3, 'pose': pose3, 'position': pose3[:2], 'draw':True,
'start': start3, 'end': block.end, 'number':number+2}]
elif block.type in ['G02', 'G03']:
radius_in = block.radius - tolerance
radius_out = block.radius + tolerance
# move to origin
start = np.array(block.start) - np.array(block.center)
end = np.array(block.end) - np.array(block.center)
# adapt start and end to include tolerance, i.e. make ring a little wider, such that
# perpendicular distance from start (and end) to border of ring = tolerance
theta = np.arctan2(tolerance,((radius_in+radius_out)*0.5)) # angle over which to rotate
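                # e.g. with tolerance = 0.01 and mean radius 1.0, theta is about
                # 0.01 rad (~0.57 deg); illustrative numbers only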
# provide two turning directions
R1 = np.array([[np.cos(theta), -np.sin(theta)],[np.sin(theta), np.cos(theta)]]) # rotation matrix
R2 = np.array([[np.cos(-theta), -np.sin(-theta)],[np.sin(-theta), np.cos(-theta)]]) # rotation matrix
# Todo: rotation only works for 2D XY arcs for now
if block.type == 'G02':
direction = 'CW'
start[:2] = np.dot(R1, start[:2]) # slightly rotated start point
end[:2] = np.dot(R2, end[:2]) # slightly rotated end point
else:
direction = 'CCW'
start[:2] = np.dot(R2, start[:2]) # slightly rotated start point
end[:2] = np.dot(R1, end[:2]) # slightly rotated end point
# split ring segments of more than 135 degrees in two equal parts
# use extended version of the ring
angle1 = np.arctan2(start[1], start[0])
angle2 = np.arctan2(end[1], end[0])
if block.type == 'G02':
if angle1 < angle2:
# clockwise so angle2 must be < angle1
# probably angle2 is smaller, but arctan2 returned a negative angle
angle1 += 2*np.pi
arc_angle = angle1 - angle2
elif block.type == 'G03':
if angle1 > angle2:
# counter-clockwise so angle2 must be > angle1
# probably angle2 is bigger, but arctan2 returned a negative angle
angle2 += 2*np.pi
arc_angle = angle2 - angle1
else:
raise RuntimeError('Invalid block type: ', block.type)
new_room = self.split_ring_segment(block, arc_angle, start, end, radius_in, radius_out, direction, tolerance, number)
if self.variable_tolerance:
divided_rooms = []
for r in new_room:
# following parameters are the same for all segments
pose = r['pose']
radius_in = r['shape'].radius_in
radius_out = r['shape'].radius_out
direction = r['shape'].direction
# arc angle of ring, without including tolerance
# must be positive, sign of angle is determined by CW or CCW
start = np.array(r['start'])-np.array(r['pose'][:3])
end = np.array(r['end'])-np.array(r['pose'][:3])
angle1 = np.arctan2(start[1], start[0])
angle2 = np.arctan2(end[1], end[0])
if direction == 'CW':
if angle1 < angle2:
# clockwise so angle2 must be < angle1
# probably angle2 is smaller, but arctan2 returned a negative angle
angle1 += 2*np.pi
arc_angle = angle1 - angle2
else:
if angle1 > angle2:
# counter-clockwise so angle2 must be > angle1
# probably angle2 is bigger, but arctan2 returned a negative angle
angle2 += 2*np.pi
arc_angle = angle2 - angle1
arc_angle = np.abs(arc_angle)
arc1 = self.split_small*arc_angle # = arc3
arc2 = (1-2*self.split_small)*arc_angle
## part 1
# adapt start to include tolerance, i.e. make ring start a little earlier, such that
# perpendicular distance from start to border of ring = tolerance
seg_start1 = np.array(r['start'])-np.array(r['pose'][:3]) # without including tolerance = start of segment
start1 = np.array(seg_start1) # with including tolerance = start of shape
end1 = np.array(seg_start1) # initialize
# angle over which to rotate to account for large tolerance
delta_arc_big = np.arctan2(tolerance,((radius_in+radius_out)*0.5))
if direction == 'CW':
R_delta = np.array([[np.cos(delta_arc_big), -np.sin(delta_arc_big)],[np.sin(delta_arc_big), np.cos(delta_arc_big)]])
start1[:2] = np.dot(R_delta, seg_start1[:2]) # slightly rotated start point
theta = arc1 + delta_arc_big
R = np.array([[np.cos(-theta), -np.sin(-theta)],[np.sin(-theta), np.cos(-theta)]])
end1[:2] = np.dot(R, seg_start1[:2]) # rotate start point to end + a bit further
else:
R_delta = np.array([[np.cos(-delta_arc_big), -np.sin(-delta_arc_big)],[np.sin(-delta_arc_big), np.cos(-delta_arc_big)]])
start1[:2] = np.dot(R_delta, seg_start1[:2]) # slightly rotated start point
theta = arc1 + delta_arc_big
R = np.array([[np.cos(theta), -np.sin(theta)],[np.sin(theta), np.cos(theta)]])
end1[:2] = np.dot(R, seg_start1[:2]) # rotate start point to end
shape1 = Ring(radius_in = radius_in, radius_out = radius_out,
start = start1, end = end1, direction = direction)
seg_end1 = np.array(seg_start1) # initialize
if direction == 'CW':
R = np.array([[np.cos(-arc1), -np.sin(-arc1)],[np.sin(-arc1), np.cos(-arc1)]])
                            seg_end1[:2] = np.dot(R, seg_start1[:2]) # rotate start point over arc1 to obtain the segment end
else:
R = np.array([[np.cos(arc1), -np.sin(arc1)],[np.sin(arc1), np.cos(arc1)]])
                            seg_end1[:2] = np.dot(R, seg_start1[:2]) # rotate start point over arc1 to obtain the segment end
## part 2, tolerance = tolerance_small
# adapt start to include tolerance, i.e. make ring start a little earlier, such that
# perpendicular distance from start to border of ring = tolerance
seg_start2 = np.array(seg_end1)
start2 = np.array(seg_end1)
end2 = np.array(seg_end1) # initialize
# angle over which to rotate to account for large tolerance
delta_arc_small = np.arctan2(tool.tolerance_small,((radius_in+radius_out)*0.5))
if direction == 'CW':
R_delta = np.array([[np.cos(delta_arc_small), -np.sin(delta_arc_small)],[np.sin(delta_arc_small), np.cos(delta_arc_small)]])
start2[:2] = np.dot(R_delta, seg_start2[:2]) # slightly rotated start point
theta = arc2 + delta_arc_small
R = np.array([[np.cos(-theta), -np.sin(-theta)],[np.sin(-theta), np.cos(-theta)]])
end2[:2] = np.dot(R, seg_start2[:2]) # rotate start point to end
else:
R_delta = np.array([[np.cos(-delta_arc_small), -np.sin(-delta_arc_small)],[np.sin(-delta_arc_small), np.cos(-delta_arc_small)]])
start2[:2] = np.dot(R_delta, seg_start2[:2]) # slightly rotated start point
theta = arc2 + delta_arc_small
R = np.array([[np.cos(theta), -np.sin(theta)],[np.sin(theta), np.cos(theta)]])
end2[:2] = np.dot(R, seg_start2[:2]) # rotate start point to end
shape2 = Ring(radius_in = radius_in + (tolerance-tool.tolerance_small), radius_out = radius_out-(tolerance-tool.tolerance_small),
start = start2, end = end2, direction = direction)
seg_end2 = np.array(seg_start2) # initialize
if direction == 'CW':
R = np.array([[np.cos(-arc2), -np.sin(-arc2)],[np.sin(-arc2), np.cos(-arc2)]])
                            seg_end2[:2] = np.dot(R, seg_start2[:2]) # rotate start point over arc2 to obtain the segment end
else:
R = np.array([[np.cos(arc2), -np.sin(arc2)],[np.sin(arc2), np.cos(arc2)]])
                            seg_end2[:2] = np.dot(R, seg_start2[:2]) # rotate start point over arc2 to obtain the segment end
## part 3
# adapt start to include tolerance, i.e. make ring start a little earlier, such that
# perpendicular distance from start to border of ring = tolerance
seg_start3 = np.array(seg_end2)
start3 = np.array(seg_start3)
end3 = np.array(seg_start3) # initialize
# angle over which to rotate to account for large tolerance
delta_arc_big = np.arctan2(tolerance,((radius_in+radius_out)*0.5))
if direction == 'CW':
R_delta = np.array([[np.cos(delta_arc_big), -np.sin(delta_arc_big)],[np.sin(delta_arc_big), np.cos(delta_arc_big)]])
start3[:2] = np.dot(R_delta, seg_start3[:2]) # slightly rotated start point
theta = arc1 + delta_arc_big
R = np.array([[np.cos(-theta), -np.sin(-theta)],[np.sin(-theta), np.cos(-theta)]])
end3[:2] = np.dot(R, seg_start3[:2]) # rotate start point to end
else:
R_delta = np.array([[np.cos(-delta_arc_big), -np.sin(-delta_arc_big)],[np.sin(-delta_arc_big), np.cos(-delta_arc_big)]])
start3[:2] = np.dot(R_delta, seg_start3[:2]) # slightly rotated start point
theta = arc1 + delta_arc_big
R = np.array([[np.cos(theta), -np.sin(theta)],[np.sin(theta), np.cos(theta)]])
end3[:2] = np.dot(R, seg_start3[:2]) # rotate start point to end
shape3 = Ring(radius_in = radius_in, radius_out = radius_out,
start = start3, end = end3, direction = direction)
# start and end of ring shape is for a shape centered in the origin,
# room start and end are shifted away from origin
seg_end1 = (seg_end1 + np.array(r['pose'][:3])).tolist() # move from origin to real position
seg_start2 = seg_end1
seg_end2 = (seg_end2 + np.array(r['pose'][:3])).tolist()
seg_start3 = seg_end2
divided_rooms.extend([
{'shape': shape1, 'pose': pose, 'position': pose[:2], 'draw':True,
'start': block.start, 'end': seg_end1, 'number':number},
{'shape': shape2, 'pose': pose, 'position': pose[:2], 'draw':True,
'start': seg_start2 , 'end': seg_end2, 'number':number+1},
{'shape': shape3, 'pose': pose, 'position': pose[:2], 'draw':True,
'start': seg_start3, 'end': block.end, 'number':number+2}])
# assign divided rooms to old variable
new_room = divided_rooms
# save original GCode block in the room description
for r in new_room:
room.append(r)
number += 1
return Environment(room=room)
def split_ring_segment(self, block, arc_angle, start, end, radius_in, radius_out, direction, tolerance, number):
if (self.split_circle and arc_angle > 3*np.pi/4):
# compute middle of ring segment
arc = arc_angle*0.5
# adapt start and end to include tolerance, i.e. make ring a little wider, such that
# perpendicular distance from start (and end) to border of ring = tolerance
theta = np.arctan2(tolerance,((radius_in+radius_out)*0.5)) # angle over which to rotate
mid1 = np.array(start) # use np.array() to get a copy of the object
mid2 = np.array(start) # mid of second part of the segment
if block.type == 'G02':
R1 = np.array([[np.cos(-arc-theta), -np.sin(-arc-theta)],[np.sin(-arc-theta), np.cos(-arc-theta)]]) # rotation matrix
R2 = np.array([[np.cos(-arc+theta), -np.sin(-arc+theta)],[np.sin(-arc+theta), np.cos(-arc+theta)]]) # rotation matrix
# create overlap region between the two new segments
mid1[:2] = np.dot(R1, mid1[:2]) # rotate start point over half arc, and a bit further
mid2[:2] = np.dot(R2, mid2[:2]) # rotate start point over half arc, a bit less far
else:
R1 = np.array([[np.cos(arc+theta), -np.sin(arc+theta)],[np.sin(arc+theta), np.cos(arc+theta)]]) # rotation matrix
R2 = np.array([[np.cos(arc-theta), -np.sin(arc-theta)],[np.sin(arc-theta), np.cos(arc-theta)]]) # rotation matrix
# create overlap region between the two new segments
mid1[:2] = np.dot(R1, mid1[:2]) # rotate start point over half arc, and a bit further
mid2[:2] = np.dot(R2, mid2[:2]) # rotate start point over half arc, a bit less far
# segment1
start1 = np.array(start) # keep start of segment1
end1 = mid1
# segment2
start2 = mid2
end2 = np.array(end) # keep end of segment2
# shape is located in the origin
shape1 = Ring(radius_in = radius_in, radius_out = radius_out,
start = start1, end = end1, direction = direction)
shape2 = Ring(radius_in = radius_in, radius_out = radius_out,
start = start2, end = end2, direction = direction)
pose = list(block.center)
pose.extend([0.,0.,0.]) # [x,y,z,orientation], ring always has orientation 0
# room start and end is shifted away from origin
mid1_shift = list(mid1 + np.array(block.center)) # move from origin to real position
mid2_shift = list(mid2 + np.array(block.center))
new_room = [{'shape': shape1, 'pose': pose, 'position': pose[:2], 'draw':True,
'start': block.start, 'end': mid1_shift, 'number':number},
{'shape': shape2, 'pose': pose, 'position': pose[:2], 'draw':True,
'start': mid2_shift, 'end': block.end, 'number':number+1}]
else:
# make a single ring segment
shape = Ring(radius_in = radius_in, radius_out = radius_out,
start = start, end = end, direction = direction)
pose = block.center
pose.extend([0.,0.,0.]) # [x,y,z,orientation], ring always has orientation 0
new_room = [{'shape': shape, 'pose': pose, 'position': pose[:2], 'draw':True,
'start': block.start, 'end': block.end, 'number':number}]
return new_room
def check_segments(self):
# check if the tool still has to move over the first element of
# self.segments, if so this means no movement is made in this iteration yet
# if tool has already moved (i.e. the tool position is inside the overlap region
# between the two segments), we will add an extra segment and drop the first one
# if final goal is not on the current segment, check if current state overlaps with the next segment
if (self.segments[0]['end'] == self.goal_state and
self.segments[0]['start'] == self.environment.room[-1]['start']):
# this is the last segment, keep it until arrival
valid = True
return valid
else:
if (self.n_segments == 1 and hasattr(self, 'next_segment')):
if self.point_in_extended_shape(self.next_segment, self.curr_state[:2], distance=self.vehicles[0].shapes[0].radius):
# if point in extended shape of next segment (=complete ring, or segment with infinite length),
# we can move to this next segment
# only called if self.n_segments = 1,
# then self.segments[1] doesn't exist and self.next_segment does exist
valid = False
else:
valid = True
return valid
elif self.point_in_extended_shape(self.segments[1], self.curr_state[:2], distance=self.vehicles[0].shapes[0].radius):
# if point in extended shape of next segment (=complete ring, or segment with infinite length),
# we can move to this next segment
valid = False
return valid
else:
valid = True
return valid
        # Note: the check below is unreachable, since every branch above
        # already returns; it is kept for reference only
        if (np.array(self.curr_state) == np.array(self.segments[0]['end'])).all():
            # current state is equal to end of segment 0
            return False
        else:
            # current state is not yet equal to the end of segment 0
            return True
def update_segments(self):
# update the considered segments: remove first one, and add a new one
if self.segments[-1]['number'] < self.cnt:
# last segment is not yet in self.segments, so there are some segments left,
# create segment for next block
new_segment = self.environment.room[self.n_current_block+(self.n_segments-1)]
self.segments.append(new_segment) # add next segment
if self.n_segments == 1:
if self.segments[-1]['number'] < self.cnt:
self.next_segment = self.environment.room[self.n_current_block+1]
else:
self.next_segment = None
else:
# all segments are currently in self.segments, don't add a new one
# and lower the amount of segments that are combined
self.n_segments -= 1
self.segments = self.segments[1:] # drop first segment
# self.get_init_guess() uses previous solution to get an initial guess for
# all segments except the last one,
# for this one get initial guess based on the center line
# analogously for the motion_times
self.init_guess, self.motion_times = self.get_init_guess()
# def point_in_segment(self, segment, point, distance=0):
# # check if the provided point is inside segment
# # distance is the margin to take into account (due to the tool size)
# # for the check, re-use the collision avoidance constraints of tool.py
# if (isinstance(segment['shape'], (Rectangle, Square))):
# # we have a diagonal line segment
# if point_in_polyhedron(point, segment['shape'], segment['position'], margin=distance):
# return True
# else:
# return False
# elif (isinstance(segment['shape'], (Ring))):
# # we have a ring/circle segment
# # use polar coordinates to go from point(x,y) to point(r,theta)
# # then check if r and theta are inside the ring
# center = segment['pose']
# angle1 = np.arctan2(point[1] - center[1], point[0] - center[0])
# angle2 = angle1 + 2*np.pi
# r = np.sqrt((point[0]-center[0])**2+(point[1]-center[1])**2)
# if (r >= segment['shape'].radius_in+distance and r <= segment['shape'].radius_out-distance):
# # Todo: shift start and end_angle according to distance (i.e. make ring a little smaller) to
# # account for the tolerance (tool point may not lie infinitely close to the border)
# if segment['shape'].direction == 'CW':
# if (angle1 <= segment['shape'].start_angle and angle1 >= segment['shape'].end_angle):
# return True
# if (angle2 <= segment['shape'].start_angle and angle2 >= segment['shape'].end_angle):
# return True
# elif segment['shape'].direction == 'CCW':
# if (angle1 >= segment['shape'].start_angle and angle1 <= segment['shape'].end_angle):
# return True
# if (angle2 >= segment['shape'].start_angle and angle2 <= segment['shape'].end_angle):
# return True
# return False
# else:
# return False
def point_in_extended_shape(self, segment, point, distance=0):
# check if the provided point is inside the extended/infinite version of the shape, meaning
# that we check if the point is in the complete ring (instead of in the ring segment), or if
# the point is inside the rectangle with infinite width (meaning that it is inside the GCode segment
# with infinite length)
# this is to check if the current state (probably = the connection point between spline segments),
# is valid to continue to the next segment (= the segment provided to this function)
# difference with point_in_segment: checks if point is in the finite/normal version of the shape
# distance is the margin to take into account (due to the tool size)
if (isinstance(segment['shape'], (Rectangle, Square))):
if (segment['shape'].orientation%(np.pi) == 0):
# horizontal line segment
if (point[1] < max(segment['shape'].vertices[1,:]+segment['position'][1]) and
point[1] > min(segment['shape'].vertices[1,:]+segment['position'][1])):
return True
else:
return False
elif (segment['shape'].orientation%(np.pi/2.) == 0):
# vertical line segment
# Note: also a shape with orientation 0 would pass this test, but this was
# already captured in first if-test
if (point[0] < max(segment['shape'].vertices[0,:]+segment['position'][0]) and
point[0] > min(segment['shape'].vertices[0,:]+segment['position'][0])):
return True
else:
return False
else:
# we have a diagonal line GCode segment
# find the lines of the rectangle representing the line GCode segment with tolerances,
# that have the length of the segment length
couples = []
for k in range(len(segment['shape'].vertices[0])-1):
point1 = segment['shape'].vertices[:,k]+segment['position']
point2 = segment['shape'].vertices[:,k+1]+segment['position']
dist = distance_between_points(point1,point2)
if abs(dist - segment['shape'].width) < 1e-3:
# the connection between the points gives a side of length = width of the shape
couples.append([point1,point2])
if len(couples) != 2:
# not yet found two couples, so the distance between last vertex and first must also be = width
couples.append([segment['shape'].vertices[:,-1]+segment['position'],segment['shape'].vertices[:,0]+segment['position']])
# compute the equations for these two lines, to check if the point is at the right side of them,
# i.e. inside the rectangle with infinite width = the segment with infinite length
# Note: supposed that the vertices are stored in clockwise order here
side = []
for couple in couples:
x1, y1 = couple[0] # point1
x2, y2 = couple[1] # point2
                    vector = [x2-x1, y2-y1] # vector from point1 to point2
a = np.array([-vector[1],vector[0]])*(1/np.sqrt(vector[0]**2+vector[1]**2)) # normal vector
b = np.dot(a,np.array([x1,y1])) # offset
side.append(np.dot(a, point) - b) # fill in point
if all(s<-distance for s in side):
# point is inside the shape and a distance tolerance away from border
return True
else:
return False
elif (isinstance(segment['shape'], (Ring))):
# we have a ring/circle segment, check if distance from point to center lies between
# the inner and outer radius
center = segment['pose']
r = np.sqrt((point[0]-center[0])**2+(point[1]-center[1])**2)
if (r >= segment['shape'].radius_in+distance and r <= segment['shape'].radius_out-distance):
return True
else:
return False
def generate_problem(self):
local_rooms = self.environment.room[self.n_current_block:self.n_current_block+self.n_segments]
local_environment = Environment(room=local_rooms)
problem = GCodeProblem(self.vehicles[0], local_environment, self.n_segments, motion_time_guess=self.motion_times)
problem.set_options({'solver_options': self.options['solver_options']})
problem.init()
# reset the current_time, to ensure that predict uses the provided
# last input of previous problem and vehicle velocity is kept from one frame to another
problem.initialize(current_time=0.)
return problem
def get_init_guess(self, **kwargs):
# if first iteration, compute init_guess based on center line (i.e. connection between start and end) for all segments
# else, use previous solutions to build a new initial guess:
# if combining 2 segments: combine splines in segment 1 and 2 to form a new spline in a single segment = new segment1
# if combining 3 segments or more: combine segment1 and 2 and keep splines of segment 3 and next as new splines of segment2 and next
start_time = time.time()
# initialize variables to hold guesses
init_splines = []
motion_times = []
if hasattr(self, 'local_problem') and hasattr(self.local_problem.father, '_var_result'):
# local_problem was already solved, re-use previous solutions to form initial guess
if self.n_segments > 1:
# end up here when combining two or more segments --> take solution of segment1
# as initial guess
# if updating in receding horizon with small steps:
# combine first two spline segments into a new spline = guess for new current segment
# init_spl, motion_time = self.get_init_guess_combined_segment()
# if updating per segment:
# the first segment disappears and the guess is given by data of next segment
# spline through next segment and its motion time
init_splines.append(np.array(self.local_problem.father.get_variables()[self.vehicles[0].label,'splines_seg1']))
motion_times.append(self.local_problem.father.get_variables(self.local_problem, 'T1',)[0][0])
# only make guess using center line for last segment
guess_idx = [self.n_segments-1]
if self.n_segments > 2:
# use old solutions for segment 2 until second last segment, these don't change
for k in range(2, self.n_segments):
# Todo: strange notation required, why not the same as in schedulerproblem.py?
init_splines.append(np.array(self.local_problem.father.get_variables()[self.vehicles[0].label,'splines_seg'+str(k)]))
motion_times.append(self.local_problem.father.get_variables(self.local_problem, 'T'+str(k),)[0][0])
# only make guess using center line for last segment
guess_idx = [self.n_segments-1]
if (self.n_segments==1 and self._n_segments>1):
# looking at last segment when you were originally combining two or more segments -->
# take solution of segment1 as initial guess
init_splines.append(np.array(self.local_problem.father.get_variables()[self.vehicles[0].label,'splines_seg1']))
motion_times.append(self.local_problem.father.get_variables(self.local_problem, 'T1',)[0][0])
                # unnecessary to make a new guess
guess_idx = []
elif self._n_segments == 1:
guess_idx = list(range(1))
else:
# local_problem was not solved yet, make guess using center line for all segments
guess_idx = list(range(self.n_segments))
# make guesses based on center line of GCode
for k in guess_idx:
init_spl, motion_time = self.get_init_guess_new_segment(self.segments[k])
init_splines.append(init_spl)
motion_times.append(motion_time)
# pass on initial guess
self.vehicles[0].set_init_spline_values(init_splines, n_seg = self.n_segments)
# deployer.run_segment() calls vehicle.predict(), which calls problem.predict(),
# and sets the initial conditions,
# so don't repeat here since this would erase the input and dinput values
self.vehicles[0].set_terminal_conditions(self.segments[-1]['end'])
end_time = time.time()
if self.options['verbose'] >= 2:
print('elapsed time in get_init_guess ', end_time - start_time)
return init_splines, motion_times
def get_init_guess_new_segment(self, segment):
if isinstance(segment['shape'], Rectangle):
# generate a feasible initial guess with a bang-bang jerk profile
init_guess, motion_time = self.get_init_guess_bangbang_jerk(segment)
elif isinstance(segment['shape'], Ring):
# create feasible initial guess for x and y, meaning that r_in <= x**2+y**2 <= r_out,
# Note: this does not mean that all coefficients of x and y lie inside the ring segment when plotting
init_guess, motion_time = self.get_init_guess_ring(segment)
# Note: initial and final velocity and acceleration are already
# forced to zero inside get_init_guess_ring()
else:
raise RuntimeError('Segment with invalid (not Rectangle or Ring) shape: ', segment['shape'])
pos_x = BSpline(self.vehicles[0].basis, init_guess[:,0])
pos_y = BSpline(self.vehicles[0].basis, init_guess[:,1])
dpos_x = pos_x.derivative(1)
ddpos_x = pos_x.derivative(2)
dddpos_x = pos_x.derivative(3)
dpos_y = pos_y.derivative(1)
ddpos_y = pos_y.derivative(2)
dddpos_y = pos_y.derivative(3)
        eval = np.linspace(0,1,100)
        # use absolute values so negative excursions are checked as well
        # (upper and lower bounds are assumed to be symmetric)
        maxvx = max(abs(dpos_x(eval))/motion_time)
        maxvy = max(abs(dpos_y(eval))/motion_time)
        maxax = max(abs(ddpos_x(eval))/motion_time**2)
        maxay = max(abs(ddpos_y(eval))/motion_time**2)
        maxjx = max(abs(dddpos_x(eval))/motion_time**3)
        maxjy = max(abs(dddpos_y(eval))/motion_time**3)
if maxvx > self.vehicles[0].vxmax:
print(maxvx)
raise RuntimeError('Velx guess too high')
if maxvy > self.vehicles[0].vymax:
print(maxvy)
raise RuntimeError('Vely guess too high')
if maxax > self.vehicles[0].axmax:
print(maxax)
raise RuntimeError('Accx guess too high')
if maxay > self.vehicles[0].aymax:
print(maxay)
raise RuntimeError('Accy guess too high')
if maxjx > self.vehicles[0].jxmax:
print(maxjx)
raise RuntimeError('Jerkx guess too high')
if maxjy > self.vehicles[0].jymax:
print(maxjy)
raise RuntimeError('Jerky guess too high')
return init_guess, motion_time
def get_init_guess_bangbang_jerk(self, segment):
x0 = segment['start'][0]
y0 = segment['start'][1]
x1 = segment['end'][0]
y1 = segment['end'][1]
z0 = segment['start'][2]
z1 = segment['end'][2]
j_lim = self.vehicles[0].jxmax # jerk limit
if j_lim != self.vehicles[0].jymax:
raise RuntimeError('Generating initial guess only possible for x-limit = y-limit')
if j_lim != -self.vehicles[0].jxmin:
raise RuntimeError('Generating initial guess only possible for upper and lower bounds of equal size')
# self.vehicles[0].basis is for position, take third derivative to obtain the basis for the jerk spline
# length of this basis determines amount of coeffs that are required
n_coeffs = len(self.vehicles[0].basis.derivative(3)[0])
multiple, rest = divmod(n_coeffs, 4)
# check on amount of coeffs that are required to make desired jerk profile
# the basic options to obtain a profile with an average of zero are:
# 4 coeffs: [1, -1, -1, 1] * j_lim
# 5 coeffs: [1, -1, 0 -1, 1] * j_lim
# 6 coeffs: [1, 0, -1, -1, 0, 1] * j_lim
# 7 coeffs: [1, 0, -1, 0, -1, 0, 1] * j_lim
# for 8 coeffs or more, all non-zero values are copied, with 'multiple'
# 8 coeffs: [1, 1, -1, -1, -1, -1, 1, 1] * j_lim
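        # worked example (illustrative): n_coeffs = 10 gives multiple = 2 and
        # rest = 2, so coeffs_j = [1, 1, 0, -1, -1, -1, -1, 0, 1, 1] * j_lim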
if rest == 0:
coeffs_j = np.r_[j_lim*np.ones((multiple,1)),-j_lim*np.ones((2*multiple,1)),j_lim*np.ones((multiple,1))]
elif rest == 1:
coeffs_j = np.r_[j_lim*np.ones((multiple,1)),-j_lim*np.ones((multiple,1)),0*np.ones((1,1)),-j_lim*np.ones((multiple,1)),j_lim*np.ones((multiple,1))]
elif rest == 2:
coeffs_j = np.r_[j_lim*np.ones((multiple,1)), 0*np.ones((1,1)),-j_lim*np.ones((2*multiple,1)), 0*np.ones((1,1)),j_lim*np.ones((multiple,1))]
elif rest == 3:
coeffs_j = np.r_[j_lim*np.ones((multiple,1)), 0*np.ones((1,1)),-j_lim*np.ones((multiple,1)), 0*np.ones((1,1)),-j_lim*np.ones((multiple,1)),0*np.ones((1,1)),j_lim*np.ones((multiple,1))]
else:
            raise RuntimeError('Unexpected remainder when dividing n_coeffs by 4: ', n_coeffs)
# make jerk spline and integrate to obtain corresponding position spline
jerk = BSpline(self.vehicles[0].basis.derivative(3)[0], coeffs_j)
acc = running_integral(jerk)
vel = running_integral(acc)
pos = running_integral(vel)
guess = pos.coeffs # coefficients guess
# shift and scale to obtain trajectory from x0 to x1
guess_x = [g/pos.coeffs[-1]*(x1-x0)+x0 for g in guess]
guess_y = [g/pos.coeffs[-1]*(y1-y0)+y0 for g in guess]
# initial and final velocity and acceleration are 0
guess_x[0] = x0
guess_x[1] = x0
guess_x[2] = x0
guess_x[-3] = x1
guess_x[-2] = x1
guess_x[-1] = x1
# initial and final velocity and acceleration are 0
guess_y[0] = y0
guess_y[1] = y0
guess_y[2] = y0
guess_y[-3] = y1
guess_y[-2] = y1
guess_y[-1] = y1
# linear interpolation between start and end for z-guess
guess_z = np.linspace(z0,z1, len(guess_x)).tolist()
init_guess = np.c_[guess_x, guess_y, guess_z]
# from matplotlib import pyplot as plt
# basis = self.vehicles[0].basis
# x = BSpline(basis, guess_x)
# y = BSpline(basis, guess_y)
# eval = np.linspace(0, 1, 100)
# plt.figure(20)
# plt.plot(x(eval), y(eval), 'g') # guess
# points = segment['shape'].draw(segment['pose'][:2]+[0])[0][0] # don't draw z, always pick 0.
# # add first point again to close shape
# points = np.c_[points, [points[0,0], points[1,0]]]
# plt.plot(points[0,:], points[1,:], color='red', linestyle = '--', linewidth= 1.2) # ring segment
# plt.plot(x0, y0, 'bx') # coeffs guess before solving
# plt.plot(guess_x,guess_y, 'gx') # coeffs from solution
# plt.figure(21)
# plt.plot(eval, jerk(eval))
# plt.figure(22)
# plt.plot(eval, acc(eval))
# plt.figure(23)
# plt.plot(eval, vel(eval))
motion_time = self.get_init_guess_motion_time(segment, coeff_guess=init_guess)
return init_guess, motion_time
def get_init_guess_combined_segment(self):
# combines the splines of the first two segments into a single one, forming the guess
# for the new current segment
# remaining spline through current segment
spl1 = self.local_problem.father.get_variables(self.vehicles[0], 'splines_seg0')
# spline through next segment
spl2 = self.local_problem.father.get_variables(self.vehicles[0], 'splines_seg1')
time1 = self.local_problem.father.get_variables(self.local_problem, 'T0',)[0][0]
time2 = self.local_problem.father.get_variables(self.local_problem, 'T1',)[0][0]
motion_time = time1 + time2 # guess for motion time
# form connection of spl1 and spl2, in union basis
spl = concat_splines([spl1, spl2], [time1, time2])
# now find spline in original basis (the one of spl1 = the one of spl2) which is closest to
# the one in the union basis, by solving a system
coeffs = [] # holds new coeffs
degree = [s.basis.degree for s in spl1]
knots = [s.basis.knots*motion_time for s in spl1] # scale knots with guess for motion time
for l in range (len(spl1)):
new_basis = BSplineBasis(knots[l], degree[l]) # make basis with new knot sequence
grev_bc = new_basis.greville()
# shift greville points inwards, to avoid that evaluation at the greville points returns
# zero, because they fall outside the domain due to numerical errors
grev_bc[0] = grev_bc[0] + (grev_bc[1]-grev_bc[0])*0.01
grev_bc[-1] = grev_bc[-1] - (grev_bc[-1]-grev_bc[-2])*0.01
# evaluate connection of splines greville points of new basis
eval_sc = spl[l](grev_bc)
# evaluate basis at its greville points
eval_bc = new_basis(grev_bc).toarray()
# solve system to obtain coefficients of spl in new_basis
coeffs.append(la.solve(eval_bc, eval_sc))
# put in correct format
init_splines = np.r_[coeffs].transpose()
return init_splines, motion_time
def get_init_guess_ring(self, segment):
# solve optimization problem to get a feasible initial guess for a circle segment
basis = self.vehicles[0].basis
# make variables
X = MX.sym("x", 2 * len(basis))
cx = X[:len(basis)]
cy = X[len(basis):]
# unknown splines
s_x = BSpline(basis,cx)
s_y = BSpline(basis,cy)
# set up constraints
con = []
# spline needs to lie inside ring segment
con.extend([-((s_x-segment['position'][0])**2 + (s_y-segment['position'][1])**2) + segment['shape'].radius_in**2,
((s_x-segment['position'][0])**2 + (s_y-segment['position'][1])**2) - segment['shape'].radius_out**2])
# translate to constraints on the coeffs
con = vertcat(*[c.coeffs for c in con])
con = vertcat(con,
s_x(0.) - (segment['start'][0]), # fix initial position
s_y(0.) - (segment['start'][1]),
s_x(1.) - (segment['end'][0]), # fix final position
s_y(1.) - (segment['end'][1]),
s_x.derivative(1)(0.), # velocity zero at start
s_y.derivative(1)(0.),
s_x.derivative(1)(1.), # velocity zero at end
s_y.derivative(1)(1.),
s_x.derivative(2)(0.), # acceleration zero at start
s_y.derivative(2)(0.),
s_x.derivative(2)(1.), # acceleration zero at end
s_y.derivative(2)(1.),)
# set up objective function
circ = (s_x-segment['position'][0])**2 + (s_y-segment['position'][1])**2
# stay as close to center line of ring as possible
# obj = ((definite_integral(circ,0,1.) - ((segment['shape'].radius_out+segment['shape'].radius_in)*0.5)**2)**2)
# limit the jerk of the trajectory, to avoid nervous solutions
obj = definite_integral(s_x.derivative(3)**2,0,1.) + definite_integral(s_y.derivative(3)**2,0,1.)
# make nlp
nlp = {'x':X, 'f':obj, 'g':con}
# set options
options = {}
# options['ipopt.linear_solver'] = 'ma57' # must be installed separately
options['ipopt.tol'] = 1e-8
options['ipopt.print_level'] = 0
options['print_time'] = 0
options['ipopt.warm_start_init_point'] = 'yes'
options['ipopt.max_iter'] = 3000
# create solver
solver = nlpsol('solver','ipopt', nlp, options)
# set bounds for constraints
lbg = np.r_[-np.inf * np.ones(con.size1()-12), np.zeros(12)]
ubg = np.r_[np.zeros(con.size1()-12), np.zeros(12)]
# set bounds for variables
lbx = -np.inf * np.ones(X.size1())
ubx = np.inf * np.ones(X.size1())
# create solver input
solver_input = {}
solver_input['lbx'] = lbx
solver_input['ubx'] = ubx
solver_input['lbg'] = lbg
solver_input['ubg'] = ubg
# make initial guess
center = segment['position']
theta0 = np.arctan2(segment['start'][1] - center[1], segment['start'][0] - center[0]) # start angle
theta1 = np.arctan2(segment['end'][1] - center[1], segment['end'][0] - center[0]) # end angle
if segment['shape'].direction == 'CW': # theta must decrease
if theta0 < theta1: # theta0 needs to be bigger
theta0 += 2*np.pi
else: # counter-clockwise, theta must increase
if theta0 > theta1: # theta1 needs to be bigger
theta1 += 2*np.pi
# calculate circle radius
r = np.sqrt((center[0] - segment['start'][0])**2 + (center[1] - segment['start'][1])**2)
angles = np.linspace(theta0, theta1, len(basis))
x0 = r*np.cos(angles) + center[0]
y0 = r*np.sin(angles) + center[1]
var0 = np.r_[x0, y0] #initial guess
# add initial guess to solver
solver_input['x0'] = var0
# solve optimization problem
solver_output = solver(**solver_input)
# stats = solver.stats()
# print stats['return_status']
        # process output
X = solver_output['x']
init_guess_x = X[:len(basis)]
init_guess_y = X[len(basis):2*len(basis)]
x = BSpline(basis, init_guess_x)
y = BSpline(basis, init_guess_y)
init_guess_x = np.array(init_guess_x.T)[0]
init_guess_y = np.array(init_guess_y.T)[0]
init_guess_z = 0*init_guess_x
init_guess = np.c_[init_guess_x, init_guess_y, init_guess_z]
# plot results
# from matplotlib import pyplot as plt
# eval = np.linspace(0, 1, 100)
# plt.figure(20)
# plt.plot(x(eval), y(eval), 'g') # guess
# points = segment['shape'].draw(segment['pose'][:2]+[0])[0][0] # don't draw z, always pick 0.
# # add first point again to close shape
# points = np.c_[points, [points[0,0], points[1,0]]]
# plt.plot(points[0,:], points[1,:], color='red', linestyle = '--', linewidth= 1.2) # ring segment
# plt.plot(x0, y0, 'bx') # coeffs guess before solving
# plt.plot(init_guess_x,init_guess_y, 'gx') # coeffs from solution
motion_time = self.get_init_guess_motion_time(segment, coeff_guess=init_guess)
return init_guess, motion_time
def get_init_guess_motion_time(self, segment, coeff_guess=None):
# compute initial guess for the motion time
if coeff_guess is not None:
# spline coefficients were provided
guess_x = coeff_guess[:,0]
guess_y = coeff_guess[:,1]
guess_z = coeff_guess[:,2]
# construct corresponding splines
pos_x = BSpline(self.vehicles[0].basis, guess_x)
vel_x = pos_x.derivative(1)
acc_x = pos_x.derivative(2)
jerk_x = pos_x.derivative(3)
pos_y = BSpline(self.vehicles[0].basis, guess_y)
vel_y = pos_y.derivative(1)
acc_y = pos_y.derivative(2)
jerk_y = pos_y.derivative(3)
pos_z = BSpline(self.vehicles[0].basis, guess_z)
vel_z = pos_z.derivative(1)
acc_z = pos_z.derivative(2)
jerk_z = pos_z.derivative(3)
# determine which limit is the most strict, and therefore determines the motion_time
eval = np.linspace(0,1,100)
# take into account scaling factor, with appropriate power
j_lim = self.vehicles[0].jxmax
if j_lim != 0.:
# xy-plane movement
motion_time_j = (max(np.r_[abs(jerk_x(eval)), abs(jerk_y(eval))])/float(j_lim))**(1/3.)
else:
# z-movement
j_lim = self.vehicles[0].jzmax
motion_time_j = (max(abs(jerk_z(eval)))/float(j_lim))**(1/3.)
            a_lim = self.vehicles[0].axmax # acceleration limit
if a_lim != 0.:
# xy-plane movement
motion_time_a = np.sqrt(max(np.r_[abs(acc_x(eval)), abs(acc_y(eval))])/float(a_lim))
else:
# z-movement
a_lim = self.vehicles[0].azmax
motion_time_a = np.sqrt(max(abs(acc_z(eval)))/float(a_lim))
            v_lim = self.vehicles[0].vxmax # velocity limit
if v_lim != 0.:
# xy-plane movement
motion_time_v = max(np.r_[abs(vel_x(eval)), abs(vel_y(eval))])/float(v_lim)
else:
# z-movement
v_lim = self.vehicles[0].vzmax
motion_time_v = max(abs(vel_z(eval)))/float(v_lim)
motion_time = max(motion_time_j, motion_time_a, motion_time_v)
motion_time = 1.05*motion_time # take some margin to avoid numerical errors
else:
# no spline coefficients were provided, make theoretical guess
# predict the time of each of the 8 phases of the guess:
# 1: j_lim
# 2: a_lim
# 3: -j_lim
# 4 & 5: v_lim
# 6: -j_lim
# 7: -a_lim
# 8: j_lim
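            # together these give the classical 7-phase S-curve profile; the
            # constant-velocity phase is listed twice (4 & 5), once per half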
# for z-movement, the limits in x and y are set to zero, so set the according values for
# j_lim, a_lim and v_lim
j_lim = self.vehicles[0].jxmax if self.vehicles[0].jxmax != 0. else self.vehicles[0].jzmax
a_lim = self.vehicles[0].axmax if self.vehicles[0].axmax != 0. else self.vehicles[0].azmax
v_lim = self.vehicles[0].vxmax if self.vehicles[0].vxmax != 0. else self.vehicles[0].vzmax
            if isinstance(segment['shape'], Rectangle):
                distance = 0
                for l in range(len(segment['start'])):
                    distance += (segment['end'][l] - segment['start'][l])**2
                distance = np.sqrt(distance) # Euclidean length of the segment
elif isinstance(segment['shape'], Ring):
                # use the arc length along the center line of the ring
radius = (segment['shape'].radius_in + segment['shape'].radius_out)*0.5
# arc length
distance = radius * abs(segment['shape'].end_angle - segment['shape'].start_angle)
else:
raise RuntimeError('Invalid shape of segment given in get_init_guess_motion_time: ', segment['shape'])
# determine what the limiting factor is when applying max jerk in phase 1
# this factor determines the selected T1
T1_acc = (a_lim/j_lim) # apply max jerk, when is amax reached
T1_vel = np.sqrt(v_lim/j_lim) # apply max jerk, when is vmax reached
T1_pos = (32 * distance/j_lim)**(1/3.)/4 # apply max jerk, when is distance reached
T1 = min([T1_acc, T1_vel, T1_pos])
T3 = T1
if T1 == T1_pos: # apply max jerk, until half distance is reached
T2 = 0.
T4 = 0.
elif T1 == T1_vel: # apply max jerk until vmax is reached and keep vmax until d/2 reached
T2 = 0.
T4 = float(distance/2.-(j_lim*T1**3))/v_lim
else:
T2_pos = (2*np.sqrt((a_lim*(a_lim**3 + 4*distance*j_lim**2))/4.) - 3*a_lim**2)/(2.*a_lim*j_lim) # distance limit
T2_vel = (float(-a_lim**2)/j_lim + v_lim)/a_lim
T2 = min([T2_vel, T2_pos])
if T2 == T2_vel:
T4 = -(a_lim**2*v_lim - j_lim*distance*a_lim + j_lim*v_lim**2)/float(2*a_lim*j_lim*v_lim)
else:
T4 = 0.
T = [T1, T2, T3, T4, T4, T3, T2, T1]
motion_time = sum(T)
return motion_time
def get_init_guess_total_motion_time(self):
guess_total_time = 0
for segment in self.environment.room:
time = self.get_init_guess_motion_time(segment)
guess_total_time += time
return guess_total_time | lgpl-3.0 | 1,146,049,252,038,892,700 | 52.463023 | 196 | 0.545206 | false |
hq20051252/sogouWeixin | sogouWeixin/items.py | 1 | 2374 | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class SogouweixinItem(scrapy.Item):
# define the fields for your item here like:
    # the account's history page on Sogou
    sogougzh = scrapy.Field()
    # account logo
    logo = scrapy.Field()
    # account QR code
    qrcode = scrapy.Field()
    # account nickname
    nickname = scrapy.Field()
    # account id
    userid = scrapy.Field()
    # encrypted account id
    serid = scrapy.Field()
    # account tagline
    gongneng = scrapy.Field()
    # verification type
    renzhenginfo = scrapy.Field()
    # most recently published article
    latestarticle = scrapy.Field()
    # time of the most recent publication
    latestupdatetime = scrapy.Field()
    # link to the most recent article
    latestarticleuri = scrapy.Field()
class ArticleweixinItem(scrapy.Item):
    # the account's history page on Sogou
    sogougzh = scrapy.Field()
    # encrypted account id
    serid = scrapy.Field()
    # account nickname
    nickname = scrapy.Field()
    # article title
    title = scrapy.Field()
    # article summary
    summary = scrapy.Field()
    # article cover image
    cover = scrapy.Field()
    # article publication time
    updatetime = scrapy.Field()
    # article link
    articleuri = scrapy.Field()
    # article issue number
    mid = scrapy.Field()
    # index of the article within its issue
    idx = scrapy.Field()
    # N/A
    sn = scrapy.Field()
class SogouArticleweixinItem(scrapy.Item):
# docid
docid = scrapy.Field()
# classid
classid = scrapy.Field()
# headimage
headimage = scrapy.Field()
    # the account's history page on Sogou
    sogougzh = scrapy.Field()
    # encrypted account id
    serid = scrapy.Field()
    # account nickname
    nickname = scrapy.Field()
    # article title
    title = scrapy.Field()
    # article summary
    summary = scrapy.Field()
    # article cover image
    cover = scrapy.Field()
    # article publication time
    updatetime = scrapy.Field()
    # article last-modified time
    lastmodified = scrapy.Field()
    # article link
    articleuri = scrapy.Field()
    # article issue number
    mid = scrapy.Field()
    # index of the article within its issue
    idx = scrapy.Field()
    # N/A
    sn = scrapy.Field()
class ChuansongmeItem(scrapy.Item):
    # account nickname
    nickname = scrapy.Field()
    # account id
    userid = scrapy.Field()
    # account tagline
    gongneng = scrapy.Field()
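# Illustrative usage inside a spider callback (hypothetical values):
#   item = SogouweixinItem()
#   item['nickname'] = 'some-account'
#   item['latestarticleuri'] = 'http://example.com/article'
#   yield item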
| gpl-2.0 | 3,447,349,317,792,681,500 | 19.48 | 51 | 0.597656 | false |
mpatek/runcalc | runcalc/cli.py | 1 | 4177 | import datetime
import click
import re
_multipliers = {
's': 1,
'm': 60,
'h': 3600,
}
_pattern = re.compile(
'(?:(?:(?P<h>\d+):)?(?P<m>\d+):)?(?P<s>\d+(?:\.\d+)?)'
)
def time_str_to_seconds(s):
"""
Convert a string representation of a time to number of seconds.
Args:
s (str): A string representation of a time.
Returns:
float: The number of seconds represented by the time string.
Raises:
ValueError: If the time string is in an unrecognized format.
Examples:
>>> time_str_to_seconds('123.45')
123.45
>>> time_str_to_seconds('7:15.45')
435.45
>>> time_str_to_seconds('1:07:15.45')
4035.45
"""
match = _pattern.match(s)
if match:
return sum(
_multipliers[k] * float(v)
for k, v in match.groupdict().items()
if v and k in _multipliers
)
raise ValueError('Unknown time format: "{}"'.format(s))
def format_timedelta(td):
"""
Format a timedelta
Args:
td (datetime.timedelta): A timedelta
Returns:
str: A string which represents the timedelta
Examples:
>>> import datetime
>>> td = datetime.timedelta(days=3)
>>> format_timedelta(td)
'3 days'
>>> td = datetime.timedelta(days=1)
>>> format_timedelta(td)
'1 day'
>>> td = datetime.timedelta(seconds=14.2567)
>>> format_timedelta(td)
'14.26 seconds'
>>> td = datetime.timedelta(seconds=64.6734)
>>> format_timedelta(td)
'1 minute 4.67 seconds'
>>> td = datetime.timedelta(seconds=3600)
>>> format_timedelta(td)
'1 hour'
>>> td = datetime.timedelta(seconds=3673.123)
>>> format_timedelta(td)
'1 hour 1 minute 13.12 seconds'
>>> td = datetime.timedelta(seconds=.878)
>>> format_timedelta(td)
'0.88 seconds'
>>> td = datetime.timedelta(seconds=0)
>>> format_timedelta(td)
'0 seconds'
>>> td = datetime.timedelta(seconds=1)
>>> format_timedelta(td)
'1 second'
>>> td = datetime.timedelta(seconds=1.234)
>>> format_timedelta(td)
'1.23 seconds'
"""
if not td:
return '0 seconds'
parts = []
if td.days:
parts.append('{} day{}'.format(td.days, 's' if td.days > 1 else ''))
if td.seconds or td.microseconds:
hours = td.seconds // 3600
if hours:
parts.append('{} hour{}'.format(hours, 's' if hours > 1 else ''))
minutes = (td.seconds % 3600) // 60
seconds = (td.seconds % 3600) % 60
else:
minutes = td.seconds // 60
seconds = td.seconds % 60
if minutes:
parts.append('{} minute{}'.format(
minutes,
's' if minutes > 1 else '',
))
if seconds or td.microseconds:
            hundredths = int(round(td.microseconds / 10000.))
            if hundredths == 100:
                # rounding reached a full second; carry it over
                seconds += 1
                hundredths = 0
            # zero-pad so that e.g. 5 hundredths renders as '.05', not '.5'
            f_hundredths = '.{:02d}'.format(hundredths) if hundredths else ''
parts.append('{}{} second{}'.format(
seconds,
f_hundredths,
'' if (seconds == 1 and not f_hundredths) else 's',
))
return ' '.join(parts)
class TimeType(click.ParamType):
name = 'time'
def convert(self, value, param, ctx):
try:
return time_str_to_seconds(value)
except ValueError as e:
self.fail(e, param, ctx)
TIME_PARAM = TimeType()
@click.command()
@click.option('--time', '-t', type=TIME_PARAM)
@click.option('--distance', '-d', type=float)
@click.option('--unit', '-u', default='mile')
def cli(time, distance, unit):
""" Calculate running pace. """
if not time:
time = time_str_to_seconds(
str(input('Enter the run time: '))
)
if not distance:
distance = float(
input('Enter the run distance: ')
)
pace = time / distance
td = datetime.timedelta(seconds=pace)
print('Pace: {} per {}'.format(format_timedelta(td), unit))
if __name__ == '__main__':
cli()
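# Example session (illustrative; output computed from the code above):
#   $ python cli.py --time 24:30 --distance 3.1
#   Pace: 7 minutes 54.19 seconds per mile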
| mit | -3,712,212,842,191,771,000 | 25.436709 | 77 | 0.526215 | false |
CLVsol/odoo_addons | clv_medicament_template/wkf/clv_medicament_template_wkf.py | 1 | 2671 | # -*- encoding: utf-8 -*-
################################################################################
# #
# Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Affero General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU Affero General Public License for more details. #
# #
# You should have received a copy of the GNU Affero General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
################################################################################
from openerp.osv import fields, osv
from openerp import netsvc
class clv_medicament_template(osv.osv):
_inherit = 'clv_medicament.template'
_columns = {
'state': fields.selection([('draft','Draft'),
('revised','Revised'),
('waiting','Waiting'),
('done','Done'),
('canceled','Canceled'),
], string='Status', readonly=True, required=True, help=""),
}
_defaults = {
'state': 'draft',
}
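    # intended state flow, as wired by the buttons below (a sketch, assuming
    # the standard OpenERP workflow engine): draft -> revised -> waiting ->
    # done, with 'canceled' reachable via button_cancel and set_to_draft()
    # restarting the workflow from 'draft'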
def button_draft(self, cr, uid, ids):
self.write(cr, uid, ids, {'state': 'draft'})
def button_revised(self, cr, uid, ids):
self.write(cr, uid, ids, {'state': 'revised'})
def button_waiting(self, cr, uid, ids):
self.write(cr, uid, ids, {'state': 'waiting'})
def button_done(self, cr, uid, ids):
self.write(cr, uid, ids, {'state': 'done'})
def button_cancel(self, cr, uid, ids):
self.write(cr, uid, ids, {'state': 'canceled'})
def set_to_draft(self, cr, uid, ids, *args):
self.write(cr, uid, ids, {'state': 'draft'})
self.create_workflow(cr, uid, ids)
return True
| agpl-3.0 | 405,023,882,919,092,160 | 44.271186 | 94 | 0.445152 | false |
ATNF/askapsdp | Tools/scons_tools/doxybuilder.py | 1 | 2704 | # Copyright (c) 2009 CSIRO
# Australia Telescope National Facility (ATNF)
# Commonwealth Scientific and Industrial Research Organisation (CSIRO)
# PO Box 76, Epping NSW 1710, Australia
# [email protected]
#
# This file is part of the ASKAP software distribution.
#
# The ASKAP software distribution is free software: you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the License,
# or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import os
import string
import SCons.Action
import SCons.Defaults
import SCons.Builder
from askapdev.rbuild.utils import tag_name
def doxy_emitter(target, source, env):
insrc = str(source[0])
outsrc = insrc+".tmp"
curd = os.path.abspath(".")
doxytagname = tag_name(curd)
newsrc = open(outsrc, "w")
cpr = env["ASKAP_ROOT"]
newsrc.write(
"""@INCLUDE_PATH = %s
@INCLUDE = doxygen.conf
HTML_STYLESHEET = %s
OPTIMIZE_OUTPUT_JAVA = NO
EXTRACT_STATIC = YES
GENERATE_TAGFILE = %s
FILE_PATTERNS = *.h *.tcc *.cc *.c
""" % (os.path.join(cpr, env["DOXYINC"]),
os.path.join(cpr, env["DOXYCSS"]),
doxytagname,
)
)
newsrc.write(file(insrc, "r").read())
if env.has_key("DOXYTAGS"):
if hasattr(env["DOXYTAGS"], "__len__") and len(env["DOXYTAGS"]):
newsrc.write("TAGFILES = %s\n" % string.join(env["DOXYTAGS"]))
if env.has_key("PACKAGEINC") and env["PACKAGEINC"] is not None:
newsrc.write("INPUT += %s\n" % env["PACKAGEINC"])
newsrc.close()
env["DOXYGEN"] = os.path.join(cpr, env["DOXYGEN"])
return target, [outsrc]
def doxy_str(target, source, env):
return "Generating doxygen documentation"
doxy_action = SCons.Action.Action("$DOXYGEN $SOURCES", doxy_str)
doxygen_builder = SCons.Builder.Builder(
action = [doxy_action, SCons.Defaults.Delete("$SOURCES")],
src_suffix = ".conf",
emitter = doxy_emitter
)
def generate(env):
env.AppendUnique(
DOXYGEN = 'bin/doxygen',
DOXYINC = "share/doxygen",
DOXYCSS = "share/doxygen/doxygen.css",
PACKAGEINC = None
)
env.Append(BUILDERS = {
'Doxygen': doxygen_builder
})
def exists(env):
return True
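# Minimal usage sketch (hypothetical SConscript; the tool name and paths are
# assumptions, not part of this module):
#   env = Environment(tools=['default', 'doxybuilder'], ASKAP_ROOT='/opt/askap')
#   env.Doxygen('doxy.conf')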
| gpl-2.0 | 6,290,299,520,852,421,000 | 29.382022 | 77 | 0.678994 | false |
s-will/LocARNA | src/Utils/relsubseqs.py | 1 | 5767 | #!/usr/bin/env python
## enumerate the relevant sub sequences for a crossing RNA (dp format)
import re
from optparse import OptionParser
## ============================================================
## function definitions
def parseCmdLine():
"parse the command line arguments"
usage = "usage: %prog [options] input_file"
parser=OptionParser(usage=usage)
parser.add_option("-p", "--minprob", dest="minprob",
default=1e-6,
help="minimal accepted probability, used for filtering")
parser.add_option("-s", "--strategy", dest="strategy",
default="right",
help="strategy for reduction (left,right,leftright)")
parser.add_option("-i", "--informat", dest="informat",
default="dp",
help="input format")
(options,args)=parser.parse_args()
if len(args) != 1:
parser.error("incorrect number of arguments")
return (options,args[0])
def readdp(filename,minprob):
"""
read a dp file (output of RNAfold -p)
and return tuple of sequence and list of basepairs with probabilities
"""
input=open(filename,"r")
Lines=input.readlines()
state=''
sequence=''
pairs=[]
uboxpat=re.compile('(\d+) (\d+) (\d\.\d+) ubox')
for line in Lines:
#print line
if state=='sequence':
if re.match('\) } def', line):
state=''
else:
line=line[:-2]
sequence = sequence + line
elif re.match('/sequence.*',line):
state='sequence'
else:
ubox=uboxpat.match(line)
if ubox:
uboxgr=ubox.groups()
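                # dp files store sqrt(p) in the ubox entries, so square
                # the value to recover the pair probability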
p=float(uboxgr[2])*float(uboxgr[2])
if (p>=float(minprob)):
pairs.append((int(uboxgr[0]),int(uboxgr[1]),p))
return sequence,pairs
def readsimple(filename,minprob):
"""
read a simple format for describing RNAs with crossing secondary structure
    and return tuple of sequence and list of basepairs (without probabilities)
"""
input=open(filename,"r")
Lines=input.readlines()
state='sequence'
sequence=''
pairs=[]
pat=re.compile('(\d+) (\d+)')
for line in Lines:
#print line
if state=='sequence':
if re.match('^$',line):
state=''
else:
line=line[:-1]
sequence = sequence + line
else:
pair=pat.match(line)
if pair:
pairgr=pair.groups()
pairs.append((int(pairgr[0]),int(pairgr[1])))
return sequence,pairs
def incident_pairs_from_right(start,end,pairs):
"""
returns list of basepairs that have right end 'end'
and left end >= 'start'
"""
return filter((lambda p: p[0]>=start and p[1]==end),pairs)
def incident_pairs_from_left(start,end,pairs):
"""
symmetric version of incident_pairs_from_right
"""
return filter((lambda p: p[0]==start and p[1]<=end),pairs)
def decideChoiceKlein(start,end,l_incpairs,r_incpairs,pairs):
"make the choice for reducing left or right --- much in the way of Klein"
def max_dist(pairs):
sizes=[p[1]-p[0] for p in pairs]
sizes.append(0)
size=reduce((lambda x,y: max(x,y)), sizes)
return size
lsize=max_dist(l_incpairs)
rsize=max_dist(r_incpairs)
if (lsize<rsize): return 'left'
else: return 'right'
def count_enclosing_pairs(pos,pairs):
"count the number of pairs that enclose a position >pos<"
return len(filter((lambda p: p[0]<=pos and p[1]>=pos),pairs))
def decideChoice(start,end,pairs):
"make the choice for reducing left or right --- new experimental way"
lsize=len(filter((lambda p: p[0]<=start and p[1]>start),pairs)) #count_enclosing_pairs(start,pairs)
rsize=len(filter((lambda p: p[0]<end and p[1]>=end),pairs)) #count_enclosing_pairs(end,pairs)
if (lsize<rsize): return 'left'
else: return 'right'
def reduce_subseq(start,end,pairs,seen,strategy):
"recursively reduce to subsequences from left or right or using sort of Kleins strategy"
if end<start: return
if seen.has_key((start,end)):
# print 'seen',start,end
return
seen[(start,end)]=True
if (start-1,end+1) in pairs:
print " "*(start-1+2)+"("+"."*(end-start+1)+")"
print " "*(start-1+3-len(str(start)))+str(start)+"-"*(end-start+1)+str(end)
## make a choice, whether to reduce from left or right
r_incpairs=incident_pairs_from_right(start,end,pairs)
l_incpairs=incident_pairs_from_left(start,end,pairs)
if strategy=='right' or strategy=='left': theChoice=strategy
else:
# theChoice=decideChoiceKlein(start,end,l_incpairs,r_incpairs,pairs)
theChoice=decideChoice(start,end,pairs)
if theChoice=='right' :
for p in r_incpairs:
reduce_subseq(start,p[0]-1,pairs,seen,strategy)
reduce_subseq(p[0]+1,p[1]-1,pairs,seen,strategy)
reduce_subseq(start,end-1,pairs,seen,strategy)
elif theChoice=='left':
for p in l_incpairs:
reduce_subseq(p[0]+1,p[1]-1,pairs,seen,strategy)
reduce_subseq(p[1]+1,end,pairs,seen,strategy)
reduce_subseq(start+1,end,pairs,seen,strategy)
## ============================================================
## main program
(options,filename)=parseCmdLine()
if options.informat=='dp':
(seq,pairs)=readdp(filename,options.minprob)
else:
(seq,pairs)=readsimple(filename,options.minprob)
seqlen=len(seq)
print "seqlen =", seqlen, ", #pairs =",len(pairs)
print
print " "+seq
seen={}
reduce_subseq(1,seqlen,pairs,seen,options.strategy)
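# Example invocation (hypothetical input file name):
#   python relsubseqs.py -p 1e-4 -s leftright example_dp.ps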
| gpl-3.0 | -6,923,049,886,996,048,000 | 26.593301 | 103 | 0.584879 | false |
shigh/py3d3v | py3d3v/setup.py | 1 | 1503 | from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
import numpy as np
setup(
cmdclass = {'build_ext':build_ext},
include_dirs = [np.get_include()],
ext_modules = [Extension("interp",["interp.pyx", "par_interp.cpp"],
libraries=["m"],
extra_compile_args=['-fopenmp'],
extra_link_args=['-fopenmp'],
include_dirs = [np.get_include()],
language="c++"),
Extension("ewald",["ewald.pyx"],
libraries=["m"],
extra_compile_args=['-fopenmp'],
extra_link_args=['-fopenmp'],
include_dirs = [np.get_include()],
language="c++"),
Extension("core",["core.pyx", "par_core.cpp"],
extra_compile_args=['-fopenmp'],
extra_link_args=['-fopenmp'],
include_dirs = [np.get_include()],
language="c++"),
Extension("solvers",["solvers.pyx", "par_solvers.cpp"],
extra_compile_args=['-fopenmp'],
extra_link_args=['-fopenmp'],
include_dirs = [np.get_include()],
language="c++")]
)
| gpl-2.0 | -401,854,307,524,964,200 | 47.483871 | 74 | 0.413839 | false |
dekom/threepress-bookworm-read-only | bookworm/gdata/tests/gdata_tests/blogger/live_client_test.py | 2 | 5483 | #!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
# These tests attempt to connect to Google servers.
__author__ = '[email protected] (Jeff Scudder)'
import unittest
import gdata.blogger.client
import gdata.blogger.data
import gdata.gauth
import gdata.client
import atom.http_core
import atom.mock_http_core
import atom.core
import gdata.data
import gdata.test_config as conf
conf.options.register_option(conf.BLOG_ID_OPTION)
class BloggerClientTest(unittest.TestCase):
def setUp(self):
self.client = None
if conf.options.get_value('runlive') == 'true':
self.client = gdata.blogger.client.BloggerClient()
conf.configure_client(self.client, 'BloggerTest', 'blogger')
def tearDown(self):
conf.close_client(self.client)
def test_create_update_delete(self):
if not conf.options.get_value('runlive') == 'true':
return
# Either load the recording or prepare to make a live request.
conf.configure_cache(self.client, 'test_create_update_delete')
# Add a blog post.
created = self.client.add_post(conf.options.get_value('blogid'),
'test post from BloggerClientTest',
'Hey look, another test!',
labels=['test', 'python'])
self.assertEqual(created.title.text, 'test post from BloggerClientTest')
self.assertEqual(created.content.text, 'Hey look, another test!')
self.assertEqual(len(created.category), 2)
self.assert_(created.control is None)
# Change the title of the blog post we just added.
created.title.text = 'Edited'
updated = self.client.update(created)
self.assertEqual(updated.title.text, 'Edited')
self.assert_(isinstance(updated, gdata.blogger.data.BlogPost))
self.assertEqual(updated.content.text, created.content.text)
# Delete the test entry from the blog.
self.client.delete(updated)
def test_create_draft_post(self):
if not conf.options.get_value('runlive') == 'true':
return
conf.configure_cache(self.client, 'test_create_draft_post')
# Add a draft blog post.
created = self.client.add_post(conf.options.get_value('blogid'),
'draft test post from BloggerClientTest',
'This should only be a draft.',
labels=['test2', 'python'], draft=True)
self.assertEqual(created.title.text,
'draft test post from BloggerClientTest')
self.assertEqual(created.content.text, 'This should only be a draft.')
self.assertEqual(len(created.category), 2)
self.assert_(created.control is not None)
self.assert_(created.control.draft is not None)
self.assertEqual(created.control.draft.text, 'yes')
# Publish the blog post.
created.control.draft.text = 'no'
updated = self.client.update(created)
if updated.control is not None and updated.control.draft is not None:
self.assertNotEqual(updated.control.draft.text, 'yes')
# Delete the test entry from the blog using the URL instead of the entry.
self.client.delete(updated.find_edit_link())
def test_create_draft_page(self):
if not conf.options.get_value('runlive') == 'true':
return
conf.configure_cache(self.client, 'test_create_draft_page')
# List all pages on the blog.
pages_before = self.client.get_pages(conf.options.get_value('blogid'))
# Add a draft page to blog.
created = self.client.add_page(conf.options.get_value('blogid'),
'draft page from BloggerClientTest',
'draft content',
draft=True)
self.assertEqual(created.title.text, 'draft page from BloggerClientTest')
self.assertEqual(created.content.text, 'draft content')
self.assert_(created.control is not None)
self.assert_(created.control.draft is not None)
self.assertEqual(created.control.draft.text, 'yes')
self.assertEqual(str(int(created.get_page_id())), created.get_page_id())
# List all pages after adding one.
pages_after = self.client.get_pages(conf.options.get_value('blogid'))
self.assertEqual(len(pages_before.entry) + 1, len(pages_after.entry))
# Publish page.
created.control.draft.text = 'no'
updated = self.client.update(created)
if updated.control is not None and updated.control.draft is not None:
self.assertNotEqual(updated.control.draft.text, 'yes')
# Delete test page.
self.client.delete(updated.find_edit_link())
pages_after = self.client.get_pages(conf.options.get_value('blogid'))
self.assertEqual(len(pages_before.entry), len(pages_after.entry))
def suite():
return conf.build_suite([BloggerClientTest])
if __name__ == '__main__':
unittest.TextTestRunner().run(suite())
| bsd-3-clause | 1,090,791,838,829,291,600 | 34.836601 | 77 | 0.671348 | false |
HopkinsIDD/EpiForecastStatMech | epi_forecast_stat_mech/utils.py | 1 | 1845 | # Lint as: python3
"""Functions for manipulating nested structures of arrays."""
from typing import Any, Union
import jax
import jax.numpy as jnp
def slice_along_axis(
inputs: Any,
axis: int,
idx: Union[slice, int]
):
"""Returns slice of `inputs` defined by `idx` along axis `axis`.
Args:
inputs: pytree of arrays to slice.
axis: axis along which to slice the `inputs`.
idx: index or slice along axis `axis` that is returned.
Returns:
Slice of `inputs` defined by `idx` along axis `axis`.
"""
leaves, tree_def = jax.tree_flatten(inputs)
sliced = []
for array in leaves:
ndim = array.ndim
slc = tuple(idx if j == axis else slice(None, None) for j in range(ndim))
sliced.append(array[slc])
return jax.tree_unflatten(tree_def, sliced)
def split_along_axis(
inputs: Any,
axis: int,
split_idx: int,
post_squeeze_first: bool = True,
post_squeeze_second: bool = False
):
"""Returns a tuple of slices of `inputs` split along `axis` at `split_idx`.
Args:
inputs: pytree of arrays to split.
axis: axis along which to split the `inputs`.
split_idx: index along axis `axis` representing the first element in the
second split.
post_squeeze_first: whether to squeeze first slice along the `axis`.
post_squeeze_second: whether to squeeze second slice along the `axis`.
Returns:
Tuple of slices of `inputs` split along `axis` at `split_idx`.
"""
first_slice = slice_along_axis(inputs, axis, slice(0, split_idx))
second_slice = slice_along_axis(inputs, axis, slice(split_idx, None))
squeeze_fn = jax.partial(jnp.squeeze, axis=axis)
if post_squeeze_first:
first_slice = jax.tree_map(squeeze_fn, first_slice)
if post_squeeze_second:
second_slice = jax.tree_map(squeeze_fn, second_slice)
return first_slice, second_slice
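# Minimal usage sketch (hypothetical shapes, not part of this module):
#   x = {'obs': jnp.ones((3, 5))}
#   head, tail = split_along_axis(x, axis=1, split_idx=1)
#   # head['obs'].shape == (3,)   width-1 slice squeezed along axis 1
#   # tail['obs'].shape == (3, 4) second slice left unsqueezed by default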
| gpl-3.0 | -7,539,086,425,729,540,000 | 29.245902 | 77 | 0.680759 | false |
eJRF/ejrf | questionnaire/services/users.py | 1 | 3518 | from django.utils.datastructures import SortedDict
from questionnaire.models import Answer, AnswerGroup
from questionnaire.services.questionnaire_entry_form_service import QuestionnaireEntryFormService
class UserQuestionnaireService(object):
def __init__(self, country, questionnaire, version=None):
self.version = version
self.country = country
self.questionnaire = questionnaire
self.answers_in_questionnaire = self.questionnaire_answers()
self.current_answer_status = Answer.DRAFT_STATUS
self.set_versions()
self.answers = self.answers_in_questionnaire.filter(version=self.POST_version)
self.edit_after_submit = not self.POST_version == self.GET_version
def all_answers(self):
return Answer.objects.filter(country=self.country).select_subclasses()
def questionnaire_answers(self):
answer_groups = AnswerGroup.objects.filter(
grouped_question__subsection__section__questionnaire=self.questionnaire)
answers = Answer.objects.filter(country=self.country, answergroup__in=answer_groups,
questionnaire=self.questionnaire).select_subclasses()
if self.version:
return answers.filter(version=self.version)
return answers
def submit(self):
for answer in self.answers:
answer.status = Answer.SUBMITTED_STATUS
answer.save()
self.questionnaire.submissions.create(country=self.country, version=self.version or self.GET_version)
def answer_version(self):
answers = self.answers_in_questionnaire
if not answers.exists():
return 1
draft_answers = answers.filter(status=Answer.DRAFT_STATUS)
if draft_answers.exists():
return draft_answers.latest('modified').version
self.current_answer_status = Answer.SUBMITTED_STATUS
return answers.latest('modified').version + 1
def set_versions(self):
self.POST_version = self.answer_version()
if self.current_answer_status == Answer.SUBMITTED_STATUS:
self.GET_version = self.POST_version - 1
else:
self.GET_version = self.POST_version
def required_sections_answered(self):
for section in self.questionnaire.sections.all():
if not self.answered_required_questions_in(section):
self.unanswered_section = section
return False
return True
def answered_required_questions_in(self, section):
required_question_in_section = filter(lambda question: question.is_required, section.ordered_questions())
return self.answers.filter(question__in=required_question_in_section).count() == len(
required_question_in_section)
def all_sections_questionnaires(self):
initial = {'country': self.country, 'status': 'Draft', 'version': self.version or self.POST_version,
'questionnaire': self.questionnaire}
questionnaires = SortedDict()
for section in self.questionnaire.sections.order_by('order'):
questionnaires[section] = QuestionnaireEntryFormService(section, initial=initial)
return questionnaires
def preview(self):
version = self.version or self.POST_version
return self.questionnaire.submissions.filter(country=self.country, version=version).exists()
def attachments(self):
return self.questionnaire.support_documents.filter(country=self.country) | bsd-3-clause | -1,901,261,143,422,016,500 | 42.9875 | 113 | 0.681069 | false |
nttks/edx-platform | common/lib/xmodule/xmodule/tabs.py | 1 | 18918 | """
Implement CourseTab
"""
from abc import ABCMeta
import logging
from xblock.fields import List
from openedx.core.lib.api.plugins import PluginError
log = logging.getLogger("edx.courseware")
# Make '_' a no-op so we can scrape strings. Using lambda instead of
# `django.utils.translation.ugettext_noop` because Django cannot be imported in this file
_ = lambda text: text
class CourseTab(object):
"""
The Course Tab class is a data abstraction for all tabs (i.e., course navigation links) within a course.
It is an abstract class - to be inherited by various tab types.
Derived classes are expected to override methods as needed.
When a new tab class is created, it should define the type and add it in this class' factory method.
"""
__metaclass__ = ABCMeta
# Class property that specifies the type of the tab. It is generally a constant value for a
# subclass, shared by all instances of the subclass.
type = ''
icon = ''
# The title of the tab, which should be internationalized using
# ugettext_noop since the user won't be available in this context.
title = None
# Class property that specifies whether the tab can be hidden for a particular course
is_hideable = False
# Class property that specifies whether the tab is hidden for a particular course
is_hidden = False
# The relative priority of this view that affects the ordering (lower numbers shown first)
priority = None
# Class property that specifies whether the tab can be moved within a course's list of tabs
is_movable = True
# Class property that specifies whether the tab is a collection of other tabs
is_collection = False
# True if this tab is dynamically added to the list of tabs
is_dynamic = False
# True if this tab is a default for the course (when enabled)
is_default = True
# True if this tab can be included more than once for a course.
allow_multiple = False
# If there is a single view associated with this tab, this is the name of it
view_name = None
def __init__(self, tab_dict):
"""
Initializes class members with values passed in by subclasses.
Args:
tab_dict (dict) - a dictionary of parameters used to build the tab.
"""
self.name = tab_dict.get('name', self.title)
self.tab_id = tab_dict.get('tab_id', getattr(self, 'tab_id', self.type))
self.link_func = tab_dict.get('link_func', link_reverse_func(self.view_name))
self.is_hidden = tab_dict.get('is_hidden', False)
@classmethod
def is_enabled(cls, course, user=None):
"""Returns true if this course tab is enabled in the course.
Args:
course (CourseDescriptor): the course using the feature
user (User): an optional user interacting with the course (defaults to None)
"""
raise NotImplementedError()
def get(self, key, default=None):
"""
Akin to the get method on Python dictionary objects, gracefully returns the value associated with the
given key, or the default if key does not exist.
"""
try:
return self[key]
except KeyError:
return default
def __getitem__(self, key):
"""
This method allows callers to access CourseTab members with the d[key] syntax as is done with
Python dictionary objects.
"""
if key == 'name':
return self.name
elif key == 'type':
return self.type
elif key == 'tab_id':
return self.tab_id
elif key == 'is_hidden':
return self.is_hidden
else:
raise KeyError('Key {0} not present in tab {1}'.format(key, self.to_json()))
def __setitem__(self, key, value):
"""
This method allows callers to change CourseTab members with the d[key]=value syntax as is done with
Python dictionary objects. For example: course_tab['name'] = new_name
Note: the 'type' member can be 'get', but not 'set'.
"""
if key == 'name':
self.name = value
elif key == 'tab_id':
self.tab_id = value
elif key == 'is_hidden':
self.is_hidden = value
else:
raise KeyError('Key {0} cannot be set in tab {1}'.format(key, self.to_json()))
def __eq__(self, other):
"""
Overrides the equal operator to check equality of member variables rather than the object's address.
Also allows comparison with dict-type tabs (needed to support callers implemented before this class
was implemented).
"""
if isinstance(other, dict) and not self.validate(other, raise_error=False):
# 'other' is a dict-type tab and did not validate
return False
# allow tabs without names; if a name is required, its presence was checked in the validator.
name_is_eq = (other.get('name') is None or self.name == other['name'])
# only compare the persisted/serialized members: 'type' and 'name'
return self.type == other.get('type') and name_is_eq
def __ne__(self, other):
"""
Overrides the not equal operator as a partner to the equal operator.
"""
return not self == other
@classmethod
def validate(cls, tab_dict, raise_error=True):
"""
Validates the given dict-type tab object to ensure it contains the expected keys.
This method should be overridden by subclasses that require certain keys to be persisted in the tab.
"""
return key_checker(['type'])(tab_dict, raise_error)
@classmethod
def load(cls, type_name, **kwargs):
"""
Constructs a tab of the given type_name.
Args:
type_name (str) - the type of tab that should be constructed
**kwargs - any other keyword arguments needed for constructing this tab
Returns:
an instance of the CourseTab subclass that matches the type_name
"""
json_dict = kwargs.copy()
json_dict['type'] = type_name
return cls.from_json(json_dict)
def to_json(self):
"""
Serializes the necessary members of the CourseTab object to a json-serializable representation.
This method is overridden by subclasses that have more members to serialize.
Returns:
a dictionary with keys for the properties of the CourseTab object.
"""
to_json_val = {'type': self.type, 'name': self.name}
if self.is_hidden:
to_json_val.update({'is_hidden': True})
return to_json_val
@staticmethod
def from_json(tab_dict):
"""
Deserializes a CourseTab from a json-like representation.
The subclass that is instantiated is determined by the value of the 'type' key in the
given dict-type tab. The given dict-type tab is validated before instantiating the CourseTab object.
If the tab_type is not recognized, then an exception is logged and None is returned.
The intention is that the user should still be able to use the course even if a
particular tab is not found for some reason.
Args:
tab: a dictionary with keys for the properties of the tab.
Raises:
InvalidTabsException if the given tab doesn't have the right keys.
"""
# TODO: don't import openedx capabilities from common
from openedx.core.lib.course_tabs import CourseTabPluginManager
tab_type_name = tab_dict.get('type')
if tab_type_name is None:
log.error('No type included in tab_dict: %r', tab_dict)
return None
try:
tab_type = CourseTabPluginManager.get_plugin(tab_type_name)
except PluginError:
log.exception(
"Unknown tab type %r Known types: %r.",
tab_type_name,
CourseTabPluginManager.get_tab_types()
)
return None
tab_type.validate(tab_dict)
return tab_type(tab_dict=tab_dict)
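# Illustrative use of CourseTab.from_json (assumes the 'static_tab' plugin is
# registered, as in a default install):
#   CourseTab.from_json({'type': 'static_tab', 'name': 'Syllabus',
#                        'url_slug': 'syllabus'})
# returns a StaticTab instance, or None if the type were unregistered.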
class StaticTab(CourseTab):
"""
A custom tab.
"""
type = 'static_tab'
icon = 'fa fa-circle'
is_default = False # A static tab is never added to a course by default
allow_multiple = True
def __init__(self, tab_dict=None, name=None, url_slug=None):
def link_func(course, reverse_func):
""" Returns a url for a given course and reverse function. """
return reverse_func(self.type, args=[course.id.to_deprecated_string(), self.url_slug])
self.url_slug = tab_dict.get('url_slug') if tab_dict else url_slug
if tab_dict is None:
tab_dict = dict()
if name is not None:
tab_dict['name'] = name
tab_dict['link_func'] = link_func
tab_dict['tab_id'] = 'static_tab_{0}'.format(self.url_slug)
super(StaticTab, self).__init__(tab_dict)
@classmethod
def is_enabled(cls, course, user=None):
"""
Static tabs are viewable to everyone, even anonymous users.
"""
return True
@classmethod
def validate(cls, tab_dict, raise_error=True):
"""
Ensures that the specified tab_dict is valid.
"""
return (super(StaticTab, cls).validate(tab_dict, raise_error)
and key_checker(['name', 'url_slug'])(tab_dict, raise_error))
def __getitem__(self, key):
if key == 'url_slug':
return self.url_slug
else:
return super(StaticTab, self).__getitem__(key)
def __setitem__(self, key, value):
if key == 'url_slug':
self.url_slug = value
else:
super(StaticTab, self).__setitem__(key, value)
def to_json(self):
""" Return a dictionary representation of this tab. """
to_json_val = super(StaticTab, self).to_json()
to_json_val.update({'url_slug': self.url_slug})
return to_json_val
def __eq__(self, other):
if not super(StaticTab, self).__eq__(other):
return False
return self.url_slug == other.get('url_slug')
class CourseTabList(List):
"""
An XBlock field class that encapsulates a collection of Tabs in a course.
It is automatically created and can be retrieved through a CourseDescriptor object: course.tabs
"""
# TODO: Ideally, we'd like for this list of tabs to be dynamically
# generated by the tabs plugin code. For now, we're leaving it like this to
# preserve backwards compatibility.
@staticmethod
def initialize_default(course):
"""
An explicit initialize method is used to set the default values, rather than implementing an
__init__ method. This is because the default values are dependent on other information from
within the course.
"""
course.tabs.extend([
CourseTab.load('courseware'),
CourseTab.load('course_info')
])
# Presence of syllabus tab is indicated by a course attribute
if hasattr(course, 'syllabus_present') and course.syllabus_present:
course.tabs.append(CourseTab.load('syllabus'))
# If the course has a discussion link specified, use that even if we feature
# flag discussions off. Disabling that is mostly a server safety feature
# at this point, and we don't need to worry about external sites.
if course.discussion_link:
discussion_tab = CourseTab.load(
'external_discussion', name=_('External Discussion'), link=course.discussion_link
)
else:
discussion_tab = CourseTab.load('discussion')
course.tabs.extend([
CourseTab.load('textbooks'),
discussion_tab,
CourseTab.load('wiki'),
CourseTab.load('progress'),
])
@staticmethod
def get_discussion(course):
"""
Returns the discussion tab for the given course. It can be either of type 'discussion'
or 'external_discussion'. The returned tab object is self-aware of the 'link' that it corresponds to.
"""
# the discussion_link setting overrides everything else, even if there is a discussion tab in the course tabs
if course.discussion_link:
return CourseTab.load(
'external_discussion', name=_('External Discussion'), link=course.discussion_link
)
# find one of the discussion tab types in the course tabs
for tab in course.tabs:
if tab.type == 'discussion' or tab.type == 'external_discussion':
return tab
return None
@staticmethod
def get_tab_by_slug(tab_list, url_slug):
"""
Look for a tab with the specified 'url_slug'. Returns the tab or None if not found.
"""
return next((tab for tab in tab_list if tab.get('url_slug') == url_slug), None)
@staticmethod
def get_tab_by_type(tab_list, tab_type):
"""
Look for a tab with the specified type. Returns the first matching tab.
"""
return next((tab for tab in tab_list if tab.type == tab_type), None)
@staticmethod
def get_tab_by_id(tab_list, tab_id):
"""
Look for a tab with the specified tab_id. Returns the first matching tab.
"""
return next((tab for tab in tab_list if tab.tab_id == tab_id), None)
@staticmethod
def iterate_displayable(course, user=None, inline_collections=True):
"""
Generator method for iterating through all tabs that can be displayed for the given course and
the given user with the provided access settings.
"""
for tab in course.tabs:
if tab.is_enabled(course, user=user) and not (user and tab.is_hidden):
if tab.is_collection:
# If rendering inline that add each item in the collection,
# else just show the tab itself as long as it is not empty.
if inline_collections:
for item in tab.items(course):
yield item
elif len(list(tab.items(course))) > 0:
yield tab
else:
yield tab
@classmethod
def validate_tabs(cls, tabs):
"""
Check that the tabs set for the specified course is valid. If it
isn't, raise InvalidTabsException with the complaint.
Specific rules checked:
- if no tabs specified, that's fine
- if tabs specified, first two must have type 'courseware' and 'course_info', in that order.
"""
if tabs is None or len(tabs) == 0:
return
if len(tabs) < 2:
raise InvalidTabsException("Expected at least two tabs. tabs: '{0}'".format(tabs))
if tabs[0].get('type') != 'courseware':
raise InvalidTabsException(
"Expected first tab to have type 'courseware'. tabs: '{0}'".format(tabs))
if tabs[1].get('type') != 'course_info':
raise InvalidTabsException(
"Expected second tab to have type 'course_info'. tabs: '{0}'".format(tabs))
# the following tabs should appear only once
# TODO: don't import openedx capabilities from common
from openedx.core.lib.course_tabs import CourseTabPluginManager
for tab_type in CourseTabPluginManager.get_tab_types():
if not tab_type.allow_multiple:
cls._validate_num_tabs_of_type(tabs, tab_type.type, 1)
@staticmethod
def _validate_num_tabs_of_type(tabs, tab_type, max_num):
"""
Check that the number of times that the given 'tab_type' appears in 'tabs' is less than or equal to 'max_num'.
"""
count = sum(1 for tab in tabs if tab.get('type') == tab_type)
if count > max_num:
msg = (
"Tab of type '{type}' appears {count} time(s). "
"Expected maximum of {max} time(s)."
).format(
type=tab_type, count=count, max=max_num,
)
raise InvalidTabsException(msg)
def to_json(self, values):
"""
Overrides the to_json method to serialize all the CourseTab objects to a json-serializable representation.
"""
json_data = []
if values:
for val in values:
if isinstance(val, CourseTab):
json_data.append(val.to_json())
elif isinstance(val, dict):
json_data.append(val)
else:
continue
return json_data
def from_json(self, values):
"""
Overrides the from_json method to de-serialize the CourseTab objects from a json-like representation.
"""
self.validate_tabs(values)
tabs = []
for tab_dict in values:
tab = CourseTab.from_json(tab_dict)
if tab:
tabs.append(tab)
return tabs
# Validators
# A validator takes a dict and raises InvalidTabsException if required fields are missing or otherwise wrong.
# (e.g. "is there a 'name' field?). Validators can assume that the type field is valid.
def key_checker(expected_keys):
"""
Returns a function that checks that specified keys are present in a dict.
"""
def check(actual_dict, raise_error=True):
"""
Function that checks whether all keys in the expected_keys object is in the given actual_dict object.
"""
missing = set(expected_keys) - set(actual_dict.keys())
if not missing:
return True
if raise_error:
raise InvalidTabsException(
"Expected keys '{0}' are not present in the given dict: {1}".format(expected_keys, actual_dict)
)
else:
return False
return check
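# Illustrative behaviour (example input, not from the original source):
#   key_checker(['type'])({'name': 'Home'}, raise_error=False) -> False,
#   because the required 'type' key is absent; with raise_error=True the
#   same call raises InvalidTabsException instead.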
def link_reverse_func(reverse_name):
"""
Returns a function that takes in a course and reverse_url_func,
and calls the reverse_url_func with the given reverse_name and course's ID.
This is used to generate the url for a CourseTab without having access to Django's reverse function.
"""
return lambda course, reverse_url_func: reverse_url_func(reverse_name, args=[course.id.to_deprecated_string()])
def need_name(dictionary, raise_error=True):
"""
Returns whether the 'name' key exists in the given dictionary.
"""
return key_checker(['name'])(dictionary, raise_error)
class InvalidTabsException(Exception):
"""
A complaint about invalid tabs.
"""
pass
class UnequalTabsException(Exception):
"""
A complaint about tab lists being unequal
"""
pass
| agpl-3.0 | -7,294,548,381,858,526,000 | 35.241379 | 118 | 0.609208 | false |
joshspeagle/dynesty | tests/test_ellipsoid.py | 1 | 2592 | import dynesty.bounding as db
import numpy as np
import scipy.stats
from utils import get_rstate
def test_sample():
    # test that samples drawn from two overlapping ellipsoids are
    # uniform within them
rad = 1
shift = 0.75
ndim = 10
cen1 = np.zeros(ndim)
cen2 = np.zeros(ndim)
cen2[0] = shift
    sig = np.eye(ndim) * rad**2
ells = [db.Ellipsoid(cen1, sig), db.Ellipsoid(cen2, sig)]
mu = db.MultiEllipsoid(ells)
R = []
nsim = 100000
rstate = get_rstate()
for i in range(nsim):
R.append(mu.sample(rstate=rstate)[0])
R = np.array(R)
assert (all([mu.contains(_) for _ in R]))
# here I'm checking that all the points are uniformly distributed
# within each ellipsoid
for curc in [cen1, cen2]:
dist1 = (np.sqrt(np.sum((R - curc)**2, axis=1)) / rad)
# radius from 0 to 1
xdist1 = dist1**ndim
# should be uniformly distributed from 0 to 1
xdist1 = xdist1[xdist1 < 1]
pval = scipy.stats.kstest(xdist1,
scipy.stats.uniform(loc=0.0, scale=1).cdf)[1]
assert ((pval > 0.003) & (pval < 0.997))
nhalf = (R[:, 0] > shift / 2.).sum()
print(nhalf, nsim)
assert (np.abs(nhalf - 0.5 * nsim) < 5 * np.sqrt(0.5 * nsim))
def test_sample_q():
    # test that samples drawn from two overlapping ellipsoids are
    # uniform within them
rad = 1
shift = 0.75
ndim = 10
cen1 = np.zeros(ndim)
cen2 = np.zeros(ndim)
cen2[0] = shift
    sig = np.eye(ndim) * rad**2
ells = [db.Ellipsoid(cen1, sig), db.Ellipsoid(cen2, sig)]
mu = db.MultiEllipsoid(ells)
R = []
nsim = 100000
rstate = get_rstate()
for i in range(nsim):
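        # sample(return_q=True) also returns q, the number of ellipsoids
        # containing the proposal; accepting with probability 1/q removes
        # the overlap double-counting so draws stay uniform over the union.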
while True:
x, _, q = mu.sample(return_q=True, rstate=rstate)
if rstate.uniform() < 1. / q:
R.append(x)
break
R = np.array(R)
assert (all([mu.contains(_) for _ in R]))
# here I'm checking that all the points are uniformly distributed
# within each ellipsoid
for curc in [cen1, cen2]:
dist1 = (np.sqrt(np.sum((R - curc)**2, axis=1)) / rad)
# radius from 0 to 1
xdist1 = dist1**ndim
# should be uniformly distributed from 0 to 1
xdist1 = xdist1[xdist1 < 1]
pval = scipy.stats.kstest(xdist1,
scipy.stats.uniform(loc=0.0, scale=1).cdf)[1]
assert ((pval > 0.003) & (pval < 0.997))
nhalf = (R[:, 0] > shift / 2.).sum()
print(nhalf, nsim)
assert (np.abs(nhalf - 0.5 * nsim) < 5 * np.sqrt(0.5 * nsim))
| mit | 3,505,348,217,995,768,000 | 31.810127 | 79 | 0.559799 | false |
spesmilo/electrum | electrum/plugins/safe_t/clientbase.py | 1 | 10111 | import time
from struct import pack
from typing import Optional
from electrum import ecc
from electrum.i18n import _
from electrum.util import UserCancelled
from electrum.keystore import bip39_normalize_passphrase
from electrum.bip32 import BIP32Node, convert_bip32_path_to_list_of_uint32
from electrum.logging import Logger
from electrum.plugin import runs_in_hwd_thread
from electrum.plugins.hw_wallet.plugin import HardwareClientBase, HardwareHandlerBase
class GuiMixin(object):
# Requires: self.proto, self.device
handler: Optional[HardwareHandlerBase]
# ref: https://github.com/trezor/trezor-common/blob/44dfb07cfaafffada4b2ce0d15ba1d90d17cf35e/protob/types.proto#L89
messages = {
3: _("Confirm the transaction output on your {} device"),
4: _("Confirm internal entropy on your {} device to begin"),
5: _("Write down the seed word shown on your {}"),
6: _("Confirm on your {} that you want to wipe it clean"),
7: _("Confirm on your {} device the message to sign"),
8: _("Confirm the total amount spent and the transaction fee on your "
"{} device"),
10: _("Confirm wallet address on your {} device"),
14: _("Choose on your {} device where to enter your passphrase"),
'default': _("Check your {} device to continue"),
}
def callback_Failure(self, msg):
# BaseClient's unfortunate call() implementation forces us to
# raise exceptions on failure in order to unwind the stack.
# However, making the user acknowledge they cancelled
# gets old very quickly, so we suppress those. The NotInitialized
# one is misnamed and indicates a passphrase request was cancelled.
if msg.code in (self.types.FailureType.PinCancelled,
self.types.FailureType.ActionCancelled,
self.types.FailureType.NotInitialized):
raise UserCancelled()
raise RuntimeError(msg.message)
def callback_ButtonRequest(self, msg):
message = self.msg
if not message:
message = self.messages.get(msg.code, self.messages['default'])
self.handler.show_message(message.format(self.device), self.cancel)
return self.proto.ButtonAck()
def callback_PinMatrixRequest(self, msg):
show_strength = True
if msg.type == 2:
msg = _("Enter a new PIN for your {}:")
elif msg.type == 3:
msg = (_("Re-enter the new PIN for your {}.\n\n"
"NOTE: the positions of the numbers have changed!"))
else:
msg = _("Enter your current {} PIN:")
show_strength = False
pin = self.handler.get_pin(msg.format(self.device), show_strength=show_strength)
if len(pin) > 9:
self.handler.show_error(_('The PIN cannot be longer than 9 characters.'))
pin = '' # to cancel below
if not pin:
return self.proto.Cancel()
return self.proto.PinMatrixAck(pin=pin)
def callback_PassphraseRequest(self, req):
if req and hasattr(req, 'on_device') and req.on_device is True:
return self.proto.PassphraseAck()
if self.creating_wallet:
msg = _("Enter a passphrase to generate this wallet. Each time "
"you use this wallet your {} will prompt you for the "
"passphrase. If you forget the passphrase you cannot "
"access the bitcoins in the wallet.").format(self.device)
else:
msg = _("Enter the passphrase to unlock this wallet:")
passphrase = self.handler.get_passphrase(msg, self.creating_wallet)
if passphrase is None:
return self.proto.Cancel()
passphrase = bip39_normalize_passphrase(passphrase)
ack = self.proto.PassphraseAck(passphrase=passphrase)
length = len(ack.passphrase)
if length > 50:
self.handler.show_error(_("Too long passphrase ({} > 50 chars).").format(length))
return self.proto.Cancel()
return ack
def callback_PassphraseStateRequest(self, msg):
return self.proto.PassphraseStateAck()
def callback_WordRequest(self, msg):
self.step += 1
msg = _("Step {}/24. Enter seed word as explained on "
"your {}:").format(self.step, self.device)
word = self.handler.get_word(msg)
# Unfortunately the device can't handle self.proto.Cancel()
return self.proto.WordAck(word=word)
class SafeTClientBase(HardwareClientBase, GuiMixin, Logger):
def __init__(self, handler, plugin, proto):
assert hasattr(self, 'tx_api') # ProtocolMixin already constructed?
HardwareClientBase.__init__(self, plugin=plugin)
self.proto = proto
self.device = plugin.device
self.handler = handler
self.tx_api = plugin
self.types = plugin.types
self.msg = None
self.creating_wallet = False
Logger.__init__(self)
self.used()
def __str__(self):
return "%s/%s" % (self.label(), self.features.device_id)
def label(self):
return self.features.label
def get_soft_device_id(self):
return self.features.device_id
def is_initialized(self):
return self.features.initialized
def is_pairable(self):
return not self.features.bootloader_mode
@runs_in_hwd_thread
def has_usable_connection_with_device(self):
try:
res = self.ping("electrum pinging device")
assert res == "electrum pinging device"
except BaseException:
return False
return True
def used(self):
self.last_operation = time.time()
def prevent_timeouts(self):
self.last_operation = float('inf')
@runs_in_hwd_thread
def timeout(self, cutoff):
'''Time out the client if the last operation was before cutoff.'''
if self.last_operation < cutoff:
self.logger.info("timed out")
self.clear_session()
@staticmethod
def expand_path(n):
return convert_bip32_path_to_list_of_uint32(n)
@runs_in_hwd_thread
def cancel(self):
'''Provided here as in keepkeylib but not safetlib.'''
self.transport.write(self.proto.Cancel())
def i4b(self, x):
return pack('>I', x)
@runs_in_hwd_thread
def get_xpub(self, bip32_path, xtype):
address_n = self.expand_path(bip32_path)
creating = False
node = self.get_public_node(address_n, creating).node
return BIP32Node(xtype=xtype,
eckey=ecc.ECPubkey(node.public_key),
chaincode=node.chain_code,
depth=node.depth,
fingerprint=self.i4b(node.fingerprint),
child_number=self.i4b(node.child_num)).to_xpub()
@runs_in_hwd_thread
def toggle_passphrase(self):
if self.features.passphrase_protection:
self.msg = _("Confirm on your {} device to disable passphrases")
else:
self.msg = _("Confirm on your {} device to enable passphrases")
enabled = not self.features.passphrase_protection
self.apply_settings(use_passphrase=enabled)
@runs_in_hwd_thread
def change_label(self, label):
self.msg = _("Confirm the new label on your {} device")
self.apply_settings(label=label)
@runs_in_hwd_thread
def change_homescreen(self, homescreen):
self.msg = _("Confirm on your {} device to change your home screen")
self.apply_settings(homescreen=homescreen)
@runs_in_hwd_thread
def set_pin(self, remove):
if remove:
self.msg = _("Confirm on your {} device to disable PIN protection")
elif self.features.pin_protection:
self.msg = _("Confirm on your {} device to change your PIN")
else:
self.msg = _("Confirm on your {} device to set a PIN")
self.change_pin(remove)
@runs_in_hwd_thread
def clear_session(self):
'''Clear the session to force pin (and passphrase if enabled)
re-entry. Does not leak exceptions.'''
self.logger.info(f"clear session: {self}")
self.prevent_timeouts()
try:
super(SafeTClientBase, self).clear_session()
except BaseException as e:
# If the device was removed it has the same effect...
self.logger.info(f"clear_session: ignoring error {e}")
@runs_in_hwd_thread
def get_public_node(self, address_n, creating):
self.creating_wallet = creating
return super(SafeTClientBase, self).get_public_node(address_n)
@runs_in_hwd_thread
def close(self):
        '''Called when our wallet was closed or the device removed.'''
self.logger.info("closing client")
self.clear_session()
# Release the device
self.transport.close()
def firmware_version(self):
f = self.features
return (f.major_version, f.minor_version, f.patch_version)
def atleast_version(self, major, minor=0, patch=0):
return self.firmware_version() >= (major, minor, patch)
@staticmethod
def wrapper(func):
'''Wrap methods to clear any message box they opened.'''
def wrapped(self, *args, **kwargs):
try:
self.prevent_timeouts()
return func(self, *args, **kwargs)
finally:
self.used()
self.handler.finished()
self.creating_wallet = False
self.msg = None
return wrapped
@staticmethod
def wrap_methods(cls):
for method in ['apply_settings', 'change_pin',
'get_address', 'get_public_node',
'load_device_by_mnemonic', 'load_device_by_xprv',
'recovery_device', 'reset_device', 'sign_message',
'sign_tx', 'wipe_device']:
setattr(cls, method, cls.wrapper(getattr(cls, method)))
| mit | 6,721,036,304,798,242,000 | 37.154717 | 119 | 0.608842 | false |
macarena/aulas | batalhanaval/batalha_naval_sabado.py | 1 | 2113 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from random import randint
from time import sleep
class Navio:
def __init__(self, nome):
self.nome = nome
self.vivo = True
def posiciona(self, linha, coluna):
self.linha = linha
self.coluna = coluna
def __repr__(self):
if self.vivo:
return "W"
else:
return "!"
class Tabuleiro:
tentativa = 0
def __init__(self, navios, linhas, colunas):
if colunas:
self.colunas = colunas
else:
self.colunas = linhas
self.linhas = linhas
self.casas = []
for x in range(linhas):
self.casas.append(["O"] * colunas)
for navio in navios:
self.arrumar(navio)
def arrumar(self, navio):
casa = ''
while casa != "O":
linha = randint(0, self.linhas -1)
coluna = randint(0, self.colunas -1)
casa = self.casas[linha][coluna]
self.casas[linha][coluna] = navio
navio.posiciona(linha,coluna)
def mostra(self):
for linha in self.casas:
print " ".join(str(casa) for casa in linha)
def chute(self, linha, coluna):
if isinstance(self.casas[linha][coluna], Navio):
print "\nParabéns! Acertou mizerávi...\n"
self.casas[linha][coluna].vivo = False
else:
print "\nVocê errou!\n"
self.casas[linha][coluna] = "X"
def vivos(navios):
vivos = [n for n in navios if n.vivo]
if len(vivos) > 0:
return True
else:
return False
barcos = [Navio("titanic"),Navio("bote"),Navio("caravela")]
print "Vamos jogar batalha naval! UHUUUUU!!!"
jogo = True
tabuleiro = Tabuleiro(barcos, 5, 5)
tabuleiro.mostra()
while vivos(barcos):
    chute_linha = input("which row?") - 1
    chute_coluna = input("which column?") - 1
tabuleiro.chute(chute_linha,chute_coluna)
tabuleiro.mostra()
print "PARABÉNS, VOCÊ DETONOU!!"
| gpl-2.0 | -2,727,341,878,696,295,000 | 22.287356 | 59 | 0.534785 | false |
zellyn/django-kanboard | src/kanboard/migrations/0003_created_by.py | 1 | 6538 |
from south.db import db
from django.db import models
from kanboard.models import *
class Migration:
def forwards(self, orm):
# Adding field 'Card.created_by'
db.add_column('kanboard_card', 'created_by', orm['kanboard.card:created_by'])
def backwards(self, orm):
# Deleting field 'Card.created_by'
db.delete_column('kanboard_card', 'created_by_id')
models = {
'auth.group': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)"},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'kanboard.board': {
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '80'})
},
'kanboard.card': {
'backlogged_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'blocked': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'blocked_because': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'board': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'cards'", 'to': "orm['kanboard.Board']"}),
'color': ('django.db.models.fields.CharField', [], {'max_length': '7', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'done_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.SmallIntegerField', [], {}),
'phase': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'cards'", 'to': "orm['kanboard.Phase']"}),
'ready': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'size': ('django.db.models.fields.CharField', [], {'max_length': '80', 'blank': 'True'}),
'started_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '80'})
},
'kanboard.phase': {
'board': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'phases'", 'to': "orm['kanboard.Board']"}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'limit': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'order': ('django.db.models.fields.SmallIntegerField', [], {}),
'status': ('django.db.models.fields.CharField', [], {'default': "'progress'", 'max_length': '25'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '80'})
},
'kanboard.phaselog': {
'Meta': {'unique_together': "(('phase', 'date'),)"},
'count': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'date': ('django.db.models.fields.DateField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'phase': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'logs'", 'to': "orm['kanboard.Phase']"})
}
}
complete_apps = ['kanboard']
| bsd-3-clause | -6,799,312,279,836,243,000 | 65.714286 | 139 | 0.539768 | false |
Davideddu/kivy-forkedtouch | kivy/uix/codeinput.py | 1 | 6518 | '''
Code Input
==========
.. versionadded:: 1.5.0
.. image:: images/codeinput.jpg
The :class:`CodeInput` provides a box of editable highlighted text like the
one shown in the image.
It supports all the features provided by the :class:`~kivy.uix.textinput` as
well as code highlighting for `languages supported by pygments
<http://pygments.org/docs/lexers/>`_ along with `KivyLexer` for
:mod:`kivy.lang` highlighting.
Usage example
-------------
To create a CodeInput with highlighting for `KV language`::
from kivy.uix.codeinput import CodeInput
from kivy.extras.highlight import KivyLexer
codeinput = CodeInput(lexer=KivyLexer())
To create a CodeInput with highlighting for `Cython`::
from kivy.uix.codeinput import CodeInput
from pygments.lexers import CythonLexer
codeinput = CodeInput(lexer=CythonLexer())
'''
__all__ = ('CodeInput', )
from pygments import highlight
from pygments import lexers
from pygments.formatters import BBCodeFormatter
from kivy.uix.textinput import TextInput
from kivy.core.text.markup import MarkupLabel as Label
from kivy.cache import Cache
from kivy.properties import ObjectProperty
from kivy.utils import get_hex_from_color
Cache_get = Cache.get
Cache_append = Cache.append
# TODO: color chooser for keywords/strings/...
class CodeInput(TextInput):
'''CodeInput class, used for displaying highlighted code.
'''
lexer = ObjectProperty(None)
'''This holds the selected Lexer used by pygments to highlight the code.
:data:`lexer` is an :class:`~kivy.properties.ObjectProperty` and
defaults to `PythonLexer`.
'''
def __init__(self, **kwargs):
self.formatter = BBCodeFormatter()
self.lexer = lexers.PythonLexer()
self.text_color = '#000000'
self._label_cached = Label()
self.use_text_color = True
super(CodeInput, self).__init__(**kwargs)
self._line_options = kw = self._get_line_options()
self._label_cached = Label(**kw)
# use text_color as foreground color
text_color = kwargs.get('foreground_color')
if text_color:
self.text_color = get_hex_from_color(text_color)
# set foreground to white to allow text colors to show
# use text_color as the default color in bbcodes
self.use_text_color = False
self.foreground_color = [1, 1, 1, .999]
if not kwargs.get('background_color'):
self.background_color = [.9, .92, .92, 1]
def _create_line_label(self, text, hint=False):
# Create a label from a text, using line options
ntext = text.replace(u'\n', u'').replace(u'\t', u' ' * self.tab_width)
if self.password and not hint: # Don't replace hint_text with *
ntext = u'*' * len(ntext)
ntext = self._get_bbcode(ntext)
kw = self._get_line_options()
cid = '%s\0%s' % (ntext, str(kw))
texture = Cache_get('textinput.label', cid)
if not texture:
            # FIXME right now, we can't render very long lines...
            # if we move to a "VBO" version as a fallback, we won't need to
            # do this.
# try to find the maximum text we can handle
label = Label(text=ntext, **kw)
if text.find(u'\n') > 0:
label.text = u''
else:
label.text = ntext
try:
label.refresh()
except ValueError:
return
# ok, we found it.
texture = label.texture
Cache_append('textinput.label', cid, texture)
label.text = ''
return texture
def _get_line_options(self):
kw = super(CodeInput, self)._get_line_options()
kw['markup'] = True
kw['valign'] = 'top'
kw['codeinput'] = repr(self.lexer)
return kw
def _get_text_width(self, text, tab_width, _label_cached):
# Return the width of a text, according to the current line options
width = Cache_get('textinput.width', text + u'_' +
repr(self._get_line_options()))
if width:
return width
lbl = self._create_line_label(text)
width = lbl.width if lbl else 0
Cache_append(
'textinput.width',
text + u'_' + repr(self._get_line_options()), width)
return width
def _get_bbcode(self, ntext):
# get bbcoded text for python
try:
ntext[0]
# replace brackets with special chars that aren't highlighted
# by pygment. can't use &bl; ... cause & is highlighted
ntext = ntext.replace(u'[', u'\x01;').replace(u']', u'\x02;')
ntext = highlight(ntext, self.lexer, self.formatter)
ntext = ntext.replace(u'\x01;', u'&bl;').replace(u'\x02;', u'&br;')
# replace special chars with &bl; and &br;
ntext = ''.join((u'[color=', str(self.text_color), u']',
ntext, u'[/color]'))
ntext = ntext.replace(u'\n', u'')
return ntext
except IndexError:
return ''
    # Overridden to prevent the cursor position from going off screen.
    def _cursor_offset(self):
        '''Get the cursor x offset on the current line
        '''
        offset = 0
        try:
            if self.cursor_col:
                offset = self._get_text_width(
                    self._lines[self.cursor_row][:self.cursor_col],
                    self.tab_width, self._label_cached)
        except Exception:
            pass
        return offset
def on_lexer(self, instance, value):
self._trigger_refresh_text()
def on_foreground_color(self, instance, text_color):
if not self.use_text_color:
self.use_text_color = True
return
self.text_color = get_hex_from_color(text_color)
self.use_text_color = False
self.foreground_color = (1, 1, 1, .999)
self._trigger_refresh_text()
if __name__ == '__main__':
from kivy.extras.highlight import KivyLexer
from kivy.app import App
class CodeInputTest(App):
def build(self):
return CodeInput(lexer=KivyLexer(),
font_name='data/fonts/DroidSansMono.ttf',
font_size=12,
text='''
#:kivy 1.0
<YourWidget>:
canvas:
Color:
rgb: .5, .5, .5
Rectangle:
pos: self.pos
size: self.size''')
CodeInputTest().run()
| mit | 3,261,649,836,463,144,400 | 31.108374 | 79 | 0.577017 | false |
carthach/essentia | test/src/unittests/standard/test_nsgconstantq.py | 1 | 5662 | #!/usr/bin/env python
# Copyright (C) 2006-2016 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
from essentia_test import *
import numpy as np
testdir = join(filedir(), 'nsgconstantq')
class TestNSGConstantQ(TestCase):
def initNsgconstantq(self, inputSize=2048, fmin=27, fmax=10000, binsPerOctave=24, normalize='none'):
return NSGConstantQ(inputSize=inputSize,
minFrequency=fmin,
maxFrequency=fmax,
binsPerOctave=binsPerOctave,
sampleRate=44100,
rasterize='full',
phaseMode='global',
gamma=0,
normalize=normalize,
window='hannnsgcq',
)
def testRegression(self):
input = essentia.array(np.sin(2 * np.pi * 1000 * np.arange(2048) / 44100))
# Compared against the implementation of the MATLAB CQT_toolbox_2013
expected = np.array([ 0.01764389 +8.19244758e-06j, -0.00327444 +1.78957267e-03j,
-0.00379942 +1.00535053e-02j, 0.00479218 +8.65996905e-03j,
0.00636455 -1.14715385e-03j, -0.00165716 -6.73704576e-03j,
-0.00948407 +1.39929814e-03j, -0.00132517 +9.10799044e-03j,
0.00804364 +2.70849478e-03j, 0.00373902 -8.13302867e-03j,
-0.00733613 -6.00933843e-03j, -0.00738841 +5.56821084e-03j,
0.00371405 +8.43253605e-03j, 0.00904939 -1.72925594e-03j,
0.00034281 -9.21268760e-03j, -0.00891524 -2.47832619e-03j,
-0.00459810 +8.25670810e-03j, 0.00651840 +6.09559784e-03j,
0.00661061 -5.63534139e-03j, -0.00441447 -8.19178966e-03j,
-0.00905809 +1.89702405e-03j, 0.00139695 +6.62663074e-03j,
0.00708779 -1.61311132e-03j, 0.00229181 -9.95998412e-03j,
-0.00574295 -7.79506339e-03j, -0.00166257 +5.33548630e-04j])
output = np.mean(self.initNsgconstantq(normalize='sine')(input)[0],axis=0)
self.assertAlmostEqualVector(np.abs(expected), np.abs(output), 1e-6)
def testDC(self):
# Checks the DC component of the transform
input= essentia.array(np.ones(2**11))
        # Second output of NSGConstantQ contains the DC information needed for the inverse transform.
DCfilter = self.initNsgconstantq()(input)[1]
# Integrates the energy. DC filter should contain all the energy of the signal in this case.
DCenergy = np.sum(DCfilter)
inputEnergy = np.sum(input)
self.assertEqual(inputEnergy , DCenergy)
def testNyquist(self):
inputSize = 2**11
signalNyquist = [-1, 1] * int(inputSize / 2)
CQ, DC, Nyquist = self.initNsgconstantq(inputSize=inputSize)(signalNyquist)
# Checks that all the energy is contained in the Nyquist band
self.assertEqual(np.sum(np.abs(CQ)), 0)
self.assertEqual(np.sum(np.abs(DC)), 0)
self.assertGreater(np.sum(np.abs(Nyquist)), 0)
def testZero(self):
inputSize = 2**11
signalZero = [0] * inputSize
output = np.abs(np.mean(self.initNsgconstantq()(signalZero)[0]))
self.assertEqual(0, output)
def testEmpty(self):
# Checks whether an empty input vector yields an exception
self.assertComputeFails(self.initNsgconstantq(), [])
    def testOne(self, normalize='none'):
        # Checks that a single-sample input raises an exception
self.assertComputeFails(self.initNsgconstantq(), [1])
def testInvalidParam(self):
self.assertConfigureFails(self.initNsgconstantq(), {'inputSize': -1})
self.assertConfigureFails(self.initNsgconstantq(), {'inputSize': 0})
self.assertConfigureFails(self.initNsgconstantq(), {'minFrequency': 30000})
self.assertConfigureFails(self.initNsgconstantq(), {'minFrequency': 1000,
'maxFrequency': 500})
self.assertConfigureFails(self.initNsgconstantq(), {'maxFrequency': 0})
self.assertConfigureFails(self.initNsgconstantq(), {'binsPerOctave': 0})
self.assertConfigureFails(self.initNsgconstantq(), {'sampleRate': 0})
self.assertConfigureFails(self.initNsgconstantq(), {'gamma': -1})
self.assertConfigureFails(self.initNsgconstantq(), {'minimumWindow': 1})
self.assertConfigureFails(self.initNsgconstantq(), {'windowSizeFactor': 0})
self.assertConfigureFails(self.initNsgconstantq(), {'minimumWindow': 1})
def testOddInput(self):
        # Checks that compute does not fail for odd-sized input (it formerly did).
a = np.ones(4099, dtype='float32')
NSGConstantQ()(a)
suite = allTests(TestNSGConstantQ)
if __name__ == '__main__':
TextTestRunner(verbosity=2).run(suite)
| agpl-3.0 | 7,265,654,673,886,626,000 | 45.409836 | 104 | 0.623984 | false |
Scandie/openprocurement.tender.esco | setup.py | 1 | 1518 | from setuptools import setup, find_packages
import os
version = '1.0'
requires = [
'setuptools',
'openprocurement.api>=2.3',
'openprocurement.tender.openeu',
]
test_requires = requires + [
'webtest',
'python-coveralls',
]
docs_requires = requires + [
'sphinxcontrib-httpdomain',
]
entry_points = {
'openprocurement.tender.core.plugins': [
'esco.EU = openprocurement.tender.esco.includeme:includeme',
],
}
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, 'README.txt')) as f:
README = f.read()
setup(name='openprocurement.tender.esco',
version=version,
description="",
long_description=README,
# Get more strings from
# http://pypi.python.org/pypi?:action=list_classifiers
classifiers=[
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
],
keywords="web services",
author='Quintagroup, Ltd.',
author_email='[email protected]',
url='https://github.com/openprocurement/openprocurement.tender.esco',
license='Apache License 2.0',
packages=find_packages(exclude=['ez_setup']),
namespace_packages=['openprocurement', 'openprocurement.tender'],
include_package_data=True,
zip_safe=False,
install_requires=requires,
extras_require={'test': test_requires, 'docs': docs_requires},
test_suite="openprocurement.tender.esco.tests.main.suite",
entry_points=entry_points)
| apache-2.0 | 1,375,784,628,097,224,700 | 27.111111 | 75 | 0.656126 | false |
biosustain/driven | driven/vizualization/plotting/with_plotly.py | 1 | 2430 | # Copyright 2015 Novo Nordisk Foundation Center for Biosustainability, DTU.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from warnings import warn
from plotly.graph_objs import Figure, Layout, Scatter
from driven.vizualization.plotting import Plotter
class PlotlyPlotter(Plotter):
def __init__(self, **defaults):
warn("Plotly requires configuration before start (https://plot.ly/python/getting-started/)")
super(PlotlyPlotter, self).__init__(**defaults)
def scatter(self, dataframe, x=None, y=None, width=None, height=None, color=None, title='Scatter', xaxis_label=None,
yaxis_label=None, label=None):
color = self.__default_options__.get('color', None) if color is None else color
width = self.__default_options__.get('width', None) if width is None else width
scatter = Scatter(x=dataframe[x],
y=dataframe[y],
mode='markers',
marker=dict(color=color))
if label:
scatter['text'] = dataframe[label]
width, height = self._width_height(width, height)
layout = Layout(title=title,
width=width,
height=height)
if xaxis_label:
layout['xaxis'] = dict(title=xaxis_label)
if yaxis_label:
layout['yaxis'] = dict(title=yaxis_label)
return Figure(data=[scatter], layout=layout)
def histogram(self, dataframe, bins=100, width=None, height=None, palette=None, title='Histogram', values=None,
groups=None, legend=True):
pass
@classmethod
def display(cls, plot):
pass
def heatmap(self, dataframe, y=None, x=None, values=None, width=None, height=None, max_color=None, min_color=None,
mid_color=None, title='Heatmap'):
pass
def _palette(self, palette, *args, **kwargs):
pass
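# Illustrative usage (a sketch, assuming plotly is configured per the warning
# above and `df` is a pandas DataFrame with 'x' and 'y' columns):
#
#     fig = PlotlyPlotter().scatter(df, x='x', y='y', title='Demo scatter')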
| apache-2.0 | 248,582,597,926,571,700 | 34.735294 | 120 | 0.639506 | false |
gandelman-a/neutron-lbaas | neutron_lbaas/services/loadbalancer/data_models.py | 1 | 22151 | # Copyright (c) 2014 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
This module holds the data models for the load balancer service plugin. These
are meant simply as replacement data structures for dictionaries and
SQLAlchemy models. Using dictionaries as data containers for many components
causes readability issues and forgoes the benefits that classes and object
orientation provide. Using SQLAlchemy models as data containers for many
components can become an issue if you do not want to give certain components
access to the database.
These data models do provide methods for instantiation from SQLAlchemy models
and also converting to dictionaries.
"""
from neutron.db import model_base
from neutron.db import models_v2
from neutron.db import servicetype_db
from sqlalchemy.ext import orderinglist
from sqlalchemy.orm import collections
from neutron_lbaas.db.loadbalancer import models
class BaseDataModel(object):
def to_dict(self, **kwargs):
ret = {}
for attr in self.__dict__:
if attr.startswith('_') or not kwargs.get(attr, True):
continue
if isinstance(getattr(self, attr), list):
ret[attr] = []
for item in self.__dict__[attr]:
if isinstance(item, BaseDataModel):
ret[attr].append(item.to_dict())
else:
                        ret[attr].append(item)
elif isinstance(getattr(self, attr), BaseDataModel):
ret[attr] = self.__dict__[attr].to_dict()
elif isinstance(self.__dict__[attr], unicode):
ret[attr.encode('utf8')] = self.__dict__[attr].encode('utf8')
else:
ret[attr] = self.__dict__[attr]
return ret
def to_api_dict(self, **kwargs):
return {}
@classmethod
def from_dict(cls, model_dict):
return cls(**model_dict)
@classmethod
def from_sqlalchemy_model(cls, sa_model, calling_class=None):
attr_mapping = vars(cls).get("attr_mapping")
instance = cls()
for attr_name in vars(instance):
if attr_name.startswith('_'):
continue
if attr_mapping and attr_name in attr_mapping.keys():
attr = getattr(sa_model, attr_mapping[attr_name])
else:
attr = getattr(sa_model, attr_name)
# Handles M:1 or 1:1 relationships
if isinstance(attr, model_base.BASEV2):
if hasattr(instance, attr_name):
data_class = SA_MODEL_TO_DATA_MODEL_MAP[attr.__class__]
if calling_class != data_class and data_class:
setattr(instance, attr_name,
data_class.from_sqlalchemy_model(
attr, calling_class=cls))
# Handles 1:M or M:M relationships
elif (isinstance(attr, collections.InstrumentedList) or
isinstance(attr, orderinglist.OrderingList)):
for item in attr:
if hasattr(instance, attr_name):
data_class = SA_MODEL_TO_DATA_MODEL_MAP[item.__class__]
attr_list = getattr(instance, attr_name) or []
attr_list.append(data_class.from_sqlalchemy_model(
item, calling_class=cls))
setattr(instance, attr_name, attr_list)
# This isn't a relationship so it must be a "primitive"
else:
setattr(instance, attr_name, attr)
return instance
@property
def root_loadbalancer(self):
"""Returns the loadbalancer this instance is attached to."""
if isinstance(self, LoadBalancer):
lb = self
elif isinstance(self, Listener):
lb = self.loadbalancer
elif isinstance(self, Pool):
lb = self.listener.loadbalancer
elif isinstance(self, SNI):
lb = self.listener.loadbalancer
else:
# Pool Member or Health Monitor
lb = self.pool.listener.loadbalancer
return lb
# NOTE(brandon-logan) AllocationPool, HostRoute, Subnet, IPAllocation, Port,
# and ProviderResourceAssociation are defined here because there aren't any
# data_models defined in core neutron or neutron services. Instead of jumping
# through the hoops to create those I've just defined them here. If ever
# data_models or similar are defined in those packages, those should be used
# instead of these.
class AllocationPool(BaseDataModel):
def __init__(self, start=None, end=None):
self.start = start
self.end = end
class HostRoute(BaseDataModel):
def __init__(self, destination=None, nexthop=None):
self.destination = destination
self.nexthop = nexthop
class Subnet(BaseDataModel):
def __init__(self, id=None, name=None, tenant_id=None, network_id=None,
ip_version=None, cidr=None, gateway_ip=None, enable_dhcp=None,
ipv6_ra_mode=None, ipv6_address_mode=None, shared=None,
dns_nameservers=None, host_routes=None, allocation_pools=None,
subnetpool_id=None):
self.id = id
self.name = name
self.tenant_id = tenant_id
self.network_id = network_id
self.ip_version = ip_version
self.cidr = cidr
self.gateway_ip = gateway_ip
self.enable_dhcp = enable_dhcp
self.ipv6_ra_mode = ipv6_ra_mode
self.ipv6_address_mode = ipv6_address_mode
self.shared = shared
self.dns_nameservers = dns_nameservers
self.host_routes = host_routes
self.allocation_pools = allocation_pools
self.subnetpool_id = subnetpool_id
@classmethod
def from_dict(cls, model_dict):
host_routes = model_dict.pop('host_routes', [])
allocation_pools = model_dict.pop('allocation_pools', [])
model_dict['host_routes'] = [HostRoute.from_dict(route)
for route in host_routes]
model_dict['allocation_pools'] = [AllocationPool.from_dict(ap)
for ap in allocation_pools]
return Subnet(**model_dict)
class IPAllocation(BaseDataModel):
def __init__(self, port_id=None, ip_address=None, subnet_id=None,
network_id=None):
self.port_id = port_id
self.ip_address = ip_address
self.subnet_id = subnet_id
self.network_id = network_id
@classmethod
def from_dict(cls, model_dict):
subnet = model_dict.pop('subnet', None)
# TODO(blogan): add subnet to __init__. Can't do it yet because it
# causes issues with converting SA models into data models.
instance = IPAllocation(**model_dict)
setattr(instance, 'subnet', None)
if subnet:
setattr(instance, 'subnet', Subnet.from_dict(subnet))
return instance
class Port(BaseDataModel):
def __init__(self, id=None, tenant_id=None, name=None, network_id=None,
mac_address=None, admin_state_up=None, status=None,
device_id=None, device_owner=None, fixed_ips=None):
self.id = id
self.tenant_id = tenant_id
self.name = name
self.network_id = network_id
self.mac_address = mac_address
self.admin_state_up = admin_state_up
self.status = status
self.device_id = device_id
self.device_owner = device_owner
self.fixed_ips = fixed_ips or []
@classmethod
def from_dict(cls, model_dict):
fixed_ips = model_dict.pop('fixed_ips', [])
model_dict['fixed_ips'] = [IPAllocation.from_dict(fixed_ip)
for fixed_ip in fixed_ips]
return Port(**model_dict)
class ProviderResourceAssociation(BaseDataModel):
def __init__(self, provider_name=None, resource_id=None):
self.provider_name = provider_name
self.resource_id = resource_id
@classmethod
def from_dict(cls, model_dict):
device_driver = model_dict.pop('device_driver', None)
instance = ProviderResourceAssociation(**model_dict)
setattr(instance, 'device_driver', device_driver)
return instance
class SessionPersistence(BaseDataModel):
def __init__(self, pool_id=None, type=None, cookie_name=None,
pool=None):
self.pool_id = pool_id
self.type = type
self.cookie_name = cookie_name
self.pool = pool
def to_api_dict(self):
return super(SessionPersistence, self).to_dict(pool=False,
pool_id=False)
@classmethod
def from_dict(cls, model_dict):
pool = model_dict.pop('pool', None)
if pool:
model_dict['pool'] = Pool.from_dict(
pool)
return SessionPersistence(**model_dict)
class LoadBalancerStatistics(BaseDataModel):
def __init__(self, loadbalancer_id=None, bytes_in=None, bytes_out=None,
active_connections=None, total_connections=None,
loadbalancer=None):
self.loadbalancer_id = loadbalancer_id
self.bytes_in = bytes_in
self.bytes_out = bytes_out
self.active_connections = active_connections
self.total_connections = total_connections
self.loadbalancer = loadbalancer
def to_api_dict(self):
return super(LoadBalancerStatistics, self).to_dict(
loadbalancer_id=False, loadbalancer=False)
class HealthMonitor(BaseDataModel):
def __init__(self, id=None, tenant_id=None, type=None, delay=None,
timeout=None, max_retries=None, http_method=None,
url_path=None, expected_codes=None, provisioning_status=None,
admin_state_up=None, pool=None):
self.id = id
self.tenant_id = tenant_id
self.type = type
self.delay = delay
self.timeout = timeout
self.max_retries = max_retries
self.http_method = http_method
self.url_path = url_path
self.expected_codes = expected_codes
self.provisioning_status = provisioning_status
self.admin_state_up = admin_state_up
self.pool = pool
def attached_to_loadbalancer(self):
return bool(self.pool and self.pool.listener and
self.pool.listener.loadbalancer)
def to_api_dict(self):
ret_dict = super(HealthMonitor, self).to_dict(
provisioning_status=False, pool=False)
ret_dict['pools'] = []
if self.pool:
ret_dict['pools'].append({'id': self.pool.id})
return ret_dict
@classmethod
def from_dict(cls, model_dict):
pool = model_dict.pop('pool', None)
if pool:
model_dict['pool'] = Pool.from_dict(
pool)
return HealthMonitor(**model_dict)
class Pool(BaseDataModel):
# Map deprecated attribute names to new ones.
attr_mapping = {'sessionpersistence': 'session_persistence'}
def __init__(self, id=None, tenant_id=None, name=None, description=None,
healthmonitor_id=None, protocol=None, lb_algorithm=None,
admin_state_up=None, operating_status=None,
provisioning_status=None, members=None, healthmonitor=None,
session_persistence=None, listener=None):
self.id = id
self.tenant_id = tenant_id
self.name = name
self.description = description
self.healthmonitor_id = healthmonitor_id
self.protocol = protocol
self.lb_algorithm = lb_algorithm
self.admin_state_up = admin_state_up
self.operating_status = operating_status
self.provisioning_status = provisioning_status
self.members = members or []
self.healthmonitor = healthmonitor
self.session_persistence = session_persistence
# NOTE(eezhova): Old attribute name is kept for backwards
# compatibility with out-of-tree drivers.
self.sessionpersistence = self.session_persistence
self.listener = listener
def attached_to_loadbalancer(self):
return bool(self.listener and self.listener.loadbalancer)
def to_api_dict(self):
ret_dict = super(Pool, self).to_dict(
provisioning_status=False, operating_status=False,
healthmonitor=False, listener=False, session_persistence=False)
# NOTE(blogan): Returning a list to future proof for M:N objects
# that are not yet implemented.
ret_dict['listeners'] = []
if self.listener:
ret_dict['listeners'].append({'id': self.listener.id})
ret_dict['session_persistence'] = None
if self.session_persistence:
ret_dict['session_persistence'] = (
self.session_persistence.to_api_dict())
ret_dict['members'] = [{'id': member.id} for member in self.members]
return ret_dict
@classmethod
def from_dict(cls, model_dict):
healthmonitor = model_dict.pop('healthmonitor', None)
session_persistence = model_dict.pop('session_persistence', None)
model_dict.pop('sessionpersistence', None)
listener = model_dict.pop('listener', [])
members = model_dict.pop('members', [])
model_dict['members'] = [Member.from_dict(member)
for member in members]
if listener:
model_dict['listener'] = Listener.from_dict(listener)
if healthmonitor:
model_dict['healthmonitor'] = HealthMonitor.from_dict(
healthmonitor)
if session_persistence:
model_dict['session_persistence'] = SessionPersistence.from_dict(
session_persistence)
return Pool(**model_dict)
class Member(BaseDataModel):
def __init__(self, id=None, tenant_id=None, pool_id=None, address=None,
protocol_port=None, weight=None, admin_state_up=None,
subnet_id=None, operating_status=None,
provisioning_status=None, pool=None):
self.id = id
self.tenant_id = tenant_id
self.pool_id = pool_id
self.address = address
self.protocol_port = protocol_port
self.weight = weight
self.admin_state_up = admin_state_up
self.subnet_id = subnet_id
self.operating_status = operating_status
self.provisioning_status = provisioning_status
self.pool = pool
def attached_to_loadbalancer(self):
return bool(self.pool and self.pool.listener and
self.pool.listener.loadbalancer)
def to_api_dict(self):
return super(Member, self).to_dict(
provisioning_status=False, operating_status=False, pool=False)
@classmethod
def from_dict(cls, model_dict):
pool = model_dict.pop('pool', None)
if pool:
model_dict['pool'] = Pool.from_dict(
pool)
return Member(**model_dict)
class SNI(BaseDataModel):
def __init__(self, listener_id=None, tls_container_id=None,
position=None, listener=None):
self.listener_id = listener_id
self.tls_container_id = tls_container_id
self.position = position
self.listener = listener
def attached_to_loadbalancer(self):
return bool(self.listener and self.listener.loadbalancer)
def to_api_dict(self):
return super(SNI, self).to_dict(listener=False)
@classmethod
def from_dict(cls, model_dict):
return SNI(**model_dict)
class TLSContainer(BaseDataModel):
def __init__(self, id=None, certificate=None, private_key=None,
passphrase=None, intermediates=None, primary_cn=None):
self.id = id
self.certificate = certificate
self.private_key = private_key
self.passphrase = passphrase
self.intermediates = intermediates
self.primary_cn = primary_cn
class Listener(BaseDataModel):
def __init__(self, id=None, tenant_id=None, name=None, description=None,
default_pool_id=None, loadbalancer_id=None, protocol=None,
default_tls_container_id=None, sni_containers=None,
protocol_port=None, connection_limit=None,
admin_state_up=None, provisioning_status=None,
operating_status=None, default_pool=None, loadbalancer=None):
self.id = id
self.tenant_id = tenant_id
self.name = name
self.description = description
self.default_pool_id = default_pool_id
self.loadbalancer_id = loadbalancer_id
self.protocol = protocol
self.default_tls_container_id = default_tls_container_id
self.sni_containers = sni_containers or []
self.protocol_port = protocol_port
self.connection_limit = connection_limit
self.admin_state_up = admin_state_up
self.operating_status = operating_status
self.provisioning_status = provisioning_status
self.default_pool = default_pool
self.loadbalancer = loadbalancer
def attached_to_loadbalancer(self):
return bool(self.loadbalancer)
def to_api_dict(self):
ret_dict = super(Listener, self).to_dict(
loadbalancer=False, loadbalancer_id=False, default_pool=False,
operating_status=False, provisioning_status=False,
sni_containers=False)
# NOTE(blogan): Returning a list to future proof for M:N objects
# that are not yet implemented.
ret_dict['loadbalancers'] = []
if self.loadbalancer:
ret_dict['loadbalancers'].append({'id': self.loadbalancer.id})
ret_dict['sni_container_refs'] = [container.tls_container_id
for container in self.sni_containers]
ret_dict['default_tls_container_ref'] = self.default_tls_container_id
return ret_dict
@classmethod
def from_dict(cls, model_dict):
default_pool = model_dict.pop('default_pool', None)
loadbalancer = model_dict.pop('loadbalancer', None)
sni_containers = model_dict.pop('sni_containers', [])
model_dict['sni_containers'] = [SNI.from_dict(sni)
for sni in sni_containers]
if default_pool:
model_dict['default_pool'] = Pool.from_dict(default_pool)
if loadbalancer:
model_dict['loadbalancer'] = LoadBalancer.from_dict(loadbalancer)
return Listener(**model_dict)
class LoadBalancer(BaseDataModel):
def __init__(self, id=None, tenant_id=None, name=None, description=None,
vip_subnet_id=None, vip_port_id=None, vip_address=None,
provisioning_status=None, operating_status=None,
admin_state_up=None, vip_port=None, stats=None,
provider=None, listeners=None):
self.id = id
self.tenant_id = tenant_id
self.name = name
self.description = description
self.vip_subnet_id = vip_subnet_id
self.vip_port_id = vip_port_id
self.vip_address = vip_address
self.operating_status = operating_status
self.provisioning_status = provisioning_status
self.admin_state_up = admin_state_up
self.vip_port = vip_port
self.stats = stats
self.provider = provider
self.listeners = listeners or []
def attached_to_loadbalancer(self):
return True
def to_api_dict(self):
ret_dict = super(LoadBalancer, self).to_dict(
vip_port=False, stats=False, listeners=False)
ret_dict['listeners'] = [{'id': listener.id}
for listener in self.listeners]
if self.provider:
ret_dict['provider'] = self.provider.provider_name
return ret_dict
@classmethod
def from_dict(cls, model_dict):
listeners = model_dict.pop('listeners', [])
vip_port = model_dict.pop('vip_port', None)
provider = model_dict.pop('provider', None)
model_dict.pop('stats', None)
model_dict['listeners'] = [Listener.from_dict(listener)
for listener in listeners]
if vip_port:
model_dict['vip_port'] = Port.from_dict(vip_port)
if provider:
model_dict['provider'] = ProviderResourceAssociation.from_dict(
provider)
return LoadBalancer(**model_dict)
SA_MODEL_TO_DATA_MODEL_MAP = {
models.LoadBalancer: LoadBalancer,
models.HealthMonitorV2: HealthMonitor,
models.Listener: Listener,
models.SNI: SNI,
models.PoolV2: Pool,
models.MemberV2: Member,
models.LoadBalancerStatistics: LoadBalancerStatistics,
models.SessionPersistenceV2: SessionPersistence,
models_v2.IPAllocation: IPAllocation,
models_v2.Port: Port,
servicetype_db.ProviderResourceAssociation: ProviderResourceAssociation
}
DATA_MODEL_TO_SA_MODEL_MAP = {
LoadBalancer: models.LoadBalancer,
HealthMonitor: models.HealthMonitorV2,
Listener: models.Listener,
SNI: models.SNI,
Pool: models.PoolV2,
Member: models.MemberV2,
LoadBalancerStatistics: models.LoadBalancerStatistics,
SessionPersistence: models.SessionPersistenceV2,
IPAllocation: models_v2.IPAllocation,
Port: models_v2.Port,
ProviderResourceAssociation: servicetype_db.ProviderResourceAssociation
}
| apache-2.0 | -1,961,598,836,779,295,700 | 37.523478 | 79 | 0.615954 | false |
bmcfee/crema | training/chords/01-prepare.py | 1 | 3644 | #!/usr/bin/env python
'''CREMA structured chord model'''
import argparse
import sys
import os
import pickle
from tqdm import tqdm
from joblib import Parallel, delayed
from jams.util import smkdirs
import pumpp
import crema.utils
OUTPUT_PATH = 'resources'
def process_arguments(args):
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--sample-rate', dest='sr', type=float, default=44100.,
help='Sampling rate for audio analysis')
parser.add_argument('--hop-length', dest='hop_length', type=int,
default=4096,
help='Hop length for audio analysis')
parser.add_argument('--octaves', dest='n_octaves', type=int,
default=6,
help='Number of octaves above C1')
parser.add_argument('--jobs', dest='n_jobs', type=int,
default=1,
help='Number of jobs to run in parallel')
parser.add_argument('--augmentation-path', dest='augment_path', type=str,
default=None,
help='Path for augmented data (optional)')
parser.add_argument('input_path', type=str,
help='Path for directory containing (audio, jams)')
parser.add_argument('output_path', type=str,
help='Path to store pump output')
return parser.parse_args(args)
def make_pump(sr, hop_length, n_octaves):
p_feature = pumpp.feature.HCQTMag(name='cqt',
sr=sr, hop_length=hop_length,
harmonics=[1, 2],
log=True, conv='tf', n_octaves=n_octaves)
p_chord_tag = pumpp.task.ChordTagTransformer(name='chord_tag',
sr=sr, hop_length=hop_length,
sparse=True)
p_chord_struct = pumpp.task.ChordTransformer(name='chord_struct',
sr=sr, hop_length=hop_length,
sparse=True)
pump = pumpp.Pump(p_feature, p_chord_tag, p_chord_struct)
# Save the pump
with open(os.path.join(OUTPUT_PATH, 'pump.pkl'), 'wb') as fd:
pickle.dump(pump, fd)
return pump
def convert(aud, jam, pump, outdir):
data = pump.transform(aud, jam)
fname = os.path.extsep.join([os.path.join(outdir, crema.utils.base(aud)),
'h5'])
crema.utils.save_h5(fname, **data)
if __name__ == '__main__':
params = process_arguments(sys.argv[1:])
smkdirs(OUTPUT_PATH)
smkdirs(params.output_path)
print('{}: pre-processing'.format(__doc__))
print(params)
pump = make_pump(params.sr, params.hop_length, params.n_octaves)
stream = tqdm(crema.utils.get_ann_audio(params.input_path),
desc='Converting training data')
Parallel(n_jobs=params.n_jobs)(delayed(convert)(aud, ann,
pump,
params.output_path)
for aud, ann in stream)
if params.augment_path:
stream = tqdm(crema.utils.get_ann_audio(params.augment_path),
desc='Converting augmented data')
Parallel(n_jobs=params.n_jobs)(delayed(convert)(aud, ann,
pump,
params.output_path)
for aud, ann in stream)
| bsd-2-clause | 4,136,463,791,362,030,000 | 33.704762 | 79 | 0.51674 | false |
stephanos/subvoc | api/subtitle/model_test.py | 1 | 1422 | from api.subtitle.model import to_model, Media, Subtitle
def test_subtitle_equality():
assert Subtitle('S1', None, 'srt', 1, 'enc', False) == \
Subtitle('S1', None, 'srt', 1, 'enc', False)
assert Subtitle('S2', None, 'srt', 1, 'enc', False) != \
Subtitle('S1', None, 'srt', 1, 'enc', False)
def test_media_equality():
assert Media('M1', 'War and Peace', 'movie', '2000') == \
Media('M1', 'War and Peace', 'movie', '2000')
assert Media('M1', 'War and Peace', 'movie', '2000') != \
Media('M2', 'War and Peace', 'movie', '2000')
def test_to_model():
input = {
'IDMovieImdb': '1',
'IDSubtitleFile': 'file-id',
'MovieKind': 'movie',
'MovieName': 'name',
'MovieYear': '1986',
'Nonsense': 'nonsense',
'SubSumCD': '1',
'SubDownloadsCnt': '100',
'SubFormat': 'srt',
'SubEncoding': 'enc'
}
result = to_model(input)
assert result == Subtitle(
id='file-id',
media=Media(
id='tt0000001',
kind='movie',
title='name',
year='1986',
),
format='srt',
partial=False,
encoding='enc',
downloads=100,
)
def test_to_model_for_partial_subtitles():
input = {
'SubSumCD': '2',
'SubDownloadsCnt': '100',
}
result = to_model(input)
assert result.partial
| mit | -8,464,037,668,767,110,000 | 23.947368 | 61 | 0.509142 | false |
OpenDrift/opendrift | opendrift/scripts/opendrift_gui.py | 1 | 31046 | #!/usr/bin/env python
import matplotlib
from matplotlib import pyplot as plt
if __name__ == '__main__':
matplotlib.use('TKAgg')
import sys
import os
from datetime import datetime, timedelta
import numpy as np
from PIL import ImageTk, Image
import tkinter as tk
from tkinter import ttk
import opendrift
from opendrift.models.oceandrift import OceanDrift
from opendrift.models.openoil import OpenOil
from opendrift.models.leeway import Leeway
from opendrift.models.shipdrift import ShipDrift
from opendrift.models.openberg import OpenBerg
from opendrift.models.plastdrift import PlastDrift
from opendrift.models.radionuclides import RadionuclideDrift
# Class to redirect output to text box
class TextRedirector:
def __init__(self, widget, tag='stdout'):
self.defstdout = sys.stdout
self.widget = widget
self.tag = tag
def write(self, str):
self.widget.configure(state='normal')
self.widget.insert('end', str, (self.tag,))
self.widget.update_idletasks()
self.widget.see(tk.END)
def flush(self):
self.defstdout.flush()
# Class for help-text
# https://stackoverflow.com/questions/20399243/display-message-when-hovering-over-something-with-mouse-cursor-in-python
class ToolTip(object):
def __init__(self, widget):
self.widget = widget
self.tipwindow = None
self.id = None
self.x = self.y = 0
def showtip(self, text):
"Display text in tooltip window"
self.text = text
if self.tipwindow or not self.text:
return
x, y, cx, cy = self.widget.bbox("insert")
x = x + self.widget.winfo_rootx() + 57
y = y + cy + self.widget.winfo_rooty() +27
self.tipwindow = tw = tk.Toplevel(self.widget)
tw.wm_overrideredirect(1)
tw.wm_geometry("+%d+%d" % (x, y))
label = tk.Label(tw, text=self.text, justify=tk.LEFT,
background="#ffff00", relief=tk.SOLID, borderwidth=1,
font=("tahoma", "10", "normal"))
label.pack(ipadx=1)
def hidetip(self):
tw = self.tipwindow
self.tipwindow = None
if tw:
tw.destroy()
def CreateToolTip(widget, text):
toolTip = ToolTip(widget)
def enter(event):
toolTip.showtip(text)
def leave(event):
toolTip.hidetip()
widget.bind('<Enter>', enter)
widget.bind('<Leave>', leave)
class OpenDriftGUI(tk.Tk):
# Supported models as dictionary {model_name:model_class}
opendrift_models = {m.__name__:m for m in
[Leeway, OpenOil, ShipDrift, OpenBerg, OceanDrift, PlastDrift, RadionuclideDrift]}
extra_args = {'OpenOil': {'location': 'NORWAY'}}
# Overriding some default config settings, suitable for GUI
# TODO: should be set as default-default
GUI_config = {
'general:time_step_minutes': {'default': 15, 'min': 1},
'general:time_step_output_minutes': {'default': 30, 'min': 5},
'seed:number': {'default': 5000, 'max': 100000},
'seed:m3_per_hour': {'default': 100}
}
def __init__(self):
tk.Tk.__init__(self)
self.title('OpenDrift ' + opendrift.__version__ + ' GTI Turbo Ultra')
##################
# Layout frames
##################
self.n = ttk.Notebook(self.master)
self.n.grid()
self.seed = ttk.Frame(self.n)
self.confignotebook = ttk.Notebook(self.n)
self.config = ttk.Frame(self.confignotebook)
self.forcing = ttk.Frame(self.n)
self.n.add(self.seed, text='Seeding')
self.n.add(self.confignotebook, text='Config')
self.n.add(self.forcing, text='Forcing')
self.confignotebook.add(self.config, text='SubConfig')
# Top
self.top = tk.Frame(self.seed,
relief=tk.FLAT, pady=25, padx=25)
self.top.grid(row=0, column=1, rowspan=1)
# Time start and end
self.start_t = tk.Frame(self.seed, relief=tk.FLAT)
self.start_t.grid(row=20, column=0, rowspan=1)
self.end_t = tk.Frame(self.seed, relief=tk.FLAT)
self.end_t.grid(row=30, column=0, rowspan=1)
self.start = tk.Frame(self.seed, bg='lightgray', bd=2,
relief=tk.SUNKEN, pady=5, padx=5)
self.start.grid(row=20, column=1, rowspan=1)
self.end = tk.Frame(self.seed, bg='gray', bd=2,
relief=tk.SUNKEN, padx=5, pady=5)
self.end.grid(row=30, column=1)
self.coastline = tk.Frame(self.seed, bd=2,
relief=tk.FLAT, padx=5, pady=0)
self.coastline.grid(row=40, column=1)
self.duration = tk.Frame(self.seed, bd=2,
relief=tk.FLAT, padx=5, pady=5)
self.duration.grid(row=50, column=1)
self.output = tk.Frame(self.seed, bd=2,
relief=tk.FLAT, padx=5, pady=0)
self.output.grid(row=70, column=0, columnspan=7, sticky='nsew')
self.results = tk.Frame(self.seed, bd=2,
relief=tk.FLAT, padx=5, pady=0)
self.results.grid(row=60, column=7, columnspan=1, sticky='ew')
#######################################################
tk.Label(self.top, text='Simulation type').grid(row=0, column=0)
self.model = tk.StringVar()
self.model.set(list(self.opendrift_models)[0])
self.modeldrop = tk.OptionMenu(self.top, self.model,
*(list(self.opendrift_models)), command=self.set_model)
self.modeldrop.grid(row=0, column=1)
help_button = tk.Button(self.top, text='Help',
command=self.show_help)
help_button.grid(row=0, column=2, padx=50)
##########
# Release
##########
startlabel = tk.Label(self.start_t, text="\n\nStart release\n\n")
startlabel.grid(row=0, column=0)
tk.Label(self.start, text='Longitude').grid(row=0, column=1)
tk.Label(self.start, text='Latitude').grid(row=0, column=0)
tk.Label(self.start, text='Radius [m]').grid(row=0, column=2)
self.latvar = tk.StringVar()
self.lonvar = tk.StringVar()
self.radiusvar = tk.StringVar()
self.lat = tk.Entry(self.start, textvariable=self.latvar,
width=6, justify=tk.RIGHT)
self.lon = tk.Entry(self.start, textvariable=self.lonvar,
width=6, justify=tk.RIGHT)
self.radius = tk.Entry(self.start, width=6,
textvariable=self.radiusvar,
justify=tk.RIGHT)
self.lon.grid(row=10, column=1)
self.lon.insert(0, '4.5')
self.lat.grid(row=10, column=0)
self.lat.insert(0, '60.0')
self.radius.grid(row=10, column=2)
self.radius.insert(0, '1000')
self.lonvar.trace('w', self.copy_position)
self.latvar.trace('w', self.copy_position)
self.radiusvar.trace('w', self.copy_position)
##########
# Time
##########
now = datetime.utcnow()
tk.Label(self.start, text='Day').grid(row=20, column=0)
tk.Label(self.start, text='Month').grid(row=20, column=1)
tk.Label(self.start, text='Year').grid(row=20, column=2)
tk.Label(self.start, text='Hour').grid(row=20, column=3)
tk.Label(self.start, text='Minutes [UTC]').grid(row=20, column=4)
self.datevar = tk.StringVar()
self.dates = range(1, 32)
self.datevar.set(now.day)
self.date = tk.OptionMenu(self.start, self.datevar, *self.dates)
self.date.grid(row=30, column=0)
self.monthvar = tk.StringVar()
self.months = ['January', 'February', 'March', 'April', 'May',
'June', 'July', 'August', 'September', 'October',
'November', 'December']
self.monthvar.set(self.months[now.month-1])
self.month = tk.OptionMenu(self.start, self.monthvar,
*self.months)
self.month.grid(row=30, column=1)
self.yearvar = tk.StringVar()
self.years = range(2015, now.year+2)
self.yearvar.set(now.year)
self.year = tk.OptionMenu(self.start, self.yearvar, *self.years)
self.year.grid(row=30, column=2)
self.hourvar = tk.StringVar()
self.hours = range(0, 24)
self.hourvar.set(now.hour)
self.hour = tk.OptionMenu(self.start, self.hourvar, *self.hours)
self.hour.grid(row=30, column=3)
self.minutevar = tk.StringVar()
self.minutes = range(0, 60, 5)
self.minutevar.set(now.minute)
self.minute = tk.OptionMenu(self.start, self.minutevar,
*self.minutes)
self.minute.grid(row=30, column=4)
self.datevar.trace('w', self.copy_position)
self.monthvar.trace('w', self.copy_position)
self.yearvar.trace('w', self.copy_position)
self.hourvar.trace('w', self.copy_position)
self.minutevar.trace('w', self.copy_position)
###############
# Release End
###############
endlabel = tk.Label(self.end_t, text="\n\nEnd release\n\n")
endlabel.grid(row=0, column=0)
tk.Label(self.end, text='Longitude', bg='gray').grid(row=0, column=1)
tk.Label(self.end, text='Latitude', bg='gray').grid(row=0, column=0)
tk.Label(self.end, text='Radius [m]', bg='gray').grid(row=0, column=2)
self.elat = tk.Entry(self.end, width=6, justify=tk.RIGHT)
self.elon = tk.Entry(self.end, width=6, justify=tk.RIGHT)
self.eradius = tk.Entry(self.end, width=6, justify=tk.RIGHT)
self.elon.grid(row=10, column=1)
self.elon.insert(0, '4.5')
self.elat.grid(row=10, column=0)
self.elat.insert(0, '60.0')
self.eradius.grid(row=10, column=2)
self.eradius.insert(0, '1000')
##########
# Time
##########
now = datetime.utcnow()
tk.Label(self.end, text='Day', bg='gray').grid(row=20, column=0)
tk.Label(self.end, text='Month', bg='gray').grid(row=20, column=1)
tk.Label(self.end, text='Year', bg='gray').grid(row=20, column=2)
tk.Label(self.end, text='Hour', bg='gray').grid(row=20, column=3)
tk.Label(self.end, text='Minutes [UTC]', bg='gray').grid(row=20, column=4)
self.edatevar = tk.StringVar()
self.edates = range(1, 32)
self.edatevar.set(now.day)
self.edate = tk.OptionMenu(self.end, self.edatevar, *self.edates)
self.edate.grid(row=30, column=0)
self.emonthvar = tk.StringVar()
self.emonthvar.set(self.months[now.month-1])
self.emonth = tk.OptionMenu(self.end, self.emonthvar,
*self.months)
self.emonth.grid(row=30, column=1)
self.eyearvar = tk.StringVar()
self.eyears = range(2015, now.year+2)
self.eyearvar.set(now.year)
self.eyear = tk.OptionMenu(self.end, self.eyearvar, *self.eyears)
self.eyear.grid(row=30, column=2)
self.ehourvar = tk.StringVar()
self.ehours = range(0, 24)
self.ehourvar.set(now.hour)
self.ehour = tk.OptionMenu(self.end, self.ehourvar, *self.ehours)
self.ehour.grid(row=30, column=3)
self.eminutevar = tk.StringVar()
self.eminutes = range(0, 60, 5)
self.eminutevar.set(now.minute)
self.eminute = tk.OptionMenu(self.end, self.eminutevar,
*self.eminutes)
self.eminute.grid(row=30, column=4)
self.eyear.config(bg='gray')
self.emonth.config(bg='gray')
self.edate.config(bg='gray')
self.ehour.config(bg='gray')
self.eminute.config(bg='gray')
# Check seeding
check_seed = tk.Button(self.end_t, text='Check seeding',
command=self.check_seeding)
check_seed.grid(row=10, column=0, padx=0)
#######################
# Simulation duration
#######################
tk.Label(self.duration, text='Run simulation ').grid(row=50, column=0)
self.durationhours = tk.Entry(self.duration, width=3,
justify=tk.RIGHT)
self.durationhours.grid(row=50, column=1)
self.durationhours.insert(0, 12)
tk.Label(self.duration, text=' hours ').grid(row=50, column=2)
self.directionvar = tk.StringVar()
self.directionvar.set('forwards')
self.direction = tk.OptionMenu(self.duration, self.directionvar,
'forwards', 'backwards')
self.direction.grid(row=50, column=3)
tk.Label(self.duration, text=' in time ').grid(row=50, column=4)
##############
# Output box
##############
self.text = tk.Text(self.output, wrap="word", height=18)
self.text.grid(row=60, columnspan=6, sticky='nsw')
self.text.tag_configure("stderr", foreground="#b22222")
sys.stdout = TextRedirector(self.text, "stdout")
sys.stderr = TextRedirector(self.text, "stderr")
s = tk.Scrollbar(self)
s.grid(row=60, column=6, sticky='ns')
s.config(command=self.text.yview)
self.text.config(yscrollcommand=s.set)
# Diana
self.dianadir = '/vol/vvfelles/opendrift/output/'
if os.path.exists(self.dianadir):
self.has_diana = True
print('Diana is available!')
self.outputdir = '/vol/vvfelles/opendrift/output_native/'
startbutton = 'PEIS PAO'
else:
self.has_diana = False
startbutton = 'START'
##############
# Initialise
##############
self.set_model(list(self.opendrift_models)[0])
        with open(self.o.test_data_folder() +
                  '../../opendrift/scripts/data_sources.txt') as forcing_fd:
            forcingfiles = forcing_fd.readlines()
print(forcingfiles)
for i, ff in enumerate(forcingfiles):
tk.Label(self.forcing, text=ff.strip(), wraplength=650, font=('Courier', 8)).grid(
row=i, column=0, sticky=tk.W)
##########################
try:
img = ImageTk.PhotoImage(Image.open(
self.o.test_data_folder() +
'../../docs/opendrift_logo.png'))
panel = tk.Label(self.seed, image=img)
panel.image = img
panel.grid(row=0, column=0)
except Exception as e:
print(e)
pass # Could not display logo
##########
# RUN
##########
tk.Button(self.seed, text=startbutton, bg='green',
command=self.run_opendrift).grid(row=80, column=1,
sticky=tk.W, pady=4)
def copy_position(self, a, b, c):
self.elat.delete(0, tk.END)
self.elat.insert(0, self.lat.get())
self.elon.delete(0, tk.END)
self.elon.insert(0, self.lon.get())
self.eradius.delete(0, tk.END)
self.eradius.insert(0, self.radius.get())
self.edatevar.set(self.datevar.get())
self.emonthvar.set(self.monthvar.get())
self.eyearvar.set(self.yearvar.get())
self.ehourvar.set(self.hourvar.get())
self.eminutevar.set(self.minutevar.get())
def handle_result(self, command):
from os.path import expanduser
homefolder = expanduser("~")
filename = homefolder + '/' + self.simulationname
if command[0:4] == 'save':
plt.switch_backend('agg')
elif command[0:4] == 'show':
plt.switch_backend('TkAgg')
if command == 'saveanimation':
filename = filename + '.mp4'
self.o.animation(filename=filename)
print('='*30 + '\nAnimation saved to file:\n'
+ filename + '\n' + '='*30)
elif command == 'showanimation':
self.o.animation()
elif command == 'saveplot':
filename = filename + '.png'
self.o.plot(filename=filename)
print('='*30 + '\nPlot saved to file:\n'
+ filename + '\n' + '='*30)
elif command == 'showplot':
self.o.plot()
elif command == 'showoilbudget':
self.o.plot_oil_budget()
elif command == 'saveoilbudget':
filename = filename + '_oilbudget.png'
self.o.plot_oil_budget(filename=filename)
print('='*30 + '\nPlot saved to file: '
+ filename + '\n' + '='*30)
def validate_config(self, value_if_allowed, prior_value, key):
"""From config menu selection."""
        print('Input: %s -> %s' % (key, value_if_allowed))
if value_if_allowed == 'None':
print('Setting None value')
return True
if value_if_allowed in ' -':
            print('Allowing temporarily empty value or minus sign')
return True
sc = self.o._config[key]
if sc['type'] in ['int', 'float']:
try:
value_if_allowed = float(value_if_allowed)
            except ValueError:
                print('Not a number')
return False
try:
            print('Setting: %s -> %s' % (key, value_if_allowed))
self.o.set_config(key, value_if_allowed)
return True
except:
return False
def set_model(self, model, rebuild_gui=True):
# Creating simulation object (self.o) of chosen model class
print('Setting model: ' + model)
if model in self.extra_args:
extra_args = self.extra_args[model]
else:
extra_args = {}
self.o = self.opendrift_models[model](**extra_args)
self.modelname = model # So that new instance may be initiated at repeated run
# Setting GUI-specific default config values
for k,v in self.GUI_config.items():
try:
if 'default' in v:
self.o._set_config_default(k, v['default'])
if 'min' in v:
self.o._config[k]['min'] = v['min']
if 'max' in v:
self.o._config[k]['max'] = v['max']
except:
pass
if rebuild_gui is False:
return
# Remove current GUI components and rebuild with new
for con in self.confignotebook.winfo_children():
con.destroy()
self.subconfig = {}
confnames = list(set([cn.split(':')[0] for cn in self.o._config]))
confnames.extend(['environment:constant', 'environment:fallback'])
confnames.remove('environment')
for sub in confnames:
self.subconfig[sub] = tk.Frame(self.confignotebook, pady=25)
self.confignotebook.add(self.subconfig[sub], text=sub)
sc = self.o.get_configspec(level=[2, 3])
self.config_input = {}
self.config_input_var = {}
for i, key in enumerate(list(sc)):
if key.startswith('environment:constant'):
tab = self.subconfig['environment:constant']
keystr = key.split(':')[-1]
elif key.startswith('environment:fallback'):
tab = self.subconfig['environment:fallback']
keystr = key.split(':')[-1]
else:
tab = self.subconfig[key.split(':')[0]]
keystr = ''.join(key.split(':')[1:])
lab = tk.Label(tab, text=keystr)
lab.grid(row=i, column=1, rowspan=1)
if sc[key]['type'] in ['float', 'int']:
self.config_input_var[i] = tk.StringVar()
vcmd = (tab.register(self.validate_config),
'%P', '%s', key)
self.config_input[i] = tk.Entry(
tab, textvariable=self.config_input_var[i],
validate='key', validatecommand=vcmd,
width=6, justify=tk.RIGHT)
self.config_input[i].insert(0, str(sc[key]['default']))
self.config_input[i].grid(row=i, column=2, rowspan=1)
tk.Label(tab, text='[%s] min: %s, max: %s' % (
sc[key]['units'], sc[key]['min'], sc[key]['max'])
).grid(row=i, column=3, rowspan=1)
elif sc[key]['type'] == 'bool':
if self.o.get_config(key) is True:
value = 1
else:
value = 0
self.config_input_var[i] = tk.IntVar(value=value)
vcb = (tab.register(self.set_config_checkbox),
key, i)
self.config_input[i] = tk.Checkbutton(
tab, variable=self.config_input_var[i],
command=vcb, text='')
self.config_input[i].grid(row=i, column=2, rowspan=1)
elif sc[key]['type'] == 'enum':
self.config_input_var[i] = tk.StringVar(value=self.o.get_config(key))
width = len(max(sc[key]['enum'], key=len))
self.config_input[i] = ttk.Combobox(
tab, width=width,
textvariable=self.config_input_var[i],
values=sc[key]['enum'])
self.config_input[i].bind("<<ComboboxSelected>>",
lambda event, keyx=key, ix=i:
self.set_config_enum(event, keyx, ix))
self.config_input[i].grid(row=i, column=2, rowspan=1)
CreateToolTip(lab, sc[key]['description'])
try:
self.results.destroy()
except:
pass
# Only ESSENTIAL config items are shown on front page with seeding
sc = self.o.get_configspec(level=self.o.CONFIG_LEVEL_ESSENTIAL)
self.seed_input = {}
self.seed_input_var = {}
self.seed_input_label = {}
self.seed_frame = tk.Frame(self.seed, bd=2,
relief=tk.FLAT, padx=5, pady=0)
self.seed_frame.grid(row=60, columnspan=8, sticky='nsew')
for num, i in enumerate(sc):
varlabel = i.split(':')[-1]
if i in self.o.ElementType.variables.keys():
if 'units' in self.o.ElementType.variables[i].keys():
units = self.o.ElementType.variables[i]['units']
if units == '1':
units = 'fraction'
varlabel = '%s [%s]' % (varlabel, units)
self.seed_input_label[i] = tk.Label(self.seed_frame,
text=varlabel + '\t')
self.seed_input_label[i].grid(row=num, column=0)
CreateToolTip(self.seed_input_label[i], text=sc[i]['description'])
actual_val = self.o.get_config(i)
if sc[i]['type'] == 'enum':
self.seed_input_var[i] = tk.StringVar()
self.seed_input[i] = ttk.Combobox(
self.seed_frame, width=50,
textvariable=self.seed_input_var[i],
values=sc[i]['enum'])
self.seed_input_var[i].set(actual_val)
elif sc[i]['type'] == 'bool':
self.seed_input_var[i] = tk.IntVar(value=sc[i]['value'])
self.seed_input[i] = tk.Checkbutton(
self.seed_frame, variable=self.seed_input_var[i],
text=sc[i]['description'])
else:
self.seed_input_var[i] = tk.StringVar()
self.seed_input[i] = tk.Entry(
self.seed_frame, textvariable=self.seed_input_var[i],
width=6, justify=tk.RIGHT)
self.seed_input[i].insert(0, actual_val)
self.seed_input[i].grid(row=num, column=1)
def set_config_checkbox(self, key, i):
i = int(i)
newval = self.config_input_var[i].get()
if newval == 0:
print('Setting %s to False' % key)
self.o.set_config(key, False)
elif newval == 1:
print('Setting %s to True' % key)
self.o.set_config(key, True)
def set_config_enum(self, event, key, i):
newval = self.config_input_var[i].get()
print('Setting ' + key + newval)
self.o.set_config(key, newval)
def show_help(self):
help_url = 'https://opendrift.github.io/gui.html'
print('Opening help website:\n' + help_url)
import webbrowser
webbrowser.open(help_url)
def check_seeding(self):
print('#'*50)
        print('Hang on, the plot is coming in a few seconds...')
print('#'*50)
month = int(self.months.index(self.monthvar.get()) + 1)
start_time = datetime(int(self.yearvar.get()), month,
int(self.datevar.get()),
int(self.hourvar.get()),
int(self.minutevar.get()))
emonth = int(self.months.index(self.emonthvar.get()) + 1)
end_time = datetime(int(self.eyearvar.get()), emonth,
int(self.edatevar.get()),
int(self.ehourvar.get()),
int(self.eminutevar.get()))
sys.stdout.flush()
lon = float(self.lon.get())
lat = float(self.lat.get())
radius = float(self.radius.get())
elon = float(self.elon.get())
elat = float(self.elat.get())
eradius = float(self.eradius.get())
if lon != elon or lat != elat or start_time != end_time:
lon = [lon, elon]
lat = [lat, elat]
radius = [radius, eradius]
start_time = [start_time, end_time]
cone = True
else:
cone = False
so = Leeway(loglevel=50)
        for k, v in self.GUI_config.items():
            try:
                so.set_config(k, v['default'])
except:
pass
so.seed_cone(lon=lon, lat=lat, radius=radius, time=start_time)
so.plot(buffer=.5, fast=True)
del so
def run_opendrift(self):
        sys.stdout.write('running OpenDrift\n')
# Creating fresh instance of the current model, but keeping config
adjusted_config = self.o._config
self.set_model(self.modelname, rebuild_gui=False)
self.o._config = adjusted_config
try:
self.budgetbutton.destroy()
except Exception as e:
print(e)
pass
month = int(self.months.index(self.monthvar.get()) + 1)
start_time = datetime(int(self.yearvar.get()), month,
int(self.datevar.get()),
int(self.hourvar.get()),
int(self.minutevar.get()))
emonth = int(self.months.index(self.emonthvar.get()) + 1)
end_time = datetime(int(self.eyearvar.get()), emonth,
int(self.edatevar.get()),
int(self.ehourvar.get()),
int(self.eminutevar.get()))
sys.stdout.flush()
lon = float(self.lon.get())
lat = float(self.lat.get())
radius = float(self.radius.get())
elon = float(self.elon.get())
elat = float(self.elat.get())
eradius = float(self.eradius.get())
if lon != elon or lat != elat or start_time != end_time:
lon = [lon, elon]
lat = [lat, elat]
radius = [radius, eradius]
start_time = [start_time, end_time]
cone = True
else:
cone = False
for se in self.seed_input:
val = self.seed_input_var[se].get()
if self.o._config[se]['type'] in ['float', 'int']:
val = float(val)
elif self.o._config[se]['type'] == 'bool':
if val == 1:
val = True
elif val == 0:
val = False
else:
                    pass
self.o.set_config(se, val)
self.o.add_readers_from_file(self.o.test_data_folder() +
'../../opendrift/scripts/data_sources.txt')
self.o.seed_cone(lon=lon, lat=lat, radius=radius,
time=start_time)#, #cone=cone,
#**extra_seed_args)
time_step = self.o.get_config('general:time_step_minutes')*60
time_step_output = self.o.get_config('general:time_step_output_minutes')*60
duration = int(self.durationhours.get())*3600/time_step
extra_args = {'time_step': time_step, 'time_step_output': time_step_output}
if self.directionvar.get() == 'backwards':
extra_args['time_step'] = -extra_args['time_step']
extra_args['time_step_output'] = -extra_args['time_step_output']
if self.has_diana is True:
extra_args['outfile'] = self.outputdir + '/opendrift_' + \
self.model.get() + self.o.start_time.strftime('_%Y%m%d_%H%M.nc')
self.simulationname = 'opendrift_' + self.model.get() + \
self.o.start_time.strftime('_%Y%m%d_%H%M')
# Starting simulation run
self.o.run(steps=duration, **extra_args)
print(self.o)
self.results.destroy()
self.results = tk.Frame(self.seed, bd=2,
relief=tk.FLAT, padx=5, pady=0)
self.results.grid(row=70, column=3, columnspan=1, sticky='ew')
tk.Button(self.results, text='Show animation',
command=lambda: self.handle_result(
'showanimation')).grid(row=10, column=1)
tk.Button(self.results, text='Save animation',
command=lambda: self.handle_result(
'saveanimation')).grid(row=20, column=1)
tk.Button(self.results, text='Show plot',
command=lambda: self.handle_result(
'showplot')).grid(row=30, column=1)
tk.Button(self.results, text='Save plot',
command=lambda: self.handle_result(
'saveplot')).grid(row=40, column=1)
if self.model.get() == 'OpenOil':
tk.Button(self.results, text='Save oil budget',
command=lambda: self.handle_result(
'saveoilbudget')).grid(row=50, column=1)
tk.Button(self.results, text='Show oil budget',
command=lambda: self.handle_result(
'showoilbudget')).grid(row=60, column=1)
if self.has_diana is True:
diana_filename = self.dianadir + self.simulationname + '.nc'
self.o.write_netcdf_density_map(diana_filename)
tk.Button(self.results, text='Show in Diana',
command=lambda: os.system('diana &')
).grid(row=80, column=1)
if __name__ == '__main__':
OpenDriftGUI().mainloop()
| gpl-2.0 | 9,179,950,566,380,875,000 | 40.011889 | 119 | 0.532661 | false |
camisatx/pySecMaster | pySecMaster/pySecMaster.py | 1 | 27817 | #!/usr/bin/env python3
import argparse
from datetime import datetime
import time
from create_tables import create_database, main_tables, data_tables,\
events_tables
from extractor import QuandlCodeExtract, QuandlDataExtraction,\
GoogleFinanceDataExtraction, YahooFinanceDataExtraction, CSIDataExtractor,\
NASDAQSectorIndustryExtractor
from load_aux_tables import LoadTables
from build_symbology import create_symbology
from cross_validator import CrossValidate
from utilities.database_queries import query_all_active_tsids
from utilities.user_dir import user_dir
from utilities.database_check import postgres_test
__author__ = 'Josh Schertz'
__copyright__ = 'Copyright (C) 2018 Josh Schertz'
__description__ = 'An automated system to store and maintain financial data.'
__email__ = 'josh[AT]joshschertz[DOT]com'
__license__ = 'GNU AGPLv3'
__maintainer__ = 'Josh Schertz'
__status__ = 'Development'
__url__ = 'https://joshschertz.com/'
__version__ = '1.5.0'
'''
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
''' pySecMaster.py
This manages the pysecmaster database. It should be run at most once per day.
Database maintenance tasks:
Creates the tables in the database.
Loads auxiliary tables from included CSV files.
Downloads all available Quandl Codes for the Quandl Databases selected.
Downloads the specified CSI Data factsheet (stocks, commodities).
Creates the symbology table which establishes a unique code for every
item in the database, along with translating different source's codes
Database data download tasks:
Downloads Quandl data based on the download selection criteria using either
the official Quandl Codes or implied codes from CSI Data.
Downloads Google Finance minute stock data.
Can either append only the new data, or replace part of the existing data.
'''
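# Illustrative invocation (a sketch; the flag values are placeholders -- see
# the argparse options at the bottom of this file for the authoritative list):
#
#     python pySecMaster.py --daily-downloads quandl yahoo \
#         --csidata-update-range 7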
###############################################################################
# Database maintenance options:
userdir = user_dir()
csidata_type = 'stock' # stock, commodity
# Don't change these unless you know what you are doing
database_url = ['https://www.quandl.com/api/v2/datasets.csv?query=*&'
'source_code=', '&per_page=300&page=']
# http://www.csidata.com/factsheets.php?type=stock&format=html
csidata_url = 'http://www.csidata.com/factsheets.php?'
tables_to_load = ['data_vendor', 'exchanges']
nasdaq_sector_industry_url = 'http://www.nasdaq.com/screening/' \
'companies-by-industry.aspx?'
nasdaq_sector_industry_extractor_exchanges = ['NASDAQ', 'NYSE', 'AMEX']
nasdaq_sector_industry_redownload_time = 7
###############################################################################
# Database data download options:
today = datetime.today()
# Don't change these variables unless you know what you are doing!
quandl_data_url = ['https://www.quandl.com/api/v1/datasets/', '.csv']
google_fin_url = {'root': 'http://www.google.com/finance/getprices?',
'ticker': 'q=',
'exchange': 'x=',
'interval': 'i=', # 60; 60 seconds is the shortest interval
# 'sessions': 'sessions=ext_hours',
'period': 'p=', # 20d; 15d is the longest period for min
'fields': 'f=d,c,v,o,h,l'} # order doesn't change anything
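# Illustrative assembled request, built by the extractor from the parts above
# (hypothetical ticker and exchange):
# http://www.google.com/finance/getprices?q=AAPL&x=NASD&i=60&p=20d&f=d,c,v,o,h,l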
yahoo_end_date = ('d=%s&e=%s&f=%s' %
(str(int(today.month - 1)).zfill(2), today.day, today.year))
yahoo_fin_url = {'root': 'http://real-chart.finance.yahoo.com/table.csv?',
'ticker': 's=', # Exchange is added after ticker and '.'
'interval': 'g=', # d, w, m, v: (daily, wkly, mth, dividends)
'start_date': 'a=00&b=1&c=1900', # The entire price history
'end_date': yahoo_end_date, # Today's date (MM; D; YYYY)
'csv': 'ignore=.csv'} # Returns a CSV file
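# Illustrative assembled request (hypothetical ticker and end date; the real
# end-date fields are computed from today's date above):
# http://real-chart.finance.yahoo.com/table.csv?s=AAPL&g=d&a=00&b=1&c=1900&d=05&e=15&f=2018&ignore=.csv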
###############################################################################
def maintenance(database_options, quandl_key, quandl_ticker_source,
database_list, threads, quandl_update_range,
csidata_update_range, symbology_sources):
"""
:param database_options: Dictionary of the postgres database options
:param quandl_key: Optional string of the Quandl API key
:param quandl_ticker_source: String of which source the Quandl data should
use when determining which codes to download (csidata, quandl)
:param database_list: List of strings indicating which Quandl databases
should have their codes downloaded (WIKI, GOOG, YAHOO)
:param threads: Integer of the threads to run when downloading Quandl codes
:param quandl_update_range: Integer of the number of days before the
Quandl codes should be updated
:param csidata_update_range: Integer of the number of days before the CSI
Data factsheet should be updated
:param symbology_sources: List of strings of which symbology sources
should be created (csi_data, tsid, quandl_wiki)
"""
print('Starting Security Master table maintenance function. This can take '
'some time to finish if large databases are used. If this fails, '
'rerun it after a few minutes.')
# Create the SQL tables if they don't already exist
create_database(admin_user=database_options['admin_user'],
admin_password=database_options['admin_password'],
database=database_options['database'],
user=database_options['user'])
main_tables(database=database_options['database'],
user=database_options['user'],
password=database_options['password'],
host=database_options['host'],
port=database_options['port'])
data_tables(database=database_options['database'],
user=database_options['user'],
password=database_options['password'],
host=database_options['host'],
port=database_options['port'])
events_tables(database=database_options['database'],
user=database_options['user'],
password=database_options['password'],
host=database_options['host'],
port=database_options['port'])
LoadTables(database=database_options['database'],
user=database_options['user'],
password=database_options['password'],
host=database_options['host'],
port=database_options['port'],
tables_to_load=tables_to_load,
load_tables=userdir['load_tables'])
# Always extract CSI values, as they are used for the symbology table
CSIDataExtractor(database=database_options['database'],
user=database_options['user'],
password=database_options['password'],
host=database_options['host'],
port=database_options['port'],
db_url=csidata_url,
data_type=csidata_type,
redownload_time=csidata_update_range)
if quandl_ticker_source == 'quandl':
QuandlCodeExtract(database=database_options['database'],
user=database_options['user'],
password=database_options['password'],
host=database_options['host'],
port=database_options['port'],
quandl_token=quandl_key,
database_list=database_list,
database_url=database_url,
update_range=quandl_update_range,
threads=threads)
create_symbology(database=database_options['database'],
user=database_options['user'],
password=database_options['password'],
host=database_options['host'],
port=database_options['port'],
source_list=symbology_sources)
NASDAQSectorIndustryExtractor(
database=database_options['database'],
user=database_options['user'],
password=database_options['password'],
host=database_options['host'],
port=database_options['port'],
db_url=nasdaq_sector_industry_url,
exchange_list=nasdaq_sector_industry_extractor_exchanges,
redownload_time=nasdaq_sector_industry_redownload_time)
def data_download(database_options, quandl_key, download_list, threads=4,
verbose=False):
""" Loops through all provided data sources in download_list, and runs
the associated data extractor using the provided source variables.
:param database_options: Dictionary of the postgres database options
:param quandl_key: Optional string of the Quandl API key
:param download_list: List of dictionaries, with each dictionary containing
all of the relevant variables for the specific source
:param threads: Integer indicating how many threads should be used to
concurrently download data
:param verbose: Boolean of whether debugging prints should occur.
"""
for source in download_list:
if source['interval'] == 'daily':
table = 'daily_prices'
if source['source'] == 'google':
google_fin_url['interval'] = 'i=' + str(60*60*24)
elif source['source'] == 'yahoo':
yahoo_fin_url['interval'] = 'g=d'
elif source['interval'] == 'minute':
table = 'minute_prices'
if source['source'] == 'google':
google_fin_url['interval'] = 'i=' + str(60)
elif source['source'] == 'yahoo':
raise SystemError('Yahoo Finance does not provide minute data.')
else:
            raise SystemError('An unsupported interval (%s) was provided to '
                              'data_download in pySecMaster.py' %
                              source['interval'])
if source['source'] == 'quandl':
if quandl_key:
# Download data for selected Quandl codes
print('\nDownloading all Quandl fields for: %s'
'\nNew data will %s the prior %s day\'s data' %
(source['selection'], source['data_process'],
source['replace_days_back']))
# NOTE: Quandl only allows a single concurrent download with
# their free account
QuandlDataExtraction(
database=database_options['database'],
user=database_options['user'],
password=database_options['password'],
host=database_options['host'],
port=database_options['port'],
quandl_token=quandl_key,
db_url=quandl_data_url,
download_selection=source['selection'],
redownload_time=source['redownload_time'],
data_process=source['data_process'],
days_back=source['replace_days_back'],
threads=2,
table=table,
load_tables=userdir['load_tables'],
verbose=verbose)
else:
print('\nNot able to download Quandl data for %s because '
'there was no Quandl API key provided.' %
(source['selection'],))
elif source['source'] == 'google':
# Download data for selected Google Finance codes
print('\nDownloading all Google Finance fields for: %s'
'\nNew data will %s the prior %s day\'s data' %
(source['selection'], source['data_process'],
source['replace_days_back']))
google_fin_url['period'] = 'p=' + str(source['period']) + 'd'
GoogleFinanceDataExtraction(
database=database_options['database'],
user=database_options['user'],
password=database_options['password'],
host=database_options['host'],
port=database_options['port'],
db_url=google_fin_url,
download_selection=source['selection'],
redownload_time=source['redownload_time'],
data_process=source['data_process'],
days_back=source['replace_days_back'],
threads=threads,
table=table,
load_tables=userdir['load_tables'],
verbose=verbose)
elif source['source'] == 'yahoo':
            # Download data for selected Yahoo Finance codes
print('\nDownloading all Yahoo Finance fields for: %s'
                  '\nNew data will %s the prior %s days\' data' %
(source['selection'], source['data_process'],
source['replace_days_back']))
YahooFinanceDataExtraction(
database=database_options['database'],
user=database_options['user'],
password=database_options['password'],
host=database_options['host'],
port=database_options['port'],
db_url=yahoo_fin_url,
download_selection=source['selection'],
redownload_time=source['redownload_time'],
data_process=source['data_process'],
days_back=source['replace_days_back'],
threads=threads,
table=table,
load_tables=userdir['load_tables'],
verbose=verbose)
else:
print('The %s source is currently not implemented. Skipping it.' %
source['source'])
print('All available data values have been downloaded for: %s' %
download_list)
def post_download_maintenance(database_options, download_list, period=None,
verbose=False):
""" Perform tasks that require all data to be downloaded first, such as the
source cross validator function.
:param database_options: Dictionary of the postgres database options
:param download_list: List of dictionaries, with each dictionary containing
all of the relevant variables for the specific source
:param period: Optional integer indicating the prior number of days whose
values should be cross validated. If None is provided, then the
entire set of values will be validated.
:param verbose: Boolean of whether debugging prints should occur.
"""
intervals = {}
for source in download_list:
if source['interval'] == 'daily':
intervals['daily_prices'] = True
elif source['interval'] == 'minute':
print('The cross validator for minute prices is disabled because '
'there is currently only one source (Google Finance). Thus, '
'it does not make sense to run it. When you add a second '
                  'source, you can re-enable it by uncommenting the line '
'below this message in post_download_maintenance within '
'pySecMaster.py')
# intervals['minute_prices'] = True
else:
            raise SystemError('An unsupported interval (%s) was provided to '
                              'post_download_maintenance in pySecMaster.py' %
                              source['interval'])
for key, value in intervals.items():
# The key is the table name to process
table = key
if verbose:
print('Starting cross validator for %s' % table)
tsids_df = query_all_active_tsids(
database=database_options['database'],
user=database_options['user'],
password=database_options['password'],
host=database_options['host'],
port=database_options['port'],
table=table,
period=period)
tsid_list = tsids_df['tsid'].values
CrossValidate(
database=database_options['database'],
user=database_options['user'],
password=database_options['password'],
host=database_options['host'],
port=database_options['port'],
table=table, tsid_list=tsid_list, period=period, verbose=verbose)
if __name__ == '__main__':
# Establish the argument parser
parser = argparse.ArgumentParser(
description='The pySecMaster is an automated financial securities '
'master framework. Currently, NYSE and NASDAQ securities '
'will have their daily prices loaded into the database. '
'Additional data can be integrated with the system, '
'including minute prices, option chain values, tick '
'values, corporate events (splits, dividends, IPOs), '
'and fundamental data.'
)
# Optional arguments
parser.add_argument('--csidata-update-range', type=int,
default=7,
help='Number of days before the data will be refreshed.')
parser.add_argument('--daily-downloads', type=str, nargs='*',
default=['quandl', 'yahoo', 'google'],
help='Sources whose daily prices will be downloaded. By default, '
'quandl, yahoo and google daily prices will be downloaded.')
parser.add_argument('--database-list', type=str, nargs='+',
default=['WIKI'],
help='The Quandl databases that will have their codes downloaded if '
                             'the quandl-ticker-source argument is set to quandl. '
'Provide selections one after the other without quotes. Options '
'include: WIKI, GOOG, YAHOO, SEC, EIA, JODI, CURRFX, FINRA.')
parser.add_argument('--minute-downloads', type=str, nargs='*',
help='Sources whose minute prices will be downloaded. Only google '
'is implemented right now. By default, no minute prices are '
'downloaded.')
parser.add_argument('--quandl-update-range', type=int,
default=30,
help='Number of days before the ticker tables will be refreshed. If '
'the database was not completely downloaded within this range, '
'the remainder of the codes will attempt to be downloaded.')
parser.add_argument('--quandl-ticker-source', type=str,
choices=['quandl', 'csidata'], default='csidata',
help='Determines where the extractor should get the ticker codes from '
'when downloading Quandl data. Options include using the official '
'list from Quandl (quandl), or make implied codes from the CSI '
'data stock factsheet (csidata) which is more accurate but tries '
'more non-existent tickers.')
parser.add_argument('--symbology-sources', type=str, nargs='+',
                        default=['csi_data', 'tsid', 'quandl_wiki', 'quandl_eod',
                                 'quandl_goog', 'seeking_alpha', 'yahoo'],
                        help='Sources that will be integrated into the symbology table, '
                             'including having source-specific symbology identifiers '
                             'created and linked with other sources\' symbology '
                             'identifiers for the same underlying item. The following '
                             'sources are used by default: csi_data, tsid, quandl_wiki, '
                             'quandl_eod, quandl_goog, seeking_alpha, yahoo. Omitting '
                             'key sources may break the system. Provide selections one '
                             'after the other without quotes.')
parser.add_argument('-t', '--threads', type=int,
help='Number of threads to allocate to the system. The total system '
'cores are used by default.')
parser.add_argument('--validator-period', type=int,
help='Prior number of days whose values should be cross validated, '
'with 30 being a good option. If no value is provided, the '
'entire period will be validated.')
parser.add_argument('-v', '--verbose',
action='store_true',
help='Print out the status of the system.')
# Retrieve the arguments
args = parser.parse_args()
############################################################################
# Database maintenance options:
test_database_options = {
'admin_user': userdir['postgresql']['main_user'],
'admin_password': userdir['postgresql']['main_password'],
'database': userdir['postgresql']['pysecmaster_db'],
'user': userdir['postgresql']['pysecmaster_user'],
'password': userdir['postgresql']['pysecmaster_password'],
'host': userdir['postgresql']['pysecmaster_host'],
'port': userdir['postgresql']['pysecmaster_port'],
}
test_quandl_key = userdir['quandl']['quandl_token']
############################################################################
# Database data download options:
# TODO: Move these parameters to a config file
# Example download list: should be a list of dictionaries, with the
# dictionaries containing all relevant variables for the specific source
test_download_list = [
# Quandl WIKI daily data with wiki
{'source': 'quandl', 'selection': 'wiki', 'interval': 'daily',
'redownload_time': 60 * 60 * 12, 'data_process': 'replace',
'replace_days_back': 60},
# Quandl EOD daily data with eod
{'source': 'quandl', 'selection': 'eod', 'interval': 'daily',
'redownload_time': 60 * 60 * 12, 'data_process': 'replace',
'replace_days_back': 60},
# Yahoo Fin daily data with us_main - 9300 seconds (2.58 hours)
{'source': 'yahoo', 'selection': 'us_main', 'interval': 'daily',
'redownload_time': 60 * 60 * 12, 'data_process': 'replace',
'replace_days_back': 60},
        # Google daily data with us_main_no_end_date (max of 50 days' prices)
{'source': 'google', 'selection': 'us_main_no_end_date',
'interval': 'daily', 'period': 60, 'redownload_time': 60 * 60 * 12,
'data_process': 'replace', 'replace_days_back': 10},
        # Google minute data with us_main (max of 15 days' prices)
{'source': 'google', 'selection': 'us_main', 'interval': 'minute',
'period': 20, 'redownload_time': 60 * 60 * 12,
'data_process': 'replace', 'replace_days_back': 10}
]
# source: String of which data provider should have their data downloaded
# selection: String of which data from the source should be downloaded. To
# understand what is actually being downloaded, go to either the
# query_q_codes function or query_codes function in
# utilities/database_queries.py and view the SQL queries.
# (Quandl: 'wiki', 'eod', 'goog', 'goog_us_main',
# 'goog_us_main_no_end_date', 'goog_us_canada_london', 'goog_etf';
# Google: 'all', 'us_main', 'us_main_no_end_date', 'us_canada_london')
# interval: String of what interval the data should be in (daily or minute).
    # period: Integer of how many days' worth of data should be downloaded
    #   (Google Finance only). Minute data only goes back 15 days, and daily
    #   data only goes back 50 days.
# redownload_time: Integer representing time in seconds before the data is
# allowed to be re-downloaded. Allows the system to be restarted without
# downloading the same data again.
# data_process: String of how the new data will interact with the existing
# data ('replace': replace the prior x days of data (replace_days_back);
# 'append': append the latest data to the existing data (will ignore
# replace_days_back variable). 'append' requires less system resources
# since it only adds new values, instead of deleting overlapping values.
# replace_days_back: Integer of the number of days whose existing data
# should be replaced by new data (50000 replaces all existing data). Due
# to weekends, the days replaced may differ depending on what day this
# function is run
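    # For reference, the redownload_time of 60 * 60 * 12 used above works out
    # to 43,200 seconds, i.e. 12 hours between permitted re-downloads.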
############################################################################
# Build the download list from the argparse arguments provided. There is
# probably a much better way to do this
download_list = []
if args.daily_downloads or args.minute_downloads:
daily_d = args.daily_downloads
minute_d = args.minute_downloads
# Cycle through all download templates specified above
for source in test_download_list:
if daily_d and source['interval'] == 'daily':
for cur_d in daily_d:
                # Need to do a string-to-string comparison
if source['source'] in cur_d:
# For quandl, ensure selection matches argparse exactly
                    if source['source'] == 'quandl' and \
source['selection'] == cur_d[cur_d.find('.')+1:]:
download_list.append(source)
                    elif source['source'] != 'quandl':
download_list.append(source)
if minute_d and source['source'] in minute_d and \
source['interval'] == 'minute':
download_list.append(source)
if args.threads:
threads = args.threads
else:
import multiprocessing
threads = multiprocessing.cpu_count()
# Try connecting to the postgres database
while True:
db_available = postgres_test(database_options=test_database_options)
if db_available:
print('%s database is available' %
test_database_options['database'])
break
else:
print('%s database is unavailable' %
test_database_options['database'])
time.sleep(1)
maintenance(database_options=test_database_options,
quandl_key=test_quandl_key,
quandl_ticker_source=args.quandl_ticker_source,
database_list=args.database_list,
threads=threads,
quandl_update_range=args.quandl_update_range,
csidata_update_range=args.csidata_update_range,
symbology_sources=args.symbology_sources)
if download_list:
data_download(database_options=test_database_options,
quandl_key=test_quandl_key,
download_list=download_list,
threads=threads,
verbose=args.verbose)
# 15 hours for complete build; adds ~6 GB
post_download_maintenance(database_options=test_database_options,
download_list=download_list,
period=args.validator_period,
verbose=args.verbose)
else:
print('No download sources were specified for either the daily data '
'or the minute data, therefore no prices will be downloaded nor '
'will the post download maintenance functions be run.')
print(datetime.now())
| agpl-3.0 | -3,861,362,414,208,868,400 | 47.293403 | 81 | 0.594385 | false |
doordash/auto_ml | tests/classifiers.py | 1 | 16338 | import datetime
import os
import random
import sys
sys.path = [os.path.abspath(os.path.dirname(__file__))] + sys.path
from quantile_ml import Predictor
from quantile_ml.utils_models import load_ml_model
import dill
import numpy as np
import pandas as pd
from nose.tools import assert_equal, assert_not_equal, with_setup
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
import utils_testing as utils
def optimize_final_model_classification(model_name=None):
np.random.seed(0)
df_titanic_train, df_titanic_test = utils.get_titanic_binary_classification_dataset()
column_descriptions = {
'survived': 'output'
, 'embarked': 'categorical'
, 'pclass': 'categorical'
}
ml_predictor = Predictor(type_of_estimator='classifier', column_descriptions=column_descriptions)
ml_predictor.train(df_titanic_train, optimize_final_model=True, model_names=model_name)
test_score = ml_predictor.score(df_titanic_test, df_titanic_test.survived)
print('test_score')
print(test_score)
# Small sample sizes mean there's a fair bit of noise here
lower_bound = -0.215
if model_name == 'DeepLearningClassifier':
lower_bound = -0.25
assert lower_bound < test_score < -0.17
def categorical_ensembling_classification(model_name=None):
np.random.seed(0)
df_titanic_train, df_titanic_test = utils.get_titanic_binary_classification_dataset()
column_descriptions = {
'survived': 'output'
, 'embarked': 'categorical'
, 'pclass': 'categorical'
}
ml_predictor = Predictor(type_of_estimator='classifier', column_descriptions=column_descriptions)
ml_predictor.train_categorical_ensemble(df_titanic_train, model_names=model_name, categorical_column='embarked')
test_score = ml_predictor.score(df_titanic_test, df_titanic_test.survived)
print('test_score')
print(test_score)
lower_bound = -0.215
if model_name == 'DeepLearningClassifier':
lower_bound = -0.24
if model_name == 'XGBClassifier':
lower_bound = -0.235
if model_name == 'LGBMClassifier':
lower_bound = -0.22
if model_name == 'GradientBoostingClassifier':
lower_bound = -0.23
assert lower_bound < test_score < -0.17
def getting_single_predictions_classification(model_name=None):
np.random.seed(0)
df_titanic_train, df_titanic_test = utils.get_titanic_binary_classification_dataset()
column_descriptions = {
'survived': 'output'
, 'embarked': 'categorical'
, 'pclass': 'categorical'
}
ml_predictor = Predictor(type_of_estimator='classifier', column_descriptions=column_descriptions)
ml_predictor.train(df_titanic_train, model_names=model_name)
file_name = ml_predictor.save(str(random.random()))
saved_ml_pipeline = load_ml_model(file_name)
# if model_name == 'DeepLearningClassifier':
# from quantile_ml.utils_models import load_keras_model
# saved_ml_pipeline = load_keras_model(file_name)
# else:
# with open(file_name, 'rb') as read_file:
# saved_ml_pipeline = dill.load(read_file)
os.remove(file_name)
try:
keras_file_name = file_name[:-5] + '_keras_deep_learning_model.h5'
os.remove(keras_file_name)
except:
pass
df_titanic_test_dictionaries = df_titanic_test.to_dict('records')
# 1. make sure the accuracy is the same
predictions = []
for row in df_titanic_test_dictionaries:
predictions.append(saved_ml_pipeline.predict_proba(row)[1])
print('predictions')
print(predictions)
first_score = utils.calculate_brier_score_loss(df_titanic_test.survived, predictions)
print('first_score')
print(first_score)
# Make sure our score is good, but not unreasonably good
lower_bound = -0.215
if model_name == 'DeepLearningClassifier':
lower_bound = -0.25
assert lower_bound < first_score < -0.17
# 2. make sure the speed is reasonable (do it a few extra times)
data_length = len(df_titanic_test_dictionaries)
start_time = datetime.datetime.now()
for idx in range(1000):
row_num = idx % data_length
saved_ml_pipeline.predict(df_titanic_test_dictionaries[row_num])
end_time = datetime.datetime.now()
duration = end_time - start_time
print('duration.total_seconds()')
print(duration.total_seconds())
# It's very difficult to set a benchmark for speed that will work across all machines.
# On my 2013 bottom of the line 15" MacBook Pro, this runs in about 0.8 seconds for 1000 predictions
# That's about 1 millisecond per prediction
# Assuming we might be running on a test box that's pretty weak, multiply by 3
# Also make sure we're not running unreasonably quickly
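    # Rough arithmetic behind the bound: ~0.8 ms per prediction * 1000
    # predictions * 3x safety margin for a weak box ~= 2.4 seconds, well
    # inside the 0.2-15 second window asserted below.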
assert 0.2 < duration.total_seconds() < 15
# 3. make sure we're not modifying the dictionaries (the score is the same after running a few experiments as it is the first time)
predictions = []
for row in df_titanic_test_dictionaries:
predictions.append(saved_ml_pipeline.predict_proba(row)[1])
print('predictions')
print(predictions)
print('df_titanic_test_dictionaries')
print(df_titanic_test_dictionaries)
second_score = utils.calculate_brier_score_loss(df_titanic_test.survived, predictions)
print('second_score')
print(second_score)
# Make sure our score is good, but not unreasonably good
assert lower_bound < second_score < -0.17
def getting_single_predictions_multilabel_classification(model_name=None):
# quantile_ml does not support multilabel classification for deep learning at the moment
if model_name == 'DeepLearningClassifier':
return
np.random.seed(0)
df_twitter_train, df_twitter_test = utils.get_twitter_sentiment_multilabel_classification_dataset()
column_descriptions = {
'airline_sentiment': 'output'
, 'airline': 'categorical'
, 'text': 'ignore'
, 'tweet_location': 'categorical'
, 'user_timezone': 'categorical'
, 'tweet_created': 'date'
}
ml_predictor = Predictor(type_of_estimator='classifier', column_descriptions=column_descriptions)
ml_predictor.train(df_twitter_train, model_names=model_name)
file_name = ml_predictor.save(str(random.random()))
# if model_name == 'DeepLearningClassifier':
# from quantile_ml.utils_models import load_keras_model
# saved_ml_pipeline = load_keras_model(file_name)
# else:
# with open(file_name, 'rb') as read_file:
# saved_ml_pipeline = dill.load(read_file)
saved_ml_pipeline = load_ml_model(file_name)
os.remove(file_name)
try:
keras_file_name = file_name[:-5] + '_keras_deep_learning_model.h5'
os.remove(keras_file_name)
except:
pass
df_twitter_test_dictionaries = df_twitter_test.to_dict('records')
# 1. make sure the accuracy is the same
predictions = []
for row in df_twitter_test_dictionaries:
predictions.append(saved_ml_pipeline.predict(row))
print('predictions')
print(predictions)
first_score = accuracy_score(df_twitter_test.airline_sentiment, predictions)
print('first_score')
print(first_score)
# Make sure our score is good, but not unreasonably good
lower_bound = 0.67
if model_name == 'LGBMClassifier':
lower_bound = 0.655
assert lower_bound < first_score < 0.79
# 2. make sure the speed is reasonable (do it a few extra times)
data_length = len(df_twitter_test_dictionaries)
start_time = datetime.datetime.now()
for idx in range(1000):
row_num = idx % data_length
saved_ml_pipeline.predict(df_twitter_test_dictionaries[row_num])
end_time = datetime.datetime.now()
duration = end_time - start_time
print('duration.total_seconds()')
print(duration.total_seconds())
# It's very difficult to set a benchmark for speed that will work across all machines.
# On my 2013 bottom of the line 15" MacBook Pro, this runs in about 0.8 seconds for 1000 predictions
# That's about 1 millisecond per prediction
# Assuming we might be running on a test box that's pretty weak, multiply by 3
# Also make sure we're not running unreasonably quickly
# time_upper_bound = 10
# if model_name == 'XGBClassifier':
# time_upper_bound = 4
assert 0.2 < duration.total_seconds() < 15
# 3. make sure we're not modifying the dictionaries (the score is the same after running a few experiments as it is the first time)
predictions = []
for row in df_twitter_test_dictionaries:
predictions.append(saved_ml_pipeline.predict(row))
print('predictions')
print(predictions)
print('df_twitter_test_dictionaries')
print(df_twitter_test_dictionaries)
second_score = accuracy_score(df_twitter_test.airline_sentiment, predictions)
print('second_score')
print(second_score)
# Make sure our score is good, but not unreasonably good
assert lower_bound < second_score < 0.79
def feature_learning_getting_single_predictions_classification(model_name=None):
np.random.seed(0)
df_titanic_train, df_titanic_test = utils.get_titanic_binary_classification_dataset()
column_descriptions = {
'survived': 'output'
, 'embarked': 'categorical'
, 'pclass': 'categorical'
}
ml_predictor = Predictor(type_of_estimator='classifier', column_descriptions=column_descriptions)
    # NOTE: it is bad practice to pass in our training set as our fl_data set, but we don't have enough data to do it any other way
df_titanic_train, fl_data = train_test_split(df_titanic_train, test_size=0.2)
ml_predictor.train(df_titanic_train, model_names=model_name, feature_learning=True, fl_data=fl_data)
file_name = ml_predictor.save(str(random.random()))
# from quantile_ml.utils_models import load_keras_model
saved_ml_pipeline = load_ml_model(file_name)
os.remove(file_name)
try:
keras_file_name = file_name[:-5] + '_keras_deep_learning_model.h5'
os.remove(keras_file_name)
except:
pass
df_titanic_test_dictionaries = df_titanic_test.to_dict('records')
# 1. make sure the accuracy is the same
predictions = []
for row in df_titanic_test_dictionaries:
predictions.append(saved_ml_pipeline.predict_proba(row)[1])
print('predictions')
print(predictions)
first_score = utils.calculate_brier_score_loss(df_titanic_test.survived, predictions)
print('first_score')
print(first_score)
# Make sure our score is good, but not unreasonably good
lower_bound = -0.215
if model_name == 'DeepLearningClassifier':
lower_bound = -0.25
if model_name == 'GradientBoostingClassifier' or model_name is None:
lower_bound = -0.23
if model_name == 'LGBMClassifier':
lower_bound = -0.221
if model_name == 'XGBClassifier':
lower_bound = -0.245
assert lower_bound < first_score < -0.17
# 2. make sure the speed is reasonable (do it a few extra times)
data_length = len(df_titanic_test_dictionaries)
start_time = datetime.datetime.now()
for idx in range(1000):
row_num = idx % data_length
saved_ml_pipeline.predict(df_titanic_test_dictionaries[row_num])
end_time = datetime.datetime.now()
duration = end_time - start_time
print('duration.total_seconds()')
print(duration.total_seconds())
# It's very difficult to set a benchmark for speed that will work across all machines.
# On my 2013 bottom of the line 15" MacBook Pro, this runs in about 0.8 seconds for 1000 predictions
# That's about 1 millisecond per prediction
# Assuming we might be running on a test box that's pretty weak, multiply by 3
# Also make sure we're not running unreasonably quickly
assert 0.2 < duration.total_seconds() < 15
# 3. make sure we're not modifying the dictionaries (the score is the same after running a few experiments as it is the first time)
predictions = []
for row in df_titanic_test_dictionaries:
predictions.append(saved_ml_pipeline.predict_proba(row)[1])
print('predictions')
print(predictions)
print('df_titanic_test_dictionaries')
print(df_titanic_test_dictionaries)
second_score = utils.calculate_brier_score_loss(df_titanic_test.survived, predictions)
print('second_score')
print(second_score)
# Make sure our score is good, but not unreasonably good
assert lower_bound < second_score < -0.17
def feature_learning_categorical_ensembling_getting_single_predictions_classification(model_name=None):
np.random.seed(0)
df_titanic_train, df_titanic_test = utils.get_titanic_binary_classification_dataset()
column_descriptions = {
'survived': 'output'
, 'embarked': 'categorical'
, 'pclass': 'categorical'
}
ml_predictor = Predictor(type_of_estimator='classifier', column_descriptions=column_descriptions)
    # NOTE: it is bad practice to pass in our training set as our fl_data set, but we don't have enough data to do it any other way
df_titanic_train, fl_data = train_test_split(df_titanic_train, test_size=0.2)
ml_predictor.train_categorical_ensemble(df_titanic_train, model_names=model_name, feature_learning=False, fl_data=fl_data, categorical_column='embarked')
file_name = ml_predictor.save(str(random.random()))
# with open(file_name, 'rb') as read_file:
# saved_ml_pipeline = dill.load(read_file)
from quantile_ml.utils_models import load_ml_model
saved_ml_pipeline = load_ml_model(file_name)
os.remove(file_name)
try:
keras_file_name = file_name[:-5] + '_keras_deep_learning_model.h5'
os.remove(keras_file_name)
except:
pass
df_titanic_test_dictionaries = df_titanic_test.to_dict('records')
# 1. make sure the accuracy is the same
predictions = []
for row in df_titanic_test_dictionaries:
predictions.append(saved_ml_pipeline.predict_proba(row)[1])
print('predictions')
print(predictions)
first_score = utils.calculate_brier_score_loss(df_titanic_test.survived, predictions)
print('first_score')
print(first_score)
# Make sure our score is good, but not unreasonably good
lower_bound = -0.215
if model_name == 'DeepLearningClassifier':
lower_bound = -0.25
if model_name == 'GradientBoostingClassifier' or model_name is None:
lower_bound = -0.25
if model_name == 'LGBMClassifier':
lower_bound = -0.221
if model_name == 'XGBClassifier':
lower_bound = -0.25
assert lower_bound < first_score < -0.17
# 2. make sure the speed is reasonable (do it a few extra times)
data_length = len(df_titanic_test_dictionaries)
start_time = datetime.datetime.now()
for idx in range(1000):
row_num = idx % data_length
saved_ml_pipeline.predict(df_titanic_test_dictionaries[row_num])
end_time = datetime.datetime.now()
duration = end_time - start_time
print('duration.total_seconds()')
print(duration.total_seconds())
# It's very difficult to set a benchmark for speed that will work across all machines.
# On my 2013 bottom of the line 15" MacBook Pro, this runs in about 0.8 seconds for 1000 predictions
# That's about 1 millisecond per prediction
# Assuming we might be running on a test box that's pretty weak, multiply by 3
# Also make sure we're not running unreasonably quickly
assert 0.2 < duration.total_seconds() < 15
# 3. make sure we're not modifying the dictionaries (the score is the same after running a few experiments as it is the first time)
predictions = []
for row in df_titanic_test_dictionaries:
predictions.append(saved_ml_pipeline.predict_proba(row)[1])
print('predictions')
print(predictions)
print('df_titanic_test_dictionaries')
print(df_titanic_test_dictionaries)
second_score = utils.calculate_brier_score_loss(df_titanic_test.survived, predictions)
print('second_score')
print(second_score)
# Make sure our score is good, but not unreasonably good
assert lower_bound < second_score < -0.17
| mit | -3,767,392,773,445,112,000 | 33.468354 | 157 | 0.683437 | false |
scylladb/scylla-longevity-tests | sdcm/tester.py | 1 | 59525 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
# Copyright (c) 2016 ScyllaDB
import re
import os
import logging
import time
import types
from functools import wraps
import boto3.session
import libvirt
from avocado import Test
from avocado.utils.process import CmdError
from cassandra import ConsistencyLevel
from cassandra.auth import PlainTextAuthProvider
from cassandra.cluster import Cluster as ClusterDriver
from cassandra.cluster import NoHostAvailable
from cassandra.policies import RetryPolicy
from cassandra.policies import WhiteListRoundRobinPolicy
from libcloud.compute.providers import get_driver
from libcloud.compute.types import Provider
from keystore import KeyStore
from . import cluster
from . import nemesis
from .cluster_libvirt import LoaderSetLibvirt
from .cluster_openstack import LoaderSetOpenStack
from .cluster_libvirt import MonitorSetLibvirt
from .cluster_openstack import MonitorSetOpenStack
from .cluster import NoMonitorSet, SCYLLA_DIR
from .cluster import RemoteCredentials
from .cluster_libvirt import ScyllaLibvirtCluster
from .cluster_openstack import ScyllaOpenStackCluster
from .cluster import UserRemoteCredentials
from .cluster_gce import ScyllaGCECluster
from .cluster_gce import LoaderSetGCE
from .cluster_gce import MonitorSetGCE
from .cluster_aws import CassandraAWSCluster
from .cluster_aws import ScyllaAWSCluster
from .cluster_aws import LoaderSetAWS
from .cluster_aws import MonitorSetAWS
from .utils import get_data_dir_path, log_run_info, retrying
from . import docker
from . import cluster_baremetal
from . import db_stats
from db_stats import PrometheusDBStats
from results_analyze import PerformanceResultsAnalyzer
try:
from botocore.vendored.requests.packages.urllib3.contrib.pyopenssl import extract_from_urllib3
# Don't use pyOpenSSL in urllib3 - it causes an ``OpenSSL.SSL.Error``
# exception when we try an API call on an idled persistent connection.
# See https://github.com/boto/boto3/issues/220
extract_from_urllib3()
except ImportError:
pass
TEST_LOG = logging.getLogger('avocado.test')
class FlakyRetryPolicy(RetryPolicy):
"""
    A retry policy that retries read/write timeouts and unavailable errors up to 5 times
"""
def on_read_timeout(self, *args, **kwargs):
if kwargs['retry_num'] < 5:
TEST_LOG.debug("Retrying read after timeout. Attempt #%s",
str(kwargs['retry_num']))
return self.RETRY, None
else:
return self.RETHROW, None
def on_write_timeout(self, *args, **kwargs):
if kwargs['retry_num'] < 5:
TEST_LOG.debug("Retrying write after timeout. Attempt #%s",
str(kwargs['retry_num']))
return self.RETRY, None
else:
return self.RETHROW, None
def on_unavailable(self, *args, **kwargs):
if kwargs['retry_num'] < 5:
TEST_LOG.debug("Retrying request after UE. Attempt #%s",
str(kwargs['retry_num']))
return self.RETRY, None
else:
return self.RETHROW, None
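# FlakyRetryPolicy is wired into the python driver further down, in
# ClusterTester._create_session, via
# ClusterDriver(..., default_retry_policy=FlakyRetryPolicy()).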
def retry_till_success(fun, *args, **kwargs):
timeout = kwargs.pop('timeout', 60)
bypassed_exception = kwargs.pop('bypassed_exception', Exception)
deadline = time.time() + timeout
while True:
try:
return fun(*args, **kwargs)
except bypassed_exception:
if time.time() > deadline:
raise
else:
# brief pause before next attempt
time.sleep(0.25)
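# Usage sketch, mirroring cql_connection_patient below: keep retrying a
# driver connection for up to 30 seconds while NoHostAvailable is raised.
#   session = retry_till_success(self.cql_connection, node, timeout=30,
#                                bypassed_exception=NoHostAvailable)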
def clean_resources_on_exception(method):
"""
Ensure that resources used in test are cleaned upon unhandled exceptions.
:param method: ScyllaClusterTester method to wrap.
:return: Wrapped method.
"""
@wraps(method)
def wrapper(*args, **kwargs):
try:
return method(*args, **kwargs)
except Exception:
TEST_LOG.exception("Exception in %s. Will clean resources", method.__name__)
args[0].clean_resources()
raise
return wrapper
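# clean_resources_on_exception is applied below to setUp, init_resources and
# the run_stress* helpers, so a failed provisioning or load step still tears
# down any cloud resources that were already created.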
class ClusterTester(db_stats.TestStatsMixin, Test):
def __init__(self, methodName='test', name=None, params=None,
base_logdir=None, tag=None, job=None, runner_queue=None):
super(ClusterTester, self).__init__(methodName=methodName, name=name,
params=params,
base_logdir=base_logdir, tag=tag,
job=job, runner_queue=runner_queue)
self._failure_post_behavior = self.params.get(key='failure_post_behavior',
default='destroy')
ip_ssh_connections = self.params.get(key='ip_ssh_connections', default='public')
self.log.debug("IP used for SSH connections is '%s'",
ip_ssh_connections)
cluster.set_ip_ssh_connections(ip_ssh_connections)
self.log.debug("Behavior on failure/post test is '%s'",
self._failure_post_behavior)
cluster.register_cleanup(cleanup=self._failure_post_behavior)
self._duration = self.params.get(key='test_duration', default=60)
cluster.set_duration(self._duration)
cluster.Setup.reuse_cluster(self.params.get('reuse_cluster', default=False))
cluster.Setup.keep_cluster(self._failure_post_behavior)
# for saving test details in DB
self.create_stats = self.params.get(key='store_results_in_elasticsearch', default=True)
self.scylla_dir = SCYLLA_DIR
self.scylla_hints_dir = os.path.join(self.scylla_dir, "hints")
@clean_resources_on_exception
def setUp(self):
self.credentials = []
self.db_cluster = None
self.cs_db_cluster = None
self.loaders = None
self.monitors = None
self.connections = []
logging.getLogger('botocore').setLevel(logging.CRITICAL)
logging.getLogger('boto3').setLevel(logging.CRITICAL)
if self.create_stats:
self.create_test_stats()
self.init_resources()
if self.params.get('seeds_first', default='false') == 'true':
seeds_num = self.params.get('seeds_num', default=1)
self.db_cluster.wait_for_init(node_list=self.db_cluster.nodes[:seeds_num])
self.db_cluster.wait_for_init(node_list=self.db_cluster.nodes[seeds_num:])
else:
self.db_cluster.wait_for_init()
if self.cs_db_cluster:
self.cs_db_cluster.wait_for_init()
db_node_address = self.db_cluster.nodes[0].private_ip_address
self.loaders.wait_for_init(db_node_address=db_node_address)
self.monitors.wait_for_init()
# cancel reuse cluster - for new nodes added during the test
cluster.Setup.reuse_cluster(False)
self.prometheusDB = PrometheusDBStats(host=self.monitors.nodes[0].public_ip_address)
def get_nemesis_class(self):
"""
Get a Nemesis class from parameters.
:return: Nemesis class.
:rtype: nemesis.Nemesis derived class
"""
class_name = self.params.get('nemesis_class_name')
return getattr(nemesis, class_name)
def get_cluster_openstack(self, loader_info, db_info, monitor_info):
if loader_info['n_nodes'] is None:
loader_info['n_nodes'] = self.params.get('n_loaders')
if loader_info['type'] is None:
loader_info['type'] = self.params.get('openstack_instance_type_loader')
if db_info['n_nodes'] is None:
db_info['n_nodes'] = self.params.get('n_db_nodes')
if db_info['type'] is None:
db_info['type'] = self.params.get('openstack_instance_type_db')
if monitor_info['n_nodes'] is None:
monitor_info['n_nodes'] = self.params.get('n_monitor_nodes')
if monitor_info['type'] is None:
monitor_info['type'] = self.params.get('openstack_instance_type_monitor')
user_prefix = self.params.get('user_prefix', None)
user = self.params.get('openstack_user', None)
password = self.params.get('openstack_password', None)
tenant = self.params.get('openstack_tenant', None)
auth_version = self.params.get('openstack_auth_version', None)
auth_url = self.params.get('openstack_auth_url', None)
service_type = self.params.get('openstack_service_type', None)
service_name = self.params.get('openstack_service_name', None)
service_region = self.params.get('openstack_service_region', None)
service_cls = get_driver(Provider.OPENSTACK)
service = service_cls(user, password,
ex_force_auth_version=auth_version,
ex_force_auth_url=auth_url,
ex_force_service_type=service_type,
ex_force_service_name=service_name,
ex_force_service_region=service_region,
ex_tenant_name=tenant)
user_credentials = self.params.get('user_credentials_path', None)
if user_credentials:
self.credentials.append(UserRemoteCredentials(key_file=user_credentials))
else:
self.credentials.append(RemoteCredentials(service=service,
key_prefix='sct',
user_prefix=user_prefix))
self.db_cluster = ScyllaOpenStackCluster(openstack_image=self.params.get('openstack_image'),
openstack_image_username=self.params.get('openstack_image_username'),
openstack_network=self.params.get('openstack_network'),
openstack_instance_type=db_info['type'],
service=service,
credentials=self.credentials,
user_prefix=user_prefix,
n_nodes=db_info['n_nodes'],
params=self.params)
scylla_repo = get_data_dir_path('scylla.repo')
self.loaders = LoaderSetOpenStack(openstack_image=self.params.get('openstack_image'),
openstack_image_username=self.params.get('openstack_image_username'),
openstack_network=self.params.get('openstack_network'),
openstack_instance_type=loader_info['type'],
service=service,
credentials=self.credentials,
scylla_repo=scylla_repo,
user_prefix=user_prefix,
n_nodes=loader_info['n_nodes'],
params=self.params)
if monitor_info['n_nodes'] > 0:
self.monitors = MonitorSetOpenStack(openstack_image=self.params.get('openstack_image'),
openstack_image_username=self.params.get('openstack_image_username'),
openstack_network=self.params.get('openstack_network'),
openstack_instance_type=monitor_info['type'],
service=service,
credentials=self.credentials,
scylla_repo=scylla_repo,
user_prefix=user_prefix,
n_nodes=monitor_info['n_nodes'],
params=self.params,
targets=dict(db_cluster=self.db_cluster,
loaders=self.loaders)
)
else:
self.monitors = NoMonitorSet()
def get_cluster_gce(self, loader_info, db_info, monitor_info):
if loader_info['n_nodes'] is None:
loader_info['n_nodes'] = self.params.get('n_loaders')
if loader_info['type'] is None:
loader_info['type'] = self.params.get('gce_instance_type_loader')
if loader_info['disk_type'] is None:
loader_info['disk_type'] = self.params.get('gce_root_disk_type_loader')
if loader_info['disk_size'] is None:
loader_info['disk_size'] = self.params.get('gce_root_disk_size_loader')
if loader_info['n_local_ssd'] is None:
loader_info['n_local_ssd'] = self.params.get('gce_n_local_ssd_disk_loader')
if db_info['n_nodes'] is None:
n_db_nodes = self.params.get('n_db_nodes')
if isinstance(n_db_nodes, int): # legacy type
db_info['n_nodes'] = [n_db_nodes]
elif isinstance(n_db_nodes, str): # latest type to support multiple datacenters
db_info['n_nodes'] = [int(n) for n in n_db_nodes.split()]
else:
self.fail('Unsupported parameter type: {}'.format(type(n_db_nodes)))
cpu = self.params.get('gce_instance_type_cpu_db')
# unit is GB
mem = self.params.get('gce_instance_type_mem_db')
if cpu and mem:
db_info['type'] = 'custom-{}-{}-ext'.format(cpu, int(mem) * 1024)
if db_info['type'] is None:
db_info['type'] = self.params.get('gce_instance_type_db')
if db_info['disk_type'] is None:
db_info['disk_type'] = self.params.get('gce_root_disk_type_db')
if db_info['disk_size'] is None:
db_info['disk_size'] = self.params.get('gce_root_disk_size_db')
if db_info['n_local_ssd'] is None:
db_info['n_local_ssd'] = self.params.get('gce_n_local_ssd_disk_db')
if monitor_info['n_nodes'] is None:
monitor_info['n_nodes'] = self.params.get('n_monitor_nodes')
if monitor_info['type'] is None:
monitor_info['type'] = self.params.get('gce_instance_type_monitor')
if monitor_info['disk_type'] is None:
monitor_info['disk_type'] = self.params.get('gce_root_disk_type_monitor')
if monitor_info['disk_size'] is None:
monitor_info['disk_size'] = self.params.get('gce_root_disk_size_monitor')
if monitor_info['n_local_ssd'] is None:
monitor_info['n_local_ssd'] = self.params.get('gce_n_local_ssd_disk_monitor')
user_prefix = self.params.get('user_prefix', None)
gce_datacenter = self.params.get('gce_datacenter', None).split()
service_cls = get_driver(Provider.GCE)
ks = KeyStore()
gcp_credentials = ks.get_gcp_credentials()
services = []
for i in gce_datacenter:
services.append(service_cls(gcp_credentials["project_id"] + "@appspot.gserviceaccount.com",
gcp_credentials["private_key"], datacenter=i,
project=gcp_credentials["project_id"]))
if len(services) > 1:
assert len(services) == len(db_info['n_nodes'])
user_credentials = self.params.get('user_credentials_path', None)
self.credentials.append(UserRemoteCredentials(key_file=user_credentials))
gce_image_db = self.params.get('gce_image_db')
if not gce_image_db:
gce_image_db = self.params.get('gce_image')
gce_image_monitor = self.params.get('gce_image_monitor')
if not gce_image_monitor:
gce_image_monitor = self.params.get('gce_image')
cluster_additional_disks = {'pd-ssd': self.params.get('gce_pd_ssd_disk_size_db', default=0),
'pd-standard': self.params.get('gce_pd_standard_disk_size_db', default=0)}
common_params = dict(gce_image_username=self.params.get('gce_image_username'),
gce_network=self.params.get('gce_network', default='default'),
credentials=self.credentials,
user_prefix=user_prefix,
params=self.params,
)
self.db_cluster = ScyllaGCECluster(gce_image=gce_image_db,
gce_image_type=db_info['disk_type'],
gce_image_size=db_info['disk_size'],
gce_n_local_ssd=db_info['n_local_ssd'],
gce_instance_type=db_info['type'],
services=services,
n_nodes=db_info['n_nodes'],
add_disks=cluster_additional_disks,
gce_datacenter=gce_datacenter,
**common_params)
loader_additional_disks = {'pd-ssd': self.params.get('gce_pd_ssd_disk_size_loader', default=0)}
self.loaders = LoaderSetGCE(gce_image=self.params.get('gce_image'),
gce_image_type=loader_info['disk_type'],
gce_image_size=loader_info['disk_size'],
gce_n_local_ssd=loader_info['n_local_ssd'],
gce_instance_type=loader_info['type'],
service=services[:1],
n_nodes=loader_info['n_nodes'],
add_disks=loader_additional_disks,
**common_params)
if monitor_info['n_nodes'] > 0:
monitor_additional_disks = {'pd-ssd': self.params.get('gce_pd_ssd_disk_size_monitor', default=0)}
self.monitors = MonitorSetGCE(gce_image=gce_image_monitor,
gce_image_type=monitor_info['disk_type'],
gce_image_size=monitor_info['disk_size'],
gce_n_local_ssd=monitor_info['n_local_ssd'],
gce_instance_type=monitor_info['type'],
service=services[:1],
n_nodes=monitor_info['n_nodes'],
add_disks=monitor_additional_disks,
targets=dict(db_cluster=self.db_cluster,
loaders=self.loaders),
**common_params)
else:
self.monitors = NoMonitorSet()
def get_cluster_aws(self, loader_info, db_info, monitor_info):
if loader_info['n_nodes'] is None:
loader_info['n_nodes'] = self.params.get('n_loaders')
if loader_info['type'] is None:
loader_info['type'] = self.params.get('instance_type_loader')
if db_info['n_nodes'] is None:
n_db_nodes = self.params.get('n_db_nodes')
            if isinstance(n_db_nodes, int):  # legacy type
                db_info['n_nodes'] = [n_db_nodes]
            elif isinstance(n_db_nodes, str):  # latest type to support multiple datacenters
db_info['n_nodes'] = [int(n) for n in n_db_nodes.split()]
else:
self.fail('Unsupported parameter type: {}'.format(type(n_db_nodes)))
if db_info['type'] is None:
db_info['type'] = self.params.get('instance_type_db')
if monitor_info['n_nodes'] is None:
monitor_info['n_nodes'] = self.params.get('n_monitor_nodes')
if monitor_info['type'] is None:
monitor_info['type'] = self.params.get('instance_type_monitor')
if monitor_info['disk_size'] is None:
monitor_info['disk_size'] = self.params.get('aws_root_disk_size_monitor', default=None)
if monitor_info['device_mappings'] is None:
if monitor_info['disk_size']:
monitor_info['device_mappings'] = [{
"DeviceName": self.params.get("aws_root_disk_name_monitor", default="/dev/sda1"),
"Ebs": {
"VolumeSize": monitor_info['disk_size'],
"VolumeType": "gp2"
}
}]
else:
monitor_info['device_mappings'] = []
user_prefix = self.params.get('user_prefix', None)
user_credentials = self.params.get('user_credentials_path', None)
services = []
for i in self.params.get('region_name').split():
session = boto3.session.Session(region_name=i)
service = session.resource('ec2')
services.append(service)
if user_credentials:
self.credentials.append(UserRemoteCredentials(key_file=user_credentials))
else:
self.credentials.append(RemoteCredentials(service=service,
key_prefix='sct',
user_prefix=user_prefix))
ec2_security_group_ids = []
for i in self.params.get('security_group_ids').split():
ec2_security_group_ids.append(i.split(','))
ec2_subnet_id = self.params.get('subnet_id').split()
common_params = dict(ec2_security_group_ids=ec2_security_group_ids,
ec2_subnet_id=ec2_subnet_id,
services=services,
credentials=self.credentials,
user_prefix=user_prefix,
params=self.params
)
def create_cluster(db_type='scylla'):
cl_params = dict(
ec2_instance_type=db_info['type'],
ec2_block_device_mappings=db_info['device_mappings'],
n_nodes=db_info['n_nodes']
)
cl_params.update(common_params)
if db_type == 'scylla':
return ScyllaAWSCluster(
ec2_ami_id=self.params.get('ami_id_db_scylla').split(),
ec2_ami_username=self.params.get('ami_db_scylla_user'),
**cl_params)
elif db_type == 'cassandra':
return CassandraAWSCluster(
ec2_ami_id=self.params.get('ami_id_db_cassandra').split(),
ec2_ami_username=self.params.get('ami_db_cassandra_user'),
**cl_params)
db_type = self.params.get('db_type')
if db_type in ('scylla', 'cassandra'):
self.db_cluster = create_cluster(db_type)
elif db_type == 'mixed':
self.db_cluster = create_cluster('scylla')
self.cs_db_cluster = create_cluster('cassandra')
else:
self.error('Incorrect parameter db_type: %s' %
self.params.get('db_type'))
self.loaders = LoaderSetAWS(
ec2_ami_id=self.params.get('ami_id_loader').split(),
ec2_ami_username=self.params.get('ami_loader_user'),
ec2_instance_type=loader_info['type'],
ec2_block_device_mappings=loader_info['device_mappings'],
n_nodes=loader_info['n_nodes'],
**common_params)
if monitor_info['n_nodes'] > 0:
self.monitors = MonitorSetAWS(
ec2_ami_id=self.params.get('ami_id_monitor').split(),
ec2_ami_username=self.params.get('ami_monitor_user'),
ec2_instance_type=monitor_info['type'],
ec2_block_device_mappings=monitor_info['device_mappings'],
n_nodes=monitor_info['n_nodes'],
targets=dict(db_cluster=self.db_cluster,
loaders=self.loaders),
**common_params)
else:
self.monitors = NoMonitorSet()
def get_cluster_libvirt(self, loader_info, db_info, monitor_info):
def _set_from_params(base_dict, dict_key, params_key):
if base_dict.get(dict_key) is None:
conf_dict = dict()
conf_dict[dict_key] = self.params.get(params_key)
return conf_dict
else:
return {}
loader_info.update(_set_from_params(loader_info, 'n_nodes', 'n_loaders'))
loader_info.update(_set_from_params(loader_info, 'image', 'libvirt_loader_image'))
loader_info.update(_set_from_params(loader_info, 'user', 'libvirt_loader_image_user'))
loader_info.update(_set_from_params(loader_info, 'password', 'libvirt_loader_image_password'))
loader_info.update(_set_from_params(loader_info, 'os_type', 'libvirt_loader_os_type'))
loader_info.update(_set_from_params(loader_info, 'os_variant', 'libvirt_loader_os_variant'))
loader_info.update(_set_from_params(loader_info, 'memory', 'libvirt_loader_memory'))
loader_info.update(_set_from_params(loader_info, 'bridge', 'libvirt_bridge'))
loader_info.update(_set_from_params(loader_info, 'uri', 'libvirt_uri'))
db_info.update(_set_from_params(db_info, 'n_nodes', 'n_db_nodes'))
db_info.update(_set_from_params(db_info, 'image', 'libvirt_db_image'))
db_info.update(_set_from_params(db_info, 'user', 'libvirt_db_image_user'))
db_info.update(_set_from_params(db_info, 'password', 'libvirt_db_image_password'))
db_info.update(_set_from_params(db_info, 'os_type', 'libvirt_db_os_type'))
db_info.update(_set_from_params(db_info, 'os_variant', 'libvirt_db_os_variant'))
db_info.update(_set_from_params(db_info, 'memory', 'libvirt_db_memory'))
db_info.update(_set_from_params(db_info, 'bridge', 'libvirt_bridge'))
db_info.update(_set_from_params(db_info, 'uri', 'libvirt_uri'))
monitor_info.update(_set_from_params(monitor_info, 'n_nodes', 'n_monitor_nodes'))
monitor_info.update(_set_from_params(monitor_info, 'image', 'libvirt_monitor_image'))
monitor_info.update(_set_from_params(monitor_info, 'user', 'libvirt_monitor_image_user'))
monitor_info.update(_set_from_params(monitor_info, 'password', 'libvirt_monitor_image_password'))
monitor_info.update(_set_from_params(monitor_info, 'os_type', 'libvirt_monitor_os_type'))
monitor_info.update(_set_from_params(monitor_info, 'os_variant', 'libvirt_monitor_os_variant'))
monitor_info.update(_set_from_params(monitor_info, 'memory', 'libvirt_monitor_memory'))
monitor_info.update(_set_from_params(monitor_info, 'bridge', 'libvirt_bridge'))
monitor_info.update(_set_from_params(monitor_info, 'uri', 'libvirt_uri'))
user_prefix = self.params.get('user_prefix', None)
libvirt_uri = self.params.get('libvirt_uri')
if libvirt_uri is None:
libvirt_uri = 'qemu:///system'
hypervisor = libvirt.open(libvirt_uri)
cluster.set_libvirt_uri(libvirt_uri)
if self.params.get('db_type') == 'scylla':
self.db_cluster = ScyllaLibvirtCluster(domain_info=db_info,
hypervisor=hypervisor,
user_prefix=user_prefix,
n_nodes=db_info['n_nodes'],
params=self.params)
elif self.params.get('db_type') == 'cassandra':
raise NotImplementedError('No cassandra libvirt cluster '
'implementation yet.')
self.loaders = LoaderSetLibvirt(domain_info=loader_info,
hypervisor=hypervisor,
user_prefix=user_prefix,
n_nodes=loader_info['n_nodes'],
params=self.params)
if monitor_info['n_nodes'] > 0:
self.monitors = MonitorSetLibvirt(domain_info=monitor_info,
hypervisor=hypervisor,
user_prefix=user_prefix,
n_nodes=monitor_info['n_nodes'],
params=self.params,
targets=dict(db_cluster=self.db_cluster,
loaders=self.loaders)
)
else:
self.monitors = NoMonitorSet()
def get_cluster_docker(self):
user_credentials = self.params.get('user_credentials_path', None)
self.credentials.append(UserRemoteCredentials(key_file=user_credentials))
params = dict(
docker_image=self.params.get('docker_image', None),
n_nodes=[self.params.get('n_db_nodes')],
user_prefix=self.params.get('user_prefix', None),
credentials=self.credentials,
params=self.params
)
self.db_cluster = docker.ScyllaDockerCluster(**params)
params['n_nodes'] = self.params.get('n_loaders')
self.loaders = docker.LoaderSetDocker(**params)
params['n_nodes'] = int(self.params.get('n_monitor_nodes', default=0))
self.log.warning("Scylla monitoring is currently not supported on Docker")
self.monitors = NoMonitorSet()
def get_cluster_baremetal(self):
user_credentials = self.params.get('user_credentials_path', None)
self.credentials.append(UserRemoteCredentials(key_file=user_credentials))
params = dict(
n_nodes=[self.params.get('n_db_nodes')],
public_ips=self.params.get('db_nodes_public_ip', None),
private_ips=self.params.get('db_nodes_private_ip', None),
user_prefix=self.params.get('user_prefix', None),
credentials=self.credentials,
params=self.params,
targets=dict(db_cluster=self.db_cluster, loaders=self.loaders),
)
self.db_cluster = cluster_baremetal.ScyllaPhysicalCluster(**params)
params['n_nodes'] = self.params.get('n_loaders')
params['public_ips'] = self.params.get('loaders_public_ip')
params['private_ips'] = self.params.get('loaders_private_ip')
self.loaders = cluster_baremetal.LoaderSetPhysical(**params)
params['n_nodes'] = self.params.get('n_monitor_nodes')
params['public_ips'] = self.params.get('monitor_nodes_public_ip')
params['private_ips'] = self.params.get('monitor_nodes_private_ip')
self.monitors = cluster_baremetal.MonitorSetPhysical(**params)
@clean_resources_on_exception
def init_resources(self, loader_info=None, db_info=None,
monitor_info=None):
if loader_info is None:
loader_info = {'n_nodes': None, 'type': None, 'disk_size': None, 'disk_type': None, 'n_local_ssd': None,
'device_mappings': None}
if db_info is None:
db_info = {'n_nodes': None, 'type': None, 'disk_size': None, 'disk_type': None, 'n_local_ssd': None,
'device_mappings': None}
if monitor_info is None:
monitor_info = {'n_nodes': None, 'type': None, 'disk_size': None, 'disk_type': None, 'n_local_ssd': None,
'device_mappings': None}
cluster_backend = self.params.get('cluster_backend')
if cluster_backend is None:
cluster_backend = 'aws'
if cluster_backend == 'aws':
self.get_cluster_aws(loader_info=loader_info, db_info=db_info,
monitor_info=monitor_info)
elif cluster_backend == 'libvirt':
self.get_cluster_libvirt(loader_info=loader_info, db_info=db_info,
monitor_info=monitor_info)
elif cluster_backend == 'openstack':
self.get_cluster_openstack(loader_info=loader_info, db_info=db_info,
monitor_info=monitor_info)
elif cluster_backend == 'gce':
self.get_cluster_gce(loader_info=loader_info, db_info=db_info,
monitor_info=monitor_info)
elif cluster_backend == 'docker':
self.get_cluster_docker()
elif cluster_backend == 'baremetal':
self.get_cluster_baremetal()
seeds_num = self.params.get('seeds_num', default=1)
for i in range(seeds_num):
self.db_cluster.nodes[i].is_seed = True
def _cs_add_node_flag(self, stress_cmd):
if '-node' not in stress_cmd:
if len(self.db_cluster.datacenter) > 1:
                ip = ','.join(self.db_cluster.get_node_public_ips())
else:
ip = self.db_cluster.get_node_private_ips()[0]
stress_cmd = '%s -node %s' % (stress_cmd, ip)
return stress_cmd
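    # _cs_add_node_flag example (hypothetical IPs): 'cassandra-stress write'
    # becomes 'cassandra-stress write -node 10.0.0.1,10.0.0.2' when the
    # cluster spans more than one datacenter.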
@clean_resources_on_exception
def run_stress(self, stress_cmd, duration=None):
stress_cmd = self._cs_add_node_flag(stress_cmd)
stress_queue = self.run_stress_thread(stress_cmd=stress_cmd,
duration=duration)
self.verify_stress_thread(stress_queue)
@clean_resources_on_exception
def run_stress_thread(self, stress_cmd, duration=None, stress_num=1, keyspace_num=1, profile=None, prefix='',
keyspace_name='', round_robin=False, stats_aggregate_cmds=True):
# stress_cmd = self._cs_add_node_flag(stress_cmd)
if duration is None:
duration = self.params.get('test_duration')
timeout = duration * 60 + 600
if self.create_stats:
self.update_stress_cmd_details(stress_cmd, prefix, stresser="cassandra-stress", aggregate=stats_aggregate_cmds)
return self.loaders.run_stress_thread(stress_cmd, timeout,
self.outputdir,
stress_num=stress_num,
keyspace_num=keyspace_num,
keyspace_name=keyspace_name,
profile=profile,
node_list=self.db_cluster.nodes,
round_robin=round_robin)
@clean_resources_on_exception
def run_stress_thread_bench(self, stress_cmd, duration=None, stats_aggregate_cmds=True):
if duration is None:
duration = self.params.get('test_duration')
timeout = duration * 60 + 600
if self.create_stats:
self.update_stress_cmd_details(stress_cmd, stresser="scylla-bench", aggregate=stats_aggregate_cmds)
return self.loaders.run_stress_thread_bench(stress_cmd, timeout,
self.outputdir,
node_list=self.db_cluster.nodes)
def kill_stress_thread(self):
if self.loaders: # the test can fail on provision step and loaders are still not provisioned
if self.params.get('bench_run', default=False):
self.loaders.kill_stress_thread_bench()
else:
self.loaders.kill_stress_thread()
def verify_stress_thread(self, queue):
results, errors = self.loaders.verify_stress_thread(queue, self.db_cluster)
        # Sometimes we might get an epic list of error messages that
        # would make small machines driving the avocado test run out
        # of memory when writing the XML report. Since the error
        # message is merely informational, let's simply use the last
        # 5 lines for the final error message.
if results and self.create_stats:
self.update_stress_results(results)
if not results:
            self.log.warning('There are no stress results, probably the stress thread has failed.')
errors = errors[-5:]
if errors:
self.fail("cassandra-stress errors on "
"nodes:\n%s" % "\n".join(errors))
@clean_resources_on_exception
def get_stress_results(self, queue, store_results=True):
results = self.loaders.get_stress_results(queue)
if store_results and self.create_stats:
self.update_stress_results(results)
return results
@clean_resources_on_exception
def get_stress_results_bench(self, queue):
results = self.loaders.get_stress_results_bench(queue)
if self.create_stats:
self.update_stress_results(results)
return results
def get_auth_provider(self, user, password):
return PlainTextAuthProvider(username=user, password=password)
def _create_session(self, node, keyspace, user, password, compression,
protocol_version, load_balancing_policy=None,
port=None, ssl_opts=None):
node_ips = [node.public_ip_address]
if not port:
port = 9042
if protocol_version is None:
protocol_version = 3
authenticator = self.params.get('authenticator')
if user is None and password is None and (authenticator and authenticator == 'PasswordAuthenticator'):
user = 'cassandra'
password = 'cassandra'
if user is not None:
auth_provider = self.get_auth_provider(user=user,
password=password)
else:
auth_provider = None
cluster = ClusterDriver(node_ips, auth_provider=auth_provider,
compression=compression,
protocol_version=protocol_version,
load_balancing_policy=load_balancing_policy,
default_retry_policy=FlakyRetryPolicy(),
port=port, ssl_options=ssl_opts,
connect_timeout=100)
session = cluster.connect()
# temporarily increase client-side timeout to 1m to determine
# if the cluster is simply responding slowly to requests
session.default_timeout = 60.0
if keyspace is not None:
session.set_keyspace(keyspace)
# override driver default consistency level of LOCAL_QUORUM
session.default_consistency_level = ConsistencyLevel.ONE
self.connections.append(session)
return session
def cql_connection(self, node, keyspace=None, user=None,
password=None, compression=True, protocol_version=None,
port=None, ssl_opts=None):
wlrr = WhiteListRoundRobinPolicy(self.db_cluster.get_node_public_ips())
return self._create_session(node, keyspace, user, password,
compression, protocol_version, wlrr,
port=port, ssl_opts=ssl_opts)
def cql_connection_exclusive(self, node, keyspace=None, user=None,
password=None, compression=True,
protocol_version=None, port=None,
ssl_opts=None):
wlrr = WhiteListRoundRobinPolicy([node.public_ip_address])
return self._create_session(node, keyspace, user, password,
compression, protocol_version, wlrr,
port=port, ssl_opts=ssl_opts)
def cql_connection_patient(self, node, keyspace=None,
user=None, password=None, timeout=30,
compression=True, protocol_version=None,
port=None, ssl_opts=None):
"""
Returns a connection after it stops throwing NoHostAvailables.
If the timeout is exceeded, the exception is raised.
"""
return retry_till_success(self.cql_connection,
node,
keyspace=keyspace,
user=user,
password=password,
timeout=timeout,
compression=compression,
protocol_version=protocol_version,
port=port,
ssl_opts=ssl_opts,
bypassed_exception=NoHostAvailable)
def cql_connection_patient_exclusive(self, node, keyspace=None,
user=None, password=None, timeout=30,
compression=True,
protocol_version=None,
port=None, ssl_opts=None):
"""
Returns a connection after it stops throwing NoHostAvailables.
If the timeout is exceeded, the exception is raised.
"""
return retry_till_success(self.cql_connection_exclusive,
node,
keyspace=keyspace,
user=user,
password=password,
timeout=timeout,
compression=compression,
protocol_version=protocol_version,
port=port,
ssl_opts=ssl_opts,
bypassed_exception=NoHostAvailable)
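    # Usage sketch (hypothetical test code; `node` is any cluster node):
    #
    #     session = self.cql_connection_patient(node, keyspace='ks1')
    #     session.execute('SELECT now() FROM system.local')
    #
    # The "patient" variants keep retrying through NoHostAvailable errors
    # until `timeout` expires, which is handy right after a node restart.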
def create_ks(self, session, name, rf):
query = 'CREATE KEYSPACE IF NOT EXISTS %s WITH replication={%s}'
if isinstance(rf, types.IntType):
# we assume simpleStrategy
session.execute(query % (name,
"'class':'SimpleStrategy', "
"'replication_factor':%d" % rf))
else:
assert len(rf) != 0, "At least one datacenter/rf pair is needed"
# we assume networkTopologyStrategy
options = ', '.join(['\'%s\':%d' % (d, r) for
d, r in rf.iteritems()])
session.execute(query % (name,
"'class':'NetworkTopologyStrategy', %s" %
options))
session.execute('USE %s' % name)
def create_cf(self, session, name, key_type="varchar",
speculative_retry=None, read_repair=None, compression=None,
gc_grace=None, columns=None,
compact_storage=False, in_memory=False):
additional_columns = ""
if columns is not None:
for k, v in columns.items():
additional_columns = "%s, %s %s" % (additional_columns, k, v)
if additional_columns == "":
query = ('CREATE COLUMNFAMILY IF NOT EXISTS %s (key %s, c varchar, v varchar, '
'PRIMARY KEY(key, c)) WITH comment=\'test cf\'' %
(name, key_type))
else:
query = ('CREATE COLUMNFAMILY IF NOT EXISTS %s (key %s PRIMARY KEY%s) '
'WITH comment=\'test cf\'' %
(name, key_type, additional_columns))
if compression is not None:
query = ('%s AND compression = { \'sstable_compression\': '
'\'%sCompressor\' }' % (query, compression))
else:
# if a compression option is omitted, C*
# will default to lz4 compression
query += ' AND compression = {}'
if read_repair is not None:
query = '%s AND read_repair_chance=%f' % (query, read_repair)
if gc_grace is not None:
query = '%s AND gc_grace_seconds=%d' % (query, gc_grace)
if speculative_retry is not None:
query = ('%s AND speculative_retry=\'%s\'' %
(query, speculative_retry))
if in_memory:
query += " AND in_memory=true AND compaction={'class': 'InMemoryCompactionStrategy'}"
if compact_storage:
query += ' AND COMPACT STORAGE'
session.execute(query)
time.sleep(0.2)
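    # For illustration, create_cf(session, 'cf1', compression='LZ4',
    # gc_grace=0) executes roughly:
    #
    #     CREATE COLUMNFAMILY IF NOT EXISTS cf1 (key varchar, c varchar,
    #     v varchar, PRIMARY KEY(key, c)) WITH comment='test cf'
    #     AND compression = { 'sstable_compression': 'LZ4Compressor' }
    #     AND gc_grace_seconds=0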
def truncate_cf(self, ks_name, table_name, session):
try:
session.execute('TRUNCATE TABLE {0}.{1}'.format(ks_name, table_name))
except Exception as e:
self.log.debug('Failed to truncate base table {0}.{1}. Error: {2}'.format(ks_name, table_name, e.message))
def create_materialized_view(self, ks_name, base_table_name, mv_name, mv_partition_key, mv_clustering_key, session,
mv_columns='*', speculative_retry=None, read_repair=None, compression=None,
gc_grace=None, columns=None, compact_storage=False):
mv_columns_str = mv_columns
if isinstance(mv_columns, list):
mv_columns_str = ', '.join(c for c in mv_columns)
where_clause = []
        # wrap a single column name in a list (list() on a string would
        # split it into individual characters)
        mv_partition_key = mv_partition_key if isinstance(mv_partition_key, list) else [mv_partition_key]
        mv_clustering_key = mv_clustering_key if isinstance(mv_clustering_key, list) else [mv_clustering_key]
for kc in mv_partition_key + mv_clustering_key:
where_clause.append('{} is not null'.format(kc))
pk_clause = ', '.join(pk for pk in mv_partition_key)
cl_clause = ', '.join(cl for cl in mv_clustering_key)
query = 'CREATE MATERIALIZED VIEW {ks}.{mv_name} AS SELECT {mv_columns} FROM {ks}.{table_name} ' \
'WHERE {where_clause} PRIMARY KEY ({pk}, {cl}) WITH comment=\'test MV\''.format(ks=ks_name, mv_name=mv_name, mv_columns=mv_columns_str,
table_name=base_table_name, where_clause=' and '.join(wc for wc in where_clause),
pk=pk_clause, cl=cl_clause)
if compression is not None:
query = ('%s AND compression = { \'sstable_compression\': '
'\'%sCompressor\' }' % (query, compression))
if read_repair is not None:
query = '%s AND read_repair_chance=%f' % (query, read_repair)
if gc_grace is not None:
query = '%s AND gc_grace_seconds=%d' % (query, gc_grace)
if speculative_retry is not None:
query = ('%s AND speculative_retry=\'%s\'' %
(query, speculative_retry))
if compact_storage:
query += ' AND COMPACT STORAGE'
self.log.debug('MV create statement: {}'.format(query))
session.execute(query)
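    # For illustration, create_materialized_view('ks1', 't1', 'mv1', ['c'],
    # ['key'], session) generates roughly:
    #
    #     CREATE MATERIALIZED VIEW ks1.mv1 AS SELECT * FROM ks1.t1
    #     WHERE c is not null and key is not null
    #     PRIMARY KEY (c, key) WITH comment='test MV'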
def _wait_for_view(self, cluster, session, ks, view):
self.log.debug("Waiting for view {}.{} to finish building...".format(ks, view))
def _view_build_finished(live_nodes_amount):
result = self.rows_to_list(session.execute("SELECT status FROM system_distributed.view_build_status WHERE keyspace_name='{0}' "
"AND view_name='{1}'".format(ks, view)))
self.log.debug('View build status result: {}'.format(result))
return len([status for status in result if status[0] == 'SUCCESS']) >= live_nodes_amount
attempts = 20
nodes_status = cluster.get_nodetool_status()
live_nodes_amount = 0
for dc in nodes_status.itervalues():
for ip in dc.itervalues():
if ip['state'] == 'UN':
live_nodes_amount += 1
while attempts > 0:
if _view_build_finished(live_nodes_amount):
return
time.sleep(3)
attempts -= 1
raise Exception("View {}.{} not built".format(ks, view))
    def _wait_for_view_build_start(self, session, ks, view, seconds_to_wait=20):
def _check_build_started():
result = self.rows_to_list(session.execute("SELECT last_token FROM system.views_builds_in_progress "
"WHERE keyspace_name='{0}' AND view_name='{1}'".format(ks, view)))
self.log.debug('View build in progress: {}'.format(result))
return result != []
self.log.debug("Ensure view building started.")
start = time.time()
while not _check_build_started():
if time.time() - start > seconds_to_wait:
raise Exception("View building didn't start in {} seconds".format(seconds_to_wait))
@staticmethod
def rows_to_list(rows):
return [list(row) for row in rows]
def clean_resources(self):
self.log.debug('Cleaning up resources used in the test')
self.kill_stress_thread()
db_cluster_errors = None
db_cluster_coredumps = None
if self.db_cluster is not None:
db_cluster_errors = self.db_cluster.get_node_database_errors()
self.db_cluster.get_backtraces()
db_cluster_coredumps = self.db_cluster.coredumps
for current_nemesis in self.db_cluster.nemesis:
current_nemesis.report()
# Stopping nemesis, using timeout of 30 minutes, since replace/decommission node can take time
self.db_cluster.stop_nemesis(timeout=1800)
# TODO: this should be run in parallel
for node in self.db_cluster.nodes:
node.stop_task_threads(timeout=60)
if self._failure_post_behavior == 'destroy':
self.db_cluster.destroy()
self.db_cluster = None
if self.cs_db_cluster:
self.cs_db_cluster.destroy()
elif self._failure_post_behavior == 'stop':
for node in self.db_cluster.nodes:
node.instance.stop()
self.db_cluster = None
if self.loaders is not None:
self.loaders.get_backtraces()
if self._failure_post_behavior == 'destroy':
self.loaders.destroy()
self.loaders = None
elif self._failure_post_behavior == 'stop':
for node in self.loaders.nodes:
node.instance.stop()
                self.loaders = None
if self.monitors is not None:
self.monitors.get_backtraces()
if self._failure_post_behavior == 'destroy':
self.monitors.destroy()
self.monitors = None
elif self._failure_post_behavior == 'stop':
for node in self.monitors.nodes:
node.instance.stop()
self.monitors = None
if self.credentials is not None:
cluster.remove_cred_from_cleanup(self.credentials)
if self._failure_post_behavior == 'destroy':
for cr in self.credentials:
cr.destroy()
self.credentials = []
self.update_test_details(db_cluster_errors, db_cluster_coredumps)
if db_cluster_coredumps:
self.fail('Found coredumps on DB cluster nodes: %s' %
db_cluster_coredumps)
if db_cluster_errors:
self.log.error('Errors found on DB node logs:')
for node_errors in db_cluster_errors:
for node_name in node_errors:
for (index, line) in node_errors[node_name]:
self.log.error('%s: L%s -> %s',
node_name, index + 1, line.strip())
self.fail('Errors found on DB node logs (see test logs)')
def tearDown(self):
self.clean_resources()
def populate_data_parallel(self, size_in_gb, blocking=True, read=False):
base_cmd = "cassandra-stress write cl=QUORUM "
if read:
base_cmd = "cassandra-stress read cl=ONE "
stress_fixed_params = " -schema 'replication(factor=3) compaction(strategy=LeveledCompactionStrategy)' " \
"-port jmx=6868 -mode cql3 native -rate threads=200 -col 'size=FIXED(1024) n=FIXED(1)' "
stress_keys = "n="
population = " -pop seq="
total_keys = size_in_gb * 1024 * 1024
n_loaders = self.params.get('n_loaders')
keys_per_node = total_keys / n_loaders
write_queue = list()
start = 1
for i in range(1, n_loaders + 1):
stress_cmd = base_cmd + stress_keys + str(keys_per_node) + population + str(start) + ".." + \
str(keys_per_node * i) + stress_fixed_params
start = keys_per_node * i + 1
write_queue.append(self.run_stress_thread(stress_cmd=stress_cmd, round_robin=True))
time.sleep(3)
if blocking:
for stress in write_queue:
self.verify_stress_thread(queue=stress)
return write_queue
@log_run_info
def alter_table_to_in_memory(self, key_space_name="keyspace1", table_name="standard1", node=None):
if not node:
node = self.db_cluster.nodes[0]
compaction_strategy = "%s" % {"class": "InMemoryCompactionStrategy"}
cql_cmd = "ALTER table {key_space_name}.{table_name} " \
"WITH in_memory=true AND compaction={compaction_strategy}".format(**locals())
node.remoter.run('cqlsh -e "{}" {}'.format(cql_cmd, node.private_ip_address), verbose=True)
def get_num_of_hint_files(self, node):
result = node.remoter.run("sudo find {0.scylla_hints_dir} -name *.log -type f| wc -l".format(self),
verbose=True)
total_hint_files = int(result.stdout.strip())
self.log.debug("Number of hint files on '%s': %s." % (node.name, total_hint_files))
return total_hint_files
def get_num_shards(self, node):
result = node.remoter.run("sudo ls -1 {0.scylla_hints_dir}| wc -l".format(self), verbose=True)
return int(result.stdout.strip())
@retrying(n=3, sleep_time=15, allowed_exceptions=(AssertionError,))
def hints_sending_in_progress(self):
q = "sum(rate(scylla_hints_manager_sent{}[15s]))"
now = time.time()
# check status of sending hints during last minute range
results = self.prometheusDB.query(query=q, start=now - 60, end=now)
self.log.debug("scylla_hints_manager_sent: %s" % results)
assert results, "No results from Prometheus"
# if all are zeros the result will be False, otherwise we are still sending
return any([float(v[1]) for v in results[0]["values"]])
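    # The Prometheus range query above returns a structure shaped roughly
    # like [{'metric': {...}, 'values': [[<ts>, '<value>'], ...]}], which
    # is why the code indexes results[0]["values"] and float()-casts v[1].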
@retrying(n=30, sleep_time=60, allowed_exceptions=(AssertionError, CmdError))
def wait_for_hints_to_be_sent(self, node, num_dest_nodes):
num_shards = self.get_num_shards(node)
hints_after_send_completed = num_shards * num_dest_nodes
# after hints were sent to all nodes, the number of files should be 1 per shard per destination
        assert self.get_num_of_hint_files(node) <= hints_after_send_completed, \
            "Expected the number of hint files to drop to %s" % hints_after_send_completed
        assert self.hints_sending_in_progress() is False, \
            "Expected the Prometheus hints-sent counter to stop changing"
def verify_no_drops_and_errors(self, starting_from):
q_dropped = "sum(rate(scylla_hints_manager_dropped{}[15s]))"
q_errors = "sum(rate(scylla_hints_manager_errors{}[15s]))"
queries_to_check = [q_dropped, q_errors]
for q in queries_to_check:
results = self.prometheusDB.query(query=q, start=starting_from, end=time.time())
err_msg = "There were hint manager %s detected during the test!" % "drops" if "dropped" in q else "errors"
assert any([float(v[1]) for v in results[0]["values"]]) is False, err_msg
def get_data_set_size(self, cs_cmd):
""":returns value of n in stress comand, that is approximation and currently doesn't take in consideration
column size definitions if they present in the command
"""
try:
            return int(re.search(r"n=(\d+) ", cs_cmd).group(1))
except Exception:
self.fail("Unable to get data set size from cassandra-stress command: %s" % cs_cmd)
@retrying(n=60, sleep_time=60, allowed_exceptions=(AssertionError,))
def wait_data_dir_reaching(self, size, node):
q = '(sum(node_filesystem_size{{mountpoint="{0.scylla_dir}", ' \
'instance=~"{1.private_ip_address}"}})-sum(node_filesystem_avail{{mountpoint="{0.scylla_dir}", ' \
'instance=~"{1.private_ip_address}"}}))'.format(self, node)
res = self.prometheusDB.query(query=q, start=time.time(), end=time.time())
assert res, "No results from Prometheus"
used = int(res[0]["values"][0][1]) / (2 ** 10)
assert used >= size, "Waiting for Scylla data dir to reach '{size}', " \
"current size is: '{used}'".format(**locals())
def check_regression(self):
ra = PerformanceResultsAnalyzer(es_index=self._test_index, es_doc_type=self._es_doc_type,
send_email=self.params.get('send_email', default=True),
email_recipients=self.params.get('email_recipients', default=None))
        is_gce = self.params.get('cluster_backend') == 'gce'
try:
ra.check_regression(self._test_id, is_gce)
except Exception as ex:
self.log.exception('Failed to check regression: %s', ex)
# Wait for up to 40 mins that there are no running compactions
@retrying(n=40, sleep_time=60, allowed_exceptions=(AssertionError,))
def wait_no_compactions_running(self):
q = "sum(scylla_compaction_manager_compactions{})"
now = time.time()
results = self.prometheusDB.query(query=q, start=now - 60, end=now)
self.log.debug("scylla_hints_manager_sent: %s" % results)
assert results, "No results from Prometheus"
# if all are zeros the result will be False, otherwise there are still compactions
assert any([float(v[1]) for v in results[0]["values"]]) is False, \
"Waiting until all compactions settle down"
def run_fstrim_on_all_db_nodes(self):
"""
This function will run fstrim command all db nodes in the cluster to clear any bad state of the disks.
:return:
"""
for node in self.db_cluster.nodes:
node.remoter.run('sudo fstrim -v /var/lib/scylla')
| agpl-3.0 | -6,798,341,341,531,159,000 | 48.770067 | 155 | 0.554305 | false |
bchartoff/regexcalibur | globals.py | 1 | 1502 | import os
from mobs import *
from inventory import *
import terminaloutput
modify_text_file = terminaloutput.modify_text_file
def init():
global gameover, lives, active_input, active_screen, active_message, start_inventory, active_inventory, inventory, active_hit_strings, active_miss_strings, num_mobs, hit_strings, miss_strings, active_hit_mob, active_miss_mob, hit_mobs, miss_mobs
gameover = False
lives = 5
active_input = ""
active_screen = None
active_message = None
inventory_path = "inventory/descriptions/"
num_inventory = len([name for name in os.listdir(inventory_path) if os.path.splitext(inventory_path+name)[-1].lower() == ".txt"])
start_inventory = [Inventory(i) for i in range(0,1)]
active_inventory = start_inventory
inventory = [Inventory(i) for i in range(0,num_inventory)]
hit_path = "mobs/hit_mobs/hit_strings/"
miss_path = "mobs/miss_mobs/miss_strings/"
hit_strings = [modify_text_file(hit_path+fn) for fn in os.listdir(hit_path) if os.path.splitext(hit_path+fn)[-1].lower() == ".txt"]
miss_strings = [modify_text_file(miss_path+fn) for fn in os.listdir(miss_path) if os.path.splitext(miss_path+fn)[-1].lower() == ".txt"]
active_hit_strings = []
active_miss_strings = []
num_mobs = len([name for name in os.listdir(hit_path) if os.path.splitext(hit_path+name)[-1].lower() == ".txt"])
active_hit_mob = Mob("hit",0)
active_miss_mob = Mob("miss",0)
hit_mobs = [Mob("hit",i) for i in range(0,num_mobs)]
miss_mobs = [Mob("miss",i) for i in range(0,num_mobs)]
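# Usage sketch (hypothetical caller): import this module once at startup and
# call init() before touching any of the globals above, e.g.
#
#     import globals
#     globals.init()
#     print globals.lives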
| mit | -7,099,374,207,322,111,000 | 44.515152 | 246 | 0.706391 | false |
erts/pyTCP2UDPBrige | brige.py | 1 | 2436 | import threading
import socket
import sys
LISTEN_TO_UDP_PORT = 5000
SEND_TO_UDP_IP = "127.0.0.1"
SEND_TO_UDP_PORT = 5000
TCP_SERVER_IP = "127.0.0.1"
TCP_SERVER_PORTS = [5000,5001,5002,]
class TCP_UDP_forwarder(threading.Thread):
def __init__(self, tcp_server_ip, tcp_server_port):
threading.Thread.__init__(self)
self.tcp_server_ip = tcp_server_ip
self.tcp_server_port = tcp_server_port
self.tcp_server_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.tcp_server_sock.bind((self.tcp_server_ip,self.tcp_server_port))
self.tcp_server_sock.listen(5)
def run(self):
self.run_tcp_listener()
def run_tcp_listener(self):
while True:
connection, client_address = self.tcp_server_sock.accept()
try:
print >>sys.stderr, 'connection from', client_address
while True:
data = connection.recv(1024)
print >>sys.stderr, 'Address %s:%d received "%s"' % (self.tcp_server_ip, self.tcp_server_port, data)
if data:
self.send_data_over_udp_client(data)
print >>sys.stderr, 'sending data to the client'
connection.sendall(data)
else:
print >>sys.stderr, 'no more data from', client_address
break
finally:
connection.close()
def send_data_over_udp_client(self,data):
print "Send data to server over UDP, data: " + str(data)
#client_udp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#client_udp_socket.sendto(data,(SEND_TO_UDP_IP,SEND_TO_UDP_PORT))
        #For testing only! Should be removed in the future.
#reveive_data, address = client_udp_socket.recvfrom(1024)
#print str(reveive_data) + " from " +str(address)
def main():
#manage list of forwarders
tcp2udp_forwarders = []
for port in TCP_SERVER_PORTS:
print "Create address %s:%d ", TCP_SERVER_IP,port
tcp2udp_forwarder = TCP_UDP_forwarder(TCP_SERVER_IP,port)
tcp2udp_forwarders.append(tcp2udp_forwarder)
tcp2udp_forwarder.start()
for forward in tcp2udp_forwarders:
forward.join()
#manage listener and tcp clients
if "__main__" == __name__:
main()
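# Quick manual test sketch (hypothetical; run from a separate shell while the
# bridge is up). It connects to one of TCP_SERVER_PORTS and expects the
# payload to be echoed back by the forwarder:
#
#     import socket
#     s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#     s.connect(("127.0.0.1", 5000))
#     s.sendall("hello")
#     print s.recv(1024)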
| mit | 4,556,852,890,959,875,600 | 33.814286 | 120 | 0.574302 | false |
jhuttner/flake8-import-order | flake8_import_order/checker.py | 1 | 1888 | import ast
import pycodestyle
from flake8_import_order import ImportVisitor
from flake8_import_order.styles import (
Cryptography, Google, PEP8, Smarkets,
)
DEFAULT_IMPORT_ORDER_STYLE = 'cryptography'
class ImportOrderChecker(object):
visitor_class = ImportVisitor
options = None
def __init__(self, filename, tree):
self.tree = tree
self.filename = filename
self.lines = None
def load_file(self):
if self.filename in ("stdin", "-", None):
self.filename = "stdin"
self.lines = pycodestyle.stdin_get_value().splitlines(True)
else:
self.lines = pycodestyle.readlines(self.filename)
if not self.tree:
self.tree = ast.parse("".join(self.lines))
def error(self, error):
raise NotImplemented()
def check_order(self):
if not self.tree or not self.lines:
self.load_file()
visitor = self.visitor_class(
self.options.get('application_import_names', []),
self.options.get('application_package_names', []),
)
visitor.visit(self.tree)
imports = []
for import_ in visitor.imports:
if not pycodestyle.noqa(self.lines[import_.lineno - 1]):
imports.append(import_)
style_option = self.options.get(
'import_order_style', DEFAULT_IMPORT_ORDER_STYLE,
)
if style_option == 'cryptography':
style = Cryptography(imports)
elif style_option == 'google':
style = Google(imports)
elif style_option == 'pep8':
style = PEP8(imports)
elif style_option == 'smarkets':
style = Smarkets(imports)
else:
raise AssertionError("Unknown style {}".format(style_option))
for error in style.check():
yield self.error(error)
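# Subclassing sketch (hypothetical; that the yielded error objects expose
# lineno/code/message is an assumption of this example): concrete checkers
# override error(), which the base class leaves unimplemented.
#
#     class MyChecker(ImportOrderChecker):
#         def error(self, error):
#             return (error.lineno, "I%d %s" % (error.code, error.message))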
| lgpl-3.0 | -7,491,054,521,117,362,000 | 28.046154 | 73 | 0.587924 | false |
amudalab/concept-graphs | document retrieval/doc_ret_bfs.py | 1 | 1509 | def BFS(graph,root,k):
    # NOTE: this function relies on three names assumed to be defined at
    # module level elsewhere in the project:
    #   A       - weight/adjacency matrix, A[u][v] is the edge weight
    #   fil     - output file handle for edges between the first k levels
    #   fil_out - output file handle for every traversed edge
    checked = []
    visited = []
    level = []
    l = []
    l.append(root)
    level.append(l)
    count = 0  # only used by the commented-out per-level write below
    checked.append(root)
    while len(checked) > 0:
        v = checked.pop(0)
        visited.append(v)
        l = []
        for edge in graph[v]:
            #l = list(set(graph[v]) | set(l))
            if edge not in checked and edge not in visited:
                checked.append(edge)
                str1 = "v" + str(v) + "," + "v" + str(edge) + "," + "false" + "," + str(A[v][edge]) + "," + "true\n"
                fil_out.write(str1)
##                if count < k:
##                    str1 = "v" + str(v) + "," + "v" + str(edge) + "," + "false" + "," + str(A[v][edge]) + "," + "true\n"
##                    fil.write(str1)
        # collect the neighbours of the current deepest level ...
        for edge in level[(len(level) - 1)]:
            l = list(set(graph[edge]) | set(l))
        # ... and drop every node already placed on an earlier level
        for i in range(len(level)):
            for j in level[i]:
                if j in l:
                    l.remove(j)
        if len(l) > 0:
            level.append(l)
    print len(level)
    # write the edges that connect each of the first k levels to the next
    for i in range(k - 1):
        visit = []
        for each_node in level[i]:
            inter = list(set(graph[each_node]) & set(level[i + 1]))
            for each_inter in inter:
                if each_inter not in visit:
                    str1 = "v" + str(each_node) + "," + "v" + str(each_inter) + "," + "false" + "," + str(A[each_node][each_inter]) + "," + "true\n"
                    fil.write(str1)
                    visit.append(each_inter)
    print(level)
    print(len(level))
| mit | 4,241,411,692,561,005,000 | 33.093023 | 126 | 0.442015 | false |
ChristopheVuillot/qiskit-sdk-py | qiskit/extensions/standard/cy.py | 1 | 2323 | # -*- coding: utf-8 -*-
# Copyright 2017 IBM RESEARCH. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""
controlled-Y gate.
"""
from qiskit import QuantumCircuit
from qiskit import Gate
from qiskit import CompositeGate
from qiskit.extensions.standard import header
from qiskit._quantumregister import QuantumRegister
from qiskit._instructionset import InstructionSet
class CyGate(Gate):
"""controlled-Y gate."""
def __init__(self, ctl, tgt, circ=None):
"""Create new CY gate."""
super(CyGate, self).__init__("cy", [], [ctl, tgt], circ)
def qasm(self):
"""Return OPENQASM string."""
ctl = self.arg[0]
tgt = self.arg[1]
return self._qasmif("cy %s[%d],%s[%d];" % (ctl[0].name, ctl[1],
tgt[0].name, tgt[1]))
def inverse(self):
"""Invert this gate."""
return self # self-inverse
def reapply(self, circ):
"""Reapply this gate to corresponding qubits in circ."""
self._modifiers(circ.cy(self.arg[0], self.arg[1]))
def cy(self, ctl, tgt):
"""Apply CY to circuit."""
if isinstance(ctl, QuantumRegister) and \
isinstance(tgt, QuantumRegister) and len(ctl) == len(tgt):
        # apply cy to qubits between two registers
instructions = InstructionSet()
for i in range(ctl.size):
instructions.add(self.cy((ctl, i), (tgt, i)))
return instructions
else:
self._check_qubit(ctl)
self._check_qubit(tgt)
self._check_dups([ctl, tgt])
return self._attach(CyGate(ctl, tgt, self))
QuantumCircuit.cy = cy
CompositeGate.cy = cy
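# Usage sketch (assumes this SDK revision's QuantumRegister(name, size) and
# QuantumCircuit(*registers) constructors):
#
#     c = QuantumRegister("c", 2)
#     t = QuantumRegister("t", 2)
#     qc = QuantumCircuit(c, t)
#     qc.cy(c[0], t[0])   # single control/target pair
#     qc.cy(c, t)         # register-wide, pairs c[i] with t[i]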
| apache-2.0 | -7,041,353,571,288,676,000 | 32.161765 | 79 | 0.598795 | false |
swapagarwal/JARVIS-on-Messenger | modules/src/video.py | 1 | 2005 | import os
import requests
import requests_cache
import config
from templates.generic import *
from templates.text import TextTemplate
from utils.YouTube import YouTubeUtil
YOUTUBE_DATA_API_KEY = os.environ.get('YOUTUBE_DATA_API_KEY', config.YOUTUBE_DATA_API_KEY)
def process(input, entities):
output = {}
try:
video = entities['video'][0]['value']
with requests_cache.enabled('video_cache', backend='sqlite', expire_after=3600):
r = requests.get(
'https://www.googleapis.com/youtube/v3/search?part=snippet&maxResults=10&q=' + video + '&type=video&key=' + YOUTUBE_DATA_API_KEY)
data = r.json()
template = GenericTemplate()
videos = [item for item in data['items'] if item['id']['kind'] == 'youtube#video']
for item in videos:
title = item['snippet']['title']
item_url = YouTubeUtil.get_video_url(item['id']['videoId'])
image_url = item['snippet']['thumbnails']['high']['url']
subtitle = item['snippet']['channelTitle']
buttons = ButtonTemplate()
buttons.add_web_url('YouTube Link', YouTubeUtil.get_video_url(item['id']['videoId']))
buttons.add_web_url('Channel Link', YouTubeUtil.get_channel_url(item['snippet']['channelId']))
template.add_element(title=title, item_url=item_url, image_url=image_url, subtitle=subtitle,
buttons=buttons.get_buttons())
output['input'] = input
output['output'] = template.get_message()
output['success'] = True
except:
error_message = 'I couldn\'t find any videos matching your query.'
error_message += '\nPlease ask me something else, like:'
error_message += '\n - sia videos'
error_message += '\n - videos by eminem'
error_message += '\n - video coldplay'
output['error_msg'] = TextTemplate(error_message).get_message()
output['success'] = False
return output
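# Input sketch (hypothetical wit.ai-style payload; process() above only reads
# entities['video'][0]['value']):
#
#     entities = {'video': [{'value': 'coldplay'}]}
#     response = process('video coldplay', entities)
#     # response['output'] holds a Messenger generic template on success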
| mit | 7,995,285,015,709,858,000 | 43.555556 | 145 | 0.615461 | false |
aakechin/BRCA-analyzer | joinMutations.py | 1 | 3013 | # This script joins the mutations of all patients into one file
# Use the following arguments:
# [1] - regular expression for variants files
# [2] - name for resultFile
## v4 - added argparse
## v5 - join mutations of avinput (annovar input)
import glob
import sys
import argparse
import re
def showPercWork(done,allWork):
import sys
percDoneWork=round((done/allWork)*100,1)
sys.stdout.write("\r"+str(percDoneWork)+"%")
sys.stdout.flush()
# Reading arguments
par=argparse.ArgumentParser(description='This script joins mutation of all patients to one file')
par.add_argument('--varFiles','-v',dest='varFiles',type=str,help='regular expression for choosing files with variations',required=True)
par.add_argument('--outFile','-o',dest='outFile',type=str,help='directory for output',required=True)
args=par.parse_args()
ds=glob.glob(args.varFiles)
if len(ds)==0:
    print('ERROR: No files were selected! Perhaps you wrote "~/" when you should use /home/USERNAME/')
print(args.varFiles)
exit(0)
elif len(ds)==1:
print('WARNING: Only one file was selected!')
print(args.varFiles)
titlesCheck=False
allWork=len(ds)
showPercWork(0,allWork)
allData={}
annp=re.compile('ANN\=([^\;]+)')
for i,d in enumerate(ds):
file=open(d)
dPart=d[d.rfind('/')+1:]
patName=dPart[:dPart.index('.')]
adi=1
for string in file:
if 'CHROM' in string:
if not titlesCheck:
titlesCheck=True
continue
cols=string[:-1].split('\t')
        # If there are no alternative alleles
        ## or there is strand bias, skip this line
if cols[12]=='.' or 'SB' in cols[14]:
continue
qual=cols[13]
adColNum=cols[16].split(':').index('AD')
dpColNum=cols[16].split(':').index('DP')
ads=cols[17].split(':')[adColNum].split(',')
dp=cols[17].split(':')[dpColNum]
adNum=len(ads)
        # First time we encounter a locus with several alleles
if adNum>2:
            # Here we write the whole coverage and the alt coverage;
            ## the reference coverage will be derived from these later
ad=dp+','+ads[adi]
if adi<adNum-1:
adi+=1
elif adi==adNum-1:
adi=1
else:
            # Here we write the whole coverage and the alt coverage;
            ## the reference coverage will be derived from these later
ad=','.join([dp,ads[1]])
adi=1
annm=annp.findall(cols[15])
ann=annm[0]
pos=cols[9]
ref=cols[11]
alt=cols[12]
key='\t'.join(cols[:5])
if key not in allData.keys():
allData[key]=[patName,qual,ad,ann,pos,ref,alt]
else:
allData[key][0]+='|'+patName
allData[key][1]+='|'+qual
allData[key][2]+='|'+ad
file.close()
showPercWork(i+1,allWork)
resultFile=open(args.outFile,'w')
for key,item in allData.items():
resultFile.write(key+'\t'+'\t'.join(item)+'\n')
resultFile.close()
print()
| gpl-3.0 | 6,838,469,630,968,487,000 | 31.397849 | 135 | 0.599734 | false |
alope107/nbgrader | nbgrader/tests/formgrader/manager.py | 2 | 5349 | import time
import os
import subprocess as sp
from textwrap import dedent
from nbgrader.tests import start_subprocess, copy_coverage_files
# to add a new manager for the tests, you MUST add it to this list of classes
__all__ = [
"DefaultManager",
"HubAuthManager",
"HubAuthTokenManager",
"HubAuthCustomUrlManager"
]
class DefaultManager(object):
nbgrader_config = dedent(
"""
c = get_config()
c.NoAuth.nbserver_port = 9001
c.FormgradeApp.port = 9000
"""
)
base_formgrade_url = "http://localhost:9000/"
base_notebook_url = "http://localhost:9001/notebooks/"
def __init__(self, tempdir, startup_wait=5, shutdown_wait=5):
self.tempdir = tempdir
self.startup_wait = startup_wait
self.shutdown_wait = shutdown_wait
self.formgrader = None
self.jupyterhub = None
self.env = os.environ.copy()
def _write_config(self):
with open("nbgrader_config.py", "w") as fh:
fh.write(self.nbgrader_config.format(tempdir=self.tempdir))
def _start_jupyterhub(self):
pass
def _start_formgrader(self):
self.formgrader = start_subprocess(["nbgrader", "formgrade"], env=self.env)
time.sleep(self.startup_wait)
def start(self):
self._write_config()
self._start_jupyterhub()
self._start_formgrader()
def _stop_formgrader(self):
self.formgrader.terminate()
# wait for the formgrader to shut down
for i in range(int(self.shutdown_wait / 0.1)):
retcode = self.formgrader.poll()
if retcode is not None:
break
time.sleep(0.1)
# not shutdown, force kill it
if retcode is None:
self.formgrader.kill()
def _stop_jupyterhub(self):
pass
def stop(self):
self._stop_formgrader()
self._stop_jupyterhub()
copy_coverage_files()
class HubAuthManager(DefaultManager):
nbgrader_config = dedent(
"""
c = get_config()
c.NbGraderConfig.course_id = 'course123ABC'
c.FormgradeApp.port = 9000
c.FormgradeApp.authenticator_class = "nbgrader.auth.hubauth.HubAuth"
c.HubAuth.graders = ["foobar"]
c.HubAuth.notebook_url_prefix = "class_files"
"""
)
jupyterhub_config = dedent(
"""
c = get_config()
c.JupyterHub.authenticator_class = 'nbgrader.tests.formgrader.fakeuser.FakeUserAuth'
c.JupyterHub.spawner_class = 'nbgrader.tests.formgrader.fakeuser.FakeUserSpawner'
c.JupyterHub.log_level = "WARN"
"""
)
base_formgrade_url = "http://localhost:8000/hub/nbgrader/course123ABC/"
base_notebook_url = "http://localhost:8000/user/foobar/notebooks/class_files/"
def _write_config(self):
super(HubAuthManager, self)._write_config()
pth = os.path.join(self.tempdir, "jupyterhub_config.py")
with open(pth, "w") as fh:
fh.write(self.jupyterhub_config.format(tempdir=self.tempdir))
def _start_jupyterhub(self):
self.env['CONFIGPROXY_AUTH_TOKEN'] = 'foo'
self.jupyterhub = start_subprocess(
["jupyterhub"],
cwd=self.tempdir,
env=self.env)
time.sleep(self.startup_wait)
def _start_formgrader(self):
print("Getting token from jupyterhub")
token = sp.check_output(['jupyterhub', 'token'], cwd=self.tempdir).decode().strip()
self.env['JPY_API_TOKEN'] = token
self.env['CONFIGPROXY_AUTH_TOKEN'] = 'foo'
super(HubAuthManager, self)._start_formgrader()
def _stop_jupyterhub(self):
self.jupyterhub.terminate()
# wait for the formgrader to shut down
for i in range(int(self.shutdown_wait / 0.1)):
retcode = self.jupyterhub.poll()
if retcode is not None:
break
time.sleep(0.1)
# not shutdown, force kill it
if retcode is None:
self.jupyterhub.kill()
# remove database and cookie secret
os.remove(os.path.join(self.tempdir, "jupyterhub.sqlite"))
os.remove(os.path.join(self.tempdir, "jupyterhub_cookie_secret"))
class HubAuthTokenManager(HubAuthManager):
nbgrader_config = dedent(
"""
c = get_config()
c.NbGraderConfig.course_id = 'course123ABC'
c.FormgradeApp.port = 9000
c.FormgradeApp.authenticator_class = "nbgrader.auth.hubauth.HubAuth"
c.HubAuth.graders = ["foobar"]
c.HubAuth.notebook_url_prefix = "class_files"
c.HubAuth.proxy_token = 'foo'
c.HubAuth.generate_hubapi_token = True
c.HubAuth.hub_db = '{tempdir}/jupyterhub.sqlite'
"""
)
def _start_formgrader(self):
super(HubAuthManager, self)._start_formgrader()
class HubAuthCustomUrlManager(HubAuthManager):
nbgrader_config = dedent(
"""
c = get_config()
c.NbGraderConfig.course_id = 'course123ABC'
c.FormgradeApp.port = 9000
c.FormgradeApp.authenticator_class = "nbgrader.auth.hubauth.HubAuth"
c.HubAuth.graders = ["foobar"]
c.HubAuth.notebook_url_prefix = "class_files"
c.HubAuth.remap_url = '/hub/grader'
"""
)
base_formgrade_url = "http://localhost:8000/hub/grader/"
| bsd-3-clause | -6,192,987,308,089,494,000 | 29.220339 | 92 | 0.612264 | false |
why2pac/dp-tornado | dp_tornado/helper/validator/form/__init__.py | 1 | 1609 | # -*- coding: utf-8 -*-
from dp_tornado.engine.helper import Helper as dpHelper
class FormHelper(dpHelper):
def validate(self, controller, fields, error_res='json'):
assert error_res in ('json', 'http', 'code')
output = {}
missing_reason = 'missing'
invalid_reason = 'invalid'
for field, payload in fields.items():
missing_message = payload['missing'] if 'missing' in payload else None
invalid_message = payload['invalid'] if 'invalid' in payload else None
cast = payload['cast'] if 'cast' in payload else None
value = controller.get_argument(name=field, **payload)
# Value validation
if (cast is bool and value == -1) or (cast is not bool and value is False):
return self._validate_response(controller, error_res, field, invalid_reason, invalid_message)
# Check required
if (value is None or value == '') and 'required' in payload and payload['required']:
return self._validate_response(controller, error_res, field, missing_reason, missing_message)
output[field] = value
return output
def _validate_response(self, controller, error_res, field, reason, message):
if error_res == 'json':
return controller.finish({
'result': False,
'error': {
'field': field,
'reason': reason,
'message': message
}
})
return controller.finish_with_error(400, message)
| mit | -148,264,188,733,514,530 | 33.978261 | 109 | 0.570541 | false |
adstomper/adblockedge | buildtools/build.py | 1 | 15827 | # coding: utf-8
# This Source Code is subject to the terms of the Mozilla Public License
# version 2.0 (the "License"). You can obtain a copy of the License at
# http://mozilla.org/MPL/2.0/.
import os, sys, re, subprocess, buildtools
from getopt import getopt, GetoptError
class Command(object):
name = property(lambda self: self._name)
shortDescription = property(lambda self: self._shortDescription,
lambda self, value: self.__dict__.update({'_shortDescription': value}))
description = property(lambda self: self._description,
lambda self, value: self.__dict__.update({'_description': value}))
params = property(lambda self: self._params,
lambda self, value: self.__dict__.update({'_params': value}))
supportedTypes = property(lambda self: self._supportedTypes,
lambda self, value: self.__dict__.update({'_supportedTypes': value}))
options = property(lambda self: self._options)
def __init__(self, handler, name):
self._handler = handler
self._name = name
self._shortDescription = ''
self._description = ''
self._params = ''
self._supportedTypes = None
self._options = []
self.addOption('Show this message and exit', short='h', long='help')
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
pass
def __call__(self, baseDir, scriptName, opts, args, type):
return self._handler(baseDir, scriptName, opts, args, type)
def isSupported(self, type):
return self._supportedTypes == None or type in self._supportedTypes
def addOption(self, description, short=None, long=None, value=None):
self._options.append((description, short, long, value))
def parseArgs(self, args):
shortOptions = map(lambda o: o[1]+':' if o[3] != None else o[1], filter(lambda o: o[1] != None, self._options))
longOptions = map(lambda o: o[2]+'=' if o[3] != None else o[2], filter(lambda o: o[2] != None, self._options))
return getopt(args, ''.join(shortOptions), longOptions)
commandsList = []
commands = {}
def addCommand(handler, name):
if isinstance(name, basestring):
aliases = ()
else:
name, aliases = (name[0], name[1:])
global commandsList, commands
command = Command(handler, name)
commandsList.append(command)
commands[name] = command
for alias in aliases:
commands[alias] = command
return command
def splitByLength(string, maxLen):
parts = []
currentPart = ''
for match in re.finditer(r'\s*(\S+)', string):
if len(match.group(0)) + len(currentPart) < maxLen:
currentPart += match.group(0)
else:
parts.append(currentPart)
currentPart = match.group(1)
if len(currentPart):
parts.append(currentPart)
return parts
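# For example, splitByLength('lorem ipsum dolor', 12) returns
# ['lorem ipsum', 'dolor']: words are greedily packed while the running part
# stays under maxLen.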
def usage(scriptName, type, commandName=None):
if commandName == None:
global commandsList
descriptions = []
for command in commandsList:
if not command.isSupported(type):
continue
commandText = ('%s %s' % (command.name, command.params)).ljust(39)
descriptionParts = splitByLength(command.shortDescription, 29)
descriptions.append(' %s %s %s' % (scriptName, commandText, descriptionParts[0]))
for part in descriptionParts[1:]:
descriptions.append(' %s %s %s' % (' ' * len(scriptName), ' ' * len(commandText), part))
print '''Usage:
%(descriptions)s
For details on a command run:
%(scriptName)s <command> --help
''' % {
'scriptName': scriptName,
'descriptions': '\n'.join(descriptions)
}
else:
global commands
command = commands[commandName]
description = '\n'.join(map(lambda s: '\n'.join(splitByLength(s, 80)), command.description.split('\n')))
options = []
for descr, short, long, value in command.options:
if short == None:
shortText = ''
elif value == None:
shortText = '-%s' % short
else:
shortText = '-%s %s' % (short, value)
if long == None:
longText = ''
elif value == None:
longText = '--%s' % long
else:
longText = '--%s=%s' % (long, value)
descrParts = splitByLength(descr, 46)
options.append(' %s %s %s' % (shortText.ljust(11), longText.ljust(19), descrParts[0]))
for part in descrParts[1:]:
options.append(' %s %s %s' % (' ' * 11, ' ' * 19, part))
print '''%(scriptName)s %(name)s %(params)s
%(description)s
Options:
%(options)s
''' % {
'scriptName': scriptName,
'name': command.name,
'params': command.params,
'description': description,
'options': '\n'.join(options)
}
def runBuild(baseDir, scriptName, opts, args, type):
locales = None
buildNum = None
multicompartment = False
releaseBuild = False
keyFile = None
limitMetadata = False
for option, value in opts:
if option in ('-l', '--locales'):
locales = value.split(',')
elif option in ('-b', '--build'):
buildNum = int(value)
elif option in ('-k', '--key'):
keyFile = value
elif option in ('-m', '--multi-compartment'):
multicompartment = True
elif option in ('-r', '--release'):
releaseBuild = True
elif option == '--babelzilla':
locales = 'all'
limitMetadata = True
outFile = args[0] if len(args) > 0 else None
if type == 'gecko':
import buildtools.packager as packager
packager.createBuild(baseDir, outFile=outFile, locales=locales, buildNum=buildNum,
releaseBuild=releaseBuild, keyFile=keyFile,
limitMetadata=limitMetadata, multicompartment=multicompartment)
elif type == 'kmeleon':
import buildtools.packagerKMeleon as packagerKMeleon
packagerKMeleon.createBuild(baseDir, outFile=outFile, locales=locales,
buildNum=buildNum, releaseBuild=releaseBuild)
def runAutoInstall(baseDir, scriptName, opts, args, type):
if len(args) == 0:
print 'Port of the Extension Auto-Installer needs to be specified'
usage(scriptName, type, 'autoinstall')
return
multicompartment = False
for option, value in opts:
if option in ('-m', '--multi-compartment'):
multicompartment = True
if ':' in args[0]:
host, port = args[0].rsplit(':', 1)
else:
host, port = ('localhost', args[0])
import buildtools.packager as packager
packager.autoInstall(baseDir, host, port, multicompartment=multicompartment)
def setupTranslations(baseDir, scriptName, opts, args, type):
if len(args) < 1:
    print 'Project key is required to set up translations.'
usage(scriptName, type, 'setuptrans')
return
key = args[0]
import buildtools.packager as packager
locales = packager.getLocales(baseDir, True)
basename = packager.readMetadata(baseDir).get('general', 'baseName')
import buildtools.localeTools as localeTools
localeTools.setupTranslations(locales, basename, key)
def updateTranslationMaster(baseDir, scriptName, opts, args, type):
if len(args) < 1:
print 'Project key is required to update translation master files.'
usage(scriptName, type, 'translate')
return
key = args[0]
import buildtools.packager as packager
defaultLocaleDir = os.path.join(packager.getLocalesDir(baseDir), packager.defaultLocale)
basename = packager.readMetadata(baseDir).get('general', 'baseName')
import buildtools.localeTools as localeTools
localeTools.updateTranslationMaster(defaultLocaleDir, packager.defaultLocale, basename, key)
def getTranslations(baseDir, scriptName, opts, args, type):
if len(args) < 1:
    print 'Project key is required to download translations.'
usage(scriptName, type, 'translate')
return
key = args[0]
import buildtools.packager as packager
localesDir = packager.getLocalesDir(baseDir)
basename = packager.readMetadata(baseDir).get('general', 'baseName')
import buildtools.localeTools as localeTools
localeTools.getTranslations(localesDir, packager.defaultLocale, basename, key)
def showDescriptions(baseDir, scriptName, opts, args, type):
locales = None
for option, value in opts:
if option in ('-l', '--locales'):
locales = value.split(',')
import buildtools.packager as packager
if locales == None:
locales = packager.getLocales(baseDir)
elif locales == 'all':
locales = packager.getLocales(baseDir, True)
data = packager.readLocaleMetadata(baseDir, locales)
localeCodes = data.keys()
localeCodes.sort()
for localeCode in localeCodes:
locale = data[localeCode]
print ('''%s
%s
%s
%s
%s
''' % (localeCode,
locale['name'] if 'name' in locale else 'None',
locale['description'] if 'description' in locale else 'None',
locale['description.short'] if 'description.short' in locale else 'None',
locale['description.long'] if 'description.long' in locale else 'None',
)).encode('utf-8')
def generateDocs(baseDir, scriptName, opts, args, type):
if len(args) == 0:
print 'No target directory specified for the documentation'
usage(scriptName, type, 'docs')
return
targetDir = args[0]
toolkit = None
for option, value in opts:
if option in ('-t', '--toolkit'):
toolkit = value
if toolkit == None:
toolkit = os.path.join(baseDir, 'jsdoc-toolkit')
if not os.path.exists(toolkit):
subprocess.Popen(['hg', 'clone', 'https://hg.adblockplus.org/jsdoc-toolkit/', toolkit]).communicate()
command = [sys.executable,
os.path.join(toolkit, 'jsrun.py'),
'-t=' + os.path.join(toolkit, 'templates', 'jsdoc'),
'-d=' + targetDir,
'-a',
'-p',
'-x=js,jsm',
os.path.join(baseDir, 'modules'),
os.path.join(baseDir, 'components')]
subprocess.Popen(command).communicate()
def runReleaseAutomation(baseDir, scriptName, opts, args, type):
buildtoolsRepo = buildtools.__path__[0]
keyFile = None
downloadsRepo = os.path.join(baseDir, '..', 'downloads')
for option, value in opts:
if option in ('-k', '--key'):
keyFile = value
elif option in ('-d', '--downloads'):
downloadsRepo = value
if type == 'gecko':
if len(args) == 0:
print 'No version number specified for the release'
usage(scriptName, type, 'release')
return
version = args[0]
if re.search(r'[^\w\.]', version):
print 'Wrong version number format'
usage(scriptName, type, 'release')
return
if keyFile == None:
print 'Warning: no key file specified, creating an unsigned release build\n'
import buildtools.releaseAutomation as releaseAutomation
releaseAutomation.run(baseDir, version, keyFile, downloadsRepo, buildtoolsRepo)
else:
import buildtools.releaseAutomationKMeleon as releaseAutomationKMeleon
releaseAutomationKMeleon.run(baseDir, downloadsRepo, buildtoolsRepo)
with addCommand(lambda baseDir, scriptName, opts, args, type: usage(scriptName, type), ('help', '-h', '--help')) as command:
command.shortDescription = 'Show this message'
with addCommand(runBuild, 'build') as command:
command.shortDescription = 'Create a build'
command.description = 'Creates an extension build with given file name. If output_file is missing a default name will be chosen.'
command.params = '[options] [output_file]'
command.addOption('Only include the given locales (if omitted: all locales not marked as incomplete)', short='l', long='locales', value='l1,l2,l3')
command.addOption('Use given build number (if omitted the build number will be retrieved from Mercurial)', short='b', long='build', value='num')
command.addOption('File containing private key and certificates required to sign the package', short='k', long='key', value='file')
command.addOption('Create a build for leak testing', short='m', long='multi-compartment')
command.addOption('Create a release build', short='r', long='release')
command.addOption('Create a build for Babelzilla', long='babelzilla')
command.supportedTypes = ('gecko', 'kmeleon')
with addCommand(runAutoInstall, 'autoinstall') as command:
command.shortDescription = 'Install extension automatically'
command.description = 'Will automatically install the extension in a browser running Extension Auto-Installer. If host parameter is omitted assumes that the browser runs on localhost.'
command.params = '[<host>:]<port>'
command.addOption('Create a build for leak testing', short='m', long='multi-compartment')
command.supportedTypes = ('gecko')
with addCommand(setupTranslations, 'setuptrans') as command:
command.shortDescription = 'Sets up translation languages'
command.description = 'Sets up translation languages for the project on crowdin.net.'
command.params = '[options] project-key'
command.supportedTypes = ('gecko')
with addCommand(updateTranslationMaster, 'translate') as command:
command.shortDescription = 'Updates translation master files'
command.description = 'Updates the translation master files in the project on crowdin.net.'
command.params = '[options] project-key'
command.supportedTypes = ('gecko')
with addCommand(getTranslations, 'gettranslations') as command:
command.shortDescription = 'Downloads translation updates'
command.description = 'Downloads updated translations from crowdin.net.'
command.params = '[options] project-key'
command.supportedTypes = ('gecko')
with addCommand(showDescriptions, 'showdesc') as command:
command.shortDescription = 'Print description strings for all locales'
command.description = 'Display description strings for all locales as specified in the corresponding meta.properties files.'
command.addOption('Only include the given locales', short='l', long='locales', value='l1,l2,l3')
command.params = '[options]'
command.supportedTypes = ('gecko')
with addCommand(generateDocs, 'docs') as command:
command.shortDescription = 'Generate documentation'
command.description = 'Generate documentation files and write them into the specified directory.'
command.addOption('JsDoc Toolkit location', short='t', long='toolkit', value='dir')
command.params = '[options] <directory>'
command.supportedTypes = ('gecko')
with addCommand(runReleaseAutomation, 'release') as command:
command.shortDescription = 'Run release automation'
command.description = 'Note: If you are not the project owner then you '\
'probably don\'t want to run this!\n\n'\
'Runs release automation: creates downloads for the new version, tags '\
'source code repository as well as downloads and buildtools repository.'
command.addOption('File containing private key and certificates required to sign the release', short='k', long='key', value='file')
command.addOption('Directory containing downloads repository (if omitted ../downloads is assumed)', short='d', long='downloads', value='dir')
command.params = '[options] <version>'
command.supportedTypes = ('gecko', 'kmeleon')
def processArgs(baseDir, args, type='gecko'):
global commands
scriptName = os.path.basename(args[0])
args = args[1:]
if len(args) == 0:
args = ['build']
print '''
No command given, assuming "build". For a list of commands run:
%s help
''' % scriptName
command = args[0]
if command in commands:
if commands[command].isSupported(type):
try:
opts, args = commands[command].parseArgs(args[1:])
except GetoptError, e:
print str(e)
usage(scriptName, type, command)
sys.exit(2)
for option, value in opts:
if option in ('-h', '--help'):
usage(scriptName, type, command)
sys.exit()
commands[command](baseDir, scriptName, opts, args, type)
else:
print 'Command %s is not supported for this application type' % command
usage(scriptName, type)
else:
print 'Command %s is unrecognized' % command
usage(scriptName, type)
| mpl-2.0 | -2,171,428,615,372,930,000 | 35.978972 | 186 | 0.680672 | false |
zachrickert/data_structures | src/test_heap.py | 1 | 9014 | # -*- coding: utf-8 -*-
"""Testing module for the Heap classes."""
from heap import Heap
import pytest
import random
# ------------------Heap Initialization Tests--------------------
# [x] Test default initialization of a heap is a heap.
# [x] Test the inital size of a heap is 0.
# [x] Test the default initializtion is a max heap.
# [x] Test the override to a min heap.
# [x] Test if invalid input into min/max heap results in an error.
def test_heap_initalization():
"""Test default initialization of a heap is a heap."""
heap = Heap()
assert isinstance(heap, Heap)
def test_heap_size_initally_0():
"""Test the inital size of a heap is 0."""
heap = Heap()
assert heap.size == 0
def test_heap_init_default_max():
"""Test the default initializtion is a max heap."""
heap = Heap()
assert heap._min_max == 'max'
def test_heap_init_override_min():
"""Test the override to a min heap."""
heap = Heap('min')
assert heap._min_max == 'min'
def test_heap_init_error_on_unknown_type():
"""Test the init throws an error if given unknown type."""
with pytest.raises(TypeError):
heap = Heap('blah')
# ------------------Insert Method Tests--------------------
# [x] Insert one value into the heap check that value in heap.
# [x] Insert one value into the heap check at position 0.
# [x] Insert one value, size increases.
# [x] Insert, insert smaller, value at position 1.
# [x] Insert, insert bigger, value at position 0.
# [x] Minheap, insert, insert smaller, value at position 0.
# [x] Minheap, Insert, insert bigger, value at position 1.
# [x] Insert random nodes, verify heap property
# [x] Minheap insert random nodes, verify heap property
def test_insert_one_value_in_heap():
"""Insert one value into the heap check that value in heap."""
heap = Heap()
heap.insert(10)
assert 10 in heap._heap
def test_insert_one_value_at_position_0():
"""Insert one value into the heap check at position 0."""
heap = Heap()
heap.insert(10)
assert heap._heap[0] == 10
def test_insert_increases_size():
"""Insert one value, size increases."""
heap = Heap()
assert heap.size == 0
heap.insert(10)
assert heap.size == 1
def test_insert_two_values_check_position_two():
"""Insert, insert smaller, value at position 1."""
heap = Heap()
heap.insert(10)
heap.insert(7)
assert heap._heap[1] == 7
def test_insert_two_values_check_position_two_switch_positions():
"""Insert, insert bigger, value at position 0."""
heap = Heap()
heap.insert(7)
heap.insert(10)
assert heap._heap[0] == 10
def test_min_insert_two_values_check_position_two():
"""Minheap, Insert, insert smaller, value at position 1."""
heap = Heap('min')
heap.insert(10)
heap.insert(7)
assert heap._heap[0] == 7
def test_min_insert_two_values_check_position_two_switch_positions():
"""MinHeap, Insert, insert bigger, value at position 0."""
heap = Heap('min')
heap.insert(7)
heap.insert(10)
assert heap._heap[1] == 10
def test_insert_follows_heap_property():
"""Insert random nodes, verify heap property."""
heap = Heap()
spots = 16
sample_list = random.sample(range(100), spots)
for item in sample_list:
heap.insert(item)
for idx, item in enumerate(heap._heap):
parent = max(0, (idx - 1) >> 1)
assert heap._heap[parent] >= heap._heap[idx]
def test_minheap_insert_follows_heap_property():
"""Insert random nodes, verify heap property."""
heap = Heap('min')
spots = 16
sample_list = random.sample(range(100), spots)
for item in sample_list:
heap.insert(item)
for idx, item in enumerate(heap._heap):
parent = max(0, (idx - 1) >> 1)
assert heap._heap[parent] <= heap._heap[idx]
# ------------------Peak Method Tests--------------------
# [x] Insert one number, peak returns number
# [x] Insert multiple numbers, peak returns max
# [x] Minheap - Insert multiple numbers, peak returns min
# [x] Init heap, peak returns none.
def test_peak_heap_of_one_item():
"""Insert one number, peak returns number."""
heap = Heap()
heap.insert(10)
assert heap.peak() == 10
def test_peak_returns_max():
"""Insert multiple numbers, peak returns max."""
heap = Heap()
spots = 16
sample_list = random.sample(range(100), spots)
for item in sample_list:
heap.insert(item)
assert heap.peak() == max(sample_list)
def test_peak_returns_min():
"""Insert multiple numbers, peak returns max."""
heap = Heap('min')
spots = 16
sample_list = random.sample(range(100), spots)
for item in sample_list:
heap.insert(item)
assert heap.peak() == min(sample_list)
def test_peak_no_heap():
"""Init heap, peak returns none."""
heap = Heap()
assert heap.peak() is None
# ------------------Extract Method Tests--------------------
# [x] Extract returns the head value.
# [x] Insert, insert, extract, check first value.
# [x] Insert 3 values extract, check first value is correct value
# [x] Minheap Insert 3 values extract, check first value is correct value
# [x] Heap still a heap after extraction.
# [x] Minheap still a min heap after extraction.
# [x] Extract returns an IndexError if heap is empty.
# [x] Extract decrements the size of the heap.
def test_extract_returns_value():
"""Extract returns the head value."""
heap = Heap()
heap.insert(10)
assert heap.extract() == 10
def test_extract_moves_next_value_to_top():
"""Insert, insert, extract, check first value."""
heap = Heap()
heap.insert(10)
heap.insert(7)
heap.extract()
assert heap.extract() == 7
def test_extract_moves_correct_value_to_top():
"""Insert 3 values extract, check first value is correct value."""
heap = Heap()
heap.insert(5)
heap.insert(7)
heap.insert(10)
assert heap.extract() == 10
assert heap.extract() == 7
assert heap.extract() == 5
def test_minheap_extract_moves_correct_value_to_top():
"""Minheap Insert 3 values extract, check first value is correct value."""
heap = Heap('min')
heap.insert(10)
heap.insert(7)
heap.insert(5)
assert heap.extract() == 5
assert heap.extract() == 7
assert heap.extract() == 10
def test_extract_still_heap_property():
"""Heap still a heap after extraction."""
heap = Heap()
spots = 16
sample_list = random.sample(range(100), spots)
for item in sample_list:
heap.insert(item)
heap.extract()
for idx, item in enumerate(heap._heap):
parent = max(0, (idx - 1) >> 1)
assert heap._heap[parent] >= heap._heap[idx]
def test_extract_still_minheap_property():
"""Heap still a minheap after extraction."""
heap = Heap('min')
spots = 16
sample_list = random.sample(range(100), spots)
for item in sample_list:
heap.insert(item)
heap.extract()
for idx, item in enumerate(heap._heap):
parent = max(0, (idx - 1) >> 1)
assert heap._heap[parent] <= heap._heap[idx]
def test_extract_from_empty_heap():
"""Extract returns an IndexError if heap is empty."""
heap = Heap()
with pytest.raises(IndexError):
heap.extract()
def test_extract_decrements_size_of_heap():
"""Extract decrements the size of the heap."""
heap = Heap()
heap.insert(10)
heap.insert(7)
assert heap.size == 2
heap.extract()
assert heap.size == 1
# ------------------Overall Tests--------------------
# [x] Add values to heap, extract values. Check all in order.
# [x] Add values to minheap, extract values. Check all in order.
def test_overall_heap_function():
"""Add values to heap, extract values. Chek all in order."""
heap = Heap()
spots = 64
sample_list = random.sample(range(100), spots)
for item in sample_list:
heap.insert(item)
last_item = heap.extract()
while heap.size > 0:
new_item = heap.extract()
assert new_item <= last_item
last_item = new_item
def test_overall_minheap_function():
"""Add values to minheap, extract values. Chek all in order."""
heap = Heap('min')
spots = 64
sample_list = random.sample(range(100), spots)
for item in sample_list:
heap.insert(item)
last_item = heap.extract()
while heap.size > 0:
new_item = heap.extract()
assert new_item >= last_item
last_item = new_item
# ------------------Helper Function Tests--------------------
# [x] Int, float, boolean, string types unaffected.
# [x] List and tuples return first value.
def test_helper_unchanged_types():
from heap import _get_first_item
data_types = [-25, 3.14, True, 'abcdef']
for item in data_types:
assert _get_first_item(item) == item
def test_helper_list_and_tuples():
from heap import _get_first_item
data_types = [(2, 5), [7, 4]]
for item in data_types:
assert _get_first_item(item) == item[0]
| mit | 681,883,408,417,413,500 | 27.345912 | 78 | 0.623031 | false |
ger-ator/turnos | personal/bajas.py | 1 | 16813 | from PyQt5 import QtSql, QtCore
from enum import Enum
from personal import calendario
from personal import personal
from personal import trabajador
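# Note on the decorator below: it opens a transaction, runs the wrapped
# method and always commits -- there is no rollback on failure, so a
# partially applied add()/delete() can still be committed.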
def transaccion(func):
    def func_wrapper(*args):
        QtSql.QSqlDatabase.database().transaction()
        result = func(*args)
        QtSql.QSqlDatabase.database().commit()
        return result
    return func_wrapper
class Bajas(object):
_dbase = None
_ids = []
def __init__(self, dbase=None):
self.dbase = dbase
if not Bajas._ids:
query = QtSql.QSqlQuery()
query.exec_("SELECT baja_id FROM bajas")
while query.next():
Bajas._ids.append(query.value(0))
self.ids = Bajas._ids
def iterable(self, lista=None):
if lista is None:
return {Baja(self.dbase, index) for index in self.ids}
else:
return {Baja(self.dbase, index)
for index in self.ids if index in lista}
@transaccion
def add(self, sustituido, inicio, final, motivo=""):
        ##Add the leave record
query = QtSql.QSqlQuery()
query.prepare("INSERT INTO bajas "
"(sustituido_id, inicio, final, motivo) "
"VALUES (?, ?, ?, ?)")
query.addBindValue(sustituido.rowid())
query.addBindValue(inicio)
query.addBindValue(final)
query.addBindValue(motivo)
if not query.exec_():
return False
baja_insertada = query.lastInsertId()
Bajas._ids.append(baja_insertada)
####
        ##Create the associated substitutions
        mis_sustituciones = Sustituciones(self.dbase)
cal = calendario.Calendario()
for dia in [inicio.addDays(i)
for i in range(inicio.daysTo(final) + 1)]:
turno = cal.getJornada(sustituido, dia)
if turno in {personal.Jornada.TM, personal.Jornada.TT,
personal.Jornada.TN, personal.Jornada.Ret}:
mis_sustituciones.add(sustituido, None,
dia, turno, baja_insertada)
####
@transaccion
def delete(self, baja):
        ##Delete the substitutions associated with the leave
mis_sustituciones = Sustituciones(self.dbase)
for sustitucion in mis_sustituciones.iterable(baja):
mis_sustituciones.delete(sustitucion)
####
        ##Delete the leave
query = QtSql.QSqlQuery()
query.prepare("DELETE FROM bajas "
"WHERE baja_id = ?")
query.addBindValue(baja.rowid())
if not query.exec_():
return False
Bajas._ids.remove(baja.rowid())
return True
####
class Baja(object):
__BASE_BAJAS = {"baja_id":0,
"sustituido_id":1,
"inicio":2,
"final":3,
"motivo":4}
_cache = {}
def __init__(self, dbase, baja_id):
self.dbase = dbase
self.baja_id = str(baja_id)
        ##Add the leave to the cache
if not self.baja_id in Baja._cache:
query = QtSql.QSqlQuery()
query.prepare("SELECT * FROM bajas "
"WHERE baja_id = ?")
query.addBindValue(baja_id)
if not query.exec_():
print(query.lastError().text())
raise ValueError
query.first()
if query.isValid():
Baja._cache[self.baja_id] = {}
for key, value in Baja.__BASE_BAJAS.items():
if query.isNull(value):
Baja._cache[self.baja_id][key] = None
else:
Baja._cache[self.baja_id][key] = query.value(value)
####
        ##Take the leave data
self.datos = Baja._cache[self.baja_id]
####
def __eq__(self, other):
if other is None:
return self is None
else:
return self.rowid() == other.rowid()
def __ne__(self, other):
if other is None:
return self is not None
else:
return self.rowid() != other.rowid()
def __key(self):
return (self.datos["baja_id"], self.datos["sustituido_id"])
def __hash__(self):
return hash(self.__key())
def getColumn(self, column):
try:
return self.datos[column]
except KeyError:
print("Baja.getColumn: No se ha encontrado {0} "
"para {1}".format(column, self.baja_id))
return None
def setColumn(self, column, value):
try:
query = QtSql.QSqlQuery()
query.prepare("UPDATE bajas "
"SET {0} = ? "
"WHERE baja_id = ?".format(column))
query.addBindValue(value)
query.addBindValue(self.datos["baja_id"])
if not query.exec_():
print(query.lastError().text())
raise ValueError
Baja._cache[self.baja_id][column] = value
except KeyError:
print("Baja.setColumn: No se ha encontrado {0} "
"para {1}".format(column, self.baja_id))
def rowid(self):
return self.getColumn("baja_id")
def sustituido(self):
sustituido = self.getColumn("sustituido_id")
if sustituido is None:
return None
else:
return trabajador.Trabajador(self.dbase, sustituido)
def inicio(self):
return QtCore.QDate.fromString(self.getColumn("inicio"),
QtCore.Qt.ISODate)
def final(self):
return QtCore.QDate.fromString(self.getColumn("final"),
QtCore.Qt.ISODate)
def motivo(self):
return self.getColumn("motivo")
def setInicio(self, fecha):
if fecha == self.inicio():
return
elif fecha > self.inicio():
mis_sustituciones = Sustituciones(self.dbase)
for sustitucion in mis_sustituciones.iterable(self):
if sustitucion.fecha() < fecha:
mis_sustituciones.delete(sustitucion)
elif fecha < self.inicio():
mis_sustituciones = Sustituciones(self.dbase)
cal = calendario.Calendario()
for dia in [fecha.addDays(i)
for i in range(fecha.daysTo(self.inicio()))]:
turno = cal.getJornada(self.sustituido(), dia)
if turno in {personal.Jornada.TM, personal.Jornada.TT,
personal.Jornada.TN, personal.Jornada.Ret}:
mis_sustituciones.add(self.sustituido(), None,
dia, turno, self.rowid())
self.setColumn("inicio", fecha.toString(QtCore.Qt.ISODate))
def setFinal(self, fecha):
if fecha == self.final():
return
elif fecha < self.final():
mis_sustituciones = Sustituciones(self.dbase)
for sustitucion in mis_sustituciones.iterable(self):
if sustitucion.fecha() > fecha:
mis_sustituciones.delete(sustitucion)
elif fecha > self.final():
mis_sustituciones = Sustituciones(self.dbase)
cal = calendario.Calendario()
            # cover the days after the old end date up to and including the new one
            for dia in [fecha.addDays(i)
                        for i in range(fecha.daysTo(self.final()) + 1, 1)]:
turno = cal.getJornada(self.sustituido(), dia)
if turno in {personal.Jornada.TM, personal.Jornada.TT,
personal.Jornada.TN, personal.Jornada.Ret}:
mis_sustituciones.add(self.sustituido(), None,
dia, turno, self.rowid())
self.setColumn("final", fecha.toString(QtCore.Qt.ISODate))
class Sustituciones(object):
_dbase = None
_ids = []
def __init__(self, dbase=None):
self.dbase = dbase
if not Sustituciones._ids:
query = QtSql.QSqlQuery()
query.exec_("SELECT sustitucion_id FROM sustituciones")
while query.next():
Sustituciones._ids.append(query.value(0))
self.ids = Sustituciones._ids
def iterable(self, baja=None):
if baja is None:
return {Sustitucion(self.dbase, index) for index in self.ids}
else:
return {Sustitucion(self.dbase, index)
for index in self.ids
if Sustitucion(self.dbase, index).baja() == baja}
def add(self, sustituido, sustituto, fecha, turno, baja_id):
query = QtSql.QSqlQuery()
query.prepare("INSERT INTO sustituciones "
"(sustituido_id, sustituto_id, fecha, turno, baja_id) "
"VALUES (?, ?, ?, ?, ?)")
query.addBindValue(sustituido.rowid())
if sustituto is None:
query.addBindValue(None)
else:
query.addBindValue(sustituto.rowid())
query.addBindValue(fecha)
query.addBindValue(turno.value)
query.addBindValue(baja_id)
if not query.exec_():
raise ValueError("Alguno de los argumentos no "
"es valido para la base de datos.")
Sustituciones._ids.append(query.lastInsertId())
def delete(self, sustitucion):
query = QtSql.QSqlQuery()
query.prepare("DELETE FROM sustituciones "
"WHERE sustitucion_id = ?")
query.addBindValue(sustitucion.rowid())
if not query.exec_():
return False
Sustituciones._ids.remove(sustitucion.rowid())
return True
##TODO: revisit how entries are removed from the cache of class Sustitucion.
##For now the cache is left dirty, since the ids are unique.
class Sustitucion(object):
__BASE_SUSTITUCIONES = {"sustitucion_id":0,
"sustituido_id":1,
"sustituto_id":2,
"fecha":3,
"turno":4,
"baja_id":5}
_cache = {}
def __init__(self, dbase, sustitucion_id):
self.dbase = dbase
self.sustitucion_id = str(sustitucion_id)
        ##Add the substitution to the cache
if not self.sustitucion_id in Sustitucion._cache:
query = QtSql.QSqlQuery()
query.prepare("SELECT * FROM sustituciones "
"WHERE sustitucion_id = ?")
query.addBindValue(sustitucion_id)
if not query.exec_():
print(query.lastError().text())
raise ValueError
query.first()
if query.isValid():
Sustitucion._cache[self.sustitucion_id] = {}
for key, value in Sustitucion.__BASE_SUSTITUCIONES.items():
if query.isNull(value):
Sustitucion._cache[self.sustitucion_id][key] = None
else:
Sustitucion._cache[self.sustitucion_id][key] = query.value(value)
####
        ##Take the substitution data
self.datos = Sustitucion._cache[self.sustitucion_id]
####
def __eq__(self, other):
if other is None:
return self is None
else:
return self.rowid() == other.rowid()
def __ne__(self, other):
if other is None:
return self is not None
else:
return self.rowid() != other.rowid()
def __key(self):
return (self.datos["sustitucion_id"], self.datos["sustituido_id"],
self.datos["fecha"], self.datos["turno"])
def __hash__(self):
return hash(self.__key())
def getColumn(self, column):
try:
return self.datos[column]
except KeyError:
print("Sustitucion.getColumn: No se ha encontrado {0} "
"para {1}".format(column, self.sustitucion_id))
return None
def setColumn(self, column, value):
try:
query = QtSql.QSqlQuery()
query.prepare("UPDATE sustituciones "
"SET {0} = ? "
"WHERE sustitucion_id = ?".format(column))
query.addBindValue(value)
query.addBindValue(self.datos["sustitucion_id"])
if not query.exec_():
print(query.lastError().text())
raise ValueError
Sustitucion._cache[self.sustitucion_id][column] = value
except KeyError:
print("Sustitucion.setColumn: No se ha encontrado {0} "
"para {1}".format(column, self.baja_id))
def rowid(self):
return self.getColumn("sustitucion_id")
def sustituido(self):
sustituido = self.getColumn("sustituido_id")
if sustituido is None:
return None
else:
return trabajador.Trabajador(self.dbase, sustituido)
def sustituto(self):
sustituto = self.getColumn("sustituto_id")
if sustituto is None:
return None
else:
return trabajador.Trabajador(self.dbase, sustituto)
def fecha(self):
return QtCore.QDate.fromString(self.getColumn("fecha"),
QtCore.Qt.ISODate)
def turno(self):
return personal.Jornada(self.getColumn("turno"))
def baja(self):
return Baja(self.dbase, self.getColumn("baja_id"))
def setSustituto(self, sustituto):
if isinstance(sustituto, trabajador.Trabajador):
self.setColumn("sustituto_id", sustituto.rowid())
else:
self.setColumn("sustituto_id", None)
def sustitutos(self):
trabajadores = trabajador.Trabajadores(self.dbase)
sustituido = self.sustituido()
mis_sustituciones = Sustituciones(self.dbase)
mis_bajas = Bajas(self.dbase)
cal = calendario.Calendario()
candidatos = set()
no_validos = set()
puestos = {sustituido.puesto()}
if personal.Puesto.OpPolivalente in puestos:
puestos.update({personal.Puesto.OpReactor,
personal.Puesto.OpTurbina})
elif (personal.Puesto.OpReactor in puestos or
personal.Puesto.OpTurbina in puestos):
puestos.add(personal.Puesto.OpPolivalente)
        ##Look for workers from other teams on an Ofi, Des or Ret shift
for candidato in trabajadores.iterable():
if (candidato.puesto() in puestos and
candidato.grupo() != sustituido.grupo() and
cal.getJornada(candidato, self.fecha()) in {personal.Jornada.Des,
personal.Jornada.Ret,
personal.Jornada.Ofi}):
candidatos.add(candidato)
####
        ##Filter out workers who are on leave or already substituting
for sustitucion in mis_sustituciones.iterable():
if sustitucion.fecha() == self.fecha():
no_validos.add(sustitucion.sustituto())
for baja in mis_bajas.iterable():
if baja.inicio() <= self.fecha() and baja.final() >= self.fecha():
no_validos.add(baja.sustituido())
####
        ##Filter out workers with a TN (night shift) scheduled, or due to a
        ##substitution, to avoid chaining two consecutive shifts
if self.turno() is personal.Jornada.TM:
for candidato in candidatos:
if cal.getJornada(candidato,
self.fecha().addDays(-1)) is personal.Jornada.TN:
no_validos.add(candidato)
for sustitucion in mis_sustituciones.iterable():
if (sustitucion.fecha() == self.fecha().addDays(-1) and
sustitucion.turno() is personal.Jornada.TN):
no_validos.add(sustitucion.sustituto())
return (candidatos - no_validos)
def orderedSustitutos(self):
candidatos = self.sustitutos()
cal = calendario.Calendario()
lista_ordenada = [(i,
cal.getJornada(i, self.fecha())) for i in candidatos]
if self.sustituido().unidad() is personal.Unidad.U1:
lista_ordenada.sort(key=lambda trabajador: trabajador[0].datos["unidad"])
else:
lista_ordenada.sort(key=lambda trabajador: trabajador[0].datos["unidad"], reverse=True)
lista_ordenada.sort(key=lambda trabajador: trabajador[1].value)
return [i[0] for i in lista_ordenada]
| gpl-3.0 | -4,720,994,865,346,456,000 | 37.291572 | 99 | 0.535158 | false |
retooth/morse | morse/models/filters.py | 1 | 2137 | #!/usr/bin/python
# This file is part of Morse.
#
# Morse is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Morse is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Morse. If not, see <http://www.gnu.org/licenses/>.
from . import db
class BoardFilter (db.Model):
""" board filter for registered users """
__tablename__ = "board_filters"
user_id = db.Column(db.Integer, db.ForeignKey("users.id"), primary_key=True)
string_identifier = db.Column(db.String, primary_key=True)
active = db.Column(db.Boolean)
def __init__ (self, user_id, string_identifier, active = False):
self.user_id = user_id
self.string_identifier = string_identifier
self.active = active
class TopicFilter (db.Model):
""" topic filter for registered users """
__tablename__ = "topic_filters"
user_id = db.Column(db.Integer, db.ForeignKey("users.id"), primary_key=True)
string_identifier = db.Column(db.String, primary_key=True)
active = db.Column(db.Boolean)
def __init__ (self, user_id, string_identifier, active = False):
self.user_id = user_id
self.string_identifier = string_identifier
self.active = active
class PostFilter (db.Model):
""" post filter for registered users """
__tablename__ = "post_filters"
user_id = db.Column(db.Integer, db.ForeignKey("users.id"), primary_key=True)
string_identifier = db.Column(db.String, primary_key=True)
active = db.Column(db.Boolean)
def __init__ (self, user_id, string_identifier, active = False):
self.user_id = user_id
self.string_identifier = string_identifier
self.active = active
| gpl-3.0 | 838,505,797,291,886,600 | 34.616667 | 80 | 0.673842 | false |
kawashiro/dewyatochka2 | src/dewyatochka/plugins/cool_story/parser/nya_sh.py | 1 | 1954 | # -*- coding: UTF-8
""" nya.sh html parser
Classes
=======
Parser -- Parser implementation
Attributes
==========
NYA_SH_SOURCE_NAME -- Source name constant
"""
import html
from html.parser import HTMLParser
from lxml.html import HtmlElement
from pyquery import PyQuery
from ._base import *
__all__ = ['Parser', 'NYA_SH_SOURCE_NAME']
# Source name constant
NYA_SH_SOURCE_NAME = 'nya.sh'
class Parser(AbstractParser):
""" nya.sh parser """
def __init__(self):
""" Init parser object, create html parser for entities decoding """
super().__init__()
self.__html_parser = HTMLParser()
@property
def name(self) -> str:
""" Get unique name
:return str:
"""
return NYA_SH_SOURCE_NAME
def _parse_posts_collection(self, html_: PyQuery) -> list:
""" Get posts HTMLElement[] collection
:param PyQuery html_: Page PyQuery object
:return list:
"""
return html_('div.q')
def _parse_pages_collection(self, html_: PyQuery) -> list:
""" Get pages urls for indexation
:param PyQuery html_: Page PyQuery object
:return list:
"""
pages_links = []
links_list = html_('div.pages *')
is_next_link = False
for link in links_list:
if is_next_link:
pages_links.append(link.attrib['href'])
elif link.tag == 'b':
is_next_link = True
return pages_links
def _parse_post(self, html_element: HtmlElement) -> RawPost:
""" Parse post html element
:param HTMLElement html_element:
:return RawPost:
"""
post_pyq_el = PyQuery(html_element)
story_id = int(post_pyq_el('div.sm a b')[0].text.lstrip('#'))
story_text = html.unescape(parse_multiline_html(post_pyq_el('div.content')))
return RawPost(story_id, self.name, '', story_text, frozenset())
| gpl-3.0 | -1,173,607,798,858,651,000 | 22.829268 | 84 | 0.575742 | false |
kayhayen/Nuitka | tests/benchmarks/constructs/CallUncompiledFunctionPosArgs.py | 1 | 1520 | # Copyright 2021, Kay Hayen, mailto:[email protected]
#
# Python test originally created or extracted from other peoples work. The
# parts from me are licensed as below. It is at least Free Software where
# it's copied from other people. In these cases, that will normally be
# indicated.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
exec("""
def python_func(a,b,c,d,e,f):
pass
""")
def calledRepeatedly():
# This is supposed to make a call to a non-compiled function, which is
# being optimized separately.
python_f = python_func
# construct_begin
python_f("some", "random", "values", "to", "check", "call")
python_f("some", "other", "values", "to", "check", "call")
python_f("some", "new", "values", "to", "check", "call")
# construct_alternative
pass
# construct_end
return python_f
import itertools
for x in itertools.repeat(None, 50000):
calledRepeatedly()
print("OK.")
| apache-2.0 | -8,219,298,560,963,762,000 | 32.043478 | 78 | 0.683553 | false |
ostrovok-team/django-mediagenerator | mediagenerator/management/commands/cleanupmedia.py | 1 | 1262 | import os
from django.core.management.base import NoArgsCommand
from django.utils.importlib import import_module
from mediagenerator import settings
class Command(NoArgsCommand):
    help = 'Removes files from _generated_media not stored in _generated_media_file.py'
files_removed = 0
def handle_noargs(self, **options):
try:
names = import_module(settings.GENERATED_MEDIA_NAMES_MODULE).NAMES
except (ImportError, AttributeError):
print "No found generated media. Exiting."
return
namesset = set()
for n in names.values():
namesset.add(os.path.relpath(os.path.join(settings.GENERATED_MEDIA_DIR, n)))
os.path.walk(settings.GENERATED_MEDIA_DIR, self.walk, namesset)
print "Cleanup done, files removed: %d" % self.files_removed
def walk(self, current_content, dirname, names):
relname = os.path.relpath(dirname)
for fname in names:
full_name = os.path.join(relname, fname)
if not os.path.isfile(full_name): continue
if full_name in current_content: continue
print "Removing unnecessary file %s" % full_name
os.remove(full_name)
self.files_removed += 1
| bsd-3-clause | -410,517,007,112,791,940 | 30.55 | 88 | 0.646593 | false |
jgliss/pydoas | scripts/ex1_read_and_plot_example_data.py | 1 | 6008 | # -*- coding: utf-8 -*-
"""pydoas example script 1
Introductory script illustrating how to import data from DOASIS
resultfiles. The DOASIS result file format is specified as default in the
package data file "import_info.txt".
Creates a result data set using DOASIS example data and plots some examples
"""
import pydoas
import matplotlib.pyplot as plt
from os.path import join
from SETTINGS import SAVE_DIR, SAVEFIGS, OPTPARSE, DPI, FORMAT
if __name__=="__main__":
plt.close("all")
### Get example data base path and all files in there
files, path = pydoas.get_data_files("doasis")
### Device ID of the spectrometer (of secondary importance)
dev_id = "avantes"
### Data import type (DOASIS result file format)
res_type = "doasis"
### Specify the the import details
# here, 3 x SO2 from the first 3 fit scenario result files (f01, f02, f03)
# BrO from f04, 2 x O3 (f02, f04) and OClO (f04)
import_dict = {'so2' : ['SO2_Hermans_298_air_conv',\
['f01','f02','f03']],
'bro' : ['BrO_Wil298_Air_conv',['f04']],
'o3' : ['o3_221K_air_burrows_1999_conv',\
['f02', 'f04']],
'oclo' : ['OClO_293K_Bogumil_2003_conv',['f04']]}
### Specify the default fit scenarios for each species
# After import, the default fit scenarios for each species are used
# whenever fit scenarios are not explicitely specified
default_dict = {"so2" : "f03",
"bro" : "f04",
"o3" : "f04",
"oclo" : "f04"}
#: Create import setup object
stp = pydoas.dataimport.ResultImportSetup(path, result_import_dict =\
import_dict, default_dict = default_dict, meta_import_info = res_type,\
dev_id = dev_id)
#: Create Dataset object for setup...
ds = pydoas.analysis.DatasetDoasResults(stp)
#: ... and load results
ds.load_raw_results()
### plot_some_examples
fig1, axes = plt.subplots(2, 2, figsize = (16, 8), sharex = True)
ax = axes[0,0]
#load all SO2 results
so2_default = ds.get_results("so2")
so2_fit01 = ds.get_results("so2", "f01")
so2_fit02 = ds.get_results("so2", "f02")
#plot all SO2 results in top left axes object
so2_default.plot(style="-b", ax=ax, label="so2 (default, f03)")
so2_fit01.plot(style="--c", ax=ax, label="so2 (f01)")
so2_fit02.plot(style="--r", ax=ax, label="so2 (f02)").set_ylabel("SO2 [cm-2]")
ax.legend(loc='best', fancybox=True, framealpha=0.5, fontsize=9)
ax.set_title("SO2")
fig1.tight_layout(pad = 1, w_pad = 3.5, h_pad = 3.5)
#now load the other species and plot them into the other axes objects
bro=ds.get_results("bro")
bro.plot(ax=axes[0, 1], label="bro", title="BrO").set_ylabel("BrO [cm-2]")
o3=ds.get_results("o3")
o3.plot(ax=axes[1, 0], label="o3",
title="O3").set_ylabel("O3 [cm-2]")
oclo=ds.get_results("oclo")
oclo.plot(ax=axes[1, 1], label="oclo",
title="OClO").set_ylabel("OClO [cm-2]")
# Now calculate Bro/SO2 ratios of the time series and plot them with
# SO2 shaded on second y axis
bro_so2 = bro/so2_default
oclo_so2 = oclo/so2_default
fig2, axis = plt.subplots(1,1, figsize=(12,8))
bro_so2.plot(ax=axis, style=" o", label="BrO/SO2")
oclo_so2.plot(ax=axis, style=" x", label="OClO/SO2")
#axis.set_ylabel("BrO/SO2")
so2_default.plot(ax=axis, kind="area",
secondary_y=True, alpha=0.3).set_ylabel("SO2 CD [cm-2]")
axis.legend()
if SAVEFIGS:
fig1.savefig(join(SAVE_DIR, "ex1_out1.%s" %FORMAT),
format=FORMAT, dpi=DPI)
fig2.savefig(join(SAVE_DIR, "ex1_out2.%s" %FORMAT),
format=FORMAT, dpi=DPI)
### IMPORTANT STUFF FINISHED (Below follow tests and display options)
# Import script options
(options, args) = OPTPARSE.parse_args()
# If applicable, do some tests. This is done only if TESTMODE is active:
# testmode can be activated globally (see SETTINGS.py) or can also be
# activated from the command line when executing the script using the
# option --test 1
if int(options.test):
### under development
import numpy.testing as npt
import numpy as np
from os.path import basename
npt.assert_array_equal([len(so2_default),
ds.get_default_fit_id("so2"),
ds.get_default_fit_id("bro"),
ds.get_default_fit_id("oclo")],
[22, "f03", "f04", "f04"])
vals = [so2_default.mean(),
so2_default.std(),
so2_fit01.mean(),
so2_fit02.mean(),
bro.mean(),
oclo.mean(),
bro_so2.mean(),
oclo_so2.mean(),
np.sum(ds.raw_results["f01"]["delta"])]
npt.assert_allclose(actual=vals,
desired=[9.626614500000001e+17,
9.785535879339162e+17,
1.0835821818181818e+18,
6.610916636363636e+17,
126046170454545.45,
42836762272727.27,
0.0001389915245877655,
7.579933107191676e-05,
0.125067],
rtol=1e-7)
print("All tests passed in script: %s" %basename(__file__))
try:
if int(options.show) == 1:
plt.show()
except:
print("Use option --show 1 if you want the plots to be displayed")
| bsd-3-clause | -2,086,774,709,072,713,200 | 38.019481 | 83 | 0.531625 | false |
frozflame/molbiox | molbiox/execute/generate.py | 1 | 1357 | #!/usr/bin/env python3
# coding: utf-8
from __future__ import unicode_literals, print_function
import os
import sys
import stat
from molbiox.frame.command import Command
from molbiox.frame.locate import locate_template
class CommandGen(Command):
abbr = 'gen'
name = 'generate'
desc = 'generate scripts'
@classmethod
def register(cls, subparser):
subparser.add_argument(
'--rude', action='store_true',
help='overwriting existing files if needed')
subparser.add_argument(
'template', default='test',
help='name of template to start from')
subparser.add_argument(
'out', nargs='?',
help='path to the output file, otherwise stdout')
@classmethod
def run(cls, args):
tpl_path = locate_template(args.template, new=False)
tpl = open(tpl_path).read()
# TODO: render from a template
output = tpl
if args.out == '-':
            sys.stdout.write(output)
else:
filename = args.out or locate_template(args.template, new=True)
cls.check_overwrite(args, filename)
with open(filename, 'w') as outfile:
outfile.write(output)
# chmod +x
st = os.stat(filename)
os.chmod(filename, st.st_mode | stat.S_IEXEC)
| gpl-2.0 | 4,811,820,897,735,580,000 | 26.14 | 75 | 0.590273 | false |
simongoffin/website_version | addons/website_version/models/website.py | 1 | 1142 | # -*- coding: utf-8 -*-
from openerp.osv import osv,fields
from openerp.http import request
class NewWebsite(osv.Model):
_inherit = "website"
_columns = {
'snapshot_id':fields.many2one("website_version.snapshot",string="Snapshot"),
}
    def get_current_snapshot(self, cr, uid, context=None):
        snapshot_id = request.session.get('snapshot_id')
        if snapshot_id is None or snapshot_id == 'Master':
            return 'Master'
        else:
            snapshot = self.pool['website_version.snapshot'].browse(cr, uid, [snapshot_id], context=context)
            return snapshot[0].name
def get_current_website(self, cr, uid, context=None):
website = super(NewWebsite,self).get_current_website(cr, uid, context=context)
#key = 'website_%s_snapshot_id' % request.website.id
key='snapshot_id'
if request.session.get(key):
request.context['snapshot_id'] = request.session.get(key)
elif website.snapshot_id:
request.context['snapshot_id'] = website.snapshot_id.id
request.session['snapshot_id'] = website.snapshot_id.id
return website
| agpl-3.0 | 5,451,043,187,917,340,000 | 33.606061 | 88 | 0.626095 | false |
nicolacimmino/LoP-RAN | LoPAccessPoint/MacroIP_UDP.py | 1 | 3914 | # MacroIP_UDP is part of MacroIP Core. Provides Access to UDP data through simple
# textual macros.
# Copyright (C) 2014 Nicola Cimmino
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
# This service expects a LoPNode connected on serial port ttyUSB0 and set
# to access point mode already (ATAP1). In due time autodiscovery and
# configuration will be built.
#
# We don't really lease addresses from a DHCP server here. Instead our box has
# a pool of IP address aliases that are for us to distribute to our clients.
# This has the benefit of not requiring us to modify our network config on the
# fly, since in the end the addresses always need to be assigned to this box
# in order to get the traffic. The disadvantage is that it requires
# a range of private IPs to be reserved for our use.
import MacroIP_DHCP
import socket
import select
import struct
from threading import Thread
outputMacrosQueue = []
active_sockets = []
def startActivity():
thread = Thread( target = serveIncomingIPTraffic )
thread.start()
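# Macro wire format handled below (derived from the parsing code; backslashes
# are literal separators):
#   udp.send\<dest_address>\<source_port>\<dest_port>\\<payload>
#   udp.listen\<local_port>
# Incoming datagrams are queued back to the client as:
#   \udp.data\<local_port>\<remote_ip>\<remote_port>\\<payload>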
def processMacro(clientid, macro):
# Send macro
if macro.startswith("udp.send\\"):
params = macro.split("\\")
source_address = MacroIP_DHCP.getIP(clientid)
source_port = int(params[2])
dest_address = params[1]
dest_port = int(params[3])
data_to_send = macro[ macro.find("\\\\") + 2:]
udpsocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    # Do not bind if the node doesn't have a lease; we will just go out
    # from the AP main IP address (so basically we SNAT) and port mapping
    # will take care of using a suitable source port; in fact we ignore
    # the client-supplied source port.
if source_address != None:
udpsocket.bind((source_address, source_port))
udpsocket.sendto(data_to_send, (dest_address, dest_port))
udpsocket.close()
# Listen
if macro.startswith("udp.listen\\"):
print clientid
params = macro.split("\\")
local_port = int(params[1])
local_address = MacroIP_DHCP.getIP(clientid)
# We cannot listen unless the client has leased an IP
if local_address != None:
print "Listening " + local_address + ":" + str(local_port)
udpsocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
udpsocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
udpsocket.bind((local_address, local_port))
active_sockets.append(udpsocket)
else:
outputMacrosQueue.append((clientid, "\\udp.listen\\failed\\\\"))
# This loop runs in a separate thread, waiting for incoming
# UDP traffic and delivering it to the client.
def serveIncomingIPTraffic():
global outputMacrosQueue
while True:
readable, writable, errored = select.select(active_sockets, [], [], 0)
for s in readable:
(local_ip , local_port) = s.getsockname()
clientid = MacroIP_DHCP.getClientID(local_ip)
if clientid != 0:
data, addr = s.recvfrom(1024)
remote_ip = addr[0]
remote_port = addr[1]
if data:
outputMacrosQueue.append((clientid, "\\udp.data\\" + str(local_port) + "\\" + str(remote_ip) + "\\" + str(remote_port) + "\\\\" + data))
def getOutputMacroIPMacro():
if len(outputMacrosQueue) > 0:
return outputMacrosQueue.pop(0)
else:
return (None, None)
| gpl-3.0 | -6,346,434,358,540,674,000 | 37.009709 | 144 | 0.692131 | false |
Koheron/zynq-sdk | examples/red-pitaya/oscillo/python/oscillo.py | 2 | 3530 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import time
import math
import numpy as np
from koheron import command
class Oscillo(object):
def __init__(self, client):
self.client = client
self.wfm_size = 8192
self.sampling_rate = 125e6
self.t = np.arange(self.wfm_size)/self.sampling_rate
self.dac = np.zeros((2, self.wfm_size))
self.adc = np.zeros((2, self.wfm_size))
self.spectrum = np.zeros((2, int(self.wfm_size / 2)))
self.avg_spectrum = np.zeros((2, int(self.wfm_size / 2)))
@command()
def set_dac_periods(self, period0, period1):
''' Select the periods played on each address generator
ex: self.set_dac_periods(8192, 4096)
'''
pass
@command()
def set_num_average_min(self, num_average_min):
        ''' Set the minimum number of averages that will be computed on the FPGA.
            The effective number of averages is >= num_average_min.
        '''
pass
@command()
def set_average_period(self, average_period):
''' Set the period of the averaging module and reset the module.
'''
self.period = average_period
@command()
def set_average(self, is_average):
''' is_average = True enables averaging. '''
pass
@command()
def get_num_average(self, channel):
''' Get the number of averages corresponding to the last acquisition. '''
num_average = self.client.recv_uint32()
return num_average
@command()
def get_decimated_data(self, decim_factor, index_low, index_high):
decimated_data = self.client.recv_vector(dtype='float32')
return decimated_data
def get_adc(self):
self.adc = np.reshape(self.get_decimated_data(1, 0, self.wfm_size), (2, self.wfm_size))
def get_spectrum(self):
fft_adc = np.fft.fft(self.adc, axis=1)
        self.spectrum = fft_adc[:, 0:self.wfm_size // 2]
def get_avg_spectrum(self, n_avg=1):
self.avg_spectrum = np.zeros((2, int(self.wfm_size / 2)))
for i in range(n_avg):
self.get_adc()
fft_adc = np.abs(np.fft.fft(self.adc, axis=1))
self.avg_spectrum += fft_adc[:, 0:int(self.wfm_size / 2)]
self.avg_spectrum /= n_avg
@command()
def reset_acquisition(self):
pass
@command(funcname='reset')
def reset_dac(self):
pass
def reset(self):
self.reset_dac()
# Modulation
def set_dac(self, channels=[0,1]):
""" Write the BRAM corresponding on the selected channels
(dac0 or dac1) with the array stored in self.dac[channel,:].
ex: self.set_dac(channel=[0])
"""
@command(classname='Modulation')
def set_dac_buffer(self, channel, arr):
pass
for channel in channels:
data = np.int16(16384 * (self.dac[channel,:]))
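            # pack two consecutive int16 samples into one uint32 word:
            # odd-index sample in the low half-word, even-index sample in the high half-word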
set_dac_buffer(self, channel, np.uint32(data[1::2] + data[::2] * 65536))
@command(classname='Modulation')
def get_modulation_status(self):
return self.client.recv_tuple('IIffffff')
@command(classname='Modulation')
def set_waveform_type(self, channel, wfm_type):
pass
@command(classname='Modulation')
def set_dac_amplitude(self, channel, amplitude_value):
pass
@command(classname='Modulation')
def set_dac_frequency(self, channel, frequency_value):
pass
@command(classname='Modulation')
    def set_dac_offset(self, channel, offset_value):
pass | mit | -7,438,568,031,150,482,000 | 29.439655 | 95 | 0.6 | false |
WikiWatershed/tr-55 | tr55/tables.py | 1 | 19396 | # -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
"""
TR-55 tables
"""
# For the different land uses, this describes the NLCD class value, the landscape factor (ki) and the Curve
# Numbers for each hydrologic soil group for that use type.
# NOTE: Missing NLCD type 12 (plus all Alaska only types (51, 72-74))
# For the BMPs, the numbers are not Curve Numbers; they are quantities of rainfall (in inches)
# that will be converted to infiltration by that BMP for that soil type.
# The Food and Agriculture Organization of the United Nations (FAO) document on evapotranspiration is:
# Allen, R.G.; Pereira, L.S.; Raes, D.; Smith, M. Evapotranspiration and Crop Water Requirements;
# Irrigation and Drainage Paper No. 56; FAO: Rome, 1998.
# Available: http://www.fao.org/docrep/x0490e/x0490e00.htm#Contents
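# Example lookup (illustrative): LAND_USE_VALUES['pasture']['cn']['b'] is the
# NRCS curve number for pasture on hydrologic soil group B (61), and
# LAND_USE_VALUES['pasture']['ki'] is its landscape factor (0.95).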
LAND_USE_VALUES = {
# NRCS Curve Numbers for NLCD land classes
'open_water': {'nlcd': 11, 'ki': 0.6525, 'cn': {'a': 100, 'b': 100, 'c': 100, 'd': 100}},
# Curve Number Source: Assumes 100% runoff
# Ki Source: FAO for Open Water, > 5 m depth, clear of turbidity, temperate climate.
'perennial_ice': {'nlcd': 12, 'ki': 0.0, 'cn': {'a': 100, 'b': 100, 'c': 100, 'd': 100}},
# Curve Number Source: Assumes 100% runoff
# Ki Source: Assumes no ET.
'developed_open': {'nlcd': 21, 'ki': 0.95, 'cn': {'a': 59, 'b': 75, 'c': 83, 'd': 87}},
# Curve Number Source: Blend of Pasture - medium and paved parking assuming 20% impervious.
# (TR-55, 1986, Table 2-2a)
# Ki Source: FAO for growing season for cool season turfgrass (dense stands of bluegrass, ryegrass, and fescue).
'developed_low': {'nlcd': 22, 'ki': 0.42, 'cn': {'a': 68, 'b': 80, 'c': 86, 'd': 89}},
# Curve Number Source: Blend of Pasture - medium and paved parking assuming 38% impervious.
# (TR-55, 1986, Table 2-2a)
# Ki Source: UNKNOWN
'developed_med': {'nlcd': 23, 'ki': 0.18, 'cn': {'a': 81, 'b': 88, 'c': 91, 'd': 93}},
# Curve Number Source: Blend of Pasture - medium and paved parking assuming 65% impervious.
# (TR-55, 1986, Table 2-2a)
# Ki Source: UNKNOWN
'developed_high': {'nlcd': 24, 'ki': 0.06, 'cn': {'a': 91, 'b': 94, 'c': 95, 'd': 96}},
# Curve Number Source: Blend of Pasture - medium and paved parking assuming 85% impervious.
# Ki Source: UNKNOWN
'barren_land': {'nlcd': 31, 'ki': 0.30, 'cn': {'a': 77, 'b': 86, 'c': 91, 'd': 94}},
# Curve Number Source: Fallow, Bare soil; Newly graded areas (TR-55, 1986, Table 2-2a and 2-2b)
# Ki Source: Sridhar, Venkataramana, "Evapotranspiration Estimation and Scaling Effects Over The Nebraska Sandhills"
# (2007). Great Plains Research: A Journal of Natural and Social Sciences. Paper 870.
# http://digitalcommons.unl.edu/greatplainsresearch/870
'deciduous_forest': {'nlcd': 41, 'ki': 1.0, 'cn': {'a': 30, 'b': 55, 'c': 70, 'd': 77}},
# Curve Number Source: Woods, Good condition;
# Woods are protected from grazing and litter and brush adequately cover the soil.
# (TR-55, 1986, Table 2-2c)
# Ki Source: Sridhar, Venkataramana, "Evapotranspiration Estimation and Scaling Effects Over The Nebraska Sandhills"
# (2007). Great Plains Research: A Journal of Natural and Social Sciences. Paper 870.
# http://digitalcommons.unl.edu/greatplainsresearch/870
'evergreen_forest': {'nlcd': 42, 'ki': 1.00, 'cn': {'a': 30, 'b': 55, 'c': 70, 'd': 77}},
# Curve Number Source: Woods, Good condition;
# Woods are protected from grazing and litter and brush adequately cover the soil.
# (TR-55, 1986, Table 2-2c)
# Ki Source: FAO for conifer trees during growing season in well-watered conditions for large forests.
'mixed_forest': {'nlcd': 43, 'ki': 1.0, 'cn': {'a': 30, 'b': 55, 'c': 70, 'd': 77}},
# Curve Number Source: Woods, Good condition;
# Woods are protected from grazing and litter and brush adequately cover the soil.
# (TR-55, 1986, Table 2-2c)
# Ki Source: Sridhar, Venkataramana, "Evapotranspiration Estimation and Scaling Effects Over The Nebraska Sandhills"
# (2007). Great Plains Research: A Journal of Natural and Social Sciences. Paper 870.
# http://digitalcommons.unl.edu/greatplainsresearch/870
'shrub': {'nlcd': 52, 'ki': 0.90, 'cn': {'a': 35, 'b': 56, 'c': 70, 'd': 77}},
# Curve Number Source: Brush, fair; 50-75% ground cover (TR-55, 1986, Table 2-2c)
# Ki Source: Descheemaeker, K., Raes, D., Allen, R., Nyssen, J., Poesen, J., Muys, B., Haile, M. and Deckers, J.
# 2011. Two rapid appraisals of FAO-56 crop coefficients for semiarid natural vegetation of the
# northern Ethiopian highlands. Journal of Arid Environments 75(4):353-359.
'grassland': {'nlcd': 71, 'ki': 1.08, 'cn': {'a': 30, 'b': 58, 'c': 71, 'd': 78}},
# Curve Number Source: Meadow - continuous grass, protected from grazing and generally mowed for hay.
# (TR-55, 1986, Table 2-2c)
# Ki Source: Average of all values in FAO document for Forages/Hay.
'pasture': {'nlcd': 81, 'ki': 0.95, 'cn': {'a': 39, 'b': 61, 'c': 74, 'd': 80}},
# Curve Number Source: Pasture, good; >75% ground cover and not heavily grazed. (TR-55, 1986, Table 2-2c)
# Ki Source: FAO for Grazing pasture with rotated grazing.
'cultivated_crops': {'nlcd': 82, 'ki': 1.15, 'cn': {'a': 67, 'b': 78, 'c': 85, 'd': 89}},
# Curve Number Source: Row crops, straight rows, good condition (TR-55, 1986, Table 2-2b)
# Ki Source: FAO average for all cereal crows during the growing season.
'woody_wetlands': {'nlcd': 90, 'ki': 1.20, 'cn': {'a': 30, 'b': 30, 'c': 30, 'd': 30}},
# Curve Number Source: Uses lowest curve numbers possible to maximize infiltration
# Ki Source: FAO for either Cattail/Bulrush wetland or Reed Swamp wetland during growing season.
'herbaceous_wetlands': {'nlcd': 95, 'ki': 1.20, 'cn': {'a': 30, 'b': 30, 'c': 30, 'd': 30}},
# Curve Number Source: Uses lowest curve numbers possible to maximize infiltration
# Ki Source: FAO for either Cattail/Bulrush wetland or Reed Swamp wetland during growing season.
# NRCS Curve Numbers for BMP's acting as land cover changes
'cluster_housing': {'ki': 0.42, 'cn': {'a': 62, 'b': 77, 'c': 84, 'd': 88}},
# Curve Number Source: Blend of Pasture - medium and paved parking assuming 26.8% impervious.
# Ki Source: UNKNOWN
'no_till': {'ki': 0.9, 'cn': {'a': 57, 'b': 73, 'c': 82, 'd': 86}},
# Curve Number Source: UNKNOWN
# Ki Source: UNKNOWN
    # Storage Capacities and Maximum Loading Ratios for Infiltration BMPs
# storage is in m3/m2, max_drainage_ratio is the ratio of drawn BMP area to
# the maximum possible area that should contribute to it.
    # NOTE that these contributing area ratios are based only on the suggested
    # drainage areas for well-designed BMPs and have nothing to do with the
    # user's actual placement of the BMP on the UI map.
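    # Illustrative sizing sketch (an interpretation of these two fields, not
    # part of the table): a BMP covering area_m2 square metres can retain up to
    #   storage * area_m2                # cubic metres of runoff
    # and should receive runoff from at most
    #   max_drainage_ratio * area_m2     # square metres of contributing area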
'green_roof': {'ki': 0.4, 'storage': 0.020, 'max_drainage_ratio': 1},
# Assume a simple extensive vegetated roof cover with 2" of growth media
# at 15% porosity and 2" of granular discharge media at 25% porosity
# Assume drainage area is equal only to the water that falls directly on the roof
# (no extra contributing area).
# Source: PA stormwater manual 6.5.1
'infiltration_basin': {'ki': 0.0, 'storage': 0.610, 'max_drainage_ratio': 8},
# Assume a large open area with no infiltration underneath and 2' of ponding depth (100% porosity)
# Assume drainage area is largely pervious surface (lawns) allowing a maximum loading ratio of 8:1
# Source: New Jersey stormwater manual, PA stormwater manual appendix C
'porous_paving': {'ki': 0.0, 'storage': 0.267, 'max_drainage_ratio': 2},
# Assume porous bituminous asphalt used as the paving surface
# 2.5" of porous paving service at 16% porosity, 1" of bedding layer/choker course at 50% porosity,
# and 24" of infiltration bed/reservoir layer at 40% porosity
# Assume some allowable additional drainage area (2:1) from roofs or adjacent pavement
# Note that inflow from any pervious areas is not recommended due to potential clogging
# Sources: PA stormwater manual 6.4.1,
# StormTech (http://www.stormtech.com/download_files/pdf/techsheet1.pdf),
# http://www.construction.basf.us/features/view/pervious-pavements,
# http: // stormwater.pca.state.mn.us / index.php / Design_criteria_for_permeable_pavement
'rain_garden': {'ki': 0.08, 'storage': 0.396, 'max_drainage_ratio': 5},
# Assumes 6" of ponding depth at 100% porosity, 24" planting mix at 20% porosity
# and 12" gravel underbed at 40% porosity.
# Assume drainage area is largely impervious (parking lots) allowing a maximum loading ratio of 5:1
# Source: PA stormwater manual 6.4.5, PA stormwater manual appendix C
}
# Runoff tables for Pitt's Small Storm Hydrology (SSH) model
# The raw runoff coefficients are those measured by the USGS in Wisconsin
# (Bannerman 1983, 1992 and 1993; Horwatich, 2004; Steuer 1996 and 1997; USEPA 1993; Walker 1994; Waschbusch 1999)
# This data is also provided as the Rv (runoff coefficient) file for all regions *but* the SouthEast in version 10.x of WinSlamm #
# http://wi.water.usgs.gov/slamm/index.html
# http://wi.water.usgs.gov/slamm/slamm_parameter_descriptions.htm
# http://winslamm.com/Select_documentation.html #
#
# The Standard Land Uses, including the percents of land in area type and their level of connectedness,
# are collected from multiple published papers analyzing different sites using WinSLAMM
# Pitt has compiled all of the site summaries here:
# http://winslamm.com/docs/Standard%20Land%20Use%20and%20Parameter%20file%20descriptions%20final%20April%2018%202011.pdf
# The above pdf also lists the original sources of the raw data. #
#
# The final runoff volumes and runoff ratios for each standard land use were calculated as the sum of the products of the raw runoff coefficients
# for each area type and the percent of land in that area type in each standard land use.
#
# For this work, this is the mapping used between the NLCD class and the SSH's Standard Land Use:
# NLCD class 21 (Developed, Open) = "Open Space"
# NLCD class 22 (Developed, Low) = "Residential"
# NLCD class 23 (Developed, Medium) = "Institutional"
# NLCD class 24 (Developed, High) = "Commercial"
# The runoff coefficients for Cluster Housing were derived by taking the numbers for the residential SLU and
# halving the amount of street, driveway, and parking and adding that amount to the amount of small
# landscaping. This simulates LID concentrating housing and decreasing paving, while maintaining the
# same residential density (ie, the same amount of roof space).
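# Illustrative computation (a sketch of the weighting described above, using
# hypothetical names): for one rainfall step of one standard land use,
#   runoff_ratio = sum(raw_coeff[surface] * land_fraction[surface]
#                      for surface in surface_types)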
SSH_RAINFALL_STEPS = [0.01, 0.08, 0.12, 0.2, 0.39, 0.59, 0.79, 0.98, 1.2, 1.6, 2, 2.4, 2.8, 3.2, 3.5, 3.9, 4.9]
SSH_RUNOFF_RATIOS = {
'developed_open' :
{'runoff_ratio':
{'a': [0.0393, 0.0472, 0.0598, 0.0645, 0.1045, 0.1272, 0.1372, 0.1432, 0.1493, 0.1558, 0.1609, 0.1637, 0.1662, 0.1686, 0.1711, 0.1726, 0.1757],
'b': [0.0393, 0.0472, 0.0598, 0.0645, 0.1177, 0.1462, 0.1636, 0.1697, 0.1809, 0.1874, 0.3127, 0.3148, 0.3165, 0.3182, 0.3199, 0.3206, 0.3229],
'c': [0.0393, 0.0472, 0.0598, 0.0645, 0.1193, 0.1528, 0.1769, 0.1904, 0.2008, 0.2423, 0.3127, 0.3148, 0.3165, 0.3182, 0.3199, 0.3624, 0.4066],
'd': [0.0393, 0.0472, 0.0598, 0.0645, 0.1193, 0.1528, 0.1769, 0.1904, 0.2008, 0.2423, 0.3127, 0.3148, 0.3165, 0.3182, 0.3199, 0.3624, 0.4066],
}
},
'developed_low' :
{'runoff_ratio':
{'a' : [0.0785, 0.1115, 0.1437, 0.1601, 0.1841, 0.2053, 0.2138, 0.2187, 0.2249, 0.2303, 0.2359, 0.2382, 0.2412, 0.2439, 0.2465, 0.2485, 0.2523],
'b' : [0.0785, 0.1115, 0.1437, 0.1601, 0.1960, 0.2224, 0.2377, 0.2426, 0.2534, 0.2589, 0.3731, 0.3748, 0.3770, 0.3791, 0.3809, 0.3822, 0.3853],
'c' : [0.0785, 0.1115, 0.1437, 0.1601, 0.1974, 0.2284, 0.2496, 0.2614, 0.2714, 0.3085, 0.3731, 0.3748, 0.3770, 0.3791, 0.3809, 0.4200, 0.4609],
'd' : [0.0785, 0.1115, 0.1437, 0.1601, 0.1974, 0.2284, 0.2496, 0.2614, 0.2714, 0.3085, 0.3731, 0.3748, 0.3770, 0.3791, 0.3809, 0.4200, 0.4609],
}
},
'developed_med' :
{'runoff_ratio':
{'a' : [0.1322, 0.1929, 0.2631, 0.3107, 0.3698, 0.4032, 0.4235, 0.4368, 0.4521, 0.4688, 0.4816, 0.4886, 0.4953, 0.5006, 0.5047, 0.5074, 0.5138],
'b' : [0.1322, 0.1929, 0.2631, 0.3150, 0.3838, 0.4226, 0.4474, 0.4616, 0.4797, 0.4980, 0.5715, 0.5803, 0.5887, 0.5944, 0.6002, 0.6045, 0.6146],
'c' : [0.1322, 0.1929, 0.2631, 0.3150, 0.3846, 0.4258, 0.4539, 0.4717, 0.4895, 0.5249, 0.5715, 0.5803, 0.5887, 0.5944, 0.6002, 0.6248, 0.6553],
'd' : [0.1322, 0.1929, 0.2631, 0.3150, 0.3846, 0.4258, 0.4539, 0.4717, 0.4895, 0.5249, 0.5715, 0.5803, 0.5887, 0.5944, 0.6002, 0.6248, 0.6553],
}
},
'developed_high' :
{'runoff_ratio':
{'a': [0.1966, 0.2815, 0.4034, 0.4796, 0.5549, 0.6037, 0.6311, 0.6471, 0.6675, 0.6891, 0.7063, 0.7154, 0.7257, 0.7335, 0.7389, 0.7435, 0.7533],
'b': [0.1966, 0.2815, 0.4034, 0.4895, 0.5803, 0.6343, 0.6647, 0.6818, 0.7045, 0.7274, 0.7724, 0.7820, 0.7925, 0.8005, 0.8059, 0.8104, 0.8203],
'c': [0.1966, 0.2815, 0.4034, 0.4895, 0.5807, 0.6358, 0.6677, 0.6865, 0.7090, 0.7398, 0.7724, 0.7820, 0.7925, 0.8005, 0.8059, 0.8197, 0.8390],
'd': [0.1966, 0.2815, 0.4034, 0.4895, 0.5807, 0.6358, 0.6677, 0.6865, 0.7090, 0.7398, 0.7724, 0.7820, 0.7925, 0.8005, 0.8059, 0.8197, 0.8390],
}
},
'cluster_housing' :
{'runoff_ratio':
{'a': [0.0466, 0.0733, 0.0956, 0.1084, 0.1262, 0.1387, 0.1452, 0.1492, 0.1538, 0.1580, 0.1623, 0.1641, 0.1664, 0.1684, 0.1701, 0.1717, 0.1743],
'b': [0.0466, 0.0733, 0.0956, 0.1084, 0.1395, 0.1578, 0.1718, 0.1758, 0.1856, 0.1897, 0.3146, 0.3157, 0.3171, 0.3183, 0.3193, 0.3201, 0.3218],
'c': [0.0466, 0.0733, 0.0956, 0.1084, 0.1411, 0.1645, 0.1851, 0.1966, 0.2056, 0.2449, 0.3146, 0.3157, 0.3171, 0.3183, 0.3193, 0.3619, 0.4056],
'd': [0.0466, 0.0733, 0.0956, 0.1084, 0.1411, 0.1645, 0.1851, 0.1966, 0.2056, 0.2449, 0.3146, 0.3157, 0.3171, 0.3183, 0.3193, 0.3619, 0.4056],
}
},
}
# The set of best management practices that we know about. The
# cluster_housing and no_till types are excluded because they do not
# actively retain water.
BMPS = set(['green_roof', 'porous_paving',
'rain_garden', 'infiltration_basin'])
# The set of "built" land uses
# These are the land uses to which the Pitt model will be applied at less than 2" of rain.
BUILT_TYPES = set(['developed_open', 'developed_low', 'developed_med',
'developed_high', 'cluster_housing'])
NON_NATURAL = set(['pasture', 'cultivated_crops', 'green_roof']) | set(['no_till']) | BMPS | BUILT_TYPES
# The set of pollutants that we are concerned with.
POLLUTANTS = set(['tn', 'tp', 'bod', 'tss'])
# Event mean concentrations (mg/l) by pollutant and NLCD type
# tn: Total Nitrogen, tp: Total Phosphorus,
# bod: Biochemical Oxygen Demand, tss: Total Suspended Solids
# Data from:
# (1) USEPA, 2011. User’s Guide: Spreadsheet Tool for Estimation of Pollutant Load (STEPL), Version 4.1, 57 pp.
# (2) Pennsylvania Department of Environmental Protection, 2006.
# Pennsylvania Stormwater Best Management Practices Manual. 685 pp.
# (3) USEPA, 2005. The National Stormwater Quality Database, Version 1.2: A Compilation and Analysis of NPDES
# Stormwater Monitoring Information. USEPA, Office of Water, Washington, DC, 447 pp.
# (4) New Hampshire Dept. of Environmental Services, 2010. Guidance for Estimating Pre- and Post-Development
# Stormwater Loads. (EMCs available at
# http://www.des.nh.gov/organization/divisions/water/stormwater/documents/wd-08-20a_apxd.pdf)
# (5) Washington State Dept. of Ecology, 2007. Efficiency of Urban Stormwater Best Management Practices:
# A Literature Review. Publication No. 07-03-009, 12 pp.
# (6) Keiser & Associates, 2003. Empirical Sediment and Phosphorus Nonpoint Source Model for the St. Joseph River
# Watershed. 48 pp.
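# Illustrative use (an assumption about how consumers apply these numbers, not
# part of this module's API): a pollutant load follows from EMC (mg/l) times
# runoff volume, e.g.
#   load_kg = POLLUTION_LOADS[nlcd_code]['tn'] * runoff_volume_litres * 1e-6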
POLLUTION_LOADS = {
11: {'tn': 0, 'tp': 0, 'bod': 0, 'tss': 0}, # Open water
12: {'tn': 0, 'tp': 0, 'bod': 0, 'tss': 0}, # Perennial Ice/Snow
21: {'tn': 2.26, 'tp': 0.32, 'bod': 5.7, 'tss': 99.8}, # Developed, Open Space
22: {'tn': 2.58, 'tp': 0.38, 'bod': 6.0, 'tss': 126}, # Developed, Low Intensity
23: {'tn': 3.62, 'tp': 0.38, 'bod': 9.0, 'tss': 134.7}, # Developed, Medium Intensity
24: {'tn': 3.54, 'tp': 0.35, 'bod': 9.9, 'tss': 163.7}, # Developed High Intensity
31: {'tn': 0.10, 'tp': 0.01, 'bod': 0.0, 'tss': 1}, # Barren Land (Rock/Sand/Clay)
32: {'tn': 0.10, 'tp': 0.01, 'bod': 0.0, 'tss': 1}, # Quarries/Strip Mines/Gravel Pits
41: {'tn': 1.05, 'tp': 0.13, 'bod': 0.5, 'tss': 45}, # Deciduous Forest
42: {'tn': 1.05, 'tp': 0.13, 'bod': 0.5, 'tss': 45}, # Evergreen Forest
43: {'tn': 1.05, 'tp': 0.13, 'bod': 0.5, 'tss': 45}, # Mixed Forest
51: {'tn': 0, 'tp': 0, 'bod': 0, 'tss': 0}, # Dwarf Scrub (Alaska Only, N/A)
52: {'tn': 0.19, 'tp': 0.15, 'bod': 0.5, 'tss': 39}, # Shrub/Scrub
71: {'tn': 2.30, 'tp': 0.22, 'bod': 0.5, 'tss': 48.8}, # Grassland/Herbaceous
72: {'tn': 0, 'tp': 0, 'bod': 0, 'tss': 0}, # Sedge/Herbaceous (Alaska Only, N/A)
73: {'tn': 0, 'tp': 0, 'bod': 0, 'tss': 0}, # Lichens (Alaska Only, N/A)
74: {'tn': 0, 'tp': 0, 'bod': 0, 'tss': 0}, # Moss (Alaska Only, N/A)
81: {'tn': 5.71, 'tp': 0.55, 'bod': 13, 'tss': 145}, # Pasture/Hay
82: {'tn': 7.70, 'tp': 1.07, 'bod': 12.45, 'tss': 216}, # Cultivated Crops
90: {'tn': 0.19, 'tp': 0.006, 'bod': 0.5, 'tss': 0}, # Woody Wetlands
91: {'tn': 0.19, 'tp': 0.006, 'bod': 0.5, 'tss': 0},
92: {'tn': 0.19, 'tp': 0.006, 'bod': 0.5, 'tss': 0},
93: {'tn': 0.19, 'tp': 0.006, 'bod': 0.5, 'tss': 0},
94: {'tn': 0.19, 'tp': 0.006, 'bod': 0.5, 'tss': 0},
95: {'tn': 0.19, 'tp': 0.006, 'bod': 0.5, 'tss': 0}, # Emergent Herbaceous Wetlands
96: {'tn': 0.19, 'tp': 0.006, 'bod': 0.5, 'tss': 0},
97: {'tn': 0.19, 'tp': 0.006, 'bod': 0.5, 'tss': 0},
98: {'tn': 0.19, 'tp': 0.006, 'bod': 0.5, 'tss': 0},
99: {'tn': 0.19, 'tp': 0.006, 'bod': 0.5, 'tss': 0}
}
| apache-2.0 | 2,153,564,880,117,881,900 | 71.096654 | 158 | 0.601526 | false |
cryptapus/electrum | electrum/transaction.py | 1 | 48273 | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2011 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Note: The deserialization code originally comes from ABE.
from typing import (Sequence, Union, NamedTuple, Tuple, Optional, Iterable,
Callable)
from .util import print_error, profiler
from . import ecc
from . import bitcoin
from .bitcoin import *
import struct
import traceback
import sys
#
# Workalike python implementation of Bitcoin's CDataStream class.
#
from .keystore import xpubkey_to_address, xpubkey_to_pubkey
NO_SIGNATURE = 'ff'
PARTIAL_TXN_HEADER_MAGIC = b'EPTF\xff'
class SerializationError(Exception):
""" Thrown when there's a problem deserializing or serializing """
class UnknownTxinType(Exception):
pass
class NotRecognizedRedeemScript(Exception):
pass
class MalformedBitcoinScript(Exception):
pass
TxOutput = NamedTuple("TxOutput", [('type', int), ('address', str), ('value', Union[int, str])])
# ^ value is str when the output is set to max: '!'
TxOutputForUI = NamedTuple("TxOutputForUI", [('address', str), ('value', int)])
TxOutputHwInfo = NamedTuple("TxOutputHwInfo", [('address_index', Tuple),
('sorted_xpubs', Iterable[str]),
('num_sig', Optional[int]),
('script_type', str)])
class BCDataStream(object):
def __init__(self):
self.input = None
self.read_cursor = 0
def clear(self):
self.input = None
self.read_cursor = 0
def write(self, _bytes): # Initialize with string of _bytes
if self.input is None:
self.input = bytearray(_bytes)
else:
self.input += bytearray(_bytes)
def read_string(self, encoding='ascii'):
# Strings are encoded depending on length:
# 0 to 252 : 1-byte-length followed by bytes (if any)
# 253 to 65,535 : byte'253' 2-byte-length followed by bytes
# 65,536 to 4,294,967,295 : byte '254' 4-byte-length followed by bytes
# ... and the Bitcoin client is coded to understand:
# greater than 4,294,967,295 : byte '255' 8-byte-length followed by bytes of string
# ... but I don't think it actually handles any strings that big.
if self.input is None:
raise SerializationError("call write(bytes) before trying to deserialize")
length = self.read_compact_size()
return self.read_bytes(length).decode(encoding)
def write_string(self, string, encoding='ascii'):
string = to_bytes(string, encoding)
# Length-encoded as with read-string
self.write_compact_size(len(string))
self.write(string)
def read_bytes(self, length):
try:
result = self.input[self.read_cursor:self.read_cursor+length]
self.read_cursor += length
return result
except IndexError:
raise SerializationError("attempt to read past end of buffer")
def can_read_more(self) -> bool:
if not self.input:
return False
return self.read_cursor < len(self.input)
    def read_boolean(self): return self.read_bytes(1)[0] != 0
def read_int16(self): return self._read_num('<h')
def read_uint16(self): return self._read_num('<H')
def read_int32(self): return self._read_num('<i')
def read_uint32(self): return self._read_num('<I')
def read_int64(self): return self._read_num('<q')
def read_uint64(self): return self._read_num('<Q')
    def write_boolean(self, val): return self.write(b'\x01' if val else b'\x00')
def write_int16(self, val): return self._write_num('<h', val)
def write_uint16(self, val): return self._write_num('<H', val)
def write_int32(self, val): return self._write_num('<i', val)
def write_uint32(self, val): return self._write_num('<I', val)
def write_int64(self, val): return self._write_num('<q', val)
def write_uint64(self, val): return self._write_num('<Q', val)
def read_compact_size(self):
try:
size = self.input[self.read_cursor]
self.read_cursor += 1
if size == 253:
size = self._read_num('<H')
elif size == 254:
size = self._read_num('<I')
elif size == 255:
size = self._read_num('<Q')
return size
except IndexError:
raise SerializationError("attempt to read past end of buffer")
def write_compact_size(self, size):
if size < 0:
raise SerializationError("attempt to write size < 0")
elif size < 253:
self.write(bytes([size]))
elif size < 2**16:
self.write(b'\xfd')
self._write_num('<H', size)
elif size < 2**32:
self.write(b'\xfe')
self._write_num('<I', size)
elif size < 2**64:
self.write(b'\xff')
self._write_num('<Q', size)
def _read_num(self, format):
try:
(i,) = struct.unpack_from(format, self.input, self.read_cursor)
self.read_cursor += struct.calcsize(format)
except Exception as e:
raise SerializationError(e)
return i
def _write_num(self, format, num):
s = struct.pack(format, num)
self.write(s)
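# Minimal usage sketch of BCDataStream (illustrative only, not exercised here):
#   vds = BCDataStream()
#   vds.write(bytes.fromhex('fd0001'))  # compact size: 0xfd prefix + 2-byte LE value
#   assert vds.read_compact_size() == 256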
# enum-like type
# From the Python Cookbook, downloaded from http://code.activestate.com/recipes/67107/
class EnumException(Exception):
pass
class Enumeration:
def __init__(self, name, enumList):
self.__doc__ = name
lookup = { }
reverseLookup = { }
i = 0
uniqueNames = [ ]
uniqueValues = [ ]
for x in enumList:
if isinstance(x, tuple):
x, i = x
            if not isinstance(x, str):
                raise EnumException("enum name is not a string: %r" % (x,))
            if not isinstance(i, int):
                raise EnumException("enum value is not an integer: %r" % (i,))
if x in uniqueNames:
raise EnumException("enum name is not unique: " + x)
if i in uniqueValues:
raise EnumException("enum value is not unique for " + x)
uniqueNames.append(x)
uniqueValues.append(i)
lookup[x] = i
reverseLookup[i] = x
i = i + 1
self.lookup = lookup
self.reverseLookup = reverseLookup
def __getattr__(self, attr):
if attr not in self.lookup:
raise AttributeError
return self.lookup[attr]
def whatis(self, value):
return self.reverseLookup[value]
# This function comes from bitcointools, bct-LICENSE.txt.
def long_hex(_bytes):
    return bh2u(_bytes)
# This function comes from bitcointools, bct-LICENSE.txt.
def short_hex(_bytes):
    t = bh2u(_bytes)
    if len(t) < 11:
        return t
    return t[0:4]+"..."+t[-4:]
opcodes = Enumeration("Opcodes", [
("OP_0", 0), ("OP_PUSHDATA1",76), "OP_PUSHDATA2", "OP_PUSHDATA4", "OP_1NEGATE", "OP_RESERVED",
"OP_1", "OP_2", "OP_3", "OP_4", "OP_5", "OP_6", "OP_7",
"OP_8", "OP_9", "OP_10", "OP_11", "OP_12", "OP_13", "OP_14", "OP_15", "OP_16",
"OP_NOP", "OP_VER", "OP_IF", "OP_NOTIF", "OP_VERIF", "OP_VERNOTIF", "OP_ELSE", "OP_ENDIF", "OP_VERIFY",
"OP_RETURN", "OP_TOALTSTACK", "OP_FROMALTSTACK", "OP_2DROP", "OP_2DUP", "OP_3DUP", "OP_2OVER", "OP_2ROT", "OP_2SWAP",
"OP_IFDUP", "OP_DEPTH", "OP_DROP", "OP_DUP", "OP_NIP", "OP_OVER", "OP_PICK", "OP_ROLL", "OP_ROT",
"OP_SWAP", "OP_TUCK", "OP_CAT", "OP_SUBSTR", "OP_LEFT", "OP_RIGHT", "OP_SIZE", "OP_INVERT", "OP_AND",
"OP_OR", "OP_XOR", "OP_EQUAL", "OP_EQUALVERIFY", "OP_RESERVED1", "OP_RESERVED2", "OP_1ADD", "OP_1SUB", "OP_2MUL",
"OP_2DIV", "OP_NEGATE", "OP_ABS", "OP_NOT", "OP_0NOTEQUAL", "OP_ADD", "OP_SUB", "OP_MUL", "OP_DIV",
"OP_MOD", "OP_LSHIFT", "OP_RSHIFT", "OP_BOOLAND", "OP_BOOLOR",
"OP_NUMEQUAL", "OP_NUMEQUALVERIFY", "OP_NUMNOTEQUAL", "OP_LESSTHAN",
"OP_GREATERTHAN", "OP_LESSTHANOREQUAL", "OP_GREATERTHANOREQUAL", "OP_MIN", "OP_MAX",
"OP_WITHIN", "OP_RIPEMD160", "OP_SHA1", "OP_SHA256", "OP_HASH160",
"OP_HASH256", "OP_CODESEPARATOR", "OP_CHECKSIG", "OP_CHECKSIGVERIFY", "OP_CHECKMULTISIG",
"OP_CHECKMULTISIGVERIFY",
("OP_NOP1", 0xB0),
("OP_CHECKLOCKTIMEVERIFY", 0xB1), ("OP_CHECKSEQUENCEVERIFY", 0xB2),
"OP_NOP4", "OP_NOP5", "OP_NOP6", "OP_NOP7", "OP_NOP8", "OP_NOP9", "OP_NOP10",
("OP_INVALIDOPCODE", 0xFF),
])
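# Sanity check of the auto-numbering above (it mirrors the Bitcoin Script
# opcode table): opcodes.OP_DUP == 0x76, opcodes.OP_HASH160 == 0xa9, and
# opcodes.whatis(0x76) == 'OP_DUP'.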
def script_GetOp(_bytes : bytes):
i = 0
while i < len(_bytes):
vch = None
opcode = _bytes[i]
i += 1
if opcode <= opcodes.OP_PUSHDATA4:
nSize = opcode
if opcode == opcodes.OP_PUSHDATA1:
try: nSize = _bytes[i]
except IndexError: raise MalformedBitcoinScript()
i += 1
elif opcode == opcodes.OP_PUSHDATA2:
try: (nSize,) = struct.unpack_from('<H', _bytes, i)
except struct.error: raise MalformedBitcoinScript()
i += 2
elif opcode == opcodes.OP_PUSHDATA4:
try: (nSize,) = struct.unpack_from('<I', _bytes, i)
except struct.error: raise MalformedBitcoinScript()
i += 4
vch = _bytes[i:i + nSize]
i += nSize
yield opcode, vch, i
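# For example, on a standard P2PKH scriptPubKey (76 a9 14 <20-byte hash> 88 ac)
# the generator above yields (0x76, None, 1), (0xa9, None, 2),
# (0x14, <hash bytes>, 23), (0x88, None, 24), (0xac, None, 25):
# push opcodes carry their payload in vch, every other opcode carries None.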
def script_GetOpName(opcode):
return (opcodes.whatis(opcode)).replace("OP_", "")
class OPPushDataGeneric:
    def __init__(self, pushlen: Callable = None):
if pushlen is not None:
self.check_data_len = pushlen
@classmethod
def check_data_len(cls, datalen: int) -> bool:
# Opcodes below OP_PUSHDATA4 all just push data onto stack, and are equivalent.
return opcodes.OP_PUSHDATA4 >= datalen >= 0
@classmethod
def is_instance(cls, item):
# accept objects that are instances of this class
# or other classes that are subclasses
return isinstance(item, cls) \
or (isinstance(item, type) and issubclass(item, cls))
OPPushDataPubkey = OPPushDataGeneric(lambda x: x in (33, 65))
# note that this does not include x_pubkeys !
def match_decoded(decoded, to_match):
if decoded is None:
return False
if len(decoded) != len(to_match):
return False
for i in range(len(decoded)):
to_match_item = to_match[i]
decoded_item = decoded[i]
if OPPushDataGeneric.is_instance(to_match_item) and to_match_item.check_data_len(decoded_item[0]):
continue
if to_match_item != decoded_item[0]:
return False
return True
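# Illustrative template match: a P2PKH output script decodes to five ops, so
# match_decoded(decoded, [opcodes.OP_DUP, opcodes.OP_HASH160,
#                         OPPushDataGeneric(lambda x: x == 20),
#                         opcodes.OP_EQUALVERIFY, opcodes.OP_CHECKSIG])
# returns True; any length or opcode mismatch returns False.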
def parse_sig(x_sig):
return [None if x == NO_SIGNATURE else x for x in x_sig]
def safe_parse_pubkey(x):
try:
return xpubkey_to_pubkey(x)
    except Exception:
return x
def parse_scriptSig(d, _bytes):
try:
decoded = [ x for x in script_GetOp(_bytes) ]
except Exception as e:
# coinbase transactions raise an exception
print_error("parse_scriptSig: cannot find address in input script (coinbase?)",
bh2u(_bytes))
return
match = [OPPushDataGeneric]
if match_decoded(decoded, match):
item = decoded[0][1]
if item[0] == 0:
# segwit embedded into p2sh
# witness version 0
d['address'] = bitcoin.hash160_to_p2sh(bitcoin.hash_160(item))
if len(item) == 22:
d['type'] = 'p2wpkh-p2sh'
elif len(item) == 34:
d['type'] = 'p2wsh-p2sh'
else:
print_error("unrecognized txin type", bh2u(item))
elif opcodes.OP_1 <= item[0] <= opcodes.OP_16:
# segwit embedded into p2sh
# witness version 1-16
pass
else:
# assert item[0] == 0x30
# pay-to-pubkey
d['type'] = 'p2pk'
d['address'] = "(pubkey)"
d['signatures'] = [bh2u(item)]
d['num_sig'] = 1
d['x_pubkeys'] = ["(pubkey)"]
d['pubkeys'] = ["(pubkey)"]
return
# p2pkh TxIn transactions push a signature
# (71-73 bytes) and then their public key
# (33 or 65 bytes) onto the stack:
match = [OPPushDataGeneric, OPPushDataGeneric]
if match_decoded(decoded, match):
sig = bh2u(decoded[0][1])
x_pubkey = bh2u(decoded[1][1])
try:
signatures = parse_sig([sig])
pubkey, address = xpubkey_to_address(x_pubkey)
        except Exception:
print_error("parse_scriptSig: cannot find address in input script (p2pkh?)",
bh2u(_bytes))
return
d['type'] = 'p2pkh'
d['signatures'] = signatures
d['x_pubkeys'] = [x_pubkey]
d['num_sig'] = 1
d['pubkeys'] = [pubkey]
d['address'] = address
return
# p2sh transaction, m of n
match = [opcodes.OP_0] + [OPPushDataGeneric] * (len(decoded) - 1)
if match_decoded(decoded, match):
x_sig = [bh2u(x[1]) for x in decoded[1:-1]]
redeem_script_unsanitized = decoded[-1][1] # for partial multisig txn, this has x_pubkeys
try:
m, n, x_pubkeys, pubkeys, redeem_script = parse_redeemScript_multisig(redeem_script_unsanitized)
except NotRecognizedRedeemScript:
print_error("parse_scriptSig: cannot find address in input script (p2sh?)",
bh2u(_bytes))
# we could still guess:
# d['address'] = hash160_to_p2sh(hash_160(decoded[-1][1]))
return
# write result in d
d['type'] = 'p2sh'
d['num_sig'] = m
d['signatures'] = parse_sig(x_sig)
d['x_pubkeys'] = x_pubkeys
d['pubkeys'] = pubkeys
d['redeem_script'] = redeem_script
d['address'] = hash160_to_p2sh(hash_160(bfh(redeem_script)))
return
# custom partial format for imported addresses
match = [opcodes.OP_INVALIDOPCODE, opcodes.OP_0, OPPushDataGeneric]
if match_decoded(decoded, match):
x_pubkey = bh2u(decoded[2][1])
pubkey, address = xpubkey_to_address(x_pubkey)
d['type'] = 'address'
d['address'] = address
d['num_sig'] = 1
d['x_pubkeys'] = [x_pubkey]
d['pubkeys'] = None # get_sorted_pubkeys will populate this
d['signatures'] = [None]
return
print_error("parse_scriptSig: cannot find address in input script (unknown)",
bh2u(_bytes))
def parse_redeemScript_multisig(redeem_script: bytes):
try:
dec2 = [ x for x in script_GetOp(redeem_script) ]
except MalformedBitcoinScript:
raise NotRecognizedRedeemScript()
try:
m = dec2[0][0] - opcodes.OP_1 + 1
n = dec2[-2][0] - opcodes.OP_1 + 1
except IndexError:
raise NotRecognizedRedeemScript()
op_m = opcodes.OP_1 + m - 1
op_n = opcodes.OP_1 + n - 1
match_multisig = [op_m] + [OPPushDataGeneric] * n + [op_n, opcodes.OP_CHECKMULTISIG]
if not match_decoded(dec2, match_multisig):
raise NotRecognizedRedeemScript()
x_pubkeys = [bh2u(x[1]) for x in dec2[1:-2]]
pubkeys = [safe_parse_pubkey(x) for x in x_pubkeys]
redeem_script2 = bfh(multisig_script(x_pubkeys, m))
if redeem_script2 != redeem_script:
raise NotRecognizedRedeemScript()
redeem_script_sanitized = multisig_script(pubkeys, m)
return m, n, x_pubkeys, pubkeys, redeem_script_sanitized
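# A multisig redeem script has the layout OP_m <pubkey_1> ... <pubkey_n> OP_n
# OP_CHECKMULTISIG; e.g. a 2-of-3 script starts with 0x52 (OP_2), pushes three
# pubkeys, then ends with 0x53 (OP_3) 0xae (OP_CHECKMULTISIG). The function
# above recovers (m, n) from the first and second-to-last opcodes.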
def get_address_from_output_script(_bytes: bytes, *, net=None) -> Tuple[int, str]:
try:
decoded = [x for x in script_GetOp(_bytes)]
except MalformedBitcoinScript:
decoded = None
# p2pk
match = [OPPushDataPubkey, opcodes.OP_CHECKSIG]
if match_decoded(decoded, match) and ecc.ECPubkey.is_pubkey_bytes(decoded[0][1]):
return TYPE_PUBKEY, bh2u(decoded[0][1])
# p2pkh
match = [opcodes.OP_DUP, opcodes.OP_HASH160, OPPushDataGeneric(lambda x: x == 20), opcodes.OP_EQUALVERIFY, opcodes.OP_CHECKSIG]
if match_decoded(decoded, match):
return TYPE_ADDRESS, hash160_to_p2pkh(decoded[2][1], net=net)
# p2sh
match = [opcodes.OP_HASH160, OPPushDataGeneric(lambda x: x == 20), opcodes.OP_EQUAL]
if match_decoded(decoded, match):
return TYPE_ADDRESS, hash160_to_p2sh(decoded[1][1], net=net)
# segwit address (version 0)
match = [opcodes.OP_0, OPPushDataGeneric(lambda x: x in (20, 32))]
if match_decoded(decoded, match):
return TYPE_ADDRESS, hash_to_segwit_addr(decoded[1][1], witver=0, net=net)
# segwit address (version 1-16)
future_witness_versions = list(range(opcodes.OP_1, opcodes.OP_16 + 1))
for witver, opcode in enumerate(future_witness_versions, start=1):
match = [opcode, OPPushDataGeneric(lambda x: 2 <= x <= 40)]
if match_decoded(decoded, match):
return TYPE_ADDRESS, hash_to_segwit_addr(decoded[1][1], witver=witver, net=net)
return TYPE_SCRIPT, bh2u(_bytes)
def parse_input(vds, full_parse: bool):
d = {}
prevout_hash = hash_encode(vds.read_bytes(32))
prevout_n = vds.read_uint32()
scriptSig = vds.read_bytes(vds.read_compact_size())
sequence = vds.read_uint32()
d['prevout_hash'] = prevout_hash
d['prevout_n'] = prevout_n
d['scriptSig'] = bh2u(scriptSig)
d['sequence'] = sequence
d['type'] = 'unknown' if prevout_hash != '00'*32 else 'coinbase'
d['address'] = None
d['num_sig'] = 0
if not full_parse:
return d
d['x_pubkeys'] = []
d['pubkeys'] = []
d['signatures'] = {}
if d['type'] != 'coinbase' and scriptSig:
try:
parse_scriptSig(d, scriptSig)
except BaseException:
traceback.print_exc(file=sys.stderr)
print_error('failed to parse scriptSig', bh2u(scriptSig))
return d
def construct_witness(items: Sequence[Union[str, int, bytes]]) -> str:
"""Constructs a witness from the given stack items."""
witness = var_int(len(items))
for item in items:
if type(item) is int:
item = bitcoin.script_num_to_hex(item)
elif type(item) is bytes:
item = bh2u(item)
witness += bitcoin.witness_push(item)
return witness
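# For a typical p2wpkh spend the stack is [signature, pubkey], so the witness
# above starts with var_int(2) followed by each item length-prefixed via
# bitcoin.witness_push; integer items are first converted with
# bitcoin.script_num_to_hex.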
def parse_witness(vds, txin, full_parse: bool):
n = vds.read_compact_size()
if n == 0:
txin['witness'] = '00'
return
if n == 0xffffffff:
txin['value'] = vds.read_uint64()
txin['witness_version'] = vds.read_uint16()
n = vds.read_compact_size()
# now 'n' is the number of items in the witness
w = list(bh2u(vds.read_bytes(vds.read_compact_size())) for i in range(n))
txin['witness'] = construct_witness(w)
if not full_parse:
return
try:
if txin.get('witness_version', 0) != 0:
raise UnknownTxinType()
if txin['type'] == 'coinbase':
pass
elif txin['type'] == 'address':
pass
elif txin['type'] == 'p2wsh-p2sh' or n > 2:
witness_script_unsanitized = w[-1] # for partial multisig txn, this has x_pubkeys
try:
m, n, x_pubkeys, pubkeys, witness_script = parse_redeemScript_multisig(bfh(witness_script_unsanitized))
except NotRecognizedRedeemScript:
raise UnknownTxinType()
txin['signatures'] = parse_sig(w[1:-1])
txin['num_sig'] = m
txin['x_pubkeys'] = x_pubkeys
txin['pubkeys'] = pubkeys
txin['witness_script'] = witness_script
if not txin.get('scriptSig'): # native segwit script
txin['type'] = 'p2wsh'
txin['address'] = bitcoin.script_to_p2wsh(witness_script)
elif txin['type'] == 'p2wpkh-p2sh' or n == 2:
txin['num_sig'] = 1
txin['x_pubkeys'] = [w[1]]
txin['pubkeys'] = [safe_parse_pubkey(w[1])]
txin['signatures'] = parse_sig([w[0]])
if not txin.get('scriptSig'): # native segwit script
txin['type'] = 'p2wpkh'
txin['address'] = bitcoin.public_key_to_p2wpkh(bfh(txin['pubkeys'][0]))
else:
raise UnknownTxinType()
except UnknownTxinType:
txin['type'] = 'unknown'
except BaseException:
txin['type'] = 'unknown'
traceback.print_exc(file=sys.stderr)
print_error('failed to parse witness', txin.get('witness'))
def parse_output(vds, i):
d = {}
d['value'] = vds.read_int64()
if d['value'] > TOTAL_COIN_SUPPLY_LIMIT_IN_BTC * COIN:
raise SerializationError('invalid output amount (too large)')
if d['value'] < 0:
raise SerializationError('invalid output amount (negative)')
scriptPubKey = vds.read_bytes(vds.read_compact_size())
d['type'], d['address'] = get_address_from_output_script(scriptPubKey)
d['scriptPubKey'] = bh2u(scriptPubKey)
d['prevout_n'] = i
return d
def deserialize(raw: str, force_full_parse=False) -> dict:
raw_bytes = bfh(raw)
d = {}
if raw_bytes[:5] == PARTIAL_TXN_HEADER_MAGIC:
d['partial'] = is_partial = True
partial_format_version = raw_bytes[5]
if partial_format_version != 0:
raise SerializationError('unknown tx partial serialization format version: {}'
.format(partial_format_version))
raw_bytes = raw_bytes[6:]
else:
d['partial'] = is_partial = False
full_parse = force_full_parse or is_partial
vds = BCDataStream()
vds.write(raw_bytes)
d['version'] = vds.read_int32()
n_vin = vds.read_compact_size()
is_segwit = (n_vin == 0)
if is_segwit:
marker = vds.read_bytes(1)
if marker != b'\x01':
raise ValueError('invalid txn marker byte: {}'.format(marker))
n_vin = vds.read_compact_size()
d['segwit_ser'] = is_segwit
d['inputs'] = [parse_input(vds, full_parse=full_parse) for i in range(n_vin)]
n_vout = vds.read_compact_size()
d['outputs'] = [parse_output(vds, i) for i in range(n_vout)]
if is_segwit:
for i in range(n_vin):
txin = d['inputs'][i]
parse_witness(vds, txin, full_parse=full_parse)
d['lockTime'] = vds.read_uint32()
if vds.can_read_more():
raise SerializationError('extra junk at the end')
return d
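# Note on the segwit detection in deserialize(): BIP-144 serialization puts a
# 0x00 marker byte and a 0x01 flag byte where the input count would normally
# be, so reading a compact size of zero at that position implies segwit.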
# pay & redeem scripts
def multisig_script(public_keys: Sequence[str], m: int) -> str:
n = len(public_keys)
assert n <= 15
assert m <= n
op_m = format(opcodes.OP_1 + m - 1, 'x')
op_n = format(opcodes.OP_1 + n - 1, 'x')
keylist = [op_push(len(k)//2) + k for k in public_keys]
return op_m + ''.join(keylist) + op_n + 'ae'
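# Worked example for multisig_script: with m=2 and three 33-byte compressed
# pubkeys, op_m='52', op_n='53', each key is serialized as '21' + <key hex>
# (0x21 = push 33 bytes), and the script ends with 'ae' (OP_CHECKMULTISIG).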
class Transaction:
def __str__(self):
if self.raw is None:
self.raw = self.serialize()
return self.raw
def __init__(self, raw):
if raw is None:
self.raw = None
elif isinstance(raw, str):
self.raw = raw.strip() if raw else None
elif isinstance(raw, dict):
self.raw = raw['hex']
else:
raise Exception("cannot initialize transaction", raw)
self._inputs = None
self._outputs = None # type: List[TxOutput]
self.locktime = 0
self.version = 1
# by default we assume this is a partial txn;
# this value will get properly set when deserializing
self.is_partial_originally = True
self._segwit_ser = None # None means "don't know"
def update(self, raw):
self.raw = raw
self._inputs = None
self.deserialize()
def inputs(self):
if self._inputs is None:
self.deserialize()
return self._inputs
def outputs(self) -> List[TxOutput]:
if self._outputs is None:
self.deserialize()
return self._outputs
@classmethod
def get_sorted_pubkeys(self, txin):
# sort pubkeys and x_pubkeys, using the order of pubkeys
if txin['type'] == 'coinbase':
return [], []
x_pubkeys = txin['x_pubkeys']
pubkeys = txin.get('pubkeys')
if pubkeys is None:
pubkeys = [xpubkey_to_pubkey(x) for x in x_pubkeys]
pubkeys, x_pubkeys = zip(*sorted(zip(pubkeys, x_pubkeys)))
txin['pubkeys'] = pubkeys = list(pubkeys)
txin['x_pubkeys'] = x_pubkeys = list(x_pubkeys)
return pubkeys, x_pubkeys
def update_signatures(self, signatures: Sequence[str]):
"""Add new signatures to a transaction
`signatures` is expected to be a list of sigs with signatures[i]
intended for self._inputs[i].
        This is used by the Trezor, KeepKey and Safe-T plugins.
"""
if self.is_complete():
return
if len(self.inputs()) != len(signatures):
raise Exception('expected {} signatures; got {}'.format(len(self.inputs()), len(signatures)))
for i, txin in enumerate(self.inputs()):
pubkeys, x_pubkeys = self.get_sorted_pubkeys(txin)
sig = signatures[i]
if sig in txin.get('signatures'):
continue
pre_hash = Hash(bfh(self.serialize_preimage(i)))
sig_string = ecc.sig_string_from_der_sig(bfh(sig[:-2]))
for recid in range(4):
try:
public_key = ecc.ECPubkey.from_sig_string(sig_string, recid, pre_hash)
except ecc.InvalidECPointException:
# the point might not be on the curve for some recid values
continue
pubkey_hex = public_key.get_public_key_hex(compressed=True)
if pubkey_hex in pubkeys:
try:
public_key.verify_message_hash(sig_string, pre_hash)
except Exception:
traceback.print_exc(file=sys.stderr)
continue
j = pubkeys.index(pubkey_hex)
print_error("adding sig", i, j, pubkey_hex, sig)
self.add_signature_to_txin(i, j, sig)
break
# redo raw
self.raw = self.serialize()
def add_signature_to_txin(self, i, signingPos, sig):
txin = self._inputs[i]
txin['signatures'][signingPos] = sig
txin['scriptSig'] = None # force re-serialization
txin['witness'] = None # force re-serialization
self.raw = None
def deserialize(self, force_full_parse=False):
if self.raw is None:
return
#self.raw = self.serialize()
if self._inputs is not None:
return
d = deserialize(self.raw, force_full_parse)
self._inputs = d['inputs']
self._outputs = [TxOutput(x['type'], x['address'], x['value']) for x in d['outputs']]
self.locktime = d['lockTime']
self.version = d['version']
self.is_partial_originally = d['partial']
self._segwit_ser = d['segwit_ser']
return d
@classmethod
def from_io(klass, inputs, outputs, locktime=0):
self = klass(None)
self._inputs = inputs
self._outputs = outputs
self.locktime = locktime
self.BIP69_sort()
return self
@classmethod
def pay_script(self, output_type, addr):
if output_type == TYPE_SCRIPT:
return addr
elif output_type == TYPE_ADDRESS:
return bitcoin.address_to_script(addr)
elif output_type == TYPE_PUBKEY:
return bitcoin.public_key_to_p2pk_script(addr)
else:
raise TypeError('Unknown output type')
@classmethod
def estimate_pubkey_size_from_x_pubkey(cls, x_pubkey):
try:
if x_pubkey[0:2] in ['02', '03']: # compressed pubkey
return 0x21
elif x_pubkey[0:2] == '04': # uncompressed pubkey
return 0x41
elif x_pubkey[0:2] == 'ff': # bip32 extended pubkey
return 0x21
elif x_pubkey[0:2] == 'fe': # old electrum extended pubkey
return 0x41
except Exception as e:
pass
return 0x21 # just guess it is compressed
@classmethod
def estimate_pubkey_size_for_txin(cls, txin):
pubkeys = txin.get('pubkeys', [])
x_pubkeys = txin.get('x_pubkeys', [])
if pubkeys and len(pubkeys) > 0:
return cls.estimate_pubkey_size_from_x_pubkey(pubkeys[0])
elif x_pubkeys and len(x_pubkeys) > 0:
return cls.estimate_pubkey_size_from_x_pubkey(x_pubkeys[0])
else:
return 0x21 # just guess it is compressed
@classmethod
def get_siglist(self, txin, estimate_size=False):
# if we have enough signatures, we use the actual pubkeys
# otherwise, use extended pubkeys (with bip32 derivation)
if txin['type'] == 'coinbase':
return [], []
num_sig = txin.get('num_sig', 1)
if estimate_size:
pubkey_size = self.estimate_pubkey_size_for_txin(txin)
pk_list = ["00" * pubkey_size] * len(txin.get('x_pubkeys', [None]))
# we assume that signature will be 0x48 bytes long
sig_list = [ "00" * 0x48 ] * num_sig
else:
pubkeys, x_pubkeys = self.get_sorted_pubkeys(txin)
x_signatures = txin['signatures']
signatures = list(filter(None, x_signatures))
is_complete = len(signatures) == num_sig
if is_complete:
pk_list = pubkeys
sig_list = signatures
else:
pk_list = x_pubkeys
sig_list = [sig if sig else NO_SIGNATURE for sig in x_signatures]
return pk_list, sig_list
@classmethod
def serialize_witness(self, txin, estimate_size=False):
_type = txin['type']
if not self.is_segwit_input(txin) and not self.is_input_value_needed(txin):
return '00'
if _type == 'coinbase':
return txin['witness']
witness = txin.get('witness', None)
if witness is None or estimate_size:
if _type == 'address' and estimate_size:
_type = self.guess_txintype_from_address(txin['address'])
pubkeys, sig_list = self.get_siglist(txin, estimate_size)
if _type in ['p2wpkh', 'p2wpkh-p2sh']:
witness = construct_witness([sig_list[0], pubkeys[0]])
elif _type in ['p2wsh', 'p2wsh-p2sh']:
witness_script = multisig_script(pubkeys, txin['num_sig'])
witness = construct_witness([0] + sig_list + [witness_script])
else:
witness = txin.get('witness', '00')
if self.is_txin_complete(txin) or estimate_size:
partial_format_witness_prefix = ''
else:
input_value = int_to_hex(txin['value'], 8)
witness_version = int_to_hex(txin.get('witness_version', 0), 2)
partial_format_witness_prefix = var_int(0xffffffff) + input_value + witness_version
return partial_format_witness_prefix + witness
@classmethod
def is_segwit_input(cls, txin, guess_for_address=False):
_type = txin['type']
if _type == 'address' and guess_for_address:
_type = cls.guess_txintype_from_address(txin['address'])
has_nonzero_witness = txin.get('witness', '00') not in ('00', None)
return cls.is_segwit_inputtype(_type) or has_nonzero_witness
@classmethod
def is_segwit_inputtype(cls, txin_type):
return txin_type in ('p2wpkh', 'p2wpkh-p2sh', 'p2wsh', 'p2wsh-p2sh')
@classmethod
def is_input_value_needed(cls, txin):
return cls.is_segwit_input(txin) or txin['type'] == 'address'
@classmethod
def guess_txintype_from_address(cls, addr):
# It's not possible to tell the script type in general
# just from an address.
# - "1" addresses are of course p2pkh
# - "3" addresses are p2sh but we don't know the redeem script..
# - "bc1" addresses (if they are 42-long) are p2wpkh
# - "bc1" addresses that are 62-long are p2wsh but we don't know the script..
# If we don't know the script, we _guess_ it is pubkeyhash.
# As this method is used e.g. for tx size estimation,
# the estimation will not be precise.
witver, witprog = segwit_addr.decode(constants.net.SEGWIT_HRP, addr)
if witprog is not None:
return 'p2wpkh'
addrtype, hash_160 = b58_address_to_hash160(addr)
if addrtype == constants.net.ADDRTYPE_P2PKH:
return 'p2pkh'
elif addrtype == constants.net.ADDRTYPE_P2SH:
return 'p2wpkh-p2sh'
@classmethod
def input_script(self, txin, estimate_size=False):
_type = txin['type']
if _type == 'coinbase':
return txin['scriptSig']
# If there is already a saved scriptSig, just return that.
# This allows manual creation of txins of any custom type.
# However, if the txin is not complete, we might have some garbage
        # saved from our partial txn ser format, so we re-serialize in that case.
script_sig = txin.get('scriptSig', None)
if script_sig is not None and self.is_txin_complete(txin):
return script_sig
pubkeys, sig_list = self.get_siglist(txin, estimate_size)
script = ''.join(push_script(x) for x in sig_list)
if _type == 'address' and estimate_size:
_type = self.guess_txintype_from_address(txin['address'])
if _type == 'p2pk':
pass
elif _type == 'p2sh':
# put op_0 before script
script = '00' + script
redeem_script = multisig_script(pubkeys, txin['num_sig'])
script += push_script(redeem_script)
elif _type == 'p2pkh':
script += push_script(pubkeys[0])
elif _type in ['p2wpkh', 'p2wsh']:
return ''
elif _type == 'p2wpkh-p2sh':
pubkey = safe_parse_pubkey(pubkeys[0])
scriptSig = bitcoin.p2wpkh_nested_script(pubkey)
return push_script(scriptSig)
elif _type == 'p2wsh-p2sh':
if estimate_size:
witness_script = ''
else:
witness_script = self.get_preimage_script(txin)
scriptSig = bitcoin.p2wsh_nested_script(witness_script)
return push_script(scriptSig)
elif _type == 'address':
return 'ff00' + push_script(pubkeys[0]) # fd extended pubkey
elif _type == 'unknown':
return txin['scriptSig']
return script
@classmethod
def is_txin_complete(cls, txin):
if txin['type'] == 'coinbase':
return True
num_sig = txin.get('num_sig', 1)
if num_sig == 0:
return True
x_signatures = txin['signatures']
signatures = list(filter(None, x_signatures))
return len(signatures) == num_sig
@classmethod
def get_preimage_script(self, txin):
preimage_script = txin.get('preimage_script', None)
if preimage_script is not None:
return preimage_script
pubkeys, x_pubkeys = self.get_sorted_pubkeys(txin)
if txin['type'] == 'p2pkh':
return bitcoin.address_to_script(txin['address'])
elif txin['type'] in ['p2sh', 'p2wsh', 'p2wsh-p2sh']:
return multisig_script(pubkeys, txin['num_sig'])
elif txin['type'] in ['p2wpkh', 'p2wpkh-p2sh']:
pubkey = pubkeys[0]
pkh = bh2u(bitcoin.hash_160(bfh(pubkey)))
return '76a9' + push_script(pkh) + '88ac'
elif txin['type'] == 'p2pk':
pubkey = pubkeys[0]
return bitcoin.public_key_to_p2pk_script(pubkey)
else:
raise TypeError('Unknown txin type', txin['type'])
@classmethod
def serialize_outpoint(self, txin):
return bh2u(bfh(txin['prevout_hash'])[::-1]) + int_to_hex(txin['prevout_n'], 4)
@classmethod
def get_outpoint_from_txin(cls, txin):
if txin['type'] == 'coinbase':
return None
prevout_hash = txin['prevout_hash']
prevout_n = txin['prevout_n']
return prevout_hash + ':%d' % prevout_n
@classmethod
def serialize_input(self, txin, script):
# Prev hash and index
s = self.serialize_outpoint(txin)
# Script length, script, sequence
s += var_int(len(script)//2)
s += script
s += int_to_hex(txin.get('sequence', 0xffffffff - 1), 4)
return s
def set_rbf(self, rbf):
nSequence = 0xffffffff - (2 if rbf else 1)
for txin in self.inputs():
txin['sequence'] = nSequence
def BIP69_sort(self, inputs=True, outputs=True):
        if inputs:
            self._inputs.sort(key=lambda i: (i['prevout_hash'], i['prevout_n']))
        if outputs:
            self._outputs.sort(key=lambda o: (o[2], self.pay_script(o[0], o[1])))
def serialize_output(self, output):
output_type, addr, amount = output
s = int_to_hex(amount, 8)
script = self.pay_script(output_type, addr)
s += var_int(len(script)//2)
s += script
return s
def serialize_preimage(self, i):
nVersion = int_to_hex(self.version, 4)
nHashType = int_to_hex(1, 4)
nLocktime = int_to_hex(self.locktime, 4)
inputs = self.inputs()
outputs = self.outputs()
txin = inputs[i]
# TODO: py3 hex
if self.is_segwit_input(txin):
hashPrevouts = bh2u(Hash(bfh(''.join(self.serialize_outpoint(txin) for txin in inputs))))
hashSequence = bh2u(Hash(bfh(''.join(int_to_hex(txin.get('sequence', 0xffffffff - 1), 4) for txin in inputs))))
hashOutputs = bh2u(Hash(bfh(''.join(self.serialize_output(o) for o in outputs))))
outpoint = self.serialize_outpoint(txin)
preimage_script = self.get_preimage_script(txin)
scriptCode = var_int(len(preimage_script) // 2) + preimage_script
amount = int_to_hex(txin['value'], 8)
nSequence = int_to_hex(txin.get('sequence', 0xffffffff - 1), 4)
preimage = nVersion + hashPrevouts + hashSequence + outpoint + scriptCode + amount + nSequence + hashOutputs + nLocktime + nHashType
else:
txins = var_int(len(inputs)) + ''.join(self.serialize_input(txin, self.get_preimage_script(txin) if i==k else '') for k, txin in enumerate(inputs))
txouts = var_int(len(outputs)) + ''.join(self.serialize_output(o) for o in outputs)
preimage = nVersion + txins + txouts + nLocktime + nHashType
return preimage
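    # For segwit inputs the preimage above follows the BIP-143 layout:
    # nVersion | hashPrevouts | hashSequence | outpoint | scriptCode | amount
    # | nSequence | hashOutputs | nLocktime | nHashType (all little-endian,
    # with nHashType fixed to 1, i.e. SIGHASH_ALL).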
def is_segwit(self, guess_for_address=False):
if not self.is_partial_originally:
return self._segwit_ser
return any(self.is_segwit_input(x, guess_for_address=guess_for_address) for x in self.inputs())
def serialize(self, estimate_size=False, witness=True):
network_ser = self.serialize_to_network(estimate_size, witness)
if estimate_size:
return network_ser
if self.is_partial_originally and not self.is_complete():
partial_format_version = '00'
return bh2u(PARTIAL_TXN_HEADER_MAGIC) + partial_format_version + network_ser
else:
return network_ser
def serialize_to_network(self, estimate_size=False, witness=True):
nVersion = int_to_hex(self.version, 4)
nLocktime = int_to_hex(self.locktime, 4)
inputs = self.inputs()
outputs = self.outputs()
txins = var_int(len(inputs)) + ''.join(self.serialize_input(txin, self.input_script(txin, estimate_size)) for txin in inputs)
txouts = var_int(len(outputs)) + ''.join(self.serialize_output(o) for o in outputs)
use_segwit_ser_for_estimate_size = estimate_size and self.is_segwit(guess_for_address=True)
use_segwit_ser_for_actual_use = not estimate_size and \
(self.is_segwit() or any(txin['type'] == 'address' for txin in inputs))
use_segwit_ser = use_segwit_ser_for_estimate_size or use_segwit_ser_for_actual_use
if witness and use_segwit_ser:
marker = '00'
flag = '01'
witness = ''.join(self.serialize_witness(x, estimate_size) for x in inputs)
return nVersion + marker + flag + txins + txouts + witness + nLocktime
else:
return nVersion + txins + txouts + nLocktime
def txid(self):
self.deserialize()
all_segwit = all(self.is_segwit_input(x) for x in self.inputs())
if not all_segwit and not self.is_complete():
return None
ser = self.serialize_to_network(witness=False)
return bh2u(Hash(bfh(ser))[::-1])
def wtxid(self):
self.deserialize()
if not self.is_complete():
return None
ser = self.serialize_to_network(witness=True)
return bh2u(Hash(bfh(ser))[::-1])
def add_inputs(self, inputs):
self._inputs.extend(inputs)
self.raw = None
self.BIP69_sort(outputs=False)
def add_outputs(self, outputs):
self._outputs.extend(outputs)
self.raw = None
self.BIP69_sort(inputs=False)
def input_value(self):
return sum(x['value'] for x in self.inputs())
def output_value(self):
return sum(val for tp, addr, val in self.outputs())
def get_fee(self):
return self.input_value() - self.output_value()
def is_final(self):
        return not any(x.get('sequence', 0xffffffff - 1) < 0xffffffff - 1
                       for x in self.inputs())
def estimated_size(self):
"""Return an estimated virtual tx size in vbytes.
BIP-0141 defines 'Virtual transaction size' to be weight/4 rounded up.
This definition is only for humans, and has little meaning otherwise.
If we wanted sub-byte precision, fee calculation should use transaction
weights, but for simplicity we approximate that with (virtual_size)x4
"""
weight = self.estimated_weight()
return self.virtual_size_from_weight(weight)
@classmethod
def estimated_input_weight(cls, txin, is_segwit_tx):
'''Return an estimate of serialized input weight in weight units.'''
script = cls.input_script(txin, True)
input_size = len(cls.serialize_input(txin, script)) // 2
if cls.is_segwit_input(txin, guess_for_address=True):
witness_size = len(cls.serialize_witness(txin, True)) // 2
else:
witness_size = 1 if is_segwit_tx else 0
return 4 * input_size + witness_size
@classmethod
def estimated_output_size(cls, address):
"""Return an estimate of serialized output size in bytes."""
script = bitcoin.address_to_script(address)
# 8 byte value + 1 byte script len + script
return 9 + len(script) // 2
@classmethod
def virtual_size_from_weight(cls, weight):
return weight // 4 + (weight % 4 > 0)
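    # Ceiling division by 4, e.g. weight 561 -> 561 // 4 + 1 = 141 vbytes,
    # while weight 560 -> exactly 140 vbytes.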
def estimated_total_size(self):
"""Return an estimated total transaction size in bytes."""
return len(self.serialize(True)) // 2 if not self.is_complete() or self.raw is None else len(self.raw) // 2 # ASCII hex string
def estimated_witness_size(self):
"""Return an estimate of witness size in bytes."""
estimate = not self.is_complete()
if not self.is_segwit(guess_for_address=estimate):
return 0
inputs = self.inputs()
witness = ''.join(self.serialize_witness(x, estimate) for x in inputs)
witness_size = len(witness) // 2 + 2 # include marker and flag
return witness_size
def estimated_base_size(self):
"""Return an estimated base transaction size in bytes."""
return self.estimated_total_size() - self.estimated_witness_size()
def estimated_weight(self):
"""Return an estimate of transaction weight."""
total_tx_size = self.estimated_total_size()
base_tx_size = self.estimated_base_size()
return 3 * base_tx_size + total_tx_size
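    # Equivalent to the BIP-141 definition weight = 4*base + witness, since
    # total = base + witness, so 3*base + total = 4*base + witness.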
def signature_count(self):
r = 0
s = 0
for txin in self.inputs():
if txin['type'] == 'coinbase':
continue
signatures = list(filter(None, txin.get('signatures',[])))
s += len(signatures)
r += txin.get('num_sig',-1)
return s, r
def is_complete(self):
if not self.is_partial_originally:
return True
s, r = self.signature_count()
return r == s
def sign(self, keypairs) -> None:
# keypairs: (x_)pubkey -> secret_bytes
for i, txin in enumerate(self.inputs()):
pubkeys, x_pubkeys = self.get_sorted_pubkeys(txin)
for j, (pubkey, x_pubkey) in enumerate(zip(pubkeys, x_pubkeys)):
if self.is_txin_complete(txin):
break
if pubkey in keypairs:
_pubkey = pubkey
elif x_pubkey in keypairs:
_pubkey = x_pubkey
else:
continue
print_error("adding signature for", _pubkey)
sec, compressed = keypairs.get(_pubkey)
sig = self.sign_txin(i, sec)
self.add_signature_to_txin(i, j, sig)
print_error("is_complete", self.is_complete())
self.raw = self.serialize()
def sign_txin(self, txin_index, privkey_bytes) -> str:
pre_hash = Hash(bfh(self.serialize_preimage(txin_index)))
privkey = ecc.ECPrivkey(privkey_bytes)
sig = privkey.sign_transaction(pre_hash)
sig = bh2u(sig) + '01'
return sig
def get_outputs_for_UI(self) -> Sequence[TxOutputForUI]:
outputs = []
for o in self.outputs():
if o.type == TYPE_ADDRESS:
addr = o.address
elif o.type == TYPE_PUBKEY:
addr = 'PUBKEY ' + o.address
else:
addr = 'SCRIPT ' + o.address
outputs.append(TxOutputForUI(addr, o.value)) # consider using yield
return outputs
def has_address(self, addr: str) -> bool:
return (addr in (o.address for o in self.outputs())) \
or (addr in (txin.get("address") for txin in self.inputs()))
def as_dict(self):
if self.raw is None:
self.raw = self.serialize()
self.deserialize()
out = {
'hex': self.raw,
'complete': self.is_complete(),
'final': self.is_final(),
}
return out
def tx_from_str(txt):
"json or raw hexadecimal"
import json
txt = txt.strip()
if not txt:
raise ValueError("empty string")
try:
bfh(txt)
is_hex = True
    except Exception:
is_hex = False
if is_hex:
return txt
tx_dict = json.loads(str(txt))
assert "hex" in tx_dict.keys()
return tx_dict["hex"]
| mit | -2,018,422,330,122,193,000 | 37.010236 | 159 | 0.580904 | false |
opensistemas-hub/osbrain | osbrain/tests/test_agent_req_rep.py | 1 | 2385 | """
Test file for REQ-REP communication pattern.
"""
import time
from osbrain import run_agent
from osbrain import run_logger
from osbrain.helper import logger_received
from osbrain.helper import sync_agent_logger
from .common import echo_handler
def test_return(nsproxy):
"""
REQ-REP pattern using a handler that returns a value.
"""
a0 = run_agent('a0')
a1 = run_agent('a1')
addr = a0.bind('REP', handler=echo_handler)
a1.connect(addr, alias='request')
response = a1.send_recv('request', 'Hello world')
assert response == 'Hello world'
def test_lambda(nsproxy):
"""
REQ-REP pattern using a lambda handler.
"""
a0 = run_agent('a0')
a1 = run_agent('a1')
addr = a0.bind('REP', handler=lambda agent, message: 'x' + message)
a1.connect(addr, alias='request')
response = a1.send_recv('request', 'Hello world')
assert response == 'xHello world'
def test_yield(nsproxy):
"""
REQ-REP pattern using a handler that yields a value. This is useful in
order to generate an early reply.
"""
def reply_early(agent, message):
yield message
time.sleep(agent.delay)
agent.delay = 'ok'
delay = 1
a0 = run_agent('a0')
a0.set_attr(delay=delay)
a1 = run_agent('a1')
addr = a0.bind('REP', handler=reply_early)
a1.connect(addr, alias='request')
t0 = time.time()
response = a1.send_recv('request', 'Working!')
assert time.time() - t0 < delay / 2.0
assert response == 'Working!'
assert a0.get_attr('delay') == delay
# Sleep so that the replier has had time to update
time.sleep(delay + 0.5)
assert a0.get_attr('delay') == 'ok'
def test_multiple_yield(nsproxy):
"""
A replier must only make use of yield once.
"""
def yield_twice(agent, message):
yield message
yield 'Derp'
logger = run_logger('logger')
a0 = run_agent('a0')
a1 = run_agent('a1')
a0.set_logger(logger)
sync_agent_logger(agent=a0, logger=logger)
addr = a0.bind('REP', handler=yield_twice)
a1.connect(addr, alias='request')
response = a1.send_recv('request', 'Hello world!')
# Response is received successfully
assert response == 'Hello world!'
# Replier should crash
assert logger_received(
logger, log_name='log_history_error', message='yielded more than once'
)
| apache-2.0 | -6,181,991,768,177,616,000 | 25.208791 | 78 | 0.633124 | false |
dzanotelli/python-dryrun | drypy/docs/conf.py | 1 | 5092 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# drypy documentation build configuration file, created by
# sphinx-quickstart on Mon May 29 12:24:58 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../..'))
import drypy
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'drypy'
copyright = '2017-2020, Daniele Zanotelli'
author = 'Daniele Zanotelli'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = drypy.get_version(short=True)
# The full version, including alpha/beta/rc tags.
release = drypy.get_version()
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'drypydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'drypy.tex', 'drypy Documentation',
'Daniele Zanotelli', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'drypy', 'drypy Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'drypy', 'drypy Documentation',
author, 'drypy', 'One line description of project.',
'Miscellaneous'),
]
# intersphinx configuration: refer to the Python 3 standard library.
intersphinx_mapping = {'python': ('https://docs.python.org/3', None)}
# cache limit (days) - set to -1 for unlimited
intersphinx_cache_limit = 0
| mit | 8,664,584,717,016,444,000 | 29.130178 | 79 | 0.675962 | false |
luotao1/Paddle | python/paddle/fluid/tests/unittests/test_conv3d_op.py | 1 | 32042 | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
import paddle.fluid.core as core
from op_test import OpTest
import paddle.fluid as fluid
def conv3d_forward_naive(input,
filter,
group,
conv_param,
padding_algorithm='EXPLICIT',
data_format="NCDHW"):
    if padding_algorithm not in ["SAME", "VALID", "EXPLICIT"]:
        raise ValueError("Unknown Attr(padding_algorithm): '%s'. "
                         "It can only be 'SAME', 'VALID' or 'EXPLICIT'." %
                         str(padding_algorithm))
if data_format not in ["NCDHW", "NDHWC"]:
raise ValueError("Unknown Attr(data_format): '%s' ."
"It can only be 'NCDHW' or 'NDHWC'." %
str(data_format))
channel_last = (data_format == "NDHWC")
if channel_last:
input = np.transpose(input, [0, 4, 1, 2, 3])
in_n, in_c, in_d, in_h, in_w = input.shape
f_n, f_c, f_d, f_h, f_w = filter.shape
out_n = in_n
out_c = f_n
assert f_c * group == in_c
assert np.mod(out_c, group) == 0
sub_out_c = out_c // group
sub_f_n = f_n // group
stride, pad, dilation = conv_param['stride'], conv_param['pad'], conv_param[
'dilations']
# update pad and dilation
def _get_padding_with_SAME(input_shape, pool_size, pool_stride):
padding = []
for input_size, filter_size, stride_size in zip(input_shape, pool_size,
pool_stride):
out_size = int((input_size + stride_size - 1) / stride_size)
pad_sum = np.max((
(out_size - 1) * stride_size + filter_size - input_size, 0))
pad_0 = int(pad_sum / 2)
pad_1 = int(pad_sum - pad_0)
padding.append(pad_0)
padding.append(pad_1)
return padding
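    # Worked example for the SAME helper above: input_size=5, filter_size=3,
    # stride=2 gives out_size=3 and pad_sum=max((3-1)*2+3-5, 0)=2, which is
    # split into pad_0=1, pad_1=1.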
ksize = filter.shape[2:5]
if padding_algorithm == "VALID":
pad = [0, 0, 0, 0, 0, 0]
elif padding_algorithm == "SAME":
dilation = [1, 1, 1]
input_data_shape = input.shape[2:5]
pad = _get_padding_with_SAME(input_data_shape, ksize, stride)
pad_d_0, pad_d_1 = pad[0], pad[0]
pad_h_0, pad_h_1 = pad[1], pad[1]
pad_w_0, pad_w_1 = pad[2], pad[2]
if len(pad) == 6:
pad_d_0, pad_d_1 = pad[0], pad[1]
pad_h_0, pad_h_1 = pad[2], pad[3]
pad_w_0, pad_w_1 = pad[4], pad[5]
out_d = 1 + (in_d + pad_d_0 + pad_d_1 - (dilation[0] *
(f_d - 1) + 1)) // stride[0]
out_h = 1 + (in_h + pad_h_0 + pad_h_1 - (dilation[1] *
(f_h - 1) + 1)) // stride[1]
out_w = 1 + (in_w + pad_w_0 + pad_w_1 - (dilation[2] *
(f_w - 1) + 1)) // stride[2]
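    # Standard convolution output-size formula; e.g. in_h=4, pad 1+1,
    # dilation 1, f_h=3, stride 1 gives out_h = 1 + (4 + 2 - 3) // 1 = 4.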
out = np.zeros((in_n, out_c, out_d, out_h, out_w))
    d_block_d = (dilation[0] * (f_d - 1) + 1)
    d_block_h = (dilation[1] * (f_h - 1) + 1)
    d_block_w = (dilation[2] * (f_w - 1) + 1)
input_pad = np.pad(input, ((0, 0), (0, 0), (pad_d_0, pad_d_1),
(pad_h_0, pad_h_1), (pad_w_0, pad_w_1)),
mode='constant',
constant_values=0)
    filter_dilation = np.zeros((f_n, f_c, d_block_d, d_block_h, d_block_w))
    filter_dilation[:, :, 0:d_block_d:dilation[0], 0:d_block_h:dilation[1], 0:
                    d_block_w:dilation[2]] = filter
for d in range(out_d):
for i in range(out_h):
for j in range(out_w):
for g in range(group):
input_pad_masked = \
input_pad[:, g * f_c:(g + 1) * f_c,
                                  d * stride[0]:d * stride[0] + d_block_d,
                                  i * stride[1]:i * stride[1] + d_block_h,
                                  j * stride[2]:j * stride[2] + d_block_w]
f_sub = filter_dilation[g * sub_f_n:(g + 1) *
sub_f_n, :, :, :, :]
for k in range(sub_out_c):
out[:, g * sub_out_c + k, d, i, j] = \
np.sum(input_pad_masked * f_sub[k, :, :, :, :],
axis=(1, 2, 3, 4))
if channel_last:
out = np.transpose(out, [0, 2, 3, 4, 1])
return out
def create_test_cudnn_class(parent):
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestCUDNNCase(parent):
def init_kernel_type(self):
self.use_cudnn = True
self.dtype = np.float32 if core.is_compiled_with_rocm(
) else np.float64
cls_name = "{0}_{1}".format(parent.__name__, "CUDNN")
TestCUDNNCase.__name__ = cls_name
globals()[cls_name] = TestCUDNNCase
def create_test_padding_SAME_class(parent):
    class TestPaddingSAMECase(parent):
        def init_paddings(self):
            self.pad = [0, 0, 0]
            self.padding_algorithm = "SAME"
    cls_name = "{0}_{1}".format(parent.__name__, "PaddingSAMEOp")
    TestPaddingSAMECase.__name__ = cls_name
    globals()[cls_name] = TestPaddingSAMECase
def create_test_padding_VALID_class(parent):
class TestPaddingVALIDCase(parent):
def init_paddings(self):
self.pad = [1, 1, 1]
self.padding_algorithm = "VALID"
cls_name = "{0}_{1}".format(parent.__name__, "PaddingVALIDOp")
TestPaddingVALIDCase.__name__ = cls_name
globals()[cls_name] = TestPaddingVALIDCase
def create_test_cudnn_padding_SAME_class(parent):
    @unittest.skipIf(not core.is_compiled_with_cuda(),
                     "core is not compiled with CUDA")
    class TestCUDNNPaddingSAMECase(parent):
        def init_kernel_type(self):
            self.use_cudnn = True
            self.dtype = np.float32 if core.is_compiled_with_rocm(
            ) else np.float64
        def init_paddings(self):
            self.pad = [1, 1, 1]
            self.padding_algorithm = "SAME"
    cls_name = "{0}_{1}".format(parent.__name__, "CudnnPaddingSAMEOp")
    TestCUDNNPaddingSAMECase.__name__ = cls_name
    globals()[cls_name] = TestCUDNNPaddingSAMECase
def create_test_cudnn_padding_VALID_class(parent):
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestCUDNNPaddingVALIDCase(parent):
def init_kernel_type(self):
self.use_cudnn = True
self.dtype = np.float32 if core.is_compiled_with_rocm(
) else np.float64
def init_paddings(self):
self.pad = [1, 1, 1]
self.padding_algorithm = "VALID"
cls_name = "{0}_{1}".format(parent.__name__, "CudnnPaddingVALIDOp")
TestCUDNNPaddingVALIDCase.__name__ = cls_name
globals()[cls_name] = TestCUDNNPaddingVALIDCase
def create_test_channel_last_class(parent):
class TestChannelLastCase(parent):
def init_data_format(self):
self.data_format = "NDHWC"
def init_test_case_2(self):
N, C, D, H, W = self.input_size
self.input_size = [N, D, H, W, C]
cls_name = "{0}_{1}".format(parent.__name__, "ChannelLast")
TestChannelLastCase.__name__ = cls_name
globals()[cls_name] = TestChannelLastCase
def create_test_cudnn_channel_last_class(parent):
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestCudnnChannelLastCase(parent):
def init_kernel_type(self):
self.use_cudnn = True
self.dtype = np.float32 if core.is_compiled_with_rocm(
) else np.float64
def init_data_format(self):
self.data_format = "NDHWC"
def init_test_case_2(self):
N, C, D, H, W = self.input_size
self.input_size = [N, D, H, W, C]
cls_name = "{0}_{1}".format(parent.__name__, "CudnnChannelLast")
TestCudnnChannelLastCase.__name__ = cls_name
globals()[cls_name] = TestCudnnChannelLastCase
class TestConv3DOp(OpTest):
def setUp(self):
self.op_type = "conv3d"
self.use_cudnn = False
self.use_mkldnn = False
self.data_format = "AnyLayout"
self.dtype = np.float64
self.init_kernel_type()
self.init_group()
self.init_dilation()
self.init_test_case()
conv3d_param = {
'stride': self.stride,
'pad': self.pad,
'dilations': self.dilations
}
input = np.random.random(self.input_size).astype(self.dtype)
filter = np.random.random(self.filter_size).astype(self.dtype)
output = conv3d_forward_naive(
input,
filter,
self.groups,
conv3d_param, ).astype(self.dtype)
self.inputs = {
'Input': OpTest.np_dtype_to_fluid_dtype(input),
'Filter': OpTest.np_dtype_to_fluid_dtype(filter)
}
self.attrs = {
'strides': self.stride,
'paddings': self.pad,
'groups': self.groups,
'dilations': self.dilations,
'use_cudnn': self.use_cudnn,
'use_mkldnn': self.use_mkldnn,
'data_format': self.data_format
}
self.outputs = {'Output': output}
def has_cudnn(self):
return core.is_compiled_with_cuda() and self.use_cudnn
def test_check_output(self):
# TODO(wangzhongpu): support mkldnn op in dygraph mode
place = core.CUDAPlace(0) if self.has_cudnn() else core.CPUPlace()
self.check_output_with_place(
place, atol=1e-5, check_dygraph=(self.use_mkldnn == False))
def test_check_grad(self):
if self.dtype == np.float16:
return
place = core.CUDAPlace(0) if self.has_cudnn() else core.CPUPlace()
# TODO(wangzhongpu): support mkldnn op in dygraph mode
self.check_grad_with_place(
place, {'Input', 'Filter'},
'Output',
max_relative_error=0.03,
check_dygraph=(self.use_mkldnn == False))
def test_check_grad_no_filter(self):
if self.dtype == np.float16:
return
place = core.CUDAPlace(0) if self.has_cudnn() else core.CPUPlace()
# TODO(wangzhongpu): support mkldnn op in dygraph mode
self.check_grad_with_place(
place, ['Input'],
'Output',
max_relative_error=0.03,
no_grad_set=set(['Filter']),
check_dygraph=(self.use_mkldnn == False))
def test_check_grad_no_input(self):
if self.dtype == np.float16:
return
place = core.CUDAPlace(0) if self.has_cudnn() else core.CPUPlace()
# TODO(wangzhongpu): support mkldnn op in dygraph mode
self.check_grad_with_place(
place, ['Filter'],
'Output',
max_relative_error=0.03,
no_grad_set=set(['Input']),
check_dygraph=(self.use_mkldnn == False))
def init_test_case(self):
self.pad = [0, 0, 0]
self.stride = [1, 1, 1]
self.input_size = [2, 3, 4, 4, 4] # NCDHW
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [6, f_c, 3, 3, 3]
def init_test_case_2(self):
pass
def init_dilation(self):
self.dilations = [1, 1, 1]
def init_group(self):
self.groups = 1
def init_kernel_type(self):
pass
class TestCase1(TestConv3DOp):
def init_test_case(self):
self.pad = [1, 1, 1]
self.stride = [1, 1, 1]
self.input_size = [2, 3, 4, 4, 4] # NCDHW
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [6, f_c, 3, 3, 3]
class TestWithGroup1(TestConv3DOp):
def init_group(self):
self.groups = 3
class TestWithGroup2(TestCase1):
def init_group(self):
self.groups = 3
class TestWith1x1(TestConv3DOp):
def init_test_case(self):
self.pad = [0, 0, 0]
self.stride = [1, 1, 1]
self.input_size = [2, 3, 4, 4, 4]
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [120, f_c, 1, 1, 1]
def init_dilation(self):
self.dilations = [1, 1, 1]
def init_group(self):
self.groups = 3
class TestWithInput1x1Filter1x1(TestConv3DOp):
def init_test_case(self):
self.pad = [0, 0, 0]
self.stride = [1, 1, 1]
self.input_size = [40, 3, 1, 1, 1]
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [120, f_c, 1, 1, 1]
def init_dilation(self):
self.dilations = [1, 1, 1]
def init_group(self):
self.groups = 3
class TestWithDilation(TestConv3DOp):
def init_test_case(self):
self.pad = [0, 0, 0]
self.stride = [1, 1, 1]
self.input_size = [2, 3, 6, 6, 6]
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [24, f_c, 2, 2, 2]
def init_dilation(self):
self.dilations = [2, 2, 2]
def init_group(self):
self.groups = 3
#---------------- Conv3DCUDNN ----------------
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestCUDNN(TestConv3DOp):
def init_kernel_type(self):
self.use_cudnn = True
self.dtype = np.float32 if core.is_compiled_with_rocm() else np.float64
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestFP16CUDNN(TestConv3DOp):
def init_kernel_type(self):
self.use_cudnn = True
self.dtype = np.float16
def test_check_output(self):
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
if core.is_float16_supported(place):
self.check_output_with_place(place, atol=2e-2)
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestWithGroup1CUDNN(TestWithGroup1):
def init_kernel_type(self):
self.use_cudnn = True
self.dtype = np.float32 if core.is_compiled_with_rocm() else np.float64
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestFP16WithGroup1CUDNN(TestWithGroup1):
def init_kernel_type(self):
self.use_cudnn = True
self.dtype = np.float16
def test_check_output(self):
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
if core.is_float16_supported(place):
self.check_output_with_place(place, atol=2e-2)
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestWithGroup2CUDNN(TestWithGroup2):
def init_kernel_type(self):
self.use_cudnn = True
self.dtype = np.float32 if core.is_compiled_with_rocm() else np.float64
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestFP16WithGroup2CUDNN(TestWithGroup2):
def init_kernel_type(self):
self.use_cudnn = True
self.dtype = np.float16
def test_check_output(self):
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
if core.is_float16_supported(place):
self.check_output_with_place(place, atol=2e-2)
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestWith1x1CUDNN(TestWith1x1):
def init_kernel_type(self):
self.use_cudnn = True
self.dtype = np.float32 if core.is_compiled_with_rocm() else np.float64
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestFP16With1x1CUDNN(TestWith1x1):
def init_kernel_type(self):
self.use_cudnn = True
self.dtype = np.float16
def test_check_output(self):
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
if core.is_float16_supported(place):
self.check_output_with_place(place, atol=2e-2)
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestWithInput1x1Filter1x1CUDNN(TestWithInput1x1Filter1x1):
def init_kernel_type(self):
self.use_cudnn = True
self.dtype = np.float32 if core.is_compiled_with_rocm() else np.float64
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestFP16WithInput1x1Filter1x1CUDNN(TestWithInput1x1Filter1x1):
def init_kernel_type(self):
self.use_cudnn = True
self.dtype = np.float16
def test_check_output(self):
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
if core.is_float16_supported(place):
self.check_output_with_place(place, atol=2e-2)
class TestCUDNNExhaustiveSearch(TestCUDNN):
def init_kernel_type(self):
self.use_cudnn = True
self.exhaustive_search = True
self.dtype = np.float32 if core.is_compiled_with_rocm() else np.float64
# ---- test asymmetric padding ----
class TestConv3DOp_2(OpTest):
def setUp(self):
self.op_type = "conv3d"
self.use_cudnn = False
self.use_mkldnn = False
self.data_format = "NCDHW"
self.dtype = np.float64
self.init_kernel_type()
self.init_group()
self.init_dilation()
self.init_data_format()
self.init_test_case()
self.init_paddings()
self.init_test_case_2()
conv3d_param = {
'stride': self.stride,
'pad': self.pad,
'dilations': self.dilations
}
input = np.random.random(self.input_size).astype(self.dtype)
filter = np.random.random(self.filter_size).astype(self.dtype)
output = conv3d_forward_naive(input, filter, self.groups, conv3d_param,
self.padding_algorithm,
self.data_format).astype(self.dtype)
self.inputs = {
'Input': OpTest.np_dtype_to_fluid_dtype(input),
'Filter': OpTest.np_dtype_to_fluid_dtype(filter)
}
self.attrs = {
'strides': self.stride,
'paddings': self.pad,
'padding_algorithm': self.padding_algorithm,
'groups': self.groups,
'dilations': self.dilations,
'use_cudnn': self.use_cudnn,
'use_mkldnn': self.use_mkldnn,
'data_format': self.data_format
}
self.outputs = {'Output': output}
def has_cudnn(self):
return core.is_compiled_with_cuda() and self.use_cudnn
def test_check_output(self):
place = core.CUDAPlace(0) if self.has_cudnn() else core.CPUPlace()
self.check_output_with_place(place, atol=1e-5)
def test_check_grad(self):
if self.dtype == np.float16:
return
place = core.CUDAPlace(0) if self.has_cudnn() else core.CPUPlace()
self.check_grad_with_place(
place, {'Input', 'Filter'}, 'Output', max_relative_error=0.03)
def test_check_grad_no_filter(self):
if self.dtype == np.float16:
return
place = core.CUDAPlace(0) if self.has_cudnn() else core.CPUPlace()
self.check_grad_with_place(
place, ['Input'],
'Output',
max_relative_error=0.03,
no_grad_set=set(['Filter']))
def test_check_grad_no_input(self):
if self.dtype == np.float16:
return
place = core.CUDAPlace(0) if self.has_cudnn() else core.CPUPlace()
self.check_grad_with_place(
place, ['Filter'],
'Output',
max_relative_error=0.03,
no_grad_set=set(['Input']))
def init_test_case(self):
self.stride = [1, 1, 1]
self.input_size = [2, 3, 4, 4, 4] # NCDHW
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [6, f_c, 3, 3, 3]
def init_test_case_2(self):
pass
def init_dilation(self):
self.dilations = [1, 1, 1]
def init_group(self):
self.groups = 1
def init_kernel_type(self):
pass
def init_paddings(self):
self.pad = [0, 0, 0]
self.padding_algorithm = "EXPLICIT"
def init_data_format(self):
self.data_format = "NCDHW"
class TestConv3DOp_AsyPadding(TestConv3DOp_2):
def init_test_case(self):
self.stride = [1, 1, 2]
self.input_size = [2, 3, 4, 4, 4] # NCDHW
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [6, f_c, 3, 3, 3]
def init_paddings(self):
self.pad = [1, 0, 1, 0, 0, 2]
self.padding_algorithm = "EXPLICIT"
class TestConv3DOp_DiffDataInDiffDim(TestConv3DOp_2):
def init_test_case(self):
self.stride = [1, 1, 2]
self.input_size = [2, 3, 4, 5, 5] # NCDHW
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [6, f_c, 3, 4, 3]
def init_paddings(self):
self.pad = [1, 0, 1, 0, 0, 2]
self.padding_algorithm = "EXPLICIT"
create_test_padding_SAME_class(TestConv3DOp_DiffDataInDiffDim)
create_test_padding_VALID_class(TestConv3DOp_DiffDataInDiffDim)
create_test_channel_last_class(TestConv3DOp_DiffDataInDiffDim)
class TestCase1_AsyPadding(TestConv3DOp_2):
def init_test_case(self):
self.stride = [1, 1, 1]
self.input_size = [2, 3, 4, 4, 4] # NCDHW
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [6, f_c, 3, 3, 3]
def init_paddings(self):
self.pad = [0, 0, 1, 0, 0, 2]
self.padding_algorithm = "EXPLICIT"
class TestWithGroup1_AsyPadding(TestConv3DOp_2):
def init_group(self):
self.groups = 3
def init_paddings(self):
self.pad = [1, 1, 1, 0, 0, 2]
self.padding_algorithm = "EXPLICIT"
class TestWithGroup2_AsyPadding(TestConv3DOp_2):
def init_test_case(self):
self.stride = [1, 1, 1]
self.input_size = [2, 3, 4, 4, 4] # NCDHW
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [6, f_c, 3, 3, 3]
def init_group(self):
self.groups = 3
def init_paddings(self):
self.pad = [1, 1, 0, 1, 0, 2]
self.padding_algorithm = "EXPLICIT"
class TestWith1x1_AsyPadding(TestConv3DOp_2):
def init_test_case(self):
self.stride = [1, 1, 1]
self.input_size = [2, 3, 4, 4, 4]
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [120, f_c, 1, 1, 1]
def init_dilation(self):
self.dilations = [1, 1, 1]
def init_group(self):
self.groups = 3
def init_paddings(self):
self.pad = [0, 0, 1, 0, 0, 2]
self.padding_algorithm = "EXPLICIT"
class TestWithDilation_AsyPadding(TestConv3DOp_2):
def init_test_case(self):
self.stride = [1, 1, 1]
self.input_size = [2, 3, 6, 6, 6]
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [24, f_c, 2, 2, 2]
def init_dilation(self):
self.dilations = [2, 2, 2]
def init_group(self):
self.groups = 3
def init_paddings(self):
self.pad = [0, 0, 1, 0, 1, 0]
self.padding_algorithm = "EXPLICIT"
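# The create_test_* helpers invoked below are defined earlier in this file.
# Each derives a new TestCase subclass from the class it is given and
# registers it in the module globals. Roughly (a sketch of the pattern, not
# the actual definitions):
#
#     def create_test_cudnn_class(parent):
#         class TestCUDNNCase(parent):
#             def init_kernel_type(self):
#                 self.use_cudnn = True
#         TestCUDNNCase.__name__ = "{0}_{1}".format(parent.__name__, "CUDNN")
#         globals()[TestCUDNNCase.__name__] = TestCUDNNCase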
create_test_cudnn_class(TestConv3DOp_AsyPadding)
create_test_cudnn_class(TestWithGroup1_AsyPadding)
create_test_cudnn_class(TestWithGroup2_AsyPadding)
create_test_cudnn_class(TestWith1x1_AsyPadding)
create_test_cudnn_class(TestWithDilation_AsyPadding)
create_test_padding_SAME_class(TestConv3DOp_AsyPadding)
create_test_padding_SAME_class(TestWithGroup1_AsyPadding)
create_test_padding_SAME_class(TestWith1x1_AsyPadding)
create_test_padding_VALID_class(TestConv3DOp_AsyPadding)
create_test_padding_VALID_class(TestWithGroup1_AsyPadding)
create_test_padding_VALID_class(TestWith1x1_AsyPadding)
create_test_cudnn_padding_SAME_class(TestConv3DOp_AsyPadding)
create_test_cudnn_padding_SAME_class(TestWithGroup1_AsyPadding)
create_test_cudnn_padding_SAME_class(TestWith1x1_AsyPadding)
create_test_cudnn_padding_VALID_class(TestConv3DOp_AsyPadding)
create_test_cudnn_padding_VALID_class(TestWithGroup1_AsyPadding)
create_test_cudnn_padding_VALID_class(TestWith1x1_AsyPadding)
create_test_channel_last_class(TestConv3DOp_AsyPadding)
create_test_channel_last_class(TestWithGroup1_AsyPadding)
create_test_channel_last_class(TestWith1x1_AsyPadding)
create_test_cudnn_channel_last_class(TestConv3DOp_AsyPadding)
create_test_cudnn_channel_last_class(TestWithGroup1_AsyPadding)
create_test_cudnn_channel_last_class(TestWith1x1_AsyPadding)
# FIXME(typhoonzero): find a way to determine if
# using cudnn > 6 in python
# class TestWithDilationCUDNN(TestWithDilation):
# def init_op_type(self):
# self.op_type = "conv3d"
# --------- test python API ---------------
class TestConv3DAPI(unittest.TestCase):
def test_api(self):
input_NDHWC = fluid.layers.data(
name="input_NDHWC",
shape=[2, 5, 5, 5, 3],
append_batch_size=False,
dtype="float32")
input_NCDHW = fluid.layers.data(
name="input_NCDHW",
shape=[2, 3, 5, 5, 3],
append_batch_size=False,
dtype="float32")
fluid.layers.conv3d(
input=input_NDHWC,
num_filters=3,
filter_size=[3, 3, 3],
stride=[1, 1, 1],
padding=0,
dilation=[1, 1, 1],
groups=1,
data_format="NCDHW")
fluid.layers.conv3d(
input=input_NCDHW,
num_filters=3,
filter_size=[3, 3, 3],
stride=[1, 1, 1],
padding=[1, 2, 1, 0, 1, 0],
dilation=[1, 1, 1],
groups=1,
data_format="NCDHW")
fluid.layers.conv3d(
input=input_NCDHW,
num_filters=3,
filter_size=[3, 3, 3],
stride=[1, 1, 1],
padding=[[0, 0], [0, 0], [1, 1], [1, 1], [1, 1]],
dilation=[1, 1, 1],
groups=1,
data_format="NCDHW")
fluid.layers.conv3d(
input=input_NDHWC,
num_filters=3,
filter_size=[3, 3, 3],
stride=[1, 1, 1],
padding=[[0, 0], [1, 1], [1, 1], [1, 1], [0, 0]],
dilation=[1, 1, 1],
groups=1,
data_format="NDHWC")
fluid.layers.conv3d(
input=input_NCDHW,
num_filters=3,
filter_size=[3, 3, 3],
stride=[1, 1, 1],
padding="SAME",
dilation=[1, 1, 1],
groups=1,
data_format="NCDHW")
fluid.layers.conv3d(
input=input_NCDHW,
num_filters=3,
filter_size=[3, 3, 3],
stride=[1, 1, 1],
padding="VALID",
dilation=[1, 1, 1],
groups=1,
data_format="NCDHW")
class TestConv3DAPI_Error(unittest.TestCase):
def test_api(self):
input = fluid.layers.data(
name="input",
shape=[2, 5, 5, 5, 4],
append_batch_size=False,
dtype="float32")
# ValueError: cudnn
def run_1():
fluid.layers.conv3d(
input=input,
num_filters=3,
filter_size=3,
stride=1,
padding=0,
dilation=1,
groups=1,
use_cudnn=[0],
data_format="NCDHW")
self.assertRaises(ValueError, run_1)
# ValueError: data_format
def run_2():
fluid.layers.conv3d(
input=input,
num_filters=3,
filter_size=[3, 3, 3],
stride=[1, 1, 1],
padding=0,
dilation=[1, 1, 1],
groups=1,
use_cudnn=False,
data_format="NCHWC")
self.assertRaises(ValueError, run_2)
# ValueError: padding
def run_3():
fluid.layers.conv3d(
input=input,
num_filters=3,
filter_size=3,
stride=1,
padding="SAMEE",
dilation=1,
groups=1,
use_cudnn=False,
data_format="NCDHW")
self.assertRaises(ValueError, run_3)
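        # ValueError: padding (nonzero padding on the batch dimension)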
def run_4():
fluid.layers.conv3d(
input=input,
num_filters=3,
filter_size=3,
stride=1,
padding=[[0, 1], [0, 0], [0, 1], [0, 1], [0, 1]],
dilation=1,
groups=1,
use_cudnn=False,
data_format="NCDHW")
self.assertRaises(ValueError, run_4)
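        # ValueError: padding (nonzero pads on the batch/channel dimensions;
        # the zero filter_size and stride are invalid as well)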
def run_5():
fluid.layers.conv3d(
input=input,
num_filters=3,
filter_size=0,
stride=0,
padding=[[0, 1], [0, 1], [0, 1], [0, 1], [0, 1]],
dilation=1,
groups=1,
use_cudnn=False,
data_format="NDHWC")
self.assertRaises(ValueError, run_5)
        # ValueError: channel dimension
x = fluid.layers.data(
name="x",
shape=[2, 5, 5, 5, -1],
append_batch_size=False,
dtype="float32")
def run_6():
fluid.layers.conv3d(
input=x,
num_filters=3,
filter_size=3,
stride=1,
padding=0,
dilation=1,
groups=1,
use_cudnn=False,
data_format="NDHWC")
self.assertRaises(ValueError, run_6)
        # ValueError: groups (3 does not divide the input channel count of 4)
def run_7():
fluid.layers.conv3d(
input=input,
num_filters=3,
filter_size=3,
stride=1,
padding=0,
dilation=1,
groups=3,
use_cudnn=False,
data_format="NDHWC")
self.assertRaises(ValueError, run_7)
if __name__ == '__main__':
unittest.main()
#!/usr/bin/env python
#
# Author: Alta Fang (altafang @caltech and alta @princeton)
# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)
# Copyright (c) 1997-2015 California Institute of Technology.
# License: 3-clause BSD. The full license text is available at:
# - http://trac.mystic.cacr.caltech.edu/project/mystic/browser/mystic/LICENSE
"""A test suite for Mystic solvers.
Note: VTR termination with default tolerance shouldn't work for functions
whose value at the minimum is negative!
Also, the two differential evolution solvers are global, while the other solvers
are local optimizers."""
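# To see the VTR caveat concretely: VTR terminates when the best cost comes
# within tolerance of its target (0.0 by default), so for a model whose
# minimum is, say, -5.0 the condition is never met and the solver runs until
# its evaluation limits are hit -- which is why the tests below also call
# SetEvaluationLimits.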
# should report clock-time, # of iterations, and # of function evaluations
import sys
from StringIO import StringIO
import unittest
from math import *
from mystic.math import almostEqual
disp = False # Flag for whether to display number of iterations
# and function evaluations.
verbosity = 2 # Verbosity setting for unittests (default is 1).
def trap_stdout(): #XXX: better with contextmanager?
"temporarily trap stdout; return original sys.stdout"
orig, sys.stdout = sys.stdout, StringIO()
return orig
def release_stdout(orig):
"release stdout; return any trapped output as a string"
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = orig
return out
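# A contextmanager equivalent of the trap/release pair above (per the XXX
# note). A sketch only; the tests below keep using trap_stdout/release_stdout.
from contextlib import contextmanager

@contextmanager
def captured_stdout():
    "temporarily capture sys.stdout; yields the StringIO buffer"
    orig, sys.stdout = sys.stdout, StringIO()
    try:
        yield sys.stdout
    finally:
        sys.stdout = orig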
class TestZimmermann(unittest.TestCase):
"""Test the zimmermann optimization problem."""
def setUp(self):
from mystic.models import zimmermann
self.costfunction = zimmermann
self.expected=[7., 2.]
self.ND = len(self.expected)
self.min = [0.]*self.ND
self.max = [5.]*self.ND
self.maxiter = 2500
self.nplaces = 0 # Precision of answer
self.local = [ 2.35393787, 5.94748068] # local minimum
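        # Local solvers may stall at self.local instead of the global minimum,
        # so _run_solver(local=True) accepts either point (see below).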
def _run_solver(self, iter_limit=False, local=False, **kwds):
from mystic.monitors import Monitor
from mystic.tools import random_seed
random_seed(321) # Number of failures is quite dependent on random seed!
esow = Monitor()
ssow = Monitor()
solver = self.solver
        solver.SetRandomInitialPoints(min=self.min, max=self.max)
solver.SetEvaluationLimits(generations=self.maxiter)
solver.SetEvaluationMonitor(esow)
solver.SetGenerationMonitor(ssow)
_stdout = trap_stdout()
solver.Solve(self.costfunction, self.term, **kwds)
out = release_stdout(_stdout)
sol = solver.Solution()
#print '\nsol:', sol
if disp:
# Print the number of iterations and function evaluations
iters = len(ssow.x)
func_evals = len(esow.x)
print "\nNumber of iterations = ", iters
print "Number of function evaluations = ", func_evals
# If solver should terminate immediately, check for that only.
if iter_limit:
self.assertTrue(solver.generations < 2)
warn = "Warning: Invalid termination condition (nPop < 2)"
self.assertTrue(warn in out)
return
# Allow success if local solvers find the local or global minimum
if local:
tol = 1. # Tolerance for almostEqual.
for i in range(len(sol)):
self.assertTrue(almostEqual(sol[i], self.local[i], tol=tol) or \
almostEqual(sol[i], self.expected[i], tol=tol))
return
# Verify solution is close to expected
for i in range(len(sol)):
self.assertAlmostEqual(sol[i], self.expected[i], self.nplaces)
def test_DifferentialEvolutionSolver_VTR(self):
from mystic.solvers import DifferentialEvolutionSolver
from mystic.termination import VTR
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver(self.ND, self.NP)
self.term = VTR()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver_COG(self): # Default for this solver
from mystic.solvers import DifferentialEvolutionSolver
from mystic.termination import ChangeOverGeneration as COG
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver(self.ND, self.NP)
self.term = COG()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver_NCOG(self):
from mystic.solvers import DifferentialEvolutionSolver
from mystic.termination import NormalizedChangeOverGeneration as NCOG
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver(self.ND, self.NP)
self.term = NCOG()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver_CRT(self):
from mystic.solvers import DifferentialEvolutionSolver
from mystic.termination import CandidateRelativeTolerance as CRT
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver(self.ND, self.NP)
self.term = CRT()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
#--------------------------------------------------------------------------
def test_DifferentialEvolutionSolver2_VTR(self):
from mystic.solvers import DifferentialEvolutionSolver2
from mystic.termination import VTR
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver2(self.ND, self.NP)
self.term = VTR()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver2_COG(self): # Default for this solver
from mystic.solvers import DifferentialEvolutionSolver2
from mystic.termination import ChangeOverGeneration as COG
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver2(self.ND, self.NP)
self.term = COG()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver2_NCOG(self):
from mystic.solvers import DifferentialEvolutionSolver2
from mystic.termination import NormalizedChangeOverGeneration as NCOG
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver2(self.ND, self.NP)
self.term = NCOG()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver2_CRT(self):
from mystic.solvers import DifferentialEvolutionSolver2
from mystic.termination import CandidateRelativeTolerance as CRT
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver2(self.ND, self.NP)
self.term = CRT()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
#--------------------------------------------------------------------------
def test_NelderMeadSimplexSolver_CRT(self): # Default for this solver
from mystic.solvers import NelderMeadSimplexSolver
from mystic.termination import CandidateRelativeTolerance as CRT
tol = 1e-7
self.solver = NelderMeadSimplexSolver(self.ND)
self.term = CRT()#tol)
self._run_solver(local=True)
def test_NelderMeadSimplexSolver_VTR(self):
from mystic.solvers import NelderMeadSimplexSolver
from mystic.termination import VTR
tol = 1e-7
self.solver = NelderMeadSimplexSolver(self.ND)
self.term = VTR()#tol)
self._run_solver(local=True)
def test_NelderMeadSimplexSolver_COG(self):
from mystic.solvers import NelderMeadSimplexSolver
from mystic.termination import ChangeOverGeneration as COG
tol = 1e-7
self.solver = NelderMeadSimplexSolver(self.ND)
self.term = COG()#tol)
self._run_solver(local=True)
def test_NelderMeadSimplexSolver_NCOG(self):
from mystic.solvers import NelderMeadSimplexSolver
from mystic.termination import NormalizedChangeOverGeneration as NCOG
tol = 1e-7
self.solver = NelderMeadSimplexSolver(self.ND)
self.term = NCOG()#tol)
self._run_solver(local=True)
#--------------------------------------------------------------------------
def test_PowellDirectionalSolver_NCOG(self): # Default for this solver
from mystic.solvers import PowellDirectionalSolver
from mystic.termination import NormalizedChangeOverGeneration as NCOG
tol = 1e-7
self.solver = PowellDirectionalSolver(self.ND)
self.term = NCOG()#tol)
self._run_solver(local=True)
def test_PowellDirectionalSolver_COG(self):
from mystic.solvers import PowellDirectionalSolver
from mystic.termination import ChangeOverGeneration as COG
tol = 1e-7
self.solver = PowellDirectionalSolver(self.ND)
self.term = COG()#tol)
self._run_solver(local=True)
def test_PowellDirectionalSolver_VTR(self):
from mystic.solvers import PowellDirectionalSolver
from mystic.termination import VTR
tol = 1e-7
self.solver = PowellDirectionalSolver(self.ND)
self.term = VTR()#tol)
self._run_solver(local=True)
def test_PowellDirectionalSolver_CRT(self):
from mystic.solvers import PowellDirectionalSolver
from mystic.termination import CandidateRelativeTolerance as CRT
tol = 1e-7
self.solver = PowellDirectionalSolver(self.ND)
self.term = CRT()#tol)
self._run_solver(iter_limit=True, local=True)
#####################################################################
class TestRosenbrock(unittest.TestCase):
"""Test the 2-dimensional rosenbrock optimization problem."""
def setUp(self):
from mystic.models import rosen
self.costfunction = rosen
self.expected=[1., 1.]
self.ND = len(self.expected)
self.usebounds = False
self.min = [-5.]*self.ND
self.max = [5.]*self.ND
self.maxiter = 10000
self.nplaces = 0 # Precision of answer
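        # rosen(x) = sum(100*(x[i+1] - x[i]**2)**2 + (1 - x[i])**2), whose
        # unique minimum f=0 sits at x=[1., 1.] -- hence self.expected.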
def _run_solver(self, iter_limit=False, **kwds):
from mystic.monitors import Monitor
from mystic.tools import random_seed
#random_seed(123)
esow = Monitor()
ssow = Monitor()
solver = self.solver
        solver.SetRandomInitialPoints(min=self.min, max=self.max)
if self.usebounds:
solver.SetStrictRanges(self.min, self.max)
solver.SetEvaluationLimits(generations=self.maxiter)
solver.SetEvaluationMonitor(esow)
solver.SetGenerationMonitor(ssow)
_stdout = trap_stdout()
solver.Solve(self.costfunction, self.term, **kwds)
out = release_stdout(_stdout)
sol = solver.Solution()
if disp:
# Print the number of iterations and function evaluations
iters = len(ssow.x) #XXX Should use solver.generations instead?
func_evals = len(esow.x)
print "\nNumber of iterations = ", iters
print "Number of function evaluations = ", func_evals
# If solver should terminate immediately, check for that only.
if iter_limit:
self.assertTrue(solver.generations < 2)
warn = "Warning: Invalid termination condition (nPop < 2)"
self.assertTrue(warn in out)
return
# Verify solution is close to expected
for i in range(len(sol)):
self.assertAlmostEqual(sol[i], self.expected[i], self.nplaces)
def test_DifferentialEvolutionSolver_VTR(self):
from mystic.solvers import DifferentialEvolutionSolver
from mystic.termination import VTR
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver(self.ND, self.NP)
self.term = VTR()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver_COG(self): # Default for this solver
from mystic.solvers import DifferentialEvolutionSolver
from mystic.termination import ChangeOverGeneration as COG
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver(self.ND, self.NP)
self.term = COG()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver_NCOG(self):
from mystic.solvers import DifferentialEvolutionSolver
from mystic.termination import NormalizedChangeOverGeneration as NCOG
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver(self.ND, self.NP)
self.term = NCOG()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver_CRT(self):
from mystic.solvers import DifferentialEvolutionSolver
from mystic.termination import CandidateRelativeTolerance as CRT
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver(self.ND, self.NP)
self.term = CRT()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
#--------------------------------------------------------------------------
def test_DifferentialEvolutionSolver2_VTR(self):
from mystic.solvers import DifferentialEvolutionSolver2
from mystic.termination import VTR
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver2(self.ND, self.NP)
self.term = VTR()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver2_COG(self): # Default for this solver
from mystic.solvers import DifferentialEvolutionSolver2
from mystic.termination import ChangeOverGeneration as COG
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver2(self.ND, self.NP)
self.term = COG()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver2_NCOG(self):
from mystic.solvers import DifferentialEvolutionSolver2
from mystic.termination import NormalizedChangeOverGeneration as NCOG
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver2(self.ND, self.NP)
self.term = NCOG()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver2_CRT(self):
from mystic.solvers import DifferentialEvolutionSolver2
from mystic.termination import CandidateRelativeTolerance as CRT
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver2(self.ND, self.NP)
self.term = CRT()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
#--------------------------------------------------------------
def test_NelderMeadSimplexSolver_CRT(self): # Default for this solver
from mystic.solvers import NelderMeadSimplexSolver
from mystic.termination import CandidateRelativeTolerance as CRT
tol = 1e-7
self.solver = NelderMeadSimplexSolver(self.ND)
self.term = CRT()#tol)
self._run_solver()
def test_NelderMeadSimplexSolver_VTR(self):
from mystic.solvers import NelderMeadSimplexSolver
from mystic.termination import VTR
tol = 1e-7
self.solver = NelderMeadSimplexSolver(self.ND)
self.term = VTR()#tol)
self._run_solver()
def test_NelderMeadSimplexSolver_COG(self):
from mystic.solvers import NelderMeadSimplexSolver
from mystic.termination import ChangeOverGeneration as COG
tol = 1e-7
self.solver = NelderMeadSimplexSolver(self.ND)
self.term = COG()#tol)
self._run_solver()
def test_NelderMeadSimplexSolver_NCOG(self):
from mystic.solvers import NelderMeadSimplexSolver
from mystic.termination import NormalizedChangeOverGeneration as NCOG
tol = 1e-7
self.solver = NelderMeadSimplexSolver(self.ND)
self.term = NCOG()#tol)
self._run_solver()
#--------------------------------------------------------------------------
def test_PowellDirectionalSolver_NCOG(self): # Default for this solver
from mystic.solvers import PowellDirectionalSolver
from mystic.termination import NormalizedChangeOverGeneration as NCOG
tol = 1e-7
self.solver = PowellDirectionalSolver(self.ND)
self.term = NCOG()#tol)
self._run_solver()
def test_PowellDirectionalSolver_COG(self):
from mystic.solvers import PowellDirectionalSolver
from mystic.termination import ChangeOverGeneration as COG
tol = 1e-7
self.solver = PowellDirectionalSolver(self.ND)
self.term = COG()#tol)
self._run_solver()
def test_PowellDirectionalSolver_VTR(self):
from mystic.solvers import PowellDirectionalSolver
from mystic.termination import VTR
tol = 1e-7
self.solver = PowellDirectionalSolver(self.ND)
self.term = VTR()#tol)
self._run_solver()
def test_PowellDirectionalSolver_CRT(self):
from mystic.solvers import PowellDirectionalSolver
from mystic.termination import CandidateRelativeTolerance as CRT
tol = 1e-7
self.solver = PowellDirectionalSolver(self.ND)
self.term = CRT()#tol)
self._run_solver(iter_limit=True)
#####################################################################
class TestCorana(unittest.TestCase):
"""Test the 4-dimensional Corana optimization problem. Many local
minima."""
def setUp(self):
from mystic.models import corana
self.costfunction = corana
self.ND = 4
self.maxexpected=[0.05]*self.ND
self.min = [-1000]*self.ND
self.max = [1000]*self.ND
self.maxiter = 10000
#self.nplaces = 0 # Precision of answer
def _run_solver(self, iter_limit=False, **kwds):
from mystic.monitors import Monitor
from mystic.tools import random_seed
random_seed(123)
esow = Monitor()
ssow = Monitor()
solver = self.solver
solver.SetRandomInitialPoints(self.min, self.max)
solver.SetStrictRanges(self.min, self.max)
solver.SetEvaluationLimits(generations=self.maxiter)
solver.SetEvaluationMonitor(esow)
solver.SetGenerationMonitor(ssow)
_stdout = trap_stdout()
solver.Solve(self.costfunction, self.term, **kwds)
out = release_stdout(_stdout)
sol = solver.Solution()
if disp:
# Print the number of iterations and function evaluations
iters = len(ssow.x)
func_evals = len(esow.x)
print "\nNumber of iterations = ", iters
print "Number of function evaluations = ", func_evals
# If solver should terminate immediately, check for that only.
if iter_limit:
self.assertTrue(solver.generations < 2)
warn = "Warning: Invalid termination condition (nPop < 2)"
self.assertTrue(warn in out)
return
        # Verify the solution is close to expected: the absolute value of each
        # coordinate must satisfy the inequality below
error = 1. # Allowed error in either direction
for i in range(len(sol)):
self.assertTrue(abs(sol[i]) < self.maxexpected[i] + error)
def test_DifferentialEvolutionSolver_VTR(self):
from mystic.solvers import DifferentialEvolutionSolver
from mystic.termination import VTR
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver(self.ND, self.NP)
self.term = VTR()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver_COG(self): # Default for this solver
from mystic.solvers import DifferentialEvolutionSolver
from mystic.termination import ChangeOverGeneration as COG
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver(self.ND, self.NP)
self.term = COG()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver_NCOG(self):
from mystic.solvers import DifferentialEvolutionSolver
from mystic.termination import NormalizedChangeOverGeneration as NCOG
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver(self.ND, self.NP)
self.term = NCOG()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver_CRT(self):
from mystic.solvers import DifferentialEvolutionSolver
from mystic.termination import CandidateRelativeTolerance as CRT
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver(self.ND, self.NP)
self.term = CRT()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
#--------------------------------------------------------------------------
def test_DifferentialEvolutionSolver2_VTR(self):
from mystic.solvers import DifferentialEvolutionSolver2
from mystic.termination import VTR
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver2(self.ND, self.NP)
self.term = VTR()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver2_COG(self): # Default for this solver
from mystic.solvers import DifferentialEvolutionSolver2
from mystic.termination import ChangeOverGeneration as COG
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver2(self.ND, self.NP)
self.term = COG()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver2_NCOG(self):
from mystic.solvers import DifferentialEvolutionSolver2
from mystic.termination import NormalizedChangeOverGeneration as NCOG
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver2(self.ND, self.NP)
self.term = NCOG()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver2_CRT(self):
from mystic.solvers import DifferentialEvolutionSolver2
from mystic.termination import CandidateRelativeTolerance as CRT
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver2(self.ND, self.NP)
self.term = CRT()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
#--------------------------------------------------------------
def test_NelderMeadSimplexSolver_CRT(self): # Default for this solver
from mystic.solvers import NelderMeadSimplexSolver
from mystic.termination import CandidateRelativeTolerance as CRT
tol = 1e-7
self.solver = NelderMeadSimplexSolver(self.ND)
self.term = CRT()#tol)
self._run_solver()
def test_NelderMeadSimplexSolver_VTR(self):
from mystic.solvers import NelderMeadSimplexSolver
from mystic.termination import VTR
tol = 1e-7
self.solver = NelderMeadSimplexSolver(self.ND)
self.term = VTR()#tol)
self._run_solver()
def test_NelderMeadSimplexSolver_COG(self):
from mystic.solvers import NelderMeadSimplexSolver
from mystic.termination import ChangeOverGeneration as COG
tol = 1e-7
self.solver = NelderMeadSimplexSolver(self.ND)
self.term = COG()#tol)
self._run_solver()
def test_NelderMeadSimplexSolver_NCOG(self):
from mystic.solvers import NelderMeadSimplexSolver
from mystic.termination import NormalizedChangeOverGeneration as NCOG
tol = 1e-7
self.solver = NelderMeadSimplexSolver(self.ND)
self.term = NCOG()#tol)
self._run_solver()
#--------------------------------------------------------------------------
def test_PowellDirectionalSolver_NCOG(self): # Default for this solver
from mystic.solvers import PowellDirectionalSolver
from mystic.termination import NormalizedChangeOverGeneration as NCOG
tol = 1e-7
self.solver = PowellDirectionalSolver(self.ND)
self.term = NCOG()#tol)
self._run_solver()
def test_PowellDirectionalSolver_COG(self):
from mystic.solvers import PowellDirectionalSolver
from mystic.termination import ChangeOverGeneration as COG
tol = 1e-7
self.solver = PowellDirectionalSolver(self.ND)
self.term = COG()#tol)
self._run_solver()
def test_PowellDirectionalSolver_VTR(self):
from mystic.solvers import PowellDirectionalSolver
from mystic.termination import VTR
tol = 1e-7
self.solver = PowellDirectionalSolver(self.ND)
self.term = VTR()#tol)
self._run_solver()
def test_PowellDirectionalSolver_CRT(self):
from mystic.solvers import PowellDirectionalSolver
from mystic.termination import CandidateRelativeTolerance as CRT
tol = 1e-7
self.solver = PowellDirectionalSolver(self.ND)
self.term = CRT()#tol)
self._run_solver(iter_limit=True)
#####################################################################
class TestQuartic(unittest.TestCase):
"""Test the quartic (noisy) optimization problem."""
def setUp(self):
from mystic.models import quartic
self.costfunction = quartic
self.ND = 30
self.expected=[0.]*self.ND
self.min = [-1.28]*self.ND
self.max = [1.28]*self.ND
self.maxiter = 2500
self.nplaces = 0 # Precision of answer
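        # The quartic here is De Jong's noisy F4 (roughly sum(i * x[i]**4)
        # plus a random noise term), so the attained cost fluctuates; the
        # expected solution is the origin.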
def _run_solver(self, iter_limit=False, **kwds):
from mystic.monitors import Monitor
from mystic.tools import random_seed
#random_seed(123)
esow = Monitor()
ssow = Monitor()
solver = self.solver
solver.SetRandomInitialPoints(self.min, self.max)
solver.SetStrictRanges(self.min, self.max)
solver.SetEvaluationLimits(generations=self.maxiter)
solver.SetEvaluationMonitor(esow)
solver.SetGenerationMonitor(ssow)
_stdout = trap_stdout()
solver.Solve(self.costfunction, self.term, **kwds)
out = release_stdout(_stdout)
sol = solver.Solution()
if disp:
# Print the number of iterations and function evaluations
iters = len(ssow.x)
func_evals = len(esow.x)
print "\nNumber of iterations = ", iters
print "Number of function evaluations = ", func_evals
# If solver should terminate immediately, check for that only.
if iter_limit:
self.assertTrue(solver.generations < 2)
warn = "Warning: Invalid termination condition (nPop < 2)"
self.assertTrue(warn in out)
return
# Verify solution is close to expected
for i in range(len(sol)):
self.assertAlmostEqual(sol[i], self.expected[i], self.nplaces)
def test_DifferentialEvolutionSolver_VTR(self):
from mystic.solvers import DifferentialEvolutionSolver
from mystic.termination import VTR
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver(self.ND, self.NP)
self.term = VTR()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver_COG(self): # Default for this solver
from mystic.solvers import DifferentialEvolutionSolver
from mystic.termination import ChangeOverGeneration as COG
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver(self.ND, self.NP)
self.term = COG()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver_NCOG(self):
from mystic.solvers import DifferentialEvolutionSolver
from mystic.termination import NormalizedChangeOverGeneration as NCOG
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver(self.ND, self.NP)
self.term = NCOG()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver_CRT(self):
from mystic.solvers import DifferentialEvolutionSolver
from mystic.termination import CandidateRelativeTolerance as CRT
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver(self.ND, self.NP)
self.term = CRT()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
#--------------------------------------------------------------------------
def test_DifferentialEvolutionSolver2_VTR(self):
from mystic.solvers import DifferentialEvolutionSolver2
from mystic.termination import VTR
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver2(self.ND, self.NP)
self.term = VTR()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver2_COG(self): # Default for this solver
from mystic.solvers import DifferentialEvolutionSolver2
from mystic.termination import ChangeOverGeneration as COG
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver2(self.ND, self.NP)
self.term = COG()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver2_NCOG(self):
from mystic.solvers import DifferentialEvolutionSolver2
from mystic.termination import NormalizedChangeOverGeneration as NCOG
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver2(self.ND, self.NP)
self.term = NCOG()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver2_CRT(self):
from mystic.solvers import DifferentialEvolutionSolver2
from mystic.termination import CandidateRelativeTolerance as CRT
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver2(self.ND, self.NP)
self.term = CRT()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
#--------------------------------------------------------------
def test_NelderMeadSimplexSolver_CRT(self): # Default for this solver
from mystic.solvers import NelderMeadSimplexSolver
from mystic.termination import CandidateRelativeTolerance as CRT
tol = 1e-7
self.solver = NelderMeadSimplexSolver(self.ND)
self.term = CRT()#tol)
self._run_solver()
def test_NelderMeadSimplexSolver_VTR(self):
from mystic.solvers import NelderMeadSimplexSolver
from mystic.termination import VTR
tol = 1e-7
self.solver = NelderMeadSimplexSolver(self.ND)
self.term = VTR()#tol)
self._run_solver()
def test_NelderMeadSimplexSolver_COG(self):
from mystic.solvers import NelderMeadSimplexSolver
from mystic.termination import ChangeOverGeneration as COG
tol = 1e-7
self.solver = NelderMeadSimplexSolver(self.ND)
self.term = COG()#tol)
self._run_solver()
def test_NelderMeadSimplexSolver_NCOG(self):
from mystic.solvers import NelderMeadSimplexSolver
from mystic.termination import NormalizedChangeOverGeneration as NCOG
tol = 1e-7
self.solver = NelderMeadSimplexSolver(self.ND)
self.term = NCOG()#tol)
self._run_solver()
#--------------------------------------------------------------------------
def test_PowellDirectionalSolver_NCOG(self): # Default for this solver
from mystic.solvers import PowellDirectionalSolver
from mystic.termination import NormalizedChangeOverGeneration as NCOG
tol = 1e-7
self.solver = PowellDirectionalSolver(self.ND)
self.term = NCOG()#tol)
self._run_solver()
def test_PowellDirectionalSolver_COG(self):
from mystic.solvers import PowellDirectionalSolver
from mystic.termination import ChangeOverGeneration as COG
tol = 1e-7
self.solver = PowellDirectionalSolver(self.ND)
self.term = COG()#tol)
self._run_solver()
def test_PowellDirectionalSolver_VTR(self):
from mystic.solvers import PowellDirectionalSolver
from mystic.termination import VTR
tol = 1e-7
self.solver = PowellDirectionalSolver(self.ND)
self.term = VTR()#tol)
self._run_solver()
def test_PowellDirectionalSolver_CRT(self):
from mystic.solvers import PowellDirectionalSolver
from mystic.termination import CandidateRelativeTolerance as CRT
tol = 1e-7
self.solver = PowellDirectionalSolver(self.ND)
self.term = CRT()#tol)
self._run_solver(iter_limit=True)
#####################################################################
class TestShekel(unittest.TestCase):
"""Test the 5th DeJong function (Shekel) optimization problem."""
def setUp(self):
from mystic.models import shekel
self.costfunction = shekel
self.ND = 2
self.expected=[-32., -32.]
self.min = [-65.536]*self.ND
self.max = [65.536]*self.ND
self.maxiter = 10000
self.nplaces = 0 # Precision of answer
def _run_solver(self, iter_limit=False, **kwds):
from mystic.monitors import Monitor
from mystic.tools import random_seed
random_seed(123)
esow = Monitor()
ssow = Monitor()
solver = self.solver
solver.SetRandomInitialPoints(self.min, self.max)
solver.SetStrictRanges(self.min, self.max)
solver.SetEvaluationLimits(generations=self.maxiter)
solver.SetEvaluationMonitor(esow)
solver.SetGenerationMonitor(ssow)
_stdout = trap_stdout()
solver.Solve(self.costfunction, self.term, **kwds)
out = release_stdout(_stdout)
sol = solver.Solution()
if disp:
# Print the number of iterations and function evaluations
iters = len(ssow.x)
func_evals = len(esow.x)
print "\nNumber of iterations = ", iters
print "Number of function evaluations = ", func_evals
# If solver should terminate immediately, check for that only.
if iter_limit:
self.assertTrue(solver.generations < 2)
warn = "Warning: Invalid termination condition (nPop < 2)"
self.assertTrue(warn in out)
return
# Verify solution is close to expected
for i in range(len(sol)):
self.assertAlmostEqual(sol[i], self.expected[i], self.nplaces)
def test_DifferentialEvolutionSolver_VTR(self):
from mystic.solvers import DifferentialEvolutionSolver
from mystic.termination import VTR
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver(self.ND, self.NP)
self.term = VTR()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver_COG(self): # Default for this solver
from mystic.solvers import DifferentialEvolutionSolver
from mystic.termination import ChangeOverGeneration as COG
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver(self.ND, self.NP)
self.term = COG()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver_NCOG(self):
from mystic.solvers import DifferentialEvolutionSolver
from mystic.termination import NormalizedChangeOverGeneration as NCOG
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver(self.ND, self.NP)
self.term = NCOG()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver_CRT(self):
from mystic.solvers import DifferentialEvolutionSolver
from mystic.termination import CandidateRelativeTolerance as CRT
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver(self.ND, self.NP)
self.term = CRT()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
#--------------------------------------------------------------------------
def test_DifferentialEvolutionSolver2_VTR(self):
from mystic.solvers import DifferentialEvolutionSolver2
from mystic.termination import VTR
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver2(self.ND, self.NP)
self.term = VTR()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver2_COG(self): # Default for this solver
from mystic.solvers import DifferentialEvolutionSolver2
from mystic.termination import ChangeOverGeneration as COG
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver2(self.ND, self.NP)
self.term = COG()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver2_NCOG(self):
from mystic.solvers import DifferentialEvolutionSolver2
from mystic.termination import NormalizedChangeOverGeneration as NCOG
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver2(self.ND, self.NP)
self.term = NCOG()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver2_CRT(self):
from mystic.solvers import DifferentialEvolutionSolver2
from mystic.termination import CandidateRelativeTolerance as CRT
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver2(self.ND, self.NP)
self.term = CRT()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
#--------------------------------------------------------------
def test_NelderMeadSimplexSolver_CRT(self): # Default for this solver
from mystic.solvers import NelderMeadSimplexSolver
from mystic.termination import CandidateRelativeTolerance as CRT
tol = 1e-7
self.solver = NelderMeadSimplexSolver(self.ND)
self.term = CRT()#tol)
self._run_solver()
def test_NelderMeadSimplexSolver_VTR(self):
from mystic.solvers import NelderMeadSimplexSolver
from mystic.termination import VTR
tol = 1e-7
self.solver = NelderMeadSimplexSolver(self.ND)
self.term = VTR()#tol)
self._run_solver()
def test_NelderMeadSimplexSolver_COG(self):
from mystic.solvers import NelderMeadSimplexSolver
from mystic.termination import ChangeOverGeneration as COG
tol = 1e-7
self.solver = NelderMeadSimplexSolver(self.ND)
self.term = COG()#tol)
self._run_solver()
def test_NelderMeadSimplexSolver_NCOG(self):
from mystic.solvers import NelderMeadSimplexSolver
from mystic.termination import NormalizedChangeOverGeneration as NCOG
tol = 1e-7
self.solver = NelderMeadSimplexSolver(self.ND)
self.term = NCOG()#tol)
self._run_solver()
#--------------------------------------------------------------------------
def test_PowellDirectionalSolver_NCOG(self): # Default for this solver
from mystic.solvers import PowellDirectionalSolver
from mystic.termination import NormalizedChangeOverGeneration as NCOG
tol = 1e-7
self.solver = PowellDirectionalSolver(self.ND)
self.term = NCOG()#tol)
self._run_solver()
def test_PowellDirectionalSolver_COG(self):
from mystic.solvers import PowellDirectionalSolver
from mystic.termination import ChangeOverGeneration as COG
tol = 1e-7
self.solver = PowellDirectionalSolver(self.ND)
self.term = COG()#tol)
self._run_solver()
def test_PowellDirectionalSolver_VTR(self):
from mystic.solvers import PowellDirectionalSolver
from mystic.termination import VTR
tol = 1e-7
self.solver = PowellDirectionalSolver(self.ND)
self.term = VTR()#tol)
self._run_solver()
def test_PowellDirectionalSolver_CRT(self):
from mystic.solvers import PowellDirectionalSolver
from mystic.termination import CandidateRelativeTolerance as CRT
tol = 1e-7
self.solver = PowellDirectionalSolver(self.ND)
self.term = CRT()#tol)
self._run_solver(iter_limit=True)
#####################################################################
class TestStep(unittest.TestCase):
"""Test the 3rd DeJong function (step) optimization problem."""
def setUp(self):
from mystic.models import step
self.costfunction = step
self.ND = 5
self.expected = [-5.]*self.ND # xi=-5-n where n=[0.0,0.12]
self.min = [-5.12]*self.ND
self.max = [5.12]*self.ND
self.maxiter = 10000
#self.nplaces = 0 # Precision of answer
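        # De Jong's step (F3) is piecewise constant (floor-based), so any
        # xi in roughly [-5.12, -5.) attains the minimum -- hence the banded
        # check on the solution below rather than assertAlmostEqual.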
    def _run_solver(self, iter_limit=False, **kwds):
from mystic.monitors import Monitor
from mystic.tools import random_seed
#random_seed(123)
esow = Monitor()
ssow = Monitor()
solver = self.solver
solver.SetRandomInitialPoints(self.min, self.max)
solver.SetStrictRanges(self.min, self.max)
solver.SetEvaluationLimits(generations=self.maxiter)
solver.SetEvaluationMonitor(esow)
solver.SetGenerationMonitor(ssow)
_stdout = trap_stdout()
solver.Solve(self.costfunction, self.term, **kwds)
out = release_stdout(_stdout)
sol = solver.Solution()
if disp:
# Print the number of iterations and function evaluations
iters = len(ssow.x)
func_evals = len(esow.x)
print "\nNumber of iterations = ", iters
print "Number of function evaluations = ", func_evals
# If solver should terminate immediately, check for that only.
if iter_limit:
self.assertTrue(solver.generations < 2)
warn = "Warning: Invalid termination condition (nPop < 2)"
self.assertTrue(warn in out)
return
        # Expected: xi = -5 - n where n is in [0.0, 0.12], i.e. each
        # coordinate should land in [expected - 0.12, expected].
        #XXX Again, no cushion.
        for i in range(len(sol)):
            self.assertTrue(self.expected[i] - 0.12 <= sol[i] <= self.expected[i])
def test_DifferentialEvolutionSolver_VTR(self):
from mystic.solvers import DifferentialEvolutionSolver
from mystic.termination import VTR
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver(self.ND, self.NP)
self.term = VTR()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver_COG(self): # Default for this solver
from mystic.solvers import DifferentialEvolutionSolver
from mystic.termination import ChangeOverGeneration as COG
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver(self.ND, self.NP)
self.term = COG()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver_NCOG(self):
from mystic.solvers import DifferentialEvolutionSolver
from mystic.termination import NormalizedChangeOverGeneration as NCOG
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver(self.ND, self.NP)
self.term = NCOG()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver_CRT(self):
from mystic.solvers import DifferentialEvolutionSolver
from mystic.termination import CandidateRelativeTolerance as CRT
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver(self.ND, self.NP)
self.term = CRT()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
#--------------------------------------------------------------------------
def test_DifferentialEvolutionSolver2_VTR(self):
from mystic.solvers import DifferentialEvolutionSolver2
from mystic.termination import VTR
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver2(self.ND, self.NP)
self.term = VTR()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver2_COG(self): # Default for this solver
from mystic.solvers import DifferentialEvolutionSolver2
from mystic.termination import ChangeOverGeneration as COG
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver2(self.ND, self.NP)
self.term = COG()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver2_NCOG(self):
from mystic.solvers import DifferentialEvolutionSolver2
from mystic.termination import NormalizedChangeOverGeneration as NCOG
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver2(self.ND, self.NP)
self.term = NCOG()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver2_CRT(self):
from mystic.solvers import DifferentialEvolutionSolver2
from mystic.termination import CandidateRelativeTolerance as CRT
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver2(self.ND, self.NP)
self.term = CRT()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
#--------------------------------------------------------------
def test_NelderMeadSimplexSolver_CRT(self): # Default for this solver
from mystic.solvers import NelderMeadSimplexSolver
from mystic.termination import CandidateRelativeTolerance as CRT
tol = 1e-7
self.solver = NelderMeadSimplexSolver(self.ND)
self.term = CRT()#tol)
self._run_solver()
def test_NelderMeadSimplexSolver_VTR(self):
from mystic.solvers import NelderMeadSimplexSolver
from mystic.termination import VTR
tol = 1e-7
self.solver = NelderMeadSimplexSolver(self.ND)
self.term = VTR()#tol)
self._run_solver()
def test_NelderMeadSimplexSolver_COG(self):
from mystic.solvers import NelderMeadSimplexSolver
from mystic.termination import ChangeOverGeneration as COG
tol = 1e-7
self.solver = NelderMeadSimplexSolver(self.ND)
self.term = COG()#tol)
self._run_solver()
def test_NelderMeadSimplexSolver_NCOG(self):
from mystic.solvers import NelderMeadSimplexSolver
from mystic.termination import NormalizedChangeOverGeneration as NCOG
tol = 1e-7
self.solver = NelderMeadSimplexSolver(self.ND)
self.term = NCOG()#tol)
self._run_solver()
#--------------------------------------------------------------------------
def test_PowellDirectionalSolver_NCOG(self): # Default for this solver
from mystic.solvers import PowellDirectionalSolver
from mystic.termination import NormalizedChangeOverGeneration as NCOG
tol = 1e-7
self.solver = PowellDirectionalSolver(self.ND)
self.term = NCOG()#tol)
self._run_solver()
def test_PowellDirectionalSolver_COG(self):
from mystic.solvers import PowellDirectionalSolver
from mystic.termination import ChangeOverGeneration as COG
tol = 1e-7
self.solver = PowellDirectionalSolver(self.ND)
self.term = COG()#tol)
self._run_solver()
def test_PowellDirectionalSolver_VTR(self):
from mystic.solvers import PowellDirectionalSolver
from mystic.termination import VTR
tol = 1e-7
self.solver = PowellDirectionalSolver(self.ND)
self.term = VTR()#tol)
self._run_solver()
def test_PowellDirectionalSolver_CRT(self):
from mystic.solvers import PowellDirectionalSolver
from mystic.termination import CandidateRelativeTolerance as CRT
tol = 1e-7
self.solver = PowellDirectionalSolver(self.ND)
self.term = CRT()#tol)
self._run_solver(iter_limit=True)
#####################################################################
class TestGriewangk(unittest.TestCase):
"""Test the Griewangk optimization problem."""
def setUp(self):
from mystic.models import griewangk
self.costfunction = griewangk
self.ND = 10
self.expected = [0.]*self.ND
self.min = [-400.0]*self.ND
self.max = [400.0]*self.ND
self.maxiter = 2500
self.nplaces = 0 # Precision of answer
def _run_solver(self, iter_limit=False, **kwds):
from mystic.monitors import Monitor
from mystic.tools import random_seed
#random_seed(123)
esow = Monitor()
ssow = Monitor()
solver = self.solver
solver.SetRandomInitialPoints(self.min, self.max)
solver.SetEvaluationLimits(generations=self.maxiter)
solver.SetStrictRanges(self.min, self.max)
solver.SetEvaluationMonitor(esow)
solver.SetGenerationMonitor(ssow)
_stdout = trap_stdout()
solver.Solve(self.costfunction, self.term, **kwds)
out = release_stdout(_stdout)
sol = solver.Solution()
if disp:
# Print the number of iterations and function evaluations
iters = len(ssow.x)
func_evals = len(esow.x)
print "\nNumber of iterations = ", iters
print "Number of function evaluations = ", func_evals
# If solver should terminate immediately, check for that only.
if iter_limit:
self.assertTrue(solver.generations < 2)
warn = "Warning: Invalid termination condition (nPop < 2)"
self.assertTrue(warn in out)
return
        # Verify solution is close to expected (the minimum is at the origin)
        for i in range(len(sol)):
            self.assertAlmostEqual(sol[i], self.expected[i], self.nplaces)
def test_DifferentialEvolutionSolver_VTR(self):
from mystic.solvers import DifferentialEvolutionSolver
from mystic.termination import VTR
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver(self.ND, self.NP)
self.term = VTR()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver_COG(self): # Default for this solver
from mystic.solvers import DifferentialEvolutionSolver
from mystic.termination import ChangeOverGeneration as COG
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver(self.ND, self.NP)
self.term = COG()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver_NCOG(self):
from mystic.solvers import DifferentialEvolutionSolver
from mystic.termination import NormalizedChangeOverGeneration as NCOG
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver(self.ND, self.NP)
self.term = NCOG()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver_CRT(self):
from mystic.solvers import DifferentialEvolutionSolver
from mystic.termination import CandidateRelativeTolerance as CRT
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver(self.ND, self.NP)
self.term = CRT()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
#--------------------------------------------------------------------------
def test_DifferentialEvolutionSolver2_VTR(self):
from mystic.solvers import DifferentialEvolutionSolver2
from mystic.termination import VTR
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver2(self.ND, self.NP)
self.term = VTR()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver2_COG(self): # Default for this solver
from mystic.solvers import DifferentialEvolutionSolver2
from mystic.termination import ChangeOverGeneration as COG
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver2(self.ND, self.NP)
self.term = COG()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver2_NCOG(self):
from mystic.solvers import DifferentialEvolutionSolver2
from mystic.termination import NormalizedChangeOverGeneration as NCOG
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver2(self.ND, self.NP)
self.term = NCOG()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver2_CRT(self):
from mystic.solvers import DifferentialEvolutionSolver2
from mystic.termination import CandidateRelativeTolerance as CRT
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver2(self.ND, self.NP)
self.term = CRT()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
#-------------------------------------------------------------------------
def test_NelderMeadSimplexSolver_CRT(self): # Default for this solver
from mystic.solvers import NelderMeadSimplexSolver
from mystic.termination import CandidateRelativeTolerance as CRT
tol = 1e-7
self.solver = NelderMeadSimplexSolver(self.ND)
self.term = CRT()#tol)
self._run_solver()
def test_NelderMeadSimplexSolver_VTR(self):
from mystic.solvers import NelderMeadSimplexSolver
from mystic.termination import VTR
tol = 1e-7
self.solver = NelderMeadSimplexSolver(self.ND)
self.term = VTR()#tol)
self._run_solver()
def test_NelderMeadSimplexSolver_COG(self):
from mystic.solvers import NelderMeadSimplexSolver
from mystic.termination import ChangeOverGeneration as COG
tol = 1e-7
self.solver = NelderMeadSimplexSolver(self.ND)
self.term = COG()#tol)
self._run_solver()
def test_NelderMeadSimplexSolver_NCOG(self):
from mystic.solvers import NelderMeadSimplexSolver
from mystic.termination import NormalizedChangeOverGeneration as NCOG
tol = 1e-7
self.solver = NelderMeadSimplexSolver(self.ND)
self.term = NCOG()#tol)
self._run_solver()
#--------------------------------------------------------------------------
def test_PowellDirectionalSolver_NCOG(self): # Default for this solver
from mystic.solvers import PowellDirectionalSolver
from mystic.termination import NormalizedChangeOverGeneration as NCOG
tol = 1e-7
self.solver = PowellDirectionalSolver(self.ND)
self.term = NCOG()#tol)
self._run_solver()
def test_PowellDirectionalSolver_COG(self):
from mystic.solvers import PowellDirectionalSolver
from mystic.termination import ChangeOverGeneration as COG
tol = 1e-7
self.solver = PowellDirectionalSolver(self.ND)
self.term = COG()#tol)
self._run_solver()
def test_PowellDirectionalSolver_VTR(self):
from mystic.solvers import PowellDirectionalSolver
from mystic.termination import VTR
tol = 1e-7
self.solver = PowellDirectionalSolver(self.ND)
self.term = VTR()#tol)
self._run_solver()
def test_PowellDirectionalSolver_CRT(self):
from mystic.solvers import PowellDirectionalSolver
from mystic.termination import CandidateRelativeTolerance as CRT
tol = 1e-7
self.solver = PowellDirectionalSolver(self.ND)
self.term = CRT()#tol)
self._run_solver(iter_limit=True)
#####################################################################
class TestPeaks(unittest.TestCase):
"""Test the peaks optimization problem."""
def setUp(self):
from mystic.models import peaks
self.costfunction = peaks
self.ND = 2
self.expected = [0.23, -1.63]
self.min = [-3.0]*self.ND
self.max = [3.0]*self.ND
self.maxiter = 2500
self.nplaces = 0 # Precision of answer
def _run_solver(self, iter_limit=False, **kwds):
from mystic.monitors import Monitor
from mystic.tools import random_seed
#random_seed(123)
esow = Monitor()
ssow = Monitor()
solver = self.solver
solver.SetRandomInitialPoints(self.min, self.max)
solver.SetEvaluationLimits(generations=self.maxiter)
solver.SetStrictRanges(self.min, self.max)
solver.SetEvaluationMonitor(esow)
solver.SetGenerationMonitor(ssow)
_stdout = trap_stdout()
solver.Solve(self.costfunction, self.term, **kwds)
out = release_stdout(_stdout)
sol = solver.Solution()
if disp:
# Print the number of iterations and function evaluations
iters = len(ssow.x)
func_evals = len(esow.x)
print "\nNumber of iterations = ", iters
print "Number of function evaluations = ", func_evals
# If solver should terminate immediately, check for that only.
if iter_limit:
self.assertTrue(solver.generations < 2)
warn = "Warning: Invalid termination condition (nPop < 2)"
self.assertTrue(warn in out)
return
# Verify solution is close to expected
for i in range(len(sol)):
self.assertAlmostEqual(sol[i], self.expected[i], self.nplaces)
def test_DifferentialEvolutionSolver_VTR(self):
from mystic.solvers import DifferentialEvolutionSolver
from mystic.termination import VTR
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver(self.ND, self.NP)
self.term = VTR()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver_COG(self): # Default for this solver
from mystic.solvers import DifferentialEvolutionSolver
from mystic.termination import ChangeOverGeneration as COG
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver(self.ND, self.NP)
self.term = COG()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver_NCOG(self):
from mystic.solvers import DifferentialEvolutionSolver
from mystic.termination import NormalizedChangeOverGeneration as NCOG
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver(self.ND, self.NP)
self.term = NCOG()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver_CRT(self):
from mystic.solvers import DifferentialEvolutionSolver
from mystic.termination import CandidateRelativeTolerance as CRT
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver(self.ND, self.NP)
self.term = CRT()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
#--------------------------------------------------------------------------
def test_DifferentialEvolutionSolver2_VTR(self):
from mystic.solvers import DifferentialEvolutionSolver2
from mystic.termination import VTR
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver2(self.ND, self.NP)
self.term = VTR()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver2_COG(self): # Default for this solver
from mystic.solvers import DifferentialEvolutionSolver2
from mystic.termination import ChangeOverGeneration as COG
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver2(self.ND, self.NP)
self.term = COG()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver2_NCOG(self):
from mystic.solvers import DifferentialEvolutionSolver2
from mystic.termination import NormalizedChangeOverGeneration as NCOG
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver2(self.ND, self.NP)
self.term = NCOG()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver2_CRT(self):
from mystic.solvers import DifferentialEvolutionSolver2
from mystic.termination import CandidateRelativeTolerance as CRT
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver2(self.ND, self.NP)
self.term = CRT()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
#-------------------------------------------------------------------------
def test_NelderMeadSimplexSolver_CRT(self): # Default for this solver
from mystic.solvers import NelderMeadSimplexSolver
from mystic.termination import CandidateRelativeTolerance as CRT
tol = 1e-7
self.solver = NelderMeadSimplexSolver(self.ND)
self.term = CRT()#tol)
self._run_solver()
def test_NelderMeadSimplexSolver_VTR(self):
from mystic.solvers import NelderMeadSimplexSolver
from mystic.termination import VTR
tol = 1e-7
self.solver = NelderMeadSimplexSolver(self.ND)
self.term = VTR()#tol)
self._run_solver()
def test_NelderMeadSimplexSolver_COG(self):
from mystic.solvers import NelderMeadSimplexSolver
from mystic.termination import ChangeOverGeneration as COG
tol = 1e-7
self.solver = NelderMeadSimplexSolver(self.ND)
self.term = COG()#tol)
self._run_solver()
def test_NelderMeadSimplexSolver_NCOG(self):
from mystic.solvers import NelderMeadSimplexSolver
from mystic.termination import NormalizedChangeOverGeneration as NCOG
tol = 1e-7
self.solver = NelderMeadSimplexSolver(self.ND)
self.term = NCOG()#tol)
self._run_solver()
#--------------------------------------------------------------------------
def test_PowellDirectionalSolver_NCOG(self): # Default for this solver
from mystic.solvers import PowellDirectionalSolver
from mystic.termination import NormalizedChangeOverGeneration as NCOG
tol = 1e-7
self.solver = PowellDirectionalSolver(self.ND)
self.term = NCOG()#tol)
self._run_solver()
def test_PowellDirectionalSolver_COG(self):
from mystic.solvers import PowellDirectionalSolver
from mystic.termination import ChangeOverGeneration as COG
tol = 1e-7
self.solver = PowellDirectionalSolver(self.ND)
self.term = COG()#tol)
self._run_solver()
def test_PowellDirectionalSolver_VTR(self):
from mystic.solvers import PowellDirectionalSolver
from mystic.termination import VTR
tol = 1e-7
self.solver = PowellDirectionalSolver(self.ND)
self.term = VTR()#tol)
self._run_solver()
def test_PowellDirectionalSolver_CRT(self):
from mystic.solvers import PowellDirectionalSolver
from mystic.termination import CandidateRelativeTolerance as CRT
tol = 1e-7
self.solver = PowellDirectionalSolver(self.ND)
self.term = CRT()#tol)
self._run_solver(iter_limit=True)
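# A minimal sketch (hand-run, not collected by unittest; the helper name is
# illustrative) of the configuration TestPeaks exercises, reduced to a single
# solver/termination pair; bounds, generation cap, and the expected optimum
# mirror setUp above.
def _example_peaks_neldermead():
    from mystic.solvers import NelderMeadSimplexSolver
    from mystic.termination import CandidateRelativeTolerance as CRT
    from mystic.models import peaks
    solver = NelderMeadSimplexSolver(2)
    solver.SetRandomInitialPoints([-3.0]*2, [3.0]*2)
    solver.SetEvaluationLimits(generations=2500)
    solver.SetStrictRanges([-3.0]*2, [3.0]*2)
    solver.Solve(peaks, CRT())          # CRT is the default for Nelder-Mead
    return solver.Solution()            # should land near [0.23, -1.63]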
##############################################################################
class TestVenkataraman91(unittest.TestCase):
"""Test Venkataraman's sinc optimization problem."""
def setUp(self):
from mystic.models import venkat91
self.costfunction = venkat91
self.ND = 2
self.expected = [4., 4.]
self.min = [-10.0]*self.ND
self.max = [10.0]*self.ND
self.maxiter = 2500
self.nplaces = 0 # Precision of answer
def _run_solver(self, iter_limit=False, **kwds):
from mystic.monitors import Monitor
from mystic.tools import random_seed
#random_seed(123)
esow = Monitor()
ssow = Monitor()
solver = self.solver
solver.SetRandomInitialPoints(self.min, self.max)
solver.SetEvaluationLimits(generations=self.maxiter)
#solver.SetStrictRanges(self.min, self.max)
solver.SetEvaluationMonitor(esow)
solver.SetGenerationMonitor(ssow)
_stdout = trap_stdout()
solver.Solve(self.costfunction, self.term, **kwds)
out = release_stdout(_stdout)
sol = solver.Solution()
if disp:
# Print the number of iterations and function evaluations
iters = len(ssow.x)
func_evals = len(esow.x)
print "\nNumber of iterations = ", iters
print "Number of function evaluations = ", func_evals
        # If solver should terminate immediately, check for that only.
        if iter_limit:
            self.assertTrue(solver.generations < 2)
            warn = "Warning: Invalid termination condition (nPop < 2)"
            self.assertTrue(warn in out)
            return
        # Verify solution is close to expected
        for i in range(len(sol)):
            self.assertAlmostEqual(sol[i], self.expected[i], self.nplaces)
def test_DifferentialEvolutionSolver_VTR(self):
from mystic.solvers import DifferentialEvolutionSolver
from mystic.termination import VTR
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver(self.ND, self.NP)
self.term = VTR()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver_COG(self): # Default for this solver
from mystic.solvers import DifferentialEvolutionSolver
from mystic.termination import ChangeOverGeneration as COG
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver(self.ND, self.NP)
self.term = COG()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver_NCOG(self):
from mystic.solvers import DifferentialEvolutionSolver
from mystic.termination import NormalizedChangeOverGeneration as NCOG
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver(self.ND, self.NP)
self.term = NCOG()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver_CRT(self):
from mystic.solvers import DifferentialEvolutionSolver
from mystic.termination import CandidateRelativeTolerance as CRT
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver(self.ND, self.NP)
self.term = CRT()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
#--------------------------------------------------------------------------
def test_DifferentialEvolutionSolver2_VTR(self):
from mystic.solvers import DifferentialEvolutionSolver2
from mystic.termination import VTR
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver2(self.ND, self.NP)
self.term = VTR()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver2_COG(self): # Default for this solver
from mystic.solvers import DifferentialEvolutionSolver2
from mystic.termination import ChangeOverGeneration as COG
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver2(self.ND, self.NP)
self.term = COG()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver2_NCOG(self):
from mystic.solvers import DifferentialEvolutionSolver2
from mystic.termination import NormalizedChangeOverGeneration as NCOG
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver2(self.ND, self.NP)
self.term = NCOG()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver2_CRT(self):
from mystic.solvers import DifferentialEvolutionSolver2
from mystic.termination import CandidateRelativeTolerance as CRT
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver2(self.ND, self.NP)
self.term = CRT()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
#-------------------------------------------------------------------------
def test_NelderMeadSimplexSolver_CRT(self): # Default for this solver
from mystic.solvers import NelderMeadSimplexSolver
from mystic.termination import CandidateRelativeTolerance as CRT
tol = 1e-7
self.solver = NelderMeadSimplexSolver(self.ND)
self.term = CRT()#tol)
self._run_solver()
def test_NelderMeadSimplexSolver_VTR(self):
from mystic.solvers import NelderMeadSimplexSolver
from mystic.termination import VTR
tol = 1e-7
self.solver = NelderMeadSimplexSolver(self.ND)
self.term = VTR()#tol)
self._run_solver()
def test_NelderMeadSimplexSolver_COG(self):
from mystic.solvers import NelderMeadSimplexSolver
from mystic.termination import ChangeOverGeneration as COG
tol = 1e-7
self.solver = NelderMeadSimplexSolver(self.ND)
self.term = COG()#tol)
self._run_solver()
def test_NelderMeadSimplexSolver_NCOG(self):
from mystic.solvers import NelderMeadSimplexSolver
from mystic.termination import NormalizedChangeOverGeneration as NCOG
tol = 1e-7
self.solver = NelderMeadSimplexSolver(self.ND)
self.term = NCOG()#tol)
self._run_solver()
#--------------------------------------------------------------------------
def test_PowellDirectionalSolver_NCOG(self): # Default for this solver
from mystic.solvers import PowellDirectionalSolver
from mystic.termination import NormalizedChangeOverGeneration as NCOG
tol = 1e-7
self.solver = PowellDirectionalSolver(self.ND)
self.term = NCOG()#tol)
self._run_solver()
def test_PowellDirectionalSolver_COG(self):
from mystic.solvers import PowellDirectionalSolver
from mystic.termination import ChangeOverGeneration as COG
tol = 1e-7
self.solver = PowellDirectionalSolver(self.ND)
self.term = COG()#tol)
self._run_solver()
def test_PowellDirectionalSolver_VTR(self):
from mystic.solvers import PowellDirectionalSolver
from mystic.termination import VTR
tol = 1e-7
self.solver = PowellDirectionalSolver(self.ND)
self.term = VTR()#tol)
self._run_solver()
def test_PowellDirectionalSolver_CRT(self):
from mystic.solvers import PowellDirectionalSolver
from mystic.termination import CandidateRelativeTolerance as CRT
tol = 1e-7
self.solver = PowellDirectionalSolver(self.ND)
self.term = CRT()#tol)
self._run_solver(iter_limit=True)
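# A minimal sketch (hand-run; helper name illustrative) of the unconstrained
# solve pattern TestVenkataraman91 uses -- note that the tests above leave
# SetStrictRanges commented out, so this sketch omits it as well.
def _example_venkat91_de():
    from mystic.solvers import DifferentialEvolutionSolver
    from mystic.termination import ChangeOverGeneration as COG
    from mystic.strategy import Rand1Bin
    from mystic.models import venkat91
    solver = DifferentialEvolutionSolver(2, 40)   # ND=2, NP=40 as in the tests
    solver.SetRandomInitialPoints([-10.0]*2, [10.0]*2)
    solver.SetEvaluationLimits(generations=2500)
    solver.Solve(venkat91, COG(), strategy=Rand1Bin)
    return solver.Solution()                      # should land near [4., 4.]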
##############################################################################
class TestSchwefel(unittest.TestCase):
"""Test Schwefel's optimization problem."""
def setUp(self):
from mystic.models import schwefel
self.costfunction = schwefel
self.ND = 2
self.expected = [420.9687]*self.ND
self.min = [-500.0]*self.ND
self.max = [500.0]*self.ND
self.maxiter = 10000
self.nplaces = -1 # Precision of answer
def _run_solver(self, iter_limit=False, **kwds):
from mystic.monitors import Monitor
from mystic.tools import random_seed
random_seed(123)
esow = Monitor()
ssow = Monitor()
solver = self.solver
solver.SetRandomInitialPoints(self.min, self.max)
solver.SetEvaluationLimits(generations=self.maxiter)
solver.SetStrictRanges(self.min, self.max)
solver.SetEvaluationMonitor(esow)
solver.SetGenerationMonitor(ssow)
_stdout = trap_stdout()
solver.Solve(self.costfunction, self.term, **kwds)
out = release_stdout(_stdout)
sol = solver.Solution()
if disp:
# Print the number of iterations and function evaluations
iters = len(ssow.x)
func_evals = len(esow.x)
print "\nNumber of iterations = ", iters
print "Number of function evaluations = ", func_evals
# If solver should terminate immediately, check for that only.
if iter_limit:
self.assertTrue(solver.generations < 2)
warn = "Warning: Invalid termination condition (nPop < 2)"
self.assertTrue(warn in out)
return
# Verify solution is close to expected
for i in range(len(sol)):
self.assertAlmostEqual(sol[i], self.expected[i], self.nplaces)
def test_DifferentialEvolutionSolver_VTR(self):
from mystic.solvers import DifferentialEvolutionSolver
from mystic.termination import VTR
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver(self.ND, self.NP)
self.term = VTR()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver_COG(self): # Default for this solver
from mystic.solvers import DifferentialEvolutionSolver
from mystic.termination import ChangeOverGeneration as COG
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver(self.ND, self.NP)
self.term = COG()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver_NCOG(self):
from mystic.solvers import DifferentialEvolutionSolver
from mystic.termination import NormalizedChangeOverGeneration as NCOG
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver(self.ND, self.NP)
self.term = NCOG()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver_CRT(self):
from mystic.solvers import DifferentialEvolutionSolver
from mystic.termination import CandidateRelativeTolerance as CRT
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver(self.ND, self.NP)
self.term = CRT()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
#--------------------------------------------------------------------------
def test_DifferentialEvolutionSolver2_VTR(self):
from mystic.solvers import DifferentialEvolutionSolver2
from mystic.termination import VTR
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver2(self.ND, self.NP)
self.term = VTR()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver2_COG(self): # Default for this solver
from mystic.solvers import DifferentialEvolutionSolver2
from mystic.termination import ChangeOverGeneration as COG
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver2(self.ND, self.NP)
self.term = COG()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver2_NCOG(self):
from mystic.solvers import DifferentialEvolutionSolver2
from mystic.termination import NormalizedChangeOverGeneration as NCOG
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver2(self.ND, self.NP)
self.term = NCOG()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver2_CRT(self):
from mystic.solvers import DifferentialEvolutionSolver2
from mystic.termination import CandidateRelativeTolerance as CRT
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver2(self.ND, self.NP)
self.term = CRT()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
#-------------------------------------------------------------------------
def test_NelderMeadSimplexSolver_CRT(self): # Default for this solver
from mystic.solvers import NelderMeadSimplexSolver
from mystic.termination import CandidateRelativeTolerance as CRT
tol = 1e-7
self.solver = NelderMeadSimplexSolver(self.ND)
self.term = CRT()#tol)
self._run_solver()
def test_NelderMeadSimplexSolver_VTR(self):
from mystic.solvers import NelderMeadSimplexSolver
from mystic.termination import VTR
tol = 1e-7
self.solver = NelderMeadSimplexSolver(self.ND)
self.term = VTR()#tol)
self._run_solver()
def test_NelderMeadSimplexSolver_COG(self):
from mystic.solvers import NelderMeadSimplexSolver
from mystic.termination import ChangeOverGeneration as COG
tol = 1e-7
self.solver = NelderMeadSimplexSolver(self.ND)
self.term = COG()#tol)
self._run_solver()
def test_NelderMeadSimplexSolver_NCOG(self):
from mystic.solvers import NelderMeadSimplexSolver
from mystic.termination import NormalizedChangeOverGeneration as NCOG
tol = 1e-7
self.solver = NelderMeadSimplexSolver(self.ND)
self.term = NCOG()#tol)
self._run_solver()
#--------------------------------------------------------------------------
def test_PowellDirectionalSolver_NCOG(self): # Default for this solver
from mystic.solvers import PowellDirectionalSolver
from mystic.termination import NormalizedChangeOverGeneration as NCOG
tol = 1e-7
self.solver = PowellDirectionalSolver(self.ND)
self.term = NCOG()#tol)
self._run_solver()
def test_PowellDirectionalSolver_COG(self):
from mystic.solvers import PowellDirectionalSolver
from mystic.termination import ChangeOverGeneration as COG
tol = 1e-7
self.solver = PowellDirectionalSolver(self.ND)
self.term = COG()#tol)
self._run_solver()
def test_PowellDirectionalSolver_VTR(self):
from mystic.solvers import PowellDirectionalSolver
from mystic.termination import VTR
tol = 1e-7
self.solver = PowellDirectionalSolver(self.ND)
self.term = VTR()#tol)
self._run_solver()
def test_PowellDirectionalSolver_CRT(self):
from mystic.solvers import PowellDirectionalSolver
from mystic.termination import CandidateRelativeTolerance as CRT
tol = 1e-7
self.solver = PowellDirectionalSolver(self.ND)
self.term = CRT()#tol)
self._run_solver(iter_limit=True)
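# A minimal sketch (hand-run, not collected by unittest; helper name is
# illustrative) of a reproducible Schwefel solve: random_seed pins the random
# initial population exactly as _run_solver does above, and the strict ranges
# keep candidates inside the box that holds the optimum near [420.9687]*2.
def _example_schwefel_de2():
    from mystic.solvers import DifferentialEvolutionSolver2
    from mystic.termination import ChangeOverGeneration as COG
    from mystic.strategy import Rand1Bin
    from mystic.models import schwefel
    from mystic.tools import random_seed
    random_seed(123)
    solver = DifferentialEvolutionSolver2(2, 40)
    solver.SetRandomInitialPoints([-500.0]*2, [500.0]*2)
    solver.SetStrictRanges([-500.0]*2, [500.0]*2)
    solver.SetEvaluationLimits(generations=10000)
    solver.Solve(schwefel, COG(), strategy=Rand1Bin)
    return solver.Solution()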
##############################################################################
class TestEasom(unittest.TestCase):
"""Test Easom's function."""
def setUp(self):
from mystic.models import easom
self.costfunction = easom
self.ND = 2
self.expected = [pi]*self.ND
self.min = [-100.]*self.ND
self.max = [100.]*self.ND
self.maxiter = 10000
self.nplaces = -1 # Precision of answer
def _run_solver(self, iter_limit=False, **kwds):
from mystic.monitors import Monitor
from mystic.tools import random_seed
random_seed(123)
esow = Monitor()
ssow = Monitor()
solver = self.solver
solver.SetRandomInitialPoints(self.min, self.max)
solver.SetEvaluationLimits(generations=self.maxiter)
solver.SetStrictRanges(self.min, self.max)
solver.SetEvaluationMonitor(esow)
solver.SetGenerationMonitor(ssow)
_stdout = trap_stdout()
solver.Solve(self.costfunction, self.term, **kwds)
out = release_stdout(_stdout)
sol = solver.Solution()
if disp:
# Print the number of iterations and function evaluations
iters = len(ssow.x)
func_evals = len(esow.x)
print "\nNumber of iterations = ", iters
print "Number of function evaluations = ", func_evals
# If solver should terminate immediately, check for that only.
if iter_limit:
self.assertTrue(solver.generations < 2)
warn = "Warning: Invalid termination condition (nPop < 2)"
self.assertTrue(warn in out)
return
# Verify solution is close to expected
for i in range(len(sol)):
self.assertAlmostEqual(sol[i], self.expected[i], self.nplaces)
def test_DifferentialEvolutionSolver_VTR(self):
from mystic.solvers import DifferentialEvolutionSolver
from mystic.termination import VTR
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver(self.ND, self.NP)
self.term = VTR()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver_COG(self): # Default for this solver
from mystic.solvers import DifferentialEvolutionSolver
from mystic.termination import ChangeOverGeneration as COG
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver(self.ND, self.NP)
self.term = COG()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver_NCOG(self):
from mystic.solvers import DifferentialEvolutionSolver
from mystic.termination import NormalizedChangeOverGeneration as NCOG
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver(self.ND, self.NP)
self.term = NCOG()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver_CRT(self):
from mystic.solvers import DifferentialEvolutionSolver
from mystic.termination import CandidateRelativeTolerance as CRT
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver(self.ND, self.NP)
self.term = CRT()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
#--------------------------------------------------------------------------
def test_DifferentialEvolutionSolver2_VTR(self):
from mystic.solvers import DifferentialEvolutionSolver2
from mystic.termination import VTR
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver2(self.ND, self.NP)
self.term = VTR()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver2_COG(self): # Default for this solver
from mystic.solvers import DifferentialEvolutionSolver2
from mystic.termination import ChangeOverGeneration as COG
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver2(self.ND, self.NP)
self.term = COG()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver2_NCOG(self):
from mystic.solvers import DifferentialEvolutionSolver2
from mystic.termination import NormalizedChangeOverGeneration as NCOG
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver2(self.ND, self.NP)
self.term = NCOG()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver2_CRT(self):
from mystic.solvers import DifferentialEvolutionSolver2
from mystic.termination import CandidateRelativeTolerance as CRT
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver2(self.ND, self.NP)
self.term = CRT()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
#-------------------------------------------------------------------------
def test_NelderMeadSimplexSolver_CRT(self): # Default for this solver
from mystic.solvers import NelderMeadSimplexSolver
from mystic.termination import CandidateRelativeTolerance as CRT
tol = 1e-7
self.solver = NelderMeadSimplexSolver(self.ND)
self.term = CRT()#tol)
self._run_solver()
def test_NelderMeadSimplexSolver_VTR(self):
from mystic.solvers import NelderMeadSimplexSolver
from mystic.termination import VTR
tol = 1e-7
self.solver = NelderMeadSimplexSolver(self.ND)
self.term = VTR()#tol)
self._run_solver()
def test_NelderMeadSimplexSolver_COG(self):
from mystic.solvers import NelderMeadSimplexSolver
from mystic.termination import ChangeOverGeneration as COG
tol = 1e-7
self.solver = NelderMeadSimplexSolver(self.ND)
self.term = COG()#tol)
self._run_solver()
def test_NelderMeadSimplexSolver_NCOG(self):
from mystic.solvers import NelderMeadSimplexSolver
from mystic.termination import NormalizedChangeOverGeneration as NCOG
tol = 1e-7
self.solver = NelderMeadSimplexSolver(self.ND)
self.term = NCOG()#tol)
self._run_solver()
#--------------------------------------------------------------------------
def test_PowellDirectionalSolver_NCOG(self): # Default for this solver
from mystic.solvers import PowellDirectionalSolver
from mystic.termination import NormalizedChangeOverGeneration as NCOG
tol = 1e-7
self.solver = PowellDirectionalSolver(self.ND)
self.term = NCOG()#tol)
self._run_solver()
def test_PowellDirectionalSolver_COG(self):
from mystic.solvers import PowellDirectionalSolver
from mystic.termination import ChangeOverGeneration as COG
tol = 1e-7
self.solver = PowellDirectionalSolver(self.ND)
self.term = COG()#tol)
self._run_solver()
def test_PowellDirectionalSolver_VTR(self):
from mystic.solvers import PowellDirectionalSolver
from mystic.termination import VTR
tol = 1e-7
self.solver = PowellDirectionalSolver(self.ND)
self.term = VTR()#tol)
self._run_solver()
def test_PowellDirectionalSolver_CRT(self):
from mystic.solvers import PowellDirectionalSolver
from mystic.termination import CandidateRelativeTolerance as CRT
tol = 1e-7
self.solver = PowellDirectionalSolver(self.ND)
self.term = CRT()#tol)
self._run_solver(iter_limit=True)
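# A minimal sketch (hand-run; helper name illustrative) of TestEasom's target:
# Easom's function has a needle-like minimum at (pi, pi) inside a nearly flat
# plateau, which is why the tests allow 10000 generations and check only one
# decimal place.
def _example_easom_powell():
    from math import pi
    from mystic.solvers import PowellDirectionalSolver
    from mystic.termination import NormalizedChangeOverGeneration as NCOG
    from mystic.models import easom
    solver = PowellDirectionalSolver(2)
    solver.SetRandomInitialPoints([-100.0]*2, [100.0]*2)
    solver.SetStrictRanges([-100.0]*2, [100.0]*2)
    solver.SetEvaluationLimits(generations=10000)
    solver.Solve(easom, NCOG())       # NCOG is the default for Powell
    return solver.Solution(), [pi]*2  # solution vs. expected optimum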
##############################################################################
class TestRotatedEllipsoid(unittest.TestCase):
"""Test the rotated ellipsoid function in 2 dimensions."""
def setUp(self):
from mystic.models import ellipsoid
self.costfunction = ellipsoid
self.ND = 2
self.expected = [0.]*self.ND
self.min = [-65.536]*self.ND
self.max = [65.536]*self.ND
self.maxiter = 10000
self.nplaces = -1 # Precision of answer
def _run_solver(self, iter_limit=False, **kwds):
from mystic.monitors import Monitor
from mystic.tools import random_seed
random_seed(123)
esow = Monitor()
ssow = Monitor()
solver = self.solver
solver.SetRandomInitialPoints(self.min, self.max)
solver.SetEvaluationLimits(generations=self.maxiter)
solver.SetStrictRanges(self.min, self.max)
solver.SetEvaluationMonitor(esow)
solver.SetGenerationMonitor(ssow)
_stdout = trap_stdout()
solver.Solve(self.costfunction, self.term, **kwds)
out = release_stdout(_stdout)
sol = solver.Solution()
if disp:
# Print the number of iterations and function evaluations
iters = len(ssow.x)
func_evals = len(esow.x)
print "\nNumber of iterations = ", iters
print "Number of function evaluations = ", func_evals
# If solver should terminate immediately, check for that only.
if iter_limit:
self.assertTrue(solver.generations < 2)
warn = "Warning: Invalid termination condition (nPop < 2)"
self.assertTrue(warn in out)
return
# Verify solution is close to expected
for i in range(len(sol)):
self.assertAlmostEqual(sol[i], self.expected[i], self.nplaces)
def test_DifferentialEvolutionSolver_VTR(self):
from mystic.solvers import DifferentialEvolutionSolver
from mystic.termination import VTR
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver(self.ND, self.NP)
self.term = VTR()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver_COG(self): # Default for this solver
from mystic.solvers import DifferentialEvolutionSolver
from mystic.termination import ChangeOverGeneration as COG
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver(self.ND, self.NP)
self.term = COG()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver_NCOG(self):
from mystic.solvers import DifferentialEvolutionSolver
from mystic.termination import NormalizedChangeOverGeneration as NCOG
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver(self.ND, self.NP)
self.term = NCOG()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver_CRT(self):
from mystic.solvers import DifferentialEvolutionSolver
from mystic.termination import CandidateRelativeTolerance as CRT
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver(self.ND, self.NP)
self.term = CRT()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
#--------------------------------------------------------------------------
def test_DifferentialEvolutionSolver2_VTR(self):
from mystic.solvers import DifferentialEvolutionSolver2
from mystic.termination import VTR
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver2(self.ND, self.NP)
self.term = VTR()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver2_COG(self): # Default for this solver
from mystic.solvers import DifferentialEvolutionSolver2
from mystic.termination import ChangeOverGeneration as COG
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver2(self.ND, self.NP)
self.term = COG()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver2_NCOG(self):
from mystic.solvers import DifferentialEvolutionSolver2
from mystic.termination import NormalizedChangeOverGeneration as NCOG
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver2(self.ND, self.NP)
self.term = NCOG()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver2_CRT(self):
from mystic.solvers import DifferentialEvolutionSolver2
from mystic.termination import CandidateRelativeTolerance as CRT
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver2(self.ND, self.NP)
self.term = CRT()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
#-------------------------------------------------------------------------
def test_NelderMeadSimplexSolver_CRT(self): # Default for this solver
from mystic.solvers import NelderMeadSimplexSolver
from mystic.termination import CandidateRelativeTolerance as CRT
tol = 1e-7
self.solver = NelderMeadSimplexSolver(self.ND)
self.term = CRT()#tol)
self._run_solver()
def test_NelderMeadSimplexSolver_VTR(self):
from mystic.solvers import NelderMeadSimplexSolver
from mystic.termination import VTR
tol = 1e-7
self.solver = NelderMeadSimplexSolver(self.ND)
self.term = VTR()#tol)
self._run_solver()
def test_NelderMeadSimplexSolver_COG(self):
from mystic.solvers import NelderMeadSimplexSolver
from mystic.termination import ChangeOverGeneration as COG
tol = 1e-7
self.solver = NelderMeadSimplexSolver(self.ND)
self.term = COG()#tol)
self._run_solver()
def test_NelderMeadSimplexSolver_NCOG(self):
from mystic.solvers import NelderMeadSimplexSolver
from mystic.termination import NormalizedChangeOverGeneration as NCOG
tol = 1e-7
self.solver = NelderMeadSimplexSolver(self.ND)
self.term = NCOG()#tol)
self._run_solver()
#--------------------------------------------------------------------------
def test_PowellDirectionalSolver_NCOG(self): # Default for this solver
from mystic.solvers import PowellDirectionalSolver
from mystic.termination import NormalizedChangeOverGeneration as NCOG
tol = 1e-7
self.solver = PowellDirectionalSolver(self.ND)
self.term = NCOG()#tol)
self._run_solver()
def test_PowellDirectionalSolver_COG(self):
from mystic.solvers import PowellDirectionalSolver
from mystic.termination import ChangeOverGeneration as COG
tol = 1e-7
self.solver = PowellDirectionalSolver(self.ND)
self.term = COG()#tol)
self._run_solver()
def test_PowellDirectionalSolver_VTR(self):
from mystic.solvers import PowellDirectionalSolver
from mystic.termination import VTR
tol = 1e-7
self.solver = PowellDirectionalSolver(self.ND)
self.term = VTR()#tol)
self._run_solver()
def test_PowellDirectionalSolver_CRT(self):
from mystic.solvers import PowellDirectionalSolver
from mystic.termination import CandidateRelativeTolerance as CRT
tol = 1e-7
self.solver = PowellDirectionalSolver(self.ND)
self.term = CRT()#tol)
self._run_solver(iter_limit=True)
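# A minimal sketch (hand-run; helper name illustrative) for the rotated
# ellipsoid: a smooth unimodal bowl with its optimum at the origin, so even
# the loose -1 decimal-place tolerance used above is comfortably met by DE.
def _example_ellipsoid_de():
    from mystic.solvers import DifferentialEvolutionSolver
    from mystic.termination import NormalizedChangeOverGeneration as NCOG
    from mystic.strategy import Rand1Bin
    from mystic.models import ellipsoid
    solver = DifferentialEvolutionSolver(2, 40)
    solver.SetRandomInitialPoints([-65.536]*2, [65.536]*2)
    solver.SetStrictRanges([-65.536]*2, [65.536]*2)
    solver.SetEvaluationLimits(generations=10000)
    solver.Solve(ellipsoid, NCOG(), strategy=Rand1Bin)
    return solver.Solution()          # should approach [0., 0.]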
##############################################################################
class TestAckley(unittest.TestCase):
"""Test Ackley's path function in 2 dimensions."""
def setUp(self):
from mystic.models import ackley
self.costfunction = ackley
self.ND = 2
self.expected = [0.]*self.ND
self.min = [-32.768]*self.ND
self.max = [32.768]*self.ND
self.maxiter = 10000
self.nplaces = 0 # Precision of answer
def _run_solver(self, iter_limit=False, **kwds):
from mystic.monitors import Monitor
from mystic.tools import random_seed
random_seed(123)
esow = Monitor()
ssow = Monitor()
solver = self.solver
solver.SetRandomInitialPoints(self.min, self.max)
solver.SetEvaluationLimits(generations=self.maxiter)
solver.SetStrictRanges(self.min, self.max)
solver.SetEvaluationMonitor(esow)
solver.SetGenerationMonitor(ssow)
_stdout = trap_stdout()
solver.Solve(self.costfunction, self.term, **kwds)
out = release_stdout(_stdout)
sol = solver.Solution()
if disp:
# Print the number of iterations and function evaluations
iters = len(ssow.x)
func_evals = len(esow.x)
print "\nNumber of iterations = ", iters
print "Number of function evaluations = ", func_evals
# If solver should terminate immediately, check for that only.
if iter_limit:
self.assertTrue(solver.generations < 2)
warn = "Warning: Invalid termination condition (nPop < 2)"
self.assertTrue(warn in out)
return
# Verify solution is close to expected
for i in range(len(sol)):
self.assertAlmostEqual(sol[i], self.expected[i], self.nplaces)
def test_DifferentialEvolutionSolver_VTR(self):
from mystic.solvers import DifferentialEvolutionSolver
from mystic.termination import VTR
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver(self.ND, self.NP)
self.term = VTR()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver_COG(self): # Default for this solver
from mystic.solvers import DifferentialEvolutionSolver
from mystic.termination import ChangeOverGeneration as COG
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver(self.ND, self.NP)
self.term = COG()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver_NCOG(self):
from mystic.solvers import DifferentialEvolutionSolver
from mystic.termination import NormalizedChangeOverGeneration as NCOG
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver(self.ND, self.NP)
self.term = NCOG()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver_CRT(self):
from mystic.solvers import DifferentialEvolutionSolver
from mystic.termination import CandidateRelativeTolerance as CRT
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver(self.ND, self.NP)
self.term = CRT()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
#--------------------------------------------------------------------------
def test_DifferentialEvolutionSolver2_VTR(self):
from mystic.solvers import DifferentialEvolutionSolver2
from mystic.termination import VTR
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver2(self.ND, self.NP)
self.term = VTR()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver2_COG(self): # Default for this solver
from mystic.solvers import DifferentialEvolutionSolver2
from mystic.termination import ChangeOverGeneration as COG
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver2(self.ND, self.NP)
self.term = COG()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver2_NCOG(self):
from mystic.solvers import DifferentialEvolutionSolver2
from mystic.termination import NormalizedChangeOverGeneration as NCOG
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver2(self.ND, self.NP)
self.term = NCOG()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver2_CRT(self):
from mystic.solvers import DifferentialEvolutionSolver2
from mystic.termination import CandidateRelativeTolerance as CRT
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver2(self.ND, self.NP)
self.term = CRT()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
#-------------------------------------------------------------------------
def test_NelderMeadSimplexSolver_CRT(self): # Default for this solver
from mystic.solvers import NelderMeadSimplexSolver
from mystic.termination import CandidateRelativeTolerance as CRT
tol = 1e-7
self.solver = NelderMeadSimplexSolver(self.ND)
self.term = CRT()#tol)
self._run_solver()
def test_NelderMeadSimplexSolver_VTR(self):
from mystic.solvers import NelderMeadSimplexSolver
from mystic.termination import VTR
tol = 1e-7
self.solver = NelderMeadSimplexSolver(self.ND)
self.term = VTR()#tol)
self._run_solver()
def test_NelderMeadSimplexSolver_COG(self):
from mystic.solvers import NelderMeadSimplexSolver
from mystic.termination import ChangeOverGeneration as COG
tol = 1e-7
self.solver = NelderMeadSimplexSolver(self.ND)
self.term = COG()#tol)
self._run_solver()
def test_NelderMeadSimplexSolver_NCOG(self):
from mystic.solvers import NelderMeadSimplexSolver
from mystic.termination import NormalizedChangeOverGeneration as NCOG
tol = 1e-7
self.solver = NelderMeadSimplexSolver(self.ND)
self.term = NCOG()#tol)
self._run_solver()
#--------------------------------------------------------------------------
def test_PowellDirectionalSolver_NCOG(self): # Default for this solver
from mystic.solvers import PowellDirectionalSolver
from mystic.termination import NormalizedChangeOverGeneration as NCOG
tol = 1e-7
self.solver = PowellDirectionalSolver(self.ND)
self.term = NCOG()#tol)
self._run_solver()
def test_PowellDirectionalSolver_COG(self):
from mystic.solvers import PowellDirectionalSolver
from mystic.termination import ChangeOverGeneration as COG
tol = 1e-7
self.solver = PowellDirectionalSolver(self.ND)
self.term = COG()#tol)
self._run_solver()
def test_PowellDirectionalSolver_VTR(self):
from mystic.solvers import PowellDirectionalSolver
from mystic.termination import VTR
tol = 1e-7
self.solver = PowellDirectionalSolver(self.ND)
self.term = VTR()#tol)
self._run_solver()
def test_PowellDirectionalSolver_CRT(self):
from mystic.solvers import PowellDirectionalSolver
from mystic.termination import CandidateRelativeTolerance as CRT
tol = 1e-7
self.solver = PowellDirectionalSolver(self.ND)
self.term = CRT()#tol)
self._run_solver(iter_limit=True)
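# A minimal sketch (hand-run; helper name illustrative) for Ackley's path
# function, whose many shallow local minima surrounding the global optimum at
# the origin make it a standard stress test for the population-based solvers.
def _example_ackley_de2():
    from mystic.solvers import DifferentialEvolutionSolver2
    from mystic.termination import ChangeOverGeneration as COG
    from mystic.strategy import Rand1Bin
    from mystic.models import ackley
    solver = DifferentialEvolutionSolver2(2, 40)
    solver.SetRandomInitialPoints([-32.768]*2, [32.768]*2)
    solver.SetStrictRanges([-32.768]*2, [32.768]*2)
    solver.SetEvaluationLimits(generations=10000)
    solver.Solve(ackley, COG(), strategy=Rand1Bin)
    return solver.Solution()          # should approach [0., 0.]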
##############################################################################
class TestRastrigin(unittest.TestCase):
"""Test Rastrigin's function in 2 dimensions."""
def setUp(self):
from mystic.models import rastrigin
self.costfunction = rastrigin
self.ND = 2
self.expected = [0.]*self.ND
self.min = [-5.12]*self.ND
self.max = [5.12]*self.ND
self.maxiter = 10000
self.nplaces = 0 # Precision of answer
def _run_solver(self, iter_limit=False, **kwds):
from mystic.monitors import Monitor
from mystic.tools import random_seed
random_seed(123)
esow = Monitor()
ssow = Monitor()
solver = self.solver
solver.SetRandomInitialPoints(self.min, self.max)
solver.SetEvaluationLimits(generations=self.maxiter)
solver.SetStrictRanges(self.min, self.max)
solver.SetEvaluationMonitor(esow)
solver.SetGenerationMonitor(ssow)
_stdout = trap_stdout()
solver.Solve(self.costfunction, self.term, **kwds)
out = release_stdout(_stdout)
sol = solver.Solution()
if disp:
# Print the number of iterations and function evaluations
iters = len(ssow.x)
func_evals = len(esow.x)
print "\nNumber of iterations = ", iters
print "Number of function evaluations = ", func_evals
# If solver should terminate immediately, check for that only.
if iter_limit:
self.assertTrue(solver.generations < 2)
warn = "Warning: Invalid termination condition (nPop < 2)"
self.assertTrue(warn in out)
return
# Verify solution is close to expected
for i in range(len(sol)):
self.assertAlmostEqual(sol[i], self.expected[i], self.nplaces)
def test_DifferentialEvolutionSolver_VTR(self):
from mystic.solvers import DifferentialEvolutionSolver
from mystic.termination import VTR
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver(self.ND, self.NP)
self.term = VTR()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver_COG(self): # Default for this solver
from mystic.solvers import DifferentialEvolutionSolver
from mystic.termination import ChangeOverGeneration as COG
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver(self.ND, self.NP)
self.term = COG()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver_NCOG(self):
from mystic.solvers import DifferentialEvolutionSolver
from mystic.termination import NormalizedChangeOverGeneration as NCOG
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver(self.ND, self.NP)
self.term = NCOG()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver_CRT(self):
from mystic.solvers import DifferentialEvolutionSolver
from mystic.termination import CandidateRelativeTolerance as CRT
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver(self.ND, self.NP)
self.term = CRT()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
#--------------------------------------------------------------------------
def test_DifferentialEvolutionSolver2_VTR(self):
from mystic.solvers import DifferentialEvolutionSolver2
from mystic.termination import VTR
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver2(self.ND, self.NP)
self.term = VTR()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver2_COG(self): # Default for this solver
from mystic.solvers import DifferentialEvolutionSolver2
from mystic.termination import ChangeOverGeneration as COG
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver2(self.ND, self.NP)
self.term = COG()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver2_NCOG(self):
from mystic.solvers import DifferentialEvolutionSolver2
from mystic.termination import NormalizedChangeOverGeneration as NCOG
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver2(self.ND, self.NP)
self.term = NCOG()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver2_CRT(self):
from mystic.solvers import DifferentialEvolutionSolver2
from mystic.termination import CandidateRelativeTolerance as CRT
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver2(self.ND, self.NP)
self.term = CRT()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
#-------------------------------------------------------------------------
def test_NelderMeadSimplexSolver_CRT(self): # Default for this solver
from mystic.solvers import NelderMeadSimplexSolver
from mystic.termination import CandidateRelativeTolerance as CRT
tol = 1e-7
self.solver = NelderMeadSimplexSolver(self.ND)
self.term = CRT()#tol)
self._run_solver()
def test_NelderMeadSimplexSolver_VTR(self):
from mystic.solvers import NelderMeadSimplexSolver
from mystic.termination import VTR
tol = 1e-7
self.solver = NelderMeadSimplexSolver(self.ND)
self.term = VTR()#tol)
self._run_solver()
def test_NelderMeadSimplexSolver_COG(self):
from mystic.solvers import NelderMeadSimplexSolver
from mystic.termination import ChangeOverGeneration as COG
tol = 1e-7
self.solver = NelderMeadSimplexSolver(self.ND)
self.term = COG()#tol)
self._run_solver()
def test_NelderMeadSimplexSolver_NCOG(self):
from mystic.solvers import NelderMeadSimplexSolver
from mystic.termination import NormalizedChangeOverGeneration as NCOG
tol = 1e-7
self.solver = NelderMeadSimplexSolver(self.ND)
self.term = NCOG()#tol)
self._run_solver()
#--------------------------------------------------------------------------
def test_PowellDirectionalSolver_NCOG(self): # Default for this solver
from mystic.solvers import PowellDirectionalSolver
from mystic.termination import NormalizedChangeOverGeneration as NCOG
tol = 1e-7
self.solver = PowellDirectionalSolver(self.ND)
self.term = NCOG()#tol)
self._run_solver()
def test_PowellDirectionalSolver_COG(self):
from mystic.solvers import PowellDirectionalSolver
from mystic.termination import ChangeOverGeneration as COG
tol = 1e-7
self.solver = PowellDirectionalSolver(self.ND)
self.term = COG()#tol)
self._run_solver()
def test_PowellDirectionalSolver_VTR(self):
from mystic.solvers import PowellDirectionalSolver
from mystic.termination import VTR
tol = 1e-7
self.solver = PowellDirectionalSolver(self.ND)
self.term = VTR()#tol)
self._run_solver()
def test_PowellDirectionalSolver_CRT(self):
from mystic.solvers import PowellDirectionalSolver
from mystic.termination import CandidateRelativeTolerance as CRT
tol = 1e-7
self.solver = PowellDirectionalSolver(self.ND)
self.term = CRT()#tol)
self._run_solver(iter_limit=True)
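# A minimal sketch (hand-run; helper name illustrative) for Rastrigin's
# function; like Ackley it is highly multimodal, with its global minimum at
# the origin. VTR() is used with its defaults, exactly as in the tests above.
def _example_rastrigin_de():
    from mystic.solvers import DifferentialEvolutionSolver
    from mystic.termination import VTR
    from mystic.strategy import Rand1Bin
    from mystic.models import rastrigin
    solver = DifferentialEvolutionSolver(2, 40)
    solver.SetRandomInitialPoints([-5.12]*2, [5.12]*2)
    solver.SetStrictRanges([-5.12]*2, [5.12]*2)
    solver.SetEvaluationLimits(generations=10000)
    solver.Solve(rastrigin, VTR(), strategy=Rand1Bin)
    return solver.Solution()          # should approach [0., 0.]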
##############################################################################
class TestGoldsteinPrice(unittest.TestCase):
"""Test the Goldstein-Price function."""
def setUp(self):
from mystic.models import goldstein
self.costfunction = goldstein
self.ND = 2
self.expected = [0., -1.]
self.min = [-2.]*self.ND
self.max = [2.]*self.ND
self.maxiter = 10000
self.nplaces = 0 # Precision of answer
def _run_solver(self, iter_limit=False, **kwds):
from mystic.monitors import Monitor
from mystic.tools import random_seed
random_seed(123)
esow = Monitor()
ssow = Monitor()
solver = self.solver
solver.SetRandomInitialPoints(self.min, self.max)
solver.SetEvaluationLimits(generations=self.maxiter)
solver.SetStrictRanges(self.min, self.max)
solver.SetEvaluationMonitor(esow)
solver.SetGenerationMonitor(ssow)
_stdout = trap_stdout()
solver.Solve(self.costfunction, self.term, **kwds)
out = release_stdout(_stdout)
sol = solver.Solution()
if disp:
# Print the number of iterations and function evaluations
iters = len(ssow.x)
func_evals = len(esow.x)
print "\nNumber of iterations = ", iters
print "Number of function evaluations = ", func_evals
# If solver should terminate immediately, check for that only.
if iter_limit:
self.assertTrue(solver.generations < 2)
warn = "Warning: Invalid termination condition (nPop < 2)"
self.assertTrue(warn in out)
return
# Verify solution is close to expected
for i in range(len(sol)):
self.assertAlmostEqual(sol[i], self.expected[i], self.nplaces)
def test_DifferentialEvolutionSolver_VTR(self):
from mystic.solvers import DifferentialEvolutionSolver
from mystic.termination import VTR
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver(self.ND, self.NP)
self.term = VTR()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver_COG(self): # Default for this solver
from mystic.solvers import DifferentialEvolutionSolver
from mystic.termination import ChangeOverGeneration as COG
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver(self.ND, self.NP)
self.term = COG()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver_NCOG(self):
from mystic.solvers import DifferentialEvolutionSolver
from mystic.termination import NormalizedChangeOverGeneration as NCOG
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver(self.ND, self.NP)
self.term = NCOG()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver_CRT(self):
from mystic.solvers import DifferentialEvolutionSolver
from mystic.termination import CandidateRelativeTolerance as CRT
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver(self.ND, self.NP)
self.term = CRT()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
#--------------------------------------------------------------------------
def test_DifferentialEvolutionSolver2_VTR(self):
from mystic.solvers import DifferentialEvolutionSolver2
from mystic.termination import VTR
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver2(self.ND, self.NP)
self.term = VTR()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver2_COG(self): # Default for this solver
from mystic.solvers import DifferentialEvolutionSolver2
from mystic.termination import ChangeOverGeneration as COG
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver2(self.ND, self.NP)
self.term = COG()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver2_NCOG(self):
from mystic.solvers import DifferentialEvolutionSolver2
from mystic.termination import NormalizedChangeOverGeneration as NCOG
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver2(self.ND, self.NP)
self.term = NCOG()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver2_CRT(self):
from mystic.solvers import DifferentialEvolutionSolver2
from mystic.termination import CandidateRelativeTolerance as CRT
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver2(self.ND, self.NP)
self.term = CRT()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
#-------------------------------------------------------------------------
def test_NelderMeadSimplexSolver_CRT(self): # Default for this solver
from mystic.solvers import NelderMeadSimplexSolver
from mystic.termination import CandidateRelativeTolerance as CRT
tol = 1e-7
self.solver = NelderMeadSimplexSolver(self.ND)
self.term = CRT()#tol)
self._run_solver()
def test_NelderMeadSimplexSolver_VTR(self):
from mystic.solvers import NelderMeadSimplexSolver
from mystic.termination import VTR
tol = 1e-7
self.solver = NelderMeadSimplexSolver(self.ND)
self.term = VTR()#tol)
self._run_solver()
def test_NelderMeadSimplexSolver_COG(self):
from mystic.solvers import NelderMeadSimplexSolver
from mystic.termination import ChangeOverGeneration as COG
tol = 1e-7
self.solver = NelderMeadSimplexSolver(self.ND)
self.term = COG()#tol)
self._run_solver()
def test_NelderMeadSimplexSolver_NCOG(self):
from mystic.solvers import NelderMeadSimplexSolver
from mystic.termination import NormalizedChangeOverGeneration as NCOG
tol = 1e-7
self.solver = NelderMeadSimplexSolver(self.ND)
self.term = NCOG()#tol)
self._run_solver()
#--------------------------------------------------------------------------
def test_PowellDirectionalSolver_NCOG(self): # Default for this solver
from mystic.solvers import PowellDirectionalSolver
from mystic.termination import NormalizedChangeOverGeneration as NCOG
tol = 1e-7
self.solver = PowellDirectionalSolver(self.ND)
self.term = NCOG()#tol)
self._run_solver()
def test_PowellDirectionalSolver_COG(self):
from mystic.solvers import PowellDirectionalSolver
from mystic.termination import ChangeOverGeneration as COG
tol = 1e-7
self.solver = PowellDirectionalSolver(self.ND)
self.term = COG()#tol)
self._run_solver()
def test_PowellDirectionalSolver_VTR(self):
from mystic.solvers import PowellDirectionalSolver
from mystic.termination import VTR
tol = 1e-7
self.solver = PowellDirectionalSolver(self.ND)
self.term = VTR()#tol)
self._run_solver()
def test_PowellDirectionalSolver_CRT(self):
from mystic.solvers import PowellDirectionalSolver
from mystic.termination import CandidateRelativeTolerance as CRT
tol = 1e-7
self.solver = PowellDirectionalSolver(self.ND)
self.term = CRT()#tol)
self._run_solver(iter_limit=True)
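# A minimal sketch (hand-run; helper name illustrative) for Goldstein-Price,
# a classic 2-D benchmark whose global minimum sits at [0., -1.] inside the
# [-2, 2] square used by setUp above.
def _example_goldstein_powell():
    from mystic.solvers import PowellDirectionalSolver
    from mystic.termination import NormalizedChangeOverGeneration as NCOG
    from mystic.models import goldstein
    solver = PowellDirectionalSolver(2)
    solver.SetRandomInitialPoints([-2.0]*2, [2.0]*2)
    solver.SetStrictRanges([-2.0]*2, [2.0]*2)
    solver.SetEvaluationLimits(generations=10000)
    solver.Solve(goldstein, NCOG())
    return solver.Solution()          # should land near [0., -1.]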
##############################################################################
class TestChampion(unittest.TestCase):
"""Test Champion's NMinimize test function 51."""
def setUp(self):
from mystic.models import nmin51
self.costfunction = nmin51
self.ND = 2
self.expected = [-0.0244031,0.210612]
self.min = [-1.]*self.ND
self.max = [1.]*self.ND
self.maxiter = 10000
self.nplaces = 0 # Precision of answer
def _run_solver(self, iter_limit=False, **kwds):
from mystic.monitors import Monitor
from mystic.tools import random_seed
random_seed(123)
esow = Monitor()
ssow = Monitor()
solver = self.solver
solver.SetRandomInitialPoints(self.min, self.max)
solver.SetEvaluationLimits(generations=self.maxiter)
#solver.SetStrictRanges(self.min, self.max)
solver.SetEvaluationMonitor(esow)
solver.SetGenerationMonitor(ssow)
_stdout = trap_stdout()
solver.Solve(self.costfunction, self.term, **kwds)
out = release_stdout(_stdout)
sol = solver.Solution()
#print '\n', sol
if disp:
# Print the number of iterations and function evaluations
iters = len(ssow.x)
func_evals = len(esow.x)
print "\nNumber of iterations = ", iters
print "Number of function evaluations = ", func_evals
# If solver should terminate immediately, check for that only.
if iter_limit:
self.assertTrue(solver.generations < 2)
warn = "Warning: Invalid termination condition (nPop < 2)"
self.assertTrue(warn in out)
return
# Verify solution is close to expected
for i in range(len(sol)):
self.assertAlmostEqual(sol[i], self.expected[i], self.nplaces)
def test_DifferentialEvolutionSolver_VTR(self):
from mystic.solvers import DifferentialEvolutionSolver
from mystic.termination import VTR
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver(self.ND, self.NP)
self.term = VTR()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver_COG(self): # Default for this solver
from mystic.solvers import DifferentialEvolutionSolver
from mystic.termination import ChangeOverGeneration as COG
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver(self.ND, self.NP)
self.term = COG()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver_NCOG(self):
from mystic.solvers import DifferentialEvolutionSolver
from mystic.termination import NormalizedChangeOverGeneration as NCOG
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver(self.ND, self.NP)
self.term = NCOG()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver_CRT(self):
from mystic.solvers import DifferentialEvolutionSolver
from mystic.termination import CandidateRelativeTolerance as CRT
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver(self.ND, self.NP)
self.term = CRT()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
#--------------------------------------------------------------------------
def test_DifferentialEvolutionSolver2_VTR(self):
from mystic.solvers import DifferentialEvolutionSolver2
from mystic.termination import VTR
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver2(self.ND, self.NP)
self.term = VTR()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver2_COG(self): # Default for this solver
from mystic.solvers import DifferentialEvolutionSolver2
from mystic.termination import ChangeOverGeneration as COG
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver2(self.ND, self.NP)
self.term = COG()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver2_NCOG(self):
from mystic.solvers import DifferentialEvolutionSolver2
from mystic.termination import NormalizedChangeOverGeneration as NCOG
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver2(self.ND, self.NP)
self.term = NCOG()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver2_CRT(self):
from mystic.solvers import DifferentialEvolutionSolver2
from mystic.termination import CandidateRelativeTolerance as CRT
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver2(self.ND, self.NP)
self.term = CRT()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
#-------------------------------------------------------------------------
def test_NelderMeadSimplexSolver_CRT(self): # Default for this solver
from mystic.solvers import NelderMeadSimplexSolver
from mystic.termination import CandidateRelativeTolerance as CRT
tol = 1e-7
self.solver = NelderMeadSimplexSolver(self.ND)
self.term = CRT()#tol)
self._run_solver()
def test_NelderMeadSimplexSolver_VTR(self):
from mystic.solvers import NelderMeadSimplexSolver
from mystic.termination import VTR
tol = 1e-7
self.solver = NelderMeadSimplexSolver(self.ND)
self.term = VTR()#tol)
self._run_solver()
def test_NelderMeadSimplexSolver_COG(self):
from mystic.solvers import NelderMeadSimplexSolver
from mystic.termination import ChangeOverGeneration as COG
tol = 1e-7
self.solver = NelderMeadSimplexSolver(self.ND)
self.term = COG()#tol)
self._run_solver()
def test_NelderMeadSimplexSolver_NCOG(self):
from mystic.solvers import NelderMeadSimplexSolver
from mystic.termination import NormalizedChangeOverGeneration as NCOG
tol = 1e-7
self.solver = NelderMeadSimplexSolver(self.ND)
self.term = NCOG()#tol)
self._run_solver()
#--------------------------------------------------------------------------
def test_PowellDirectionalSolver_NCOG(self): # Default for this solver
from mystic.solvers import PowellDirectionalSolver
from mystic.termination import NormalizedChangeOverGeneration as NCOG
tol = 1e-7
self.solver = PowellDirectionalSolver(self.ND)
self.term = NCOG()#tol)
self._run_solver()
def test_PowellDirectionalSolver_COG(self):
from mystic.solvers import PowellDirectionalSolver
from mystic.termination import ChangeOverGeneration as COG
tol = 1e-7
self.solver = PowellDirectionalSolver(self.ND)
self.term = COG()#tol)
self._run_solver()
def test_PowellDirectionalSolver_VTR(self):
from mystic.solvers import PowellDirectionalSolver
from mystic.termination import VTR
tol = 1e-7
self.solver = PowellDirectionalSolver(self.ND)
self.term = VTR()#tol)
self._run_solver()
def test_PowellDirectionalSolver_CRT(self):
from mystic.solvers import PowellDirectionalSolver
from mystic.termination import CandidateRelativeTolerance as CRT
tol = 1e-7
self.solver = PowellDirectionalSolver(self.ND)
self.term = CRT()#tol)
self._run_solver(iter_limit=True)
##############################################################################
class TestPaviani(unittest.TestCase):
"""Test Paviani's function, or TP110 of Schittkowski's test problems."""
def setUp(self):
from mystic.models import paviani
self.costfunction = paviani
self.ND = 10
self.x0 = [9.]*self.ND
self.expected = [9.35025655]*self.ND
self.min = [2.001]*self.ND
self.max = [9.999]*self.ND
self.maxiter = 10000
self.nplaces = 0 # Precision of answer
def _run_solver(self, iter_limit=False, **kwds):
from mystic.monitors import Monitor
from mystic.tools import random_seed
random_seed(123)
esow = Monitor()
ssow = Monitor()
solver = self.solver
solver.SetInitialPoints(self.x0)
solver.SetEvaluationLimits(generations=self.maxiter)
solver.SetStrictRanges(self.min, self.max)
solver.SetEvaluationMonitor(esow)
solver.SetGenerationMonitor(ssow)
_stdout = trap_stdout()
solver.Solve(self.costfunction, self.term, **kwds)
out = release_stdout(_stdout)
sol = solver.Solution()
#print '\n', sol
if disp:
# Print the number of iterations and function evaluations
iters = len(ssow.x)
func_evals = len(esow.x)
print "\nNumber of iterations = ", iters
print "Number of function evaluations = ", func_evals
# If solver should terminate immediately, check for that only.
if iter_limit:
self.assertTrue(solver.generations < 2)
warn = "Warning: Invalid termination condition (nPop < 2)"
self.assertTrue(warn in out)
return
# Verify solution is close to expected
for i in range(len(sol)):
self.assertAlmostEqual(sol[i], self.expected[i], self.nplaces)
def test_DifferentialEvolutionSolver_VTR(self):
from mystic.solvers import DifferentialEvolutionSolver
from mystic.termination import VTR
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver(self.ND, self.NP)
self.term = VTR()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver_COG(self): # Default for this solver
from mystic.solvers import DifferentialEvolutionSolver
from mystic.termination import ChangeOverGeneration as COG
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver(self.ND, self.NP)
self.term = COG()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver_NCOG(self):
from mystic.solvers import DifferentialEvolutionSolver
from mystic.termination import NormalizedChangeOverGeneration as NCOG
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver(self.ND, self.NP)
self.term = NCOG()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver_CRT(self):
from mystic.solvers import DifferentialEvolutionSolver
from mystic.termination import CandidateRelativeTolerance as CRT
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver(self.ND, self.NP)
self.term = CRT()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
#--------------------------------------------------------------------------
def test_DifferentialEvolutionSolver2_VTR(self):
from mystic.solvers import DifferentialEvolutionSolver2
from mystic.termination import VTR
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver2(self.ND, self.NP)
self.term = VTR()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver2_COG(self): # Default for this solver
from mystic.solvers import DifferentialEvolutionSolver2
from mystic.termination import ChangeOverGeneration as COG
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver2(self.ND, self.NP)
self.term = COG()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver2_NCOG(self):
from mystic.solvers import DifferentialEvolutionSolver2
from mystic.termination import NormalizedChangeOverGeneration as NCOG
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver2(self.ND, self.NP)
self.term = NCOG()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
def test_DifferentialEvolutionSolver2_CRT(self):
from mystic.solvers import DifferentialEvolutionSolver2
from mystic.termination import CandidateRelativeTolerance as CRT
tol = 1e-7
self.NP = 40
self.solver = DifferentialEvolutionSolver2(self.ND, self.NP)
self.term = CRT()#tol)
from mystic.strategy import Rand1Bin
self._run_solver( strategy=Rand1Bin )
#-------------------------------------------------------------------------
def test_NelderMeadSimplexSolver_CRT(self): # Default for this solver
from mystic.solvers import NelderMeadSimplexSolver
from mystic.termination import CandidateRelativeTolerance as CRT
tol = 1e-7
self.solver = NelderMeadSimplexSolver(self.ND)
self.term = CRT()#tol)
self._run_solver()
def test_NelderMeadSimplexSolver_VTR(self):
from mystic.solvers import NelderMeadSimplexSolver
from mystic.termination import VTR
tol = 1e-7
self.solver = NelderMeadSimplexSolver(self.ND)
self.term = VTR()#tol)
self._run_solver()
def test_NelderMeadSimplexSolver_COG(self):
from mystic.solvers import NelderMeadSimplexSolver
from mystic.termination import ChangeOverGeneration as COG
tol = 1e-7
self.solver = NelderMeadSimplexSolver(self.ND)
self.term = COG()#tol)
self._run_solver()
def test_NelderMeadSimplexSolver_NCOG(self):
from mystic.solvers import NelderMeadSimplexSolver
from mystic.termination import NormalizedChangeOverGeneration as NCOG
tol = 1e-7
self.solver = NelderMeadSimplexSolver(self.ND)
self.term = NCOG()#tol)
self._run_solver()
#--------------------------------------------------------------------------
def test_PowellDirectionalSolver_NCOG(self): # Default for this solver
from mystic.solvers import PowellDirectionalSolver
from mystic.termination import NormalizedChangeOverGeneration as NCOG
tol = 1e-7
self.solver = PowellDirectionalSolver(self.ND)
self.term = NCOG()#tol)
self._run_solver()
def test_PowellDirectionalSolver_COG(self):
from mystic.solvers import PowellDirectionalSolver
from mystic.termination import ChangeOverGeneration as COG
tol = 1e-7
self.solver = PowellDirectionalSolver(self.ND)
self.term = COG()#tol)
self._run_solver()
def test_PowellDirectionalSolver_VTR(self):
from mystic.solvers import PowellDirectionalSolver
from mystic.termination import VTR
tol = 1e-7
self.solver = PowellDirectionalSolver(self.ND)
self.term = VTR()#tol)
self._run_solver()
def test_PowellDirectionalSolver_CRT(self):
from mystic.solvers import PowellDirectionalSolver
from mystic.termination import CandidateRelativeTolerance as CRT
tol = 1e-7
self.solver = PowellDirectionalSolver(self.ND)
self.term = CRT()#tol)
self._run_solver(iter_limit=True)
#---------------------------------------------------------------------------------
if __name__ == '__main__':
suite1 = unittest.TestLoader().loadTestsFromTestCase(TestZimmermann)
suite2 = unittest.TestLoader().loadTestsFromTestCase(TestRosenbrock)
suite3 = unittest.TestLoader().loadTestsFromTestCase(TestCorana)
suite4 = unittest.TestLoader().loadTestsFromTestCase(TestQuartic)
suite5 = unittest.TestLoader().loadTestsFromTestCase(TestShekel)
suite6 = unittest.TestLoader().loadTestsFromTestCase(TestStep)
suite7 = unittest.TestLoader().loadTestsFromTestCase(TestGriewangk)
suite8 = unittest.TestLoader().loadTestsFromTestCase(TestPeaks)
suite9 = unittest.TestLoader().loadTestsFromTestCase(TestVenkataraman91)
suite10 = unittest.TestLoader().loadTestsFromTestCase(TestSchwefel)
suite11 = unittest.TestLoader().loadTestsFromTestCase(TestEasom)
suite12 = unittest.TestLoader().loadTestsFromTestCase(TestRotatedEllipsoid)
suite13 = unittest.TestLoader().loadTestsFromTestCase(TestAckley)
suite14 = unittest.TestLoader().loadTestsFromTestCase(TestRastrigin)
suite15 = unittest.TestLoader().loadTestsFromTestCase(TestGoldsteinPrice)
suite16 = unittest.TestLoader().loadTestsFromTestCase(TestChampion)
suite17 = unittest.TestLoader().loadTestsFromTestCase(TestPaviani)
# Comment out suites in the list below to test specific test cost functions only
# (Testing all the problems will take some time)
allsuites = unittest.TestSuite([suite1, # Zimmermann
# suite2, # Rosenbrock
# suite3, # Corana
# suite4, # Quartic
# suite5, # Shekel
# suite6, # Step
# suite7, # Griewangk
# suite8, # Peaks
# suite9, # Venkataraman91
# suite10, # Schwefel
# suite11, # Easom
suite12, # RotatedEllipsoid
# suite13, # Ackley
# suite14, # Rastrigin
# suite15, # GoldsteinPrice
# suite16, # Champion
# suite17, # Paviani
])
unittest.TextTestRunner(verbosity=verbosity).run(allsuites)
# EOF
| bsd-3-clause | 415,009,383,640,808,450 | 38.673643 | 90 | 0.636614 | false |
bollu/polymage | sandbox/bounds.py | 1 | 4680 | #
# Copyright 2014-2016 Vinay Vasista, Ravi Teja Mullapudi, Uday Bondhugula,
# and others from Multicore Computing Lab, Department of Computer Science
# and Automation, Indian Institute of Science
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# bounds.py : Interval analysis and function bounds check pass
#
from __future__ import absolute_import, division, print_function
from expression import isAffine, Reference  # Reference is used in check_refs below (assumed to live in expression)
from poly import extract_value_dependence
import logging
import pipe
# LOG CONFIG #
bounds_logger = logging.getLogger("bounds.py")
bounds_logger.setLevel(logging.ERROR)
LOG = bounds_logger.log
def bounds_check_pass(pipeline):
"""
Bounds check pass analyzes if function values used in the compute
objects are within the domain of the functions. Static analysis is
only possible when the references to function values are regular
i.e. they are not data dependent. We restrict ourselves to affine
references.
"""
inp_groups = {}
for inp_func in pipeline.inputs:
inp_comp = pipeline.func_map[inp_func]
inp_groups[inp_func] = \
pipe.Group(pipeline._ctx, [inp_comp], \
pipeline._param_constraints)
for group in pipeline.groups:
for child in group.children:
check_refs(child, group)
for inp in group.image_refs:
check_refs(group, inp_groups[inp])
return
def check_refs(child_group, parent_group):
# Check refs works only on non-fused groups. It can be made to
# work with fused groups as well. However, it might serve very
# little use.
# assert (not child_group.isFused() and not parent_group.isFused())
# get the only comp_obj in child and parent groups
parent_comp = parent_group.comps[0]
parent_func = parent_comp.func
child_comp = child_group.comps[0]
child_func = child_comp.func
# Only verifying if both child and parent group have a polyhedral
# representation
if child_group.polyRep.poly_parts and parent_group.polyRep.poly_doms:
for child_part in child_group.polyRep.poly_parts[child_comp]:
# Compute dependence relations between child and parent
child_refs = child_part.refs
if child_part.pred:
child_refs += child_part.pred.collect(Reference)
# It is not generally feasible to check the validity of
            # an access when the reference is not affine.
# Approximations can be done but for now skipping them.
def affine_ref(ref):
affine = True
for arg in ref.arguments:
affine = affine and isAffine(arg)
return affine
# filter out only the affine refs to parent_func
child_refs = [ ref for ref in child_refs \
if ref.objectRef == parent_func and
affine_ref(ref) ]
log_level = logging.DEBUG
deps = []
parent_dom = parent_group.polyRep.poly_doms[parent_comp]
for ref in child_refs:
deps += extract_value_dependence(child_part, ref, parent_dom)
LOG(log_level, "ref : "+str(ref))
for dep in deps:
diff = dep.rel.range().subtract(parent_dom.dom_set)
# ***
ref_str = "referenced = "+str(dep.rel.range())
dom_str = "parent domain = "+str(parent_dom.dom_set)
log_level = logging.DEBUG
LOG(log_level, ref_str)
LOG(log_level, dom_str)
# ***
if(not diff.is_empty()):
# ***
log_level = logging.ERROR
LOG(log_level, "_______________________")
LOG(log_level, "Reference out of domain")
LOG(log_level, ref_str)
LOG(log_level, dom_str)
LOG(log_level, "_______________________")
# ***
raise TypeError("Reference out of domain", child_group,
parent_group, diff)
return
| apache-2.0 | -8,621,944,478,895,732,000 | 38 | 77 | 0.599145 | false |
a4a881d4/6FSK | SIOT.py | 1 | 1816 | import utils
import numpy as np
from modu import modu,toComplex,dpmap
import math
from channel import channel
from rfir import rfir
from const import Primary
class SIOT:
def __init__(self,k):
self.length = 1<<k
self.Gpilot = utils.gold(Primary[k][0],Primary[k][1])
self.Pilot = self.Gpilot.toReal(self.Gpilot.seq(1,3)+[0]).tolist()
self.CPilot = np.array(dpmap([0]*self.length,self.Pilot))
def modu(self,D):
d = modu(D,self.Pilot,4,math.pi/8,18)
return d
def toComplex(self,d):
c = toComplex(d[::64],18)
return c
def xcorr(self,r):
c = np.zeros(r.shape,dtype=complex)
c[:16*len(self.CPilot):16]=self.CPilot
fc = np.fft.fft(c)
fr = np.fft.fft(r)
return np.fft.ifft(fr*np.conj(fc))
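	# Sketch of what xcorr computes: a circular cross-correlation against the
	# pilot, using the FFT identity ifft(fft(r) * conj(fft(c))). The pilot is
	# placed on every 16th sample to match the 16x oversampled input stream.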
def r1(self,c,k):
t = c[k::16]
r = t[:self.length]*np.conj(self.CPilot)
return np.sum(r[:-1]*np.conj(r[1:]))
def r4(self,c,k):
t = c[k::16]
rr = t[:self.length]*np.conj(self.CPilot)
		r = rr[::8]+rr[1::8]+rr[2::8]+rr[3::8]+rr[4::8]+rr[5::8]+rr[6::8]+rr[7::8]
return np.sum(r[:-1]*np.conj(r[1:]))
def main0():
	S = SIOT(10)  # k=10 -> 1024-chip pilot, matching the 1024-bit sources below (assumed; legacy demo)
D0 = utils.rsrc(1024)
D1 = utils.rsrc(1024)
d = S.modu(D0) + S.modu(D1)
cc = S.toComplex(d)
ch = channel(0.1,6.,0.5,16)
c = ch.ferr(cc)
c = ch.awgn(c)
f = rfir()
c = f.filter(c)
x = np.zeros(16*1042)
for k in range(len(x)):
x[k]=np.abs(S.r1(c,k))
import matplotlib.pyplot as plt
plt.plot(np.abs(x))
plt.show()
def main():
S = SIOT(12)
D0 = utils.rsrc(S.length)
D1 = utils.rsrc(S.length)
d = S.modu(D0) + S.modu(D1)
cc = S.toComplex(d)
ch = channel(0.000,6.,1.,16)
c = ch.ferr(cc)
c = ch.awgn(c)
f = rfir()
c = f.filter(c)
x = S.xcorr(c)
import matplotlib.pyplot as plt
plt.plot(np.abs(x))
plt.show()
if __name__ == '__main__':
main() | gpl-3.0 | -2,189,306,197,252,280,600 | 20.444444 | 74 | 0.578194 | false |
kernsuite-debian/lofar | SAS/SpecificationServices/lib/lofarxml_to_momxml_translator.py | 1 | 51084 | #!/usr/bin/env python3
# lofarxml_to_momxml_translator.py
#
# Copyright (C) 2017
# ASTRON (Netherlands Institute for Radio Astronomy)
# P.O.Box 2, 7990 AA Dwingeloo, The Netherlands
#
# This file is part of the LOFAR software suite.
# The LOFAR software suite is free software: you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# The LOFAR software suite is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with the LOFAR software suite. If not, see <http://www.gnu.org/licenses/>.
#
# This translator will create a MoM-compatible document from a LofarSpec
#
# Basic approach:
# - It parses relations on the Lofar spec and creates a base document with a folder hierarchy for MoM
# - It moves some basic info from the Lofar spec to the MoM document
# - For each activity
# - elements are moved/renamed where nested differently in MoM
# - some elements need further conversion, like clock/stations, which is handled in the code
# - elements unknown to MoM are encoded as json and placed in a <misc> element.
# - the mom-compatible activity / item is placed in the folder hierarchy
#
# Disclaimer: Want to change something? Before you tumble down this rabbit hole: Grab a coffee. (Make it strong.)
#
# TODO: The translation does not have full coverage at the moment. This stuff is still missing:
# TODO:
# TODO: Calibrator pipelines (Doesn't handle InstrumentModels and topology).
# TODO: Pulsar pipelines.
# TODO: TBB observations.
# TODO: Probably Imaging and LongBaseline pipelines.
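# Worked example of the overall flow (element names real, values hypothetical):
# a LOFAR spec fragment
#   <timeWindowSpecification><duration><minimumDuration>PT1H</minimumDuration></duration></timeWindowSpecification>
# is first moved to timeWindow::minDuration per MOM_ACTIVITY_ATTRIBUTE_MAPPING,
# then (being unknown to MoM) encoded into the <misc> element roughly as
#   {"timeWindow": {"minDuration": "3600"}}
# with the ISO 8601 duration converted to whole seconds along the way.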
from json import dumps, loads
from collections import OrderedDict
from lxml import etree
from xmljson import Parker
import re
import datetime
from .config import VALIDATION_SERVICENAME
from .validation_service_rpc import ValidationRPC
from .specification_service import _parse_relation_tree, make_key, _parse_project_code
from lofar.common.xmlparse import parse_xml_string_or_bytestring
from lofar.messaging import DEFAULT_BROKER, DEFAULT_BUSNAME
from io import BytesIO
import logging
__version__ = '0.43'
__author__ = "Joern Kuensemoeller"
__changedBy__ = "Joern Kuensemoeller"
logger = logging.getLogger(__name__)
validationrpc = ValidationRPC.create(DEFAULT_BUSNAME, DEFAULT_BROKER)
# ------------------------
# -> For moving elements:
# these types of activity are translateable
ACTIVITY_TYPES = ["observation", "pipeline", "measurement"] # "ingest", "cleanup"] <- how to translate these?
# items in MOM_ACTIVITY_ATTRIBUTE_MAPPING that are abstract.
# the activityname in these will be replaced by the specific type as per xsi:type attribute in the Mom namepace,
# preserving the rest. e.g. 'pipelineAttributes' -> 'averagingPipelineAttributes'
#
# (Note from the dev: sorry for the complicated mechanism, but the type of the activity is specified as xsi:type...
# ...while this does not work for the attributes element, which is specifically named after the type. So simply...
# ---mapping all of them is not possible.)
ABSTRACT_MOM_ELEMENTS = ["pipelineAttributes", "measurementAttributes"]
# The following mapping describes what has to be put somewhere else for mom.
# If the element in the dict key exists, it is moved to the element defined in value and renamed if
# Notes:
# - ! Order matters - This needs to be an ordered dict so the item sequence in the destination is not messed up!
# - Only the first occurence of nodes are moved, so the paths should be unique for each activity.
# - Path root in the key is the lofar spec activity element, and paths in the value are rooted in the item element
# - '::' is separator, since '.' or '/' don't work due to occurrence in namespace uri
# todo: check whether this can be simplified with xpath.
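# Example of one mapping entry at work (namespace URI abbreviated): the pair
#   ("observation::clock", "observation::{...}observationAttributes::userSpecification::clock")
# finds <clock> under activity/observation, detaches it, creates the
# observationAttributes/userSpecification chain on the destination if missing,
# and re-appends the node there, renaming it when the last path component differs.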
MOM_ACTIVITY_ATTRIBUTE_MAPPING = OrderedDict([
("triggerId::identifier", "trigger_id"),
#
## Observations
#
("observation::timeWindowSpecification::minStartTime",
"timeWindow::minStartTime"),
("observation::timeWindowSpecification::maxEndTime",
"timeWindow::maxEndTime"),
("observation::timeWindowSpecification::duration::minimumDuration",
"timeWindow::minDuration"),
("observation::timeWindowSpecification::duration::maximumDuration",
"timeWindow::maxDuration"),
# ---
# This works nice for a single non-Custom stationset, but this is better solved elsewhere specifically:
#("observation::stationSelectionSpecification::stationSelection::stationSet",
# "stationSelection::resourceGroup"),
#("observation::stationSelectionSpecification::stationSelection::minimumConstraint",
# "stationSelection::min"),
# ---
("observation::instrument",
"observation::{http://www.astron.nl/MoM2-Lofar}observationAttributes::instrument"),
("observation::defaultTemplate",
"observation::{http://www.astron.nl/MoM2-Lofar}observationAttributes::defaultTemplate"),
("observation::tbbPiggybackAllowed",
"observation::{http://www.astron.nl/MoM2-Lofar}observationAttributes::tbbPiggybackAllowed"),
("observation::aartfaacPiggybackAllowed",
"observation::{http://www.astron.nl/MoM2-Lofar}observationAttributes::aartfaacPiggybackAllowed"),
("observation::correlatedData",
"observation::{http://www.astron.nl/MoM2-Lofar}observationAttributes::userSpecification::correlatedData"),
("observation::filteredData",
"observation::{http://www.astron.nl/MoM2-Lofar}observationAttributes::userSpecification::filteredData"),
("observation::beamformedData",
"observation::{http://www.astron.nl/MoM2-Lofar}observationAttributes::userSpecification::beamformedData"),
("observation::coherentStokesData",
"observation::{http://www.astron.nl/MoM2-Lofar}observationAttributes::userSpecification::coherentStokesData"),
("observation::incoherentStokesData",
"observation::{http://www.astron.nl/MoM2-Lofar}observationAttributes::userSpecification::incoherentStokesData"),
("observation::antenna",
"observation::{http://www.astron.nl/MoM2-Lofar}observationAttributes::userSpecification::antenna"),
("observation::clock",
"observation::{http://www.astron.nl/MoM2-Lofar}observationAttributes::userSpecification::clock"),
("observation::instrumentFilter",
"observation::{http://www.astron.nl/MoM2-Lofar}observationAttributes::userSpecification::instrumentFilter"),
("observation::integrationInterval",
"observation::{http://www.astron.nl/MoM2-Lofar}observationAttributes::userSpecification::integrationInterval"),
("observation::channelsPerSubband",
"observation::{http://www.astron.nl/MoM2-Lofar}observationAttributes::userSpecification::channelsPerSubband"),
("observation::coherentDedisperseChannels", # todo: probably old BlueGene and no longer needed
"observation::{http://www.astron.nl/MoM2-Lofar}observationAttributes::userSpecification::coherentDedisperseChannels"),
("observation::tiedArrayBeams", # todo: probably old BlueGene and no longer needed, will give default hints in the UI
"observation::{http://www.astron.nl/MoM2-Lofar}observationAttributes::userSpecification::tiedArrayBeams"),
("observation::stokes",
"observation::{http://www.astron.nl/MoM2-Lofar}observationAttributes::userSpecification::stokes"),
("observation::flysEye",
"observation::{http://www.astron.nl/MoM2-Lofar}observationAttributes::userSpecification::tiedArrayBeams::flyseye"),
# all stationsets should've been converted to misc beforehand. This moves the only remaining 'custom' stationset to where MoM looks for it:
("observation::stationSelectionSpecification::stationSelection::stationSet",
"observation::{http://www.astron.nl/MoM2-Lofar}observationAttributes::userSpecification::stationSet"),
("observation::stationSelectionSpecification::stationSelection::stations",
"observation::{http://www.astron.nl/MoM2-Lofar}observationAttributes::userSpecification::stations"),
("observation::timeWindowSpecification::timeFrame",
"observation::{http://www.astron.nl/MoM2-Lofar}observationAttributes::userSpecification::timeFrame"),
("observation::timeWindowSpecification::startTime",
"observation::{http://www.astron.nl/MoM2-Lofar}observationAttributes::userSpecification::startTime"),
("observation::timeWindowSpecification::endTime",
"observation::{http://www.astron.nl/MoM2-Lofar}observationAttributes::userSpecification::endTime"),
("observation::timeWindowSpecification::duration::duration",
"observation::{http://www.astron.nl/MoM2-Lofar}observationAttributes::userSpecification::duration"),
("observation::bypassPff", # todo: probably old BlueGene and no longer needed
"observation::{http://www.astron.nl/MoM2-Lofar}observationAttributes::userSpecification::bypassPff"),
("observation::enableSuperterp",
"observation::{http://www.astron.nl/MoM2-Lofar}observationAttributes::userSpecification::enableSuperterp"),
("observation::numberOfBitsPerSample",
"observation::{http://www.astron.nl/MoM2-Lofar}observationAttributes::userSpecification::numberOfBitsPerSample"),
#
## Pipelines
#
# !!! Note: When attributes are shared between pipeline types, they appear once here, but have to appear in correct
# ...position for all pipelines! Only defaultTemplate is currently shared and it's in the beginning everywhere.
#
# shared amongst pipelines:
("pipeline::defaultTemplate", "pipeline::pipelineAttributes::defaultTemplate"),
# averaging pipeline:
("pipeline::demixingParameters", "pipeline::pipelineAttributes::demixingParameters"),
("pipeline::bbsParameters", "pipeline::pipelineAttributes::bbsParameters"),
("pipeline::flaggingStrategy", "pipeline::pipelineAttributes::flaggingStrategy"),
("pipeline::frequencyIntegrationStep", "pipeline::pipelineAttributes::frequencyIntegrationStep"),
("pipeline::timeIntegrationStep", "pipeline::pipelineAttributes::timeIntegrationStep"),
# imaging pipeline:
("pipeline::imagerIntegrationTime", "pipeline::pipelineAttributes::imagerIntegrationTime"),
# pulsar pipeline:
("pipeline::_2bf2fitsExtraOpts", "pipeline::pipelineAttributes::_2bf2fitsExtraOpts"),
("pipeline::_8bitConversionSigma", "pipeline::pipelineAttributes::_8bitConversionSigma"),
("pipeline::decodeNblocks", "pipeline::pipelineAttributes::decodeNblocks"),
("pipeline::decodeSigma", "pipeline::pipelineAttributes::decodeSigma"),
("pipeline::digifilExtraOpts", "pipeline::pipelineAttributes::digifilExtraOpts"),
("pipeline::dspsrExtraOpts", "pipeline::pipelineAttributes::dspsrExtraOpts"),
("pipeline::dynamicSpectrumTimeAverage", "pipeline::pipelineAttributes::dynamicSpectrumTimeAverage"),
("pipeline::nofold", "pipeline::pipelineAttributes::nofold"),
("pipeline::nopdmp", "pipeline::pipelineAttributes::nopdmp"),
("pipeline::norfi", "pipeline::pipelineAttributes::norfi"),
("pipeline::prepdataExtraOpts", "pipeline::pipelineAttributes::prepdataExtraOpts"),
("pipeline::prepfoldExtraOpts", "pipeline::pipelineAttributes::prepfoldExtraOpts"),
("pipeline::prepsubbandExtraOpts", "pipeline::pipelineAttributes:prepsubbandExtraOpts"),
("pipeline::pulsar", "pipeline::pipelineAttributes::pulsar"),
("pipeline::rawTo8bit", "pipeline::pipelineAttributes:rawTo8bit"),
("pipeline::rfifindExtraOpts", "pipeline::pipelineAttributes::rfifindExtraOpts"),
("pipeline::rrats", "pipeline::pipelineAttributes::rrats"),
("pipeline::singlePulse", "pipeline::pipelineAttributes::singlePulse"),
("pipeline::skipDsps", "pipeline::pipelineAttributes::skipDsps"),
("pipeline::skipDynamicSpectrum", "pipeline::pipelineAttributes::skipDynamicSpectrum"),
("pipeline::skipPrepfold", "pipeline::pipelineAttributes::skipPrepfold"),
("pipeline::tsubint", "pipeline::pipelineAttributes::tsubint"),
#
## Measurements
#
# BeamMeasurement
("measurement::measurementType", "measurement::measurementAttributes::measurementType"),
("measurement::targetName", "measurement::measurementAttributes::specification::targetName"),
("measurement::ra", "measurement::measurementAttributes::specification::ra"),
("measurement::ra", "measurement::measurementAttributes::specification::ra"),
("measurement::dec", "measurement::measurementAttributes::specification::dec"),
("measurement::equinox", "measurement::measurementAttributes::specification::equinox"),
("measurement::duration", "measurement::measurementAttributes::specification::duration"),
("measurement::subbandsSpecification", "measurement::measurementAttributes::specification::subbandsSpecification"),
("measurement::tiedArrayBeams", "measurement::measurementAttributes::specification::tiedArrayBeams"),
# todo: If used, LofarBeamMeasurementSpecificationAttributesType requires more items!
# todo: add other measurements? Currently not defined on LofarBase.xsd, so these cannot occur...
])
# -----------
# -> For encoding new stuff (that's too fancy for MoM) in the misc field:
# These activity types can carry a misc element for extraspec.
# Measurements do not have that, but have to have a parent observation that has.
ACTIVITIES_WITH_MOM_EXTRASPECS = ['observation', 'pipeline']
# These specification elements are to be recoded for MoM as json
# Note: This happens after nodes are moved according to mapping, so restructure/rename first the way this is required
# ...on the misc field and add the parent items of what's to encode here.
MOM_ACTIVITY_EXTRASPECS = [
"trigger_id",
"priority",
"qualityOfService",
"timeWindow",
"stationSelection", # <- stationSelectionSpecification is parsed prior and placed there in correct format
# "pipeline..." # no pipeline time constraints...?
]
# ----------------
# -> For removing lefovers that MoM doesn't understand.
# These elements should simply be removed before exporting the mom xml because they are not understood by MoM
# (any important info should be moved out of here earlier, others may be safe to remove.
MOM_ACTIVITY_REMOVABLE_ELEMENTS = [
'observation::timeWindowSpecification',
'observation::stationSelectionSpecification',
]
#--------------------
# Topology
#
dptopologytypes = {
"BFDataProduct_CoherentStokes": 'cs',
"BFDataProduct_IncoherentStokes": 'is',# <-- todo: csis support not implemented
"UVDataProduct": 'uv', # <-- todo: InstrumentModel
#"TBBDataProduct": 'tbb', # <-- todo: tbb currently not implemented in MoM
#"PixelMapDataProduct": '', # <-- todo: pixelmap currently not implemented fully in MoM
"SkyImageDataProduct": 'si', # <-- MoM doens't really care I tihnk? check
"PulsarDataProduct": 'pu' # <-- MoM doens't really care I tihnk? check
}
class LofarXmlToMomXmlTranslator():
def _find_or_create_subelement(self, element, name, index=None):
"""
returns element child with given name. Creates it if not present.
"""
sub = element.find(str(name))
if sub is None:
# create it
if index is not None:
sub = etree.Element(name)
element.insert(index, sub)
else:
sub = etree.SubElement(element, name)
return sub
def _jsonify(self, xml):
"""
converts xml string to json string
"""
bf = Parker(dict_type=OrderedDict)
data = bf.data(etree.fromstring(xml))
json = dumps(data)
return json
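    # e.g. _jsonify('<extraspec><priority>1</priority></extraspec>') gives
    # '{"priority": 1}': the Parker convention drops the root element and maps
    # child elements to keys.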
def _parse_entities(self, spec):
"""
returns lookup dictionaries for entity relations
"""
entities = spec.findall('entity')
inputentities = {} # activity identifier -> list of entity identifiers
outputentities = {} # activity identifier -> list of entity identifiers
entitytypes = None # entity identifier -> entity type name
entityclusterelems = None # entity identifier -> <storageCluster> element/subtree
entityproducers = {} # entitiy identifier -> activity identifier
entityusers = {} # entitiy identifier -> activity identifier
if entities is not None:
producer = [(x.find("entity"), x.find("activity")) for x in spec.findall("relation") if
x.find("type").text == "producer"]
user = [(x.find("entity"), x.find("activity")) for x in spec.findall("relation") if
x.find("type").text == "user"]
for (entity_id, activity_id) in producer:
outputentities.setdefault(make_key(activity_id), []).append(make_key(entity_id))
entityproducers[make_key(entity_id)] = make_key(activity_id)
for (entity_id, activity_id) in user:
inputentities.setdefault(make_key(activity_id), []).append(make_key(entity_id))
entityusers[make_key(entity_id)] = make_key(activity_id)
entitytypes = {make_key(entity.find("temporaryIdentifier")): entity.find('dataproductType').text
for entity in entities}
entityclusterelems = {make_key(entity.find("temporaryIdentifier")): entity.find('storageCluster')
for entity in entities}
return inputentities, outputentities, entitytypes, entityclusterelems, entityproducers, entityusers
def _create_foldertree_in_momproject(self, spec, mom_project):
"""
        Parses the relations in the LOFAR specs and creates the folder hierarchy in the given MoM project
Returns a dict to look up the folder element that is meant to contain an activity (identifier is key).
Returns a dict to look up the assigned groupid of activities (for use in topology)
Returns a dict to look up the assigned myid of measurements/observations (for use in topology)
"""
try:
activityfolders, parentfolders, foldernames = _parse_relation_tree(spec)
containers = spec.findall('container')
momfolders = {} # holds elements, not keys!
activityparents = {} # holds elements, not keys!
folder_topologygroup = {}
activity_topologygroup = {}
added_folders = []
# populate folder element dictionary. folder identifier is key
counter = 0
for container in containers:
key = make_key(container.find("temporaryIdentifier"))
folder = container.find('folder')
momfolder = etree.Element('{http://www.astron.nl/MoM2-Lofar}folder')
for child in folder.getchildren():
momfolder.append(child)
updateelem = container.find("addToExistingContainer")
if updateelem is not None:
momfolder.attrib["update_folder"] = updateelem.text
momfolders[key] = momfolder
counter += 1
folder_topologygroup[key] = 'B' + str(counter)
# create folder hierarchy
for activikey in list(activityfolders.keys()):
key = activityfolders[activikey]
activityparents[activikey] = momfolders[key]
activity_topologygroup[activikey] = folder_topologygroup[key]
# recursively walk towards root to determine what subtree requires creation:
to_add = []
while key is not None:
if key in added_folders:
break # already there, so create the children up to here only
to_add.append(key)
if key in list(parentfolders.keys()):
key = parentfolders[key]
else:
break
# create towards activity and create the missing folder hierarchy:
for key in reversed(to_add):
# create folder
folder = momfolders[key]
if key in parentfolders:
parent = momfolders[parentfolders[key]]
else:
parent = mom_project
children = self._find_or_create_subelement(parent, "children")
index = len(children.getchildren())
item = etree.SubElement(children, "item")
item.append(folder)
if 'index' not in item.attrib:
item.attrib['index'] = str(index)
added_folders.append(key)
# In the templates I see:
# folder topology is generally '0' or the index, topology_parent is true on inner folders
                topology = str(index)  # "[header] [groupid] [myid] [slice]" # Is 0 some magic number here? (lxml .text requires str)
                istopparent = key in parentfolders
tindex = 0 # where to insert in parent
folder.attrib['topology_parent'] = str(istopparent).lower()
intop = folder.find('topology')
intopstr = None
if intop is not None:
intopstr = intop.text
folder.remove(intop)
top = self._find_or_create_subelement(folder, 'topology', tindex)
if intopstr is not None:
top.text = intopstr # todo: should the spec override what it auto-determined?
else:
top.text = topology
# Not only containers can contain children, but also observations.
# Determine measurement -> parent observation mapping.
observation_measurement = [(x.find("parent"), x.find("child")) for x in spec.findall("relation")
if x.find("type").text == "observation-measurement"]
observation_acts = [x for x in spec.findall('activity') if x.find('observation') is not None]
observations = {}
for obs_act in observation_acts:
key = make_key(obs_act.find("temporaryIdentifier"))
observations[key] = obs_act.find('observation')
# measurements share group ID with the observation and the obs' parent folder
# measurements share myid with the parent observation.
mcounter = 1
activity_topologymyid = {}
for (obs_id, measurement_id) in observation_measurement:
key = make_key(obs_id)
measurekey = make_key(measurement_id)
obskey = make_key(obs_id)
activityparents[measurekey] = observations[key]
activity_topologygroup[measurekey] = activity_topologygroup[key]
if obskey in activity_topologymyid:
activity_topologymyid[measurekey] = activity_topologymyid[obskey]
else:
activity_topologymyid[measurekey] = str(mcounter)
activity_topologymyid[obskey] = str(mcounter)
mcounter += 1
return activityparents, activity_topologygroup, activity_topologymyid
except Exception as err:
logger.error("Error occurred while creating folder hierarchy -> " + str(err))
raise
def _create_topology(self, header = None, groupid = None, myid = None, slice = None, function = None, sap = None, dptype = None):
"""
Returns a topology string based on provided information. Most of these are not actually parsed, according to
documentation, so we may fill in anything as long as the result is unique. It seems ok to leave items blank.
(No idea how a potential parser would determine which ones are missing, it seems completely ambiguous...)
I don't make use of header and slice, actually, and use the others as follows:
groupid: unique id per folder B[index], used by everything in there
myid: unique id per observation, used by related measurements / pipelines as well
function: 'T' for obs/measurements, 'P[index]' for pipelines
sap: using child index, assuming observation only has measurements as children
dptype: According to dataproduct type
---
The documentation provides the following:
header: "mom_msss_" (for MSSS), "mom_" (for non-MSSS) | not actually used as far as I can tell
groupid: "[folderId]" (MSSS), "M[momId]" (non-MSSS), "G[folderId]" (non-MSSS) | topology_parent="true" allows to have MoM-ID of some 'main folder' to be added here automagically | some of this may be parsed by MoM
myid: "M[momId]" | to make it unique if not otherwise | apparently not actually parsed
slice: MSSS slice | Not actually parsed, but predecessors need to have same value here
function: "C" (calibrator), "T" (Target), "P1,P2,P3,P4,PX,PY,PZ" (Pipelne), "CT" (Calibration and Target), "CP" (Calibration Pipelne), "IP" (Imaging Pipelne), "X" (unknown) | Parsed by MoM
sap = "SAP"XXX[".dps"] | Measurement; with suffix a measurement entity / dataproduct
dptype = ".uv" (correlated data), ".bf" (beamformed data) - no longer used, ".cs" (coherent stokes data), ".is" (incoherent stokes data), ".csis" (coherent stokes and incoherent stokes data), ".im" (instrument model), ".si" (sky image), ".pu" (pulsar data) | marked red, so probably parsed somewhere, but no indication where
The examples in the docs then have something contradictory like this:
B0.1.CPT.dps.im <-- where B0 is block zero, 1 is the index of the calibration pipeline (CPT) of which we have an instrument model entity here.
        I assume CPT should be CP, and the '.dps' seems to be linked to the dptype, not sap. That's apparently wrong, but pretty clear.
        The 'B0' and '1' do not really fit, either semantically or in the documented naming scheme. Probably 'B0' is the group ID and '1' is a myid in the wrong format.
If it were only the example, I wouldn't say anything, but this seems to be the format used in the templates. Maybe these examples are actually for an
extra XML topology scheme that gets translated to the documented values in MoM?
full (but contradictory) documentation in the wiki:
https://www.astron.nl/lofarwiki/doku.php?id=mom3:topology
"""
topology = '.'.join([_f for _f in [header, groupid, myid, slice, function, sap, dptype] if _f])
return topology
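    # Example outputs (values hypothetical): groupid='B2', myid='1',
    # function='T', sap='SAP000' joins to 'B2.1.T.SAP000'; a pipeline result
    # dataproduct additionally gets a dptype suffix, e.g. 'B2.1.P1.dps.uv'.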
def _mommify(self, activity, projectcode):
"""
Turns a LOFAR activity to a MoM compatible version.
        The groupid is required for the topology; measurements also require the activity_index.
The projectcode is put on observations for some reason but is not available on the LofarSpec activity.
Returns an item element containing nested MoM observation/pipeline
"""
act = None
for activitytype in ACTIVITY_TYPES: # look for all possible activities
act = activity.find(activitytype)
if act is not None:
# convert the very specific bits. Should happen before the mapping as attribute is moved!
# We change the measurement type to one that fits the one we use in MoM.
# The type name changed because it was not correct or is misleading.
if "{http://www.w3.org/2001/XMLSchema-instance}type" in act.attrib:
t = act.attrib["{http://www.w3.org/2001/XMLSchema-instance}type"]
if t.split(':')[1] == "SAPMeasurement":
newt = t.split(':')[0] + ':' + "BFMeasurement"
act.attrib["{http://www.w3.org/2001/XMLSchema-instance}type"] = newt
# infer missing elements that MoM requires # todo: is that necessary?
nameelem = act.find('name')
if nameelem is not None and act.find('targetName') is None:
etree.SubElement(act, 'targetName').text = nameelem.text
if act.find('duration') is None:
etree.SubElement(act, 'duration').text = 'PT0S' # todo: can we determine a reasonable value?
# clock:
for clock in act.findall('clock'):
atts = clock.attrib
mode = str(clock.text) + " " + str(atts.pop('units'))
atts['mode'] = mode
clock.text = None
# convert status value to status-specific elements for MoM
statuselem = activity.find('status')
if statuselem is not None:
newstatuselem = etree.SubElement(act, "currentStatus")
momstatus = "{http://www.astron.nl/MoM2}"+statuselem.text+'Status'
etree.SubElement(newstatuselem, momstatus)
# duplicate observation name and project code in attributes. At least the project code is probably
# ignored and it's not clear why it is in there. But existing XML has it defined, so I stay consistent
# here just to be sure..
if activitytype == 'observation':
atts = self._find_or_create_subelement(act,
"{http://www.astron.nl/MoM2-Lofar}observationAttributes")
obsname = act.find('name')
if obsname is not None:
etree.SubElement(atts, 'name').text = obsname.text
if projectcode is not None:
etree.SubElement(atts, 'projectName').text = projectcode
# stations:
# Note: we place stuff in temporary locations first and handle the final placement with the mechanism
# that is used for all other items as well (like misc encoding etc...). The reason is that the final
# destination is e.g. dependent on what kind of activity this is, and I don't want to replicate that
# logic.
for selection in act.findall('stationSelectionSpecification/stationSelection'):
station_set = selection.find("stationSet")
if station_set.text == "Custom":
stations = selection.xpath("stations/station")
for station in stations:
stationname = station.find("name").text
# create new stationSelection element <- picked up by extraspec encoding
newselection = etree.SubElement(activity, 'stationSelection')
etree.SubElement(newselection, 'resourceGroup').text = stationname
etree.SubElement(newselection, 'min').text = '1'
# change formatting for mom
station.remove(station.find('name'))
                        station.attrib['name'] = stationname
station.text = None # <-- will create self-closing tags, "" for extra closing tag
# but: leave it here <- will be placed on userSpecs later
# move to activity:
#activity.append(station_set)
#activity.append(selection.find('stations'))
else:
selection.find("minimumConstraint").tag = 'min'
selection.find("stationSet").tag = 'resourceGroup'
activity.append(selection)
# Abstract types with xsi:type attributes are used for pipelines and measurements.
# On the example mom xml I have, there is e.g. an element pipelineAttributes for all types,
            # but on the XSDs this IS type specific, e.g. averagingPipelineAttributes (Yaay!)
# --> So, determine the specific type (if xsi:type present) and use the "camelCase+Type" name for the
# element instead.:
momtype = None
momtype_cc = None
try:
if "{http://www.w3.org/2001/XMLSchema-instance}type" in act.attrib:
t = act.attrib["{http://www.w3.org/2001/XMLSchema-instance}type"]
momtype = t.split(':')[1]
momtype_cc = momtype[:2].lower() + momtype[2:]
except ValueError as err:
logger.error("Could not determine a more specific MoM type from type attribute for the activity -> "
+ str(activitytype) + " " + str(err))
# momtype/_cc should now be present for pipelines/measurements but not observations
# restructure elements according to mapping.:
for src, dst in list(MOM_ACTIVITY_ATTRIBUTE_MAPPING.items()):
src_node = activity
for s in src.split('::'):
src_node = src_node.find(s)
if src_node is None:
break
if src_node is None: # -> attribute not found
continue
dst_node = activity
dst_path = dst.split('::')[:-1]
dst_tag = dst.split('::')[-1]
for d in dst_path:
if d in ABSTRACT_MOM_ELEMENTS: # replace abstract elements from Mapping by
d = d.replace(activitytype, "{http://www.astron.nl/MoM2-Lofar}" + str(momtype_cc))
if d is not "":
dst_node = self._find_or_create_subelement(dst_node, d)
else:
                    logger.warning("Ignoring empty string in mapping. -> " + str(dst))
src_node.getparent().remove(src_node)
if src_node.tag != dst_tag:
src_node.tag = dst_tag
dst_node.append(src_node)
if activitytype in ACTIVITIES_WITH_MOM_EXTRASPECS:
# jsonify new specs that MoM does not know about and put them as json in misc element:
if momtype_cc is not None: # -> not an obs
# use the specific type if present (pipelines/measurements)
atts = self._find_or_create_subelement(act, "{http://www.astron.nl/MoM2-Lofar}"
+ str(momtype_cc) + "Attributes")
# todo: check if misc is in userspec for measurements, it does not seem true for pipelines...?!
# userspec = self._find_or_create_subelement(atts,'userSpecification', 0) # goes in beginning here
misc = self._find_or_create_subelement(atts, "misc", 0)
else:
atts = self._find_or_create_subelement(act, "{http://www.astron.nl/MoM2-Lofar}"
+ str(activitytype) + "Attributes")
userspec = self._find_or_create_subelement(atts, 'userSpecification')
misc = self._find_or_create_subelement(userspec, "misc")
json = self._encode_mom_extraspecs(activity)
misc.text = json
else:
self._encode_mom_extraspecs(activity) # remove extraspec elements, but ignore the returned json
# clean up / remove what MoM does not know
self._remove_removable_elements(activity)
# create MoM compatible element, namespace, and xsi:type
item = etree.Element("item")
momact = etree.SubElement(item, "{http://www.astron.nl/MoM2-Lofar}" + str(activitytype))
if momtype:
# set an xsi:type according to the one on the lofar spec actvity
momact.attrib["{http://www.w3.org/2001/XMLSchema-instance}type"] = "lofar:" + str(momtype) + "Type"
# todo: better look up namespace identifier from nsmap.
for child in act.getchildren():
# move stuff to new mom element
momact.append(child)
return item, activitytype
raise Exception("Cannot translate activity for MoM! -> " + str(ACTIVITY_TYPES)
+ " not found in " + str(activity.getchildren()))
def _isoduration_to_seconds(self, isoduration):
        comp = re.compile(r'P(?:(?P<years>\d+)Y)?(?:(?P<months>\d+)M)?(?:(?P<days>\d+)D)?(?:T(?:(?P<hours>\d+)H)?(?:(?P<minutes>\d+)M)?(?:(?P<seconds>\d+[.]?\d*)S)?)?')
durdict = comp.match(isoduration).groupdict()
td = datetime.timedelta(
days=int(durdict['days'] or 0) +
(int(durdict['months'] or 0) * 30) +
(int(durdict['years'] or 0) * 365),
hours=int(durdict['hours'] or 0),
minutes=int(durdict['minutes'] or 0),
seconds=int(durdict['seconds'] or 0))
return td.total_seconds()
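    # e.g. _isoduration_to_seconds('PT1H30M') -> 5400.0 and
    # _isoduration_to_seconds('P1DT30S') -> 86430.0; months/years are
    # approximated as 30/365 days by the timedelta above.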
def _encode_mom_extraspecs(self, activity):
"""
encodes extra specs on an activity element as json
return the json string
"""
try:
# move extraspec elements from activity to new element tree
# we do not want to move the entire subtree to not harm any other existing elements, so we try to locate the
# extraspec elements and if present, move them to a recreated tree structure that is then encoded in json
extraspecelement = etree.Element("extraspec") # temporarily holds data for misc
for extraspec in MOM_ACTIVITY_EXTRASPECS:
elements = extraspec.split("::")
source = activity
target = extraspecelement
prevelement = None
for element in elements:
                    # walk original tree to the latest child:
source = source.find(element) # update reference
if source is None:
break
# on-the-fly create parents in new element tree, update reference:
if prevelement:
if target.find(prevelement) is None:
target = etree.SubElement(target, prevelement)
else:
target = target.find(prevelement)
prevelement = element
if source is not None:
# move _all_ elements with that name, e.g. needed for stationSelection
sources = source.getparent().findall(source.tag)
for source in sources:
# move the child element to the parent element that was last created in the new tree:
source.getparent().remove(source)
target.append(source)
# find duration elements and convert iso to int.
                    # We are only covering elements and first-level subelements here; traverse further if needed
convertelems = []
convertelems.append(source)
convertelems.extend(source.getchildren())
for elem in convertelems:
if 'duration' in elem.tag.lower():
seconds = self._isoduration_to_seconds(elem.text)
                            elem.text = str(int(seconds))  # whole seconds as int; drop the int() cast if fractional seconds are needed
# Jsonify extraspec tree and add to misc element on the original activity element.
json = self._jsonify(etree.tostring(extraspecelement))
# json = dumps(loads(json)['extraspec']) # remove parent element
return json
except Exception as err:
logger.error("Error while encoding MoM extraspecs -> " + str(err))
raise
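    # The returned JSON is what lands in <misc>. For a triggered observation it
    # looks roughly like (values hypothetical):
    #   {"trigger_id": 42, "timeWindow": {"minStartTime": "2017-01-01T00:00:00",
    #    "minDuration": "3600"}}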
def _remove_removable_elements(self, momact):
"""
removes elements from the mom specification that are not understood by MoM. Make sure to copy important info
elsewhere (e.g. encode in misc) beforehand.
"""
for elementpath in MOM_ACTIVITY_REMOVABLE_ELEMENTS:
elementnames = elementpath.split("::")
element = momact
for elementname in elementnames:
element = element.find(elementname) # walk to leaf
if element is None:
break
if element is not None:
element.getparent().remove(element)
def translate_lofarspec_to_momspec(self, spec_xml):
# Parse specification
        parser = etree.XMLParser(remove_blank_text=True)  # <-- keeps pretty-printing from breaking
spectree = parse_xml_string_or_bytestring(spec_xml, parser=parser).getroot()
nsmap = {"lofar": "http://www.astron.nl/MoM2-Lofar",
"mom2": "http://www.astron.nl/MoM2",
"xsi": "http://www.w3.org/2001/XMLSchema-instance"}
# create new document with correct namespace for Mom
momspec = etree.Element("{http://www.astron.nl/MoM2-Lofar}project",
nsmap = nsmap)
# translate general items
projectcode = _parse_project_code(spectree)
etree.SubElement(momspec, 'version').text = __version__
temp = etree.SubElement(momspec, 'template')
etree.SubElement(temp, 'description').text = 'Translated by ' + __name__ + ' version ' + __version__
temp.attrib['author'] = __author__
temp.attrib['changedBy'] = __changedBy__
temp.attrib['version'] = __version__
etree.SubElement(momspec, 'name').text = projectcode
# create folder hierarchy
activityparents, activity_topologygroup, activity_topologymyid = self._create_foldertree_in_momproject(spectree, momspec)
# get input/output dataproducts
indps, outdps, dptypes, dpclusterelems, dpproducers, dpusers = self._parse_entities(spectree)
topologies = {} # stores topologies for reference
# add all activities and their related dataproducts to corresponding folder
activities = spectree.findall('activity')
for activity in activities:
# determine destination folder for activity
key = (activity.find('temporaryIdentifier').find('source').text,
activity.find('temporaryIdentifier').find('identifier').text)
if key not in activityparents:
logger.debug("No key " + str(key) + " in " + str(activityparents))
raise Exception("No parent for key " + str(key))
# Determine parent element (folder/obs) to hold this activity
parent = activityparents[key]
# Determine element index
children = self._find_or_create_subelement(parent, "children")
index = len(children.findall('item'))
# restructure activity in MoM-comprehensible form
item, activitytype = self._mommify(activity, projectcode)
item.attrib["index"] = str(index)
momact = item.getchildren()[0]
# Add the mommified item to it's parent
children.append(item)
# Some activities, like observations, can also serve as containers for measurements.
# While all the containers are set up separately, we have to now update the reference to point to the new
# mommified parent activity, should it change at this step, so the child activity can be added to it.
# -> We then refer to a {MoM-Lofar}observation we just added.
# Note: This is probably super inefficient, but will have to do for now.
for atype in ACTIVITY_TYPES:
old_act = activity.find(atype)
if old_act in list(activityparents.values()):
new_act = item.find("{http://www.astron.nl/MoM2-Lofar}" + str(atype))
if new_act is not None:
for k, v in list(activityparents.items()):
if v == old_act:
activityparents[k] = new_act
else:
raise Exception('Could not update mommified activity reference ->' + str(atype))
# topology
sap = None
function = None
myid = None
if activitytype == "pipeline":
function = 'P' + str(index) # <-- assuming they are in same folder
akey = key
while akey in list(indps.keys()): # find root observation
akey = dpproducers[indps[akey][0]]
myid = activity_topologymyid[akey]
elif activitytype == "observation":
function = 'T' # we ignore the different possible types here for simplicity, parse twinrelations to handle this properly
myid = activity_topologymyid[key]
elif activitytype == "measurement":
function = 'T' # we ignore the different possible types here for simplicity, parse twinrelations to handle this properly
sap = "SAP" + str(index).zfill(3) # <- assuming they are in same folder
myid = activity_topologymyid[key]
groupid = activity_topologygroup[key]
topology = self._create_topology(
groupid=groupid,
myid=myid,
function=function,
sap=sap
)
tindex = 0 # where to insert
act = momact
            # <- sequence is different for pipelines for some reason
            if act.find('name') is not None and activitytype != 'pipeline':
tindex = tindex + 1
if act.find('description') is not None and activitytype != 'pipeline':
tindex = tindex + 1
self._find_or_create_subelement(act, "topology", tindex).text = topology
topologies[key] = topology
# Add Dataproducts to activity in MoM tree
predecessors = []
if key in list(indps.keys()):
                # The XSDs allow fully defining these with storageCluster etc, but MoM seems to expect an empty element with a single topology attribute
# todo maybe we can share some code here with outdps
indpkeys = indps[key]
rdpelem = etree.SubElement(momact, "usedDataProducts")
                # todo: I think this should be actually populated after outdps were added for all activities. This currently relies on sequential occurrence in XML
dpindex = 0
for indpkey in indpkeys:
dpitem = etree.SubElement(rdpelem, "item")
dpitem.attrib["index"] = str(dpindex)
dpindex = dpindex + 1
dptype = dptypes[indpkey]
dptype1 = dptype.split('_')[0] # is/cs are both bf
dptype_cc = dptype1[:2].lower() + dptype1[2:] # camelCase
indpelem = etree.SubElement(dpitem, "{http://www.astron.nl/MoM2-Lofar}" + dptype_cc)
indpelem.attrib["topology"] = topologies[indpkey]
# recursively determine predecessors of dataproduct and all dependencies:
def _get_predecessors(dpkey):
preds = []
preds.append(dpproducers[dpkey])
if dpproducers[dpkey] in list(indps.keys()):
for pdpkey in indps[dpproducers[dpkey]]:
preds.extend(_get_predecessors(pdpkey))
return preds
# append dataproduct's predecessors
predecessors.extend(_get_predecessors(indpkey))
if key in list(outdps.keys()):
outdpkeys = outdps[key]
rdpelem = etree.SubElement(momact, "resultDataProducts")
dpindex = 0
for outdpkey in outdpkeys:
dpitem = etree.SubElement(rdpelem, "item")
dpitem.attrib["index"] = str(dpindex)
dpindex = dpindex + 1
dptype = dptypes[outdpkey]
dptype1 = dptype.split('_')[0] # is/cs are both bf
dptype_cc = dptype1[:2].lower() + dptype1[2:] # camelCase
outdpelem = etree.SubElement(dpitem, "{http://www.astron.nl/MoM2-Lofar}"+dptype_cc)
dptopology = dptopologytypes[dptype]
topology = self._create_topology(
groupid=groupid,
myid=myid,
function=function,
sap=sap,
dptype="dps." + dptopology
)
name = topology
etree.SubElement(outdpelem, "name").text = name
etree.SubElement(outdpelem, "topology").text = topology
etree.SubElement(outdpelem, "status").text = "no_data" # <-- todo: is this actually required?
outdpelem.append(dpclusterelems[outdpkey])
topologies[outdpkey] = topology
if predecessors is not None and len(predecessors) > 0:
pre_topologies = [topologies[predecessor] for predecessor in predecessors]
# For some reason, the observation is referenced here, not for the measurement that produced the data.
# Removing SAP identifier should result in observations topology, use of set removes duplicates:
pre_topologies = list(set([pretop.split('.SAP')[0] for pretop in pre_topologies]))
self._find_or_create_subelement(act, "predecessor_topology", tindex + 1).text = ','.join(pre_topologies)
# create MoM Specification XML
momspec_xml = etree.tostring(momspec, pretty_print=True, method='xml')
return momspec_xml
| gpl-3.0 | -5,725,672,193,722,404,000 | 54.345612 | 333 | 0.611366 | false |
wevoice/wesub | apps/teams/new_views.py | 1 | 39933 | # Amara, universalsubtitles.org
#
# Copyright (C) 2013 Participatory Culture Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see
# http://www.gnu.org/licenses/agpl-3.0.html.
"""new_views -- New team views
This module holds view functions for new-style teams. Eventually it should
replace the old views.py module.
"""
from __future__ import absolute_import
import functools
import json
import logging
import pickle
from collections import namedtuple, OrderedDict
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.auth.views import redirect_to_login
from django.core.cache import cache
from django.core.paginator import Paginator
from django.core.urlresolvers import reverse
from django.db.models import Q
from django.http import (Http404, HttpResponse, HttpResponseRedirect,
HttpResponseBadRequest, HttpResponseForbidden)
from django.shortcuts import render, redirect, get_object_or_404
from django.utils.translation import ugettext as _
from . import views as old_views
from . import forms
from . import permissions
from . import signals
from . import tasks
from .behaviors import get_main_project
from .bulk_actions import add_videos_from_csv
from .exceptions import ApplicationInvalidException
from .models import (Invite, Setting, Team, Project, TeamVideo,
TeamLanguagePreference, TeamMember, Application)
from .statistics import compute_statistics
from activity.models import ActivityRecord
from auth.models import CustomUser as User
from messages import tasks as messages_tasks
from subtitles.models import SubtitleLanguage
from teams.workflows import TeamWorkflow
from utils.breadcrumbs import BreadCrumb
from utils.decorators import staff_member_required
from utils.pagination import AmaraPaginator
from utils.forms import autocomplete_user_view, FormRouter
from utils.text import fmt
from utils.translation import get_language_label
from videos.models import Video
logger = logging.getLogger('teams.views')
ACTIONS_PER_PAGE = 20
VIDEOS_PER_PAGE = 8
MEMBERS_PER_PAGE = 10
def team_view(view_func):
@functools.wraps(view_func)
def wrapper(request, slug, *args, **kwargs):
if not request.user.is_authenticated():
return redirect_to_login(request.path)
if isinstance(slug, Team):
# we've already fetched the team in with_old_view
team = slug
else:
try:
team = Team.objects.get(slug=slug)
except Team.DoesNotExist:
raise Http404
if not team.user_is_member(request.user):
raise Http404
return view_func(request, team, *args, **kwargs)
return wrapper
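# A minimal usage sketch for the decorator above (hypothetical view, not part
# of the original module):
#
#     @team_view
#     def my_view(request, team):
#         # `team` arrives already resolved from the slug and membership-checked
#         return render(request, 'new-teams/my-view.html', {'team': team})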
def with_old_view(old_view_func):
def wrap(view_func):
@functools.wraps(view_func)
def wrapper(request, slug, *args, **kwargs):
try:
team = Team.objects.get(slug=slug)
except Team.DoesNotExist:
raise Http404
if team.is_old_style():
return old_view_func(request, team, *args, **kwargs)
return view_func(request, team, *args, **kwargs)
return wrapper
return wrap
def admin_only_view(view_func):
@functools.wraps(view_func)
@team_view
def wrapper(request, team, *args, **kwargs):
member = team.get_member(request.user)
if not member.is_admin():
messages.error(request,
_("You are not authorized to see this page"))
return redirect(team)
return view_func(request, team, member, *args, **kwargs)
return wrapper
def public_team_view(view_func):
def wrapper(request, slug, *args, **kwargs):
try:
team = Team.objects.get(slug=slug)
except Team.DoesNotExist:
raise Http404
return view_func(request, team, *args, **kwargs)
return wrapper
def team_settings_view(view_func):
"""Decorator for the team settings pages."""
@functools.wraps(view_func)
def wrapper(request, slug, *args, **kwargs):
team = get_object_or_404(Team, slug=slug)
if not permissions.can_view_settings_tab(team, request.user):
messages.error(request,
_(u'You do not have permission to edit this team.'))
return HttpResponseRedirect(team.get_absolute_url())
return view_func(request, team, *args, **kwargs)
return login_required(wrapper)
class VideoPageExtensionForm(object):
"""Define an extra form on the video page.
This class is used to define extension forms. See
VideoPageForms.add_extension_form() method for how you would use them.
"""
def __init__(self, name, label, form_class, selection_type=None):
"""Create a VideoPageExtensionForm
Args:
name -- unique name for the form
label -- human-friendly label to display
form_class -- form class to handle things
selection_type -- can one of the following:
- single-only: Enabled only for single selections
- multiple-only: Enabled only for multiple selections
"""
self.name = name
self.label = label
self.form_class = form_class
self.selection_type = selection_type
def css_selection_class(self):
if self.selection_type == 'single':
return 'needs-one-selected'
elif self.selection_type == 'multiple':
return 'needs-multiple-selected'
else:
return ''
class VideoPageForms(object):
"""Manages forms on the video page
This class is responsible for
- Determining which forms should be enabled for the page
- Building forms
- Allowing other apps to extend which forms appear in the bottom sheet
"""
form_classes = {
'add_form': forms.NewAddTeamVideoDataForm,
'add_csv': forms.TeamVideoCSVForm,
'edit': forms.NewEditTeamVideoForm,
'bulk-edit': forms.BulkEditTeamVideosForm,
'move': forms.MoveTeamVideosForm,
'remove': forms.RemoveTeamVideosForm,
}
def __init__(self, team, user, team_videos_qs):
self.team = team
self.user = user
self.team_videos_qs = team_videos_qs
self.enabled = set()
if permissions.can_add_videos_bulk(user):
self.enabled.add('add_csv')
if permissions.can_add_video(team, user):
self.enabled.add('add_form')
if permissions.can_edit_videos(team, user):
self.enabled.update(['edit', 'bulk-edit'])
if len(permissions.can_move_videos_to(team, user)) > 0:
self.enabled.add('move')
if permissions.can_remove_videos(team, user):
self.enabled.add('remove')
self.extension_forms = OrderedDict()
signals.build_video_page_forms.send(
sender=self, team=team, user=user, team_videos_qs=team_videos_qs)
self.has_bulk_form = any(
issubclass(form_class, forms.BulkTeamVideoForm)
for form_class in self.enabled_form_classes()
)
def build_ajax_form(self, name, request, selection, filters_form):
FormClass = self.lookup_form_class(name)
all_selected = len(selection) >= VIDEOS_PER_PAGE
if request.method == 'POST':
return FormClass(self.team, self.user, self.team_videos_qs,
selection, all_selected, filters_form,
data=request.POST, files=request.FILES)
else:
return FormClass(self.team, self.user, self.team_videos_qs,
selection, all_selected, filters_form)
def build_add_multiple_forms(self, request, filters_form):
if filters_form.selected_project:
# use the selected project by default on the add video form
initial = {
'project': filters_form.selected_project.id,
}
else:
initial = None
if request.method == 'POST' and 'form' in request.POST and request.POST['form'] == 'add':
return (forms.NewAddTeamVideoDataForm(self.team, request.POST, files=request.FILES),
forms.TeamVideoURLFormSet(request.POST))
else:
return (forms.NewAddTeamVideoDataForm(self.team),
forms.TeamVideoURLFormSet())
def add_extension_form(self, extension_form):
"""Add an extra form to appear on the video page
Extension forms are a way for other apps to add a form to the video
page. These forms appear on the bottom sheet when videos get
selected. Connect to the build_video_page_forms signal in order to
get a chance to call this method when a VideoPageForm is built.
"""
self.extension_forms[extension_form.name] = extension_form
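    # Illustrative sketch (assumed, not part of the original code) of how
    # another app could hook in: connect to the signal sent in __init__ above
    # and register an extension form.
    #
    #     from django.dispatch import receiver
    #     from teams import signals
    #
    #     @receiver(signals.build_video_page_forms)
    #     def register_my_form(sender, team, user, team_videos_qs, **kwargs):
    #         sender.add_extension_form(VideoPageExtensionForm(
    #             'my-form', 'My Form', MyForm, selection_type='multiple'))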
def get_extension_forms(self):
return self.extension_forms.values()
def lookup_form_class(self, name):
if name in self.enabled:
return self.form_classes[name]
if name in self.extension_forms:
return self.extension_forms[name].form_class
raise KeyError(name)
def enabled_form_classes(self):
for name in self.enabled:
yield self.form_classes[name]
for ext_form in self.get_extension_forms():
yield ext_form.form_class
def _videos_and_filters_form(request, team):
filters_form = forms.VideoFiltersForm(team, request.GET)
if filters_form.is_bound and filters_form.is_valid():
team_videos = filters_form.get_queryset()
else:
team_videos = (team.videos.all()
.order_by('-created')
.select_related('teamvideo'))
main_project = get_main_project(team)
if main_project:
team_videos = team_videos.filter(
video__teamvideo__project=main_project)
return team_videos, filters_form
@with_old_view(old_views.detail)
@team_view
def videos(request, team):
team_videos, filters_form = _videos_and_filters_form(request, team)
page_forms = VideoPageForms(team, request.user, team_videos)
error_form = error_form_name = None
add_form, add_formset = page_forms.build_add_multiple_forms(request, filters_form)
if add_form.is_bound and add_form.is_valid() and add_formset.is_bound and add_formset.is_valid():
errors = ""
added = 0
project = add_form.cleaned_data['project']
thumbnail = add_form.cleaned_data['thumbnail']
language = add_form.cleaned_data['language']
for form in add_formset:
created, error = form.save(team, request.user, project=project, thumbnail=thumbnail, language=language)
if len(error) > 0:
errors += error + "<br/>"
if created:
added += 1
message = fmt(_(u"%(added)i videos added<br/>%(errors)s"), added=added, errors=errors)
messages.success(request, message)
return HttpResponseRedirect(request.build_absolute_uri())
paginator = AmaraPaginator(team_videos, VIDEOS_PER_PAGE)
page = paginator.get_page(request)
if request.method == 'POST':
csv_form = forms.TeamVideoCSVForm(data=request.POST, files=request.FILES)
if csv_form.is_bound and csv_form.is_valid():
csv_file = csv_form.cleaned_data['csv_file']
if csv_file is not None:
                try:
                    add_videos_from_csv(team, request.user, csv_file)
                    message = fmt(_(u"File successfully uploaded, you should receive the summary shortly."))
                    messages.success(request, message)
                except Exception:
                    messages.error(request, fmt(_(u"File was not successfully parsed.")))
else:
csv_form = forms.TeamVideoCSVForm()
return render(request, 'new-teams/videos.html', {
'team': team,
'page': page,
'paginator': paginator,
'filters_form': filters_form,
'forms': page_forms,
'add_form': add_form,
'add_formset': add_formset,
'add_csv_form': csv_form,
'error_form': error_form,
'error_form_name': error_form_name,
'bulk_mode_enabled': page and page_forms.has_bulk_form,
'breadcrumbs': [
BreadCrumb(team, 'teams:dashboard', team.slug),
BreadCrumb(_('Videos')),
],
})
@team_view
def videos_form(request, team, name):
try:
selection = request.GET['selection'].split('-')
except StandardError:
return HttpResponseBadRequest()
team_videos_qs, filters_form = _videos_and_filters_form(request, team)
page_forms = VideoPageForms(team, request.user, team_videos_qs)
try:
page_forms.lookup_form_class(name)
except KeyError:
raise Http404
form = page_forms.build_ajax_form(name, request, selection, filters_form)
if form.is_bound and form.is_valid():
form.save()
messages.success(request, form.message())
response = HttpResponse("SUCCESS", content_type="text/plain")
response['X-Form-Success'] = '1'
return response
first_video = Video.objects.get(id=selection[0])
template_name = 'new-teams/videos-forms/{}.html'.format(name)
return render(request, template_name, {
'team': team,
'name': name,
'form': form,
'first_video': first_video,
'video_count': len(selection),
'all_selected': len(selection) >= VIDEOS_PER_PAGE,
})
@with_old_view(old_views.detail_members)
@team_view
def members(request, team):
member = team.get_member(request.user)
filters_form = forms.MemberFiltersForm(request.GET)
if request.method == 'POST':
edit_form = forms.EditMembershipForm(member, request.POST)
if edit_form.is_valid():
edit_form.save()
return HttpResponseRedirect(request.path)
else:
            logger.warning("Error updating team membership: %s (%s)",
edit_form.errors.as_text(),
request.POST)
messages.warning(request, _(u'Error updating membership'))
else:
edit_form = forms.EditMembershipForm(member)
members = filters_form.update_qs(
team.members.select_related('user')
.prefetch_related('user__userlanguage_set',
'projects_managed',
'languages_managed'))
paginator = AmaraPaginator(members, MEMBERS_PER_PAGE)
page = paginator.get_page(request)
return render(request, 'new-teams/members.html', {
'team': team,
'page': page,
'filters_form': filters_form,
'edit_form': edit_form,
'show_invite_link': permissions.can_invite(team, request.user),
'show_add_link': permissions.can_add_members(team, request.user),
'breadcrumbs': [
BreadCrumb(team, 'teams:dashboard', team.slug),
BreadCrumb(_('Members')),
],
})
@team_view
def project(request, team, project_slug):
project = get_object_or_404(team.project_set, slug=project_slug)
if permissions.can_change_project_managers(team, request.user):
form = request.POST.get('form')
if request.method == 'POST' and form == 'add':
add_manager_form = forms.AddProjectManagerForm(
team, project, data=request.POST)
if add_manager_form.is_valid():
add_manager_form.save()
member = add_manager_form.cleaned_data['member']
msg = fmt(_(u'%(user)s added as a manager'), user=member.user)
messages.success(request, msg)
return redirect('teams:project', team.slug, project.slug)
else:
add_manager_form = forms.AddProjectManagerForm(team, project)
if request.method == 'POST' and form == 'remove':
remove_manager_form = forms.RemoveProjectManagerForm(
team, project, data=request.POST)
if remove_manager_form.is_valid():
remove_manager_form.save()
member = remove_manager_form.cleaned_data['member']
msg = fmt(_(u'%(user)s removed as a manager'),
user=member.user)
messages.success(request, msg)
return redirect('teams:project', team.slug, project.slug)
else:
remove_manager_form = forms.RemoveProjectManagerForm(team, project)
else:
add_manager_form = None
remove_manager_form = None
data = {
'team': team,
'project': project,
'managers': project.managers.all(),
'add_manager_form': add_manager_form,
'remove_manager_form': remove_manager_form,
'breadcrumbs': [
BreadCrumb(team, 'teams:dashboard', team.slug),
BreadCrumb(project),
],
}
return team.new_workflow.render_project_page(request, team, project, data)
@team_view
def all_languages_page(request, team):
video_language_counts = dict(team.get_video_language_counts())
completed_language_counts = dict(team.get_completed_language_counts())
all_languages = set(video_language_counts.keys() +
completed_language_counts.keys())
languages = [
(lc,
get_language_label(lc),
video_language_counts.get(lc, 0),
completed_language_counts.get(lc, 0),
)
for lc in all_languages
if lc != ''
]
languages.sort(key=lambda row: (-row[2], row[1]))
data = {
'team': team,
'languages': languages,
'breadcrumbs': [
BreadCrumb(team, 'teams:dashboard', team.slug),
BreadCrumb(_('Languages')),
],
}
return team.new_workflow.render_all_languages_page(
request, team, data,
)
@team_view
def language_page(request, team, language_code):
try:
language_label = get_language_label(language_code)
except KeyError:
raise Http404
if permissions.can_change_language_managers(team, request.user):
form = request.POST.get('form')
if request.method == 'POST' and form == 'add':
add_manager_form = forms.AddLanguageManagerForm(
team, language_code, data=request.POST)
if add_manager_form.is_valid():
add_manager_form.save()
member = add_manager_form.cleaned_data['member']
msg = fmt(_(u'%(user)s added as a manager'), user=member.user)
messages.success(request, msg)
return redirect('teams:language-page', team.slug,
language_code)
else:
add_manager_form = forms.AddLanguageManagerForm(team,
language_code)
if request.method == 'POST' and form == 'remove':
remove_manager_form = forms.RemoveLanguageManagerForm(
team, language_code, data=request.POST)
if remove_manager_form.is_valid():
remove_manager_form.save()
member = remove_manager_form.cleaned_data['member']
msg = fmt(_(u'%(user)s removed as a manager'),
user=member.user)
messages.success(request, msg)
return redirect('teams:language-page', team.slug,
language_code)
else:
remove_manager_form = forms.RemoveLanguageManagerForm(
team, language_code)
else:
add_manager_form = None
remove_manager_form = None
data = {
'team': team,
'language_code': language_code,
'language': language_label,
'managers': (team.members
.filter(languages_managed__code=language_code)),
'add_manager_form': add_manager_form,
'remove_manager_form': remove_manager_form,
'breadcrumbs': [
BreadCrumb(team, 'teams:dashboard', team.slug),
BreadCrumb(_('Languages'), 'teams:all-languages-page', team.slug),
BreadCrumb(language_label),
],
}
return team.new_workflow.render_language_page(
request, team, language_code, data,
)
@team_view
def add_members(request, team):
summary = None
if not permissions.can_add_members(team, request.user):
return HttpResponseForbidden(_(u'You cannot invite people to this team.'))
if request.POST:
form = forms.AddMembersForm(team, request.user, request.POST)
if form.is_valid():
summary = form.save()
form = forms.AddMembersForm(team, request.user)
if team.is_old_style():
template_name = 'teams/add_members.html'
else:
template_name = 'new-teams/add_members.html'
return render(request, template_name, {
'team': team,
'form': form,
'summary': summary,
'breadcrumbs': [
BreadCrumb(team, 'teams:dashboard', team.slug),
BreadCrumb(_('Members'), 'teams:members', team.slug),
BreadCrumb(_('Invite')),
],
})
@team_view
def invite(request, team):
if not permissions.can_invite(team, request.user):
return HttpResponseForbidden(_(u'You cannot invite people to this team.'))
if request.POST:
form = forms.InviteForm(team, request.user, request.POST)
if form.is_valid():
# the form will fire the notifications for invitees
# this cannot be done on model signal, since you might be
            # sending invites twice to the same user, and that breaks
            # a naive signal that only fires for newly created invitations
form.save()
return HttpResponseRedirect(reverse('teams:members',
args=[team.slug]))
else:
form = forms.InviteForm(team, request.user)
if team.is_old_style():
template_name = 'teams/invite_members.html'
else:
template_name = 'new-teams/invite.html'
return render(request, template_name, {
'team': team,
'form': form,
'breadcrumbs': [
BreadCrumb(team, 'teams:dashboard', team.slug),
BreadCrumb(_('Members'), 'teams:members', team.slug),
BreadCrumb(_('Invite')),
],
})
@team_view
def autocomplete_invite_user(request, team):
return autocomplete_user_view(request, team.invitable_users())
@team_view
def autocomplete_project_manager(request, team, project_slug):
project = get_object_or_404(team.project_set, slug=project_slug)
return autocomplete_user_view(request, project.potential_managers())
@team_view
def autocomplete_language_manager(request, team, language_code):
return autocomplete_user_view(
request,
team.potential_language_managers(language_code))
def member_search(request, team, qs):
query = request.GET.get('query')
if query:
members_qs = (qs.filter(user__username__icontains=query)
.select_related('user'))
else:
members_qs = TeamMember.objects.none()
data = [
{
'value': member.user.username,
'label': fmt(_('%(username)s (%(full_name)s)'),
username=member.user.username,
full_name=unicode(member.user)),
}
for member in members_qs
]
return HttpResponse(json.dumps(data), mimetype='application/json')
@public_team_view
@login_required
def join(request, team):
user = request.user
if team.user_is_member(request.user):
messages.info(request,
fmt(_(u'You are already a member of %(team)s.'),
team=team))
elif team.is_open():
member = TeamMember.objects.create(team=team, user=request.user,
role=TeamMember.ROLE_CONTRIBUTOR)
messages.success(request,
fmt(_(u'You are now a member of %(team)s.'),
team=team))
messages_tasks.team_member_new.delay(member.pk)
elif team.is_by_application():
return application_form(request, team)
else:
messages.error(request,
fmt(_(u'You cannot join %(team)s.'), team=team))
return redirect(team)
def application_form(request, team):
try:
application = team.applications.get(user=request.user)
except Application.DoesNotExist:
application = Application(team=team, user=request.user)
try:
application.check_can_submit()
except ApplicationInvalidException, e:
messages.error(request, e.message)
return redirect(team)
if request.method == 'POST':
form = forms.ApplicationForm(application, data=request.POST)
if form.is_valid():
form.save()
return redirect(team)
else:
form = forms.ApplicationForm(application)
return render(request, "new-teams/application.html", {
'team': team,
'form': form,
})
@public_team_view
def admin_list(request, team):
if team.is_old_style():
return old_views.detail_members(request, team,
role=TeamMember.ROLE_ADMIN)
# The only real reason to view this page is if you want to ask an admin to
# invite you, so let's limit the access a bit
if (not team.is_by_invitation() and not
team.user_is_member(request.user)):
return HttpResponseForbidden()
return render(request, 'new-teams/admin-list.html', {
'team': team,
'admins': (team.members
.filter(Q(role=TeamMember.ROLE_ADMIN)|
Q(role=TeamMember.ROLE_OWNER))
.select_related('user'))
})
@team_view
def activity(request, team):
filters_form = forms.ActivityFiltersForm(team, request.GET)
paginator = AmaraPaginator(filters_form.get_queryset(), ACTIONS_PER_PAGE)
page = paginator.get_page(request)
action_choices = ActivityRecord.type_choices()
next_page_query = request.GET.copy()
next_page_query['page'] = page.next_page_number()
context = {
'paginator': paginator,
'page': page,
'filters_form': filters_form,
'filtered': filters_form.is_bound,
'team': team,
'tab': 'activity',
'user': request.user,
'next_page_query': next_page_query.urlencode(),
'breadcrumbs': [
BreadCrumb(team, 'teams:dashboard', team.slug),
BreadCrumb(_('Activity')),
],
}
if team.is_old_style():
template_dir = 'teams/'
else:
template_dir = 'new-teams/'
if not request.is_ajax():
return render(request, template_dir + 'activity.html', context)
else:
# for ajax requests we only want to return the activity list, since
# that's all that the JS code needs.
return render(request, template_dir + '_activity-list.html', context)
@team_view
def statistics(request, team, tab):
"""For the team activity, statistics tabs
"""
if (tab == 'teamstats' and
not permissions.can_view_stats_tab(team, request.user)):
return HttpResponseForbidden("Not allowed")
cache_key = 'stats-' + team.slug + '-' + tab
cached_context = cache.get(cache_key)
if cached_context:
context = pickle.loads(cached_context)
else:
context = compute_statistics(team, stats_type=tab)
cache.set(cache_key, pickle.dumps(context), 60*60*24)
context['tab'] = tab
context['team'] = team
context['breadcrumbs'] = [
BreadCrumb(team, 'teams:dashboard', team.slug),
BreadCrumb(_('Activity')),
]
if team.is_old_style():
return render(request, 'teams/statistics.html', context)
else:
return render(request, 'new-teams/statistics.html', context)
def dashboard(request, slug):
team = get_object_or_404(
Team.objects.for_user(request.user, exclude_private=False),
slug=slug)
if not team.is_old_style() and not team.user_is_member(request.user):
return welcome(request, team)
else:
return team.new_workflow.dashboard_view(request, team)
def welcome(request, team):
if team.is_visible:
videos = team.videos.order_by('-id')[:2]
else:
videos = None
if Application.objects.open(team, request.user):
messages.info(request,
_(u"Your application has been submitted. "
u"You will be notified of the team "
"administrator's response"))
return render(request, 'new-teams/welcome.html', {
'team': team,
'join_mode': team.get_join_mode(request.user),
'team_messages': team.get_messages([
'pagetext_welcome_heading',
]),
'videos': videos,
})
@team_settings_view
def settings_basic(request, team):
if team.is_old_style():
return old_views.settings_basic(request, team)
if permissions.can_rename_team(team, request.user):
FormClass = forms.RenameableSettingsForm
else:
FormClass = forms.SettingsForm
if request.POST:
form = FormClass(request.POST, request.FILES, instance=team)
is_visible = team.is_visible
if form.is_valid():
try:
form.save()
except:
logger.exception("Error on changing team settings")
raise
if is_visible != form.instance.is_visible:
tasks.update_video_public_field.delay(team.id)
tasks.invalidate_video_visibility_caches.delay(team)
messages.success(request, _(u'Settings saved.'))
return HttpResponseRedirect(request.path)
else:
form = FormClass(instance=team)
return render(request, "new-teams/settings.html", {
'team': team,
'form': form,
'breadcrumbs': [
BreadCrumb(team, 'teams:dashboard', team.slug),
BreadCrumb(_('Settings')),
],
})
@team_settings_view
def settings_messages(request, team):
if team.is_old_style():
return old_views.settings_messages(request, team)
initial = team.settings.all_messages()
if request.POST:
form = forms.GuidelinesMessagesForm(request.POST, initial=initial)
if form.is_valid():
for key, val in form.cleaned_data.items():
setting, c = Setting.objects.get_or_create(team=team, key=Setting.KEY_IDS[key])
setting.data = val
setting.save()
messages.success(request, _(u'Guidelines and messages updated.'))
return HttpResponseRedirect(request.path)
else:
form = forms.GuidelinesMessagesForm(initial=initial)
return render(request, "new-teams/settings-messages.html", {
'team': team,
'form': form,
'breadcrumbs': [
BreadCrumb(team, 'teams:dashboard', team.slug),
BreadCrumb(_('Settings'), 'teams:settings_basic', team.slug),
BreadCrumb(_('Messages')),
],
})
@team_settings_view
def settings_lang_messages(request, team):
if team.is_old_style():
return old_views.settings_lang_messages(request, team)
initial = team.settings.all_messages()
languages = [{"code": l.language_code, "data": l.data} for l in team.settings.localized_messages()]
if request.POST:
form = forms.GuidelinesLangMessagesForm(request.POST, languages=languages)
if form.is_valid():
new_language = None
new_message = None
for key, val in form.cleaned_data.items():
if key == "messages_joins_localized":
new_message = val
elif key == "messages_joins_language":
new_language = val
else:
l = key.split("messages_joins_localized_")
if len(l) == 2:
code = l[1]
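                        # e.g. key 'messages_joins_localized_fr' splits into
                        # ['', 'fr'], so the language code here is 'fr'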
try:
setting = Setting.objects.get(team=team, key=Setting.KEY_IDS["messages_joins_localized"], language_code=code)
if val == "":
setting.delete()
else:
setting.data = val
setting.save()
                        except Setting.DoesNotExist:
messages.error(request, _(u'No message for that language.'))
return HttpResponseRedirect(request.path)
if new_message and new_language:
setting, c = Setting.objects.get_or_create(team=team,
key=Setting.KEY_IDS["messages_joins_localized"],
language_code=new_language)
if c:
setting.data = new_message
setting.save()
else:
messages.error(request, _(u'There is already a message for that language.'))
return HttpResponseRedirect(request.path)
elif new_message or new_language:
messages.error(request, _(u'Please set the language and the message.'))
return HttpResponseRedirect(request.path)
messages.success(request, _(u'Guidelines and messages updated.'))
return HttpResponseRedirect(request.path)
else:
form = forms.GuidelinesLangMessagesForm(languages=languages)
return render(request, "new-teams/settings-lang-messages.html", {
'team': team,
'form': form,
'breadcrumbs': [
BreadCrumb(team, 'teams:dashboard', team.slug),
BreadCrumb(_('Settings'), 'teams:settings_basic', team.slug),
BreadCrumb(_('Language-specific Messages')),
],
})
@team_settings_view
def settings_feeds(request, team):
if team.is_old_style():
return old_views.video_feeds(request, team)
action = request.POST.get('action')
if request.method == 'POST' and action == 'import':
feed = get_object_or_404(team.videofeed_set, id=request.POST['feed'])
feed.update()
messages.success(request, _(u'Importing videos now'))
return HttpResponseRedirect(request.build_absolute_uri())
if request.method == 'POST' and action == 'delete':
feed = get_object_or_404(team.videofeed_set, id=request.POST['feed'])
feed.delete()
messages.success(request, _(u'Feed deleted'))
return HttpResponseRedirect(request.build_absolute_uri())
if request.method == 'POST' and action == 'add':
add_form = forms.AddTeamVideosFromFeedForm(team, request.user,
data=request.POST)
if add_form.is_valid():
add_form.save()
messages.success(request, _(u'Video Feed Added'))
return HttpResponseRedirect(request.build_absolute_uri())
else:
add_form = forms.AddTeamVideosFromFeedForm(team, request.user)
return render(request, "new-teams/settings-feeds.html", {
'team': team,
'add_form': add_form,
'feeds': team.videofeed_set.all(),
'breadcrumbs': [
BreadCrumb(team, 'teams:dashboard', team.slug),
BreadCrumb(_('Settings'), 'teams:settings_basic', team.slug),
BreadCrumb(_('Video Feeds')),
],
})
@team_settings_view
def settings_projects(request, team):
if team.is_old_style():
return old_views.settings_projects(request, team)
projects = Project.objects.for_team(team)
form = request.POST.get('form')
if request.method == 'POST' and form == 'add':
add_form = forms.ProjectForm(team, data=request.POST)
if add_form.is_valid():
add_form.save()
messages.success(request, _('Project added.'))
return HttpResponseRedirect(
reverse('teams:settings_projects', args=(team.slug,))
)
else:
add_form = forms.ProjectForm(team)
if request.method == 'POST' and form == 'edit':
edit_form = forms.EditProjectForm(team, data=request.POST)
if edit_form.is_valid():
edit_form.save()
messages.success(request, _('Project updated.'))
return HttpResponseRedirect(
reverse('teams:settings_projects', args=(team.slug,))
)
else:
edit_form = forms.EditProjectForm(team)
if request.method == 'POST' and form == 'delete':
try:
project = projects.get(id=request.POST['project'])
except Project.DoesNotExist:
pass
else:
project.delete()
messages.success(request, _('Project deleted.'))
return HttpResponseRedirect(
reverse('teams:settings_projects', args=(team.slug,))
)
return render(request, "new-teams/settings-projects.html", {
'team': team,
'projects': projects,
'add_form': add_form,
'edit_form': edit_form,
'breadcrumbs': [
BreadCrumb(team, 'teams:dashboard', team.slug),
BreadCrumb(_('Settings'), 'teams:settings_basic', team.slug),
BreadCrumb(_('Projects')),
],
})
@team_settings_view
def edit_project(request, team, project_slug):
if team.is_old_style():
return old_views.edit_project(request, team, project_slug)
project = get_object_or_404(Project, slug=project_slug)
if 'delete' in request.POST:
project.delete()
return HttpResponseRedirect(
reverse('teams:settings_projects', args=(team.slug,))
)
elif request.POST:
form = forms.ProjectForm(team, instance=project, data=request.POST)
if form.is_valid():
form.save()
return HttpResponseRedirect(
reverse('teams:settings_projects', args=(team.slug,))
)
else:
form = forms.ProjectForm(team, instance=project)
return render(request, "new-teams/settings-projects-edit.html", {
'team': team,
'form': form,
'breadcrumbs': [
BreadCrumb(team, 'teams:dashboard', team.slug),
BreadCrumb(_('Settings'), 'teams:settings_basic', team.slug),
BreadCrumb(_('Projects'), 'teams:settings_projects', team.slug),
BreadCrumb(project.name),
],
})
@team_settings_view
def settings_workflows(request, team):
return team.new_workflow.workflow_settings_view(request, team)
@staff_member_required
@team_view
def video_durations(request, team):
projects = team.projects_with_video_stats()
totals = (
sum(p.video_count for p in projects),
sum(p.videos_without_duration for p in projects),
sum(p.total_duration for p in projects),
)
return render(request, "new-teams/video-durations.html", {
'team': team,
'projects': projects,
'totals': totals,
})
| agpl-3.0 | 4,369,495,035,570,212,000 | 36.250933 | 137 | 0.601307 | false |
Azure/azure-documentdb-python | azure/cosmos/endpoint_discovery_retry_policy.py | 1 | 4650 | #The MIT License (MIT)
#Copyright (c) 2014 Microsoft Corporation
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
"""Internal class for endpoint discovery retry policy implementation in the Azure Cosmos database service.
"""
import logging
from azure.cosmos.documents import _OperationType
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
log_formatter = logging.Formatter('%(levelname)s:%(message)s')
log_handler = logging.StreamHandler()
log_handler.setFormatter(log_formatter)
logger.addHandler(log_handler)
class _EndpointDiscoveryRetryPolicy(object):
"""The endpoint discovery retry policy class used for geo-replicated database accounts
to handle the write forbidden exceptions due to writable/readable location changes
(say, after a failover).
"""
Max_retry_attempt_count = 120
Retry_after_in_milliseconds = 1000
def __init__(self, connection_policy, global_endpoint_manager, *args):
self.global_endpoint_manager = global_endpoint_manager
self._max_retry_attempt_count = _EndpointDiscoveryRetryPolicy.Max_retry_attempt_count
self.failover_retry_count = 0
self.retry_after_in_milliseconds = _EndpointDiscoveryRetryPolicy.Retry_after_in_milliseconds
self.connection_policy = connection_policy
self.request = args[0] if args else None
#clear previous location-based routing directive
if (self.request):
self.request.clear_route_to_location()
# Resolve the endpoint for the request and pin the resolution to the resolved endpoint
# This enables marking the endpoint unavailability on endpoint failover/unreachability
self.location_endpoint = self.global_endpoint_manager.resolve_service_endpoint(self.request)
self.request.route_to_location(self.location_endpoint)
def ShouldRetry(self, exception):
"""Returns true if should retry based on the passed-in exception.
:param (errors.HTTPFailure instance) exception:
:rtype:
boolean
"""
if not self.connection_policy.EnableEndpointDiscovery:
return False
if self.failover_retry_count >= self.Max_retry_attempt_count:
return False
self.failover_retry_count += 1
if self.location_endpoint:
if _OperationType.IsReadOnlyOperation(self.request.operation_type):
#Mark current read endpoint as unavailable
self.global_endpoint_manager.mark_endpoint_unavailable_for_read(self.location_endpoint)
else:
self.global_endpoint_manager.mark_endpoint_unavailable_for_write(self.location_endpoint)
# set the refresh_needed flag to ensure that endpoint list is
# refreshed with new writable and readable locations
self.global_endpoint_manager.refresh_needed = True
# clear previous location-based routing directive
self.request.clear_route_to_location()
# set location-based routing directive based on retry count
# simulating single master writes by ensuring usePreferredLocations
# is set to false
self.request.route_to_location_with_preferred_location_flag(self.failover_retry_count, False)
# Resolve the endpoint for the request and pin the resolution to the resolved endpoint
# This enables marking the endpoint unavailability on endpoint failover/unreachability
self.location_endpoint = self.global_endpoint_manager.resolve_service_endpoint(self.request)
self.request.route_to_location(self.location_endpoint)
return True
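# Hedged usage sketch (assumed caller, not part of this module): the client's
# retry utility would typically drive this policy, e.g.:
#
#     policy = _EndpointDiscoveryRetryPolicy(connection_policy,
#                                            global_endpoint_manager, request)
#     if policy.ShouldRetry(exception):
#         time.sleep(policy.retry_after_in_milliseconds / 1000.0)
#         # ...then re-issue the request against the re-resolved endpoint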
| mit | -5,802,656,706,582,959,000 | 45.039604 | 106 | 0.726882 | false |
gragas/buffalo | buffalo/tray.py | 1 | 6856 | from buffalo import utils
class Tray(object):
FEATHER = 10
# FEATHER is the number of pixels away from the edges of a tray
# that the "resize region" goes
DEFAULT_MIN_WIDTH = 150
DEFAULT_MAX_WIDTH = 1000
DEFAULT_MIN_HEIGHT = 150
DEFAULT_MAX_HEIGHT = 1000
DEFAULT_COLOR = (0, 0, 100, 150)
def __init__(self, pos, size,
min_width=None, max_width=None,
min_height=None, max_height=None,
color=None):
self.pos = pos
self.size = size
self.min_width = min_width if min_width is not None else Tray.DEFAULT_MIN_WIDTH
self.max_width = max_width if max_width is not None else Tray.DEFAULT_MAX_WIDTH
self.min_height = min_height if min_height is not None else Tray.DEFAULT_MIN_HEIGHT
self.max_height = max_height if max_height is not None else Tray.DEFAULT_MAX_HEIGHT
assert(self.width >= self.min_width and self.width <= self.max_width)
assert(self.height >= self.min_height and self.height <= self.max_height)
self.color = color if color is not None else Tray.DEFAULT_COLOR
self.labels = set()
self.buttons = set()
self.options = set()
self.inputs = set()
        self.should_resize = False
        self.should_move = False
        self.edge = 0b0000
self.render()
def render(self):
self.surface = utils.empty_surface(self.size)
self.surface.fill(self.color)
for label in self.labels:
label.blit(self.surface)
for button in self.buttons:
button.blit(self.surface)
for option in self.options:
option.blit(self.surface)
for inpt in self.inputs:
inpt.blit(self.surface)
def move(self, diff):
self.pos = self.pos[0] + diff[0], self.pos[1] + diff[1]
def resize(self, mouse_pos):
x, y = mouse_pos
original_pos = self.pos
original_size = self.size
if self.edge & 0b0001: # left
r = self.x + self.width
self.pos = x, self.y
self.size = self.width + (r - (self.x + self.width)), self.height
if self.edge & 0b0010: # top
b = self.y + self.height
self.pos = self.x, y
self.size = self.width, self.height + (b - (self.y + self.height))
if self.edge & 0b0100: # right
self.size = self.width + (x - (self.x + self.width)), self.height
if self.edge & 0b1000: # bottom
self.size = self.width, self.height + (y - (self.y + self.height))
if self.size[0] < self.min_width or self.size[0] > self.max_width:
self.size = original_size[0], self.size[1]
self.pos = original_pos[0], self.pos[1]
if self.size[1] < self.min_height or self.size[1] > self.max_height:
self.size = self.size[0], original_size[1]
self.pos = self.pos[0], original_pos[1]
self.render()
def handle(self, mouse_pos, mouse_rel, click_pos):
assert(type(mouse_pos) == tuple and len(mouse_pos) == 2)
assert(type(mouse_pos[0]) == int and type(mouse_pos[1]) == int)
x, y = mouse_pos
# Edges:
# Left: 0b0001
# Top: 0b0010
# Right: 0b0100
# Bottom: 0b1000
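        # e.g. edge == 0b0011 means the cursor sits in the top-left corner,
        # so a drag resizes along both the left and top edges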
within_x = x >= self.x and x <= self.x + self.width
within_y = y >= self.y and y <= self.y + self.height
if within_x:
if y <= self.y + self.height and (self.y + self.height) - y <= Tray.FEATHER:
self.should_resize = True
self.edge |= 0b1000 # Bottom
if y >= self.y and y - self.y <= Tray.FEATHER:
self.should_resize = True
self.edge |= 0b0010 # Top
if within_y:
if x >= self.x and x - self.x <= Tray.FEATHER:
self.should_resize = True
self.edge |= 0b0001 # Left
if x <= self.x + self.width and self.x + self.width - x <= Tray.FEATHER:
self.should_resize = True
self.edge |= 0b0100 # Right
if x >= self.x - Tray.FEATHER and x <= self.x + self.width + Tray.FEATHER:
if y >= self.y - Tray.FEATHER and y <= self.y + self.height + Tray.FEATHER:
if x > self.x + Tray.FEATHER * 5 and x < self.x + self.width - Tray.FEATHER * 5:
if y > self.y + Tray.FEATHER * 5 and y < self.y + self.height - Tray.FEATHER * 5:
self.should_move = True
relative_to_self_pos = (click_pos[0] - self.x, click_pos[1] - self.y)
for button in self.buttons:
if button.get_rect().collidepoint(relative_to_self_pos):
self.should_move = False
self.should_resize = False
break
for inpt in self.inputs:
if inpt.get_rect().collidepoint(relative_to_self_pos):
self.should_move = False
self.should_resize = False
break
for option in self.options:
if option.get_left_rect().collidepoint(relative_to_self_pos):
self.should_move = False
self.should_resize = False
break
if option.get_right_rect().collidepoint(relative_to_self_pos):
self.should_move = False
self.should_resize = False
break
if self.should_move:
self.move(mouse_rel)
if self.should_resize:
self.resize(mouse_pos)
def blit(self, dest):
dest.blit(self.surface, self.pos)
@property
def pos(self):
return (self.x, self.y)
@property
def size(self):
return (self.width, self.height)
@property
def color(self):
return (self.r, self.g, self.b, self.a)
@pos.setter
def pos(self, value):
assert(type(value) == tuple and len(value) == 2)
assert(type(value[0]) == int)
assert(type(value[1]) == int)
self.x, self.y = value
@size.setter
def size(self, value):
assert(type(value) == tuple and len(value) == 2)
assert(type(value[0]) == int)
assert(type(value[1]) == int)
self.width, self.height = value
@color.setter
def color(self, value):
assert(type(value) == tuple and len(value) == 4)
assert(type(value[0]) == int and value[0] >= 0 and value[0] <= 255)
assert(type(value[1]) == int and value[1] >= 0 and value[1] <= 255)
assert(type(value[2]) == int and value[2] >= 0 and value[2] <= 255)
assert(type(value[3]) == int and value[3] >= 0 and value[3] <= 255)
self.r, self.g, self.b, self.a = value
| gpl-2.0 | -3,845,763,640,003,592,700 | 39.568047 | 101 | 0.533985 | false |
anomam/pvlib-python | setup.py | 1 | 3709 | #!/usr/bin/env python
import os
try:
from setuptools import setup
from setuptools.extension import Extension
except ImportError:
raise RuntimeError('setuptools is required')
import versioneer
DESCRIPTION = ('A set of functions and classes for simulating the ' +
'performance of photovoltaic energy systems.')
LONG_DESCRIPTION = """
PVLIB Python is a community supported tool that provides a set of
functions and classes for simulating the performance of photovoltaic
energy systems. PVLIB Python was originally ported from the PVLIB MATLAB
toolbox developed at Sandia National Laboratories and it implements many
of the models and methods developed at the Labs. More information on
Sandia Labs PV performance modeling programs can be found at
https://pvpmc.sandia.gov/. We collaborate with the PVLIB MATLAB project,
but operate independently of it.
We need your help to make pvlib-python a great tool!
Documentation: http://pvlib-python.readthedocs.io
Source code: https://github.com/pvlib/pvlib-python
"""
DISTNAME = 'pvlib'
LICENSE = 'BSD 3-Clause'
AUTHOR = 'pvlib python Developers'
MAINTAINER_EMAIL = '[email protected]'
URL = 'https://github.com/pvlib/pvlib-python'
INSTALL_REQUIRES = ['numpy >= 1.12.0',
'pandas >= 0.18.1',
'pytz',
'requests']
TESTS_REQUIRE = ['nose', 'pytest', 'pytest-cov', 'pytest-mock',
'pytest-timeout', 'pytest-rerunfailures', 'pytest-remotedata']
EXTRAS_REQUIRE = {
'optional': ['ephem', 'cython', 'netcdf4', 'nrel-pysam', 'numba',
'pvfactors', 'scipy', 'siphon', 'tables'],
'doc': ['ipython', 'matplotlib', 'sphinx == 1.8.5', 'sphinx_rtd_theme',
'sphinx-gallery', 'docutils == 0.15.2'],
'test': TESTS_REQUIRE
}
EXTRAS_REQUIRE['all'] = sorted(set(sum(EXTRAS_REQUIRE.values(), [])))
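# With the extras defined above, users can opt in per group, e.g.:
#   pip install pvlib[optional]   # or pvlib[doc], pvlib[test], pvlib[all]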
CLASSIFIERS = [
'Development Status :: 4 - Beta',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Intended Audience :: Science/Research',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Scientific/Engineering',
]
setuptools_kwargs = {
'zip_safe': False,
'scripts': [],
'include_package_data': True,
'python_requires': '~=3.5'
}
# set up pvlib packages to be installed and extensions to be compiled
PACKAGES = ['pvlib']
extensions = []
spa_sources = ['pvlib/spa_c_files/spa.c', 'pvlib/spa_c_files/spa_py.c']
spa_depends = ['pvlib/spa_c_files/spa.h']
spa_all_file_paths = map(lambda x: os.path.join(os.path.dirname(__file__), x),
spa_sources + spa_depends)
if all(map(os.path.exists, spa_all_file_paths)):
print('all spa_c files found')
PACKAGES.append('pvlib.spa_c_files')
spa_ext = Extension('pvlib.spa_c_files.spa_py',
sources=spa_sources, depends=spa_depends)
extensions.append(spa_ext)
else:
print('WARNING: spa_c files not detected. ' +
'See installation instructions for more information.')
setup(name=DISTNAME,
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
packages=PACKAGES,
install_requires=INSTALL_REQUIRES,
extras_require=EXTRAS_REQUIRE,
tests_require=TESTS_REQUIRE,
ext_modules=extensions,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
author=AUTHOR,
maintainer_email=MAINTAINER_EMAIL,
license=LICENSE,
url=URL,
classifiers=CLASSIFIERS,
**setuptools_kwargs)
| bsd-3-clause | -5,511,466,704,249,259,000 | 32.116071 | 79 | 0.66433 | false |
obtitus/py-boinc-plotter | pyBoincPlotter/plot/badge.py | 1 | 10297 |
# This file is part of the py-boinc-plotter,
# which provides parsing and plotting of boinc statistics and
# badge information.
# Copyright (C) 2013 [email protected]
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# END LICENCE
# Standard python imports
import os
import re
import datetime
from io import BytesIO
# try:
# from cStringIO import StringIO
# except:
# from StringIO import StringIO
# Scientific import
from .importMatplotlib import *
# logger
import logging
logger = logging.getLogger('boinc.badge')
class Badge(object):
# Define:
# self.color and self.value in subclass
def __init__(self, name, url):
self.name = name
self.url = url
@staticmethod
def badgeToColor(name):
name = name.lower()
if name == 'bronze': name = '#8C7853'
elif name == 'ruby': name = 'r'
elif name == 'amethyst': name = (149/255., 39/255., 197/255.)
elif name == 'turquoise': name = (59/255., 215/255., 249/255.)
elif name == 'diamond': name = (142/255., 219/255., 245/255.)
elif name == 'emerald': name = 'g'
elif name == 'sapphire': name = 'b'
# yoyo colors
elif name == 'master': name = 'gold'
elif name == 'grandmaster': name = 'b'
elif name == 'guru': name = 'b'
elif name == 'spirit': name = 'r'
elif name == 'held': name = 'r'
elif name == 'half god': name = 'g'
elif name == 'god': name = 'g'
return name
# Overriden in subclass:
@property
def name(self):
return self._name
@name.setter
def name(self, name):
self._name = name
def __str__(self):
return self.name
def __repr__(self):
return str(self.__class__) + self.name
def getImageArtist(self, browser, *args, **kwargs):
# Uses badge url to create a matplotlib artist where the
# matplotlib.offsetbox.AnnotationBbox
# is created by *args and **kwargs. Typical usage: (ix, day), frameon=False, box_alignment=(0, 0.5)
fileName, extension = os.path.splitext(self.url)
        zoom = kwargs.pop('zoom', 1)
        maxdim = kwargs.pop('maxdim', 32)  # max pixel dimension for the badge image (assumed default)
try:
img = browser.visitURL(self.url, extension=extension)
            img = BytesIO(img)  # imread expects a binary file-like object
img = mpimg.imread(img, format=extension) # may raise error due to .jpg, support for other images than .png is added by PIL
            # downscale large images by striding so the badge stays small
            stride = max(1, max(img.shape[:2]) // maxdim)
            if stride > 1:
                img = img[::stride, ::stride]
# Add image:
of = matplotlib.offsetbox.OffsetImage(img, zoom=zoom)
ab = matplotlib.offsetbox.AnnotationBbox(of, *args, **kwargs)
return ab # use plt.gca().add_artist(ab)
except Exception:
logger.exception('Badge image failed, url = %s, extension = %s', self.url, extension)
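    # Hedged usage sketch (assumes a `browser` exposing visitURL(), as used
    # above, and a current matplotlib axes):
    #
    #     ab = badge.getImageArtist(browser, (ix, day),
    #                               frameon=False, box_alignment=(0, 0.5))
    #     if ab is not None:
    #         plt.gca().add_artist(ab)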
class Badge_worldcommunitygrid(Badge):
@Badge.name.setter
def name(self, name):
self._name = name
        name = name.replace('Level Badge', 'Badge') # hack: the diamond++ badges come without the 'Level Badge' wording
self.reg = re.search('(\w+) Badge \((\d+) (days|year|years)\)', name)
logger.debug("Badge_worldcommunitygrid %s, %s", name, self.reg)
@property
def color(self):
reg = self.reg
if reg:
return Badge.badgeToColor(reg.group(1))
else:
return 'k'
@property
def runtime(self):
reg = self.reg
if reg:
if reg.group(3).startswith('year'):
years = int(reg.group(2))
day = datetime.timedelta(days=years*365.25).total_seconds()
elif reg.group(3) == 'days':
day = int(reg.group(2))
day = datetime.timedelta(days=day).total_seconds()
else:
logger.error('Badge level not recognized "%s", "%s"', self.name, reg.groups()) # TODO: raise exception?
return 0
return day
else:
return 0
class Badge_wuprop(Badge):
# Example url: http://wuprop.boinc-af.org/img/badge/100_0_0_0_0.png
badges = [[100, 'Bronze'],
[250, 'Silver'],
[500, 'Gold'],
[1000, 'Ruby'],
[2500, 'Emerald'],
[5000, 'Sapphire'],
[10000, 'Magenta'],
[25000, 'Lime'],
[50000, 'Cyan'],
[100000, 'Purple']]
def __init__(self, name='', url=''):
self.name = name
self.url = url
self.isWuprop = True # isinstance failed, see http://mail.python.org/pipermail/python-bugs-list/2005-August/029861.html
@property
def url(self):
return self._url
@url.setter
def url(self, url):
self._url = url
@Badge.name.setter
def name(self, url):
reg = re.search('(\d+)_(\d+)_(\d+)_(\d+)_(\d+)', url)
if reg:
name = 'Badge: '
for ix, group in enumerate(reg.groups()):
if group != '0':
name += "{} applications: {} hours, ".format((ix+1)*20, group)
self._name = name[:-2]
self.value = map(int, reg.groups())
else:
self._name = url
@staticmethod
def getColor(runtime):
# color and value based on runtime (in seconds)
color = 'k'
value = 0
for b, c in Badge_wuprop.badges:
if runtime >= b*60*60:
color = Badge.badgeToColor(c);
value = b
return color, value
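    # For example, 120 hours of runtime falls in the bronze tier:
    #     Badge_wuprop.getColor(120 * 60 * 60) == ('#8C7853', 100)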
class Badge_yoyo(Badge):
badges = {'bronze': 10000,
'silver': 100000,
'gold': 500000,
'master': int(1e6),
'grandmaster': int(2e6),
'guru': int(5e6),
'spirit': int(10e6),
'held': int(25e6),
'half god' : int(50e6),
'god': int(100e6)}
@Badge.name.setter
def name(self, name):
self._name = name
self.reg = re.search('([\w\s]+) badge', name)
@property
def color(self):
reg = self.reg
if reg:
return Badge.badgeToColor(reg.group(1))
else:
return 'k'
@property
def value(self):
reg = self.reg
if reg:
try:
return self.badges[reg.group(1)]
except KeyError:
logger.error('Badge level not recognized "%s", "%s"', self.name, reg.groups()) # TODO: raise exception?
return 0
else:
return 0
class Badge_primegrid(Badge):
@Badge.name.setter
def name(self, name):
self._name = name
self.reg = re.search('(\w+ \w+) (\w+): More than ([\d,]+) credits \(([\d,]+)\)', name)
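        # Illustrative badge string (made-up numbers) that this regex matches:
        #   'Cullen LLR Silver: More than 100,000 credits (123,456)'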
@property
def color(self):
reg = self.reg
if reg:
return Badge.badgeToColor(reg.group(2))
else:
return 'k'
def _getFloat(self, groupId):
reg = self.reg
if reg:
c = reg.group(groupId).replace(',', '')
return float(c)
else:
return 0
@property
def value(self):
return self._getFloat(3)
@property
def credit(self):
return self._getFloat(4)
@property
def app_name(self):
if self.reg:
return self.reg.group(1)
else:
return 'Unknown'
class Badge_numberfields(Badge):
badges = [[10000, 'Bronze'],
[100000, 'Silver'],
[500000, 'Gold'],
[1000000, 'Sapphire'],
[10000000, 'Ruby'],
[50000000, 'Emerald'],
[100000000, 'Diamond']]
# 'Bronze Medal- 10k credits. (Next badge is Silver at 100k)'
reg = '(\w+) Medal[-\s]*(\d+)(\w) credits'
def setValue(self, value, suffix):
value = float(value)
if suffix == 'k':
value *= 1000
elif suffix == 'm':
value *= 1000000
else:
            logger.warning('Unknown numberfields badge suffix "%s" for value "%s"', suffix, value)
return value
@Badge.name.setter
def name(self, name):
self._name = name
if name == '': return
reg = re.search(self.reg, name)
if reg:
self.color = self.badgeToColor(reg.group(1))
self.value = self.setValue(value=reg.group(2),
suffix=reg.group(3))
else:
logger.warning('Regexp failed on badge string "%s"', name)
class Badge_nfs(Badge_numberfields):
badges = [[10000, 'Bronze'],
[100000, 'Silver'],
[500000, 'Gold'],
[1000000, 'Amethyst'],
[5000000, 'Turquoise'],
[10000000, 'Sapphire'],
[50000000, 'Ruby'],
[100000000, 'Emerald'],
[500000000, 'Diamond']]
# 10k in NFS credit
reg = '(\d+)(\w) in NFS credit'
@Badge.name.setter
def name(self, name):
self._name = name
if name == '': return
reg = re.search(self.reg, name)
if reg:
self.value = self.setValue(value=reg.group(1),
suffix=reg.group(2))
else:
logger.warning('Regexp failed on badge string "%s"', name)
@property
def url(self):
return self._url
@url.setter
def url(self, url):
self._url = 'http://escatter11.fullerton.edu/nfs/'+url
if url == '': return
reg = re.search('(\w+)_nfs.png', url)
if reg:
self.color = self.badgeToColor(reg.group(1))
else:
logger.warning('Regexp failed on badge url "%s"', url)
| gpl-3.0 | 6,578,926,965,254,804,000 | 30.780864 | 135 | 0.529086 | false |
PalisadoesFoundation/switchmap-ng | switchmap/main/agent.py | 1 | 10245 | #!/usr/bin/env python3
"""switchmap.Agent class.
Description:
This script:
1) Processes a variety of information from agents
2) Posts the data using HTTP to a server listed
in the configuration file
"""
# Standard libraries
import textwrap
import sys
import time
import argparse
import multiprocessing
import os
from pprint import pprint
# PIP3 libraries
from gunicorn.app.base import BaseApplication
from gunicorn.six import iteritems
# switchmap.libraries
from switchmap.utils import daemon
from switchmap.utils.daemon import Daemon
from switchmap.constants import CONFIG
from switchmap.utils import log
from switchmap.www import API
class Agent(object):
"""Agent class for daemons."""
def __init__(self, parent, child=None):
"""Method initializing the class.
Args:
parent: Name of parent daemon
child: Name of child daemon
Returns:
None
"""
# Initialize key variables (Parent)
self.parent = parent
self.pidfile_parent = daemon.pid_file(parent)
self.lockfile_parent = daemon.lock_file(parent)
# Initialize key variables (Child)
        if child is None:
self.pidfile_child = None
else:
self.pidfile_child = daemon.pid_file(child)
def name(self):
"""Return agent name.
Args:
None
Returns:
value: Name of agent
"""
# Return
value = self.parent
return value
def query(self):
"""Placeholder method."""
# Do nothing
pass
class AgentDaemon(Daemon):
"""Class that manages agent deamonization."""
def __init__(self, agent):
"""Method initializing the class.
Args:
agent: agent object
Returns:
None
"""
# Initialize variables to be used by daemon
self.agent = agent
# Call up the base daemon
Daemon.__init__(self, agent)
def run(self):
"""Start polling.
Args:
None
Returns:
None
"""
# Start polling. (Poller decides frequency)
while True:
self.agent.query()
class AgentCLI(object):
"""Class that manages the agent CLI.
Args:
None
Returns:
None
"""
def __init__(self):
"""Method initializing the class.
Args:
None
Returns:
None
"""
# Initialize key variables
self.parser = None
def process(self, additional_help=None):
"""Return all the CLI options.
Args:
None
Returns:
args: Namespace() containing all of our CLI arguments as objects
- filename: Path to the configuration file
"""
# Header for the help menu of the application
parser = argparse.ArgumentParser(
description=additional_help,
formatter_class=argparse.RawTextHelpFormatter)
# CLI argument for starting
parser.add_argument(
'--start',
required=False,
default=False,
action='store_true',
help='Start the agent daemon.'
)
# CLI argument for stopping
parser.add_argument(
'--stop',
required=False,
default=False,
action='store_true',
help='Stop the agent daemon.'
)
# CLI argument for getting the status of the daemon
parser.add_argument(
'--status',
required=False,
default=False,
action='store_true',
help='Get daemon daemon status.'
)
# CLI argument for restarting
parser.add_argument(
'--restart',
required=False,
default=False,
action='store_true',
help='Restart the agent daemon.'
)
# CLI argument for stopping
parser.add_argument(
'--force',
required=False,
default=False,
action='store_true',
help=textwrap.fill(
'Stops or restarts the agent daemon ungracefully when '
'used with --stop or --restart.', width=80)
)
# Get the parser value
self.parser = parser
def control(self, agent):
"""Control the switchmap agent from the CLI.
Args:
agent: Agent object
Returns:
None
"""
# Get the CLI arguments
self.process()
parser = self.parser
args = parser.parse_args()
# Run daemon
_daemon = AgentDaemon(agent)
if args.start is True:
_daemon.start()
elif args.stop is True:
if args.force is True:
_daemon.force()
else:
_daemon.stop()
elif args.restart is True:
if args.force is True:
_daemon.force()
_daemon.start()
else:
_daemon.restart()
elif args.status is True:
_daemon.status()
else:
parser.print_help()
sys.exit(2)
class AgentAPI(Agent):
"""switchmap-ng API agent that serves web pages.
Args:
None
Returns:
None
Functions:
__init__:
        query:
"""
def __init__(self, parent, child):
"""Method initializing the class.
Args:
parent: Name of parent daemon
child: Name of child daemon
Returns:
None
"""
# Initialize key variables
Agent.__init__(self, parent, child)
self.config = CONFIG
def query(self):
"""Query all remote devices for data.
Args:
None
Returns:
None
"""
# Initialize key variables
config = self.config
# Check for lock and pid files
if os.path.exists(self.lockfile_parent) is True:
log_message = (
                'Lock file %s exists. Another API daemon may be running, or the '
                'API may have died '
'catastrophically in the past, in which case the lockfile '
'should be deleted. '
'') % (self.lockfile_parent)
log.log2see(1083, log_message)
if os.path.exists(self.pidfile_parent) is True:
log_message = (
'PID file: %s already exists. Daemon already running? '
'If not, it may have died catastrophically in the past '
'in which case you should use --stop --force to fix.'
'') % (self.pidfile_parent)
log.log2see(1084, log_message)
######################################################################
#
# Assign options in format that the Gunicorn WSGI will accept
#
        # NOTE: to get a full set of valid options, pprint(self.cfg.settings)
        # in the instantiation of StandaloneApplication. The option names
        # do not exactly match the CLI options found at
        # http://docs.gunicorn.org/en/stable/settings.html
#
######################################################################
options = {
'bind': '%s:%s' % (config.listen_address(), config.bind_port()),
'accesslog': config.web_log_file(),
'errorlog': config.web_log_file(),
'capture_output': True,
'pidfile': self.pidfile_child,
'loglevel': config.log_level(),
'workers': _number_of_workers(),
'umask': 0o0007,
}
# Log so that user running the script from the CLI knows that something
# is happening
log_message = (
'Switchmap API running on %s:%s and logging to file %s.'
'') % (
config.listen_address(),
config.bind_port(),
config.web_log_file())
log.log2info(1022, log_message)
# Run
StandaloneApplication(API, options).run()
class StandaloneApplication(BaseApplication):
"""Class to integrate the Gunicorn WSGI with the Switchmap Flask application.
Modified from: http://docs.gunicorn.org/en/latest/custom.html
"""
def __init__(self, app, options=None):
"""Method initializing the class.
        Args:
app: Flask application object of type Flask(__name__)
options: Gunicorn CLI options
"""
# Initialize key variables
self.options = options or {}
self.application = app
super(StandaloneApplication, self).__init__()
        # pprint(self.cfg.settings)  # uncomment to inspect all valid options
def load_config(self):
"""Load the configuration."""
# Initialize key variables
config = dict([(key, value) for key, value in iteritems(self.options)
if key in self.cfg.settings and value is not None])
# Assign configuration parameters
for key, value in iteritems(config):
self.cfg.set(key.lower(), value)
def load(self):
"""Run the Flask application throught the Gunicorn WSGI."""
return self.application
def _number_of_workers():
"""Get the number of CPU cores on this server."""
return (multiprocessing.cpu_count() * 2) + 1
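# The (2 * cores) + 1 formula is the worker-count heuristic suggested by the
# Gunicorn documentation; on a 4-core server, for example, it yields 9 workers.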
def agent_sleep(agent_name, seconds=300):
"""Make agent sleep for a specified time, while updating PID every 300s.
Args:
agent_name: Name of agent
seconds: number of seconds to sleep
Returns:
uid: UID for agent
"""
# Initialize key variables
interval = 300
remaining = seconds
# Start processing
while True:
# Update the PID file timestamp (important)
daemon.update_pid(agent_name)
# Sleep for at least "interval" number of seconds
if remaining < interval:
time.sleep(remaining)
break
else:
time.sleep(interval)
# Decrement remaining time
remaining = remaining - interval
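# An illustrative sketch of agent_sleep inside an Agent.query loop, pausing
# five minutes between polling cycles while keeping the PID file fresh
# (poll_devices is an assumed polling routine, not defined here):
#
#     while True:
#         poll_devices()
#         agent_sleep('poller-agent', seconds=300)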
| apache-2.0 | -2,319,820,289,583,307,000 | 24.296296 | 81 | 0.538799 | false |
MoveLab/erulet-server | appulet/migrations/0013_auto__del_field_surveyinstance_user.py | 1 | 25798 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'SurveyInstance.user'
db.delete_column(u'appulet_surveyinstance', 'user_id')
def backwards(self, orm):
# Adding field 'SurveyInstance.user'
db.add_column(u'appulet_surveyinstance', 'user',
self.gf('django.db.models.fields.related.ForeignKey')(related_name='surveys_instances', null=True, to=orm['auth.User'], blank=True),
keep_default=False)
models = {
u'appulet.box': {
'Meta': {'object_name': 'Box'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 11, 6, 0, 0)', 'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'interactive_image': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'boxes'", 'to': u"orm['appulet.InteractiveImage']"}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 11, 6, 0, 0)', 'auto_now': 'True', 'blank': 'True'}),
'max_x': ('django.db.models.fields.IntegerField', [], {}),
'max_y': ('django.db.models.fields.IntegerField', [], {}),
'message_ca': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'message_en': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'message_es': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'message_fr': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'message_oc': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'min_x': ('django.db.models.fields.IntegerField', [], {}),
'min_y': ('django.db.models.fields.IntegerField', [], {})
},
u'appulet.highlight': {
'Meta': {'object_name': 'Highlight'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 11, 6, 0, 0)', 'auto_now_add': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'highlights'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 11, 6, 0, 0)', 'auto_now': 'True', 'blank': 'True'}),
'long_text_ca': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'blank': 'True'}),
'long_text_en': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'blank': 'True'}),
'long_text_es': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'blank': 'True'}),
'long_text_fr': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'blank': 'True'}),
'long_text_oc': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'blank': 'True'}),
'media': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name_ca': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'name_en': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'name_es': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'name_fr': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'name_oc': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'radius': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'step': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'highlights'", 'null': 'True', 'to': u"orm['appulet.Step']"}),
'type': ('django.db.models.fields.IntegerField', [], {})
},
u'appulet.highlighttranslationvcs': {
'Meta': {'object_name': 'HighlightTranslationVCS'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 11, 6, 0, 0)', 'auto_now_add': 'True', 'blank': 'True'}),
'highlight': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'highlight_translation_vcs_entries'", 'to': u"orm['appulet.Highlight']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 11, 6, 0, 0)', 'auto_now': 'True', 'blank': 'True'}),
'long_text_ca': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'blank': 'True'}),
'long_text_en': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'blank': 'True'}),
'long_text_es': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'blank': 'True'}),
'long_text_fr': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'blank': 'True'}),
'long_text_oc': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'blank': 'True'}),
'name_ca': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'name_en': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'name_es': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'name_fr': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'name_oc': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'appulet.interactiveimage': {
'Meta': {'object_name': 'InteractiveImage'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 11, 6, 0, 0)', 'auto_now_add': 'True', 'blank': 'True'}),
'highlight': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'interactive_images'", 'null': 'True', 'to': u"orm['appulet.Highlight']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image_file': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 11, 6, 0, 0)', 'auto_now': 'True', 'blank': 'True'})
},
u'appulet.map': {
'Meta': {'object_name': 'Map'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 11, 6, 0, 0)', 'auto_now_add': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'map'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 11, 6, 0, 0)', 'auto_now': 'True', 'blank': 'True'}),
'map_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'route': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['appulet.Route']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.IntegerField', [], {})
},
u'appulet.rating': {
'Meta': {'object_name': 'Rating'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 11, 6, 0, 0)', 'auto_now_add': 'True', 'blank': 'True'}),
'highlight': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'ratings'", 'null': 'True', 'to': u"orm['appulet.Highlight']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 11, 6, 0, 0)', 'auto_now': 'True', 'blank': 'True'}),
'rating': ('django.db.models.fields.IntegerField', [], {}),
'route': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'ratings'", 'null': 'True', 'to': u"orm['appulet.Route']"}),
'time': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'ratings'", 'to': u"orm['auth.User']"})
},
u'appulet.reference': {
'Meta': {'object_name': 'Reference'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 11, 6, 0, 0)', 'auto_now_add': 'True', 'blank': 'True'}),
'general': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'highlight': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'references'", 'null': 'True', 'to': u"orm['appulet.Highlight']"}),
'html_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 11, 6, 0, 0)', 'auto_now': 'True', 'blank': 'True'}),
'name_ca': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'name_en': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'name_es': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'name_fr': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'name_oc': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'})
},
u'appulet.route': {
'Meta': {'object_name': 'Route'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 11, 6, 0, 0)', 'auto_now_add': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'routes'", 'null': 'True', 'to': u"orm['auth.User']"}),
'description_ca': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'description_en': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'description_es': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'description_fr': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'description_oc': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'gpx_pois': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'blank': 'True'}),
'gpx_track': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'blank': 'True'}),
'gpx_waypoints': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'id_route_based_on': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['appulet.Route']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 11, 6, 0, 0)', 'auto_now': 'True', 'blank': 'True'}),
'name_ca': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'name_en': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'name_es': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'name_fr': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'name_oc': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'official': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'reference': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['appulet.Reference']", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'short_description_ca': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'short_description_en': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'short_description_es': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'short_description_fr': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'short_description_oc': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'track': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['appulet.Track']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'upload_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
u'appulet.routetranslationvcs': {
'Meta': {'object_name': 'RouteTranslationVCS'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 11, 6, 0, 0)', 'auto_now_add': 'True', 'blank': 'True'}),
'description_ca': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'description_en': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'description_es': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'description_fr': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'description_oc': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 11, 6, 0, 0)', 'auto_now': 'True', 'blank': 'True'}),
'name_ca': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'name_en': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'name_es': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'name_fr': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'name_oc': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'route': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'route_translation_vcs_entries'", 'to': u"orm['appulet.Route']"}),
'short_description_ca': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'short_description_en': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'short_description_es': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'short_description_fr': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'short_description_oc': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'appulet.step': {
'Meta': {'object_name': 'Step'},
'absolute_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'altitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 11, 6, 0, 0)', 'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 11, 6, 0, 0)', 'auto_now': 'True', 'blank': 'True'}),
'latitude': ('django.db.models.fields.FloatField', [], {}),
'longitude': ('django.db.models.fields.FloatField', [], {}),
'order': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'precision': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'track': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'steps'", 'null': 'True', 'to': u"orm['appulet.Track']"})
},
u'appulet.surveyinstance': {
'Meta': {'object_name': 'SurveyInstance'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 11, 6, 0, 0)', 'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'default': "'oc'", 'max_length': '2'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 11, 6, 0, 0)', 'auto_now': 'True', 'blank': 'True'}),
'route': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'survey_instances'", 'null': 'True', 'to': u"orm['appulet.Route']"}),
'survey_scheme': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'survey_instances'", 'to': u"orm['appulet.SurveyScheme']"})
},
u'appulet.surveyquestion': {
'Meta': {'object_name': 'SurveyQuestion'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 11, 6, 0, 0)', 'auto_now_add': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'survey_questions'", 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 11, 6, 0, 0)', 'auto_now': 'True', 'blank': 'True'}),
'question_ca': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'question_en': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'question_es': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'question_fr': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'question_oc': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'survey_scheme': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'questions'", 'to': u"orm['appulet.SurveyScheme']"})
},
u'appulet.surveyresponse': {
'Meta': {'object_name': 'SurveyResponse'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 11, 6, 0, 0)', 'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 11, 6, 0, 0)', 'auto_now': 'True', 'blank': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'responses'", 'to': u"orm['appulet.SurveyQuestion']"}),
'response': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'survey_instance': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'responses'", 'to': u"orm['appulet.SurveyInstance']"})
},
u'appulet.surveyscheme': {
'Meta': {'object_name': 'SurveyScheme'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 11, 6, 0, 0)', 'auto_now_add': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'survey_schemes'", 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 11, 6, 0, 0)', 'auto_now': 'True', 'blank': 'True'}),
'unique_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'})
},
u'appulet.track': {
'Meta': {'object_name': 'Track'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 11, 6, 0, 0)', 'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 11, 6, 0, 0)', 'auto_now': 'True', 'blank': 'True'}),
'name_ca': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'name_en': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'name_es': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'name_fr': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'name_oc': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['appulet'] | gpl-3.0 | 6,517,042,192,401,871,000 | 92.474638 | 203 | 0.555624 | false |
julivico/tdd_python | functional_tests/base.py | 1 | 1025 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Base test case class shared by the functional tests.
"""
import sys
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from selenium import webdriver
class FunctionalTest(StaticLiveServerTestCase):
@classmethod
def setUpClass(cls):
for arg in sys.argv:
if 'liveserver' in arg:
cls.server_url = 'http://' + arg.split('=')[1]
return
super().setUpClass()
cls.server_url = cls.live_server_url
@classmethod
def tearDownClass(cls):
if cls.server_url == cls.live_server_url:
super().tearDownClass()
def setUp(self):
self.browser = webdriver.Firefox()
self.browser.implicitly_wait(3)
def tearDown(self):
self.browser.quit()
def check_for_row_in_list_table(self, row_text):
table = self.browser.find_element_by_id('id_list_table')
rows = table.find_elements_by_tag_name('tr')
self.assertIn(row_text, [row.text for row in rows])
| mit | 4,868,916,712,964,632,000 | 24 | 71 | 0.619512 | false |
imk1/IMKTFBindingCode | makeNegativeSummitPairSet.py | 1 | 6850 | import sys
import argparse
from collections import deque
def parseArgument():
# Parse the input
parser = argparse.ArgumentParser(description = "Get pairs of peak summits corresponding to each loop base")
parser.add_argument("--summitPairsFileName", required=True, help='Loop summit pairs sorted by the start position')
parser.add_argument("--peakSummitsFileName", required=True, help='Peak summits sorted')
parser.add_argument("--nonSummitPairsFileName", required=True, help='Summit pairs that do not correspond to matching loop bases')
parser.add_argument("--distanceCutoff", type=int, required=True, help='Summit pairs that do not correspond to matching loop bases')
options = parser.parse_args()
return options
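# Example invocation (the file names here are hypothetical):
#   python makeNegativeSummitPairSet.py --summitPairsFileName loopSummitPairs.txt \
#       --peakSummitsFileName peakSummits.txt --nonSummitPairsFileName negatives.txt \
#       --distanceCutoff 1000000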
def getSummit(summitLine):
# Get the summit
if summitLine == "":
# At the end of the summit file, so stop
return ("", 0)
summitLineElements = summitLine.strip().split("\t")
return (summitLineElements[0], int(summitLineElements[1]))
def getSummitPair(summitPairsLine):
# Get the pair of summits
summitPairsLineElements = summitPairsLine.split("\t")
return ((summitPairsLineElements[0], int(summitPairsLineElements[1])), (summitPairsLineElements[2], int(summitPairsLineElements[3])))
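# For illustration, a tab-delimited pairs line such as
# "chr1\t10000\tchr1\t250000" parses to (("chr1", 10000), ("chr1", 250000)).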
def recordSummitPairs(summitPairList, nonSummitPairsFile):
# Record pairs of summits
for summitPair in summitPairList:
# Iterate through the pairs and record each
nonSummitPairsFile.write(summitPair[0][0] + "\t" + str(summitPair[0][1]) + "\t" + summitPair[1][0] + "\t" + str(summitPair[1][1]) + "\n")
def getSummitsFromChrom(peakSummitsFile, chrom, lastSummit):
summitList = deque([lastSummit])
peakSummit = getSummit(peakSummitsFile.readline().strip())
while peakSummit[0] == chrom:
# Iterate through the summit file until a new chromosome is reached
summitList.append(peakSummit)
peakSummit = getSummit(peakSummitsFile.readline().strip())
lastSummit = peakSummit
return [summitList, lastSummit]
def addNonSummitPairs(summitPair, summitPairList, nonSummitPairList, distanceCutoff):
# Add all pairs with of each loop base with each loop base earlier on the chromosomes that it is not paired with
numRemove = 0
for i in range(2):
# Iterate through both loop bases
summit = summitPair[i]
for j in range(len(summitPairList)):
# Iterate through the earlier summit pairs and make all pairs with the current loop base
earlierSummitPair = summitPairList[j]
			if (i == 0) and (summit[1] - earlierSummitPair[1][1] > distanceCutoff):
				# The earlier pair is too far behind the current summit, so mark a
				# contiguous prefix of such pairs for removal (j + 1 ensures the
				# pair at index j itself is dropped)
				if j == numRemove:
					numRemove = j + 1
				continue
if (i == 1) and (earlierSummitPair[0][1] - summit[1] > distanceCutoff):
# The current summit is too early on the chromosome, so stop
break
for earlierSummit in earlierSummitPair:
# Iterate through the earlier summits in the earlier pair
if abs(summit[1] - earlierSummit[1]) > distanceCutoff:
# The summits are too far apart, so do not include them
continue
nonSummitPair = (earlierSummit, summit)
if summit[1] < earlierSummit[1]:
# Put the current summit first in the pair
nonSummitPair = (summit, earlierSummit)
if (nonSummitPair not in summitPairList) and (nonSummitPair not in nonSummitPairList):
# The current non-summit pair is a true non-summit pair and has not already been identified
nonSummitPairList.append(nonSummitPair)
for i in range(numRemove):
# Remove the summits that are too early on the chromosome
summitPairList.popleft()
return [summitPairList, nonSummitPairList]
def addPeakSummits(summitPair, summitList, nonSummitPairList, distanceCutoff):
# Add all pairs of the other peak summits with the loop base
numRemove = 0
for i in range(2):
# Iterate through both loop bases
summit = summitPair[i]
for j in range(len(summitList)):
# Iterate through the summits that are not in loop bases and make a pair with each of the current loop bases
s = summitList[j]
			if (i == 0) and (summit[1] - s[1] > distanceCutoff):
				# The current summit is too far past this earlier summit, so mark
				# the prefix up to and including index j for removal (j + 1 drops
				# the summit at index j itself)
				numRemove = j + 1
				continue
if (i == 1) and (s[1] - summit[1] > distanceCutoff):
# The current summit is too early on the chromosome, so stop
break
if abs(summit[1] - s[1]) > distanceCutoff:
# The summits are too far apart, so do not include them
continue
nonSummitPair = (s, summit)
if summit[1] < s[1]:
# Put the current summit first in the pair
nonSummitPair = (summit, s)
if nonSummitPair not in nonSummitPairList:
# The current non-summit pair has not already been identified
nonSummitPairList.append(nonSummitPair)
for i in range(numRemove):
# Remove the summits that are too early on the chromosome
summitList.popleft()
return [summitList, nonSummitPairList]
def makeNegativeSummitPairSet(options):
	# Make a set of negative peak summit pairs for training a loop connection classifier
# ASSUMES THAT summitPairsFile AND peakSummitsFile ARE SORTED BY CHROMOSOME, SUMMIT (pairs are sorted by first summit then second summit)
# ASSUMES THAT ALL CHROMOSOMES IN summitPairsFile ARE IN peakSummitsFile
# ASSUMES THAT ALL CHROMOSOMES IN peakSummitsFile ARE IN summitPairsFile
# WILL NOT INCLUDE NEGATIVES WITH A DISTANCE > distanceCutoff (and will exclude some others)
summitPairsFile = open(options.summitPairsFileName)
peakSummitsFile = open(options.peakSummitsFileName)
nonSummitPairsFile = open(options.nonSummitPairsFileName, 'w+')
summitPairsLine = summitPairsFile.readline().strip()
lastChrom = ""
summitPairList = deque([])
summitList = deque([])
nonSummitPairList = []
lastSummit = getSummit(peakSummitsFile.readline().strip())
while summitPairsLine != "":
# Iterate through the chromosomes and make all non-summit-pairs for each
summitPair = getSummitPair(summitPairsLine)
if summitPair[0][0] != lastChrom:
# At a new chromosome
if lastChrom != "":
# Record the summit pairs that do not overlap matching loop bases
recordSummitPairs(nonSummitPairList, nonSummitPairsFile)
nonSummitPairList = []
summitPairList = deque([])
lastChrom = summitPair[0][0]
[summitList, lastSummit] = getSummitsFromChrom(peakSummitsFile, lastChrom, lastSummit)
[summitPairList, nonSummitPairList] = addNonSummitPairs(summitPair, summitPairList, nonSummitPairList, options.distanceCutoff)
[summitList, nonSummitPairList] = addPeakSummits(summitPair, summitList, nonSummitPairList, options.distanceCutoff)
summitPairList.append(summitPair)
summitPairsLine = summitPairsFile.readline().strip()
recordSummitPairs(nonSummitPairList, nonSummitPairsFile)
summitPairsFile.close()
peakSummitsFile.close()
nonSummitPairsFile.close()
if __name__=="__main__":
options = parseArgument()
makeNegativeSummitPairSet(options)
| mit | -7,915,858,102,713,001,000 | 45.917808 | 139 | 0.752117 | false |
evheubel/latex2edx | latex2edx/abox.py | 1 | 26072 | #!/usr/bin/env python
#
# Answer Box class
#
# object representation of abox, used in Tutor2, now generalized to latex and word input formats.
# 13-Aug-12 ichaung: merge in sari's changes
# 13-Aug-12 ichuang: cleaned up, does more error checking, includes stub for shortanswer
# note that shortanswer can be implemented right now using customresponse and textbox
# 04-Sep-12 ichuang: switch from shlex to FSM, merge in changes for math and inline from 8.21
# 13-Oct-12 ichuang: remove csv entirely, use FSM for splitting options instead
# 20-Jan-13 ichuang: add formularesponse
# 23-Jan-13 ichuang: add multiple-line customresponse, with proper inline and math handling
import os, sys, string, re
# import shlex # for split keeping quoted strings intact
# import csv # for splitting quoted options
from lxml import etree
class AnswerBox(object):
def __init__(self, aboxstr, context=None, verbose=False):
'''
Parse a TUT abox and produce edX XML for a problem responsetype.
Examples:
-----------------------------------------------------------------------------
<abox type="option" expect="float" options=" ","noneType","int","float" />
<optionresponse>
            <optioninput options="('noneType','int','float')" correct="int"/>
</optionresponse>
-----------------------------------------------------------------------------
<abox type="string" expect="Michigan" options="ci" />
<stringresponse answer="Michigan" type="ci">
<textline size="20" />
</stringresponse>
-----------------------------------------------------------------------------
<abox type="custom" expect="(3 * 5) / (2 + 3)" cfn="eq" />
<customresponse cfn="eq">
<textline size="40" correct_answer="(3 * 5) / (2 + 3)"/><br/>
</customresponse>
-----------------------------------------------------------------------------
<abox type="custom" expect="20" answers="11","9" prompts="Integer 1:","Integer 2:" inline="1" cfn="test_add" />
<customresponse cfn="test_add" expect="20" inline="1">
<p style="display:inline">Integer 1:<textline correct_answer="11" inline="1"/></p>
<br/>
<p style="display:inline">Integer 2:<textline correct_answer="9" inline="1"/></p>
</customresponse>
-----------------------------------------------------------------------------
<abox type="jsinput" expect="(3 * 5) / (2 + 3)" cfn="eq" gradefn="gradefn" height="500"
get_statefn="getstate" set_statefn="setstate" html_file="/static/jsinput.html"/>
<customresponse cfn="eq" expect="(3 * 5) / (2 + 3)">
<jsinput gradefn="gradefn"
height="500"
get_statefn="getstate"
set_statefn="setstate"
html_file="/static/jsinput.html"/>
</customresponse>
-----------------------------------------------------------------------------
<abox type="numerical" expect="3.141" tolerance="5%" />
<numericalresponse answer="5.0">
<responseparam type="tolerance" default="5%" name="tol" description="Numerical Tolerance" />
<textline />
</numericalresponse>
-----------------------------------------------------------------------------
<abox type="multichoice" expect="Yellow" options="Red","Green","Yellow","Blue" />
<multiplechoiceresponse direction="vertical" randomize="yes">
<choicegroup type="MultipleChoice">
<choice location="random" correct="false" name="red">Red</choice>
<choice location="random" correct="true" name="green">Green</choice>
<choice location="random" correct="false" name="yellow">Yellow</choice>
<choice location="bottom" correct="false" name="blue">Blue</choice>
</choicegroup>
</multiplechoiceresponse>
-----------------------------------------------------------------------------
<abox type="oldmultichoice" expect="1","3" options="0","1","2","3","4" />
<choiceresponse>
<checkboxgroup>
<choice correct="false"><text>0</text></choice>
<choice correct="true"><text>1</text></choice>
<choice correct="false"><text>2</text></choice>
<choice correct="true"><text>3</text></choice>
<choice correct="false"><text>4</text></choice>
</checkboxgroup>
</choiceresponse>
-----------------------------------------------------------------------------
<abox type="formula" expect="m*c^2" samples="m,c@1,2:3,4#10" intype="cs" size="40" math="1" tolerance="0.01" feqin="1" />
        format of samples: <variables>@<lower_bounds>:<upper_bounds>#<num_samples>
* variables - a set of variables that are allowed as student input
* lower_bounds - for every variable defined in variables, a lower
bound on the numerical tests to use for that variable
* upper_bounds - for every variable defined in variables, an upper
bound on the numerical tests to use for that variable
if feqin is given as an attribute, then a formulaequationinput is used instead
of textline, for the input element.
<formularesponse type="cs" samples="m,c@1,2:3,4#10" answer="m*c^2">
<responseparam type="tolerance" default="0.01"/>
<textline size="40" math="1" />
</formularesponse>
-----------------------------------------------------------------------------
Adaptive hints:
define the hints as a dict in an included python script, and give the name
of that dict as the parameter "hints". Works inside customresponse,
optionresponse, and multiple choice problems, within latex2edx.
latex2edx automatically translates <ed_general_hint_system/> into an import
of the general_hint_system.py python code.
Thus, this input:
<abox type="custom" expect="(3 * 5) / (2 + 3)" cfn="eq" hints="hint1"/>
produces:
<edx_general_hint_system />
<script type="text/python">
do_hints_for_hint1 = HintSystem(hints=hint1).check_hint
</script>
<customresponse cfn="eq">
<textline size="40" correct_answer="(3 * 5) / (2 + 3)"/><br/>
<hintgroup hintfn="do_hints_for_hint1">
</customresponse>
-----------------------------------------------------------------------------
context is used for error reporting, and provides context like the line number and
filename where the abox is located.
'''
self.aboxstr = aboxstr
self.context = context
self.verbose = verbose
self.xml = self.abox2xml(aboxstr)
self.xmlstr = self.hint_extras + etree.tostring(self.xml)
def abox2xml(self, aboxstr):
if aboxstr.startswith('abox '): aboxstr = aboxstr[5:]
s = aboxstr
s = s.replace(' in_check= ', ' ')
# parse answer box arguments into dict
abargs = self.abox_args(s)
self.abargs = abargs
type2response = {'custom': 'customresponse',
'external': 'externalresponse',
'code': 'coderesponse',
'oldmultichoice': 'choiceresponse',
'multichoice': 'multiplechoiceresponse',
'numerical': 'numericalresponse',
'option': 'optionresponse',
'formula': 'formularesponse',
'shortans': 'shortanswerresponse',
'shortanswer': 'shortanswerresponse',
'string': 'stringresponse',
'symbolic': 'symbolicresponse',
'image': 'imageresponse',
'jsinput': 'customresponse_jsinput',
}
if 'type' in abargs and abargs['type'] in type2response:
abtype = type2response[abargs['type']]
elif 'tests' in abargs:
abtype = 'externalresponse'
elif 'type' not in abargs and 'options' in abargs:
abtype = 'optionresponse'
elif 'cfn' in abargs:
abtype = 'customresponse'
else:
abtype = 'symbolicresponse' # default
abxml = etree.Element(abtype)
if abtype == 'optionresponse':
self.require_args(['expect'])
oi = etree.Element('optioninput')
optionstr, options = self.get_options(abargs)
oi.set('options', optionstr)
oi.set('correct', self.stripquotes(abargs['expect']))
abxml.append(oi)
self.copy_attrib(abargs, 'inline', abxml)
self.copy_attrib(abargs, 'inline', oi)
if abtype == 'multiplechoiceresponse':
self.require_args(['expect', 'options'])
cg = etree.SubElement(abxml, 'choicegroup')
cg.set('direction', 'vertical')
optionstr, options = self.get_options(abargs)
expectstr, expectset = self.get_options(abargs, arg='expect')
cnt = 1
for op in options:
choice = etree.SubElement(cg, 'choice')
choice.set('correct', 'true' if op in expectset else 'false')
choice.set('name', str(cnt))
choice.append(etree.XML("<text> %s</text>" % op))
cnt += 1
if abtype == 'choiceresponse':
self.require_args(['expect', 'options'])
cg = etree.SubElement(abxml, 'checkboxgroup')
optionstr, options = self.get_options(abargs)
expectstr, expects = self.get_options(abargs, 'expect')
cnt = 1
if self.verbose:
print "[abox.py] oldmultichoice: options=/%s/, expects=/%s/" % (options, expects)
for op in options:
choice = etree.SubElement(cg, 'choice')
choice.set('correct', 'true' if (op in expects) else 'false')
choice.set('name', str(cnt))
choice.append(etree.XML("<text>%s</text>" % op))
cnt += 1
elif abtype == 'shortanswerresponse':
print "[latex2html.abox] Warning - short answer response quite yet implemented in edX!"
if 1:
tb = etree.Element('textbox')
self.copy_attrib(abargs, 'rows', tb)
self.copy_attrib(abargs, 'cols', tb)
abxml.append(tb)
abxml.tag = 'customresponse'
self.require_args(['expect', 'cfn'])
abxml.set('cfn', self.stripquotes(abargs['cfn']))
self.copy_attrib(abargs, 'expect', abxml)
else:
abxml.tag = 'stringresponse' # change to stringresponse for now (FIXME)
tl = etree.Element('textline')
if 'size' in abargs:
tl.set('size', self.stripquotes(abargs['size']))
else:
tl.set('size', '80')
self.copy_attrib(abargs, 'trailing_text', tl)
abxml.append(tl)
abxml.set('answer', 'unknown')
self.copy_attrib(abargs, 'inline', tl)
self.copy_attrib(abargs, 'inline', abxml)
elif abtype == 'stringresponse':
self.require_args(['expect'])
tl = etree.Element('textline')
if 'size' in abargs:
tl.set('size', self.stripquotes(abargs['size']))
self.copy_attrib(abargs, 'trailing_text', tl)
abxml.append(tl)
abxml.set('answer', self.stripquotes(abargs['expect']))
if 'options' in abargs:
abxml.set('type', self.stripquotes(abargs['options']))
else:
abxml.set('type', '')
self.copy_attrib(abargs, 'inline', tl)
self.copy_attrib(abargs, 'inline', abxml)
elif abtype == 'customresponse':
self.require_args(['expect', 'cfn'])
abxml.set('cfn', self.stripquotes(abargs['cfn']))
self.copy_attrib(abargs, 'inline', abxml)
self.copy_attrib(abargs, 'expect', abxml)
self.copy_attrib(abargs, 'options', abxml)
if abxml.get('options', ''):
abxml.set('cfn_extra_args', 'options') # tells sandbox to include 'options' in cfn call arguments
if 'answers' not in abargs:
answers = [self.stripquotes(abargs['expect'])]
else: # multiple inputs for this customresponse
ansstr, answers = self.get_options(abargs, 'answers')
if 'prompts' in abargs:
promptstr, prompts = self.get_options(abargs, 'prompts')
else:
prompts = ['']
if not len(prompts) == len(answers):
msg = "Error: number of answers and prompts must match in:"
msg += aboxstr
msg += "\nabox located: %s\n" % self.context
raise Exception(msg)
# sys.exit(-1)
cnt = 0
for ans, prompt in zip(answers, prompts):
if 'rows' in abargs:
tl = etree.Element('textbox')
self.copy_attrib(abargs, 'rows', tl)
self.copy_attrib(abargs, 'cols', tl)
else:
tl = etree.Element('textline')
self.copy_attrib(abargs, 'size', tl)
tl.set('correct_answer', ans)
self.copy_attrib(abargs, 'trailing_text', tl)
self.copy_attrib(abargs, 'inline', tl)
self.copy_attrib(abargs, 'math', tl)
self.copy_attrib(abargs, 'preprocessorClassName', tl)
self.copy_attrib(abargs, 'preprocessorSrc', tl)
if prompt:
elem = etree.Element('p')
if 'inline' in abargs:
elem.set('style', 'display:inline')
elem.text = prompt + " "
elem.append(tl)
else:
elem = tl
if cnt > 0:
abxml.append(etree.Element('br')) # linebreak between boxes if multiple
abxml.append(elem)
cnt += 1
elif abtype == 'customresponse_jsinput':
abxml.tag = 'customresponse'
self.require_args(['expect', 'cfn'])
abxml.set('cfn', self.stripquotes(abargs['cfn']))
self.copy_attrib(abargs, 'expect', abxml)
self.copy_attrib(abargs, 'options', abxml)
if abxml.get('options', ''):
abxml.set('cfn_extra_args', 'options') # tells sandbox to include 'options' in cfn call arguments
js = etree.Element('jsinput')
jsattribs = ['width', 'height', 'gradefn', 'get_statefn', 'set_statefn', 'html_file']
for jsa in jsattribs:
self.copy_attrib(abargs, jsa, js)
abxml.append(js)
elif abtype == 'externalresponse' or abtype == 'coderesponse':
if 'url' in abargs:
self.copy_attrib(abargs, 'url', abxml)
tb = etree.Element('textbox')
self.copy_attrib(abargs, 'rows', tb)
self.copy_attrib(abargs, 'cols', tb)
self.copy_attrib(abargs, 'tests', abxml)
abxml.append(tb)
# turn script to <answer> later
elif abtype == 'numericalresponse':
self.require_args(['expect'])
self.copy_attrib(abargs, 'inline', abxml)
tl = etree.Element('textline')
self.copy_attrib(abargs, 'size', tl)
self.copy_attrib(abargs, 'inline', tl)
self.copy_attrib(abargs, 'math', tl)
self.copy_attrib(abargs, 'trailing_text', tl)
abxml.append(tl)
self.copy_attrib(abargs, 'options', abxml)
answer = self.stripquotes(abargs['expect'])
# NOTE: The edX platform now allows mathematical expressions
# and constants in the expect field.
# try:
# x = float(answer)
# except Exception as err:
# if not answer[0] == '$': # may also be a string variable (starts with $)
# print "Error - numericalresponse expects numerical expect value, for %s" % s
# raise
abxml.set('answer', answer)
rp = etree.SubElement(tl, "responseparam")
# rp.attrib['description'] = "Numerical Tolerance" #not needed
rp.attrib['type'] = "tolerance"
rp.attrib['default'] = abargs.get('tolerance') or "0.00001"
# rp.attrib['name'] = "tol" #not needed
elif abtype == 'formularesponse':
self.require_args(['expect', 'samples'])
self.copy_attrib(abargs, 'inline', abxml)
intype = self.stripquotes(abargs.get('intype', 'cs'))
abxml.set('type', intype)
self.copy_attrib(abargs, 'samples', abxml)
if abargs.get('feqin'):
tl = etree.Element('formulaequationinput')
else:
tl = etree.Element('textline')
self.copy_attrib(abargs, 'trailing_text', tl)
self.copy_attrib(abargs, 'size', tl)
self.copy_attrib(abargs, 'inline', tl)
self.copy_attrib(abargs, 'math', tl)
self.copy_attrib(abargs, 'preprocessorClassName', tl)
self.copy_attrib(abargs, 'preprocessorSrc', tl)
abxml.append(tl)
answer = self.stripquotes(abargs['expect'])
abxml.set('answer', answer)
rp = etree.SubElement(tl, "responseparam")
rp.attrib['type'] = "tolerance"
rp.attrib['default'] = abargs.get('tolerance') or "0.00001"
elif abtype == 'symbolicresponse':
self.require_args(['expect'])
self.copy_attrib(abargs, 'expect', abxml)
self.copy_attrib(abargs, 'debug', abxml)
self.copy_attrib(abargs, 'options', abxml)
tl = etree.Element('textline')
self.copy_attrib(abargs, 'inline', tl)
self.copy_attrib(abargs, 'size', tl)
self.copy_attrib(abargs, 'preprocessorClassName', tl)
self.copy_attrib(abargs, 'preprocessorSrc', tl)
self.copy_attrib(abargs, 'trailing_text', tl)
abxml.append(tl)
self.copy_attrib(abargs, 'inline', abxml)
if 'correct_answer' in abargs:
tl.set('correct_answer', self.stripquotes(abargs['correct_answer']))
else:
tl.set('correct_answer', self.stripquotes(abargs['expect']))
tl.set('math', '1') # use dynamath
elif abtype == 'imageresponse':
self.require_args(['src', 'width', 'height', 'rectangle'])
rect = abargs.get('rectangle')
if re.match('\(\d+\,\d+\)\-\(\d+,\d+\)', rect) is None: # check for rectangle syntax
msg = "[abox.py] ERROR: imageresponse rectancle %s has wrong syntax\n" % rect
msg += "Answer box string is \"%s\"\n" % self.aboxstr
msg += "abox located: %s\n" % self.context
raise Exception(msg)
# sys.exit(-1)
ii = etree.Element('imageinput')
self.copy_attrib(abargs, 'src', ii)
self.copy_attrib(abargs, 'width', ii)
self.copy_attrib(abargs, 'height', ii)
self.copy_attrib(abargs, 'rectangle', ii)
abxml.append(ii)
# has hint function?
if 'hintfn' in abargs:
hintfn = self.stripquotes(abargs['hintfn'])
hintgroup = etree.SubElement(abxml, 'hintgroup')
hintgroup.set('hintfn', hintfn)
# has hint?
hint_extras = ''
if 'hints' in abargs:
hints = self.stripquotes(abargs['hints'])
hintfn = "do_hints_for_%s" % hints
hintgroup = etree.SubElement(abxml, 'hintgroup')
hintgroup.set('hintfn', hintfn)
hint_extras = "<edx_general_hint_system />\n"
hint_extras += '<script type="text/python">\n%s = HintSystem(hints=%s).check_hint\n</script>\n' % (hintfn, hints)
self.hint_extras = hint_extras
s = etree.tostring(abxml, pretty_print=True)
s = re.sub('(?ms)<html>(.*)</html>', '\\1', s)
# print s
return etree.XML(s)
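    # Illustrative use of the class (the abox string mirrors an example from
    # the docstring above):
    #   ab = AnswerBox('type="string" expect="Michigan" options="ci"')
    #   print ab.xmlstr   # emits the <stringresponse ...> markup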
def get_options(self, abargs, arg='options'):
optstr = abargs[arg] # should be double quoted strings, comma delimited
# EVH 01-22-2015: Inserting quotes around single option for proper
# parsing of choices containing commas
if not optstr.startswith('"') and not optstr.startswith("'"):
optraw = repr(optstr)
optstr = optraw[0] + optstr + optraw[0]
# options = [c for c in csv.reader([optstr])][0] # turn into list of strings
options = split_args_with_quoted_strings(optstr, lambda(x): x == ',') # turn into list of strings
options = map(self.stripquotes, options)
options = [x.strip() for x in options] # strip strings
if "" in options: options.remove("")
optionstr = ','.join(["'%s'" % x for x in options]) # string of single quoted strings
optionstr = "(%s)" % optionstr # enclose in parens
return optionstr, options
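    # For example (illustrative), options='"Red","Green","Yellow"' yields
    # ("('Red','Green','Yellow')", ['Red', 'Green', 'Yellow']).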
def require_args(self, argnames):
for argname in argnames:
if argname not in self.abargs:
msg = "============================================================\n"
msg += "Error - abox requires %s argument\n" % argname
msg += "Answer box string is \"%s\"\n" % self.aboxstr
msg += "abox located: %s\n" % self.context
# raise Exception, "Bad abox"
raise Exception(msg)
# sys.exit(-1)
def abox_args(self, s):
'''
Parse arguments of abox. Splits by space delimitation.
'''
s = s.replace(u'\u2019', "'")
try:
s = str(s)
except Exception, err:
print "Error %s in obtaining string form of abox argument %s" % (err, s)
return {}
try:
# abargstxt = shlex.split(s)
abargstxt = split_args_with_quoted_strings(s)
except Exception, err:
print "Error %s in parsing abox argument %s" % (err, s)
return {}
if '' in abargstxt:
abargstxt.remove('')
try:
abargs = dict([x.split('=', 1) for x in abargstxt])
except Exception, err:
print "Error %s" % err
print "Failed in parsing args = %s" % s
print "abargstxt = %s" % abargstxt
raise
for arg in abargs:
abargs[arg] = self.stripquotes(abargs[arg], checkinternal=True)
return abargs
def stripquotes(self, x, checkinternal=False):
if x.startswith('"') and x.endswith('"'):
if checkinternal and '"' in x[1:-1]:
return x
return x[1:-1]
if x.startswith("'") and x.endswith("'"):
return x[1:-1]
return x
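    # e.g. stripquotes('"Michigan"') returns 'Michigan', while
    # stripquotes('"a","b"', checkinternal=True) is returned unchanged because
    # of the internal double quote.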
def copy_attrib(self, abargs, aname, xml):
if aname in abargs:
xml.set(aname, self.stripquotes(abargs[aname]))
def split_args_with_quoted_strings(command_line, checkfn=None):
"""from pexpect.py
This splits a command line into a list of arguments. It splits arguments
on spaces, but handles embedded quotes, doublequotes, and escaped
characters. It's impossible to do this with a regular expression, so I
wrote a little state machine to parse the command line. """
arg_list = []
arg = ''
if checkfn is None:
def checkfn(c):
return c.isspace()
# Constants to name the states we can be in.
state_basic = 0
state_esc = 1
state_singlequote = 2
state_doublequote = 3
# The state when consuming whitespace between commands.
state_whitespace = 4
state = state_basic
for c in command_line:
if state == state_basic or state == state_whitespace:
if c == '\\':
# Escape the next character
state = state_esc
elif c == r"'":
# Handle single quote
arg = arg + c
state = state_singlequote
elif c == r'"':
# Handle double quote
arg = arg + c
state = state_doublequote
elif checkfn(c): # OLD: c.isspace():
# Add arg to arg_list if we aren't in the middle of whitespace.
if state == state_whitespace:
# Do nothing.
                    pass
else:
arg_list.append(arg)
arg = ''
state = state_whitespace
else:
arg = arg + c
state = state_basic
elif state == state_esc:
arg = arg + c
state = state_basic
elif state == state_singlequote:
if c == r"'":
arg = arg + c
state = state_basic
else:
arg = arg + c
elif state == state_doublequote:
if c == r'"':
arg = arg + c
state = state_basic
else:
arg = arg + c
if arg != '':
arg_list.append(arg)
return arg_list
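# For example (illustrative):
#   split_args_with_quoted_strings('type="string" expect="New York"')
#   returns ['type="string"', 'expect="New York"']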
| agpl-3.0 | 1,928,660,598,516,613,000 | 42.381032 | 129 | 0.512696 | false |