larryhq/railguns | railguns/django/generics.py | mit

from django.conf import settings
from django.shortcuts import render
from django.utils.translation import ugettext_lazy as _
from django.views.generic.base import TemplateView


# headers = get_headers(request, ['HTTP_APP_SCHEME', 'HTTP_USER_AGENT', 'HTTP_HOST'])
# print(headers)
def get_headers(request, keys=[]):
    return dict((key, value) for (key, value) in request.META.items() if key in keys)


class BaseView(TemplateView):
    name = None


class WebView(BaseView):

    def get(self, request, *args, **kwargs):
        title = kwargs.get('title', '{} - {}'.format(_(self.name), _('app_name')))
        endpoint = kwargs.get('endpoint', '/{}{}'.format(settings.API_VERSION, request.get_full_path()))
        template_name = self.template_name if self.template_name else '{}.html'.format(self.name)
        return render(request, template_name, locals())


class MobileView(BaseView):

    def get(self, request, *args, **kwargs):
        title = kwargs.get('title', _(self.name))
        endpoint = kwargs.get('endpoint', '/{}{}'.format(settings.API_VERSION, request.get_full_path().replace(kwargs.get('path', '/m/'), '/')))
        template_name = self.template_name if self.template_name else 'mobile/{}.html'.format(self.name)
        return render(request, template_name, locals())
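# Usage sketch (not part of the original module; the URL patterns and the
# "home" view name are illustrative assumptions): these views are typically
# wired into a URLconf, with `name` passed through as_view() since it is a
# class attribute on BaseView.
#
#   from django.conf.urls import url
#   from railguns.django.generics import WebView, MobileView
#
#   urlpatterns = [
#       url(r'^$', WebView.as_view(name='home')),
#       url(r'^m/$', MobileView.as_view(name='home')),
#   ]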
CaptFrank/EsxiController | server/app/config/models.py | gpl-2.0

"""
models.py
==========
This is the models interface for the config application's database.
:copyright: (c) 2015 by GammaRay.
:license: BSD, see LICENSE for more details.
Author: GammaRay
Version: 1.0
Date: 3/11/2015
"""
"""
=============================================
Imports
=============================================
"""
import uuid
from server.app import db
from datetime import datetime
"""
=============================================
Constant
=============================================
"""
"""
=============================================
Source
=============================================
"""
# ===================
# Configuration
# ===================
class Configuration(db.Model):
"""
This is the main table used in this application.
    It stores the configs in the form of str(json).
Extends the Model class.
"""
# ===================
# Table name
# ===================
__tablename__ = 'configs'
# ===================
# Attributes
# ===================
id = db.Column(db.Integer, primary_key = True)
name = db.Column(db.String, unique = True)
uuid = db.Column(db.String, unique = True)
favorite = db.Column(db.Boolean, default = False)
date = db.Column(db.DateTime)
access = db.Column(db.DateTime)
recent = db.Column(db.Integer, default = 0)
user = db.Column(db.String)
# Favorite Relationship
favorite_id = db.Column(db.Integer, db.ForeignKey('favorites.config_type'))
# Core attributes
session_type = db.Column(db.String, db.ForeignKey('sessions.config_type'))
# Core attributes
config_type = db.Column(db.String)
configs = db.Column(db.String, unique = False)
# ===================
# Sources
# ===================
def __init__(self,
name = None,
configs = None,
favorite = None,
config_type = None):
"""
This is the default constructor for the table
:param name: the config name
:param configs: the config in dict form
:param favorite: the favorite boolean
:param config_type: the config type
:return:
"""
self.name = name
self.configs = str(configs)
self.uuid = str(uuid.uuid4())
self.favorite = favorite
self.config_type = config_type
self.date = datetime.utcnow()
return
def update_config_record(self, user):
"""
        Update the config's timestamps and access counts.
:param user:
:return:
"""
# Update internals
self.access = datetime.utcnow()
self.recent += 1
self.user = user
db.session.commit()
return
def __str__(self):
"""
Override the str method.
:return:
"""
return '<Configuration %s - favorite: %s - configs: %s - type: %s>' \
% (self.name, str(self.favorite), self.configs, self.config_type)
def __repr__(self):
"""
Override the repr method.
:return:
"""
return '<Configuration %r - favorite %r>' \
% (self.name, self.favorite)
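# Usage sketch (illustrative, not part of the original model): create and
# touch a config record, assuming a Flask-SQLAlchemy app context with `db`
# bound as imported above.
#
#   config = Configuration(name='esxi-base',
#                          configs={'host': 'esxi01', 'port': 443},
#                          favorite=False,
#                          config_type='host')
#   db.session.add(config)
#   db.session.commit()
#   config.update_config_record(user='admin')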
# ===================
# SessionGroup
# ===================
class SessionGroup(db.Model):
"""
    This is the sessions table used in this application.
    It stores sessions formed as a union of configs.
Extends the Model class.
"""
# ===================
# Table name
# ===================
__tablename__ = 'sessions'
# ===================
# Attributes
# ===================
id = db.Column(db.Integer, primary_key = True)
name = db.Column(db.String, unique = True)
uuid = db.Column(db.String, unique = True)
favorite = db.Column(db.Boolean, default = False)
date = db.Column(db.DateTime)
config_type = db.Column(db.String)
# Favorite Relationship
favorite_id = db.Column(db.Integer, db.ForeignKey('favorites.config_type'))
config = db.relationship(
'Configuration',
lazy = 'dynamic',
primaryjoin = "SessionGroup.config_type == Configuration.config_type"
)
# ===================
# Sources
# ===================
def __init__(self,
name = None,
config_type = None,
favorite = None):
"""
This is the default constructor for the table
:param name: the config name
        :param config_type: the config type to unionize
:param favorite: the favorite boolean
:return:
"""
self.name = name
self.config_type = config_type
self.uuid = str(uuid.uuid4())
self.favorite = favorite
self.date = datetime.utcnow()
return
def __str__(self):
"""
Override the str method.
:return:
"""
        return '<SessionGroup %s - favorite: %s - config type: %s>' \
               % (self.name, str(self.favorite), self.config_type)
def __repr__(self):
"""
Override the repr method.
:return:
"""
        return '<SessionGroup %r - favorite %r>' \
               % (self.name, self.favorite)
# ===================
# Favorite
# ===================
class Favorite(db.Model):
"""
    This is the favorite table. It holds both the
    favorite config and the favorite session.
Extends the Model class.
"""
# ===================
# Table name
# ===================
__tablename__ = 'favorites'
# ===================
# Attributes
# ===================
id = db.Column(db.Integer, primary_key = True)
name = db.Column(db.String, unique = True)
uuid = db.Column(db.String, unique = True)
date = db.Column(db.DateTime)
config_type = db.Column(db.String)
# Core attributes
fav_config = db.relationship(
"Configuration",
lazy = 'dynamic',
primaryjoin = "and_(Favorite.config_type == Configuration.config_type,"
+ "Configuration.favorite == 1)")
fav_session = db.relationship(
"SessionGroup",
lazy = 'dynamic',
primaryjoin = "and_(Favorite.config_type == SessionGroup.config_type,"
+ "SessionGroup.favorite == 1)")
# ===================
# Sources
# ===================
def __init__(self, name = None, config_type = None):
"""
This is the default constructor for the class.
:param name: the name of the favorite
:param config_type: the config type
:return:
"""
self.name = name
self.config_type = config_type
self.date = datetime.utcnow()
self.uuid = str(uuid.uuid4())
return
def __str__(self):
"""
Override the str method.
:return:
"""
return '<Favorite %s>' % self.name
def __repr__(self):
"""
Override the repr method.
:return:
"""
return '<Favorite %s>' % self.name
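# Usage sketch (illustrative, not part of the original model): the two
# relationships above join on config_type and filter on favorite == 1, so a
# Favorite row exposes its starred configs and sessions as lazy queries.
#
#   fav = Favorite.query.filter_by(config_type='host').first()
#   starred_configs = fav.fav_config.all()
#   starred_sessions = fav.fav_session.all()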
GuLinux/PySpectrum | reference_catalogues.py | gpl-3.0

from PyQt5.QtCore import QStandardPaths
import os
import json
import urllib.request
import gzip
import collections
class ReferenceCatalogues:
def __init__(self, database):
self.database = database
c = database.cursor()
cats = c.execute('SELECT id, "table", "name", spectra_url, gzipped, file_column, sptype_column FROM spectra_catalogues ORDER BY id ASC')
self.catalogues = collections.OrderedDict([(c[2], {'id':c[0],'table':c[1],'name':c[2],'url':c[3],'gzipped':c[4]==1, 'columns': {'sptype': c[6], 'file':c[5]} }) for c in cats])
def spectra(self, catalog):
cat_info = self.catalogues[catalog]
query = "SELECT {0}, {1} FROM {2} WHERE {1} <> '' ORDER BY {1} ASC".format(cat_info['columns']['file'], cat_info['columns']['sptype'], cat_info['table'])
c = self.database.cursor()
return [{'catalog': catalog, 'sptype': r[1], 'file': r[0]} for r in c.execute(query)]
def fits(self, entry):
catname = entry['catalog']
catalog = self.catalogues[catname]
return ReferenceCatalogues.get_fits(catname, entry['file'], catalog['url'], catalog['gzipped'])
    @staticmethod
    def get_fits(catname, filename, url, is_gzipped):
cache_path = os.path.join(QStandardPaths.writableLocation(QStandardPaths.CacheLocation), catname)
file_path = os.path.join(cache_path, '{}.gz'.format(filename))
try:
os.makedirs(cache_path)
except FileExistsError:
pass
if not os.path.exists(file_path):
if is_gzipped:
urllib.request.urlretrieve(url.format("{}.gz".format(filename)), file_path )
else:
request = urllib.request.urlopen(url.format(filename))
with gzip.open(file_path, 'wb') as f:
f.write(request.read())
        return file_path
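# Usage sketch (illustrative, not part of the original module), assuming an
# sqlite3 database laid out as the queries above expect:
#
#   import sqlite3
#   db = sqlite3.connect('catalogues.db')
#   catalogues = ReferenceCatalogues(db)
#   first_catalogue = next(iter(catalogues.catalogues))
#   entries = catalogues.spectra(first_catalogue)
#   fits_path = catalogues.fits(entries[0])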
TemoaProject/temoa | data_processing/GraphVizFormats.py | gpl-2.0

# SVG Formats
results_dot_fmt = """\
strict digraph model {
label = "Results for %(period)s"
rankdir = "LR" ;
smoothtype = "power_dist" ;
splines = "%(splinevar)s" ;
node [ style="filled" ] ;
edge [ arrowhead="vee" ] ;
subgraph unused_techs {
node [
color = "%(unused_color)s",
fontcolor = "%(unusedfont_color)s",
shape = "box",
fontcolor = "%(font_color)s"
] ;
%(dtechs)s
}
subgraph unused_energy_carriers {
node [
color = "%(unused_color)s",
fontcolor = "%(unusedfont_color)s",
shape = "circle",
fillcolor = "%(fill_color)s"
] ;
%(dcarriers)s
}
subgraph unused_emissions {
node [
color = "%(unused_color)s",
fontcolor = "%(unusedfont_color)s",
shape = "circle",
fillcolor = "%(fill_color)s"
]
%(demissions)s
}
subgraph in_use_techs {
node [
color = "%(tech_color)s",
fontcolor = "%(usedfont_color)s",
shape = "box"
fontcolor = "%(font_color)s"
] ;
%(etechs)s
}
subgraph in_use_energy_carriers {
node [
color = "%(commodity_color)s",
fontcolor = "%(usedfont_color)s",
shape = "circle",
fillcolor = "%(fill_color)s"
] ;
%(ecarriers)s
}
subgraph in_use_emissions {
node [
color = "%(commodity_color)s",
fontcolor = "%(usedfont_color)s",
shape = "circle",
fillcolor = "%(fill_color)s"
] ;
%(eemissions)s
}
subgraph unused_flows {
edge [ color="%(unused_color)s" ]
%(dflows)s
}
subgraph in_use_flows {
subgraph inputs {
edge [ color="%(arrowheadin_color)s" ] ;
%(eflowsi)s
}
subgraph outputs {
edge [ color="%(arrowheadout_color)s" ] ;
%(eflowso)s
}
}
{rank = same; %(xnodes)s}
}
"""
tech_results_dot_fmt = """\
strict digraph model {
label = "Results for %(inp_technology)s in %(period)s" ;
compound = "True" ;
concentrate = "True";
rankdir = "LR" ;
splines = "%(splinevar)s" ;
node [ style="filled" ] ;
edge [ arrowhead="vee" ] ;
subgraph cluster_vintages {
label = "Vintages\\nCapacity: %(total_cap).2f" ;
href = "%(cluster_vintage_url)s" ;
style = "filled"
color = "%(sb_vpbackg_color)s"
node [ color="%(sb_vp_color)s", shape="box", fontcolor="%(usedfont_color)s" ] ;
%(vnodes)s
}
subgraph energy_carriers {
node [
color = "%(commodity_color)s",
fontcolor = "%(usedfont_color)s",
shape = "circle",
fillcolor = "%(fill_color)s"
] ;
%(enodes)s
}
subgraph inputs {
edge [ color="%(arrowheadin_color)s" ] ;
%(iedges)s
}
subgraph outputs {
edge [ color="%(arrowheadout_color)s" ] ;
%(oedges)s
}
}
"""
slice_dot_fmt = """\
strict digraph model {
label = "Activity split of process %(inp_technology)s, %(vintage)s in year %(period)s" ;
compound = "True" ;
concentrate = "True";
rankdir = "LR" ;
splines = "%(splinevar)s" ;
node [ style="filled" ] ;
edge [ arrowhead="vee" ] ;
subgraph cluster_slices {
label = "%(vintage)s Capacity: %(total_cap).2f" ;
color = "%(vintage_cluster_color)s" ;
rank = "same" ;
style = "filled" ;
node [ color="%(vintage_color)s", shape="box", fontcolor="%(usedfont_color)s" ] ;
%(snodes)s
}
subgraph energy_carriers {
node [
color = "%(commodity_color)s",
fontcolor = "%(usedfont_color)s",
shape = "circle",
fillcolor = "%(fill_color)s"
] ;
%(enodes)s
}
subgraph inputs {
edge [ color="%(input_color)s" ] ;
%(iedges)s
}
subgraph outputs {
edge [ color="%(output_color)s" ] ;
%(oedges)s
}
}
"""
commodity_dot_fmt = """\
strict digraph result_commodity_%(inp_commodity)s {
label = "%(inp_commodity)s - %(period)s" ;
compound = "True" ;
concentrate = "True" ;
rankdir = "LR" ;
splines = "True" ;
node [ shape="box", style="filled", fontcolor="%(font_color)s" ] ;
edge [
arrowhead = "vee",
fontsize = "8",
label = " ",
labelfloat = "False",
labelfontcolor = "lightgreen"
len = "2",
weight = "0.5",
] ;
%(resource_node)s
subgraph used_techs {
node [ color="%(tech_color)s" ] ;
%(used_nodes)s
}
subgraph used_techs {
node [ color="%(unused_color)s" ] ;
%(unused_nodes)s
}
subgraph in_use_flows {
edge [ color="%(sb_arrow_color)s" ] ;
%(used_edges)s
}
subgraph unused_flows {
edge [ color="%(unused_color)s" ] ;
%(unused_edges)s
}
}
"""
quick_run_dot_fmt = """\
strict digraph model {
rankdir = "LR" ;
// Default node and edge attributes
node [ style="filled" ] ;
edge [ arrowhead="vee", labelfontcolor="lightgreen" ] ;
// Define individual nodes
subgraph techs {
node [ color="%(tech_color)s", shape="box", fontcolor="%(font_color)s" ] ;
%(tnodes)s
}
subgraph energy_carriers {
node [ color="%(commodity_color)s", shape="circle", fillcolor="%(fill_color)s" ] ;
%(enodes)s
}
// Define edges and any specific edge attributes
subgraph inputs {
edge [ color="%(arrowheadin_color)s" ] ;
%(iedges)s
}
subgraph outputs {
edge [ color="%(arrowheadout_color)s" ] ;
%(oedges)s
}
{rank = same; %(snodes)s}
}
""" | gpl-2.0 | -627,595,549,209,957,600 | 16.697917 | 89 | 0.57084 | false |
GNOME/dia | plug-ins/python/dot2dia.py | gpl-2.0

# PyDia DOT Import
# Copyright (c) 2009 Hans Breuer <[email protected]>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
##
# \file dot2dia.py \brief translate dot ( http://www.graphviz.org/ ) to Dia format
# \ingroup ImportFilters
import re, sys
import gettext
_ = gettext.gettext
# FIXME: keywords are case-independent
keywords = ['node', 'edge', 'graph', 'digraph', 'strict']
# starts with either a keyword or a node name in quotes.
# BEWARE: (?<!-> ) - negative lookbehind, so nodes are not found a second time in a connection definition (and the params misinterpreted)
rDecl = re.compile(r'\s*(?<!-> )(?P<cmd>(?:' + ')|(?:'.join(keywords) + ')|(?:\w+' + ')|(?:"[^"]+"))\s+\[(?P<dict>[^\]]+)\];', re.DOTALL | re.MULTILINE)
# don't assume that all node names are in quotes
rEdge = re.compile(r'\s*(?P<n1>("[^"]+")|(\w+))\s*->\s*(?P<n2>("[^"]+")|(\w+))\s+\[(?P<dict>[^\]]+)\];*', re.DOTALL | re.MULTILINE)
# a list of key=value
rParam = re.compile(r'(?P<key>\w+)\s*=(?P<val>(\w+)|("[^"]+")),?\s*', re.DOTALL | re.MULTILINE)
# units in dot are either points or inch
cmInch = 2.54
cmPoints = cmInch/72.0
# dot y up, dia y down
def StripQuotes(s) :
"strip quotes if any"
if s[0] == '"' :
s = s[1:-1]
return s
def DictFromString (s) :
#print "KVs", s
d = {}
for m in rParam.finditer (s) :
if m :
d[m.group ("key")] = StripQuotes(m.group ("val"))
return d
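# Example of the helpers above (the input string is illustrative):
#   DictFromString('fontsize=14, label="A -> B"')
#   returns {'fontsize': '14', 'label': 'A -> B'}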
##
# \brief Accumulating information with _DiaObject
class Object :
""" will end as a Dia Object """
def __init__ (self, typename, parms) :
self.typename = typename
self.parms = parms
def FontSize (self) :
try :
return float(self.parms['fontsize']) * cmPoints
except :
return 0.6
##
# \brief The nodes of the graph - finally represented as _Ellipse
class Node(Object) :
def __init__ (self, name, parms) :
Object.__init__(self, "Standard - Ellipse", parms)
self.name = name
def Pos(self) :
'deliver scaled X,Y coordinate'
x, y = 0.0, 0.0
try :
xy = self.parms['pos'].split(',')
x = float(xy[0]) * cmPoints
y = float(xy[1]) * cmPoints
except :
print("No position on '%s'" % (self.name,))
return x,-y
def Size(self) :
'deliver scaled W,H coordinate'
w, h = 0.5, 0.5
try :
w = float(self.parms['width']) * cmInch #? maybe this is relative to the font size?
h = float(self.parms['height']) * cmInch
except :
print("No size on '%s'" % (self.name,))
return w,h
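# Illustrative numbers for the conversions above: a dot node with pos="72,144"
# yields Pos() == (2.54, -5.08), since dot coordinates are points (1/72 inch,
# scaled by cmPoints) and dot's y axis points up while Dia's points down;
# Size() scales the inch-based width/height by cmInch (2.54).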
##
# \brief The edges of the graph - finally represented as _Bezierline
class Edge(Object) :
def __init__ (self, src, dest, parms) :
Object.__init__(self, "Standard - BezierLine", parms)
self.src = src
self.dest = dest
def LabelPos (self) :
x, y = 0.0, 0.0
try :
xy = self.parms['lp'].split(',')
x = float(xy[0]) * cmPoints
y = float(xy[1]) * cmPoints
except :
if 'label' in self.parms :
# should be optional otherwise
print("No label pos on %s" % (self.src + '->' + self.dest,))
return x, -y
def Pos (self) :
# no need to do something smart, it get adjusted anyway
return 0.0, 0.0
def SetPoints (self, diaobj) :
'the property to set must match the type'
pts = []
if 'pos' in self.parms :
s = self.parms['pos']
if s[:2] == 'e,' :
sp = s[2:].split(" ")
# apparently the first point is the end? just put it there!
sp.append(sp[-1])
del sp[0]
bp = []
for i in range(0, len(sp)) :
xy = sp[i].replace("\n", "").replace("\\", "").split(",")
try :
x = float(xy[0]) * cmPoints
y = float(xy[1]) * (-cmPoints)
except ValueError :
print(xy)
continue
bp.append((x,y))
# must convert to _one_ tuple
if i == 0 : # first must be move to
pts.append ((0, bp[0][0], bp[0][1]))
bp = [] # reset to new point
elif len(bp) == 3 : # rest is curveto ==2
pts.append ((2, bp[0][0], bp[0][1], bp[1][0], bp[1][1], bp[2][0], bp[2][1]))
bp = [] # reset
if len(bp) > 0 :
print(len(bp), "bezier points left!")
if len(pts) > 1 :
diaobj.properties['bez_points'] = pts
else :
print("BezPoints", pts)
def MergeParms (d, extra) :
for k in list(extra.keys()) :
if k not in d :
d[k] = extra[k]
##
# \brief Parsing the given dot file
def Parse(sFile) :
f = open(sFile, 'r')
s = f.read()
extra = {}
nodes = {}
edges = []
if 0 : # debug regex
dbg = rDecl.findall(s)
for db in dbg :
print(db)
for m in rDecl.finditer(s) :
if m :
name = StripQuotes(m.group("cmd"))
if name in keywords :
if name in extra :
MergeParms(extra[name], DictFromString(m.group("dict")))
else :
extra[name] = DictFromString(m.group("dict"))
else : # must be a node
n = Node(name, DictFromString(m.group("dict")))
if 'node' in extra :
MergeParms(n.parms, extra['node'])
nodes[name] = n
for m in rEdge.finditer(s) :
if m :
# the names given are not necessarily registered as nodes already
defparams = {}
if 'node' in extra :
defparams = extra['node']
for k in ["n1", "n2"] :
name = StripQuotes(m.group(k))
if name in nodes :
pass # defparms should be set above
else :
nodes[name] = Node(name, defparams)
# remember connection
edges.append(Edge(StripQuotes(m.group("n1")), StripQuotes(m.group("n2")), DictFromString(m.group("dict"))))
return [nodes, edges]
##
# \brief Adding a label for the edges
#
# This function could be improved if Dia would allow to
# attach labels to arbitrary objects. For the time being
# only the initial postion does match, but relayouting the
# graph in Dia will loose the position
def AddLabel (layer, pos, label, fontsize, center=0) :
""" create a Text object an put it into the layer """
textType = dia.get_object_type("Standard - Text")
obj, h1, h2 = textType.create(pos[0], pos[1])
#TODO: transfer font-size
obj.properties["text"] = label
obj.properties["text_height"] = fontsize
if center :
obj.properties["text_alignment"] = 1
obj.properties["text_vert_alignment"] = 2
layer.add_object(obj)
##
# \brief Callback registered for the ImportFilter
def ImportFile (sFile, diagramData) :
""" read the dot file and create diagram objects """
nodes, edges = Parse(sFile)
layer = diagramData.active_layer # can do better, e.g. layer per graph
for key in list(nodes.keys()) :
n = nodes[key]
nodeType = dia.get_object_type(n.typename) # could be optimized out of loop
x, y = n.Pos()
w, h = n.Size()
obj, h1, h2 = nodeType.create(x-w/2, y-h/2) # Dot pos is center, Dia (usually) uses top/left
# resizing the Ellipse by handle is screwed
# obj.move_handle(h2, (x+w/2, y+h/2), 0, 0) # resize the object
obj.properties["elem_width"] = w
obj.properties["elem_height"] = h
if 'fillcolor' in n.parms :
try :
obj.properties['fill_colour'] = n.parms['fillcolor'] # same color syntax?
except :
print("Failed to apply:", n.parms['fillcolor'])
layer.add_object(obj)
AddLabel (layer, (x,y), n.name, n.FontSize(), 1)
obj.properties['meta'] = n.parms # copy all (remaining) parameters
# after creation replace the node with the object (needed to connect them)
nodes[key] = obj
for e in edges :
edgeType = dia.get_object_type(e.typename) # could be optimized out of loop
x, y = e.Pos() # just to have a start
con, h1, h2 = edgeType.create(x,y)
e.SetPoints(con)
if 'style' in e.parms : # set line style
con.properties['line_style'] = (4, 0.5) #FIXME: hard-coded dotted
if 'weight' in e.parms :
            con.properties['line_width'] = float(e.parms['weight']) / 10.0  # arbitrary anyway
layer.add_object(con)
if e.src in nodes :
h = con.handles[0]
obj = nodes[e.src]
# by moving to the cp position first, the connection's points get recalculated
pos = obj.connections[8].pos
con.move_handle(h, pos, 0, 0)
h.connect(obj.connections[8]) # connect to mid-point
if e.dest in nodes :
h = con.handles[-1]
obj = nodes[e.dest]
pos = obj.connections[8].pos
con.move_handle(h, pos, 0, 0)
h.connect (obj.connections[8]) # connect to mid-point
if 'label' in e.parms :
AddLabel (layer, e.LabelPos(), e.parms['label'], e.FontSize())
diagram = None # FIXME: get it
if diagram :
for n, o in nodes.items() :
diagram.update_connections(o)
diagram.update_extents()
return diagramData
if __name__ == '__main__':
# just testing at the moment
nodes, edges = Parse(sys.argv[1])
for k, n in nodes.items() :
print("Name:", n.name, "Pos:", n.Pos(), "WxH:", n.Size())
for e in edges :
print(e.src, "->", e.dest, e.LabelPos(), e.parms)
else :
# run as a Dia plug-in
import dia
dia.register_import (_("Graphviz Dot"), "dot", ImportFile)
aleksandra-tarkowska/django | tests/forms_tests/tests/test_forms.py

# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import copy
import datetime
import json
import warnings
from django.core.exceptions import NON_FIELD_ERRORS
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core.validators import RegexValidator
from django.forms import (
BooleanField, CharField, CheckboxSelectMultiple, ChoiceField, DateField,
DateTimeField, EmailField, FileField, FloatField, Form, forms, HiddenInput,
IntegerField, MultipleChoiceField, MultipleHiddenInput, MultiValueField,
NullBooleanField, PasswordInput, RadioSelect, Select, SplitDateTimeField,
Textarea, TextInput, TimeField, ValidationError, widgets
)
from django.forms.utils import ErrorList
from django.http import QueryDict
from django.template import Template, Context
from django.test import TestCase
from django.test.utils import str_prefix
from django.utils.datastructures import MultiValueDict, MergeDict
from django.utils.safestring import mark_safe
from django.utils import six
class Person(Form):
first_name = CharField()
last_name = CharField()
birthday = DateField()
class PersonNew(Form):
first_name = CharField(widget=TextInput(attrs={'id': 'first_name_id'}))
last_name = CharField()
birthday = DateField()
class FormsTestCase(TestCase):
# A Form is a collection of Fields. It knows how to validate a set of data and it
# knows how to render itself in a couple of default ways (e.g., an HTML table).
# You can pass it data in __init__(), as a dictionary.
def test_form(self):
# Pass a dictionary to a Form's __init__().
p = Person({'first_name': 'John', 'last_name': 'Lennon', 'birthday': '1940-10-9'})
self.assertTrue(p.is_bound)
self.assertEqual(p.errors, {})
self.assertTrue(p.is_valid())
self.assertHTMLEqual(p.errors.as_ul(), '')
self.assertEqual(p.errors.as_text(), '')
self.assertEqual(p.cleaned_data["first_name"], 'John')
self.assertEqual(p.cleaned_data["last_name"], 'Lennon')
self.assertEqual(p.cleaned_data["birthday"], datetime.date(1940, 10, 9))
self.assertHTMLEqual(str(p['first_name']), '<input type="text" name="first_name" value="John" id="id_first_name" />')
self.assertHTMLEqual(str(p['last_name']), '<input type="text" name="last_name" value="Lennon" id="id_last_name" />')
self.assertHTMLEqual(str(p['birthday']), '<input type="text" name="birthday" value="1940-10-9" id="id_birthday" />')
nonexistenterror = "Key u?'nonexistentfield' not found in 'Person'"
with six.assertRaisesRegex(self, KeyError, nonexistenterror):
p['nonexistentfield']
self.fail('Attempts to access non-existent fields should fail.')
form_output = []
for boundfield in p:
form_output.append(str(boundfield))
self.assertHTMLEqual('\n'.join(form_output), """<input type="text" name="first_name" value="John" id="id_first_name" />
<input type="text" name="last_name" value="Lennon" id="id_last_name" />
<input type="text" name="birthday" value="1940-10-9" id="id_birthday" />""")
form_output = []
for boundfield in p:
form_output.append([boundfield.label, boundfield.data])
self.assertEqual(form_output, [
['First name', 'John'],
['Last name', 'Lennon'],
['Birthday', '1940-10-9']
])
self.assertHTMLEqual(str(p), """<tr><th><label for="id_first_name">First name:</label></th><td><input type="text" name="first_name" value="John" id="id_first_name" /></td></tr>
<tr><th><label for="id_last_name">Last name:</label></th><td><input type="text" name="last_name" value="Lennon" id="id_last_name" /></td></tr>
<tr><th><label for="id_birthday">Birthday:</label></th><td><input type="text" name="birthday" value="1940-10-9" id="id_birthday" /></td></tr>""")
def test_empty_dict(self):
# Empty dictionaries are valid, too.
p = Person({})
self.assertTrue(p.is_bound)
self.assertEqual(p.errors['first_name'], ['This field is required.'])
self.assertEqual(p.errors['last_name'], ['This field is required.'])
self.assertEqual(p.errors['birthday'], ['This field is required.'])
self.assertFalse(p.is_valid())
self.assertEqual(p.cleaned_data, {})
self.assertHTMLEqual(str(p), """<tr><th><label for="id_first_name">First name:</label></th><td><ul class="errorlist"><li>This field is required.</li></ul><input type="text" name="first_name" id="id_first_name" /></td></tr>
<tr><th><label for="id_last_name">Last name:</label></th><td><ul class="errorlist"><li>This field is required.</li></ul><input type="text" name="last_name" id="id_last_name" /></td></tr>
<tr><th><label for="id_birthday">Birthday:</label></th><td><ul class="errorlist"><li>This field is required.</li></ul><input type="text" name="birthday" id="id_birthday" /></td></tr>""")
self.assertHTMLEqual(p.as_table(), """<tr><th><label for="id_first_name">First name:</label></th><td><ul class="errorlist"><li>This field is required.</li></ul><input type="text" name="first_name" id="id_first_name" /></td></tr>
<tr><th><label for="id_last_name">Last name:</label></th><td><ul class="errorlist"><li>This field is required.</li></ul><input type="text" name="last_name" id="id_last_name" /></td></tr>
<tr><th><label for="id_birthday">Birthday:</label></th><td><ul class="errorlist"><li>This field is required.</li></ul><input type="text" name="birthday" id="id_birthday" /></td></tr>""")
self.assertHTMLEqual(p.as_ul(), """<li><ul class="errorlist"><li>This field is required.</li></ul><label for="id_first_name">First name:</label> <input type="text" name="first_name" id="id_first_name" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul><label for="id_last_name">Last name:</label> <input type="text" name="last_name" id="id_last_name" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul><label for="id_birthday">Birthday:</label> <input type="text" name="birthday" id="id_birthday" /></li>""")
self.assertHTMLEqual(p.as_p(), """<ul class="errorlist"><li>This field is required.</li></ul>
<p><label for="id_first_name">First name:</label> <input type="text" name="first_name" id="id_first_name" /></p>
<ul class="errorlist"><li>This field is required.</li></ul>
<p><label for="id_last_name">Last name:</label> <input type="text" name="last_name" id="id_last_name" /></p>
<ul class="errorlist"><li>This field is required.</li></ul>
<p><label for="id_birthday">Birthday:</label> <input type="text" name="birthday" id="id_birthday" /></p>""")
def test_unbound_form(self):
# If you don't pass any values to the Form's __init__(), or if you pass None,
# the Form will be considered unbound and won't do any validation. Form.errors
# will be an empty dictionary *but* Form.is_valid() will return False.
p = Person()
self.assertFalse(p.is_bound)
self.assertEqual(p.errors, {})
self.assertFalse(p.is_valid())
try:
p.cleaned_data
self.fail('Attempts to access cleaned_data when validation fails should fail.')
except AttributeError:
pass
self.assertHTMLEqual(str(p), """<tr><th><label for="id_first_name">First name:</label></th><td><input type="text" name="first_name" id="id_first_name" /></td></tr>
<tr><th><label for="id_last_name">Last name:</label></th><td><input type="text" name="last_name" id="id_last_name" /></td></tr>
<tr><th><label for="id_birthday">Birthday:</label></th><td><input type="text" name="birthday" id="id_birthday" /></td></tr>""")
self.assertHTMLEqual(p.as_table(), """<tr><th><label for="id_first_name">First name:</label></th><td><input type="text" name="first_name" id="id_first_name" /></td></tr>
<tr><th><label for="id_last_name">Last name:</label></th><td><input type="text" name="last_name" id="id_last_name" /></td></tr>
<tr><th><label for="id_birthday">Birthday:</label></th><td><input type="text" name="birthday" id="id_birthday" /></td></tr>""")
self.assertHTMLEqual(p.as_ul(), """<li><label for="id_first_name">First name:</label> <input type="text" name="first_name" id="id_first_name" /></li>
<li><label for="id_last_name">Last name:</label> <input type="text" name="last_name" id="id_last_name" /></li>
<li><label for="id_birthday">Birthday:</label> <input type="text" name="birthday" id="id_birthday" /></li>""")
self.assertHTMLEqual(p.as_p(), """<p><label for="id_first_name">First name:</label> <input type="text" name="first_name" id="id_first_name" /></p>
<p><label for="id_last_name">Last name:</label> <input type="text" name="last_name" id="id_last_name" /></p>
<p><label for="id_birthday">Birthday:</label> <input type="text" name="birthday" id="id_birthday" /></p>""")
def test_unicode_values(self):
# Unicode values are handled properly.
p = Person({'first_name': 'John', 'last_name': '\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111', 'birthday': '1940-10-9'})
self.assertHTMLEqual(p.as_table(), '<tr><th><label for="id_first_name">First name:</label></th><td><input type="text" name="first_name" value="John" id="id_first_name" /></td></tr>\n<tr><th><label for="id_last_name">Last name:</label></th><td><input type="text" name="last_name" value="\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111" id="id_last_name" /></td></tr>\n<tr><th><label for="id_birthday">Birthday:</label></th><td><input type="text" name="birthday" value="1940-10-9" id="id_birthday" /></td></tr>')
self.assertHTMLEqual(p.as_ul(), '<li><label for="id_first_name">First name:</label> <input type="text" name="first_name" value="John" id="id_first_name" /></li>\n<li><label for="id_last_name">Last name:</label> <input type="text" name="last_name" value="\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111" id="id_last_name" /></li>\n<li><label for="id_birthday">Birthday:</label> <input type="text" name="birthday" value="1940-10-9" id="id_birthday" /></li>')
self.assertHTMLEqual(p.as_p(), '<p><label for="id_first_name">First name:</label> <input type="text" name="first_name" value="John" id="id_first_name" /></p>\n<p><label for="id_last_name">Last name:</label> <input type="text" name="last_name" value="\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111" id="id_last_name" /></p>\n<p><label for="id_birthday">Birthday:</label> <input type="text" name="birthday" value="1940-10-9" id="id_birthday" /></p>')
p = Person({'last_name': 'Lennon'})
self.assertEqual(p.errors['first_name'], ['This field is required.'])
self.assertEqual(p.errors['birthday'], ['This field is required.'])
self.assertFalse(p.is_valid())
self.assertDictEqual(p.errors, {'birthday': ['This field is required.'], 'first_name': ['This field is required.']})
self.assertEqual(p.cleaned_data, {'last_name': 'Lennon'})
self.assertEqual(p['first_name'].errors, ['This field is required.'])
self.assertHTMLEqual(p['first_name'].errors.as_ul(), '<ul class="errorlist"><li>This field is required.</li></ul>')
self.assertEqual(p['first_name'].errors.as_text(), '* This field is required.')
p = Person()
self.assertHTMLEqual(str(p['first_name']), '<input type="text" name="first_name" id="id_first_name" />')
self.assertHTMLEqual(str(p['last_name']), '<input type="text" name="last_name" id="id_last_name" />')
self.assertHTMLEqual(str(p['birthday']), '<input type="text" name="birthday" id="id_birthday" />')
def test_cleaned_data_only_fields(self):
# cleaned_data will always *only* contain a key for fields defined in the
# Form, even if you pass extra data when you define the Form. In this
# example, we pass a bunch of extra fields to the form constructor,
# but cleaned_data contains only the form's fields.
data = {'first_name': 'John', 'last_name': 'Lennon', 'birthday': '1940-10-9', 'extra1': 'hello', 'extra2': 'hello'}
p = Person(data)
self.assertTrue(p.is_valid())
self.assertEqual(p.cleaned_data['first_name'], 'John')
self.assertEqual(p.cleaned_data['last_name'], 'Lennon')
self.assertEqual(p.cleaned_data['birthday'], datetime.date(1940, 10, 9))
def test_optional_data(self):
# cleaned_data will include a key and value for *all* fields defined in the Form,
# even if the Form's data didn't include a value for fields that are not
# required. In this example, the data dictionary doesn't include a value for the
# "nick_name" field, but cleaned_data includes it. For CharFields, it's set to the
# empty string.
class OptionalPersonForm(Form):
first_name = CharField()
last_name = CharField()
nick_name = CharField(required=False)
data = {'first_name': 'John', 'last_name': 'Lennon'}
f = OptionalPersonForm(data)
self.assertTrue(f.is_valid())
self.assertEqual(f.cleaned_data['nick_name'], '')
self.assertEqual(f.cleaned_data['first_name'], 'John')
self.assertEqual(f.cleaned_data['last_name'], 'Lennon')
# For DateFields, it's set to None.
class OptionalPersonForm(Form):
first_name = CharField()
last_name = CharField()
birth_date = DateField(required=False)
data = {'first_name': 'John', 'last_name': 'Lennon'}
f = OptionalPersonForm(data)
self.assertTrue(f.is_valid())
self.assertEqual(f.cleaned_data['birth_date'], None)
self.assertEqual(f.cleaned_data['first_name'], 'John')
self.assertEqual(f.cleaned_data['last_name'], 'Lennon')
def test_auto_id(self):
# "auto_id" tells the Form to add an "id" attribute to each form element.
# If it's a string that contains '%s', Django will use that as a format string
# into which the field's name will be inserted. It will also put a <label> around
# the human-readable labels for a field.
p = Person(auto_id='%s_id')
self.assertHTMLEqual(p.as_table(), """<tr><th><label for="first_name_id">First name:</label></th><td><input type="text" name="first_name" id="first_name_id" /></td></tr>
<tr><th><label for="last_name_id">Last name:</label></th><td><input type="text" name="last_name" id="last_name_id" /></td></tr>
<tr><th><label for="birthday_id">Birthday:</label></th><td><input type="text" name="birthday" id="birthday_id" /></td></tr>""")
self.assertHTMLEqual(p.as_ul(), """<li><label for="first_name_id">First name:</label> <input type="text" name="first_name" id="first_name_id" /></li>
<li><label for="last_name_id">Last name:</label> <input type="text" name="last_name" id="last_name_id" /></li>
<li><label for="birthday_id">Birthday:</label> <input type="text" name="birthday" id="birthday_id" /></li>""")
self.assertHTMLEqual(p.as_p(), """<p><label for="first_name_id">First name:</label> <input type="text" name="first_name" id="first_name_id" /></p>
<p><label for="last_name_id">Last name:</label> <input type="text" name="last_name" id="last_name_id" /></p>
<p><label for="birthday_id">Birthday:</label> <input type="text" name="birthday" id="birthday_id" /></p>""")
def test_auto_id_true(self):
# If auto_id is any True value whose str() does not contain '%s', the "id"
# attribute will be the name of the field.
p = Person(auto_id=True)
self.assertHTMLEqual(p.as_ul(), """<li><label for="first_name">First name:</label> <input type="text" name="first_name" id="first_name" /></li>
<li><label for="last_name">Last name:</label> <input type="text" name="last_name" id="last_name" /></li>
<li><label for="birthday">Birthday:</label> <input type="text" name="birthday" id="birthday" /></li>""")
def test_auto_id_false(self):
# If auto_id is any False value, an "id" attribute won't be output unless it
# was manually entered.
p = Person(auto_id=False)
self.assertHTMLEqual(p.as_ul(), """<li>First name: <input type="text" name="first_name" /></li>
<li>Last name: <input type="text" name="last_name" /></li>
<li>Birthday: <input type="text" name="birthday" /></li>""")
def test_id_on_field(self):
# In this example, auto_id is False, but the "id" attribute for the "first_name"
# field is given. Also note that field gets a <label>, while the others don't.
p = PersonNew(auto_id=False)
self.assertHTMLEqual(p.as_ul(), """<li><label for="first_name_id">First name:</label> <input type="text" id="first_name_id" name="first_name" /></li>
<li>Last name: <input type="text" name="last_name" /></li>
<li>Birthday: <input type="text" name="birthday" /></li>""")
def test_auto_id_on_form_and_field(self):
# If the "id" attribute is specified in the Form and auto_id is True, the "id"
# attribute in the Form gets precedence.
p = PersonNew(auto_id=True)
self.assertHTMLEqual(p.as_ul(), """<li><label for="first_name_id">First name:</label> <input type="text" id="first_name_id" name="first_name" /></li>
<li><label for="last_name">Last name:</label> <input type="text" name="last_name" id="last_name" /></li>
<li><label for="birthday">Birthday:</label> <input type="text" name="birthday" id="birthday" /></li>""")
def test_various_boolean_values(self):
class SignupForm(Form):
email = EmailField()
get_spam = BooleanField()
f = SignupForm(auto_id=False)
self.assertHTMLEqual(str(f['email']), '<input type="email" name="email" />')
self.assertHTMLEqual(str(f['get_spam']), '<input type="checkbox" name="get_spam" />')
f = SignupForm({'email': '[email protected]', 'get_spam': True}, auto_id=False)
self.assertHTMLEqual(str(f['email']), '<input type="email" name="email" value="[email protected]" />')
self.assertHTMLEqual(str(f['get_spam']), '<input checked="checked" type="checkbox" name="get_spam" />')
# 'True' or 'true' should be rendered without a value attribute
f = SignupForm({'email': '[email protected]', 'get_spam': 'True'}, auto_id=False)
self.assertHTMLEqual(str(f['get_spam']), '<input checked="checked" type="checkbox" name="get_spam" />')
f = SignupForm({'email': '[email protected]', 'get_spam': 'true'}, auto_id=False)
self.assertHTMLEqual(str(f['get_spam']), '<input checked="checked" type="checkbox" name="get_spam" />')
# A value of 'False' or 'false' should be rendered unchecked
f = SignupForm({'email': '[email protected]', 'get_spam': 'False'}, auto_id=False)
self.assertHTMLEqual(str(f['get_spam']), '<input type="checkbox" name="get_spam" />')
f = SignupForm({'email': '[email protected]', 'get_spam': 'false'}, auto_id=False)
self.assertHTMLEqual(str(f['get_spam']), '<input type="checkbox" name="get_spam" />')
# A value of '0' should be interpreted as a True value (#16820)
f = SignupForm({'email': '[email protected]', 'get_spam': '0'})
self.assertTrue(f.is_valid())
self.assertTrue(f.cleaned_data.get('get_spam'))
def test_widget_output(self):
# Any Field can have a Widget class passed to its constructor:
class ContactForm(Form):
subject = CharField()
message = CharField(widget=Textarea)
f = ContactForm(auto_id=False)
self.assertHTMLEqual(str(f['subject']), '<input type="text" name="subject" />')
self.assertHTMLEqual(str(f['message']), '<textarea name="message" rows="10" cols="40"></textarea>')
# as_textarea(), as_text() and as_hidden() are shortcuts for changing the output
# widget type:
self.assertHTMLEqual(f['subject'].as_textarea(), '<textarea name="subject" rows="10" cols="40"></textarea>')
self.assertHTMLEqual(f['message'].as_text(), '<input type="text" name="message" />')
self.assertHTMLEqual(f['message'].as_hidden(), '<input type="hidden" name="message" />')
# The 'widget' parameter to a Field can also be an instance:
class ContactForm(Form):
subject = CharField()
message = CharField(widget=Textarea(attrs={'rows': 80, 'cols': 20}))
f = ContactForm(auto_id=False)
self.assertHTMLEqual(str(f['message']), '<textarea name="message" rows="80" cols="20"></textarea>')
# Instance-level attrs are *not* carried over to as_textarea(), as_text() and
# as_hidden():
self.assertHTMLEqual(f['message'].as_text(), '<input type="text" name="message" />')
f = ContactForm({'subject': 'Hello', 'message': 'I love you.'}, auto_id=False)
self.assertHTMLEqual(f['subject'].as_textarea(), '<textarea rows="10" cols="40" name="subject">Hello</textarea>')
self.assertHTMLEqual(f['message'].as_text(), '<input type="text" name="message" value="I love you." />')
self.assertHTMLEqual(f['message'].as_hidden(), '<input type="hidden" name="message" value="I love you." />')
def test_forms_with_choices(self):
# For a form with a <select>, use ChoiceField:
class FrameworkForm(Form):
name = CharField()
language = ChoiceField(choices=[('P', 'Python'), ('J', 'Java')])
f = FrameworkForm(auto_id=False)
self.assertHTMLEqual(str(f['language']), """<select name="language">
<option value="P">Python</option>
<option value="J">Java</option>
</select>""")
f = FrameworkForm({'name': 'Django', 'language': 'P'}, auto_id=False)
self.assertHTMLEqual(str(f['language']), """<select name="language">
<option value="P" selected="selected">Python</option>
<option value="J">Java</option>
</select>""")
# A subtlety: If one of the choices' value is the empty string and the form is
# unbound, then the <option> for the empty-string choice will get selected="selected".
class FrameworkForm(Form):
name = CharField()
language = ChoiceField(choices=[('', '------'), ('P', 'Python'), ('J', 'Java')])
f = FrameworkForm(auto_id=False)
self.assertHTMLEqual(str(f['language']), """<select name="language">
<option value="" selected="selected">------</option>
<option value="P">Python</option>
<option value="J">Java</option>
</select>""")
# You can specify widget attributes in the Widget constructor.
class FrameworkForm(Form):
name = CharField()
language = ChoiceField(choices=[('P', 'Python'), ('J', 'Java')], widget=Select(attrs={'class': 'foo'}))
f = FrameworkForm(auto_id=False)
self.assertHTMLEqual(str(f['language']), """<select class="foo" name="language">
<option value="P">Python</option>
<option value="J">Java</option>
</select>""")
f = FrameworkForm({'name': 'Django', 'language': 'P'}, auto_id=False)
self.assertHTMLEqual(str(f['language']), """<select class="foo" name="language">
<option value="P" selected="selected">Python</option>
<option value="J">Java</option>
</select>""")
# When passing a custom widget instance to ChoiceField, note that setting
# 'choices' on the widget is meaningless. The widget will use the choices
# defined on the Field, not the ones defined on the Widget.
class FrameworkForm(Form):
name = CharField()
language = ChoiceField(choices=[('P', 'Python'), ('J', 'Java')], widget=Select(choices=[('R', 'Ruby'), ('P', 'Perl')], attrs={'class': 'foo'}))
f = FrameworkForm(auto_id=False)
self.assertHTMLEqual(str(f['language']), """<select class="foo" name="language">
<option value="P">Python</option>
<option value="J">Java</option>
</select>""")
f = FrameworkForm({'name': 'Django', 'language': 'P'}, auto_id=False)
self.assertHTMLEqual(str(f['language']), """<select class="foo" name="language">
<option value="P" selected="selected">Python</option>
<option value="J">Java</option>
</select>""")
# You can set a ChoiceField's choices after the fact.
class FrameworkForm(Form):
name = CharField()
language = ChoiceField()
f = FrameworkForm(auto_id=False)
self.assertHTMLEqual(str(f['language']), """<select name="language">
</select>""")
f.fields['language'].choices = [('P', 'Python'), ('J', 'Java')]
self.assertHTMLEqual(str(f['language']), """<select name="language">
<option value="P">Python</option>
<option value="J">Java</option>
</select>""")
def test_forms_with_radio(self):
# Add widget=RadioSelect to use that widget with a ChoiceField.
class FrameworkForm(Form):
name = CharField()
language = ChoiceField(choices=[('P', 'Python'), ('J', 'Java')], widget=RadioSelect)
f = FrameworkForm(auto_id=False)
self.assertHTMLEqual(str(f['language']), """<ul>
<li><label><input type="radio" name="language" value="P" /> Python</label></li>
<li><label><input type="radio" name="language" value="J" /> Java</label></li>
</ul>""")
self.assertHTMLEqual(f.as_table(), """<tr><th>Name:</th><td><input type="text" name="name" /></td></tr>
<tr><th>Language:</th><td><ul>
<li><label><input type="radio" name="language" value="P" /> Python</label></li>
<li><label><input type="radio" name="language" value="J" /> Java</label></li>
</ul></td></tr>""")
self.assertHTMLEqual(f.as_ul(), """<li>Name: <input type="text" name="name" /></li>
<li>Language: <ul>
<li><label><input type="radio" name="language" value="P" /> Python</label></li>
<li><label><input type="radio" name="language" value="J" /> Java</label></li>
</ul></li>""")
# Regarding auto_id and <label>, RadioSelect is a special case. Each radio button
# gets a distinct ID, formed by appending an underscore plus the button's
# zero-based index.
f = FrameworkForm(auto_id='id_%s')
self.assertHTMLEqual(str(f['language']), """<ul id="id_language">
<li><label for="id_language_0"><input type="radio" id="id_language_0" value="P" name="language" /> Python</label></li>
<li><label for="id_language_1"><input type="radio" id="id_language_1" value="J" name="language" /> Java</label></li>
</ul>""")
# When RadioSelect is used with auto_id, and the whole form is printed using
# either as_table() or as_ul(), the label for the RadioSelect will point to the
# ID of the *first* radio button.
self.assertHTMLEqual(f.as_table(), """<tr><th><label for="id_name">Name:</label></th><td><input type="text" name="name" id="id_name" /></td></tr>
<tr><th><label for="id_language_0">Language:</label></th><td><ul id="id_language">
<li><label for="id_language_0"><input type="radio" id="id_language_0" value="P" name="language" /> Python</label></li>
<li><label for="id_language_1"><input type="radio" id="id_language_1" value="J" name="language" /> Java</label></li>
</ul></td></tr>""")
self.assertHTMLEqual(f.as_ul(), """<li><label for="id_name">Name:</label> <input type="text" name="name" id="id_name" /></li>
<li><label for="id_language_0">Language:</label> <ul id="id_language">
<li><label for="id_language_0"><input type="radio" id="id_language_0" value="P" name="language" /> Python</label></li>
<li><label for="id_language_1"><input type="radio" id="id_language_1" value="J" name="language" /> Java</label></li>
</ul></li>""")
self.assertHTMLEqual(f.as_p(), """<p><label for="id_name">Name:</label> <input type="text" name="name" id="id_name" /></p>
<p><label for="id_language_0">Language:</label> <ul id="id_language">
<li><label for="id_language_0"><input type="radio" id="id_language_0" value="P" name="language" /> Python</label></li>
<li><label for="id_language_1"><input type="radio" id="id_language_1" value="J" name="language" /> Java</label></li>
</ul></p>""")
def test_form_with_iterable_boundfield(self):
class BeatleForm(Form):
name = ChoiceField(choices=[('john', 'John'), ('paul', 'Paul'), ('george', 'George'), ('ringo', 'Ringo')], widget=RadioSelect)
f = BeatleForm(auto_id=False)
self.assertHTMLEqual('\n'.join(str(bf) for bf in f['name']), """<label><input type="radio" name="name" value="john" /> John</label>
<label><input type="radio" name="name" value="paul" /> Paul</label>
<label><input type="radio" name="name" value="george" /> George</label>
<label><input type="radio" name="name" value="ringo" /> Ringo</label>""")
self.assertHTMLEqual('\n'.join('<div>%s</div>' % bf for bf in f['name']), """<div><label><input type="radio" name="name" value="john" /> John</label></div>
<div><label><input type="radio" name="name" value="paul" /> Paul</label></div>
<div><label><input type="radio" name="name" value="george" /> George</label></div>
<div><label><input type="radio" name="name" value="ringo" /> Ringo</label></div>""")
def test_form_with_noniterable_boundfield(self):
# You can iterate over any BoundField, not just those with widget=RadioSelect.
class BeatleForm(Form):
name = CharField()
f = BeatleForm(auto_id=False)
self.assertHTMLEqual('\n'.join(str(bf) for bf in f['name']), '<input type="text" name="name" />')
def test_forms_with_multiple_choice(self):
# MultipleChoiceField is a special case, as its data is required to be a list:
class SongForm(Form):
name = CharField()
composers = MultipleChoiceField()
f = SongForm(auto_id=False)
self.assertHTMLEqual(str(f['composers']), """<select multiple="multiple" name="composers">
</select>""")
class SongForm(Form):
name = CharField()
composers = MultipleChoiceField(choices=[('J', 'John Lennon'), ('P', 'Paul McCartney')])
f = SongForm(auto_id=False)
self.assertHTMLEqual(str(f['composers']), """<select multiple="multiple" name="composers">
<option value="J">John Lennon</option>
<option value="P">Paul McCartney</option>
</select>""")
f = SongForm({'name': 'Yesterday', 'composers': ['P']}, auto_id=False)
self.assertHTMLEqual(str(f['name']), '<input type="text" name="name" value="Yesterday" />')
self.assertHTMLEqual(str(f['composers']), """<select multiple="multiple" name="composers">
<option value="J">John Lennon</option>
<option value="P" selected="selected">Paul McCartney</option>
</select>""")
def test_hidden_data(self):
class SongForm(Form):
name = CharField()
composers = MultipleChoiceField(choices=[('J', 'John Lennon'), ('P', 'Paul McCartney')])
# MultipleChoiceField rendered as_hidden() is a special case. Because it can
# have multiple values, its as_hidden() renders multiple <input type="hidden">
# tags.
f = SongForm({'name': 'Yesterday', 'composers': ['P']}, auto_id=False)
self.assertHTMLEqual(f['composers'].as_hidden(), '<input type="hidden" name="composers" value="P" />')
f = SongForm({'name': 'From Me To You', 'composers': ['P', 'J']}, auto_id=False)
self.assertHTMLEqual(f['composers'].as_hidden(), """<input type="hidden" name="composers" value="P" />
<input type="hidden" name="composers" value="J" />""")
# DateTimeField rendered as_hidden() is special too
class MessageForm(Form):
when = SplitDateTimeField()
f = MessageForm({'when_0': '1992-01-01', 'when_1': '01:01'})
self.assertTrue(f.is_valid())
self.assertHTMLEqual(str(f['when']), '<input type="text" name="when_0" value="1992-01-01" id="id_when_0" /><input type="text" name="when_1" value="01:01" id="id_when_1" />')
self.assertHTMLEqual(f['when'].as_hidden(), '<input type="hidden" name="when_0" value="1992-01-01" id="id_when_0" /><input type="hidden" name="when_1" value="01:01" id="id_when_1" />')
    def test_multiple_choice_checkbox(self):
# MultipleChoiceField can also be used with the CheckboxSelectMultiple widget.
class SongForm(Form):
name = CharField()
composers = MultipleChoiceField(choices=[('J', 'John Lennon'), ('P', 'Paul McCartney')], widget=CheckboxSelectMultiple)
f = SongForm(auto_id=False)
self.assertHTMLEqual(str(f['composers']), """<ul>
<li><label><input type="checkbox" name="composers" value="J" /> John Lennon</label></li>
<li><label><input type="checkbox" name="composers" value="P" /> Paul McCartney</label></li>
</ul>""")
f = SongForm({'composers': ['J']}, auto_id=False)
self.assertHTMLEqual(str(f['composers']), """<ul>
<li><label><input checked="checked" type="checkbox" name="composers" value="J" /> John Lennon</label></li>
<li><label><input type="checkbox" name="composers" value="P" /> Paul McCartney</label></li>
</ul>""")
f = SongForm({'composers': ['J', 'P']}, auto_id=False)
self.assertHTMLEqual(str(f['composers']), """<ul>
<li><label><input checked="checked" type="checkbox" name="composers" value="J" /> John Lennon</label></li>
<li><label><input checked="checked" type="checkbox" name="composers" value="P" /> Paul McCartney</label></li>
</ul>""")
def test_checkbox_auto_id(self):
# Regarding auto_id, CheckboxSelectMultiple is a special case. Each checkbox
# gets a distinct ID, formed by appending an underscore plus the checkbox's
# zero-based index.
class SongForm(Form):
name = CharField()
composers = MultipleChoiceField(choices=[('J', 'John Lennon'), ('P', 'Paul McCartney')], widget=CheckboxSelectMultiple)
f = SongForm(auto_id='%s_id')
self.assertHTMLEqual(str(f['composers']), """<ul id="composers_id">
<li><label for="composers_id_0"><input type="checkbox" name="composers" value="J" id="composers_id_0" /> John Lennon</label></li>
<li><label for="composers_id_1"><input type="checkbox" name="composers" value="P" id="composers_id_1" /> Paul McCartney</label></li>
</ul>""")
def test_multiple_choice_list_data(self):
# Data for a MultipleChoiceField should be a list. QueryDict, MultiValueDict and
# MergeDict (when created as a merge of MultiValueDicts) conveniently work with
# this.
class SongForm(Form):
name = CharField()
composers = MultipleChoiceField(choices=[('J', 'John Lennon'), ('P', 'Paul McCartney')], widget=CheckboxSelectMultiple)
data = {'name': 'Yesterday', 'composers': ['J', 'P']}
f = SongForm(data)
self.assertEqual(f.errors, {})
data = QueryDict('name=Yesterday&composers=J&composers=P')
f = SongForm(data)
self.assertEqual(f.errors, {})
data = MultiValueDict(dict(name=['Yesterday'], composers=['J', 'P']))
f = SongForm(data)
self.assertEqual(f.errors, {})
# MergeDict is deprecated, but is supported until removed.
with warnings.catch_warnings(record=True):
warnings.simplefilter("always")
data = MergeDict(MultiValueDict(dict(name=['Yesterday'], composers=['J', 'P'])))
f = SongForm(data)
self.assertEqual(f.errors, {})
def test_multiple_hidden(self):
class SongForm(Form):
name = CharField()
composers = MultipleChoiceField(choices=[('J', 'John Lennon'), ('P', 'Paul McCartney')], widget=CheckboxSelectMultiple)
# The MultipleHiddenInput widget renders multiple values as hidden fields.
class SongFormHidden(Form):
name = CharField()
composers = MultipleChoiceField(choices=[('J', 'John Lennon'), ('P', 'Paul McCartney')], widget=MultipleHiddenInput)
f = SongFormHidden(MultiValueDict(dict(name=['Yesterday'], composers=['J', 'P'])), auto_id=False)
self.assertHTMLEqual(f.as_ul(), """<li>Name: <input type="text" name="name" value="Yesterday" /><input type="hidden" name="composers" value="J" />
<input type="hidden" name="composers" value="P" /></li>""")
        # When using CheckboxSelectMultiple, the framework expects the data for
        # the field to be a list, and returns the cleaned data as a list.
f = SongForm({'name': 'Yesterday'}, auto_id=False)
self.assertEqual(f.errors['composers'], ['This field is required.'])
f = SongForm({'name': 'Yesterday', 'composers': ['J']}, auto_id=False)
self.assertEqual(f.errors, {})
self.assertEqual(f.cleaned_data['composers'], ['J'])
self.assertEqual(f.cleaned_data['name'], 'Yesterday')
f = SongForm({'name': 'Yesterday', 'composers': ['J', 'P']}, auto_id=False)
self.assertEqual(f.errors, {})
self.assertEqual(f.cleaned_data['composers'], ['J', 'P'])
self.assertEqual(f.cleaned_data['name'], 'Yesterday')
def test_escaping(self):
# Validation errors are HTML-escaped when output as HTML.
class EscapingForm(Form):
special_name = CharField(label="<em>Special</em> Field")
special_safe_name = CharField(label=mark_safe("<em>Special</em> Field"))
def clean_special_name(self):
raise ValidationError("Something's wrong with '%s'" % self.cleaned_data['special_name'])
def clean_special_safe_name(self):
raise ValidationError(mark_safe("'<b>%s</b>' is a safe string" % self.cleaned_data['special_safe_name']))
f = EscapingForm({'special_name': "Nothing to escape", 'special_safe_name': "Nothing to escape"}, auto_id=False)
self.assertHTMLEqual(f.as_table(), """<tr><th><em>Special</em> Field:</th><td><ul class="errorlist"><li>Something's wrong with 'Nothing to escape'</li></ul><input type="text" name="special_name" value="Nothing to escape" /></td></tr>
<tr><th><em>Special</em> Field:</th><td><ul class="errorlist"><li>'<b>Nothing to escape</b>' is a safe string</li></ul><input type="text" name="special_safe_name" value="Nothing to escape" /></td></tr>""")
f = EscapingForm({
'special_name': "Should escape < & > and <script>alert('xss')</script>",
'special_safe_name': "<i>Do not escape</i>"
}, auto_id=False)
self.assertHTMLEqual(f.as_table(), """<tr><th><em>Special</em> Field:</th><td><ul class="errorlist"><li>Something's wrong with 'Should escape < & > and <script>alert('xss')</script>'</li></ul><input type="text" name="special_name" value="Should escape < & > and <script>alert('xss')</script>" /></td></tr>
<tr><th><em>Special</em> Field:</th><td><ul class="errorlist"><li>'<b><i>Do not escape</i></b>' is a safe string</li></ul><input type="text" name="special_safe_name" value="<i>Do not escape</i>" /></td></tr>""")
def test_validating_multiple_fields(self):
# There are a couple of ways to do multiple-field validation. If you want the
# validation message to be associated with a particular field, implement the
# clean_XXX() method on the Form, where XXX is the field name. As in
# Field.clean(), the clean_XXX() method should return the cleaned value. In the
# clean_XXX() method, you have access to self.cleaned_data, which is a dictionary
# of all the data that has been cleaned *so far*, in order by the fields,
# including the current field (e.g., the field XXX if you're in clean_XXX()).
class UserRegistration(Form):
username = CharField(max_length=10)
password1 = CharField(widget=PasswordInput)
password2 = CharField(widget=PasswordInput)
def clean_password2(self):
if self.cleaned_data.get('password1') and self.cleaned_data.get('password2') and self.cleaned_data['password1'] != self.cleaned_data['password2']:
raise ValidationError('Please make sure your passwords match.')
return self.cleaned_data['password2']
f = UserRegistration(auto_id=False)
self.assertEqual(f.errors, {})
f = UserRegistration({}, auto_id=False)
self.assertEqual(f.errors['username'], ['This field is required.'])
self.assertEqual(f.errors['password1'], ['This field is required.'])
self.assertEqual(f.errors['password2'], ['This field is required.'])
f = UserRegistration({'username': 'adrian', 'password1': 'foo', 'password2': 'bar'}, auto_id=False)
self.assertEqual(f.errors['password2'], ['Please make sure your passwords match.'])
f = UserRegistration({'username': 'adrian', 'password1': 'foo', 'password2': 'foo'}, auto_id=False)
self.assertEqual(f.errors, {})
self.assertEqual(f.cleaned_data['username'], 'adrian')
self.assertEqual(f.cleaned_data['password1'], 'foo')
self.assertEqual(f.cleaned_data['password2'], 'foo')
# Another way of doing multiple-field validation is by implementing the
# Form's clean() method. Usually a ValidationError raised by that method
# is not associated with a particular field; instead it gets a
# special-case association with the field named '__all__'. It's
# possible to associate the errors with particular fields by using the
# Form.add_error() method or by passing a dictionary that maps each
# field to one or more errors.
#
# Note that in Form.clean(), you have access to self.cleaned_data, a
# dictionary of all the fields/values that have *not* raised a
# ValidationError. Also note Form.clean() is required to return a
# dictionary of all clean data.
class UserRegistration(Form):
username = CharField(max_length=10)
password1 = CharField(widget=PasswordInput)
password2 = CharField(widget=PasswordInput)
def clean(self):
# Test raising a ValidationError as NON_FIELD_ERRORS.
if self.cleaned_data.get('password1') and self.cleaned_data.get('password2') and self.cleaned_data['password1'] != self.cleaned_data['password2']:
raise ValidationError('Please make sure your passwords match.')
# Test raising ValidationError that targets multiple fields.
errors = {}
if self.cleaned_data.get('password1') == 'FORBIDDEN_VALUE':
errors['password1'] = 'Forbidden value.'
if self.cleaned_data.get('password2') == 'FORBIDDEN_VALUE':
errors['password2'] = ['Forbidden value.']
if errors:
raise ValidationError(errors)
# Test Form.add_error()
if self.cleaned_data.get('password1') == 'FORBIDDEN_VALUE2':
self.add_error(None, 'Non-field error 1.')
self.add_error('password1', 'Forbidden value 2.')
if self.cleaned_data.get('password2') == 'FORBIDDEN_VALUE2':
self.add_error('password2', 'Forbidden value 2.')
raise ValidationError('Non-field error 2.')
return self.cleaned_data
f = UserRegistration(auto_id=False)
self.assertEqual(f.errors, {})
f = UserRegistration({}, auto_id=False)
self.assertHTMLEqual(f.as_table(), """<tr><th>Username:</th><td><ul class="errorlist"><li>This field is required.</li></ul><input type="text" name="username" maxlength="10" /></td></tr>
<tr><th>Password1:</th><td><ul class="errorlist"><li>This field is required.</li></ul><input type="password" name="password1" /></td></tr>
<tr><th>Password2:</th><td><ul class="errorlist"><li>This field is required.</li></ul><input type="password" name="password2" /></td></tr>""")
self.assertEqual(f.errors['username'], ['This field is required.'])
self.assertEqual(f.errors['password1'], ['This field is required.'])
self.assertEqual(f.errors['password2'], ['This field is required.'])
f = UserRegistration({'username': 'adrian', 'password1': 'foo', 'password2': 'bar'}, auto_id=False)
self.assertEqual(f.errors['__all__'], ['Please make sure your passwords match.'])
self.assertHTMLEqual(f.as_table(), """<tr><td colspan="2"><ul class="errorlist nonfield"><li>Please make sure your passwords match.</li></ul></td></tr>
<tr><th>Username:</th><td><input type="text" name="username" value="adrian" maxlength="10" /></td></tr>
<tr><th>Password1:</th><td><input type="password" name="password1" /></td></tr>
<tr><th>Password2:</th><td><input type="password" name="password2" /></td></tr>""")
self.assertHTMLEqual(f.as_ul(), """<li><ul class="errorlist nonfield"><li>Please make sure your passwords match.</li></ul></li>
<li>Username: <input type="text" name="username" value="adrian" maxlength="10" /></li>
<li>Password1: <input type="password" name="password1" /></li>
<li>Password2: <input type="password" name="password2" /></li>""")
f = UserRegistration({'username': 'adrian', 'password1': 'foo', 'password2': 'foo'}, auto_id=False)
self.assertEqual(f.errors, {})
self.assertEqual(f.cleaned_data['username'], 'adrian')
self.assertEqual(f.cleaned_data['password1'], 'foo')
self.assertEqual(f.cleaned_data['password2'], 'foo')
f = UserRegistration({'username': 'adrian', 'password1': 'FORBIDDEN_VALUE', 'password2': 'FORBIDDEN_VALUE'}, auto_id=False)
self.assertEqual(f.errors['password1'], ['Forbidden value.'])
self.assertEqual(f.errors['password2'], ['Forbidden value.'])
f = UserRegistration({'username': 'adrian', 'password1': 'FORBIDDEN_VALUE2', 'password2': 'FORBIDDEN_VALUE2'}, auto_id=False)
self.assertEqual(f.errors['__all__'], ['Non-field error 1.', 'Non-field error 2.'])
self.assertEqual(f.errors['password1'], ['Forbidden value 2.'])
self.assertEqual(f.errors['password2'], ['Forbidden value 2.'])
with six.assertRaisesRegex(self, ValueError, "has no field named"):
f.add_error('missing_field', 'Some error.')
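# Aside: add_error(None, msg) files the message under NON_FIELD_ERRORS (the
# '__all__' key), which is why 'Non-field error 1.' and 'Non-field error 2.'
# show up together in f.errors['__all__'] above.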
def test_has_error(self):
class UserRegistration(Form):
username = CharField(max_length=10)
password1 = CharField(widget=PasswordInput, min_length=5)
password2 = CharField(widget=PasswordInput)
def clean(self):
if (self.cleaned_data.get('password1') and self.cleaned_data.get('password2')
and self.cleaned_data['password1'] != self.cleaned_data['password2']):
raise ValidationError(
'Please make sure your passwords match.',
code='password_mismatch',
)
f = UserRegistration(data={})
self.assertTrue(f.has_error('password1'))
self.assertTrue(f.has_error('password1', 'required'))
self.assertFalse(f.has_error('password1', 'anything'))
f = UserRegistration(data={'password1': 'Hi', 'password2': 'Hi'})
self.assertTrue(f.has_error('password1'))
self.assertTrue(f.has_error('password1', 'min_length'))
self.assertFalse(f.has_error('password1', 'anything'))
self.assertFalse(f.has_error('password2'))
self.assertFalse(f.has_error('password2', 'anything'))
f = UserRegistration(data={'password1': 'Bonjour', 'password2': 'Hello'})
self.assertFalse(f.has_error('password1'))
self.assertFalse(f.has_error('password1', 'required'))
self.assertTrue(f.has_error(NON_FIELD_ERRORS))
self.assertTrue(f.has_error(NON_FIELD_ERRORS, 'password_mismatch'))
self.assertFalse(f.has_error(NON_FIELD_ERRORS, 'anything'))
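# Hedged sketch (the branch body is illustrative): has_error() lets calling
# code react to a specific error code without parsing rendered messages.
f = UserRegistration(data={'password1': 'Bonjour', 'password2': 'Hello'})
if f.has_error(NON_FIELD_ERRORS, 'password_mismatch'):
    pass  # e.g., show a dedicated "passwords differ" hint in the UI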
def test_dynamic_construction(self):
# It's possible to construct a Form dynamically by adding to the self.fields
# dictionary in __init__(). Don't forget to call Form.__init__() within the
# subclass' __init__().
class Person(Form):
first_name = CharField()
last_name = CharField()
def __init__(self, *args, **kwargs):
super(Person, self).__init__(*args, **kwargs)
self.fields['birthday'] = DateField()
p = Person(auto_id=False)
self.assertHTMLEqual(p.as_table(), """<tr><th>First name:</th><td><input type="text" name="first_name" /></td></tr>
<tr><th>Last name:</th><td><input type="text" name="last_name" /></td></tr>
<tr><th>Birthday:</th><td><input type="text" name="birthday" /></td></tr>""")
# Instances of a dynamic Form do not persist fields from one Form instance to
# the next.
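# (BaseForm.__init__ deep-copies the class-level base_fields into
# self.fields, so each instance starts from a fresh copy.)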
class MyForm(Form):
def __init__(self, data=None, auto_id=False, field_list=()):
Form.__init__(self, data, auto_id=auto_id)
for field in field_list:
self.fields[field[0]] = field[1]
field_list = [('field1', CharField()), ('field2', CharField())]
my_form = MyForm(field_list=field_list)
self.assertHTMLEqual(my_form.as_table(), """<tr><th>Field1:</th><td><input type="text" name="field1" /></td></tr>
<tr><th>Field2:</th><td><input type="text" name="field2" /></td></tr>""")
field_list = [('field3', CharField()), ('field4', CharField())]
my_form = MyForm(field_list=field_list)
self.assertHTMLEqual(my_form.as_table(), """<tr><th>Field3:</th><td><input type="text" name="field3" /></td></tr>
<tr><th>Field4:</th><td><input type="text" name="field4" /></td></tr>""")
class MyForm(Form):
default_field_1 = CharField()
default_field_2 = CharField()
def __init__(self, data=None, auto_id=False, field_list=()):
Form.__init__(self, data, auto_id=auto_id)
for field in field_list:
self.fields[field[0]] = field[1]
field_list = [('field1', CharField()), ('field2', CharField())]
my_form = MyForm(field_list=field_list)
self.assertHTMLEqual(my_form.as_table(), """<tr><th>Default field 1:</th><td><input type="text" name="default_field_1" /></td></tr>
<tr><th>Default field 2:</th><td><input type="text" name="default_field_2" /></td></tr>
<tr><th>Field1:</th><td><input type="text" name="field1" /></td></tr>
<tr><th>Field2:</th><td><input type="text" name="field2" /></td></tr>""")
field_list = [('field3', CharField()), ('field4', CharField())]
my_form = MyForm(field_list=field_list)
self.assertHTMLEqual(my_form.as_table(), """<tr><th>Default field 1:</th><td><input type="text" name="default_field_1" /></td></tr>
<tr><th>Default field 2:</th><td><input type="text" name="default_field_2" /></td></tr>
<tr><th>Field3:</th><td><input type="text" name="field3" /></td></tr>
<tr><th>Field4:</th><td><input type="text" name="field4" /></td></tr>""")
# Similarly, changes to field attributes do not persist from one Form instance
# to the next.
class Person(Form):
first_name = CharField(required=False)
last_name = CharField(required=False)
def __init__(self, names_required=False, *args, **kwargs):
super(Person, self).__init__(*args, **kwargs)
if names_required:
self.fields['first_name'].required = True
self.fields['first_name'].widget.attrs['class'] = 'required'
self.fields['last_name'].required = True
self.fields['last_name'].widget.attrs['class'] = 'required'
f = Person(names_required=False)
self.assertEqual((f['first_name'].field.required, f['last_name'].field.required), (False, False))
self.assertEqual((f['first_name'].field.widget.attrs, f['last_name'].field.widget.attrs), ({}, {}))
f = Person(names_required=True)
self.assertEqual((f['first_name'].field.required, f['last_name'].field.required), (True, True))
self.assertEqual((f['first_name'].field.widget.attrs, f['last_name'].field.widget.attrs), ({'class': 'required'}, {'class': 'required'}))
f = Person(names_required=False)
self.assertEqual((f['first_name'].field.required, f['last_name'].field.required), (False, False))
self.assertEqual((f['first_name'].field.widget.attrs, f['last_name'].field.widget.attrs), ({}, {}))
class Person(Form):
first_name = CharField(max_length=30)
last_name = CharField(max_length=30)
def __init__(self, name_max_length=None, *args, **kwargs):
super(Person, self).__init__(*args, **kwargs)
if name_max_length:
self.fields['first_name'].max_length = name_max_length
self.fields['last_name'].max_length = name_max_length
f = Person(name_max_length=None)
self.assertEqual((f['first_name'].field.max_length, f['last_name'].field.max_length), (30, 30))
f = Person(name_max_length=20)
self.assertEqual((f['first_name'].field.max_length, f['last_name'].field.max_length), (20, 20))
f = Person(name_max_length=None)
self.assertEqual((f['first_name'].field.max_length, f['last_name'].field.max_length), (30, 30))
# Similarly, choices do not persist from one Form instance to the next.
# Refs #15127.
class Person(Form):
first_name = CharField(required=False)
last_name = CharField(required=False)
gender = ChoiceField(choices=(('f', 'Female'), ('m', 'Male')))
def __init__(self, allow_unspec_gender=False, *args, **kwargs):
super(Person, self).__init__(*args, **kwargs)
if allow_unspec_gender:
self.fields['gender'].choices += (('u', 'Unspecified'),)
f = Person()
self.assertEqual(f['gender'].field.choices, [('f', 'Female'), ('m', 'Male')])
f = Person(allow_unspec_gender=True)
self.assertEqual(f['gender'].field.choices, [('f', 'Female'), ('m', 'Male'), ('u', 'Unspecified')])
f = Person()
self.assertEqual(f['gender'].field.choices, [('f', 'Female'), ('m', 'Male')])
def test_validators_independence(self):
""" Test that we are able to modify a form field validators list without polluting
other forms """
from django.core.validators import MaxValueValidator
class MyForm(Form):
myfield = CharField(max_length=25)
f1 = MyForm()
f2 = MyForm()
f1.fields['myfield'].validators[0] = MaxValueValidator(12)
self.assertFalse(f1.fields['myfield'].validators[0] == f2.fields['myfield'].validators[0])
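# The independence again comes from the per-instance deep copy of
# base_fields: replacing a validator on f1's field leaves f2's untouched.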
def test_hidden_widget(self):
# HiddenInput widgets are displayed differently in the as_table(), as_ul())
# and as_p() output of a Form -- their verbose names are not displayed, and a
# separate row is not displayed. They're displayed in the last row of the
# form, directly after that row's form element.
class Person(Form):
first_name = CharField()
last_name = CharField()
hidden_text = CharField(widget=HiddenInput)
birthday = DateField()
p = Person(auto_id=False)
self.assertHTMLEqual(p.as_table(), """<tr><th>First name:</th><td><input type="text" name="first_name" /></td></tr>
<tr><th>Last name:</th><td><input type="text" name="last_name" /></td></tr>
<tr><th>Birthday:</th><td><input type="text" name="birthday" /><input type="hidden" name="hidden_text" /></td></tr>""")
self.assertHTMLEqual(p.as_ul(), """<li>First name: <input type="text" name="first_name" /></li>
<li>Last name: <input type="text" name="last_name" /></li>
<li>Birthday: <input type="text" name="birthday" /><input type="hidden" name="hidden_text" /></li>""")
self.assertHTMLEqual(p.as_p(), """<p>First name: <input type="text" name="first_name" /></p>
<p>Last name: <input type="text" name="last_name" /></p>
<p>Birthday: <input type="text" name="birthday" /><input type="hidden" name="hidden_text" /></p>""")
# With auto_id set, a HiddenInput still gets an ID, but it doesn't get a label.
p = Person(auto_id='id_%s')
self.assertHTMLEqual(p.as_table(), """<tr><th><label for="id_first_name">First name:</label></th><td><input type="text" name="first_name" id="id_first_name" /></td></tr>
<tr><th><label for="id_last_name">Last name:</label></th><td><input type="text" name="last_name" id="id_last_name" /></td></tr>
<tr><th><label for="id_birthday">Birthday:</label></th><td><input type="text" name="birthday" id="id_birthday" /><input type="hidden" name="hidden_text" id="id_hidden_text" /></td></tr>""")
self.assertHTMLEqual(p.as_ul(), """<li><label for="id_first_name">First name:</label> <input type="text" name="first_name" id="id_first_name" /></li>
<li><label for="id_last_name">Last name:</label> <input type="text" name="last_name" id="id_last_name" /></li>
<li><label for="id_birthday">Birthday:</label> <input type="text" name="birthday" id="id_birthday" /><input type="hidden" name="hidden_text" id="id_hidden_text" /></li>""")
self.assertHTMLEqual(p.as_p(), """<p><label for="id_first_name">First name:</label> <input type="text" name="first_name" id="id_first_name" /></p>
<p><label for="id_last_name">Last name:</label> <input type="text" name="last_name" id="id_last_name" /></p>
<p><label for="id_birthday">Birthday:</label> <input type="text" name="birthday" id="id_birthday" /><input type="hidden" name="hidden_text" id="id_hidden_text" /></p>""")
# If a field with a HiddenInput has errors, the as_table() and as_ul() output
# will include the error message(s) with the text "(Hidden field [fieldname]) "
# prepended. This message is displayed at the top of the output, regardless of
# its field's order in the form.
p = Person({'first_name': 'John', 'last_name': 'Lennon', 'birthday': '1940-10-9'}, auto_id=False)
self.assertHTMLEqual(p.as_table(), """<tr><td colspan="2"><ul class="errorlist nonfield"><li>(Hidden field hidden_text) This field is required.</li></ul></td></tr>
<tr><th>First name:</th><td><input type="text" name="first_name" value="John" /></td></tr>
<tr><th>Last name:</th><td><input type="text" name="last_name" value="Lennon" /></td></tr>
<tr><th>Birthday:</th><td><input type="text" name="birthday" value="1940-10-9" /><input type="hidden" name="hidden_text" /></td></tr>""")
self.assertHTMLEqual(p.as_ul(), """<li><ul class="errorlist nonfield"><li>(Hidden field hidden_text) This field is required.</li></ul></li>
<li>First name: <input type="text" name="first_name" value="John" /></li>
<li>Last name: <input type="text" name="last_name" value="Lennon" /></li>
<li>Birthday: <input type="text" name="birthday" value="1940-10-9" /><input type="hidden" name="hidden_text" /></li>""")
self.assertHTMLEqual(p.as_p(), """<ul class="errorlist nonfield"><li>(Hidden field hidden_text) This field is required.</li></ul>
<p>First name: <input type="text" name="first_name" value="John" /></p>
<p>Last name: <input type="text" name="last_name" value="Lennon" /></p>
<p>Birthday: <input type="text" name="birthday" value="1940-10-9" /><input type="hidden" name="hidden_text" /></p>""")
# A corner case: It's possible for a form to have only HiddenInputs.
class TestForm(Form):
foo = CharField(widget=HiddenInput)
bar = CharField(widget=HiddenInput)
p = TestForm(auto_id=False)
self.assertHTMLEqual(p.as_table(), '<input type="hidden" name="foo" /><input type="hidden" name="bar" />')
self.assertHTMLEqual(p.as_ul(), '<input type="hidden" name="foo" /><input type="hidden" name="bar" />')
self.assertHTMLEqual(p.as_p(), '<input type="hidden" name="foo" /><input type="hidden" name="bar" />')
def test_field_order(self):
# A Form's fields are displayed in the same order in which they were defined.
class TestForm(Form):
field1 = CharField()
field2 = CharField()
field3 = CharField()
field4 = CharField()
field5 = CharField()
field6 = CharField()
field7 = CharField()
field8 = CharField()
field9 = CharField()
field10 = CharField()
field11 = CharField()
field12 = CharField()
field13 = CharField()
field14 = CharField()
p = TestForm(auto_id=False)
self.assertHTMLEqual(p.as_table(), """<tr><th>Field1:</th><td><input type="text" name="field1" /></td></tr>
<tr><th>Field2:</th><td><input type="text" name="field2" /></td></tr>
<tr><th>Field3:</th><td><input type="text" name="field3" /></td></tr>
<tr><th>Field4:</th><td><input type="text" name="field4" /></td></tr>
<tr><th>Field5:</th><td><input type="text" name="field5" /></td></tr>
<tr><th>Field6:</th><td><input type="text" name="field6" /></td></tr>
<tr><th>Field7:</th><td><input type="text" name="field7" /></td></tr>
<tr><th>Field8:</th><td><input type="text" name="field8" /></td></tr>
<tr><th>Field9:</th><td><input type="text" name="field9" /></td></tr>
<tr><th>Field10:</th><td><input type="text" name="field10" /></td></tr>
<tr><th>Field11:</th><td><input type="text" name="field11" /></td></tr>
<tr><th>Field12:</th><td><input type="text" name="field12" /></td></tr>
<tr><th>Field13:</th><td><input type="text" name="field13" /></td></tr>
<tr><th>Field14:</th><td><input type="text" name="field14" /></td></tr>""")
def test_form_html_attributes(self):
# Some Field classes have an effect on the HTML attributes of their associated
# Widget. If you set max_length in a CharField and its associated widget is
# either a TextInput or PasswordInput, then the widget's rendered HTML will
# include the "maxlength" attribute.
class UserRegistration(Form):
username = CharField(max_length=10) # uses TextInput by default
password = CharField(max_length=10, widget=PasswordInput)
realname = CharField(max_length=10, widget=TextInput) # redundantly define widget, just to test
address = CharField() # no max_length defined here
p = UserRegistration(auto_id=False)
self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" maxlength="10" /></li>
<li>Password: <input type="password" name="password" maxlength="10" /></li>
<li>Realname: <input type="text" name="realname" maxlength="10" /></li>
<li>Address: <input type="text" name="address" /></li>""")
# If you specify a custom "attrs" that includes the "maxlength" attribute,
# the Field's max_length attribute will override whatever "maxlength" you specify
# in "attrs".
class UserRegistration(Form):
username = CharField(max_length=10, widget=TextInput(attrs={'maxlength': 20}))
password = CharField(max_length=10, widget=PasswordInput)
p = UserRegistration(auto_id=False)
self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" maxlength="10" /></li>
<li>Password: <input type="password" name="password" maxlength="10" /></li>""")
def test_specifying_labels(self):
# You can specify the label for a field by using the 'label' argument to a Field
# class. If you don't specify 'label', Django will use the field name with
# underscores converted to spaces, and the initial letter capitalized.
class UserRegistration(Form):
username = CharField(max_length=10, label='Your username')
password1 = CharField(widget=PasswordInput)
password2 = CharField(widget=PasswordInput, label='Contraseña (de nuevo)')
p = UserRegistration(auto_id=False)
self.assertHTMLEqual(p.as_ul(), """<li>Your username: <input type="text" name="username" maxlength="10" /></li>
<li>Password1: <input type="password" name="password1" /></li>
<li>Contraseña (de nuevo): <input type="password" name="password2" /></li>""")
# Labels for as_* methods will only end in a colon if they don't end in other
# punctuation already.
class Questions(Form):
q1 = CharField(label='The first question')
q2 = CharField(label='What is your name?')
q3 = CharField(label='The answer to life is:')
q4 = CharField(label='Answer this question!')
q5 = CharField(label='The last question. Period.')
self.assertHTMLEqual(Questions(auto_id=False).as_p(), """<p>The first question: <input type="text" name="q1" /></p>
<p>What is your name? <input type="text" name="q2" /></p>
<p>The answer to life is: <input type="text" name="q3" /></p>
<p>Answer this question! <input type="text" name="q4" /></p>
<p>The last question. Period. <input type="text" name="q5" /></p>""")
self.assertHTMLEqual(Questions().as_p(), """<p><label for="id_q1">The first question:</label> <input type="text" name="q1" id="id_q1" /></p>
<p><label for="id_q2">What is your name?</label> <input type="text" name="q2" id="id_q2" /></p>
<p><label for="id_q3">The answer to life is:</label> <input type="text" name="q3" id="id_q3" /></p>
<p><label for="id_q4">Answer this question!</label> <input type="text" name="q4" id="id_q4" /></p>
<p><label for="id_q5">The last question. Period.</label> <input type="text" name="q5" id="id_q5" /></p>""")
# If a label is set to the empty string for a field, that field won't get a label.
class UserRegistration(Form):
username = CharField(max_length=10, label='')
password = CharField(widget=PasswordInput)
p = UserRegistration(auto_id=False)
self.assertHTMLEqual(p.as_ul(), """<li> <input type="text" name="username" maxlength="10" /></li>
<li>Password: <input type="password" name="password" /></li>""")
p = UserRegistration(auto_id='id_%s')
self.assertHTMLEqual(p.as_ul(), """<li> <input id="id_username" type="text" name="username" maxlength="10" /></li>
<li><label for="id_password">Password:</label> <input type="password" name="password" id="id_password" /></li>""")
# If label is None, Django will auto-create the label from the field name. This
# is the default behavior.
class UserRegistration(Form):
username = CharField(max_length=10, label=None)
password = CharField(widget=PasswordInput)
p = UserRegistration(auto_id=False)
self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" maxlength="10" /></li>
<li>Password: <input type="password" name="password" /></li>""")
p = UserRegistration(auto_id='id_%s')
self.assertHTMLEqual(p.as_ul(), """<li><label for="id_username">Username:</label> <input id="id_username" type="text" name="username" maxlength="10" /></li>
<li><label for="id_password">Password:</label> <input type="password" name="password" id="id_password" /></li>""")
def test_label_suffix(self):
# You can specify the 'label_suffix' argument to a Form class to modify the
# punctuation symbol used at the end of a label. By default, the colon (:) is
# used, and is only appended to the label if the label doesn't already end with a
# punctuation symbol: ., !, ? or :. If you specify a different suffix, it will
# be appended regardless of the last character of the label.
class FavoriteForm(Form):
color = CharField(label='Favorite color?')
animal = CharField(label='Favorite animal')
answer = CharField(label='Secret answer', label_suffix=' =')
f = FavoriteForm(auto_id=False)
self.assertHTMLEqual(f.as_ul(), """<li>Favorite color? <input type="text" name="color" /></li>
<li>Favorite animal: <input type="text" name="animal" /></li>
<li>Secret answer = <input type="text" name="answer" /></li>""")
f = FavoriteForm(auto_id=False, label_suffix='?')
self.assertHTMLEqual(f.as_ul(), """<li>Favorite color? <input type="text" name="color" /></li>
<li>Favorite animal? <input type="text" name="animal" /></li>
<li>Secret answer = <input type="text" name="answer" /></li>""")
f = FavoriteForm(auto_id=False, label_suffix='')
self.assertHTMLEqual(f.as_ul(), """<li>Favorite color? <input type="text" name="color" /></li>
<li>Favorite animal <input type="text" name="animal" /></li>
<li>Secret answer = <input type="text" name="answer" /></li>""")
f = FavoriteForm(auto_id=False, label_suffix='\u2192')
self.assertHTMLEqual(f.as_ul(), '<li>Favorite color? <input type="text" name="color" /></li>\n<li>Favorite animal\u2192 <input type="text" name="animal" /></li>\n<li>Secret answer = <input type="text" name="answer" /></li>')
def test_initial_data(self):
# You can specify initial data for a field by using the 'initial' argument to a
# Field class. This initial data is displayed when a Form is rendered with *no*
# data. It is not displayed when a Form is rendered with any data (including an
# empty dictionary). Also, the initial value is *not* used if data for a
# particular required field isn't provided.
class UserRegistration(Form):
username = CharField(max_length=10, initial='django')
password = CharField(widget=PasswordInput)
# Here, we're not submitting any data, so the initial value will be displayed.
p = UserRegistration(auto_id=False)
self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" value="django" maxlength="10" /></li>
<li>Password: <input type="password" name="password" /></li>""")
# Here, we're submitting data, so the initial value will *not* be displayed.
p = UserRegistration({}, auto_id=False)
self.assertHTMLEqual(p.as_ul(), """<li><ul class="errorlist"><li>This field is required.</li></ul>Username: <input type="text" name="username" maxlength="10" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>Password: <input type="password" name="password" /></li>""")
p = UserRegistration({'username': ''}, auto_id=False)
self.assertHTMLEqual(p.as_ul(), """<li><ul class="errorlist"><li>This field is required.</li></ul>Username: <input type="text" name="username" maxlength="10" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>Password: <input type="password" name="password" /></li>""")
p = UserRegistration({'username': 'foo'}, auto_id=False)
self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" value="foo" maxlength="10" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>Password: <input type="password" name="password" /></li>""")
# An 'initial' value is *not* used as a fallback if data is not provided. In this
# example, we don't provide a value for 'username', and the form raises a
# validation error rather than using the initial value for 'username'.
p = UserRegistration({'password': 'secret'})
self.assertEqual(p.errors['username'], ['This field is required.'])
self.assertFalse(p.is_valid())
def test_dynamic_initial_data(self):
# The previous technique dealt with "hard-coded" initial data, but it's also
# possible to specify initial data after you've already created the Form class
# (i.e., at runtime). Use the 'initial' parameter to the Form constructor. This
# should be a dictionary containing initial values for one or more fields in the
# form, keyed by field name.
class UserRegistration(Form):
username = CharField(max_length=10)
password = CharField(widget=PasswordInput)
# Here, we're not submitting any data, so the initial value will be displayed.
p = UserRegistration(initial={'username': 'django'}, auto_id=False)
self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" value="django" maxlength="10" /></li>
<li>Password: <input type="password" name="password" /></li>""")
p = UserRegistration(initial={'username': 'stephane'}, auto_id=False)
self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" value="stephane" maxlength="10" /></li>
<li>Password: <input type="password" name="password" /></li>""")
# The 'initial' parameter is meaningless if you pass data.
p = UserRegistration({}, initial={'username': 'django'}, auto_id=False)
self.assertHTMLEqual(p.as_ul(), """<li><ul class="errorlist"><li>This field is required.</li></ul>Username: <input type="text" name="username" maxlength="10" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>Password: <input type="password" name="password" /></li>""")
p = UserRegistration({'username': ''}, initial={'username': 'django'}, auto_id=False)
self.assertHTMLEqual(p.as_ul(), """<li><ul class="errorlist"><li>This field is required.</li></ul>Username: <input type="text" name="username" maxlength="10" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>Password: <input type="password" name="password" /></li>""")
p = UserRegistration({'username': 'foo'}, initial={'username': 'django'}, auto_id=False)
self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" value="foo" maxlength="10" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>Password: <input type="password" name="password" /></li>""")
# A dynamic 'initial' value is *not* used as a fallback if data is not provided.
# In this example, we don't provide a value for 'username', and the form raises a
# validation error rather than using the initial value for 'username'.
p = UserRegistration({'password': 'secret'}, initial={'username': 'django'})
self.assertEqual(p.errors['username'], ['This field is required.'])
self.assertFalse(p.is_valid())
# If a Form defines 'initial' *and* 'initial' is passed as a parameter to Form(),
# then the latter will get precedence.
class UserRegistration(Form):
username = CharField(max_length=10, initial='django')
password = CharField(widget=PasswordInput)
p = UserRegistration(initial={'username': 'babik'}, auto_id=False)
self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" value="babik" maxlength="10" /></li>
<li>Password: <input type="password" name="password" /></li>""")
def test_callable_initial_data(self):
# The previous technique dealt with raw values as initial data, but it's also
# possible to specify callable data.
class UserRegistration(Form):
username = CharField(max_length=10)
password = CharField(widget=PasswordInput)
options = MultipleChoiceField(choices=[('f', 'foo'), ('b', 'bar'), ('w', 'whiz')])
# We need to define functions that get called later.
def initial_django():
return 'django'
def initial_stephane():
return 'stephane'
def initial_options():
return ['f', 'b']
def initial_other_options():
return ['b', 'w']
# Here, we're not submitting any data, so the initial value will be displayed.
p = UserRegistration(initial={'username': initial_django, 'options': initial_options}, auto_id=False)
self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" value="django" maxlength="10" /></li>
<li>Password: <input type="password" name="password" /></li>
<li>Options: <select multiple="multiple" name="options">
<option value="f" selected="selected">foo</option>
<option value="b" selected="selected">bar</option>
<option value="w">whiz</option>
</select></li>""")
# The 'initial' parameter is meaningless if you pass data.
p = UserRegistration({}, initial={'username': initial_django, 'options': initial_options}, auto_id=False)
self.assertHTMLEqual(p.as_ul(), """<li><ul class="errorlist"><li>This field is required.</li></ul>Username: <input type="text" name="username" maxlength="10" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>Password: <input type="password" name="password" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>Options: <select multiple="multiple" name="options">
<option value="f">foo</option>
<option value="b">bar</option>
<option value="w">whiz</option>
</select></li>""")
p = UserRegistration({'username': ''}, initial={'username': initial_django}, auto_id=False)
self.assertHTMLEqual(p.as_ul(), """<li><ul class="errorlist"><li>This field is required.</li></ul>Username: <input type="text" name="username" maxlength="10" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>Password: <input type="password" name="password" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>Options: <select multiple="multiple" name="options">
<option value="f">foo</option>
<option value="b">bar</option>
<option value="w">whiz</option>
</select></li>""")
p = UserRegistration({'username': 'foo', 'options': ['f', 'b']}, initial={'username': initial_django}, auto_id=False)
self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" value="foo" maxlength="10" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>Password: <input type="password" name="password" /></li>
<li>Options: <select multiple="multiple" name="options">
<option value="f" selected="selected">foo</option>
<option value="b" selected="selected">bar</option>
<option value="w">whiz</option>
</select></li>""")
# A callable 'initial' value is *not* used as a fallback if data is not provided.
# In this example, we don't provide a value for 'username', and the form raises a
# validation error rather than using the initial value for 'username'.
p = UserRegistration({'password': 'secret'}, initial={'username': initial_django, 'options': initial_options})
self.assertEqual(p.errors['username'], ['This field is required.'])
self.assertFalse(p.is_valid())
# If a Form defines 'initial' *and* 'initial' is passed as a parameter to Form(),
# then the latter will get precedence.
class UserRegistration(Form):
username = CharField(max_length=10, initial=initial_django)
password = CharField(widget=PasswordInput)
options = MultipleChoiceField(choices=[('f', 'foo'), ('b', 'bar'), ('w', 'whiz')], initial=initial_other_options)
p = UserRegistration(auto_id=False)
self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" value="django" maxlength="10" /></li>
<li>Password: <input type="password" name="password" /></li>
<li>Options: <select multiple="multiple" name="options">
<option value="f">foo</option>
<option value="b" selected="selected">bar</option>
<option value="w" selected="selected">whiz</option>
</select></li>""")
p = UserRegistration(initial={'username': initial_stephane, 'options': initial_options}, auto_id=False)
self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" value="stephane" maxlength="10" /></li>
<li>Password: <input type="password" name="password" /></li>
<li>Options: <select multiple="multiple" name="options">
<option value="f" selected="selected">foo</option>
<option value="b" selected="selected">bar</option>
<option value="w">whiz</option>
</select></li>""")
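# Aside: callable initial values are invoked lazily, when a BoundField's
# value is computed for rendering, not when the form class is defined.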
def test_changed_data(self):
class Person(Form):
first_name = CharField(initial='Hans')
last_name = CharField(initial='Greatel')
birthday = DateField(initial=datetime.date(1974, 8, 16))
p = Person(data={'first_name': 'Hans', 'last_name': 'Scrmbl',
'birthday': '1974-08-16'})
self.assertTrue(p.is_valid())
self.assertNotIn('first_name', p.changed_data)
self.assertIn('last_name', p.changed_data)
self.assertNotIn('birthday', p.changed_data)
# Test that a field raising ValidationError is always present in changed_data.
class PedanticField(forms.Field):
def to_python(self, value):
raise ValidationError('Whatever')
class Person2(Person):
pedantic = PedanticField(initial='whatever', show_hidden_initial=True)
p = Person2(data={'first_name': 'Hans', 'last_name': 'Scrmbl',
'birthday': '1974-08-16', 'initial-pedantic': 'whatever'})
self.assertFalse(p.is_valid())
self.assertIn('pedantic', p.changed_data)
def test_boundfield_values(self):
# It's possible to get the value that would be used for rendering a field's
# widget by using the BoundField's value() method.
class UserRegistration(Form):
username = CharField(max_length=10, initial='djangonaut')
password = CharField(widget=PasswordInput)
unbound = UserRegistration()
bound = UserRegistration({'password': 'foo'})
self.assertEqual(bound['username'].value(), None)
self.assertEqual(unbound['username'].value(), 'djangonaut')
self.assertEqual(bound['password'].value(), 'foo')
self.assertEqual(unbound['password'].value(), None)
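# Aside: the same method backs template access; {{ form.username.value }}
# renders 'djangonaut' for the unbound form above.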
def test_initial_datetime_values(self):
now = datetime.datetime.now()
# Nix microseconds (since they should be ignored). Refs #22502.
now_no_ms = now.replace(microsecond=0)
if now == now_no_ms:
now = now.replace(microsecond=1)
def delayed_now():
return now
def delayed_now_time():
return now.time()
class DateTimeForm(Form):
auto_timestamp = DateTimeField(initial=delayed_now)
auto_time_only = TimeField(initial=delayed_now_time)
supports_microseconds = DateTimeField(initial=delayed_now, widget=TextInput)
unbound = DateTimeForm()
self.assertEqual(unbound['auto_timestamp'].value(), now_no_ms)
self.assertEqual(unbound['auto_time_only'].value(), now_no_ms.time())
self.assertEqual(unbound['supports_microseconds'].value(), now)
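# Aside (per the fix for #22502): the date/time widgets declare
# supports_microseconds = False, so value() strips microseconds, while
# TextInput leaves them intact, hence the third field round-trips them.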
def test_help_text(self):
# You can specify descriptive text for a field by using the 'help_text' argument.
class UserRegistration(Form):
username = CharField(max_length=10, help_text='e.g., [email protected]')
password = CharField(widget=PasswordInput, help_text='Wählen Sie mit Bedacht.')
p = UserRegistration(auto_id=False)
self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" maxlength="10" /> <span class="helptext">e.g., [email protected]</span></li>
<li>Password: <input type="password" name="password" /> <span class="helptext">Wählen Sie mit Bedacht.</span></li>""")
self.assertHTMLEqual(p.as_p(), """<p>Username: <input type="text" name="username" maxlength="10" /> <span class="helptext">e.g., [email protected]</span></p>
<p>Password: <input type="password" name="password" /> <span class="helptext">Wählen Sie mit Bedacht.</span></p>""")
self.assertHTMLEqual(p.as_table(), """<tr><th>Username:</th><td><input type="text" name="username" maxlength="10" /><br /><span class="helptext">e.g., [email protected]</span></td></tr>
<tr><th>Password:</th><td><input type="password" name="password" /><br /><span class="helptext">Wählen Sie mit Bedacht.</span></td></tr>""")
# The help text is displayed whether or not data is provided for the form.
p = UserRegistration({'username': 'foo'}, auto_id=False)
self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" value="foo" maxlength="10" /> <span class="helptext">e.g., [email protected]</span></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>Password: <input type="password" name="password" /> <span class="helptext">Wählen Sie mit Bedacht.</span></li>""")
# help_text is not displayed for hidden fields. It can be used for documentation
# purposes, though.
class UserRegistration(Form):
username = CharField(max_length=10, help_text='e.g., [email protected]')
password = CharField(widget=PasswordInput)
next = CharField(widget=HiddenInput, initial='/', help_text='Redirect destination')
p = UserRegistration(auto_id=False)
self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" maxlength="10" /> <span class="helptext">e.g., [email protected]</span></li>
<li>Password: <input type="password" name="password" /><input type="hidden" name="next" value="/" /></li>""")
def test_subclassing_forms(self):
# You can subclass a Form to add fields. The resulting form subclass will have
# all of the fields of the parent Form, plus whichever fields you define in the
# subclass.
class Person(Form):
first_name = CharField()
last_name = CharField()
birthday = DateField()
class Musician(Person):
instrument = CharField()
p = Person(auto_id=False)
self.assertHTMLEqual(p.as_ul(), """<li>First name: <input type="text" name="first_name" /></li>
<li>Last name: <input type="text" name="last_name" /></li>
<li>Birthday: <input type="text" name="birthday" /></li>""")
m = Musician(auto_id=False)
self.assertHTMLEqual(m.as_ul(), """<li>First name: <input type="text" name="first_name" /></li>
<li>Last name: <input type="text" name="last_name" /></li>
<li>Birthday: <input type="text" name="birthday" /></li>
<li>Instrument: <input type="text" name="instrument" /></li>""")
# Yes, you can subclass multiple forms. Fields from the parent classes are
# gathered in reverse MRO order, so the fields of the last-listed base come
# first: Instrument's field precedes Person's in the output below.
class Person(Form):
first_name = CharField()
last_name = CharField()
birthday = DateField()
class Instrument(Form):
instrument = CharField()
class Beatle(Person, Instrument):
haircut_type = CharField()
b = Beatle(auto_id=False)
self.assertHTMLEqual(b.as_ul(), """<li>Instrument: <input type="text" name="instrument" /></li>
<li>First name: <input type="text" name="first_name" /></li>
<li>Last name: <input type="text" name="last_name" /></li>
<li>Birthday: <input type="text" name="birthday" /></li>
<li>Haircut type: <input type="text" name="haircut_type" /></li>""")
def test_forms_with_prefixes(self):
# Sometimes it's necessary to have multiple forms display on the same HTML page,
# or multiple copies of the same form. We can accomplish this with form prefixes.
# Pass the keyword argument 'prefix' to the Form constructor to use this feature.
# This value will be prepended to each HTML form field name. One way to think
# about this is "namespaces for HTML forms". Notice that in the data argument,
# each field's key has the prefix, in this case 'person1', prepended to the
# actual field name.
class Person(Form):
first_name = CharField()
last_name = CharField()
birthday = DateField()
data = {
'person1-first_name': 'John',
'person1-last_name': 'Lennon',
'person1-birthday': '1940-10-9'
}
p = Person(data, prefix='person1')
self.assertHTMLEqual(p.as_ul(), """<li><label for="id_person1-first_name">First name:</label> <input type="text" name="person1-first_name" value="John" id="id_person1-first_name" /></li>
<li><label for="id_person1-last_name">Last name:</label> <input type="text" name="person1-last_name" value="Lennon" id="id_person1-last_name" /></li>
<li><label for="id_person1-birthday">Birthday:</label> <input type="text" name="person1-birthday" value="1940-10-9" id="id_person1-birthday" /></li>""")
self.assertHTMLEqual(str(p['first_name']), '<input type="text" name="person1-first_name" value="John" id="id_person1-first_name" />')
self.assertHTMLEqual(str(p['last_name']), '<input type="text" name="person1-last_name" value="Lennon" id="id_person1-last_name" />')
self.assertHTMLEqual(str(p['birthday']), '<input type="text" name="person1-birthday" value="1940-10-9" id="id_person1-birthday" />')
self.assertEqual(p.errors, {})
self.assertTrue(p.is_valid())
self.assertEqual(p.cleaned_data['first_name'], 'John')
self.assertEqual(p.cleaned_data['last_name'], 'Lennon')
self.assertEqual(p.cleaned_data['birthday'], datetime.date(1940, 10, 9))
# Let's try submitting some bad data to make sure form.errors and field.errors
# work as expected.
data = {
'person1-first_name': '',
'person1-last_name': '',
'person1-birthday': ''
}
p = Person(data, prefix='person1')
self.assertEqual(p.errors['first_name'], ['This field is required.'])
self.assertEqual(p.errors['last_name'], ['This field is required.'])
self.assertEqual(p.errors['birthday'], ['This field is required.'])
self.assertEqual(p['first_name'].errors, ['This field is required.'])
# Attempts to access non-existent fields should fail.
with self.assertRaises(KeyError):
    p['person1-first_name'].errors
# In this example, the data doesn't have a prefix, but the form requires it, so
# the form doesn't "see" the fields.
data = {
'first_name': 'John',
'last_name': 'Lennon',
'birthday': '1940-10-9'
}
p = Person(data, prefix='person1')
self.assertEqual(p.errors['first_name'], ['This field is required.'])
self.assertEqual(p.errors['last_name'], ['This field is required.'])
self.assertEqual(p.errors['birthday'], ['This field is required.'])
# With prefixes, a single data dictionary can hold data for multiple instances
# of the same form.
data = {
'person1-first_name': 'John',
'person1-last_name': 'Lennon',
'person1-birthday': '1940-10-9',
'person2-first_name': 'Jim',
'person2-last_name': 'Morrison',
'person2-birthday': '1943-12-8'
}
p1 = Person(data, prefix='person1')
self.assertTrue(p1.is_valid())
self.assertEqual(p1.cleaned_data['first_name'], 'John')
self.assertEqual(p1.cleaned_data['last_name'], 'Lennon')
self.assertEqual(p1.cleaned_data['birthday'], datetime.date(1940, 10, 9))
p2 = Person(data, prefix='person2')
self.assertTrue(p2.is_valid())
self.assertEqual(p2.cleaned_data['first_name'], 'Jim')
self.assertEqual(p2.cleaned_data['last_name'], 'Morrison')
self.assertEqual(p2.cleaned_data['birthday'], datetime.date(1943, 12, 8))
# By default, forms append a hyphen between the prefix and the field name, but a
# form can alter that behavior by implementing the add_prefix() method. This
# method takes a field name and returns the prefixed field, according to
# self.prefix.
class Person(Form):
first_name = CharField()
last_name = CharField()
birthday = DateField()
def add_prefix(self, field_name):
return '%s-prefix-%s' % (self.prefix, field_name) if self.prefix else field_name
p = Person(prefix='foo')
self.assertHTMLEqual(p.as_ul(), """<li><label for="id_foo-prefix-first_name">First name:</label> <input type="text" name="foo-prefix-first_name" id="id_foo-prefix-first_name" /></li>
<li><label for="id_foo-prefix-last_name">Last name:</label> <input type="text" name="foo-prefix-last_name" id="id_foo-prefix-last_name" /></li>
<li><label for="id_foo-prefix-birthday">Birthday:</label> <input type="text" name="foo-prefix-birthday" id="id_foo-prefix-birthday" /></li>""")
data = {
'foo-prefix-first_name': 'John',
'foo-prefix-last_name': 'Lennon',
'foo-prefix-birthday': '1940-10-9'
}
p = Person(data, prefix='foo')
self.assertTrue(p.is_valid())
self.assertEqual(p.cleaned_data['first_name'], 'John')
self.assertEqual(p.cleaned_data['last_name'], 'Lennon')
self.assertEqual(p.cleaned_data['birthday'], datetime.date(1940, 10, 9))
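# Hedged sketch (form/variable names are illustrative): in a view, prefixes
# let one POST payload feed several forms, e.g.
#     mother = PersonForm(request.POST, prefix='mother')
#     father = PersonForm(request.POST, prefix='father')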
def test_forms_with_null_boolean(self):
# NullBooleanField is a bit of a special case because its presentation (widget)
# is different from its data. This is handled transparently, though.
class Person(Form):
name = CharField()
is_cool = NullBooleanField()
p = Person({'name': 'Joe'}, auto_id=False)
self.assertHTMLEqual(str(p['is_cool']), """<select name="is_cool">
<option value="1" selected="selected">Unknown</option>
<option value="2">Yes</option>
<option value="3">No</option>
</select>""")
p = Person({'name': 'Joe', 'is_cool': '1'}, auto_id=False)
self.assertHTMLEqual(str(p['is_cool']), """<select name="is_cool">
<option value="1" selected="selected">Unknown</option>
<option value="2">Yes</option>
<option value="3">No</option>
</select>""")
p = Person({'name': 'Joe', 'is_cool': '2'}, auto_id=False)
self.assertHTMLEqual(str(p['is_cool']), """<select name="is_cool">
<option value="1">Unknown</option>
<option value="2" selected="selected">Yes</option>
<option value="3">No</option>
</select>""")
p = Person({'name': 'Joe', 'is_cool': '3'}, auto_id=False)
self.assertHTMLEqual(str(p['is_cool']), """<select name="is_cool">
<option value="1">Unknown</option>
<option value="2">Yes</option>
<option value="3" selected="selected">No</option>
</select>""")
p = Person({'name': 'Joe', 'is_cool': True}, auto_id=False)
self.assertHTMLEqual(str(p['is_cool']), """<select name="is_cool">
<option value="1">Unknown</option>
<option value="2" selected="selected">Yes</option>
<option value="3">No</option>
</select>""")
p = Person({'name': 'Joe', 'is_cool': False}, auto_id=False)
self.assertHTMLEqual(str(p['is_cool']), """<select name="is_cool">
<option value="1">Unknown</option>
<option value="2">Yes</option>
<option value="3" selected="selected">No</option>
</select>""")
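# Aside: NullBooleanSelect maps the submitted values '1', '2' and '3' to
# None, True and False respectively, which is the translation shown above.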
def test_forms_with_file_fields(self):
# FileFields are a special case because they take their data from
# request.FILES, not request.POST.
class FileForm(Form):
file1 = FileField()
f = FileForm(auto_id=False)
self.assertHTMLEqual(f.as_table(), '<tr><th>File1:</th><td><input type="file" name="file1" /></td></tr>')
f = FileForm(data={}, files={}, auto_id=False)
self.assertHTMLEqual(f.as_table(), '<tr><th>File1:</th><td><ul class="errorlist"><li>This field is required.</li></ul><input type="file" name="file1" /></td></tr>')
f = FileForm(data={}, files={'file1': SimpleUploadedFile('name', b'')}, auto_id=False)
self.assertHTMLEqual(f.as_table(), '<tr><th>File1:</th><td><ul class="errorlist"><li>The submitted file is empty.</li></ul><input type="file" name="file1" /></td></tr>')
f = FileForm(data={}, files={'file1': 'something that is not a file'}, auto_id=False)
self.assertHTMLEqual(f.as_table(), '<tr><th>File1:</th><td><ul class="errorlist"><li>No file was submitted. Check the encoding type on the form.</li></ul><input type="file" name="file1" /></td></tr>')
f = FileForm(data={}, files={'file1': SimpleUploadedFile('name', b'some content')}, auto_id=False)
self.assertHTMLEqual(f.as_table(), '<tr><th>File1:</th><td><input type="file" name="file1" /></td></tr>')
self.assertTrue(f.is_valid())
f = FileForm(data={}, files={'file1': SimpleUploadedFile('我隻氣墊船裝滿晒鱔.txt', 'मेरी मँडराने वाली नाव सर्पमीनों से भरी ह'.encode('utf-8'))}, auto_id=False)
self.assertHTMLEqual(f.as_table(), '<tr><th>File1:</th><td><input type="file" name="file1" /></td></tr>')
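# Aside: in a view the two dicts come straight from the request, so binding a
# form with uploads looks like FileForm(request.POST, request.FILES).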
def test_basic_processing_in_view(self):
class UserRegistration(Form):
username = CharField(max_length=10)
password1 = CharField(widget=PasswordInput)
password2 = CharField(widget=PasswordInput)
def clean(self):
if self.cleaned_data.get('password1') and self.cleaned_data.get('password2') and self.cleaned_data['password1'] != self.cleaned_data['password2']:
raise ValidationError('Please make sure your passwords match.')
return self.cleaned_data
def my_function(method, post_data):
if method == 'POST':
form = UserRegistration(post_data, auto_id=False)
else:
form = UserRegistration(auto_id=False)
if form.is_valid():
return 'VALID: %r' % sorted(six.iteritems(form.cleaned_data))
t = Template('<form action="" method="post">\n<table>\n{{ form }}\n</table>\n<input type="submit" />\n</form>')
return t.render(Context({'form': form}))
# Case 1: GET (an empty form, with no errors).
self.assertHTMLEqual(my_function('GET', {}), """<form action="" method="post">
<table>
<tr><th>Username:</th><td><input type="text" name="username" maxlength="10" /></td></tr>
<tr><th>Password1:</th><td><input type="password" name="password1" /></td></tr>
<tr><th>Password2:</th><td><input type="password" name="password2" /></td></tr>
</table>
<input type="submit" />
</form>""")
# Case 2: POST with erroneous data (a redisplayed form, with errors).
self.assertHTMLEqual(my_function('POST', {'username': 'this-is-a-long-username', 'password1': 'foo', 'password2': 'bar'}), """<form action="" method="post">
<table>
<tr><td colspan="2"><ul class="errorlist nonfield"><li>Please make sure your passwords match.</li></ul></td></tr>
<tr><th>Username:</th><td><ul class="errorlist"><li>Ensure this value has at most 10 characters (it has 23).</li></ul><input type="text" name="username" value="this-is-a-long-username" maxlength="10" /></td></tr>
<tr><th>Password1:</th><td><input type="password" name="password1" /></td></tr>
<tr><th>Password2:</th><td><input type="password" name="password2" /></td></tr>
</table>
<input type="submit" />
</form>""")
# Case 3: POST with valid data (the success message).
self.assertEqual(my_function('POST', {'username': 'adrian', 'password1': 'secret', 'password2': 'secret'}),
str_prefix("VALID: [('password1', %(_)s'secret'), ('password2', %(_)s'secret'), ('username', %(_)s'adrian')]"))
def test_templates_with_forms(self):
class UserRegistration(Form):
username = CharField(max_length=10, help_text="Good luck picking a username that doesn't already exist.")
password1 = CharField(widget=PasswordInput)
password2 = CharField(widget=PasswordInput)
def clean(self):
if self.cleaned_data.get('password1') and self.cleaned_data.get('password2') and self.cleaned_data['password1'] != self.cleaned_data['password2']:
raise ValidationError('Please make sure your passwords match.')
return self.cleaned_data
# You have full flexibility in displaying form fields in a template. Just pass a
# Form instance to the template, and use "dot" access to refer to individual
# fields. Note, however, that this flexibility comes with the responsibility of
# displaying all the errors, including any that might not be associated with a
# particular field.
t = Template('''<form action="">
{{ form.username.errors.as_ul }}<p><label>Your username: {{ form.username }}</label></p>
{{ form.password1.errors.as_ul }}<p><label>Password: {{ form.password1 }}</label></p>
{{ form.password2.errors.as_ul }}<p><label>Password (again): {{ form.password2 }}</label></p>
<input type="submit" />
</form>''')
self.assertHTMLEqual(t.render(Context({'form': UserRegistration(auto_id=False)})), """<form action="">
<p><label>Your username: <input type="text" name="username" maxlength="10" /></label></p>
<p><label>Password: <input type="password" name="password1" /></label></p>
<p><label>Password (again): <input type="password" name="password2" /></label></p>
<input type="submit" />
</form>""")
self.assertHTMLEqual(t.render(Context({'form': UserRegistration({'username': 'django'}, auto_id=False)})), """<form action="">
<p><label>Your username: <input type="text" name="username" value="django" maxlength="10" /></label></p>
<ul class="errorlist"><li>This field is required.</li></ul><p><label>Password: <input type="password" name="password1" /></label></p>
<ul class="errorlist"><li>This field is required.</li></ul><p><label>Password (again): <input type="password" name="password2" /></label></p>
<input type="submit" />
</form>""")
# Use form.[field].label to output a field's label. You can specify the label for
# a field by using the 'label' argument to a Field class. If you don't specify
# 'label', Django will use the field name with underscores converted to spaces,
# and the initial letter capitalized.
t = Template('''<form action="">
<p><label>{{ form.username.label }}: {{ form.username }}</label></p>
<p><label>{{ form.password1.label }}: {{ form.password1 }}</label></p>
<p><label>{{ form.password2.label }}: {{ form.password2 }}</label></p>
<input type="submit" />
</form>''')
self.assertHTMLEqual(t.render(Context({'form': UserRegistration(auto_id=False)})), """<form action="">
<p><label>Username: <input type="text" name="username" maxlength="10" /></label></p>
<p><label>Password1: <input type="password" name="password1" /></label></p>
<p><label>Password2: <input type="password" name="password2" /></label></p>
<input type="submit" />
</form>""")
        # Use form.[field].label_tag to output a field's label with a <label> tag
# wrapped around it, but *only* if the given field has an "id" attribute.
# Recall from above that passing the "auto_id" argument to a Form gives each
# field an "id" attribute.
t = Template('''<form action="">
<p>{{ form.username.label_tag }} {{ form.username }}</p>
<p>{{ form.password1.label_tag }} {{ form.password1 }}</p>
<p>{{ form.password2.label_tag }} {{ form.password2 }}</p>
<input type="submit" />
</form>''')
self.assertHTMLEqual(t.render(Context({'form': UserRegistration(auto_id=False)})), """<form action="">
<p>Username: <input type="text" name="username" maxlength="10" /></p>
<p>Password1: <input type="password" name="password1" /></p>
<p>Password2: <input type="password" name="password2" /></p>
<input type="submit" />
</form>""")
self.assertHTMLEqual(t.render(Context({'form': UserRegistration(auto_id='id_%s')})), """<form action="">
<p><label for="id_username">Username:</label> <input id="id_username" type="text" name="username" maxlength="10" /></p>
<p><label for="id_password1">Password1:</label> <input type="password" name="password1" id="id_password1" /></p>
<p><label for="id_password2">Password2:</label> <input type="password" name="password2" id="id_password2" /></p>
<input type="submit" />
</form>""")
        # Use form.[field].help_text to output a field's help text. If the given field
# does not have help text, nothing will be output.
t = Template('''<form action="">
<p>{{ form.username.label_tag }} {{ form.username }}<br />{{ form.username.help_text }}</p>
<p>{{ form.password1.label_tag }} {{ form.password1 }}</p>
<p>{{ form.password2.label_tag }} {{ form.password2 }}</p>
<input type="submit" />
</form>''')
self.assertHTMLEqual(t.render(Context({'form': UserRegistration(auto_id=False)})), """<form action="">
<p>Username: <input type="text" name="username" maxlength="10" /><br />Good luck picking a username that doesn't already exist.</p>
<p>Password1: <input type="password" name="password1" /></p>
<p>Password2: <input type="password" name="password2" /></p>
<input type="submit" />
</form>""")
self.assertEqual(Template('{{ form.password1.help_text }}').render(Context({'form': UserRegistration(auto_id=False)})), '')
# To display the errors that aren't associated with a particular field -- e.g.,
# the errors caused by Form.clean() -- use {{ form.non_field_errors }} in the
# template. If used on its own, it is displayed as a <ul> (or an empty string, if
# the list of errors is empty). You can also use it in {% if %} statements.
t = Template('''<form action="">
{{ form.username.errors.as_ul }}<p><label>Your username: {{ form.username }}</label></p>
{{ form.password1.errors.as_ul }}<p><label>Password: {{ form.password1 }}</label></p>
{{ form.password2.errors.as_ul }}<p><label>Password (again): {{ form.password2 }}</label></p>
<input type="submit" />
</form>''')
self.assertHTMLEqual(t.render(Context({'form': UserRegistration({'username': 'django', 'password1': 'foo', 'password2': 'bar'}, auto_id=False)})), """<form action="">
<p><label>Your username: <input type="text" name="username" value="django" maxlength="10" /></label></p>
<p><label>Password: <input type="password" name="password1" /></label></p>
<p><label>Password (again): <input type="password" name="password2" /></label></p>
<input type="submit" />
</form>""")
t = Template('''<form action="">
{{ form.non_field_errors }}
{{ form.username.errors.as_ul }}<p><label>Your username: {{ form.username }}</label></p>
{{ form.password1.errors.as_ul }}<p><label>Password: {{ form.password1 }}</label></p>
{{ form.password2.errors.as_ul }}<p><label>Password (again): {{ form.password2 }}</label></p>
<input type="submit" />
</form>''')
self.assertHTMLEqual(t.render(Context({'form': UserRegistration({'username': 'django', 'password1': 'foo', 'password2': 'bar'}, auto_id=False)})), """<form action="">
<ul class="errorlist nonfield"><li>Please make sure your passwords match.</li></ul>
<p><label>Your username: <input type="text" name="username" value="django" maxlength="10" /></label></p>
<p><label>Password: <input type="password" name="password1" /></label></p>
<p><label>Password (again): <input type="password" name="password2" /></label></p>
<input type="submit" />
</form>""")
def test_empty_permitted(self):
# Sometimes (pretty much in formsets) we want to allow a form to pass validation
# if it is completely empty. We can accomplish this by using the empty_permitted
# argument to a form constructor.
class SongForm(Form):
artist = CharField()
name = CharField()
        # First let's show what happens if empty_permitted=False (the default):
data = {'artist': '', 'song': ''}
form = SongForm(data, empty_permitted=False)
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {'name': ['This field is required.'], 'artist': ['This field is required.']})
self.assertEqual(form.cleaned_data, {})
# Now let's show what happens when empty_permitted=True and the form is empty.
form = SongForm(data, empty_permitted=True)
self.assertTrue(form.is_valid())
self.assertEqual(form.errors, {})
self.assertEqual(form.cleaned_data, {})
# But if we fill in data for one of the fields, the form is no longer empty and
# the whole thing must pass validation.
data = {'artist': 'The Doors', 'song': ''}
form = SongForm(data, empty_permitted=False)
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {'name': ['This field is required.']})
self.assertEqual(form.cleaned_data, {'artist': 'The Doors'})
        # If a field is not given in the data then None is returned for its data.
        # Let's make sure that None is treated accordingly when checking for
        # empty_permitted.
data = {'artist': None, 'song': ''}
form = SongForm(data, empty_permitted=True)
self.assertTrue(form.is_valid())
# However, we *really* need to be sure we are checking for None as any data in
# initial that returns False on a boolean call needs to be treated literally.
class PriceForm(Form):
amount = FloatField()
qty = IntegerField()
data = {'amount': '0.0', 'qty': ''}
form = PriceForm(data, initial={'amount': 0.0}, empty_permitted=True)
self.assertTrue(form.is_valid())
def test_extracting_hidden_and_visible(self):
class SongForm(Form):
token = CharField(widget=HiddenInput)
artist = CharField()
name = CharField()
form = SongForm()
self.assertEqual([f.name for f in form.hidden_fields()], ['token'])
self.assertEqual([f.name for f in form.visible_fields()], ['artist', 'name'])
def test_hidden_initial_gets_id(self):
class MyForm(Form):
field1 = CharField(max_length=50, show_hidden_initial=True)
self.assertHTMLEqual(MyForm().as_table(), '<tr><th><label for="id_field1">Field1:</label></th><td><input id="id_field1" type="text" name="field1" maxlength="50" /><input type="hidden" name="initial-field1" id="initial-id_field1" /></td></tr>')
def test_error_html_required_html_classes(self):
class Person(Form):
name = CharField()
is_cool = NullBooleanField()
email = EmailField(required=False)
age = IntegerField()
p = Person({})
p.error_css_class = 'error'
p.required_css_class = 'required'
self.assertHTMLEqual(p.as_ul(), """<li class="required error"><ul class="errorlist"><li>This field is required.</li></ul><label class="required" for="id_name">Name:</label> <input type="text" name="name" id="id_name" /></li>
<li class="required"><label class="required" for="id_is_cool">Is cool:</label> <select name="is_cool" id="id_is_cool">
<option value="1" selected="selected">Unknown</option>
<option value="2">Yes</option>
<option value="3">No</option>
</select></li>
<li><label for="id_email">Email:</label> <input type="email" name="email" id="id_email" /></li>
<li class="required error"><ul class="errorlist"><li>This field is required.</li></ul><label class="required" for="id_age">Age:</label> <input type="number" name="age" id="id_age" /></li>""")
self.assertHTMLEqual(p.as_p(), """<ul class="errorlist"><li>This field is required.</li></ul>
<p class="required error"><label class="required" for="id_name">Name:</label> <input type="text" name="name" id="id_name" /></p>
<p class="required"><label class="required" for="id_is_cool">Is cool:</label> <select name="is_cool" id="id_is_cool">
<option value="1" selected="selected">Unknown</option>
<option value="2">Yes</option>
<option value="3">No</option>
</select></p>
<p><label for="id_email">Email:</label> <input type="email" name="email" id="id_email" /></p>
<ul class="errorlist"><li>This field is required.</li></ul>
<p class="required error"><label class="required" for="id_age">Age:</label> <input type="number" name="age" id="id_age" /></p>""")
self.assertHTMLEqual(p.as_table(), """<tr class="required error"><th><label class="required" for="id_name">Name:</label></th><td><ul class="errorlist"><li>This field is required.</li></ul><input type="text" name="name" id="id_name" /></td></tr>
<tr class="required"><th><label class="required" for="id_is_cool">Is cool:</label></th><td><select name="is_cool" id="id_is_cool">
<option value="1" selected="selected">Unknown</option>
<option value="2">Yes</option>
<option value="3">No</option>
</select></td></tr>
<tr><th><label for="id_email">Email:</label></th><td><input type="email" name="email" id="id_email" /></td></tr>
<tr class="required error"><th><label class="required" for="id_age">Age:</label></th><td><ul class="errorlist"><li>This field is required.</li></ul><input type="number" name="age" id="id_age" /></td></tr>""")
def test_label_has_required_css_class(self):
"""
#17922 - required_css_class is added to the label_tag() of required fields.
"""
class SomeForm(Form):
required_css_class = 'required'
field = CharField(max_length=10)
field2 = IntegerField(required=False)
f = SomeForm({'field': 'test'})
self.assertHTMLEqual(f['field'].label_tag(), '<label for="id_field" class="required">Field:</label>')
self.assertHTMLEqual(f['field'].label_tag(attrs={'class': 'foo'}),
'<label for="id_field" class="foo required">Field:</label>')
self.assertHTMLEqual(f['field2'].label_tag(), '<label for="id_field2">Field2:</label>')
def test_label_split_datetime_not_displayed(self):
class EventForm(Form):
happened_at = SplitDateTimeField(widget=widgets.SplitHiddenDateTimeWidget)
form = EventForm()
self.assertHTMLEqual(form.as_ul(), '<input type="hidden" name="happened_at_0" id="id_happened_at_0" /><input type="hidden" name="happened_at_1" id="id_happened_at_1" />')
def test_multivalue_field_validation(self):
def bad_names(value):
if value == 'bad value':
raise ValidationError('bad value not allowed')
class NameField(MultiValueField):
def __init__(self, fields=(), *args, **kwargs):
fields = (CharField(label='First name', max_length=10),
CharField(label='Last name', max_length=10))
super(NameField, self).__init__(fields=fields, *args, **kwargs)
def compress(self, data_list):
return ' '.join(data_list)
class NameForm(Form):
name = NameField(validators=[bad_names])
form = NameForm(data={'name': ['bad', 'value']})
form.full_clean()
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {'name': ['bad value not allowed']})
form = NameForm(data={'name': ['should be overly', 'long for the field names']})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {'name': ['Ensure this value has at most 10 characters (it has 16).',
'Ensure this value has at most 10 characters (it has 24).']})
form = NameForm(data={'name': ['fname', 'lname']})
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data, {'name': 'fname lname'})
def test_multivalue_deep_copy(self):
"""
#19298 -- MultiValueField needs to override the default as it needs
to deep-copy subfields:
"""
class ChoicesField(MultiValueField):
def __init__(self, fields=(), *args, **kwargs):
fields = (ChoiceField(label='Rank',
choices=((1, 1), (2, 2))),
CharField(label='Name', max_length=10))
super(ChoicesField, self).__init__(fields=fields, *args, **kwargs)
field = ChoicesField()
field2 = copy.deepcopy(field)
self.assertIsInstance(field2, ChoicesField)
self.assertFalse(id(field2.fields) == id(field.fields))
self.assertFalse(id(field2.fields[0].choices) ==
id(field.fields[0].choices))
def test_multivalue_optional_subfields(self):
class PhoneField(MultiValueField):
def __init__(self, *args, **kwargs):
fields = (
CharField(label='Country Code', validators=[
RegexValidator(r'^\+[0-9]{1,2}$', message='Enter a valid country code.')]),
CharField(label='Phone Number'),
CharField(label='Extension', error_messages={'incomplete': 'Enter an extension.'}),
CharField(label='Label', required=False, help_text='E.g. home, work.'),
)
super(PhoneField, self).__init__(fields, *args, **kwargs)
def compress(self, data_list):
if data_list:
return '%s.%s ext. %s (label: %s)' % tuple(data_list)
return None
# An empty value for any field will raise a `required` error on a
# required `MultiValueField`.
f = PhoneField()
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, [])
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, ['+61'])
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, ['+61', '287654321', '123'])
self.assertEqual('+61.287654321 ext. 123 (label: Home)', f.clean(['+61', '287654321', '123', 'Home']))
self.assertRaisesMessage(ValidationError,
"'Enter a valid country code.'", f.clean, ['61', '287654321', '123', 'Home'])
# Empty values for fields will NOT raise a `required` error on an
# optional `MultiValueField`
f = PhoneField(required=False)
self.assertEqual(None, f.clean(''))
self.assertEqual(None, f.clean(None))
self.assertEqual(None, f.clean([]))
self.assertEqual('+61. ext. (label: )', f.clean(['+61']))
self.assertEqual('+61.287654321 ext. 123 (label: )', f.clean(['+61', '287654321', '123']))
self.assertEqual('+61.287654321 ext. 123 (label: Home)', f.clean(['+61', '287654321', '123', 'Home']))
self.assertRaisesMessage(ValidationError,
"'Enter a valid country code.'", f.clean, ['61', '287654321', '123', 'Home'])
# For a required `MultiValueField` with `require_all_fields=False`, a
# `required` error will only be raised if all fields are empty. Fields
# can individually be required or optional. An empty value for any
# required field will raise an `incomplete` error.
f = PhoneField(require_all_fields=False)
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, [])
self.assertRaisesMessage(ValidationError, "'Enter a complete value.'", f.clean, ['+61'])
self.assertEqual('+61.287654321 ext. 123 (label: )', f.clean(['+61', '287654321', '123']))
six.assertRaisesRegex(self, ValidationError,
"'Enter a complete value\.', u?'Enter an extension\.'", f.clean, ['', '', '', 'Home'])
self.assertRaisesMessage(ValidationError,
"'Enter a valid country code.'", f.clean, ['61', '287654321', '123', 'Home'])
# For an optional `MultiValueField` with `require_all_fields=False`, we
# don't get any `required` error but we still get `incomplete` errors.
f = PhoneField(required=False, require_all_fields=False)
self.assertEqual(None, f.clean(''))
self.assertEqual(None, f.clean(None))
self.assertEqual(None, f.clean([]))
self.assertRaisesMessage(ValidationError, "'Enter a complete value.'", f.clean, ['+61'])
self.assertEqual('+61.287654321 ext. 123 (label: )', f.clean(['+61', '287654321', '123']))
six.assertRaisesRegex(self, ValidationError,
"'Enter a complete value\.', u?'Enter an extension\.'", f.clean, ['', '', '', 'Home'])
self.assertRaisesMessage(ValidationError,
"'Enter a valid country code.'", f.clean, ['61', '287654321', '123', 'Home'])
def test_custom_empty_values(self):
"""
Test that form fields can customize what is considered as an empty value
for themselves (#19997).
"""
class CustomJSONField(CharField):
empty_values = [None, '']
def to_python(self, value):
# Fake json.loads
if value == '{}':
return {}
return super(CustomJSONField, self).to_python(value)
class JSONForm(forms.Form):
json = CustomJSONField()
form = JSONForm(data={'json': '{}'})
form.full_clean()
self.assertEqual(form.cleaned_data, {'json': {}})
def test_boundfield_label_tag(self):
class SomeForm(Form):
field = CharField()
boundfield = SomeForm()['field']
testcases = [ # (args, kwargs, expected)
# without anything: just print the <label>
((), {}, '<label for="id_field">Field:</label>'),
# passing just one argument: overrides the field's label
(('custom',), {}, '<label for="id_field">custom:</label>'),
# the overridden label is escaped
            (('custom&',), {}, '<label for="id_field">custom&amp;:</label>'),
            ((mark_safe('custom&amp;'),), {}, '<label for="id_field">custom&amp;:</label>'),
# Passing attrs to add extra attributes on the <label>
((), {'attrs': {'class': 'pretty'}}, '<label for="id_field" class="pretty">Field:</label>')
]
for args, kwargs, expected in testcases:
self.assertHTMLEqual(boundfield.label_tag(*args, **kwargs), expected)
def test_boundfield_label_tag_no_id(self):
"""
If a widget has no id, label_tag just returns the text with no
surrounding <label>.
"""
class SomeForm(Form):
field = CharField()
boundfield = SomeForm(auto_id='')['field']
self.assertHTMLEqual(boundfield.label_tag(), 'Field:')
        self.assertHTMLEqual(boundfield.label_tag('Custom&'), 'Custom&amp;:')
def test_boundfield_label_tag_custom_widget_id_for_label(self):
class CustomIdForLabelTextInput(TextInput):
def id_for_label(self, id):
return 'custom_' + id
class EmptyIdForLabelTextInput(TextInput):
def id_for_label(self, id):
return None
class SomeForm(Form):
custom = CharField(widget=CustomIdForLabelTextInput)
empty = CharField(widget=EmptyIdForLabelTextInput)
form = SomeForm()
self.assertHTMLEqual(form['custom'].label_tag(), '<label for="custom_id_custom">Custom:</label>')
self.assertHTMLEqual(form['empty'].label_tag(), '<label>Empty:</label>')
def test_boundfield_empty_label(self):
class SomeForm(Form):
field = CharField(label='')
boundfield = SomeForm()['field']
self.assertHTMLEqual(boundfield.label_tag(), '<label for="id_field"></label>')
def test_label_tag_override(self):
"""
BoundField label_suffix (if provided) overrides Form label_suffix
"""
class SomeForm(Form):
field = CharField()
boundfield = SomeForm(label_suffix='!')['field']
self.assertHTMLEqual(boundfield.label_tag(label_suffix='$'), '<label for="id_field">Field$</label>')
def test_field_name(self):
"""#5749 - `field_name` may be used as a key in _html_output()."""
class SomeForm(Form):
some_field = CharField()
def as_p(self):
return self._html_output('<p id="p_%(field_name)s"></p>', '%s', '</p>', ' %s', True)
form = SomeForm()
self.assertHTMLEqual(form.as_p(), '<p id="p_some_field"></p>')
def test_error_dict(self):
class MyForm(Form):
foo = CharField()
bar = CharField()
def clean(self):
raise ValidationError('Non-field error.', code='secret', params={'a': 1, 'b': 2})
form = MyForm({})
self.assertEqual(form.is_valid(), False)
errors = form.errors.as_text()
control = [
'* foo\n * This field is required.',
'* bar\n * This field is required.',
'* __all__\n * Non-field error.',
]
for error in control:
self.assertIn(error, errors)
errors = form.errors.as_ul()
control = [
'<li>foo<ul class="errorlist"><li>This field is required.</li></ul></li>',
'<li>bar<ul class="errorlist"><li>This field is required.</li></ul></li>',
'<li>__all__<ul class="errorlist nonfield"><li>Non-field error.</li></ul></li>',
]
for error in control:
self.assertInHTML(error, errors)
errors = json.loads(form.errors.as_json())
control = {
'foo': [{'code': 'required', 'message': 'This field is required.'}],
'bar': [{'code': 'required', 'message': 'This field is required.'}],
'__all__': [{'code': 'secret', 'message': 'Non-field error.'}]
}
self.assertEqual(errors, control)
def test_error_dict_as_json_escape_html(self):
"""#21962 - adding html escape flag to ErrorDict"""
class MyForm(Form):
foo = CharField()
bar = CharField()
def clean(self):
raise ValidationError('<p>Non-field error.</p>',
code='secret',
params={'a': 1, 'b': 2})
control = {
'foo': [{'code': 'required', 'message': 'This field is required.'}],
'bar': [{'code': 'required', 'message': 'This field is required.'}],
'__all__': [{'code': 'secret', 'message': '<p>Non-field error.</p>'}]
}
form = MyForm({})
self.assertFalse(form.is_valid())
errors = json.loads(form.errors.as_json())
self.assertEqual(errors, control)
errors = json.loads(form.errors.as_json(escape_html=True))
        control['__all__'][0]['message'] = '&lt;p&gt;Non-field error.&lt;/p&gt;'
self.assertEqual(errors, control)
def test_error_list(self):
e = ErrorList()
e.append('Foo')
e.append(ValidationError('Foo%(bar)s', code='foobar', params={'bar': 'bar'}))
self.assertIsInstance(e, list)
self.assertIn('Foo', e)
self.assertIn('Foo', forms.ValidationError(e))
self.assertEqual(
e.as_text(),
'* Foo\n* Foobar'
)
self.assertEqual(
e.as_ul(),
'<ul class="errorlist"><li>Foo</li><li>Foobar</li></ul>'
)
self.assertEqual(
json.loads(e.as_json()),
[{"message": "Foo", "code": ""}, {"message": "Foobar", "code": "foobar"}]
)
def test_error_list_class_not_specified(self):
e = ErrorList()
e.append('Foo')
e.append(ValidationError('Foo%(bar)s', code='foobar', params={'bar': 'bar'}))
self.assertEqual(
e.as_ul(),
'<ul class="errorlist"><li>Foo</li><li>Foobar</li></ul>'
)
def test_error_list_class_has_one_class_specified(self):
e = ErrorList(error_class='foobar-error-class')
e.append('Foo')
e.append(ValidationError('Foo%(bar)s', code='foobar', params={'bar': 'bar'}))
self.assertEqual(
e.as_ul(),
'<ul class="errorlist foobar-error-class"><li>Foo</li><li>Foobar</li></ul>'
)
def test_error_list_with_hidden_field_errors_has_correct_class(self):
class Person(Form):
first_name = CharField()
last_name = CharField(widget=HiddenInput)
p = Person({'first_name': 'John'})
self.assertHTMLEqual(
p.as_ul(),
"""<li><ul class="errorlist nonfield"><li>(Hidden field last_name) This field is required.</li></ul></li><li><label for="id_first_name">First name:</label> <input id="id_first_name" name="first_name" type="text" value="John" /><input id="id_last_name" name="last_name" type="hidden" /></li>"""
)
self.assertHTMLEqual(
p.as_p(),
"""<ul class="errorlist nonfield"><li>(Hidden field last_name) This field is required.</li></ul>
<p><label for="id_first_name">First name:</label> <input id="id_first_name" name="first_name" type="text" value="John" /><input id="id_last_name" name="last_name" type="hidden" /></p>"""
)
self.assertHTMLEqual(
p.as_table(),
"""<tr><td colspan="2"><ul class="errorlist nonfield"><li>(Hidden field last_name) This field is required.</li></ul></td></tr>
<tr><th><label for="id_first_name">First name:</label></th><td><input id="id_first_name" name="first_name" type="text" value="John" /><input id="id_last_name" name="last_name" type="hidden" /></td></tr>"""
)
def test_error_list_with_non_field_errors_has_correct_class(self):
class Person(Form):
first_name = CharField()
last_name = CharField()
def clean(self):
raise ValidationError('Generic validation error')
p = Person({'first_name': 'John', 'last_name': 'Lennon'})
self.assertHTMLEqual(
str(p.non_field_errors()),
'<ul class="errorlist nonfield"><li>Generic validation error</li></ul>'
)
self.assertHTMLEqual(
p.as_ul(),
"""<li><ul class="errorlist nonfield"><li>Generic validation error</li></ul></li><li><label for="id_first_name">First name:</label> <input id="id_first_name" name="first_name" type="text" value="John" /></li>
<li><label for="id_last_name">Last name:</label> <input id="id_last_name" name="last_name" type="text" value="Lennon" /></li>"""
)
self.assertHTMLEqual(
p.non_field_errors().as_text(),
'* Generic validation error'
)
self.assertHTMLEqual(
p.as_p(),
"""<ul class="errorlist nonfield"><li>Generic validation error</li></ul>
<p><label for="id_first_name">First name:</label> <input id="id_first_name" name="first_name" type="text" value="John" /></p>
<p><label for="id_last_name">Last name:</label> <input id="id_last_name" name="last_name" type="text" value="Lennon" /></p>"""
)
self.assertHTMLEqual(
p.as_table(),
"""<tr><td colspan="2"><ul class="errorlist nonfield"><li>Generic validation error</li></ul></td></tr>
<tr><th><label for="id_first_name">First name:</label></th><td><input id="id_first_name" name="first_name" type="text" value="John" /></td></tr>
<tr><th><label for="id_last_name">Last name:</label></th><td><input id="id_last_name" name="last_name" type="text" value="Lennon" /></td></tr>"""
)
| bsd-3-clause | 1,573,278,644,133,027,800 | 56.634446 | 519 | 0.615989 | false |
EmanueleCannizzaro/scons | test/Interactive/help.py | 1 | 2710 | #!/usr/bin/env python
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "test/Interactive/help.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
"""
Verify the behavior of the "help" subcommand (and its "h" and "?" aliases).
"""
import TestSCons
test = TestSCons.TestSCons()
test.write('SConstruct', """\
Command('foo.out', 'foo.in', Copy('$TARGET', '$SOURCE'))
Command('1', [], Touch('$TARGET'))
""")
test.write('foo.in', "foo.in 1\n")
scons = test.start(arguments = '-Q --interactive')
scons.send("build foo.out 1\n")
test.wait_for(test.workpath('1'))
test.must_match(test.workpath('foo.out'), "foo.in 1\n")
scons.send('help\n')
scons.send('h\n')
scons.send('?\n')
help_text = """\
build [TARGETS] Build the specified TARGETS and their dependencies.
'b' is a synonym.
clean [TARGETS] Clean (remove) the specified TARGETS and their
dependencies. 'c' is a synonym.
exit Exit SCons interactive mode.
help [COMMAND] Prints help for the specified COMMAND. 'h' and
'?' are synonyms.
shell [COMMANDLINE] Execute COMMANDLINE in a subshell. 'sh' and '!'
are synonyms.
version Prints SCons version information.
"""
expect_stdout = """\
scons>>> Copy("foo.out", "foo.in")
Touch("1")
scons>>> %(help_text)s
scons>>> %(help_text)s
scons>>> %(help_text)s
scons>>>
""" % locals()
test.finish(scons, stdout = expect_stdout)
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| mit | -3,640,869,122,940,750,000 | 28.78022 | 98 | 0.675646 | false |
delattreb/TemperatureHumidityServer | Doc/ssd1306-master/tests/test_sh1106.py | 1 | 2109 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
try:
from unittest.mock import call, Mock
except ImportError:
from mock import call, Mock
import pytest
from oled.device import sh1106
from oled.render import canvas
import baseline_data
serial = Mock(unsafe=True)
def setup_function(function):
serial.reset_mock()
serial.command.side_effect = None
def test_init_128x64():
sh1106(serial)
serial.command.assert_has_calls([
# Initial burst are initialization commands
call(174, 32, 16, 176, 200, 0, 16, 64, 161, 166, 168, 63, 164,
211, 0, 213, 240, 217, 34, 218, 18, 219, 32, 141, 20),
# set contrast
call(129, 127),
# reset the display
call(176, 2, 16),
call(177, 2, 16),
call(178, 2, 16),
call(179, 2, 16),
call(180, 2, 16),
call(181, 2, 16),
call(182, 2, 16),
call(183, 2, 16)
])
    # Next 1024 are all data: zeros to clear the RAM
# (1024 = 128 * 64 / 8)
serial.data.assert_has_calls([call([0] * 128)] * 8)
def test_init_invalid_dimensions():
with pytest.raises(ValueError) as ex:
sh1106(serial, width=77, height=105)
assert "Unsupported display mode: 77x105" in str(ex.value)
def test_init_handle_ioerror():
serial.command.side_effect = IOError(-99, "Test exception")
with pytest.raises(IOError) as ex:
sh1106(serial)
assert "Failed to initialize SH1106 display driver" in str(ex.value)
def test_display():
device = sh1106(serial)
serial.reset_mock()
recordings = []
def data(data):
recordings.append({'data': data})
def command(*cmd):
recordings.append({'command': list(cmd)})
serial.command = Mock(side_effect=command, unsafe=True)
serial.data = Mock(side_effect=data, unsafe=True)
# Use the same drawing primitives as the demo
with canvas(device) as draw:
baseline_data.primitives(device, draw)
serial.data.assert_called()
serial.command.assert_called()
print(recordings)
assert recordings == baseline_data.demo_sh1106
| gpl-3.0 | -2,187,029,735,590,444,800 | 24.409639 | 72 | 0.626363 | false |
ICTU/quality-time | docs/src/create_metrics_and_sources_md.py | 1 | 8299 | """Script to convert the data model in a Markdown file."""
import json
import pathlib
import re
import sys
TYPE_DESCRIPTION = dict(
url="URL",
string="String",
multiple_choice="Multiple choice",
password="Password",
integer="Integer",
date="Date",
single_choice="Single choice",
multiple_choice_with_addition="Multiple choice with addition",
)
def html_escape(text: str) -> str:
"""Escape < and >."""
    return text.replace("<", "&lt;").replace(">", "&gt;")
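# Illustrative example (not part of the original script):
# html_escape("<em>text</em>") returns "&lt;em&gt;text&lt;/em&gt;".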
def get_data_model():
"""Return the data model."""
module_dir = pathlib.Path(__file__).resolve().parent
server_src_path = module_dir.parent.parent / "components" / "server" / "src"
sys.path.insert(0, str(server_src_path))
from data_model import DATA_MODEL_JSON # pylint: disable=import-error,import-outside-toplevel
return json.loads(DATA_MODEL_JSON)
def markdown_link(url: str, anchor: str = None) -> str:
"""Return a Markdown link."""
anchor = anchor or url
return f"[{anchor}]({url})"
def markdown_table_row(*cells: str) -> str:
"""Return a Markdown table row."""
return f"| {' | '.join([html_escape(cell) for cell in cells])} |\n"
def markdown_table_header(*column_headers: str) -> str:
"""Return a Markdown table header."""
headers = markdown_table_row(*column_headers)
separator = markdown_table_row(*[":" + "-" * (len(column_header) - 1) for column_header in column_headers])
return "\n" + headers + separator
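# Illustrative example (not part of the original script):
# markdown_table_header("Name", "Scale") returns
# '\n| Name | Scale |\n| :--- | :---- |\n'; the separator repeats "-" to one
# less than each header's length.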
def markdown_header(header: str, level: int = 1) -> str:
"""Return a Markdown header."""
return ("\n" if level > 1 else "") + "#" * level + f" {header}\n"
def metrics_table(data_model, universal_sources: list[str]) -> str:
"""Return the metrics as Markdown table."""
markdown = markdown_table_header("Name", "Description", "Default target", "Scale(s)", "Default tags", "Sources¹")
for metric in sorted(data_model["metrics"].values(), key=lambda item: str(item["name"])):
direction = {"<": "≦", ">": "≧"}[metric["direction"]]
unit = "% of the " + metric["unit"] if metric["default_scale"] == "percentage" else " " + metric["unit"]
target = f"{direction} {metric['target']}{unit}"
if len(metric["scales"]) == 1:
scales = metric["default_scale"]
else:
scales = ", ".join(
[
f"{scale} (default)" if scale == metric["default_scale"] else scale
for scale in sorted(metric["scales"])
]
)
tags = ", ".join(metric["tags"])
sources = []
for source in metric["sources"]:
if source not in universal_sources:
source_name = data_model["sources"][source]["name"]
sources.append(f"[{source_name}]({metric_source_slug(data_model, metric, source)})")
markdown += markdown_table_row(
metric["name"], metric["description"], target, scales, tags, ", ".join(sorted(sources))
)
markdown += "\n"
return markdown
def sources_table(data_model, universal_sources: list[str]) -> str:
"""Return the sources as Markdown table."""
markdown = markdown_table_header("Name", "Description", "Metrics")
for source_key, source in sorted(data_model["sources"].items(), key=lambda item: str(item[1]["name"])):
source_name = f"[{source['name']}]({source['url']})" if "url" in source else source["name"]
if source_key in universal_sources:
metrics = "¹"
else:
metrics = ", ".join(
[
f"[{metric['name']}]({metric_source_slug(data_model, metric, source_key)})"
for metric in data_model["metrics"].values()
if source_key in metric["sources"]
]
)
markdown += markdown_table_row(source_name, source["description"], metrics)
markdown += "\n"
return markdown
def metric_source_slug(data_model, metric, source) -> str:
"""Return a slug for the metric source combination."""
source_name = data_model["sources"][source]["name"]
return f"#{metric['name']} from {source_name}".lower().replace(" ", "-")
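# Illustrative example (assumed metric/source names): a metric "Duplicated lines"
# measured with the source "SonarQube" yields "#duplicated-lines-from-sonarqube".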
def metric_source_table(data_model, metric_key, source_key) -> str:
"""Return the metric source combination as Markdown table."""
markdown = markdown_table_header("Parameter", "Type", "Values", "Default value", "Mandatory", "Help")
for parameter in sorted(
data_model["sources"][source_key]["parameters"].values(), key=lambda parameter: str(parameter["name"])
):
if metric_key in parameter["metrics"]:
name = parameter["name"]
parameter_type = TYPE_DESCRIPTION[parameter["type"]]
default_value = parameter["default_value"]
if isinstance(default_value, list):
if not default_value and parameter["type"] in ("single_choice", "multiple_choice"):
default_value = f"_all {parameter['short_name']}_"
else:
default_value = ", ".join(default_value)
if parameter["type"] in ("single_choice", "multiple_choice"):
values = ", ".join(sorted(parameter["values"]))
else:
values = ""
mandatory = "Yes" if parameter["mandatory"] else "No"
help_url = markdown_link(parameter["help_url"]) if "help_url" in parameter else parameter.get("help", "")
markdown += markdown_table_row(name, parameter_type, values, default_value, mandatory, help_url)
markdown += "\n"
return markdown
def metric_source_configuration_table(data_model, metric_key, source_key) -> str:
"""Return the metric source combination's configuration as Markdown table."""
configurations = data_model["sources"][source_key].get("configuration", {}).values()
relevant_configurations = [config for config in configurations if metric_key in config["metrics"]]
if not relevant_configurations:
return ""
markdown = markdown_table_header("Configuration", "Value")
for configuration in sorted(relevant_configurations, key=lambda config: str(config["name"])):
name = configuration["name"]
values = ", ".join(sorted(configuration["value"], key=lambda value: value.lower()))
markdown += markdown_table_row(name, values)
markdown += "\n"
return markdown
def data_model_as_table(data_model) -> str:
"""Return the data model as Markdown table."""
markdown = markdown_header("Quality-time metrics and sources")
markdown += (
"\nThis document lists all [metrics](#metrics) that *Quality-time* can measure and all "
"[sources](#sources) that *Quality-time* can use to measure the metrics. For each "
"[supported combination of metric and source](#supported-metric-source-combinations), it lists the "
"parameters that can be used to configure the source.\n"
)
markdown += markdown_header("Metrics", 2)
markdown += metrics_table(data_model, universal_sources := ["manual_number"])
markdown += markdown_header("Sources", 2)
markdown += sources_table(data_model, universal_sources)
markdown += "¹) All metrics with the count or percentage scale can be measured using the 'Manual number' source.\n"
markdown += markdown_header("Supported metric-source combinations", 2)
for metric_key, metric in data_model["metrics"].items():
for source_key in metric["sources"]:
if source_key not in universal_sources:
markdown += markdown_header(f"{metric['name']} from {data_model['sources'][source_key]['name']}", 3)
markdown += metric_source_table(data_model, metric_key, source_key)
markdown += metric_source_configuration_table(data_model, metric_key, source_key)
markdown = re.sub(r"\n{3,}", "\n\n", markdown) # Replace multiple consecutive empty lines with one empty line
return re.sub(r"\n\n$", "\n", markdown) # Remove final empty line
if __name__ == "__main__":
data_model_md_path = pathlib.Path(__file__).resolve().parent.parent / "METRICS_AND_SOURCES.md"
with data_model_md_path.open("w") as data_model_md:
data_model_md.write(data_model_as_table(get_data_model()))
| apache-2.0 | 7,844,623,377,795,428,000 | 44.56044 | 119 | 0.614689 | false |
JustinSGray/Kona | src/kona/test/test_reduced_kkt_vector.py | 1 | 4681 | import unittest
import numpy as np
from kona.linalg.memory import KonaMemory
from dummy_solver import DummySolver
from kona.linalg.vectors.composite import ReducedKKTVector
class ReducedKKTVectorTestCase(unittest.TestCase):
def setUp(self):
solver = DummySolver(10, 0, 5)
self.km = km = KonaMemory(solver)
km.primal_factory.request_num_vectors(3)
km.dual_factory.request_num_vectors(3)
km.allocate_memory()
# can't create bare KonaVectors because the memory manager doesn't
# like them, so I'll just use the PrimalVector to test the
# KonaVectorMethods
self.pv1 = km.primal_factory.generate()
self.dv1 = km.dual_factory.generate()
self.pv1._data.data = 2*np.ones(10)
self.dv1._data.data = 3*np.ones(5)
self.pv2 = km.primal_factory.generate()
self.dv2 = km.dual_factory.generate()
self.pv2._data.data = 2*np.ones(10)
self.dv2._data.data = 2*np.ones(5)
self.rkkt_vec1 = ReducedKKTVector(self.pv1, self.dv1)
self.rkkt_vec2 = ReducedKKTVector(self.pv2, self.dv2)
def test_check_type(self):
try:
self.rkkt_vec1._check_type(self.pv1)
except TypeError as err:
self.assertEqual(
str(err),
"CompositeVector() >> Wrong vector type. Must be " +
"<class 'kona.linalg.vectors.composite.ReducedKKTVector'>")
else:
self.fail('TypeError expected')
def test_bad_init_args(self):
try:
ReducedKKTVector(self.dv1, self.dv1)
except TypeError as err:
self.assertEqual(
str(err),
'CompositeVector() >> Unidentified primal vector.')
else:
self.fail('TypeError expected')
try:
ReducedKKTVector(self.pv1, self.pv1)
except TypeError as err:
self.assertEqual(
str(err),
'CompositeVector() >> Unidentified dual vector.')
else:
self.fail('TypeError expected')
def test_equals(self):
self.rkkt_vec2.equals(self.rkkt_vec1)
err = self.dv2._data.data - self.dv1._data.data
self.assertEqual(np.linalg.norm(err), 0)
err = self.pv2._data.data - self.pv1._data.data
self.assertEqual(np.linalg.norm(err), 0)
def test_plus(self):
self.rkkt_vec2.plus(self.rkkt_vec1)
err = self.pv2._data.data - 4*np.ones(10)
self.assertEqual(np.linalg.norm(err), 0)
err = self.dv2._data.data - 5*np.ones(5)
self.assertEqual(np.linalg.norm(err), 0)
def test_minus(self):
self.rkkt_vec2.minus(self.rkkt_vec1)
err = self.pv2._data.data - 0*np.ones(10)
self.assertEqual(np.linalg.norm(err), 0)
err = self.dv2._data.data - -1*np.ones(5)
self.assertEqual(np.linalg.norm(err), 0)
def test_times_vector(self):
self.rkkt_vec2.times(self.rkkt_vec1)
self.assertEqual(self.rkkt_vec2.inner(self.rkkt_vec2), 340.)
def test_times_scalar(self):
self.rkkt_vec2.times(3)
err = self.pv2._data.data - 6*np.ones(10)
self.assertEqual(np.linalg.norm(err), 0)
err = self.dv2._data.data - 6*np.ones(5)
self.assertEqual(np.linalg.norm(err), 0)
self.rkkt_vec1.times(3.0)
err = self.pv1._data.data - 6*np.ones(10)
self.assertEqual(np.linalg.norm(err), 0)
err = self.dv1._data.data - 9*np.ones(5)
self.assertEqual(np.linalg.norm(err), 0)
def test_divide_by(self):
self.rkkt_vec2.divide_by(2)
err = self.pv2._data.data - 1*np.ones(10)
self.assertEqual(np.linalg.norm(err), 0)
err = self.dv2._data.data - 1*np.ones(5)
self.assertEqual(np.linalg.norm(err), 0)
def test_equals_ax_p_by(self):
self.rkkt_vec2.equals_ax_p_by(2, self.rkkt_vec1, 2, self.rkkt_vec2)
err = self.pv2._data.data - 8*np.ones(10)
self.assertEqual(np.linalg.norm(err), 0)
err = self.dv2._data.data - 10*np.ones(5)
self.assertEqual(np.linalg.norm(err), 0)
def test_inner(self):
ip = self.rkkt_vec2.inner(self.rkkt_vec1)
self.assertEqual(ip, 70)
def test_norm2(self):
ip = self.rkkt_vec2.norm2
self.assertEqual(ip, 60**.5)
def test_equals_initial_guess(self):
self.rkkt_vec2.equals_init_guess()
err = self.pv2._data.data - 10*np.ones(10)
self.assertEqual(np.linalg.norm(err), 0)
err = self.dv2._data.data - (np.ones(5))
self.assertEqual(np.linalg.norm(err), 0)
if __name__ == "__main__":
unittest.main()
| lgpl-3.0 | 7,608,387,035,014,271,000 | 30.628378 | 75 | 0.594745 | false |
OpenKMIP/PyKMIP | kmip/services/server/monitor.py | 1 | 6822 | # Copyright (c) 2018 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import multiprocessing
import os
import signal
import time
from kmip.core import policy as operation_policy
def get_json_files(p):
"""
Scan the provided policy directory for all JSON policy files.
"""
f = [os.path.join(p, x) for x in os.listdir(p) if x.endswith(".json")]
return sorted(f)
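# Illustrative example (hypothetical path): get_json_files("/etc/pykmip/policies")
# might return ["/etc/pykmip/policies/access.json", "/etc/pykmip/policies/audit.json"],
# sorted alphabetically.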
class PolicyDirectoryMonitor(multiprocessing.Process):
"""
A file monitor that tracks modifications made within the policy directory.
"""
def __init__(self, policy_directory, policy_store, live_monitoring=True):
"""
Set up the file monitor with the policy directory to track.
Args:
policy_directory (string): The system path of the policy directory
that should be monitored. Required.
policy_store (DictProxy): A dictionary proxy created by the server
multiprocessing resource manager. Used to store and share the
policy information across server processes and threads.
Required.
live_monitoring (boolean): A boolean indicating whether or not
live monitoring should continue indefinitely. Optional,
defaults to True.
"""
super(PolicyDirectoryMonitor, self).__init__()
self.halt_trigger = multiprocessing.Event()
self.policy_directory = policy_directory
self.live_monitoring = live_monitoring
self.file_timestamps = None
self.policy_cache = None
self.policy_files = None
self.policy_map = None
self.policy_store = policy_store
self.reserved_policies = ['default', 'public']
def interrupt_handler(trigger, frame):
self.stop()
signal.signal(signal.SIGINT, interrupt_handler)
signal.signal(signal.SIGTERM, interrupt_handler)
self.logger = logging.getLogger("kmip.server.monitor")
self.initialize_tracking_structures()
def stop(self):
self.halt_trigger.set()
def scan_policies(self):
"""
Scan the policy directory for policy data.
"""
policy_files = get_json_files(self.policy_directory)
for f in set(policy_files) - set(self.policy_files):
self.file_timestamps[f] = 0
for f in set(self.policy_files) - set(policy_files):
self.logger.info("Removing policies for file: {}".format(f))
self.file_timestamps.pop(f, None)
for p in self.policy_cache.keys():
self.disassociate_policy_and_file(p, f)
for p in [k for k, v in self.policy_map.items() if v == f]:
self.restore_or_delete_policy(p)
self.policy_files = policy_files
for f in sorted(self.file_timestamps.keys()):
t = os.path.getmtime(f)
if t > self.file_timestamps[f]:
self.logger.info("Loading policies for file: {}".format(f))
self.file_timestamps[f] = t
old_p = [k for k, v in self.policy_map.items() if v == f]
try:
new_p = operation_policy.read_policy_from_file(f)
except ValueError:
self.logger.error("Failure loading file: {}".format(f))
self.logger.debug("", exc_info=True)
continue
for p in new_p.keys():
self.logger.info("Loading policy: {}".format(p))
if p in self.reserved_policies:
self.logger.warning(
"Policy '{}' overwrites a reserved policy and "
"will be thrown out.".format(p)
)
continue
if p in sorted(self.policy_store.keys()):
self.logger.debug(
"Policy '{}' overwrites an existing "
"policy.".format(p)
)
if f != self.policy_map.get(p):
self.policy_cache.get(p).append(
(
time.time(),
self.policy_map.get(p),
self.policy_store.get(p)
)
)
else:
self.policy_cache[p] = []
self.policy_store[p] = new_p.get(p)
self.policy_map[p] = f
for p in set(old_p) - set(new_p.keys()):
self.disassociate_policy_and_file(p, f)
self.restore_or_delete_policy(p)
def run(self):
"""
Start monitoring operation policy files.
"""
self.initialize_tracking_structures()
if self.live_monitoring:
self.logger.info("Starting up the operation policy file monitor.")
while not self.halt_trigger.is_set():
time.sleep(1)
self.scan_policies()
self.logger.info("Stopping the operation policy file monitor.")
else:
self.scan_policies()
def initialize_tracking_structures(self):
self.file_timestamps = {}
self.policy_cache = {}
self.policy_files = []
self.policy_map = {}
for k in self.policy_store.keys():
if k not in self.reserved_policies:
self.policy_store.pop(k, None)
def disassociate_policy_and_file(self, policy, file_name):
c = self.policy_cache.get(policy, [])
for i in [c.index(e) for e in c if e[1] == file_name][::-1]:
c.pop(i)
def restore_or_delete_policy(self, policy):
c = self.policy_cache.get(policy, [])
if len(c) == 0:
self.logger.info("Removing policy: {}".format(policy))
self.policy_store.pop(policy, None)
self.policy_map.pop(policy, None)
self.policy_cache.pop(policy, None)
else:
e = c.pop()
self.policy_store[policy] = e[2]
self.policy_map[policy] = e[1]
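    # Illustrative cache behaviour (assumed file names, not part of the library):
    # if "a.json" defines policy "x" and "b.json" later overrides it, the cache
    # holds [(timestamp, "a.json", <x from a.json>)]; removing "b.json" then
    # restores the "a.json" version via restore_or_delete_policy("x").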
| apache-2.0 | -7,231,256,344,903,181,000 | 37.982857 | 78 | 0.552331 | false |
kodi-addons/plugin.video.espn_3 | resources/lib/constants.py | 1 | 2272 | OLD_LISTING_MODE = 'OLD_LISTING_MODE'
LIVE_EVENTS_MODE = 'LIVE_EVENTS'
PLAY_MODE = 'PLAY'
PLAY_ITEM_MODE = 'PLAY_ITEM'
PLAY_TV_MODE = 'PLAY_TV'
UPCOMING_MODE = 'UPCOMING'
AUTHENTICATE_MODE = 'AUTHENTICATE'
AUTHENTICATION_DETAILS_MODE = 'AUTHENTICATION_DETAILS'
NETWORK_ID = 'NETWORK_ID'
EVENT_ID = 'EVENT_ID'
SIMULCAST_AIRING_ID = 'SIMULCAST_AIRING_ID'
SESSION_URL = 'SESSION_URL'
DESKTOP_STREAM_SOURCE = 'DESKTOP_STREAM_SOURCE'
NETWORK_NAME = 'NETWORK_NAME'
EVENT_NAME = 'EVENT_NAME'
EVENT_GUID = 'EVENT_GUID'
ADOBE_RSS = 'ADOBE_RSS'
EVENT_PARENTAL_RATING = 'EVENT_PARENTAL_RATING'
SHELF_ID = 'SHELF_ID'
SHOWCASE_URL = 'SHOWCASE_URL'
SHOWCASE_NAV_ID = 'SHOWCASE_NAV_ID'
PLAYBACK_URL = 'PLAYBACK_URL'
REFRESH_LIVE_MODE = 'REFRESH_LIVE_MODE'
CHANNEL_RESOURCE_ID = 'CHANNEL_RESOURCE_ID'
ESPN_URL = 'ESPN_URL'
MODE = 'MODE'
SPORT = 'SPORT'
BAM_NS = '{http://services.bamnetworks.com/media/types/2.1}'
# Taken from https://espn.go.com/watchespn/player/config
ESPN3_ID = 'n360'
SECPLUS_ID = 'n323'
ACC_EXTRA_ID = 'n321'
CHANNEL_SETTINGS = {
'ShowEspn1': 'espn1',
'ShowEspn2': 'espn2',
'ShowEspn3': 'espn3',
'ShowEspnu': 'espnu',
'ShowEspnews': 'espnews',
'ShowEspnDeportes': 'espndeportes',
'ShowSec': 'sec',
'ShowSecPlus': 'secplus',
'ShowLonghorn': 'longhorn',
'ShowBuzzerBeater': 'buzzerbeater',
'ShowAccExtra': 'accextra',
'ShowGoalLine': 'goalline',
'ShowAcc': 'acc',
}
NETWORK_ID_TO_NETWORK_NAME = {
'espn1': 30990,
'espn2': 30991,
'espn3': 30992,
'espnu': 30993,
'espnews': 30994,
'espndeportes': 30995,
'sec': 30996,
'longhorn': 30998,
'accextra': 30989,
'goalline': 30988,
'secplus': 30997,
'acc': 31000,
}
ID = 'id'
URL = 'url'
TV_OS_HOME = 'http://watch.product.api.espn.com/api/product/v1/tvos/watchespn/home'
TV_OS_CHANNELS = 'http://watch.product.api.espn.com/api/product/v1/tvos/watchespn/channels'
TV_OS_SPORTS = 'http://watch.product.api.espn.com/api/product/v1/tvos/watchespn/sports'
APPLE_TV_FEATURED = 'http://espn.go.com/watchespn/appletv/featured'
APPLE_TV_SPORTS = 'http://espn.go.com/watchespn/appletv/sports'
APPLE_TV_CHANNELS = 'http://espn.go.com/watchespn/appletv/channels'
WATCH_API_V1_TRENDING = 'http://watch.api.espn.com/v1/trending'
| gpl-3.0 | -2,405,966,290,796,153,000 | 27.049383 | 91 | 0.680458 | false |
rthallisey/clapper | ansible-tests/mistral/tripleo_validations/actions/load_validations.py | 1 | 2621 | import glob
import logging
import os
import yaml
from mistral.actions import base
LOG = logging.getLogger(__name__)
DEFAULT_METADATA = {
'name': 'Unnamed',
'description': 'No description',
'stage': 'No stage',
'require_plan': True,
'groups': [],
}
VALIDATIONS_DIR = '/usr/share/tripleo-validations/validations'
def get_validation_metadata(validation, key):
try:
return validation[0]['vars']['metadata'][key]
except KeyError:
return DEFAULT_METADATA.get(key)
except TypeError:
LOG.exception("Failed to get validation metadata.")
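# Illustrative example (hypothetical validation data): with
# validation = [{'vars': {'metadata': {'name': 'Check RAM'}}}],
# get_validation_metadata(validation, 'name') returns 'Check RAM', while
# get_validation_metadata(validation, 'groups') falls back to the
# DEFAULT_METADATA value [].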
def load_validations(groups=None):
'''Loads all validations.'''
paths = glob.glob('{}/*.yaml'.format(VALIDATIONS_DIR))
results = []
for index, validation_path in enumerate(sorted(paths)):
with open(validation_path) as f:
validation = yaml.safe_load(f.read())
validation_groups = get_validation_metadata(validation, 'groups')
if not groups or \
set.intersection(set(groups), set(validation_groups)):
results.append({
'id': os.path.splitext(
os.path.basename(validation_path))[0],
'name': get_validation_metadata(validation, 'name'),
'groups': get_validation_metadata(validation, 'groups'),
'description': get_validation_metadata(validation,
'description'),
'require_plan': get_validation_metadata(validation,
'require_plan'),
'metadata': get_remaining_metadata(validation)
})
return results
def get_remaining_metadata(validation):
try:
for (k, v) in validation[0]['vars']['metadata'].items():
if len(bytes(k)) > 255 or len(bytes(v)) > 255:
LOG.error("Metadata is too long.")
return dict()
return {k: v for k, v in validation[0]['vars']['metadata'].items()
if k not in ['name', 'description', 'require_plan', 'groups']}
except KeyError:
return dict()
class ListValidations(base.Action):
def __init__(self, groups=None):
self.groups = groups
def run(self):
return load_validations(self.groups)
class ListGroups(base.Action):
def __init__(self):
pass
def run(self):
validations = load_validations()
return { group for validation in validations
for group in validation['groups'] }
| apache-2.0 | -3,117,070,980,614,347,300 | 30.578313 | 78 | 0.563144 | false |
hkarl/instinf | stellenplan/views.py | 1 | 17219 | # -*- coding: utf-8 -*-
# from django.http import HttpResponse
from django.http import HttpResponseNotFound
from django.conf import settings
from django.views.generic import ListView, View
from django.shortcuts import render
from stellenplan.models import *
from stellenplan.timeline import Timeline, TimelineGroups
from django.forms.widgets import CheckboxSelectMultiple
import tables
from django_tables2 import RequestConfig
import django_tables2
from pprint import pprint as pp
import accordion
from django.views.decorators.http import require_http_methods
from stellenplanForms import *
import os, codecs
import subprocess
import datetime
from copy import deepcopy
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
def standardfilters (qs, keywords, cleaned_data):
"""Apply all the filter keywords to the queryset, take values from cleaned_data.
@param qs: a queryset.
@param keywords: a list of pairs of strings.
The first element of the pair is the keyword,
under which the value can be looked up in the cleaned_data map.
The second element is the filterstring to be used when filtering the queryset.
(Sadly, no automatic deduction of filterstring from keyword seems plausible)
@param cleaned_data: a map of key/values, as obtained from a form.
    Note: Von and Bis are always applied, no need to specify them in the keywords
"""
# print "standardfilters: ", keywords
# print pp(cleaned_data)
if cleaned_data['Von']:
qs = qs.exclude (bis__lt = cleaned_data['Von'])
if cleaned_data['Bis']:
qs = qs.exclude (von__gt = cleaned_data['Bis'])
for (keyword, filterstring) in keywords:
if not ((cleaned_data[keyword] == '-----') or
(cleaned_data[keyword] == '')):
print keyword, filterstring, cleaned_data[keyword]
qs = qs.filter (**{filterstring: cleaned_data[keyword]})
return qs
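# Illustrative usage (mirrors the commented example queries further below): with
# cleaned_data = {'Von': None, 'Bis': None, 'Wertigkeit': 'E13'},
# standardfilters(Stelle.objects.all(),
#                 [('Wertigkeit', 'wertigkeit__wertigkeit__exact')],
#                 cleaned_data)
# is equivalent to Stelle.objects.filter(wertigkeit__wertigkeit__exact='E13').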
#########################################################
class split (View):
@method_decorator (login_required)
def post (self, request, what):
renderDir = {'result_list': []}
print request.POST
print "--- what ----"
print what
modelType = ContentType.objects.get(app_label="stellenplan", model = what)
print modelType
print type(modelType)
ids = request.POST['ids'].split(',')
splitdateStr = request.POST['Splitdatum']
splitdate = datetime.datetime.strptime (splitdateStr, '%d.%m.%Y').date()
print splitdateStr, splitdate
for oneid in ids:
entry = modelType.get_object_for_this_type (pk=oneid)
print entry, entry.von, entry.bis
print type(entry)
if ((splitdate < entry.von) or
(splitdate > entry.bis)):
renderDir['result_list'].append ('Eintrag %s (%s - %s) konnte nicht geteilt werden: Trenndatum %s liegt ausserhalb.' %
(entry.__unicode__(), entry.von, entry.bis, splitdate))
else:
# lets do the split again
try:
newentry = deepcopy(entry)
newentry.id = None
newentry.von = splitdate + datetime.timedelta(days=1)
newentry.save()
entry.bis = splitdate
entry.save()
renderDir['result_list'].append ('Eintrag %s (%s - %s) wurde erfolgreich geteilt.' %
(entry.__unicode__(), entry.von, entry.bis))
                except Exception as e:
                    renderDir['result_list'].append ('Beim Versuch, Eintrag %s (%s - %s) zu teilen, trat folgender Fehler auf: %s. Bitte benachrichtigen Sie Ihren Administrator!' %
                                                     (entry.__unicode__(), entry.von, entry.bis, e))
return render (request,
"stellenplan/splitResult.html",
renderDir)
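    # Illustrative split (hypothetical dates): an entry spanning
    # 01.01.2014 - 31.12.2014 split at 30.06.2014 becomes two rows,
    # 01.01.-30.06. and 01.07.-31.12.; the deepcopy with id = None makes
    # save() insert the second half as a new record.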
#########################################################
class stellenplanQuery (View):
additionalFields = {}
"""
additional fields is a dictionary, mapping field names to defaults.
"""
urlTarget = ''
queryFormClass = qForm
emptyFieldIndicator = '-----'
def constructAccordion (self, request):
"""
        Construct the renderDir dictionary, containing the filter results in
        accordions.
"""
return []
def producePDF (self):
# print 'Media:', settings.STATICFILES_DIRS[0]
workdir = settings.STATICFILES_DIRS[0]
fp = os.path.join(settings.STATICFILES_DIRS[0], 'report.tex')
preface = r"""
\documentclass{article}
\usepackage{booktabs}
\usepackage{pgfgantt}
\begin{document}
"""
body = ""
for a in self.renderDir['Accordion']:
body += a.asLatex()
postface = r"""
\end{document}
"""
outtext = preface + body + postface
# write file
fout = codecs.open(fp, 'w', 'utf-8')
fout.write (outtext)
fout.close()
# run latex
cwd = os.getcwd()
os.chdir (workdir)
retval = subprocess.call (["pdflatex",
'-interaction=batchmode',
"report.tex"])
os.chdir (cwd)
def fieldEmpty (self, f):
return ((self.ff.cleaned_data[f] == stellenplanQuery.emptyFieldIndicator) or
(self.ff.cleaned_data[f] == ''))
@method_decorator(login_required)
def get(self, request):
# print request
if not request.method == 'GET':
return HttpResponseNotFound('<h1>Request type not supported!</h1>')
if request.GET:
            # a query has already been submitted
self.ff = self.__class__.queryFormClass (request.GET)
if not self.ff.is_valid():
print "error", self.__class__.urlTarget + '.html'
print request
return render (request,
'stellenplan/' + self.__class__.urlTarget + '.html',
{'error_message': 'Bitte berichtigen Sie folgenden Fehler: ',
'form': self.ff,
'urlTarget': self.__class__.urlTarget,
})
else:
            # empty request, rebuild from scratch
# print "empty request!"
self.ff = self.__class__.queryFormClass (request.GET)
self.ff.cleaned_data = {'Von': None,
'Bis': None,
'PDF': False,
}
self.ff.cleaned_data.update(self.__class__.additionalFields)
self.renderDir = {
'form': self.ff,
'urlTarget': self.__class__.urlTarget,
}
## pp(self.renderDir)
## print self.renderDir['form']
self.renderDir['Accordion'] = []
self.constructAccordion (request)
if self.ff.cleaned_data['PDF']:
            # here trigger the generation of the PDF file
self.producePDF()
self.renderDir['pdf'] = True
self.renderDir['pdfname'] = 'report.pdf'
return render (request,
"stellenplan/" + self.__class__.urlTarget + ".html",
self.renderDir)
#################################
class qBesetzung (stellenplanQuery):
"""This is just an empty class"""
urlTarget = 'qBesetzung'
queryFormClass = BesetzungFilterForm
additionalFields = {'Person': stellenplanQuery.emptyFieldIndicator,
'Stellennummer': stellenplanQuery.emptyFieldIndicator,
}
def constructAccordion (self, request):
        # filter the Besetzung entries as usual:
allBesetzung = Besetzung.objects.all()
        # all Besetzung entries after the standard filters - TODO: check whether this filter is needed
qs = standardfilters (allBesetzung, [], self.ff.cleaned_data)
# print self.ff.cleaned_data
# add a person filter, if that filter was selected
if not self.fieldEmpty ('Person'):
qs = qs.filter (person__personalnummer__exact =
self.ff.cleaned_data['Person'])
if not self.fieldEmpty ('Stellennummer'):
# print "filtering for ", self.ff.cleaned_data['Stellennummer']
qs = qs.filter (stelle__stellennummer__exact =
self.ff.cleaned_data['Stellennummer'])
##        # add a fachgebiet filter, if that filter was selected
## if not self.ff.cleaned_data['Fachgebiet'] == self.__class__.emptyFieldIndicator:
## qs = qs.filter (stelle__exact =
## self.ff.cleaned_data['Fachgebiet'])
besetzungstab = tables.BesetzungTable (qs)
RequestConfig (request).configure(besetzungstab)
a = accordion.Accordion ("Besetzungen")
a.addContent (besetzungstab)
self.renderDir['Accordion'].append(a)
########################################
#########################################################
class qStellen (stellenplanQuery):
urlTarget = "qStellen"
queryFormClass = StellenFilterForm
additionalFields = {'Wertigkeit': stellenplanQuery.emptyFieldIndicator,
'Art': stellenplanQuery.emptyFieldIndicator}
def constructAccordion (self, request):
        # filter the Stellen as usual:
allStellen = Stelle.objects.all()
        # all Stellen after the standard filters
        # example queries; complication: we traverse foreign keys
# qs = allStellen.filter(wertigkeit__wertigkeit__exact='E13')
# qs = allStellen.filter(art__stellenart__exact='Drittmittel')
# print qs
qs = standardfilters (allStellen,
[('Wertigkeit', 'wertigkeit__wertigkeit__exact'),
('Art', 'art__stellenart__exact')],
self.ff.cleaned_data)
print "stellen nach Filter: "
print qs
stellentab = tables.StellenTable (qs)
RequestConfig (request).configure(stellentab)
a = accordion.Accordion ("Stellen insgesamt")
a.addContent (stellentab)
self.renderDir['Accordion'].append(a)
########################################
        # aggregate the filtered Stellen by Wertigkeit
tgWertigkeit = TimelineGroups (qs,'wertigkeit', Stellenwertigkeit, 'wertigkeit')
tgWertigkeit.asAccordion ("Stellen nach Wertigkeit gruppiert",
self.renderDir, request)
#########
        # aggregate the filtered Stellen by funding type
TimelineGroups (qs,'art', Stellenart, 'stellenart').asAccordion ("Stellen nach Finanzierung gruppiert",
self.renderDir, request)
########################################
        # subtract the Zusagen from the Stellen:
        # first filter the Zusagen accordingly,
        # then process them via TimelineGroups.
        # Since only Stellen TYPES, not specific Stellen, are promised,
        # it makes no sense to do this per individual Stelle, only for
        # the corresponding aggregations.
zusageQs = standardfilters (Zusage.objects.all(),
[('Wertigkeit', 'wertigkeit__wertigkeit__exact'),],
self.ff.cleaned_data)
tgZusageWertigkeit = TimelineGroups (zusageQs, 'wertigkeit', Stellenwertigkeit, 'wertigkeit')
tgWertigkeitOhneZusagen = tgWertigkeit.subtract(tgZusageWertigkeit)
tgWertigkeitOhneZusagen.asAccordion ("Stellen nach Wertigkeit gruppiert, ZUSAGEN abgezogen",
self.renderDir, request)
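        # The group-then-subtract pattern above is reused below for the
        # Zuordnungen: both querysets are grouped on the same Wertigkeit key,
        # and TimelineGroups.subtract is assumed to align the groups by key
        # and subtract the overlapping time intervals (a sketch of the
        # intended semantics, not verified here).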
########################################
        # and almost the same again, but now subtracting the ZUORDNUNGEN
qsZuordnung = standardfilters (Zuordnung.objects.all(),
[],
self.ff.cleaned_data)
if not self.fieldEmpty('Wertigkeit'):
qsZuordnung = qsZuordnung.filter (stelle__wertigkeit__wertigkeit__exact =
self.ff.cleaned_data['Wertigkeit'])
tgZuordnungWertigkeit = TimelineGroups(qsZuordnung, 'stelle__wertigkeit', Stellenwertigkeit, 'wertigkeit')
tgWertigkeitOhneZuordnung = tgWertigkeit.subtract(tgZuordnungWertigkeit)
tgWertigkeitOhneZuordnung.asAccordion ("Stellen nach Wertigkeit gruppiert, ZUORDNUNGEN abgezogen",
self.renderDir, request)
########################################
#########################################################
class qZuordnungen (stellenplanQuery):
urlTarget = "qZuordnungen"
queryFormClass = zuordnungenFilterForm
additionalFields = {'Fachgebiet': stellenplanQuery.emptyFieldIndicator}
def constructAccordion (self, request):
        # as usual, start with an overview of the Zusagen, filtered
qs = standardfilters (Zuordnung.objects.all(),
[('Fachgebiet', 'fachgebiet__kuerzel__exact'), ],
self.ff.cleaned_data)
overviewtab = tables.ZusagenTable (qs)
RequestConfig (request).configure(overviewtab)
a = accordion.Accordion ("Zuordnungen insgesamt")
a.addContent (overviewtab)
self.renderDir['Accordion'].append(a)
#########################################################
class qZusagen (stellenplanQuery):
"""
    Queries for Zusagen.
    Filter by date, Fachgebiet, Wertigkeit.
    Zusagen are backed by Zuordnungen; of interest are therefore Zusagen
    for which no Zuordnungen exist.
"""
urlTarget = "qZusagen"
queryFormClass = zusagenFilterForm
additionalFields = {'Wertigkeit': stellenplanQuery.emptyFieldIndicator,
'Fachgebiet': stellenplanQuery.emptyFieldIndicator}
def constructAccordion (self, request):
        # as usual, start with an overview of the Zusagen, filtered
pp(self.ff.cleaned_data)
qs = standardfilters (Zusage.objects.all(),
[('Fachgebiet', 'fachgebiet__kuerzel__exact'),
('Wertigkeit', 'wertigkeit__wertigkeit__exact'),],
self.ff.cleaned_data)
print qs
overviewtab = tables.ZusagenTable (qs)
RequestConfig (request).configure(overviewtab)
a = accordion.Accordion ("Zusagen insgesamt")
a.addContent (overviewtab)
self.renderDir['Accordion'].append(a)
########################################
        # output the Zusagen grouped by Wertigkeit
tgWertigkeit = TimelineGroups(qs, 'wertigkeit', Stellenwertigkeit, 'wertigkeit')
tgWertigkeit.asAccordion ("Zusagen, nach Wertigkeit gruppiert",
self.renderDir, request)
########################################
##        # Zusagen, grouped by Fachgebiet
##        # Careful, this is pointless!
##        # At most one could weight this by Personalpunkte - TODO!!
## tgFachgebiet = TimelineGroups(qs, 'fachgebiet')
## tgFachgebiet.asAccordion ("Zusagen, nach Fachgebiet gruppiert",
## self.renderDir, request)
########################################
        # output the Zusagen grouped by Wertigkeit - and subtract the
        # corresponding grouped ZuORDNUNGEN from them
qsZuordnung = standardfilters (Zuordnung.objects.all(),
[('Fachgebiet', 'fachgebiet__kuerzel__exact'),],
self.ff.cleaned_data)
if not self.fieldEmpty ('Wertigkeit'):
qsZuordnung = qsZuordnung.filter (stelle__wertigkeit__wertigkeit__exact =
self.ff.cleaned_data['Wertigkeit'])
tgZuordnungWertigkeit = TimelineGroups(qsZuordnung, 'stelle__wertigkeit', Stellenwertigkeit, 'wertigkeit')
tgZusagenOhneZuordnung = tgWertigkeit.subtract(tgZuordnungWertigkeit)
tgZusagenOhneZuordnung.asAccordion ("Offene Zusagen (Zuordnungen sind abgezogen), nach Wertigkeit gruppiert",
self.renderDir, request)
##############################################################
| gpl-2.0 | -8,266,460,927,362,443,000 | 35.159664 | 180 | 0.554148 | false |
mcrav/XDToolkit | src/emailfuncs.py | 1 | 1465 | '''
#####################################################################
#-------------------EMAIL--------------------------------------------
#####################################################################
'''
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.application import MIMEApplication
from email.mime.text import MIMEText
import os
def sendEmail(body='', email='', attachments=None, subject='', toaddr=''):
    '''
    Send email to my email account from [email protected]
    '''
    # Avoid a mutable default argument; treat None as "no attachments".
    if attachments is None:
        attachments = []
    print('Sending email')
    fromaddr = "[email protected]"
    if not toaddr:
        toaddr = "[email protected]"
msg = MIMEMultipart()
msg['From'] = fromaddr
msg['To'] = toaddr
msg['Subject'] = subject
for f in attachments:
with open(f, "rb") as fil:
part = MIMEApplication(
fil.read(),
Name=os.path.basename(f)
)
part['Content-Disposition'] = 'attachment; filename="%s"' % os.path.basename(f)
msg.attach(part)
bodyText = '{0}<br><br>Email Address: {1}'.format(body,email)
msg.attach(MIMEText(bodyText, 'html'))
print('starting smtp')
server = smtplib.SMTP('smtp.gmail.com', 587)
server.starttls()
server.login("[email protected]", '****')
print('logged in')
text = msg.as_string()
server.sendmail("[email protected]", toaddr, text)
print('sent')
server.quit()
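# Example usage (a sketch; the addresses and attachment path are hypothetical):
#
#   sendEmail(body='XD refinement finished.',
#             email='[email protected]',
#             attachments=['xd.res'],
#             subject='XDToolkit results',
#             toaddr='[email protected]')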
| gpl-3.0 | 5,255,107,755,861,065,000 | 30.847826 | 91 | 0.519454 | false |
tavy14t/tw_project | Testing/Pocket_Testing.py | 1 | 4909 | import os
import sys
import requests
import time
from pocket import Pocket
import webbrowser
import collections
import json
sys.dont_write_bytecode = True
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "trex.settings")
import django
django.setup()
from restapi.models import *
from random import randint
# POCKET_CONSUMER_KEY = '67853-fa80baf944f56fd495dab319' #Desktop
POCKET_CONSUMER_KEY = '67853-17e07228b29f7c44ef6d2784' # Web
REDIRECT_URI = 'http://localhost:8000/home/about'
'''
# connecting to pocket API; pocket_api stores the http response
pocket_api = requests.post('https://getpocket.com/v3/oauth/request',
data={'consumer_key': POCKET_CONSUMER_KEY,
'redirect_uri': 'http://localhost:8000/home/about'})
print pocket_api.status_code # if 200, it means all ok.
print pocket_api.headers # prints in JSON format
print pocket_api.text
code = pocket_api.text.split('=')[1]
print code
os.system('chrome "https://getpocket.com/auth/authorize?request_token={}&redirect_uri={}"'.format(code, 'http://localhost:8000/home/about'))
time.sleep(5)
print '--------------------------------------------'
pocket_auth = requests.post('https://getpocket.com/v3/oauth/authorize',
data={'consumer_key': POCKET_CONSUMER_KEY,
'code': code})
print pocket_auth.status_code
print pocket_auth.text
pocket_access_token = pocket_auth.text.split('=')[1].split('&')[0]
print '--------------------------------------------'
request_token = Pocket.get_request_token(consumer_key=POCKET_CONSUMER_KEY, redirect_uri=REDIRECT_URI)
print 1
# URL to redirect user to, to authorize your app
auth_url = Pocket.get_auth_url(code=request_token, redirect_uri=REDIRECT_URI)
print 2
# os.system('chrome "{}"'.format(auth_url))
print auth_url
webbrowser.open_new_tab(auth_url)
user_credentials = Pocket.get_credentials(consumer_key=POCKET_CONSUMER_KEY, code=request_token)
time.sleep(3)
print 3
access_token = user_credentials['access_token']
print 4
pocket_instance = Pocket(POCKET_CONSUMER_KEY, access_token)
pocket_get = open('pocket_get.txt', 'w')
def recursive_keys(d, depth=0):
for key in d:
if isinstance(d[key], collections.Mapping):
print ' ' * depth + key
pocket_get.write(' ' * depth + key + '\n')
recursive_keys(d[key], depth + 1)
else:
print ' ' * depth + key + ' ->' + unicode(d[key])
pocket_get.write(' ' * depth + key + ' ->' + unicode(d[key]) + '\n')
d = pocket_instance.get()[0]['list']
for key in d:
print d[key]['resolved_title'], d[key]['given_url']
# open('test.txt', 'w').write(str(pocket_instance.get()))
print '--------------------------------'
#access_token = 'd8830338-65cd-ef39-64db-ec5b99'
#pocket_instance = Pocket(POCKET_CONSUMER_KEY, access_token)
#sample = pocket_instance.get(detailType='complete')[0]
'''
with open('../result.json', 'r') as fp:
pocket_request = json.load(fp)
pocket_posts = pocket_request['list']
def pretty(d, indent=0):
for key, value in d.iteritems():
print ' ' * indent + unicode(key)
if isinstance(value, dict):
pretty(value, indent + 1)
else:
print ' ' * (indent + 1) + unicode(value)
data = {'posts': {}}
for post in pocket_posts:
data['posts'][post] = {}
data['posts'][post]['name'] = pocket_posts[post]['given_title']
data['posts'][post]['embed_link'] = pocket_posts[post]['resolved_url']
if 'tags' in pocket_posts[post]:
data['posts'][post]['tags'] = [tag for tag in pocket_posts[post]['tags']]
else:
data['posts'][post]['tags'] = []
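# After the loop, `data` has roughly this shape (values illustrative only):
#
#   {'posts': {'1234': {'name': 'Some article',
#                       'embed_link': 'http://example.com/article',
#                       'tags': ['python', 'api']}}}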
print pocket_posts[pocket_posts.keys()[0]]
# print [tag for tag in pocket_posts[post]]
'''
tags = []
for post in pocket_posts:
#print post
if 'tags' in pocket_posts[post]:
tags.append(pocket_posts[post]['tags'])
print tags
pocket_api = requests.post('https://getpocket.com/v3/get',
data={'consumer_key': POCKET_CONSUMER_KEY,
'access_token': access_token,
'count': 30,
'state': 'unread',
'detailType': 'complete',
})
# print pocket_api.headers
print pocket_api.text
e = json.loads(requests.post('https://getpocket.com/v3/get',
data={'consumer_key': POCKET_CONSUMER_KEY,
'access_token': access_token,
'count': 30,
'state': 'unread',
}).text)['list']
d = json.loads(pocket_api.text)['list']
for key in d:
print set(d[key].keys()).difference(set(e[key].keys()))
e = [key]
# print d
# recursive_keys(pocket_instance.get()[0])
'''
| mit | 6,632,174,290,124,783,000 | 29.302469 | 140 | 0.586881 | false |
stryku/hb | image_processing/tesseract/trainer.py | 1 | 1576 | from subprocess import call
BUILD_DIR='build'
FONTa='Fake Receipt'
LANG='hb'
OUTPUTBASE = LANG + '.' + FONTa
def call_shell(command):
splitted = command.split()
call(splitted)
print(command)
def text2image(text_file):
splitted = str('text2image --text=' + text_file + ' --fonts_dir ..').split()
splitted.append('--outputbase=' + OUTPUTBASE)
splitted.append('--font=Fake Receipt')
call(splitted)
def training():
command = ['tesseract', OUTPUTBASE + '.tif', OUTPUTBASE, 'box.train.stderr']
call(command)
def unicharset():
command = ['unicharset_extractor',
OUTPUTBASE + '.box']
call(command)
def clustering():
command = ['mftraining',
'-F', '../font_properties',
'-U', 'unicharset',
OUTPUTBASE + '.tr']
call(command)
def cntraining():
command = ['cntraining', OUTPUTBASE + '.tr']
call(command)
def cp_with_prefix(filename, prefix):
call_shell('cp ' + filename + ' ' + prefix + '.' + filename)
def prepare_for_combine():
cp_with_prefix('unicharset', LANG)
cp_with_prefix('shapetable', LANG)
cp_with_prefix('normproto', LANG)
cp_with_prefix('inttemp', LANG)
cp_with_prefix('pffmtable', LANG)
def combine():
command = ['combine_tessdata', LANG + '.']
call(command)
def copy_combined():
name = LANG + '.traineddata'
call_shell('cp ' + name + ' ../tessdata/' + name)
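# The calls below run the classic tesseract 3.x box-file training pipeline in
# order: render the training text to a .tif/.box pair, box-train, extract the
# unicharset, cluster the features (mftraining, cntraining), then prefix,
# combine and install the traineddata. Each step consumes files produced by
# the previous one in the working directory, so the ordering matters.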
text2image('../training_text.txt')
training()
unicharset()
clustering()
cntraining()
prepare_for_combine()
combine()
copy_combined()
| mit | 1,929,229,213,959,798,300 | 20.013333 | 80 | 0.613579 | false |
frankyrumple/smc | modules/ednet/sequentialguid.py | 1 | 5058 | ###### SequentialGUID
import os
import datetime
import sys
from binascii import unhexlify, hexlify
import uuid
class SequentialGUID:
SEQUENTIAL_GUID_AS_STRING = 0
SEQUENTIAL_GUID_AS_BINARY = 1
SEQUENTIAL_GUID_AT_END = 2
def __init__(self):
pass
@staticmethod
def NewGUID(guid_type = SEQUENTIAL_GUID_AS_STRING):
# What type of machine are we runing on?
endian = sys.byteorder # will be 'little' or 'big'
# Need some random info
rand_bytes = bytearray()
rand_bytes += os.urandom(10) #Get 10 random bytes
        # Get the current timestamp in milliseconds - makes this sequential
ts = long((datetime.datetime.utcnow() - datetime.datetime(1970, 1, 1)).total_seconds() * 1000)
tsbytes = bytearray()
# NOTE: we don't pass endian into long_to_bytes
tsbytes += long_to_bytes(ts) # Convert long to byte array
        while (len(tsbytes) < 8):  # Make sure to pad some 0s on the front so it is 64 bits
tsbytes.insert(0, 0) # Python will most likely make it a byte array
guid_bytes = bytearray(16) # 16 bytes is 128 bit
# Combine the random and timestamp bytes into a GUID
if(guid_type != SequentialGUID.SEQUENTIAL_GUID_AT_END):
guid_bytes[0] = tsbytes[2] # Copy timestamp into guid
guid_bytes[1] = tsbytes[3]
guid_bytes[2] = tsbytes[4]
guid_bytes[3] = tsbytes[5]
guid_bytes[4] = tsbytes[6]
guid_bytes[5] = tsbytes[7]
guid_bytes[6] = rand_bytes[0] # Copy rand bytes into guid
guid_bytes[7] = rand_bytes[1]
guid_bytes[8] = rand_bytes[2]
guid_bytes[9] = rand_bytes[3]
guid_bytes[10] = rand_bytes[4]
guid_bytes[11] = rand_bytes[5]
guid_bytes[12] = rand_bytes[6]
guid_bytes[13] = rand_bytes[7]
guid_bytes[14] = rand_bytes[8]
guid_bytes[15] = rand_bytes[9]
if (guid_type == SequentialGUID.SEQUENTIAL_GUID_AS_STRING and endian == "little" and 1!=1):
## TODO: This is mucking things up for some reason hence the 1!=1
# Need to swap stuff around if this is going to be string on little endian machines
b = guid_bytes[0:4] # First data chunk (4 items)
b.reverse()
guid_bytes[0] = b[0]
guid_bytes[1] = b[1]
guid_bytes[2] = b[2]
guid_bytes[3] = b[3]
b = guid_bytes[4:6] # 2nd data chunk (2 items)
b.reverse()
guid_bytes[4] = b[0]
guid_bytes[5] = b[1]
pass
pass
else:
# Same as above, but different order - timestamp at end not beginning
guid_bytes[10] = tsbytes[2] # Copy timestamp into guid
guid_bytes[11] = tsbytes[3]
guid_bytes[12] = tsbytes[4]
guid_bytes[13] = tsbytes[5]
guid_bytes[14] = tsbytes[6]
guid_bytes[15] = tsbytes[7]
guid_bytes[0] = rand_bytes[0] # Copy rand bytes into guid
guid_bytes[1] = rand_bytes[1]
guid_bytes[2] = rand_bytes[2]
guid_bytes[3] = rand_bytes[3]
guid_bytes[4] = rand_bytes[4]
guid_bytes[5] = rand_bytes[5]
guid_bytes[6] = rand_bytes[6]
guid_bytes[7] = rand_bytes[7]
guid_bytes[8] = rand_bytes[8]
guid_bytes[9] = rand_bytes[9]
pass
# Create the guid and return it
guid = uuid.UUID(hex=hexlify(guid_bytes))
return guid
def long_to_bytes (val, endianness='big'):
""" Pulled from http://stackoverflow.com/questions/8730927/convert-python-long-int-to-fixed-size-byte-array
Use :ref:`string formatting` and :func:`~binascii.unhexlify` to
convert ``val``, a :func:`long`, to a byte :func:`str`.
:param long val: The value to pack
:param str endianness: The endianness of the result. ``'big'`` for
big-endian, ``'little'`` for little-endian.
If you want byte- and word-ordering to differ, you're on your own.
Using :ref:`string formatting` lets us use Python's C innards.
"""
# one (1) hex digit per four (4) bits
width = val.bit_length()
# unhexlify wants an even multiple of eight (8) bits, but we don't
# want more digits than we need (hence the ternary-ish 'or')
width += 8 - ((width % 8) or 8)
# format width specifier: four (4) bits per hex digit
fmt = '%%0%dx' % (width // 4)
# prepend zero (0) to the width, to zero-pad the output
s = unhexlify(fmt % val)
if endianness == 'little':
# see http://stackoverflow.com/a/931095/309233
s = s[::-1]
return s
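# For example, long_to_bytes(0xDEADBEEF) == '\xde\xad\xbe\xef' (big-endian),
# and long_to_bytes(0xDEADBEEF, 'little') returns the reversed byte order.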
### Usage
### guid = SequentialGUID.NewGUID(SequentialGUID.SEQUENTIAL_GUID_AS_STRING)
### Use String for most dbs, and At End for MSSQL if you use their GUID field type
### REQUIRES: Python 2.6+ with bytearray support
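### A minimal runnable sketch of the above (kept commented to match this block):
### if __name__ == '__main__':
###     g1 = SequentialGUID.NewGUID(SequentialGUID.SEQUENTIAL_GUID_AS_STRING)
###     g2 = SequentialGUID.NewGUID(SequentialGUID.SEQUENTIAL_GUID_AS_STRING)
###     print g1, g2  # g2 sorts after g1 because the timestamp leads the GUID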
###### End SequentialGUID
| mit | 5,169,569,458,561,843,000 | 36.466667 | 111 | 0.574733 | false |
EdSalisbury/mtgstore | cards/migrations/0004_auto__chg_field_card_multiverse_id.py | 1 | 1724 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Card.multiverse_id'
db.alter_column(u'cards_card', 'multiverse_id', self.gf('django.db.models.fields.PositiveIntegerField')())
def backwards(self, orm):
# Changing field 'Card.multiverse_id'
db.alter_column(u'cards_card', 'multiverse_id', self.gf('django.db.models.fields.PositiveIntegerField')(null=True))
models = {
u'cards.card': {
'Meta': {'object_name': 'Card'},
'condition': ('django.db.models.fields.CharField', [], {'default': "'M'", 'max_length': '2'}),
'edition': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['cards.Edition']"}),
'foil': ('django.db.models.fields.BooleanField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'multiverse_id': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'quantity': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'})
},
u'cards.edition': {
'Meta': {'object_name': 'Edition'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'set_id': ('django.db.models.fields.CharField', [], {'max_length': '6'})
}
}
complete_apps = ['cards'] | mit | -511,152,586,451,084,700 | 43.230769 | 123 | 0.580626 | false |
suttond/MODOI | SimulationClient/SimulationClient.py | 1 | 12355 | from multiprocessing.connection import Client
import time
import logging
import socket
import numpy as np
from SimulationUtilities import Configuration_Processing
from SimulationUtilities.Communication_Codes import comm_code
import LinearAlgebra as la
from CustomBFGS import find_geodesic_midpoint
from MetricValues import shutdown_metric
class SimulationClient:
"""
The purpose of this object is to compute local geodesics using a modified BFGS method. The object receives a pair of
end points to compute the local geodesic between. The simulation client then returns a new position for the node.
    The simulation client needs the value of the potential and its gradient function; to obtain these it makes
    calls to its assigned SimulationPotential servers.
Attributes:
CONFIGURATION (dict) :
A dictionary containing the parsed values from the file in configuration_file.
CURVE_ADDRESS (str, int) :
A tuple containing a string representing the hostname/IP and an integer for the running SimulationServer.
AUTHKEY (str) :
A string containing the authorisation key for the listener method.
DELAY (float) :
            The length of time the SimulationClient should wait if there are no new available jobs before attempting to
contact the SimulationServer again.
METRIC_SERVERS :
A list containing tuples of addresses for SimulationPotential instances.
ID (str) :
A string that uniquely identifies the client amongst all other clients in the computation.
MASS_MATRIX (numpy.array) :
A NumPy matrix containing the mass matrix of the molecular system. Produced automatically from the Atomistic
Simulation Environment.
"""
def __init__(self, simulation_client_id, server_host, server_port, authkey, metric_server_addresses,
configuration_file, logfile=None, log_level=logging.INFO, callback_delay=1.0):
"""The constructor for the SimulationClient class.
Note:
This class is intended to be used in conjunction with running SimulationServer and SimulationPotential
objects. It will cause a runtime exception if this condition isn't satisfied.
Args:
simulation_client_id (str) :
A string that uniquely identifies the client amongst all other clients in the computation.
server_host (str) :
The TCP/IP hostname of the running SimulationServer instance.
server_port (int) :
The port number that the SimulationServer instance is communicating on.
authkey (str, optional) :
Authentication key used to secure process communications. Default to None for local computations to
increase speed.
metric_server_addresses :
A list containing tuples of the type (str, int) containing the hostnames and ports for the running
SimulationPotential instances.
configuration_file (str) :
Directory and filename of the configuration file.
logfile (str, optional) :
Directory and filename of the log file. Is created if doesn't exist, overwritten if it does.
log_level (int, optional) :
Specify level of logging required as described in the logging package documentation.
callback_delay (float) :
                The length of time the SimulationClient should wait if there are no new available jobs before attempting
to contact the SimulationServer again.
"""
# Set the SimulationClient log output to write to logfile at prescribed log level if specified. Otherwise write
# to console output. Setting to DEBUG will cause poor performance and should only be used to debug.
if logfile is not None:
logging.basicConfig(filename=logfile, level=log_level, filemode='w')
else:
logging.basicConfig(level=logging.INFO)
# Read configuration from configuration_file and store in SimulationPotential's CONFIGURATION attribute.
self.CONFIGURATION = Configuration_Processing.read_configuration_file(configuration_file)
# Set ADDRESS and AUTHKEY attributes for Client object in the start_client method.
self.CURVE_ADDRESS = (server_host, server_port)
self.AUTHKEY = authkey
# Set the callback delay as described in the attributes.
self.DELAY = callback_delay
# Store the ADDRESS and AUTHKEY attributes for Client objects in the start_client method used to compute the
# metric values.
self.METRIC_SERVERS = metric_server_addresses
# Set the client's unique identifier.
self.ID = simulation_client_id
# Compute the mass matrix for the molecular system.
self.MASS_MATRIX = np.diag(np.dstack((self.CONFIGURATION['molecule'].get_masses(),) *
(self.CONFIGURATION['dimension'] /
len(self.CONFIGURATION['molecule'].get_masses()))).flatten())
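    # Example construction (a sketch; host names, ports, the key and file
    # paths below are hypothetical):
    #
    #   client = SimulationClient('client-01', 'localhost', 5000, 'secret',
    #                             [('localhost', 5001), ('localhost', 5002)],
    #                             'simulation.cfg', logfile='client-01.log')
    #   client.start_client()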
def start_client(self):
"""Start the instance of SimulationClient and begin computing local geodesics.
"""
# Define a flag to indicate if contact with the SimulationServer instance is possible.
connection_made = False
# Create a response to send to the SimulationServer indicating that this is the first time this SimulationClient
# has attempted to get a task.
client_response = {'status_code': comm_code('CLIENT_FIRST_CONTACT'),
'client_name': self.ID}
# Attempt to connect to the SimulationServer instance.
try:
# Create a Client object that communicates with the listener on CURVE_ADDRESS using password AUTHKEY.
server = Client(self.CURVE_ADDRESS, authkey=self.AUTHKEY)
# When a connection is made send the client message.
server.send(client_response)
# The client assumes the server will respond with a message, either a local geodesic to compute or a message
# asking the client to try again after DELAY seconds.
server_response = server.recv()
# Interpret the servers response by first extracting the status_code variable from the response.
server_response_code = server_response['status_code']
# Close the connection to the server at this point to allow other clients to communicate with the
# SimulationServer.
server.close()
# Store in the connection_made flag that it was possible to create a connection.
connection_made = True
# If it isn't possible to connect to the server than a socket.error exception is raised.
except socket.error:
# Write an error to the log for this client indicating that the connection couldn't be made.
logging.warning('Failed to Make Connection to SimulationServer. Shutting down client.')
# Send a signal to the running instances of SimulationPotential that the SimulationClient would have used
# indicating that they should also shutdown.
shutdown_metric(self.METRIC_SERVERS, self.AUTHKEY)
# This is the main loop of the SimulationClient - the program stops running when it is no longer possible to
# communicate with the SimulationServer. This is decided by the connection_made flag.
while connection_made:
# At this point in the code a new server_response should have been received. How the SimulationClient reacts
# depends on the communication code received.
# If the server has indicated it is giving the SimulationClient a new geodesic to compute then...
if server_response_code == comm_code('SERVER_GIVES_NEW_TASK'):
# Compute the rescaled tangent direction of the curve as store as a NumPy array.
tangent_direction = (1 / float(self.CONFIGURATION['local_number_of_nodes'] + 1)) * \
np.subtract(server_response['right_end_point'], server_response['left_end_point'], dtype='float64')
# Compute the local geodesic using the BFGS method and store the NumPy array in result
result = \
find_geodesic_midpoint(server_response['left_end_point'],
server_response['right_end_point'],
self.CONFIGURATION['local_number_of_nodes'],
la.orthonormal_tangent_basis(tangent_direction,
self.CONFIGURATION['dimension']),
tangent_direction, self.CONFIGURATION['codimension'],
self.METRIC_SERVERS,
self.MASS_MATRIX,
self.AUTHKEY)
# If the function find_geodesic_midpoint returned a None object then it couldn't contact it's
# SimulationPotential instances and should be restarted.
if result is None:
# Tell the user via the log that the SimulationPotential instances couldn't be contacted.
logging.warning('Failed to Make Connection to SimulationPotential. Shutting down client.')
# Exit the main loop of the SimulationClient.
break
# If there is a midpoint then construct a client response to tell the server which node has which new
# position.
client_response = {'status_code': comm_code('CLIENT_HAS_MIDPOINT_DATA'),
'node_number': server_response['node_number'],
'new_node_position': result,
'client_name': self.ID
}
# Otherwise if the server has asked the SimulationClient to try again later...
elif server_response_code == comm_code('SERVER_REQUEST_CALLBACK'):
# Make the SimulationClient wait for DELAY seconds
time.sleep(self.DELAY)
# Create a response to tell the SimulationServer that the SimulationClient would like a new job.
client_response = {'status_code': comm_code('CLIENT_HAS_NO_TASK'), 'client_name': self.ID}
# Attempt to connect to the SimulationServer instance.
try:
# Create a Client object that communicates with the listener on CURVE_ADDRESS using password AUTHKEY.
server = Client(self.CURVE_ADDRESS, authkey=self.AUTHKEY)
# When a connection is made send the client message.
server.send(client_response)
# The client assumes the server will respond with a message, either a local geodesic to compute or a
# message asking the client to try again after DELAY seconds.
server_response = server.recv()
# Interpret the servers response by first extracting the status_code variable from the response.
server_response_code = server_response['status_code']
# Close the connection to the server at this point to allow other clients to communicate with the
# SimulationServer.
server.close()
# If it isn't possible to connect to the server than a socket.error exception is raised.
except (socket.error, EOFError):
# Write an error to the log for this client indicating that the connection couldn't be made.
logging.warning('Failed to Make Connection to SimulationServer. Shutting down client.')
# Send a signal to the running instances of SimulationPotential that the SimulationClient would have
# used indicating that they should also shutdown.
shutdown_metric(self.METRIC_SERVERS, self.AUTHKEY)
# Exit the main loop of the SimulationClient.
break | lgpl-3.0 | 1,447,833,398,627,233,500 | 53.192982 | 120 | 0.637556 | false |
7sDream/zhihu-py3 | zhihu/topic.py | 1 | 17794 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import time
from datetime import datetime
from .common import *
from .base import BaseZhihu
class Topic(BaseZhihu):
"""答案类,请使用``ZhihuClient.topic``方法构造对象."""
@class_common_init(re_topic_url)
def __init__(self, url, name=None, session=None):
"""创建话题类实例.
:param url: 话题url
:param name: 话题名称,可选
:return: Topic
"""
self.url = url
self._session = session
self._name = name
self._id = int(re_topic_url.match(self.url).group(1))
@property
def id(self):
"""获取话题Id(网址最后那串数字)
:return: 话题Id
:rtype: int
"""
return self._id
@property
@check_soup('_xsrf')
def xsrf(self):
"""获取知乎的反xsrf参数(用不到就忽视吧~)
:return: xsrf参数
:rtype: str
"""
return self.soup.find('input', attrs={'name': '_xsrf'})['value']
@property
@check_soup('_tid')
def tid(self):
"""话题内部Id,有时候要用到
:return: 话题内部Id
:rtype: int
"""
return int(self.soup.find(
'div', id='zh-topic-desc')['data-resourceid'])
@property
@check_soup('_name')
def name(self):
"""获取话题名称.
:return: 话题名称
:rtype: str
"""
return self.soup.find('h1').text
@property
def parents(self):
"""获取此话题的父话题。
注意:由于没找到有很多父话题的话题来测试,
所以本方法可能再某些时候出现问题,请不吝反馈。
:return: 此话题的父话题,返回生成器
:rtype: Topic.Iterable
"""
self._make_soup()
parent_topic_tag = self.soup.find('div', class_='parent-topic')
if parent_topic_tag is None:
yield []
else:
for topic_tag in parent_topic_tag.find_all('a'):
yield Topic(Zhihu_URL + topic_tag['href'],
topic_tag.text.strip(),
session=self._session)
@property
def children(self):
"""获取此话题的子话题
:return: 此话题的子话题, 返回生成器
:rtype: Topic.Iterable
"""
self._make_soup()
child_topic_tag = self.soup.find('div', class_='child-topic')
if child_topic_tag is None:
return []
elif '共有' not in child_topic_tag.contents[-2].text:
for topic_tag in child_topic_tag.div.find_all('a'):
yield Topic(Zhihu_URL + topic_tag['href'],
topic_tag.text.strip(),
session=self._session)
else:
flag = 'load'
child = ''
data = {'_xsrf': self.xsrf}
params = {
'parent': self.id
}
while flag == 'load':
params['child'] = child
res = self._session.post(Topic_Get_Children_Url,
params=params, data=data)
j = map(lambda x: x[0], res.json()['msg'][1])
*topics, last = j
for topic in topics:
yield Topic(Zhihu_URL + '/topic/' + topic[2], topic[1],
session=self._session)
flag = last[0]
child = last[2]
if flag == 'topic':
yield Topic(Zhihu_URL + '/topic/' + last[2], last[1],
session=self._session)
@property
@check_soup('_follower_num')
def follower_num(self):
"""获取话题关注人数.
:return: 关注人数
:rtype: int
"""
follower_num_block = self.soup.find(
'div', class_='zm-topic-side-followers-info')
        # If nobody follows the topic the block is missing, so return 0 directly (thanks to Zhihu user 段晓晨 for reporting this)
if follower_num_block.strong is None:
return 0
return int(follower_num_block.strong.text)
@property
def followers(self):
"""获取话题关注者
:return: 话题关注者,返回生成器
:rtype: Author.Iterable
"""
from .author import Author, ANONYMOUS
self._make_soup()
gotten_data_num = 20
data = {
'_xsrf': self.xsrf,
'start': '',
'offset': 0
}
while gotten_data_num == 20:
res = self._session.post(
Topic_Get_More_Follower_Url.format(self.id), data=data)
j = res.json()['msg']
gotten_data_num = j[0]
data['offset'] += gotten_data_num
soup = BeautifulSoup(j[1])
divs = soup.find_all('div', class_='zm-person-item')
for div in divs:
h2 = div.h2
url = Zhihu_URL + h2.a['href']
name = h2.a.text
motto = h2.parent.div.text.strip()
try:
yield Author(url, name, motto, session=self._session)
except ValueError: # invalid url
yield ANONYMOUS
data['start'] = int(re_get_number.match(divs[-1]['id']).group(1))
@property
@check_soup('_photo_url')
def photo_url(self):
"""获取话题头像图片地址.
:return: 话题头像url
:rtype: str
"""
img = self.soup.find('a', id='zh-avartar-edit-form').img['src']
return img.replace('_m', '_r')
@property
@check_soup('_description')
def description(self):
"""获取话题描述信息.
:return: 话题描述信息
:rtype: str
"""
desc = self.soup.find('div', class_='zm-editable-content').text
return desc
@property
def top_authors(self):
"""获取最佳回答者
:return: 此话题下最佳回答者,一般来说是5个,要不就没有,返回生成器
:rtype: Author.Iterable
"""
from .author import Author, ANONYMOUS
self._make_soup()
t = self.soup.find('div', id='zh-topic-top-answerer')
if t is None:
return
for d in t.find_all('div', class_='zm-topic-side-person-item-content'):
url = Zhihu_URL + d.a['href']
name = d.a.text
motto = d.find('span', class_='bio')['title']
try:
yield Author(url, name, motto, session=self._session)
except ValueError: # invalid url
yield ANONYMOUS
@property
def top_answers(self):
"""获取话题下的精华答案.
:return: 话题下的精华答案,返回生成器.
:rtype: Answer.Iterable
"""
from .question import Question
from .answer import Answer
from .author import Author, ANONYMOUS
top_answers_url = Topic_Top_Answers_Url.format(self.id)
params = {'page': 1}
while True:
            # beyond 50 pages: return directly
if params['page'] > 50:
return
res = self._session.get(top_answers_url, params=params)
params['page'] += 1
soup = BeautifulSoup(res.content)
            # fewer than 50 pages: we hit the error page, return
if soup.find('div', class_='error') is not None:
return
questions = soup.find_all('a', class_='question_link')
answers = soup.find_all('a', class_='answer-date-link')
authors = soup.find_all('div', class_='zm-item-answer-author-info')
upvotes = soup.find_all('a', class_='zm-item-vote-count')
for ans, up, q, au in zip(answers, upvotes, questions, authors):
answer_url = Zhihu_URL + ans['href']
question_url = Zhihu_URL + q['href']
question_title = q.text.strip()
upvote = up.text
if upvote.isdigit():
upvote = int(upvote)
else:
upvote = None
question = Question(question_url, question_title,
session=self._session)
if au.a is None:
author = ANONYMOUS
else:
author_url = Zhihu_URL + au.a['href']
author_name = au.a.text
author_motto = au.strong['title'] if au.strong else ''
author = Author(author_url, author_name, author_motto,
session=self._session)
yield Answer(answer_url, question, author, upvote,
session=self._session)
@property
def questions(self):
"""获取话题下的所有问题(按时间降序排列)
:return: 话题下所有问题,返回生成器
:rtype: Question.Iterable
"""
from .question import Question
question_url = Topic_Questions_Url.format(self.id)
params = {'page': 1}
older_time_stamp = int(time.time()) * 1000
while True:
res = self._session.get(question_url, params=params)
soup = BeautifulSoup(res.content)
if soup.find('div', class_='error') is not None:
return
questions = soup.find_all('div', class_='question-item')
questions = list(filter(
lambda x: int(x.h2.span['data-timestamp']) < older_time_stamp,
questions))
for qu_div in questions:
url = Zhihu_URL + qu_div.h2.a['href']
title = qu_div.h2.a.text.strip()
creation_time = datetime.fromtimestamp(
int(qu_div.h2.span['data-timestamp']) // 1000)
yield Question(url, title, creation_time=creation_time,
session=self._session)
older_time_stamp = int(questions[-1].h2.span['data-timestamp'])
params['page'] += 1
@property
def unanswered_questions(self):
"""获取话题下的等待回答的问题
什么是「等待回答」的问题:https://www.zhihu.com/question/40470324
:return: 话题下等待回答的问题,返回生成器
:rtype: Question.Iterable
"""
from .question import Question
question_url = Topic_Unanswered_Question_Url.format(self.id)
params = {'page': 1}
while True:
res = self._session.get(question_url, params=params)
soup = BeautifulSoup(res.content)
if soup.find('div', class_='error') is not None:
return
questions = soup.find_all('div', class_='question-item')
for qu_div in questions:
url = Zhihu_URL + qu_div.h2.a['href']
title = qu_div.h2.a.text.strip()
yield Question(url, title, session=self._session)
params['page'] += 1
@property
def answers(self):
"""获取话题下所有答案(按时间降序排列)
:return: 话题下所有答案,返回生成器
:rtype: Answer.Iterable
"""
from .question import Question
from .answer import Answer
from .author import Author, ANONYMOUS
newest_url = Topic_Newest_Url.format(self.id)
params = {'start': 0, '_xsrf': self.xsrf}
res = self._session.get(newest_url)
soup = BeautifulSoup(res.content)
while True:
divs = soup.find_all('div', class_='folding')
            # if the topic has no answers, return directly
if len(divs) == 0:
return
last_score = divs[-1]['data-score']
for div in divs:
q = div.find('a', class_="question_link")
question_url = Zhihu_URL + q['href']
question_title = q.text.strip()
question = Question(question_url, question_title,
session=self._session)
ans = div.find('a', class_='answer-date-link')
answer_url = Zhihu_URL + ans['href']
upvote = div.find('a', class_='zm-item-vote-count').text
if upvote.isdigit():
upvote = int(upvote)
else:
upvote = None
au = div.find('div', class_='zm-item-answer-author-info')
if au.a is None:
author = ANONYMOUS
else:
author_url = Zhihu_URL + au.a['href']
author_name = au.a.text
author_motto = au.strong['title'] if au.strong else ''
author = Author(author_url, author_name, author_motto,
session=self._session)
yield Answer(answer_url, question, author, upvote,
session=self._session)
params['offset'] = last_score
res = self._session.post(newest_url, data=params)
gotten_feed_num = res.json()['msg'][0]
            # return if zero feed items were received
if gotten_feed_num == 0:
return
soup = BeautifulSoup(res.json()['msg'][1])
@property
def hot_questions(self):
"""获取话题下热门的问题
:return: 话题下的热门动态中的问题,按热门度顺序返回生成器
:rtype: Question.Iterable
"""
from .question import Question
hot_questions_url = Topic_Hot_Questions_Url.format(self.id)
params = {'start': 0, '_xsrf': self.xsrf}
res = self._session.get(hot_questions_url)
soup = BeautifulSoup(res.content)
while True:
questions_duplicate = soup.find_all('a', class_='question_link')
            # if the topic has no questions, return directly
if len(questions_duplicate) == 0:
return
            # remove duplicate questions
questions = list(set(questions_duplicate))
questions.sort(key=self._get_score, reverse=True)
last_score = soup.find_all(
'div', class_='feed-item')[-1]['data-score']
for q in questions:
question_url = Zhihu_URL + q['href']
question_title = q.text.strip()
question = Question(question_url, question_title,
session=self._session)
yield question
params['offset'] = last_score
res = self._session.post(hot_questions_url, data=params)
gotten_feed_num = res.json()['msg'][0]
            # return if zero questions were received
if gotten_feed_num == 0:
return
soup = BeautifulSoup(res.json()['msg'][1])
@property
def hot_answers(self):
"""获取话题下热门的回答
:return: 话题下的热门动态中的回答,按热门度顺序返回生成器
:rtype: Question.Iterable
"""
from .question import Question
from .author import Author
from .answer import Answer
hot_questions_url = Topic_Hot_Questions_Url.format(self.id)
params = {'start': 0, '_xsrf': self.xsrf}
res = self._session.get(hot_questions_url)
soup = BeautifulSoup(res.content)
while True:
answers_div = soup.find_all('div', class_='feed-item')
last_score = answers_div[-1]['data-score']
for div in answers_div:
                # a missing textarea means the answer has been censored.
if not div.textarea:
continue
question_url = Zhihu_URL + div.h2.a['href']
question_title = div.h2.a.text.strip()
question = Question(question_url, question_title,
session=self._session)
author_link = div.find('a', class_='author-link')
if not author_link:
author_url = None
author_name = '匿名用户'
author_motto = ''
else:
author_url = Zhihu_URL + author_link['href']
author_name = author_link.text
author_motto_span = div.find('span', class_='bio')
author_motto = author_motto_span['title'] \
if author_motto_span else ''
author = Author(author_url, author_name, author_motto,
session=self._session)
body = div.find('div', class_='zm-item-rich-text')
answer_url = Zhihu_URL + body['data-entry-url']
upvote_num = int(div.find(
'div', class_='zm-item-vote-info')['data-votecount'])
yield Answer(answer_url, question, author, upvote_num,
session=self._session)
params['offset'] = last_score
res = self._session.post(hot_questions_url, data=params)
gotten_feed_num = res.json()['msg'][0]
            # return if zero items were received
if gotten_feed_num == 0:
return
soup = BeautifulSoup(res.json()['msg'][1])
@staticmethod
def _get_score(tag):
h2 = tag.parent
div = h2.parent
try:
_ = h2['class']
return div['data-score']
except KeyError:
return div.parent.parent['data-score']
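# Example usage (a sketch; the cookies file and topic URL are hypothetical,
# and a logged-in ZhihuClient is assumed):
#
#   client = ZhihuClient('cookies.json')
#   topic = client.topic('https://www.zhihu.com/topic/19550517')
#   print(topic.name, topic.follower_num)
#   for answer in topic.top_answers:
#       print(answer.url)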
| mit | -5,743,376,127,175,184,000 | 33.827731 | 79 | 0.495597 | false |
simonzhangsm/voltdb | tools/kit_tools/build_kits.py | 1 | 13604 | #!/usr/bin/env python
import argparse, datetime, getpass, os, sys, shutil, traceback
from fabric.api import run, cd, local, get, settings, lcd, put
from fabric_ssh_config import getSSHInfoForHost
from fabric.context_managers import shell_env
from fabric.utils import abort
#Login as user test, but build in a directory by real username
username = 'test'
builddir = "/tmp/" + getpass.getuser() + "Kits/buildtemp"
version = "UNKNOWN"
nativelibdir = "/nativelibs/obj" # ~test/libs/... usually
defaultlicensedays = 70 #default trial license length
################################################
# CHECKOUT CODE INTO A TEMP DIR
################################################
def checkoutCode(voltdbGit, proGit, rbmqExportGit, gitloc):
    global builddir
# clean out the existing dir
run("rm -rf " + builddir)
# make the build dir again
run("mkdir -p " + builddir)
# change to it
with cd(builddir):
# do the checkouts, collect checkout errors on both community &
# pro repos so user gets status on both checkouts
message = ""
run("git clone -q %s/voltdb.git" % gitloc)
result = run("cd voltdb; git checkout %s" % voltdbGit, warn_only=True)
if result.failed:
message = "VoltDB checkout failed. Missing branch %s." % rbmqExportGit
run("git clone -q %s/pro.git" % gitloc)
result = run("cd pro; git checkout %s" % proGit, warn_only=True)
if result.failed:
message += "\nPro checkout failed. Missing branch %s." % rbmqExportGit
#rabbitmq isn't mirrored internally, so don't use gitloc
run("git clone -q [email protected]:VoltDB/export-rabbitmq.git")
result = run("cd export-rabbitmq; git checkout %s" % rbmqExportGit, warn_only=True)
# Probably ok to use master for export-rabbitmq.
if result.failed:
print "\nExport-rabbitmg branch %s checkout failed. Defaulting to master." % rbmqExportGit
if len(message) > 0:
abort(message)
return run("cat voltdb/version.txt").strip()
################################################
# MAKE A RELEASE DIR
################################################
def makeReleaseDir(releaseDir):
# handle the case where a release dir exists for this version
if os.path.exists(releaseDir):
shutil.rmtree(releaseDir)
# create a release dir
os.makedirs(releaseDir)
print "Created dir: " + releaseDir
################################################
# BUILD THE COMMUNITY VERSION
################################################
def buildCommunity():
if build_mac:
packageMacLib="true"
else:
packageMacLib="false"
with cd(builddir + "/voltdb"):
run("pwd")
run("git status")
run("git describe --dirty")
run("ant -Djmemcheck=NO_MEMCHECK -Dkitbuild=%s %s clean default dist" % (packageMacLib, build_args))
################################################
# BUILD THE ENTERPRISE VERSION
################################################
def buildEnterprise():
if build_mac:
packageMacLib="true"
else:
packageMacLib="false"
with cd(builddir + "/pro"):
run("pwd")
run("git status")
run("git describe --dirty")
run("VOLTCORE=../voltdb ant -f mmt.xml -Djmemcheck=NO_MEMCHECK -Dallowreplication=true -DallowDrActiveActive=true -Dlicensedays=%d -Dkitbuild=%s %s clean dist.pro" % (defaultlicensedays, packageMacLib, build_args))
################################################
# BUILD THE PRO VERSION
################################################
#
def packagePro(version):
print "Making license"
makeTrialLicense(days=defaultlicensedays, dr_and_xdcr=False, nodes=3)
print "Repacking pro kit"
with cd(builddir + "/pro/obj/pro"):
run("mkdir pro_kit_staging")
with cd(builddir + "/pro/obj/pro/pro_kit_staging"):
run("tar xf ../voltdb-ent-%s.tar.gz" % version)
run("mv voltdb-ent-%s voltdb-pro-%s" % (version, version))
run("cp %s/pro/trial_*.xml voltdb-pro-%s/voltdb/license.xml" % (builddir, version))
run("tar cvfz ../voltdb-pro-%s.tar.gz voltdb-pro-%s" % (version, version))
################################################
# BUILD THE RABBITMQ EXPORT CONNECTOR
################################################
#Build rabbit MQ Exporter
def buildRabbitMQExport(version, dist_type):
# Paths to the final kit for unpacking/repacking with rmq export
paths = {
'community': builddir + "/voltdb/obj/release",
'ent' : builddir + "/pro/obj/pro/"
}
# Untar
with cd(paths[dist_type]):
run ("pwd")
run ("mkdir -p restage")
run ("tar xf voltdb-%s-%s.tar.gz -C restage" % (dist_type, version))
run ("rm -f voltdb-%s-%s.tar.gz" % (dist_type, version))
# Build RabbitMQ export jar and put it into the untarred kit
with cd(builddir + "/export-rabbitmq"):
run("pwd")
run("git status")
run("git describe --dirty", warn_only=True)
run("VOLTDIST=%s/restage/voltdb-%s-%s ant" % (paths[dist_type], dist_type, version))
# Retar
with cd(paths[dist_type]):
run("pwd")
run("tar -C restage -czf voltdb-%s-%s.tar.gz voltdb-%s-%s" % (dist_type, version, dist_type, version))
run ("rm -Rf restage")
################################################
# MAKE AN ENTERPRISE TRIAL LICENSE
################################################
# Must be called after buildEnterprise has been done
def makeTrialLicense(days=30, dr_and_xdcr="true", nodes=12):
with cd(builddir + "/pro/tools"):
run("./make_trial_licenses.pl -t %d -H %d -W %s" % (days, nodes, dr_and_xdcr ))
################################################
# MAKE A SHA256 checksum
################################################
def makeSHA256SUM(version, type):
with cd(builddir + "/pro/obj/pro"):
kitname="voltdb-" + type + "-" + version
run("sha256sum -b %s.tar.gz > %s.SHA256SUM" % (kitname, kitname))
################################################
# MAKE AN JAR FILES NEEDED TO PUSH TO MAVEN
################################################
def makeMavenJars():
with cd(builddir + "/voltdb"):
run("VOLTCORE=../voltdb ant -f build-client.xml maven-jars")
################################################
# COPY FILES
################################################
def copyFilesToReleaseDir(releaseDir, version, type=None):
print "Copying files to releaseDir"
if type:
typeString="-" + type
else:
typeString=""
get("%s/pro/obj/pro/voltdb%s-%s.tar.gz" % (builddir, typeString, version),
"%s/voltdb%s-%s.tar.gz" % (releaseDir, typeString, version))
get("%s/pro/obj/pro/voltdb%s-%s.SHA256SUM" % (builddir, typeString, version),
"%s/voltdb%s-%s.SHA256SUM" % (releaseDir, typeString, version))
def copyCommunityFilesToReleaseDir(releaseDir, version, operatingsys):
get("%s/voltdb/obj/release/voltdb-community-%s.tar.gz" % (builddir, version),
"%s/voltdb-community-%s.tar.gz" % (releaseDir, version))
# add stripped symbols
if operatingsys == "LINUX":
os.makedirs(releaseDir + "/other")
get("%s/voltdb/obj/release/voltdb-%s.sym" % (builddir, version),
"%s/other/%s-voltdb-voltkv-%s.sym" % (releaseDir, operatingsys, version))
def copyTrialLicenseToReleaseDir(releaseDir):
get("%s/pro/trial_*.xml" % (builddir),
"%s/license.xml" % (releaseDir))
def copyMavenJarsToReleaseDir(releaseDir, version):
#The .jars and upload file must be in a directory called voltdb - it is the projectname
mavenProjectDir = releaseDir + "/mavenjars/voltdb"
if not os.path.exists(mavenProjectDir):
os.makedirs(mavenProjectDir)
#Get the voltdbclient-n.n.jar from the recently built community build
get("%s/voltdb/obj/release/dist-client-java/voltdb/voltdbclient-%s.jar" % (builddir, version),
"%s/voltdbclient-%s.jar" % (mavenProjectDir, version))
#Get the upload.gradle file
get("%s/voltdb/tools/kit_tools/upload.gradle" % (builddir),
"%s/upload.gradle" % (mavenProjectDir))
#Get the src and javadoc .jar files
get("%s/voltdb/obj/release/voltdbclient-%s-javadoc.jar" % (builddir, version),
"%s/voltdbclient-%s-javadoc.jar" % (mavenProjectDir, version))
get("%s/voltdb/obj/release/voltdbclient-%s-sources.jar" % (builddir, version),
"%s/voltdbclient-%s-sources.jar" % (mavenProjectDir, version))
################################################
# CREATE CANDIDATE SYMLINKS
################################################
def createCandidateSysmlink(releaseDir):
    candidateDir = os.getenv('HOME') + "/releases/candidate"
local("rm -rf " + candidateDir)
local("ln -s %s %s" % (releaseDir, candidateDir))
################################################
# BACKUP RELEASE DIR
################################################
def backupReleaseDir(releaseDir,archiveDir,version):
if not os.path.exists(archiveDir):
os.makedirs(archiveDir)
# make a backup with the timstamp of the build
timestamp = datetime.datetime.now().strftime("%y%m%d-%H%M%S")
local("tar -czf %s/%s-%s.tgz %s" \
% (archiveDir, version, timestamp, releaseDir))
################################################
# REMOVE NATIVE LIBS FROM SHARED DIRECTORY
################################################
def rmNativeLibs():
# local("ls -l ~" + username + nativelibdir)
local("rm -rf ~" + username + nativelibdir)
################################################
# GET THE GIT TAGS OR SHAS TO BUILD FROM
################################################
parser = argparse.ArgumentParser(description = "Create a full kit. With no args, will do build of master")
parser.add_argument('voltdb_sha', nargs="?", default="master", help="voltdb repository commit, tag or branch" )
parser.add_argument('pro_sha', nargs="?", default="master", help="pro repository commit, tag or branch" )
parser.add_argument('rabbitmq_sha', nargs="?", default="master", help="rabbitmq repository commit, tag or branch" )
parser.add_argument('-g','--gitloc', default="[email protected]:VoltDB", help="Repository location. For example: /home/github-mirror")
parser.add_argument('--nomac', action='store_true', help="Don't build Mac OSX")
parser.add_argument('--nocommunity', action='store_true', help="Don't build community")
args = parser.parse_args()
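# Example invocations (sketches; the tags and mirror path are hypothetical):
#
#   ./build_kits.py                                   # master/master/master
#   ./build_kits.py voltdb-7.0 pro-7.0 master --nomac --nocommunity
#   ./build_kits.py master master master -g /home/github-mirror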
proTreeish = args.pro_sha
voltdbTreeish = args.voltdb_sha
rbmqExportTreeish = args.rabbitmq_sha
print args
build_community = not args.nocommunity
build_mac = not args.nomac
#If anything is missing we're going to dump this in oneoffs dir.
build_all = build_community and build_mac
if voltdbTreeish != proTreeish or not build_all:
oneOff = True
else:
oneOff = False
rmNativeLibs()
try:
build_args = os.environ['VOLTDB_BUILD_ARGS']
except:
build_args=""
print "Building with pro: %s and voltdb: %s" % (proTreeish, voltdbTreeish)
build_errors=False
versionCentos = "unknown"
versionMac = "unknown"
releaseDir = "unknown"
# get ssh config [key_filename, hostname]
CentosSSHInfo = getSSHInfoForHost("volt15a")
MacSSHInfo = getSSHInfoForHost("voltmini")
UbuntuSSHInfo = getSSHInfoForHost("volt12d")
# build community kit on the mini so that .so can be picked up for unified kit
if build_mac or build_community:
try:
with settings(user=username,host_string=MacSSHInfo[1],disable_known_hosts=True,key_filename=MacSSHInfo[0]):
versionMac = checkoutCode(voltdbTreeish, proTreeish, rbmqExportTreeish, args.gitloc)
buildCommunity()
except Exception as e:
print traceback.format_exc()
print "Could not build MAC kit. Exception: " + str(e) + ", Type: " + str(type(e))
build_errors=True
# build kits on 15f
try:
with settings(user=username,host_string=CentosSSHInfo[1],disable_known_hosts=True,key_filename=CentosSSHInfo[0]):
versionCentos = checkoutCode(voltdbTreeish, proTreeish, rbmqExportTreeish, args.gitloc)
if build_mac:
assert versionCentos == versionMac
if oneOff:
releaseDir = "%s/releases/one-offs/%s-%s-%s" % \
(os.getenv('HOME'), versionCentos, voltdbTreeish, proTreeish)
else:
releaseDir = os.getenv('HOME') + "/releases/" + voltdbTreeish
makeReleaseDir(releaseDir)
print "VERSION: " + versionCentos
if build_community:
buildCommunity()
buildRabbitMQExport(versionCentos, "community")
copyCommunityFilesToReleaseDir(releaseDir, versionCentos, "LINUX")
buildEnterprise()
buildRabbitMQExport(versionCentos, "ent")
makeSHA256SUM(versionCentos,"ent")
copyFilesToReleaseDir(releaseDir, versionCentos, "ent")
packagePro(versionCentos)
makeSHA256SUM(versionCentos,"pro")
copyFilesToReleaseDir(releaseDir, versionCentos, "pro")
makeTrialLicense()
copyTrialLicenseToReleaseDir(releaseDir)
makeMavenJars()
copyMavenJarsToReleaseDir(releaseDir, versionCentos)
except Exception as e:
print traceback.format_exc()
print "Could not build LINUX kit. Exception: " + str(e) + ", Type: " + str(type(e))
build_errors=True
rmNativeLibs() # cleanup imported native libs so not picked up unexpectedly by other builds
exit (build_errors)
#archiveDir = os.path.join(os.getenv('HOME'), "releases", "archive", voltdbTreeish, versionCentos)
#backupReleaseDir(releaseDir, archiveDir, versionCentos)
| agpl-3.0 | -8,301,504,353,190,448,000 | 38.204611 | 222 | 0.600706 | false |
boland1992/SeisSuite | bin/SNR_plots.py | 1 | 4053 | # -*- coding: utf-8 -*-
"""
Created on Fri Oct 16 23:34:00 2015
@author: boland
"""
from seissuite.ant import (pscrosscorr)
import glob
import os
import pickle
#PICKLE_PATH = '/storage/ANT/PROGRAMS/ANT_OUTPUT/OUTPUT/CROSS/06.05.2015-15:53:28/XCORR-STACK_01.01.2014-31.12.2014_datalesspaz.pickle'
#PICKLE_PATH = '/home/boland/Desktop/XCORR-STACK_01.08.1999-10.06.2000_datalesspaz.part.pickle'
# import CONFIG class initalised in ./configs/tmp_config.pickle
config_pickle = 'configs/tmp_config.pickle'
f = open(name=config_pickle, mode='rb')
CONFIG = pickle.load(f)
f.close()
# import variables from initialised CONFIG class.
# import variables from initialised CONFIG class.
MSEED_DIR = CONFIG.MSEED_DIR
DATABASE_DIR = CONFIG.DATABASE_DIR
DATALESS_DIR = CONFIG.DATALESS_DIR
STATIONXML_DIR = CONFIG.STATIONXML_DIR
CROSSCORR_DIR = CONFIG.CROSSCORR_DIR
USE_DATALESSPAZ = CONFIG.USE_DATALESSPAZ
USE_STATIONXML = CONFIG.USE_STATIONXML
CROSSCORR_STATIONS_SUBSET = CONFIG.CROSSCORR_STATIONS_SUBSET
CROSSCORR_SKIPLOCS = CONFIG.CROSSCORR_SKIPLOCS
FIRSTDAY = CONFIG.FIRSTDAY
LASTDAY = CONFIG.LASTDAY
MINFILL = CONFIG.MINFILL
FREQMIN = CONFIG.FREQMIN
FREQMAX = CONFIG.FREQMAX
CORNERS = CONFIG.CORNERS
ZEROPHASE = CONFIG.ZEROPHASE
PERIOD_RESAMPLE = CONFIG.PERIOD_RESAMPLE
ONEBIT_NORM = CONFIG.ONEBIT_NORM
FREQMIN_EARTHQUAKE = CONFIG.FREQMIN_EARTHQUAKE
FREQMAX_EARTHQUAKE = CONFIG.FREQMAX_EARTHQUAKE
WINDOW_TIME = CONFIG.WINDOW_TIME
WINDOW_FREQ = CONFIG.WINDOW_FREQ
XCORR_INTERVAL = CONFIG.XCORR_INTERVAL
CROSSCORR_TMAX = CONFIG.CROSSCORR_TMAX
PLOT_CLASSIC = CONFIG.PLOT_CLASSIC
PLOT_DISTANCE = CONFIG.PLOT_DISTANCE
MAX_DISTANCE = CONFIG.MAX_DISTANCE
pickle_list = []
folder_list = sorted(glob.glob(os.path.join(CROSSCORR_DIR, '*')))
print MSEED_DIR
print CROSSCORR_TMAX
for folder in folder_list:
#check to see if there are any pickle files in the xcorr time folder
if len(glob.glob(os.path.join(folder, '*.pickle'))) < 1:
#print("There are no .pickle files in this folder. Skipping ...")
continue
else:
for file_ in glob.glob(os.path.join(folder, '*.pickle')):
if 'metadata' not in file_ and '.part' not in file_:
pickle_list.append(file_)
if len(pickle_list) < 1:
print("\nThere are no pickle files to begin from.")
raise Exception("No pickle files to process, first run the programme.")
res = ""
else:
print "\nPlease choose a file to process."
#print combinations of partial pickle files available
print '\n'.join('{} - {}'.format(i + 1, f.split('/')[-2])
for i, f in enumerate(pickle_list))
#change folder_list to pickle_list if this gives problems
res = raw_input('\n')
if not res:
raise Exception("You must choose one a number betwen {} and {}"\
.format(1, len(pickle_list)))
else:
PICKLE_PATH = pickle_list[int(res)-1]
OUTFILESPATH = PICKLE_PATH[:-7]
out_basename = os.path.basename(OUTFILESPATH)
OUTPATH = os.path.dirname(OUTFILESPATH)
OUT_SNR = os.path.join(OUTPATH, 'SNR_PLOTS')
print "\nOpening {} file to process ... ".format(OUT_SNR)
# re-initialising .part.pickle collection of cross-correlations
xc = pscrosscorr.load_pickled_xcorr(PICKLE_PATH)
# optimizing time-scale: max time = max distance / vmin (vmin = 2.5 km/s)
maxdist = max([xc[s1][s2].dist() for s1, s2 in xc.pairs()])
maxt = min(CROSSCORR_TMAX, maxdist / 2.5)
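# e.g. (illustrative numbers, not from the data): maxdist = 400 km with vmin = 2.5 km/s
# gives maxt = min(CROSSCORR_TMAX, 160 s), so the window still covers the slowest arrivals.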
#plot distance plot of cross-correlations
#xc.plot(plot_type='distance', xlim=(-maxt, maxt),
#outfile="/home/boland/Desktop/something1342.png", showplot=False)
#plot individual cross-correlations
#xc.plot(plot_type='classic', xlim=(-maxt, maxt),
# outfile="/home/boland/Desktop/something1342.png", showplot=False)
#xc.plot_SNR(plot_type='all', outfile=OUT_SNR,
# config=os.path.basename(config_file))
xc.plot_SNR(plot_type='individual', outfile=OUT_SNR) | gpl-3.0 | 5,854,662,131,405,317,000 | 33.649573 | 135 | 0.681964 | false |
jkandasa/integration_tests | cfme/tests/services/test_iso_service_catalogs.py | 1 | 4853 | # -*- coding: utf-8 -*-
import fauxfactory
import pytest
from widgetastic.utils import partial_match
from cfme.common.provider import cleanup_vm
from cfme.services.catalogs.catalog_item import CatalogItem
from cfme.services.service_catalogs import ServiceCatalogs
from cfme.infrastructure.provider import InfraProvider
from cfme.infrastructure.pxe import get_template_from_config, ISODatastore
from cfme import test_requirements
from cfme.utils import testgen
from cfme.utils.log import logger
from cfme.utils.conf import cfme_data
from cfme.utils.blockers import BZ
pytestmark = [
pytest.mark.meta(server_roles="+automate"),
pytest.mark.usefixtures('vm_name', 'uses_infra_providers'),
test_requirements.service,
pytest.mark.tier(2)
]
def pytest_generate_tests(metafunc):
# Filter out providers without provisioning data or hosts defined
argnames, argvalues, idlist = testgen.providers_by_class(
metafunc, [InfraProvider], required_fields=[
'iso_datastore',
['provisioning', 'host'],
['provisioning', 'datastore'],
['provisioning', 'iso_template'],
['provisioning', 'iso_file'],
['provisioning', 'iso_kickstart'],
['provisioning', 'iso_root_password'],
['provisioning', 'iso_image_type'],
['provisioning', 'vlan'],
])
argnames = argnames + ['iso_cust_template', 'iso_datastore']
new_idlist = []
new_argvalues = []
for i, argvalue_tuple in enumerate(argvalues):
args = dict(zip(argnames, argvalue_tuple))
iso_cust_template = args['provider'].data['provisioning']['iso_kickstart']
if iso_cust_template not in cfme_data.get('customization_templates', {}).keys():
continue
argvalues[i].append(get_template_from_config(iso_cust_template))
argvalues[i].append(ISODatastore(args['provider'].name))
new_idlist.append(idlist[i])
new_argvalues.append(argvalues[i])
testgen.parametrize(metafunc, argnames, new_argvalues, ids=new_idlist, scope="module")
@pytest.fixture(scope="function")
def setup_iso_datastore(setup_provider_modscope, iso_cust_template, iso_datastore, provisioning):
if not iso_datastore.exists():
iso_datastore.create()
iso_datastore.set_iso_image_type(provisioning['iso_file'], provisioning['iso_image_type'])
if not iso_cust_template.exists():
iso_cust_template.create()
@pytest.yield_fixture(scope="function")
def catalog_item(setup_provider, provider, vm_name, dialog, catalog, provisioning):
iso_template, host, datastore, iso_file, iso_kickstart,\
iso_root_password, iso_image_type, vlan = map(provisioning.get, ('pxe_template', 'host',
'datastore', 'iso_file', 'iso_kickstart',
'iso_root_password', 'iso_image_type', 'vlan'))
provisioning_data = {
'catalog': {'vm_name': vm_name,
'provision_type': 'ISO',
'iso_file': {'name': iso_file},
},
'environment': {'host_name': {'name': host},
'datastore_name': {'name': datastore},
},
'customize': {'custom_template': {'name': iso_kickstart},
'root_password': iso_root_password,
},
'network': {'vlan': partial_match(vlan),
},
}
item_name = fauxfactory.gen_alphanumeric()
catalog_item = CatalogItem(item_type="RHEV", name=item_name,
description="my catalog", display_in=True, catalog=catalog,
dialog=dialog, catalog_name=iso_template,
provider=provider, prov_data=provisioning_data)
yield catalog_item
@pytest.mark.usefixtures('setup_iso_datastore')
@pytest.mark.meta(blockers=[BZ(1358069, forced_streams=["5.6", "5.7", "upstream"])])
def test_rhev_iso_servicecatalog(appliance, setup_provider, provider, catalog_item, request):
"""Tests RHEV ISO service catalog
Metadata:
test_flag: iso, provision
"""
vm_name = catalog_item.provisioning_data['catalog']["vm_name"]
request.addfinalizer(lambda: cleanup_vm(vm_name + "_0001", provider))
catalog_item.create()
service_catalogs = ServiceCatalogs(appliance, catalog_item.catalog, catalog_item.name)
service_catalogs.order()
# nav to requests page happens on successful provision
logger.info('Waiting for cfme provision request for service %s', catalog_item.name)
request_description = catalog_item.name
provision_request = appliance.collections.requests.instantiate(request_description,
partial_check=True)
provision_request.wait_for_request()
assert provision_request.is_succeeded()
| gpl-2.0 | -433,454,917,170,736,100 | 40.478632 | 97 | 0.63981 | false |
fras2560/mlsb-platform | api/basic/bat.py | 1 | 8517 | '''
@author: Dallas Fraser
@date: 2016-04-12
@organization: MLSB API
@summary: The basic bat API
'''
from flask_restful import Resource, reqparse
from flask import Response, request
from json import dumps
from api import DB
from api.model import Bat
from api.authentication import requires_admin
from api.errors import BatDoesNotExist
from api.variables import PAGE_SIZE
from api.routes import Routes
from api.helper import pagination_response
from api.cached_items import handle_table_change
from api.tables import Tables
parser = reqparse.RequestParser()
parser.add_argument('player_id', type=int)
parser.add_argument('rbi', type=int)
parser.add_argument('game_id', type=int)
parser.add_argument('hit', type=str)
parser.add_argument('inning', type=int)
parser.add_argument('team_id', type=str)
post_parser = reqparse.RequestParser(bundle_errors=True)
post_parser.add_argument('player_id', type=int, required=True)
post_parser.add_argument('rbi', type=int)
post_parser.add_argument('game_id', type=int, required=True)
post_parser.add_argument('hit', type=str, required=True)
post_parser.add_argument('inning', type=int)
post_parser.add_argument('team_id', type=str, required=True)
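# Illustrative POST payload accepted by the parsers above (all values made up):
#   {"player_id": 7, "game_id": 12, "team_id": "3", "hit": "hr", "rbi": 2, "inning": 5}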
class BatAPI(Resource):
def get(self, bat_id):
"""
GET request for Bat Object matching given bat_id
Route: Routes['bat']/<bat_id: int>
Returns:
if found
status: 200
mimetype: application/json
data:
{
'bat_id': int,
'game_id': int,
'team_id': int,
'team': string,
'rbi': int,
'hit': string,
'inning': int,
'player_id': int,
'player': string
}
otherwise
status: 404
mimetype: application/json
data: None
"""
entry = Bat.query.get(bat_id)
if entry is None:
raise BatDoesNotExist(payload={'details': bat_id})
response = Response(dumps(entry.json()), status=200,
mimetype="application/json")
return response
@requires_admin
def delete(self, bat_id):
"""
DELETE request for Bat
Route: Routes['bat']/<bat_id: int>
Returns:
status: 200
mimetype: application/json
data:
success: tells if request was successful (boolean)
message: the status message (string)
"""
bat = Bat.query.get(bat_id)
if bat is None:
raise BatDoesNotExist(payload={'details': bat_id})
# delete a single bat
DB.session.delete(bat)
DB.session.commit()
response = Response(dumps(None),
status=200,
mimetype="application/json")
handle_table_change(Tables.BAT, item=bat.json())
return response
@requires_admin
def put(self, bat_id):
"""
PUT request for Bat
Route: Routes['bat']/<bat_id: int>
Parameters :
game_id: the id of the game (int)
player_id: the id of the batter (int)
rbi: the number of runs batted in (int)
hit: the type of hit (string)
inning: the inning the hit occurred (int)
team_id: the id of the team (int)
Returns:
status: 200
mimetype: application/json
data:
success: tells if request was successful (boolean)
message: the status message (string)
failures: a list of parameters that failed to update
(list of string)
"""
# update a single bat
args = parser.parse_args()
bat = Bat.query.get(bat_id)
player_id = None
team_id = None
game_id = None
rbi = None
hit = None
inning = None
if bat is None:
raise BatDoesNotExist(payload={'details': bat_id})
if args['team_id']:
team_id = args['team_id']
if args['game_id']:
game_id = args['game_id']
if args['player_id']:
player_id = args['player_id']
if args['rbi']:
rbi = args['rbi']
if args['hit']:
hit = args['hit']
if args['inning']:
inning = args['inning']
bat.update(player_id=player_id,
team_id=team_id,
game_id=game_id,
rbi=rbi,
hit=hit,
inning=inning)
DB.session.commit()
response = Response(dumps(None), status=200,
mimetype="application/json")
handle_table_change(Tables.BAT, item=bat.json())
return response
def option(self):
return {'Allow': 'PUT'}, 200, \
{'Access-Control-Allow-Origin': '*',
'Access-Control-Allow-Methods': 'PUT,GET'}
class BatListAPI(Resource):
def get(self):
"""
GET request for Bats List
Route: Routes['bat']
Parameters :
Returns:
status: 200
mimetype: application/json
data:
games: [ {
'bat_id': int,
'game_id': int,
'team_id': int,
'team': string,
'rbi': int,
'hit': string,
'inning': int,
'player_id': int,
'player': string
}
,{...}
]
"""
# return a pagination of bats
page = request.args.get('page', 1, type=int)
pagination = Bat.query.paginate(page, PAGE_SIZE, False)
result = pagination_response(pagination, Routes['bat'])
resp = Response(dumps(result), status=200,
mimetype="application/json")
return resp
@requires_admin
def post(self):
"""
POST request for Bats List
Route: Routes['bat']
Parameters :
game_id: the id of the game (int)
player_id: the id of the batter (int)
rbi: the number of runs batted in (int)
hit: the type of hit (string)
inning: the inning the hit occurred (int)
team_id: the id of the team (int)
Returns:
if successful
status: 200
mimetype: application/json
data: the created bat id (int)
otherwise possible errors
status: 400, GDNESC, PDNESC, TDNESC
mimetype: application/json
data: None
"""
# create a new bat
args = post_parser.parse_args()
game_id = None
player_id = None
team_id = None
rbi = 0
hit = None
inning = 1 # just assume some first inning
if args['game_id']:
game_id = args['game_id']
if args['player_id']:
player_id = args['player_id']
if args['team_id']:
team_id = args['team_id']
if args['hit']:
hit = args['hit']
if args['rbi']:
rbi = args['rbi']
if args['inning']:
inning = args['inning']
bat = Bat(player_id,
team_id,
game_id,
hit,
inning=inning,
rbi=rbi)
DB.session.add(bat)
DB.session.commit()
bat_id = bat.id
resp = Response(dumps(bat_id), status=201, mimetype="application/json")
handle_table_change(Tables.BAT, item=bat.json())
return resp
def option(self):
return {'Allow': 'PUT'}, 200, \
{'Access-Control-Allow-Origin': '*',
'Access-Control-Allow-Methods': 'PUT,GET'}
| apache-2.0 | -8,683,075,583,298,692,000 | 33.905738 | 79 | 0.480099 | false |
waveform80/picroscopy | picroscopy/terminal.py | 1 | 10386 | #!/usr/bin/env python3
# vim: set et sw=4 sts=4 fileencoding=utf-8:
# Copyright 2013 Dave Hughes.
#
# This file is part of picroscopy.
#
# picroscopy is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# picroscopy is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# picroscopy. If not, see <http://www.gnu.org/licenses/>.
"""
This module defines the command line interface for executing the picroscopy
application. The main class, PicroscopyConsoleApp, handles parsing of command
line parameters and configuration files, configuration of the logging system,
and of course launching the application itself within the reference WSGI
server included with Python.
"""
import os
import sys
import logging
import argparse
import subprocess
import locale
import configparser
from wsgiref.simple_server import make_server
# Try and use Python 3.3's ipaddress module if available. Fallback on the 3rd
# party IPy library if not
try:
from ipaddress import IPv4Address, IPv4Network
except ImportError:
from IPy import IP as IPv4Address
IPv4Network = IPv4Address
from picroscopy import __version__
from picroscopy.wsgi import PicroscopyWsgiApp
# Use the user's default locale instead of C
locale.setlocale(locale.LC_ALL, '')
# Set up a console logging handler which just prints messages to stderr without
# any other adornments. This will be used for logging messages sent before we
# "properly" configure logging according to the user's preferences
_CONSOLE = logging.StreamHandler(sys.stderr)
_CONSOLE.setFormatter(logging.Formatter('%(message)s'))
_CONSOLE.setLevel(logging.DEBUG)
logging.getLogger().addHandler(_CONSOLE)
# Determine the location of the current module on the filesystem
HERE = os.path.abspath(os.path.dirname(__file__))
def size(s):
"""
Parses a string containing a Width[xHeight] image size specification.
"""
if 'x' in s:
w, h = s.split('x', 1)
else:
w = h = s
if not (w.isdigit() and h.isdigit()):
raise ValueError(
'size "%s" is invalid; width and/or height are not numbers' % s)
return (int(w), int(h))
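# Illustrative: size('640x480') -> (640, 480); size('512') -> (512, 512)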
def interface(s):
"""
Parses a string containing a host[:port] specification.
"""
if not s:
return None
if ':' in s:
host, port = s.split(':', 1)
if not host:
host = '0.0.0.0'
if port.isdigit():
port = int(port)
else:
host = s
port = 80
return (host, port)
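# Illustrative: interface('0.0.0.0:8000') -> ('0.0.0.0', 8000);
# interface(':8080') -> ('0.0.0.0', 8080); interface('myhost') -> ('myhost', 80)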
def network(s):
"""
Parses a string containing a network[/cidr] specification.
"""
if not s:
return None
return IPv4Network(s)
class PicroscopyConsoleApp(object):
"""
The picroscopy application launches a live view of the Raspberry Pi's
camera and serves a web application providing control of the camera.
Various options can be used to configure which network clients can access
the web application, and the paths which the camera will use when writing
images.
"""
def __init__(self):
super().__init__()
self.parser = argparse.ArgumentParser(
description=self.__doc__,
# suppress creation of unspecified attributes
argument_default=argparse.SUPPRESS
)
self.parser.set_defaults(log_level=logging.WARNING)
self.parser.add_argument('--version', action='version',
version=__version__)
self.parser.add_argument(
'-c', '--config', dest='config', action='store',
help='specify the configuration file to load')
self.parser.add_argument(
'-q', '--quiet', dest='log_level', action='store_const',
const=logging.ERROR, help='produce less console output')
self.parser.add_argument(
'-v', '--verbose', dest='log_level', action='store_const',
const=logging.INFO, help='produce more console output')
self.parser.add_argument(
'-l', '--log-file', dest='log_file', metavar='FILE', default=None,
help='log messages to the specified file')
self.parser.add_argument(
'-P', '--pdb', dest='debug', action='store_true', default=False,
help='run under PuDB/PDB (debug mode)')
self.parser.add_argument(
'-L', '--listen', dest='listen', action='store',
default='0.0.0.0:%d' % (8000 if os.geteuid() else 80),
metavar='HOST[:PORT]', type=interface,
help='the address and port of the interface the web-server will '
'listen on. Default: %(default)s')
self.parser.add_argument(
'-C', '--clients', dest='clients', action='store',
default='0.0.0.0/0', metavar='NETWORK[/LEN]', type=network,
help='the network that clients must belong to. '
'Default: %(default)s')
self.parser.add_argument(
'--images-dir', dest='images_dir', action='store', metavar='DIR',
help='the directory in which to store images taken by the camera. '
'Defaults to a temporary directory')
self.parser.add_argument(
'--thumbs-dir', dest='thumbs_dir', action='store', metavar='DIR',
help='the directory in which to store the thumbnail of images '
'taken by the camera. Defaults to a temporary directory')
self.parser.add_argument(
'--thumbs-size', dest='thumbs_size', action='store',
default='320x320', metavar='WIDTHxHEIGHT', type=size,
help='the size that thumbnails should be generated at by the '
'website. Default: %(default)s')
self.parser.add_argument(
'--email-from', dest='email_from', action='store',
default='picroscopy', metavar='USER[@HOST]',
help='the address from which email will appear to be sent. '
'Default: %(default)s')
email_group = self.parser.add_mutually_exclusive_group()
email_group.add_argument(
'--sendmail', dest='sendmail', action='store',
default='/usr/sbin/sendmail', metavar='EXEC',
help='use the specified sendmail binary to send email. '
'Default: %(default)s')
email_group.add_argument(
'--smtp-server', dest='smtp_server', action='store',
metavar='HOST[:PORT]', type=interface,
help='send email directly using the specified SMTP smarthost '
'(mutually exclusive with --sendmail)')
def __call__(self, args=None):
if args is None:
args = sys.argv[1:]
args = self.read_configuration(args)
args = self.parser.parse_args(args)
self.configure_logging(args)
if args.debug:
try:
import pudb
except ImportError:
pudb = None
import pdb
return (pudb or pdb).runcall(self.main, args)
else:
try:
return self.main(args) or 0
except Exception as e:
logging.error(str(e))
return 1
def read_configuration(self, args):
# Parse the --config argument only
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('-c', '--config', dest='config', action='store')
conf_args, args = parser.parse_known_args(args)
conf_files = [
'/etc/picroscopy.ini', # system wide config
'/usr/local/etc/picroscopy.ini', # alternate system config
os.path.expanduser('~/.picroscopy.ini'), # user config
]
if conf_args.config:
conf_files.append(conf_args.config)
config = configparser.ConfigParser(interpolation=None)
logging.info('Reading configuration from %s', ', '.join(conf_files))
conf_read = config.read(conf_files)
if conf_args.config and conf_args.config not in conf_read:
            self.parser.error('unable to read %s' % conf_args.config)
if conf_read:
section = 'picroscopy'
if not section in config:
self.parser.error(
'unable to locate [picroscopy] section in configuration')
self.parser.set_defaults(**{
key:
config.getboolean(section, key)
if key in ('pdb', 'gstreamer') else
config.get(section, key)
for key in (
'pdb',
'log_file',
'listen',
'clients',
'images_dir',
'thumbs_dir',
'email_from',
'sendmail',
'smtp_server',
)
if key in config[section]
})
return args
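    # Illustrative ~/.picroscopy.ini consumed by read_configuration above
    # (keys are the subset listed there; the values here are made up):
    #   [picroscopy]
    #   listen = 0.0.0.0:8080
    #   clients = 192.168.1.0/24
    #   images_dir = /var/lib/picroscopy/images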
def configure_logging(self, args):
_CONSOLE.setLevel(args.log_level)
if args.log_file:
log_file = logging.FileHandler(args.log_file)
log_file.setFormatter(
logging.Formatter('%(asctime)s, %(levelname)s, %(message)s'))
log_file.setLevel(logging.DEBUG)
logging.getLogger().addHandler(log_file)
if args.debug:
logging.getLogger().setLevel(logging.DEBUG)
else:
logging.getLogger().setLevel(logging.INFO)
def main(self, args):
app = PicroscopyWsgiApp(**vars(args))
try:
# XXX Print IP address in big font (display image? ascii art?)
# XXX Or perhaps overlay IP address and client config on display?
httpd = make_server(args.listen[0], args.listen[1], app)
logging.info('Listening on %s:%s' % (args.listen[0], args.listen[1]))
httpd.serve_forever()
finally:
app.library.camera.close()
return 0
main = PicroscopyConsoleApp()
if __name__ == '__main__':
sys.exit(main())
| gpl-3.0 | -1,937,034,352,929,545,000 | 37.609665 | 81 | 0.599268 | false |
sani-coop/tinjaca | doc/informe1/_graphviz/fomdes_proc4.py | 1 | 2522 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
BPMN diagram for FOMDES process 1
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from bpmn_pgv import *
import pygraphviz as pgv
__author__ = 'mapologo'
PROCESS_LABEL = "Liquidación de Créditos"
# A graph for FOMDES processes
F = pgv.AGraph(strict=False, directed=True)
F.graph_attr.update(label="", rankdir="TB", splines="ortho", labelloc="b",
size="8, 7.5", forcelabels="true", ranksep="0.25", fontname="Liberation Sans Narrow Condensed")
F.node_attr.update(fontname="Liberation Sans Narrow Condensed")
F.edge_attr.update(fontname="Liberation Sans Narrow Condensed", fontsize="10")
se_cluster = {"se7": ("Recibir el documento protocolizado", "start"),
"se8": ("Revisar el documento protocolizado", "human"),
"se9": ("", "end")}
se_edges = {"se7": {"se8": {}},
"se8": {"se9": {"style": "invis"}}}
SE = add_cluster(F, "se", "Secretaría Ejecutiva", se_cluster, se_edges)
p_cluster = {"p1": ("Firma del cheque", "human"),
"p2": ("Entregar cheque a beneficiario", "message")}
p_edges = {"p1":{"p2": {}}}
P = add_cluster(F, "p", "Presidencia", p_cluster, p_edges)
pr_cluster = {"pr2": ("Verificar documentación legal y elaborar certificación de disponibilidad", "human"),
"pr3": ("Crear las cuentas por cobrar", "human"),
"pr4": ("Generar tablas de amortización", "human"),
"pr5": ("Imprimir y firmar orden de liquidación y cheque", "human")}
pr_edges = {"pr2": {"pr3": {}},
"pr3": {"pr4": {}},
"pr4": {"pr5": {}}}
PR = add_cluster(F, "pr", "Presupuesto/Administración", pr_cluster, pr_edges)
F.add_node("SIGEFOMDES Administración", image=IMAGE_PATH + "database.png", shape="plaintext", label="", xlabel="SIGEFOMDES Administración")
F.add_node("SISAC", image=IMAGE_PATH + "database.png", shape="plaintext", label="", xlabel="SISAC")
global_edges = {"Beneficiario": {"se7": {"style": "dashed"}},
"se8": {"pr2": {"style": "dashed"}},
"pr3": {"SIGEFOMDES Administración": {"style": "dashed"}},
"pr4": {"SISAC": {"style": "dashed"}},
"pr5": {"p1": {"style": "dashed"}},
"p2": {"se9": {"style": "dashed"}, "Beneficiario": {"style": "dashed"}}}
add_edges(F, global_edges)
F.draw("proc4.png", prog='dot')
F.write("proc4.dot")
| gpl-2.0 | -3,765,425,523,692,363,000 | 36.477612 | 139 | 0.597372 | false |
shashwat91/Wireless_Networking-ET4394 | GNU_Radio/Output/Output_window.py | 1 | 8961 | #!/usr/bin/env python2
##################################################
# GNU Radio Python Flow Graph
# Title: Output Window
# Generated: Sat Apr 30 16:45:27 2016
##################################################
if __name__ == '__main__':
import ctypes
import sys
if sys.platform.startswith('linux'):
try:
x11 = ctypes.cdll.LoadLibrary('libX11.so')
x11.XInitThreads()
except:
print "Warning: failed to XInitThreads()"
from gnuradio import analog
from gnuradio import blocks
from gnuradio import eng_notation
from gnuradio import fft
from gnuradio import gr
from gnuradio import wxgui
from gnuradio.eng_option import eng_option
from gnuradio.fft import window
from gnuradio.filter import firdes
from gnuradio.wxgui import fftsink2
from gnuradio.wxgui import forms
from gnuradio.wxgui import numbersink2
from grc_gnuradio import wxgui as grc_wxgui
from optparse import OptionParser
import osmosdr
import time
import wx
class Output_window(grc_wxgui.top_block_gui):
def __init__(self):
grc_wxgui.top_block_gui.__init__(self, title="Output Window")
_icon_path = "/usr/share/icons/hicolor/32x32/apps/gnuradio-grc.png"
self.SetIcon(wx.Icon(_icon_path, wx.BITMAP_TYPE_ANY))
##################################################
# Variables
##################################################
self.threshold = threshold = -55
self.samp_rate = samp_rate = 2.048e6
self.freq = freq = 658e6
self.fft_size = fft_size = 1.024e3
##################################################
# Blocks
##################################################
self.notebook = self.notebook = wx.Notebook(self.GetWin(), style=wx.NB_TOP)
self.notebook.AddPage(grc_wxgui.Panel(self.notebook), "Spectrum")
self.notebook.AddPage(grc_wxgui.Panel(self.notebook), "Output")
self.notebook.AddPage(grc_wxgui.Panel(self.notebook), "Stream")
self.Add(self.notebook)
_threshold_sizer = wx.BoxSizer(wx.VERTICAL)
self._threshold_text_box = forms.text_box(
parent=self.notebook.GetPage(1).GetWin(),
sizer=_threshold_sizer,
value=self.threshold,
callback=self.set_threshold,
label="Threshold",
converter=forms.float_converter(),
proportion=0,
)
self._threshold_slider = forms.slider(
parent=self.notebook.GetPage(1).GetWin(),
sizer=_threshold_sizer,
value=self.threshold,
callback=self.set_threshold,
minimum=-100,
maximum=0,
num_steps=100,
style=wx.SL_HORIZONTAL,
cast=float,
proportion=1,
)
self.notebook.GetPage(1).Add(_threshold_sizer)
_freq_sizer = wx.BoxSizer(wx.VERTICAL)
self._freq_text_box = forms.text_box(
parent=self.notebook.GetPage(0).GetWin(),
sizer=_freq_sizer,
value=self.freq,
callback=self.set_freq,
label="freq",
converter=forms.float_converter(),
proportion=0,
)
self._freq_slider = forms.slider(
parent=self.notebook.GetPage(0).GetWin(),
sizer=_freq_sizer,
value=self.freq,
callback=self.set_freq,
minimum=10e6,
maximum=10e9,
num_steps=100,
style=wx.SL_HORIZONTAL,
cast=float,
proportion=1,
)
self.notebook.GetPage(0).Add(_freq_sizer)
self.wxgui_numbersink2_1 = numbersink2.number_sink_f(
self.notebook.GetPage(1).GetWin(),
unit="signal present",
minval=0,
maxval=1,
factor=1,
decimal_places=0,
ref_level=0,
sample_rate=samp_rate,
number_rate=15,
average=False,
avg_alpha=None,
label="Signal Detection",
peak_hold=False,
show_gauge=True,
)
self.notebook.GetPage(1).Add(self.wxgui_numbersink2_1.win)
self.wxgui_numbersink2_0 = numbersink2.number_sink_f(
self.notebook.GetPage(1).GetWin(),
unit="dB",
minval=-120,
maxval=0,
factor=1.0,
decimal_places=10,
ref_level=0,
sample_rate=samp_rate,
number_rate=15,
average=False,
avg_alpha=30e-3,
label="level",
peak_hold=False,
show_gauge=False,
)
self.notebook.GetPage(1).Add(self.wxgui_numbersink2_0.win)
self.wxgui_fftsink2_0 = fftsink2.fft_sink_c(
self.notebook.GetPage(0).GetWin(),
baseband_freq=freq,
y_per_div=5,
y_divs=10,
ref_level=0,
ref_scale=2.0,
sample_rate=samp_rate,
fft_size=1024,
fft_rate=15,
average=True,
avg_alpha=30e-3,
title="Spectrum",
peak_hold=False,
win=window.rectangular,
)
self.notebook.GetPage(0).Add(self.wxgui_fftsink2_0.win)
self.rtlsdr_source_0 = osmosdr.source( args="numchan=" + str(1) + " " + "" )
self.rtlsdr_source_0.set_sample_rate(samp_rate)
self.rtlsdr_source_0.set_center_freq(freq, 0)
self.rtlsdr_source_0.set_freq_corr(0, 0)
self.rtlsdr_source_0.set_dc_offset_mode(0, 0)
self.rtlsdr_source_0.set_iq_balance_mode(0, 0)
self.rtlsdr_source_0.set_gain_mode(False, 0)
self.rtlsdr_source_0.set_gain(20, 0)
self.rtlsdr_source_0.set_if_gain(10, 0)
self.rtlsdr_source_0.set_bb_gain(5, 0)
self.rtlsdr_source_0.set_antenna("", 0)
self.rtlsdr_source_0.set_bandwidth(0, 0)
self.fft_1 = fft.fft_vcc(1024, True, (window.rectangular(1024)), True, 1)
self.blocks_vector_to_stream_0 = blocks.vector_to_stream(gr.sizeof_float*1, 1024)
self.blocks_threshold_ff_0 = blocks.threshold_ff(-100, threshold, 0)
self.blocks_stream_to_vector_0 = blocks.stream_to_vector(gr.sizeof_gr_complex*1, 1024)
self.blocks_nlog10_ff_0 = blocks.nlog10_ff(10, 1, 0)
self.blocks_file_sink_0 = blocks.file_sink(gr.sizeof_float*1, "/media/shashwat/DATA/Q3/Wireless Networking/gnu codes/Outputs/db_498", False)
self.blocks_file_sink_0.set_unbuffered(False)
self.blocks_divide_xx_0 = blocks.divide_ff(1)
self.blocks_complex_to_mag_squared_0 = blocks.complex_to_mag_squared(1024)
self.analog_const_source_x_0 = analog.sig_source_f(0, analog.GR_CONST_WAVE, 0, 0, 1.04858e6)
##################################################
# Connections
##################################################
self.connect((self.analog_const_source_x_0, 0), (self.blocks_divide_xx_0, 1))
self.connect((self.blocks_complex_to_mag_squared_0, 0), (self.blocks_vector_to_stream_0, 0))
self.connect((self.blocks_divide_xx_0, 0), (self.blocks_nlog10_ff_0, 0))
self.connect((self.blocks_nlog10_ff_0, 0), (self.blocks_file_sink_0, 0))
self.connect((self.blocks_nlog10_ff_0, 0), (self.blocks_threshold_ff_0, 0))
self.connect((self.blocks_nlog10_ff_0, 0), (self.wxgui_numbersink2_0, 0))
self.connect((self.blocks_stream_to_vector_0, 0), (self.fft_1, 0))
self.connect((self.blocks_threshold_ff_0, 0), (self.wxgui_numbersink2_1, 0))
self.connect((self.blocks_vector_to_stream_0, 0), (self.blocks_divide_xx_0, 0))
self.connect((self.fft_1, 0), (self.blocks_complex_to_mag_squared_0, 0))
self.connect((self.rtlsdr_source_0, 0), (self.blocks_stream_to_vector_0, 0))
self.connect((self.rtlsdr_source_0, 0), (self.wxgui_fftsink2_0, 0))
def get_threshold(self):
return self.threshold
def set_threshold(self, threshold):
self.threshold = threshold
self._threshold_slider.set_value(self.threshold)
self._threshold_text_box.set_value(self.threshold)
self.blocks_threshold_ff_0.set_hi(self.threshold)
def get_samp_rate(self):
return self.samp_rate
def set_samp_rate(self, samp_rate):
self.samp_rate = samp_rate
self.rtlsdr_source_0.set_sample_rate(self.samp_rate)
self.wxgui_fftsink2_0.set_sample_rate(self.samp_rate)
def get_freq(self):
return self.freq
def set_freq(self, freq):
self.freq = freq
self._freq_slider.set_value(self.freq)
self._freq_text_box.set_value(self.freq)
self.rtlsdr_source_0.set_center_freq(self.freq, 0)
self.wxgui_fftsink2_0.set_baseband_freq(self.freq)
def get_fft_size(self):
return self.fft_size
def set_fft_size(self, fft_size):
self.fft_size = fft_size
if __name__ == '__main__':
parser = OptionParser(option_class=eng_option, usage="%prog: [options]")
(options, args) = parser.parse_args()
tb = Output_window()
tb.Start(True)
tb.Wait()
| gpl-3.0 | -1,901,618,430,034,386,400 | 36.810127 | 148 | 0.579734 | false |
AMOboxTV/AMOBox.LegoBuild | plugin.video.salts/scrapers/myvideolinks_scraper.py | 1 | 4919 | """
SALTS XBMC Addon
Copyright (C) 2014 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
import urllib
import urlparse
from salts_lib import kodi
from salts_lib import log_utils
from salts_lib import scraper_utils
from salts_lib import dom_parser
from salts_lib.constants import FORCE_NO_MATCH
from salts_lib.constants import VIDEO_TYPES
from salts_lib.kodi import i18n
import scraper
BASE_URL = 'http://myvideolinks.xyz'
class MyVidLinks_Scraper(scraper.Scraper):
base_url = BASE_URL
def __init__(self, timeout=scraper.DEFAULT_TIMEOUT):
self.timeout = timeout
self.base_url = kodi.get_setting('%s-base_url' % (self.get_name()))
@classmethod
def provides(cls):
return frozenset([VIDEO_TYPES.MOVIE, VIDEO_TYPES.EPISODE])
@classmethod
def get_name(cls):
return 'MyVideoLinks.eu'
def resolve_link(self, link):
return link
def format_source_label(self, item):
label = '[%s] %s' % (item['quality'], item['host'])
if 'views' in item and item['views']:
label += ' (%s Views)' % (item['views'])
return label
def get_sources(self, video):
source_url = self.get_url(video)
hosters = []
if source_url and source_url != FORCE_NO_MATCH:
url = urlparse.urljoin(self.base_url, source_url)
html = self._http_get(url, cache_limit=.5)
views = None
pattern = '<span[^>]+>(\d+)\s+Views'
match = re.search(pattern, html)
if match:
views = int(match.group(1))
if video.video_type == VIDEO_TYPES.MOVIE:
return self.__get_movie_links(video, views, html)
else:
return self.__get_episode_links(video, views, html)
return hosters
def __get_movie_links(self, video, views, html):
q_str = ''
fragment = dom_parser.parse_dom(html, 'div', {'class': 'post-title'})
if fragment:
q_str = fragment[0]
match = re.search('<p>Size:(.*)', html, re.DOTALL)
if match:
fragment = match.group(1)
else:
fragment = html
return self.__get_links(video, views, fragment, q_str)
def __get_episode_links(self, video, views, html):
pattern = '<h4>(.*?)</h4>(.*?)</ul>'
hosters = []
for match in re.finditer(pattern, html, re.DOTALL):
q_str, fragment = match.groups()
hosters += self.__get_links(video, views, fragment, q_str)
return hosters
def __get_links(self, video, views, html, q_str):
pattern = 'li>\s*<a\s+href="(http[^"]+)'
hosters = []
for match in re.finditer(pattern, html, re.DOTALL):
url = match.group(1)
hoster = {'multi-part': False, 'class': self, 'views': views, 'url': url, 'rating': None, 'quality': None, 'direct': False}
hoster['host'] = urlparse.urlsplit(url).hostname
hoster['quality'] = scraper_utils.blog_get_quality(video, q_str, hoster['host'])
hosters.append(hoster)
return hosters
def get_url(self, video):
return self._blog_get_url(video)
@classmethod
def get_settings(cls):
settings = super(cls, cls).get_settings()
settings = scraper_utils.disable_sub_check(settings)
name = cls.get_name()
settings.append(' <setting id="%s-filter" type="slider" range="0,180" option="int" label=" %s" default="30" visible="eq(-4,true)"/>' % (name, i18n('filter_results_days')))
settings.append(' <setting id="%s-select" type="enum" label=" %s" lvalues="30636|30637" default="0" visible="eq(-5,true)"/>' % (name, i18n('auto_select')))
return settings
def search(self, video_type, title, year, season=''):
search_url = urlparse.urljoin(self.base_url, '/?s=')
search_url += urllib.quote_plus(title)
html = self._http_get(search_url, cache_limit=1)
pattern = '<h\d+>.*?<a\s+href="(?P<url>[^"]*/(?P<date>\d{4}/\d{2}/\d{2})/[^"]*)"\s+rel="bookmark"\s+title="(?:Permanent Link to )?(?P<post_title>[^"]+)'
date_format = '%Y/%m/%d'
return self._blog_proc_results(html, pattern, date_format, video_type, title, year)
| gpl-2.0 | -8,640,232,997,686,215,000 | 37.429688 | 191 | 0.599106 | false |
IA-MP/KnightTour | libs/IO/txt_generator.py | 1 | 2729 | import os
from time import time
from libs.IO.input_generator import generate_input
def generate_name_file(kind, path):
"""
    This function generates a sequential input or output file name.
    @param kind: int, 0 for input, 1 for output
    @param path: string, the directory in which the file will be created
    @return: string, the file name with an incremental number
"""
global filename
i = 0
while True:
if kind == 0:
filename = "input_%d" % i
elif kind == 1:
filename = "output_%d" % i
if not os.path.exists(path + filename + ".txt"):
return filename
i += 1
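# Illustrative: if path already contains input_0.txt and input_1.txt,
# generate_name_file(0, path) returns "input_2".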
def generate_file(kind, text, path, num=None):
"""
    This function generates an input or output txt file.
    @param kind: int, the kind of file to be generated: 0 for input, 1 for output
    @param text: string, the content to store in the file
    @param path: string, the directory in which the file will be created
    @param num: int, the incremental suffix; if omitted a new name is generated,
        otherwise the file with that number is overwritten
"""
global file
file = None
final_path = ""
if kind == 0: # Generate input file
try:
if num is None:
name = generate_name_file(0, path)
final_path = path + name + '.txt'
file = open(final_path, 'w')
else:
final_path = path + 'input_' + str(num) + '.txt'
file = open(final_path, 'w')
for row in text:
for col in row:
for elem in col:
file.writelines(str(elem))
file.write("\n")
except Exception:
print("Si e' verificato un problema con il path, seguire le istruzioni per favore e scrivere un path regolare")
raise SystemExit()
finally:
            if file is not None:
file.close()
elif kind == 1: # Generate output file
try:
if num is None:
name = generate_name_file(1, path)
final_path = path + name + '.txt'
file = open(final_path, 'w')
else:
final_path = path + 'output_' + str(num) + '.txt'
file = open(final_path, 'w')
i = 1
for elem in text:
file.write("Caso " + str(i) + ": " + elem + "\n")
i += 1
finally:
file.close()
return final_path
if __name__ == "__main__":
start = time()
#generate_file(0, generate_input(100, False), "../../dataset/")
#generate_file(1, ["0", "4", "14", "impossibile", "150"], "../../dataset/")
print(time() - start)
| mit | 5,538,565,594,786,700,000 | 33.1125 | 123 | 0.512642 | false |
sbarton272/AcousticBarcodes-Explorations | barcodes/dxfwrite/tests/test_viewport_entity.py | 1 | 1662 | #!/usr/bin/env python
#coding:utf-8
# Created: 10.02.2010
# Copyright (C) 2010, Manfred Moitzi
# License: MIT License
__author__ = "mozman <[email protected]>"
import unittest
from dxfwrite.entities import Viewport
from dxfwrite import dxfstr, DXFEngine
class TestViewportEntity(unittest.TestCase):
expected = " 0\nVIEWPORT\n 8\nVIEWPORTS\n 67\n1\n 10\n0.0\n 20\n0.0\n 30\n0.0\n 40\n3.0\n"\
" 41\n2.0\n 68\n1\n 69\n1\n" \
"1001\nACAD\n1000\nMVIEW\n1002\n{\n" \
"1070\n16\n" \
"1010\n0.0\n1020\n0.0\n1030\n0.0\n" \
"1010\n0.0\n1020\n0.0\n1030\n0.0\n" \
"1040\n0.0\n1040\n1.0\n"\
"1040\n0.0\n1040\n0.0\n"\
"1040\n50.0\n1040\n0.0\n1040\n0.0\n"\
"1070\n0\n1070\n100\n1070\n1\n"\
"1070\n3\n1070\n0\n1070\n0\n1070\n0\n1070\n0\n"\
"1040\n0.0\n1040\n0.0\n1040\n0.0\n"\
"1040\n0.1\n1040\n0.1\n1040\n0.1\n1040\n0.1\n"\
"1070\n0\n"\
"1002\n{\n1002\n}\n1002\n}\n"
def test_create_viewport_entity(self):
viewport = Viewport((0,0,0), 3, 2, id=1)
self.assertEqual(dxfstr(viewport), self.expected)
def test_viewport_by_factory(self):
viewport = DXFEngine.viewport((0,0,0), 3, 2, id=1)
self.assertEqual(dxfstr(viewport), self.expected)
def test_get_extended_data(self):
viewport = DXFEngine.viewport((0,0,0), 3, 2)
result = viewport['perspective_lens_length']
self.assertEqual(50, result)
def test_set_extended_data(self):
viewport = DXFEngine.viewport((0,0,0), 3, 2, perspective_lens_length=75.)
result = viewport['perspective_lens_length']
self.assertEqual(75, result)
if __name__=='__main__':
unittest.main()
| mit | 3,518,769,548,850,778,000 | 32.24 | 97 | 0.633574 | false |
raelga/gtav_crew_exporter | gtav_crew_exporter.py | 1 | 10812 | #!/usr/bin/python
#### Import modules
from selenium import selenium
from selenium import webdriver
import sys, time, re, string, getopt
#### Constants
default_crew = 'elotrolado'
login_url = 'https://socialclub.rockstargames.com/profile/signin'
base_crew_url = 'http://socialclub.rockstargames.com/crew'
path_gtav_base_url = '/games/gtav'
path_gtav_overview_url = '/career/overview/gtaonline'
#### Global
username = ''
password = ''
crew_name = ''
output_file = ''
verbose_flag = ''
#### Class definition
class crew_member:
def __init__(self):
self.id = ''
self.psn = ''
self.url = ''
self.level = ''
self.playtime = ''
self.country = ''
self.rank = ''
self.crew = ''
self.platform = ''
self.error = 'All ok.'
#### Function definitions
def print_help():
print 'gtav_crew_exporter.py -c <crew_name> [-u <username> -p <password>] [-o <output_file>] [-v]'
def arg_parser(argv):
global crew_name
global username
global password
global output_file
global verbose_flag
try:
opts, args = getopt.getopt(argv,"hvu:p:c:o:",["verbose","username","password","crew=","ofile="])
except getopt.GetoptError:
print_help()
        sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print_help()
            sys.exit()
elif opt in ("-c", "--crew"):
crew_name = arg
elif opt in ("-o", "--ofile"):
output_file = arg
if not output_file: print_help()
elif opt in ("-v", "--verbose"):
verbose_flag = 1
elif opt in ("-u", "--username"):
username = arg
if not username: print_help()
elif opt in ("-p", "--password"):
password = arg
if not password: print_help()
if not crew_name:
crew_name = default_crew
return 0
def debug(msg):
global verbose_flag
if verbose_flag: print 'DBG : ' + msg
def WaitForElement(webdriver, path):
limit = 10 # waiting limit in seconds
inc = 1 # in seconds; sleep for 500ms
c = 0
while (c < limit):
try:
webdriver.find_element_by_xpath(path)
return 1 # Success
except:
time.sleep(inc)
c = c + inc
# print sys.exc_info()
return 0
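# Illustrative: WaitForElement(driver, '//*[@id="submitBtn"]') polls once per second
# for up to 10 seconds, returning 1 as soon as the element is found and 0 on timeout.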
####
def LoginSocialClub(driver):
if not username or not password:
print '!! Without login and password, only username and rank are available:'
return 1
driver.get(login_url)
path = '//*[@id="submitBtn"]'
result = WaitForElement(driver, path)
if not result: # interprets returned value
# driver.close()
debug("\nThe page is not loaded yet.")
else:
debug('web - page fully loaded!')
path='//input[@id="login-field"]'
driver.find_element_by_xpath(path).clear()
driver.find_element_by_xpath(path).send_keys(username)
path='//input[@id="password-field"]'
driver.find_element_by_xpath(path).clear()
driver.find_element_by_xpath(path).send_keys(password)
path = '//*[@id="submitBtn"]'
driver.find_element_by_xpath(path).click()
driver.get(login_url)
path = '//*[@id="panelaccounts"]'
result = WaitForElement(driver, path)
if not result: # interprets returned value
# driver.close()
debug("\nThe page is not loaded yet.")
else:
debug('web - page fully loaded!')
return 0
####
def GetMembersList(driver):
crew_url = base_crew_url + '/' + crew_name + '/hierarchy'
driver.get(crew_url)
path = '//*[@id="muscleList"]'
result = WaitForElement(driver, path)
if not result: # interprets returned value
# driver.close()
debug("\nThe page is not loaded yet.")
else:
debug('web - page fully loaded!')
path = '//a[@data-ga="footer_selectlanguage_en"]'
viewall = driver.find_element_by_xpath(path)
if not viewall:
debug("meh.")
else:
debug("web - set page in english.")
# viewall.click()
path = '//a[@class="viewAll"]'
try:
viewall = driver.find_element_by_xpath(path)
debug("web - unfold users.")
viewall.click()
except:
debug("web - all users visible.")
path = '//div[contains(@id, "crewRank_")]'
hierarchy = driver.find_elements_by_xpath(path)
crew_members = list()
for rank in hierarchy:
# print rank.get_attribute('id')
path = '//div[@id="' + rank.get_attribute('id') + '"]//i[@class="icon-info"]'
rank_name = rank.find_element_by_xpath(path).get_attribute('data-name')
# print rank_name
path = '//div[@id="' + rank.get_attribute('id') + '"]//ul[@id="' + rank_name + 'List"]//div[@class="member"]//img'
members = rank.find_elements_by_xpath(path)
for member in members:
cm = crew_member()
cm.id = member.get_attribute('data-original-title')
cm.url = member.find_element_by_xpath('..').get_attribute('href')
cm.rank = rank_name
crew_members.append(cm)
return crew_members
#### Function definitions
def GetMemberInfo(driver, member):
debug('[' + member.id + ']')
retry = 0
max_retry = 5
# Add retry to avoid errors
for rety in range(max_retry):
## Load profile page
driver.get(member.url)
path = '//*[@id="cardInfoVitals"]'
result = WaitForElement(driver, path)
if not result: # interprets returned value
# driver.close()
debug("web - The page is not loaded yet. [" + str(retry) + "]")
retry += 1
else:
debug('web - page fully loaded! [' + str(retry) + ']')
break
## Check if profile is private
try:
path = '//div[@id="no-profile"]'
profail = driver.find_element_by_xpath(path)
debug('[' + member.id + '] Profile is private!')
member.error = 'Private profile.'
return 1 # Success
except:
## Crew Principal
path = '//div[@class="crew-info"]/a'
member.crew = driver.find_element_by_xpath(path).get_attribute("href").rsplit('/',1)[1]
debug('[' + member.id + '] main crew: ' + member.crew)
try:
## PSN ID
path = '//div[@class="PSN"]/h5'
member.psn = driver.find_element_by_xpath(path).text
except:
member.psn = ''
debug('[' + member.id + '] PSN ID: ' + member.psn)
try:
## Language
path = '//div[@id="cardInfoFooter"]//span[contains(@class,"Country")]'
member.country = driver.find_element_by_xpath(path).get_attribute("data-original-title")
except:
member.country = ''
debug('[' + member.id + '] country: ' + member.country)
driver.get(member.url + '/'+ path_gtav_base_url + '/ps3' + path_gtav_overview_url)
path = '//div[@id="freemodeRank"]'
result = WaitForElement(driver, path)
if not result: # interprets returned value
# driver.close()
debug("\nThe page is not loaded yet.")
else:
debug('web - page fully loaded!')
try:
path = '//div[@id="freemodeRank"]//h3'
member.level = driver.find_element_by_xpath(path).text
except:
member.level = ''
    if member.level in ('', '0'):
member.platform = 'XBOX360'
driver.get(member.url + '/'+ path_gtav_base_url + '/xbox' + path_gtav_overview_url)
path = '//div[@id="freemodeRank"]'
result = WaitForElement(driver, path)
if not result: # interprets returned value
# driver.close()
debug("\nThe page is not loaded yet.")
else:
debug('web - page fully loaded!')
try:
path = '//div[@id="freemodeRank"]//h3'
member.level = driver.find_element_by_xpath(path).text
except:
member.level = ''
else:
member.platform = 'PS3'
debug('[' + member.id + '] rank: ' + member.rank)
try:
## Language
path = '//div[@id="freemodeRank"]//div[@class="rankBar"]/h4'
member.playtime = driver.find_element_by_xpath(path).text.rsplit(':',1)[1]
except:
member.playtime = ''
debug('[' + member.id + '] playtime: ' + member.playtime)
# print sys.exc_info()
return member
#### Main function
if __name__ == "__main__":
arg_parser(sys.argv[1:])
debug('web - starting browser')
driver = webdriver.Firefox()
print 'Crew: ' + crew_name
crew_members = GetMembersList(driver)
print 'Crew Size: ' + str(len(crew_members)) + ' members'
error = LoginSocialClub(driver)
if error:
print 'Crew Members :'
for cm in crew_members:
print cm.rank + ", " + cm.id + ", " + cm.url
debug('You need to provide login information to view each member info.')
for cm in crew_members:
cm = GetMemberInfo(driver, cm)
if output_file:
f = open(output_file,'w')
for cm in crew_members:
member_csv = str(cm.id) + ', ' \
+ str(cm.country) + ', ' \
+ str(cm.psn) + ', ' \
+ str(cm.platform) + ', ' \
+ str(cm.crew) + ', ' \
+ str(cm.rank) + ', ' \
+ str(cm.level) + ', ' \
+ str(cm.playtime) + ', ' \
+ str(cm.error)
if output_file:
f.write(member_csv + '\n')
else:
print member_csv
if output_file:
print 'Output saved as ' + output_file + '.'
f.close() # you can omit in most cases as the destructor will call if
driver.close()
sys.exit()
# Grab URL
#url = str(sys.argv[1])
# Check if it's malformed
#regex = re.compile(
# r'^(?:http|ftp)s?://' # http:// or https://
# r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' #domain...
# r'localhost|' #localhost...
# r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip
# r'(?::\d+)?' # optional port
# r'(?:/?|[/?]\S+)$', re.IGNORECASE)
#vurl = regex.match(url)
#if vurl:
# print ("Good url : %s" % url)
#else:
# debug ("Malformed url : %s" % url)
| gpl-2.0 | 1,753,025,060,716,105,000 | 27.452632 | 122 | 0.515631 | false |
jimsize/PySolFC | tests/lib/pysol_tests/import_file1.py | 1 | 4537 | # Written by Shlomi Fish, under the MIT Expat License.
import unittest
from pysollib.acard import AbstractCard
from pysollib.hint import FreeCellSolver_Hint, PySolHintLayoutImportError
import pysollib.stack
class MockItem:
def __init__(self):
pass
def tkraise(self):
return
def addtag(self, nouse):
return
class MockCanvas:
def __init__(self):
self.xmargin = self.ymargin = 50
class MockImages:
def __init__(self):
self.CARDW = self.CARDH = self.CARD_YOFFSET = 50
class MockOpt:
def __init__(self):
self.randomize_place = False
class MockApp:
def __init__(self):
self.images = MockImages()
self.opt = MockOpt()
class MockTalon:
def __init__(self, g):
self.cards = [
AbstractCard(1000+r*100+s*10, 0, s, r, g)
for s in range(4) for r in range(13)]
for c in self.cards:
c.item = MockItem()
class MockGame:
def __init__(self):
self.app = MockApp()
self.talon = MockTalon(self)
self.allstacks = []
self.stackmap = {}
self.canvas = MockCanvas()
self.foundations = [
pysollib.stack.SS_FoundationStack(0, 0, self, s) for s in range(4)]
self.rows = [pysollib.stack.AC_RowStack(0, 0, self) for s in range(8)]
self.reserves = [
pysollib.stack.AC_RowStack(0, 0, self) for s in range(4)]
self.preview = 0
def _empty_override(*args):
return True
pysollib.stack.MfxCanvasGroup = _empty_override
class Mock_S_Game:
def __init__(self):
self.s = MockGame()
def flipMove(self, foo):
pass
def moveMove(self, cnt, frm, to, frames=0):
c = frm.cards.pop()
c.face_up = True
to.addCard(c)
pass
class MyTests(unittest.TestCase):
def _calc_hint(self, fn):
"""docstring for _calc_hint"""
s_game = Mock_S_Game()
h = FreeCellSolver_Hint(s_game, None)
fh = open(fn, 'r+b')
h.importFileHelper(fh, s_game)
return h
def _successful_import(self, fn, want_s, blurb):
self.assertEqual(self._calc_hint(fn).calcBoardString(), want_s, blurb)
def test_import(self):
return self._successful_import('tests/unit/data/with-10-for-rank.txt',
'''FC: - - - -
4C 2C 9C 8C QS 4S 2H
5H QH 3C AC 3H 4H QD
QC 9S 6H 9H 3S KS 3D
5D 2S JC 5C JH 6D AS
2D KD TH TC TD 8D
7H JS KH TS KC 7C
AH 5S 6S AD 8H JD
7S 6C 7D 4D 8S 9D
''', 'import worked with "10"s as ranks')
def test_import_2(self):
return self._successful_import('tests/unit/data/624.board',
'''FC: - - - -
KC 6H 4C QS 2D 4S AS
4H TH 2S JH 2H 9S AH
3S 6C 9H AD KH QD 7C
3C JS 5H KS TC 9C 8C
4D 9D 7S JC 5D TS
KD QC 5C QH 6S 3D
5S JD 8D 6D TD 8H
8S 7H 3H 2C AC 7D
''', 'import worked with Ts')
def test_import_utf8_bom(self):
return self._successful_import(
'tests/unit/data/624-with-utf8-bom.board',
'''FC: - - - -
KC 6H 4C QS 2D 4S AS
4H TH 2S JH 2H 9S AH
3S 6C 9H AD KH QD 7C
3C JS 5H KS TC 9C 8C
4D 9D 7S JC 5D TS
KD QC 5C QH 6S 3D
5S JD 8D 6D TD 8H
8S 7H 3H 2C AC 7D
''', 'import worked with utf-8 bom')
def test_throw_error_on_duplicate_card(self):
try:
self._calc_hint('tests/unit/data/624-with-dup-card.board')
except PySolHintLayoutImportError as err:
self.assertEqual(err.msg, "Duplicate cards in input")
self.assertEqual(err.cards, ["KC"])
self.assertEqual(err.line_num, 1)
self.assertEqual(err.format(), "Duplicate cards in input:\n\nKC")
return
self.fail("No exception thrown.")
def test_throw_error_on_invalid_foundations_line(self):
try:
self._calc_hint(
'tests/unit/data/624-invalid-foundations-line.board')
except PySolHintLayoutImportError as err:
self.assertEqual(err.msg, "Invalid Foundations line")
self.assertEqual(err.cards, [])
self.assertEqual(err.line_num, 1)
return
self.fail("No exception thrown.")
def test_throw_error_on_missing_cards(self):
try:
self._calc_hint('tests/unit/data/624-missing-cards.board')
except PySolHintLayoutImportError as err:
self.assertEqual(err.msg, "Missing cards in input")
self.assertEqual(err.cards, ["5H"])
return
self.fail("No exception thrown.")
| gpl-3.0 | 6,461,702,182,618,086,000 | 26.005952 | 79 | 0.588495 | false |
ActiveState/code | recipes/Python/360698_Extending_pythprolog_syntax/recipe-360698.py | 1 | 8888 | #
# pythologic2.py
#
# Add logic programming (Prolog) syntax and *resolution* into Python.
#
# (c) 2004 Francisco Coelho
# after (c) 2004 Shai Berger
# and AIMA examples
#
import string
import copy
class Struct:
def __init__(self, database, head, subs):
"""
The head and subs are essential - what makes this struct.
The database should only be used while structs are constructed,
and later removed.
"""
self.database = database
self.head = head
self.subs = subs
def __pos__(self):
"""
unary + means insert into database as fact
"""
self.database.add_fact(self)
def __invert__(self):
"""
unary ~ means insert into database as query
"""
self.database.add_query(self)
def __lshift__(self, requisites):
"""
The ideal is
consequent(args) << cond1(args1),...
for now we must do with
consequent(args) << [cond1(args1),...]
"""
self.database.add_conditional(self, requisites)
def __str__(self):
subs = map (str, self.subs)
return str(self.head) + "(" + string.join(subs,',') + ")"
class Symbol:
def __init__ (self, name, database):
self.name = name
self.database = database
def __call__(self, *args):
return Struct(self.database, self, args)
def __str__(self):
return self.name
class Constant(Symbol):
"""
A constant is a name. Its value is its name too.
"""
def value(self): return self.name
class Variable(Symbol):
pass
def symbol(name, database):
if (name[0] in string.uppercase):
return Variable(name,database)
else:
return Constant(name, database)
class Database:
def __init__(self, name):
self.name= name
self.facts = []
self.conditionals = []
self.queries = []
def add_fact(self, fact):
self.facts.append(fact)
def add_query(self, query):
self.queries.append(query)
def add_conditional(self,head,requisites):
if not(isinstance(requisites, list)):
requisites = [requisites]
self.conditionals.append((head,requisites))
def __str__(self):
factsStr= string.join(map(str, self.facts),'\n')
condsStr= ''
for (h,r) in self.conditionals:
condsStr = condsStr + "%s << %s\n"%(h,string.join( map(str, r), ', '))
queryStr= string.join( map(str, self.queries),'\n')
return self.name + ' facts\n' + factsStr +'\n'+self.name + ' conditionals\n'+ condsStr + '\n'+self.name + ' queries\n'+queryStr + '\n'
def append(self, func):
"""
Include definitions from func into database
"""
try:
code = func.func_code
except:
raise TypeError, "function or method argument expected"
names = code.co_names
locally_defined = code.co_varnames
globally_defined = func.func_globals.keys()
defined = locally_defined+tuple(globally_defined)
undefined = [name for name in names if name not in defined]
newglobals = func.func_globals.copy()
for name in undefined:
newglobals[name] = symbol(name, self)
exec code in newglobals
def __lshift__(self, func):
"""
A helper for decorator implementation
"""
self.append(func)
return LogicalFunction(self, func)
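    # Illustrative usage (hypothetical names): undefined names inside the consulted
    # function are injected as Symbols by append(), so no declarations are needed:
    #   def rules():
    #       + parent(alice, bob)   # fact: lowercase names become Constants
    #       ~ parent(X, bob)       # query: uppercase names become Variables
    #   db << rules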
def solve(self, V = [{}]):
"""
The query queue is LIFO:
Extend valuations in V satisfying the last query.
"""
def solve1( v ):
# get solutions from facts
unify_facts = [unify(query, fact, v) for fact in self.facts]
# look for solutions from conditionals
unify_conditionals = []
for ( header , condition_list ) in self.conditionals:
u = unify(query, header , v) # unify headers
U = [ u ]
if u != None:
# remember query queue
oldQueries = copy.deepcopy(self.queries)
# we want to start by the first conditional
D = copy.copy( condition_list )
D.reverse()
# phase 1: append the conditionals to query queue
for condition in D:
if type( condition ) == type('string'):
# process python code
# should return True or False
self.queries.append( condition )
#eval_python_string( condition , u)
else:
# append the conditional,
# with variables replaced according to u
# to the query queue
unified_condition = subst(u, condition )
self.queries.append( unified_condition )
# phase 2: solve the appended conditionals
for condition in D:
U = self.solve( U )
# restore query queue
self.queries = oldQueries
# grow the list of solutions
unify_conditionals = unify_conditionals + U
return [ u for u in (unify_facts + unify_conditionals) if not u in [None, {}] ]
if self.queries:
query = self.queries[-1]
del self.queries[-1]
else:
return []
if type( query ) == type( 'string' ):
U = [ v for v in V if python_eval_string(query, v) ]
else:
U = []
for v in V:
U = U + solve1(v)
return U
def python_eval_string(s, v):
for k in v:
s=string.replace(s, str(k), str(v[k]))
return eval( s, {} )
def subst(v, x):
if v.has_key(x):
return v[x]
elif isinstance(x, Variable):
return x
elif isinstance(x, Struct):
return Struct( x.database, x.head, [subst(v, xi) for xi in x.subs])
def unify(x,y,v={}):
"""
Find one valuation extending v and unifying x with y
"""
def extend(v, x, t):
"""
Extend valuation v with v[x] = t
"""
v1 = copy.copy(v)
v1[x] = t
return v1
def occur_check(x, t):
"""
Test if the variable x occurr in structure t
"""
if x == t:
return True
elif isinstance(t, Struct):
return t.head == x.head or occur_check(x, t.subs)
return False
def unify_var(x, t, v):
"""
Test if v can be extended with v[x] = t;
In that case return the extention
Else return None
"""
if x in v:
return unify( v[ x ], t, v)
elif occur_check(x, t):
return None
else:
return extend(v, x, t)
if v == None:
return None
elif x == y:
return v
elif isinstance(x, Variable):
return unify_var(x, y, v)
elif isinstance(y, Variable):
return unify_var(y, x, v)
elif isinstance(x, Struct) and isinstance(y, Struct) and (x.head == y.head):
z = v
n = len(x.subs)
m = len(y.subs)
if n == m:
for i in range( n ):
z = unify( x.subs[i], y.subs[i], z)
return z
else:
return None
else:
return None
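# Illustrative (hypothetical symbols): with X a Variable and a, b Constants,
#   unify(parent(X, b), parent(a, b)) -> {X: a}
#   unify(parent(a, b), parent(b, a)) -> None  (heads match, but a and b cannot unify)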
class LogicalFunction:
"""
This class replaces a logical function once it has
been consulted, to avoid erroneous use
"""
def __init__(self, database, func):
self.database=database
self.logical_function=func
def __call__(self):
raise TypeError, "Logical functions are not really callable"
if __name__ == "__main__":
db = Database('TEST')
print "Defining a prolog program... ",
def prolog_func():
# prolog facts are prefixed with "+"
+ number(0)
+ number(1)
+ number(2)
+ number(3)
+ number(4)
# prolog conditionals have the pattern p << [q1, ..., qn]
test(X, Y) << [number(X), number(Y), 'X==2*Y' ]
# prolog queries are prefixed with "~"
~ test(X, Y)
# Update the database
db << prolog_func
print "done"
print "Before solving"
print db
# Solve the queries
x = db.solve()
print 'Solutions'
for v in x:
for k in v: print k,"=", v[k],' ',
print
print "After solving"
print db
| mit | 1,482,748,753,242,608,400 | 27.670968 | 143 | 0.502813 | false |
kevinpt/ripyl | test/test_stats.py | 1 | 3176 | #!/usr/bin/python
# -*- coding: utf-8 -*-
'''Ripyl protocol decode library
Statistical operations test suite
'''
# Copyright © 2013 Kevin Thibedeau
# This file is part of Ripyl.
# Ripyl is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
# Ripyl is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with Ripyl. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function, division
import unittest
import random
import ripyl.util.stats as stats
# def fequal(a, b, epsilon=0.0001):
# '''Compare floating point values for relative equality'''
# return abs(math.log10(a) - math.log10(b)) <= epsilon
class TestOnlineStats(unittest.TestCase):
def test_basic(self):
os = stats.OnlineStats()
data = [1.0] * 100
for n in data:
os.accumulate(n)
self.assertAlmostEqual(os.mean(), 1.0, msg='Invalid mean')
self.assertAlmostEqual(os.variance(), 0.0, msg='Invalid variance')
self.assertAlmostEqual(os.std(), 0.0, msg='Invalid std. dev.')
os.reset()
self.assertAlmostEqual(os.mean(), 0.0, msg='Invalid mean')
self.assertAlmostEqual(os.variance(), 0.0, msg='Invalid variance')
#data = range(11)
#for n in data:
# os.accumulate(n)
os.accumulate_array(range(11))
self.assertAlmostEqual(os.mean(), 5.0, msg='Invalid mean')
self.assertAlmostEqual(os.std(), 3.16227766, msg='Invalid std. dev.')
def test_rand(self):
os = stats.OnlineStats()
# uniform random numbers
for i in xrange(10):
os.reset()
for _ in xrange(10000): os.accumulate(random.uniform(0.0, 1.0))
self.assertAlmostEqual(os.mean(), 0.5, places=1, msg='Invalid mean')
self.assertAlmostEqual(os.std(), 0.28, places=1, msg='Invalid std. dev.')
# gaussian random numbers
for i in xrange(10):
os.reset()
for _ in xrange(1000): os.accumulate(random.gauss(0.5, 0.1))
self.assertAlmostEqual(os.mean(), 0.5, places=1, msg='Invalid mean')
self.assertAlmostEqual(os.std(), 0.1, places=1, msg='Invalid std. dev.')
# gaussian random numbers 2
for i in xrange(10):
os.reset()
for _ in xrange(1000): os.accumulate(random.gauss(0.5, 0.3))
self.assertAlmostEqual(os.mean(), 0.5, places=1, msg='Invalid mean')
self.assertAlmostEqual(os.std(), 0.3, places=1, msg='Invalid std. dev.')
| lgpl-3.0 | 1,296,993,798,884,937,000 | 33.674157 | 85 | 0.595591 | false |
adamlwgriffiths/pyfilesystem | fs/watch.py | 1 | 23532 | """
fs.watch
========
Change notification support for FS.
This module defines a standard interface for FS subclasses that support change
notification callbacks. It also offers some WrapFS subclasses that can
simulate such an ability on top of an ordinary FS object.
An FS object that wants to be "watchable" must provide the following methods:
* ``add_watcher(callback,path="/",events=None,recursive=True)``
Request that the given callback be executed in response to changes
to the given path. A specific set of change events can be specified.
This method returns a Watcher object.
* ``del_watcher(watcher_or_callback)``
Remove the given watcher object, or any watchers associated with
the given callback.
If you would prefer to read changes from a filesystem in a blocking fashion
rather than using callbacks, you can use the function 'iter_changes' to obtain
an iterator over the change events.
"""
import sys
import weakref
import threading
import Queue
import traceback
from fs.path import *
from fs.errors import *
from fs.wrapfs import WrapFS
from fs.base import FS
from fs.filelike import FileWrapper
from six import b
class EVENT(object):
"""Base class for change notification events."""
def __init__(self,fs,path):
super(EVENT, self).__init__()
self.fs = fs
if path is not None:
path = abspath(normpath(path))
self.path = path
def __str__(self):
return unicode(self).encode("utf8")
def __unicode__(self):
return u"<fs.watch.%s object (path='%s') at %s>" % (self.__class__.__name__,self.path,hex(id(self)))
def clone(self,fs=None,path=None):
if fs is None:
fs = self.fs
if path is None:
path = self.path
return self.__class__(fs,path)
class ACCESSED(EVENT):
"""Event fired when a file's contents are accessed."""
pass
class CREATED(EVENT):
"""Event fired when a new file or directory is created."""
pass
class REMOVED(EVENT):
"""Event fired when a file or directory is removed."""
pass
class MODIFIED(EVENT):
"""Event fired when a file or directory is modified."""
def __init__(self,fs,path,data_changed=False, closed=False):
super(MODIFIED,self).__init__(fs,path)
self.data_changed = data_changed
self.closed = closed
def clone(self,fs=None,path=None,data_changed=None):
evt = super(MODIFIED,self).clone(fs,path)
if data_changed is None:
data_changed = self.data_changed
evt.data_changed = data_changed
return evt
class MOVED_DST(EVENT):
"""Event fired when a file or directory is the target of a move."""
def __init__(self,fs,path,source=None):
super(MOVED_DST,self).__init__(fs,path)
if source is not None:
source = abspath(normpath(source))
self.source = source
def __unicode__(self):
return u"<fs.watch.%s object (path=%r,src=%r) at %s>" % (self.__class__.__name__,self.path,self.source,hex(id(self)))
def clone(self,fs=None,path=None,source=None):
evt = super(MOVED_DST,self).clone(fs,path)
if source is None:
source = self.source
evt.source = source
return evt
class MOVED_SRC(EVENT):
"""Event fired when a file or directory is the source of a move."""
def __init__(self,fs,path,destination=None):
super(MOVED_SRC,self).__init__(fs,path)
if destination is not None:
destination = abspath(normpath(destination))
self.destination = destination
def __unicode__(self):
return u"<fs.watch.%s object (path=%r,dst=%r) at %s>" % (self.__class__.__name__,self.path,self.destination,hex(id(self)))
def clone(self,fs=None,path=None,destination=None):
evt = super(MOVED_SRC,self).clone(fs,path)
if destination is None:
destination = self.destination
evt.destination = destination
return evt
class CLOSED(EVENT):
"""Event fired when the filesystem is closed."""
pass
class ERROR(EVENT):
"""Event fired when some miscellaneous error occurs."""
pass
class OVERFLOW(ERROR):
"""Event fired when some events could not be processed."""
pass
class Watcher(object):
"""Object encapsulating filesystem watch info."""
def __init__(self,fs,callback,path="/",events=None,recursive=True):
if events is None:
events = (EVENT,)
else:
events = tuple(events)
# Since the FS probably holds a reference to the Watcher, keeping
# a reference back to the FS would create a cycle containing a
# __del__ method. Use a weakref to avoid this.
self._w_fs = weakref.ref(fs)
self.callback = callback
self.path = abspath(normpath(path))
self.events = events
self.recursive = recursive
@property
def fs(self):
return self._w_fs()
def delete(self):
fs = self.fs
if fs is not None:
fs.del_watcher(self)
def handle_event(self,event):
if not isinstance(event,self.events):
return
if event.path is not None:
if not isprefix(self.path,event.path):
return
if not self.recursive:
if event.path != self.path:
if dirname(event.path) != self.path:
return
try:
self.callback(event)
except Exception:
print >>sys.stderr, "error in FS watcher callback", self.callback
traceback.print_exc()
class WatchableFSMixin(FS):
"""Mixin class providing watcher management functions."""
def __init__(self,*args,**kwds):
self._watchers = PathMap()
super(WatchableFSMixin,self).__init__(*args,**kwds)
def __getstate__(self):
state = super(WatchableFSMixin,self).__getstate__()
state.pop("_watchers",None)
return state
def __setstate__(self,state):
super(WatchableFSMixin,self).__setstate__(state)
self._watchers = PathMap()
def add_watcher(self,callback,path="/",events=None,recursive=True):
"""Add a watcher callback to the FS."""
w = Watcher(self,callback,path,events,recursive=recursive)
self._watchers.setdefault(path,[]).append(w)
return w
def del_watcher(self,watcher_or_callback):
"""Delete a watcher callback from the FS."""
if isinstance(watcher_or_callback,Watcher):
self._watchers[watcher_or_callback.path].remove(watcher_or_callback)
else:
for watchers in self._watchers.itervalues():
for i,watcher in enumerate(watchers):
if watcher.callback is watcher_or_callback:
del watchers[i]
break
def _find_watchers(self,callback):
"""Find watchers registered with the given callback."""
for watchers in self._watchers.itervalues():
for watcher in watchers:
if watcher.callback is callback:
yield watcher
def notify_watchers(self,event_or_class,path=None,*args,**kwds):
"""Notify watchers of the given event data."""
if isinstance(event_or_class,EVENT):
event = event_or_class
else:
event = event_or_class(self,path,*args,**kwds)
if path is None:
path = event.path
if path is None:
for watchers in self._watchers.itervalues():
for watcher in watchers:
watcher.handle_event(event)
else:
for prefix in recursepath(path):
if prefix in self._watchers:
for watcher in self._watchers[prefix]:
watcher.handle_event(event)
class WatchedFile(FileWrapper):
"""File wrapper for use with WatchableFS.
This file wrapper provides access to a file opened from a WatchableFS
instance, and fires MODIFIED events when the file is modified.
"""
def __init__(self,file,fs,path,mode=None):
super(WatchedFile,self).__init__(file,mode)
self.fs = fs
self.path = path
self.was_modified = False
def _write(self,string,flushing=False):
self.was_modified = True
return super(WatchedFile,self)._write(string,flushing=flushing)
def _truncate(self,size):
self.was_modified = True
return super(WatchedFile,self)._truncate(size)
def flush(self):
super(WatchedFile,self).flush()
        # Don't bother if Python is being torn down
if Watcher is not None:
if self.was_modified:
self.fs.notify_watchers(MODIFIED,self.path,True)
def close(self):
super(WatchedFile,self).close()
        # Don't bother if Python is being torn down
if Watcher is not None:
if self.was_modified:
self.fs.notify_watchers(MODIFIED,self.path,True)
class WatchableFS(WatchableFSMixin,WrapFS):
"""FS wrapper simulating watcher callbacks.
This FS wrapper intercepts method calls that modify the underlying FS
and generates appropriate notification events. It thus allows watchers
to monitor changes made through the underlying FS object, but not changes
that might be made through other interfaces to the same filesystem.
"""
def __init__(self, *args, **kwds):
super(WatchableFS, self).__init__(*args, **kwds)
def close(self):
super(WatchableFS, self).close()
self.notify_watchers(CLOSED)
def open(self, path, mode='r', buffering=-1, encoding=None, errors=None, newline=None, line_buffering=False, **kwargs):
existed = self.wrapped_fs.isfile(path)
f = super(WatchableFS, self).open(path,
mode=mode,
buffering=buffering,
encoding=encoding,
errors=errors,
newline=newline,
line_buffering=line_buffering,
**kwargs)
if not existed:
self.notify_watchers(CREATED, path)
self.notify_watchers(ACCESSED, path)
return WatchedFile(f, self, path, mode)
def setcontents(self, path, data=b'', encoding=None, errors=None, chunk_size=64*1024):
existed = self.wrapped_fs.isfile(path)
ret = super(WatchableFS, self).setcontents(path, data, chunk_size=chunk_size)
if not existed:
self.notify_watchers(CREATED, path)
self.notify_watchers(ACCESSED, path)
if data:
self.notify_watchers(MODIFIED, path, True)
return ret
def createfile(self, path, wipe=False):
existed = self.wrapped_fs.isfile(path)
        ret = super(WatchableFS, self).createfile(path, wipe=wipe)
if not existed:
self.notify_watchers(CREATED,path)
self.notify_watchers(ACCESSED,path)
return ret
def makedir(self,path,recursive=False,allow_recreate=False):
existed = self.wrapped_fs.isdir(path)
try:
super(WatchableFS,self).makedir(path,allow_recreate=allow_recreate)
except ParentDirectoryMissingError:
if not recursive:
raise
parent = dirname(path)
if parent != path:
self.makedir(dirname(path),recursive=True,allow_recreate=True)
super(WatchableFS,self).makedir(path,allow_recreate=allow_recreate)
if not existed:
self.notify_watchers(CREATED,path)
def remove(self,path):
super(WatchableFS,self).remove(path)
self.notify_watchers(REMOVED,path)
def removedir(self,path,recursive=False,force=False):
if not force:
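            # raising inside the loop means the error fires only when the
            # directory actually contains at least one entry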
for nm in self.listdir(path):
raise DirectoryNotEmptyError(path)
else:
for nm in self.listdir(path,dirs_only=True):
try:
self.removedir(pathjoin(path,nm),force=True)
except ResourceNotFoundError:
pass
for nm in self.listdir(path,files_only=True):
try:
self.remove(pathjoin(path,nm))
except ResourceNotFoundError:
pass
super(WatchableFS,self).removedir(path)
self.notify_watchers(REMOVED,path)
if recursive:
parent = dirname(path)
while parent and not self.listdir(parent):
super(WatchableFS,self).removedir(parent)
self.notify_watchers(REMOVED,parent)
parent = dirname(parent)
def rename(self,src,dst):
d_existed = self.wrapped_fs.exists(dst)
super(WatchableFS,self).rename(src,dst)
if d_existed:
self.notify_watchers(REMOVED,dst)
self.notify_watchers(MOVED_DST,dst,src)
self.notify_watchers(MOVED_SRC,src,dst)
def copy(self,src,dst,**kwds):
d = self._pre_copy(src,dst)
super(WatchableFS,self).copy(src,dst,**kwds)
self._post_copy(src,dst,d)
def copydir(self,src,dst,**kwds):
d = self._pre_copy(src,dst)
super(WatchableFS,self).copydir(src,dst,**kwds)
self._post_copy(src,dst,d)
def move(self,src,dst,**kwds):
d = self._pre_copy(src,dst)
super(WatchableFS,self).move(src,dst,**kwds)
self._post_copy(src,dst,d)
self._post_move(src,dst,d)
def movedir(self,src,dst,**kwds):
d = self._pre_copy(src,dst)
super(WatchableFS,self).movedir(src,dst,**kwds)
self._post_copy(src,dst,d)
self._post_move(src,dst,d)
def _pre_copy(self,src,dst):
dst_paths = {}
try:
for (dirnm,filenms) in self.wrapped_fs.walk(dst):
dirnm = dirnm[len(dst)+1:]
dst_paths[dirnm] = True
for filenm in filenms:
dst_paths[filenm] = False
except ResourceNotFoundError:
pass
except ResourceInvalidError:
dst_paths[""] = False
src_paths = {}
try:
for (dirnm,filenms) in self.wrapped_fs.walk(src):
dirnm = dirnm[len(src)+1:]
src_paths[dirnm] = True
for filenm in filenms:
src_paths[pathjoin(dirnm,filenm)] = False
except ResourceNotFoundError:
pass
except ResourceInvalidError:
src_paths[""] = False
return (src_paths,dst_paths)
def _post_copy(self,src,dst,data):
(src_paths,dst_paths) = data
for src_path,isdir in sorted(src_paths.items()):
path = pathjoin(dst,src_path)
if src_path in dst_paths:
self.notify_watchers(MODIFIED,path,not isdir)
else:
self.notify_watchers(CREATED,path)
for dst_path,isdir in sorted(dst_paths.items()):
path = pathjoin(dst,dst_path)
if not self.wrapped_fs.exists(path):
self.notify_watchers(REMOVED,path)
def _post_move(self,src,dst,data):
(src_paths,dst_paths) = data
for src_path,isdir in sorted(src_paths.items(),reverse=True):
path = pathjoin(src,src_path)
self.notify_watchers(REMOVED,path)
def setxattr(self,path,name,value):
super(WatchableFS,self).setxattr(path,name,value)
self.notify_watchers(MODIFIED,path,False)
def delxattr(self,path,name):
super(WatchableFS,self).delxattr(path,name)
self.notify_watchers(MODIFIED,path,False)
class PollingWatchableFS(WatchableFS):
"""FS wrapper simulating watcher callbacks by periodic polling.
This FS wrapper augments the functionality of WatchableFS by periodically
polling the underlying FS for changes. It is thus capable of detecting
changes made to the underlying FS via other interfaces, albeit with a
(configurable) delay to account for the polling interval.
"""
def __init__(self,wrapped_fs,poll_interval=60*5):
super(PollingWatchableFS,self).__init__(wrapped_fs)
self.poll_interval = poll_interval
self.add_watcher(self._on_path_modify,"/",(CREATED,MOVED_DST,))
self.add_watcher(self._on_path_modify,"/",(MODIFIED,ACCESSED,))
self.add_watcher(self._on_path_delete,"/",(REMOVED,MOVED_SRC,))
self._path_info = PathMap()
self._poll_thread = threading.Thread(target=self._poll_for_changes)
self._poll_cond = threading.Condition()
self._poll_close_event = threading.Event()
self._poll_thread.start()
def close(self):
self._poll_close_event.set()
self._poll_thread.join()
super(PollingWatchableFS,self).close()
def _on_path_modify(self,event):
path = event.path
try:
try:
self._path_info[path] = self.wrapped_fs.getinfo(path)
except ResourceNotFoundError:
self._path_info.clear(path)
except FSError:
pass
def _on_path_delete(self,event):
self._path_info.clear(event.path)
def _poll_for_changes(self):
try:
while not self._poll_close_event.isSet():
# Walk all directories looking for changes.
# Come back to any that give us an error.
error_paths = set()
for dirnm in self.wrapped_fs.walkdirs():
if self._poll_close_event.isSet():
break
try:
self._check_for_changes(dirnm)
except FSError:
error_paths.add(dirnm)
# Retry the directories that gave us an error, until
# we have successfully updated them all
while error_paths and not self._poll_close_event.isSet():
dirnm = error_paths.pop()
if self.wrapped_fs.isdir(dirnm):
try:
self._check_for_changes(dirnm)
except FSError:
error_paths.add(dirnm)
# Notify that we have completed a polling run
self._poll_cond.acquire()
self._poll_cond.notifyAll()
self._poll_cond.release()
# Sleep for the specified interval, or until closed.
self._poll_close_event.wait(timeout=self.poll_interval)
except FSError:
if not self.closed:
raise
def _check_for_changes(self,dirnm):
# Check the metadata for the directory itself.
new_info = self.wrapped_fs.getinfo(dirnm)
try:
old_info = self._path_info[dirnm]
except KeyError:
self.notify_watchers(CREATED,dirnm)
else:
if new_info != old_info:
self.notify_watchers(MODIFIED,dirnm,False)
# Check the metadata for each file in the directory.
# We assume that if the file's data changes, something in its
# metadata will also change; don't want to read through each file!
# Subdirectories will be handled by the outer polling loop.
for filenm in self.wrapped_fs.listdir(dirnm,files_only=True):
if self._poll_close_event.isSet():
return
fpath = pathjoin(dirnm,filenm)
new_info = self.wrapped_fs.getinfo(fpath)
try:
old_info = self._path_info[fpath]
except KeyError:
self.notify_watchers(CREATED,fpath)
else:
was_accessed = False
was_modified = False
for (k,v) in new_info.iteritems():
if k not in old_info:
was_modified = True
break
elif old_info[k] != v:
if k in ("accessed_time","st_atime",):
was_accessed = True
elif k:
was_modified = True
break
else:
for k in old_info:
if k not in new_info:
was_modified = True
break
if was_modified:
self.notify_watchers(MODIFIED,fpath,True)
elif was_accessed:
self.notify_watchers(ACCESSED,fpath)
# Check for deletion of cached child entries.
for childnm in self._path_info.iternames(dirnm):
if self._poll_close_event.isSet():
return
cpath = pathjoin(dirnm,childnm)
if not self.wrapped_fs.exists(cpath):
self.notify_watchers(REMOVED,cpath)
def ensure_watchable(fs,wrapper_class=PollingWatchableFS,*args,**kwds):
"""Ensure that the given fs supports watching, simulating it if necessary.
Given an FS object, this function returns an equivalent FS that has support
for watcher callbacks. This may be the original object if it supports them
natively, or a wrapper class if they must be simulated.
"""
if isinstance(fs,wrapper_class):
return fs
try:
w = fs.add_watcher(lambda e: None,"/",recursive=False)
except (AttributeError,FSError):
return wrapper_class(fs,*args,**kwds)
else:
fs.del_watcher(w)
return fs
class iter_changes(object):
"""Blocking iterator over the change events produced by an FS.
This class can be used to transform the callback-based watcher mechanism
into a blocking stream of events. It operates by having the callbacks
push events onto a queue as they come in, then reading them off one at a
time.
"""
def __init__(self,fs=None,path="/",events=None,**kwds):
self.closed = False
self._queue = Queue.Queue()
self._watching = set()
if fs is not None:
self.add_watcher(fs,path,events,**kwds)
def __iter__(self):
return self
def __del__(self):
self.close()
def next(self,timeout=None):
if not self._watching:
raise StopIteration
try:
event = self._queue.get(timeout=timeout)
except Queue.Empty:
raise StopIteration
if event is None:
raise StopIteration
if isinstance(event,CLOSED):
event.fs.del_watcher(self._enqueue)
self._watching.remove(event.fs)
return event
def close(self):
if not self.closed:
self.closed = True
for fs in self._watching:
fs.del_watcher(self._enqueue)
self._queue.put(None)
def add_watcher(self,fs,path="/",events=None,**kwds):
w = fs.add_watcher(self._enqueue,path,events,**kwds)
self._watching.add(fs)
return w
def _enqueue(self,event):
self._queue.put(event)
def del_watcher(self,watcher):
for fs in self._watching:
try:
fs.del_watcher(watcher)
break
except ValueError:
pass
else:
raise ValueError("watcher not found: %s" % (watcher,))
| bsd-3-clause | -2,816,136,849,653,434,000 | 34.386466 | 130 | 0.581464 | false |
Telestream/telestream-cloud-python-sdk | telestream_cloud_tts_sdk/test/test_fragment.py | 1 | 1571 | # coding: utf-8
"""
Tts API
Description # noqa: E501
The version of the OpenAPI document: 2.0.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import telestream_cloud_tts
from telestream_cloud_tts.models.fragment import Fragment # noqa: E501
from telestream_cloud_tts.rest import ApiException
class TestFragment(unittest.TestCase):
"""Fragment unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test Fragment
include_option is a boolean, when False only required
params are included, when True both required and
optional params are included """
# model = telestream_cloud_tts.models.fragment.Fragment() # noqa: E501
if include_optional :
return Fragment(
start_time = 0.11,
variants = [
telestream_cloud_tts.models.fragment_variant.FragmentVariant(
fragment = 'Lorem',
confidence = 0.9, )
],
end_time = 0.45
)
else :
return Fragment(
)
def testFragment(self):
"""Test Fragment"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
| mit | 8,340,039,760,802,430,000 | 25.627119 | 81 | 0.590707 | false |
mudyc/deftity | text.py | 1 | 2784 | # deftity - a tool for interaction architect
#
# Copyright (C) 2011 Matti Katila
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
# Written by Matti J. Katila, 2011
import pango
import pangocairo
import cairo
import tool
import actions
class TextComp(tool.Component, actions.KeyHandler):
def __init__(self):
self.wh = [100, 40]
self.data = { 'text': 'Text..', 'size': '' }
self.modelF = self.get_data
self.name = 'text'
def save_data(self):
ret = tool.Component.save_data(self)
ret['data'] = self.data
return ret
def get_data(self): return self.data
def pos(self, x,y): self.xy = [ x,y]
def size(self, w,h): self.wh = [w,h]
def xywh(self): return (self.xy[0], self.xy[1], self.wh[0], self.wh[1])
def draw(self, c, tc, mx, my):
x,y,w,h = self.xywh()
if self.is_close(mx, my):
c.new_path()
c.rectangle(x,y,w,h)
c.close_path()
c.set_source(cairo.SolidPattern(1,0,.7, .2))
c.fill_preserve()
c.move_to(x, y)
pctx = pangocairo.CairoContext(c)
pctx.set_antialias(cairo.ANTIALIAS_SUBPIXEL)
layout = pctx.create_layout()
self.layout = layout
fontname = "Sans "+str(self.data['size'])
font = pango.FontDescription(fontname)
layout.set_font_description(font)
layout.set_width(int(w*pango.SCALE))
layout.set_wrap(pango.WRAP_WORD_CHAR)
layout.set_justify(True)
layout.set_text(self.modelF()[self.name])
if self in tc.selected_comps:
c.set_source_rgb(1, 0, 0)
else:
c.set_source_rgb(0, 0, 0)
pctx.update_layout(layout)
pctx.show_layout(layout)
def mouse_released(self, tc, mx,my):
x,y,w,h = self.xywh()
tc.cursor.set_obj(self, self.layout.xy_to_index(
int((mx-x)*pango.SCALE), int((my-y)*pango.SCALE))[0])
def key(self, k, cur=None):
actions.KeyHandler.key(self, k, cur, {'Return': '\n'})
if self.modelF()[self.name] == '':
self.tool.comps.remove(self)
| gpl-2.0 | 6,217,041,692,376,003,000 | 32.95122 | 80 | 0.619253 | false |
minesense/VisTrails | vistrails/db/versions/v0_8_1/translate/v0_8_0.py | 2 | 4302 | ###############################################################################
##
## Copyright (C) 2014-2016, New York University.
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: [email protected]
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the New York University nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
from __future__ import division
from vistrails.db import VistrailsDBException
from vistrails.db.versions.v0_8_0.domain import DBAdd, DBAnnotation, DBChange, DBDelete
# two step process
# 1. remap all the old "notes" so that they exist in the id scope
# 2. remap all the annotations that were numbered correctly
# note that for 2, we don't need to worry about uniqueness -- they are unique
# but step 1 may have taken some of their ids...
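# For example (ids illustrative): an 'add' op on an annotation with objectId=3
# gets a fresh id from vistrail.idScope, and id_remap records {3: new_id} so
# that later 'change'/'delete' ops on the same annotation resolve correctly.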
def translateVistrail(vistrail):
id_remap = {}
for action in vistrail.db_get_actions():
# don't need to change key idx since none of that changes
new_action_idx = {}
for annotation in action.db_get_annotations():
annotation.db_id = vistrail.idScope.getNewId(DBAnnotation.vtType)
new_action_idx[annotation.db_id] = annotation
action.db_annotations_id_index = new_action_idx
for operation in action.db_get_operations():
# never have annotations as parent objs so
# don't have to worry about those ids
if operation.db_what == DBAnnotation.vtType:
if operation.vtType == 'add':
new_id = vistrail.idScope.getNewId(DBAnnotation.vtType)
old_id = operation.db_objectId
operation.db_objectId = new_id
operation.db_data.db_id = new_id
id_remap[old_id] = new_id
elif operation.vtType == 'change':
changed_id = operation.db_oldObjId
if id_remap.has_key(changed_id):
operation.db_oldObjId = id_remap[changed_id]
else:
raise VistrailsDBException('cannot translate')
new_id = vistrail.idScope.getNewId(DBAnnotation.vtType)
old_id = operation.db_newObjId
operation.db_newObjId = new_id
operation.db_data.db_id = new_id
id_remap[old_id] = new_id
elif operation.vtType == 'delete':
old_id = operation.db_objectId
if id_remap.has_key(old_id):
operation.db_objectId = id_remap[old_id]
else:
raise VistrailsDBException('cannot translate')
vistrail.db_version = '0.8.1'
return vistrail
| bsd-3-clause | -7,632,964,265,899,056,000 | 48.448276 | 87 | 0.630404 | false |
tysonholub/twilio-python | tests/integration/taskrouter/v1/workspace/test_task_channel.py | 1 | 12513 | # coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from tests import IntegrationTestCase
from tests.holodeck import Request
from twilio.base.exceptions import TwilioException
from twilio.http.response import Response
class TaskChannelTestCase(IntegrationTestCase):
def test_fetch_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.taskrouter.v1.workspaces(sid="WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.task_channels(sid="TCXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()
self.holodeck.assert_has_request(Request(
'get',
'https://taskrouter.twilio.com/v1/Workspaces/WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/TaskChannels/TCXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
))
def test_fetch_sid_response(self):
self.holodeck.mock(Response(
200,
'''
{
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"date_created": "2016-04-14T17:35:54Z",
"date_updated": "2016-04-14T17:35:54Z",
"friendly_name": "Default",
"sid": "TCaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"unique_name": "default",
"url": "https://taskrouter.twilio.com/v1/Workspaces/WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/TaskChannels/TCaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"workspace_sid": "WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"channel_optimized_routing": true,
"links": {
"workspace": "https://taskrouter.twilio.com/v1/Workspaces/WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
}
}
'''
))
actual = self.client.taskrouter.v1.workspaces(sid="WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.task_channels(sid="TCXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()
self.assertIsNotNone(actual)
def test_fetch_unique_name_response(self):
self.holodeck.mock(Response(
200,
'''
{
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"date_created": "2016-04-14T17:35:54Z",
"date_updated": "2016-04-14T17:35:54Z",
"friendly_name": "Default",
"sid": "TCaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"unique_name": "default",
"url": "https://taskrouter.twilio.com/v1/Workspaces/WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/TaskChannels/TCaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"workspace_sid": "WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"channel_optimized_routing": false,
"links": {
"workspace": "https://taskrouter.twilio.com/v1/Workspaces/WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
}
}
'''
))
actual = self.client.taskrouter.v1.workspaces(sid="WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.task_channels(sid="TCXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()
self.assertIsNotNone(actual)
def test_list_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.taskrouter.v1.workspaces(sid="WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.task_channels.list()
self.holodeck.assert_has_request(Request(
'get',
'https://taskrouter.twilio.com/v1/Workspaces/WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/TaskChannels',
))
def test_read_full_response(self):
self.holodeck.mock(Response(
200,
'''
{
"channels": [
{
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"date_created": "2016-04-14T17:35:54Z",
"date_updated": "2016-04-14T17:35:54Z",
"friendly_name": "Default",
"sid": "TCaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"unique_name": "default",
"url": "https://taskrouter.twilio.com/v1/Workspaces/WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/TaskChannels/TCaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"workspace_sid": "WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"channel_optimized_routing": true,
"links": {
"workspace": "https://taskrouter.twilio.com/v1/Workspaces/WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
}
}
],
"meta": {
"first_page_url": "https://taskrouter.twilio.com/v1/Workspaces/WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/TaskChannels?PageSize=50&Page=0",
"key": "channels",
"next_page_url": null,
"page": 0,
"page_size": 50,
"previous_page_url": null,
"url": "https://taskrouter.twilio.com/v1/Workspaces/WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/TaskChannels?PageSize=50&Page=0"
}
}
'''
))
actual = self.client.taskrouter.v1.workspaces(sid="WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.task_channels.list()
self.assertIsNotNone(actual)
def test_read_empty_response(self):
self.holodeck.mock(Response(
200,
'''
{
"channels": [],
"meta": {
"first_page_url": "https://taskrouter.twilio.com/v1/Workspaces/WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/TaskChannels?PageSize=50&Page=0",
"key": "channels",
"next_page_url": null,
"page": 0,
"page_size": 50,
"previous_page_url": null,
"url": "https://taskrouter.twilio.com/v1/Workspaces/WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/TaskChannels?PageSize=50&Page=0"
}
}
'''
))
actual = self.client.taskrouter.v1.workspaces(sid="WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.task_channels.list()
self.assertIsNotNone(actual)
def test_update_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.taskrouter.v1.workspaces(sid="WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.task_channels(sid="TCXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").update()
self.holodeck.assert_has_request(Request(
'post',
'https://taskrouter.twilio.com/v1/Workspaces/WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/TaskChannels/TCXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
))
def test_update_sid_response(self):
self.holodeck.mock(Response(
200,
'''
{
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"workspace_sid": "WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"sid": "TCaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"friendly_name": "Default",
"unique_name": "default",
"date_created": "2016-04-14T17:35:54Z",
"date_updated": "2016-04-14T17:35:54Z",
"url": "https://taskrouter.twilio.com/v1/Workspaces/WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/TaskChannels/TCaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"channel_optimized_routing": true,
"links": {
"workspace": "https://taskrouter.twilio.com/v1/Workspaces/WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
}
}
'''
))
actual = self.client.taskrouter.v1.workspaces(sid="WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.task_channels(sid="TCXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").update()
self.assertIsNotNone(actual)
def test_update_unique_name_response(self):
self.holodeck.mock(Response(
200,
'''
{
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"workspace_sid": "WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"sid": "TCaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"friendly_name": "Default",
"unique_name": "default",
"date_created": "2016-04-14T17:35:54Z",
"date_updated": "2016-04-14T17:35:54Z",
"url": "https://taskrouter.twilio.com/v1/Workspaces/WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/TaskChannels/TCaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"channel_optimized_routing": true,
"links": {
"workspace": "https://taskrouter.twilio.com/v1/Workspaces/WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
}
}
'''
))
actual = self.client.taskrouter.v1.workspaces(sid="WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.task_channels(sid="TCXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").update()
self.assertIsNotNone(actual)
def test_delete_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.taskrouter.v1.workspaces(sid="WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.task_channels(sid="TCXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").delete()
self.holodeck.assert_has_request(Request(
'delete',
'https://taskrouter.twilio.com/v1/Workspaces/WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/TaskChannels/TCXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
))
def test_delete_sid_response(self):
self.holodeck.mock(Response(
204,
None,
))
actual = self.client.taskrouter.v1.workspaces(sid="WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.task_channels(sid="TCXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").delete()
self.assertTrue(actual)
def test_delete_unique_name_response(self):
self.holodeck.mock(Response(
204,
None,
))
actual = self.client.taskrouter.v1.workspaces(sid="WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.task_channels(sid="TCXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").delete()
self.assertTrue(actual)
def test_create_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.taskrouter.v1.workspaces(sid="WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.task_channels.create(friendly_name="friendly_name", unique_name="unique_name")
values = {'FriendlyName': "friendly_name", 'UniqueName': "unique_name", }
self.holodeck.assert_has_request(Request(
'post',
'https://taskrouter.twilio.com/v1/Workspaces/WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/TaskChannels',
data=values,
))
def test_create_response(self):
self.holodeck.mock(Response(
201,
'''
{
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"workspace_sid": "WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"sid": "TCaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"friendly_name": "Outbound Voice",
"unique_name": "ovoice",
"date_created": "2016-04-14T17:35:54Z",
"date_updated": "2016-04-14T17:35:54Z",
"channel_optimized_routing": true,
"url": "https://taskrouter.twilio.com/v1/Workspaces/WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/TaskChannels/TCaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"links": {
"workspace": "https://taskrouter.twilio.com/v1/Workspaces/WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
}
}
'''
))
actual = self.client.taskrouter.v1.workspaces(sid="WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.task_channels.create(friendly_name="friendly_name", unique_name="unique_name")
self.assertIsNotNone(actual)
| mit | 5,694,684,889,522,185,000 | 41.706485 | 160 | 0.566291 | false |
bohdan-shramko/learning-python | source/chapter05/geek_translator.py | 1 | 2413 | # Geek Translator
# Demonstrates using dictionaries
geek = {"404": "clueless. From the web error message 404, meaning page not found.",
"Googling": "searching the Internet for background information on a person.",
"Keyboard Plaque" : "the collection of debris found in computer keyboards.",
"Link Rot" : "the process by which web page links become obsolete.",
"Percussive Maintenance" : "the act of striking an electronic device to make it work.",
"Uninstalled" : "being fired. Especially popular during the dot-bomb era."}
choice = None
while choice != "0":
print(
"""
Geek Translator
0 - Quit
1 - Look Up a Geek Term
2 - Add a Geek Term
3 - Redefine a Geek Term
4 - Delete a Geek Term
"""
)
choice = input("Choice: ")
print()
# exit
if choice == "0":
print("Good-bye.")
# get a definition
elif choice == "1":
term = input("What term do you want me to translate?: ")
if term in geek:
definition = geek[term]
print("\n", term, "means", definition)
else:
print("\nSorry, I don't know", term)
# add a term-definition pair
elif choice == "2":
term = input("What term do you want me to add?: ")
if term not in geek:
definition = input("\nWhat's the definition?: ")
geek[term] = definition
print("\n", term, "has been added.")
else:
print("\nThat term already exists! Try redefining it.")
# redefine an existing term
elif choice == "3":
term = input("What term do you want me to redefine?: ")
if term in geek:
definition = input("What's the new definition?: ")
geek[term] = definition
print("\n", term, "has been redefined.")
else:
print("\nThat term doesn't exist! Try adding it.")
# delete a term-definition pair
elif choice == "4":
term = input("What term do you want me to delete?: ")
if term in geek:
del geek[term]
print("\nOkay, I deleted", term)
else:
print("\nI can't do that!", term, "doesn't exist in the dictionary.")
# some unknown choice
else:
print("\nSorry, but", choice, "isn't a valid choice.")
input("\n\nPress the enter key to exit.")
| mit | -6,250,750,825,611,069,000 | 31.173333 | 95 | 0.56237 | false |
kacchan822/django-chatwork | chatwork/utils.py | 1 | 1552 | from django.template.loader import render_to_string
from .api import ChatworkApiClient
client = ChatworkApiClient()
api_account_info = client.get_my_profile()
api_account_id = getattr(api_account_info, 'account_id', '0')
api_room_id = getattr(api_account_info, 'room_id', '0')
def get_rooms(room_type='group'):
""" 所属するルームを取得する """
rooms = client.get_rooms()
return [room for room in rooms if room['type'] == room_type]
def send_chatwork(text, room, title=None, to_all=None):
""" 一つのルームにメッセージを送信する """
context = {
'body': text,
'title': title,
'to_all': to_all,
}
message = render_to_string('chatwork/message.txt', context)
return client.add_messages(room, message.strip())
def send_chatwork_many(text, rooms, title=None, to_all=None):
""" 複数のルームにメッセージを送信する """
results = []
for room in rooms:
result = send_chatwork(text, room, title=title, to_all=to_all)
results.append(result)
return results
def delete_message(room_id, message_id):
""" 指定したメッセージを削除する """
return client.delete_message(room_id, message_id)
def create_task(text, room, assigned_to, limit=None, **kwargs):
""" タスクを依頼する """
data = {
'body': text,
'to_ids': ','.join(list(map(str, assigned_to))),
}
if limit is not None:
data['limit'] = int(limit.timestamp())
return client.add_tasks(room, **data)
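# Sketch of create_task usage (room and account ids are placeholders; the
# datetime import is an assumption of this example):
#
#     from datetime import datetime
#     create_task('Review the draft', 123, [111, 222],
#                 limit=datetime(2020, 1, 31))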
| mit | 5,175,957,696,668,686,000 | 26.764706 | 70 | 0.632768 | false |
COCS4950G7/COSC4950 | Source/demoCrack2.py | 1 | 6002 | # Chris Bugg
# 10/1/14
# NOTE: Runs on Python 2.7.6
# UPDATE:
# 10/10/14
# -> Now runs with 8 sub-processes using
# the [a-z] alphabet
import hashlib
from time import time
from multiprocessing import Process, Pipe, Lock
import os
class DemoCrack():
algorithm = "sha256"
origHash = ''
alphabet = list("abcdefghijklmnopqrstuvwxyz")
chunk1 = 1
chunk2 = 1
key = ''
alphaChoice = "abcdefghijklmnopqrstuvwxyz"
countey = 0
def __init__(self):
os.system('cls' if os.name == 'nt' else 'clear')
self.whatWeGot()
self.getHash()
os.system('cls' if os.name == 'nt' else 'clear')
self.whatWeGot()
self.chunkIt()
start = time()
self.countey += 1
lock = Lock()
parentPipe, childPipe = Pipe()
child1 = Process(target=self.subProcess, args=(childPipe, lock, ))
child2 = Process(target=self.subProcess, args=(childPipe, lock, ))
child3 = Process(target=self.subProcess, args=(childPipe, lock, ))
child4 = Process(target=self.subProcess, args=(childPipe, lock, ))
child5 = Process(target=self.subProcess, args=(childPipe, lock, ))
child6 = Process(target=self.subProcess, args=(childPipe, lock, ))
child7 = Process(target=self.subProcess, args=(childPipe, lock, ))
child8 = Process(target=self.subProcess, args=(childPipe, lock, ))
child1.start()
child2.start()
child3.start()
child4.start()
child5.start()
child6.start()
child7.start()
child8.start()
parentPipe.send("6")
parentPipe.send(self.chunk1)
parentPipe.send("6")
parentPipe.send(self.chunk2)
parentPipe.send("6")
parentPipe.send(self.chunk3)
parentPipe.send("6")
parentPipe.send(self.chunk4)
parentPipe.send("6")
parentPipe.send(self.chunk5)
parentPipe.send("6")
parentPipe.send(self.chunk6)
parentPipe.send("6")
parentPipe.send(self.chunk7)
parentPipe.send("6")
parentPipe.send(self.chunk8)
count = 0
done = False
rec = 0
while not done:
if count > 7:
child1.join()
child2.join()
child3.join()
child4.join()
child5.join()
child6.join()
child7.join()
child8.join()
print "No Dice!"
done = True
else:
rec = parentPipe.recv()
if rec == "found":
self.countey = parentPipe.recv()
child1.terminate()
child2.terminate()
child3.terminate()
child4.terminate()
child5.terminate()
child6.terminate()
child7.terminate()
child8.terminate()
done = True
count += 1
elapsed = (time() - start)
print "That took: ", elapsed, " seconds."
speed = (8 * int(self.countey)) / elapsed
if rec == "found":
print "At about: ", speed, " hashes per second."
exit = raw_input("Hit (Enter/Return) to quit ")
def subProcess(self, pipe, lock):
lock.acquire()
loops = pipe.recv()
alphabet = pipe.recv()
lock.release()
if self.looper6(alphabet) == True:
lock.acquire()
pipe.send("found")
pipe.send(self.countey)
pipe.close()
lock. release()
else:
lock.acquire()
pipe.send("not found")
pipe.close()
lock. release()
def chunkIt(self):
chunky = [self.alphabet[i::8] for i in range(8)]
self.chunk1 = chunky.pop()
self.chunk2 = chunky.pop()
self.chunk3 = chunky.pop()
self.chunk4 = chunky.pop()
self.chunk5 = chunky.pop()
self.chunk6 = chunky.pop()
self.chunk7 = chunky.pop()
self.chunk8 = chunky.pop()
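    # Illustration of the interleaved slicing above with a 4-letter alphabet
    # split across 2 workers (example only, not executed):
    #
    #     ["a", "b", "c", "d"][0::2]  ->  ["a", "c"]
    #     ["a", "b", "c", "d"][1::2]  ->  ["b", "d"]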
def getHash(self):
key = raw_input("What's the 6 LowerCase-Letter Key: ")
self.key = key
tempKey = hashlib.sha256()
byteKey = str.encode(key)
type(byteKey)
tempKey.update(byteKey)
self.origHash = tempKey.hexdigest()
print "The Key you entered was: ", key
print "Which has a hash of: ", self.origHash
def whatWeGot(self):
print "**********************************"
print "Here's what we've got so far: "
print
print "Key is: ", self.key
print "Hash is: ", self.origHash
print "Searching: ", self.alphaChoice
print "**********************************"
def isSolution(self, key):
tempKey = hashlib.sha256()
byteKey = str.encode(key)
type(byteKey)
tempKey.update(byteKey)
possible = tempKey.hexdigest()
if possible == self.origHash:
print
print"Solution found!"
print "Key is: ", key
print "Which has a hash of: ", possible
return True
else:
return False
def looper6(self, alphabet):
for x in alphabet:
print "Searching ...", x, "*****"
for y in self.alphabet:
for z in self.alphabet:
for a in self.alphabet:
for b in self.alphabet:
for c in self.alphabet:
self.countey += 1
key = x + y + z + a + b + c
if self.isSolution(key):
return True
return False
DemoCrack() | gpl-3.0 | -6,494,871,366,440,019,000 | 18.178914 | 74 | 0.489837 | false |
weaponsjtu/RecommederSystem | data_model.py | 1 | 1487 | import sys
import math
import time
class MatrixModel():
    def __init__(self, data_dic, train_conf, test_conf, matrix=None, user_list=None, item_list=None):
        self.data_dic = data_dic
        self.train_conf = train_conf
        self.test_conf = test_conf
        self.matrix = matrix
        self.user_list = user_list
        self.item_list = item_list
# compute the user_item matrix
    # aggregate the number of times each item was bought
# split dataset into train/test
# train_conf/test_conf: [start, end], eg: [2012.1, 2012.6]
def split_data(self):
print "function purchase_matrix"
rows = len(self.user_list)
cols = len(self.item_list)
train = []
test = []
for i in range(rows):
u_train = [0] * cols
u_test = [0] * cols
uid = self.user_list[i]
if self.data_dic.has_key(uid):
for t in self.data_dic[uid]:
date = t[1]
if date.find("2014") == -1:
u_train[ t[0] ] = 1
else:
#u_train[ t[0] ] = 1
u_test[ t[0] ] = 1
train.append( u_train )
test.append( u_test )
self.train = train
self.test = test
def is_train_sample(self, date):
pass
def is_test_sample(self, date):
pass
def sort_by_row(row_index):
pass
def sort_by_col(col_index):
pass
class User():
def __init__(self):
pass
class Item():
def __init__(self):
pass
| apache-2.0 | -184,760,025,857,332,130 | 24.637931 | 107 | 0.488904 | false |
home-assistant/home-assistant | homeassistant/components/juicenet/__init__.py | 1 | 3040 | """The JuiceNet integration."""
from datetime import timedelta
import logging
import aiohttp
from pyjuicenet import Api, TokenError
import voluptuous as vol
from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry
from homeassistant.const import CONF_ACCESS_TOKEN
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
from .const import DOMAIN, JUICENET_API, JUICENET_COORDINATOR
from .device import JuiceNetApi
_LOGGER = logging.getLogger(__name__)
PLATFORMS = ["sensor", "switch"]
CONFIG_SCHEMA = vol.Schema(
vol.All(
cv.deprecated(DOMAIN),
{DOMAIN: vol.Schema({vol.Required(CONF_ACCESS_TOKEN): cv.string})},
),
extra=vol.ALLOW_EXTRA,
)
async def async_setup(hass: HomeAssistant, config: dict):
"""Set up the JuiceNet component."""
conf = config.get(DOMAIN)
hass.data.setdefault(DOMAIN, {})
if not conf:
return True
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_IMPORT}, data=conf
)
)
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Set up JuiceNet from a config entry."""
config = entry.data
session = async_get_clientsession(hass)
access_token = config[CONF_ACCESS_TOKEN]
api = Api(access_token, session)
juicenet = JuiceNetApi(api)
try:
await juicenet.setup()
except TokenError as error:
_LOGGER.error("JuiceNet Error %s", error)
return False
except aiohttp.ClientError as error:
_LOGGER.error("Could not reach the JuiceNet API %s", error)
raise ConfigEntryNotReady from error
if not juicenet.devices:
_LOGGER.error("No JuiceNet devices found for this account")
return False
_LOGGER.info("%d JuiceNet device(s) found", len(juicenet.devices))
async def async_update_data():
"""Update all device states from the JuiceNet API."""
for device in juicenet.devices:
await device.update_state(True)
return True
coordinator = DataUpdateCoordinator(
hass,
_LOGGER,
name="JuiceNet",
update_method=async_update_data,
update_interval=timedelta(seconds=30),
)
hass.data[DOMAIN][entry.entry_id] = {
JUICENET_API: juicenet,
JUICENET_COORDINATOR: coordinator,
}
await coordinator.async_config_entry_first_refresh()
hass.config_entries.async_setup_platforms(entry, PLATFORMS)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Unload a config entry."""
unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
if unload_ok:
hass.data[DOMAIN].pop(entry.entry_id)
return unload_ok
| apache-2.0 | 7,707,611,442,826,321,000 | 27.679245 | 82 | 0.694079 | false |
JulianKunkel/siox | tools/siox-skeleton-builder/layers/posix/template.py | 1 | 1048 | template = {
'errorErrno': {
'variables': 'Condition="ret<0" Activity=sioxActivity',
'afterFirst': ''' int errsv = errno;''',
'after': '''
if ( %(Condition)s ){
siox_activity_report_error( %(Activity)s, translateErrnoToSIOX(errsv) );
siox_activity_end(%(Activity)s);
errno = errsv;
return ret;
}''',
},
'POSIX_activity': {
'variables': 'Name=%(FUNCTION_NAME)s ComponentActivity=cv%(FUNCTION_NAME)s ComponentVariable=global ActivityVar=sioxActivity Condition="ret<0"',
'templates': ["@guard", "@errorErrno Condition=''%(Condition)s'' Activity=%(ActivityVar)s", "@activity Name=%(Name)s ComponentActivity=%(ComponentActivity)s ComponentVariable=%(ComponentVariable)s ActivityVariable=%(ActivityVar)s"]
},
'syscall' : {
'variables' : 'Name=%(FUNCTION_NAME)s Arguments',
'templates' : ["@replaceCall syscall ''%(Name)s,%(Arguments)s''"]
}
}
templateParameters = {
"includes" : ['<syscall.h>']
}
| lgpl-3.0 | -3,724,696,507,870,285,000 | 42.666667 | 233 | 0.597328 | false |
gt-ros-pkg/hrl-pr2 | hrl_pr2_lib/src/hrl_pr2_lib/pr2.py | 1 | 22369 | import roslib; roslib.load_manifest('hrl_pr2_lib')
import rospy
import actionlib
import actionlib_msgs.msg as amsg
import move_base_msgs.msg as mm
import sensor_msgs.msg as sm
import pr2_controllers_msgs.msg as pm
import trajectory_msgs.msg as tm
import pr2_mechanism_msgs.srv as pmm
import std_msgs.msg as stdm
import geometry_msgs.msg as gm
import dynamic_reconfigure.client as dc
import tf
import tf.transformations as tr
import hrl_lib.tf_utils as tfu
import hrl_lib.rutils as ru
import hrl_lib.util as ut
import functools as ft
import numpy as np
import math
import time
import hrl_pr2_lib.msg as hm
#from sound_play.libsoundplay import SoundClient
#from interpolated_ik_motion_planner import ik_utilities as iku
import pr2_kinematics as pr2k
import os
import os.path as pt
import pdb
#Test this
def unwrap2(cpos, npos):
two_pi = 2*np.pi
nin = npos % two_pi
n_multiples_2pi = np.floor(cpos/two_pi)
return nin + n_multiples_2pi*two_pi
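# Worked example for unwrap2 (illustrative numbers only): it re-expresses
# npos inside the 2*pi window that contains cpos, so continuous joints are
# commanded without a sudden full-revolution jump.
#
#     unwrap2(7.0, 0.5)   # -> 0.5 + 2*pi ~= 6.78, since 7.0 lies in [2*pi, 4*pi)
#     unwrap2(-1.0, 0.5)  # -> 0.5 - 2*pi ~= -5.78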
def unwrap(cpos, npos):
two_pi = 2*np.pi
if cpos < npos:
while cpos < npos:
npos = npos - two_pi
npos = npos + two_pi
elif cpos > npos:
while cpos > npos:
npos = npos + two_pi
npos = npos - two_pi
return npos
def diff_arm_pose(pose1, pose2):
pcpy = pose2.copy()
pcpy[4,0] = unwrap2(pose1[4,0], pose2[4,0])
pcpy[6,0] = unwrap2(pose1[6,0], pose2[6,0])
diff = pose1 - pose2
for i in range(pose1.shape[0]):
diff[i,0] = ut.standard_rad(diff[i,0])
return diff
class KinematicsError(Exception):
def __init__(self, value):
self.parameter = value
def __str__(self):
return repr(self.parameter)
class Joint:
def __init__(self, name, joint_provider):
self.joint_provider = joint_provider
self.joint_names = rospy.get_param('/%s/joints' % name)
self.pub = rospy.Publisher('%s/command' % name, tm.JointTrajectory)
self.names_index = None
self.zeros = [0 for j in range(len(self.joint_names))]
def pose(self, joint_states=None):
        if joint_states is None:
joint_states = self.joint_provider()
        if self.names_index is None:
self.names_index = {}
for i, n in enumerate(joint_states.name):
self.names_index[n] = i
self.joint_idx = [self.names_index[n] for n in self.joint_names]
return (np.matrix(joint_states.position).T)[self.joint_idx, 0]
def _create_trajectory(self, pos_mat, times, vel_mat=None):
#Make JointTrajectoryPoints
points = [tm.JointTrajectoryPoint() for i in range(pos_mat.shape[1])]
for i in range(pos_mat.shape[1]):
points[i].positions = pos_mat[:,i].A1.tolist()
points[i].accelerations = self.zeros
            if vel_mat is None:
points[i].velocities = self.zeros
else:
points[i].velocities = vel_mat[:,i].A1.tolist()
for i in range(pos_mat.shape[1]):
points[i].time_from_start = rospy.Duration(times[i])
#Create JointTrajectory
jt = tm.JointTrajectory()
jt.joint_names = self.joint_names
jt.points = points
jt.header.stamp = rospy.get_rostime()
return jt
def set_poses(self, pos_mat, times):
joint_trajectory = self._create_trajectory(pos_mat, times)
self.pub.publish(joint_trajectory)
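    # Sketch of intended use (joint names and values are made up): pos_mat has
    # one row per joint in self.joint_names and one column per waypoint.
    #
    #     poses = np.column_stack([np.matrix([0., 0., 0.]).T,
    #                              np.matrix([.5, 1., 0.]).T])
    #     joint.set_poses(poses, np.array([2., 4.]))   # reach them at t=2s, t=4s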
class PR2Arm(Joint):
def __init__(self, joint_provider, tf_listener, arm, use_kinematics=True):
joint_controller_name = arm + '_arm_controller'
cart_controller_name = arm + '_arm_cartesian_pose_controller'
Joint.__init__(self, joint_controller_name, joint_provider)
self.arm = arm
self.tf_listener = tf_listener
self.client = actionlib.SimpleActionClient('/%s/joint_trajectory_action' % joint_controller_name, pm.JointTrajectoryAction)
rospy.loginfo('pr2arm: waiting for server %s' % joint_controller_name)
self.client.wait_for_server()
self.joint_controller_name = joint_controller_name
self.cart_posture_pub = rospy.Publisher("/%s/command_posture" % cart_controller_name, stdm.Float64MultiArray).publish
self.cart_pose_pub = rospy.Publisher("/%s/command" % cart_controller_name, gm.PoseStamped).publish
if arm == 'l':
self.full_arm_name = 'left'
else:
self.full_arm_name = 'right'
if use_kinematics:
self.kinematics = pr2k.PR2ArmKinematics(self.full_arm_name,
self.tf_listener)
#self.ik_utilities = iku.IKUtilities(self.full_arm_name, self.tf_listener)
self.POSTURES = {
'off': np.matrix([]),
'mantis': np.matrix([0, 1, 0, -1, 3.14, -1, 3.14]).T,
'elbowupr': np.matrix([-0.79,0,-1.6, 9999, 9999, 9999, 9999]).T,
'elbowupl': np.matrix([0.79,0,1.6 , 9999, 9999, 9999, 9999]).T,
'old_elbowupr': np.matrix([-0.79,0,-1.6, -0.79,3.14, -0.79,5.49]).T,
'old_elbowupl': np.matrix([0.79,0,1.6, -0.79,3.14, -0.79,5.49]).T,
'elbowdownr': np.matrix([-0.028262077316910873, 1.2946342642324222, -0.25785640577652386, -1.5498884526859626]).T,
'elbowdownl': np.matrix([-0.0088195719039858515, 1.2834828245284853, 0.20338442004843196, -1.5565279256852611]).T
}
def set_posture(self, posture_mat):
self.cart_posture_pub(stdm.Float64MultiArray(data=posture_mat.A1.tolist()))
##
# Send a cartesian pose to *_cart controllers
# @param trans len 3 list
# @param rot len 4 list
# @param frame string
# @param msg_time float
def set_cart_pose(self, trans, rot, frame, msg_time):
ps = gm.PoseStamped()
        for i, field in enumerate(['x', 'y', 'z']):
            setattr(ps.pose.position, field, trans[i])
        for i, field in enumerate(['x', 'y', 'z', 'w']):
            setattr(ps.pose.orientation, field, rot[i])
ps.header.frame_id = frame
ps.header.stamp = rospy.Time(msg_time)
self.cart_pose_pub(ps)
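    # Sketch (frame and numbers are illustrative): place the gripper 60 cm in
    # front of torso_lift_link with identity orientation, stamped "now".
    #
    #     arm.set_cart_pose([.6, 0., 0.], [0., 0., 0., 1.],
    #                       'torso_lift_link', rospy.get_time())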
##
# @param pos_mat column matrix of poses
# @param times array of times
def set_poses(self, pos_mat, times, vel_mat=None, block=True):
p = self.pose()
for i in range(pos_mat.shape[1]):
pos_mat[4,i] = unwrap2(p[4,0], pos_mat[4,i])
pos_mat[6,i] = unwrap2(p[6,0], pos_mat[6,i])
p = pos_mat[:,i]
joint_traj = Joint._create_trajectory(self, pos_mat, times, vel_mat)
#Create goal msg
joint_traj.header.stamp = rospy.get_rostime() + rospy.Duration(1.)
g = pm.JointTrajectoryGoal()
g.trajectory = joint_traj
self.client.send_goal(g)
if block:
return self.client.wait_for_result()
return self.client.get_state()
def stop_trajectory_execution(self):
self.client.cancel_all_goals()
def has_active_goal(self):
s = self.client.get_state()
        return s in (amsg.GoalStatus.ACTIVE, amsg.GoalStatus.PENDING)
def set_poses_monitored(self, pos_mat, times, vel_mat=None, block=True, time_look_ahead=.050):
joint_traj = Joint._create_trajectory(self, pos_mat, times, vel_mat)
#Create goal msg
joint_traj.header.stamp = rospy.get_rostime() + rospy.Duration(1.)
g = pm.JointTrajectoryGoal()
g.trajectory = joint_traj
self.client.send_goal(g)
if block:
return self.client.wait_for_result()
return self.client.get_state()
def set_pose(self, pos, nsecs=5., block=True):
for i in range(2):
cpos = self.pose()
pos[4,0] = unwrap(cpos[4,0], pos[4,0])
pos[6,0] = unwrap(cpos[6,0], pos[6,0])
self.set_poses(np.column_stack([pos]), np.array([nsecs]), block=block)
#self.set_poses(np.column_stack([cpos, pos]), np.array([min_time, min_time+nsecs]), block=block)
def pose_cartesian(self, frame='base_link'):
gripper_tool_frame = self.arm + '_gripper_tool_frame'
return tfu.transform(frame, gripper_tool_frame, self.tf_listener)
def pose_cartesian_tf(self, frame='base_link'):
p, r = tfu.matrix_as_tf(self.pose_cartesian(frame))
return np.matrix(p).T, np.matrix(r).T
class PR2Head(Joint):
def __init__(self, name, joint_provider):
Joint.__init__(self, name, joint_provider)
self.head_client = actionlib.SimpleActionClient('head_traj_controller/point_head_action',
pm.PointHeadAction)
def look_at(self, pt3d, frame='base_link', pointing_frame="wide_stereo_link",
pointing_axis=np.matrix([1, 0, 0.]).T, wait=True):
g = pm.PointHeadGoal()
g.target.header.frame_id = frame
g.target.point = gm.Point(*pt3d.T.A1.tolist())
#pdb.set_trace()
g.pointing_frame = pointing_frame
g.pointing_axis.x = pointing_axis[0,0]
g.pointing_axis.y = pointing_axis[1,0]
g.pointing_axis.z = pointing_axis[2,0]
g.min_duration = rospy.Duration(1.0)
g.max_velocity = 10.
self.head_client.send_goal(g)
if wait:
self.head_client.wait_for_result(rospy.Duration(1.))
            return self.head_client.get_state() == amsg.GoalStatus.SUCCEEDED
def set_pose(self, pos, nsecs=5.):
for i in range(2):
cpos = self.pose()
min_time = .1
self.set_poses(np.column_stack([cpos, pos]), np.array([min_time, min_time+nsecs]))
###
# DANGER. DON"T RUN STOP AND WALK AWAY.
##
class PR2Base:
def __init__(self, tflistener):
self.tflistener = tflistener
self.client = actionlib.SimpleActionClient('move_base', mm.MoveBaseAction)
rospy.loginfo('pr2base: waiting for move_base')
self.client.wait_for_server()
rospy.loginfo('pr2base: waiting transforms')
try:
self.tflistener.waitForTransform('map', 'base_footprint', rospy.Time(), rospy.Duration(20))
except Exception, e:
rospy.loginfo('pr2base: WARNING! Transform from map to base_footprint not found! Did you launch the nav stack?')
# pass
self.go_angle_client = actionlib.SimpleActionClient('go_angle', hm.GoAngleAction)
self.go_xy_client = actionlib.SimpleActionClient('go_xy', hm.GoXYAction)
##
# Turns to given angle using pure odometry
def turn_to(self, angle, block=True):
goal = hm.GoAngleGoal()
goal.angle = angle
self.go_angle_client.send_goal(goal)
print 'SENT TURN GOAL'
if block:
rospy.loginfo('turn_to: waiting for turn..')
self.go_angle_client.wait_for_result()
rospy.loginfo('turn_to: done.')
##
# Turns a relative amount given angle using pure odometry
def turn_by(self, delta_ang, block=True, overturn=False):
#overturn
if overturn and (abs(delta_ang) < math.radians(10.)):
#turn in that direction by an extra 15 deg
turn1 = np.sign(delta_ang) * math.radians(15.) + delta_ang
turn2 = -np.sign(delta_ang) * math.radians(15.)
rospy.loginfo('Requested really small turn angle. Using overturn trick.')
#pdb.set_trace()
self._turn_by(turn1, block=True)
time.sleep(3) #TODO remove this restriction
self._turn_by(turn2, block)
else:
self._turn_by(delta_ang, block)
def _turn_by(self, delta_ang, block=True):
current_ang_odom = tr.euler_from_matrix(tfu.transform('base_footprint',\
'odom_combined', self.tflistener)[0:3, 0:3], 'sxyz')[2]
self.turn_to(current_ang_odom + delta_ang, block)
##
# Move to xy_loc_bf
def move_to(self, xy_loc_bf, block=True):
goal = hm.GoXYGoal()
goal.x = xy_loc_bf[0,0]
goal.y = xy_loc_bf[1,0]
self.go_xy_client.send_goal(goal)
if block:
self.go_xy_client.wait_for_result()
def set_pose(self, t, r, frame, block=True):
g = mm.MoveBaseGoal()
p = g.target_pose
p.header.frame_id = frame
p.header.stamp = rospy.get_rostime()
p.pose.position.x = t[0]
p.pose.position.y = t[1]
p.pose.position.z = 0
p.pose.orientation.x = r[0]
p.pose.orientation.y = r[1]
p.pose.orientation.z = r[2]
p.pose.orientation.w = r[3]
self.client.send_goal(g)
if block:
self.client.wait_for_result()
return self.client.get_state()
def get_pose(self):
p_base = tfu.transform('map', 'base_footprint', self.tflistener) \
* tfu.tf_as_matrix(([0., 0., 0., 1.], tr.quaternion_from_euler(0,0,0)))
return tfu.matrix_as_tf(p_base)
class PR2Torso(Joint):
def __init__(self, joint_provider):
Joint.__init__(self, 'torso_controller', joint_provider)
self.torso = actionlib.SimpleActionClient('torso_controller/position_joint_action', pm.SingleJointPositionAction)
rospy.loginfo('waiting for torso_controller')
self.torso.wait_for_server()
def set_pose(self, p, block=True):
self.torso.send_goal(pm.SingleJointPositionGoal(position = p))
if block:
self.torso.wait_for_result()
return self.torso.get_state()
class PR2Gripper:
def __init__(self, gripper, joint_provider):
self.gripper = gripper
self.joint_provider = joint_provider
if gripper == 'l':
self.client = actionlib.SimpleActionClient(
'l_gripper_controller/gripper_action', pm.Pr2GripperCommandAction)
self.full_gripper_name = 'left_gripper'
self.joint_names = [rospy.get_param('/l_gripper_controller/joint')]
else:
self.client = actionlib.SimpleActionClient(
'r_gripper_controller/gripper_action', pm.Pr2GripperCommandAction)
self.full_gripper_name = 'right_gripper'
self.joint_names = [rospy.get_param('/r_gripper_controller/joint')]
self.client.wait_for_server()
self.names_index = None
def pose(self, joint_states=None):
        if joint_states is None:
joint_states = self.joint_provider()
        if self.names_index is None:
self.names_index = {}
for i, n in enumerate(joint_states.name):
self.names_index[n] = i
self.joint_idx = [self.names_index[n] for n in self.joint_names]
return (np.matrix(joint_states.position).T)[self.joint_idx, 0]
def close(self, block, position=0.0, effort=-1):
self.client.send_goal(pm.Pr2GripperCommandGoal(
pm.Pr2GripperCommand(position = position, max_effort = effort)))
if block:
self.client.wait_for_result()
return self.client.get_state()
def open(self, block, position=0.1, effort = -1):
self.client.send_goal(pm.Pr2GripperCommandGoal(
pm.Pr2GripperCommand(position = position, max_effort = effort)))
if block:
self.client.wait_for_result()
return self.client.get_state()
class StructuredLightProjector:
def __init__(self):
self.client = dc.Client("camera_synchronizer_node")
def set(self, on):
config = {"projector_mode":2}
if on:
config["narrow_stereo_trig_mode"] = 3
else:
config["narrow_stereo_trig_mode"] = 2
self.client.update_configuration(config)
def set_prosilica_inhibit(self, on):
self.node_config['prosilica_projector_inhibit'] = on
self.client.update_configuration(self.node_config)
class ControllerManager:
def __init__(self):
# LoadController
self.load = rospy.ServiceProxy('pr2_controller_manager/load_controller', pmm.LoadController)
# UnloadController
self.unload = rospy.ServiceProxy('pr2_controller_manager/unload_controller', pmm.UnloadController)
# SwitchController
self._switch_controller = rospy.ServiceProxy('pr2_controller_manager/switch_controller', pmm.SwitchController)
def switch(self, start_con, stop_con):
for n in start_con:
self.load(n)
resp = self._switch_controller(start_con, stop_con, pmm.SwitchControllerRequest.STRICT)
for n in stop_con:
self.unload(n)
return resp.ok
class SoundPlay:
def __init__(self):
self.ros_home = pt.join(os.getenv("HOME"), '.ros')
def say(self, phrase):
wav_file_name = pt.join(self.ros_home, 'soundplay_temp.wav')
os.system("text2wave %s -o %s" % (phrase, wav_file_name))
os.system("aplay %s" % (wav_file_name))
def play(self, filename):
os.system("aplay %s" % filename)
class PR2:
def __init__(self, tf_listener=None, arms=True, base=False, grippers=True,
use_kinematics=True, use_projector=True):
try:
rospy.init_node('pr2', anonymous=True)
except rospy.exceptions.ROSException, e:
pass
        if tf_listener is None:
self.tf_listener = tf.TransformListener()
else:
self.tf_listener = tf_listener
jl = ru.GenericListener('joint_state_listener', sm.JointState, 'joint_states', 100)
self.joint_provider = ft.partial(jl.read, allow_duplication=False, willing_to_wait=True, warn=False, quiet=True)
if arms:
self.left = PR2Arm(self.joint_provider, self.tf_listener, 'l',
use_kinematics)
self.right = PR2Arm(self.joint_provider, self.tf_listener, 'r',
use_kinematics)
if grippers:
self.left_gripper = PR2Gripper('l', self.joint_provider)
self.right_gripper = PR2Gripper('r', self.joint_provider)
self.head = PR2Head('head_traj_controller', self.joint_provider)
if base:
self.base = PR2Base(self.tf_listener)
self.torso = PR2Torso(self.joint_provider)
self.controller_manager = ControllerManager()
self.sound = SoundPlay()
#SoundClient()
if use_projector:
self.projector = StructuredLightProjector()
def pose(self):
s = self.joint_provider()
return {'larm': self.left.pose(s), 'rarm': self.right.pose(s), 'head_traj': self.head.pose(s)}
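def _demo_pr2():
    #Hypothetical usage sketch (added for illustration; not part of the
    #original module). It assumes a running PR2 (or simulator) with the
    #standard ROS controllers loaded; every pose/angle below is a placeholder.
    robot = PR2(base=True, use_projector=False)
    robot.torso.set_pose(0.1)                       #move torso to 0.1 m
    robot.head.look_at(np.matrix([1., 0., 1.]).T)   #look at a point ahead
    robot.left_gripper.open(True)
    robot.base.turn_by(math.radians(30))
    print robot.pose()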
#if __name__ == '__main__':
# #pr2 = PR2()
# #pr2.controller_manager
#
# raw_input('put robot in final pose')
# pose2 = pr2.left.pose_cartesian()
#
# raw_input('put robot in initial pose')
# pose1 = pr2.left.pose_cartesian()
# pose2 = pose1.copy()
# pose2[0,3] = pose2[0,3] + .2
# r = rospy.Rate(4)
# while not rospy.is_shutdown():
# cart = pr2.left.pose_cartesian()
# ik_sol = pr2.left.kinematics.ik(cart, 'base_link')
# if ik_sol != None:
# diff = pr2.left.kinematics.fk(ik_sol, 'base_link') - cart
# pos_diff = diff[0:3,3]
# print '%.2f %.2f %.2f' % (pos_diff[0,0], pos_diff[1,0], pos_diff[2,0])
#
# pdb.set_trace()
# print 'going to final pose'
# pr2.left.set_cart_pose_ik(pose2, 2.5)
#
# print 'going back to initial pose'
# pr2.left.set_cart_pose_ik(pose1, 2.5)
#
#
# r = rospy.Rate(4)
# while not rospy.is_shutdown():
# cart = pr2.left.pose_cartesian()
# ik_sol = pr2.left.kinematics.ik(cart, 'base_link', seed=pr2.left.pose())
# if ik_sol != None:
# print ik_sol.T
# r.sleep()
#from class PR2Arm
#def set_cart_pose_ik(self, cart, total_time, frame='base_link', block=True,
# seed=None, pos_spacing=.001, rot_spacing=.001, best_attempt=True):
# cpos = self.pose()
# start_pos, start_rot = tfu.matrix_as_tf(self.pose_cartesian(frame))
# #Check to see if there is an IK solution at end point.
# target_pose = None
# alpha = 1.
# dir_endpoint = cart[0:3,3] - start_pos
# while target_pose == None:
# target_pose = self.kinematics.ik(perturbed_cart, frame, seed)
# if target_pose == None:
# raise KinematicsError('Unable to reach goal at %s.' % str(cart))
# cpos = self.pose()
# start_pos, start_rot = tfu.matrix_as_tf(self.pose_cartesian(frame))
# end_pos, end_rot = tfu.matrix_as_tf(cart)
# interpolated_poses = self.ik_utilities.interpolate_cartesian(start_pos, start_rot, end_pos, end_rot, pos_spacing, rot_spacing)
# nsteps = len(interpolated_poses)
# tstep = total_time / nsteps
# tsteps = np.array(range(nsteps+1)) * tstep
# valid_wps = []
# valid_times = []
# #last_valid = seed
# #all_sols = []
# if seed == None:
# seed = cpos
# for idx, pose in enumerate(interpolated_poses):
# pos, rot = pose
# #sol = self.kinematics.ik(tfu.tf_as_matrix((pos,rot)), frame, seed=last_valid)
# sol = self.kinematics.ik(tfu.tf_as_matrix((pos,rot)), frame, seed=seed)
# if sol != None:
# sol_cpy = sol.copy()
# sol_cpy[4,0] = unwrap2(cpos[4,0], sol[4,0])
# sol_cpy[6,0] = unwrap2(cpos[6,0], sol[6,0])
# valid_wps.append(sol_cpy)
# valid_times.append(tsteps[idx])
# #cpos = sol_cpy
# #all_sols.append(sol)
# #last_valid = sol_cpy
# #valid_wps.reverse()
# #all_sols = np.column_stack(all_sols)
# #pdb.set_trace()
# if len(valid_wps) > 2:
# rospy.loginfo('set_cart_pose_ik: number of waypoints %d' % len(valid_wps))
# valid_wps_mat = np.column_stack(valid_wps)
# valid_times_arr = np.array(valid_times) + .3
# #self.set_pose(valid_wps_mat[:,0])
# #pdb.set_trace()
# self.set_poses(valid_wps_mat, valid_times_arr, block=block)
# else:
# raise KinematicsError('Unable to reach goal at %s. Not enough valid IK solutions.' % str(cart))
| bsd-3-clause | 4,273,235,845,285,186,600 | 34.338073 | 137 | 0.589611 | false |
qenops/dGraph | test/test5.py | 1 | 5488 | #!/usr/bin/python
'''Test for an openGL based stereo renderer - test binocular rendering to a single window
David Dunn
Feb 2017 - created
www.qenops.com
'''
__author__ = ('David Dunn')
__version__ = '1.0'
import OpenGL
OpenGL.ERROR_CHECKING = False # Uncomment for 2x speed up
OpenGL.ERROR_LOGGING = False # Uncomment for speed up
#OpenGL.FULL_LOGGING = True # Uncomment for verbose logging
#OpenGL.ERROR_ON_COPY = True # Comment for release
import OpenGL.GL as GL
import math, os
import numpy as np
import dGraph as dg
import dGraph.ui as ui
import dGraph.cameras as dgc
import dGraph.shapes as dgs
import dGraph.materials as dgm
import dGraph.shaders as dgshdr
import dGraph.config as config
import dGraph.util.imageManip as im
import time
MODELDIR = '%s/data'%os.path.dirname(__file__)
WINDOWS = [{
"name": 'Test 5',
"location": (0, 0),
#"location": (2436, 1936), # px coordinates of the startup screen for window location
#"size": (1920, 1080),
"size": (1600,800), # px size of the startup screen for centering
"center": (400,400), # center of the display
"refresh_rate": 60, # refreshrate of the display for precise time measuring
"px_size_mm": 0.09766, # px size of the display in mm
"distance_cm": 20, # distance from the viewer in cm,
#"is_hmd": False,
#"warp_path": 'data/calibration/newRight/',
},
]
def loadScene(renderStack, file=None, cross=False):
'''Load or create our sceneGraph'''
scene = dg.SceneGraph(file)
stereoCam = dgc.StereoCamera('front', scene)
stereoCam.setResolution((renderStack.width/2, renderStack.height))
stereoCam.setTranslate(0.,-.06,0.)
stereoCam.setRotate(20.,0.,0.)
stereoCam.setFOV(50.)
stereoCam.IPD = .062
crosses = [
#np.array((.031,.0,-10.)),
#np.array((-.031,.0,-10.)),
np.array((-.2,-.2,-10.)),
np.array((-.2,.0,-10.)),
np.array((-.2,.2,-10.)),
np.array((.0,-.2,-10.)),
np.array((.0,.0,-10.)),
np.array((.0,.2,-10.)),
np.array((.2,-.2,-10.)),
np.array((.2,.0,-10.)),
np.array((.2,.2,-10.)),
]
for idx, position in enumerate(crosses):
cross = dgs.PolySurface('cross%s'%idx, scene, file = '%s/cross.obj'%MODELDIR)
cross.setScale(.01,.01,.01)
cross.translate = position
renderStack.objects[cross.name] = cross
print(1,(idx/3.)/3.+1/3.,(idx%3)/3.+1/3.)
material = dgm.Material('material%s'%idx,ambient=(1,(idx/3.)/3.+1/3.,(idx%3)/3.+1/3.), amb_coeff=.5)
#material = dgm.Lambert('material%s'%idx,ambient=(1,0,0), amb_coeff=.5, diffuse=(1,1,1), diff_coeff=1)
cross.setMaterial(material)
renderStack.cameras = [stereoCam]
renderStack.append(stereoCam)
return True
def animateScene(renderStack, frame):
''' Create motion in our scene '''
# infinity rotate:
y = 1
x = math.cos(frame*math.pi/60)
for obj in renderStack.objects.itervalues():
obj.rotate += np.array((x,y,0.))
def addInput():
    # 'renderStack' is the module-level stack assigned in __main__ below.
    ui.add_key_callback(arrowKey, ui.KEY_RIGHT, renderStack=renderStack, direction=3)
    ui.add_key_callback(arrowKey, ui.KEY_LEFT, renderStack=renderStack, direction=2)
    ui.add_key_callback(arrowKey, ui.KEY_UP, renderStack=renderStack, direction=1)
    ui.add_key_callback(arrowKey, ui.KEY_DOWN, renderStack=renderStack, direction=0)
def arrowKey(window,renderStack,direction):
    for o in renderStack.objects.itervalues():
        if direction == 3:      # right
            o.rotate += np.array((0.,5.,0.))
        elif direction == 2:    # left
            o.rotate -= np.array((0.,5.,0.))
        elif direction == 1:    # up
            o.translate += np.array((0.,.01,0.))
        else:                   # down
            o.translate -= np.array((0.,.01,0.))
def drawScene(renderStack):
''' Render the stack '''
myStack = list(renderStack) # copy the renderStack so we can pop and do it again next frame
temp = myStack.pop()
temp.render(renderStack.width, renderStack.height, myStack) # Render our warp to screen
def setup():
winData = WINDOWS[0]
renderStack = ui.RenderStack()
renderStack.display = ui.Display(resolution=winData['size'])
ui.init()
mainWindow = renderStack.addWindow(ui.open_window(winData['name'], winData['location'][0], winData['location'][1], renderStack.display.width, renderStack.display.height))
if not mainWindow:
ui.terminate()
exit(1)
ui.make_context_current(mainWindow)
ui.add_key_callback(ui.close_window, ui.KEY_ESCAPE)
    scene = loadScene(renderStack)
    renderStack.graphicsCardInit()
    return renderStack, scene, [mainWindow]
def runLoop(renderStack, mainWindow):
# Print message to console, and kick off the loop to get it rolling.
print("Hit ESC key to quit.")
frame = 0
start = time.time()
while not ui.window_should_close(mainWindow):
ui.make_context_current(mainWindow)
drawScene(renderStack)
now = time.time()
time.sleep(max((frame+1)/config.maxFPS+start-now,0))
ui.swap_buffers(mainWindow)
ui.poll_events()
#animateScene(renderStack, frame)
frame += 1
ui.terminate()
exit(0)
if __name__ == '__main__':
renderStack, scene, windows = setup()
addInput()
runLoop(renderStack, windows[0])
| apache-2.0 | 1,592,792,672,699,302,100 | 35.586667 | 174 | 0.622449 | false |
glenngillen/dotfiles | .vscode/extensions/ms-python.python-2021.5.842923320/pythonFiles/lib/python/debugpy/_vendored/pydevd/_pydevd_frame_eval/vendored/bytecode/tests/test_peephole_opt.py | 1 | 33978 |
import pytest
from tests_python.debugger_unittest import IS_PY36_OR_GREATER, IS_CPYTHON
from tests_python.debug_constants import TEST_CYTHON
pytestmark = pytest.mark.skipif(not IS_PY36_OR_GREATER or not IS_CPYTHON or not TEST_CYTHON, reason='Requires CPython >= 3.6')
import sys
import unittest
from _pydevd_frame_eval.vendored.bytecode import Label, Instr, Compare, Bytecode, ControlFlowGraph
from _pydevd_frame_eval.vendored.bytecode import peephole_opt
from _pydevd_frame_eval.vendored.bytecode.tests import TestCase, dump_bytecode
from unittest import mock
class Tests(TestCase):
maxDiff = 80 * 100
def optimize_blocks(self, code):
if isinstance(code, Bytecode):
code = ControlFlowGraph.from_bytecode(code)
optimizer = peephole_opt.PeepholeOptimizer()
optimizer.optimize_cfg(code)
return code
def check(self, code, *expected):
if isinstance(code, Bytecode):
code = ControlFlowGraph.from_bytecode(code)
optimizer = peephole_opt.PeepholeOptimizer()
optimizer.optimize_cfg(code)
code = code.to_bytecode()
try:
self.assertEqual(code, expected)
except AssertionError:
print("Optimized code:")
dump_bytecode(code)
print("Expected code:")
for instr in expected:
print(instr)
raise
def check_dont_optimize(self, code):
code = ControlFlowGraph.from_bytecode(code)
noopt = code.to_bytecode()
optim = self.optimize_blocks(code)
optim = optim.to_bytecode()
self.assertEqual(optim, noopt)
def test_unary_op(self):
def check_unary_op(op, value, result):
code = Bytecode(
[Instr("LOAD_CONST", value), Instr(op), Instr("STORE_NAME", "x")]
)
self.check(code, Instr("LOAD_CONST", result), Instr("STORE_NAME", "x"))
check_unary_op("UNARY_POSITIVE", 2, 2)
check_unary_op("UNARY_NEGATIVE", 3, -3)
check_unary_op("UNARY_INVERT", 5, -6)
def test_binary_op(self):
def check_bin_op(left, op, right, result):
code = Bytecode(
[
Instr("LOAD_CONST", left),
Instr("LOAD_CONST", right),
Instr(op),
Instr("STORE_NAME", "x"),
]
)
self.check(code, Instr("LOAD_CONST", result), Instr("STORE_NAME", "x"))
check_bin_op(10, "BINARY_ADD", 20, 30)
check_bin_op(5, "BINARY_SUBTRACT", 1, 4)
check_bin_op(5, "BINARY_MULTIPLY", 3, 15)
check_bin_op(10, "BINARY_TRUE_DIVIDE", 3, 10 / 3)
check_bin_op(10, "BINARY_FLOOR_DIVIDE", 3, 3)
check_bin_op(10, "BINARY_MODULO", 3, 1)
check_bin_op(2, "BINARY_POWER", 8, 256)
check_bin_op(1, "BINARY_LSHIFT", 3, 8)
check_bin_op(16, "BINARY_RSHIFT", 3, 2)
check_bin_op(10, "BINARY_AND", 3, 2)
check_bin_op(2, "BINARY_OR", 3, 3)
check_bin_op(2, "BINARY_XOR", 3, 1)
def test_combined_unary_bin_ops(self):
# x = 1 + 3 + 7
code = Bytecode(
[
Instr("LOAD_CONST", 1),
Instr("LOAD_CONST", 3),
Instr("BINARY_ADD"),
Instr("LOAD_CONST", 7),
Instr("BINARY_ADD"),
Instr("STORE_NAME", "x"),
]
)
self.check(code, Instr("LOAD_CONST", 11), Instr("STORE_NAME", "x"))
# x = ~(~(5))
code = Bytecode(
[
Instr("LOAD_CONST", 5),
Instr("UNARY_INVERT"),
Instr("UNARY_INVERT"),
Instr("STORE_NAME", "x"),
]
)
self.check(code, Instr("LOAD_CONST", 5), Instr("STORE_NAME", "x"))
# "events = [(0, 'call'), (1, 'line'), (-(3), 'call')]"
code = Bytecode(
[
Instr("LOAD_CONST", 0),
Instr("LOAD_CONST", "call"),
Instr("BUILD_TUPLE", 2),
Instr("LOAD_CONST", 1),
Instr("LOAD_CONST", "line"),
Instr("BUILD_TUPLE", 2),
Instr("LOAD_CONST", 3),
Instr("UNARY_NEGATIVE"),
Instr("LOAD_CONST", "call"),
Instr("BUILD_TUPLE", 2),
Instr("BUILD_LIST", 3),
Instr("STORE_NAME", "events"),
]
)
self.check(
code,
Instr("LOAD_CONST", (0, "call")),
Instr("LOAD_CONST", (1, "line")),
Instr("LOAD_CONST", (-3, "call")),
Instr("BUILD_LIST", 3),
Instr("STORE_NAME", "events"),
)
# 'x = (1,) + (0,) * 8'
code = Bytecode(
[
Instr("LOAD_CONST", 1),
Instr("BUILD_TUPLE", 1),
Instr("LOAD_CONST", 0),
Instr("BUILD_TUPLE", 1),
Instr("LOAD_CONST", 8),
Instr("BINARY_MULTIPLY"),
Instr("BINARY_ADD"),
Instr("STORE_NAME", "x"),
]
)
zeros = (0,) * 8
result = (1,) + zeros
self.check(code, Instr("LOAD_CONST", result), Instr("STORE_NAME", "x"))
def test_max_size(self):
max_size = 3
with mock.patch.object(peephole_opt, "MAX_SIZE", max_size):
# optimized binary operation: size <= maximum size
#
# (9,) * size
size = max_size
result = (9,) * size
code = Bytecode(
[
Instr("LOAD_CONST", 9),
Instr("BUILD_TUPLE", 1),
Instr("LOAD_CONST", size),
Instr("BINARY_MULTIPLY"),
Instr("STORE_NAME", "x"),
]
)
self.check(code, Instr("LOAD_CONST", result), Instr("STORE_NAME", "x"))
# don't optimize binary operation: size > maximum size
#
# x = (9,) * size
size = max_size + 1
code = Bytecode(
[
Instr("LOAD_CONST", 9),
Instr("BUILD_TUPLE", 1),
Instr("LOAD_CONST", size),
Instr("BINARY_MULTIPLY"),
Instr("STORE_NAME", "x"),
]
)
self.check(
code,
Instr("LOAD_CONST", (9,)),
Instr("LOAD_CONST", size),
Instr("BINARY_MULTIPLY"),
Instr("STORE_NAME", "x"),
)
def test_bin_op_dont_optimize(self):
# 1 / 0
code = Bytecode(
[
Instr("LOAD_CONST", 1),
Instr("LOAD_CONST", 0),
Instr("BINARY_TRUE_DIVIDE"),
Instr("POP_TOP"),
Instr("LOAD_CONST", None),
Instr("RETURN_VALUE"),
]
)
self.check_dont_optimize(code)
# 1 // 0
code = Bytecode(
[
Instr("LOAD_CONST", 1),
Instr("LOAD_CONST", 0),
Instr("BINARY_FLOOR_DIVIDE"),
Instr("POP_TOP"),
Instr("LOAD_CONST", None),
Instr("RETURN_VALUE"),
]
)
self.check_dont_optimize(code)
# 1 % 0
code = Bytecode(
[
Instr("LOAD_CONST", 1),
Instr("LOAD_CONST", 0),
Instr("BINARY_MODULO"),
Instr("POP_TOP"),
Instr("LOAD_CONST", None),
Instr("RETURN_VALUE"),
]
)
self.check_dont_optimize(code)
# 1 % 1j
code = Bytecode(
[
Instr("LOAD_CONST", 1),
Instr("LOAD_CONST", 1j),
Instr("BINARY_MODULO"),
Instr("POP_TOP"),
Instr("LOAD_CONST", None),
Instr("RETURN_VALUE"),
]
)
self.check_dont_optimize(code)
def test_build_tuple(self):
# x = (1, 2, 3)
code = Bytecode(
[
Instr("LOAD_CONST", 1),
Instr("LOAD_CONST", 2),
Instr("LOAD_CONST", 3),
Instr("BUILD_TUPLE", 3),
Instr("STORE_NAME", "x"),
]
)
self.check(code, Instr("LOAD_CONST", (1, 2, 3)), Instr("STORE_NAME", "x"))
def test_build_list(self):
# test = x in [1, 2, 3]
code = Bytecode(
[
Instr("LOAD_NAME", "x"),
Instr("LOAD_CONST", 1),
Instr("LOAD_CONST", 2),
Instr("LOAD_CONST", 3),
Instr("BUILD_LIST", 3),
Instr("COMPARE_OP", Compare.IN),
Instr("STORE_NAME", "test"),
]
)
self.check(
code,
Instr("LOAD_NAME", "x"),
Instr("LOAD_CONST", (1, 2, 3)),
Instr("COMPARE_OP", Compare.IN),
Instr("STORE_NAME", "test"),
)
def test_build_list_unpack_seq(self):
for build_list in ("BUILD_TUPLE", "BUILD_LIST"):
# x, = [a]
code = Bytecode(
[
Instr("LOAD_NAME", "a"),
Instr(build_list, 1),
Instr("UNPACK_SEQUENCE", 1),
Instr("STORE_NAME", "x"),
]
)
self.check(code, Instr("LOAD_NAME", "a"), Instr("STORE_NAME", "x"))
# x, y = [a, b]
code = Bytecode(
[
Instr("LOAD_NAME", "a"),
Instr("LOAD_NAME", "b"),
Instr(build_list, 2),
Instr("UNPACK_SEQUENCE", 2),
Instr("STORE_NAME", "x"),
Instr("STORE_NAME", "y"),
]
)
self.check(
code,
Instr("LOAD_NAME", "a"),
Instr("LOAD_NAME", "b"),
Instr("ROT_TWO"),
Instr("STORE_NAME", "x"),
Instr("STORE_NAME", "y"),
)
# x, y, z = [a, b, c]
code = Bytecode(
[
Instr("LOAD_NAME", "a"),
Instr("LOAD_NAME", "b"),
Instr("LOAD_NAME", "c"),
Instr(build_list, 3),
Instr("UNPACK_SEQUENCE", 3),
Instr("STORE_NAME", "x"),
Instr("STORE_NAME", "y"),
Instr("STORE_NAME", "z"),
]
)
self.check(
code,
Instr("LOAD_NAME", "a"),
Instr("LOAD_NAME", "b"),
Instr("LOAD_NAME", "c"),
Instr("ROT_THREE"),
Instr("ROT_TWO"),
Instr("STORE_NAME", "x"),
Instr("STORE_NAME", "y"),
Instr("STORE_NAME", "z"),
)
def test_build_tuple_unpack_seq_const(self):
# x, y = (3, 4)
code = Bytecode(
[
Instr("LOAD_CONST", 3),
Instr("LOAD_CONST", 4),
Instr("BUILD_TUPLE", 2),
Instr("UNPACK_SEQUENCE", 2),
Instr("STORE_NAME", "x"),
Instr("STORE_NAME", "y"),
]
)
self.check(
code,
Instr("LOAD_CONST", (3, 4)),
Instr("UNPACK_SEQUENCE", 2),
Instr("STORE_NAME", "x"),
Instr("STORE_NAME", "y"),
)
def test_build_list_unpack_seq_const(self):
# x, y, z = [3, 4, 5]
code = Bytecode(
[
Instr("LOAD_CONST", 3),
Instr("LOAD_CONST", 4),
Instr("LOAD_CONST", 5),
Instr("BUILD_LIST", 3),
Instr("UNPACK_SEQUENCE", 3),
Instr("STORE_NAME", "x"),
Instr("STORE_NAME", "y"),
Instr("STORE_NAME", "z"),
]
)
self.check(
code,
Instr("LOAD_CONST", 5),
Instr("LOAD_CONST", 4),
Instr("LOAD_CONST", 3),
Instr("STORE_NAME", "x"),
Instr("STORE_NAME", "y"),
Instr("STORE_NAME", "z"),
)
def test_build_set(self):
# test = x in {1, 2, 3}
code = Bytecode(
[
Instr("LOAD_NAME", "x"),
Instr("LOAD_CONST", 1),
Instr("LOAD_CONST", 2),
Instr("LOAD_CONST", 3),
Instr("BUILD_SET", 3),
Instr("COMPARE_OP", Compare.IN),
Instr("STORE_NAME", "test"),
]
)
self.check(
code,
Instr("LOAD_NAME", "x"),
Instr("LOAD_CONST", frozenset((1, 2, 3))),
Instr("COMPARE_OP", Compare.IN),
Instr("STORE_NAME", "test"),
)
def test_compare_op_unary_not(self):
for op, not_op in (
(Compare.IN, Compare.NOT_IN), # in => not in
(Compare.NOT_IN, Compare.IN), # not in => in
(Compare.IS, Compare.IS_NOT), # is => is not
(Compare.IS_NOT, Compare.IS), # is not => is
):
code = Bytecode(
[
Instr("LOAD_NAME", "a"),
Instr("LOAD_NAME", "b"),
Instr("COMPARE_OP", op),
Instr("UNARY_NOT"),
Instr("STORE_NAME", "x"),
]
)
self.check(
code,
Instr("LOAD_NAME", "a"),
Instr("LOAD_NAME", "b"),
Instr("COMPARE_OP", not_op),
Instr("STORE_NAME", "x"),
)
# don't optimize:
# x = not (a and b is True)
label_instr5 = Label()
code = Bytecode(
[
Instr("LOAD_NAME", "a"),
Instr("JUMP_IF_FALSE_OR_POP", label_instr5),
Instr("LOAD_NAME", "b"),
Instr("LOAD_CONST", True),
Instr("COMPARE_OP", Compare.IS),
label_instr5,
Instr("UNARY_NOT"),
Instr("STORE_NAME", "x"),
Instr("LOAD_CONST", None),
Instr("RETURN_VALUE"),
]
)
self.check_dont_optimize(code)
def test_dont_optimize(self):
# x = 3 < 5
code = Bytecode(
[
Instr("LOAD_CONST", 3),
Instr("LOAD_CONST", 5),
Instr("COMPARE_OP", Compare.LT),
Instr("STORE_NAME", "x"),
Instr("LOAD_CONST", None),
Instr("RETURN_VALUE"),
]
)
self.check_dont_optimize(code)
# x = (10, 20, 30)[1:]
code = Bytecode(
[
Instr("LOAD_CONST", (10, 20, 30)),
Instr("LOAD_CONST", 1),
Instr("LOAD_CONST", None),
Instr("BUILD_SLICE", 2),
Instr("BINARY_SUBSCR"),
Instr("STORE_NAME", "x"),
]
)
self.check_dont_optimize(code)
def test_optimize_code_obj(self):
# Test optimize() method with a code object
#
# x = 3 + 5 => x = 8
noopt = Bytecode(
[
Instr("LOAD_CONST", 3),
Instr("LOAD_CONST", 5),
Instr("BINARY_ADD"),
Instr("STORE_NAME", "x"),
Instr("LOAD_CONST", None),
Instr("RETURN_VALUE"),
]
)
noopt = noopt.to_code()
optimizer = peephole_opt.PeepholeOptimizer()
optim = optimizer.optimize(noopt)
code = Bytecode.from_code(optim)
self.assertEqual(
code,
[
Instr("LOAD_CONST", 8, lineno=1),
Instr("STORE_NAME", "x", lineno=1),
Instr("LOAD_CONST", None, lineno=1),
Instr("RETURN_VALUE", lineno=1),
],
)
def test_return_value(self):
# return+return: remove second return
#
# def func():
# return 4
# return 5
code = Bytecode(
[
Instr("LOAD_CONST", 4, lineno=2),
Instr("RETURN_VALUE", lineno=2),
Instr("LOAD_CONST", 5, lineno=3),
Instr("RETURN_VALUE", lineno=3),
]
)
code = ControlFlowGraph.from_bytecode(code)
self.check(
code, Instr("LOAD_CONST", 4, lineno=2), Instr("RETURN_VALUE", lineno=2)
)
# return+return + return+return: remove second and fourth return
#
# def func():
# return 4
# return 5
# return 6
# return 7
code = Bytecode(
[
Instr("LOAD_CONST", 4, lineno=2),
Instr("RETURN_VALUE", lineno=2),
Instr("LOAD_CONST", 5, lineno=3),
Instr("RETURN_VALUE", lineno=3),
Instr("LOAD_CONST", 6, lineno=4),
Instr("RETURN_VALUE", lineno=4),
Instr("LOAD_CONST", 7, lineno=5),
Instr("RETURN_VALUE", lineno=5),
]
)
code = ControlFlowGraph.from_bytecode(code)
self.check(
code, Instr("LOAD_CONST", 4, lineno=2), Instr("RETURN_VALUE", lineno=2)
)
# return + JUMP_ABSOLUTE: remove JUMP_ABSOLUTE
# while 1:
# return 7
if sys.version_info < (3, 8):
setup_loop = Label()
return_label = Label()
code = Bytecode(
[
setup_loop,
Instr("SETUP_LOOP", return_label, lineno=2),
Instr("LOAD_CONST", 7, lineno=3),
Instr("RETURN_VALUE", lineno=3),
Instr("JUMP_ABSOLUTE", setup_loop, lineno=3),
Instr("POP_BLOCK", lineno=3),
return_label,
Instr("LOAD_CONST", None, lineno=3),
Instr("RETURN_VALUE", lineno=3),
]
)
code = ControlFlowGraph.from_bytecode(code)
end_loop = Label()
self.check(
code,
Instr("SETUP_LOOP", end_loop, lineno=2),
Instr("LOAD_CONST", 7, lineno=3),
Instr("RETURN_VALUE", lineno=3),
end_loop,
Instr("LOAD_CONST", None, lineno=3),
Instr("RETURN_VALUE", lineno=3),
)
else:
setup_loop = Label()
return_label = Label()
code = Bytecode(
[
setup_loop,
Instr("LOAD_CONST", 7, lineno=3),
Instr("RETURN_VALUE", lineno=3),
Instr("JUMP_ABSOLUTE", setup_loop, lineno=3),
Instr("LOAD_CONST", None, lineno=3),
Instr("RETURN_VALUE", lineno=3),
]
)
code = ControlFlowGraph.from_bytecode(code)
self.check(
code, Instr("LOAD_CONST", 7, lineno=3), Instr("RETURN_VALUE", lineno=3)
)
def test_not_jump_if_false(self):
# Replace UNARY_NOT+POP_JUMP_IF_FALSE with POP_JUMP_IF_TRUE
#
# if not x:
# y = 9
label = Label()
code = Bytecode(
[
Instr("LOAD_NAME", "x"),
Instr("UNARY_NOT"),
Instr("POP_JUMP_IF_FALSE", label),
Instr("LOAD_CONST", 9),
Instr("STORE_NAME", "y"),
label,
]
)
code = self.optimize_blocks(code)
label = Label()
self.check(
code,
Instr("LOAD_NAME", "x"),
Instr("POP_JUMP_IF_TRUE", label),
Instr("LOAD_CONST", 9),
Instr("STORE_NAME", "y"),
label,
)
def test_unconditional_jump_to_return(self):
# def func():
# if test:
# if test2:
# x = 10
# else:
# x = 20
# else:
# x = 30
label_instr11 = Label()
label_instr14 = Label()
label_instr7 = Label()
code = Bytecode(
[
Instr("LOAD_GLOBAL", "test", lineno=2),
Instr("POP_JUMP_IF_FALSE", label_instr11, lineno=2),
Instr("LOAD_GLOBAL", "test2", lineno=3),
Instr("POP_JUMP_IF_FALSE", label_instr7, lineno=3),
Instr("LOAD_CONST", 10, lineno=4),
Instr("STORE_FAST", "x", lineno=4),
Instr("JUMP_ABSOLUTE", label_instr14, lineno=4),
label_instr7,
Instr("LOAD_CONST", 20, lineno=6),
Instr("STORE_FAST", "x", lineno=6),
Instr("JUMP_FORWARD", label_instr14, lineno=6),
label_instr11,
Instr("LOAD_CONST", 30, lineno=8),
Instr("STORE_FAST", "x", lineno=8),
label_instr14,
Instr("LOAD_CONST", None, lineno=8),
Instr("RETURN_VALUE", lineno=8),
]
)
label1 = Label()
label3 = Label()
label4 = Label()
self.check(
code,
Instr("LOAD_GLOBAL", "test", lineno=2),
Instr("POP_JUMP_IF_FALSE", label3, lineno=2),
Instr("LOAD_GLOBAL", "test2", lineno=3),
Instr("POP_JUMP_IF_FALSE", label1, lineno=3),
Instr("LOAD_CONST", 10, lineno=4),
Instr("STORE_FAST", "x", lineno=4),
Instr("JUMP_ABSOLUTE", label4, lineno=4),
label1,
Instr("LOAD_CONST", 20, lineno=6),
Instr("STORE_FAST", "x", lineno=6),
Instr("JUMP_FORWARD", label4, lineno=6),
label3,
Instr("LOAD_CONST", 30, lineno=8),
Instr("STORE_FAST", "x", lineno=8),
label4,
Instr("LOAD_CONST", None, lineno=8),
Instr("RETURN_VALUE", lineno=8),
)
def test_unconditional_jumps(self):
# def func():
# if x:
# if y:
# func()
label_instr7 = Label()
code = Bytecode(
[
Instr("LOAD_GLOBAL", "x", lineno=2),
Instr("POP_JUMP_IF_FALSE", label_instr7, lineno=2),
Instr("LOAD_GLOBAL", "y", lineno=3),
Instr("POP_JUMP_IF_FALSE", label_instr7, lineno=3),
Instr("LOAD_GLOBAL", "func", lineno=4),
Instr("CALL_FUNCTION", 0, lineno=4),
Instr("POP_TOP", lineno=4),
label_instr7,
Instr("LOAD_CONST", None, lineno=4),
Instr("RETURN_VALUE", lineno=4),
]
)
label_return = Label()
self.check(
code,
Instr("LOAD_GLOBAL", "x", lineno=2),
Instr("POP_JUMP_IF_FALSE", label_return, lineno=2),
Instr("LOAD_GLOBAL", "y", lineno=3),
Instr("POP_JUMP_IF_FALSE", label_return, lineno=3),
Instr("LOAD_GLOBAL", "func", lineno=4),
Instr("CALL_FUNCTION", 0, lineno=4),
Instr("POP_TOP", lineno=4),
label_return,
Instr("LOAD_CONST", None, lineno=4),
Instr("RETURN_VALUE", lineno=4),
)
def test_jump_to_return(self):
# def func(condition):
# return 'yes' if condition else 'no'
label_instr4 = Label()
label_instr6 = Label()
code = Bytecode(
[
Instr("LOAD_FAST", "condition"),
Instr("POP_JUMP_IF_FALSE", label_instr4),
Instr("LOAD_CONST", "yes"),
Instr("JUMP_FORWARD", label_instr6),
label_instr4,
Instr("LOAD_CONST", "no"),
label_instr6,
Instr("RETURN_VALUE"),
]
)
label = Label()
self.check(
code,
Instr("LOAD_FAST", "condition"),
Instr("POP_JUMP_IF_FALSE", label),
Instr("LOAD_CONST", "yes"),
Instr("RETURN_VALUE"),
label,
Instr("LOAD_CONST", "no"),
Instr("RETURN_VALUE"),
)
def test_jump_if_true_to_jump_if_false(self):
# Replace JUMP_IF_TRUE_OR_POP jumping to POP_JUMP_IF_FALSE <target>
# with POP_JUMP_IF_TRUE <offset after the second POP_JUMP_IF_FALSE>
#
# if x or y:
# z = 1
label_instr3 = Label()
label_instr7 = Label()
code = Bytecode(
[
Instr("LOAD_NAME", "x"),
Instr("JUMP_IF_TRUE_OR_POP", label_instr3),
Instr("LOAD_NAME", "y"),
label_instr3,
Instr("POP_JUMP_IF_FALSE", label_instr7),
Instr("LOAD_CONST", 1),
Instr("STORE_NAME", "z"),
label_instr7,
Instr("LOAD_CONST", None),
Instr("RETURN_VALUE"),
]
)
label_instr4 = Label()
label_instr7 = Label()
self.check(
code,
Instr("LOAD_NAME", "x"),
Instr("POP_JUMP_IF_TRUE", label_instr4),
Instr("LOAD_NAME", "y"),
Instr("POP_JUMP_IF_FALSE", label_instr7),
label_instr4,
Instr("LOAD_CONST", 1),
Instr("STORE_NAME", "z"),
label_instr7,
Instr("LOAD_CONST", None),
Instr("RETURN_VALUE"),
)
def test_jump_if_false_to_jump_if_false(self):
# Replace JUMP_IF_FALSE_OR_POP jumping to POP_JUMP_IF_FALSE <label>
# with POP_JUMP_IF_FALSE <label>
#
# while n > 0 and start > 3:
# func()
if sys.version_info < (3, 8):
label_instr1 = Label()
label_instr15 = Label()
label_instr17 = Label()
label_instr9 = Label()
code = Bytecode(
[
Instr("SETUP_LOOP", label_instr17),
label_instr1,
Instr("LOAD_NAME", "n"),
Instr("LOAD_CONST", 0),
Instr("COMPARE_OP", Compare.GT),
# JUMP_IF_FALSE_OR_POP jumps to POP_JUMP_IF_FALSE
# which jumps to label_instr15
Instr("JUMP_IF_FALSE_OR_POP", label_instr9),
Instr("LOAD_NAME", "start"),
Instr("LOAD_CONST", 3),
Instr("COMPARE_OP", Compare.GT),
label_instr9,
Instr("POP_JUMP_IF_FALSE", label_instr15),
Instr("LOAD_NAME", "func"),
Instr("CALL_FUNCTION", 0),
Instr("POP_TOP"),
Instr("JUMP_ABSOLUTE", label_instr1),
label_instr15,
Instr("POP_BLOCK"),
label_instr17,
Instr("LOAD_CONST", None),
Instr("RETURN_VALUE"),
]
)
label_instr1 = Label()
label_instr14 = Label()
label_instr16 = Label()
self.check(
code,
Instr("SETUP_LOOP", label_instr16),
label_instr1,
Instr("LOAD_NAME", "n"),
Instr("LOAD_CONST", 0),
Instr("COMPARE_OP", Compare.GT),
Instr("POP_JUMP_IF_FALSE", label_instr14),
Instr("LOAD_NAME", "start"),
Instr("LOAD_CONST", 3),
Instr("COMPARE_OP", Compare.GT),
Instr("POP_JUMP_IF_FALSE", label_instr14),
Instr("LOAD_NAME", "func"),
Instr("CALL_FUNCTION", 0),
Instr("POP_TOP"),
Instr("JUMP_ABSOLUTE", label_instr1),
label_instr14,
Instr("POP_BLOCK"),
label_instr16,
Instr("LOAD_CONST", None),
Instr("RETURN_VALUE"),
)
else:
label_instr1 = Label()
label_instr15 = Label()
label_instr9 = Label()
code = Bytecode(
[
label_instr1,
Instr("LOAD_NAME", "n"),
Instr("LOAD_CONST", 0),
Instr("COMPARE_OP", Compare.GT),
# JUMP_IF_FALSE_OR_POP jumps to POP_JUMP_IF_FALSE
# which jumps to label_instr15
Instr("JUMP_IF_FALSE_OR_POP", label_instr9),
Instr("LOAD_NAME", "start"),
Instr("LOAD_CONST", 3),
Instr("COMPARE_OP", Compare.GT),
label_instr9,
Instr("POP_JUMP_IF_FALSE", label_instr15),
Instr("LOAD_NAME", "func"),
Instr("CALL_FUNCTION", 0),
Instr("POP_TOP"),
Instr("JUMP_ABSOLUTE", label_instr1),
label_instr15,
Instr("LOAD_CONST", None),
Instr("RETURN_VALUE"),
]
)
label_instr1 = Label()
label_instr14 = Label()
self.check(
code,
label_instr1,
Instr("LOAD_NAME", "n"),
Instr("LOAD_CONST", 0),
Instr("COMPARE_OP", Compare.GT),
Instr("POP_JUMP_IF_FALSE", label_instr14),
Instr("LOAD_NAME", "start"),
Instr("LOAD_CONST", 3),
Instr("COMPARE_OP", Compare.GT),
Instr("POP_JUMP_IF_FALSE", label_instr14),
Instr("LOAD_NAME", "func"),
Instr("CALL_FUNCTION", 0),
Instr("POP_TOP"),
Instr("JUMP_ABSOLUTE", label_instr1),
label_instr14,
Instr("LOAD_CONST", None),
Instr("RETURN_VALUE"),
)
def test_nop(self):
code = Bytecode(
[Instr("LOAD_NAME", "x"), Instr("NOP"), Instr("STORE_NAME", "test")]
)
self.check(code, Instr("LOAD_NAME", "x"), Instr("STORE_NAME", "test"))
def test_dead_code_jump(self):
label = Label()
code = Bytecode(
[
Instr("LOAD_NAME", "x"),
Instr("JUMP_ABSOLUTE", label),
# dead code
Instr("LOAD_NAME", "y"),
Instr("STORE_NAME", "test"),
label,
Instr("STORE_NAME", "test"),
]
)
self.check(code, Instr("LOAD_NAME", "x"), Instr("STORE_NAME", "test"))
def test_uncond_jump_to_uncond_jump(self):
# Replace JUMP_FORWARD t1 jumping to JUMP_FORWARD t2
# with JUMP_ABSOLUTE t2
label = Label()
label2 = Label()
label3 = Label()
label4 = Label()
code = Bytecode(
[
Instr("LOAD_NAME", "test"),
Instr("POP_JUMP_IF_TRUE", label),
# redundant jump
Instr("JUMP_FORWARD", label2),
label,
Instr("LOAD_CONST", 1),
Instr("STORE_NAME", "x"),
Instr("LOAD_NAME", "test"),
Instr("POP_JUMP_IF_TRUE", label3),
label2,
Instr("JUMP_FORWARD", label4),
label3,
Instr("LOAD_CONST", 1),
Instr("STORE_NAME", "x"),
label4,
Instr("LOAD_CONST", None),
Instr("RETURN_VALUE"),
]
)
label = Label()
label3 = Label()
label4 = Label()
self.check(
code,
Instr("LOAD_NAME", "test"),
Instr("POP_JUMP_IF_TRUE", label),
# JUMP_FORWARD label2 was replaced with JUMP_ABSOLUTE label4
Instr("JUMP_ABSOLUTE", label4),
label,
Instr("LOAD_CONST", 1),
Instr("STORE_NAME", "x"),
Instr("LOAD_NAME", "test"),
Instr("POP_JUMP_IF_TRUE", label3),
Instr("JUMP_FORWARD", label4),
label3,
Instr("LOAD_CONST", 1),
Instr("STORE_NAME", "x"),
label4,
Instr("LOAD_CONST", None),
Instr("RETURN_VALUE"),
)
if __name__ == "__main__":
unittest.main() # pragma: no cover
| mit | -7,548,279,328,693,912,000 | 32.495431 | 126 | 0.411178 | false |
pearu/sympycore | sympycore/tests/test_lowlevel.py | 1 | 6359 |
from sympycore.core import Expr, Pair, heads, IntegerList
from sympycore.heads import *
class MyExpr(Expr):
@classmethod
def convert(cls, obj, typeerror=True):
if isinstance(obj, cls):
return obj
if isinstance(obj, (int, long, float, complex)):
return MyExpr(NUMBER, obj)
if isinstance(obj, str):
return MyExpr(SYMBOL, obj)
if typeerror:
raise TypeError('Cannot convert type %r to %r' % (type(obj), cls.__name__))
return NotImplemented
def __add__(self, other):
return Add(self, self.convert(other))
__radd__ = __add__
class AExpr(Expr):
pass
def Number(n):
return MyExpr(NUMBER, n)
def Symbol(s):
return MyExpr(SYMBOL, s)
def Add(x, y):
d = {}
if x==y:
d[x] = 2
else:
d[x] = 1
d[y] = 1
return MyExpr(TERMS, d)
def test_equality_number():
assert Number(1)==Number(1)
assert Number(1)==Number(1L)
assert Number(1)==1
assert Number(1)==1.0
assert Number(1.0)==1
assert Number(1)==1.0+0j
assert Number(1)==1L
assert 1==Number(1)
assert 1L==Number(1)
assert 1==Number(1L)
assert Number(1)!=Number(2)
assert Number(1)!=Number(2L)
assert Number(1)!=2
assert Number(1)!=2L
assert 2!=Number(1)
def test_equality_symbol():
assert Symbol('x')==Symbol('x')
assert Symbol('x')=='x'
assert 'x'==Symbol('x')
assert Symbol('x')!=Symbol('y')
assert Symbol('x')!='y'
assert 'x'!=Symbol('y')
assert MyExpr(SYMBOL, 'x')==AExpr(SYMBOL, 'x')
def test_pos():
x, y, z = map(Symbol,'xyz')
assert MyExpr(POS,x)==x
assert hash(MyExpr(POS,x))==hash(x)
def test_neg():
x, y, z = map(Symbol,'xyz')
z, o, t = map(Number, [0,1,2])
assert MyExpr(NEG, t)==-2, MyExpr(NEG, t)
assert hash(MyExpr(NEG, t))==hash(-2), `hash(MyExpr(NEG, t)),hash(-2)`
def test_equality_add():
x, y, z = map(Symbol,'xyz')
assert x + y == y + x
assert x + x == x + x
assert x + y == (TERMS, {x:1, y:1})
assert x + y != y + z
assert x + y != x
assert x + y != 'x'
assert x + y != 1
assert MyExpr(ADD,[])==0,`MyExpr(ADD,[]).as_lowlevel()`
assert MyExpr(ADD,[x])==x
assert (not not MyExpr(ADD,[])) == False
assert hash(MyExpr(ADD,[]))==hash(0)
assert hash(MyExpr(ADD,[x]))==hash(x)
x1 = AExpr(SYMBOL, 'x')
y1 = AExpr(SYMBOL, 'y')
assert AExpr(ADD,[x1])==MyExpr(ADD,[x])
assert AExpr(ADD,[x1,y1])==MyExpr(ADD,[x,y])
assert not (AExpr(ADD,[x1,y1])<MyExpr(ADD,[x,y]))
assert not (AExpr(ADD,[x1,y1])>MyExpr(ADD,[x,y]))
assert (AExpr(ADD,[x1,y1])<=MyExpr(ADD,[x,y]))
assert (AExpr(ADD,[x1,y1])>=MyExpr(ADD,[x,y]))
def test_equality_mul():
x, y, z = map(Symbol,'xyz')
assert (not not MyExpr(MUL,[])) == True
assert MyExpr(MUL,[])==1
assert MyExpr(MUL,[x])==x
assert hash(MyExpr(MUL,[]))==hash(1)
assert hash(MyExpr(MUL,[x]))==hash(x)
def test_equality_term_coeff():
x, y, z = map(Symbol,'xyz')
z, o, t = map(Number, [0,1,2])
assert MyExpr(TERM_COEFF, (x, 0))==0
assert MyExpr(TERM_COEFF, (x, 1))==x
assert MyExpr(TERM_COEFF, (o, 2))==2
assert MyExpr(TERM_COEFF, (x, -1))==MyExpr(NEG, x)
assert hash(MyExpr(TERM_COEFF, (x, 0)))==hash(0)
assert hash(MyExpr(TERM_COEFF, (x, 1)))==hash(x)
assert hash(MyExpr(TERM_COEFF, (o, 2)))==hash(2)
assert hash(MyExpr(TERM_COEFF, (x, -1)))==hash(MyExpr(NEG, x))
def test_equality_term_coeff_dict():
x, y, z = map(Symbol,'xyz')
z, o, t = map(Number, [0,1,2])
assert MyExpr(TERM_COEFF_DICT, {})==0
assert MyExpr(TERM_COEFF_DICT, {x:0})==0
assert MyExpr(TERM_COEFF_DICT, {x:1})==x
assert MyExpr(TERM_COEFF_DICT, {x:2})==MyExpr(TERM_COEFF, (x, 2))
assert MyExpr(TERM_COEFF_DICT, {o:2})==2
assert hash(MyExpr(TERM_COEFF_DICT, {}))==hash(0)
assert hash(MyExpr(TERM_COEFF_DICT, {x:0}))==hash(0)
assert hash(MyExpr(TERM_COEFF_DICT, {x:1}))==hash(x)
assert hash(MyExpr(TERM_COEFF_DICT, {x:2}))==hash(MyExpr(TERM_COEFF, (x, 2)))
assert hash(MyExpr(TERM_COEFF_DICT, {o:2}))==hash(2)
def test_equality_pow():
x, y, z = map(Symbol,'xyz')
z, o = map(Number, [0,1])
assert MyExpr(POW, (x, 0))==1
assert MyExpr(POW, (x, 1))==x
assert MyExpr(POW, (o, x))==1
assert hash(MyExpr(POW, (x, 0)))==hash(1)
assert hash(MyExpr(POW, (x, 1)))==hash(x)
assert hash(MyExpr(POW, (o, x)))==hash(1)
def test_equality_base_exp_dict():
x, y, z = map(Symbol,'xyz')
z, o, t = map(Number, [0,1,2])
assert MyExpr(BASE_EXP_DICT, {})==1
assert MyExpr(BASE_EXP_DICT, {x:0})==1
assert MyExpr(BASE_EXP_DICT, {x:1})==x
assert MyExpr(BASE_EXP_DICT, {x:2})==MyExpr(POW, (x, 2))
assert MyExpr(BASE_EXP_DICT, {o:2})==1
assert hash(MyExpr(BASE_EXP_DICT, {}))==hash(1)
assert hash(MyExpr(BASE_EXP_DICT, {x:0}))==hash(1)
assert hash(MyExpr(BASE_EXP_DICT, {x:1}))==hash(x)
assert hash(MyExpr(BASE_EXP_DICT, {x:2}))==hash(MyExpr(POW, (x, 2)))
assert hash(MyExpr(BASE_EXP_DICT, {o:2}))==hash(1)
def test_equality_exp_coeff_dict():
x, y, z = map(Symbol,'xyz')
assert MyExpr(EXP_COEFF_DICT, Pair((x,), {}))==0
assert MyExpr(EXP_COEFF_DICT, Pair((x,), {IntegerList(0):2}))==2
assert MyExpr(EXP_COEFF_DICT, Pair((x,), {IntegerList(1):1}))==x
assert MyExpr(EXP_COEFF_DICT, Pair((x,), {IntegerList(1):2}))==MyExpr(TERM_COEFF, (x,2))
assert MyExpr(EXP_COEFF_DICT, Pair((x,), {IntegerList(2):1}))==MyExpr(POW, (x,2))
def test_hash_number():
assert hash(Number(1))==hash(1)
assert hash(Number(-1))==hash(-1)
assert hash(Number(1212424))==hash(1212424)
assert hash(Number(-1212424))==hash(-1212424)
def test_hash_symbol():
assert hash(Symbol('x'))==hash('x'),`hash(Symbol('x')),hash('x')`
assert hash(Symbol('y'))==hash('y')
def test_hash_dict_data():
x, y, z = map(Symbol,'xyz')
assert hash(x + y) == hash((TERMS, frozenset([(x,1),(y,1)])))
def test_hash_list_data():
l = map(MyExpr,[1,2,3])
e1 = MyExpr(MUL, l)
assert e1.is_writable
e2 = MyExpr(MUL, tuple(l))
assert hash(e1)==hash(e2)
assert not e1.is_writable
def test_is_writable():
n, m = map(MyExpr, [1,2])
assert MyExpr(MUL, [n,m]).is_writable
assert not MyExpr(MUL, (n,m)).is_writable
| bsd-3-clause | 4,462,805,225,297,965,000 | 29.868932 | 92 | 0.577135 | false |
MediaKraken/MediaKraken_Deployment | source/testing/test_database/test_database_metadata_gamesdb.py | 1 | 1731 | """
Copyright (C) 2016 Quinn D Granfor <[email protected]>
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
version 2, as published by the Free Software Foundation.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License version 2 for more details.
You should have received a copy of the GNU General Public License
version 2 along with this program; if not, write to the Free
Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
MA 02110-1301, USA.
"""
import json
import sys
import pytest # pylint: disable=W0611
sys.path.append('.')
import database as database_base
class TestDatabaseMetadataGamesdb:
@classmethod
def setup_class(self):
self.db_connection = database_base.MKServerDatabase()
self.db_connection.db_open(True)
@classmethod
def teardown_class(self):
self.db_connection.db_close()
@pytest.mark.parametrize(("platform_id", "platform_name", "platform_alias", "platform_json"), [
(34, 'Test', 'Test', json.dumps({'Tt': 'M'})),
(3, 'Tst', 'Tst', None)])
def test_db_meta_gamesdb_system_insert(self, platform_id, platform_name,
platform_alias, platform_json):
"""
# insert gamesdb game system
"""
self.db_connection.db_rollback()
self.db_connection.db_meta_games_system_insert(platform_id, platform_name,
platform_alias, platform_json)
| gpl-3.0 | -1,576,640,985,692,887,800 | 34.326531 | 99 | 0.6632 | false |
taxpon/pyomni | pyomni/pyomni.py | 1 | 5467 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import sys
import io
import zipfile
import logging
import datetime
from pyomni import util
from pyomni.webdav.WebdavClient import CollectionStorer
from pyomni.webdav.WebdavClient import ResourceStorer
from pyomni.webdav.WebdavClient import parseDigestAuthInfo
from pyomni.webdav.Connection import AuthorizationError
OMNI_SERVER_BASE_URL = "https://sync1.omnigroup.com/"
logging.disable(logging.INFO)
def digest(storer):
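    """Decorator factory: wraps a PyOmni method so that it receives an extra
    trailing positional argument -- a ``storer`` instance (CollectionStorer
    or ResourceStorer) bound to the resolved URL, with the user's HTTP digest
    credentials attached when the server demands them.
    """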
def _digest(func):
def __digest(*args, **kwargs):
self = args[0]
if len(args) > 1 and isinstance(args[1], (str, unicode)):
if args[1][0] != "/":
url = self.base_url + "/" + args[1]
else:
url = self.base_url + args[1]
else:
url = self.base_url
conn = storer(url, validateResourceNames=False)
try:
conn.readAllProperties()
except AuthorizationError as e:
if e.authType == "Digest":
info = parseDigestAuthInfo(e.authInfo)
conn.connection.addDigestAuthorization(
self.username, self.password,
realm=info["realm"],
qop=info["qop"],
nonce=info["nonce"]
)
else:
raise
args = args + (conn, )
result = func(*args, **kwargs)
return result
return __digest
return _digest
class PyOmni(object):
BASE_HOST = "https://sync1.omnigroup.com"
BASE_URL = BASE_HOST + "/{}/OmniFocus.ofocus/"
def __init__(self, username, password):
"""Create PyOmni instance
:param str username:
:param str password:
:return:
"""
self._username = username
self._password = password
self._base_url = PyOmni.BASE_URL.format(username)
return
@property
def username(self):
return self._username
@property
def password(self):
return self._password
@property
def base_url(self):
return self._base_url
@staticmethod
def create_zip_name(last_id):
now = datetime.datetime.utcnow().strftime("%Y%m%d%H%M%S")
return "{}={}+{}.zip".format(
now, last_id, util.generate_random_code()
)
@staticmethod
def create_zip_body(file_list, write=False):
if write:
fo = "sample.zip"
else:
fo = io.BytesIO()
with zipfile.ZipFile(fo, "w") as fh:
for fd in file_list:
fh.writestr(fd[0], fd[1])
if write:
return
return fo.getvalue()
@staticmethod
def unzip_body(buf):
fo = io.BytesIO(buf)
zf = zipfile.ZipFile(fo)
file_list = []
for zip_file in zf.infolist():
with zf.open(zip_file.filename) as fh:
file_info = [
zip_file.filename,
fh.read()
]
file_list.append(file_info)
return file_list
@digest(CollectionStorer)
def ls(self, conn):
file_list = []
for resource, properties in conn.getCollectionContents():
file_list.append(resource.path.encode(sys.getfilesystemencoding()))
return file_list
@digest(CollectionStorer)
def get_last_id(self, conn):
"""Return latest transaction id
:param conn:
:rtype: str | None
"""
zip_file_list = []
for resource, properties in conn.getCollectionContents():
ext = resource.path.encode(sys.getfilesystemencoding()).split('.')[-1]
if ext == "zip":
zip_file_list.append(resource.path.encode(sys.getfilesystemencoding()))
if len(zip_file_list) > 0:
zip_file_list.sort()
return zip_file_list[-1].split("+")[1].split(".")[0]
return None
@digest(ResourceStorer)
def get_content(self, file_path, conn):
buf = conn.downloadContent().read()
if file_path.split('.')[-1] == "zip":
fo = io.BytesIO(buf)
zf = zipfile.ZipFile(fo)
file_list = []
for zip_file in zf.infolist():
with zf.open(zip_file.filename) as fh:
file_info = [
zip_file.filename,
fh.read()
]
file_list.append(file_info)
else:
file_list = [
file_path.split('/')[-1],
buf
]
return file_list
@digest(ResourceStorer)
def upload_content(self, file_path, buf, conn):
conn.uploadContent(buf)
@digest(ResourceStorer)
def delete_content(self, file_path, conn):
conn.deleteContent()
@digest(ResourceStorer)
def rm(self, file_path, conn):
conn.delete()
def add_task(self, task):
"""Add task to Omnifocus sync server
:param pyomni.object.task.OmniTask task:
:rtype: None
"""
last_id = self.get_last_id()
zip_name = PyOmni.create_zip_name(last_id)
zip_buf = PyOmni.create_zip_body([[
"contents.xml",
task.get_xml()
]])
self.upload_content(zip_name, zip_buf)
return
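def _demo_pyomni():
    # Hypothetical usage sketch (added for illustration; not part of the
    # original module). Requires real Omni Sync Server credentials -- the
    # ones below are placeholders.
    omni = PyOmni('user', 'password')
    print(omni.ls())              # list files in the OmniFocus.ofocus store
    last_id = omni.get_last_id()  # newest transaction id, or None
    if last_id is not None:
        print(PyOmni.create_zip_name(last_id))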
| mit | -726,213,792,981,818,500 | 28.235294 | 87 | 0.527529 | false |
carloderamo/mushroom | mushroom_rl/environments/mujoco_envs/humanoid_gait/_external_simulation/mtc_model.py | 1 | 7802 | import numpy as np
"""
This model is meant for using an MTC (muscle-tendon complex) in MuJoCo. The dynamics in this model are not
continuous, so the integration error accumulates over time, and the system might become unstable if the
timestep is too large. It is recommended to set the timestep below 5e-4 to get decent results.
The model is created based on Song's and Geyer's 2015 paper:
Song, S. and Geyer, H., 2015. A neural circuitry that emphasizes spinal feedback generates diverse behaviours of human
locomotion. The Journal of physiology, 593(16), pp.3493-3511.
V0.1
Passed basic tests. There are slight differences compared to the SimMechanics model.
V0.2
1. Verified with the simmechanics model. Difference in most of the cases can be ignored.
2. Changed the integration method from forward Euler to trapezoid.
3. Muscle force, vce, etc. might vibrate/jitter in some cases if the timestep is not low enough.
Need to improve this in the next version.
"""
class MuscleTendonComplex:
def __init__(self, paraMuscle, stateMuscle, paraMusAttach, offsetCorr, timestep, nameMuscle, angJoi):
self.frcmax, self.vmax, self.eref, self.lslack, self.lopt, self.tau, self.w, self.c, self.N, self.K = paraMuscle
self.stim, self.act, self.lmtc, self.lce, self.vce, self.frcmtc = stateMuscle
self.timestep = timestep
self.nameMuscle = nameMuscle
self.angJoi = angJoi
self.offsetCorr = offsetCorr
self.r, self.phiref, self.phimaxref, self.rho, self.dirAng, self.phiScale = paraMusAttach
self.MR = 0.01
self.typeMuscle = self.angJoi.size
nJoi = self.typeMuscle
self.levelArm = np.zeros(nJoi)
tmpL = np.zeros(nJoi)
for i in range(0, nJoi):
if self.offsetCorr[i] == 0:
tmpL[i] = self.dirAng[i] * (self.angJoi[i] - self.phiref[i]) * self.r[i] * self.rho[i]
self.levelArm[i] = self.r[i]
elif self.offsetCorr[i] == 1:
tmp1 = np.sin((self.phiref[i] - self.phimaxref[i]) * self.phiScale[i])
tmp2 = np.sin((self.angJoi[i] - self.phimaxref[i]) * self.phiScale[i])
tmpL[i] = self.dirAng[i] * (tmp2 - tmp1) * self.r[i] * self.rho[i] / self.phiScale[i]
self.levelArm[i] = np.cos((self.angJoi[i] - self.phimaxref[i]) * self.phiScale[i]) * self.r[i]
else:
raise ValueError('Invalid muscle level arm offset correction type. ')
self.lmtc = self.lslack + self.lopt + np.sum(tmpL)
self.lce = self.lmtc - self.lslack
self.lse = self.lmtc - self.lce
# unitless parameters
self.Lse = self.lse / self.lslack
self.Lce = self.lce / self.lopt
self.actsubstep = (self.stim - self.act) * self.timestep / 2.0 / self.tau + self.act
self.lcesubstep = self.vce * self.timestep / 2.0 + self.lce
# test
self.lce_avg = self.lce
self.vce_avg = self.vce
self.frcmtc_avg = 0
self.act_avg = self.act
self.frame = 0
# self.Fse = 0.0
# self.Fbe = 0.0
# self.Fpe = 0.0
# self.Fce = 0.0
def stepUpdateState(self, angJoi):
"""
Muscle Tendon Complex Dynamics
update muscle states based on the muscle dynamics
Muscle state stim has to be updated outside before this function is called
"""
# update lmtc and level arm based on the geometry
self.angJoi = angJoi
nJoi = self.typeMuscle
tmpL = np.zeros(nJoi)
for i in range(0, nJoi):
if self.offsetCorr[i] == 0:
tmpL[i] = self.dirAng[i] * (self.angJoi[i] - self.phiref[i]) * self.r[i] * self.rho[i]
self.levelArm[i] = self.r[i]
elif self.offsetCorr[i] == 1:
tmp1 = np.sin((self.phiref[i] - self.phimaxref[i]) * self.phiScale[i])
tmp2 = np.sin((self.angJoi[i] - self.phimaxref[i]) * self.phiScale[i])
tmpL[i] = self.dirAng[i] * (tmp2 - tmp1) * self.r[i] * self.rho[i] / self.phiScale[i]
self.levelArm[i] = np.cos((self.angJoi[i] - self.phimaxref[i]) * self.phiScale[i]) * self.r[i]
else:
raise ValueError('Invalid muscle level arm offset correction type. ')
self.lmtc = self.lslack + self.lopt + np.sum(tmpL)
# update muscle activation
# integration, forward-Euler method
# self.act = (self.stim - self.act) * self.timestep / self.tau + self.act
# integration, trapezoidal method, 2-step
self.act = (self.stim - self.actsubstep) * self.timestep / 2.0 / self.tau + self.actsubstep
self.actsubstep = (self.stim - self.act) * self.timestep / 2.0 / self.tau + self.act
# update lce and lse based on the lmtc
# integration, forward-Euler method
# self.lce = self.vce * self.timestep + self.lce
# integration, trapezoidal method, 2-step
self.lce = self.vce * self.timestep / 2.0 + self.lcesubstep
self.lcesubstep = self.vce * self.timestep / 2.0 + self.lce
self.lse = self.lmtc - self.lce
self.Lse = self.lse / self.lslack
self.Lce = self.lce / self.lopt
# Serial Elastic element (tendon) force-length relationship
if self.Lse > 1.0:
Fse = np.power((self.Lse - 1.0) / self.eref, 2)
else:
Fse = 0.0
# Parallel Elasticity PE
if self.Lce > 1.0:
Fpe = np.power((self.Lce - 1.0) / self.w, 2)
else:
Fpe = 0.0
# update frcmtc
self.frcmtc = Fse * self.frcmax
#self.frcmtc = np.clip(self.frcmtc, 0, self.frcmax)
# Buffer Elasticity BE
if (self.Lce - (1.0 - self.w)) < 0:
Fbe = np.power((self.Lce - (1.0 - self.w)) / (self.w / 2), 2)
else:
Fbe = 0.0
# Contractile Element force-length relationship
tmp = np.power(np.absolute(self.Lce - 1.0) / self.w, 3)
Fce = np.exp(tmp * np.log(self.c))
#Fv = (Fse + Fbe) / (Fpe + Fce * self.act)
if (Fpe + Fce * self.act) < 1e-10: # avoid numerical error
if (Fse + Fbe) < 1e-10:
Fv = 1.0
else:
Fv = (Fse + Fbe) / 1e-10
else:
Fv = (Fse + Fbe) / (Fpe + Fce * self.act)
# Contractile Element inverse force-velocity relationship
if Fv <= 1.0:
# Concentric
v = (Fv - 1) / (Fv * self.K + 1.0)
elif Fv <= self.N:
# excentric
tmp = (Fv - self.N) / (self.N - 1.0)
v = (tmp + 1.0) / (1.0 - tmp * 7.56 * self.K)
else:
# excentric overshoot
v = ((Fv - self.N) * 0.01 + 1)
self.vce = v * self.lopt * self.vmax
v_frac = self.vce / self.vmax
mr_scale = self.act * np.absolute(self.frcmax*self.vmax) *self.timestep
if self.vce <= 1:
self.MR = 0.01 - 0.11*(v_frac) + 0.06*np.exp(-8*v_frac)
else:
self.MR = 0.23 - 0.16*np.exp(-8*v_frac)
self.MR *= mr_scale
self.frame += 1
self.lce_avg = (self.lce_avg*(self.frame - 1) + self.lce) / self.frame
self.vce_avg = (self.vce_avg*(self.frame - 1) + self.vce) / self.frame
self.frcmtc_avg = (self.frcmtc_avg*(self.frame - 1) + self.frcmtc) / self.frame
self.act_avg = (self.act_avg*(self.frame - 1) + self.act) / self.frame
#self.MR = np.exp(-self.MR)
# print(self.MR, np.exp(-self.MR))
# self.Fv = Fv
# self.Fse = Fse
# self.Fbe = Fbe
# self.Fpe = Fpe
# self.Fce = Fce
def reset_state(self):
self.frame = 0
self.lce_avg = 0
self.frcmtc_avg = 0
self.act_avg = 0
self.vce_avg = 0
| mit | -4,500,805,567,716,075,500 | 40.721925 | 120 | 0.566265 | false |
BrentonEarl/slpkg | slpkg/log_deps.py | 1 | 1667 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# log_deps.py file is part of slpkg.
# Copyright 2014-2015 Dimitris Zlatanidis <[email protected]>
# All rights reserved.
# Slpkg is a user-friendly package manager for Slackware installations
# https://github.com/dslackw/slpkg
# Slpkg is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
from slpkg.__metadata__ import MetaData as _meta_
from slpkg.pkg.find import find_package
def write_deps(deps_dict):
"""Write dependencies in a log file
into directory `/var/log/slpkg/dep/`
"""
for name, dependencies in deps_dict.iteritems():
if find_package(name + _meta_.sp, _meta_.pkg_path):
dep_path = _meta_.log_path + "dep/"
if not os.path.exists(dep_path):
os.mkdir(dep_path)
if os.path.isfile(dep_path + name):
os.remove(dep_path + name)
if len(dependencies) >= 1:
                with open(dep_path + name, "w") as f:
                    for dep in dependencies:
                        f.write(dep + "\n")
| gpl-3.0 | -3,798,792,738,748,708,400 | 34.468085 | 70 | 0.652669 | false |
m4nh/roars | scripts/roars/training/meta_trainer.py | 1 | 4030 | from abc import ABCMeta, abstractmethod, abstractproperty
import os
# TODO: do we have these defined somewhere already?
LABEL_FOLDER_NAME = 'labels'
LABEL_FILES_EXTENSION = 'txt'
IMAGE_FOLDER_NAME = 'images'
IMAGE_FILES_EXTENSION = 'jpg'
CLASS_FILE = 'class_list.txt'
class meta_trainer():
"""
Meta class for a generic CNN trainer, it will have different implementation according to the framework used
"""
__metaclass__=ABCMeta
def __init__(self, **kwargs):
self._ready=False
self._setup_trainer(kwargs)
#abstract stuff that should be implemented in the inheriting classes
@abstractproperty
def _default_batch_size(self):
pass
@abstractproperty
def _default_max_iteration(self):
pass
@abstractmethod
def _prepare_dataset(self):
"""Setup the dataset to be used during the training phase, return False if the dataset already exist in the detsination folder, True otherways"""
pass
@abstractmethod
def _setup_training_parameters(self):
"""Create additional configuration file if needed"""
pass
@abstractmethod
def _train(self):
"""Proper training"""
pass
@abstractmethod
def _export(self):
"""Save the best model found"""
pass
#Common high order methods
def _setup_trainer(self,args):
"""
Check the args to see if everything is properly configured and save needed info in internal fields
"""
print('='*50)
print('= Checking Arguments =')
print('='*50)
if 'input_folder' not in args:
print('ERROR: Please specify an input directory')
raise Exception('"input_folder" is missing')
if 'detector_name' not in args:
print('ERROR: "detector_name" not specified, this should not happen')
raise Exception('Detector_name not specified')
if 'batch_size' not in args:
print('WARNING: "batch_size" not specified, using default {}'.format(self._default_batch_size))
args['batch_size']=self._default_batch_size
if 'max_iteration' not in args:
print('WARNING: "max_iteration" not specified, using default {}'.format(self._default_max_iteration))
args['max_iteration']=self._default_max_iteration
        # map args to object fields
self._input_folder = args['input_folder']
self._detector_name = args['detector_name']
self._batch_size = args['batch_size']
self._max_iteration = args['max_iteration']
def _check_ready(self):
"""
If the trainer is not ready raise an exception
"""
if not self._ready:
print('ERROR: trainer not correctly configured, you are not supposed to end here!')
raise Exception('trainer not correctly configured')
@property
def _class_map(self):
"""
Read class list files and return a list containing class names
"""
self._check_ready()
if not hasattr(self,'_c_map'):
if os.path.exists(os.path.join(self._input_folder,CLASS_FILE)):
with open(os.path.join(self._input_folder,CLASS_FILE)) as f_in:
classes =[c.strip() for c in f_in.readlines()]
self._c_map = classes
else:
raise Exception('Unable to find {}'.format(CLASS_FILE))
return self._c_map
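
    # Example class_list.txt (illustrative, not taken from the repo):
    # one class name per line, e.g.
    #   person
    #   car
    #   bicycle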
def train_detector(self,output_folder):
self._check_ready()
self._output_folder = output_folder
#setup data
self._prepare_dataset()
#setup config parameters
self._setup_training_parameters()
        # launch the training
result=self._train()
if result==0:
#save final model
self._export()
print('All DONE!')
else:
            raise Exception('Train Failed')
| gpl-3.0 | 7,888,146,045,697,104,000 | 29.308271 | 153 | 0.588337 | false |
RiceMunk/omnifit | omnifit/fitter/fitter.py | 1 | 23070 | import numpy as np
from lmfit import minimize, Parameters, Parameter
from astropy import units as u
from .. import spectrum
from functions import *
class Fitter():
"""
A class for multi-component fitting to spectroscopic data of ices.
This is the heart of Omnifit, which receives spectra from the spectrum
module, and is then capable of fitting an arbitrary number of different
components to the target spectrum thus designated.
Attributes
----------
target_x : `numpy.ndarray`
The x axis of the target spectrum, e.g. the wavelength.
target_y : `numpy.ndarray`
The y axis of the target spectrum, e.g. the optical depth.
target_dy : `float`
A single number expressing the average uncertainty of the y
axis data.
modelname : `string`
A human-readable name for the model being fitted.
psf : `Nonetype`, `numpy.ndarray`, or `astropy.convolution.Kernel`
If set, this attribute can be used to give a kernel which should
be used to convolve all the fitted data with.
fitrange : `Nonetype` or `list`
If set, this specifies the inclusive limits to which
the fitting should be performed in x axis coordinates.
For example a fitrange of [[200,250],[300,350]] sets
two fitting windows of 200 to 250, and 300 to 350.
color : `string`
A string inidcating the desired plotting color of the target
data, in a format understandable by matplotlib.
funclist : `list`
A list containing all the fittable functions. Each list entry
is a dictionary containing the following keys and values:
* 'name' : A human-readable name for the function being fitted,
in string format.
* 'color' : A string inidcating the desired plotting color of
the data, in a format understandable by matplotlib.
* 'type' : A string indicating what type of data the function
consists of. It can be either 'analytical' or 'empirical',
indicating an analytical function or empirical spectrum,
respectively.
* 'shape' : The shape of the function being fitted. In the case
of an analytical function, this is a string indicating the
callable name of the function. In the case of an empirical
spectrum, this is the y-axis data from the spectrum.
* 'params' : an lmfit `Parameters` instance containing the fitting
parameters appropriate to the data being fitted.
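    For example, a single funclist entry might look like (illustrative;
    assumes a callable named 'gaussian' exists in the functions module)::

      {'name': 'Gaussian feature', 'type': 'analytical',
       'shape': 'gaussian', 'color': 'red', 'params': <Parameters>}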
fitpars : `Parameters`
This is where the fitting parameters are stored during and after
minimization.
fitres : `Minimizer`
The fitting results are stored in this class, as documented in
lmfit.
"""
def __init__(self,x,y,dy=1.0,modelname='Unknown model',psf=None,fitrange=None,color='black'):
"""
Fitter(x,y,dy=1.0,modelname='Unknown model',psf=None,fitrange=None,color='black')
Constructor for the Fitter class. Initialisation happens by
designating the target spectrum.
Parameters
----------
x : `numpy.ndarray`
The x axis of the target spectrum, e.g. the wavelength.
y : `numpy.ndarray`
The y axis of the target spectrum, e.g. the optical depth.
dy : `float`, optional
A single number expressing the average uncertainty of the y
axis data.
modelname : `string`, optional
A human-readable name for the model being fitted.
psf : Nonetype or numpy.ndarray or astropy.convolution.Kernel, optional
This attribute can be used to give a kernel which should be
used to convolve all the fitted data with.
fitrange : `Nonetype` or `list`, optional
If set, this specifies the inclusive limits to which
the fitting should be performed in x axis coordinates.
For example a fitrange of [[200,250],[300,350]] sets
two fitting windows of 200 to 250, and 300 to 350.
color : `string`, optional
      A string indicating the desired plotting color of the target
data, in a format understandable by matplotlib.
"""
if len(x) != len(y):
raise RuntimeError('Input arrays have different sizes.')
self.target_x=x
self.target_y=y
self.target_dy=dy
self.modelname=modelname
self.psf=psf
self.fitrange=fitrange
self.color=color
self.funclist=[]
@classmethod
def fromspectrum(cls,spectrum,**kwargs):
"""
Fitter.fromspectrum(spectrum,**kwargs)
An alternate way to initialise Fitter, by directly giving it
a spectrum. Extracted data from the spectrum are the x, y,
and (if the spectrum has been baselined) dy parameters.
Parameters
----------
spectrum : `omnifit.spectrum.BaseSpectrum` or its child class
The input spectrum.
**kwargs : Arguments, optional
Additional initialisation arguments can be passed to `Fitter`
using this. Note that x and y (and dy, if applicable) are defined
using the data contained in the input spectrum.
"""
if spectrum.baselined:
return cls(spectrum.x.value,spectrum.y.value,spectrum.dy,**kwargs)
else:
return cls(spectrum.x.value,spectrum.y.value,**kwargs)
def add_empirical(self,spectrum,params,funcname=None,color='red'):
"""
add_empirical(spectrum,params,funcname=None,color='red')
Add empirical data in the form of a spectrum to the fitting list.
The spectrum must be interpolated to match the target x axis.
Parameters
----------
spectrum : `spectrum.BaseSpectrum`
The input spectrum.
params : `Parameters`
The input parameters. Specifically this must contain
the 'mul' parameter, which indicates what value the
spectrum will be multiplied with during fitting.
funcname : `Nonetype` or `string`, optional
A human-readable name for the data being fitted.
If this is left as None, the name of the spectrum will
be used.
color : `string`, optional
A string inidcating the desired plotting color of the
data, in a format understandable by matplotlib.
"""
if not(funcname):
funcname=spectrum.name
if not np.all(spectrum.x.value == self.target_x):
raise RuntimeError('Input spectrum x axis does not match the target spectrum x axis.')
self.funclist.append({'type':'empirical','shape':spectrum.y.value,'params':params,'name':funcname,'color':color})
def add_analytical(self,shape,params,funcname='Unknown function',color='red'):
"""
add_analytical(shape,params,funcname=None,color='red')
Add analytical data in the form of a callable function to the
fitting list.
Parameters
----------
shape : `string`
The callable name of the function to be fitted.
params : `Parameters`
The input parameters. These should be formatted in a way that
the function defined by shape can understand them, and that
function should be created in such a way that it can make use
of lmfit parameters.
funcname : `string`, optional
A human-readable name for the data being fitted.
color : `string`, optional
A string inidcating the desired plotting color of the
data, in a format understandable by matplotlib.
"""
self.funclist.append({'type':'analytical','shape':shape,'params':params,'name':funcname,'color':color})
def perform_fit(self,**kwargs):
"""
perform_fit(**kwargs)
Uses `minimize` in lmfit to perform least-squares fitting of all the
functions in the function list to the target data.
Parameters
----------
**kwargs : Arguments, optional
This can be used to give additional arguments for `minimize`.
"""
self.fitpars = self.__extract_pars()
self.fitres=minimize(self.__fit_residual,self.fitpars,**kwargs)
if not(self.fitres.success):
raise RuntimeError('Fitting failed!')
else:
self.fitpars = self.fitres.params
def __fit_residual(self,params,custrange=None):
"""
__fit_residual(params,custrange=None)
This is an internal function used for calculating the total
residual of the data against the fittings function(s), given
a set of lmfit parameters. The residual calculation can also
be limited to a specific x axis range.
Parameters
----------
params : `Parameters`
The parameters used for calculating the residual.
custrange : `Nonetype` or `list`, optional
If set, this specifies the inclusive range within which
the residual is calculated. Otherwise the fitting range
specified during Initialisation is used.
Returns
-------
The residual function within the fitting range with the given
lmfit parameters.
"""
    if custrange is None:
fitrange=self.fitrange
else:
fitrange=custrange
residual=1.0*self.target_y
totModel=np.zeros(len(residual))
for indFunc,cFunc in enumerate(self.funclist):
oPar=Parameters()
cParlist = cFunc['params']
for cPar in cParlist.values():
cParams=params[self.__func_ident(indFunc)+cPar.name]
oPar.add(cPar.name,
value=cParams.value,vary=cParams.vary,
min=cParams.min,max=cParams.max,
expr=cParams.expr)
residual-=self.__parse_function(oPar,cFunc)
#Crop out not-numbers and fitting range exterior if necessary
if np.any(fitrange):
fitInd=np.isinf(residual)
for cRange in fitrange:
fitInd=np.logical_or(fitInd,np.logical_and(
np.less_equal(cRange[0],self.target_x),
np.greater_equal(cRange[1],self.target_x)))
else:
fitInd=np.isfinite(residual)
return residual[fitInd]
def chisq(self,checkrange=None):
"""
chisq(checkrange=None)
Return chi squared of fit, either in a custom range
or in the range used by the fit.
Parameters
----------
checkrange : `Nonetype` or `list`, optional
If set, this specifies the inclusive range within which
the chi squared value is calculated. Otherwise the fitting
range specified during Initialisation is used.
Returns
-------
The chi squared within the desired ranged.
"""
residual = self.__fit_residual(self.fitpars,custrange=checkrange)
return np.sum((residual**2.0)/(self.target_dy**2.0))
def plot_fitresults(self,ax,lw=[1,2,3],color_total='blue',legend=True,**kwargs):
"""
plot_fitresults(ax,lw=[1,2,3],color_total='blue',legend=True,**kwargs)
Plot the fitting results to the given matplotlib axis, with a
number of optional parameters specifying how the different plottable
components are presented.
Parameters
----------
axis : `matplotlib.axis`
The axis which the plot will be generated in.
lw : `list`, optional
This list of 3 numbers specifies the line widths of the target
spectrum, the fitted functions, and the total fit, respectively.
color_total : `string`, optional
A string inidcating the desired plotting color of the total sum
of the fit results, in a format understandable by matplotlib.
The colors of the target spectrum and the fitted functions are
specified during their initialisation and addition.
legend : `bool`, optional
If set to True, a legend is automatically created using the
target spectrum and fitted function names.
**kwargs : Arguments, optional
This can be used to pass additional arguments
to `matplotlib.pyplot.plot`, which is used by this
method for its plotting.
"""
ax.plot(self.target_x,self.target_y,color=self.color,lw=lw[0],**kwargs)
legList = [self.modelname]
#totres=self.targ_y+self.fitres.residual
totRes=np.zeros(len(self.target_y))
for indFunc,cFunc in enumerate(self.funclist):
oPar=Parameters()
cParList = cFunc['params']
cCol = cFunc['color']
for cPar in cParList.values():
cFitPar=self.fitpars[self.__func_ident(indFunc)+cPar.name]
oPar.add(cPar.name,
value=cFitPar.value,vary=cFitPar.vary,
min=cFitPar.min,max=cFitPar.max,
expr=cFitPar.expr)
funcRes = self.__parse_function(oPar,cFunc)
totRes+=funcRes
ax.plot(self.target_x,funcRes,lw=lw[1],color=cCol,**kwargs)
legList.append(cFunc['name'])
legList.append('Total fit')
ax.plot(self.target_x,totRes,lw=lw[2],color=color_total,**kwargs)
if legend:
ax.legend(legList,shadow=True)
def fitresults_tofile(self,filename,detection_threshold=5.0):
"""
fitresults_tofile(filename)
Export fit results to two output files which are intended to be
    easily readable and parseable with other software.
The first file is filename.csv, which contains x and y data of
the fitted models, as would be visualized in a plotted fit result.
The first column of the csv is the x value, which is shared by all
models.
The second column is the y value of data that was being fitted to.
The third column is total sum of fitted models.
The fourth to Nth columns are the individual models, in the order
described in the second file, filename.xml.
The second file, filename.xml is an XML file containing additional
information about the fitted data and the fit results which are not
easily representable in a csv-formatted file. This data is
formatted using the following XML elements:
* INFO : Contains all the other elements described below, and has
the attribute "file", which is the name of the csv file pair of
this xml file.
* MODELNAME : Contains the name of the model.
* HAVEPSF : A boolean value indicating whether there is a PSF
associated with the model.
* RMS_DATA : The uncertainty of the data.
* NUMBER_FUNCTIONS : An integer indicating how many functions
have been fitted to the total data.
In addition to the above elements, each fitted function has its own
element, designated FUNCTION, having the attribute "name" which is
the name of the function. FUNCTION contains the following elements:
* TYPE : If the function is an empirical one, this contains the
string "empirical". Otherwise it contains the name of the
called analytical function.
    * DETECTION : When generating the contents of this element,
      the method is_nondet is called with the detection threshold
      designated by the parameter detection_threshold. The result
      given by the method is indicated here with a "True" or "False"
      depending on whether the result is considered a detection.
* CSV_COLUMN : Indicates which column in the CSV contains the
fitted data for this function.
    * NUMBER_PARAMS : Indicates how many parameters are used by
      this function, i.e. the number of PARAMETER elements.
Finally, contained within each FUNCTION element is a number of
PARAMETER elements, which list the best-fit data for each fitted
parameter pertaining to that function. Each PARAMETER element
contains the attribute "name", which tells the name of the
parameter. In addition the following elements are contained by
each PARAMETER element:
* VALUE : The best-fit value for this parameter.
Parameters
----------
filename : `string`
The extensionless version of the desired filename which the
data should be exported to. As a result the files
"filename.csv" and "filename.xml" are created.
detection_threshold : `float`, optional
The threshold of detection to be used in determining whether
the value contained by the DETECTION element is true or not.
"""
filename_csv = filename+'.csv'
filename_xml = filename+'.xml'
file_xml = open(filename_xml,'w')
file_xml.write('<!-- Automatically generated information file for csv file '+filename_csv+'-->\n')
file_xml.write('<INFO file="'+filename_csv+'">\n')
file_xml.write('<MODELNAME>'+self.modelname+'</MODELNAME>\n')
    file_xml.write('<HAVEPSF>'+str(self.psf is not None)+'</HAVEPSF>\n')
file_xml.write('<RMS_DATA>'+str(self.target_dy)+'</RMS_DATA>\n')
file_xml.write('<NUMBER_FUNCTIONS>'+str(len(self.funclist))+'</NUMBER_FUNCTIONS>\n')
outdata_csv = np.vstack([self.target_x,self.target_y])
outdata_functions = np.empty([0,len(self.target_x)])
totRes = np.zeros(len(self.target_x))
for indFunc,cFunc in enumerate(self.funclist):
file_xml.write('<FUNCTION name="'+cFunc['name']+'">\n')
file_xml.write('<TYPE>')
if cFunc['type'] == 'analytical':
file_xml.write(cFunc['shape'])
elif cFunc['type'] == 'empirical':
file_xml.write('empirical')
else:
        file_xml.write('unknown')
file_xml.write('</TYPE>\n')
file_xml.write('<DETECTION>'+str(not self.is_nondet(sigma=detection_threshold)[cFunc['name']])+'</DETECTION>\n')
file_xml.write('<CSV_COLUMN>'+str(indFunc+3)+'</CSV_COLUMN>\n')
cParlist = cFunc['params']
file_xml.write('<NUMBER_PARAMS>'+str(len(cParlist))+'</NUMBER_PARAMS>\n')
oPar=Parameters()
for cPar in cParlist.values():
file_xml.write('<PARAMETER name="'+cPar.name+'">\n')
cFitPar=self.fitpars[self.__func_ident(indFunc)+cPar.name]
oPar.add(cPar.name,
value=cFitPar.value,vary=cFitPar.vary,
min=cFitPar.min,max=cFitPar.max,
expr=cFitPar.expr)
file_xml.write('<VALUE>'+str(cFitPar.value)+'</VALUE>\n')
file_xml.write('</PARAMETER>\n')
funcRes = self.__parse_function(oPar,cFunc)
outdata_functions = np.vstack([outdata_functions,funcRes])
totRes += funcRes
file_xml.write('</FUNCTION>\n')
file_xml.write('</INFO>')
file_xml.close()
outdata_csv = np.vstack([outdata_csv,totRes,outdata_functions])
np.savetxt(filename_csv,outdata_csv.transpose(),delimiter=',',header='For info, see '+filename_xml)
def is_nondet(self,sigma=5.0):
"""
is_nondet(sigma=5.0)
Determines whether the fitted functions in the function list can
be considered detections or non-detections using the given detection
thereshold. This is done by comparing the peak of the fitted function
within the fitting range to a multiple (set by the parameter sigma)
of the RMS noise in the target data.
It should be emphasized that unless the dy attribute has been set
during the fitter class initialisation, the results returned by this
method are meaningless.
Parameters
----------
sigma : `float`, optional
The multiplier that should be applied to the noise when comparing
it against the fitted function peaks.
Returns
-------
A dictionary containing boolean values for each function (with
their names as the keys) and the total fit (key 'total'), with
True indicating that the function is considered a non-detection
using the criteria outlined above.
"""
minY = sigma*self.target_dy
out = {}
totRes = np.zeros(len(self.target_x))
for indFunc,cFunc in enumerate(self.funclist):
cParlist = cFunc['params']
oPar=Parameters()
for cPar in cParlist.values():
cFitPar=self.fitpars[self.__func_ident(indFunc)+cPar.name]
oPar.add(cPar.name,
value=cFitPar.value,vary=cFitPar.vary,
min=cFitPar.min,max=cFitPar.max,
expr=cFitPar.expr)
funcRes = self.__parse_function(oPar,cFunc)
if np.max(funcRes) < minY:
out[cFunc['name']] = True
else:
out[cFunc['name']] = False
totRes += funcRes
if np.max(totRes) < minY:
out['total'] = True
else:
out['total'] = False
return out
def fit_results(self):
"""
fit_results()
Return the fitting results as a dictionary.
Parameters
----------
None
Returns
-------
A dictionary containing all the individual functions which were
fitted. The key-value combinations of this dictionary consist of
the function name, and its lmfit Parameters instance, which
contains the best-fit results.
"""
oResults={}
for indFunc,cFunc in enumerate(self.funclist):
oKeyname_base=cFunc['name']
oKeyind=0
oKeyname=oKeyname_base
while oResults.__contains__(oKeyname): #In case of duplicate function names
oKeyind+=1
oKeyname=oKeyname_base+'(duplicate '+str(oKeyind)+')'
      oResults[oKeyname]=self.__fit_result(indFunc)
return oResults
def __fit_result(self,index):
"""
__fit_result(index)
Return fitting results for a specific function in the internal
function list.
Parameters
----------
index : `int`
Desired index of the function to fetch from the function lsit.
Returns
-------
An `Parameters` instance containing the fitting
results for the desired function.
"""
oParlist=self.funclist[index]['params']
for cParname in oParlist.keys():
coPar=self.fitpars[self.__func_ident(index)+cParname]
coPar.name=cParname
oParlist[cParname]=coPar
return oParlist
def __parse_function(self,params,function):
"""
__parse_function(params,function)
Parse the input function, insert parameters, return result.
Parameters
----------
params : `Parameters`
The lmfit `Parameters` instance to use as input parameters.
function : `dict`
A dictionary formatted in the style that the entries inside
funclist are formatted
Returns
-------
The result of the given function with given parameters.
"""
if function['type']=='empirical':
funcres=muldata(function['shape'],params['mul'].value)
elif function['type']=='analytical':
funcres=globals()[function['shape']](self.target_x,params,self.psf)
else:
raise RuntimeError('Unknown function type!')
return funcres
def __extract_pars(self):
"""
__extract_pars()
Extracts the paramers from the function list and converts them to
a single lmfit Parameters instance, which can then be manipulated
by the residual minimization routines.
Parameters
----------
None
Returns
-------
An lmfit `Parameters` instance containing the parameters
of *all* the fittable functions in a single place.
"""
oPars=Parameters()
for indFunc,cFunc in enumerate(self.funclist):
cParlist = cFunc['params']
for cPar in cParlist.values():
oPars.add(self.__func_ident(indFunc)+cPar.name,
value=cPar.value,vary=cPar.vary,
min=cPar.min,max=cPar.max,
expr=cPar.expr)
return oPars
def __func_ident(self,index):
"""
__func_ident(index)
Generate a unique prefix string for a function, which can be
used by `__extract_pars` to generate its master Parameters list.
Parameters
----------
index : `int`
The index of the function.
Returns
-------
A unique identifier string pertaining to that function, which
can be used to generate unique parameter names.
"""
    return '__Func'+str(index)+'__'
| bsd-3-clause | 300,213,225,591,060,030 | 37.971284 | 118 | 0.670741 | false |
apache/chemistry-cmislib | src/cmislib/net.py | 2 | 5195 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""
Module that takes care of network communications for cmislib. It does
not know anything about CMIS or do anything special with regard to the
response it receives.
"""
from urllib import urlencode
import logging
import httplib2
class RESTService(object):
"""
Generic service for interacting with an HTTP end point. Sets headers
such as the USER_AGENT and builds the basic auth handler.
"""
def __init__(self):
self.user_agent = 'cmislib/%s +http://chemistry.apache.org/'
self.logger = logging.getLogger('cmislib.net.RESTService')
def get(self,
url,
username=None,
password=None,
**kwargs):
""" Makes a get request to the URL specified."""
headers = {}
if kwargs:
if 'headers' in kwargs:
headers = kwargs['headers']
del kwargs['headers']
self.logger.debug('Headers passed in: %s', headers)
if url.find('?') >= 0:
url = url + '&' + urlencode(kwargs)
else:
url = url + '?' + urlencode(kwargs)
self.logger.debug('About to do a GET on:' + url)
h = httplib2.Http()
h.add_credentials(username, password)
headers['User-Agent'] = self.user_agent
return h.request(url, method='GET', headers=headers)
def delete(self, url, username=None, password=None, **kwargs):
""" Makes a delete request to the URL specified. """
headers = {}
if kwargs:
if 'headers' in kwargs:
headers = kwargs['headers']
del kwargs['headers']
self.logger.debug('Headers passed in: %s', headers)
if url.find('?') >= 0:
url = url + '&' + urlencode(kwargs)
else:
url = url + '?' + urlencode(kwargs)
self.logger.debug('About to do a DELETE on:' + url)
h = httplib2.Http()
h.add_credentials(username, password)
headers['User-Agent'] = self.user_agent
return h.request(url, method='DELETE', headers=headers)
def put(self,
url,
payload,
contentType,
username=None,
password=None,
**kwargs):
"""
Makes a PUT request to the URL specified and includes the payload
that gets passed in. The content type header gets set to the
specified content type.
"""
headers = {}
if kwargs:
if 'headers' in kwargs:
headers = kwargs['headers']
del kwargs['headers']
self.logger.debug('Headers passed in: %s', headers)
if url.find('?') >= 0:
url = url + '&' + urlencode(kwargs)
else:
url = url + '?' + urlencode(kwargs)
self.logger.debug('About to do a PUT on:' + url)
h = httplib2.Http()
h.add_credentials(username, password)
headers['User-Agent'] = self.user_agent
if contentType is not None:
headers['Content-Type'] = contentType
return h.request(url, body=payload, method='PUT', headers=headers)
def post(self,
url,
payload,
contentType,
username=None,
password=None,
**kwargs):
"""
Makes a POST request to the URL specified and posts the payload
that gets passed in. The content type header gets set to the
specified content type.
"""
headers = {}
if kwargs:
if 'headers' in kwargs:
headers = kwargs['headers']
del kwargs['headers']
self.logger.debug('Headers passed in: %s', headers)
if url.find('?') >= 0:
url = url + '&' + urlencode(kwargs)
else:
url = url + '?' + urlencode(kwargs)
self.logger.debug('About to do a POST on:' + url)
h = httplib2.Http()
h.add_credentials(username, password)
headers['User-Agent'] = self.user_agent
if contentType is not None:
headers['Content-Type'] = contentType
return h.request(url, body=payload, method='POST', headers=headers)
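

# Example usage (illustrative only; the endpoint URL and credentials below
# are placeholders, not part of cmislib):
#
#   svc = RESTService()
#   response, content = svc.get('http://cmis.example.com/atom',
#                               username='admin', password='admin')
#   print response['status'], len(content)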
| apache-2.0 | 5,155,019,192,614,484,000 | 31.879747 | 75 | 0.561309 | false |
joaomoreno/facilis | facilis/core/web.py | 1 | 2344 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Facilis
# João Moreno <http://www.joaomoreno.com/>
# GPLv3
from SocketServer import ThreadingMixIn
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
from threading import Thread
import sys
from misc import UnknownURL, UnknownFile
from os.path import split, getsize
from pkg_resources import resource_string, resource_filename
CHUNK_SIZE = 65536
print sys.version_info
class HTTPRequestHandler(BaseHTTPRequestHandler):
"""HTTP request handler for Facilis"""
def do_GET(self):
"""Respond to a GET request."""
req = self.path[1:]
if req == "logo.png":
self.__sendFile(resource_filename(__name__, "resources/logo.png"), "image/png")
else:
try:
fname, mime = self.server.getFile(req)
print fname, mime
self.__sendFile(fname, mime)
except:
self.send_response(404)
self.send_header("Content-type", "text/html")
self.send_header("Connection", "close")
self.end_headers()
self.wfile.write(resource_string(__name__, "resources/404.html"))
def __sendFile(self, fname, mime):
print mime
name = split(fname)[1]
self.send_response(200)
if mime:
self.send_header("Content-type", mime)
self.send_header("Connection", "close")
self.send_header("Content-Disposition", 'attachment; filename="' + name + '"')
self.send_header("Content-Length", getsize(fname))
self.end_headers()
f = open(fname, "rb")
self.wfile.write(f.read())
f.close()
class FacilisServer(ThreadingMixIn, HTTPServer):
def __init__(self, address, handler, app):
HTTPServer.__init__(self, address, handler)
self.app = app
def getFile(self, url):
return self.app.getFile(url)
class ServerHandler(Thread):
def __init__(self, app, port):
Thread.__init__(self)
self.app = app
self.port = port
def run(self):
try:
httpd = FacilisServer(('', self.port), HTTPRequestHandler, self.app)
httpd.serve_forever()
httpd.server_close()
except:
exit(-2)
| gpl-3.0 | 2,496,950,977,492,467,000 | 29.038462 | 91 | 0.578745 | false |
wholeshoot/mongo_datatables | mongo_datatables/editor.py | 1 | 2907 | from bson.objectid import ObjectId
import json
class Editor(object):
def __init__(self, pymongo_object, collection, request_args, doc_id):
"""
:param pymongo_object:
:param collection:
:param request_args:
:param doc_id:
"""
self.mongo = pymongo_object
self.collection = collection
self.request_args = request_args
self.doc_id = doc_id
@property
def db(self):
return self.mongo.db
@property
def action(self):
return self.request_args.get("action")
@property
def data(self):
return self.request_args.get("data")
@property
def list_of_ids(self):
return self.doc_id.split(",")
def remove(self):
"""
:return: empty {}
"""
for _id in self.list_of_ids:
self.db[self.collection].delete_one({"_id": ObjectId(_id)})
return {}
def create(self):
"""
Use PyMongo insert_one to add a document to a collection. self.data contains the new entry with no _id, like
{'0': {'val': 'test', 'group': 'test', 'text': 'test'}}
:return: output like {'data': [{'DT_RowID': 'x', ... }]}
"""
        data_obj = {k: v for k, v in self.data['0'].items() if v}  # drop keys with empty values
# try to save an object or array
for key, val in data_obj.items():
try:
data_obj[key] = json.loads(val)
except (json.decoder.JSONDecodeError, TypeError):
pass
self.db[self.collection].insert_one(data_obj)
# After insert, data_obj now includes an _id of type ObjectId, but we need it named DT_RowId and of type str.
data_obj["DT_RowId"] = str(data_obj.pop("_id", None))
return {"data": [data_obj]}
def edit(self):
"""
:return: output like { 'data': [ {'DT_RowID': 'x', ... }, {'DT_RowID': 'y',... }, ...]}
"""
data = []
for _id in self.list_of_ids:
            doc = {k: v for k, v in self.data[_id].items() if v}  # drop keys with empty values
# try to save an object or array
for key, val in doc.items():
try:
doc[key] = json.loads(val)
except (json.decoder.JSONDecodeError, TypeError):
pass
self.db[self.collection].update_one({"_id": ObjectId(_id)}, {"$set": doc}, upsert=False)
# add the _id to the doc object
doc["DT_RowId"] = _id
# add each doc object to the data array
data.append(doc)
return {"data": data}
def update_rows(self):
if self.action == "remove":
return self.remove()
elif self.action == "create":
return self.create()
elif self.action == "edit":
return self.edit()
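

# Example usage (illustrative; assumes `mongo` is a Flask-PyMongo-style
# object exposing a `.db` attribute and `request_args` is the parsed
# DataTables Editor request payload):
#
#   editor = Editor(mongo, 'users', request_args, doc_id)
#   result = editor.update_rows()  # dispatches to create/edit/remove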
| mit | 7,445,774,761,510,483,000 | 27.782178 | 117 | 0.522876 | false |
shawnadelic/shuup | shuup/core/models/_suppliers.py | 1 | 3709 | # -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from enumfields import Enum, EnumIntegerField
from jsonfield import JSONField
from shuup.core.fields import InternalIdentifierField
from shuup.core.modules import ModuleInterface
from shuup.utils.analog import define_log_model
from ._base import ShuupModel
class SupplierType(Enum):
INTERNAL = 1
EXTERNAL = 2
class Labels:
INTERNAL = _('internal')
EXTERNAL = _('external')
@python_2_unicode_compatible
class Supplier(ModuleInterface, ShuupModel):
default_module_spec = "shuup.core.suppliers:BaseSupplierModule"
module_provides_key = "supplier_module"
identifier = InternalIdentifierField(unique=True)
name = models.CharField(verbose_name=_("name"), max_length=64)
type = EnumIntegerField(SupplierType, verbose_name=_("supplier type"), default=SupplierType.INTERNAL)
stock_managed = models.BooleanField(verbose_name=_("stock managed"), default=False)
module_identifier = models.CharField(max_length=64, blank=True, verbose_name=_('module'))
module_data = JSONField(blank=True, null=True, verbose_name=_("module data"))
def __str__(self):
return self.name
def get_orderability_errors(self, shop_product, quantity, customer):
"""
:param shop_product: Shop Product
:type shop_product: shuup.core.models.ShopProduct
:param quantity: Quantity to order
:type quantity: decimal.Decimal
        :param customer: Ordering contact.
        :type customer: shuup.core.models.Contact
:rtype: iterable[ValidationError]
"""
return self.module.get_orderability_errors(shop_product=shop_product, quantity=quantity, customer=customer)
def get_stock_statuses(self, product_ids):
"""
:param product_ids: Iterable of product IDs
:return: Dict of {product_id: ProductStockStatus}
:rtype: dict[int, shuup.core.stocks.ProductStockStatus]
"""
return self.module.get_stock_statuses(product_ids)
def get_stock_status(self, product_id):
"""
:param product_id: Product ID
:type product_id: int
:rtype: shuup.core.stocks.ProductStockStatus
"""
return self.module.get_stock_status(product_id)
def get_suppliable_products(self, shop, customer):
"""
:param shop: Shop to check for suppliability
:type shop: shuup.core.models.Shop
:param customer: Customer contact to check for suppliability
:type customer: shuup.core.models.Contact
:rtype: list[int]
"""
return [
shop_product.pk
for shop_product
in self.shop_products.filter(shop=shop)
if shop_product.is_orderable(self, customer, shop_product.minimum_purchase_quantity)
]
def adjust_stock(self, product_id, delta, created_by=None, type=None):
from shuup.core.suppliers.base import StockAdjustmentType
adjustment_type = type or StockAdjustmentType.INVENTORY
return self.module.adjust_stock(product_id, delta, created_by=created_by, type=adjustment_type)
def update_stock(self, product_id):
return self.module.update_stock(product_id)
def update_stocks(self, product_ids):
return self.module.update_stocks(product_ids)
SupplierLogEntry = define_log_model(Supplier)
| agpl-3.0 | 7,370,786,921,387,717,000 | 36.09 | 115 | 0.689674 | false |
bioinfo-core-BGU/neatseq-flow_modules | neatseq_flow_modules/RNA_seq/trinity_statistics.py | 1 | 8022 | # -*- coding: UTF-8 -*-
"""
``trinity_statistics``
-----------------------------------------------------------------
:Authors: Menachem Sklarz
:Affiliation: Bioinformatics core facility
:Organization: National Institute of Biotechnology in the Negev, Ben Gurion University.
A class that defines a module for running ``abundance_estimates_to_matrix.pl`` on genes or isoforms counts tables produced by ``align_and_estimate_abundance.pl``
See the script documentation `here <https://github.com/trinityrnaseq/trinityrnaseq/wiki/Trinity-Transcript-Quantification#building-expression-matrices>`_.
This conversion makes sense at the project level - combining all sample matrices into a single, normalized, comparison table. However, for completeness, we included a sample scope option for running the script in each sample separately.
.. Note:: ``scope`` is not defined for this module. It only makes sense to run ``abundance_estimates_to_matrix`` when comparing many samples against a single assembly
Requires
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* Either ``genes.results`` or ``isoforms.results`` files in the following slots:
* ``sample_data[<sample>]["genes.results"]``
* ``sample_data[<sample>]["isoforms.results"]``
Output:
~~~~~~~~~~~~~
* Creates the following files in the following slots:
* ``<project>.counts.matrix`` in ``self.sample_data["project_data"]["counts.matrix"]``
* ``<project>.not_cross_norm.fpkm.tmp`` in ``self.sample_data["project_data"]["not_cross_norm.fpkm.tmp"]``
* ``<project>.not_cross_norm.fpkm.tmp.TMM_info.txt`` in ``self.sample_data["project_data"]["not_cross_norm.fpkm.tmp.TMM_info.txt"]``
* ``<project>.TMM.fpkm.matrix`` in ``self.sample_data["project_data"]["TMM.fpkm.matrix"]``
Parameters that can be set
~~~~~~~~~~~~~~~~~~~~~~~~~~
.. csv-table::
:header: "Parameter", "Values", "Comments"
"use_genes", "", "Use 'genes.results' matrix. If not passed, use 'isoforms.results'"
"redirects: --gene_trans_map", "path or 'none'", "If path, use path as gene_trans_map for all samples. If 'none', does not produce gene level estimates. **In order to use an internal gene_trans_map, do not pass this parameter!**"
Lines for parameter file
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
::
trin_map_stats:
module: trinity_statistics
base: trin_map1
script_path: /path/to/abundance_estimates_to_matrix.pl
use_genes:
redirects:
--est_method: RSEM
References
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Grabherr, M.G., Haas, B.J., Yassour, M., Levin, J.Z., Thompson, D.A., Amit, I., Adiconis, X., Fan, L., Raychowdhury, R., Zeng, Q. and Chen, Z., 2011. **Trinity: reconstructing a full-length transcriptome without a genome from RNA-Seq data**. *Nature biotechnology*, 29(7), p.644.
"""
import os
import sys
import re
from neatseq_flow.PLC_step import Step,AssertionExcept
__author__ = "Menachem Sklarz"
__version__ = "1.6.0"
class Step_trinity_statistics(Step):
def step_specific_init(self):
self.shell = "bash" # Can be set to "bash" by inheriting instances
self.file_tag = "trin_stats"
if "use_genes" not in self.params:
self.write_warning("'use_genes' not passed. Using 'isoforms.results' matrix")
def step_sample_initiation(self):
""" A place to do initiation stages following setting of sample_data
Here you should do testing for dependency output. These will NOT exist at initiation of this instance. They are set only following sample_data updating
"""
        # In newer Trinity versions, --gene_trans_map is compulsory.
        # If not passed by the user:
        #     If the project already has a gene_trans_map, use it.
        #     Otherwise, pass "none".
        # If passed:
        #     If passed with a value, use that value and store it in the
        #     project's "gene_trans_map" slot.
        #     If passed without a value, raise an error.
if "--gene_trans_map" not in self.params["redir_params"]:
if "gene_trans_map" in self.sample_data["project_data"]:
self.params["redir_params"]["--gene_trans_map"] = self.sample_data["project_data"]["gene_trans_map"]
self.use_gene_trans_map = True
else:
self.params["redir_params"]["--gene_trans_map"] = "none"
self.use_gene_trans_map = False
else: # --gene_trans_map is defined in redir_params
if self.params["redir_params"]["--gene_trans_map"] == None:
raise AssertionExcept("You passed --gene_trans_map with no value. Please specify path or 'none'")
elif self.params["redir_params"]["--gene_trans_map"] == "none":
self.use_gene_trans_map = False
else:
self.sample_data["project_data"]["gene_trans_map"] = self.params["redir_params"]["--gene_trans_map"]
self.use_gene_trans_map = True
def create_spec_wrapping_up_script(self):
""" Add stuff to check and agglomerate the output data
"""
def build_scripts(self):
# Name of specific script:
self.spec_script_name = self.set_spec_script_name()
self.script = ""
# This line should be left before every new script. It sees to local issues.
# Use the dir it returns as the base_dir for this step.
use_dir = self.local_start(self.base_dir)
prefix = self.sample_data["Title"]
self.script += self.get_script_const()
self.script += "--out_prefix %s \\\n\t" % os.sep.join([use_dir, prefix])
# type2use is 'genes.results' or 'isoforms.results'. This is used to then select the correct slot from "mapping"
type2use = "genes.results" if "use_genes" in list(self.params.keys()) else "isoforms.results"
for sample in self.sample_data["samples"]:
try:
self.script += "%s \\\n\t" % self.sample_data[sample][type2use]
except:
raise AssertionExcept("file type %s does not exist for sample." % type2use, sample)
self.script = self.script.rstrip("\\\n\t")
self.script += "\n\n"
if not "version" in self.params or self.params["version"].lower() == "new":
# Storing all output files even though probably not very useful downstream...
self.sample_data["project_data"]["isoform.raw_counts"] = os.sep.join([self.base_dir, "%s.isoform.counts.matrix" % prefix])
self.sample_data["project_data"]["isoform.norm_counts"] = os.sep.join([self.base_dir, "%s.isoform.TPM.not_cross_norm" % prefix])
self.stamp_file(self.sample_data["project_data"]["isoform.raw_counts"] )
self.stamp_file(self.sample_data["project_data"]["isoform.norm_counts"])
if(self.use_gene_trans_map): # True when --gene_trans_map is not "none"
self.sample_data["project_data"]["gene.raw_counts"] = os.sep.join([self.base_dir, "%s.gene.counts.matrix" % prefix])
self.sample_data["project_data"]["gene.norm_counts"] = os.sep.join([self.base_dir, "%s.gene.TPM.not_cross_norm" % prefix])
self.stamp_file(self.sample_data["project_data"]["gene.raw_counts"] )
self.stamp_file(self.sample_data["project_data"]["gene.norm_counts"])
else:
self.write_warning("Not storing output files for old version of trinity. "
"If required, load the appropriate files with a 'manage_types' module")
# Move all files from temporary local dir to permanent base_dir
# Sees to copying local files to final destination (and other stuff)
self.local_finish(use_dir,self.base_dir)
self.create_low_level_script()
| gpl-3.0 | -7,218,198,031,064,507,000 | 42.597826 | 279 | 0.596111 | false |
shoopio/shoop | shuup/admin/modules/attributes/__init__.py | 2 | 1483 | # -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2019, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the OSL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
from shuup.admin.base import AdminModule, MenuEntry
from shuup.admin.menu import STOREFRONT_MENU_CATEGORY
from shuup.admin.utils.urls import derive_model_url, get_edit_and_list_urls
from shuup.core.models import Attribute
class AttributeModule(AdminModule):
name = _("Attributes")
breadcrumbs_menu_entry = MenuEntry(text=name, url="shuup_admin:attribute.list")
def get_urls(self):
return get_edit_and_list_urls(
url_prefix="^attributes",
view_template="shuup.admin.modules.attributes.views.Attribute%sView",
name_template="attribute.%s"
)
def get_menu_category_icons(self):
return {self.name: "fa fa-tags"}
def get_menu_entries(self, request):
return [
MenuEntry(
text=_("Attributes"),
icon="fa fa-tags",
url="shuup_admin:attribute.list",
category=STOREFRONT_MENU_CATEGORY,
ordering=8
)
]
def get_model_url(self, object, kind, shop=None):
return derive_model_url(Attribute, "shuup_admin:attribute", object, kind)
| agpl-3.0 | -2,326,765,358,164,317,000 | 32.704545 | 83 | 0.650708 | false |
jehine-MSFT/azure-storage-python | azure/storage/_common_conversion.py | 1 | 2874 | #-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
import base64
import hashlib
import hmac
import sys
from dateutil.tz import tzutc
from .models import (
_unicode_type,
)
if sys.version_info < (3,):
def _str(value):
if isinstance(value, unicode):
return value.encode('utf-8')
return str(value)
else:
_str = str
def _to_str(value):
return _str(value) if value is not None else None
def _int_to_str(value):
return str(int(value)) if value is not None else None
def _bool_to_str(value):
if value is None:
return None
if isinstance(value, bool):
if value:
return 'true'
else:
return 'false'
return str(value)
def _to_utc_datetime(value):
return value.strftime('%Y-%m-%dT%H:%M:%SZ')
def _datetime_to_utc_string(value):
# Azure expects the date value passed in to be UTC.
# Azure will always return values as UTC.
# If a date is passed in without timezone info, it is assumed to be UTC.
if value is None:
return None
if value.tzinfo:
value = value.astimezone(tzutc())
return value.strftime('%a, %d %b %Y %H:%M:%S GMT')
def _encode_base64(data):
if isinstance(data, _unicode_type):
data = data.encode('utf-8')
encoded = base64.b64encode(data)
return encoded.decode('utf-8')
def _decode_base64_to_bytes(data):
if isinstance(data, _unicode_type):
data = data.encode('utf-8')
return base64.b64decode(data)
def _decode_base64_to_text(data):
decoded_bytes = _decode_base64_to_bytes(data)
return decoded_bytes.decode('utf-8')
def _sign_string(key, string_to_sign, key_is_base64=True):
if key_is_base64:
key = _decode_base64_to_bytes(key)
else:
if isinstance(key, _unicode_type):
key = key.encode('utf-8')
if isinstance(string_to_sign, _unicode_type):
string_to_sign = string_to_sign.encode('utf-8')
signed_hmac_sha256 = hmac.HMAC(key, string_to_sign, hashlib.sha256)
digest = signed_hmac_sha256.digest()
encoded_digest = _encode_base64(digest)
return encoded_digest
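
# Example (illustrative): producing a SharedKey-style signature from a
# base64-encoded account key and a canonicalized string-to-sign:
#
#   signature = _sign_string(account_key_b64, string_to_sign)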
def _lower(text):
return text.lower()
| apache-2.0 | -4,880,600,829,352,784,000 | 26.883495 | 76 | 0.626741 | false |
Artimi/waktu | waktu/timetracker.py | 1 | 6274 | #!/usr/bin/env python2.7
#-*- coding: UTF-8 -*-
from category import Category
from gi.repository import Wnck, Gdk, Gtk, GObject, Notify, GLib
from activityrecord import ActivityRecord
from threading import Thread, Event
from time import sleep, time
import copy
class TimeTracker(Thread):
"""Core module of this project. It's running in separated thread
to not block GUI."""
stopthread = Event()
track = Event()
mode = Event()
def __init__(self, _stat, _categories, _activities, _configuration):
Thread.__init__(self)
self.categories = _categories
self.activities = _activities
self.stat = _stat
self.lastActivity = ActivityRecord()
self.screen = Wnck.Screen.get_default()
self.n = Notify.Notification()
self.tmpName = ''
if _configuration.getValue('state'):
self.track.set()
else:
self.track.clear()
if _configuration.getValue('mode'):
self.mode.set()
else:
self.mode.clear()
def run(self):
"""Start tracking user activities"""
while not self.stopthread.isSet():
sleep(1)
"""Skip tracking if it's disabled"""
if not self.track.isSet():
continue
Gdk.threads_enter()
GObject.idle_add(self.screen.force_update)
active_window = self.screen.get_active_window()
"""Skip if there is no active window"""
if active_window == None:
Gdk.threads_leave()
continue
appName = active_window.get_application().get_name()
appPid = active_window.get_application().get_pid()
"""If the learning mode is activive, only append an activity"""
if self.mode.isSet():
self.activities.addActivity(appName)
Gdk.threads_leave()
continue
if self.lastActivity.getActivity().getPid() == appPid:
"""Still the same activity, just actualize the end time"""
self.lastActivity.setEndTime(time())
else:
"""New activity, actualize the lastActivity and append
the new activity"""
if self.lastActivity.getActivity().getPid() != 0:
tmp = copy.deepcopy(self.lastActivity)
self.stat.appendActivityRecord(tmp)
self.activities.addActivity(tmp.getActivity().getName())
print "DBG: Zmena aktivity! Ulozena aktivita %s (%s)" % (tmp.getActivity().getName(), tmp.getCategory())
self.lastActivity.getActivity().setName(appName)
self.lastActivity.getActivity().setPid(appPid)
self.lastActivity.setCategory('OTHER')
self.getCorrectCategory()
self.lastActivity.setStartTime(time())
self.lastActivity.setEndTime(time())
Gdk.threads_leave()
if self.track.isSet() and not self.mode.isSet():
tmp = copy.deepcopy(self.lastActivity)
self.stat.appendActivityRecord(tmp)
print "DBG: Ulozena aktivita %s (%s)" % (tmp.getActivity().getName(), tmp.getCategory())
"""Store all records to file to make them persistent"""
self.stat.storeRecords()
self.activities.storeActivities()
def stop(self):
"""Stop the tracking system, uses id stored in initialization"""
self.stopthread.set()
def getCorrectCategory(self, _activity = None):
"""Find out category where the activity belongs to"""
if _activity == None:
_activity = self.lastActivity.getActivity()
activityCategories = self.categories.getContainingCategories(_activity)
if len(activityCategories) == 0:
"""The activity isn't in any category"""
self.lastActivity.setCategory('OTHER')
elif len(activityCategories) == 1:
"""The activity is in exactly one category"""
self.lastActivity.setCategory(activityCategories[0].name)
else:
"""The activity is in more than one category.
The Waktu needs to ask user."""
lastOccurrence = self.stat.getLastOccurrence(_activity.getName())
            if lastOccurrence is None or (time() - lastOccurrence.getEndTime()) > 600:  # 10 minutes is the default time to remember the user's choice
self.askUser(_activity, activityCategories)
else:
self.lastActivity.setCategory(lastOccurrence.getCategory())
def askUser(self, _activity, _categories):
"""Creates a notification and asks a user where the activity belongs to"""
if not Notify.is_initted():
Notify.init('Waktu')
self.n.clear_hints()
self.n.clear_actions()
        self.n.set_property('summary','Where does the activity %s belong?' % _activity.getName())
        self.n.set_property('body', 'It seems this activity is selected in more than one category. Please choose below the one this activity belongs to right now.')
self.n.set_property('icon_name','dialog-question')
self.n.set_urgency(Notify.Urgency.NORMAL)
self.n.set_timeout(Notify.EXPIRES_NEVER)
self.n.set_hint("resident", GLib.Variant('b',True))
for cat in _categories:
self.n.add_action(cat.name, cat.name, self.getUserAnswer, _activity, None)
self.n.add_action("OTHER", "Jinam", self.getUserAnswer, _activity, None)
self.n.show()
def getUserAnswer(self, n, _action, _data):
"""Process user answer and delegate result"""
n.close()
if self.lastActivity.getActivity().getName() == _data.getName():
"""The focused app is still the same"""
self.lastActivity.setCategory(_action)
else:
"""There is another activity, need to find it backwards"""
self.stat.getLastOccurrence(_data.getName()).setCategory(_action)
| mit | 6,195,104,910,922,810,000 | 40.184211 | 166 | 0.579393 | false |
pyfa-org/Pyfa | gui/builtinContextMenus/itemProject.py | 1 | 1630 | import wx
import gui.fitCommands as cmd
import gui.mainFrame
from gui.contextMenu import ContextMenuSingle
from service.fit import Fit
_t = wx.GetTranslation
class ProjectItem(ContextMenuSingle):
visibilitySetting = 'project'
def __init__(self):
self.mainFrame = gui.mainFrame.MainFrame.getInstance()
def display(self, callingWindow, srcContext, mainItem):
if srcContext not in ("marketItemGroup", "marketItemMisc") or self.mainFrame.getActiveFit() is None:
return False
if mainItem is None:
return False
sFit = Fit.getInstance()
fitID = self.mainFrame.getActiveFit()
fit = sFit.getFit(fitID)
if fit.isStructure:
return False
return mainItem.isType("projected")
def getText(self, callingWindow, itmContext, mainItem):
return _t("Project {0} onto Fit").format(itmContext)
def activate(self, callingWindow, fullContext, mainItem, i):
fitID = self.mainFrame.getActiveFit()
if mainItem.isModule:
success = self.mainFrame.command.Submit(cmd.GuiAddProjectedModuleCommand(fitID=fitID, itemID=mainItem.ID))
elif mainItem.isDrone:
success = self.mainFrame.command.Submit(cmd.GuiAddProjectedDroneCommand(fitID=fitID, itemID=mainItem.ID))
elif mainItem.isFighter:
success = self.mainFrame.command.Submit(cmd.GuiAddProjectedFighterCommand(fitID=fitID, itemID=mainItem.ID))
else:
success = False
if success:
self.mainFrame.additionsPane.select('Projected', focus=False)
ProjectItem.register()
| gpl-3.0 | 5,919,210,914,069,837,000 | 31.6 | 119 | 0.682822 | false |
techtonik/warehouse | warehouse/views.py | 1 | 1071 | # Copyright 2013 Donald Stufft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
from warehouse.utils import cache, fastly, render_response
@cache(browser=1, varnish=120)
@fastly("index")
def index(app, request):
return render_response(
app, request, "index.html",
project_count=app.db.packaging.get_project_count(),
download_count=app.db.packaging.get_download_count(),
recently_updated=app.db.packaging.get_recently_updated(),
)
| apache-2.0 | -4,845,347,867,932,633,000 | 37.25 | 74 | 0.741363 | false |
CamDavidsonPilon/lifelines | lifelines/utils/lowess.py | 1 | 2541 | # -*- coding: utf-8 -*-
"""
This module implements the Lowess function for nonparametric regression.
Functions:
lowess Fit a smooth nonparametric regression curve to a scatterplot.
For more information, see
William S. Cleveland: "Robust locally weighted regression and smoothing
scatterplots", Journal of the American Statistical Association, December 1979,
volume 74, number 368, pp. 829-836.
William S. Cleveland and Susan J. Devlin: "Locally weighted regression: An
approach to regression analysis by local fitting", Journal of the American
Statistical Association, September 1988, volume 83, number 403, pp. 596-610.
"""
# Authors: Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
# Slight updates in lifelines 0.16.0+, 2018
from math import ceil
import warnings
import numpy as np
from scipy import linalg
def lowess(x, y, f=2.0 / 3.0, iterations=1):
"""lowess(x, y, f=2./3., iter=3) -> yest
Lowess smoother: Robust locally weighted regression.
The lowess function fits a nonparametric regression curve to a scatterplot.
The arrays x and y contain an equal number of elements; each pair
(x[i], y[i]) defines a data point in the scatterplot. The function returns
the estimated (smooth) values of y.
The smoothing span is given by f. A larger value for f will result in a
    smoother curve. The number of robustifying iterations is given by
    iterations. The function will run faster with a smaller number of iterations.
"""
n = len(x)
r = int(ceil(f * n))
h = np.clip([np.sort(np.abs(x - x[i]))[r] for i in range(n)], 1e-8, np.inf)
w = np.clip(np.abs((x[:, None] - x[None, :]) / h), 0.0, 1.0)
w = (1 - w ** 3) ** 3
yest = np.zeros(n)
delta = np.ones(n)
for _ in range(iterations):
for i in range(n):
weights = delta * w[:, i]
b = np.array([np.sum(weights * y), np.sum(weights * y * x)])
A = np.array([[np.sum(weights), np.sum(weights * x)], [np.sum(weights * x), np.sum(weights * x * x)]])
# I think it is safe to assume this.
# pylint: disable=unexpected-keyword-arg
try:
beta = linalg.solve(A, b, assume_a="pos", check_finite=False)
except np.linalg.LinAlgError:
beta = [0, 0]
yest[i] = beta[0] + beta[1] * x[i]
residuals = y - yest
s = np.median(np.abs(residuals))
delta = np.clip(residuals / (6.0 * s), -1, 1)
delta = (1 - delta ** 2) ** 2
return yest
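# A minimal usage sketch (the sine data below is illustrative and not part of
# lifelines):
#
#   import numpy as np
#   x = np.linspace(0, 2 * np.pi, 100)
#   y = np.sin(x) + 0.3 * np.random.randn(100)
#   smoothed = lowess(x, y, f=0.25, iterations=3)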
| mit | -498,146,806,779,797,570 | 38.703125 | 114 | 0.637151 | false |
consultit/Ely | ely/direct/data_structures_and_algorithms/ch05/high_scores.py | 1 | 3387 | # Copyright 2013, Michael H. Goldwasser
#
# Developed for use with the book:
#
# Data Structures and Algorithms in Python
# Michael T. Goodrich, Roberto Tamassia, and Michael H. Goldwasser
# John Wiley & Sons, 2013
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
class GameEntry:
"""Represents one entry of a list of high scores."""
def __init__(self, name, score):
"""Create an entry with given name and score."""
self._name = name
self._score = score
def get_name(self):
"""Return the name of the person for this entry."""
return self._name
def get_score(self):
"""Return the score of this entry."""
return self._score
def __str__(self):
"""Return string representation of the entry."""
return '({0}, {1})'.format(self._name, self._score) # e.g., '(Bob, 98)'
class Scoreboard:
"""Fixed-length sequence of high scores in nondecreasing order."""
def __init__(self, capacity=10):
"""Initialize scoreboard with given maximum capacity.
All entries are initially None.
"""
self._board = [None] * capacity # reserve space for future scores
self._n = 0 # number of actual entries
def __getitem__(self, k):
"""Return entry at index k."""
return self._board[k]
def __str__(self):
"""Return string representation of the high score list."""
return '\n'.join(str(self._board[j]) for j in range(self._n))
def add(self, entry):
"""Consider adding entry to high scores."""
score = entry.get_score()
# Does new entry qualify as a high score?
# answer is yes if board not full or score is higher than last entry
good = self._n < len(self._board) or score > self._board[-1].get_score()
if good:
if self._n < len(self._board): # no score drops from list
self._n += 1 # so overall number increases
# shift lower scores rightward to make room for new entry
j = self._n - 1
while j > 0 and self._board[j-1].get_score() < score:
self._board[j] = self._board[j-1] # shift entry from j-1 to j
j -= 1 # and decrement j
self._board[j] = entry # when done, add new entry
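# Note: add() keeps the board sorted with an O(n) shift per insertion, which
# is appropriate for the small, fixed capacities this class is meant for.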
if __name__ == '__main__':
board = Scoreboard(5)
for e in (
('Rob', 750), ('Mike',1105), ('Rose', 590), ('Jill', 740),
('Jack', 510), ('Anna', 660), ('Paul', 720), ('Bob', 400),
):
ge = GameEntry(e[0], e[1])
board.add(ge)
print('After considering {0}, scoreboard is:'.format(ge))
print(board)
print()
| lgpl-3.0 | -1,765,403,955,418,676,700 | 36.633333 | 81 | 0.581931 | false |
3dbug/blender | DialScale.py | 1 | 4821 | bl_info = {
"name": "Dial and Scale",
"author": "stacker, sambler",
"version": (1, 2),
"blender": (2, 80, 0),
"location": "3DView > Add > Curve > Dial and Scale",
"description": "Add an array of text number objects or watch dials.",
"warning": "",
"wiki_url": "https://github.com/3dbug/blender/blob/master/DialScale.py",
"tracker_url": "https://github.com/3dbug/blender/issues",
"category": "Add Curve"}
import bpy,math
import mathutils
from bpy.props import IntProperty,FloatProperty,StringProperty,EnumProperty,BoolProperty
fonts_list = []
def getFonts(self, context):
fonts_list = []
for afont in bpy.data.fonts:
fonts_list.append(( afont.name, afont.name,""))
if len(fonts_list) == 0:
fonts_list.append(("Bfont","Bfont",""))
return fonts_list
class DialScale(bpy.types.Operator):
""" Creates an array of text elements"""
bl_idname = "curve.dial_scale"
bl_label = "Create Dials and Scales"
bl_options = {'REGISTER', 'UNDO'}
start : IntProperty(name="Start",description="Start value",min=-10000, max=10000,default=1 )
count : IntProperty(name="Count",description="Number of items to create",min=1, max=100, default=12 )
step : IntProperty(name="Step",description="Increment of number",min=-10000, max=10000, default=1 )
offset : FloatProperty(name="Offset",description="Distance",min=0.01, max=100.0, default=2.5 )
dialType : EnumProperty( name="Dial Type",description="Basis of creating the dial", items=[("circular","circular","A round dial"),("horizontal","horizontal","A horizontal scale"),("vertical","vertical","A vertical scale")], default="circular")
rotate : FloatProperty(name="Rotation",description="Start rotation of first item",min=-360.0, max=360.0, default=0.0 )
segment : FloatProperty(name="Segment",description="Circle Segment",min=-360.0, max=360.0, default=360.0 )
ticks : IntProperty(name="Ticks",description="Number of ticks between numbers",min=0, max=100, default=5 )
tickOffset : FloatProperty(name="Tick Offset",description="Distance to offset the Ticks",min=-100.0, max=100.0, default=1.3 )
font : EnumProperty( name="Fonts",items=getFonts)
def execute(self, context):
x = -self.offset
y = 0.0
angle = math.radians( self.rotate ) - math.pi/2
angle_step = -math.radians( self.segment ) / self.count
angle = angle - angle_step
pos = self.start - 1
num = self.start
end = self.count + self.start - 1
while pos < end:
if self.dialType == "circular":
vec3d = mathutils.Vector((self.offset, 0, 0))
vpos = vec3d @ mathutils.Matrix.Rotation( angle , 3, 'Z')
elif self.dialType == "horizontal":
x = x + self.offset
vpos=(x,0,0)
else:
y = y + self.offset
vpos = (0,y,0)
bpy.ops.object.text_add()
ob=bpy.context.object
ob.data.body = str(num)
ob.data.font = bpy.data.fonts[ self.font ]
ob.data.align_x = ob.data.align_y = 'CENTER'
bpy.ops.object.origin_set(type='GEOMETRY_ORIGIN')
bpy.ops.transform.translate(value=vpos)
for t in range(0,self.ticks):
bpy.ops.mesh.primitive_plane_add(size=.04 if t == 0 else .02)
if self.dialType == "circular":
tick_step = angle_step / self.ticks
vec3d = mathutils.Vector((self.offset*self.tickOffset, 0, 0))
tpos = vec3d @ mathutils.Matrix.Rotation( (angle + (t*tick_step)) , 3, 'Z')
bpy.ops.transform.resize(value=(6,1,1))
                    bpy.ops.transform.rotate(value=angle + t * tick_step, orient_axis='Z')  # Blender 2.80 renamed 'axis' to 'orient_axis'
elif self.dialType == "horizontal" and pos < end-1:
tick_step = self.offset / self.ticks
tpos=(x+t*tick_step,self.tickOffset,0)
bpy.ops.transform.resize(value=(1,6,1))
elif pos < end -1:
tick_step = self.offset / self.ticks
tpos=(self.tickOffset,y+t*tick_step,0)
bpy.ops.transform.resize(value=(6,1,1))
bpy.ops.transform.translate(value=tpos)
angle = angle - angle_step
pos = pos + 1
num = num + self.step
return {'FINISHED'}
def menu_func(self, context):
self.layout.operator(DialScale.bl_idname, icon='PLUGIN')
def register():
bpy.utils.register_class(DialScale)
bpy.types.VIEW3D_MT_curve_add.append(menu_func)
def unregister():
bpy.utils.unregister_class(DialScale)
bpy.types.VIEW3D_MT_curve_add.remove(menu_func)
if __name__ == "__main__":
register()
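    # A minimal invocation sketch (arguments shown are this operator's own
    # properties; run from Blender's Python console after registration):
    #   bpy.ops.curve.dial_scale(count=12, ticks=5, dialType='circular')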
| gpl-3.0 | -1,068,458,594,242,140,500 | 42.432432 | 247 | 0.594897 | false |
exekias/django-achilles | doc/conf.py | 1 | 8035 | # -*- coding: utf-8 -*-
#
# Django Achilles documentation build configuration file, created by
# sphinx-quickstart on Mon Dec 9 01:46:37 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
from django.conf import settings
settings.configure()
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.coverage']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Django Achilles'
copyright = u'2013, Carlos Pérez-Aradros Herce <[email protected]>'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = __import__('achilles').get_version()
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'DjangoAchillesdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'DjangoAchilles.tex', u'Django Achilles Documentation',
u'Carlos Pérez-Aradros Herce', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'djangoachilles', u'Django Achilles Documentation',
[u'Carlos Pérez-Aradros Herce'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'DjangoAchilles', u'Django Achilles Documentation',
u'Carlos Pérez-Aradros Herce', 'DjangoAchilles', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
autoclass_content = 'both'
| apache-2.0 | 7,124,134,120,415,301,000 | 31.51417 | 87 | 0.708505 | false |
openstack/mistral | mistral/api/controllers/v2/sub_execution.py | 1 | 4240 | # Copyright 2020 - Nokia Networks.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_log import log as logging
from pecan import request
from pecan import rest
import wsmeext.pecan as wsme_pecan
from mistral.api.controllers.v2 import resources
from mistral.api.controllers.v2 import types
from mistral.db.v2 import api as db_api
from mistral.utils import rest_utils
from mistral.workflow import states
LOG = logging.getLogger(__name__)
def get_task_sub_executions_list(task_ex_id, filters, cur_depth):
task_sub_execs = []
with db_api.transaction():
task_ex = db_api.get_task_execution(task_ex_id)
if filters['errors_only'] and task_ex.state != states.ERROR:
return []
child_wf_executions = task_ex.workflow_executions
for c_ex in child_wf_executions:
task_sub_execs.extend(
get_execution_sub_executions_list(
c_ex.id,
filters,
cur_depth
)
)
return task_sub_execs
def get_execution_sub_executions_list(wf_ex_id, filters, cur_depth):
max_depth = filters['max_depth']
include_output = filters['include_output']
ex_sub_execs = []
if 0 <= max_depth < cur_depth:
return []
with db_api.transaction():
wf_ex = db_api.get_workflow_execution(wf_ex_id)
wf_resource = _get_wf_resource_from_db_model(
wf_ex,
include_output)
ex_sub_execs.append(wf_resource)
task_execs = wf_ex.task_executions
for t_ex in task_execs:
task_sub_executions = get_task_sub_executions_list(
t_ex.id,
filters,
cur_depth + 1
)
ex_sub_execs.extend(task_sub_executions)
return ex_sub_execs
def _get_wf_resource_from_db_model(wf_ex, include_output):
if include_output:
rest_utils.load_deferred_fields(wf_ex, ['params', 'input', 'output'])
else:
rest_utils.load_deferred_fields(wf_ex, ['params', 'input'])
return resources.Execution.from_db_model(wf_ex)
def _get_sub_executions(origin, id, filters):
if origin == 'execution':
return get_execution_sub_executions_list(id, filters, cur_depth=0)
else:
return get_task_sub_executions_list(id, filters, cur_depth=0)
class SubExecutionsController(rest.RestController):
@rest_utils.wrap_wsme_controller_exception
@wsme_pecan.wsexpose(resources.Executions, types.uuid, bool, int, bool)
def get(self, id, errors_only=False, max_depth=-1, include_output=False):
"""Return workflow execution report.
:param id: The ID of the workflow execution or task execution
to get the sub-executions of.
:param errors_only: Optional. If True, only error paths of the
execution tree are returned .
:param max_depth: Optional. Limits the depth of recursion while
obtaining the execution tree. If a value of the
flag is a negative number then no limit is set.
:param include_output: Optional. Include the output for all executions
in the list.
"""
origin = 'execution' if request.path.startswith('/v2/executions') \
else 'task'
LOG.info(
"Fetching sub executions of %s [id=%s]",
origin,
id
)
filters = {
'errors_only': errors_only,
'max_depth': max_depth,
'include_output': include_output
}
sub_executions_resource = _get_sub_executions(origin, id, filters)
return resources.Executions.convert_with_links(
sub_executions_resource,
request.application_url,
)
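# A request sketch (IDs are illustrative; the route prefix is inferred from
# the request.path check above):
#   GET /v2/executions/<execution-id>/sub_executions?errors_only=true&max_depth=2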
| apache-2.0 | -6,536,085,960,045,563,000 | 30.641791 | 78 | 0.635849 | false |
liaralabs/swizzin | scripts/deluge.UpdateTracker.py | 1 | 2540 | # from https://github.com/s0undt3ch/Deluge/blob/master/deluge/ui/console/commands/update-tracker.py
# update-tracker.py
#
# Copyright (C) 2008-2009 Ido Abramovich <[email protected]>
# Copyright (C) 2009 Andrew Resch <[email protected]>
#
# Deluge is free software.
#
# You may redistribute it and/or modify it under the terms of the
# GNU General Public License, as published by the Free Software
# Foundation; either version 3 of the License, or (at your option)
# any later version.
#
# deluge is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with deluge. If not, write to:
# The Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor
# Boston, MA 02110-1301, USA.
#
# In addition, as a special exception, the copyright holders give
# permission to link the code of portions of this program with the OpenSSL
# library.
# You must obey the GNU General Public License in all respects for all of
# the code used other than OpenSSL. If you modify file(s) with this
# exception, you may extend this exception to your version of the file(s),
# but you are not obligated to do so. If you do not wish to do so, delete
# this exception statement from your version. If you delete this exception
# statement from all source files in the program, then also delete it here.
#
#
from deluge.ui.console.main import BaseCommand
import deluge.ui.console.colors as colors
from deluge.ui.client import client
import deluge.component as component
from optparse import make_option
class Command(BaseCommand):
"""Update tracker for torrent(s)"""
usage = "Usage: update-tracker [ * | <torrent-id> [<torrent-id> ...] ]"
aliases = ['reannounce']
def handle(self, *args, **options):
self.console = component.get("ConsoleUI")
if len(args) == 0:
self.console.write(self.usage)
return
if len(args) > 0 and args[0].lower() == '*':
args = [""]
torrent_ids = []
for arg in args:
torrent_ids.extend(self.console.match_torrent(arg))
client.core.force_reannounce(torrent_ids)
def complete(self, line):
# We use the ConsoleUI torrent tab complete method
return component.get("ConsoleUI").tab_complete_torrent(line)
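# Example (typed inside deluge-console; the partial id is illustrative, and
# matching behaviour is assumed from ConsoleUI.match_torrent):
#   update-tracker *        # reannounce every torrent
#   update-tracker 2c5b93   # reannounce torrents matching this id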
| gpl-3.0 | -8,216,069,250,364,968,000 | 38.076923 | 99 | 0.699213 | false |
aesuli/rss-feed-monitor | dirs_to_csv.py | 1 | 1995 | import argparse
import csv
import logging
import re
import os
import sys
BLANK_SUB = '_'
LABEL_SEPARATOR = ':'
ID_SEPARATOR = '_'
def read_data(directory):
logger = logging.getLogger(sys.argv[0])
for subdir in next(os.walk(directory))[1]:
label = subdir
subpath = os.path.join(directory, subdir)
for file in next(os.walk(subpath))[2]:
filename = os.path.join(subpath, file)
logger.info(filename)
with open(filename, mode='r', encoding='utf-8') as inputfile:
text = inputfile.read()
yield directory + ID_SEPARATOR + file, text, label
if __name__ == '__main__':
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-v', '--verbose', help='Verbose output', action='store_true')
parser.add_argument('name', help='name of the classification schema', type=str)
parser.add_argument('output_file', help='output csv file', type=str)
parser.add_argument('directory',
help='Paths to directories contaning the labeled documents (label=subdir, document=file in subdir)',
type=str,
nargs='+')
args = parser.parse_args()
logger = logging.getLogger(sys.argv[0])
ch = logging.StreamHandler()
logger.addHandler(ch)
if args.verbose:
logger.setLevel(logging.INFO)
logger.info('Verbose output')
schema_name = args.name
count = 0
with open(args.output_file, mode='w', encoding='utf-8') as output:
csvout = csv.writer(output)
for directory in args.directory:
data_generator = read_data(directory)
for id, text, label in data_generator:
no_space_label = re.sub('\s', BLANK_SUB, schema_name + LABEL_SEPARATOR + label)
csvout.writerow([id, text, no_space_label])
count += 1
logger.info('Processed '+ str(count) + ' files.')
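# Example invocation (schema name, output file and directories are
# illustrative):
#   python dirs_to_csv.py -v topics labeled.csv ./corpus_a ./corpus_b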
| gpl-3.0 | -1,952,062,115,747,103,200 | 35.272727 | 124 | 0.61203 | false |
dichen001/Go4Jobs | JackChen/linked_list/328. Odd Even Linked List.py | 1 | 1154 | """
Given a singly linked list, group all odd nodes together followed by the even nodes. Please note here we are talking about the node number and not the value in the nodes.
You should try to do it in place. The program should run in O(1) space complexity and O(nodes) time complexity.
Example:
Given 1->2->3->4->5->NULL,
return 1->3->5->2->4->NULL.
Note:
The relative order inside both the even and odd groups should remain as it was in the input.
The first node is considered odd, the second node even and so on ...
"""
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
def oddEvenList(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
if not head or not head.next:
return head
odd, even = head, head.next
o, e = odd, even
while odd and even and odd.next and even.next:
odd.next = odd.next.next
odd = odd.next
even.next = even.next.next
even = even.next
odd.next = e
return o
| gpl-3.0 | -3,072,681,138,911,164,400 | 29.368421 | 170 | 0.613518 | false |
childresslab/MicrocavityExp1 | hardware/laser/simple_laser_dummy.py | 2 | 6456 | # -*- coding: utf-8 -*-
"""
This module acts like a laser.
Qudi is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Qudi is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Qudi. If not, see <http://www.gnu.org/licenses/>.
Copyright (c) the Qudi Developers. See the COPYRIGHT.txt file at the
top-level directory of this distribution and at <https://github.com/Ulm-IQO/qudi/>
"""
from core.module import Base
from interface.simple_laser_interface import SimpleLaserInterface
from interface.simple_laser_interface import LaserState
from interface.simple_laser_interface import ShutterState
from interface.simple_laser_interface import ControlMode
import math
import random
import time
class SimpleLaserDummy(Base, SimpleLaserInterface):
"""
Lazor dummy
"""
_modclass = 'laserdummy'
_modtype = 'hardware'
def __init__(self, **kwargs):
""" """
super().__init__(**kwargs)
self.lstate = LaserState.OFF
self.shutter = ShutterState.CLOSED
self.mode = ControlMode.POWER
self.current_setpoint = 0
self.power_setpoint = 0
def on_activate(self):
""" Activate module.
"""
pass
def on_deactivate(self):
""" Deactivate module.
"""
pass
def get_power_range(self):
""" Return optical power range
@return (float, float): power range
"""
return (0, 0.250)
def get_power(self):
""" Return laser power
@return float: Laser power in watts
"""
return self.power_setpoint * random.gauss(1, 0.01)
def get_power_setpoint(self):
""" Return optical power setpoint.
@return float: power setpoint in watts
"""
return self.power_setpoint
def set_power(self, power):
""" Set power setpoint.
@param float power: power setpoint
@return float: actual new power setpoint
"""
self.power_setpoint = power
self.current_setpoint = math.sqrt(4*self.power_setpoint)*100
return self.power_setpoint
def get_current_unit(self):
""" Get unit for laser current.
@return str: unit
"""
return '%'
def get_current_range(self):
""" Get laser current range.
@return (float, float): laser current range
"""
return (0, 100)
def get_current(self):
""" Get current laser current
@return float: laser current in current curent units
"""
return self.current_setpoint * random.gauss(1, 0.05)
def get_current_setpoint(self):
""" Get laser curent setpoint
@return float: laser current setpoint
"""
return self.current_setpoint
def set_current(self, current):
""" Set laser current setpoint
@prarm float current: desired laser current setpoint
@return float: actual laser current setpoint
"""
self.current_setpoint = current
self.power_setpoint = math.pow(self.current_setpoint/100, 2) / 4
return self.current_setpoint
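    # Note: set_power and set_current implement inverses of the same dummy
    # model, P = (I/100)**2 / 4, so a 100 % current setpoint corresponds to
    # 0.25 W, the top of the power range reported by get_power_range.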
def allowed_control_modes(self):
""" Get supported control modes
@return list(): list of supported ControlMode
"""
return [ControlMode.POWER, ControlMode.CURRENT]
def get_control_mode(self):
""" Get the currently active control mode
@return ControlMode: active control mode
"""
return self.mode
def set_control_mode(self, control_mode):
""" Set the active control mode
@param ControlMode control_mode: desired control mode
@return ControlMode: actual active ControlMode
"""
self.mode = control_mode
return self.mode
def on(self):
""" Turn on laser.
@return LaserState: actual laser state
"""
time.sleep(1)
self.lstate = LaserState.ON
return self.lstate
def off(self):
""" Turn off laser.
@return LaserState: actual laser state
"""
time.sleep(1)
self.lstate = LaserState.OFF
return self.lstate
def get_laser_state(self):
""" Get laser state
@return LaserState: actual laser state
"""
return self.lstate
def set_laser_state(self, state):
""" Set laser state.
@param LaserState state: desired laser state
@return LaserState: actual laser state
"""
time.sleep(1)
self.lstate = state
return self.lstate
def get_shutter_state(self):
""" Get laser shutter state
@return ShutterState: actual laser shutter state
"""
return self.shutter
def set_shutter_state(self, state):
""" Set laser shutter state.
@param ShutterState state: desired laser shutter state
@return ShutterState: actual laser shutter state
"""
time.sleep(1)
self.shutter = state
return self.shutter
def get_temperatures(self):
""" Get all available temperatures.
@return dict: dict of temperature namce and value in degrees Celsius
"""
return {
'psu': 32.2 * random.gauss(1, 0.1),
'head': 42.0 * random.gauss(1, 0.2)
}
def set_temperatures(self, temps):
""" Set temperatures for lasers with tunable temperatures.
@return {}: empty dict, dummy not a tunable laser
"""
return {}
def get_temperature_setpoints(self):
""" Get temperature setpoints.
@return dict: temperature setpoints for temperature tunable lasers
"""
return {'psu': 32.2, 'head': 42.0}
def get_extra_info(self):
""" Multiple lines of dignostic information
@return str: much laser, very useful
"""
return "Dummy laser v0.9.9\nnot used very much\nvery cheap price very good quality"
| gpl-3.0 | -5,375,667,728,137,546,000 | 26.355932 | 91 | 0.606103 | false |
zhounetworking/build_job | lib/build_job.py | 1 | 5769 | #!/usr/bin/python
#-*- coding:utf8 -*-
#
import os
import sys
import time
import json
import traceback
import jenkinsapi
from functools import partial
from jenkinsapi.jenkins import Jenkins
from jenkinsapi.custom_exceptions import WillNotBuild
from conf.config import * # job_dict,jenkins_dic
from conf.game_config import game_info
from lib.utils import Mail,log,notification
from task.manage_task import revoke_task,get_active_task_list
import socket
socket.setdefaulttimeout(connect_jenkins_timeout)
#def build_job(job_name, jenkins_url="",username="",passwd="",params={}):
def build_job(job_name, jenkins_url="",username="",passwd=""):
status = {
'error' : [],
'stat' : None,
'last_no' : None,
'next_no' : None,
'params' : None,
'run_time' : None,
'job_name' : job_name[0],
}
if not isinstance(job_name, tuple):
status['error'].append("job_name_not_tuple")
return status
try:
j = Jenkins( jenkins_url,username,passwd )
except:
status['error'].append("connect_jenkins_err")
return status
if job_name[0] not in j.get_jobs_list():
status['error'].append("job_not_exist")
return status
    # add parameters to the jenkins job
# if params:
# job = j.get_job(job_name[0],params)
# else:
# job = j.get_job(job_name[0])
job = j.get_job(job_name[0])
    # when the job runs for the first time, get_last_buildnumber raises an error
try:
s_last = job.get_last_buildnumber()
except:
s_last = 0
# if job running now, stop it!
if job.is_queued():
status['error'].append("before_is_queued")
return status
elif job.is_running():
s_last_job = job.get_last_build()
if s_last_job.stop():
status['stop_before_running'] = True
else:
status['stop_before_running'] = False
return status
try:
if len(job_name) > 1:
j.build_job( job_name[0], job_name[1] )
status['params'] = job_name[1]
else:
j.build_job( job_name[0])
except WillNotBuild:
status['error'].append("job_run_err")
return status
#return traceback.print_exc()
except Exception:
log.exception('otherError')
status['error'].append("other_error")
return status
# In the quiet period of jenkins
while job.is_queued():
time.sleep(1)
else:
if job.get_last_buildnumber() > s_last:
e_last = job.get_last_build()
else:
status['error'].append("job_number_err")
return status
while e_last.is_running():
time.sleep(1)
else:
if e_last.is_good() and e_last.get_status() == 'SUCCESS':
status['stat'] = 'SUCCESS'
else:
status['stat'] = 'FAILURE'
status['error'].append("job_appfail_err")
status['last_no'] = s_last
status['next_no'] = e_last.buildno
status['task_info']= e_last.get_console()
status['run_time'] = e_last.get_duration().total_seconds()
return status
def choose_jenkins(jenkins_job_list_txt):
'''
    jenkins_job_list :
        a tuple built from the job names defined in conf/config.py, e.g. ('testjob',)
    jenkins_dic :
        the jenkins configuration
'''
#job = jenkins_job_list[0]
job = jenkins_job_list_txt
if job.startswith('zgh') or job.startswith('zhanguo'):
jenkins_info = jenkins_dic['zgh']
elif job.startswith('lme'):
jenkins_info = jenkins_dic['lme']
elif job.startswith('pp2'):
jenkins_info = jenkins_dic['pp2']
elif job.startswith('pp1') or job.startswith('test'):
jenkins_info = jenkins_dic['pp1']
else:
raise Exception, "No jenkins config info"
print "job_list: %s ,url: %s"%(job,jenkins_info['url'])
return jenkins_info
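# Example: choose_jenkins('zgh_daily_build') returns jenkins_dic['zgh'], since
# the prefix of the job name selects the Jenkins instance (the job name here
# is illustrative).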
#def build_job_handle(jenkins_info,jenkins_job,params={}):
def build_job_handle(jenkins_info,jenkins_job):
jenkins_url = jenkins_info['url']
jenkins_user = jenkins_info['user']
jenkins_passwd = jenkins_info['passwd']
build_job_handle = partial(
build_job,
jenkins_url=jenkins_url,
username=jenkins_user,
passwd=jenkins_passwd,
# params=params,
)
return build_job_handle(jenkins_job)
def check_job_status(res_list,p_id,u_type):
# add qa mail
MAIL_TO = mail_to[:]
MAIL_TO.extend(mail_to_qa)
# add designer mail
if game_info[p_id]['messages']['inform']:
try:
MAIL_TO.extend(game_info[p_id]['messages']['design_mail'])
print('add designer mail: %s'%game_info[p_id]['messages']['design_mail'])
except:
print('get platform name fail [ %s ]'%game_info[p_id][u_type]['context'])
mail = partial( Mail,
user = papasg_user,
passwd = papasg_passwd,
mail_to= MAIL_TO,
smtp_server = smtp_server,
subject = subject
)
success = True
for res in res_list:
if res['stat'] != 'SUCCESS':
success = False
err_job_name = res['job_name']
if success:
mail_text = mail_end_notify_ok
else:
mail_text = mail_end_notify_fail % err_job_name
mail(subject=game_info[p_id][u_type]['context'],mail_text=mail_text)
if __name__ == '__main__':
jenkins_url_test = 'http://jenkins.hrgame.com:8080/'
stat = build_job(('客户端_04_同步资源到正式环境_FL越狱!!',{'ok':'no','Bool':False}),jenkins_url_test)
if stat['stat']:
notification(stat)
else:
print 'fail'
    check_job_status([ stat ])  # NOTE: check_job_status also expects p_id and u_type arguments
print json.dumps(stat,indent=3)
print stat['job_name']
| apache-2.0 | 5,577,391,660,055,654,000 | 26 | 91 | 0.579428 | false |
Azure/WALinuxAgent | azurelinuxagent/common/osutil/default.py | 1 | 60738 | #
# Copyright 2018 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.6+ and Openssl 1.0+
#
import base64
import datetime
import errno
import fcntl
import glob
import json
import multiprocessing
import os
import platform
import pwd
import re
import shutil
import socket
import struct
import sys
import time
from pwd import getpwall
import array
import azurelinuxagent.common.conf as conf
import azurelinuxagent.common.logger as logger
import azurelinuxagent.common.utils.fileutil as fileutil
import azurelinuxagent.common.utils.shellutil as shellutil
import azurelinuxagent.common.utils.textutil as textutil
from azurelinuxagent.common.exception import OSUtilError
from azurelinuxagent.common.future import ustr, array_to_bytes
from azurelinuxagent.common.utils.cryptutil import CryptUtil
from azurelinuxagent.common.utils.flexible_version import FlexibleVersion
from azurelinuxagent.common.utils.networkutil import RouteEntry, NetworkInterfaceCard, AddFirewallRules
from azurelinuxagent.common.utils.shellutil import CommandError
__RULES_FILES__ = ["/lib/udev/rules.d/75-persistent-net-generator.rules",
"/etc/udev/rules.d/70-persistent-net.rules"]
"""
Define distro specific behavior. OSUtil class defines default behavior
for all distros. Each concrete distro classes could overwrite default behavior
if needed.
"""
_IPTABLES_VERSION_PATTERN = re.compile("^[^\d\.]*([\d\.]+).*$") # pylint: disable=W1401
_IPTABLES_LOCKING_VERSION = FlexibleVersion('1.4.21')
def _add_wait(wait, command):
"""
If 'wait' is True, adds the wait option (-w) to the given iptables command line
"""
if wait:
command.insert(1, "-w")
return command
def _get_iptables_version_command():
return ["iptables", "--version"]
def _get_firewall_accept_command(wait, command, destination, owner_uid):
return AddFirewallRules.get_iptables_accept_command(wait, command, destination, owner_uid)
def _get_firewall_drop_command(wait, command, destination):
return AddFirewallRules.get_iptables_drop_command(wait, command, destination)
def _get_firewall_list_command(wait):
return _add_wait(wait, ["iptables", "-t", "security", "-L", "-nxv"])
def _get_firewall_packets_command(wait):
return _add_wait(wait, ["iptables", "-t", "security", "-L", "OUTPUT", "--zero", "OUTPUT", "-nxv"])
# Precisely delete the rules created by the agent.
# this rule was used <= 2.2.25. This rule helped to validate our change, and determine impact.
def _get_firewall_delete_conntrack_accept_command(wait, destination):
return _add_wait(wait,
["iptables", "-t", "security", "-D", "OUTPUT", "-d", destination, "-p", "tcp", "-m", "conntrack",
"--ctstate", "INVALID,NEW", "-j", "ACCEPT"])
def _get_firewall_delete_owner_accept_command(wait, destination, owner_uid):
return _add_wait(wait, ["iptables", "-t", "security", "-D", "OUTPUT", "-d", destination, "-p", "tcp", "-m", "owner",
"--uid-owner", str(owner_uid), "-j", "ACCEPT"])
def _get_firewall_delete_conntrack_drop_command(wait, destination):
return _add_wait(wait,
["iptables", "-t", "security", "-D", "OUTPUT", "-d", destination, "-p", "tcp", "-m", "conntrack",
"--ctstate", "INVALID,NEW", "-j", "DROP"])
PACKET_PATTERN = "^\s*(\d+)\s+(\d+)\s+DROP\s+.*{0}[^\d]*$" # pylint: disable=W1401
ALL_CPUS_REGEX = re.compile('^cpu .*')
_enable_firewall = True
DMIDECODE_CMD = 'dmidecode --string system-uuid'
PRODUCT_ID_FILE = '/sys/class/dmi/id/product_uuid'
UUID_PATTERN = re.compile(
r'^\s*[A-F0-9]{8}(?:\-[A-F0-9]{4}){3}\-[A-F0-9]{12}\s*$',
re.IGNORECASE)
IOCTL_SIOCGIFCONF = 0x8912
IOCTL_SIOCGIFFLAGS = 0x8913
IOCTL_SIOCGIFHWADDR = 0x8927
IFNAMSIZ = 16
IP_COMMAND_OUTPUT = re.compile('^\d+:\s+(\w+):\s+(.*)$') # pylint: disable=W1401
STORAGE_DEVICE_PATH = '/sys/bus/vmbus/devices/'
GEN2_DEVICE_ID = 'f8b3781a-1e82-4818-a1c3-63d806ec15bb'
class DefaultOSUtil(object):
def __init__(self):
self.agent_conf_file_path = '/etc/waagent.conf'
self.selinux = None
self.disable_route_warning = False
self.jit_enabled = False
self.service_name = self.get_service_name()
@staticmethod
def get_service_name():
return "waagent"
@staticmethod
def get_systemd_unit_file_install_path():
return "/lib/systemd/system"
@staticmethod
def get_agent_bin_path():
return "/usr/sbin"
def get_firewall_dropped_packets(self, dst_ip=None):
# If a previous attempt failed, do not retry
global _enable_firewall # pylint: disable=W0603
if not _enable_firewall:
return 0
try:
wait = self.get_firewall_will_wait()
try:
output = shellutil.run_command(_get_firewall_packets_command(wait))
pattern = re.compile(PACKET_PATTERN.format(dst_ip))
for line in output.split('\n'):
m = pattern.match(line)
if m is not None:
return int(m.group(1))
except Exception as e:
if isinstance(e, CommandError) and e.returncode == 3: # pylint: disable=E1101
# Transient error that we ignore. This code fires every loop
# of the daemon (60m), so we will get the value eventually.
return 0
logger.warn("Failed to get firewall packets: {0}", ustr(e))
return -1
return 0
except Exception as e:
_enable_firewall = False
logger.warn("Unable to retrieve firewall packets dropped"
"{0}".format(ustr(e)))
return -1
def get_firewall_will_wait(self):
# Determine if iptables will serialize access
try:
output = shellutil.run_command(_get_iptables_version_command())
except Exception as e:
msg = "Unable to determine version of iptables: {0}".format(ustr(e))
logger.warn(msg)
raise Exception(msg)
m = _IPTABLES_VERSION_PATTERN.match(output)
if m is None:
msg = "iptables did not return version information: {0}".format(output)
logger.warn(msg)
raise Exception(msg)
wait = "-w" \
if FlexibleVersion(m.group(1)) >= _IPTABLES_LOCKING_VERSION \
else ""
return wait
def _delete_rule(self, rule):
"""
Continually execute the delete operation until the return
code is non-zero or the limit has been reached.
"""
for i in range(1, 100): # pylint: disable=W0612
try:
rc = shellutil.run_command(rule) # pylint: disable=W0612
except CommandError as e:
if e.returncode == 1:
return
if e.returncode == 2:
raise Exception("invalid firewall deletion rule '{0}'".format(rule))
def remove_firewall(self, dst_ip, uid):
# If a previous attempt failed, do not retry
global _enable_firewall # pylint: disable=W0603
if not _enable_firewall:
return False
try:
wait = self.get_firewall_will_wait()
# This rule was <= 2.2.25 only, and may still exist on some VMs. Until 2.2.25
# has aged out, keep this cleanup in place.
self._delete_rule(_get_firewall_delete_conntrack_accept_command(wait, dst_ip))
self._delete_rule(_get_firewall_delete_owner_accept_command(wait, dst_ip, uid))
self._delete_rule(_get_firewall_delete_conntrack_drop_command(wait, dst_ip))
return True
except Exception as e:
_enable_firewall = False
logger.info("Unable to remove firewall -- "
"no further attempts will be made: "
"{0}".format(ustr(e)))
return False
def remove_legacy_firewall_rule(self, dst_ip):
# This function removes the legacy firewall rule that was added <= 2.2.25.
# Not adding the global _enable_firewall check here as this will only be called once per service start and
        # we don't want the state of this call to affect other iptables calls.
try:
wait = self.get_firewall_will_wait()
# This rule was <= 2.2.25 only, and may still exist on some VMs. Until 2.2.25
# has aged out, keep this cleanup in place.
self._delete_rule(_get_firewall_delete_conntrack_accept_command(wait, dst_ip))
except Exception as error:
logger.info(
"Unable to remove legacy firewall rule, won't try removing it again. Error: {0}".format(ustr(error)))
def enable_firewall(self, dst_ip, uid):
# If a previous attempt failed, do not retry
global _enable_firewall # pylint: disable=W0603
if not _enable_firewall:
return False
try:
wait = self.get_firewall_will_wait()
# If the DROP rule exists, make no changes
try:
drop_rule = _get_firewall_drop_command(wait, "-C", dst_ip)
shellutil.run_command(drop_rule)
logger.verbose("Firewall appears established")
return True
except CommandError as e:
if e.returncode == 2:
self.remove_firewall(dst_ip, uid)
msg = "please upgrade iptables to a version that supports the -C option"
logger.warn(msg)
raise Exception(msg)
# Otherwise, append both rules
try:
AddFirewallRules.add_iptables_rules(wait, dst_ip, uid)
except Exception as error:
logger.warn(ustr(error))
raise
logger.info("Successfully added Azure fabric firewall rules")
try:
output = shellutil.run_command(_get_firewall_list_command(wait))
logger.info("Firewall rules:\n{0}".format(output))
except Exception as e:
logger.warn("Listing firewall rules failed: {0}".format(ustr(e)))
return True
except Exception as e:
_enable_firewall = False
logger.info("Unable to establish firewall -- "
"no further attempts will be made: "
"{0}".format(ustr(e)))
return False
@staticmethod
def _correct_instance_id(instance_id):
"""
Azure stores the instance ID with an incorrect byte ordering for the
first parts. For example, the ID returned by the metadata service:
D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8
will be found as:
544CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8
This code corrects the byte order such that it is consistent with
that returned by the metadata service.
"""
if not UUID_PATTERN.match(instance_id):
return instance_id
parts = instance_id.split('-')
return '-'.join([
textutil.swap_hexstring(parts[0], width=2),
textutil.swap_hexstring(parts[1], width=2),
textutil.swap_hexstring(parts[2], width=2),
parts[3],
parts[4]
])
def is_current_instance_id(self, id_that):
"""
Compare two instance IDs for equality, but allow that some IDs
may have been persisted using the incorrect byte ordering.
"""
id_this = self.get_instance_id()
logger.verbose("current instance id: {0}".format(id_this))
logger.verbose(" former instance id: {0}".format(id_that))
return id_this.lower() == id_that.lower() or \
id_this.lower() == self._correct_instance_id(id_that).lower()
def get_agent_conf_file_path(self):
return self.agent_conf_file_path
def get_instance_id(self):
"""
Azure records a UUID as the instance ID
First check /sys/class/dmi/id/product_uuid.
If that is missing, then extracts from dmidecode
If nothing works (for old VMs), return the empty string
"""
if os.path.isfile(PRODUCT_ID_FILE):
s = fileutil.read_file(PRODUCT_ID_FILE).strip()
else:
rc, s = shellutil.run_get_output(DMIDECODE_CMD)
if rc != 0 or UUID_PATTERN.match(s) is None:
return ""
return self._correct_instance_id(s.strip())
@staticmethod
def get_userentry(username):
try:
return pwd.getpwnam(username)
except KeyError:
return None
def is_sys_user(self, username):
"""
Check whether use is a system user.
If reset sys user is allowed in conf, return False
Otherwise, check whether UID is less than UID_MIN
"""
if conf.get_allow_reset_sys_user():
return False
userentry = self.get_userentry(username)
uidmin = None
try:
uidmin_def = fileutil.get_line_startingwith("UID_MIN",
"/etc/login.defs")
if uidmin_def is not None:
uidmin = int(uidmin_def.split()[1])
except IOError as e: # pylint: disable=W0612
pass
        if uidmin is None:
uidmin = 100
if userentry != None and userentry[2] < uidmin:
return True
else:
return False
def useradd(self, username, expiration=None, comment=None):
"""
Create user account with 'username'
"""
userentry = self.get_userentry(username)
if userentry is not None:
logger.info("User {0} already exists, skip useradd", username)
return
if expiration is not None:
cmd = ["useradd", "-m", username, "-e", expiration]
else:
cmd = ["useradd", "-m", username]
if comment is not None:
cmd.extend(["-c", comment])
self._run_command_raising_OSUtilError(cmd, err_msg="Failed to create user account:{0}".format(username))
def chpasswd(self, username, password, crypt_id=6, salt_len=10):
if self.is_sys_user(username):
raise OSUtilError(("User {0} is a system user, "
"will not set password.").format(username))
passwd_hash = textutil.gen_password_hash(password, crypt_id, salt_len)
self._run_command_raising_OSUtilError(["usermod", "-p", passwd_hash, username],
err_msg="Failed to set password for {0}".format(username))
def get_users(self):
return getpwall()
def conf_sudoer(self, username, nopasswd=False, remove=False):
sudoers_dir = conf.get_sudoers_dir()
sudoers_wagent = os.path.join(sudoers_dir, 'waagent')
if not remove:
# for older distros create sudoers.d
if not os.path.isdir(sudoers_dir):
# create the sudoers.d directory
fileutil.mkdir(sudoers_dir)
# add the include of sudoers.d to the /etc/sudoers
sudoers_file = os.path.join(sudoers_dir, os.pardir, 'sudoers')
include_sudoers_dir = "\n#includedir {0}\n".format(sudoers_dir)
fileutil.append_file(sudoers_file, include_sudoers_dir)
sudoer = None
if nopasswd:
sudoer = "{0} ALL=(ALL) NOPASSWD: ALL".format(username)
else:
sudoer = "{0} ALL=(ALL) ALL".format(username)
if not os.path.isfile(sudoers_wagent) or \
fileutil.findstr_in_file(sudoers_wagent, sudoer) is False:
fileutil.append_file(sudoers_wagent, "{0}\n".format(sudoer))
fileutil.chmod(sudoers_wagent, 0o440)
else:
# remove user from sudoers
if os.path.isfile(sudoers_wagent):
try:
content = fileutil.read_file(sudoers_wagent)
sudoers = content.split("\n")
sudoers = [x for x in sudoers if username not in x]
fileutil.write_file(sudoers_wagent, "\n".join(sudoers))
except IOError as e:
raise OSUtilError("Failed to remove sudoer: {0}".format(e))
def del_root_password(self):
try:
passwd_file_path = conf.get_passwd_file_path()
passwd_content = fileutil.read_file(passwd_file_path)
passwd = passwd_content.split('\n')
new_passwd = [x for x in passwd if not x.startswith("root:")]
new_passwd.insert(0, "root:*LOCK*:14600::::::")
fileutil.write_file(passwd_file_path, "\n".join(new_passwd))
except IOError as e:
raise OSUtilError("Failed to delete root password:{0}".format(e))
@staticmethod
def _norm_path(filepath):
home = conf.get_home_dir()
# Expand HOME variable if present in path
path = os.path.normpath(filepath.replace("$HOME", home))
return path
def deploy_ssh_keypair(self, username, keypair):
"""
Deploy id_rsa and id_rsa.pub
"""
path, thumbprint = keypair
path = self._norm_path(path)
dir_path = os.path.dirname(path)
fileutil.mkdir(dir_path, mode=0o700, owner=username)
lib_dir = conf.get_lib_dir()
prv_path = os.path.join(lib_dir, thumbprint + '.prv')
if not os.path.isfile(prv_path):
raise OSUtilError("Can't find {0}.prv".format(thumbprint))
shutil.copyfile(prv_path, path)
pub_path = path + '.pub'
crytputil = CryptUtil(conf.get_openssl_cmd())
pub = crytputil.get_pubkey_from_prv(prv_path)
fileutil.write_file(pub_path, pub)
self.set_selinux_context(pub_path, 'unconfined_u:object_r:ssh_home_t:s0')
self.set_selinux_context(path, 'unconfined_u:object_r:ssh_home_t:s0')
os.chmod(path, 0o644)
os.chmod(pub_path, 0o600)
def openssl_to_openssh(self, input_file, output_file):
cryptutil = CryptUtil(conf.get_openssl_cmd())
cryptutil.crt_to_ssh(input_file, output_file)
def deploy_ssh_pubkey(self, username, pubkey):
"""
Deploy authorized_key
"""
path, thumbprint, value = pubkey
if path is None:
raise OSUtilError("Public key path is None")
crytputil = CryptUtil(conf.get_openssl_cmd())
path = self._norm_path(path)
dir_path = os.path.dirname(path)
fileutil.mkdir(dir_path, mode=0o700, owner=username)
if value is not None:
if not value.startswith("ssh-"):
raise OSUtilError("Bad public key: {0}".format(value))
if not value.endswith("\n"):
value += "\n"
fileutil.write_file(path, value)
elif thumbprint is not None:
lib_dir = conf.get_lib_dir()
crt_path = os.path.join(lib_dir, thumbprint + '.crt')
if not os.path.isfile(crt_path):
raise OSUtilError("Can't find {0}.crt".format(thumbprint))
pub_path = os.path.join(lib_dir, thumbprint + '.pub')
pub = crytputil.get_pubkey_from_crt(crt_path)
fileutil.write_file(pub_path, pub)
self.set_selinux_context(pub_path,
'unconfined_u:object_r:ssh_home_t:s0')
self.openssl_to_openssh(pub_path, path)
fileutil.chmod(pub_path, 0o600)
else:
raise OSUtilError("SSH public key Fingerprint and Value are None")
self.set_selinux_context(path, 'unconfined_u:object_r:ssh_home_t:s0')
fileutil.chowner(path, username)
fileutil.chmod(path, 0o644)
def is_selinux_system(self):
"""
Checks and sets self.selinux = True if SELinux is available on system.
"""
        if self.selinux is None:
if shellutil.run("which getenforce", chk_err=False) == 0:
self.selinux = True
else:
self.selinux = False
return self.selinux
def is_selinux_enforcing(self):
"""
Calls shell command 'getenforce' and returns True if 'Enforcing'.
"""
if self.is_selinux_system():
output = shellutil.run_get_output("getenforce")[1]
return output.startswith("Enforcing")
else:
return False
def set_selinux_context(self, path, con): # pylint: disable=R1710
"""
Calls shell 'chcon' with 'path' and 'con' context.
Returns exit result.
"""
if self.is_selinux_system():
if not os.path.exists(path):
logger.error("Path does not exist: {0}".format(path))
return 1
try:
shellutil.run_command(['chcon', con, path], log_error=True)
except shellutil.CommandError as cmd_err:
return cmd_err.returncode
return 0
def conf_sshd(self, disable_password):
option = "no" if disable_password else "yes"
conf_file_path = conf.get_sshd_conf_file_path()
conf_file = fileutil.read_file(conf_file_path).split("\n")
textutil.set_ssh_config(conf_file, "PasswordAuthentication", option)
textutil.set_ssh_config(conf_file, "ChallengeResponseAuthentication", option)
textutil.set_ssh_config(conf_file, "ClientAliveInterval", str(conf.get_ssh_client_alive_interval()))
fileutil.write_file(conf_file_path, "\n".join(conf_file))
logger.info("{0} SSH password-based authentication methods."
.format("Disabled" if disable_password else "Enabled"))
logger.info("Configured SSH client probing to keep connections alive.")
def get_dvd_device(self, dev_dir='/dev'):
pattern = r'(sr[0-9]|hd[c-z]|cdrom[0-9]|cd[0-9]|vd[b-z])'
device_list = os.listdir(dev_dir)
for dvd in [re.match(pattern, dev) for dev in device_list]:
if dvd is not None:
return "/dev/{0}".format(dvd.group(0))
inner_detail = "The following devices were found, but none matched " \
"the pattern [{0}]: {1}\n".format(pattern, device_list)
raise OSUtilError(msg="Failed to get dvd device from {0}".format(dev_dir),
inner=inner_detail)
def mount_dvd(self,
max_retry=6,
chk_err=True,
dvd_device=None,
mount_point=None,
sleep_time=5):
if dvd_device is None:
dvd_device = self.get_dvd_device()
if mount_point is None:
mount_point = conf.get_dvd_mount_point()
mount_list = shellutil.run_get_output("mount")[1]
existing = self.get_mount_point(mount_list, dvd_device)
if existing is not None:
# already mounted
logger.info("{0} is already mounted at {1}", dvd_device, existing)
return
if not os.path.isdir(mount_point):
os.makedirs(mount_point)
err = ''
for retry in range(1, max_retry):
return_code, err = self.mount(dvd_device,
mount_point,
option=["-o", "ro", "-t", "udf,iso9660,vfat"],
chk_err=False)
if return_code == 0:
logger.info("Successfully mounted dvd")
return
else:
logger.warn(
"Mounting dvd failed [retry {0}/{1}, sleeping {2} sec]",
retry,
max_retry - 1,
sleep_time)
if retry < max_retry:
time.sleep(sleep_time)
if chk_err:
raise OSUtilError("Failed to mount dvd device", inner=err)
def umount_dvd(self, chk_err=True, mount_point=None):
if mount_point is None:
mount_point = conf.get_dvd_mount_point()
return_code = self.umount(mount_point, chk_err=chk_err)
if chk_err and return_code != 0:
raise OSUtilError("Failed to unmount dvd device at {0}".format(mount_point))
def eject_dvd(self, chk_err=True):
dvd = self.get_dvd_device()
dev = dvd.rsplit('/', 1)[1]
pattern = r'(vd[b-z])'
# We should not eject if the disk is not a cdrom
if re.search(pattern, dev):
return
try:
shellutil.run_command(["eject", dvd])
except shellutil.CommandError as cmd_err:
if chk_err:
msg = "Failed to eject dvd: ret={0}\n[stdout]\n{1}\n\n[stderr]\n{2}"\
.format(cmd_err.returncode, cmd_err.stdout, cmd_err.stderr)
raise OSUtilError(msg)
def try_load_atapiix_mod(self):
try:
self.load_atapiix_mod()
except Exception as e:
logger.warn("Could not load ATAPI driver: {0}".format(e))
def load_atapiix_mod(self):
if self.is_atapiix_mod_loaded():
return
ret, kern_version = shellutil.run_get_output("uname -r")
if ret != 0:
raise Exception("Failed to call uname -r")
mod_path = os.path.join('/lib/modules',
kern_version.strip('\n'),
'kernel/drivers/ata/ata_piix.ko')
if not os.path.isfile(mod_path):
raise Exception("Can't find module file:{0}".format(mod_path))
ret, output = shellutil.run_get_output("insmod " + mod_path) # pylint: disable=W0612
if ret != 0:
raise Exception("Error calling insmod for ATAPI CD-ROM driver")
if not self.is_atapiix_mod_loaded(max_retry=3):
raise Exception("Failed to load ATAPI CD-ROM driver")
def is_atapiix_mod_loaded(self, max_retry=1):
for retry in range(0, max_retry):
ret = shellutil.run("lsmod | grep ata_piix", chk_err=False)
if ret == 0:
logger.info("Module driver for ATAPI CD-ROM is already present.")
return True
if retry < max_retry - 1:
time.sleep(1)
return False
def mount(self, device, mount_point, option=None, chk_err=True):
if not option:
option = []
cmd = ["mount"]
cmd.extend(option + [device, mount_point])
try:
output = shellutil.run_command(cmd, log_error=chk_err)
except shellutil.CommandError as cmd_err:
detail = "[{0}] returned {1}:\n stdout: {2}\n\nstderr: {3}".format(cmd, cmd_err.returncode,
cmd_err.stdout, cmd_err.stderr)
return cmd_err.returncode, detail
return 0, output
def umount(self, mount_point, chk_err=True):
try:
shellutil.run_command(["umount", mount_point], log_error=chk_err)
except shellutil.CommandError as cmd_err:
return cmd_err.returncode
return 0
def allow_dhcp_broadcast(self):
# Open DHCP port if iptables is enabled.
        # We suppress error logging if these commands fail.
shellutil.run("iptables -D INPUT -p udp --dport 68 -j ACCEPT",
chk_err=False)
shellutil.run("iptables -I INPUT -p udp --dport 68 -j ACCEPT",
chk_err=False)
def remove_rules_files(self, rules_files=None):
if rules_files is None:
rules_files = __RULES_FILES__
lib_dir = conf.get_lib_dir()
for src in rules_files:
file_name = fileutil.base_name(src)
dest = os.path.join(lib_dir, file_name)
if os.path.isfile(dest):
os.remove(dest)
if os.path.isfile(src):
logger.warn("Move rules file {0} to {1}", file_name, dest)
shutil.move(src, dest)
def restore_rules_files(self, rules_files=None):
if rules_files is None:
rules_files = __RULES_FILES__
lib_dir = conf.get_lib_dir()
for dest in rules_files:
filename = fileutil.base_name(dest)
src = os.path.join(lib_dir, filename)
if os.path.isfile(dest):
continue
if os.path.isfile(src):
logger.warn("Move rules file {0} to {1}", filename, dest)
shutil.move(src, dest)
def get_mac_addr(self):
"""
Convenience function, returns mac addr bound to
first non-loopback interface.
"""
ifname = self.get_if_name()
addr = self.get_if_mac(ifname)
return textutil.hexstr_to_bytearray(addr)
def get_if_mac(self, ifname):
"""
Return the mac-address bound to the socket.
"""
sock = socket.socket(socket.AF_INET,
socket.SOCK_DGRAM,
socket.IPPROTO_UDP)
param = struct.pack('256s', (ifname[:15] + ('\0' * 241)).encode('latin-1'))
info = fcntl.ioctl(sock.fileno(), IOCTL_SIOCGIFHWADDR, param)
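        # info holds the returned struct ifreq: a 16-byte interface name, a 2-byte
        # sa_family field, then the hardware address, so the 6 MAC bytes sit at offsets 18..24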
sock.close()
return ''.join(['%02X' % textutil.str_to_ord(char) for char in info[18:24]])
@staticmethod
def _get_struct_ifconf_size():
"""
        Return the size of one struct ifreq entry in the SIOCGIFCONF result buffer.
        On 64-bit platforms the size is 40 bytes; on 32-bit platforms the size is 32 bytes.
"""
python_arc = platform.architecture()[0]
struct_size = 32 if python_arc == '32bit' else 40
return struct_size
def _get_all_interfaces(self):
"""
Return a dictionary mapping from interface name to IPv4 address.
Interfaces without a name are ignored.
"""
expected = 16 # how many devices should I expect...
struct_size = DefaultOSUtil._get_struct_ifconf_size()
array_size = expected * struct_size
buff = array.array('B', b'\0' * array_size)
param = struct.pack('iL', array_size, buff.buffer_info()[0])
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
ret = fcntl.ioctl(sock.fileno(), IOCTL_SIOCGIFCONF, param)
retsize = (struct.unpack('iL', ret)[0])
sock.close()
if retsize == array_size:
logger.warn(('SIOCGIFCONF returned more than {0} up '
'network interfaces.'), expected)
ifconf_buff = array_to_bytes(buff)
ifaces = {}
for i in range(0, array_size, struct_size):
iface = ifconf_buff[i:i + IFNAMSIZ].split(b'\0', 1)[0]
if len(iface) > 0:
iface_name = iface.decode('latin-1')
if iface_name not in ifaces:
ifaces[iface_name] = socket.inet_ntoa(ifconf_buff[i + 20:i + 24])
return ifaces
def get_first_if(self):
"""
Return the interface name, and IPv4 addr of the "primary" interface or,
failing that, any active non-loopback interface.
"""
primary = self.get_primary_interface()
ifaces = self._get_all_interfaces()
if primary in ifaces:
return primary, ifaces[primary]
for iface_name in ifaces.keys():
if not self.is_loopback(iface_name):
logger.info("Choosing non-primary [{0}]".format(iface_name))
return iface_name, ifaces[iface_name]
return '', ''
@staticmethod
def _build_route_list(proc_net_route):
"""
Construct a list of network route entries
:param list(str) proc_net_route: Route table lines, including headers, containing at least one route
:return: List of network route objects
:rtype: list(RouteEntry)
"""
idx = 0
column_index = {}
header_line = proc_net_route[0]
for header in filter(lambda h: len(h) > 0, header_line.split("\t")):
column_index[header.strip()] = idx
idx += 1
try:
idx_iface = column_index["Iface"]
idx_dest = column_index["Destination"]
idx_gw = column_index["Gateway"]
idx_flags = column_index["Flags"]
idx_metric = column_index["Metric"]
idx_mask = column_index["Mask"]
except KeyError:
msg = "/proc/net/route is missing key information; headers are [{0}]".format(header_line)
logger.error(msg)
return []
route_list = []
for entry in proc_net_route[1:]:
route = entry.split("\t")
if len(route) > 0:
route_obj = RouteEntry(route[idx_iface], route[idx_dest], route[idx_gw], route[idx_mask],
route[idx_flags], route[idx_metric])
route_list.append(route_obj)
return route_list
@staticmethod
def read_route_table():
"""
Return a list of strings comprising the route table, including column headers. Each line is stripped of leading
or trailing whitespace but is otherwise unmolested.
:return: Entries in the text route table
:rtype: list(str)
"""
try:
with open('/proc/net/route') as routing_table:
return list(map(str.strip, routing_table.readlines()))
except Exception as e:
logger.error("Cannot read route table [{0}]", ustr(e))
return []
@staticmethod
def get_list_of_routes(route_table):
"""
Construct a list of all network routes known to this system.
:param list(str) route_table: List of text entries from route table, including headers
:return: a list of network routes
:rtype: list(RouteEntry)
"""
route_list = []
count = len(route_table)
if count < 1:
logger.error("/proc/net/route is missing headers")
elif count == 1:
logger.error("/proc/net/route contains no routes")
else:
route_list = DefaultOSUtil._build_route_list(route_table)
return route_list
def get_primary_interface(self):
"""
Get the name of the primary interface, which is the one with the
default route attached to it; if there are multiple default routes,
the primary has the lowest Metric.
:return: the interface which has the default route
"""
# from linux/route.h
RTF_GATEWAY = 0x02
DEFAULT_DEST = "00000000"
primary_interface = None
if not self.disable_route_warning:
logger.info("Examine /proc/net/route for primary interface")
route_table = DefaultOSUtil.read_route_table()
def is_default(route):
return route.destination == DEFAULT_DEST and int(route.flags) & RTF_GATEWAY == RTF_GATEWAY
candidates = list(filter(is_default, DefaultOSUtil.get_list_of_routes(route_table)))
if len(candidates) > 0:
def get_metric(route):
return int(route.metric)
primary_route = min(candidates, key=get_metric)
primary_interface = primary_route.interface
if primary_interface is None:
primary_interface = ''
if not self.disable_route_warning:
with open('/proc/net/route') as routing_table_fh:
routing_table_text = routing_table_fh.read()
logger.warn('Could not determine primary interface, '
'please ensure /proc/net/route is correct')
logger.warn('Contents of /proc/net/route:\n{0}'.format(routing_table_text))
logger.warn('Primary interface examination will retry silently')
self.disable_route_warning = True
else:
logger.info('Primary interface is [{0}]'.format(primary_interface))
self.disable_route_warning = False
return primary_interface
def is_primary_interface(self, ifname):
"""
Indicate whether the specified interface is the primary.
:param ifname: the name of the interface - eth0, lo, etc.
:return: True if this interface binds the default route
"""
return self.get_primary_interface() == ifname
def is_loopback(self, ifname):
"""
Determine if a named interface is loopback.
"""
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
ifname_buff = ifname + ('\0' * 256)
result = fcntl.ioctl(s.fileno(), IOCTL_SIOCGIFFLAGS, ifname_buff)
flags, = struct.unpack('H', result[16:18])
isloopback = flags & 8 == 8
if not self.disable_route_warning:
logger.info('interface [{0}] has flags [{1}], '
'is loopback [{2}]'.format(ifname, flags, isloopback))
s.close()
return isloopback
def get_dhcp_lease_endpoint(self):
"""
OS specific, this should return the decoded endpoint of
the wireserver from option 245 in the dhcp leases file
if it exists on disk.
:return: The endpoint if available, or None
"""
return None
@staticmethod
def get_endpoint_from_leases_path(pathglob):
"""
Try to discover and decode the wireserver endpoint in the
specified dhcp leases path.
:param pathglob: The path containing dhcp lease files
:return: The endpoint if available, otherwise None
"""
endpoint = None
HEADER_LEASE = "lease"
HEADER_OPTION_245 = "option unknown-245"
HEADER_EXPIRE = "expire"
FOOTER_LEASE = "}"
FORMAT_DATETIME = "%Y/%m/%d %H:%M:%S"
option_245_re = re.compile(
r'\s*option\s+unknown-245\s+([0-9a-fA-F]+):([0-9a-fA-F]+):([0-9a-fA-F]+):([0-9a-fA-F]+);')
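        # e.g. an illustrative lease line "option unknown-245 a8:3f:81:10;" decodes to 168.63.129.16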
logger.info("looking for leases in path [{0}]".format(pathglob))
for lease_file in glob.glob(pathglob):
leases = open(lease_file).read()
if HEADER_OPTION_245 in leases:
cached_endpoint = None
option_245_match = None
expired = True # assume expired
for line in leases.splitlines():
if line.startswith(HEADER_LEASE):
cached_endpoint = None
expired = True
elif HEADER_EXPIRE in line:
if "never" in line:
expired = False
else:
try:
expire_string = line.split(" ", 4)[-1].strip(";")
expire_date = datetime.datetime.strptime(expire_string, FORMAT_DATETIME)
if expire_date > datetime.datetime.utcnow():
expired = False
except: # pylint: disable=W0702
logger.error("could not parse expiry token '{0}'".format(line))
elif FOOTER_LEASE in line:
logger.info("dhcp entry:{0}, 245:{1}, expired:{2}".format(
cached_endpoint, option_245_match is not None, expired))
if not expired and cached_endpoint is not None:
endpoint = cached_endpoint
logger.info("found endpoint [{0}]".format(endpoint))
# we want to return the last valid entry, so
# keep searching
else:
option_245_match = option_245_re.match(line)
if option_245_match is not None:
cached_endpoint = '{0}.{1}.{2}.{3}'.format(
int(option_245_match.group(1), 16),
int(option_245_match.group(2), 16),
int(option_245_match.group(3), 16),
int(option_245_match.group(4), 16))
if endpoint is not None:
logger.info("cached endpoint found [{0}]".format(endpoint))
else:
logger.info("cached endpoint not found")
return endpoint
def is_missing_default_route(self):
try:
route_cmd = ["ip", "route", "show"]
routes = shellutil.run_command(route_cmd)
for route in routes.split("\n"):
if route.startswith("0.0.0.0 ") or route.startswith("default "):
return False
return True
except CommandError as e:
logger.warn("Cannot get the routing table. {0} failed: {1}", ustr(route_cmd), ustr(e))
return False
def get_if_name(self):
if_name = ''
if_found = False
while not if_found:
if_name = self.get_first_if()[0]
if_found = len(if_name) >= 2
if not if_found:
time.sleep(2)
return if_name
def get_ip4_addr(self):
return self.get_first_if()[1]
def set_route_for_dhcp_broadcast(self, ifname):
try:
route_cmd = ["ip", "route", "add", "255.255.255.255", "dev", ifname]
return shellutil.run_command(route_cmd)
except CommandError:
return ""
def remove_route_for_dhcp_broadcast(self, ifname):
try:
route_cmd = ["ip", "route", "del", "255.255.255.255", "dev", ifname]
shellutil.run_command(route_cmd)
except CommandError:
pass
def is_dhcp_available(self):
return True
def is_dhcp_enabled(self):
return False
def stop_dhcp_service(self):
pass
def start_dhcp_service(self):
pass
def start_network(self):
pass
def start_agent_service(self):
pass
def stop_agent_service(self):
pass
def register_agent_service(self):
pass
def unregister_agent_service(self):
pass
def restart_ssh_service(self):
pass
def route_add(self, net, mask, gateway): # pylint: disable=W0613
"""
        Add the specified route via the given gateway using `ip route add`
        (the mask argument is accepted for interface compatibility but unused).
"""
try:
cmd = ["ip", "route", "add", net, "via", gateway]
return shellutil.run_command(cmd)
except CommandError:
return ""
@staticmethod
def _text_to_pid_list(text):
return [int(n) for n in text.split()]
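        # e.g. "123 456\n" -> [123, 456]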
@staticmethod
def _get_dhcp_pid(command):
try:
return DefaultOSUtil._text_to_pid_list(shellutil.run_command(command))
except CommandError as exception: # pylint: disable=W0612
return []
def get_dhcp_pid(self):
return self._get_dhcp_pid(["pidof", "dhclient"])
def set_hostname(self, hostname):
fileutil.write_file('/etc/hostname', hostname)
self._run_command_without_raising(["hostname", hostname], log_error=False)
def set_dhcp_hostname(self, hostname):
autosend = r'^[^#]*?send\s*host-name.*?(<hostname>|gethostname[(,)])'
dhclient_files = ['/etc/dhcp/dhclient.conf', '/etc/dhcp3/dhclient.conf', '/etc/dhclient.conf']
for conf_file in dhclient_files:
if not os.path.isfile(conf_file):
continue
if fileutil.findre_in_file(conf_file, autosend):
# Return if auto send host-name is configured
return
fileutil.update_conf_file(conf_file,
'send host-name',
'send host-name "{0}";'.format(hostname))
def restart_if(self, ifname, retries=3, wait=5):
retry_limit = retries + 1
for attempt in range(1, retry_limit):
try:
shellutil.run_command(["ifdown", ifname])
shellutil.run_command(["ifup", ifname])
return
except shellutil.CommandError as cmd_err:
msg = "failed to restart {0}: returncode={1}\n[stdout]{2}\n\n[stderr]{3}\n"\
.format(ifname, cmd_err.returncode, cmd_err.stdout, cmd_err.stderr)
if cmd_err.returncode == 1:
logger.info(msg)
else:
logger.warn(msg)
if attempt < retry_limit:
logger.info("retrying in {0} seconds".format(wait))
time.sleep(wait)
else:
logger.warn("exceeded restart retries")
def publish_hostname(self, hostname):
self.set_dhcp_hostname(hostname)
self.set_hostname_record(hostname)
ifname = self.get_if_name()
self.restart_if(ifname)
def set_scsi_disks_timeout(self, timeout):
for dev in os.listdir("/sys/block"):
if dev.startswith('sd'):
self.set_block_device_timeout(dev, timeout)
def set_block_device_timeout(self, dev, timeout):
if dev is not None and timeout is not None:
file_path = "/sys/block/{0}/device/timeout".format(dev)
content = fileutil.read_file(file_path)
original = content.splitlines()[0].rstrip()
if original != timeout:
fileutil.write_file(file_path, timeout)
logger.info("Set block dev timeout: {0} with timeout: {1}",
dev, timeout)
def get_mount_point(self, mountlist, device):
"""
Example of mountlist:
/dev/sda1 on / type ext4 (rw)
proc on /proc type proc (rw)
sysfs on /sys type sysfs (rw)
devpts on /dev/pts type devpts (rw,gid=5,mode=620)
tmpfs on /dev/shm type tmpfs
(rw,rootcontext="system_u:object_r:tmpfs_t:s0")
none on /proc/sys/fs/binfmt_misc type binfmt_misc (rw)
/dev/sdb1 on /mnt/resource type ext4 (rw)
"""
if (mountlist and device):
for entry in mountlist.split('\n'):
if (re.search(device, entry)):
tokens = entry.split()
# Return the 3rd column of this line
return tokens[2] if len(tokens) > 2 else None
return None
@staticmethod
def _enumerate_device_id():
"""
Enumerate all storage device IDs.
Args:
None
Returns:
Iterator[Tuple[str, str]]: VmBus and storage devices.
"""
if os.path.exists(STORAGE_DEVICE_PATH):
for vmbus in os.listdir(STORAGE_DEVICE_PATH):
deviceid = fileutil.read_file(os.path.join(STORAGE_DEVICE_PATH, vmbus, "device_id"))
guid = deviceid.strip('{}\n')
yield vmbus, guid
@staticmethod
def search_for_resource_disk(gen1_device_prefix, gen2_device_id):
"""
Search the filesystem for a device by ID or prefix.
Args:
gen1_device_prefix (str): Gen1 resource disk prefix.
gen2_device_id (str): Gen2 resource device ID.
Returns:
str: The found device.
"""
device = None
# We have to try device IDs for both Gen1 and Gen2 VMs.
logger.info('Searching gen1 prefix {0} or gen2 {1}'.format(gen1_device_prefix, gen2_device_id))
try:
for vmbus, guid in DefaultOSUtil._enumerate_device_id():
if guid.startswith(gen1_device_prefix) or guid == gen2_device_id:
for root, dirs, files in os.walk(STORAGE_DEVICE_PATH + vmbus): # pylint: disable=W0612
root_path_parts = root.split('/')
# For Gen1 VMs we only have to check for the block dir in the
# current device. But for Gen2 VMs all of the disks (sda, sdb,
# sr0) are presented in this device on the same SCSI controller.
# Because of that we need to also read the LUN. It will be:
# 0 - OS disk
# 1 - Resource disk
# 2 - CDROM
if root_path_parts[-1] == 'block' and (
guid != gen2_device_id or
root_path_parts[-2].split(':')[-1] == '1'):
device = dirs[0]
return device
else:
# older distros
for d in dirs:
if ':' in d and "block" == d.split(':')[0]:
device = d.split(':')[1]
return device
except (OSError, IOError) as exc:
logger.warn('Error getting device for {0} or {1}: {2}', gen1_device_prefix, gen2_device_id, ustr(exc))
return None
def device_for_ide_port(self, port_id):
"""
Return device name attached to ide port 'n'.
"""
if port_id > 3:
return None
g0 = "00000000"
if port_id > 1:
g0 = "00000001"
port_id = port_id - 2
gen1_device_prefix = '{0}-000{1}'.format(g0, port_id)
device = DefaultOSUtil.search_for_resource_disk(
gen1_device_prefix=gen1_device_prefix,
gen2_device_id=GEN2_DEVICE_ID
)
logger.info('Found device: {0}'.format(device))
return device
def set_hostname_record(self, hostname):
fileutil.write_file(conf.get_published_hostname(), contents=hostname)
def get_hostname_record(self):
hostname_record = conf.get_published_hostname()
if not os.path.exists(hostname_record):
# older agents (but newer or equal to 2.2.3) create published_hostname during provisioning; when provisioning is done
# by cloud-init the hostname is written to set-hostname
hostname = self._get_cloud_init_hostname()
if hostname is None:
logger.info("Retrieving hostname using socket.gethostname()")
hostname = socket.gethostname()
logger.info('Published hostname record does not exist, creating [{0}] with hostname [{1}]', hostname_record, hostname)
self.set_hostname_record(hostname)
record = fileutil.read_file(hostname_record)
return record
@staticmethod
def _get_cloud_init_hostname():
"""
Retrieves the hostname set by cloud-init; returns None if cloud-init did not set the hostname or if there is an
error retrieving it.
"""
hostname_file = '/var/lib/cloud/data/set-hostname'
try:
if os.path.exists(hostname_file):
#
# The format is similar to
#
# $ cat /var/lib/cloud/data/set-hostname
# {
# "fqdn": "nam-u18",
# "hostname": "nam-u18"
# }
#
logger.info("Retrieving hostname from {0}", hostname_file)
with open(hostname_file, 'r') as file_:
hostname_info = json.load(file_)
if "hostname" in hostname_info:
return hostname_info["hostname"]
except Exception as exception:
logger.warn("Error retrieving hostname: {0}", ustr(exception))
return None
def del_account(self, username):
if self.is_sys_user(username):
logger.error("{0} is a system user. Will not delete it.", username)
self._run_command_without_raising(["touch", "/var/run/utmp"])
self._run_command_without_raising(['userdel', '-f', '-r', username])
self.conf_sudoer(username, remove=True)
def decode_customdata(self, data):
return base64.b64decode(data).decode('utf-8')
def get_total_mem(self):
# Get total memory in bytes and divide by 1024**2 to get the value in MB.
return os.sysconf('SC_PAGE_SIZE') * os.sysconf('SC_PHYS_PAGES') / (1024 ** 2)
def get_processor_cores(self):
return multiprocessing.cpu_count()
def check_pid_alive(self, pid):
try:
pid = int(pid)
os.kill(pid, 0)
except (ValueError, TypeError):
return False
except OSError as os_error:
if os_error.errno == errno.EPERM:
return True
return False
return True
@property
def is_64bit(self):
return sys.maxsize > 2 ** 32
@staticmethod
def _get_proc_stat():
"""
Get the contents of /proc/stat.
# cpu 813599 3940 909253 154538746 874851 0 6589 0 0 0
# cpu0 401094 1516 453006 77276738 452939 0 3312 0 0 0
# cpu1 412505 2423 456246 77262007 421912 0 3276 0 0 0
:return: A single string with the contents of /proc/stat
:rtype: str
"""
results = None
try:
results = fileutil.read_file('/proc/stat')
except (OSError, IOError) as ex:
logger.warn("Couldn't read /proc/stat: {0}".format(ex.strerror))
raise
return results
@staticmethod
def get_total_cpu_ticks_since_boot():
"""
Compute the number of USER_HZ units of time that have elapsed in all categories, across all cores, since boot.
:return: int
"""
system_cpu = 0
proc_stat = DefaultOSUtil._get_proc_stat()
if proc_stat is not None:
for line in proc_stat.splitlines():
if ALL_CPUS_REGEX.match(line):
system_cpu = sum(
int(i) for i in line.split()[1:8]) # see "man proc" for a description of these fields
break
return system_cpu
def get_nic_state(self, as_string=False):
"""
Capture NIC state (IPv4 and IPv6 addresses plus link state).
:return: By default returns a dictionary of NIC state objects, with the NIC name as key. If as_string is True
returns the state as a string
:rtype: dict(str,NetworkInformationCard)
"""
state = {}
all_command = ["ip", "-a", "-o", "link"]
inet_command = ["ip", "-4", "-a", "-o", "address"]
inet6_command = ["ip", "-6", "-a", "-o", "address"]
try:
all_output = shellutil.run_command(all_command)
except shellutil.CommandError as command_error:
logger.verbose("Could not fetch NIC link info: {0}", ustr(command_error))
return "" if as_string else {}
if as_string:
def run_command(command):
try:
return shellutil.run_command(command)
except shellutil.CommandError as command_error:
return str(command_error)
inet_output = run_command(inet_command)
inet6_output = run_command(inet6_command)
return "Executing {0}:\n{1}\nExecuting {2}:\n{3}\nExecuting {4}:\n{5}\n".format(all_command, all_output, inet_command, inet_output, inet6_command, inet6_output)
else:
self._update_nic_state_all(state, all_output)
self._update_nic_state(state, inet_command, NetworkInterfaceCard.add_ipv4, "an IPv4 address")
self._update_nic_state(state, inet6_command, NetworkInterfaceCard.add_ipv6, "an IPv6 address")
return state
@staticmethod
def _update_nic_state_all(state, command_output):
for entry in command_output.splitlines():
# Sample output:
# 1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN mode DEFAULT group default qlen 1000\ link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 promiscuity 0 addrgenmode eui64
# 2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP mode DEFAULT group default qlen 1000\ link/ether 00:0d:3a:30:c3:5a brd ff:ff:ff:ff:ff:ff promiscuity 0 addrgenmode eui64
# 3: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN mode DEFAULT group default \ link/ether 02:42:b5:d5:00:1d brd ff:ff:ff:ff:ff:ff promiscuity 0 \ bridge forward_delay 1500 hello_time 200 max_age 2000 ageing_time 30000 stp_state 0 priority 32768 vlan_filtering 0 vlan_protocol 802.1Q addrgenmode eui64
result = IP_COMMAND_OUTPUT.match(entry)
if result:
name = result.group(1)
state[name] = NetworkInterfaceCard(name, result.group(2))
@staticmethod
def _update_nic_state(state, ip_command, handler, description):
"""
Update the state of NICs based on the output of a specified ip subcommand.
:param dict(str, NetworkInterfaceCard) state: Dictionary of NIC state objects
:param str ip_command: The ip command to run
:param handler: A method on the NetworkInterfaceCard class
:param str description: Description of the particular information being added to the state
"""
try:
output = shellutil.run_command(ip_command)
for entry in output.splitlines():
# family inet sample output:
# 1: lo inet 127.0.0.1/8 scope host lo\ valid_lft forever preferred_lft forever
# 2: eth0 inet 10.145.187.220/26 brd 10.145.187.255 scope global eth0\ valid_lft forever preferred_lft forever
# 3: docker0 inet 192.168.43.1/24 brd 192.168.43.255 scope global docker0\ valid_lft forever preferred_lft forever
#
# family inet6 sample output:
# 1: lo inet6 ::1/128 scope host \ valid_lft forever preferred_lft forever
# 2: eth0 inet6 fe80::20d:3aff:fe30:c35a/64 scope link \ valid_lft forever preferred_lft forever
result = IP_COMMAND_OUTPUT.match(entry)
if result:
interface_name = result.group(1)
if interface_name in state:
handler(state[interface_name], result.group(2))
else:
logger.error("Interface {0} has {1} but no link state".format(interface_name, description))
except shellutil.CommandError as command_error:
logger.error("[{0}] failed: {1}", ' '.join(ip_command), str(command_error))
@staticmethod
def _run_command_without_raising(cmd, log_error=True):
try:
shellutil.run_command(cmd, log_error=log_error)
# Original implementation of run() does a blanket catch, so mimicking the behaviour here
except Exception:
pass
@staticmethod
def _run_multiple_commands_without_raising(commands, log_error=True, continue_on_error=False):
for cmd in commands:
try:
shellutil.run_command(cmd, log_error=log_error)
# Original implementation of run() does a blanket catch, so mimicking the behaviour here
except Exception:
if continue_on_error:
continue
break
@staticmethod
def _run_command_raising_OSUtilError(cmd, err_msg, cmd_input=None):
# This method runs shell command using the new secure shellutil.run_command and raises OSUtilErrors on failures.
try:
return shellutil.run_command(cmd, log_error=True, input=cmd_input)
except shellutil.CommandError as e:
raise OSUtilError(
"{0}, Retcode: {1}, Output: {2}, Error: {3}".format(err_msg, e.returncode, e.stdout, e.stderr))
except Exception as e:
raise OSUtilError("{0}, Retcode: {1}, Error: {2}".format(err_msg, -1, ustr(e)))
| apache-2.0 | -4,743,960,978,200,930,000 | 38.67211 | 356 | 0.563288 | false |
dtiarks/ThesisPlot | Chap2/MoleculeMemory/memory.py | 1 | 3776 | # -*- coding: utf-8 -*-
"""
Created on Wed Jul 19 11:29:40 2017
@author: daniel
"""
import Tomography as tom
import quPy as qp
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import os
import json
import io
import scipy.constants as co
#import rydpy
c = 299792458 # m/s, speed of light CODATA 2014
a0 = 0.52917721067e-10 # m, Bohr radius
C6 = 2.3e23 * 4.36e-18 * a0**6 # Jm^6, Van-der-Waals coefficient for the 67s - 69s
hbar = 6.626070040e-34/(2 * np.pi) # Js, Planck constant, CODATA 2014
rho_peak = 2.0e12/1e-6 # peak density in cm^-3/centi^-3
d = 2.534e-29 # Cm, dipole matrix element (D. A. Steck)
Gamma_e = 2*np.pi * 6.065e6 # decay rate (D. A. Steck)
epsilon_0 = 8.854187817e-12 # dielectric constant, CODATA 2014
L = 61e-6 # medium length in m
omega_s = 2*np.pi * 384.23e12 # rad/s, transition frequency
gamma_21 = 0.0577314
chi_0 = 2*rho_peak*d**2 / (epsilon_0*hbar*Gamma_e) # prefactor of the susceptibility for the cycling transition (|R> polarization)
R_b=18e-6
n1=69
n2=100
ec=co.e
m_e=co.electron_mass
#def classicalRadius(n,l=0,j=0.5):
# return (ec**2/(4*np.pi*epsilon_0*co.h*rydpy.IonEnergy(n,l,j,units="Hz")))
#
#def orbitalRadius(n,l=0,j=0.5):
# r,rrRR,_,_=rydpy.Numerov(n,l,j)
# imax=np.argmax(rrRR)
# return r[imax]*a0
def moleculeState(t,pm,omega):
return np.cos(pm)*np.array([1,0])+np.sin(pm)*np.exp(-1j*omega*t)*np.array([0,1])
def nuMolecule(t,pm,omega):
return np.cos(pm)**4 + np.sin(pm)**4 + 2*np.sin(pm)**2*np.cos(pm)**2*np.cos(omega*t)
def nuNumerical(ts,pm,omega):
s=np.array([np.inner(moleculeState(0,pm,omega),moleculeState(tx,pm,omega)) for tx in ts])
return np.abs(s)**2
def phiNumerical(ts,pm,omega):
s=np.array([np.angle(moleculeState(tx,pm,omega)[0]+moleculeState(tx,pm,omega)[1]) for tx in ts])
return s
def alphaMu(rat):
alpha=1/(1+(rat)**2)
beta=1/(1+(1/rat)**2)
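    # alpha and beta are complementary weights of the two components:
    # alpha = 1/(1+r^2) and beta = r^2/(1+r^2), so alpha + beta = 1 for any ratio r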
return alpha,beta
def etaStephan(eps, alpha, mu):
return alpha**2+mu**2+2*alpha*mu*np.cos(2*eps)
def phiStephan(eps, alpha, mu):
phi=np.angle(alpha*np.exp(-1j*eps)+mu*np.exp(1j*eps))
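    # NOTE: the value above is immediately overwritten by the branch-corrected
    # expression below; the direct np.angle form is kept only for reference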
phi=-(np.arctan(((mu-alpha)/(mu+alpha))*np.tan(eps))-np.pi*np.sign(mu-alpha)*np.mod(eps/np.pi+0.5,1))
return phi
pM1=np.pi/12.
pM2=np.pi/6.
omega=2*np.pi*220e3
ts=np.linspace(0,7e-6,1000)
rat1=0.2
rat2=0.9
alpha,mu=alphaMu(rat1)
print etaStephan(1.,alpha,mu)
#nu=np.abs(np.outer(moleculeState(np.zeros_like(ts),pM,0),moleculeState(ts,pM,omega)))**2
plot_dict={}
# plot it
f=plt.figure()
h=pd.DataFrame(index=omega*ts,data=etaStephan(omega*ts,alpha,mu))
h2=pd.DataFrame(index=omega*ts,data=etaStephan(omega*ts,*alphaMu(rat2)))
plot_dict['121']={
'A':{'type':'plot','y':h[0].to_json(),'ylabel':u'$\eta_L(t)$','xlabel':r'$\omega_B t_D$ (rad)','ylim':(0,1.2),'num':'a','label':r'$r=0.2$'},
'B':{'type':'plot','y':h2[0].to_json(),'label':r'$r=0.9$'}
}
plt.subplot(121)
plt.ylabel(u'$\eta_L(t)$')
plt.plot(omega*ts,etaStephan(omega*ts,alpha,mu))
plt.plot(omega*ts,etaStephan(omega*ts,*alphaMu(rat2)))
#plt.plot(1e6*ts,nuMolecule(ts,pM,omega))
#plt.axhline(1)
h=pd.DataFrame(index=omega*ts,data=phiStephan(omega*ts,*alphaMu(rat1))+0.5*np.pi)
h2=pd.DataFrame(index=omega*ts,data=phiStephan(omega*ts,*alphaMu(rat2))+0.5*np.pi)
plot_dict['122']={
'A':{'type':'plot','y':h[0].to_json(),'ylabel':r'$\phi_{L}$ (rad)','xlabel':r'$\omega_B t_D$ (rad)','num':'b','ylim':(-1,1.0)},
'B':{'type':'plot','y':h2[0].to_json(),}
}
plt.subplot(122)
plt.ylabel(u'$\phi_L$ (rad)')
plt.plot(omega*ts,phiStephan(omega*ts,*alphaMu(rat1))-0.5*np.pi)
plt.plot(omega*ts,phiStephan(omega*ts,*alphaMu(rat2))-0.5*np.pi)
with io.open('memory.json', 'w+') as f:
f.write(unicode(json.dumps(plot_dict, ensure_ascii=False,indent=4)))
plt.show() | mit | -2,405,916,120,718,962,700 | 30.214876 | 160 | 0.649894 | false |
FedoraScientific/salome-smesh | src/Tools/blocFissure/gmu/partitionBlocDefaut.py | 1 | 6971 | # -*- coding: utf-8 -*-
import logging
from geomsmesh import geompy
# -----------------------------------------------------------------------------
# --- partition of the defect block by the generatrix, the torus and the crack plane
def partitionBlocDefaut(volDefaut, facesDefaut, gener, pipe,
facefis, ellipsoide):
"""
  Partition of the defect block by the generatrix of the ellipse,
  the elliptical torus and the planar crack face
  @param volDefaut : the defect block
  @param facesDefaut : the external faces of the defect zone
  @param gener : the generatrix of the ellipse, positioned in space
  @param pipe : the torus partitioned by the crack plane, positioned
  in space
  @param facefis : the planar face of the crack, positioned in space
  @param ellipsoide : the ellipsoid bounding the defect, positioned in space
  @return (volDefautPart, blocp, tore, faceFissure), the partitioned block and
  the sub-shapes produced by the partition
  (the block minus the torus, the generatrix, the torus, the crack face)
"""
logging.info("start")
volDefautPart = geompy.MakePartition([volDefaut], [pipe, facefis, ellipsoide], [], [], geompy.ShapeType["SOLID"], 0, [], 1)
blocp = geompy.GetInPlaceByHistory(volDefautPart, volDefaut)
#gencnt = geompy.GetInPlaceByHistory(volDefautPart, gener)
tore = geompy.GetInPlaceByHistory(volDefautPart, pipe)
faceFissure = geompy.GetInPlaceByHistory(volDefautPart, facefis)
#ellipsoidep =geompy.GetInPlaceByHistory(volDefautPart, ellipsoide)
geompy.addToStudy( volDefautPart, 'volDefautPart' )
geompy.addToStudyInFather( volDefautPart, tore, 'tore' )
geompy.addToStudyInFather( volDefautPart, faceFissure, 'FACE1' )
#geompy.addToStudyInFather( volDefautPart, gencnt, 'generatrice' )
solids = geompy.ExtractShapes(blocp, geompy.ShapeType["SOLID"], True)
vols = []
for i in range(len(solids)):
props = geompy.BasicProperties(solids[i])
vols.append(props[2])
maxvol = max(vols)
imaxvol = vols.index(maxvol)
blocp = solids[imaxvol]
vols[imaxvol] = 0
maxvol = max(vols)
imaxvol = vols.index(maxvol)
ellipsoidep = solids[imaxvol]
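  # the largest solid of the partition is the block; the second largest is the ellipsoid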
geompy.addToStudyInFather( volDefautPart, blocp, 'bloc' )
geompy.addToStudyInFather( volDefautPart, ellipsoidep, 'ellipsoide' )
sharedFaces = geompy.GetSharedShapesMulti([blocp, ellipsoidep], geompy.ShapeType["FACE"])
for i in range(len(sharedFaces)):
name = "faceCommuneEllipsoideBloc_%d"%i
geompy.addToStudyInFather(blocp, sharedFaces[i], name)
#sharedEdges = geompy.GetSharedShapesMulti([blocp, ellipsoidep], geompy.ShapeType["EDGE"])
allSharedEdges = geompy.GetSharedShapesMulti([blocp, ellipsoidep], geompy.ShapeType["EDGE"])
sharedEdges = []
for i in range(len(allSharedEdges)):
    if geompy.NbShapes(allSharedEdges[i], geompy.ShapeType["VERTEX"]) > 1: # non-degenerate edge
sharedEdges.append(allSharedEdges[i])
for i in range(len(sharedEdges)):
name = "edgeCommuneEllipsoideBloc_%d"%i
geompy.addToStudyInFather(blocp, sharedEdges[i], name)
facesExternes = []
facesExtBloc = []
facesExtElli = []
faces = geompy.ExtractShapes(facesDefaut, geompy.ShapeType["FACE"], True)
if len(faces) == 0:
faces = [facesDefaut]
for i in range(len(faces)):
faceExt = geompy.GetInPlace(ellipsoidep, faces[i])
if faceExt is not None:
name = "faceExterne_e%d"%i
geompy.addToStudyInFather(ellipsoidep, faceExt, name)
facesExternes.append(faceExt)
facesExtElli.append(faceExt)
faceExt = geompy.GetInPlace(blocp, faces[i])
if faceExt is not None:
name = "faceExterne_b%d"%i
geompy.addToStudyInFather(blocp, faceExt, name)
facesExternes.append(faceExt)
facesExtBloc.append(faceExt)
else:
logging.info(" recherche faces externes par GetShapesOnShape")
vertex = geompy.MakeVertexOnSurface(faces[i], 0.5, 0.5)
normal = geompy.GetNormal(faces[i], vertex)
extrusionFace = geompy.MakePrismVecH(faces[i], normal, 1)
#extrusionFace = geompy.MakePrismVecH2Ways(faces[i], normal, 0.1)
#extrusionFace = geompy.MakeScaleTransform(extrusionFace, vertex, 1.01)
name = "extrusionFace_b%d"%i
geompy.addToStudyInFather(blocp, extrusionFace, name)
#facesExt = geompy.GetShapesOnShape(extrusionFace, blocp, geompy.ShapeType["FACE"], GEOM.ST_ONIN)
facesExt = geompy.GetShapesOnShape(extrusionFace, blocp, geompy.ShapeType["FACE"], GEOM.ST_ON)
for j in range(len(facesExt)):
name = "faceExterne_b%d_%d"%(i,j)
geompy.addToStudyInFather(blocp, facesExt[j], name)
facesExternes.append(facesExt[j])
facesExtBloc.append(facesExt[j])
  if len(facesExtBloc) < len(faces): # not all of the block's external faces were found. TODO: eliminate duplicate detections
logging.info(" recherche faces externes par aretes partagees avec faces externes ellipsoide")
facesBloc = geompy.ExtractShapes(blocp, geompy.ShapeType["FACE"], True)
for i in range(len(facesBloc)):
notOnEllipsoide = True
      for j in range(len(sharedFaces)): # discard the faces shared with the ellipsoid
if facesBloc[i].IsSame(sharedFaces[j]):
notOnEllipsoide = False
break
if notOnEllipsoide:
        for j in range(len(facesExtElli)): # the faces we are looking for share one or more edges with the external face(s) of the ellipsoid
allSharedEdges = []
try:
allSharedEdges += geompy.GetSharedShapesMulti([facesBloc[i], facesExtElli[j]], geompy.ShapeType["EDGE"])
except:
pass
if len(allSharedEdges) > 0:
name = "faceExterne_b%d_%d"%(i,j)
geompy.addToStudyInFather(blocp, facesBloc[i], name)
facesExternes.append(facesBloc[i])
facesExtBloc.append(facesBloc[i])
aretesInternes = []
for i in range(len(facesExternes)):
for j in range(i+1,len(facesExternes)):
shared = []
try:
shared += geompy.GetSharedShapesMulti([facesExternes[i], facesExternes[j]], geompy.ShapeType["EDGE"])
except:
logging.info("no shared edges in %s,%s",i,j)
else:
aretesInternes += shared
for i in range(len(aretesInternes)):
name = "aretesInternes_%d"%i
geompy.addToStudyInFather(blocp, aretesInternes[i], name)
edgesBords = []
for faceExtB in facesExtBloc:
edges = geompy.ExtractShapes(faceExtB, geompy.ShapeType["EDGE"], True)
for i in range(len(edges)):
isInterne = False
for j in range(len(aretesInternes)):
if edges[i].IsSame(aretesInternes[j]):
isInterne = True
break
if not isInterne:
edgesBords.append(edges[i])
name = "edgeBord%d"%i
geompy.addToStudyInFather(blocp,edges[i] , name)
group = None
if len(edgesBords) > 0:
group = geompy.CreateGroup(blocp, geompy.ShapeType["EDGE"])
geompy.UnionList(group, edgesBords)
edgesBords = group
return volDefautPart, blocp, tore, faceFissure, facesExternes, facesExtBloc, facesExtElli, aretesInternes, ellipsoidep, sharedFaces, sharedEdges, edgesBords
| lgpl-2.1 | -5,217,617,912,348,345,000 | 42.51875 | 158 | 0.690507 | false |
andreasBihlmaier/arni | arni_gui/src/arni_gui/topic_item.py | 1 | 16458 | from rospy.rostime import Time
import rospy
from python_qt_binding.QtCore import QTranslator
from abstract_item import AbstractItem
from helper_functions import prepare_number_for_representation, UPDATE_FREQUENCY, TOPIC_AGGREGATION_FREQUENCY, \
ROUND_DIGITS, MAXIMUM_OFFLINE_TIME
from arni_core.helper import SEUID, SEUID_DELIMITER
from node_item import NodeItem
from rospy.timer import Timer
from rospy.impl.tcpros_service import ServiceProxy
from rospy.rostime import Duration
from connection_item import ConnectionItem
import re
class TopicItem(AbstractItem):
"""
A TopicItem represents a specific topic which contains many connections and has attributes like the number of sent messages.
"""
def __init__(self, logger, seuid, first_message, parent=None):
"""Initializes the TopicItem.
:param seuid: the seuid of the item
:type seuid: str
:param logger: a logger where to log when special events occur
:type logger: ModelLogger
:param parent: the parent-item
:type parent: AbstractItem
"""
AbstractItem.__init__(self, logger, seuid, parent)
self.__parent = parent
self._type = "topic"
self.add_keys=["dropped_msgs", "traffic", "bandwidth", "frequency"]
self.avg_keys=["period_mean", "period_stddev", "stamp_age_mean", "stamp_age_stddev"]
self.max_keys=["period_max", "stamp_age_max"]
self._attributes = []
self._attributes.extend(["dropped_msgs", "traffic",
"period_mean", "period_stddev", "period_max", "stamp_age_mean",
"stamp_age_stddev", "stamp_age_max", "bandwidth", "frequency"])
for item in self._attributes:
self._add_data_list(item)
self.__calculated_data = {}
for key in self._attributes:
self.__calculated_data[key] = []
self.__calculated_data["window_stop"] = []
self.__calculated_data["window_start"] = []
for item in self._attributes:
self._rated_attributes.append(item + ".actual_value")
self._rated_attributes.append(item + ".expected_value")
self._rated_attributes.append(item + ".state")
for item in self._rated_attributes:
self._add_rated_data_list(item)
self._logger.log("info", Time.now(), seuid, "Created a new TopicItem")
self.__timer = Timer(Duration(nsecs=TOPIC_AGGREGATION_FREQUENCY), self.__aggregate_topic_data)
self.tree_items = []
self.__aggregation_window = rospy.get_param("~aggregation_window", 5)
# def _updateTimer(self, event):
# """
# Updates the timer to the last changed status. If it
# :return:
# """
# self.alive = False
# # TODO this can be very expensive - is there a better way?
# for item in self.tree_items:
# for child in item.get_childs():
# if child.alive:
# self.alive = True
# break
#
# if not self.alive:
# self.set_state("offline")
def get_child(self, row, parent=None):
"""
Returns the child at the position row.
:param row: the index of the row
:type row: int
:param parent: the model parent at the given index (not global / logical parent)
:type parent: NodeItem
:returns: the child at the position row
:rtype: AbstractItem
"""
if not isinstance(parent, NodeItem):
print(type(parent))
raise UserWarning
return self.__get_local_childs(parent)[row]
def __get_local_childs(self, parent=None):
"""
Returns all childs of the topic item at the given position in the gui.
:param parent: the model parent at the given index (not global / logical parent)
:type parent: NodeItem
:param sub_activated: Defines if subscriber shall be shown too.
:returns: the child at the position row
:rtype: AbstractItem
"""
childs = []
if parent is not None:
# a specific parent has been chosen - we have to use it to display the correct connection items
# use the seuid to determine the node and compare this to the parts in the connections item (child of this
# item.
seuid = parent.get_seuid()
seuid_helper = SEUID()
seuid_helper.identifier = seuid
seuid_helper.set_fields()
node = seuid_helper.node
for child in self.get_childs():
child_seuid = child.get_seuid()
seuid_helper.identifier = child_seuid
seuid_helper.set_fields()
node_comp = seuid_helper.publisher
# do the check on the publisher
if node == node_comp:
# match.
childs.append(child)
continue
return childs
else:
return self._child_items
def row(self, parent=None):
"""
Returns the index of the Item.
:returns: the index of the Item
:rtype: int
"""
if parent:
return parent.get_childs().index(self)
elif self.__parent:
return self.__parent.get_childs().index(self)
def child_count(self, parent=None):
"""
Returns the number of children from the AbstractItem.
:returns: number of childs
:rtype: int
"""
return len(self.__get_local_childs(parent))
def get_childs(self, parent=None):
"""
Returns a list with all children.
        WARNING: This simply exposes the AbstractItem (superclass) behaviour; it is kept here as a caution for
        GUI use. A topic item shows only some of its connections depending on the parent node, and that
        filtering is *not* implemented by this method.
:returns: list of children
:rtype: list
"""
if parent is not None:
return self.__get_local_childs(parent)
return self._child_items
def get_items_younger_than(self, time, *args):
"""
        Overrides the standard implementation in AbstractItem. This method provides the data from the
        calculated (aggregated) data and *not* from the raw input, which is especially desirable when plotting.
        :param time: the lower bound in seconds
        :param args: the keys to the dict
        :return: dict of lists
"""
self._data_lock.acquire()
return_values = {}
if args:
for key in args:
return_values[key] = None
if "window_stop" not in args:
return_values["window_stop"] = None
else:
for key in self.__calculated_data:
return_values[key] = None
breakpoint = 0
list_of_time = self.__calculated_data["window_stop"]
length = len(list_of_time)
        if length != 0:
if list_of_time[0] >= time:
for key in return_values:
try:
return_values[key] = self.__calculated_data[key][:]
except KeyError:
print("Accessed key was: " + key + ". Available keys are: ")
print(self.__calculated_data)
raise
else:
for i in range(length - 1, -1, -1):
if list_of_time[i] < time:
breakpoint = i + 1
for key in return_values:
if key in self.__calculated_data:
return_values[key] = self.__calculated_data[key][breakpoint:length]
else:
raise IndexError("IndexError! length of the list %s, accessed index %s. length of data"
" at given point %s, key is %s", length, i,
len(self.__calculated_data[key]), key)
break
self._data_lock.release()
return return_values
def get_raw_items_younger_than(self, time, *args):
"""
Returns all entries that are younger than time either in all keys of self._data or if args not empty in
all key corresponding to args.
Warning: Method assumes data is sorted by time if this is not true will return too few or too much data.
:param time: the lower bound in seconds
:type time: rospy.Time
:param args: the keys to the dict
:type args: str
:returns: dict of lists
:rtype: dict
:raises KeyError: if an element in args cannot be found in any of the dictionaries (data vs rated data)
"""
return_values = {}
if args:
for key in args:
return_values[key] = None
if "window_stop" not in args:
return_values["window_stop"] = None
else:
for key in self._data:
return_values[key] = None
breakpoint = 0
list_of_time = self._data["window_stop"]
length = len(list_of_time)
        if length != 0:
if list_of_time[0] >= time:
for key in return_values:
try:
return_values[key] = self._data[key][:]
except KeyError:
print("Accessed key was: " + key + ". Available keys are: ")
print(self._data)
raise
else:
for i in range(length - 1, -1, -1):
if list_of_time[i] < time:
breakpoint = i + 1
for key in return_values:
if key in self._data:
return_values[key] = self._data[key][breakpoint:length]
else:
                            raise IndexError("IndexError! length of the list {0}, accessed index {1}. length of data"
                                             " at given point {2}, key is {3}".format(length, i, len(self._data[key]), key))
break
return return_values
def __aggregate_topic_data(self, event):
"""
Aggregates the topic every TOPIC_AGGREGATION_FREQUENCY nsecs and pushes the updated data to
self.__calculated_data.
:param event: containing information when this method was called - not used but needed for the interface
"""
aggregated_data = {}
for key in self._attributes:
aggregated_data[key] = 0
for key in self.__calculated_data.keys():
self.__calculated_data[key].append(0)
child_count = 0
for connection in self.get_childs(): # !assuming all childs are connection items!
values = connection.aggregate_data(self.__aggregation_window) # average over N seconds
if values:
for key in self.add_keys:
aggregated_data[key] += values[key]
for key in self.max_keys:
if values[key] > aggregated_data[key]:
aggregated_data[key] = values[key]
for key in self.avg_keys:
aggregated_data[key] += values[key]
child_count += 1
for key in self.avg_keys:
if child_count == 0:
aggregated_data[key] = 0
else:
aggregated_data[key] /= child_count
self._data_lock.acquire()
for key in self._attributes:
self.__calculated_data[key][-1] = aggregated_data[key]
self.__calculated_data["window_start"][-1] = Time.now()
self.__calculated_data["window_stop"][-1] = Time.now() - (Duration(secs=1) if int(Duration(secs=1).to_sec()) <= int(Time.now().to_sec()) else Time(0))
self._data_lock.release()
def execute_action(self, action):
"""
        Not applicable; topics cannot execute actions.
:param action: action to be executed
:type action: RemoteAction
"""
pass
def get_detailed_data(self):
"""
        Returns the detailed data of the TopicItem.
:returns: detailed data
:rtype: str
"""
data_dict = self.get_latest_data()
for key in self.__calculated_data:
if self.__calculated_data[key]:
data_dict[key] = self.__calculated_data[key][-1]
else:
data_dict[key] = self.tr("Currently no value available")
data_dict["state"] = self.get_state()
content = "<p class=\"detailed_data\">"
content += self.get_erroneous_entries()
content += "Rounded to a second:<br>"
if "frequency" in self._attributes:
content += self.tr("frequency") + ": " + prepare_number_for_representation(data_dict["frequency"]) \
+ " " + self.tr("frequency_unit") + " <br>"
content += self.tr("bandwidth") + ": " + prepare_number_for_representation(data_dict["bandwidth"]) \
+ " " + self.tr("bandwidth_unit") + " <br>"
content += self.tr("dropped_msgs") + ": " + prepare_number_for_representation(data_dict["dropped_msgs"]) \
+ " " + self.tr("dropped_msgs_unit") + " <br>"
content += self.tr("period_max") + ": " + prepare_number_for_representation(data_dict["period_max"]) \
+ " " + self.tr("period_max_unit") + " <br>"
content += self.tr("stamp_age_max") + ": " + prepare_number_for_representation(data_dict["stamp_age_max"]) \
+ " " + self.tr("stamp_age_max_unit") + " <br>"
content += "</p>"
return content
def get_plotable_items(self):
"""
Returns items for the plot.
:returns: str[]
"""
if "frequency" in self.__calculated_data:
return ["dropped_msgs","stamp_age_max", "period_max",
"bandwidth", "frequency"]
else:
return ["dropped_msgs", "traffic", "stamp_age_max", "period_max", "bandwidth"]
def get_short_data(self):
"""
        Returns a shortened version of the item data.
:returns: data of the item
:rtype: str
"""
data_dict = {}
for key in self.__calculated_data:
if self.__calculated_data[key]:
data_dict[key] = self.__calculated_data[key][-1]
else:
data_dict[key] = self.tr("Currently no value available")
data_dict["window_stop"] = Time(0)
data_dict["window_start"] = Time(0)
data_dict["state"] = self.get_state()
try:
if data_dict["window_stop"] == Time(0):
return "No data yet"
elif (Time.now() - data_dict["window_stop"]) > Duration(MAXIMUM_OFFLINE_TIME):
# last entry was more than MAXIMUM_OFFLINE_TIME ago, it could be offline!
return "No data since " + prepare_number_for_representation(Time.now() - data_dict["window_stop"]) \
+ " seconds"
except:
print(data_dict["window_stop"])
raise UserWarning
content = ""
if data_dict["state"] is "error":
content += self.get_erroneous_entries_for_log()
else:
content += self.tr("frequency") + ": " + prepare_number_for_representation(
data_dict["frequency"]) + " " \
+ self.tr("frequency_unit") + " - "
content += self.tr("bandwidth") + ": " + prepare_number_for_representation(data_dict["bandwidth"]) \
+ " " + self.tr("bandwidth_unit") + " - "
content += self.tr("dropped_msgs") + ": " + prepare_number_for_representation(data_dict["dropped_msgs"]) \
+ " " + self.tr("dropped_msgs_unit")
return content
def can_execute_actions(self):
"""
This item cannot execute actions, so it returns False
:return: False
"""
return False
def get_list_items(self):
return []
def get_time_items(self):
return ["stamp_age_mean", "stamp_age_max"]
| bsd-2-clause | -6,375,115,065,249,970,000 | 35.251101 | 158 | 0.537429 | false |
autopulated/yotta | yotta/main.py | 2 | 10966 | # Copyright 2014-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0
# See LICENSE file for details.
from yotta.lib import lazyregex #pylint: disable=unused-import
from yotta.lib import errors #pylint: disable=unused-import
# NOTE: argcomplete must be first!
# argcomplete, pip install argcomplete, tab-completion for argparse, Apache-2
import argcomplete
# standard library modules, , ,
import argparse
import sys
import os
# globalconf, share global arguments between modules, internal
import yotta.lib.globalconf as globalconf
# hook to support coverage information when yotta runs itself during tests:
if 'COVERAGE_PROCESS_START' in os.environ:
import coverage
coverage.process_startup()
# set __version__ using the same file that's read by setup.py when installing:
with open(os.path.join(os.path.dirname(__file__), 'version.txt')) as _version_f:
__version__ = _version_f.read().strip()
def splitList(l, at_value):
r = [[]]
for x in l:
if x == at_value:
r.append(list())
else:
r[-1].append(x)
return r
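# e.g. splitList(['prog', '--', 'a', 'b'], '--') -> [['prog'], ['a', 'b']]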
def _handleUnhandledReqestExceptions(fn):
import functools
@functools.wraps(fn)
def wrapped(*args, **kwargs):
# requests, apache2
import requests
try:
return fn(*args, **kwargs)
except requests.exceptions.RequestException as e:
import logging
if e.request is not None:
logging.critical('%s %s failed with status %s', e.request.method, e.request.url, e.response.status_code)
sys.exit(1)
else:
raise
return wrapped
def _exitSilentlyOnUnhandledPipeError(fn):
import functools
@functools.wraps(fn)
def wrapped(*args, **kwargs):
try:
return fn(*args, **kwargs)
except IOError as e:
import errno
if e.errno == errno.EPIPE:
# unhandled pipe error -> exit silently, but with an error code
sys.exit(1)
else:
raise
return wrapped
@_exitSilentlyOnUnhandledPipeError
@_handleUnhandledReqestExceptions
def main():
# standard library modules, , ,
import logging
from functools import reduce
# logging setup, , setup the logging system, internal
from yotta.lib import logging_setup
# options, , common argument parser options, internal
import yotta.options as options
logging_setup.init(level=logging.INFO, enable_subsystems=None, plain=False)
# we override many argparse things to make options more re-usable across
# subcommands, and allow lazy loading of subcommand modules:
parser = options.parser.ArgumentParser(
formatter_class=argparse.RawTextHelpFormatter,
description='Build software using re-usable components.\n'+
'For more detailed help on each subcommand, run: yotta <subcommand> --help'
)
subparser = parser.add_subparsers(dest='subcommand_name', metavar='<subcommand>')
parser.add_argument('--version', action='version', version=__version__,
help='display the version'
)
# add re-usable top-level options which subcommands may also accept
options.verbosity.addTo(parser)
options.debug.addTo(parser)
options.plain.addTo(parser)
options.noninteractive.addTo(parser)
options.registry.addTo(parser)
options.target.addTo(parser)
options.config.addTo(parser)
def addParser(name, module_name, description, help=None):
if help is None:
help = description
def onParserAdded(parser):
import importlib
module = importlib.import_module('.' + module_name, 'yotta')
module.addOptions(parser)
parser.set_defaults(command=module.execCommand)
subparser.add_parser_async(
name, description=description, help=help,
formatter_class=argparse.RawTextHelpFormatter,
callback=onParserAdded
)
addParser('search', 'search',
'Search for open-source modules and targets that have been published '+
'to the yotta registry (with yotta publish). See help for `yotta '+
'install` for installing modules, and for `yotta target` for '+
'switching targets.',
'Search for published modules and targets'
)
addParser('init', 'init', 'Create a new module.')
addParser('install', 'install',
'Add a specific module as a dependency, and download it, or install all '+
'dependencies for the current module. Use yotta install '+
'modulename@version to install a specific version.'
)
addParser('build', 'build',
'Build the current module. Options can be passed to the underlying '+
'build tool by passing them after --, e.g. to do a verbose build '+
'which will display each command as it is run, use:\n'+
' yotta build -- -v\n\n'+
'The programs or libraries to build can be specified (by default '+
'only the libraries needed by the current module and the current '+
"module's own tests are built). For example, to build the tests of "+
'all dependencies, run:\n yotta build all_tests\n\n',
'Build the current module.'
)
addParser('version', 'version', 'Bump the module version, or (with no arguments) display the current version.')
addParser('link', 'link',
'Symlink a module to be used into another module.\n\n'+
'Use: "yotta link" in a module to link it globally, then use "yotta '+
'link <modulename>" to link it into the module where you want to use '+
'it.\n\n'+
'"yotta link ../path/to/module" is also supported, which will create '+
'the global link and a link into the current module in a single step.',
'Symlink a module'
)
addParser('link-target', 'link_target',
'Symlink a target to be used into another module.\n\n'+
'Use: "yotta link" in a target to link it globally, then use "yotta '+
'link-target <targetname>" to link it into the module where you want to use '+
'it.\n\n'+
'"yotta link ../path/to/target" is also supported, which will create '+
'the global link and a link into the current module in a single step.',
'Symlink a target'
)
addParser('update', 'update', 'Update dependencies for the current module, or a specific module.')
addParser('target', 'target', 'Set or display the target device.')
addParser('debug', 'debug', 'Attach a debugger to the current target. Requires target support.')
addParser('test', 'test_subcommand',
'Run the tests for the current module on the current target. A build '+
'will be run first, and options to the build subcommand are also '+
'accepted by test.\nThis subcommand requires the target to provide a '+
'"test" script that will be used to run each test. Modules may also '+
'define a "testReporter" script, which will be piped the output from '+
'each test, and may produce a summary.',
'Run the tests for the current module on the current target. Requires target support for cross-compiling targets.'
)
addParser('start', 'start',
'Launch the compiled program (available for executable modules only). Requires target support for cross-compiling targets.'
)
addParser('publish', 'publish', 'Publish a module or target to the public registry.')
addParser('unpublish', 'unpublish', 'Un-publish a recently published module or target.')
addParser('login', 'login', 'Authorize for access to private github repositories and publishing to the yotta registry.')
addParser('logout', 'logout', 'Remove saved authorization token for the current user.')
addParser('whoami', 'whoami', 'Display who the currently logged in user is (if any).')
addParser('list', 'list', 'List the dependencies of the current module, or the inherited targets of the current target.')
addParser('outdated', 'outdated', 'Display information about dependencies which have newer versions available.')
addParser('uninstall', 'uninstall', 'Remove a specific dependency of the current module, both from module.json and from disk.')
addParser('remove', 'remove',
'Remove the downloaded version of a dependency module or target, or '+
'un-link a linked module or target (see yotta link --help for details '+
'of linking). This command does not modify your module.json file.',
'Remove or unlink a dependency without removing it from module.json.'
)
addParser('owners', 'owners', 'Add/remove/display the owners of a module or target.')
addParser('licenses', 'licenses', 'List the licenses of the current module and its dependencies.')
addParser('clean', 'clean', 'Remove files created by yotta and the build.')
addParser('config', 'config', 'Display the target configuration info.')
addParser('shrinkwrap', 'shrinkwrap', 'Create a yotta-shrinkwrap.json file to freeze dependency versions.')
# short synonyms, subparser.choices is a dictionary, so use update() to
# merge in the keys from another dictionary
short_commands = {
'up':subparser.choices['update'],
'in':subparser.choices['install'],
'ln':subparser.choices['link'],
'v':subparser.choices['version'],
'ls':subparser.choices['list'],
'rm':subparser.choices['remove'],
'unlink':subparser.choices['remove'],
'unlink-target':subparser.choices['remove'],
'owner':subparser.choices['owners'],
'lics':subparser.choices['licenses'],
'who':subparser.choices['whoami'],
'run':subparser.choices['start']
}
subparser.choices.update(short_commands)
# split the args into those before and after any '--'
# argument - subcommands get raw access to arguments following '--', and
# may pass them on to (for example) the build tool being used
split_args = splitList(sys.argv, '--')
following_args = reduce(lambda x,y: x + ['--'] + y, split_args[1:], [])[1:]
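# e.g. ['yotta', 'test', '--', '-j', '8'] splits into [['yotta', 'test'], ['-j', '8']],
# so following_args becomes ['-j', '8'] (an illustrative sketch of the behaviour)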
# complete all the things :)
argcomplete.autocomplete(
parser,
exclude = list(short_commands.keys()) + ['-d', '--debug', '-v', '--verbose']
)
# when args are passed directly we need to strip off the program name
# (hence [:1])
args = parser.parse_args(split_args[0][1:])
# set global arguments that are shared everywhere and never change
globalconf.set('interactive', args.interactive)
globalconf.set('plain', args.plain)
# finally, do stuff!
if 'command' not in args:
parser.print_usage()
sys.exit(0)
try:
status = args.command(args, following_args)
except KeyboardInterrupt:
logging.warning('interrupted')
status = -1
sys.exit(status or 0)
| apache-2.0 | 3,106,736,014,995,484,700 | 43.217742 | 131 | 0.654933 | false |
andrewyoung1991/scons | src/engine/SCons/Platform/aix.py | 1 | 3083 | """engine.SCons.Platform.aix
Platform-specific initialization for IBM AIX systems.
There normally shouldn't be any need to import this module directly. It
will usually be imported through the generic SCons.Platform.Platform()
selection method.
"""
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import os
import subprocess
import posix
import SCons.Util
import SCons.Action
def get_xlc(env, xlc=None, packages=[]):
# Use the AIX package installer tool lslpp to figure out where a
# given xl* compiler is installed and what version it is.
xlcPath = None
xlcVersion = None
if xlc is None:
xlc = env.get('CC', 'xlc')
if SCons.Util.is_List(xlc):
xlc = xlc[0]
for package in packages:
# find the installed filename, which may be a symlink as well
pipe = SCons.Action._subproc(env, ['lslpp', '-fc', package],
stdin = 'devnull',
stderr = 'devnull',
stdout = subprocess.PIPE)
# output of lslpp is something like this:
# #Path:Fileset:File
# /usr/lib/objrepos:vac.C 6.0.0.0:/usr/vac/exe/xlCcpp
# /usr/lib/objrepos:vac.C 6.0.0.0:/usr/vac/bin/xlc_r -> /usr/vac/bin/xlc
for line in pipe.stdout:
if xlcPath:
continue # read everything to let lslpp terminate
fileset, filename = line.split(':')[1:3]
filename = filename.split()[0]
if ('/' in xlc and filename == xlc) \
or ('/' not in xlc and filename.endswith('/' + xlc)):
xlcVersion = fileset.split()[1]
xlcPath, sep, xlc = filename.rpartition('/')
return (xlcPath, xlc, xlcVersion)
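# Illustrative sketch (not part of the original module): look up the compiler
# from the 'vac.C' fileset and prepend its directory to the build PATH.
#
#   (path, name, version) = get_xlc(env, 'xlc', packages=['vac.C'])
#   if path:
#       env.PrependENVPath('PATH', path)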
def generate(env):
posix.generate(env)
#Based on AIX 5.2: ARG_MAX=24576 - 3000 for environment expansion
env['MAXLINELENGTH'] = 21576
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| mit | -2,343,016,563,431,805,000 | 35.270588 | 84 | 0.66591 | false |
smips/Temporary_Insanity | TI/src/libtcodpy.py | 1 | 62671 | #
# libtcod 1.5.1 python wrapper
# Copyright (c) 2008,2009,2010 Jice & Mingos
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * The name of Jice or Mingos may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY JICE AND MINGOS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL JICE OR MINGOS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import sys
import ctypes
import struct
from ctypes import *
if not hasattr(ctypes, "c_bool"): # for Python < 2.6
c_bool = c_uint8
try: #import NumPy if available
import numpy
numpy_available = True
except ImportError:
numpy_available = False
LINUX=False
MAC=False
MINGW=False
MSVC=False
HAIKU=False
if sys.platform.find('linux') != -1:
_lib = ctypes.cdll['./libtcod.so']
LINUX=True
elif sys.platform.find('darwin') != -1:
_lib = ctypes.cdll['./libtcod.dylib']
MAC = True
elif sys.platform.find('haiku') != -1:
_lib = ctypes.cdll['./libtcod.so']
HAIKU = True
else:
try:
_lib = ctypes.cdll['./libtcod-mingw.dll']
MINGW=True
except WindowsError:
_lib = ctypes.cdll['./libtcod-VS.dll']
MSVC=True
# On Windows, ctypes doesn't work well with function returning structs,
    # so we have to use the _wrapper functions instead
_lib.TCOD_color_multiply = _lib.TCOD_color_multiply_wrapper
_lib.TCOD_color_add = _lib.TCOD_color_add_wrapper
_lib.TCOD_color_multiply_scalar = _lib.TCOD_color_multiply_scalar_wrapper
_lib.TCOD_color_subtract = _lib.TCOD_color_subtract_wrapper
_lib.TCOD_color_lerp = _lib.TCOD_color_lerp_wrapper
_lib.TCOD_console_get_default_background = _lib.TCOD_console_get_default_background_wrapper
_lib.TCOD_console_get_default_foreground = _lib.TCOD_console_get_default_foreground_wrapper
_lib.TCOD_console_get_char_background = _lib.TCOD_console_get_char_background_wrapper
_lib.TCOD_console_get_char_foreground = _lib.TCOD_console_get_char_foreground_wrapper
_lib.TCOD_console_get_fading_color = _lib.TCOD_console_get_fading_color_wrapper
_lib.TCOD_image_get_pixel = _lib.TCOD_image_get_pixel_wrapper
_lib.TCOD_image_get_mipmap_pixel = _lib.TCOD_image_get_mipmap_pixel_wrapper
_lib.TCOD_parser_get_color_property = _lib.TCOD_parser_get_color_property_wrapper
HEXVERSION = 0x010501
STRVERSION = "1.5.1"
TECHVERSION = 0x01050103
############################
# color module
############################
class Color(Structure):
_fields_ = [('r', c_uint8),
('g', c_uint8),
('b', c_uint8),
]
def __eq__(self, c):
return _lib.TCOD_color_equals(self, c)
def __mul__(self, c):
if isinstance(c,Color):
return _lib.TCOD_color_multiply(self, c)
else:
return _lib.TCOD_color_multiply_scalar(self, c_float(c))
def __add__(self, c):
return _lib.TCOD_color_add(self, c)
def __sub__(self, c):
return _lib.TCOD_color_subtract(self, c)
def __repr__(self):
return "Color(%d,%d,%d)" % (self.r, self.g, self.b)
def __getitem__(self, i):
if type(i) == str:
return getattr(self, i)
else:
return getattr(self, "rgb"[i])
def __setitem__(self, i, c):
if type(i) == str:
setattr(self, i, c)
else:
setattr(self, "rgb"[i], c)
def __iter__(self):
yield self.r
yield self.g
yield self.b
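# A quick sketch of the operator overloads above (values are computed by the
# C library; shown for illustration only):
#   half_red = Color(255, 0, 0) * 0.5                 # scalar multiply
#   yellow_ish = Color(255, 0, 0) + Color(0, 255, 0)  # per-channel add
#   r, g, b = Color(10, 20, 30)                       # __iter__ unpacking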
# Should be valid on any platform, check it! Has to be done after Color is defined.
if MAC:
from cprotos import setup_protos
setup_protos(_lib)
_lib.TCOD_color_equals.restype = c_bool
_lib.TCOD_color_multiply.restype = Color
_lib.TCOD_color_multiply_scalar.restype = Color
_lib.TCOD_color_add.restype = Color
_lib.TCOD_color_subtract.restype = Color
# default colors
# grey levels
black=Color(0,0,0)
darkest_grey=Color(31,31,31)
darker_grey=Color(63,63,63)
dark_grey=Color(95,95,95)
grey=Color(127,127,127)
light_grey=Color(159,159,159)
lighter_grey=Color(191,191,191)
lightest_grey=Color(223,223,223)
darkest_gray=Color(31,31,31)
darker_gray=Color(63,63,63)
dark_gray=Color(95,95,95)
gray=Color(127,127,127)
light_gray=Color(159,159,159)
lighter_gray=Color(191,191,191)
lightest_gray=Color(223,223,223)
white=Color(255,255,255)
# sepia
darkest_sepia=Color(31,24,15)
darker_sepia=Color(63,50,31)
dark_sepia=Color(94,75,47)
sepia=Color(127,101,63)
light_sepia=Color(158,134,100)
lighter_sepia=Color(191,171,143)
lightest_sepia=Color(222,211,195)
#standard colors
red=Color(255,0,0)
flame=Color(255,63,0)
orange=Color(255,127,0)
amber=Color(255,191,0)
yellow=Color(255,255,0)
lime=Color(191,255,0)
chartreuse=Color(127,255,0)
green=Color(0,255,0)
sea=Color(0,255,127)
turquoise=Color(0,255,191)
cyan=Color(0,255,255)
sky=Color(0,191,255)
azure=Color(0,127,255)
blue=Color(0,0,255)
han=Color(63,0,255)
violet=Color(127,0,255)
purple=Color(191,0,255)
fuchsia=Color(255,0,255)
magenta=Color(255,0,191)
pink=Color(255,0,127)
crimson=Color(255,0,63)
# dark colors
dark_red=Color(191,0,0)
dark_flame=Color(191,47,0)
dark_orange=Color(191,95,0)
dark_amber=Color(191,143,0)
dark_yellow=Color(191,191,0)
dark_lime=Color(143,191,0)
dark_chartreuse=Color(95,191,0)
dark_green=Color(0,191,0)
dark_sea=Color(0,191,95)
dark_turquoise=Color(0,191,143)
dark_cyan=Color(0,191,191)
dark_sky=Color(0,143,191)
dark_azure=Color(0,95,191)
dark_blue=Color(0,0,191)
dark_han=Color(47,0,191)
dark_violet=Color(95,0,191)
dark_purple=Color(143,0,191)
dark_fuchsia=Color(191,0,191)
dark_magenta=Color(191,0,143)
dark_pink=Color(191,0,95)
dark_crimson=Color(191,0,47)
# darker colors
darker_red=Color(127,0,0)
darker_flame=Color(127,31,0)
darker_orange=Color(127,63,0)
darker_amber=Color(127,95,0)
darker_yellow=Color(127,127,0)
darker_lime=Color(95,127,0)
darker_chartreuse=Color(63,127,0)
darker_green=Color(0,127,0)
darker_sea=Color(0,127,63)
darker_turquoise=Color(0,127,95)
darker_cyan=Color(0,127,127)
darker_sky=Color(0,95,127)
darker_azure=Color(0,63,127)
darker_blue=Color(0,0,127)
darker_han=Color(31,0,127)
darker_violet=Color(63,0,127)
darker_purple=Color(95,0,127)
darker_fuchsia=Color(127,0,127)
darker_magenta=Color(127,0,95)
darker_pink=Color(127,0,63)
darker_crimson=Color(127,0,31)
# darkest colors
darkest_red=Color(63,0,0)
darkest_flame=Color(63,15,0)
darkest_orange=Color(63,31,0)
darkest_amber=Color(63,47,0)
darkest_yellow=Color(63,63,0)
darkest_lime=Color(47,63,0)
darkest_chartreuse=Color(31,63,0)
darkest_green=Color(0,63,0)
darkest_sea=Color(0,63,31)
darkest_turquoise=Color(0,63,47)
darkest_cyan=Color(0,63,63)
darkest_sky=Color(0,47,63)
darkest_azure=Color(0,31,63)
darkest_blue=Color(0,0,63)
darkest_han=Color(15,0,63)
darkest_violet=Color(31,0,63)
darkest_purple=Color(47,0,63)
darkest_fuchsia=Color(63,0,63)
darkest_magenta=Color(63,0,47)
darkest_pink=Color(63,0,31)
darkest_crimson=Color(63,0,15)
# light colors
light_red=Color(255,114,114)
light_flame=Color(255,149,114)
light_orange=Color(255,184,114)
light_amber=Color(255,219,114)
light_yellow=Color(255,255,114)
light_lime=Color(219,255,114)
light_chartreuse=Color(184,255,114)
light_green=Color(114,255,114)
light_sea=Color(114,255,184)
light_turquoise=Color(114,255,219)
light_cyan=Color(114,255,255)
light_sky=Color(114,219,255)
light_azure=Color(114,184,255)
light_blue=Color(114,114,255)
light_han=Color(149,114,255)
light_violet=Color(184,114,255)
light_purple=Color(219,114,255)
light_fuchsia=Color(255,114,255)
light_magenta=Color(255,114,219)
light_pink=Color(255,114,184)
light_crimson=Color(255,114,149)
#lighter colors
lighter_red=Color(255,165,165)
lighter_flame=Color(255,188,165)
lighter_orange=Color(255,210,165)
lighter_amber=Color(255,232,165)
lighter_yellow=Color(255,255,165)
lighter_lime=Color(232,255,165)
lighter_chartreuse=Color(210,255,165)
lighter_green=Color(165,255,165)
lighter_sea=Color(165,255,210)
lighter_turquoise=Color(165,255,232)
lighter_cyan=Color(165,255,255)
lighter_sky=Color(165,232,255)
lighter_azure=Color(165,210,255)
lighter_blue=Color(165,165,255)
lighter_han=Color(188,165,255)
lighter_violet=Color(210,165,255)
lighter_purple=Color(232,165,255)
lighter_fuchsia=Color(255,165,255)
lighter_magenta=Color(255,165,232)
lighter_pink=Color(255,165,210)
lighter_crimson=Color(255,165,188)
# lightest colors
lightest_red=Color(255,191,191)
lightest_flame=Color(255,207,191)
lightest_orange=Color(255,223,191)
lightest_amber=Color(255,239,191)
lightest_yellow=Color(255,255,191)
lightest_lime=Color(239,255,191)
lightest_chartreuse=Color(223,255,191)
lightest_green=Color(191,255,191)
lightest_sea=Color(191,255,223)
lightest_turquoise=Color(191,255,239)
lightest_cyan=Color(191,255,255)
lightest_sky=Color(191,239,255)
lightest_azure=Color(191,223,255)
lightest_blue=Color(191,191,255)
lightest_han=Color(207,191,255)
lightest_violet=Color(223,191,255)
lightest_purple=Color(239,191,255)
lightest_fuchsia=Color(255,191,255)
lightest_magenta=Color(255,191,239)
lightest_pink=Color(255,191,223)
lightest_crimson=Color(255,191,207)
# desaturated colors
desaturated_red=Color(127,63,63)
desaturated_flame=Color(127,79,63)
desaturated_orange=Color(127,95,63)
desaturated_amber=Color(127,111,63)
desaturated_yellow=Color(127,127,63)
desaturated_lime=Color(111,127,63)
desaturated_chartreuse=Color(95,127,63)
desaturated_green=Color(63,127,63)
desaturated_sea=Color(63,127,95)
desaturated_turquoise=Color(63,127,111)
desaturated_cyan=Color(63,127,127)
desaturated_sky=Color(63,111,127)
desaturated_azure=Color(63,95,127)
desaturated_blue=Color(63,63,127)
desaturated_han=Color(79,63,127)
desaturated_violet=Color(95,63,127)
desaturated_purple=Color(111,63,127)
desaturated_fuchsia=Color(127,63,127)
desaturated_magenta=Color(127,63,111)
desaturated_pink=Color(127,63,95)
desaturated_crimson=Color(127,63,79)
# metallic
brass=Color(191,151,96)
copper=Color(197,136,124)
gold=Color(229,191,0)
silver=Color(203,203,203)
# miscellaneous
celadon=Color(172,255,175)
peach=Color(255,159,127)
# color functions
_lib.TCOD_color_lerp.restype = Color
def color_lerp(c1, c2, a):
return _lib.TCOD_color_lerp(c1, c2, c_float(a))
def color_set_hsv(c, h, s, v):
_lib.TCOD_color_set_HSV(byref(c), c_float(h), c_float(s), c_float(v))
def color_get_hsv(c):
h = c_float()
s = c_float()
v = c_float()
_lib.TCOD_color_get_HSV(c, byref(h), byref(s), byref(v))
return h.value, s.value, v.value
def color_scale_HSV(c, scoef, vcoef) :
_lib.TCOD_color_scale_HSV(byref(c),c_float(scoef),c_float(vcoef))
def color_gen_map(colors, indexes):
ccolors = (Color * len(colors))(*colors)
cindexes = (c_int * len(indexes))(*indexes)
cres = (Color * (max(indexes) + 1))()
_lib.TCOD_color_gen_map(cres, len(colors), ccolors, cindexes)
return cres
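# Illustrative gradient: interpolate black -> red -> white over 256 entries,
# anchoring the key colors at indexes 0, 128 and 255:
#   gradient = color_gen_map([Color(0, 0, 0), Color(255, 0, 0),
#                             Color(255, 255, 255)], [0, 128, 255])
#   # gradient[i] is the interpolated Color for intensity i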
############################
# console module
############################
class Key(Structure):
_fields_=[('vk', c_int),
('c', c_uint8),
('pressed', c_bool),
('lalt', c_bool),
('lctrl', c_bool),
('ralt', c_bool),
('rctrl', c_bool),
('shift', c_bool),
]
class ConsoleBuffer:
# simple console that allows direct (fast) access to cells. simplifies
# use of the "fill" functions.
def __init__(self, width, height, back_r=0, back_g=0, back_b=0, fore_r=0, fore_g=0, fore_b=0, char=' '):
# initialize with given width and height. values to fill the buffer
# are optional, defaults to black with no characters.
n = width * height
self.width = width
self.height = height
self.clear(back_r, back_g, back_b, fore_r, fore_g, fore_b, char)
def clear(self, back_r=0, back_g=0, back_b=0, fore_r=0, fore_g=0, fore_b=0, char=' '):
# clears the console. values to fill it with are optional, defaults
# to black with no characters.
n = self.width * self.height
self.back_r = [back_r] * n
self.back_g = [back_g] * n
self.back_b = [back_b] * n
self.fore_r = [fore_r] * n
self.fore_g = [fore_g] * n
self.fore_b = [fore_b] * n
self.char = [ord(char)] * n
def copy(self):
# returns a copy of this ConsoleBuffer.
other = ConsoleBuffer(0, 0)
other.width = self.width
other.height = self.height
other.back_r = list(self.back_r) # make explicit copies of all lists
other.back_g = list(self.back_g)
other.back_b = list(self.back_b)
other.fore_r = list(self.fore_r)
other.fore_g = list(self.fore_g)
other.fore_b = list(self.fore_b)
other.char = list(self.char)
return other
def set_fore(self, x, y, r, g, b, char):
# set the character and foreground color of one cell.
i = self.width * y + x
self.fore_r[i] = r
self.fore_g[i] = g
self.fore_b[i] = b
self.char[i] = ord(char)
def set_back(self, x, y, r, g, b):
# set the background color of one cell.
i = self.width * y + x
self.back_r[i] = r
self.back_g[i] = g
self.back_b[i] = b
def set(self, x, y, back_r, back_g, back_b, fore_r, fore_g, fore_b, char):
# set the background color, foreground color and character of one cell.
i = self.width * y + x
self.back_r[i] = back_r
self.back_g[i] = back_g
self.back_b[i] = back_b
self.fore_r[i] = fore_r
self.fore_g[i] = fore_g
self.fore_b[i] = fore_b
self.char[i] = ord(char)
def blit(self, dest, fill_fore=True, fill_back=True):
# use libtcod's "fill" functions to write the buffer to a console.
if (console_get_width(dest) != self.width or
console_get_height(dest) != self.height):
raise ValueError('ConsoleBuffer.blit: Destination console has an incorrect size.')
s = struct.Struct('%di' % len(self.back_r))
if fill_back:
_lib.TCOD_console_fill_background(dest, (c_int * len(self.back_r))(*self.back_r), (c_int * len(self.back_g))(*self.back_g), (c_int * len(self.back_b))(*self.back_b))
if fill_fore:
_lib.TCOD_console_fill_foreground(dest, (c_int * len(self.fore_r))(*self.fore_r), (c_int * len(self.fore_g))(*self.fore_g), (c_int * len(self.fore_b))(*self.fore_b))
_lib.TCOD_console_fill_char(dest, (c_int * len(self.char))(*self.char))
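# Sketch of typical ConsoleBuffer use (assumes a root console of the same
# size was created with console_init_root; console 0 denotes the root):
#   buf = ConsoleBuffer(80, 50)
#   buf.set_fore(1, 1, 255, 255, 255, '@')  # white '@' at cell (1, 1)
#   buf.blit(0)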
_lib.TCOD_console_credits_render.restype = c_bool
_lib.TCOD_console_is_fullscreen.restype = c_bool
_lib.TCOD_console_is_window_closed.restype = c_bool
_lib.TCOD_console_get_default_background.restype = Color
_lib.TCOD_console_get_default_foreground.restype = Color
_lib.TCOD_console_get_char_background.restype = Color
_lib.TCOD_console_get_char_foreground.restype = Color
_lib.TCOD_console_get_fading_color.restype = Color
_lib.TCOD_console_is_key_pressed.restype = c_bool
# background rendering modes
BKGND_NONE = 0
BKGND_SET = 1
BKGND_MULTIPLY = 2
BKGND_LIGHTEN = 3
BKGND_DARKEN = 4
BKGND_SCREEN = 5
BKGND_COLOR_DODGE = 6
BKGND_COLOR_BURN = 7
BKGND_ADD = 8
BKGND_ADDA = 9
BKGND_BURN = 10
BKGND_OVERLAY = 11
BKGND_ALPH = 12
BKGND_DEFAULT=13
def BKGND_ALPHA(a):
return BKGND_ALPH | (int(a * 255) << 8)
def BKGND_ADDALPHA(a):
return BKGND_ADDA | (int(a * 255) << 8)
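# The two helpers above pack the blend coefficient into the high byte of the
# flag value, e.g. BKGND_ALPHA(0.5) == BKGND_ALPH | (127 << 8).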
# non blocking key events types
KEY_PRESSED = 1
KEY_RELEASED = 2
# key codes
KEY_NONE = 0
KEY_ESCAPE = 1
KEY_BACKSPACE = 2
KEY_TAB = 3
KEY_ENTER = 4
KEY_SHIFT = 5
KEY_CONTROL = 6
KEY_ALT = 7
KEY_PAUSE = 8
KEY_CAPSLOCK = 9
KEY_PAGEUP = 10
KEY_PAGEDOWN = 11
KEY_END = 12
KEY_HOME = 13
KEY_UP = 14
KEY_LEFT = 15
KEY_RIGHT = 16
KEY_DOWN = 17
KEY_PRINTSCREEN = 18
KEY_INSERT = 19
KEY_DELETE = 20
KEY_LWIN = 21
KEY_RWIN = 22
KEY_APPS = 23
KEY_0 = 24
KEY_1 = 25
KEY_2 = 26
KEY_3 = 27
KEY_4 = 28
KEY_5 = 29
KEY_6 = 30
KEY_7 = 31
KEY_8 = 32
KEY_9 = 33
KEY_KP0 = 34
KEY_KP1 = 35
KEY_KP2 = 36
KEY_KP3 = 37
KEY_KP4 = 38
KEY_KP5 = 39
KEY_KP6 = 40
KEY_KP7 = 41
KEY_KP8 = 42
KEY_KP9 = 43
KEY_KPADD = 44
KEY_KPSUB = 45
KEY_KPDIV = 46
KEY_KPMUL = 47
KEY_KPDEC = 48
KEY_KPENTER = 49
KEY_F1 = 50
KEY_F2 = 51
KEY_F3 = 52
KEY_F4 = 53
KEY_F5 = 54
KEY_F6 = 55
KEY_F7 = 56
KEY_F8 = 57
KEY_F9 = 58
KEY_F10 = 59
KEY_F11 = 60
KEY_F12 = 61
KEY_NUMLOCK = 62
KEY_SCROLLLOCK = 63
KEY_SPACE = 64
KEY_CHAR = 65
# special chars
# single walls
CHAR_HLINE = 196
CHAR_VLINE = 179
CHAR_NE = 191
CHAR_NW = 218
CHAR_SE = 217
CHAR_SW = 192
CHAR_TEEW = 180
CHAR_TEEE = 195
CHAR_TEEN = 193
CHAR_TEES = 194
CHAR_CROSS = 197
# double walls
CHAR_DHLINE = 205
CHAR_DVLINE = 186
CHAR_DNE = 187
CHAR_DNW = 201
CHAR_DSE = 188
CHAR_DSW = 200
CHAR_DTEEW = 185
CHAR_DTEEE = 204
CHAR_DTEEN = 202
CHAR_DTEES = 203
CHAR_DCROSS = 206
# blocks
CHAR_BLOCK1 = 176
CHAR_BLOCK2 = 177
CHAR_BLOCK3 = 178
# arrows
CHAR_ARROW_N = 24
CHAR_ARROW_S = 25
CHAR_ARROW_E = 26
CHAR_ARROW_W = 27
# arrows without tail
CHAR_ARROW2_N = 30
CHAR_ARROW2_S = 31
CHAR_ARROW2_E = 16
CHAR_ARROW2_W = 17
# double arrows
CHAR_DARROW_H = 29
CHAR_DARROW_V = 18
# GUI stuff
CHAR_CHECKBOX_UNSET = 224
CHAR_CHECKBOX_SET = 225
CHAR_RADIO_UNSET = 9
CHAR_RADIO_SET = 10
# sub-pixel resolution kit
CHAR_SUBP_NW = 226
CHAR_SUBP_NE = 227
CHAR_SUBP_N = 228
CHAR_SUBP_SE = 229
CHAR_SUBP_DIAG = 230
CHAR_SUBP_E = 231
CHAR_SUBP_SW = 232
# misc characters
CHAR_BULLET = 7
CHAR_BULLET_INV = 8
CHAR_BULLET_SQUARE = 254
CHAR_CENT = 189
CHAR_CLUB = 5
CHAR_COPYRIGHT = 184
CHAR_CURRENCY = 207
CHAR_DIAMOND = 4
CHAR_DIVISION = 246
CHAR_EXCLAM_DOUBLE = 19
CHAR_FEMALE = 12
CHAR_FUNCTION = 159
CHAR_GRADE = 248
CHAR_HALF = 171
CHAR_HEART = 3
CHAR_LIGHT = 15
CHAR_MALE = 11
CHAR_MULTIPLICATION = 158
CHAR_NOTE = 13
CHAR_NOTE_DOUBLE = 14
CHAR_ONE_QUARTER = 172
CHAR_PILCROW = 20
CHAR_POUND = 156
CHAR_POW1 = 251
CHAR_POW2 = 253
CHAR_POW3 = 252
CHAR_RESERVED = 169
CHAR_SECTION = 21
CHAR_SMILIE = 1
CHAR_SMILIE_INV = 2
CHAR_SPADE = 6
CHAR_THREE_QUARTERS = 243
CHAR_UMLAUT = 249
CHAR_YEN = 190
# font flags
FONT_LAYOUT_ASCII_INCOL = 1
FONT_LAYOUT_ASCII_INROW = 2
FONT_TYPE_GREYSCALE = 4
FONT_TYPE_GRAYSCALE = 4
FONT_LAYOUT_TCOD = 8
# color control codes
COLCTRL_1=1
COLCTRL_2=2
COLCTRL_3=3
COLCTRL_4=4
COLCTRL_5=5
COLCTRL_NUMBER=5
COLCTRL_FORE_RGB=6
COLCTRL_BACK_RGB=7
COLCTRL_STOP=8
# renderers
RENDERER_GLSL=0
RENDERER_OPENGL=1
RENDERER_SDL=2
NB_RENDERERS=3
# alignment
LEFT=0
RIGHT=1
CENTER=2
# initializing the console
def console_init_root(w, h, title, fullscreen=False, renderer=RENDERER_SDL):
_lib.TCOD_console_init_root(w, h, c_char_p(title), fullscreen, renderer)
def console_set_custom_font(fontFile, flags=FONT_LAYOUT_ASCII_INCOL, nb_char_horiz=0, nb_char_vertic=0):
_lib.TCOD_console_set_custom_font(c_char_p(fontFile), flags, nb_char_horiz, nb_char_vertic)
def console_map_ascii_code_to_font(asciiCode, fontCharX, fontCharY):
if type(asciiCode) == str or type(asciiCode) == bytes:
_lib.TCOD_console_map_ascii_code_to_font(ord(asciiCode), fontCharX,
fontCharY)
else:
_lib.TCOD_console_map_ascii_code_to_font(asciiCode, fontCharX,
fontCharY)
def console_map_ascii_codes_to_font(firstAsciiCode, nbCodes, fontCharX,
fontCharY):
    if type(firstAsciiCode) == str or type(firstAsciiCode) == bytes:
_lib.TCOD_console_map_ascii_codes_to_font(ord(firstAsciiCode), nbCodes,
fontCharX, fontCharY)
else:
_lib.TCOD_console_map_ascii_codes_to_font(firstAsciiCode, nbCodes,
fontCharX, fontCharY)
def console_map_string_to_font(s, fontCharX, fontCharY):
if type(s) == bytes:
_lib.TCOD_console_map_string_to_font(s, fontCharX, fontCharY)
else:
_lib.TCOD_console_map_string_to_font_utf(s, fontCharX, fontCharY)
def console_is_fullscreen():
return _lib.TCOD_console_is_fullscreen()
def console_set_fullscreen(fullscreen):
_lib.TCOD_console_set_fullscreen(c_int(fullscreen))
def console_is_window_closed():
return _lib.TCOD_console_is_window_closed()
def console_set_window_title(title):
_lib.TCOD_console_set_window_title(c_char_p(title))
def console_credits():
_lib.TCOD_console_credits()
def console_credits_reset():
_lib.TCOD_console_credits_reset()
def console_credits_render(x, y, alpha):
return _lib.TCOD_console_credits_render(x, y, c_int(alpha))
def console_flush():
_lib.TCOD_console_flush()
# drawing on a console
def console_set_default_background(con, col):
_lib.TCOD_console_set_default_background(con, col)
def console_set_default_foreground(con, col):
_lib.TCOD_console_set_default_foreground(con, col)
def console_clear(con):
return _lib.TCOD_console_clear(con)
def console_put_char(con, x, y, c, flag=BKGND_DEFAULT):
if type(c) == str or type(c) == bytes:
_lib.TCOD_console_put_char(con, x, y, ord(c), flag)
else:
_lib.TCOD_console_put_char(con, x, y, c, flag)
def console_put_char_ex(con, x, y, c, fore, back):
if type(c) == str or type(c) == bytes:
_lib.TCOD_console_put_char_ex(con, x, y, ord(c), fore, back)
else:
_lib.TCOD_console_put_char_ex(con, x, y, c, fore, back)
def console_set_char_background(con, x, y, col, flag=BKGND_SET):
_lib.TCOD_console_set_char_background(con, x, y, col, flag)
def console_set_char_foreground(con, x, y, col):
_lib.TCOD_console_set_char_foreground(con, x, y, col)
def console_set_char(con, x, y, c):
if type(c) == str or type(c) == bytes:
_lib.TCOD_console_set_char(con, x, y, ord(c))
else:
_lib.TCOD_console_set_char(con, x, y, c)
def console_set_background_flag(con, flag):
_lib.TCOD_console_set_background_flag(con, c_int(flag))
def console_get_background_flag(con):
return _lib.TCOD_console_get_background_flag(con)
def console_set_alignment(con, alignment):
_lib.TCOD_console_set_alignment(con, c_int(alignment))
def console_get_alignment(con):
return _lib.TCOD_console_get_alignment(con)
def console_print(con, x, y, fmt):
if type(fmt) == bytes:
_lib.TCOD_console_print(c_void_p(con), x, y, c_char_p(fmt))
else:
_lib.TCOD_console_print_utf(c_void_p(con), x, y, fmt)
def console_print_ex(con, x, y, flag, alignment, fmt):
if type(fmt) == bytes:
_lib.TCOD_console_print_ex(c_void_p(con), x, y, flag, alignment, c_char_p(fmt))
else:
_lib.TCOD_console_print_ex_utf(c_void_p(con), x, y, flag, alignment, fmt)
def console_print_rect(con, x, y, w, h, fmt):
if type(fmt) == bytes:
return _lib.TCOD_console_print_rect(c_void_p(con), x, y, w, h, c_char_p(fmt))
else:
return _lib.TCOD_console_print_rect_utf(c_void_p(con), x, y, w, h, fmt)
def console_print_rect_ex(con, x, y, w, h, flag, alignment, fmt):
if type(fmt) == bytes:
return _lib.TCOD_console_print_rect_ex(c_void_p(con), x, y, w, h, flag, alignment, c_char_p(fmt))
else:
return _lib.TCOD_console_print_rect_ex_utf(c_void_p(con), x, y, w, h, flag, alignment, fmt)
def console_get_height_rect(con, x, y, w, h, fmt):
if type(fmt) == bytes:
return _lib.TCOD_console_get_height_rect(c_void_p(con), x, y, w, h, c_char_p(fmt))
else:
return _lib.TCOD_console_get_height_rect_utf(c_void_p(con), x, y, w, h, fmt)
def console_rect(con, x, y, w, h, clr, flag=BKGND_DEFAULT):
_lib.TCOD_console_rect(con, x, y, w, h, c_int(clr), flag)
def console_hline(con, x, y, l, flag=BKGND_DEFAULT):
_lib.TCOD_console_hline( con, x, y, l, flag)
def console_vline(con, x, y, l, flag=BKGND_DEFAULT):
_lib.TCOD_console_vline( con, x, y, l, flag)
def console_print_frame(con, x, y, w, h, clear=True, flag=BKGND_DEFAULT, fmt=0):
_lib.TCOD_console_print_frame(c_void_p(con), x, y, w, h, c_int(clear), flag, c_char_p(fmt))
def console_set_color_control(con,fore,back) :
_lib.TCOD_console_set_color_control(con,fore,back)
def console_get_default_background(con):
return _lib.TCOD_console_get_default_background(con)
def console_get_default_foreground(con):
return _lib.TCOD_console_get_default_foreground(con)
def console_get_char_background(con, x, y):
return _lib.TCOD_console_get_char_background(con, x, y)
def console_get_char_foreground(con, x, y):
return _lib.TCOD_console_get_char_foreground(con, x, y)
def console_get_char(con, x, y):
return _lib.TCOD_console_get_char(con, x, y)
def console_set_fade(fade, fadingColor):
_lib.TCOD_console_set_fade(fade, fadingColor)
##_lib.TCOD_console_set_fade_wrapper(fade, fadingColor)
def console_get_fade():
    return _lib.TCOD_console_get_fade()  # restype defaults to c_int, so this is already a plain int
def console_get_fading_color():
return _lib.TCOD_console_get_fading_color()
# handling keyboard input
def console_wait_for_keypress(flush):
k=Key()
_lib.TCOD_console_wait_for_keypress_wrapper(byref(k),c_bool(flush))
return k
def console_check_for_keypress(flags=KEY_RELEASED):
k=Key()
_lib.TCOD_console_check_for_keypress_wrapper(byref(k),c_int(flags))
return k
def console_is_key_pressed(key):
return _lib.TCOD_console_is_key_pressed(key)
def console_set_keyboard_repeat(initial_delay, interval):
_lib.TCOD_console_set_keyboard_repeat(initial_delay, interval)
def console_disable_keyboard_repeat():
_lib.TCOD_console_disable_keyboard_repeat()
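# Minimal keyboard loop sketch (illustrative): poll until escape is pressed.
#   while not console_is_window_closed():
#       key = console_check_for_keypress(KEY_PRESSED)
#       if key.vk == KEY_ESCAPE:
#           break
#       console_flush()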
# using offscreen consoles
def console_new(w, h):
return _lib.TCOD_console_new(w, h)
def console_from_file(filename):
return _lib.TCOD_console_from_file(filename)
def console_get_width(con):
return _lib.TCOD_console_get_width(con)
def console_get_height(con):
return _lib.TCOD_console_get_height(con)
def console_blit(src, x, y, w, h, dst, xdst, ydst, ffade=1.0,bfade=1.0):
_lib.TCOD_console_blit(src, x, y, w, h, dst, xdst, ydst, c_float(ffade), c_float(bfade))
def console_set_key_color(con, col):
_lib.TCOD_console_set_key_color(con, col)
def console_delete(con):
_lib.TCOD_console_delete(con)
# fast color filling
def console_fill_foreground(con,r,g,b) :
if len(r) != len(g) or len(r) != len(b):
raise TypeError('R, G and B must all have the same size.')
if (numpy_available and isinstance(r, numpy.ndarray) and
isinstance(g, numpy.ndarray) and isinstance(b, numpy.ndarray)):
#numpy arrays, use numpy's ctypes functions
r = numpy.ascontiguousarray(r, dtype=numpy.int_)
g = numpy.ascontiguousarray(g, dtype=numpy.int_)
b = numpy.ascontiguousarray(b, dtype=numpy.int_)
cr = r.ctypes.data_as(POINTER(c_int))
cg = g.ctypes.data_as(POINTER(c_int))
cb = b.ctypes.data_as(POINTER(c_int))
else:
# otherwise convert using ctypes arrays
cr = (c_int * len(r))(*r)
cg = (c_int * len(g))(*g)
cb = (c_int * len(b))(*b)
_lib.TCOD_console_fill_foreground(con, cr, cg, cb)
def console_fill_background(con,r,g,b) :
if len(r) != len(g) or len(r) != len(b):
raise TypeError('R, G and B must all have the same size.')
if (numpy_available and isinstance(r, numpy.ndarray) and
isinstance(g, numpy.ndarray) and isinstance(b, numpy.ndarray)):
#numpy arrays, use numpy's ctypes functions
r = numpy.ascontiguousarray(r, dtype=numpy.int_)
g = numpy.ascontiguousarray(g, dtype=numpy.int_)
b = numpy.ascontiguousarray(b, dtype=numpy.int_)
cr = r.ctypes.data_as(POINTER(c_int))
cg = g.ctypes.data_as(POINTER(c_int))
cb = b.ctypes.data_as(POINTER(c_int))
else:
# otherwise convert using ctypes arrays
cr = (c_int * len(r))(*r)
cg = (c_int * len(g))(*g)
cb = (c_int * len(b))(*b)
_lib.TCOD_console_fill_background(con, cr, cg, cb)
def console_fill_char(con,arr) :
if (numpy_available and isinstance(arr, numpy.ndarray) ):
#numpy arrays, use numpy's ctypes functions
arr = numpy.ascontiguousarray(arr, dtype=numpy.int_)
carr = arr.ctypes.data_as(POINTER(c_int))
else:
#otherwise convert using the struct module
carr = struct.pack('%di' % len(arr), *arr)
_lib.TCOD_console_fill_char(con, carr)
def console_load_asc(con, filename) :
_lib.TCOD_console_load_asc(con,filename)
def console_save_asc(con, filename) :
_lib.TCOD_console_save_asc(con,filename)
def console_load_apf(con, filename) :
_lib.TCOD_console_load_apf(con,filename)
def console_save_apf(con, filename) :
_lib.TCOD_console_save_apf(con,filename)
############################
# sys module
############################
_lib.TCOD_sys_get_last_frame_length.restype = c_float
_lib.TCOD_sys_elapsed_seconds.restype = c_float
# high precision time functions
def sys_set_fps(fps):
_lib.TCOD_sys_set_fps(fps)
def sys_get_fps():
return _lib.TCOD_sys_get_fps()
def sys_get_last_frame_length():
return _lib.TCOD_sys_get_last_frame_length()
def sys_sleep_milli(val):
_lib.TCOD_sys_sleep_milli(c_uint(val))
def sys_elapsed_milli():
return _lib.TCOD_sys_elapsed_milli()
def sys_elapsed_seconds():
return _lib.TCOD_sys_elapsed_seconds()
def sys_set_renderer(renderer):
_lib.TCOD_sys_set_renderer(renderer)
def sys_get_renderer():
return _lib.TCOD_sys_get_renderer()
# easy screenshots
def sys_save_screenshot(name=0):
_lib.TCOD_sys_save_screenshot(c_char_p(name))
# custom fullscreen resolution
def sys_force_fullscreen_resolution(width, height):
_lib.TCOD_sys_force_fullscreen_resolution(width, height)
def sys_get_current_resolution():
w = c_int()
h = c_int()
_lib.TCOD_sys_get_current_resolution(byref(w), byref(h))
return w.value, h.value
def sys_get_char_size():
w = c_int()
h = c_int()
_lib.TCOD_sys_get_char_size(byref(w), byref(h))
return w.value, h.value
# update font bitmap
def sys_update_char(asciiCode, fontx, fonty, img, x, y) :
_lib.TCOD_sys_update_char(c_int(asciiCode),c_int(fontx),c_int(fonty),img,c_int(x),c_int(y))
# custom SDL post renderer
SDL_RENDERER_FUNC = CFUNCTYPE(None, c_void_p)
def sys_register_SDL_renderer(callback):
global sdl_renderer_func
sdl_renderer_func = SDL_RENDERER_FUNC(callback)
_lib.TCOD_sys_register_SDL_renderer(sdl_renderer_func)
# events
EVENT_KEY_PRESS=1
EVENT_KEY_RELEASE=2
EVENT_KEY=EVENT_KEY_PRESS|EVENT_KEY_RELEASE
EVENT_MOUSE_MOVE=4
EVENT_MOUSE_PRESS=8
EVENT_MOUSE_RELEASE=16
EVENT_MOUSE=EVENT_MOUSE_MOVE|EVENT_MOUSE_PRESS|EVENT_MOUSE_RELEASE
EVENT_ANY=EVENT_KEY|EVENT_MOUSE
def sys_check_for_event(mask,k,m) :
return _lib.TCOD_sys_check_for_event(c_int(mask),byref(k),byref(m))
def sys_wait_for_event(mask,k,m,flush) :
return _lib.TCOD_sys_wait_for_event(c_int(mask),byref(k),byref(m),c_bool(flush))
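# Sketch of polling both devices in one call (Key above, Mouse further down):
#   key, mouse = Key(), Mouse()
#   ev = sys_check_for_event(EVENT_ANY, key, mouse)
#   if ev & EVENT_KEY_PRESS:
#       handle_key(key)      # handle_key/handle_mouse are hypothetical
#   if ev & EVENT_MOUSE_MOVE:
#       handle_mouse(mouse)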
############################
# line module
############################
_lib.TCOD_line_step.restype = c_bool
_lib.TCOD_line.restype=c_bool
_lib.TCOD_line_step_mt.restype = c_bool
def line_init(xo, yo, xd, yd):
_lib.TCOD_line_init(xo, yo, xd, yd)
def line_step():
x = c_int()
y = c_int()
ret = _lib.TCOD_line_step(byref(x), byref(y))
if not ret:
return x.value, y.value
return None,None
def line(xo,yo,xd,yd,py_callback) :
LINE_CBK_FUNC=CFUNCTYPE(c_bool,c_int,c_int)
c_callback=LINE_CBK_FUNC(py_callback)
return _lib.TCOD_line(xo,yo,xd,yd,c_callback)
def line_iter(xo, yo, xd, yd):
data = (c_int * 9)() # struct TCOD_bresenham_data_t
_lib.TCOD_line_init_mt(xo, yo, xd, yd, data)
x = c_int(xo)
y = c_int(yo)
done = False
while not done:
yield x.value, y.value
done = _lib.TCOD_line_step_mt(byref(x), byref(y), data)
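# Example (illustrative): collect every cell of a Bresenham line.
#   cells = list(line_iter(0, 0, 4, 2))   # starts at (0, 0), ends at (4, 2)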
############################
# image module
############################
_lib.TCOD_image_is_pixel_transparent.restype = c_bool
_lib.TCOD_image_get_pixel.restype = Color
_lib.TCOD_image_get_mipmap_pixel.restype = Color
def image_new(width, height):
return _lib.TCOD_image_new(width, height)
def image_clear(image,col) :
_lib.TCOD_image_clear(image,col)
def image_invert(image) :
_lib.TCOD_image_invert(image)
def image_hflip(image) :
_lib.TCOD_image_hflip(image)
def image_rotate90(image, num=1) :
_lib.TCOD_image_rotate90(image,num)
def image_vflip(image) :
_lib.TCOD_image_vflip(image)
def image_scale(image, neww, newh) :
_lib.TCOD_image_scale(image,c_int(neww),c_int(newh))
def image_set_key_color(image,col) :
_lib.TCOD_image_set_key_color(image,col)
def image_get_alpha(image,x,y) :
return _lib.TCOD_image_get_alpha(image,c_int(x),c_int(y))
def image_is_pixel_transparent(image,x,y) :
return _lib.TCOD_image_is_pixel_transparent(image,c_int(x),c_int(y))
def image_load(filename):
return _lib.TCOD_image_load(c_char_p(filename))
def image_from_console(console):
return _lib.TCOD_image_from_console(console)
def image_refresh_console(image, console):
_lib.TCOD_image_refresh_console(image, console)
def image_get_size(image):
w=c_int()
h=c_int()
_lib.TCOD_image_get_size(image, byref(w), byref(h))
return w.value, h.value
def image_get_pixel(image, x, y):
return _lib.TCOD_image_get_pixel(image, x, y)
def image_get_mipmap_pixel(image, x0, y0, x1, y1):
return _lib.TCOD_image_get_mipmap_pixel(image, c_float(x0), c_float(y0),
c_float(x1), c_float(y1))
def image_put_pixel(image, x, y, col):
_lib.TCOD_image_put_pixel(image, x, y, col)
##_lib.TCOD_image_put_pixel_wrapper(image, x, y, col)
def image_blit(image, console, x, y, bkgnd_flag, scalex, scaley, angle):
_lib.TCOD_image_blit(image, console, c_float(x), c_float(y), bkgnd_flag,
c_float(scalex), c_float(scaley), c_float(angle))
def image_blit_rect(image, console, x, y, w, h, bkgnd_flag):
_lib.TCOD_image_blit_rect(image, console, x, y, w, h, bkgnd_flag)
def image_blit_2x(image, console, dx, dy, sx=0, sy=0, w=-1, h=-1):
_lib.TCOD_image_blit_2x(image, console, dx,dy,sx,sy,w,h)
def image_save(image, filename):
_lib.TCOD_image_save(image, c_char_p(filename))
def image_delete(image):
_lib.TCOD_image_delete(image)
############################
# mouse module
############################
class Mouse(Structure):
_fields_=[('x', c_int),
('y', c_int),
('dx', c_int),
('dy', c_int),
('cx', c_int),
('cy', c_int),
('dcx', c_int),
('dcy', c_int),
('lbutton', c_bool),
('rbutton', c_bool),
('mbutton', c_bool),
('lbutton_pressed', c_bool),
('rbutton_pressed', c_bool),
('mbutton_pressed', c_bool),
('wheel_up', c_bool),
('wheel_down', c_bool),
]
_lib.TCOD_mouse_is_cursor_visible.restype = c_bool
def mouse_show_cursor(visible):
_lib.TCOD_mouse_show_cursor(c_int(visible))
def mouse_is_cursor_visible():
return _lib.TCOD_mouse_is_cursor_visible()
def mouse_move(x, y):
_lib.TCOD_mouse_move(x, y)
def mouse_get_status():
mouse=Mouse()
_lib.TCOD_mouse_get_status_wrapper(byref(mouse))
return mouse
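# Sketch (illustrative): react to a left click in cell coordinates.
#   m = mouse_get_status()
#   if m.lbutton_pressed:
#       on_click(m.cx, m.cy)   # on_click is hypothetical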
############################
# parser module
############################
_lib.TCOD_struct_get_name.restype = c_char_p
_lib.TCOD_struct_is_mandatory.restype = c_bool
_lib.TCOD_parser_get_bool_property.restype = c_bool
_lib.TCOD_parser_get_float_property.restype = c_float
_lib.TCOD_parser_get_string_property.restype = c_char_p
_lib.TCOD_parser_get_color_property.restype = Color
class Dice(Structure):
_fields_=[('nb_dices', c_int),
('nb_faces', c_int),
('multiplier', c_float),
('addsub', c_float),
]
def __repr__(self):
return "Dice(%d, %d, %s, %s)" % (self.nb_dices, self.nb_faces,
self.multiplier, self.addsub)
class _CValue(Union):
_fields_=[('c',c_uint8),
('i',c_int),
('f',c_float),
('s',c_char_p),
# JBR03192012 See http://bugs.python.org/issue14354 for why these are not defined as their actual types
('col',c_uint8 * 3),
('dice',c_int * 4),
('custom',c_void_p),
]
_CFUNC_NEW_STRUCT = CFUNCTYPE(c_uint, c_void_p, c_char_p)
_CFUNC_NEW_FLAG = CFUNCTYPE(c_uint, c_char_p)
_CFUNC_NEW_PROPERTY = CFUNCTYPE(c_uint, c_char_p, c_int, _CValue)
class _CParserListener(Structure):
_fields_=[('new_struct', _CFUNC_NEW_STRUCT),
('new_flag',_CFUNC_NEW_FLAG),
('new_property',_CFUNC_NEW_PROPERTY),
('end_struct',_CFUNC_NEW_STRUCT),
('error',_CFUNC_NEW_FLAG),
]
# property types
TYPE_NONE = 0
TYPE_BOOL = 1
TYPE_CHAR = 2
TYPE_INT = 3
TYPE_FLOAT = 4
TYPE_STRING = 5
TYPE_COLOR = 6
TYPE_DICE = 7
TYPE_VALUELIST00 = 8
TYPE_VALUELIST01 = 9
TYPE_VALUELIST02 = 10
TYPE_VALUELIST03 = 11
TYPE_VALUELIST04 = 12
TYPE_VALUELIST05 = 13
TYPE_VALUELIST06 = 14
TYPE_VALUELIST07 = 15
TYPE_VALUELIST08 = 16
TYPE_VALUELIST09 = 17
TYPE_VALUELIST10 = 18
TYPE_VALUELIST11 = 19
TYPE_VALUELIST12 = 20
TYPE_VALUELIST13 = 21
TYPE_VALUELIST14 = 22
TYPE_VALUELIST15 = 23
TYPE_LIST = 1024
def _convert_TCODList(clist, typ):
res = list()
for i in range(_lib.TCOD_list_size(clist)):
elt = _lib.TCOD_list_get(clist, i)
elt = cast(elt, c_void_p)
if typ == TYPE_BOOL:
elt = c_bool.from_buffer(elt).value
elif typ == TYPE_CHAR:
elt = c_char.from_buffer(elt).value
elif typ == TYPE_INT:
elt = c_int.from_buffer(elt).value
elif typ == TYPE_FLOAT:
elt = c_float.from_buffer(elt).value
elif typ == TYPE_STRING or TYPE_VALUELIST15 >= typ >= TYPE_VALUELIST00:
elt = cast(elt, c_char_p).value
elif typ == TYPE_COLOR:
elt = Color.from_buffer_copy(elt)
elif typ == TYPE_DICE:
# doesn't work
elt = Dice.from_buffer_copy(elt)
res.append(elt)
return res
def parser_new():
return _lib.TCOD_parser_new()
def parser_new_struct(parser, name):
return _lib.TCOD_parser_new_struct(parser, name)
def struct_add_flag(struct, name):
_lib.TCOD_struct_add_flag(struct, name)
def struct_add_property(struct, name, typ, mandatory):
_lib.TCOD_struct_add_property(struct, name, typ, c_bool(mandatory))
def struct_add_value_list(struct, name, value_list, mandatory):
CARRAY = c_char_p * (len(value_list) + 1)
cvalue_list = CARRAY()
for i in range(len(value_list)):
cvalue_list[i] = cast(value_list[i], c_char_p)
cvalue_list[len(value_list)] = 0
_lib.TCOD_struct_add_value_list(struct, name, cvalue_list, c_bool(mandatory))
def struct_add_list_property(struct, name, typ, mandatory):
_lib.TCOD_struct_add_list_property(struct, name, typ, c_bool(mandatory))
def struct_add_structure(struct, sub_struct):
_lib.TCOD_struct_add_structure(struct, sub_struct)
def struct_get_name(struct):
return _lib.TCOD_struct_get_name(struct)
def struct_is_mandatory(struct, name):
return _lib.TCOD_struct_is_mandatory(struct, name)
def struct_get_type(struct, name):
return _lib.TCOD_struct_get_type(struct, name)
def parser_run(parser, filename, listener=0):
if listener != 0:
clistener=_CParserListener()
def value_converter(name, typ, value):
if typ == TYPE_BOOL:
return listener.new_property(name, typ, value.c == 1)
elif typ == TYPE_CHAR:
return listener.new_property(name, typ, '%c' % (value.c & 0xFF))
elif typ == TYPE_INT:
return listener.new_property(name, typ, value.i)
elif typ == TYPE_FLOAT:
return listener.new_property(name, typ, value.f)
elif typ == TYPE_STRING or \
TYPE_VALUELIST15 >= typ >= TYPE_VALUELIST00:
return listener.new_property(name, typ, value.s)
elif typ == TYPE_COLOR:
col = cast(value.col, POINTER(Color)).contents
return listener.new_property(name, typ, col)
elif typ == TYPE_DICE:
dice = cast(value.dice, POINTER(Dice)).contents
return listener.new_property(name, typ, dice)
elif typ & TYPE_LIST:
return listener.new_property(name, typ,
_convert_TCODList(value.custom, typ & 0xFF))
return True
clistener.new_struct = _CFUNC_NEW_STRUCT(listener.new_struct)
clistener.new_flag = _CFUNC_NEW_FLAG(listener.new_flag)
clistener.new_property = _CFUNC_NEW_PROPERTY(value_converter)
clistener.end_struct = _CFUNC_NEW_STRUCT(listener.end_struct)
clistener.error = _CFUNC_NEW_FLAG(listener.error)
_lib.TCOD_parser_run(parser, c_char_p(filename), byref(clistener))
else:
_lib.TCOD_parser_run(parser, c_char_p(filename), 0)
def parser_delete(parser):
_lib.TCOD_parser_delete(parser)
def parser_get_bool_property(parser, name):
return _lib.TCOD_parser_get_bool_property(parser, c_char_p(name))
def parser_get_int_property(parser, name):
return _lib.TCOD_parser_get_int_property(parser, c_char_p(name))
def parser_get_char_property(parser, name):
return '%c' % _lib.TCOD_parser_get_char_property(parser, c_char_p(name))
def parser_get_float_property(parser, name):
return _lib.TCOD_parser_get_float_property(parser, c_char_p(name))
def parser_get_string_property(parser, name):
return _lib.TCOD_parser_get_string_property(parser, c_char_p(name))
def parser_get_color_property(parser, name):
return _lib.TCOD_parser_get_color_property(parser, c_char_p(name))
def parser_get_dice_property(parser, name):
d = Dice()
_lib.TCOD_parser_get_dice_property_py(c_void_p(parser), c_char_p(name), byref(d))
return d
def parser_get_list_property(parser, name, typ):
clist = _lib.TCOD_parser_get_list_property(parser, c_char_p(name), c_int(typ))
return _convert_TCODList(clist, typ)
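# Illustrative parser setup (file name and struct layout are assumptions):
#   parser = parser_new()
#   item = parser_new_struct(parser, 'item')
#   struct_add_property(item, 'cost', TYPE_INT, True)
#   parser_run(parser, 'items.cfg')   # default listener stores the values
#   cost = parser_get_int_property(parser, 'item.cost')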
############################
# random module
############################
_lib.TCOD_random_get_float.restype = c_float
_lib.TCOD_random_get_double.restype = c_double
RNG_MT = 0
RNG_CMWC = 1
DISTRIBUTION_LINEAR = 0
DISTRIBUTION_GAUSSIAN = 1
DISTRIBUTION_GAUSSIAN_RANGE = 2
DISTRIBUTION_GAUSSIAN_INVERSE = 3
DISTRIBUTION_GAUSSIAN_RANGE_INVERSE = 4
def random_get_instance():
return _lib.TCOD_random_get_instance()
def random_new(algo=RNG_CMWC):
return _lib.TCOD_random_new(algo)
def random_new_from_seed(seed, algo=RNG_CMWC):
return _lib.TCOD_random_new_from_seed(algo,c_uint(seed))
def random_set_distribution(rnd, dist) :
_lib.TCOD_random_set_distribution(rnd, dist)
def random_get_int(rnd, mi, ma):
return _lib.TCOD_random_get_int(rnd, mi, ma)
def random_get_float(rnd, mi, ma):
return _lib.TCOD_random_get_float(rnd, c_float(mi), c_float(ma))
def random_get_double(rnd, mi, ma):
return _lib.TCOD_random_get_double(rnd, c_double(mi), c_double(ma))
def random_get_int_mean(rnd, mi, ma, mean):
return _lib.TCOD_random_get_int_mean(rnd, mi, ma, mean)
def random_get_float_mean(rnd, mi, ma, mean):
return _lib.TCOD_random_get_float_mean(rnd, c_float(mi), c_float(ma), c_float(mean))
def random_get_double_mean(rnd, mi, ma, mean):
return _lib.TCOD_random_get_double_mean(rnd, c_double(mi), c_double(ma), c_double(mean))
def random_save(rnd):
return _lib.TCOD_random_save(rnd)
def random_restore(rnd, backup):
_lib.TCOD_random_restore(rnd, backup)
def random_delete(rnd):
_lib.TCOD_random_delete(rnd)
############################
# noise module
############################
_lib.TCOD_noise_get.restype = c_float
_lib.TCOD_noise_get_ex.restype = c_float
_lib.TCOD_noise_get_fbm.restype = c_float
_lib.TCOD_noise_get_fbm_ex.restype = c_float
_lib.TCOD_noise_get_turbulence.restype = c_float
_lib.TCOD_noise_get_turbulence_ex.restype = c_float
NOISE_DEFAULT_HURST = 0.5
NOISE_DEFAULT_LACUNARITY = 2.0
NOISE_DEFAULT = 0
NOISE_PERLIN = 1
NOISE_SIMPLEX = 2
NOISE_WAVELET = 4
_NOISE_PACKER_FUNC = (None,
(c_float * 1),
(c_float * 2),
(c_float * 3),
(c_float * 4),
)
def noise_new(dim, h=NOISE_DEFAULT_HURST, l=NOISE_DEFAULT_LACUNARITY, random=0):
return _lib.TCOD_noise_new(dim, c_float(h), c_float(l), random)
def noise_set_type(n, typ) :
_lib.TCOD_noise_set_type(n,typ)
def noise_get(n, f, typ=NOISE_DEFAULT):
return _lib.TCOD_noise_get_ex(n, _NOISE_PACKER_FUNC[len(f)](*f), typ)
def noise_get_fbm(n, f, oc, typ=NOISE_DEFAULT):
return _lib.TCOD_noise_get_fbm_ex(n, _NOISE_PACKER_FUNC[len(f)](*f), c_float(oc), typ)
def noise_get_turbulence(n, f, oc, typ=NOISE_DEFAULT):
return _lib.TCOD_noise_get_turbulence_ex(n, _NOISE_PACKER_FUNC[len(f)](*f), c_float(oc), typ)
def noise_delete(n):
_lib.TCOD_noise_delete(n)
############################
# fov module
############################
_lib.TCOD_map_is_in_fov.restype = c_bool
_lib.TCOD_map_is_transparent.restype = c_bool
_lib.TCOD_map_is_walkable.restype = c_bool
FOV_BASIC = 0
FOV_DIAMOND = 1
FOV_SHADOW = 2
FOV_PERMISSIVE_0 = 3
FOV_PERMISSIVE_1 = 4
FOV_PERMISSIVE_2 = 5
FOV_PERMISSIVE_3 = 6
FOV_PERMISSIVE_4 = 7
FOV_PERMISSIVE_5 = 8
FOV_PERMISSIVE_6 = 9
FOV_PERMISSIVE_7 = 10
FOV_PERMISSIVE_8 = 11
FOV_RESTRICTIVE = 12
NB_FOV_ALGORITHMS = 13
def FOV_PERMISSIVE(p) :
return FOV_PERMISSIVE_0+p
def map_new(w, h):
return _lib.TCOD_map_new(w, h)
def map_copy(source, dest):
return _lib.TCOD_map_copy(source, dest)
def map_set_properties(m, x, y, isTrans, isWalk):
_lib.TCOD_map_set_properties(m, x, y, c_int(isTrans), c_int(isWalk))
def map_clear(m,walkable=False,transparent=False):
_lib.TCOD_map_clear(m,c_int(walkable),c_int(transparent))
def map_compute_fov(m, x, y, radius=0, light_walls=True, algo=FOV_RESTRICTIVE ):
_lib.TCOD_map_compute_fov(m, x, y, c_int(radius), c_bool(light_walls), c_int(algo))
def map_is_in_fov(m, x, y):
return _lib.TCOD_map_is_in_fov(m, x, y)
def map_is_transparent(m, x, y):
return _lib.TCOD_map_is_transparent(m, x, y)
def map_is_walkable(m, x, y):
return _lib.TCOD_map_is_walkable(m, x, y)
def map_delete(m):
return _lib.TCOD_map_delete(m)
def map_get_width(map):
return _lib.TCOD_map_get_width(map)
def map_get_height(map):
return _lib.TCOD_map_get_height(map)
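# Typical field-of-view flow (sizes and coordinates are illustrative):
#   fov_map = map_new(80, 50)
#   map_set_properties(fov_map, 10, 10, True, True)  # transparent + walkable
#   map_compute_fov(fov_map, 5, 5, radius=8, light_walls=True)
#   visible = map_is_in_fov(fov_map, 10, 10)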
############################
# pathfinding module
############################
_lib.TCOD_path_compute.restype = c_bool
_lib.TCOD_path_is_empty.restype = c_bool
_lib.TCOD_path_walk.restype = c_bool
PATH_CBK_FUNC = CFUNCTYPE(c_float, c_int, c_int, c_int, c_int, py_object)
def path_new_using_map(m, dcost=1.41):
return (_lib.TCOD_path_new_using_map(c_void_p(m), c_float(dcost)), None)
def path_new_using_function(w, h, func, userdata=0, dcost=1.41):
cbk_func = PATH_CBK_FUNC(func)
return (_lib.TCOD_path_new_using_function(w, h, cbk_func,
py_object(userdata), c_float(dcost)), cbk_func)
def path_compute(p, ox, oy, dx, dy):
return _lib.TCOD_path_compute(p[0], ox, oy, dx, dy)
def path_get_origin(p):
x = c_int()
y = c_int()
_lib.TCOD_path_get_origin(p[0], byref(x), byref(y))
return x.value, y.value
def path_get_destination(p):
x = c_int()
y = c_int()
_lib.TCOD_path_get_destination(p[0], byref(x), byref(y))
return x.value, y.value
def path_size(p):
return _lib.TCOD_path_size(p[0])
def path_reverse(p):
_lib.TCOD_path_reverse(p[0])
def path_get(p, idx):
x = c_int()
y = c_int()
_lib.TCOD_path_get(p[0], idx, byref(x), byref(y))
return x.value, y.value
def path_is_empty(p):
return _lib.TCOD_path_is_empty(p[0])
def path_walk(p, recompute):
x = c_int()
y = c_int()
if _lib.TCOD_path_walk(p[0], byref(x), byref(y), c_int(recompute)):
return x.value, y.value
return None,None
def path_delete(p):
_lib.TCOD_path_delete(p[0])
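# A* usage sketch (fov_map as in the fov section above; coordinates illustrative):
#   p = path_new_using_map(fov_map)
#   if path_compute(p, 0, 0, 10, 10):
#       x, y = path_walk(p, True)   # next step, recomputing if blocked
#   path_delete(p)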
_lib.TCOD_dijkstra_path_set.restype = c_bool
_lib.TCOD_dijkstra_is_empty.restype = c_bool
_lib.TCOD_dijkstra_path_walk.restype = c_bool
_lib.TCOD_dijkstra_get_distance.restype = c_float
def dijkstra_new(m, dcost=1.41):
return (_lib.TCOD_dijkstra_new(c_void_p(m), c_float(dcost)), None)
def dijkstra_new_using_function(w, h, func, userdata=0, dcost=1.41):
cbk_func = PATH_CBK_FUNC(func)
    return (_lib.TCOD_dijkstra_new_using_function(w, h, cbk_func,
            py_object(userdata), c_float(dcost)), cbk_func)
def dijkstra_compute(p, ox, oy):
_lib.TCOD_dijkstra_compute(p[0], c_int(ox), c_int(oy))
def dijkstra_path_set(p, x, y):
return _lib.TCOD_dijkstra_path_set(p[0], c_int(x), c_int(y))
def dijkstra_get_distance(p, x, y):
return _lib.TCOD_dijkstra_get_distance(p[0], c_int(x), c_int(y))
def dijkstra_size(p):
return _lib.TCOD_dijkstra_size(p[0])
def dijkstra_reverse(p):
_lib.TCOD_dijkstra_reverse(p[0])
def dijkstra_get(p, idx):
x = c_int()
y = c_int()
_lib.TCOD_dijkstra_get(p[0], c_int(idx), byref(x), byref(y))
return x.value, y.value
def dijkstra_is_empty(p):
return _lib.TCOD_dijkstra_is_empty(p[0])
def dijkstra_path_walk(p):
x = c_int()
y = c_int()
if _lib.TCOD_dijkstra_path_walk(p[0], byref(x), byref(y)):
return x.value, y.value
return None,None
def dijkstra_delete(p):
_lib.TCOD_dijkstra_delete(p[0])
############################
# bsp module
############################
class _CBsp(Structure):
_fields_ = [('next', c_void_p),
('father', c_void_p),
('son', c_void_p),
('x', c_int),
('y', c_int),
('w', c_int),
('h', c_int),
('position', c_int),
('level', c_uint8),
('horizontal', c_bool),
]
_lib.TCOD_bsp_new_with_size.restype = POINTER(_CBsp)
_lib.TCOD_bsp_left.restype = POINTER(_CBsp)
_lib.TCOD_bsp_right.restype = POINTER(_CBsp)
_lib.TCOD_bsp_father.restype = POINTER(_CBsp)
_lib.TCOD_bsp_is_leaf.restype = c_bool
_lib.TCOD_bsp_contains.restype = c_bool
_lib.TCOD_bsp_find_node.restype = POINTER(_CBsp)
BSP_CBK_FUNC = CFUNCTYPE(c_int, c_void_p, c_void_p)
# python class encapsulating the _CBsp pointer
class Bsp(object):
def __init__(self, cnode):
pcbsp = cast(cnode, POINTER(_CBsp))
self.p = pcbsp
def getx(self):
return self.p.contents.x
def setx(self, value):
self.p.contents.x = value
x = property(getx, setx)
def gety(self):
return self.p.contents.y
def sety(self, value):
self.p.contents.y = value
y = property(gety, sety)
def getw(self):
return self.p.contents.w
def setw(self, value):
self.p.contents.w = value
w = property(getw, setw)
def geth(self):
return self.p.contents.h
def seth(self, value):
self.p.contents.h = value
h = property(geth, seth)
def getpos(self):
return self.p.contents.position
def setpos(self, value):
self.p.contents.position = value
position = property(getpos, setpos)
def gethor(self):
return self.p.contents.horizontal
def sethor(self,value):
self.p.contents.horizontal = value
horizontal = property(gethor, sethor)
def getlev(self):
return self.p.contents.level
def setlev(self,value):
self.p.contents.level = value
level = property(getlev, setlev)
def bsp_new_with_size(x, y, w, h):
return Bsp(_lib.TCOD_bsp_new_with_size(x, y, w, h))
def bsp_split_once(node, horizontal, position):
_lib.TCOD_bsp_split_once(node.p, c_int(horizontal), position)
def bsp_split_recursive(node, randomizer, nb, minHSize, minVSize, maxHRatio,
maxVRatio):
_lib.TCOD_bsp_split_recursive(node.p, randomizer, nb, minHSize, minVSize,
c_float(maxHRatio), c_float(maxVRatio))
def bsp_resize(node, x, y, w, h):
_lib.TCOD_bsp_resize(node.p, x, y, w, h)
def bsp_left(node):
return Bsp(_lib.TCOD_bsp_left(node.p))
def bsp_right(node):
return Bsp(_lib.TCOD_bsp_right(node.p))
def bsp_father(node):
return Bsp(_lib.TCOD_bsp_father(node.p))
def bsp_is_leaf(node):
return _lib.TCOD_bsp_is_leaf(node.p)
def bsp_contains(node, cx, cy):
return _lib.TCOD_bsp_contains(node.p, cx, cy)
def bsp_find_node(node, cx, cy):
return Bsp(_lib.TCOD_bsp_find_node(node.p, cx, cy))
def _bsp_traverse(node, callback, userData, func):
# convert the c node into a python node
    # before passing it to the actual callback
def node_converter(cnode, data):
node = Bsp(cnode)
return callback(node, data)
cbk_func = BSP_CBK_FUNC(node_converter)
func(node.p, cbk_func, userData)
def bsp_traverse_pre_order(node, callback, userData=0):
_bsp_traverse(node, callback, userData, _lib.TCOD_bsp_traverse_pre_order)
def bsp_traverse_in_order(node, callback, userData=0):
_bsp_traverse(node, callback, userData, _lib.TCOD_bsp_traverse_in_order)
def bsp_traverse_post_order(node, callback, userData=0):
_bsp_traverse(node, callback, userData, _lib.TCOD_bsp_traverse_post_order)
def bsp_traverse_level_order(node, callback, userData=0):
_bsp_traverse(node, callback, userData, _lib.TCOD_bsp_traverse_level_order)
def bsp_traverse_inverted_level_order(node, callback, userData=0):
_bsp_traverse(node, callback, userData,
_lib.TCOD_bsp_traverse_inverted_level_order)
def bsp_remove_sons(node):
_lib.TCOD_bsp_remove_sons(node.p)
def bsp_delete(node):
_lib.TCOD_bsp_delete(node.p)
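# BSP split-and-traverse sketch (illustrative callback and sizes):
#   def report(node, data):
#       if bsp_is_leaf(node):
#           print(node.x, node.y, node.w, node.h)
#       return True
#   root = bsp_new_with_size(0, 0, 80, 50)
#   bsp_split_recursive(root, 0, 4, 8, 8, 1.5, 1.5)  # 0 = default RNG
#   bsp_traverse_pre_order(root, report)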
############################
# heightmap module
############################
class _CHeightMap(Structure):
_fields_=[('w', c_int),
('h', c_int),
('values', POINTER(c_float)),
]
_lib.TCOD_heightmap_new.restype = POINTER(_CHeightMap)
_lib.TCOD_heightmap_get_value.restype = c_float
_lib.TCOD_heightmap_has_land_on_border.restype = c_bool
class HeightMap(object):
def __init__(self, chm):
pchm = cast(chm, POINTER(_CHeightMap))
self.p = pchm
def getw(self):
return self.p.contents.w
def setw(self, value):
self.p.contents.w = value
w = property(getw, setw)
def geth(self):
return self.p.contents.h
def seth(self, value):
self.p.contents.h = value
h = property(geth, seth)
def heightmap_new(w, h):
phm = _lib.TCOD_heightmap_new(w, h)
return HeightMap(phm)
def heightmap_set_value(hm, x, y, value):
_lib.TCOD_heightmap_set_value(hm.p, x, y, c_float(value))
def heightmap_add(hm, value):
_lib.TCOD_heightmap_add(hm.p, c_float(value))
def heightmap_scale(hm, value):
_lib.TCOD_heightmap_scale(hm.p, c_float(value))
def heightmap_clear(hm):
_lib.TCOD_heightmap_clear(hm.p)
def heightmap_clamp(hm, mi, ma):
_lib.TCOD_heightmap_clamp(hm.p, c_float(mi),c_float(ma))
def heightmap_copy(hm1, hm2):
_lib.TCOD_heightmap_copy(hm1.p, hm2.p)
def heightmap_normalize(hm, mi=0.0, ma=1.0):
_lib.TCOD_heightmap_normalize(hm.p, c_float(mi), c_float(ma))
def heightmap_lerp_hm(hm1, hm2, hm3, coef):
_lib.TCOD_heightmap_lerp_hm(hm1.p, hm2.p, hm3.p, c_float(coef))
def heightmap_add_hm(hm1, hm2, hm3):
_lib.TCOD_heightmap_add_hm(hm1.p, hm2.p, hm3.p)
def heightmap_multiply_hm(hm1, hm2, hm3):
_lib.TCOD_heightmap_multiply_hm(hm1.p, hm2.p, hm3.p)
def heightmap_add_hill(hm, x, y, radius, height):
_lib.TCOD_heightmap_add_hill(hm.p, c_float( x), c_float( y),
c_float( radius), c_float( height))
def heightmap_dig_hill(hm, x, y, radius, height):
_lib.TCOD_heightmap_dig_hill(hm.p, c_float( x), c_float( y),
c_float( radius), c_float( height))
def heightmap_rain_erosion(hm, nbDrops, erosionCoef, sedimentationCoef, rnd=0):
_lib.TCOD_heightmap_rain_erosion(hm.p, nbDrops, c_float( erosionCoef),
c_float( sedimentationCoef), rnd)
def heightmap_kernel_transform(hm, kernelsize, dx, dy, weight, minLevel,
maxLevel):
FARRAY = c_float * kernelsize
IARRAY = c_int * kernelsize
cdx = IARRAY(*dx)
cdy = IARRAY(*dy)
cweight = FARRAY(*weight)
_lib.TCOD_heightmap_kernel_transform(hm.p, kernelsize, cdx, cdy, cweight,
c_float(minLevel), c_float(maxLevel))
def heightmap_add_voronoi(hm, nbPoints, nbCoef, coef, rnd=0):
FARRAY = c_float * nbCoef
ccoef = FARRAY(*coef)
_lib.TCOD_heightmap_add_voronoi(hm.p, nbPoints, nbCoef, ccoef, rnd)
def heightmap_add_fbm(hm, noise, mulx, muly, addx, addy, octaves, delta, scale):
_lib.TCOD_heightmap_add_fbm(hm.p, noise, c_float(mulx), c_float(muly),
c_float(addx), c_float(addy),
c_float(octaves), c_float(delta),
c_float(scale))
def heightmap_scale_fbm(hm, noise, mulx, muly, addx, addy, octaves, delta,
scale):
_lib.TCOD_heightmap_scale_fbm(hm.p, noise, c_float(mulx), c_float(muly),
c_float(addx), c_float(addy),
c_float(octaves), c_float(delta),
c_float(scale))
def heightmap_dig_bezier(hm, px, py, startRadius, startDepth, endRadius,
endDepth):
IARRAY = c_int * 4
cpx = IARRAY(*px)
cpy = IARRAY(*py)
_lib.TCOD_heightmap_dig_bezier(hm.p, cpx, cpy, c_float(startRadius),
c_float(startDepth), c_float(endRadius),
c_float(endDepth))
def heightmap_get_value(hm, x, y):
return _lib.TCOD_heightmap_get_value(hm.p, x, y)
def heightmap_get_interpolated_value(hm, x, y):
return _lib.TCOD_heightmap_get_interpolated_value(hm.p, c_float(x),
c_float(y))
def heightmap_get_slope(hm, x, y):
return _lib.TCOD_heightmap_get_slope(hm.p, x, y)
def heightmap_get_normal(hm, x, y, waterLevel):
FARRAY = c_float * 3
cn = FARRAY()
_lib.TCOD_heightmap_get_normal(hm.p, c_float(x), c_float(y), cn,
c_float(waterLevel))
return cn[0], cn[1], cn[2]
def heightmap_count_cells(hm, mi, ma):
return _lib.TCOD_heightmap_count_cells(hm.p, c_float(mi), c_float(ma))
def heightmap_has_land_on_border(hm, waterlevel):
return _lib.TCOD_heightmap_has_land_on_border(hm.p, c_float(waterlevel))
def heightmap_get_minmax(hm):
mi = c_float()
ma = c_float()
_lib.TCOD_heightmap_get_minmax(hm.p, byref(mi), byref(ma))
return mi.value, ma.value
def heightmap_delete(hm):
_lib.TCOD_heightmap_delete(hm.p)
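# Minimal end-to-end sketch of the heightmap API defined above: build a
# map, raise a hill, normalize to the default [0.0, 1.0] range and read
# back the extremes.
def _heightmap_example():
    hm = heightmap_new(40, 40)
    heightmap_add_hill(hm, 20.0, 20.0, 10.0, 1.0)
    heightmap_normalize(hm)
    mi, ma = heightmap_get_minmax(hm)
    heightmap_delete(hm)
    return mi, ma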
############################
# name generator module
############################
_lib.TCOD_namegen_generate.restype = c_char_p
_lib.TCOD_namegen_generate_custom.restype = c_char_p
def namegen_parse(filename, random=0):
    _lib.TCOD_namegen_parse(filename, random)
def namegen_generate(name):
    return _lib.TCOD_namegen_generate(name, 0)
def namegen_generate_custom(name, rule):
    return _lib.TCOD_namegen_generate_custom(name, rule, 0)
def namegen_get_sets():
    nb = _lib.TCOD_namegen_get_nb_sets_wrapper()
    SARRAY = c_char_p * nb
    setsa = SARRAY()
    _lib.TCOD_namegen_get_sets_wrapper(setsa)
    return list(setsa)
def namegen_destroy():
    _lib.TCOD_namegen_destroy()
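# Minimal usage sketch; the .cfg path is an assumption about your data
# files, not something this module ships.
def _namegen_example():
    namegen_parse('data/namegen/mingos_norse.cfg')
    sets = namegen_get_sets()  # names of the syllable sets just parsed
    name = namegen_generate(sets[0])
    namegen_destroy()
    return name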
| mit | 6,940,255,849,136,559,000 | 30.024016 | 177 | 0.621707 | false |
solvo/derb | report_builder/forms.py | 1 | 2294 | from django import forms
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext as _
from report_builder.models import Question, Answer, Report
from report_builder.report_shortcuts import get_question_permission
class QuestionForm(forms.ModelForm):
children = forms.CharField
class Meta:
model = Question
fields = ('text', 'help', 'required', 'id')
widgets = {
'text': forms.Textarea(attrs={
'rows': 6,
'placeholder': 'Write your question here',
'class': 'form-control'
}),
'help': forms.Textarea(attrs={
'cols': 80,
'rows': 5,
'placeholder': 'A little help never hurts',
'class': 'form-control'
})
}
exclude = ('order',)
class AnswerForm(forms.ModelForm):
"""
TODO: docstring
"""
def clean_text(self):
text = self.cleaned_data['text']
required = get_question_permission(self.instance.question)
if required == 1 and not text:
raise ValidationError(_('This field is required'), code='required')
return text
class Meta:
model = Answer
fields = ('annotation', 'text')
widgets = {
'annotation': forms.Textarea(attrs={
'rows': 9,
'placeholder': 'Annotations',
'class': 'form-control'
}),
'text': forms.Textarea(attrs={
'rows': 6,
'placeholder': 'Write here your answer',
'class': 'form-control'
})
}
def save(self, db_use):
instance = super(AnswerForm, self).save(db_use)
instance.display_text = instance.text
return instance
class AdminReportForm(forms.ModelForm):
'''
Form for creating and updating a Report object
This is implementation is meant to be used in the admin report view
'''
template = forms.CharField(widget=forms.HiddenInput, max_length=1024**3, initial=' ')
order = forms.CharField(widget=forms.HiddenInput, max_length=10, initial='-1')
class Meta:
model = Report
exclude = ('type', 'questions')
| gpl-3.0 | 230,806,146,539,116,030 | 29.586667 | 89 | 0.559285 | false |
ddiazpinto/python-redsys | setup.py | 1 | 1065 | import os
from setuptools import find_packages, setup
with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as readme:
README = readme.read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='redsys',
version='0.2.6',
packages=find_packages(),
include_package_data=True,
license='MIT License',
description='A simple, clean and less dependant client for handle payments through RedSys.',
long_description=README,
url='https://github.com/ddiazpinto/python-redsys',
author='David Díaz',
author_email='[email protected]',
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
install_requires=['pycrypto>=2.6,<2.7']
)
| mit | -1,102,163,221,218,876,400 | 33.354839 | 96 | 0.646948 | false |
eleme/archer | archer/cli.py | 1 | 5921 | # -*- coding: utf-8 -*-
import os
import sys
sys.path.append('.')
import importlib
import traceback
import click
import re
from ._compat import iteritems
from .helper import make_client
class NoAppException(click.UsageError):
"""Raised if an application cannot be found or loaded."""
def locate_app(app_id):
"""Attempts to locate the application."""
if app_id is None:
return find_app_in_cwd()
if ':' in app_id:
module, app_obj = app_id.split(':', 1)
else:
module = app_id
app_obj = None
__import__(module)
mod = sys.modules[module]
if app_obj is None:
app = find_best_app(mod)
else:
app = getattr(mod, app_obj, None)
if app is None:
raise RuntimeError('Failed to find application in module "%s"'
% module)
return app
def find_best_app(module):
"""Given a module instance this tries to find the best possible
application in the module or raises an exception.
"""
from .app import Archer
for attr_name in 'app', 'application':
app = getattr(module, attr_name, None)
if app is not None and isinstance(app, Archer):
return app
# Otherwise find the only object that is a Archer instance.
matches = [v for k, v in iteritems(module.__dict__)
if isinstance(v, Archer)]
if len(matches) == 1:
return matches[0]
raise NoAppException('Failed to find application in module "%s". Are '
'you sure it contains a Archer application? '
% module.__name__)
def find_app_in_cwd():
# from examples import app
# return app
trial_modules = []
for f in os.listdir(os.getcwd()):
if f.endswith('.py') and f not in ('setup.py',):
trial_modules.append(importlib.import_module(f[:-3]))
if os.path.isdir(f):
# import pdb
# pdb.set_trace()
fs = os.listdir(f)
if '__init__.py' in fs:
trial_modules.append(importlib.import_module(f))
for module in trial_modules:
try:
return find_best_app(module)
except NoAppException:
continue
raise NoAppException
class Config(object):
def __init__(self):
self.app = None
pass_config = click.make_pass_decorator(Config, ensure=True)
@click.group()
@click.option('--app', default=None)
@pass_config
def main(config, app):
config.app = app
@main.command('run', short_help='Runs a development server.')
@click.option('--host', '-h', default='127.0.0.1',
help='The interface to bind to.')
@click.option('--port', '-p', default=6000,
help='The port to bind to.')
@click.option('--reload/--no-reload', default=True,
help='Enable or disable the reloader. By default the reloader '
)
@pass_config
def run(config, host, port, reload):
app = locate_app(config.app)
app.run(host, port, use_reloader=reload)
@main.command('shell', short_help='Runs a shell in the app context.')
@pass_config
def shell(config):
app = locate_app(config.app)
banner = 'Python %s on %s\nApp: %s%s\n' % (
sys.version,
sys.platform,
app.name,
app.debug and ' [debug]' or '',
)
ctx = {'a': 123}
ctx.update(app.make_shell_context())
sys.path.append('.')
try:
import IPython
IPython.embed(user_ns=ctx, banner1=banner)
except ImportError:
import code
code.interact(banner=banner, local=ctx)
@main.command('call', short_help='Runs a client')
@click.option('--host', '-h', default='127.0.0.1',
help='The interface to bind to.')
@click.option('--port', '-p', default=6000,
help='The port to bind to.')
@click.argument('api')
@click.argument('arguments', required=False, nargs=-1)
@pass_config
def call(config, host, port, api, arguments):
"""
call an api with given arguments, this is a command for quickly
testing if a api is working, it's better to write test case
warning: arguments of customized thrift type not supported yet
"""
arguments = ' '.join(arguments)
if ',' in arguments:
sep = '\s*,\s*'
else:
sep = '\s+'
args = re.split(sep, arguments.strip())
params = []
for arg in args:
if ':' in arg:
value, type_ = arg.split(':')
type_ = getattr(sys.modules['__builtin__'], type_)
value = type_(value)
params.append(value)
else:
try:
params.append(int(arg))
except ValueError:
params.append(arg)
app = locate_app(config.app)
client = make_client(app.service, host, port)
try:
result = getattr(client, api)(*params)
if result is not None:
click.echo(result)
else:
click.echo('OK')
except Exception:
exc_type, exc_value, exc_traceback = sys.exc_info()
click.echo(traceback.format_exc(exc_traceback))
@main.command('client', short_help='Runs a client shell')
@click.option('--host', '-h', default='127.0.0.1',
help='The interface to bind to.')
@click.option('--port', '-p', default=6000,
help='The port to bind to.')
@pass_config
def client(config, host, port):
from .helper import make_client
app = locate_app(config.app)
client = make_client(app.service, host, port)
banner = 'Python %s on %s\nApp: %s%s\n' % (
sys.version,
sys.platform,
app.name,
app.debug and ' [debug]' or '',
)
ctx = {'client': client}
ctx.update(app.make_shell_context())
sys.path.append('.')
try:
import IPython
IPython.embed(user_ns=ctx, banner1=banner)
except ImportError:
import code
code.interact(banner=banner, local=ctx)
| mit | -3,892,443,103,610,141,000 | 26.539535 | 78 | 0.582334 | false |
saurabh6790/frappe | frappe/model/document.py | 1 | 43369 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals, print_function
import frappe
import time
from frappe import _, msgprint, is_whitelisted
from frappe.utils import flt, cstr, now, get_datetime_str, file_lock, date_diff
from frappe.model.base_document import BaseDocument, get_controller
from frappe.model.naming import set_new_name
from six import iteritems, string_types
from werkzeug.exceptions import NotFound, Forbidden
import hashlib, json
from frappe.model import optional_fields, table_fields
from frappe.model.workflow import validate_workflow
from frappe.model.workflow import set_workflow_state_on_action
from frappe.utils.global_search import update_global_search
from frappe.integrations.doctype.webhook import run_webhooks
from frappe.desk.form.document_follow import follow_document
from frappe.desk.utils import slug
from frappe.core.doctype.server_script.server_script_utils import run_server_script_for_doc_event
# once_only validation
# methods
def get_doc(*args, **kwargs):
"""returns a frappe.model.Document object.
:param arg1: Document dict or DocType name.
:param arg2: [optional] document name.
:param for_update: [optional] select document for update.
There are multiple ways to call `get_doc`
# will fetch the latest user object (with child table) from the database
user = get_doc("User", "[email protected]")
# create a new object
user = get_doc({
"doctype":"User"
"email_id": "[email protected]",
"roles: [
{"role": "System Manager"}
]
})
# create new object with keyword arguments
user = get_doc(doctype='User', email_id='[email protected]')
# select a document for update
user = get_doc("User", "[email protected]", for_update=True)
"""
if args:
if isinstance(args[0], BaseDocument):
# already a document
return args[0]
elif isinstance(args[0], string_types):
doctype = args[0]
elif isinstance(args[0], dict):
# passed a dict
kwargs = args[0]
else:
raise ValueError('First non keyword argument must be a string or dict')
if len(args) < 2 and kwargs:
if 'doctype' in kwargs:
doctype = kwargs['doctype']
else:
raise ValueError('"doctype" is a required key')
controller = get_controller(doctype)
if controller:
return controller(*args, **kwargs)
raise ImportError(doctype)
class Document(BaseDocument):
"""All controllers inherit from `Document`."""
def __init__(self, *args, **kwargs):
"""Constructor.
:param arg1: DocType name as string or document **dict**
:param arg2: Document name, if `arg1` is DocType name.
If DocType name and document name are passed, the object will load
all values (including child documents) from the database.
"""
self.doctype = self.name = None
self._default_new_docs = {}
self.flags = frappe._dict()
if args and args[0] and isinstance(args[0], string_types):
# first arugment is doctype
if len(args)==1:
# single
self.doctype = self.name = args[0]
else:
self.doctype = args[0]
if isinstance(args[1], dict):
# filter
self.name = frappe.db.get_value(args[0], args[1], "name")
if self.name is None:
frappe.throw(_("{0} {1} not found").format(_(args[0]), args[1]),
frappe.DoesNotExistError)
else:
self.name = args[1]
if 'for_update' in kwargs:
self.flags.for_update = kwargs.get('for_update')
self.load_from_db()
return
if args and args[0] and isinstance(args[0], dict):
# first argument is a dict
kwargs = args[0]
if kwargs:
# init base document
super(Document, self).__init__(kwargs)
self.init_valid_columns()
else:
# incorrect arguments. let's not proceed.
raise ValueError('Illegal arguments')
@staticmethod
def whitelist(fn):
"""Decorator: Whitelist method to be called remotely via REST API."""
frappe.whitelist()(fn)
return fn
def reload(self):
"""Reload document from database"""
self.load_from_db()
def load_from_db(self):
"""Load document and children from database and create properties
from fields"""
if not getattr(self, "_metaclass", False) and self.meta.issingle:
single_doc = frappe.db.get_singles_dict(self.doctype)
if not single_doc:
single_doc = frappe.new_doc(self.doctype).as_dict()
single_doc["name"] = self.doctype
del single_doc["__islocal"]
super(Document, self).__init__(single_doc)
self.init_valid_columns()
self._fix_numeric_types()
else:
d = frappe.db.get_value(self.doctype, self.name, "*", as_dict=1, for_update=self.flags.for_update)
if not d:
frappe.throw(_("{0} {1} not found").format(_(self.doctype), self.name), frappe.DoesNotExistError)
super(Document, self).__init__(d)
if self.name=="DocType" and self.doctype=="DocType":
from frappe.model.meta import DOCTYPE_TABLE_FIELDS
table_fields = DOCTYPE_TABLE_FIELDS
else:
table_fields = self.meta.get_table_fields()
for df in table_fields:
children = frappe.db.get_values(df.options,
{"parent": self.name, "parenttype": self.doctype, "parentfield": df.fieldname},
"*", as_dict=True, order_by="idx asc")
if children:
self.set(df.fieldname, children)
else:
self.set(df.fieldname, [])
# sometimes __setup__ can depend on child values, hence calling again at the end
if hasattr(self, "__setup__"):
self.__setup__()
def get_latest(self):
if not getattr(self, "latest", None):
self.latest = frappe.get_doc(self.doctype, self.name)
return self.latest
def check_permission(self, permtype='read', permlevel=None):
"""Raise `frappe.PermissionError` if not permitted"""
if not self.has_permission(permtype):
self.raise_no_permission_to(permlevel or permtype)
def has_permission(self, permtype="read", verbose=False):
"""Call `frappe.has_permission` if `self.flags.ignore_permissions`
is not set.
:param permtype: one of `read`, `write`, `submit`, `cancel`, `delete`"""
if self.flags.ignore_permissions:
return True
return frappe.has_permission(self.doctype, permtype, self, verbose=verbose)
def raise_no_permission_to(self, perm_type):
"""Raise `frappe.PermissionError`."""
frappe.flags.error_message = _('Insufficient Permission for {0}').format(self.doctype)
raise frappe.PermissionError
def insert(self, ignore_permissions=None, ignore_links=None, ignore_if_duplicate=False,
ignore_mandatory=None, set_name=None, set_child_names=True):
"""Insert the document in the database (as a new document).
This will check for user permissions and execute `before_insert`,
`validate`, `on_update`, `after_insert` methods if they are written.
:param ignore_permissions: Do not check permissions if True."""
if self.flags.in_print:
return
self.flags.notifications_executed = []
if ignore_permissions!=None:
self.flags.ignore_permissions = ignore_permissions
if ignore_links!=None:
self.flags.ignore_links = ignore_links
if ignore_mandatory!=None:
self.flags.ignore_mandatory = ignore_mandatory
self.set("__islocal", True)
self.check_permission("create")
self._set_defaults()
self.set_user_and_timestamp()
self.set_docstatus()
self.check_if_latest()
self.run_method("before_insert")
self._validate_links()
self.set_new_name(set_name=set_name, set_child_names=set_child_names)
self.set_parent_in_children()
self.validate_higher_perm_levels()
self.flags.in_insert = True
self.run_before_save_methods()
self._validate()
self.set_docstatus()
self.flags.in_insert = False
# run validate, on update etc.
# parent
if getattr(self.meta, "issingle", 0):
self.update_single(self.get_valid_dict())
else:
try:
self.db_insert()
except frappe.DuplicateEntryError as e:
if not ignore_if_duplicate:
raise e
# children
for d in self.get_all_children():
d.db_insert()
self.run_method("after_insert")
self.flags.in_insert = True
if self.get("amended_from"):
self.copy_attachments_from_amended_from()
# flag to prevent creation of event update log for create and update both
# during document creation
self.flags.update_log_for_doc_creation = True
self.run_post_save_methods()
self.flags.in_insert = False
# delete __islocal
if hasattr(self, "__islocal"):
delattr(self, "__islocal")
# clear unsaved flag
if hasattr(self, "__unsaved"):
delattr(self, "__unsaved")
if not (frappe.flags.in_migrate or frappe.local.flags.in_install or frappe.flags.in_setup_wizard):
follow_document(self.doctype, self.name, frappe.session.user)
return self
def save(self, *args, **kwargs):
"""Wrapper for _save"""
return self._save(*args, **kwargs)
def _save(self, ignore_permissions=None, ignore_version=None):
"""Save the current document in the database in the **DocType**'s table or
`tabSingles` (for single types).
This will check for user permissions and execute
`validate` before updating, `on_update` after updating triggers.
:param ignore_permissions: Do not check permissions if True.
:param ignore_version: Do not save version if True."""
if self.flags.in_print:
return
self.flags.notifications_executed = []
if ignore_permissions!=None:
self.flags.ignore_permissions = ignore_permissions
self.flags.ignore_version = frappe.flags.in_test if ignore_version is None else ignore_version
if self.get("__islocal") or not self.get("name"):
self.insert()
return
self.check_permission("write", "save")
self.set_user_and_timestamp()
self.set_docstatus()
self.check_if_latest()
self.set_parent_in_children()
self.set_name_in_children()
self.validate_higher_perm_levels()
self._validate_links()
self.run_before_save_methods()
if self._action != "cancel":
self._validate()
if self._action == "update_after_submit":
self.validate_update_after_submit()
self.set_docstatus()
# parent
if self.meta.issingle:
self.update_single(self.get_valid_dict())
else:
self.db_update()
self.update_children()
self.run_post_save_methods()
# clear unsaved flag
if hasattr(self, "__unsaved"):
delattr(self, "__unsaved")
return self
def copy_attachments_from_amended_from(self):
"""Copy attachments from `amended_from`"""
from frappe.desk.form.load import get_attachments
#loop through attachments
for attach_item in get_attachments(self.doctype, self.amended_from):
#save attachments to new doc
_file = frappe.get_doc({
"doctype": "File",
"file_url": attach_item.file_url,
"file_name": attach_item.file_name,
"attached_to_name": self.name,
"attached_to_doctype": self.doctype,
"folder": "Home/Attachments"})
_file.save()
def update_children(self):
"""update child tables"""
for df in self.meta.get_table_fields():
self.update_child_table(df.fieldname, df)
def update_child_table(self, fieldname, df=None):
"""sync child table for given fieldname"""
rows = []
if not df:
df = self.meta.get_field(fieldname)
for d in self.get(df.fieldname):
d.db_update()
rows.append(d.name)
if df.options in (self.flags.ignore_children_type or []):
# do not delete rows for this because of flags
# hack for docperm :(
return
if rows:
# select rows that do not match the ones in the document
deleted_rows = frappe.db.sql("""select name from `tab{0}` where parent=%s
and parenttype=%s and parentfield=%s
and name not in ({1})""".format(df.options, ','.join(['%s'] * len(rows))),
[self.name, self.doctype, fieldname] + rows)
if len(deleted_rows) > 0:
# delete rows that do not match the ones in the document
frappe.db.sql("""delete from `tab{0}` where name in ({1})""".format(df.options,
','.join(['%s'] * len(deleted_rows))), tuple(row[0] for row in deleted_rows))
else:
# no rows found, delete all rows
frappe.db.sql("""delete from `tab{0}` where parent=%s
and parenttype=%s and parentfield=%s""".format(df.options),
(self.name, self.doctype, fieldname))
def get_doc_before_save(self):
return getattr(self, '_doc_before_save', None)
def has_value_changed(self, fieldname):
'''Returns true if value is changed before and after saving'''
previous = self.get_doc_before_save()
return previous.get(fieldname)!=self.get(fieldname) if previous else True
def set_new_name(self, force=False, set_name=None, set_child_names=True):
"""Calls `frappe.naming.set_new_name` for parent and child docs."""
if self.flags.name_set and not force:
return
# If autoname has set as Prompt (name)
if self.get("__newname"):
self.name = self.get("__newname")
self.flags.name_set = True
return
if set_name:
self.name = set_name
else:
set_new_name(self)
if set_child_names:
# set name for children
for d in self.get_all_children():
set_new_name(d)
self.flags.name_set = True
def get_title(self):
"""Get the document title based on title_field or `title` or `name`"""
return self.get(self.meta.get_title_field())
def set_title_field(self):
"""Set title field based on template"""
def get_values():
values = self.as_dict()
# format values
for key, value in iteritems(values):
if value==None:
values[key] = ""
return values
if self.meta.get("title_field")=="title":
df = self.meta.get_field(self.meta.title_field)
if df.options:
self.set(df.fieldname, df.options.format(**get_values()))
elif self.is_new() and not self.get(df.fieldname) and df.default:
# set default title for new transactions (if default)
self.set(df.fieldname, df.default.format(**get_values()))
def update_single(self, d):
"""Updates values for Single type Document in `tabSingles`."""
frappe.db.sql("""delete from `tabSingles` where doctype=%s""", self.doctype)
for field, value in iteritems(d):
if field != "doctype":
frappe.db.sql("""insert into `tabSingles` (doctype, field, value)
values (%s, %s, %s)""", (self.doctype, field, value))
if self.doctype in frappe.db.value_cache:
del frappe.db.value_cache[self.doctype]
def set_user_and_timestamp(self):
self._original_modified = self.modified
self.modified = now()
self.modified_by = frappe.session.user
if not self.creation:
self.creation = self.modified
if not self.owner:
self.owner = self.modified_by
for d in self.get_all_children():
d.modified = self.modified
d.modified_by = self.modified_by
if not d.owner:
d.owner = self.owner
if not d.creation:
d.creation = self.creation
frappe.flags.currently_saving.append((self.doctype, self.name))
def set_docstatus(self):
if self.docstatus==None:
self.docstatus=0
for d in self.get_all_children():
d.docstatus = self.docstatus
def _validate(self):
self._validate_mandatory()
self._validate_data_fields()
self._validate_selects()
self._validate_non_negative()
self._validate_length()
self._extract_images_from_text_editor()
self._sanitize_content()
self._save_passwords()
self.validate_workflow()
children = self.get_all_children()
for d in children:
d._validate_data_fields()
d._validate_selects()
d._validate_non_negative()
d._validate_length()
d._extract_images_from_text_editor()
d._sanitize_content()
d._save_passwords()
if self.is_new():
# don't set fields like _assign, _comments for new doc
for fieldname in optional_fields:
self.set(fieldname, None)
else:
self.validate_set_only_once()
def _validate_non_negative(self):
def get_msg(df):
if self.parentfield:
return "{} {} #{}: {} {}".format(frappe.bold(_(self.doctype)),
_("Row"), self.idx, _("Value cannot be negative for"), frappe.bold(_(df.label)))
else:
return _("Value cannot be negative for {0}: {1}").format(_(df.parent), frappe.bold(_(df.label)))
for df in self.meta.get('fields', {'non_negative': ('=', 1),
'fieldtype': ('in', ['Int', 'Float', 'Currency'])}):
if flt(self.get(df.fieldname)) < 0:
msg = get_msg(df)
frappe.throw(msg, frappe.NonNegativeError, title=_("Negative Value"))
def validate_workflow(self):
"""Validate if the workflow transition is valid"""
if frappe.flags.in_install == 'frappe': return
workflow = self.meta.get_workflow()
if workflow:
validate_workflow(self)
if not self._action == 'save':
set_workflow_state_on_action(self, workflow, self._action)
def validate_set_only_once(self):
"""Validate that fields are not changed if not in insert"""
set_only_once_fields = self.meta.get_set_only_once_fields()
if set_only_once_fields and self._doc_before_save:
# document exists before saving
for field in set_only_once_fields:
fail = False
value = self.get(field.fieldname)
original_value = self._doc_before_save.get(field.fieldname)
if field.fieldtype in table_fields:
fail = not self.is_child_table_same(field.fieldname)
elif field.fieldtype in ('Date', 'Datetime', 'Time'):
fail = str(value) != str(original_value)
else:
fail = value != original_value
if fail:
frappe.throw(_("Value cannot be changed for {0}").format(self.meta.get_label(field.fieldname)),
frappe.CannotChangeConstantError)
return False
def is_child_table_same(self, fieldname):
"""Validate child table is same as original table before saving"""
value = self.get(fieldname)
original_value = self._doc_before_save.get(fieldname)
same = True
if len(original_value) != len(value):
same = False
else:
# check all child entries
for i, d in enumerate(original_value):
new_child = value[i].as_dict(convert_dates_to_str = True)
original_child = d.as_dict(convert_dates_to_str = True)
# all fields must be same other than modified and modified_by
for key in ('modified', 'modified_by', 'creation'):
del new_child[key]
del original_child[key]
if original_child != new_child:
same = False
break
return same
def apply_fieldlevel_read_permissions(self):
"""Remove values the user is not allowed to read (called when loading in desk)"""
if frappe.session.user == "Administrator":
return
has_higher_permlevel = False
all_fields = self.meta.fields.copy()
for table_field in self.meta.get_table_fields():
all_fields += frappe.get_meta(table_field.options).fields or []
for df in all_fields:
if df.permlevel > 0:
has_higher_permlevel = True
break
if not has_higher_permlevel:
return
has_access_to = self.get_permlevel_access('read')
for df in self.meta.fields:
if df.permlevel and not df.permlevel in has_access_to:
self.set(df.fieldname, None)
for table_field in self.meta.get_table_fields():
for df in frappe.get_meta(table_field.options).fields or []:
if df.permlevel and not df.permlevel in has_access_to:
for child in self.get(table_field.fieldname) or []:
child.set(df.fieldname, None)
def validate_higher_perm_levels(self):
"""If the user does not have permissions at permlevel > 0, then reset the values to original / default"""
if self.flags.ignore_permissions or frappe.flags.in_install:
return
if frappe.session.user == "Administrator":
return
has_access_to = self.get_permlevel_access()
high_permlevel_fields = self.meta.get_high_permlevel_fields()
if high_permlevel_fields:
self.reset_values_if_no_permlevel_access(has_access_to, high_permlevel_fields)
# If new record then don't reset the values for child table
if self.is_new(): return
# check for child tables
for df in self.meta.get_table_fields():
high_permlevel_fields = frappe.get_meta(df.options).get_high_permlevel_fields()
if high_permlevel_fields:
for d in self.get(df.fieldname):
d.reset_values_if_no_permlevel_access(has_access_to, high_permlevel_fields)
def get_permlevel_access(self, permission_type='write'):
if not hasattr(self, "_has_access_to"):
self._has_access_to = {}
self._has_access_to[permission_type] = []
roles = frappe.get_roles()
for perm in self.get_permissions():
if perm.role in roles and perm.get(permission_type):
if perm.permlevel not in self._has_access_to[permission_type]:
self._has_access_to[permission_type].append(perm.permlevel)
return self._has_access_to[permission_type]
def has_permlevel_access_to(self, fieldname, df=None, permission_type='read'):
if not df:
df = self.meta.get_field(fieldname)
return df.permlevel in self.get_permlevel_access(permission_type)
def get_permissions(self):
if self.meta.istable:
# use parent permissions
permissions = frappe.get_meta(self.parenttype).permissions
else:
permissions = self.meta.permissions
return permissions
def _set_defaults(self):
if frappe.flags.in_import:
return
new_doc = frappe.new_doc(self.doctype, as_dict=True)
self.update_if_missing(new_doc)
# children
for df in self.meta.get_table_fields():
new_doc = frappe.new_doc(df.options, as_dict=True)
value = self.get(df.fieldname)
if isinstance(value, list):
for d in value:
d.update_if_missing(new_doc)
def check_if_latest(self):
"""Checks if `modified` timestamp provided by document being updated is same as the
`modified` timestamp in the database. If there is a different, the document has been
updated in the database after the current copy was read. Will throw an error if
timestamps don't match.
Will also validate document transitions (Save > Submit > Cancel) calling
`self.check_docstatus_transition`."""
conflict = False
self._action = "save"
if not self.get('__islocal') and not self.meta.get('is_virtual'):
if self.meta.issingle:
modified = frappe.db.sql("""select value from tabSingles
where doctype=%s and field='modified' for update""", self.doctype)
modified = modified and modified[0][0]
if modified and modified != cstr(self._original_modified):
conflict = True
else:
tmp = frappe.db.sql("""select modified, docstatus from `tab{0}`
where name = %s for update""".format(self.doctype), self.name, as_dict=True)
if not tmp:
frappe.throw(_("Record does not exist"))
else:
tmp = tmp[0]
modified = cstr(tmp.modified)
if modified and modified != cstr(self._original_modified):
conflict = True
self.check_docstatus_transition(tmp.docstatus)
if conflict:
frappe.msgprint(_("Error: Document has been modified after you have opened it") \
+ (" (%s, %s). " % (modified, self.modified)) \
+ _("Please refresh to get the latest document."),
raise_exception=frappe.TimestampMismatchError)
else:
self.check_docstatus_transition(0)
def check_docstatus_transition(self, docstatus):
"""Ensures valid `docstatus` transition.
Valid transitions are (number in brackets is `docstatus`):
- Save (0) > Save (0)
- Save (0) > Submit (1)
- Submit (1) > Submit (1)
- Submit (1) > Cancel (2)
"""
if not self.docstatus:
self.docstatus = 0
if docstatus==0:
if self.docstatus==0:
self._action = "save"
elif self.docstatus==1:
self._action = "submit"
self.check_permission("submit")
else:
raise frappe.DocstatusTransitionError(_("Cannot change docstatus from 0 to 2"))
elif docstatus==1:
if self.docstatus==1:
self._action = "update_after_submit"
self.check_permission("submit")
elif self.docstatus==2:
self._action = "cancel"
self.check_permission("cancel")
else:
raise frappe.DocstatusTransitionError(_("Cannot change docstatus from 1 to 0"))
elif docstatus==2:
raise frappe.ValidationError(_("Cannot edit cancelled document"))
def set_parent_in_children(self):
"""Updates `parent` and `parenttype` property in all children."""
for d in self.get_all_children():
d.parent = self.name
d.parenttype = self.doctype
def set_name_in_children(self):
# Set name for any new children
for d in self.get_all_children():
if not d.name:
set_new_name(d)
def validate_update_after_submit(self):
if self.flags.ignore_validate_update_after_submit:
return
self._validate_update_after_submit()
for d in self.get_all_children():
if d.is_new() and self.meta.get_field(d.parentfield).allow_on_submit:
# in case of a new row, don't validate allow on submit, if table is allow on submit
continue
d._validate_update_after_submit()
# TODO check only allowed values are updated
def _validate_mandatory(self):
if self.flags.ignore_mandatory:
return
missing = self._get_missing_mandatory_fields()
for d in self.get_all_children():
missing.extend(d._get_missing_mandatory_fields())
if not missing:
return
for fieldname, msg in missing:
msgprint(msg)
if frappe.flags.print_messages:
print(self.as_json().encode("utf-8"))
raise frappe.MandatoryError('[{doctype}, {name}]: {fields}'.format(
fields=", ".join((each[0] for each in missing)),
doctype=self.doctype,
name=self.name))
def _validate_links(self):
if self.flags.ignore_links or self._action == "cancel":
return
invalid_links, cancelled_links = self.get_invalid_links()
for d in self.get_all_children():
result = d.get_invalid_links(is_submittable=self.meta.is_submittable)
invalid_links.extend(result[0])
cancelled_links.extend(result[1])
if invalid_links:
msg = ", ".join((each[2] for each in invalid_links))
frappe.throw(_("Could not find {0}").format(msg),
frappe.LinkValidationError)
if cancelled_links:
msg = ", ".join((each[2] for each in cancelled_links))
frappe.throw(_("Cannot link cancelled document: {0}").format(msg),
frappe.CancelledLinkError)
def get_all_children(self, parenttype=None):
"""Returns all children documents from **Table** type field in a list."""
ret = []
for df in self.meta.get("fields", {"fieldtype": ['in', table_fields]}):
if parenttype:
if df.options==parenttype:
return self.get(df.fieldname)
value = self.get(df.fieldname)
if isinstance(value, list):
ret.extend(value)
return ret
def run_method(self, method, *args, **kwargs):
"""run standard triggers, plus those in hooks"""
if "flags" in kwargs:
del kwargs["flags"]
if hasattr(self, method) and hasattr(getattr(self, method), "__call__"):
fn = lambda self, *args, **kwargs: getattr(self, method)(*args, **kwargs)
else:
# hack! to run hooks even if method does not exist
fn = lambda self, *args, **kwargs: None
fn.__name__ = str(method)
out = Document.hook(fn)(self, *args, **kwargs)
self.run_notifications(method)
run_webhooks(self, method)
run_server_script_for_doc_event(self, method)
return out
def run_trigger(self, method, *args, **kwargs):
return self.run_method(method, *args, **kwargs)
def run_notifications(self, method):
"""Run notifications for this method"""
if (frappe.flags.in_import and frappe.flags.mute_emails) or frappe.flags.in_patch or frappe.flags.in_install:
return
if self.flags.notifications_executed==None:
self.flags.notifications_executed = []
from frappe.email.doctype.notification.notification import evaluate_alert
if self.flags.notifications == None:
alerts = frappe.cache().hget('notifications', self.doctype)
if alerts==None:
alerts = frappe.get_all('Notification', fields=['name', 'event', 'method'],
filters={'enabled': 1, 'document_type': self.doctype})
frappe.cache().hset('notifications', self.doctype, alerts)
self.flags.notifications = alerts
if not self.flags.notifications:
return
def _evaluate_alert(alert):
if not alert.name in self.flags.notifications_executed:
evaluate_alert(self, alert.name, alert.event)
self.flags.notifications_executed.append(alert.name)
event_map = {
"on_update": "Save",
"after_insert": "New",
"on_submit": "Submit",
"on_cancel": "Cancel"
}
if not self.flags.in_insert:
# value change is not applicable in insert
event_map['on_change'] = 'Value Change'
for alert in self.flags.notifications:
event = event_map.get(method, None)
if event and alert.event == event:
_evaluate_alert(alert)
elif alert.event=='Method' and method == alert.method:
_evaluate_alert(alert)
@whitelist.__func__
def _submit(self):
"""Submit the document. Sets `docstatus` = 1, then saves."""
self.docstatus = 1
self.save()
@whitelist.__func__
def _cancel(self):
"""Cancel the document. Sets `docstatus` = 2, then saves."""
self.docstatus = 2
self.save()
@whitelist.__func__
def submit(self):
"""Submit the document. Sets `docstatus` = 1, then saves."""
self._submit()
@whitelist.__func__
def cancel(self):
"""Cancel the document. Sets `docstatus` = 2, then saves."""
self._cancel()
def delete(self, ignore_permissions=False):
"""Delete document."""
frappe.delete_doc(self.doctype, self.name, ignore_permissions = ignore_permissions, flags=self.flags)
def run_before_save_methods(self):
"""Run standard methods before `INSERT` or `UPDATE`. Standard Methods are:
- `validate`, `before_save` for **Save**.
- `validate`, `before_submit` for **Submit**.
- `before_cancel` for **Cancel**
- `before_update_after_submit` for **Update after Submit**
Will also update title_field if set"""
self.load_doc_before_save()
self.reset_seen()
# before_validate method should be executed before ignoring validations
if self._action in ("save", "submit"):
self.run_method("before_validate")
if self.flags.ignore_validate:
return
if self._action=="save":
self.run_method("validate")
self.run_method("before_save")
elif self._action=="submit":
self.run_method("validate")
self.run_method("before_submit")
elif self._action=="cancel":
self.run_method("before_cancel")
elif self._action=="update_after_submit":
self.run_method("before_update_after_submit")
self.set_title_field()
def load_doc_before_save(self):
"""Save load document from db before saving"""
self._doc_before_save = None
if not self.is_new():
try:
self._doc_before_save = frappe.get_doc(self.doctype, self.name)
except frappe.DoesNotExistError:
self._doc_before_save = None
frappe.clear_last_message()
def run_post_save_methods(self):
"""Run standard methods after `INSERT` or `UPDATE`. Standard Methods are:
- `on_update` for **Save**.
- `on_update`, `on_submit` for **Submit**.
- `on_cancel` for **Cancel**
- `update_after_submit` for **Update after Submit**"""
doc_before_save = self.get_doc_before_save()
if self._action=="save":
self.run_method("on_update")
elif self._action=="submit":
self.run_method("on_update")
self.run_method("on_submit")
elif self._action=="cancel":
self.run_method("on_cancel")
self.check_no_back_links_exist()
elif self._action=="update_after_submit":
self.run_method("on_update_after_submit")
self.clear_cache()
self.notify_update()
update_global_search(self)
self.save_version()
self.run_method('on_change')
if (self.doctype, self.name) in frappe.flags.currently_saving:
frappe.flags.currently_saving.remove((self.doctype, self.name))
self.latest = None
def clear_cache(self):
frappe.clear_document_cache(self.doctype, self.name)
def reset_seen(self):
"""Clear _seen property and set current user as seen"""
if getattr(self.meta, 'track_seen', False):
frappe.db.set_value(self.doctype, self.name, "_seen", json.dumps([frappe.session.user]), update_modified=False)
def notify_update(self):
"""Publish realtime that the current document is modified"""
if frappe.flags.in_patch: return
frappe.publish_realtime("doc_update", {"modified": self.modified, "doctype": self.doctype, "name": self.name},
doctype=self.doctype, docname=self.name, after_commit=True)
if not self.meta.get("read_only") and not self.meta.get("issingle") and \
not self.meta.get("istable"):
data = {
"doctype": self.doctype,
"name": self.name,
"user": frappe.session.user
}
frappe.publish_realtime("list_update", data, after_commit=True)
def db_set(self, fieldname, value=None, update_modified=True, notify=False, commit=False):
"""Set a value in the document object, update the timestamp and update the database.
WARNING: This method does not trigger controller validations and should
be used very carefully.
:param fieldname: fieldname of the property to be updated, or a {"field":"value"} dictionary
:param value: value of the property to be updated
:param update_modified: default True. updates the `modified` and `modified_by` properties
:param notify: default False. run doc.notify_updated() to send updates via socketio
:param commit: default False. run frappe.db.commit()
"""
if isinstance(fieldname, dict):
self.update(fieldname)
else:
self.set(fieldname, value)
if update_modified and (self.doctype, self.name) not in frappe.flags.currently_saving:
# don't update modified timestamp if called from post save methods
# like on_update or on_submit
self.set("modified", now())
self.set("modified_by", frappe.session.user)
self.load_doc_before_save()
# to trigger notification on value change
self.run_method('before_change')
frappe.db.set_value(self.doctype, self.name, fieldname, value,
self.modified, self.modified_by, update_modified=update_modified)
self.run_method('on_change')
if notify:
self.notify_update()
self.clear_cache()
if commit:
frappe.db.commit()
def db_get(self, fieldname):
"""get database value for this fieldname"""
return frappe.db.get_value(self.doctype, self.name, fieldname)
def check_no_back_links_exist(self):
"""Check if document links to any active document before Cancel."""
from frappe.model.delete_doc import check_if_doc_is_linked, check_if_doc_is_dynamically_linked
if not self.flags.ignore_links:
check_if_doc_is_linked(self, method="Cancel")
check_if_doc_is_dynamically_linked(self, method="Cancel")
def save_version(self):
"""Save version info"""
# don't track version under following conditions
if (not getattr(self.meta, 'track_changes', False)
or self.doctype == 'Version'
or self.flags.ignore_version
or frappe.flags.in_install
or (not self._doc_before_save and frappe.flags.in_patch)):
return
version = frappe.new_doc('Version')
if not self._doc_before_save:
version.for_insert(self)
version.insert(ignore_permissions=True)
elif version.set_diff(self._doc_before_save, self):
version.insert(ignore_permissions=True)
if not frappe.flags.in_migrate:
# follow since you made a change?
follow_document(self.doctype, self.name, frappe.session.user)
@staticmethod
def hook(f):
"""Decorator: Make method `hookable` (i.e. extensible by another app).
Note: If each hooked method returns a value (dict), then all returns are
collated in one dict and returned. Ideally, don't return values in hookable
methods, set properties in the document."""
def add_to_return_value(self, new_return_value):
if isinstance(new_return_value, dict):
if not self.get("_return_value"):
self._return_value = {}
self._return_value.update(new_return_value)
else:
self._return_value = new_return_value or self.get("_return_value")
def compose(fn, *hooks):
def runner(self, method, *args, **kwargs):
add_to_return_value(self, fn(self, *args, **kwargs))
for f in hooks:
add_to_return_value(self, f(self, method, *args, **kwargs))
return self._return_value
return runner
def composer(self, *args, **kwargs):
hooks = []
method = f.__name__
doc_events = frappe.get_doc_hooks()
for handler in doc_events.get(self.doctype, {}).get(method, []) \
+ doc_events.get("*", {}).get(method, []):
hooks.append(frappe.get_attr(handler))
composed = compose(f, *hooks)
return composed(self, method, *args, **kwargs)
return composer
def is_whitelisted(self, method_name):
method = getattr(self, method_name, None)
if not method:
raise NotFound("Method {0} not found".format(method_name))
is_whitelisted(getattr(method, '__func__', method))
def validate_value(self, fieldname, condition, val2, doc=None, raise_exception=None):
"""Check that value of fieldname should be 'condition' val2
else throw Exception."""
error_condition_map = {
"in": _("one of"),
"not in": _("none of"),
"^": _("beginning with"),
}
if not doc:
doc = self
val1 = doc.get_value(fieldname)
df = doc.meta.get_field(fieldname)
val2 = doc.cast(val2, df)
if not frappe.compare(val1, condition, val2):
label = doc.meta.get_label(fieldname)
condition_str = error_condition_map.get(condition, condition)
if doc.parentfield:
msg = _("Incorrect value in row {0}: {1} must be {2} {3}").format(doc.idx, label, condition_str, val2)
else:
msg = _("Incorrect value: {0} must be {1} {2}").format(label, condition_str, val2)
# raise passed exception or True
msgprint(msg, raise_exception=raise_exception or True)
def validate_table_has_rows(self, parentfield, raise_exception=None):
"""Raise exception if Table field is empty."""
if not (isinstance(self.get(parentfield), list) and len(self.get(parentfield)) > 0):
label = self.meta.get_label(parentfield)
frappe.throw(_("Table {0} cannot be empty").format(label), raise_exception or frappe.EmptyTableError)
def round_floats_in(self, doc, fieldnames=None):
"""Round floats for all `Currency`, `Float`, `Percent` fields for the given doc.
:param doc: Document whose numeric properties are to be rounded.
:param fieldnames: [Optional] List of fields to be rounded."""
if not fieldnames:
fieldnames = (df.fieldname for df in
doc.meta.get("fields", {"fieldtype": ["in", ["Currency", "Float", "Percent"]]}))
for fieldname in fieldnames:
doc.set(fieldname, flt(doc.get(fieldname), self.precision(fieldname, doc.parentfield)))
def get_url(self):
"""Returns Desk URL for this document. `/app/{doctype}/{name}`"""
return f"/app/{slug(self.doctype)}/{self.name}"
def add_comment(self, comment_type='Comment', text=None, comment_email=None, link_doctype=None, link_name=None, comment_by=None):
"""Add a comment to this document.
:param comment_type: e.g. `Comment`. See Communication for more info."""
out = frappe.get_doc({
"doctype":"Comment",
'comment_type': comment_type,
"comment_email": comment_email or frappe.session.user,
"comment_by": comment_by,
"reference_doctype": self.doctype,
"reference_name": self.name,
"content": text or comment_type,
"link_doctype": link_doctype,
"link_name": link_name
}).insert(ignore_permissions=True)
return out
def add_seen(self, user=None):
"""add the given/current user to list of users who have seen this document (_seen)"""
if not user:
user = frappe.session.user
if self.meta.track_seen:
_seen = self.get('_seen') or []
_seen = frappe.parse_json(_seen)
if user not in _seen:
_seen.append(user)
frappe.db.set_value(self.doctype, self.name, '_seen', json.dumps(_seen), update_modified=False)
frappe.local.flags.commit = True
def add_viewed(self, user=None):
"""add log to communication when a user views a document"""
if not user:
user = frappe.session.user
if hasattr(self.meta, 'track_views') and self.meta.track_views:
frappe.get_doc({
"doctype": "View Log",
"viewed_by": frappe.session.user,
"reference_doctype": self.doctype,
"reference_name": self.name,
}).insert(ignore_permissions=True)
frappe.local.flags.commit = True
def get_signature(self):
"""Returns signature (hash) for private URL."""
return hashlib.sha224(get_datetime_str(self.creation).encode()).hexdigest()
def get_liked_by(self):
liked_by = getattr(self, "_liked_by", None)
if liked_by:
return json.loads(liked_by)
else:
return []
def set_onload(self, key, value):
if not self.get("__onload"):
self.set("__onload", frappe._dict())
self.get("__onload")[key] = value
def get_onload(self, key=None):
if not key:
return self.get("__onload", frappe._dict())
return self.get('__onload')[key]
def queue_action(self, action, **kwargs):
"""Run an action in background. If the action has an inner function,
like _submit for submit, it will call that instead"""
# call _submit instead of submit, so you can override submit to call
# run_delayed based on some action
# See: Stock Reconciliation
from frappe.utils.background_jobs import enqueue
if hasattr(self, '_' + action):
action = '_' + action
if file_lock.lock_exists(self.get_signature()):
frappe.throw(_('This document is currently queued for execution. Please try again'),
title=_('Document Queued'))
self.lock()
enqueue('frappe.model.document.execute_action', doctype=self.doctype, name=self.name,
action=action, **kwargs)
def lock(self, timeout=None):
"""Creates a lock file for the given document. If timeout is set,
it will retry every 1 second for acquiring the lock again
:param timeout: Timeout in seconds, default 0"""
signature = self.get_signature()
if file_lock.lock_exists(signature):
lock_exists = True
if timeout:
for i in range(timeout):
time.sleep(1)
if not file_lock.lock_exists(signature):
lock_exists = False
break
if lock_exists:
raise frappe.DocumentLockedError
file_lock.create_lock(signature)
def unlock(self):
"""Delete the lock file for this document"""
file_lock.delete_lock(self.get_signature())
# validation helpers
def validate_from_to_dates(self, from_date_field, to_date_field):
"""
Generic validation to verify date sequence
"""
if date_diff(self.get(to_date_field), self.get(from_date_field)) < 0:
frappe.throw(_('{0} must be after {1}').format(
frappe.bold(self.meta.get_label(to_date_field)),
frappe.bold(self.meta.get_label(from_date_field)),
), frappe.exceptions.InvalidDates)
def get_assigned_users(self):
assignments = frappe.get_all('ToDo',
fields=['owner'],
filters={
'reference_type': self.doctype,
'reference_name': self.name,
'status': ('!=', 'Cancelled'),
})
users = set([assignment.owner for assignment in assignments])
return users
def add_tag(self, tag):
"""Add a Tag to this document"""
from frappe.desk.doctype.tag.tag import DocTags
DocTags(self.doctype).add(self.name, tag)
def get_tags(self):
"""Return a list of Tags attached to this document"""
from frappe.desk.doctype.tag.tag import DocTags
return DocTags(self.doctype).get_tags(self.name).split(",")[1:]
def __repr__(self):
name = self.name or "unsaved"
doctype = self.__class__.__name__
docstatus = f" docstatus={self.docstatus}" if self.docstatus else ""
parent = f" parent={self.parent}" if self.parent else ""
return f"<{doctype}: {name}{docstatus}{parent}>"
def __str__(self):
name = self.name or "unsaved"
doctype = self.__class__.__name__
return f"{doctype}({name})"
def execute_action(doctype, name, action, **kwargs):
"""Execute an action on a document (called by background worker)"""
doc = frappe.get_doc(doctype, name)
doc.unlock()
try:
getattr(doc, action)(**kwargs)
except Exception:
frappe.db.rollback()
# add a comment (?)
if frappe.local.message_log:
msg = json.loads(frappe.local.message_log[-1]).get('message')
else:
msg = '<pre><code>' + frappe.get_traceback() + '</pre></code>'
doc.add_comment('Comment', _('Action Failed') + '<br><br>' + msg)
doc.notify_update()
| mit | -3,114,780,777,890,336,000 | 30.290765 | 130 | 0.687219 | false |
ikalnytskyi/holocron | tests/_processors/test_when.py | 1 | 6833 | """When processor test suite."""
import collections.abc
import itertools
import pathlib
import pytest
import holocron
from holocron._processors import when
@pytest.fixture(scope="function")
def testapp(request):
def spam(app, items, *, text=42):
for item in items:
item["spam"] = text
yield item
def rice(app, items):
yield from items
yield holocron.Item({"content": "rice"})
def eggs(app, items):
while True:
try:
item_a = next(items)
item_b = next(items)
except StopIteration:
break
else:
yield holocron.Item({"key": item_a["key"] + item_b["key"]})
instance = holocron.Application()
instance.add_processor("spam", spam)
instance.add_processor("rice", rice)
instance.add_processor("eggs", eggs)
return instance
@pytest.mark.parametrize(
["cond", "item"],
[
pytest.param(
"item.author == 'yoda'",
{"content": "eh", "author": "yoda", "spam": 42},
id="matched",
),
pytest.param(
"item.author == 'luke'",
{"content": "eh", "author": "yoda"},
id="skipped",
),
],
)
def test_item_spam(testapp, cond, item):
"""When processor has to work with a simple processor!"""
stream = when.process(
testapp,
[holocron.Item({"content": "eh", "author": "yoda"})],
processor={"name": "spam"},
condition=[cond],
)
assert isinstance(stream, collections.abc.Iterable)
assert list(stream) == [holocron.Item(item)]
@pytest.mark.parametrize(
["amount"],
[
pytest.param(0),
pytest.param(1),
pytest.param(2),
pytest.param(5),
pytest.param(10),
],
)
def test_item_many_spam(testapp, amount):
"""When processor has to work with a stream."""
stream = when.process(
testapp,
[
holocron.Item({"content": "the great jedi", "key": i})
for i in range(amount)
],
processor={"name": "spam"},
condition=["item.key % 2 == 0"],
)
assert isinstance(stream, collections.abc.Iterable)
assert list(stream) == [
holocron.Item({"content": "the great jedi", "key": i})
if i % 2
else holocron.Item({"content": "the great jedi", "key": i, "spam": 42})
for i in range(amount)
]
@pytest.mark.parametrize(
["amount"],
[
pytest.param(0),
pytest.param(1),
pytest.param(2),
pytest.param(5),
pytest.param(10),
],
)
def test_item_many_rice(testapp, amount):
"""When processor has to work with a processor that populates a stream."""
stream = when.process(
testapp,
[
holocron.Item({"content": "the great jedi", "key": i})
for i in range(amount)
],
processor={"name": "rice"},
condition=["item.key % 2 == 0"],
)
assert isinstance(stream, collections.abc.Iterable)
assert list(stream) == list(
itertools.chain(
[
holocron.Item({"content": "the great jedi", "key": i})
for i in range(amount)
],
[holocron.Item({"content": "rice"})],
)
)
def test_item_many_eggs(testapp):
"""When processor has to work with complex processor."""
stream = when.process(
testapp,
[
holocron.Item({"content": "the great jedi", "key": i})
for i in range(5)
],
processor={"name": "eggs"},
condition=["item.key % 2 != 0"],
)
assert isinstance(stream, collections.abc.Iterable)
assert list(stream) == [
holocron.Item({"content": "the great jedi", "key": 0}),
holocron.Item({"content": "the great jedi", "key": 2}),
holocron.Item({"key": 4}),
holocron.Item({"content": "the great jedi", "key": 4}),
]
@pytest.mark.parametrize(
["cond"],
[
pytest.param([r"item.author == 'yoda'"], id="=="),
pytest.param([r"item.source.suffix == '.md'"], id="endswith"),
pytest.param(
[r"item.author == 'yoda'", "item.source.suffix == '.md'"],
id="two-conditions",
),
pytest.param([r"item.source | match('.*\.md')"], id="match-md"),
pytest.param([r"item.source | match('^about.*')"], id="match-about"),
],
)
def test_args_condition(testapp, cond):
"""When processor has to respect conditions."""
stream = when.process(
testapp,
[
holocron.Item(
{
"content": "eh",
"author": "yoda",
"source": pathlib.Path("about", "index.md"),
}
),
holocron.Item(
{"author": "luke", "source": pathlib.Path("me.rst")}
),
],
processor={"name": "spam"},
condition=cond,
)
assert isinstance(stream, collections.abc.Iterable)
assert list(stream) == [
holocron.Item(
{
"content": "eh",
"author": "yoda",
"source": pathlib.Path("about", "index.md"),
"spam": 42,
}
),
holocron.Item({"author": "luke", "source": pathlib.Path("me.rst")}),
]
@pytest.mark.parametrize(
["cond"],
[
pytest.param([r"item.author == 'yoda'"], id="=="),
pytest.param([r"item.source.suffix == '.md'"], id="endswith"),
pytest.param(
[r"item.author == 'yoda'", "item.source.suffix == '.md'"],
id="two-conditions",
),
pytest.param([r"item.source | match('.*\.md')"], id="match-md"),
pytest.param([r"item.source | match('^about.*')"], id="match-about"),
],
)
def test_args_condition_positional(testapp, cond):
"""When processor has to respect conditions."""
stream = when.process(
testapp,
[
holocron.Item(
{
"content": "eh",
"author": "yoda",
"source": pathlib.Path("about", "index.md"),
}
),
holocron.Item(
{"author": "luke", "source": pathlib.Path("me.rst")}
),
],
{"name": "spam"},
*cond,
)
assert isinstance(stream, collections.abc.Iterable)
assert list(stream) == [
holocron.Item(
{
"content": "eh",
"author": "yoda",
"source": pathlib.Path("about", "index.md"),
"spam": 42,
}
),
holocron.Item({"author": "luke", "source": pathlib.Path("me.rst")}),
]
| bsd-3-clause | -1,684,610,959,071,357,000 | 26.115079 | 79 | 0.493195 | false |
aknuds1/srl-python-lib | Tests/testqtgui/testwidgets.py | 1 | 7362 | from _common import *
if has_qt4:
from PyQt4 import QtGui, QtCore
from PyQt4.QtCore import Qt
import srllib.qtgui.widgets
if has_qt4:
class _LineEdit(srllib.qtgui.widgets._LineEditHelper,
guimocks.QLineEditMock):
_qbase = __qbase = guimocks.QLineEditMock
def __init__(self, contents="", undo_stack=None, undo_text=None, pos=None):
self.__qbase.__init__(self, returnValues={"text": contents})
srllib.qtgui.widgets._LineEditHelper.__init__(self, undo_stack,
undo_text, self.__qbase)
if pos is None:
pos = len(contents) + 1
self.setCursorPosition(pos)
@only_qt4
class LineEditTest(QtTestCase):
def test_construct_with_undo(self):
""" Test constructing with undo. """
# Test default label for undo operation
edit, stack = self.__construct("Test", undo=True)
edit.emit(QtCore.SIGNAL("textEdited(const QString&)"), "New")
self.assertEqual(stack.undoText(), "edit text")
# Test label for undo operation
edit, stack = self.__construct("Test", undo=True, undo_text=
"editing test")
edit.emit(QtCore.SIGNAL("textEdited(const QString&)"), "New")
self.assertEqual(stack.undoText(), "editing test")
def test_undo(self):
""" Test undo functionality. """
edit, stack = self.__construct("Initial", undo=True)
edit.emit(QtCore.SIGNAL("textEdited(const QString&)"), "New")
edit.emit(QtCore.SIGNAL("textEdited(const QString&)"), "New0")
edit.emit(QtCore.SIGNAL("editingFinished()"))
edit.emit(QtCore.SIGNAL("textEdited(const QString&)"), "New1")
stack.undo()
edit.mockCheckNamedCall(self, "setText", -1, "New0")
stack.undo()
edit.mockCheckNamedCall(self, "setText", -1, "Initial")
stack.redo()
edit.mockCheckNamedCall(self, "setText", -1, "New0")
stack.redo()
edit.mockCheckNamedCall(self, "setText", -1, "New1")
stack.undo()
edit.mockCheckNamedCall(self, "setText", -1, "New0")
def test_undo_setText(self):
""" Test undo in conjunction with setText. """
edit, stack = self.__construct(undo=True)
edit.setText("Test")
self.assertNot(stack.canUndo())
edit.emit(QtCore.SIGNAL("textEdited(const QString&)"), "New")
stack.undo()
edit.mockCheckNamedCall(self, "setText", -1, "Test")
def test_undo_setText_undoable(self):
""" Test undo in conjunction with setText, with undoable=True. """
edit, stack = self.__construct("Old", undo=True)
edit.setText("New", undoable=True)
stack.undo()
edit.mockCheckNamedCall(self, "setText", -1, "Old")
def test_editText_cursor(self):
"""Verify that the cursor position is kept."""
edit, stack = self.__construct("Txt", undo=True, pos=1)
edit.emit(QtCore.SIGNAL("textEdited(const QString&)"), "Text")
self.assertEqual(edit.cursorPosition(), 1)
def __construct(self, contents=None, undo=False,
undo_text=None, pos=None):
if contents is None:
contents = QtCore.QString()
        undo_stack = QtGui.QUndoStack() if undo else None
edit = _LineEdit(contents, undo_stack=undo_stack, undo_text=undo_text,
pos=pos)
if not undo:
return edit
return edit, undo_stack
if has_qt4:
class _NumericalLineEdit(srllib.qtgui.widgets._NumericalLineEditHelper,
_LineEdit):
_qbase = _LineEdit
def __init__(self, floating_point, contents, minimum, maximum):
self._qbase.__init__(self, contents=contents)
srllib.qtgui.widgets._NumericalLineEditHelper.__init__(self,
floating_point, minimum, maximum)
@only_qt4
class NumericalLineEditTest(QtTestCase):
def test_construct(self):
edit = self.__construct(True)
call = edit.mockGetNamedCall("setValidator", 0)
self.assert_(isinstance(call.args[0], QtGui.QDoubleValidator))
edit = self.__construct(True, minimum=0, maximum=1)
call = edit.mockGetNamedCall("setValidator", 0)
vtor = call.args[0]
self.assert_(isinstance(vtor, QtGui.QDoubleValidator))
self.assertEqual(vtor.bottom(), 0)
self.assertEqual(vtor.top(), 1)
edit = self.__construct(False)
call = edit.mockGetNamedCall("setValidator", 0)
self.assert_(isinstance(call.args[0], QtGui.QIntValidator))
edit = self.__construct(False, minimum=0, maximum=1)
call = edit.mockGetNamedCall("setValidator", 0)
vtor = call.args[0]
self.assert_(isinstance(vtor, QtGui.QIntValidator))
self.assertEqual(vtor.bottom(), 0)
self.assertEqual(vtor.top(), 1)
self.assertRaises(ValueError, self.__construct, False, minimum=0.1)
self.assertRaises(ValueError, self.__construct, False, maximum=0.1)
def __construct(self, floating_point, contents=None,
minimum=None, maximum=None):
if contents is None:
contents = QtCore.QString()
edit = _NumericalLineEdit(floating_point=
floating_point, contents=contents, minimum=minimum, maximum=
maximum)
return edit
if has_qt4:
# Note that the helper must be inherited first, to override methods in the
# Qt base
class _CheckBox(srllib.qtgui.widgets._CheckBoxHelper, guimocks.QCheckBoxMock):
_qbase = guimocks.QCheckBoxMock
def __init__(self, undo_stack=None, undo_text=None):
guimocks.QCheckBoxMock.__init__(self)
srllib.qtgui.widgets._CheckBoxHelper.__init__(self, undo_stack=
undo_stack, undo_text=undo_text)
@only_qt4
class CheckBoxHelperTest(QtTestCase):
def test_construct_with_undo(self):
""" Test constructing with undo. """
# Test default label for undo operation
checkbox, stack = self.__construct(undo=True)
self.__change_state(checkbox, True)
self.assertEqual(stack.undoText(), "")
# Test label for undo operation
checkbox, stack = self.__construct(undo=True, undo_text=
"check test")
self.__change_state(checkbox, True)
self.assertEqual(stack.undoText(), "check test")
def test_undo(self):
""" Test undo functionality. """
checkbox, stack = self.__construct(undo=True)
self.__change_state(checkbox, True)
self.__change_state(checkbox, False)
stack.undo()
checkbox.mockCheckNamedCall(self, "setCheckState", -1, Qt.Checked)
stack.undo()
checkbox.mockCheckNamedCall(self, "setCheckState", -1, Qt.Unchecked)
stack.redo()
checkbox.mockCheckNamedCall(self, "setCheckState", -1, Qt.Checked)
def __change_state(self, checkbox, checked):
if checked:
state = int(Qt.Checked)
else:
state = int(Qt.Unchecked)
checkbox.emit(QtCore.SIGNAL("stateChanged(int)"), state)
def __construct(self, checked=False, undo=False, undo_text=None):
        undo_stack = QtGui.QUndoStack() if undo else None
checkbox = _CheckBox(undo_stack=undo_stack, undo_text=undo_text)
if not undo:
return checkbox
return checkbox, undo_stack
| mit | 4,963,683,699,837,994,000 | 39.01087 | 83 | 0.617359 | false |
akhilaananthram/nupic.research | encoder_quality/rdse_check_demo.py | 1 | 2680 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This script demonstrates how encoder_check.encoderCheck is used.
The example shows that a RandomDistributedScalarEncoder with higher
resolution will more tightly preserve the distance metric of the scalar
input space.
For three scalar values x, y, z, and their encodings Sx, Sy, and Sz, if
the overlap of Sx and Sy is greater than the overlap of Sx and Sz, we would
hope that the distance between x and y is less than the distance between x and
z. This is the logic that the encoderCheck employs. If it finds values that
violate this property, it reports them with a warning.
"""
import encoder_check
import numpy as np
from nupic.encoders.random_distributed_scalar import (
RandomDistributedScalarEncoder
)
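# Illustrative helper, not part of encoder_check's API: the consistency
# property being checked, written out for a single (x, y, z) triple. The
# encodings come from the encoder's standard encode() method; overlap is
# the number of shared active bits.
def _overlap_consistent(encoder, x, y, z, distance=lambda a, b: abs(a - b)):
    sx, sy, sz = encoder.encode(x), encoder.encode(y), encoder.encode(z)
    overlap_xy = np.dot(sx, sy)  # shared active bits between Sx and Sy
    overlap_xz = np.dot(sx, sz)  # shared active bits between Sx and Sz
    if overlap_xy > overlap_xz:
        # higher overlap should correspond to smaller scalar distance
        return distance(x, y) <= distance(x, z)
    return True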
if __name__ == "__main__":
print "Testing RSDE Quality"
maxval = 100.0
minval = -100.0
Nsamples = 1000
encoder1 = RandomDistributedScalarEncoder(name="encoder", resolution=1.0,
w=23, n=500, offset=0.0)
encoder2 = RandomDistributedScalarEncoder(name="encoder", resolution=10.0,
w=23, n=500, offset=0.0)
distance_function = lambda x,y : abs(x-y)
sample_generator = lambda : np.random.uniform(minval, maxval)
input_pairs_source = encoder_check.InputTripleCreator(sample_generator)
err1 = encoder_check.encoderCheck(encoder1, distance_function,
input_pairs_source)
err2 = encoder_check.encoderCheck(encoder2, distance_function,
input_pairs_source)
print
print "Warning rate for encoder w/ resolution 1.0: ",
print err1
print "Warning rate for encoder w/ resolution 10.0: ",
print err2
| gpl-3.0 | -8,209,893,378,988,622,000 | 35.712329 | 78 | 0.672388 | false |
GeoscienceAustralia/PyRate | tests/test_merge.py | 1 | 2815 | # coding: utf-8
# This Python module is part of the PyRate software package.
#
# Copyright 2020 Geoscience Australia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This Python module contains tests for the Merge step of PyRate.
"""
import os
from subprocess import check_call
import itertools
import pytest
from pathlib import Path
from pyrate.merge import create_png_and_kml_from_tif
from pyrate.core import config as cf
from pyrate.merge import _merge_stack, _merge_linrate
from pyrate.configuration import Configuration, write_config_file
from tests.common import manipulate_test_conf
@pytest.fixture
def create_merge_output(tempdir, gamma_conf):
tdir = Path(tempdir())
params = manipulate_test_conf(gamma_conf, tdir)
output_conf_file = tdir.joinpath('conf.cfg')
output_conf = tdir.joinpath(output_conf_file)
write_config_file(params=params, output_conf_file=output_conf)
check_call(f"pyrate conv2tif -f {output_conf}", shell=True)
check_call(f"pyrate prepifg -f {output_conf}", shell=True)
check_call(f"pyrate correct -f {output_conf}", shell=True)
check_call(f"pyrate timeseries -f {output_conf}", shell=True)
check_call(f"pyrate stack -f {output_conf}", shell=True)
params = Configuration(output_conf).__dict__
_merge_stack(params)
_merge_linrate(params)
return params
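# The fixture above drives the full PyRate workflow (conv2tif -> prepifg ->
# correct -> timeseries -> stack) before merging, so the assertions below
# inspect a complete end-to-end output directory.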
@pytest.mark.slow
def test_file_creation(create_merge_output):
params = create_merge_output
# check if color map is created
for ot in ['stack_rate', 'stack_error', 'linear_rate', 'linear_error', 'linear_rsquared']:
create_png_and_kml_from_tif(params[cf.OUT_DIR], output_type=ot)
output_color_map_path = os.path.join(params[cf.OUT_DIR], f"colourmap_{ot}.txt")
assert Path(output_color_map_path).exists(), "Output color map file not found at: " + output_color_map_path
# check if merged files are created
for _type, ot in itertools.product(['stack_rate', 'stack_error', 'linear_rate',
'linear_error', 'linear_rsquared'], ['.tif', '.png', '.kml']):
output_image_path = os.path.join(params[cf.OUT_DIR], _type + ot)
print(f"checking {output_image_path}")
assert Path(output_image_path).exists(), f"Output {ot} file not found at {output_image_path}"
| apache-2.0 | 5,893,206,925,292,384,000 | 41.651515 | 115 | 0.701954 | false |
aileisun/bubbleimg | bubbleimg/imgmeasure/iso/isomeasurer.py | 1 | 9351 | # isomeasurer.py
# ALS 2017/06/01
import os
import astropy.units as u
from astropy.io import fits
import numpy as np
import astropy.table as at
import pickle
import scipy.ndimage as simg
from ..measurer import Measurer
from ... import tabtools
from . import polytools
from . import plottools
class isoMeasurer(Measurer):
def __init__(self, **kwargs):
"""
child of Measurer
do isophotal measurements
"""
super(isoMeasurer, self).__init__(**kwargs)
self.msrtype = 'iso'
def get_fp_contours(self, imgtag='OIII5008_I', onlycenter=False, suffix=''):
""" e.g., msr_iso-OIII5008_I{suffix}_contours.pkl
			or msr_iso-OIII5008_I{suffix}_contours-ctr.pkl
"""
if onlycenter:
ctrtag = '-ctr'
else:
ctrtag = ''
fp_root = self.get_fp_msrtagroot(imgtag=imgtag, suffix=suffix)
return fp_root+'_contours{ctrtag}.pkl'.format(ctrtag=ctrtag)
def make_measurements(self, imgtag='OIII5008_I', isocut=3.e-15*u.Unit('erg / (arcsec2 cm2 s)'), minarea=5, onlycenter=True, centerradius=5.*u.arcsec, plotsuffix='', savecontours=False, plotmsr=False, msrsuffix='', overwrite=False, append=False):
"""
make measurements on a map and write to msr_iso.csv.
if imgtag='OIII5008_I' then measure 'stamp-OIII5008_I.fits'
Params
------
self
imgtag='OIII5008_I'
overwrite = False (bool)
isocut=1.e-15*u.Unit('erg / (arcsec2 cm2 s)'):
isophote cut
minarea=0:
			connected contours with area (# pix) above this value are counted as part of the isophote measurement
onlycenter=False:
whether to consider only the center contours
centerradius=2.*u.arcsec
plotsuffix = '':
			suffix label to be attached to the end of the plot or contour file names.
savecontours=False
plotmsr=False
msrsuffix=''
			suffix label appended to the measurement csv file name: msr_iso_{msrsuffix}.csv.
overwrite=False
append=False
Return
------
status (bool)
Write Output
------------
e.g., msr_iso.csv
"""
fn = self.get_fp_msr(msrsuffix=msrsuffix)
condi = {'imgtag': imgtag, 'isocut': isocut, 'minarea': minarea, 'onlycenter': onlycenter, 'centerradius': centerradius}
if append or overwrite or (not tabtools.fn_has_row(fn, condi)):
print("[isomeasurer] making measurement")
img = self.get_stamp_img(imgtag=imgtag, wunit=True)
xc, yc = self._get_xc_yc(img)
# calc
if np.all(~np.isnan(img)):
contours = self._get_contours_from_img(img=img, isocut=isocut, xc=xc, yc=yc, minarea=minarea, onlycenter=onlycenter, centerradius=centerradius)
tab_msr = self._get_tab_measurements_from_contours(contours=contours, xc=xc, yc=yc)
else:
contours = []
tab_msr = self._get_tab_measurements_nan()
tab_params = self._get_tab_params(imgtag=imgtag, isocut=isocut, minarea=minarea, onlycenter=onlycenter, centerradius=centerradius)
tabout = at.hstack([tab_params, tab_msr])
# output
tabtools.write_row(fn=fn, row=tabout, condi=condi, overwrite=overwrite, append=append)
# optional output
if savecontours:
fn_contours = self.get_fp_contours(imgtag=imgtag, onlycenter=onlycenter, suffix=plotsuffix)
write_pickle(contours, fn_contours, overwrite=overwrite)
if plotmsr:
fn_plot = self.get_fp_msrplot(imgtag=imgtag, suffix=plotsuffix)
plottools.make_plot_img_w_contours(fn_plot=fn_plot, img=img, contours=contours)
else:
print("[isomeasurer] skip making measurement as files exist")
return os.path.isfile(fn)
def make_visualpanel(self, fn=None, compo_bands ='gri', imgtag='OIII5008_I', onlycenter=True, minarea=5, centerradius=5.*u.arcsec, tocolorbar=True, totitle=True, fontsize=12, overwrite=False):
"""
make panel figure to visualize the composit and the iso measurements
saved to e.g., 'msr_iso-OIII5008_I_panel.pdf'
Params
------
fn = None: default: msr_iso_{imgtag}_panel.pdf
compo_bands ='gri', imgtag='OIII5008_I', overwrite=False
Return
------
status
"""
if fn is None:
fn = self.get_fp_msrplot(imgtag=imgtag, suffix='_panel')
else:
fn = self.dir_obj+fn
if not os.path.isfile(fn) or overwrite:
print("[isomeasurer] making visual panel")
# get files ready
self.make_colorimg(bands=compo_bands, img_type='stamp', overwrite=False)
# access data
img_compo = simg.imread(self.dir_obj+'color_stamp-{}.png'.format(compo_bands))
img_map = self.get_stamp_img(imgtag=imgtag, wunit=False)
suffix = '_3e-15'
isocut = 3.e-15*u.Unit('erg / (arcsec2 cm2 s)')
fn_contours3 = self.get_fp_contours(imgtag=imgtag, onlycenter=onlycenter, suffix=suffix)
if not os.path.isfile(fn_contours3):
print("[isomeasurer] re-doing measurements to make contours required for visual panel plots")
				self.make_measurements(imgtag=imgtag, isocut=isocut, plotsuffix=suffix, minarea=minarea, onlycenter=onlycenter, centerradius=centerradius, overwrite=True, savecontours=True, plotmsr=False)
contours3 = read_pickle(fn_contours3)
suffix = '_1e-15'
isocut = 1.e-15*u.Unit('erg / (arcsec2 cm2 s)')
fn_contours1 = self.get_fp_contours(imgtag=imgtag, onlycenter=onlycenter, suffix=suffix)
if not os.path.isfile(fn_contours1):
print("[isomeasurer] re-doing measurements to make contours required for visual panel plots")
				self.make_measurements(imgtag=imgtag, isocut=isocut, plotsuffix=suffix, minarea=minarea, onlycenter=onlycenter, centerradius=centerradius, overwrite=True, savecontours=True, plotmsr=False)
contours1 = read_pickle(fn_contours1)
z = self.z
pixsize = self.pixsize.to_value(u.arcsec)
legend_suffix = ' at 3'
name = self.obj.name[4:]
title_compo = '${}~{}~{}~$'.format(compo_bands[0], compo_bands[1], compo_bands[2])+'$\mathrm{Composite}$'
title_map = '$\mathrm{[OIII]\lambda 5007~Intensity}$'
label_cbar = '$I~[10^{-15}~\mathrm{erg~s^{-1}~cm^{-2}~arcsec^{-2}}]$'
plottools.make_iso_visual_panel(fn, img_compo, img_map, contours1, contours3, z, pixsize, legend_suffix, name, title_compo, title_map, label_cbar, tocolorbar=tocolorbar, totitle=totitle, fontsize=fontsize)
else:
print("[isomeasurer] skip making visual panel as files exist")
return os.path.isfile(fn)
def _get_tab_params(self, imgtag, isocut, minarea, onlycenter, centerradius):
"""
return a one row table of the measurement params
"""
tab = at.Table([[imgtag], [str(isocut)], [minarea], [onlycenter], [str(centerradius)], ], names=['imgtag', 'isocut', 'minarea', 'onlycenter', 'centerradius', ])
return tab
def _get_tab_measurements_from_contours(self, contours, xc, yc):
"""
		calculate iso measurements from contours and return a one-row table with the shape parameters in kpc, arcsec, and pixel units
"""
tab = polytools.ShapeParamsTab_from_contours(contours, xc, yc)
# unit conversion
area_ars = tab['area_pix'][0]*(self.pixsize/u.arcsec)**2
dmax_ars = self._pix_to_theta(tab['dmax_pix'][0], wunit=False)
rmax_ars = self._pix_to_theta(tab['rmax_pix'][0], wunit=False)
dper_ars = self._pix_to_theta(tab['dper_pix'][0], wunit=False)
kpc_per_arcsec = np.array(self._get_kpc_proper_per_arcsec())
area_kpc = area_ars * kpc_per_arcsec**2
dmax_kpc = dmax_ars * kpc_per_arcsec
rmax_kpc = rmax_ars * kpc_per_arcsec
dper_kpc = dper_ars * kpc_per_arcsec
tab_converted = at.Table(names=['area_kpc', 'dmax_kpc', 'rmax_kpc', 'dper_kpc', 'area_ars', 'dmax_ars', 'rmax_ars', 'dper_ars', ])
tab_converted.add_row([area_kpc, dmax_kpc, rmax_kpc, dper_kpc, area_ars, dmax_ars, rmax_ars, dper_ars, ])
tabout = at.hstack([tab_converted, tab])
return tabout
def _get_tab_measurements_nan(self):
"""
return a tab measurement just like _get_tab_measurements_from_contours() but with entries all nan.
"""
names = ['area_kpc', 'dmax_kpc', 'rmax_kpc', 'dper_kpc', 'area_ars', 'dmax_ars', 'rmax_ars', 'dper_ars', 'area_pix', 'dmax_pix', 'rmax_pix', 'dper_pix', 'theta_dmax', 'theta_rmax', 'theta_dper', 'aspectr']
tabout = at.Table(names=names)
tabout.add_row([np.nan for i in range(len(names))])
return tabout
def _get_contours_from_img(self, img, isocut, xc, yc, minarea=0., onlycenter=False, centerradius=2.*u.arcsec):
"""
		find contours of the image at the isocut level and return them; only
		patches satisfying the minarea (and, optionally, the center) criteria are kept
Params
------
self
img (array)
isocut (float or quantity):
			must have the same unit as the image
minarea (float):
minimum area (pix) to be considered as contour patch
onlycenter (bool):
			whether to keep only patches at the center (each must also pass the minarea test)
centerradius (angular quantity):
if onlycenter = True, then it sets the radius of the center area. only patches overlapping with that area will be considered.
"""
# prep
try:
img.unit
except:
img_nparr = img/isocut
else:
img_nparr = np.array((img/isocut).to(u.dimensionless_unscaled))
# find contours -- satisfy minarea
contours = polytools.find_largecontours(img=img_nparr, threshold=1., minarea=minarea)
if onlycenter: # select only those at the center
centerradius_pix = self._theta_to_pix(centerradius)
contours = polytools.select_center_contours(contours, xc, yc, radius=centerradius_pix)
return contours
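# Minimal standalone sketch, not called by the pipeline: the isocut
# normalization used in _get_contours_from_img. A unit-ful map divided by
# the cut becomes a dimensionless array whose contour threshold is 1.
def _normalize_to_isocut(img, isocut):
	try:
		img.unit
	except AttributeError:
		# plain array: assumes isocut is a plain float in matching units
		return img / isocut
	return np.array((img / isocut).to(u.dimensionless_unscaled))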
def read_pickle(fn):
with open(fn, 'rb') as handle:
result = pickle.load(handle)
return result
def write_pickle(result, fn, overwrite=False):
if not os.path.isfile(fn) or overwrite:
with open(fn, 'wb') as handle:
pickle.dump(result, handle)
| mit | 7,320,847,590,150,667,000 | 32.636691 | 246 | 0.694364 | false |
coldeasy/python-driver | tests/integration/cqlengine/query/test_queryset.py | 1 | 54820 | # Copyright 2013-2017 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
try:
import unittest2 as unittest
except ImportError:
import unittest # noqa
from datetime import datetime
import time
from uuid import uuid1, uuid4
import uuid
from cassandra.cluster import Session
from cassandra import InvalidRequest
from tests.integration.cqlengine.base import BaseCassEngTestCase
from cassandra.cqlengine.connection import NOT_SET
import mock
from cassandra.cqlengine import functions
from cassandra.cqlengine.management import sync_table, drop_table
from cassandra.cqlengine.models import Model
from cassandra.cqlengine import columns
from cassandra.cqlengine import query
from cassandra.cqlengine.query import QueryException, BatchQuery
from datetime import timedelta
from datetime import tzinfo
from cassandra.cqlengine import statements
from cassandra.cqlengine import operators
from cassandra.util import uuid_from_time
from cassandra.cqlengine.connection import get_session
from tests.integration import PROTOCOL_VERSION, CASSANDRA_VERSION, greaterthancass20, greaterthancass21
from tests.integration.cqlengine import execute_count
class TzOffset(tzinfo):
"""Minimal implementation of a timezone offset to help testing with timezone
aware datetimes.
"""
def __init__(self, offset):
self._offset = timedelta(hours=offset)
def utcoffset(self, dt):
return self._offset
def tzname(self, dt):
        return 'TzOffset: {}'.format(self._offset)
def dst(self, dt):
return timedelta(0)
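# Small illustration, not used by the tests: the same instant under two
# TzOffset zones differs in local time but agrees on the UTC timetuple,
# which is the pre-condition test_tzaware_datetime_support asserts below.
def _tz_offset_example():
    utc = datetime.utcnow().replace(tzinfo=TzOffset(0))
    helsinki = utc.astimezone(TzOffset(3))
    return utc.utctimetuple() == helsinki.utctimetuple()  # True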
class TestModel(Model):
test_id = columns.Integer(primary_key=True)
attempt_id = columns.Integer(primary_key=True)
description = columns.Text()
expected_result = columns.Integer()
test_result = columns.Integer()
class IndexedTestModel(Model):
test_id = columns.Integer(primary_key=True)
attempt_id = columns.Integer(index=True)
description = columns.Text()
expected_result = columns.Integer()
test_result = columns.Integer(index=True)
class IndexedCollectionsTestModel(Model):
test_id = columns.Integer(primary_key=True)
attempt_id = columns.Integer(index=True)
description = columns.Text()
expected_result = columns.Integer()
test_result = columns.Integer(index=True)
test_list = columns.List(columns.Integer, index=True)
test_set = columns.Set(columns.Integer, index=True)
test_map = columns.Map(columns.Text, columns.Integer, index=True)
test_list_no_index = columns.List(columns.Integer, index=False)
test_set_no_index = columns.Set(columns.Integer, index=False)
test_map_no_index = columns.Map(columns.Text, columns.Integer, index=False)
class TestMultiClusteringModel(Model):
one = columns.Integer(primary_key=True)
two = columns.Integer(primary_key=True)
three = columns.Integer(primary_key=True)
class TestQuerySetOperation(BaseCassEngTestCase):
def test_query_filter_parsing(self):
"""
        Tests that the queryset filter method parses its kwargs properly
"""
query1 = TestModel.objects(test_id=5)
assert len(query1._where) == 1
op = query1._where[0]
assert isinstance(op, statements.WhereClause)
assert isinstance(op.operator, operators.EqualsOperator)
assert op.value == 5
query2 = query1.filter(expected_result__gte=1)
assert len(query2._where) == 2
op = query2._where[1]
self.assertIsInstance(op, statements.WhereClause)
self.assertIsInstance(op.operator, operators.GreaterThanOrEqualOperator)
assert op.value == 1
def test_query_expression_parsing(self):
""" Tests that query experessions are evaluated properly """
query1 = TestModel.filter(TestModel.test_id == 5)
assert len(query1._where) == 1
op = query1._where[0]
assert isinstance(op, statements.WhereClause)
assert isinstance(op.operator, operators.EqualsOperator)
assert op.value == 5
query2 = query1.filter(TestModel.expected_result >= 1)
assert len(query2._where) == 2
op = query2._where[1]
self.assertIsInstance(op, statements.WhereClause)
self.assertIsInstance(op.operator, operators.GreaterThanOrEqualOperator)
assert op.value == 1
def test_using_invalid_column_names_in_filter_kwargs_raises_error(self):
"""
        Tests that using invalid or nonexistent column names for filter args raises an error
"""
with self.assertRaises(query.QueryException):
TestModel.objects(nonsense=5)
def test_using_nonexistant_column_names_in_query_args_raises_error(self):
"""
        Tests that using invalid or nonexistent columns for query args raises an error
"""
with self.assertRaises(AttributeError):
TestModel.objects(TestModel.nonsense == 5)
def test_using_non_query_operators_in_query_args_raises_error(self):
"""
Tests that providing query args that are not query operator instances raises an error
"""
with self.assertRaises(query.QueryException):
TestModel.objects(5)
def test_queryset_is_immutable(self):
"""
        Tests that calling a queryset function that changes its state returns a new queryset
"""
query1 = TestModel.objects(test_id=5)
assert len(query1._where) == 1
query2 = query1.filter(expected_result__gte=1)
assert len(query2._where) == 2
assert len(query1._where) == 1
def test_queryset_limit_immutability(self):
"""
        Tests that calling a queryset function that changes its state returns a new queryset with the same limit
"""
query1 = TestModel.objects(test_id=5).limit(1)
assert query1._limit == 1
query2 = query1.filter(expected_result__gte=1)
assert query2._limit == 1
query3 = query1.filter(expected_result__gte=1).limit(2)
assert query1._limit == 1
assert query3._limit == 2
def test_the_all_method_duplicates_queryset(self):
"""
        Tests that calling all on a queryset with previously defined filters duplicates the queryset
"""
query1 = TestModel.objects(test_id=5)
assert len(query1._where) == 1
query2 = query1.filter(expected_result__gte=1)
assert len(query2._where) == 2
query3 = query2.all()
assert query3 == query2
def test_queryset_with_distinct(self):
"""
        Tests that calling distinct on a queryset with/without parameters is evaluated properly.
"""
query1 = TestModel.objects.distinct()
self.assertEqual(len(query1._distinct_fields), 1)
query2 = TestModel.objects.distinct(['test_id'])
self.assertEqual(len(query2._distinct_fields), 1)
query3 = TestModel.objects.distinct(['test_id', 'attempt_id'])
self.assertEqual(len(query3._distinct_fields), 2)
def test_defining_only_fields(self):
"""
Tests defining only fields
@since 3.5
@jira_ticket PYTHON-560
@expected_result deferred fields should not be returned
@test_category object_mapper
"""
# simple only definition
q = TestModel.objects.only(['attempt_id', 'description'])
self.assertEqual(q._select_fields(), ['attempt_id', 'description'])
with self.assertRaises(query.QueryException):
TestModel.objects.only(['nonexistent_field'])
# Cannot define more than once only fields
with self.assertRaises(query.QueryException):
TestModel.objects.only(['description']).only(['attempt_id'])
# only with defer fields
q = TestModel.objects.only(['attempt_id', 'description'])
q = q.defer(['description'])
self.assertEqual(q._select_fields(), ['attempt_id'])
        # Eliminate all results and confirm an exception is thrown
q = TestModel.objects.only(['description'])
q = q.defer(['description'])
with self.assertRaises(query.QueryException):
q._select_fields()
q = TestModel.objects.filter(test_id=0).only(['test_id', 'attempt_id', 'description'])
self.assertEqual(q._select_fields(), ['attempt_id', 'description'])
# no fields to select
with self.assertRaises(query.QueryException):
q = TestModel.objects.only(['test_id']).defer(['test_id'])
q._select_fields()
with self.assertRaises(query.QueryException):
q = TestModel.objects.filter(test_id=0).only(['test_id'])
q._select_fields()
def test_defining_defer_fields(self):
"""
Tests defining defer fields
@since 3.5
@jira_ticket PYTHON-560
@jira_ticket PYTHON-599
@expected_result deferred fields should not be returned
@test_category object_mapper
"""
# simple defer definition
q = TestModel.objects.defer(['attempt_id', 'description'])
self.assertEqual(q._select_fields(), ['test_id', 'expected_result', 'test_result'])
with self.assertRaises(query.QueryException):
TestModel.objects.defer(['nonexistent_field'])
# defer more than one
q = TestModel.objects.defer(['attempt_id', 'description'])
q = q.defer(['expected_result'])
self.assertEqual(q._select_fields(), ['test_id', 'test_result'])
# defer with only
q = TestModel.objects.defer(['description', 'attempt_id'])
q = q.only(['description', 'test_id'])
self.assertEqual(q._select_fields(), ['test_id'])
        # Eliminate all results and confirm an exception is thrown
q = TestModel.objects.defer(['description', 'attempt_id'])
q = q.only(['description'])
with self.assertRaises(query.QueryException):
q._select_fields()
# implicit defer
q = TestModel.objects.filter(test_id=0)
self.assertEqual(q._select_fields(), ['attempt_id', 'description', 'expected_result', 'test_result'])
        # when all fields are deferred, it falls back to selecting the partition keys
q = TestModel.objects.defer(['test_id', 'attempt_id', 'description', 'expected_result', 'test_result'])
self.assertEqual(q._select_fields(), ['test_id'])
class BaseQuerySetUsage(BaseCassEngTestCase):
@classmethod
def setUpClass(cls):
super(BaseQuerySetUsage, cls).setUpClass()
drop_table(TestModel)
drop_table(IndexedTestModel)
sync_table(TestModel)
sync_table(IndexedTestModel)
sync_table(TestMultiClusteringModel)
TestModel.objects.create(test_id=0, attempt_id=0, description='try1', expected_result=5, test_result=30)
TestModel.objects.create(test_id=0, attempt_id=1, description='try2', expected_result=10, test_result=30)
TestModel.objects.create(test_id=0, attempt_id=2, description='try3', expected_result=15, test_result=30)
TestModel.objects.create(test_id=0, attempt_id=3, description='try4', expected_result=20, test_result=25)
TestModel.objects.create(test_id=1, attempt_id=0, description='try5', expected_result=5, test_result=25)
TestModel.objects.create(test_id=1, attempt_id=1, description='try6', expected_result=10, test_result=25)
TestModel.objects.create(test_id=1, attempt_id=2, description='try7', expected_result=15, test_result=25)
TestModel.objects.create(test_id=1, attempt_id=3, description='try8', expected_result=20, test_result=20)
TestModel.objects.create(test_id=2, attempt_id=0, description='try9', expected_result=50, test_result=40)
TestModel.objects.create(test_id=2, attempt_id=1, description='try10', expected_result=60, test_result=40)
TestModel.objects.create(test_id=2, attempt_id=2, description='try11', expected_result=70, test_result=45)
TestModel.objects.create(test_id=2, attempt_id=3, description='try12', expected_result=75, test_result=45)
IndexedTestModel.objects.create(test_id=0, attempt_id=0, description='try1', expected_result=5, test_result=30)
IndexedTestModel.objects.create(test_id=1, attempt_id=1, description='try2', expected_result=10, test_result=30)
IndexedTestModel.objects.create(test_id=2, attempt_id=2, description='try3', expected_result=15, test_result=30)
IndexedTestModel.objects.create(test_id=3, attempt_id=3, description='try4', expected_result=20, test_result=25)
IndexedTestModel.objects.create(test_id=4, attempt_id=0, description='try5', expected_result=5, test_result=25)
IndexedTestModel.objects.create(test_id=5, attempt_id=1, description='try6', expected_result=10, test_result=25)
IndexedTestModel.objects.create(test_id=6, attempt_id=2, description='try7', expected_result=15, test_result=25)
IndexedTestModel.objects.create(test_id=7, attempt_id=3, description='try8', expected_result=20, test_result=20)
IndexedTestModel.objects.create(test_id=8, attempt_id=0, description='try9', expected_result=50, test_result=40)
IndexedTestModel.objects.create(test_id=9, attempt_id=1, description='try10', expected_result=60,
test_result=40)
IndexedTestModel.objects.create(test_id=10, attempt_id=2, description='try11', expected_result=70,
test_result=45)
IndexedTestModel.objects.create(test_id=11, attempt_id=3, description='try12', expected_result=75,
test_result=45)
        if CASSANDRA_VERSION >= '2.1':
drop_table(IndexedCollectionsTestModel)
sync_table(IndexedCollectionsTestModel)
IndexedCollectionsTestModel.objects.create(test_id=12, attempt_id=3, description='list12', expected_result=75,
test_result=45, test_list=[1, 2, 42], test_set=set([1, 2, 3]),
test_map={'1': 1, '2': 2, '3': 3})
IndexedCollectionsTestModel.objects.create(test_id=13, attempt_id=3, description='list13', expected_result=75,
test_result=45, test_list=[3, 4, 5], test_set=set([4, 5, 42]),
test_map={'1': 5, '2': 6, '3': 7})
IndexedCollectionsTestModel.objects.create(test_id=14, attempt_id=3, description='list14', expected_result=75,
test_result=45, test_list=[1, 2, 3], test_set=set([1, 2, 3]),
test_map={'1': 1, '2': 2, '3': 42})
IndexedCollectionsTestModel.objects.create(test_id=15, attempt_id=4, description='list14', expected_result=75,
test_result=45, test_list_no_index=[1, 2, 3], test_set_no_index=set([1, 2, 3]),
test_map_no_index={'1': 1, '2': 2, '3': 42})
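        # The fixtures above give TestModel three partitions (test_id 0-2)
        # with four clustering rows each; the count and iteration tests
        # below rely on those totals (12 rows overall, 4 per partition).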
@classmethod
def tearDownClass(cls):
super(BaseQuerySetUsage, cls).tearDownClass()
drop_table(TestModel)
drop_table(IndexedTestModel)
drop_table(TestMultiClusteringModel)
class TestQuerySetCountSelectionAndIteration(BaseQuerySetUsage):
@execute_count(2)
def test_count(self):
""" Tests that adding filtering statements affects the count query as expected """
assert TestModel.objects.count() == 12
q = TestModel.objects(test_id=0)
assert q.count() == 4
@execute_count(2)
def test_query_expression_count(self):
""" Tests that adding query statements affects the count query as expected """
assert TestModel.objects.count() == 12
q = TestModel.objects(TestModel.test_id == 0)
assert q.count() == 4
@execute_count(3)
def test_iteration(self):
""" Tests that iterating over a query set pulls back all of the expected results """
q = TestModel.objects(test_id=0)
# tuple of expected attempt_id, expected_result values
compare_set = set([(0, 5), (1, 10), (2, 15), (3, 20)])
for t in q:
val = t.attempt_id, t.expected_result
assert val in compare_set
compare_set.remove(val)
assert len(compare_set) == 0
# test with regular filtering
q = TestModel.objects(attempt_id=3).allow_filtering()
assert len(q) == 3
# tuple of expected test_id, expected_result values
compare_set = set([(0, 20), (1, 20), (2, 75)])
for t in q:
val = t.test_id, t.expected_result
assert val in compare_set
compare_set.remove(val)
assert len(compare_set) == 0
# test with query method
q = TestModel.objects(TestModel.attempt_id == 3).allow_filtering()
assert len(q) == 3
# tuple of expected test_id, expected_result values
compare_set = set([(0, 20), (1, 20), (2, 75)])
for t in q:
val = t.test_id, t.expected_result
assert val in compare_set
compare_set.remove(val)
assert len(compare_set) == 0
@execute_count(2)
def test_multiple_iterations_work_properly(self):
""" Tests that iterating over a query set more than once works """
# test with both the filtering method and the query method
for q in (TestModel.objects(test_id=0), TestModel.objects(TestModel.test_id == 0)):
# tuple of expected attempt_id, expected_result values
compare_set = set([(0, 5), (1, 10), (2, 15), (3, 20)])
for t in q:
val = t.attempt_id, t.expected_result
assert val in compare_set
compare_set.remove(val)
assert len(compare_set) == 0
# try it again
compare_set = set([(0, 5), (1, 10), (2, 15), (3, 20)])
for t in q:
val = t.attempt_id, t.expected_result
assert val in compare_set
compare_set.remove(val)
assert len(compare_set) == 0
@execute_count(2)
def test_multiple_iterators_are_isolated(self):
"""
        Tests that the use of one iterator does not affect the behavior of another
"""
for q in (TestModel.objects(test_id=0), TestModel.objects(TestModel.test_id == 0)):
q = q.order_by('attempt_id')
expected_order = [0, 1, 2, 3]
iter1 = iter(q)
iter2 = iter(q)
for attempt_id in expected_order:
assert next(iter1).attempt_id == attempt_id
assert next(iter2).attempt_id == attempt_id
@execute_count(3)
def test_get_success_case(self):
"""
Tests that the .get() method works on new and existing querysets
"""
m = TestModel.objects.get(test_id=0, attempt_id=0)
assert isinstance(m, TestModel)
assert m.test_id == 0
assert m.attempt_id == 0
q = TestModel.objects(test_id=0, attempt_id=0)
m = q.get()
assert isinstance(m, TestModel)
assert m.test_id == 0
assert m.attempt_id == 0
q = TestModel.objects(test_id=0)
m = q.get(attempt_id=0)
assert isinstance(m, TestModel)
assert m.test_id == 0
assert m.attempt_id == 0
@execute_count(3)
def test_query_expression_get_success_case(self):
"""
Tests that the .get() method works on new and existing querysets
"""
m = TestModel.get(TestModel.test_id == 0, TestModel.attempt_id == 0)
assert isinstance(m, TestModel)
assert m.test_id == 0
assert m.attempt_id == 0
q = TestModel.objects(TestModel.test_id == 0, TestModel.attempt_id == 0)
m = q.get()
assert isinstance(m, TestModel)
assert m.test_id == 0
assert m.attempt_id == 0
q = TestModel.objects(TestModel.test_id == 0)
m = q.get(TestModel.attempt_id == 0)
assert isinstance(m, TestModel)
assert m.test_id == 0
assert m.attempt_id == 0
@execute_count(1)
def test_get_doesnotexist_exception(self):
"""
        Tests that get calls that don't return a result raise a DoesNotExist error
"""
with self.assertRaises(TestModel.DoesNotExist):
TestModel.objects.get(test_id=100)
@execute_count(1)
def test_get_multipleobjects_exception(self):
"""
Tests that get calls that return multiple results raise a MultipleObjectsReturned error
"""
with self.assertRaises(TestModel.MultipleObjectsReturned):
TestModel.objects.get(test_id=1)
def test_allow_filtering_flag(self):
"""
"""
@execute_count(4)
def test_non_quality_filtering():
class NonEqualityFilteringModel(Model):
example_id = columns.UUID(primary_key=True, default=uuid.uuid4)
sequence_id = columns.Integer(primary_key=True) # sequence_id is a clustering key
example_type = columns.Integer(index=True)
created_at = columns.DateTime()
drop_table(NonEqualityFilteringModel)
sync_table(NonEqualityFilteringModel)
# setup table, etc.
NonEqualityFilteringModel.create(sequence_id=1, example_type=0, created_at=datetime.now())
NonEqualityFilteringModel.create(sequence_id=3, example_type=0, created_at=datetime.now())
NonEqualityFilteringModel.create(sequence_id=5, example_type=1, created_at=datetime.now())
qa = NonEqualityFilteringModel.objects(NonEqualityFilteringModel.sequence_id > 3).allow_filtering()
num = qa.count()
assert num == 1, num
class TestQuerySetDistinct(BaseQuerySetUsage):
@execute_count(1)
def test_distinct_without_parameter(self):
q = TestModel.objects.distinct()
self.assertEqual(len(q), 3)
@execute_count(1)
def test_distinct_with_parameter(self):
q = TestModel.objects.distinct(['test_id'])
self.assertEqual(len(q), 3)
@execute_count(1)
def test_distinct_with_filter(self):
q = TestModel.objects.distinct(['test_id']).filter(test_id__in=[1, 2])
self.assertEqual(len(q), 2)
@execute_count(1)
def test_distinct_with_non_partition(self):
with self.assertRaises(InvalidRequest):
q = TestModel.objects.distinct(['description']).filter(test_id__in=[1, 2])
len(q)
@execute_count(1)
def test_zero_result(self):
q = TestModel.objects.distinct(['test_id']).filter(test_id__in=[52])
self.assertEqual(len(q), 0)
@greaterthancass21
@execute_count(2)
def test_distinct_with_explicit_count(self):
q = TestModel.objects.distinct(['test_id'])
self.assertEqual(q.count(), 3)
q = TestModel.objects.distinct(['test_id']).filter(test_id__in=[1, 2])
self.assertEqual(q.count(), 2)
class TestQuerySetOrdering(BaseQuerySetUsage):
@execute_count(2)
def test_order_by_success_case(self):
q = TestModel.objects(test_id=0).order_by('attempt_id')
expected_order = [0, 1, 2, 3]
for model, expect in zip(q, expected_order):
assert model.attempt_id == expect
q = q.order_by('-attempt_id')
expected_order.reverse()
for model, expect in zip(q, expected_order):
assert model.attempt_id == expect
def test_ordering_by_non_second_primary_keys_fail(self):
# kwarg filtering
with self.assertRaises(query.QueryException):
TestModel.objects(test_id=0).order_by('test_id')
# kwarg filtering
with self.assertRaises(query.QueryException):
TestModel.objects(TestModel.test_id == 0).order_by('test_id')
def test_ordering_by_non_primary_keys_fails(self):
with self.assertRaises(query.QueryException):
TestModel.objects(test_id=0).order_by('description')
def test_ordering_on_indexed_columns_fails(self):
with self.assertRaises(query.QueryException):
IndexedTestModel.objects(test_id=0).order_by('attempt_id')
@execute_count(8)
def test_ordering_on_multiple_clustering_columns(self):
TestMultiClusteringModel.create(one=1, two=1, three=4)
TestMultiClusteringModel.create(one=1, two=1, three=2)
TestMultiClusteringModel.create(one=1, two=1, three=5)
TestMultiClusteringModel.create(one=1, two=1, three=1)
TestMultiClusteringModel.create(one=1, two=1, three=3)
results = TestMultiClusteringModel.objects.filter(one=1, two=1).order_by('-two', '-three')
assert [r.three for r in results] == [5, 4, 3, 2, 1]
results = TestMultiClusteringModel.objects.filter(one=1, two=1).order_by('two', 'three')
assert [r.three for r in results] == [1, 2, 3, 4, 5]
results = TestMultiClusteringModel.objects.filter(one=1, two=1).order_by('two').order_by('three')
assert [r.three for r in results] == [1, 2, 3, 4, 5]
class TestQuerySetSlicing(BaseQuerySetUsage):
@execute_count(1)
def test_out_of_range_index_raises_error(self):
q = TestModel.objects(test_id=0).order_by('attempt_id')
with self.assertRaises(IndexError):
q[10]
@execute_count(1)
def test_array_indexing_works_properly(self):
q = TestModel.objects(test_id=0).order_by('attempt_id')
expected_order = [0, 1, 2, 3]
for i in range(len(q)):
assert q[i].attempt_id == expected_order[i]
@execute_count(1)
def test_negative_indexing_works_properly(self):
q = TestModel.objects(test_id=0).order_by('attempt_id')
expected_order = [0, 1, 2, 3]
assert q[-1].attempt_id == expected_order[-1]
assert q[-2].attempt_id == expected_order[-2]
@execute_count(1)
def test_slicing_works_properly(self):
q = TestModel.objects(test_id=0).order_by('attempt_id')
expected_order = [0, 1, 2, 3]
for model, expect in zip(q[1:3], expected_order[1:3]):
self.assertEqual(model.attempt_id, expect)
for model, expect in zip(q[0:3:2], expected_order[0:3:2]):
self.assertEqual(model.attempt_id, expect)
@execute_count(1)
def test_negative_slicing(self):
q = TestModel.objects(test_id=0).order_by('attempt_id')
expected_order = [0, 1, 2, 3]
for model, expect in zip(q[-3:], expected_order[-3:]):
self.assertEqual(model.attempt_id, expect)
for model, expect in zip(q[:-1], expected_order[:-1]):
self.assertEqual(model.attempt_id, expect)
for model, expect in zip(q[1:-1], expected_order[1:-1]):
self.assertEqual(model.attempt_id, expect)
for model, expect in zip(q[-3:-1], expected_order[-3:-1]):
self.assertEqual(model.attempt_id, expect)
for model, expect in zip(q[-3:-1:2], expected_order[-3:-1:2]):
self.assertEqual(model.attempt_id, expect)
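# Note: negative indices and slice bounds are resolved against the result
# length, so the queryset must know its size before they can be applied.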
class TestQuerySetValidation(BaseQuerySetUsage):
def test_primary_key_or_index_must_be_specified(self):
"""
Tests that queries that don't have an equals relation to a primary key or indexed field fail
"""
with self.assertRaises(query.QueryException):
q = TestModel.objects(test_result=25)
list([i for i in q])
def test_primary_key_or_index_must_have_equal_relation_filter(self):
"""
        Tests that queries whose only relation to a primary key or indexed field is non-equal (>, <, etc.) fail
"""
with self.assertRaises(query.QueryException):
q = TestModel.objects(test_id__gt=0)
list([i for i in q])
@greaterthancass20
@execute_count(7)
def test_indexed_field_can_be_queried(self):
"""
Tests that queries on an indexed field will work without any primary key relations specified
"""
q = IndexedTestModel.objects(test_result=25)
self.assertEqual(q.count(), 4)
q = IndexedCollectionsTestModel.objects.filter(test_list__contains=42)
self.assertEqual(q.count(), 1)
q = IndexedCollectionsTestModel.objects.filter(test_list__contains=13)
self.assertEqual(q.count(), 0)
q = IndexedCollectionsTestModel.objects.filter(test_set__contains=42)
self.assertEqual(q.count(), 1)
q = IndexedCollectionsTestModel.objects.filter(test_set__contains=13)
self.assertEqual(q.count(), 0)
q = IndexedCollectionsTestModel.objects.filter(test_map__contains=42)
self.assertEqual(q.count(), 1)
q = IndexedCollectionsTestModel.objects.filter(test_map__contains=13)
self.assertEqual(q.count(), 0)
class TestQuerySetDelete(BaseQuerySetUsage):
@execute_count(9)
def test_delete(self):
TestModel.objects.create(test_id=3, attempt_id=0, description='try9', expected_result=50, test_result=40)
TestModel.objects.create(test_id=3, attempt_id=1, description='try10', expected_result=60, test_result=40)
TestModel.objects.create(test_id=3, attempt_id=2, description='try11', expected_result=70, test_result=45)
TestModel.objects.create(test_id=3, attempt_id=3, description='try12', expected_result=75, test_result=45)
assert TestModel.objects.count() == 16
assert TestModel.objects(test_id=3).count() == 4
TestModel.objects(test_id=3).delete()
assert TestModel.objects.count() == 12
assert TestModel.objects(test_id=3).count() == 0
def test_delete_without_partition_key(self):
""" Tests that attempting to delete a model without defining a partition key fails """
with self.assertRaises(query.QueryException):
TestModel.objects(attempt_id=0).delete()
def test_delete_without_any_where_args(self):
""" Tests that attempting to delete a whole table without any arguments will fail """
with self.assertRaises(query.QueryException):
            TestModel.objects().delete()
@unittest.skipIf(CASSANDRA_VERSION < '3.0', "range deletion was introduce in C* 3.0, currently running {0}".format(CASSANDRA_VERSION))
@execute_count(18)
def test_range_deletion(self):
"""
        Tests that range deletion works as expected
"""
for i in range(10):
TestMultiClusteringModel.objects().create(one=1, two=i, three=i)
TestMultiClusteringModel.objects(one=1, two__gte=0, two__lte=3).delete()
self.assertEqual(6, len(TestMultiClusteringModel.objects.all()))
TestMultiClusteringModel.objects(one=1, two__gt=3, two__lt=5).delete()
self.assertEqual(5, len(TestMultiClusteringModel.objects.all()))
TestMultiClusteringModel.objects(one=1, two__in=[8, 9]).delete()
self.assertEqual(3, len(TestMultiClusteringModel.objects.all()))
TestMultiClusteringModel.objects(one__in=[1], two__gte=0).delete()
self.assertEqual(0, len(TestMultiClusteringModel.objects.all()))
class TimeUUIDQueryModel(Model):
partition = columns.UUID(primary_key=True)
time = columns.TimeUUID(primary_key=True)
data = columns.Text(required=False)
class TestMinMaxTimeUUIDFunctions(BaseCassEngTestCase):
@classmethod
def setUpClass(cls):
super(TestMinMaxTimeUUIDFunctions, cls).setUpClass()
sync_table(TimeUUIDQueryModel)
@classmethod
def tearDownClass(cls):
super(TestMinMaxTimeUUIDFunctions, cls).tearDownClass()
drop_table(TimeUUIDQueryModel)
@execute_count(7)
def test_tzaware_datetime_support(self):
"""Test that using timezone aware datetime instances works with the
MinTimeUUID/MaxTimeUUID functions.
"""
pk = uuid4()
midpoint_utc = datetime.utcnow().replace(tzinfo=TzOffset(0))
midpoint_helsinki = midpoint_utc.astimezone(TzOffset(3))
# Assert pre-condition that we have the same logical point in time
assert midpoint_utc.utctimetuple() == midpoint_helsinki.utctimetuple()
assert midpoint_utc.timetuple() != midpoint_helsinki.timetuple()
TimeUUIDQueryModel.create(
partition=pk,
time=uuid_from_time(midpoint_utc - timedelta(minutes=1)),
data='1')
TimeUUIDQueryModel.create(
partition=pk,
time=uuid_from_time(midpoint_utc),
data='2')
TimeUUIDQueryModel.create(
partition=pk,
time=uuid_from_time(midpoint_utc + timedelta(minutes=1)),
data='3')
assert ['1', '2'] == [o.data for o in TimeUUIDQueryModel.filter(
TimeUUIDQueryModel.partition == pk,
TimeUUIDQueryModel.time <= functions.MaxTimeUUID(midpoint_utc))]
assert ['1', '2'] == [o.data for o in TimeUUIDQueryModel.filter(
TimeUUIDQueryModel.partition == pk,
TimeUUIDQueryModel.time <= functions.MaxTimeUUID(midpoint_helsinki))]
assert ['2', '3'] == [o.data for o in TimeUUIDQueryModel.filter(
TimeUUIDQueryModel.partition == pk,
TimeUUIDQueryModel.time >= functions.MinTimeUUID(midpoint_utc))]
assert ['2', '3'] == [o.data for o in TimeUUIDQueryModel.filter(
TimeUUIDQueryModel.partition == pk,
TimeUUIDQueryModel.time >= functions.MinTimeUUID(midpoint_helsinki))]
@execute_count(8)
def test_success_case(self):
""" Test that the min and max time uuid functions work as expected """
pk = uuid4()
startpoint = datetime.utcnow()
TimeUUIDQueryModel.create(partition=pk, time=uuid_from_time(startpoint + timedelta(seconds=1)), data='1')
TimeUUIDQueryModel.create(partition=pk, time=uuid_from_time(startpoint + timedelta(seconds=2)), data='2')
midpoint = startpoint + timedelta(seconds=3)
TimeUUIDQueryModel.create(partition=pk, time=uuid_from_time(startpoint + timedelta(seconds=4)), data='3')
TimeUUIDQueryModel.create(partition=pk, time=uuid_from_time(startpoint + timedelta(seconds=5)), data='4')
# test kwarg filtering
q = TimeUUIDQueryModel.filter(partition=pk, time__lte=functions.MaxTimeUUID(midpoint))
q = [d for d in q]
self.assertEqual(len(q), 2, msg="Got: %s" % q)
datas = [d.data for d in q]
assert '1' in datas
assert '2' in datas
q = TimeUUIDQueryModel.filter(partition=pk, time__gte=functions.MinTimeUUID(midpoint))
assert len(q) == 2
datas = [d.data for d in q]
assert '3' in datas
assert '4' in datas
# test query expression filtering
q = TimeUUIDQueryModel.filter(
TimeUUIDQueryModel.partition == pk,
TimeUUIDQueryModel.time <= functions.MaxTimeUUID(midpoint)
)
q = [d for d in q]
assert len(q) == 2
datas = [d.data for d in q]
assert '1' in datas
assert '2' in datas
q = TimeUUIDQueryModel.filter(
TimeUUIDQueryModel.partition == pk,
TimeUUIDQueryModel.time >= functions.MinTimeUUID(midpoint)
)
assert len(q) == 2
datas = [d.data for d in q]
assert '3' in datas
assert '4' in datas
class TestInOperator(BaseQuerySetUsage):
@execute_count(1)
def test_kwarg_success_case(self):
""" Tests the in operator works with the kwarg query method """
q = TestModel.filter(test_id__in=[0, 1])
assert q.count() == 8
@execute_count(1)
def test_query_expression_success_case(self):
""" Tests the in operator works with the query expression query method """
q = TestModel.filter(TestModel.test_id.in_([0, 1]))
assert q.count() == 8
@execute_count(5)
def test_bool(self):
"""
Adding coverage to cqlengine for bool types.
@since 3.6
@jira_ticket PYTHON-596
@expected_result bool results should be filtered appropriately
@test_category object_mapper
"""
class bool_model(Model):
k = columns.Integer(primary_key=True)
b = columns.Boolean(primary_key=True)
v = columns.Integer(default=3)
sync_table(bool_model)
bool_model.create(k=0, b=True)
bool_model.create(k=0, b=False)
self.assertEqual(len(bool_model.objects.all()), 2)
self.assertEqual(len(bool_model.objects.filter(k=0, b=True)), 1)
self.assertEqual(len(bool_model.objects.filter(k=0, b=False)), 1)
@execute_count(3)
def test_bool_filter(self):
"""
Test to ensure that we don't translate boolean objects to String unnecessarily in filter clauses
@since 3.6
@jira_ticket PYTHON-596
@expected_result We should not receive a server error
@test_category object_mapper
"""
class bool_model2(Model):
k = columns.Boolean(primary_key=True)
b = columns.Integer(primary_key=True)
v = columns.Text()
drop_table(bool_model2)
sync_table(bool_model2)
bool_model2.create(k=True, b=1, v='a')
bool_model2.create(k=False, b=1, v='b')
self.assertEqual(len(list(bool_model2.objects(k__in=(True, False)))), 2)
@greaterthancass20
class TestContainsOperator(BaseQuerySetUsage):
@execute_count(6)
def test_kwarg_success_case(self):
""" Tests the CONTAINS operator works with the kwarg query method """
q = IndexedCollectionsTestModel.filter(test_list__contains=1)
self.assertEqual(q.count(), 2)
q = IndexedCollectionsTestModel.filter(test_list__contains=13)
self.assertEqual(q.count(), 0)
q = IndexedCollectionsTestModel.filter(test_set__contains=3)
self.assertEqual(q.count(), 2)
q = IndexedCollectionsTestModel.filter(test_set__contains=13)
self.assertEqual(q.count(), 0)
q = IndexedCollectionsTestModel.filter(test_map__contains=42)
self.assertEqual(q.count(), 1)
q = IndexedCollectionsTestModel.filter(test_map__contains=13)
self.assertEqual(q.count(), 0)
with self.assertRaises(QueryException):
q = IndexedCollectionsTestModel.filter(test_list_no_index__contains=1)
self.assertEqual(q.count(), 0)
with self.assertRaises(QueryException):
q = IndexedCollectionsTestModel.filter(test_set_no_index__contains=1)
self.assertEqual(q.count(), 0)
with self.assertRaises(QueryException):
q = IndexedCollectionsTestModel.filter(test_map_no_index__contains=1)
self.assertEqual(q.count(), 0)
@execute_count(6)
def test_query_expression_success_case(self):
""" Tests the CONTAINS operator works with the query expression query method """
q = IndexedCollectionsTestModel.filter(IndexedCollectionsTestModel.test_list.contains_(1))
self.assertEqual(q.count(), 2)
q = IndexedCollectionsTestModel.filter(IndexedCollectionsTestModel.test_list.contains_(13))
self.assertEqual(q.count(), 0)
q = IndexedCollectionsTestModel.filter(IndexedCollectionsTestModel.test_set.contains_(3))
self.assertEqual(q.count(), 2)
q = IndexedCollectionsTestModel.filter(IndexedCollectionsTestModel.test_set.contains_(13))
self.assertEqual(q.count(), 0)
q = IndexedCollectionsTestModel.filter(IndexedCollectionsTestModel.test_map.contains_(42))
self.assertEqual(q.count(), 1)
q = IndexedCollectionsTestModel.filter(IndexedCollectionsTestModel.test_map.contains_(13))
self.assertEqual(q.count(), 0)
with self.assertRaises(QueryException):
q = IndexedCollectionsTestModel.filter(IndexedCollectionsTestModel.test_map_no_index.contains_(1))
self.assertEqual(q.count(), 0)
with self.assertRaises(QueryException):
q = IndexedCollectionsTestModel.filter(IndexedCollectionsTestModel.test_map_no_index.contains_(1))
self.assertEqual(q.count(), 0)
with self.assertRaises(QueryException):
q = IndexedCollectionsTestModel.filter(IndexedCollectionsTestModel.test_map_no_index.contains_(1))
self.assertEqual(q.count(), 0)
class TestValuesList(BaseQuerySetUsage):
@execute_count(2)
def test_values_list(self):
q = TestModel.objects.filter(test_id=0, attempt_id=1)
item = q.values_list('test_id', 'attempt_id', 'description', 'expected_result', 'test_result').first()
assert item == [0, 1, 'try2', 10, 30]
item = q.values_list('expected_result', flat=True).first()
assert item == 10
class TestObjectsProperty(BaseQuerySetUsage):
@execute_count(1)
def test_objects_property_returns_fresh_queryset(self):
assert TestModel.objects._result_cache is None
len(TestModel.objects) # evaluate queryset
assert TestModel.objects._result_cache is None
class PageQueryTests(BaseCassEngTestCase):
@execute_count(3)
def test_paged_result_handling(self):
if PROTOCOL_VERSION < 2:
raise unittest.SkipTest("Paging requires native protocol 2+, currently using: {0}".format(PROTOCOL_VERSION))
# addresses #225
class PagingTest(Model):
id = columns.Integer(primary_key=True)
val = columns.Integer()
sync_table(PagingTest)
PagingTest.create(id=1, val=1)
PagingTest.create(id=2, val=2)
session = get_session()
with mock.patch.object(session, 'default_fetch_size', 1):
results = PagingTest.objects()[:]
assert len(results) == 2
class ModelQuerySetTimeoutTestCase(BaseQuerySetUsage):
def test_default_timeout(self):
with mock.patch.object(Session, 'execute') as mock_execute:
list(TestModel.objects())
self.assertEqual(mock_execute.call_args[-1]['timeout'], NOT_SET)
def test_float_timeout(self):
with mock.patch.object(Session, 'execute') as mock_execute:
list(TestModel.objects().timeout(0.5))
self.assertEqual(mock_execute.call_args[-1]['timeout'], 0.5)
def test_none_timeout(self):
with mock.patch.object(Session, 'execute') as mock_execute:
list(TestModel.objects().timeout(None))
self.assertEqual(mock_execute.call_args[-1]['timeout'], None)
class DMLQueryTimeoutTestCase(BaseQuerySetUsage):
def setUp(self):
self.model = TestModel(test_id=1, attempt_id=1, description='timeout test')
super(DMLQueryTimeoutTestCase, self).setUp()
def test_default_timeout(self):
with mock.patch.object(Session, 'execute') as mock_execute:
self.model.save()
self.assertEqual(mock_execute.call_args[-1]['timeout'], NOT_SET)
def test_float_timeout(self):
with mock.patch.object(Session, 'execute') as mock_execute:
self.model.timeout(0.5).save()
self.assertEqual(mock_execute.call_args[-1]['timeout'], 0.5)
def test_none_timeout(self):
with mock.patch.object(Session, 'execute') as mock_execute:
self.model.timeout(None).save()
self.assertEqual(mock_execute.call_args[-1]['timeout'], None)
def test_timeout_then_batch(self):
b = query.BatchQuery()
m = self.model.timeout(None)
with self.assertRaises(AssertionError):
m.batch(b)
def test_batch_then_timeout(self):
b = query.BatchQuery()
m = self.model.batch(b)
with self.assertRaises(AssertionError):
m.timeout(0.5)
class DBFieldModel(Model):
k0 = columns.Integer(partition_key=True, db_field='a')
k1 = columns.Integer(partition_key=True, db_field='b')
c0 = columns.Integer(primary_key=True, db_field='c')
v0 = columns.Integer(db_field='d')
v1 = columns.Integer(db_field='e', index=True)
class DBFieldModelMixed1(Model):
k0 = columns.Integer(partition_key=True, db_field='a')
k1 = columns.Integer(partition_key=True)
c0 = columns.Integer(primary_key=True, db_field='c')
v0 = columns.Integer(db_field='d')
v1 = columns.Integer(index=True)
class DBFieldModelMixed2(Model):
k0 = columns.Integer(partition_key=True)
k1 = columns.Integer(partition_key=True, db_field='b')
c0 = columns.Integer(primary_key=True)
v0 = columns.Integer(db_field='d')
v1 = columns.Integer(index=True, db_field='e')
class TestModelQueryWithDBField(BaseCassEngTestCase):
    @classmethod
    def setUpClass(cls):
        super(TestModelQueryWithDBField, cls).setUpClass()
        cls.model_list = [DBFieldModel, DBFieldModelMixed1, DBFieldModelMixed2]
        for model in cls.model_list:
            sync_table(model)
    @classmethod
    def tearDownClass(cls):
        super(TestModelQueryWithDBField, cls).tearDownClass()
        for model in cls.model_list:
            drop_table(model)
@execute_count(33)
def test_basic_crud(self):
"""
Tests creation update and delete of object model queries that are using db_field mappings.
@since 3.1
@jira_ticket PYTHON-351
@expected_result results are properly retrieved without errors
@test_category object_mapper
"""
for model in self.model_list:
values = {'k0': 1, 'k1': 2, 'c0': 3, 'v0': 4, 'v1': 5}
# create
i = model.create(**values)
i = model.objects(k0=i.k0, k1=i.k1).first()
self.assertEqual(i, model(**values))
            # update
values['v0'] = 101
i.update(v0=values['v0'])
i = model.objects(k0=i.k0, k1=i.k1).first()
self.assertEqual(i, model(**values))
# delete
model.objects(k0=i.k0, k1=i.k1).delete()
i = model.objects(k0=i.k0, k1=i.k1).first()
self.assertIsNone(i)
i = model.create(**values)
i = model.objects(k0=i.k0, k1=i.k1).first()
self.assertEqual(i, model(**values))
i.delete()
model.objects(k0=i.k0, k1=i.k1).delete()
i = model.objects(k0=i.k0, k1=i.k1).first()
self.assertIsNone(i)
@execute_count(21)
def test_slice(self):
"""
Tests slice queries for object models that are using db_field mapping
@since 3.1
@jira_ticket PYTHON-351
@expected_result results are properly retrieved without errors
@test_category object_mapper
"""
for model in self.model_list:
values = {'k0': 1, 'k1': 3, 'c0': 3, 'v0': 4, 'v1': 5}
clustering_values = range(3)
for c in clustering_values:
values['c0'] = c
i = model.create(**values)
self.assertEqual(model.objects(k0=i.k0, k1=i.k1).count(), len(clustering_values))
self.assertEqual(model.objects(k0=i.k0, k1=i.k1, c0=i.c0).count(), 1)
self.assertEqual(model.objects(k0=i.k0, k1=i.k1, c0__lt=i.c0).count(), len(clustering_values[:-1]))
self.assertEqual(model.objects(k0=i.k0, k1=i.k1, c0__gt=0).count(), len(clustering_values[1:]))
@execute_count(15)
def test_order(self):
"""
Tests order by queries for object models that are using db_field mapping
@since 3.1
@jira_ticket PYTHON-351
@expected_result results are properly retrieved without errors
@test_category object_mapper
"""
for model in self.model_list:
values = {'k0': 1, 'k1': 4, 'c0': 3, 'v0': 4, 'v1': 5}
clustering_values = range(3)
for c in clustering_values:
values['c0'] = c
i = model.create(**values)
self.assertEqual(model.objects(k0=i.k0, k1=i.k1).order_by('c0').first().c0, clustering_values[0])
self.assertEqual(model.objects(k0=i.k0, k1=i.k1).order_by('-c0').first().c0, clustering_values[-1])
@execute_count(15)
def test_index(self):
"""
Tests queries using index fields for object models using db_field mapping
@since 3.1
@jira_ticket PYTHON-351
@expected_result results are properly retrieved without errors
@test_category object_mapper
"""
for model in self.model_list:
values = {'k0': 1, 'k1': 5, 'c0': 3, 'v0': 4, 'v1': 5}
clustering_values = range(3)
for c in clustering_values:
values['c0'] = c
values['v1'] = c
i = model.create(**values)
self.assertEqual(model.objects(k0=i.k0, k1=i.k1).count(), len(clustering_values))
self.assertEqual(model.objects(k0=i.k0, k1=i.k1, v1=0).count(), 1)
@execute_count(1)
def test_db_field_names_used(self):
"""
Tests to ensure that with generated cql update statements correctly utilize the db_field values.
@since 3.2
@jira_ticket PYTHON-530
@expected_result resulting cql_statements will use the db_field values
@test_category object_mapper
"""
values = ('k0', 'k1', 'c0', 'v0', 'v1')
# Test QuerySet Path
b = BatchQuery()
DBFieldModel.objects(k0=1).batch(b).update(
v0=0,
v1=9,
)
for value in values:
self.assertTrue(value not in str(b.queries[0]))
# Test DML path
b2 = BatchQuery()
dml_field_model = DBFieldModel.create(k0=1, k1=5, c0=3, v0=4, v1=5)
dml_field_model.batch(b2).update(
v0=0,
v1=9,
)
for value in values:
self.assertTrue(value not in str(b2.queries[0]))
class TestModelSmall(Model):
test_id = columns.Integer(primary_key=True)
class TestModelQueryWithFetchSize(BaseCassEngTestCase):
"""
Test FetchSize, and ensure that results are returned correctly
regardless of the paging size
@since 3.1
@jira_ticket PYTHON-324
@expected_result results are properly retrieved and the correct size
@test_category object_mapper
"""
@classmethod
def setUpClass(cls):
super(TestModelQueryWithFetchSize, cls).setUpClass()
sync_table(TestModelSmall)
@classmethod
def tearDownClass(cls):
super(TestModelQueryWithFetchSize, cls).tearDownClass()
drop_table(TestModelSmall)
@execute_count(9)
def test_defaultFetchSize(self):
with BatchQuery() as b:
for i in range(5100):
TestModelSmall.batch(b).create(test_id=i)
self.assertEqual(len(TestModelSmall.objects.fetch_size(1)), 5100)
self.assertEqual(len(TestModelSmall.objects.fetch_size(500)), 5100)
self.assertEqual(len(TestModelSmall.objects.fetch_size(4999)), 5100)
self.assertEqual(len(TestModelSmall.objects.fetch_size(5000)), 5100)
self.assertEqual(len(TestModelSmall.objects.fetch_size(5001)), 5100)
self.assertEqual(len(TestModelSmall.objects.fetch_size(5100)), 5100)
self.assertEqual(len(TestModelSmall.objects.fetch_size(5101)), 5100)
self.assertEqual(len(TestModelSmall.objects.fetch_size(1)), 5100)
with self.assertRaises(QueryException):
TestModelSmall.objects.fetch_size(0)
with self.assertRaises(QueryException):
TestModelSmall.objects.fetch_size(-1)
class People(Model):
__table_name__ = "people"
last_name = columns.Text(primary_key=True, partition_key=True)
first_name = columns.Text(primary_key=True)
birthday = columns.DateTime()
class People2(Model):
__table_name__ = "people"
last_name = columns.Text(primary_key=True, partition_key=True)
first_name = columns.Text(primary_key=True)
middle_name = columns.Text()
birthday = columns.DateTime()
class TestModelQueryWithDeferredField(BaseCassEngTestCase):
    """
    Tests that selects with filter will defer population of known values until after the results are returned.
    I.e. instead of generating SELECT * FROM People WHERE last_name="Smith", it will generate
    SELECT first_name, birthday FROM People WHERE last_name="Smith",
    where last_name "Smith" will be populated post-query.
    @since 3.2
    @jira_ticket PYTHON-520
    @expected_result only needed fields are included in the query
    @test_category object_mapper
    """
@classmethod
def setUpClass(cls):
        super(TestModelQueryWithDeferredField, cls).setUpClass()
sync_table(People)
@classmethod
def tearDownClass(cls):
        super(TestModelQueryWithDeferredField, cls).tearDownClass()
drop_table(People)
@execute_count(8)
    def test_deferred_fields(self):
# Populate Table
People.objects.create(last_name="Smith", first_name="John", birthday=datetime.now())
People.objects.create(last_name="Bestwater", first_name="Alan", birthday=datetime.now())
People.objects.create(last_name="Smith", first_name="Greg", birthday=datetime.now())
People.objects.create(last_name="Smith", first_name="Adam", birthday=datetime.now())
# Check query constructions
expected_fields = ['first_name', 'birthday']
self.assertEqual(People.filter(last_name="Smith")._select_fields(), expected_fields)
# Validate correct fields are fetched
smiths = list(People.filter(last_name="Smith"))
self.assertEqual(len(smiths), 3)
self.assertTrue(smiths[0].last_name is not None)
# Modify table with new value
sync_table(People2)
# populate new format
People2.objects.create(last_name="Smith", first_name="Chris", middle_name="Raymond", birthday=datetime.now())
People2.objects.create(last_name="Smith", first_name="Andrew", middle_name="Micheal", birthday=datetime.now())
# validate query construction
expected_fields = ['first_name', 'middle_name', 'birthday']
self.assertEqual(People2.filter(last_name="Smith")._select_fields(), expected_fields)
        # validate correct items are returned
smiths = list(People2.filter(last_name="Smith"))
self.assertEqual(len(smiths), 5)
self.assertTrue(smiths[0].last_name is not None)
| apache-2.0 | 1,945,220,001,991,381,000 | 38.157143 | 138 | 0.636045 | false |
ShaguptaS/faker | faker/providers/user_agent.py | 1 | 5274 | from __future__ import unicode_literals
from . import BaseProvider
from . import date_time
from datetime import datetime
import random
class Provider(BaseProvider):
user_agents = ('firefox', 'chrome', 'internet_explorer', 'opera', 'safari')
windows_platform_tokens = (
'Windows NT 6.2', 'Windows NT 6.1', 'Windows NT 6.0', 'Windows NT 5.2', 'Windows NT 5.1',
'Windows NT 5.01', 'Windows NT 5.0', 'Windows NT 4.0', 'Windows 98; Win 9x 4.90',
'Windows 98', 'Windows 95', 'Windows CE'
)
linux_processors = ('i686', 'x86_64',)
mac_processors = ('Intel', 'PPC', 'U; Intel', 'U; PPC')
langs = ('en-US', 'sl-SI', 'it-IT')
@classmethod
def mac_processor(cls):
return cls.random_element(cls.mac_processors)
@classmethod
def linux_processor(cls):
return cls.random_element(cls.linux_processors)
@classmethod
def user_agent(cls):
name = cls.random_element(cls.user_agents)
return getattr(cls, name)()
@classmethod
def chrome(cls):
saf = str(random.randint(531, 536)) + str(random.randint(0, 2))
platforms = (
"({0}) AppleWebKit/{1} (KHTML, like Gecko) Chrome/{2}.0.{3}.0 Safari/{4}".format(
cls.linux_platform_token(), saf, random.randint(13, 15), random.randint(800, 899), saf),
"({0}) AppleWebKit/{1} (KHTML, like Gecko) Chrome/{2}.0.{3}.0 Safari/{4}".format(
cls.windows_platform_token(), saf, random.randint(13, 15), random.randint(800, 899), saf),
"({0}) AppleWebKit/{1} (KHTML, like Gecko) Chrome/{2}.0.{3}.0 Safari/{4}".format(
cls.mac_platform_token(), saf, random.randint(13, 15), random.randint(800, 899), saf),
)
return 'Mozilla/5.0 ' + cls.random_element(platforms)
@classmethod
def firefox(cls):
ver = (
'Gecko/{0} Firefox/{1}.0'.format(
date_time.Provider.date_time_between(datetime(2011, 1, 1)), random.randint(4, 15)),
'Gecko/{0} Firefox/3.6.{1}'.format(
date_time.Provider.date_time_between(datetime(2010, 1, 1)), random.randint(1, 20)),
            'Gecko/{0} Firefox/3.8'.format(date_time.Provider.date_time_between(datetime(2010, 1, 1))),
)
platforms = (
"({0}; {1}; rv:1.9.{2}.20) {3}".format(
cls.windows_platform_token(), cls.random_element(cls.langs), random.randint(0, 2), random.choice(ver)),
"({0}; rv:1.9.{1}.20) {2}".format(cls.linux_platform_token(), random.randint(5, 7), random.choice(ver)),
"({0}; rv:1.9.{1}.20) {2}".format(cls.mac_platform_token(), random.randint(2, 6), random.choice(ver)),
)
return 'Mozilla/5.0 ' + cls.random_element(platforms)
@classmethod
def safari(cls):
saf = "{0}.{1}.{2}".format(random.randint(531, 535), random.randint(1, 50), random.randint(1, 7))
if random.randint(0, 1) == 0:
ver = "{0}.{1}".format(random.randint(4, 5), random.randint(0, 1))
else:
ver = "{0}.0.{1}".format(random.randint(4, 5), random.randint(1, 5))
platforms = (
'(Windows; U; {0}) AppleWebKit/{1} (KHTML, like Gecko) Version/{2} Safari/{3}'.format(
cls.windows_platform_token(), saf, ver, saf),
'({0} rv:{1}.0; {2}) AppleWebKit/{3} (KHTML, like Gecko) Version/{4} Safari/{5}'.format(
cls.mac_platform_token(), random.randint(2, 6), cls.random_element(cls.langs), saf, ver, saf),
'(iPod; U; CPU iPhone OS {0}_{1} like Mac OS X; {2}) AppleWebKit/{3} (KHTML, like Gecko) Version/{4}.0.5 Mobile/8B{5} Safari/6{6}'.format(
random.randint(3, 4), random.randint(0, 3), cls.random_element(cls.langs), saf, random.randint(3, 4),
random.randint(111, 119), saf
)
)
return 'Mozilla/5.0 ' + cls.random_element(platforms)
@classmethod
def opera(cls):
platforms = (
'({0}; {1}) Presto/2.9.{2} Version/{3}.00'.format(
cls.linux_platform_token(), cls.random_element(cls.langs), random.randint(160, 190),
random.randint(10, 12)),
'({0}; {1}) Presto/2.9.{2} Version/{3}.00'.format(
cls.windows_platform_token(), cls.random_element(cls.langs), random.randint(160, 190),
random.randint(10, 12)),
)
return 'Opera/{0}.{1}.{2}'.format(random.randint(8, 9), random.randint(10, 99), cls.random_element(platforms))
@classmethod
def internet_explorer(cls):
return 'Mozilla/5.0 (compatible; MSIE {0}.0; {1}; Trident/{2}.{3})'.format(
random.randint(5, 9),
cls.windows_platform_token(),
random.randint(3, 5),
random.randint(0, 1)
)
@classmethod
def windows_platform_token(cls):
return cls.random_element(cls.windows_platform_tokens)
@classmethod
def linux_platform_token(cls):
return 'X11; Linux {0}'.format(cls.random_element(cls.linux_processors))
@classmethod
def mac_platform_token(cls):
return 'Macintosh; {0} Mac OS X 10_{1}_{2}'.format(
cls.random_element(cls.mac_processors), random.randint(5, 8), random.randint(0, 9))
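# Illustrative usage sketch (not part of the original module): exercising the
# Provider's class methods directly. Only methods defined above are used; the
# values are drawn randomly, so the output differs between runs.
if __name__ == '__main__':
    print(Provider.user_agent())         # UA string for a randomly chosen browser
    print(Provider.chrome())             # e.g. 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/...'
    print(Provider.internet_explorer())  # e.g. 'Mozilla/5.0 (compatible; MSIE 7.0; ...)'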
| mit | -4,744,924,618,467,953,000 | 40.857143 | 150 | 0.571483 | false |
AButenko/selenium_tests | test_gui/test_login.py | 1 | 1921 | # -*- coding: utf-8 -*-
# from django.conf import settings
import pytest
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from framework.fixtures import logger
from framework.gui.common.fixtures import browser
from framework.gui.common.tools import page_contain_assert
from framework.gui.loginpage import LoginPage
def test_login_default_user(logger, browser):
logger.info("Simple login test.")
loginpage = LoginPage(browser)
loginpage.login()
@pytest.mark.parametrize("user, psswd",[('',''), ('[email protected]', '')])
def test_login_any_user(logger, browser, user, psswd):
loginpage = LoginPage(browser)
browser.get(loginpage.login_page)
name, passwd = loginpage.enter_login_credentials(user, psswd)
# with tools.wait_for_page_load(browser):
passwd.send_keys(Keys.RETURN)
invalid_input = browser.find_element_by_css_selector("input:invalid")
assert invalid_input.is_displayed()
# try:
# validate_email(user) # TODO use additional flag to check if email is validS
    if not user:
        assert invalid_input == name
    elif not psswd:
        assert invalid_input == passwd
# except ValidationError: # otherwise invalid input if in email field
# assert not browser.execute_script("return document.getElementById(\"username\").validity.valid") # javascript way to check the same
WebDriverWait(browser, 20).until(
EC.presence_of_element_located((By.XPATH, "//div[@class='resultlogin']/div[1]"))
)
assert browser.find_element_by_xpath("//div[@class='resultlogin']/div[1]").text == "Invalid Email or Password"
def test_logout_default_user(logger, browser):
loginpage = LoginPage(browser)
loginpage.logout() | bsd-3-clause | -5,620,181,287,252,949,000 | 38.22449 | 138 | 0.72202 | false |
jeremiedecock/snippets | python/tkinter/python3/keyboard_events.py | 1 | 5625 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Jérémie DECOCK (http://www.jdhp.org)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# See: https://github.com/jeremiedecock/pyarm/blob/master/pyarm/gui/tkinter_gui.py
# http://infohost.nmt.edu/tcc/help/pubs/tkinter/web/key-names.html
# http://www.tcl.tk/man/tcl8.4/TkCmd/keysyms.htm
import tkinter as tk
root = tk.Tk()
label = tk.Label(root, text="Press some keys", width=50, height=10)
label.pack()
# SETUP KEYBOARD EVENT CALLBACKS
def keypress_callback(event):
if event.keysym == "Up":
print("keypress: <Up>")
elif event.keysym == "Down":
print("keypress: <Down>")
elif event.keysym == "Left":
print("keypress: <Left>")
elif event.keysym == "Right":
print("keypress: <Right>")
elif event.keysym == "Return":
print("keypress: <Return>")
elif event.keysym == "Escape":
print("keypress: <Escape>")
elif event.keysym == "space":
print("keypress: <space>")
elif event.keysym == "Control_R":
print("keypress: <Control_R>")
elif event.keysym == "Control_L":
print("keypress: <Control_L>")
elif event.keysym == "Shift_R":
print("keypress: <Shift_R>")
elif event.keysym == "Shift_L":
print("keypress: <Shift_L>")
elif event.keysym == "Tab":
print("keypress: <Tab>")
elif event.keysym == "Super_R":
print("keypress: <Super_R>")
elif event.keysym == "Super_L":
print("keypress: <Super_L>")
elif event.keysym == "BackSpace":
print("keypress: <BackSpace>")
elif event.keysym == "Prior": # PgUp
print("keypress: <Prior>")
elif event.keysym == "Next": # PgDown
print("keypress: <Next>")
elif event.char == "a":
print("keypress: <a>")
elif event.char == "b":
print("keypress: <b>")
elif event.char == "c":
print("keypress: <c>")
elif event.char == "d":
print("keypress: <d>")
elif event.char == "A":
print("keypress: <A>")
elif event.char == "B":
print("keypress: <B>")
elif event.char == "C":
print("keypress: <C>")
elif event.char == "D":
print("keypress: <D>")
elif event.char == "1":
print("keypress: <1>")
elif event.char == "2":
print("keypress: <2>")
elif event.char == "3":
print("keypress: <3>")
else:
print("keypress:", event.char, event.keysym)
def keyrelease_callback(event):
if event.keysym == "Up":
print("keyrelease: <Up>")
elif event.keysym == "Down":
print("keyrelease: <Down>")
elif event.keysym == "Left":
print("keyrelease: <Left>")
elif event.keysym == "Right":
print("keyrelease: <Right>")
elif event.keysym == "Return":
print("keyrelease: <Return>")
elif event.keysym == "Escape":
print("keyrelease: <Escape>")
elif event.keysym == "space":
print("keyrelease: <space>")
elif event.keysym == "Control_R":
print("keyrelease: <Control_R>")
elif event.keysym == "Control_L":
print("keyrelease: <Control_L>")
elif event.keysym == "Shift_R":
print("keyrelease: <Shift_R>")
elif event.keysym == "Shift_L":
print("keyrelease: <Shift_L>")
elif event.keysym == "Tab":
print("keyrelease: <Tab>")
elif event.keysym == "Super_R":
print("keyrelease: <Super_R>")
elif event.keysym == "Super_L":
print("keyrelease: <Super_L>")
elif event.keysym == "BackSpace":
print("keyrelease: <BackSpace>")
elif event.keysym == "Prior": # PgUp
print("keyrelease: <Prior>")
elif event.keysym == "Next": # PgDown
print("keyrelease: <Next>")
elif event.char == "a":
print("keyrelease: <a>")
elif event.char == "b":
print("keyrelease: <b>")
elif event.char == "c":
print("keyrelease: <c>")
elif event.char == "d":
print("keyrelease: <d>")
elif event.char == "A":
print("keyrelease: <A>")
elif event.char == "B":
print("keyrelease: <B>")
elif event.char == "C":
print("keyrelease: <C>")
elif event.char == "D":
print("keyrelease: <D>")
elif event.char == "1":
print("keyrelease: <1>")
elif event.char == "2":
print("keyrelease: <2>")
elif event.char == "3":
print("keyrelease: <3>")
else:
print("keyrelease:", event.char, event.keysym)
root.bind("<KeyPress>", keypress_callback)
root.bind("<KeyRelease>", keyrelease_callback)
root.mainloop()
| mit | 4,922,969,895,132,361,000 | 34.14375 | 82 | 0.599502 | false |
jessepeterson/commandment | commandment/alembic/versions/8c866896f76e_create_dep_join_tables.py | 1 | 1729 | """Create DEP join tables
Revision ID: 8c866896f76e
Revises: 0e5babc5b9ee
Create Date: 2017-07-19 12:57:58.086196
"""
from alembic import op
import sqlalchemy as sa
import commandment.dbtypes
from alembic import context
# revision identifiers, used by Alembic.
revision = '8c866896f76e'
down_revision = '0e5babc5b9ee'
branch_labels = None
depends_on = None
def upgrade():
schema_upgrades()
# if context.get_x_argument(as_dictionary=True).get('data', None):
# data_upgrades()
def downgrade():
# if context.get_x_argument(as_dictionary=True).get('data', None):
# data_downgrades()
schema_downgrades()
def schema_upgrades():
"""schema upgrade migrations go here."""
op.create_table('dep_profile_anchor_certificates',
sa.Column('dep_profile_id', sa.Integer(), nullable=True),
sa.Column('certificate_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['certificate_id'], ['certificates.id'], ),
sa.ForeignKeyConstraint(['dep_profile_id'], ['dep_profiles.id'], )
)
op.create_table('dep_profile_supervision_certificates',
sa.Column('dep_profile_id', sa.Integer(), nullable=True),
sa.Column('certificate_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['certificate_id'], ['certificates.id'], ),
sa.ForeignKeyConstraint(['dep_profile_id'], ['dep_profiles.id'], )
)
def schema_downgrades():
"""schema downgrade migrations go here."""
op.drop_table('dep_profile_supervision_certificates')
op.drop_table('dep_profile_anchor_certificates')
def data_upgrades():
"""Add any optional data upgrade migrations here!"""
pass
def data_downgrades():
"""Add any optional data downgrade migrations here!"""
pass
| mit | -6,987,280,669,994,090,000 | 26.444444 | 71 | 0.689994 | false |
lgbouma/astrobase | astrobase/services/gaia.py | 1 | 44599 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# gaia - Waqas Bhatti ([email protected]) - Dec 2017
# License: MIT. See the LICENSE file for more details.
'''
This queries the GAIA catalog for object lists in specified areas of the
sky. The main use of this module is to generate realistic spatial distributions
of stars for variability recovery simulations in combination with colors and
luminosities from the TRILEGAL galaxy model.
If you use this module, please cite the GAIA papers as outlined at:
https://gaia.esac.esa.int/documentation//GDR1/Miscellaneous/sec_credit_and_citation_instructions.html
Much of this module is derived from the example given at:
http://gea.esac.esa.int/archive-help/commandline/index.html
For a more general and useful interface to the GAIA catalog, see the astroquery
package by A. Ginsburg, B. Sipocz, et al.:
http://astroquery.readthedocs.io/en/latest/gaia/gaia.html
'''
#############
## LOGGING ##
#############
import logging
from astrobase import log_sub, log_fmt, log_date_fmt
DEBUG = False
if DEBUG:
level = logging.DEBUG
else:
level = logging.INFO
LOGGER = logging.getLogger(__name__)
logging.basicConfig(
level=level,
style=log_sub,
format=log_fmt,
datefmt=log_date_fmt,
)
LOGDEBUG = LOGGER.debug
LOGINFO = LOGGER.info
LOGWARNING = LOGGER.warning
LOGERROR = LOGGER.error
LOGEXCEPTION = LOGGER.exception
#############
## IMPORTS ##
#############
import os
import os.path
import gzip
import hashlib
import time
import pickle
import random
# to do the queries
import requests
import requests.exceptions
# to read the XML returned by the TAP service
from xml.dom.minidom import parseString
###################
## FORM SETTINGS ##
###################
GAIA_URLS = {
'gaia':{'url':"https://gea.esac.esa.int/tap-server/tap/async",
'table':'gaiadr2.gaia_source',
'phasekeyword':'uws:phase',
'resultkeyword':'uws:result'},
'heidelberg':{'url':"http://gaia.ari.uni-heidelberg.de/tap/async",
'table':'gaiadr2.gaia_source',
'phasekeyword':'phase',
'resultkeyword':'result'},
'vizier':{'url':"http://tapvizier.u-strasbg.fr/TAPVizieR/tap/async",
'table':'"I/345/gaia2"',
'phasekeyword':'phase',
'resultkeyword':'result'},
}
# default TAP query params, will be copied and overridden
TAP_PARAMS = {
'REQUEST':'doQuery',
'LANG':'ADQL',
'FORMAT':'json',
'PHASE':'RUN',
'JOBNAME':'',
'JOBDESCRIPTION':'',
'QUERY':''
}
# valid return formats
RETURN_FORMATS = {
'json':'json.gz',
'csv':'csv.gz',
'votable':'vot',
}
#####################
## QUERY FUNCTIONS ##
#####################
def tap_query(querystr,
gaia_mirror=None,
returnformat='csv',
forcefetch=False,
cachedir='~/.astrobase/gaia-cache',
verbose=True,
timeout=15.0,
refresh=2.0,
maxtimeout=300.0,
maxtries=3,
complete_query_later=False):
'''This queries the GAIA TAP service using an ADQL query string.
Parameters
----------
querystr : str
This is the ADQL query string. See:
http://www.ivoa.net/documents/ADQL/2.0 for the specification and
http://gea.esac.esa.int/archive-help/adql/index.html for GAIA-specific
additions.
gaia_mirror : {'gaia','heidelberg','vizier'} or None
This is the key used to select a GAIA catalog mirror from the
`GAIA_URLS` dict above. If set, the specified mirror will be used. If
None, a random mirror chosen from that dict will be used.
returnformat : {'csv','votable','json'}
The returned file format to request from the GAIA catalog service.
forcefetch : bool
If this is True, the query will be retried even if cached results for
it exist.
cachedir : str
This points to the directory where results will be downloaded.
verbose : bool
If True, will indicate progress and warn of any issues.
timeout : float
This sets the amount of time in seconds to wait for the service to
respond to our initial request.
refresh : float
This sets the amount of time in seconds to wait before checking if the
result file is available. If the results file isn't available after
`refresh` seconds have elapsed, the function will wait for `refresh`
seconds continuously, until `maxtimeout` is reached or the results file
becomes available.
maxtimeout : float
The maximum amount of time in seconds to wait for a result to become
available after submitting our query request.
maxtries : int
The maximum number of tries (across all mirrors tried) to make to either
submit the request or download the results, before giving up.
    complete_query_later : bool
If set to True, a submitted query that does not return a result before
`maxtimeout` has passed will be cancelled but its input request
parameters and the result URL provided by the service will be saved. If
this function is then called later with these same input request
parameters, it will check if the query finally finished and a result is
available. If so, will download the results instead of submitting a new
query. If it's not done yet, will start waiting for results again. To
force launch a new query with the same request parameters, set the
`forcefetch` kwarg to True.
Returns
-------
dict
This returns a dict of the following form::
{'params':dict of the input params used for the query,
'provenance':'cache' or 'new download',
'result':path to the file on disk with the downloaded data table}
'''
# get the default params
inputparams = TAP_PARAMS.copy()
# update them with our input params
inputparams['QUERY'] = querystr[::]
if returnformat in RETURN_FORMATS:
inputparams['FORMAT'] = returnformat
else:
LOGWARNING('unknown result format: %s requested, using CSV' %
returnformat)
inputparams['FORMAT'] = 'csv'
# see if the cachedir exists
if '~' in cachedir:
cachedir = os.path.expanduser(cachedir)
if not os.path.exists(cachedir):
os.makedirs(cachedir)
# generate the cachefname and look for it
xcachekey = '-'.join([repr(inputparams[x])
for x in sorted(inputparams.keys())])
cachekey = hashlib.sha256(xcachekey.encode()).hexdigest()
cachefname = os.path.join(
cachedir,
'%s.%s' % (cachekey, RETURN_FORMATS[returnformat])
)
provenance = 'cache'
incomplete_qpklf = os.path.join(
cachedir,
'incomplete-query-%s' % cachekey
)
##########################################
## COMPLETE A QUERY THAT MAY BE RUNNING ##
##########################################
# first, check if this query can be resurrected
if (not forcefetch and
complete_query_later and
os.path.exists(incomplete_qpklf)):
with open(incomplete_qpklf, 'rb') as infd:
incomplete_qinfo = pickle.load(infd)
LOGWARNING('complete_query_later = True, and '
'this query was not completed on a '
'previous run, will check if it is done now...')
# get the status URL and go into a loop to see if the query completed
waitdone = False
timeelapsed = 0.0
gaia_mirror = incomplete_qinfo['gaia_mirror']
status_url = incomplete_qinfo['status_url']
phasekeyword = incomplete_qinfo['phase_keyword']
resultkeyword = incomplete_qinfo['result_keyword']
while not waitdone:
if timeelapsed > maxtimeout:
                LOGERROR('GAIA TAP query still not done '
                         'after waiting %s seconds for results.\n'
                         'request was: %s\n'
                         'status URL is: %s' %
                         (maxtimeout,
                          repr(inputparams),
                          status_url))
return None
try:
resreq = requests.get(status_url,
timeout=timeout)
resreq.raise_for_status()
# parse the response XML and get the job status
resxml = parseString(resreq.text)
jobstatuselem = (
resxml.getElementsByTagName(phasekeyword)[0]
)
jobstatus = jobstatuselem.firstChild.toxml()
if jobstatus == 'COMPLETED':
if verbose:
LOGINFO('GAIA query completed, '
'retrieving results...')
waitdone = True
# if we're not done yet, then wait some more
elif jobstatus != 'ERROR':
if verbose:
LOGINFO('elapsed time: %.1f, '
'current status: %s, '
'status URL: %s, waiting...'
% (timeelapsed, jobstatus, status_url))
time.sleep(refresh)
timeelapsed = timeelapsed + refresh
# if the JOB failed, then bail out immediately
else:
LOGERROR('GAIA TAP query failed due to a server error.\n'
'status URL: %s\n'
'status contents: %s' %
(status_url,
resreq.text))
# since this job failed, remove the incomplete query pickle
# so we can try this from scratch
os.remove(incomplete_qpklf)
return None
except requests.exceptions.Timeout:
LOGEXCEPTION(
'GAIA query timed out while waiting for status '
'download results.\n'
'query: %s\n'
'status URL: %s' %
(repr(inputparams), status_url)
)
return None
except Exception:
LOGEXCEPTION(
'GAIA query failed while waiting for status\n'
'query: %s\n'
'status URL: %s\n'
'status contents: %s' %
(repr(inputparams),
status_url,
resreq.text)
)
# if the query fails completely, then either the status URL
# doesn't exist any more or something else went wrong. we'll
# remove the incomplete query pickle so we can try this from
# scratch
os.remove(incomplete_qpklf)
return None
#
# at this point, we should be ready to get the query results
#
LOGINFO('query completed, retrieving results...')
result_url_elem = resxml.getElementsByTagName(resultkeyword)[0]
result_url = result_url_elem.getAttribute('xlink:href')
result_nrows = result_url_elem.getAttribute('rows')
try:
resreq = requests.get(result_url, timeout=timeout)
resreq.raise_for_status()
if cachefname.endswith('.gz'):
with gzip.open(cachefname,'wb') as outfd:
for chunk in resreq.iter_content(chunk_size=65536):
outfd.write(chunk)
else:
with open(cachefname,'wb') as outfd:
for chunk in resreq.iter_content(chunk_size=65536):
outfd.write(chunk)
if verbose:
LOGINFO('done. rows in result: %s' % result_nrows)
tablefname = cachefname
provenance = 'cache'
# return a dict pointing to the result file
# we'll parse this later
resdict = {'params':inputparams,
'provenance':provenance,
'result':tablefname}
# all went well, so we'll remove the incomplete query pickle
os.remove(incomplete_qpklf)
return resdict
except requests.exceptions.Timeout:
LOGEXCEPTION(
'GAIA query timed out while trying to '
'download results.\n'
'query: %s\n'
'result URL: %s' %
(repr(inputparams), result_url)
)
return None
except Exception:
LOGEXCEPTION(
'GAIA query failed because of an error '
'while trying to download results.\n'
'query: %s\n'
'result URL: %s\n'
'response status code: %s' %
(repr(inputparams),
result_url,
resreq.status_code)
)
# if the result download fails, then either the result URL doesn't
# exist any more or something else went wrong. we'll remove the
# incomplete query pickle so we can try this from scratch
os.remove(incomplete_qpklf)
return None
#####################
## RUN A NEW QUERY ##
#####################
# otherwise, we check the cache if it's done already, or run it again if not
if forcefetch or (not os.path.exists(cachefname)):
provenance = 'new download'
# generate a jobid here and update the input params
jobid = 'ab-gaia-%i' % time.time()
inputparams['JOBNAME'] = jobid
inputparams['JOBDESCRIPTION'] = 'astrobase-gaia-tap-ADQL-query'
try:
waitdone = False
timeelapsed = 0.0
# set the gaia mirror to use
if gaia_mirror is not None and gaia_mirror in GAIA_URLS:
tapurl = GAIA_URLS[gaia_mirror]['url']
resultkeyword = GAIA_URLS[gaia_mirror]['resultkeyword']
phasekeyword = GAIA_URLS[gaia_mirror]['phasekeyword']
randkey = gaia_mirror
# sub in a table name if this is left unresolved in the input
# query
if '{table}' in querystr:
inputparams['QUERY'] = (
querystr.format(
table=GAIA_URLS[gaia_mirror]['table']
)
)
else:
randkey = random.choice(list(GAIA_URLS.keys()))
tapurl = GAIA_URLS[randkey]['url']
resultkeyword = GAIA_URLS[randkey]['resultkeyword']
phasekeyword = GAIA_URLS[randkey]['phasekeyword']
# sub in a table name if this is left unresolved in the input
# query
if '{table}' in querystr:
inputparams['QUERY'] = (
querystr.format(
table=GAIA_URLS[randkey]['table']
)
)
if verbose:
LOGINFO('using GAIA mirror TAP URL: %s' % tapurl)
# send the query and get status
if verbose:
LOGINFO('submitting GAIA TAP query request for input params: %s'
% repr(inputparams))
# here, we'll make sure the GAIA mirror works before doing anything
# else
mirrorok = False
ntries = 1
while not mirrorok:
if ntries > maxtries:
LOGERROR('maximum number of allowed GAIA query '
'submission tries (%s) reached, bailing out...' %
maxtries)
return None
try:
req = requests.post(tapurl,
data=inputparams,
timeout=timeout)
resp_status = req.status_code
req.raise_for_status()
mirrorok = True
# this handles immediate 503s
except requests.exceptions.HTTPError:
LOGWARNING(
'GAIA TAP server: %s not responding, '
'trying another mirror...'
% tapurl
)
mirrorok = False
# make sure not to hit current mirror again if it's down
remainingmirrors = list(GAIA_URLS.keys())
remainingmirrors.remove(randkey)
randkey = random.choice(remainingmirrors)
tapurl = GAIA_URLS[randkey]['url']
resultkeyword = GAIA_URLS[randkey]['resultkeyword']
phasekeyword = GAIA_URLS[randkey]['phasekeyword']
if '{table}' in querystr:
inputparams['QUERY'] = (
querystr.format(
table=GAIA_URLS[randkey]['table']
)
)
# this handles initial query submission timeouts
except requests.exceptions.Timeout:
LOGWARNING(
'GAIA TAP query submission timed out, '
'mirror is probably down. Trying another mirror...'
)
mirrorok = False
# make sure not to hit current mirror again if it's down
remainingmirrors = list(GAIA_URLS.keys())
remainingmirrors.remove(randkey)
randkey = random.choice(remainingmirrors)
tapurl = GAIA_URLS[randkey]['url']
resultkeyword = GAIA_URLS[randkey]['resultkeyword']
phasekeyword = GAIA_URLS[randkey]['phasekeyword']
if '{table}' in querystr:
inputparams['QUERY'] = (
querystr.format(
table=GAIA_URLS[randkey]['table']
)
)
# update the number of submission tries
ntries = ntries + 1
# NOTE: python-requests follows the "303 See Other" redirect
# automatically, so we get the XML status doc immediately. We don't
# need to look up the location of it in the initial response's
# header as in the GAIA example.
status_url = req.url
# parse the response XML and get the job status
resxml = parseString(req.text)
jobstatuselem = resxml.getElementsByTagName(phasekeyword)
if jobstatuselem:
jobstatuselem = jobstatuselem[0]
else:
LOGERROR('could not parse job phase using '
'keyword %s in result XML' % phasekeyword)
                LOGERROR('%s' % req.text)
req.close()
return None
jobstatus = jobstatuselem.firstChild.toxml()
# if the job completed already, jump down to retrieving results
if jobstatus == 'COMPLETED':
if verbose:
LOGINFO('GAIA query completed, '
'retrieving results...')
waitdone = True
elif jobstatus == 'ERROR':
if verbose:
LOGERROR(
'GAIA query failed immediately '
'(probably an ADQL error): %s, '
'status URL: %s, status contents: %s' %
(repr(inputparams),
status_url,
req.text)
)
return None
# we wait for the job to complete if it's not done already
else:
if verbose:
LOGINFO(
'request submitted successfully, '
'current status is: %s. '
'waiting for results...' % jobstatus
)
while not waitdone:
if timeelapsed > maxtimeout:
LOGERROR('GAIA TAP query timed out '
'after waiting %s seconds for results.\n'
'request was: %s\n'
'status URL is: %s\n'
'last status was: %s' %
(maxtimeout,
repr(inputparams),
status_url,
jobstatus))
# here, we'll check if we're allowed to sleep on a query
# for a bit and return to it later if the last status
# was QUEUED or EXECUTING
if complete_query_later and jobstatus in ('EXECUTING',
'QUEUED'):
# write a pickle with the query params that we can
# pick up later to finish this query
incomplete_qpklf = os.path.join(
cachedir,
'incomplete-query-%s' % cachekey
)
with open(incomplete_qpklf, 'wb') as outfd:
savedict = inputparams.copy()
savedict['status_url'] = status_url
savedict['last_status'] = jobstatus
savedict['gaia_mirror'] = gaia_mirror
savedict['phase_keyword'] = phasekeyword
savedict['result_keyword'] = resultkeyword
pickle.dump(savedict,
outfd,
pickle.HIGHEST_PROTOCOL)
LOGINFO('complete_query_later = True, '
'last state of query was: %s, '
'will resume later if this function '
'is called again with the same query' %
jobstatus)
return None
time.sleep(refresh)
timeelapsed = timeelapsed + refresh
try:
resreq = requests.get(status_url, timeout=timeout)
resreq.raise_for_status()
# parse the response XML and get the job status
resxml = parseString(resreq.text)
jobstatuselem = (
resxml.getElementsByTagName(phasekeyword)[0]
)
jobstatus = jobstatuselem.firstChild.toxml()
if jobstatus == 'COMPLETED':
if verbose:
LOGINFO('GAIA query completed, '
'retrieving results...')
waitdone = True
else:
if verbose:
LOGINFO('elapsed time: %.1f, '
'current status: %s, '
'status URL: %s, waiting...'
% (timeelapsed, jobstatus, status_url))
continue
except requests.exceptions.Timeout:
LOGEXCEPTION(
'GAIA query timed out while waiting for results '
'download results.\n'
'query: %s\n'
'status URL: %s' %
(repr(inputparams), status_url)
)
return None
except Exception:
LOGEXCEPTION(
'GAIA query failed while waiting for results\n'
'query: %s\n'
'status URL: %s\n'
'status contents: %s' %
(repr(inputparams),
status_url,
resreq.text)
)
return None
#
# at this point, we should be ready to get the query results
#
result_url_elem = resxml.getElementsByTagName(resultkeyword)[0]
result_url = result_url_elem.getAttribute('xlink:href')
result_nrows = result_url_elem.getAttribute('rows')
try:
resreq = requests.get(result_url, timeout=timeout)
resreq.raise_for_status()
if cachefname.endswith('.gz'):
with gzip.open(cachefname,'wb') as outfd:
for chunk in resreq.iter_content(chunk_size=65536):
outfd.write(chunk)
else:
with open(cachefname,'wb') as outfd:
for chunk in resreq.iter_content(chunk_size=65536):
outfd.write(chunk)
if verbose:
LOGINFO('done. rows in result: %s' % result_nrows)
tablefname = cachefname
except requests.exceptions.Timeout:
LOGEXCEPTION(
'GAIA query timed out while trying to '
'download results.\n'
'query: %s\n'
'result URL: %s' %
(repr(inputparams), result_url)
)
return None
except Exception:
LOGEXCEPTION(
'GAIA query failed because of an error '
'while trying to download results.\n'
'query: %s\n'
'result URL: %s\n'
'response status code: %s' %
(repr(inputparams),
result_url,
resreq.status_code)
)
return None
except requests.exceptions.HTTPError:
LOGEXCEPTION('GAIA TAP query failed.\nrequest status was: '
'%s.\nquery was: %s' % (resp_status,
repr(inputparams)))
return None
except requests.exceptions.Timeout:
LOGERROR('GAIA TAP query submission timed out, '
'site is probably down. Request was: '
'%s' % repr(inputparams))
return None
except Exception:
LOGEXCEPTION('GAIA TAP query request failed for '
'%s' % repr(inputparams))
if 'resxml' in locals():
LOGERROR('HTTP response from service:\n%s' % req.text)
return None
############################
## GET RESULTS FROM CACHE ##
############################
else:
if verbose:
LOGINFO('getting cached GAIA query result for '
'request: %s' %
(repr(inputparams)))
tablefname = cachefname
#
# all done with retrieval, now return the result dict
#
# return a dict pointing to the result file
# we'll parse this later
resdict = {'params':inputparams,
'provenance':provenance,
'result':tablefname}
return resdict
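# Illustrative usage sketch (not part of the original module): a raw ADQL
# query passed straight to tap_query(). The '{table}' placeholder is resolved
# against whichever mirror is selected, as described in the docstring above.
# The magnitude cut and row limit below are arbitrary example values.
def _demo_tap_query():  # pragma: no cover
    result = tap_query(
        "select top 10 source_id, ra, dec from {table} "
        "where phot_g_mean_mag < 10.0",
        gaia_mirror='heidelberg',
        returnformat='csv',
    )
    if result is not None:
        # 'result' points at the downloaded table on disk (csv.gz here)
        print(result['provenance'], '->', result['result'])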
def objectlist_conesearch(racenter,
declcenter,
searchradiusarcsec,
gaia_mirror=None,
columns=('source_id',
'ra','dec',
'phot_g_mean_mag',
'l','b',
'parallax', 'parallax_error',
'pmra','pmra_error',
'pmdec','pmdec_error'),
extra_filter=None,
returnformat='csv',
forcefetch=False,
cachedir='~/.astrobase/gaia-cache',
verbose=True,
timeout=15.0,
refresh=2.0,
maxtimeout=300.0,
maxtries=3,
complete_query_later=True):
'''This queries the GAIA TAP service for a list of objects near the coords.
Runs a conesearch around `(racenter, declcenter)` with radius in arcsec of
`searchradiusarcsec`.
Parameters
----------
racenter,declcenter : float
The center equatorial coordinates in decimal degrees.
searchradiusarcsec : float
The search radius of the cone-search in arcseconds.
gaia_mirror : {'gaia','heidelberg','vizier'} or None
This is the key used to select a GAIA catalog mirror from the
`GAIA_URLS` dict above. If set, the specified mirror will be used. If
None, a random mirror chosen from that dict will be used.
columns : sequence of str
This indicates which columns from the GAIA table to request for the
objects found within the search radius.
extra_filter: str or None
If this is provided, must be a valid ADQL filter string that is used to
further filter the cone-search results.
returnformat : {'csv','votable','json'}
The returned file format to request from the GAIA catalog service.
forcefetch : bool
If this is True, the query will be retried even if cached results for
it exist.
cachedir : str
This points to the directory where results will be downloaded.
verbose : bool
If True, will indicate progress and warn of any issues.
timeout : float
This sets the amount of time in seconds to wait for the service to
respond to our initial request.
refresh : float
This sets the amount of time in seconds to wait before checking if the
result file is available. If the results file isn't available after
`refresh` seconds have elapsed, the function will wait for `refresh`
seconds continuously, until `maxtimeout` is reached or the results file
becomes available.
maxtimeout : float
The maximum amount of time in seconds to wait for a result to become
available after submitting our query request.
maxtries : int
The maximum number of tries (across all mirrors tried) to make to either
submit the request or download the results, before giving up.
    complete_query_later : bool
If set to True, a submitted query that does not return a result before
`maxtimeout` has passed will be cancelled but its input request
parameters and the result URL provided by the service will be saved. If
this function is then called later with these same input request
parameters, it will check if the query finally finished and a result is
available. If so, will download the results instead of submitting a new
query. If it's not done yet, will start waiting for results again. To
force launch a new query with the same request parameters, set the
`forcefetch` kwarg to True.
Returns
-------
dict
This returns a dict of the following form::
{'params':dict of the input params used for the query,
'provenance':'cache' or 'new download',
'result':path to the file on disk with the downloaded data table}
'''
# this was generated using the awesome query generator at:
# https://gea.esac.esa.int/archive/
# NOTE: here we don't resolve the table name right away. this is because
# some of the GAIA mirrors use different table names, so we leave the table
# name to be resolved by the lower level tap_query function. this is done by
# the {{table}} construct.
query = (
"select {columns}, "
"(DISTANCE(POINT('ICRS', "
"{{table}}.ra, {{table}}.dec), "
"POINT('ICRS', {ra_center:.5f}, {decl_center:.5f})))*3600.0 "
"AS dist_arcsec "
"from {{table}} where "
"CONTAINS(POINT('ICRS',{{table}}.ra, {{table}}.dec),"
"CIRCLE('ICRS',{ra_center:.5f},{decl_center:.5f},"
"{search_radius:.6f}))=1 "
"{extra_filter_str}"
"ORDER by dist_arcsec asc "
)
if extra_filter is not None:
extra_filter_str = ' and %s ' % extra_filter
else:
extra_filter_str = ''
formatted_query = query.format(ra_center=racenter,
decl_center=declcenter,
search_radius=searchradiusarcsec/3600.0,
extra_filter_str=extra_filter_str,
columns=', '.join(columns))
return tap_query(formatted_query,
gaia_mirror=gaia_mirror,
returnformat=returnformat,
forcefetch=forcefetch,
cachedir=cachedir,
verbose=verbose,
timeout=timeout,
refresh=refresh,
maxtimeout=maxtimeout,
maxtries=maxtries,
complete_query_later=complete_query_later)
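# Illustrative usage sketch (not part of the original module): a 60 arcsec
# cone search around an arbitrary example position, with an ADQL filter
# tacked on to keep only bright sources. The coordinates and magnitude cut
# are made-up demonstration values.
def _demo_conesearch():  # pragma: no cover
    result = objectlist_conesearch(
        270.0, -30.0, 60.0,
        extra_filter='phot_g_mean_mag < 15.0',
        returnformat='csv',
    )
    if result is not None:
        print('results table:', result['result'])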
def objectlist_radeclbox(radeclbox,
gaia_mirror=None,
columns=('source_id',
'ra','dec',
'phot_g_mean_mag',
'l','b',
                                  'parallax', 'parallax_error',
'pmra','pmra_error',
'pmdec','pmdec_error'),
extra_filter=None,
returnformat='csv',
forcefetch=False,
cachedir='~/.astrobase/gaia-cache',
verbose=True,
timeout=15.0,
refresh=2.0,
maxtimeout=300.0,
maxtries=3,
complete_query_later=True):
'''This queries the GAIA TAP service for a list of objects in an equatorial
coordinate box.
Parameters
----------
radeclbox : sequence of four floats
This defines the box to search in::
[ra_min, ra_max, decl_min, decl_max]
gaia_mirror : {'gaia','heidelberg','vizier'} or None
This is the key used to select a GAIA catalog mirror from the
`GAIA_URLS` dict above. If set, the specified mirror will be used. If
None, a random mirror chosen from that dict will be used.
columns : sequence of str
This indicates which columns from the GAIA table to request for the
objects found within the search radius.
extra_filter: str or None
If this is provided, must be a valid ADQL filter string that is used to
further filter the cone-search results.
returnformat : {'csv','votable','json'}
The returned file format to request from the GAIA catalog service.
forcefetch : bool
If this is True, the query will be retried even if cached results for
it exist.
cachedir : str
This points to the directory where results will be downloaded.
verbose : bool
If True, will indicate progress and warn of any issues.
timeout : float
This sets the amount of time in seconds to wait for the service to
respond to our initial request.
refresh : float
This sets the amount of time in seconds to wait before checking if the
result file is available. If the results file isn't available after
`refresh` seconds have elapsed, the function will wait for `refresh`
seconds continuously, until `maxtimeout` is reached or the results file
becomes available.
maxtimeout : float
The maximum amount of time in seconds to wait for a result to become
available after submitting our query request.
maxtries : int
The maximum number of tries (across all mirrors tried) to make to either
submit the request or download the results, before giving up.
    complete_query_later : bool
If set to True, a submitted query that does not return a result before
`maxtimeout` has passed will be cancelled but its input request
parameters and the result URL provided by the service will be saved. If
this function is then called later with these same input request
parameters, it will check if the query finally finished and a result is
available. If so, will download the results instead of submitting a new
query. If it's not done yet, will start waiting for results again. To
force launch a new query with the same request parameters, set the
`forcefetch` kwarg to True.
Returns
-------
dict
This returns a dict of the following form::
{'params':dict of the input params used for the query,
'provenance':'cache' or 'new download',
'result':path to the file on disk with the downloaded data table}
'''
# this was generated using the awesome query generator at:
# https://gea.esac.esa.int/archive/
# NOTE: here we don't resolve the table name right away. this is because
# some of the GAIA mirrors use different table names, so we leave the table
# name to be resolved by the lower level tap_query function. this is done by
# the {{table}} construct.
query = (
"select {columns} from {{table}} where "
"CONTAINS(POINT('ICRS',{{table}}.ra, {{table}}.dec),"
"BOX('ICRS',{ra_center:.5f},{decl_center:.5f},"
"{ra_width:.5f},{decl_height:.5f}))=1"
"{extra_filter_str}"
)
ra_min, ra_max, decl_min, decl_max = radeclbox
ra_center = (ra_max + ra_min)/2.0
decl_center = (decl_max + decl_min)/2.0
ra_width = ra_max - ra_min
decl_height = decl_max - decl_min
if extra_filter is not None:
extra_filter_str = ' and %s ' % extra_filter
else:
extra_filter_str = ''
formatted_query = query.format(columns=', '.join(columns),
extra_filter_str=extra_filter_str,
ra_center=ra_center,
decl_center=decl_center,
ra_width=ra_width,
decl_height=decl_height)
return tap_query(formatted_query,
gaia_mirror=gaia_mirror,
returnformat=returnformat,
forcefetch=forcefetch,
cachedir=cachedir,
verbose=verbose,
timeout=timeout,
refresh=refresh,
maxtimeout=maxtimeout,
maxtries=maxtries,
complete_query_later=complete_query_later)
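# Illustrative usage sketch (not part of the original module): a 1 x 1 degree
# box query. The box edges are arbitrary example values; note the input order
# is [ra_min, ra_max, decl_min, decl_max], as documented above.
def _demo_radeclbox():  # pragma: no cover
    result = objectlist_radeclbox([120.0, 121.0, 20.0, 21.0],
                                  returnformat='csv')
    if result is not None:
        print('results table:', result['result'])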
def objectid_search(gaiaid,
gaia_mirror=None,
columns=('source_id',
'ra','dec',
'phot_g_mean_mag',
'phot_bp_mean_mag',
'phot_rp_mean_mag',
'l','b',
                             'parallax', 'parallax_error',
'pmra','pmra_error',
'pmdec','pmdec_error'),
returnformat='csv',
forcefetch=False,
cachedir='~/.astrobase/gaia-cache',
verbose=True,
timeout=15.0,
refresh=2.0,
maxtimeout=300.0,
maxtries=3,
complete_query_later=True):
'''This queries the GAIA TAP service for a single GAIA source ID.
Parameters
----------
gaiaid : str
The source ID of the object whose info will be collected.
gaia_mirror : {'gaia','heidelberg','vizier'} or None
This is the key used to select a GAIA catalog mirror from the
`GAIA_URLS` dict above. If set, the specified mirror will be used. If
None, a random mirror chosen from that dict will be used.
columns : sequence of str
This indicates which columns from the GAIA table to request for the
objects found within the search radius.
returnformat : {'csv','votable','json'}
The returned file format to request from the GAIA catalog service.
forcefetch : bool
If this is True, the query will be retried even if cached results for
it exist.
cachedir : str
This points to the directory where results will be downloaded.
verbose : bool
If True, will indicate progress and warn of any issues.
timeout : float
This sets the amount of time in seconds to wait for the service to
respond to our initial request.
refresh : float
This sets the amount of time in seconds to wait before checking if the
result file is available. If the results file isn't available after
`refresh` seconds have elapsed, the function will wait for `refresh`
seconds continuously, until `maxtimeout` is reached or the results file
becomes available.
maxtimeout : float
The maximum amount of time in seconds to wait for a result to become
available after submitting our query request.
maxtries : int
The maximum number of tries (across all mirrors tried) to make to either
submit the request or download the results, before giving up.
    complete_query_later : bool
If set to True, a submitted query that does not return a result before
`maxtimeout` has passed will be cancelled but its input request
parameters and the result URL provided by the service will be saved. If
this function is then called later with these same input request
parameters, it will check if the query finally finished and a result is
available. If so, will download the results instead of submitting a new
query. If it's not done yet, will start waiting for results again. To
force launch a new query with the same request parameters, set the
`forcefetch` kwarg to True.
Returns
-------
dict
This returns a dict of the following form::
{'params':dict of the input params used for the query,
'provenance':'cache' or 'new download',
'result':path to the file on disk with the downloaded data table}
'''
# NOTE: here we don't resolve the table name right away. this is because
# some of the GAIA mirrors use different table names, so we leave the table
# name to be resolved by the lower level tap_query function. this is done by
# the {{table}} construct.
query = (
"select {columns} from {{table}} where "
"source_id = {gaiaid}"
)
formatted_query = query.format(columns=', '.join(columns),
gaiaid=gaiaid)
return tap_query(formatted_query,
gaia_mirror=gaia_mirror,
returnformat=returnformat,
forcefetch=forcefetch,
cachedir=cachedir,
verbose=verbose,
timeout=timeout,
refresh=refresh,
maxtimeout=maxtimeout,
maxtries=maxtries,
complete_query_later=complete_query_later)
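# Illustrative usage sketch (not part of the original module): look up a
# single source by its GAIA DR2 source_id. The ID below is an arbitrary
# placeholder value, not a real target of interest.
def _demo_objectid_search():  # pragma: no cover
    result = objectid_search('4295806720')
    if result is not None:
        print('results table:', result['result'])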
| mit | 5,913,882,603,663,914,000 | 35.200487 | 101 | 0.518621 | false |
peter-kiechle/tactile-sensors | python/interpolation_and_contour/interpolation_and_contour.py | 1 | 7498 | # -*- coding: utf-8 -*-
import os, sys
print("CWD: " + os.getcwd() )
# Load configuration file before pyplot
config_path = os.path.abspath('../matplotlib/')
sys.path.append(config_path)
import configuration as config
# Library path
lib_path = os.path.abspath('../../lib')
sys.path.append(lib_path)
import framemanager_python
import numpy as np
import matplotlib.pyplot as plt
import scipy.interpolate
# Color map
# Define "bds_highcontrast" color map by Britton Smith <[email protected]> from http://yt-project.org/
cdict = {'red': ((0.0, 80/256., 80/256.),
(0.2, 0.0, 0.0),
(0.4, 0.0, 0.0),
(0.6, 256/256., 256/256.),
(0.95, 256/256., 256/256.),
(1.0, 150/256., 150/256.)),
'green': ((0.0, 0/256., 0/256.),
(0.2, 0/256., 0/256.),
(0.4, 130/256., 130/256.),
(0.6, 256/256., 256/256.),
(1.0, 0.0, 0.0)),
'blue': ((0.0, 80/256., 80/256.),
(0.2, 220/256., 220/256.),
(0.4, 0.0, 0.0),
(0.6, 20/256., 20/256.),
(1.0, 0.0, 0.0))}
plt.register_cmap(name='bds_highcontrast', data=cdict)
# Define YELLOW_RED colormap: each row consists of (x, y0, y1) where the x must increase from 0 to 1
#row i: x y0 y1
# /
# /
#row i+1: x y0 y1
cdict = {'red': ((0.0, 0.9, 0.9),
(1.0, 0.9, 0.9)),
'green': ((0.0, 0.9, 0.9),
(1.0, 0.0, 0.0)),
'blue': ((0.0, 0.0, 0.0),
(1.0, 0.0, 0.0))}
plt.register_cmap(name='YELLOW_RED', data=cdict)
#cmap=plt.get_cmap('YELLOW_RED')
#cmap=plt.get_cmap('autumn')
#cmap=plt.get_cmap('gist_heat')
#cmap=plt.get_cmap('Spectral_r')
#cmap.set_under([0.0, 0.0, 0.0])
# Load profile
profileName = os.path.abspath("foam_ball_short.dsa")
frameID = 230
frameManager = framemanager_python.FrameManagerWrapper()
frameManager.load_profile(profileName);
numTSFrames = frameManager.get_tsframe_count();
matrixID = 1
# Load single frame
tsframe = np.copy( frameManager.get_tsframe(frameID, matrixID) );
cols = tsframe.shape[1]
rows = tsframe.shape[0]
# Add padding on border
padding = 2
v_padding = np.empty((padding, cols)); v_padding.fill(-50)
h_padding = np.empty((rows+2*padding, padding)); h_padding.fill(-50)
zs = np.vstack([v_padding, tsframe]) # Top
zs = np.vstack([zs, v_padding]) # Bottom
zs = np.hstack([h_padding, zs]) # Left
zs = np.hstack([zs, h_padding]) # Right
# Update matrix size with padding
cols = zs.shape[1]
rows = zs.shape[0]
# Coordinates of sampled data points
xs = np.arange(0, cols, 1)
ys = np.arange(0, rows, 1)
# Coordinates of interpolation points
scaleFactor = 10;
xi = np.linspace(xs.min(), xs.max(), cols*scaleFactor)
yi = np.linspace(ys.min(), ys.max(), rows*scaleFactor)
#------------------------------------------------------
# Interpolate with cubic splines
spline = scipy.interpolate.RectBivariateSpline(ys, xs, zs, kx=3, ky=3, s=0)
# Evaluate splines
zi = spline(yi, xi)
#------------------------------------------------------
'''
#------------------------------------------------------
# Polynomial interpolation: 'linear', 'nearest', 'cubic'
coordinates = [(y, x) for y in ys for x in xs]
zs_flattened = np.ravel(zs, order='C')
coordinates_interpolated = [(y, x) for y in yi for x in xi]
# Interpolate with griddata
zi_flattened= scipy.interpolate.griddata(coordinates, zs_flattened, coordinates_interpolated, method='cubic')
# Reshape flattened array to 2D
zi = zi_flattened.reshape((rows*scaleFactor, cols*scaleFactor))
#------------------------------------------------------
'''
#------------------------------------------------------
# Old API
# Set up a regular grid of sampled data points
#ys, xs = np.meshgrid(xs, ys)
# Set up a regular grid of interpolated points
#yi, xi = np.meshgrid(xi, yi)
# Interpolate
#tck = scipy.interpolate.bisplrep(xs2, ys2, zs, kx=3, ky=3, s=0)
# Evaluate splines
#zi = scipy.interpolate.bisplev(xi2[:,0], yi2[0,:], tck)
#------------------------------------------------------
# Apply threshold to level out small values (interpolation ripples)
min_threshold = 25
zi[zi < min_threshold ] = 0
#########################################
# Plotting
#########################################
fig, ax = plt.subplots()
############
# Histogram
############
plt.hist(zi.flatten(), 128, range=(min_threshold, zi.max()), fc='k', ec='k')
plt.savefig("histogram.pdf", format='pdf')
plt.close()
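# The value distribution is a handy reference when tuning min_threshold and
# the contour levels used below.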
########################
# Interpolated image
########################
fig, ax = plt.subplots()
# Interpolated image
#cmap=plt.get_cmap('gray')
cmap = plt.get_cmap('bds_highcontrast')
cax = ax.imshow(zi, cmap=cmap, vmin=zs.min(), vmax=zs.max(), origin='lower', extent=[xs.min(), xs.max(), ys.min(), ys.max()])
# Colorbar
cbar = fig.colorbar(cax)
cbar.set_label('Raw sensor value', rotation=90)
cbar.solids.set_edgecolor("face")  # avoid white seams between color segments in vector output
ax.invert_yaxis()
ax.xaxis.tick_top()
plt.axis('off')
plt.savefig("interpolation.pdf", format='pdf')
plt.close()
############
# Contour
############
fig, ax = plt.subplots()
# Nearest-Neighbor Image
cax = ax.imshow(zs, interpolation='nearest', cmap=plt.get_cmap('gray'), vmin=zs.min(), vmax=zs.max(), origin='lower', extent=[xs.min(), xs.max(), ys.min(), ys.max()])
#------------------------------------------------------
# Contour lines: contour()
#------------------------------------------------------
contour_threshold = 50
levels = np.linspace(contour_threshold, zs.max(), 10)
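# 10 iso-levels, evenly spaced from the contour threshold up to the frame maximum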
#contour = ax.contour(xi, yi, zi, levels, linewidths=1.0, colors=[(0.0, 0.0, 0.0)], origin='upper') # black contour
contour = ax.contour(xi, yi, zi, levels, linewidths=1.0, colors=[(1.0, 0.0, 0.0)], origin='upper') # Red contour
#contour = ax.contour(xi, yi, zi, levels, linewidths=1.0, cmap=plt.get_cmap('bds_highcontrast'), origin='upper') # Colormap
#plt.clabel(contour, inline=True, fontsize=9)
# Colorbar with contour levels
cbar = fig.colorbar(cax)
cbar.add_lines(contour)
cbar.set_label('Raw sensor value', rotation=90)
cbar.solids.set_edgecolor("face")  # avoid white seams between color segments in vector output
'''
#------------------------------------------------------
# Filled contours: contourf()
#------------------------------------------------------
# Background image
background = np.zeros((rows, cols))
cax = ax.imshow(background, cmap=plt.get_cmap('gray'), origin='lower', extent=[xs.min(), xs.max(), ys.min(), ys.max()] )
# Filled contour
contour_threshold = 100  # Ignore "ripples" from spline extrapolation
max_threshold = 0  # Boost the upper limit to avoid truncation error
levels = np.linspace(contour_threshold, zs.max(), num=10, endpoint=True)
# Levels correspond to midpoint of layers:
# Extend level range to enlarge top layer (avoid ugly hole)
levels[-1] = levels[-1] + (levels[-1] - levels[-2])/2
contour = ax.contourf(xi, yi, zi, levels=levels, cmap=plt.get_cmap('bds_highcontrast'), origin='upper')
cbar = fig.colorbar(contour, format='%.0f')
cbar.set_label('mV', rotation=0)
cbar.solids.set_edgecolor("face")  # avoid white seams between color segments in vector output
# Restore old levels
#levels[-1] = zs.max()
#cbar.set_ticks(levels)
#------------------------------------------------------
'''
ax.invert_yaxis()
ax.xaxis.tick_top()
plt.axis('off')
plt.savefig("contour.pdf", format='pdf')
plt.show()
| gpl-3.0 | 8,087,984,003,630,864,000 | 28.015504 | 167 | 0.569329 | false |
elopio/snapcraft | tests/unit/plugins/test_base.py | 1 | 3087 | # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2015-2018 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

import os
import unittest.mock

from testtools.matchers import Equals

import snapcraft
from tests import unit


class TestBasePlugin(unit.TestCase):

    def setUp(self):
        super().setUp()
        self.project_options = snapcraft.ProjectOptions()

    def test_parallel_build_count_returns_1_when_disabled(self):
        options = unit.MockOptions(disable_parallel=True)
        plugin = snapcraft.BasePlugin('test_plugin', options,
                                      self.project_options)
        self.assertThat(plugin.parallel_build_count, Equals(1))

    def test_parallel_build_count_returns_build_count_from_project(self):
        options = unit.MockOptions(disable_parallel=False)
        plugin = snapcraft.BasePlugin('test_plugin', options,
                                      self.project_options)
        # The patcher returned by patch.object() only takes effect once
        # started; use it as a context manager so the patched value is live
        # for the assertion (this assumes 'parallel_build_count' can be
        # overridden on the instance).
        with unittest.mock.patch.object(
                self.project_options, 'parallel_build_count', 2):
            self.assertThat(plugin.parallel_build_count, Equals(2))

    def test_part_name_with_forward_slash_is_one_directory(self):
        plugin = snapcraft.BasePlugin('test/part', options=None)
        os.makedirs(plugin.sourcedir)
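        # BasePlugin maps '/' in part names to U+29F8 (BIG SOLIDUS) so that
        # 'test/part' becomes a single directory instead of a nested path.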
        self.assertIn('test\N{BIG SOLIDUS}part', os.listdir('parts'))

    @unittest.mock.patch('snapcraft.internal.common.run')
    def test_run_without_specifying_cwd(self, mock_run):
        plugin = snapcraft.BasePlugin('test/part', options=None)
        plugin.run(['ls'])
        mock_run.assert_called_once_with(['ls'], cwd=plugin.builddir)

    @unittest.mock.patch('snapcraft.internal.common.run')
    def test_run_specifying_a_cwd(self, mock_run):
        plugin = snapcraft.BasePlugin('test/part', options=None)
        plugin.run(['ls'], cwd=plugin.sourcedir)
        mock_run.assert_called_once_with(['ls'], cwd=plugin.sourcedir)

    @unittest.mock.patch('snapcraft.internal.common.run_output')
    def test_run_output_without_specifying_cwd(self, mock_run):
        plugin = snapcraft.BasePlugin('test/part', options=None)
        plugin.run_output(['ls'])
        mock_run.assert_called_once_with(['ls'], cwd=plugin.builddir)

    @unittest.mock.patch('snapcraft.internal.common.run_output')
    def test_run_output_specifying_a_cwd(self, mock_run):
        plugin = snapcraft.BasePlugin('test/part', options=None)
        plugin.run_output(['ls'], cwd=plugin.sourcedir)
        mock_run.assert_called_once_with(['ls'], cwd=plugin.sourcedir)
| gpl-3.0 | -4,015,501,113,157,418,000 | 38.075949 | 73 | 0.685455 | false |