# Repo: zmap/ztag | Path: ztag/annotations/helix.py | License: apache-2.0
from ztag.annotation import *
import re
class Helix(Annotation):
protocol = protocols.HTTP
subprotocol = protocols.HTTP.GET
port = None
version_re = re.compile(
"^Helix Mobile Server/(\d+(\.\d+)*)",
re.IGNORECASE
)
os_re = re.compile(
"^Helix Mobile Server/(?:\d+(?:\.\d+)*) \(.*\)",
re.IGNORECASE
)
def process(self, obj, meta):
server = obj["headers"]["server"]
if server.startswith("Helix Mobile Server"):
meta.local_metadata.product = "Helix Mobile Server"
version = self.version_re.search(server).group(1)
meta.local_metadata.version = version
os = self.os_re.search(server).group(1)
if "win" in os:
meta.global_metadata.os = OperatingSystem.WINDOWS
elif "rhel4" in os:
meta.global_metadata.os = OperatingSystem.REDHAT
meta.global_metadata.os_version = "4"
elif "rhel5" in os:
meta.global_metadata.os = OperatingSystem.REDHAT
meta.global_metadata.os_version = "5"
elif "rhel6" in os:
meta.global_metadata.os = OperatingSystem.REDHAT
meta.global_metadata.os_version = "6"
# Repo: ric2b/Vivaldi-browser | Path: chromium/third_party/crashpad/crashpad/util/mach/mig_gen.py | License: bsd-3-clause
#!/usr/bin/env python
# coding: utf-8
# Copyright 2019 The Crashpad Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import collections
import os
import subprocess
import sys
MigInterface = collections.namedtuple('MigInterface', ['user_c', 'server_c',
'user_h', 'server_h'])
def generate_interface(defs, interface, includes=[], sdk=None, clang_path=None,
mig_path=None, migcom_path=None):
if mig_path is None:
mig_path = 'mig'
command = [mig_path,
'-user', interface.user_c,
'-server', interface.server_c,
'-header', interface.user_h,
'-sheader', interface.server_h,
]
if clang_path is not None:
os.environ['MIGCC'] = clang_path
if migcom_path is not None:
os.environ['MIGCOM'] = migcom_path
if sdk is not None:
command.extend(['-isysroot', sdk])
for include in includes:
command.extend(['-I' + include])
command.append(defs)
subprocess.check_call(command)
def parse_args(args):
parser = argparse.ArgumentParser()
parser.add_argument('--clang-path', help='Path to Clang')
parser.add_argument('--mig-path', help='Path to mig')
parser.add_argument('--migcom-path', help='Path to migcom')
parser.add_argument('--sdk', help='Path to SDK')
parser.add_argument('--include',
default=[],
action='append',
help='Additional include directory')
parser.add_argument('defs')
parser.add_argument('user_c')
parser.add_argument('server_c')
parser.add_argument('user_h')
parser.add_argument('server_h')
return parser.parse_args(args)
def main(args):
parsed = parse_args(args)
interface = MigInterface(parsed.user_c, parsed.server_c,
parsed.user_h, parsed.server_h)
generate_interface(parsed.defs, interface, parsed.include,
parsed.sdk, parsed.clang_path, parsed.mig_path,
parsed.migcom_path)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
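# Example invocation (illustrative; the .defs file and output paths are
# hypothetical, and --sdk is optional):
#   python mig_gen.py --sdk "$(xcrun --show-sdk-path)" \
#       exc.defs excUser.c excServer.c exc.h excServer.h
# which runs approximately:
#   mig -user excUser.c -server excServer.c -header exc.h \
#       -sheader excServer.h -isysroot <sdk> exc.defs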
# Repo: mirthbottle/ghg | Path: targetsmosh.py | License: mit
import pandas as pd
import numpy as np
def get_targets(p, year):
# years 2010 and 2011 and 2012 don't have ISIN, boooo
pcols = p.columns.values
ttypes_col = deets[year]["summary"]["ttypes"]
targets = p[p["Organisation"].notnull()][["Organisation",pcols[ttypes_col]]]
targets.rename(columns = {pcols[ttypes_col]: "target type"}, inplace=True)
targets["year"] = year - 1
targets["has absolute"] = targets["target type"].apply(lambda x: "solute" in unicode(x).encode('utf-8'))
targets["has intensity"] = targets["target type"].apply(lambda x: "ntensity" in unicode(x).encode('utf-8'))
return targets
def get_vcounts(p, year):
pcols = p.columns.values
    return p[pcols[goalcols[year]]].value_counts()
def summary(vcounts, p):
# stats about emissions targets in 2014
# generate for every year
hasintensity = vcounts['Intensity target'] + 350
hasabs = vcounts['Absolute target'] + 350
neg = len(p) - vcounts.values.sum() + vcounts['No']
return {"total":len(p),
"neg":neg,
"intensity":hasintensity,
"absolute":hasabs}
def get_companies_by_target(p):
# get by levels[0] should be ISIN
companies = p.index.levels[0].tolist()
pieces_targets = []
pieces_none = []
for c in companies:
try:
f = p.loc[c]
fhas_target = f[f["has target"]]
f["ISIN"] = c
yearswithtarget = fhas_target.index.tolist()
if len(yearswithtarget) > 2:
pieces_targets.append(f)
else:
pieces_none.append(f)
except Exception:
print c
pass
ptargets = pd.concat(pieces_targets).reset_index().set_index(["ISIN", "year"])
pnotargets = pd.concat(pieces_none).reset_index().set_index(["ISIN", "year"])
return ptargets, pnotargets
def get_hadtarget(targetorgs):
# shift had target
# targetorgs index is ["Organisation", "year"]
to_gs = targetorgs.groupby(level=0)
companies = to_gs.indices.keys()
pieces = []
for c in companies:
g = to_gs.get_group(c)
g_tseries = np.array(g["has target"].tolist())
g_aseries = np.array(g["has absolute"].tolist())
g_iseries = np.array(g["has intensity"].tolist())
g_tseries = g_tseries[:-1]
g_aseries = g_aseries[:-1]
g_iseries = g_iseries[:-1]
g = g[1:]
g['had target last year'] = g_tseries
g['had absolute last year'] = g_aseries
g['had intensity last year'] = g_iseries
# g["ISIN"] = c
pieces.append(g)
new_to = pd.concat(pieces).reset_index().set_index("Organisation")
return new_to
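# Illustration of the shift above (hypothetical data): for a company whose
# yearly "has target" flags are [False, True, True], the first year's row is
# dropped and the remaining rows get "had target last year" = [False, True].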
# targetorgs is the join of the table of targets,
# Scope 1 and 2 emissions, and orginfos
def get_targetorgs(to):
to = to.reset_index().set_index("ISIN")
to = to[['year','Country', 'GICS Sector',
'GICS Industry Group', 'GICS Industry',
'cogs', 'revt',
'has target', 'has absolute',
'has intensity',
'Scope 1', 'Scope 2',
'1and2 total', '1and2 intensity',
'percent change 1and2 intensity',
'percent change 1and2 total']]
return to
goalcols = { 2014: 14, 2013: 14, 2012: 12, 2011: 12, 2010: 12 }
# target details
deets = { 2014: { 'summary': { 'sheet': 12, 'ttypes': 14 },
'abs info':
{ 'sheet': 13, 'scope': 15, 'target': 17,
'base year': 18, 'base ghg': 19,
'target year': 20},
'int info':
{ 'sheet': 14, 'scope': 15, 'target': 17,
'metric': 18,
'base year': 19, 'base ghg int': 20,
'target year': 21},
'progress':
{ 'sheet': 16, 'target id': 14},
'initiatives':
{ 'sheet': 18, 'itype': 14,
'monetary savings': 17, 'monetary cost': 18 }
},
2013: { 'summary': { 'sheet': 10, 'ttypes': 14 },
'abs info':
{ 'sheet': 11, 'scope': 15, 'target': 17,
'base year': 18, 'base ghg': 19,
'target year': 20 },
'int info':
{ 'sheet': 12, 'scope': 15, 'target': 17,
'metric': 18,
'base year': 19, 'base ghg int': 20,
'target year': 21},
'progress': { 'sheet': 14 },
'initiatives': { 'sheet': 16, 'itype': 14 }
},
2012: { 'summary': { 'sheet': 10, 'ttypes': 12 },
'abs info':
{ 'sheet': 11, 'scope': 13, 'target': 15,
'base year': 16, 'base ghg': 17,
'target year': 18 },
'int info':
{ 'sheet': 12, 'scope': 13, 'target': 15,
'metric': 16, 'base year': 17, 'base ghg int': 18,
'target year': 19 },
'progress': { 'sheet': 14 },
'initiatives': { 'sheet': 16, 'itype': 12 }
},
2011: { 'summary': { 'sheet': 9, 'ttypes': 12 },
'abs info': {},
'int info': {},
'progress': {},
'initiatives': {}
},
2010: { 'summary': { 'sheet': 23, 'ttypes': 12 },
'abs info': {},
'int info': {},
'progress': {},
'initiatives': {}
}
}
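# Reading the deets map: deets[2014]['abs info'] says that absolute-target
# details for the 2014 questionnaire live on sheet 13, with the target scope
# in column 15, the target itself in column 17, the base year in column 18,
# the base-year emissions in column 19, and the target year in column 20.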
# scopes need cleaning...
# Repo: tnemisteam/cdf-steps | Path: teacher/views/teacher_loan_views.py | License: mit
from django.views.generic import ListView, DetailView, CreateView, \
DeleteView, UpdateView, \
ArchiveIndexView, DateDetailView, \
DayArchiveView, MonthArchiveView, \
TodayArchiveView, WeekArchiveView, \
YearArchiveView
from teacher.models import Teacher_loan
class Teacher_loanView(object):
model = Teacher_loan
def get_template_names(self):
"""Nest templates within teacher_loan directory."""
tpl = super(Teacher_loanView, self).get_template_names()[0]
app = self.model._meta.app_label
mdl = 'teacher_loan'
#self.template_name = tpl.replace(app, '{0}/{1}'.format(app, mdl))
self.template_name = tpl[:8]+'teacher_loan/'+tpl[8:]
return [self.template_name]
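        # e.g. "teacher/teacher_loan_list.html" becomes
        # "teacher/teacher_loan/teacher_loan_list.html"; the tpl[:8] slice
        # assumes the "teacher/" app prefix is exactly 8 characters long.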
class Teacher_loanDateView(Teacher_loanView):
date_field = 'timestamp'
month_format = '%m'
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('teacher_teacher_loan_list')
class Teacher_loanBaseListView(Teacher_loanView):
paginate_by = 10
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('teacher_teacher_loan_list')
class Teacher_loanArchiveIndexView(
Teacher_loanDateView, Teacher_loanBaseListView, ArchiveIndexView):
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('teacher_teacher_loan_list')
class Teacher_loanCreateView(Teacher_loanView, CreateView):
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('teacher_teacher_loan_list')
class Teacher_loanDateDetailView(Teacher_loanDateView, DateDetailView):
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('teacher_teacher_loan_list')
class Teacher_loanDayArchiveView(
Teacher_loanDateView, Teacher_loanBaseListView, DayArchiveView):
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('teacher_teacher_loan_list')
class Teacher_loanDeleteView(Teacher_loanView, DeleteView):
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('teacher_teacher_loan_list')
class Teacher_loanDetailView(Teacher_loanView, DetailView):
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('teacher_teacher_loan_list')
class Teacher_loanListView(Teacher_loanBaseListView, ListView):
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('teacher_teacher_loan_list')
class Teacher_loanMonthArchiveView(
Teacher_loanDateView, Teacher_loanBaseListView, MonthArchiveView):
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('teacher_teacher_loan_list')
class Teacher_loanTodayArchiveView(
Teacher_loanDateView, Teacher_loanBaseListView, TodayArchiveView):
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('teacher_teacher_loan_list')
class Teacher_loanUpdateView(Teacher_loanView, UpdateView):
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('teacher_teacher_loan_list')
class Teacher_loanWeekArchiveView(
Teacher_loanDateView, Teacher_loanBaseListView, WeekArchiveView):
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('teacher_teacher_loan_list')
class Teacher_loanYearArchiveView(
Teacher_loanDateView, Teacher_loanBaseListView, YearArchiveView):
make_object_list = True
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('teacher_teacher_loan_list')
# Repo: coin-or/oBB | Path: obb/trsq.py | License: lgpl-3.0
from __future__ import division
def trsq(Lg, delta, xc, f, g):
# import necessary functions
from numpy import dot, inf
from numpy.linalg import norm
# our own functions
from newton import newton
# Define function to minimise
fun = lambda x: f + dot(x,g) + (Lg / 2) * norm(x) ** 2
# n.b. x -> x - xc for convenience
# Case a) Trust-region inactive
# Quadratic order approx. solution
xq1 = -g / Lg
# Check if q. ord. root is within trust region
if (norm(xq1) < delta):
bndq1 = fun(xq1)
xbq1 = xq1
else: # No solution
bndq1 = inf
xbq1 = inf
# Case b) Trust-region active
# Initial perturbation
l = -Lg + 1e-5
# Define nfq(l) to find quadratic approx. roots
def nfq(l):
# Find x(l)
xl = -g / (l + Lg)
# Calculate |xl|-delta (for newton stopping rule)
xlmd = norm(xl) - delta
# Calculate f(l) for p=-1
fl = 1/norm(xl) - 1/delta
# Find x'(l)
xlp = g / ((l + Lg) ** 2)
# Calculate f'(l) for p=-1
flp = -dot(xl,xlp) * (dot(xl,xl) ** (-1.5))
# Calculate increment
dl = fl / flp
# Set Delta
Delta = delta
return xlmd, dl, Delta
# Run newton
l = newton(nfq, l)
# Given l, find xq2
xq2 = -g / (l + Lg)
bndq2 = fun(xq2)
xbq2 = xq2
# Return minimum of bndq1 and bndq2
if (bndq1 < bndq2):
bnd = bndq1
xb = xbq1 + xc# since x -> x - xc
else:
bnd = bndq2
xb = xbq2 + xc# since x -> x - xc
return bnd, xb
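# Usage sketch (illustrative, with made-up inputs): bound the quadratic
# overestimator f + g.(x - xc) + (Lg/2)|x - xc|^2 over the ball |x - xc| <= delta:
#   from numpy import array
#   bnd, xb = trsq(2.0, 0.5, array([0., 0.]), 1.0, array([1., -1.]))
#   # bnd is the minimum value, xb the minimizer (shifted back by xc)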
# Repo: threerings/splatd | Path: splat/helpers/opennms.py | License: bsd-3-clause
# opennms.py vi:ts=4:sw=4:expandtab:
#
# Support functions for plugins that deal with OpenNMS.
# Author: Landon Fuller <[email protected]>
#
# Copyright (c) 2007 Three Rings Design, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright owner nor the names of contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import os, tempfile, logging, stat
import splat
from splat import plugin
from pysqlite2 import dbapi2 as sqlite
try:
# Python 2.5 cElementTree
from xml.etree import cElementTree as ElementTree
except ImportError:
# Stand-alone pre-2.5 cElementTree
import cElementTree as ElementTree
# Logger
logger = logging.getLogger(splat.LOG_NAME)
# Output File Encoding
XML_ENCODING = "UTF-8"
# XML Namespaces
XML_USERS_NAMESPACE = "http://xmlns.opennms.org/xsd/users"
XML_GROUPS_NAMESPACE = "http://xmlns.opennms.org/xsd/groups"
# OpenNMS User Record Fields
OU_USERNAME = 'userName'
OU_FULLNAME = 'fullName'
OU_COMMENTS = 'comments'
OU_EMAIL = 'email'
OU_PAGER_EMAIL = 'pagerEmail'
OU_XMPP_ADDRESS = 'xmppAddress'
OU_NUMERIC_PAGER = 'numericPager'
OU_NUMERIC_PAGER_SERVICE = 'numericPagerService'
OU_TEXT_PAGER = 'textPager'
OU_TEXT_PAGER_SERVICE = 'textPagerService'
OU_LDAP_DN = 'ldapDN'
# OpenNMS Group Record Fields
OG_GROUPNAME = 'groupName'
class UserExistsException (plugin.SplatPluginError):
pass
class NoSuchUserException (plugin.SplatPluginError):
pass
class GroupExistsException (plugin.SplatPluginError):
pass
class NoSuchGroupException (plugin.SplatPluginError):
pass
def _sqlite_dict_factory(cursor, row):
"""
Returns sqlite rows as dictionaries
"""
d = {}
for idx, col in enumerate(cursor.description):
d[col[0]] = row[idx]
return d
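# Usage sketch: assign as a cursor's row factory so fetched rows come back as
# dicts keyed by column name, e.g.
#   cur = db.cursor()
#   cur.row_factory = _sqlite_dict_factory
#   cur.execute("SELECT * FROM Users")   # each row is {'userName': ..., ...}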
class WriterContext(object):
def __init__(self):
# A map of (XML/database) fields to LDAP attributes
# The name choices are no accident -- they're meant
# to match between the DB and the XML.
self.attrmap = {
OU_USERNAME : None,
OU_FULLNAME : None,
OU_COMMENTS : None,
OU_EMAIL : None,
OU_PAGER_EMAIL : None,
OU_XMPP_ADDRESS : None,
OU_NUMERIC_PAGER : None,
OU_NUMERIC_PAGER_SERVICE : None,
OU_TEXT_PAGER : None,
OU_TEXT_PAGER_SERVICE : None
}
# Map of configuration keys to the attribute map
self.config_attrmap = {}
for key in self.attrmap.iterkeys():
self.config_attrmap[key.lower() + "attribute"] = key
self.usersFile = None
self.groupsFile = None
self.opennmsGroup = None
class Writer(plugin.Helper):
@classmethod
def attributes(self):
# We want all attributes
return None
@classmethod
def parseOptions(self, options):
context = WriterContext()
for key in options.iterkeys():
# Do some magic to check for 'attribute keys' without enumerating
# them all over again.
if (key.endswith("attribute")):
try:
attrKey = context.config_attrmap[key]
except KeyError:
raise plugin.SplatPluginError, "Invalid option '%s' specified." % key
if (context.attrmap.has_key(attrKey)):
context.attrmap[attrKey] = options[key]
continue
if (key == "usersfile"):
context.usersFile = options[key]
continue
if (key == "groupsfile"):
context.groupsFile = options[key]
continue
if (key == "opennmsgroup"):
context.opennmsGroup = options[key]
continue
raise plugin.SplatPluginError, "Invalid option '%s' specified." % key
if (context.attrmap[OU_USERNAME] == None):
raise plugin.SplatPluginError, "Missing userNameAttribute option."
if (context.usersFile == None):
raise plugin.SplatPluginError, "Missing usersFile option."
if (context.groupsFile == None):
raise plugin.SplatPluginError, "Missing groupsFile option."
return context
def __init__ (self):
# If a fatal error occurs, set this to True, and we won't attempt to
# overwrite any files in finish()
self.fatalError = False
# Path to the user/group 'database' xml files
self.usersFile = None
self.groupsFile = None
# Create a temporary database in which to store user records
dbfile = None
try:
(handle, dbfile) = tempfile.mkstemp()
self._initdb(dbfile)
except Exception, e:
if (dbfile != None and os.path.exists(dbfile)):
os.unlink(dbfile)
raise plugin.SplatPluginError("Initialization failure: %s" % e)
def _initdb (self, dbfile):
"""
Create our temporary user record database
"""
# Connect to the database
self.db = sqlite.connect(dbfile)
# Initialize the users table
self.db.execute(
"""
CREATE TABLE Users (
userName TEXT NOT NULL PRIMARY KEY,
ldapDN TEXT NOT NULL,
fullName TEXT DEFAULT NULL,
comments TEXT DEFAULT NULL,
email TEXT DEFAULT NULL,
pagerEmail TEXT DEFAULT NULL,
xmppAddress TEXT DEFAULT NULL,
numericPager TEXT DEFAULT NULL,
numericPagerService TEXT DEFAULT NULL,
textPager TEXT DEFAULT NULL,
textPagerService TEXT DEFAULT NULL
);
"""
)
# Now for the group table
self.db.execute(
"""
CREATE TABLE Groups (
groupName TEXT NOT NULL PRIMARY KEY,
comments TEXT DEFAULT NULL
);
"""
)
# ... finally, the group member table
self.db.execute(
"""
CREATE TABLE GroupMembers (
groupName TEXT NOT NULL,
userName TEXT NOT NULL,
PRIMARY KEY(groupName, username),
FOREIGN KEY(groupName) REFERENCES Groups(groupName)
FOREIGN KEY(userName) REFERENCES Users(userName)
);
"""
)
# Drop the file out from under ourselves
os.unlink(dbfile)
# Commit our changes
self.db.commit()
def _insertDict(self, table, dataDict):
"""
Safely insert a dict into a table (with SQL escaping)
"""
def dictValuePad(key):
return '?'
cols = []
vals = []
for key in dataDict.iterkeys():
cols.append(key)
vals.append(dataDict[key])
sql = 'INSERT INTO ' + table
sql += ' ('
sql += ', '.join(cols)
sql += ') VALUES ('
sql += ', '.join(map(dictValuePad, vals))
sql += ');'
self.db.execute(sql, vals)
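    # For example, _insertDict("Users", {"userName": "bob", "ldapDN": "uid=bob"})
    # (hypothetical values) issues the parameterized statement
    #   INSERT INTO Users (userName, ldapDN) VALUES (?, ?);
    # with the values bound separately, so sqlite handles all SQL escaping.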
def _createUserAttributeDict (self, ldapEntry, attrMap):
"""
Add to dict from attribute dictionary
"""
result = {}
# Add required elements
result[OU_USERNAME] = ldapEntry.attributes[attrMap[OU_USERNAME]][0]
result[OU_LDAP_DN] = ldapEntry.dn
# Add optional elements
for key in attrMap.iterkeys():
ldapKey = attrMap[key]
if (ldapEntry.attributes.has_key(ldapKey)):
result[key] = ldapEntry.attributes[ldapKey][0]
return result
def _insertUserRecord (self, context, ldapEntry):
# Validate the available attributes
attributes = ldapEntry.attributes
if (not attributes.has_key(context.attrmap[OU_USERNAME])):
raise plugin.SplatPluginError, "Required attribute %s not found for dn %s." % (context.attrmap[OU_USERNAME], ldapEntry.dn)
# Insert the user record in the database
insertData = self._createUserAttributeDict(ldapEntry, context.attrmap)
try:
self._insertDict("Users", insertData)
self.db.commit()
except Exception, e:
self.fatalError = True
raise plugin.SplatPluginError, "Failed to commit user record to database for dn %s: %s" % (ldapEntry.dn, e)
def _insertGroupRecord (self, context, ldapEntry):
insertData = {
'groupName' : context.opennmsGroup
}
# Attempt to insert the group record
try:
self._insertDict("Groups", insertData)
self.db.commit()
except sqlite.IntegrityError, e:
# We'll get an IntegrityError if the record already exists:
# No need to add it.
self.db.rollback()
except Exception, e:
self.fatalError = True
raise plugin.SplatPluginError, "Failed to commit group record to database for dn: %s" % (ldapEntry.dn, e)
# Insert the group membership record
insertData = {
'groupName' : context.opennmsGroup,
'userName' : ldapEntry.attributes[context.attrmap[OU_USERNAME]][0]
}
try:
self._insertDict("GroupMembers", insertData)
self.db.commit()
except Exception, e:
self.fatalError = True
raise plugin.SplatPluginError, "Failed to commit group membership record to database for dn: %s" (ldapEntry.dn, e)
def work (self, context, ldapEntry, modified):
# We need to pull the location of the user file out of the first configuration
# context we get.
if (self.usersFile == None):
self.usersFile = context.usersFile
self.groupsFile = context.groupsFile
else:
# Is the setting still the same? It's not overridable.
if (self.usersFile != context.usersFile):
self.fatalError = True
raise plugin.SplatPluginError, "The \"usersFile\" setting may not be overridden in a group configuration"
if (self.groupsFile != context.groupsFile):
self.fatalError = True
raise plugin.SplatPluginError, "The \"groupsFile\" setting may not be overridden in a group configuration"
# Insert the user record
self._insertUserRecord(context, ldapEntry)
# Insert the group record
if (context.opennmsGroup != None):
self._insertGroupRecord(context, ldapEntry)
def _writeXML (self, etree, filePath):
# Write out the new XML file. mkstemp()-created files are
# "readable and writable only by the creating user ID", so we'll use that,
# and then reset permissions to match the original file.
# Open the temporary file
try:
outputDir = os.path.dirname(filePath)
(tempFd, tempPath) = tempfile.mkstemp(dir=outputDir)
except Exception, e:
raise plugin.SplatPluginError, "Failed to create output file: %s" % e
# Wrap the file descriptor
try:
output = os.fdopen(tempFd, 'w')
except Exception, e:
# Highly unlikely
os.unlink(tempPath)
raise plugin.SplatPluginError, "Failed to open output file: %s" % e
# Dump the XML
try:
etree.doc.write(output, XML_ENCODING)
output.close()
except Exception, e:
os.unlink(tempPath)
raise plugin.SplatPluginError, "Failed to write to output file: %s" % e
# Set permissions
try:
fstat = os.stat(filePath)
os.chmod(tempPath, stat.S_IMODE(fstat.st_mode))
os.chown(tempPath, fstat.st_uid, fstat.st_gid)
except Exception, e:
os.unlink(tempPath)
raise plugin.SplatPluginError, "Failed to set output permissions: %s" % e
# Atomicly replace the old file
try:
os.rename(tempPath, filePath)
except Exception, e:
os.unlink(tempPath)
raise plugin.SplatPluginError, "Failed to rename output file: %s" % e
def _finishUsers (self):
# Open up the OpenNMS user database.
try:
userdb = Users(self.usersFile)
except Exception, e:
raise plugin.SplatPluginError, "Failed to open %s: %s" % (self.usersFile, e)
# User Update/Insert Pass: Iterate over each user in the LDAP result set.
# If they currently exist in the OpenNMS db, update their record.
# If they do not exist in the OpenNMS db, add their record.
cur = self.db.cursor()
cur.row_factory = _sqlite_dict_factory
cur.execute("SELECT * from Users")
for ldapRecord in cur:
user = userdb.findUser(ldapRecord[OU_USERNAME])
if (user == None):
user = userdb.createUser(ldapRecord[OU_USERNAME])
# Clean up the result for use as arguments
del ldapRecord[OU_USERNAME]
del ldapRecord[OU_LDAP_DN]
userdb.updateUser(user, **ldapRecord)
# User Deletion pass. For each user in the OpenNMS db, check if they
# are to be found in the LDAP result so. If not, clear out
# their record.
for user in userdb.getUsers():
userId = user.find("user-id")
if (userId == None):
logger.error("Corrupt OpenNMS user record, missing user-id: %s" % ElementTree.tostring(user))
cur = self.db.cursor()
cur.execute("SELECT COUNT(*) FROM Users WHERE userName=?", (userId.text,))
if (cur.fetchone()[0] == 0):
userdb.deleteUser(userId.text)
self._writeXML(userdb, self.usersFile)
def _finishGroups (self):
try:
groupdb = Groups(self.groupsFile)
except Exception, e:
raise plugin.SplatPluginError, "Failed to open %s: %s" % (self.groupsFile, e)
# Group Update/Insert Pass: Iterate over each group in the LDAP result set.
# If it currently exists in the OpenNMS db, update the record.
# If it does not exist in the OpenNMS db, add the record.
groupCursor = self.db.cursor()
groupCursor.row_factory = _sqlite_dict_factory
groupCursor.execute("SELECT * from Groups")
for ldapRecord in groupCursor:
groupName = ldapRecord[OG_GROUPNAME]
group = groupdb.findGroup(groupName)
if (group == None):
group = groupdb.createGroup(groupName)
# Set group members
memberCursor = self.db.cursor()
memberCursor.row_factory = _sqlite_dict_factory
memberCursor.execute("SELECT userName FROM GroupMembers WHERE groupName = ?", (groupName,))
groupMembers = []
for member in memberCursor:
groupMembers.append(member[OU_USERNAME])
groupdb.setMembers(group, groupMembers)
# Group deletion pass. For each group in the OpenNMS db, check if it
# is to be found in the LDAP result so. If not, clear out
# the record.
for group in groupdb.getGroups():
groupName = group.find("name")
if (groupName == None):
logger.error("Corrupt OpenNMS group record, missing name: %s" % ElementTree.tostring(group))
countCursor = self.db.cursor()
countCursor.execute("SELECT COUNT(*) FROM Groups WHERE groupName=?", (groupName.text,))
if (countCursor.fetchone()[0] == 0):
groupdb.deleteGroup(groupName.text)
self._writeXML(groupdb, self.groupsFile)
def finish (self):
# If something terrible happened, don't overwrite the user XML file
if (self.fatalError):
return
# If no work was done, there won't be a users file
if (self.usersFile == None):
return
# User pass
self._finishUsers()
# Group pass
self._finishGroups()
class Users (object):
def __init__ (self, path):
self.doc = ElementTree.ElementTree(file = path)
def findUser (self, username):
for entry in self.getUsers():
userId = entry.find("user-id")
if (userId != None and userId.text == username):
return entry
# Not found
return None
def _getUsersElement (self):
# Retrieve the <users> element
return self.doc.find("./{%s}users" % (XML_USERS_NAMESPACE))
@classmethod
def _setChildElementText (self, parentNode, nodeName, text):
node = parentNode.find(nodeName)
node.text = text
@classmethod
def _setContactInfo (self, parentNode, contactType, info, serviceProvider = None):
node = self._findUserContact(parentNode, contactType)
node.set("info", info)
if (serviceProvider != None):
node.set("serviceProvider", serviceProvider)
@classmethod
def _findUserContact (self, parentNode, contactType):
nodes = parentNode.findall("./{%s}contact" % (XML_USERS_NAMESPACE))
for node in nodes:
if (node.get("type") == contactType):
return node
return None
def getUsers (self):
"""
Returns an iterator over all user elements
"""
return self.doc.findall("./{%s}users/*" % (XML_USERS_NAMESPACE))
def deleteUser (self, username):
user = self.findUser(username)
if (user == None):
raise NoSuchUserException("Could not find user %s." % username)
users = self._getUsersElement()
users.remove(user)
def createUser (self, username, fullName = "", comments = "", password = "XXX"):
"""
Insert and return a new user record.
@param username User's login name
@param fullName User's full name.
@param comments User comments.
@param password User's password (unused if LDAP auth is enabled)
"""
if (self.findUser(username) != None):
raise UserExistsException("User %s exists." % username)
# Create the user record
user = ElementTree.SubElement(self._getUsersElement(), "{%s}user" % XML_USERS_NAMESPACE)
# Set up the standard user data
userId = ElementTree.SubElement(user, "user-id")
userId.text = username
        fullNameElem = ElementTree.SubElement(user, "full-name")
        fullNameElem.text = fullName
userComments = ElementTree.SubElement(user, "user-comments")
userComments.text = comments
userPassword = ElementTree.SubElement(user, "password")
userPassword.text = password
# Add the required (blank) contact records
# E-mail
ElementTree.SubElement(user, "{%s}contact" % XML_USERS_NAMESPACE, type="email", info="")
# Pager E-mail
ElementTree.SubElement(user, "{%s}contact" % XML_USERS_NAMESPACE, type="pagerEmail", info="")
# Jabber Address
ElementTree.SubElement(user, "{%s}contact" % XML_USERS_NAMESPACE, type="xmppAddress", info="")
# Numeric Pager
ElementTree.SubElement(user, "{%s}contact" % XML_USERS_NAMESPACE, type="numericPage", info="", serviceProvider="")
# Text Pager
ElementTree.SubElement(user, "{%s}contact" % XML_USERS_NAMESPACE, type="textPage", info="", serviceProvider="")
return user
def updateUser (self, user, fullName = None, comments = None, email = None,
pagerEmail = None, xmppAddress = None, numericPager = None, numericPagerService = None,
textPager = None, textPagerService = None):
"""
Update a user record.
<user>
<user-id xmlns="">admin</user-id>
<full-name xmlns="">Administrator</full-name>
<user-comments xmlns="">Default administrator, do not delete</user-comments>
<password xmlns="">xxxx</password>
<contact type="email" info=""/>
<contact type="pagerEmail" info=""/>
<contact type="xmppAddress" info=""/>
<contact type="numericPage" info="" serviceProvider=""/>
<contact type="textPage" info="" serviceProvider=""/>
</user>
@param user: User XML node to update.
@param fullName: User's full name.
@param comments: User comments.
@param email: User's e-mail address.
@param pagerEmail: User's pager e-mail address.
@param xmppAddress: User's Jabber address.
@param numericPager: User's numeric pager. (number, service) tuple.
@param textPager: User's text pager. (number, service) tuple.
"""
if (fullName != None):
self._setChildElementText(user, "full-name", fullName)
if (comments != None):
self._setChildElementText(user, "user-comments", comments)
if (email != None):
self._setContactInfo(user, "email", email)
if (pagerEmail != None):
self._setContactInfo(user, "pagerEmail", pagerEmail)
if (xmppAddress != None):
self._setContactInfo(user, "xmppAddress", xmppAddress)
if (numericPager != None):
self._setContactInfo(user, "numericPage", numericPager, numericPagerService)
if (textPager != None):
self._setContactInfo(user, "textPager", textPager, textPagerService)
class Groups (object):
"""
<?xml version="1.0" encoding="UTF-8"?>
<groupinfo xmlns="http://xmlns.opennms.org/xsd/groups"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="groupinfo">
<ns1:header xmlns:ns1="http://xmlns.opennms.org/xsd/types">
<rev xmlns="">1.3</rev>
<created xmlns="">Monday, May 7, 2007 9:57:05 PM GMT</created>
<mstation xmlns="">dhcp-219.internal.opennms.org</mstation>
</ns1:header>
<groups>
<group>
<name xmlns="">Admin</name>
<comments xmlns="">The administrators</comments>
<user xmlns="">admin</user>
<user xmlns="">landonf</user>
</group>
</groups>
</groupinfo>
"""
def __init__ (self, path):
self.doc = ElementTree.ElementTree(file = path)
def getGroups (self):
return self.doc.findall("./{%s}groups/*" % (XML_GROUPS_NAMESPACE))
def findGroup (self, groupName):
for entry in self.getGroups():
groupId = entry.find("name")
if (groupId != None and groupId.text == groupName):
return entry
# Not found
return None
def _getGroupsElement (self):
return self.doc.find("./{%s}groups" % (XML_GROUPS_NAMESPACE))
def createGroup (self, groupName, comments = ""):
"""
Insert and return a new group record.
@param groupName Group name.
@param comments Group comments.
"""
if (self.findGroup(groupName) != None):
raise GroupExistsException("Group %s exists." % groupName)
# Create the group record
group = ElementTree.SubElement(self._getGroupsElement(), "group")
# Set up the standard group data
groupId = ElementTree.SubElement(group, "name", xmlns="")
groupId.text = groupName
groupComments = ElementTree.SubElement(group, "comments", xmlns="")
groupComments.text = comments
return group
def deleteGroup (self, groupName):
group = self.findGroup(groupName)
if (group == None):
raise NoSuchUserException("Could not find group %s." % groupName)
groups = self._getGroupsElement()
groups.remove(group)
def setMembers (self, group, members):
"""
Set a groups' members.
@param group Group XML node to update.
@param members A list of member names.
"""
# Delete existing user entries
entries = group.findall("./user")
for entry in entries:
group.remove(entry)
# Add new user entries
for member in members:
entry = ElementTree.SubElement(group, "user", xmlns="")
            entry.text = member
# Repo: krishauser/Klampt | Path: Python/klampt/plan/robotoptimize.py | License: bsd-3-clause
from ..model import ik,types,config
from ..math import vectorops
from ..robotsim import IKSolver,IKObjective,RobotModelLink
from ..io import loader
import time
import random
from ..math import optimize,symbolic,symbolic_klampt,so3,se3
import numpy as np
class KlamptVariable:
"""
Attributes:
name (str): the Klamp't item's name
type (str): the Klamp't item's type
encoding (str): the way in which the item is encoded in the optimization
variables (list of Variable): the list of Variables encoding this Klamp't item
expr (Expression): the Expression that will be used to replace the symbolic mainVariable via
appropriate variables
constraints, encoder, decoder: internally used
"""
def __init__(self,name,type):
self.name = name
self.type = type
self.encoding = None
self.variables = None
self.expr = None
self.constraints = []
self.encoder = None
self.decoder = None
def bind(self,obj):
"""Binds all Variables associated with this to the value of Klamp't object obj"""
if self.type in ['Config','Vector','Vector3','Point']:
self.variables[0].bind(obj)
elif self.type == 'Configs':
assert len(obj) == len(self.variables),"Invalid number of configs in Configs object"
for i,v in enumerate(obj):
self.variables[i].bind(v)
elif self.type == 'Rotation':
if self.encoder is None:
self.variables[0].bind(obj)
else:
self.variables[0].bind(self.encoder(obj))
elif self.type == 'RigidTransform':
if self.encoder is None:
self.variables[0].bind(obj[0])
self.variables[1].bind(obj[1])
else:
T = self.encoder(obj)
self.variables[0].bind(T[0])
self.variables[1].bind(T[1])
else:
raise ValueError("Unsupported object type "+self.type)
def getParams(self):
"""Returns the list of current parameters bound to the symbolic Variables."""
if len(self.variables) > 1:
return [v.value for v in self.variables]
else:
return self.variables[0].value
def getValue(self):
"""Returns the Klamp't value corresponding to the current bound parameters."""
return self.decode(self.getParams())
def unbind(self):
"""Unbinds all Variables associated with this."""
for v in self.variables:
v.unbind()
def encode(self,obj):
"""Returns the parameters giving the encoding of the Klamp't object obj"""
if self.encoder is None:
return obj
else:
return self.encoder(obj)
def decode(self,params):
"""Returns the Klamp't object given a parameters encoding it"""
if self.decoder is None:
return params
else:
return self.decoder(params)
class RobotOptimizationProblem(optimize.OptimizationProblemBuilder):
"""Defines a generalized optimization problem for a robot, which is a subclass of
OptimizationProblemBuilder. This may easily incorporate IK constraints, and may
have additional specifications of active DOF.
Attributes:
robot (RobotModel) the robot whose configuration is being optimized
world (WorldModel, optional): the world containing possible obstacles
context (KlamptContext, inherited): a symbolic.KlamptContext that stores the variable q
denoting the robot configuration, as well as any user data. User data "robot" and "world"
are available by default.
q (Variable): the primary optimization variable.
activeDofs (list): the list of active robot DOFs.
autoLoad (dict): a dictionary of (userDataName:fileName) pairs that are stored so that user data
is automatically loaded from files. I.e., upon self.loadJson(), for each pair in autoLoad
the command self.context.userData[userDataName] = loader.load(fileName) is executed.
managedVariables (dict of KlamptVariable): a dictionary of KlamptVariables like rotations and
rigid transforms.
Managed variables should be referred to in parsed expressions with the prefix @name,
and are encoded into optimization form and decoded from optimization form
using KlamptVariable.bind / KlamptVariable.unbind. You can also retrieve the Klampt value
by KlamptVariable.getValue().
If you would like to find the configuration *closest* to solving the
IK constraints, either add the IK constraints one by one with weight=1 (or some other
numeric value), or call enableSoftIK() after the constraints have been added. In this
case, solve will always return a solution, as long as it finds a configuration that
passes the feasibility tests. The optimization method changes so that it 1) optimizes
the residual norm, and then 2) optimizes the cost function to maintain the residual
norm at its current value. In other words, minimizing error is the first priority and
minimizing cost is the second priority.
"""
def __init__(self,robot=None,world=None,*ikgoals):
self.robot = robot
self.world = world
if self.world is not None and robot is None and self.world.numRobots() > 0:
robot = self.world.robot(0)
self.robot = robot
context = symbolic_klampt.KlamptContext()
context.addUserData("robot",self.robot)
if self.world:
context.addUserData("world",self.world)
optimize.OptimizationProblemBuilder.__init__(self,context)
self.activeDofs = None
self.autoLoad = dict()
nlinks = robot.numLinks() if robot is not None else None
self.q = self.context.addVar('q','V',nlinks)
self.managedVariables = dict()
self.optimizationVariables = [self.q]
self.setJointLimits()
for goal in ikgoals:
self.addIKObjective(goal)
def isIKObjective(self,index):
"""Returns True if the indexed constraint is an IKObjective"""
if self.objectives[index].type != "eq":
return False
return symbolic.is_op(self.objectives[index].expr,'ik.residual')
def getIKObjective(self,index):
"""Returns the IKObjective the indexed constraint is an IKObjective"""
res = self.objectives[index].expr.args[0]
assert isinstance(res,symbolic.ConstantExpression) and isinstance(res.value,IKObjective),"Not an IK objective: "+str(self.objectives[index].expr)
return res.value
def enableSoftIK(self,enabled=True):
"""Turns on soft IK solving. This is the same as hard IK solving if all
constraints can be reached, but if the constraints cannot be reached, it will
try to optimize the error.
"""
        for i,o in enumerate(self.objectives):
            if self.isIKObjective(i):
                o.soft = enabled
def addIKObjective(self,obj,weight=None):
"""Adds a new IKObjective to the problem. If weight is not None, it is
added as a soft constraint."""
assert isinstance(obj,IKObjective)
self.addEquality(self.context.ik.residual(obj,self.context.setConfig("robot",self.q)),weight)
if hasattr(obj,'robot'):
if self.robot is None:
self.robot = obj.robot
else:
assert self.robot.index == obj.robot.index,"All objectives must be on the same robot"
def addUserData(self,name,fn):
"""Adds an auto-loaded userData. Raises an exception if fn cannot be loaded.
Arguments:
- name: the name of the userData.
- fn: the file from which it is loaded. It must be loadable with loader.load.
"""
assert isinstance(fn,str)
obj = loader.load(fn)
self.context.addUserData(name,obj)
self.autoLoad[name] = fn
def addKlamptVar(self,name,type=None,initialValue=None,encoding='auto',constraints=True,optimize=True):
"""Adds one or more variables of a given Klamp't type (e.g., "Config", "Rotation", "RigidTransform").
If necessary, constraints on the object will also be added, e.g., joint limits, or a quaternion unit
norm constraint.
At least one of type / initialValue must be provided.
Args:
name (str): a name for the variable.
type (str, optional): a supported variable type (default None determines the type by initialValue).
Supported types include "Config", "Configs", Rotation", "RigidTransform", "Vector3". Future
work may support Trajectory and other types.
initialValue (optional): the configuration of the variable. If it's a float, the type will be set to
numeric, if it's a list it will be set to a vector, or if its a supported object, the type will
be set appropriately and config.getConfig(initialValue) will be used for its parameter setting.
encoding (str, optional): only supported for Rotation and RigidTransform types, and defines how the
variable will be parameterized in optimization. Can be:
- 'rotation_vector' (default) for rotation vector, 3 parameters
- 'quaternion' for quaternion encoding, 4 parameters + 1 constraint
- 'rpy' for roll-pitch-yaw euler angles, 3 parameters
- None for full rotation matrix (9 parameters, 6 constraints)
- 'auto' (equivalent to to 'rotation_vector')
constraints (bool, optional): True if all default constraints are to be added. For Config / Configs
types, bound constraints at the robot's joint limits are added.
optimize (bool, optional): If True, adds the parameterized variables to the list of optimization
variables.
Returns:
KlamptVariable: an object containing information about the encoding of the variable.
Note that extra symbolic Variable names may be decorated with extensions in the form of "_ext" if
the encoding is not direct.
"""
if type is None:
assert initialValue is not None,"Either type or initialValue must be provided"
type = types.objectToTypes(initialValue)
if type in ['Vector3','Point']:
if initialValue is None:
initialValue = [0.0]*3
else:
assert len(initialValue)==3
type = 'Vector'
def default(name,value):
v = self.context.addVar(name,"V",len(value))
v.value = value[:]
return v
if name in self.managedVariables:
raise ValueError("Klamp't variable name "+name+" already defined")
kv = KlamptVariable(name,type)
if type == 'Config':
if initialValue is None:
initialValue = self.robot.getConfig()
else:
assert len(initialValue) == self.robot.numLinks()
v = default(name,initialValue)
if constraints:
self.setBounds(v.name,*self.robot.getJointLimits())
kv.constraints = [self.robot.getJointLimits()]
elif type == 'Vector':
assert initialValue is not None,"Need to provide initialValue for "+type+" type variables"
v = default(name,initialValue)
            kv.expr = symbolic.VariableExpression(v)
elif type == 'Configs':
assert initialValue is not None,"Need to provide initialValue for "+type+" type variables"
vals = []
for i,v in enumerate(initialValue):
vals.append(default(name+"_"+str(i),v))
if constraints:
self.setBounds(vals[-1].name,*self.robot.getJointLimits())
kv.constraints.append(self.robot.getJointLimits())
kv.variables = vals
kv.expr = symbolic.list_(*vals)
elif type == 'Rotation':
if encoding == 'auto': encoding='rotation_vector'
if encoding == 'rotation_vector':
if initialValue is not None:
initialValue2 = so3.rotation_vector(initialValue)
else:
initialValue = so3.identity()
initialValue2 = [0.0]*3
v = default(name+"_rv",initialValue2)
kv.expr = self.context.so3.from_rotation_vector(v)
kv.decoder = so3.from_rotation_vector
kv.encoder = so3.rotation_vector
elif encoding == 'quaternion':
if initialValue is not None:
initialValue2 = so3.quaternion(initialValue)
else:
initialValue = so3.identity()
initialValue2 = [1,0,0,0]
v = default(name+"_q",initialValue2)
kv.expr = self.context.so3.from_quaternion(v)
kv.decoder = so3.from_quaternion
kv.encoder = so3.quaternion
if constraints:
f = self.addEquality(self.context.so3.quaternion_constraint(v))
f.name = name+"_q_constraint"
kv.constraints = [f]
elif encoding == 'rpy':
if initialValue is not None:
initialValue2 = so3.rpy(initialValue)
else:
initialValue = so3.identity()
initialValue2 = [0.0]*3
v = default(name+"_rpy",initialValue2)
kv.expr = self.context.so3.from_rpy(v)
kv.decoder = so3.from_rpy
kv.encoder = so3.rpy
elif encoding is None:
if initialValue is None:
initialValue = so3.identity()
v = self.addVar(name,"Vector",initialValue)
if constraints:
f = self.addEquality(self.context.so3.eq_constraint(v))
f.name = name+"_constraint"
kv.constraints = [f]
else:
raise ValueError("Invalid encoding "+str(encoding))
kv.encoding = encoding
elif type == 'RigidTransform':
if initialValue is None:
Ri,ti = None,[0.0]*3
else:
Ri,ti = initialValue
kR = self.addKlamptVar(name+'_R','Rotation',Ri,constraints=constraints,encoding=encoding)
t = default(name+'_t',ti)
kv.variables = kR.variables+[t]
kv.constraints = kR.constraints
kv.expr = symbolic.list_(kR.expr,t)
kv.encoding = encoding
if kR.encoder is not None:
kv.encoder = lambda T:(kR.encoder(T[0]),T[1])
kv.decoder = lambda T:(kR.decoder(T[0]),T[1])
del self.managedVariables[kR.name]
else:
raise ValueError("Unsupported object type "+type)
if kv.variables is None:
kv.variables = [v]
if kv.expr is None:
kv.expr = symbolic.VariableExpression(v)
self.context.addExpr(name,kv.expr)
if optimize:
for v in kv.variables:
self.optimizationVariables.append(v)
self.managedVariables[name] = kv
return kv
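    # Example (hypothetical names): addKlamptVar("T", "RigidTransform",
    # encoding='quaternion') creates variables T_R_q (4 parameters, plus a
    # unit-norm equality named "T_R_q_constraint") and T_t (3 parameters), and
    # registers the expression @T = [so3.from_quaternion(T_R_q), T_t].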
def get(self,name,defaultValue=None):
"""Returns a Variable or UserData in the context, or a managed KlamptVariable. If the item
does not exist, defaultValue is returned.
"""
if name in self.managedVariables:
return self.managedVariables[name]
else:
return self.context.get(name,defaultValue)
def rename(self,itemname,newname):
"""Renames a Variable, UserData, or managed KlamptVariable."""
if itemname in self.managedVariables:
item = self.managedVariables[itemname]
del self.managedVariables[itemname]
item.name = newname
print("Renaming KlamptVariable",itemname)
self.context.expressions[newname] = self.context.expressions[itemname]
del self.context.expressions[itemname]
for var in item.variables:
varnewname = newname + var.name[len(itemname):]
print(" Renaming internal variable",var.name,"to",varnewname)
if var.name in self.variableBounds:
self.variableBounds[varnewname] = self.variableBounds[var.name]
del self.variableBounds[var.name]
self.context.renameVar(var,varnewname)
self.managedVariables[newname] = item
elif itemname in self.context.userData:
self.context.renameUserData(itemname,newname)
else:
var = self.context.variableDict[itemname]
if var.name in self.variableBounds:
self.variableBounds[newname] = self.variableBounds[var.name]
del self.variableBounds[var.name]
self.context.renameVar(var,newname)
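    # Example: rename("T", "goal") on a managed RigidTransform also renames the
    # internal encoding variables (e.g. T_R_q -> goal_R_q, T_t -> goal_t) and
    # carries any registered variable bounds over to the new names.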
def setActiveDofs(self,links):
"""Sets the list of active DOFs. These may be indices, RobotModelLinks, or strings."""
self.activeDofs = []
for v in links:
if isinstance(v,str):
self.activeDofs.append(self.robot.link(v).index)
elif isinstance(v,RobotModelLink):
self.activeDofs.append(v.index)
else:
assert isinstance(v,int)
self.activeDofs.append(v)
def enableDof(self,link):
"""Enables an active DOF. If this is the first time enableDof is called,
this initializes the list of active DOFs to the single link. Otherwise
it appends it to the list. (By default, all DOFs are enabled)"""
if isinstance(link,str):
link = self.robot.link(link).index
elif isinstance(link,RobotModelLink):
            link = link.index
else:
assert isinstance(link,int)
if self.activeDofs is None:
self.activeDofs = [link]
else:
if link not in self.activeDofs:
self.activeDofs.append(link)
def disableJointLimits(self):
"""Disables joint limits. By default, the robot's joint limits are
used."""
self.setBounds("q",None,None)
def setJointLimits(self,qmin=None,qmax=None):
"""Sets the joint limits to the given lists qmin,qmax. By default,
the robot's joint limits are used."""
if qmin is None:
self.setBounds("q",*self.robot.getJointLimits())
return
#error checking
assert(len(qmin)==len(qmax))
if len(qmin)==0:
#disabled bounds
self.setBounds("q",None,None)
else:
if self.activeDofs is not None:
assert(len(qmin)==len(self.activeDofs))
raise NotImplementedError("What to do when you set joint limits on a subset of DOFS?")
else:
if self.robot is not None:
assert(len(qmin) == self.robot.numLinks())
self.setBounds("q",qmin,qmax)
def inJointLimits(self,q):
"""Returns True if config q is in the currently set joint limits."""
qmin,qmax = self.variableBounds.get('q',self.robot.getJointLimits())
if len(qmin) == 0:
return True
if len(qmin) > 0:
for v,a,b in zip(q,qmin,qmax):
if v < a or v > b:
return False
return True
def toJson(self,saveContextFunctions=False,prettyPrintExprs=False):
res = optimize.OptimizationProblemBuilder.toJson(self,saveContextFunctions,prettyPrintExprs)
if self.activeDofs is not None:
res['activeDofs'] = self.activeDofs
if len(self.managedVariables) > 0:
varobjs = []
for (k,v) in self.managedVariables.items():
varobj = dict()
assert k == v.name
varobj['name'] = v.name
varobj['type'] = v.type
varobj['encoding'] = v.encoding
varobjs.append(varobj)
res['managedVariables'] = varobjs
if len(self.autoLoad) > 0:
res['autoLoad'] = self.autoLoad
return res
def fromJson(self,obj,doAutoLoad=True):
"""Loads from a JSON-compatible object.
Args:
obj: the JSON-compatible object
doAutoLoad (bool, optional): if True, performs the auto-loading step. An IOError is raised if any
item can't be loaded.
"""
optimize.OptimizationProblemBuilder.fromJson(self,obj)
if 'activeDofs' in obj:
self.activeDofs = obj['activeDofs']
else:
self.activeDofs = None
        assert 'q' in self.context.variableDict,'Strange, the loaded JSON file does not have a configuration q variable?'
self.q = self.context.variableDict['q']
if 'managedVariables' in obj:
self.managedVariables = dict()
for v in obj['managedVariables']:
name = v['name']
type = v['type']
encoding = v['encoding']
raise NotImplementedError("TODO: load managed variables from disk properly")
self.managedVariables[name] = self.addKlamptVar(name,type,encoding)
if doAutoLoad:
self.autoLoad = obj.get('autoLoad',dict())
for (name,fn) in self.autoLoad.items():
try:
obj = loader.load(fn)
except Exception:
raise IOError("Auto-load item "+name+": "+fn+" could not be loaded")
self.context.addUserData(name,obj)
def solve(self,params=optimize.OptimizerParams()):
"""Locally or globally solves the given problem (using the robot's current configuration
as a seed if params.startRandom=False). Returns the solution configuration or
None if failed.
Args:
params (OptimizerParams, optional): configures the optimizer.
"""
if len(self.objectives) == 0:
print("Warning, calling solve without setting any constraints?")
return self.robot.getConfig()
robot = self.robot
solver = IKSolver(robot)
for i,obj in enumerate(self.objectives):
if self.isIKObjective(i):
ikobj = self.getIKObjective(i)
ikobj.robot = self.robot
solver.add(ikobj)
if self.activeDofs is not None:
solver.setActiveDofs(self.activeDofs)
ikActiveDofs = self.activeDofs
if 'q' in self.variableBounds:
solver.setJointLimits(*self.variableBounds['q'])
qmin,qmax = solver.getJointLimits()
if len(qmin)==0:
qmin,qmax = self.robot.getJointLimits()
backupJointLimits = None
if self.activeDofs is None:
#need to distinguish between dofs that affect feasibility vs IK
ikActiveDofs = solver.getActiveDofs()
            if any(not self.isIKObjective(i) for i in range(len(self.objectives))):
activeDofs = [i for i in range(len(qmin)) if qmin[i] != qmax[i]]
activeNonIKDofs = [i for i in activeDofs if i not in ikActiveDofs]
ikToActive = [activeDofs.index(i) for i in ikActiveDofs]
else:
activeDofs = ikActiveDofs
                activeNonIKDofs = []
ikToActive = list(range(len(activeDofs)))
else:
activeDofs = ikActiveDofs
activeNonIKDofs = []
ikToActive = list(range(len(ikActiveDofs)))
anyIKProblems = False
anyCosts = False
softIK = False
        for i,obj in enumerate(self.objectives):
            if self.isIKObjective(i):
                anyIKProblems = True
                if obj.soft:
                    softIK = True
            elif obj.type == 'cost' or obj.soft:
                anyCosts = True
#sample random start point
if params.startRandom:
self.randomVarBinding()
solver.sampleInitial()
if len(activeNonIKDofs)>0:
q = robot.getConfig()
for i in activeNonIKDofs:
q[i] = random.uniform(qmin[i],qmax[i])
robot.setConfig(q)
if params.localMethod is not None or params.globalMethod is not None or (anyCosts or not anyIKProblems):
#set up optProblem, an instance of optimize.Problem
assert self.optimizationVariables[0] is self.q
if len(activeDofs) < self.robot.numLinks():
#freeze those inactive DOFs
q = self.robot.getConfig()
backupJointLimits = qmin[:],qmax[:]
inactiveDofs = set(range(len(q))) - set(activeDofs)
for i in inactiveDofs:
qmin[i] = q[i]
qmax[i] = q[i]
self.setBounds("q",qmin,qmax)
reducedProblem,reducedToFullMapping,fullToReducedMapping = self.preprocess()
optq = reducedProblem.context.variableDict['q']
print("Preprocessed problem:")
reducedProblem.pprint()
optProblem = reducedProblem.getProblem()
assert backupJointLimits is not None
self.setBounds("q",*backupJointLimits)
else:
optq = self.q
optProblem = self.getProblem()
reducedToFullMapping = fullToReducedMapping = None
#optProblem is now ready to use
if params.globalMethod is not None:
#set seed = robot configuration
if self.q.value is None:
self.q.bind(robot.getConfig())
if reducedToFullMapping is None:
x0 = self.getVarVector()
else:
for var,vexpr in zip(reducedProblem.optimizationVariables,fullToReducedMapping):
var.bind(vexpr.eval(self.context))
x0 = reducedProblem.getVarVector()
#do global optimization of the cost function and return
(succ,res) = params.solve(optProblem,x0)
if not succ:
print("Global optimize returned failure")
return None
if reducedToFullMapping is not None:
reducedProblem.setVarVector(res)
for var,vexpr in zip(self.optimizationVariables,reducedToFullMapping):
var.bind(vexpr.eval(reducedProblem.context))
else:
self.setVarVector(res)
#check feasibility if desired
if not self.inJointLimits(self.q.value):
print("Result from global optimize is out of joint limits")
return None
if not self.feasibilityTestsPass():
print("Result from global optimize isn't feasible")
return None
if not self.satisfiesEqualities(params.tol):
print("Result from global optimize doesn't satisfy tolerance.")
return None
#passed
print("Global optimize succeeded! Cost",self.cost())
q = self.q.value
return q
if anyIKProblems:
print("Performing random-restart newton raphson")
#random-restart newton-raphson
solver.setMaxIters(params.numIters)
solver.setTolerance(params.tol)
best = None
bestQuality = float('inf')
if softIK:
#quality is a tuple
bestQuality = bestQuality,bestQuality
quality = None
            t0 = time.time()
            for restart in range(params.numRestarts):
                if time.time() - t0 > params.timeout:
                    return best
res = solver.solve()
                if res or softIK:
q = robot.getConfig()
print("Got a solve, checking feasibility...")
#check feasibility if desired
self.q.bind(q)
if not self.feasibilityTestsPass():
print("Failed feasibility")
#TODO: resample other non-robot optimization variables
                        if len(activeNonIKDofs) > 0:
u = float(restart+0.5)/params.numRestarts
q = robot.getConfig()
#perturbation sampling for non-IK dofs
                            for i in activeNonIKDofs:
delta = u*(qmax[i]-qmin[i])*0.5
q[i] = random.uniform(max(q[i]-delta,qmin[i]),min(q[i]+delta,qmax[i]))
robot.setConfig(q)
self.q.bind(q)
if not self.feasibilityTestsPass():
solver.sampleInitial()
continue
else:
solver.sampleInitial()
continue
print("Found a feasible config")
if softIK:
residual = solver.getResidual()
ikerr = max(abs(v) for v in residual)
if ikerr < params.tol:
ikerr = 0
else:
#minimize squared error
ikerr = vectorops.normSquared(residual)
if not anyCosts:
cost = 0
if ikerr == 0:
#feasible and no cost
return q
else:
cost = self.cost()
quality = ikerr,cost
else:
if not anyCosts:
#feasible, no costs, so we're done
print("Feasible and no costs, we're done")
return q
else:
                            #evaluate the cost (q was already bound above)
                            quality = self.cost()
print("Quality of solution",quality)
                if quality is not None and quality < bestQuality:
best = self.getVarValues()
bestQuality = quality
#sample a new ik seed
solver.sampleInitial()
            if best is None:
                return None
            if params.localMethod is None:
                return best[0]
print("Performing post-optimization")
#post-optimize using local optimizer
self.setVarValues(best)
if softIK:
if not self.satisfiesEqualities(params.tol):
raise NotImplementedError("TODO: add soft IK inequality constraint |ik residual| <= |current ik residual|")
optSolver = optimize.LocalOptimizer(method=params.localMethod)
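            # Local gradient-based refinement: seed at the best restart
            # result, run up to numIters iterations, and accept the outcome
            # only if it remains feasible and does not worsen the quality
            # (checked below)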
if reducedToFullMapping is not None:
for var,vexpr in zip(reducedProblem.optimizationVariables,fullToReducedMapping):
var.bind(vexpr.eval(self.context))
x0 = reducedProblem.getVarVector()
else:
x0 = self.getVarVector()
optSolver.setSeed(x0)
res = optSolver.solve(optProblem,params.numIters,params.tol)
if res[0]:
if reducedToFullMapping is not None:
reducedProblem.setVarVector(res[1])
for var,vexpr in zip(self.optimizationVariables,reducedToFullMapping):
var.bind(vexpr.eval(reducedProblem.context))
else:
self.setVarVector(res[1])
                #check feasibility if desired
                if not self.feasibilityTestsPass():
                    #infeasible result: discard it and keep the previous best
                    pass
elif not anyCosts:
#feasible
best = self.getVarValues()
else:
                    #optimize
                    quality = self.cost()
                    if softIK:
                        #the equalities were verified above, so the residual
                        #component of the quality tuple is zero
                        quality = (0,quality)
if quality < bestQuality:
#print "Optimization improvement",bestQuality,"->",quality
best = self.getVarValues()
bestQuality = quality
                    elif not softIK and quality > bestQuality + 1e-2:
print("Got worse solution by local optimizing?",bestQuality,"->",quality)
                        self.setVarValues(best)   #restore the previous best values
print("Resulting quality",bestQuality)
return best[0]
else:
            #no IK constraints and no global method set -- fall back to
            #params.solve() on the cost-only problem, which is assumed to
            #perform random restarts of local optimization
#set seed = robot configuration
if self.q.value is None:
self.q.bind(robot.getConfig())
if reducedToFullMapping is None:
x0 = self.getVarVector()
else:
for var,vexpr in zip(reducedProblem.optimizationVariables,fullToReducedMapping):
var.bind(vexpr.eval(self.context))
x0 = reducedProblem.getVarVector()
#do global optimization of the cost function and return
print("Current optimization variable vector is",x0)
(succ,res) = params.solve(optProblem,x0)
if not succ:
print("Global optimize returned failure")
return None
if reducedToFullMapping is not None:
reducedProblem.setVarVector(res)
for var,vexpr in zip(self.optimizationVariables,reducedToFullMapping):
var.bind(vexpr.eval(reducedProblem.context))
else:
self.setVarVector(res)
#check feasibility if desired
if not self.inJointLimits(self.q.value):
print("Result from global optimize is out of joint limits")
return None
if not self.feasibilityTestsPass():
print("Result from global optimize isn't feasible")
return None
if not self.satisfiesEqualities(params.tol):
print("Result from global optimize doesn't satisfy tolerance: result %s"%(str(self.equalityResidual()),))
for obj in self.objectives:
if obj.type == 'eq':
print(" ",obj.expr,":",obj.expr.eval(self.context))
return None
#passed
print("Global optimize succeeded! Cost",self.cost())
q = self.q.value
return q
| bsd-3-clause | 630,887,699,771,616,300 | 45.215324 | 153 | 0.564985 | false | 4.472641 | true | false | false |
JKAD/Arch-Install-Scripts | bmu.py | 1 | 3732 | #!/usr/bin/env python2
#--Filename----------------------------------------------------------#
# bmu.py #
#--Info--------------------------------------------------------------#
# BlackArch mirrorlist update script #
# Updated: 27/05/2015 #
# The following lines: #
# [blackarch] #
# Include = /etc/pacman.d/blackarch-mirrorlist #
# Must be present in pacman.conf #
# blackarch.sh can be used to setup your pacman.conf correctly #
# Designed for Arch Linux - https://archlinux.org/download/ #
# and BlackArch - http://www.blackarch.org/index.html #
#--Author------------------------------------------------------------#
# JKAD - https://jkad.github.io #
#--Tested------------------------------------------------------------#
# 27/05/2015 - archlinux-2015.05.01-dual.iso #
#--Licence-----------------------------------------------------------#
# MIT Licence: #
# https://github.com/JKAD/Arch-Install-Scripts/blob/master/LICENSE #
#--------------------------------------------------------------------#
import os
import sys
import urllib2
import time
from HTMLParser import HTMLParser
from datetime import datetime
protocol = "://"
class ParseHTML(HTMLParser):
def __init__(self):
HTMLParser.__init__(self)
self.results = []
self.return_data = False
self.href = False
def handle_starttag(self, tag, attrs):
if tag == "img":
for name, value in attrs:
if "flags" in value:
self.return_data = True
if tag == "href":
self.href = True
def handle_data(self, data):
if self.href:
if protocol in data:
self.results.append(data)
self.href = False
self.return_data = False
if self.return_data:
self.results.append(data)
def main():
if not os.geteuid() == 0:
sys.exit('bmu.py must be run as root')
url = "http://blackarch.org/downloads.html"
user_agent = "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 \
(KHTML, like Gecko) Chrome/43.0.2357.81 Safari/537.36"
FILE = '/etc/pacman.d/blackarch-mirrorlist'
headers = {}
headers["User-Agent"] = user_agent
request = urllib2.Request(url, headers=headers)
response = urllib2.urlopen(request)
page = response.read()
parser = ParseHTML()
parser.feed(page)
parser_results = parser.results
ts = time.time()
timestamp = "# Updated: %s\n\n" \
% datetime.fromtimestamp(ts).strftime('%d-%m-%Y %H:%M:%S')
message = "# Uncomment the mirrors you wish to use\n"
default = "Server = http://mirror.team-cymru.org/blackarch/$repo/os/$arch\n"
with open(FILE, 'w') as overwrite:
overwrite.write(timestamp + message + default)
for i in parser_results:
if i.strip() != "":
if protocol in i:
hash_sym = "#Server = "
ending = "$repo/os/$arch\n"
else:
hash_sym = "\n#"
ending = "\n"
with open(FILE, 'a') as out_file:
out_file.write(hash_sym + i.strip('\n\r\t ') + ending)
print "Mirrorlist updated"
print "Uncomment your prefered mirrors in %s and run pacman -Syy" % FILE
if __name__ == '__main__':
main()
| mit | -2,826,958,007,145,787,000 | 38.284211 | 80 | 0.459003 | false | 4.110132 | false | false | false |
tungvx/deploy | Django-0.90/django/contrib/comments/models/comments.py | 1 | 11734 | from django.core import meta
from django.models import auth, core
class Comment(meta.Model):
user = meta.ForeignKey(auth.User, raw_id_admin=True)
content_type = meta.ForeignKey(core.ContentType)
object_id = meta.IntegerField('object ID')
headline = meta.CharField(maxlength=255, blank=True)
comment = meta.TextField(maxlength=3000)
rating1 = meta.PositiveSmallIntegerField('rating #1', blank=True, null=True)
rating2 = meta.PositiveSmallIntegerField('rating #2', blank=True, null=True)
rating3 = meta.PositiveSmallIntegerField('rating #3', blank=True, null=True)
rating4 = meta.PositiveSmallIntegerField('rating #4', blank=True, null=True)
rating5 = meta.PositiveSmallIntegerField('rating #5', blank=True, null=True)
rating6 = meta.PositiveSmallIntegerField('rating #6', blank=True, null=True)
rating7 = meta.PositiveSmallIntegerField('rating #7', blank=True, null=True)
rating8 = meta.PositiveSmallIntegerField('rating #8', blank=True, null=True)
# This field designates whether to use this row's ratings in aggregate
# functions (summaries). We need this because people are allowed to post
# multiple reviews on the same thing, but the system will only use the
# latest one (with valid_rating=True) in tallying the reviews.
valid_rating = meta.BooleanField('is valid rating')
submit_date = meta.DateTimeField('date/time submitted', auto_now_add=True)
is_public = meta.BooleanField()
ip_address = meta.IPAddressField('IP address', blank=True, null=True)
is_removed = meta.BooleanField(help_text='Check this box if the comment is inappropriate. A "This comment has been removed" message will be displayed instead.')
site = meta.ForeignKey(core.Site)
class META:
db_table = 'comments'
module_constants = {
# min. and max. allowed dimensions for photo resizing (in pixels)
'MIN_PHOTO_DIMENSION': 5,
'MAX_PHOTO_DIMENSION': 1000,
# option codes for comment-form hidden fields
'PHOTOS_REQUIRED': 'pr',
'PHOTOS_OPTIONAL': 'pa',
'RATINGS_REQUIRED': 'rr',
'RATINGS_OPTIONAL': 'ra',
'IS_PUBLIC': 'ip',
}
ordering = ('-submit_date',)
admin = meta.Admin(
fields = (
(None, {'fields': ('content_type', 'object_id', 'site')}),
('Content', {'fields': ('user', 'headline', 'comment')}),
('Ratings', {'fields': ('rating1', 'rating2', 'rating3', 'rating4', 'rating5', 'rating6', 'rating7', 'rating8', 'valid_rating')}),
('Meta', {'fields': ('is_public', 'is_removed', 'ip_address')}),
),
list_display = ('user', 'submit_date', 'content_type', 'get_content_object'),
list_filter = ('submit_date',),
date_hierarchy = 'submit_date',
search_fields = ('comment', 'user__username'),
)
def __repr__(self):
return "%s: %s..." % (self.get_user().username, self.comment[:100])
def get_absolute_url(self):
return self.get_content_object().get_absolute_url() + "#c" + str(self.id)
def get_crossdomain_url(self):
return "/r/%d/%d/" % (self.content_type_id, self.object_id)
def get_flag_url(self):
return "/comments/flag/%s/" % self.id
def get_deletion_url(self):
return "/comments/delete/%s/" % self.id
def get_content_object(self):
"""
Returns the object that this comment is a comment on. Returns None if
the object no longer exists.
"""
from django.core.exceptions import ObjectDoesNotExist
try:
return self.get_content_type().get_object_for_this_type(pk=self.object_id)
except ObjectDoesNotExist:
return None
get_content_object.short_description = 'Content object'
def _fill_karma_cache(self):
"Helper function that populates good/bad karma caches"
good, bad = 0, 0
for k in self.get_karmascore_list():
if k.score == -1:
bad +=1
elif k.score == 1:
good +=1
self._karma_total_good, self._karma_total_bad = good, bad
def get_good_karma_total(self):
if not hasattr(self, "_karma_total_good"):
self._fill_karma_cache()
return self._karma_total_good
def get_bad_karma_total(self):
if not hasattr(self, "_karma_total_bad"):
self._fill_karma_cache()
return self._karma_total_bad
def get_karma_total(self):
if not hasattr(self, "_karma_total_good") or not hasattr(self, "_karma_total_bad"):
self._fill_karma_cache()
return self._karma_total_good + self._karma_total_bad
def get_as_text(self):
return 'Posted by %s at %s\n\n%s\n\nhttp://%s%s' % \
(self.get_user().username, self.submit_date,
self.comment, self.get_site().domain, self.get_absolute_url())
def _module_get_security_hash(options, photo_options, rating_options, target):
"""
Returns the MD5 hash of the given options (a comma-separated string such as
'pa,ra') and target (something like 'lcom.eventtimes:5157'). Used to
validate that submitted form options have not been tampered-with.
"""
from django.conf.settings import SECRET_KEY
import md5
return md5.new(options + photo_options + rating_options + target + SECRET_KEY).hexdigest()
def _module_get_rating_options(rating_string):
"""
Given a rating_string, this returns a tuple of (rating_range, options).
>>> s = "scale:1-10|First_category|Second_category"
>>> get_rating_options(s)
([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], ['First category', 'Second category'])
"""
rating_range, options = rating_string.split('|', 1)
rating_range = range(int(rating_range[6:].split('-')[0]), int(rating_range[6:].split('-')[1])+1)
choices = [c.replace('_', ' ') for c in options.split('|')]
return rating_range, choices
def _module_get_list_with_karma(**kwargs):
"""
Returns a list of Comment objects matching the given lookup terms, with
_karma_total_good and _karma_total_bad filled.
"""
kwargs.setdefault('select', {})
kwargs['select']['_karma_total_good'] = 'SELECT COUNT(*) FROM comments_karma WHERE comments_karma.comment_id=comments.id AND score=1'
kwargs['select']['_karma_total_bad'] = 'SELECT COUNT(*) FROM comments_karma WHERE comments_karma.comment_id=comments.id AND score=-1'
return get_list(**kwargs)
def _module_user_is_moderator(user):
from django.conf.settings import COMMENTS_MODERATORS_GROUP
if user.is_superuser:
return True
for g in user.get_group_list():
if g.id == COMMENTS_MODERATORS_GROUP:
return True
return False
class FreeComment(meta.Model):
# A FreeComment is a comment by a non-registered user.
content_type = meta.ForeignKey(core.ContentType)
object_id = meta.IntegerField('object ID')
comment = meta.TextField(maxlength=3000)
person_name = meta.CharField("person's name", maxlength=50)
submit_date = meta.DateTimeField('date/time submitted', auto_now_add=True)
is_public = meta.BooleanField()
ip_address = meta.IPAddressField()
# TODO: Change this to is_removed, like Comment
approved = meta.BooleanField('approved by staff')
site = meta.ForeignKey(core.Site)
class META:
db_table = 'comments_free'
ordering = ('-submit_date',)
admin = meta.Admin(
fields = (
(None, {'fields': ('content_type', 'object_id', 'site')}),
('Content', {'fields': ('person_name', 'comment')}),
('Meta', {'fields': ('submit_date', 'is_public', 'ip_address', 'approved')}),
),
list_display = ('person_name', 'submit_date', 'content_type', 'get_content_object'),
list_filter = ('submit_date',),
date_hierarchy = 'submit_date',
search_fields = ('comment', 'person_name'),
)
def __repr__(self):
return "%s: %s..." % (self.person_name, self.comment[:100])
def get_absolute_url(self):
return self.get_content_object().get_absolute_url() + "#c" + str(self.id)
def get_content_object(self):
"""
Returns the object that this comment is a comment on. Returns None if
the object no longer exists.
"""
from django.core.exceptions import ObjectDoesNotExist
try:
return self.get_content_type().get_object_for_this_type(pk=self.object_id)
except ObjectDoesNotExist:
return None
get_content_object.short_description = 'Content object'
class KarmaScore(meta.Model):
user = meta.ForeignKey(auth.User)
comment = meta.ForeignKey(Comment)
score = meta.SmallIntegerField(db_index=True)
scored_date = meta.DateTimeField(auto_now=True)
class META:
module_name = 'karma'
unique_together = (('user', 'comment'),)
module_constants = {
# what users get if they don't have any karma
'DEFAULT_KARMA': 5,
'KARMA_NEEDED_BEFORE_DISPLAYED': 3,
}
def __repr__(self):
return "%d rating by %s" % (self.score, self.get_user())
def _module_vote(user_id, comment_id, score):
try:
karma = get_object(comment__id__exact=comment_id, user__id__exact=user_id)
except KarmaScoreDoesNotExist:
karma = KarmaScore(None, user_id, comment_id, score, datetime.datetime.now())
karma.save()
else:
karma.score = score
karma.scored_date = datetime.datetime.now()
karma.save()
def _module_get_pretty_score(score):
"""
Given a score between -1 and 1 (inclusive), returns the same score on a
scale between 1 and 10 (inclusive), as an integer.
"""
if score is None:
return DEFAULT_KARMA
return int(round((4.5 * score) + 5.5))
class UserFlag(meta.Model):
user = meta.ForeignKey(auth.User)
comment = meta.ForeignKey(Comment)
flag_date = meta.DateTimeField(auto_now_add=True)
class META:
db_table = 'comments_user_flags'
unique_together = (('user', 'comment'),)
def __repr__(self):
return "Flag by %r" % self.get_user()
def _module_flag(comment, user):
"""
Flags the given comment by the given user. If the comment has already
been flagged by the user, or it was a comment posted by the user,
nothing happens.
"""
if int(comment.user_id) == int(user.id):
return # A user can't flag his own comment. Fail silently.
try:
f = get_object(user__id__exact=user.id, comment__id__exact=comment.id)
except UserFlagDoesNotExist:
from django.core.mail import mail_managers
f = UserFlag(None, user.id, comment.id, None)
message = 'This comment was flagged by %s:\n\n%s' % (user.username, comment.get_as_text())
mail_managers('Comment flagged', message, fail_silently=True)
f.save()
class ModeratorDeletion(meta.Model):
user = meta.ForeignKey(auth.User, verbose_name='moderator')
comment = meta.ForeignKey(Comment)
deletion_date = meta.DateTimeField(auto_now_add=True)
class META:
db_table = 'comments_moderator_deletions'
unique_together = (('user', 'comment'),)
def __repr__(self):
return "Moderator deletion by %r" % self.get_user()
| apache-2.0 | 947,574,036,190,088,000 | 41.669091 | 164 | 0.607551 | false | 3.812216 | false | false | false |
fauskanger/Pretreat | app/libs/json_map.py | 1 | 15076 | # json-map, a tiled JSON map renderer for pyglet
# Copyright (C) 2014 Juan J. Martinez <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
A Tiled JSON map renderer for pyglet.
These classes use the JSON format as generated by Tiled JSON plugin.
`pyglet.resource` framework is used to load all the elements of the map, so
any path information must be removed from the tileset.
"""
import os
import json
import pyglet
from pyglet.graphics import OrderedGroup
from pyglet.sprite import Sprite
from pyglet import gl
__all__ = ['Map', "TileLayer", "ObjectGroup",]
def get_texture_sequence(filename, tilewidth=32, tileheight=32, margin=1, spacing=1, nearest=False):
"""Returns a texture sequence of a grid generated from a tile set."""
image = pyglet.resource.image(filename)
region = image.get_region(margin, margin, image.width-margin*2, image.height-margin*2)
grid = pyglet.image.ImageGrid(region,
int(region.height/tileheight),
int(region.width/tilewidth),
row_padding=spacing,
column_padding=spacing,
)
texture = grid.get_texture_sequence()
if nearest:
gl.glTexParameteri(texture.target, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST)
gl.glTexParameteri(texture.target, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST)
return texture
class BaseLayer(object):
"""
Base layer.
Takes care of the "visible" flag.
"""
# ordered group
groups = 0
def __init__(self, data, map):
self.data = data
self.map = map
if self.data["visible"]:
self.sprites = {}
self.group = OrderedGroup(BaseLayer.groups)
BaseLayer.groups += 1
class TileLayer(BaseLayer):
"""
Tile layer.
Provides a pythonic interface to the tile layer, including:
- Iterate through the tiles.
- Check if one coordinate exists in the layer.
- Get one tile of layer.
"""
def __iter__(self):
return iter(self.data)
def __contains__(self, index):
if type(index) != tuple:
raise TypeError("tuple expected")
x, y = index
return int(x+y*self.map.data["width"]) in self.data["data"]
def __getitem__(self, index):
if type(index) != tuple:
raise TypeError("tuple expected")
x, y = index
return self.data["data"][int(x+y*self.map.data["width"])]
def set_viewport(self, x, y, w, h):
tw = self.map.data["tilewidth"]
th = self.map.data["tileheight"]
def yrange(f, t, s):
while f < t:
yield f
f += s
in_use = []
for j in yrange(y, y+h+th, th):
py = j//th
for i in yrange(x, x+w+tw, tw):
px = i//tw
in_use.append((px, py))
if (px, py) not in self.sprites:
try:
texture = self.map.get_texture(self[px, py])
except (KeyError, IndexError):
self.sprites[(px, py)] = None
else:
self.sprites[(px, py)] = Sprite(texture,
x=(px*tw),
y=h-(py*th)-th,
batch=self.map.batch,
group=self.group,
usage="static",
)
# keys_to_remove = list(k for k, v in self.sprites.items() if k not in in_use)
# for key in keys_to_remove:
# if self.sprites[key] is not None:
# self.sprites[key].delete()
# del self.sprites[key]
for key in list(self.sprites.keys()):
if key not in in_use:
if self.sprites[key] is not None:
self.sprites[key].delete()
del self.sprites[key]
class ObjectGroup(BaseLayer):
"""
Object Group Layer.
Only tile based objects will be drawn (not shape based).
Provides a pythonic interface to the object layer, including:
- Iterate through the objects.
- Check if one coordinate or an object name exists in the layer.
- Get one object based on its coordinates or its name.
Also it is possible to get a list of objects of the same type with
`ObjectGroup.get_by_type(type)`.
"""
def __init__(self, data, map):
super(ObjectGroup, self).__init__(data, map)
self.h = 0
self.objects = []
self._index = {}
self._index_type = {}
self._xy_index = {}
for obj in data["objects"]:
self.objects.append(obj)
name = obj.get("name", "?")
if name not in self._index:
self._index[name] = []
otype = obj.get("type", "?")
if otype not in self._index_type:
self._index_type[otype] = []
x = int(obj["x"])//self.map.data["tilewidth"]
y = int(obj["y"])//self.map.data["tileheight"]-1
if (x, y) not in self._xy_index:
self._xy_index[x, y] = []
self._index[name].append(self.objects[-1])
self._index_type[otype].append(self.objects[-1])
self._xy_index[x, y].append(self.objects[-1])
# XXX: is this useful AT ALL?
self.objects.sort(key=lambda obj: obj["x"]+obj["y"]*self.map.data["width"])
def __iter__(self):
return iter(self.objects)
def __contains__(self, name):
if isinstance(name, tuple):
x, y = name
return (int(x), int(y)) in self._xy_index
return name in self._index
def __getitem__(self, name):
if isinstance(name, tuple):
x, y = name
# XXX: if there are several objects, expect the first one
return self._xy_index[int(x), int(y)][0]
return self._index[name]
def get_by_type(self, otype):
return self._index_type[otype]
def set_viewport(self, x, y, w, h):
self.h = h
tw = self.map.data["tilewidth"]
th = self.map.data["tileheight"]
in_use = []
for obj in self.objects:
if x-tw < obj["x"] < x+w+tw and y-th < obj["y"] < y+h+th:
if not obj["visible"]:
continue
if "gid" in obj:
in_use.append((obj["x"], obj["y"]))
try:
texture = self.map.get_texture(obj["gid"])
tileoffset = self.map.get_tileoffset(obj["gid"])
except (IndexError, KeyError):
sprite = None
else:
sprite = Sprite(texture,
x=obj["x"]+tileoffset[0],
y=self.h-obj["y"]+tileoffset[1],
batch=self.map.batch,
group=self.group,
usage="static",
)
self.sprites[(obj["x"], obj["y"])] = sprite
for key in list(self.sprites.keys()):
if key not in in_use:
self.sprites[key].delete()
del self.sprites[key]
class Tileset(object):
"""Manages a tileset and it's used internally by TileLayer."""
def __init__(self, data, nearest=False):
self.data = data
# used to convert coordinates of the grid
self.columns = (self.data["imagewidth"]-self.data["spacing"]*2)//(self.data["tilewidth"]-self.data["margin"])
self.rows = (self.data["imageheight"]-self.data["spacing"]*2)//(self.data["tileheight"]-self.data["margin"])
# the image will be accessed using pyglet resources
self.image = os.path.basename(self.data["image"])
self.texture = get_texture_sequence(self.image, self.data["tilewidth"],
self.data["tileheight"],
self.data["margin"],
self.data["spacing"],
nearest=False,
)
def __getitem__(self, index):
return self.texture[index]
def __len__(self):
return len(self.texture)
class Map(object):
"""
Load, manage and render Tiled JSON files.
Maps can created providing the JSON data to this class or using `Map.load_json()`
and after that a viewport must be set with `Map.set_viewport()`.
"""
def __init__(self, data, nearest=False):
self.data = data
self.tilesets = {} # the order is not important
self.layers = []
self.tilelayers = {}
self.objectgroups = {}
for tileset in data["tilesets"]:
self.tilesets[tileset["name"]] = Tileset(tileset, nearest)
for layer in data["layers"]:
# TODO: test this!
if layer['name'] in (self.tilelayers, self.objectgroups):
raise ValueError("Duplicated layer name %s" % layer["name"])
if layer["type"] == "tilelayer":
self.layers.append(TileLayer(layer, self))
self.tilelayers[layer["name"]] = self.layers[-1]
elif layer["type"] == "objectgroup":
self.layers.append(ObjectGroup(layer, self))
self.objectgroups[layer["name"]] = self.layers[-1]
else:
raise ValueError("unsupported layer type %s, skipping" % layer["type"])
self.batch = pyglet.graphics.Batch()
# viewport
self.x = 0
self.y = 0
self.w = 0
self.h = 0
# focus
self.fx = None
self.fy = None
# useful (size in pixels)
self.p_width = self.data["width"]*self.data["tilewidth"]
self.p_height = self.data["height"]*self.data["tileheight"]
# build a texture index converting pyglet indexing of the texture grid
# to tiled coordinate system
self.tileoffset_index = {}
self.texture_index = {}
for tileset in self.tilesets.values():
for y in range(tileset.rows):
for x in range(tileset.columns):
self.texture_index[x+y*tileset.columns+tileset.data["firstgid"]] = \
tileset[(tileset.rows-1-y),x]
# TODO: test this!
if "tileoffset" in tileset.data:
self.tileoffset_index[x+y*tileset.columns+tileset.data["firstgid"]] = \
(tileset.data["tileoffset"]["x"], tileset.data["tileoffset"]["y"])
def invalidate(self):
"""Forces a batch update of the map."""
self.set_viewport(self.x, self.y, self.w, self.h, True)
def set_viewport(self, x, y, w, h, force=False):
"""
Sets the map viewport to the screen coordinates.
Optionally the force flag can be used to update the batch even if the
viewport didn't change (this should be used via `Map.invalidate()`).
"""
# x and y can be floats
vx = max(x, 0)
vy = max(y, 0)
vx = min(vx, (self.p_width)-w)
vy = min(vy, (self.p_height)-h)
vw = int(w)
vh = int(h)
if not any([force, vx!=self.x, vy!=self.y, vw!=self.w, vh!=self.h]):
return
self.x = vx
self.y = vy
self.w = vw
self.h = vh
for layer in self.layers:
if layer.data["visible"]:
layer.set_viewport(self.x, self.y, self.w, self.h)
def set_focus(self, x, y):
"""Sets the focus in (x, y) world coordinates."""
x = int(x)
y = int(y)
if self.fx == x and self.fy == y:
return
self.fx = x
self.fy = y
vx = max(x-(self.w//2), 0)
vy = max(y-(self.h//2), 0)
if vx+(self.w//2) > self.p_width:
vx = self.p_width-self.w
if vy+(self.h//2) > self.p_height:
vy = self.p_height-self.h
self.set_viewport(vx, vy, self.w, self.h)
def world_to_screen(self, x, y):
"""
Translate world coordinate into screen coordinates.
Returns a (x, y) tuple.
"""
return x-self.x, self.h-(y-self.y)
def get_texture(self, gid):
"""
Returns a texture identified by its gid.
If not found will raise a KeyError or IndexError.
"""
return self.texture_index[gid]
def get_tileoffset(self, gid):
"""Returns the offset of a tile."""
return self.tileoffset_index.get(gid, (0, 0))
@property
def last_group(self):
"""
The last use group in `Map` batch.
This is useful in case any Sprite is added to the `Map` to
be drawn by the Map's batch without being managed by the Map.
Using this value plus one will ensure the sprite will be drawn
over the map.
"""
return BaseLayer.groups-1
@staticmethod
def load_json(fileobj, nearest=False):
"""
Load the map in JSON format.
This class method return a `Map` object and the file will be
closed after is read.
Set nearest to True to set GL_NEAREST for both min and mag
filters in the tile textures.
"""
data = json.load(fileobj)
fileobj.close()
return Map(data, nearest)
def draw(self):
"""Applies transforms and draws the batch."""
gl.glPushMatrix()
gl.glTranslatef(-self.x, self.y, 0)
self.batch.draw()
gl.glPopMatrix()
| gpl-2.0 | 5,152,356,281,729,806,000 | 32.651786 | 117 | 0.529981 | false | 4.049423 | false | false | false |
luileito/WKM | py/mathlib.py | 1 | 1825 | from __future__ import division
import math
def cumdist(samples):
"""
Computes the cummulated distance along a sequence of vectors.
samples = [ [v11,...,v1N], ... [vn1,...,vnN] ]
"""
N, l, Ln = len(samples), [0.0], 0.0
for i in range(1,N):
Li = math.sqrt( sqL2(samples[i], samples[i-1]) )
Ln += Li
l.append(Ln)
return l, Ln
def sqL2(a, b):
"""
Computes the L2 euclidean distance between two vectors.
a = [a1,...,aN]; b = [b1,...,bN]
"""
dim, nrg = len(a), 0.0
for d in range(dim):
dist = a[d] - b[d]
nrg += dist * dist
return nrg
def clustercenter(samples):
"""
Computes the geometric center of a set of vectors.
samples = [ [v11,...,v1N], ... [vn1,...,vnN] ]
"""
N, dim = len(samples), len(samples[0])
if N == 1: # singleton cluster
return samples[0]
# Cluster center is the average in all dimensions
dsum = [0.0] * dim
for d in range(dim):
for i in range(N):
dsum[d] += samples[i][d]
dsum[d] /= N
return dsum
def whiten(samples):
"""
Divides each feature by its standard deviation across all observations,
in order to give it unit variance.
@param samples array [ [v11,...,v1N], ... [vn1,...,vnN] ]
"""
N, dim = len(samples), len(samples[0])
pts = samples[:]
for d in range(dim):
cols = [samples[i][d] for i in range(N)]
m, s = msd(cols);
if s > 0:
for i in range(N):
pts[i][d] = samples[i][d] / s
return pts
def avg(vec):
"""
Computes the average of all vector values.
vec = [v1,...,vN]
"""
return sum(vec) / len(vec)
def msd(vec):
"""
Computes the mean plus standard deviation of a vector.
vec = [v1,...,vN]
"""
mean, sd, n = avg(vec), 0.0, len(vec)
if n > 1:
for v in vec:
sd += (v - mean)**2
sd = math.sqrt(sd / (n-1))
return mean, sd
| mit | 1,113,156,717,748,922,500 | 22.701299 | 74 | 0.56274 | false | 2.878549 | false | false | false |
denismakogon/pyvcloud | tests/connect_vapp_to_network.py | 4 | 1721 | import os
from pyvcloud.vcloudair import VCA
def print_vca(vca):
if vca:
print 'vca token: ', vca.token
if vca.vcloud_session:
print 'vcloud session token: ', vca.vcloud_session.token
print 'org name: ', vca.vcloud_session.org
print 'org url: ', vca.vcloud_session.org_url
print 'organization: ', vca.vcloud_session.organization
else:
print 'vca vcloud session: ', vca.vcloud_session
else:
print 'vca: ', vca
### On Demand
host='iam.vchs.vmware.com'
username = os.environ['VCAUSER']
password = os.environ['PASSWORD']
instance = 'c40ba6b4-c158-49fb-b164-5c66f90344fa'
org = 'a6545fcb-d68a-489f-afff-2ea055104cc1'
vdc = 'VDC1'
vapp = 'ubu'
network = 'default-routed-network'
vca = VCA(host=host, username=username, service_type='ondemand', version='5.7', verify=True)
assert vca
result = vca.login(password=password)
assert result
result = vca.login_to_instance(password=password, instance=instance, token=None, org_url=None)
assert result
result = vca.login_to_instance(instance=instance, password=None, token=vca.vcloud_session.token, org_url=vca.vcloud_session.org_url)
assert result
print_vca(vca)
the_vdc = vca.get_vdc(vdc)
assert the_vdc
print the_vdc.get_name()
the_vapp = vca.get_vapp(the_vdc, vapp)
assert the_vapp
print the_vapp.me.name
the_network = vca.get_network(vdc, network)
assert the_network
# this assumes that the vApp is already connected to the network so it should return immediately with success
task = the_vapp.connect_to_network(network, the_network.get_href(), 'bridged')
print task.get_status()
assert 'success' == task.get_status()
| apache-2.0 | 7,531,989,792,316,935,000 | 34.854167 | 132 | 0.685648 | false | 2.921902 | false | false | false |
heiher/libreoffice-core | writerfilter/source/ooxml/modelpreprocess.py | 9 | 2912 | #!/usr/bin/env python
#
# This file is part of the LibreOffice project.
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
from __future__ import print_function
from xml.dom import minidom
import sys
def prefixForGrammar(namespace):
ns = namespace.getElementsByTagName("grammar")[0].getAttribute("ns")
return ooxUrlAliases[ns]
def parseNamespaceAliases(node):
ret = {}
for k, v in list(node.attributes.items()):
if k.startswith("xmlns:"):
ret[k.replace('xmlns:', '')] = v
return ret
def parseNamespaces(fro):
sock = open(fro)
for i in sock.readlines():
line = i.strip()
alias, url = line.split(' ')[1:]
ooxUrlAliases[url] = alias
sock.close()
def check(model):
defines = [i.getAttribute("name") for i in model.getElementsByTagName("define")]
for reference in [i.getAttribute("name") for i in model.getElementsByTagName("ref")]:
if reference not in defines:
raise Exception("Unknown define element with name '%s'" % reference)
for start in [i.getAttribute("name") for i in model.getElementsByTagName("start")]:
if start not in defines:
raise Exception("Unknown start element with name '%s'" % start)
def preprocess(model):
modelNode = [i for i in model.childNodes if i.localName == "model"][0]
# Alias -> URL, based on "xmlns:" attributes.
modelNamespaceAliases = parseNamespaceAliases(modelNode)
for i in modelNode.getElementsByTagName("namespace"):
grammarprefix = prefixForGrammar(i)
grammar = i.getElementsByTagName("grammar")[0]
for j in i.getElementsByTagName("element") + i.getElementsByTagName("attribute"):
# prefix
prefix = ""
if ":" in j.getAttribute("name"):
nameprefix = j.getAttribute("name").split(':')[0]
prefix = ooxUrlAliases[modelNamespaceAliases[nameprefix]]
elif j.localName == "attribute":
if grammar.getAttribute("attributeFormDefault") == "qualified":
prefix = grammarprefix
else:
prefix = grammarprefix
# localname
if ":" in j.getAttribute("name"):
localname = j.getAttribute("name").split(':')[1]
else:
localname = j.getAttribute("name")
# set the attributes
j.setAttribute("prefix", prefix)
j.setAttribute("localname", localname)
namespacesPath = sys.argv[1]
modelPath = sys.argv[2]
# URL -> alias, from oox
ooxUrlAliases = {}
parseNamespaces(namespacesPath)
model = minidom.parse(modelPath)
check(model)
preprocess(model)
model.writexml(sys.stdout)
# vim:set shiftwidth=4 softtabstop=4 expandtab:
| gpl-3.0 | 4,567,539,681,103,047,000 | 31 | 89 | 0.632212 | false | 4.107193 | false | false | false |
jafletch/makeblock-serial | src/Python27/config.py | 1 | 1392 | import exceptions
from types import *
class validatable:
@classmethod
def validate(cls, item):
for memberName in dir(cls):
member = getattr(cls, memberName)
if type(member) == IntType and item == member:
return item
raise ConfigError("Invalid " + str(cls.__name__) + ": " + str(item))
class action(validatable):
GET = 1
RUN = 2
RESET = 3
START = 4
class device(validatable):
VERSION = 0
ULTRASONIC_SENSOR = 1
TEMPERATURE_SENSOR = 2
LIGHT_SENSOR = 3
POTENTIONMETER = 4
JOYSTICK = 5
GYRO = 6
SOUND_SENSOR = 7
RGBLED = 8
SEVSEG = 9
MOTOR = 10
SERVO = 11
ENCODER = 12
IR = 13
PIRMOTION = 15
INFRARED = 16
LINEFOLLOWER = 17
SHUTTER = 20
LIMITSWITCH = 21
BUTTON = 22
DIGITAL = 30
ANALOG = 31
PWM = 32
SERVO_PIN = 33
TOUCH_SENSOR = 34
STEPPER = 40
ENCODER = 41
TIMER = 50
class port(validatable):
PORT_1 = 1
PORT_2 = 2
PORT_3 = 3
PORT_4 = 4
PORT_5 = 5
PORT_6 = 6
PORT_7 = 7
PORT_8 = 8
MOTOR_1 = 9
MOTOR_2 = 10
class slot(validatable):
SLOT_1 = 1
SLOT_2 = 2
class ConfigError(Exception):
def __init__(self, message):
super(ConfigError, self).__init__(message) | gpl-3.0 | 7,782,678,110,567,276,000 | 17.333333 | 76 | 0.52518 | false | 3.333333 | false | false | false |
lcostantino/healing-os | healing/engine/alarms/alarm_base.py | 1 | 6284 | # -*- encoding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import six
from healing.openstack.common import log
from healing.openstack.common import jsonutils
from healing.objects import alarm_track as alarm_obj
LOG = log.getLogger(__name__)
STATE_OK = 'ok'
STATE_ALARM = 'alarm'
STATE_INSUFFICIENT = 'insufficient'
OK_HOOK = 'ok_actions'
ALARM_HOOK = 'alarm_actions'
INSUFFICIENT_HOOK = 'insufficient_data_actions'
CURRENT_HANDLER = None
"""
Handle AlarmObjs and ceilometer alarms. Always use this engine
to create AlarmObjs.
TODO: we can add a join table also for contract-alarm later. all the mess
is only forthe singleton alarm
I don't lke this, since we still need the obj to wrap,
but it's only for developers...
"""
class AlarmMetaClass(type):
"""Metaclass that allows tracking classes by alarm type."""
AVAILABLE_ALARMS = {}
def __init__(cls, names, bases, dict_):
AlarmMetaClass.AVAILABLE_ALARMS[cls.ALARM_TYPE] = cls
@six.add_metaclass(AlarmMetaClass)
class AlarmBase(object):
""" Some alarms will be unique , other's per VM, etc
This work as a wrapper arund AlarmTrack objects
"""
ALARM_TYPE = 'base'
def __init__(self, ctx, remote_alarm_id=None,
contract_id=None, meter="dummy",
threshold=0, period=120, operator="eq",
query=None, alarm_object=None, evaluation_period=1,
statistic='avg', **kwargs):
"""
You need to provide contract_id, meter, threshold, period and
operator if it's a new object
"""
self.ctx = ctx
# additional data base on subclass
self.options = kwargs or {}
#this is filled in some specific cases on create/update
self.extra_alarm_data = {}
if alarm_object:
self.alarm_track = alarm_object
return
# only update once if alarm_id not in place. Ex: (new alarm)
# and only if values are set to avoid exceptions on field coercion
# if will fail on save later if not properly set
self.alarm_track = alarm_obj.AlarmTrack()
self.contract_id = contract_id
self.meter = meter
self.alarm_id = remote_alarm_id
self.period = period
self.threshold = threshold
self.statistic = statistic
self.operator = operator
self.evaluation_period = evaluation_period
self.type = self.ALARM_TYPE
# this could be done by __getattr__ and __setattr__ to proxy the object,
# but.... make it explicity like this
@property
def alarm_track_id(self):
return self.alarm_track.id
@property
def alarm_id(self):
return self.alarm_track.alarm_id
@alarm_id.setter
def alarm_id(self, val):
self.alarm_track.alarm_id = val
@property
def type(self):
return self.alarm_track.type
@type.setter
def type(self, val):
self.alarm_track.type = val
@property
def contract_id(self):
return self.alarm_track.contract_id
@contract_id.setter
def contract_id(self, val):
self.alarm_track.contract_id = val
@property
def meter(self):
return self.alarm_track.meter
@meter.setter
def meter(self, val):
self.alarm_track.meter = val
@property
def threshold(self):
return self.alarm_track.threshold
@threshold.setter
def threshold(self, val):
self.alarm_track.threshold = val
@property
def operator(self):
return self.alarm_track.operator
@operator.setter
def operator(self, val):
self.alarm_track.operator = val
@property
def period(self):
return self.alarm_track.period
@period.setter
def period(self, val):
self.alarm_track.period = val
@property
def statistic(self):
return self.alarm_track.statistic
@statistic.setter
def statistic(self, val):
self.alarm_track.statistic = val
@property
def evaluation_period(self):
return self.alarm_track.evaluation_period
@evaluation_period.setter
def evaluation_period(self, val):
self.alarm_track.evaluation_period = val
@property
def query(self):
# TODO MUST: Add a json field that do this into fields.py
try:
return jsonutils.loads(self.alarm_track.query)
except:
return []
@query.setter
def query(self, val):
self.alarm_track.query = jsonutils.dumps(val)
@abc.abstractmethod
def create(self):
pass
def set_from_dict(self, update_dict):
for x in alarm_obj.AlarmTrack.fields.keys():
present = update_dict.get(x)
if present:
#to avoid change_fields being modified
setattr(self, x, present)
@abc.abstractmethod
def update(self):
pass
@abc.abstractmethod
def delete(self):
pass
def is_active(self):
return True
def get_extra_alarm_data(self):
return self.extra_alarm_data
def affected_resources(self, group_by='resource_id',
period=0, query=None,
start_date=None, end_date=None,
aggregates=None, delta_seconds=None,
meter=None,
result_process=None):
pass
def set_default_alarm_hook(self):
pass
def set_default_ok_hook(self):
pass
def set_default_insufficient_hook(self):
pass
def set_ok_hook_url(self, url):
pass
def set_alarm_hook_url(self, url):
pass
def set_insufficient_hook_url(self, url):
pass
def get_hooks(self):
pass
| apache-2.0 | -5,866,530,302,132,023,000 | 25.854701 | 76 | 0.62508 | false | 3.969678 | false | false | false |
klnusbaum/UDJ-Server | udjserver/udj/migrations/0018_add_default_algo.py | 1 | 10265 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
from udj.trans_migration_constants import ADDED_DEFAULT_ALGO_NAME
class Migration(DataMigration):
def forwards(self, orm):
totalAlgo = orm.SortingAlgorithm(
name=ADDED_DEFAULT_ALGO_NAME,
description="Sorts playlist be the total amount of votes per song",
function_name='totalVotes')
totalAlgo.save()
def backwards(self, orm):
pass
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'udj.activeplaylistentry': {
'Meta': {'object_name': 'ActivePlaylistEntry'},
'adder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'song': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['udj.LibraryEntry']"}),
'state': ('django.db.models.fields.CharField', [], {'default': "u'QE'", 'max_length': '2'}),
'time_added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'udj.libraryentry': {
'Meta': {'object_name': 'LibraryEntry'},
'album': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'artist': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'duration': ('django.db.models.fields.IntegerField', [], {}),
'genre': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_banned': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'player': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['udj.Player']"}),
'player_lib_song_id': ('django.db.models.fields.IntegerField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'track': ('django.db.models.fields.IntegerField', [], {})
},
'udj.participant': {
'Meta': {'unique_together': "(('user', 'player'),)", 'object_name': 'Participant'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'player': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['udj.Player']"}),
'time_joined': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'time_last_interaction': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'udj.player': {
'Meta': {'object_name': 'Player'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'owning_user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'sorting_algo': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['udj.SortingAlgorithm']"}),
'state': ('django.db.models.fields.CharField', [], {'default': "'IN'", 'max_length': '2'}),
'volume': ('django.db.models.fields.IntegerField', [], {'default': '5'})
},
'udj.playerlocation': {
'Meta': {'object_name': 'PlayerLocation'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'player': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['udj.Player']", 'unique': 'True'}),
'point': ('django.contrib.gis.db.models.fields.PointField', [], {'default': "'POINT(0.0 0.0)'"}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['udj.State']"}),
'zipcode': ('django.db.models.fields.IntegerField', [], {})
},
'udj.playerpassword': {
'Meta': {'object_name': 'PlayerPassword'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'password_hash': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'player': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['udj.Player']", 'unique': 'True'}),
'time_set': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'})
},
'udj.playlistentrytimeplayed': {
'Meta': {'object_name': 'PlaylistEntryTimePlayed'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'playlist_entry': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['udj.ActivePlaylistEntry']", 'unique': 'True'}),
'time_played': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'udj.sortingalgorithm': {
'Meta': {'object_name': 'SortingAlgorithm'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'function_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'})
},
'udj.state': {
'Meta': {'object_name': 'State'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '2'})
},
'udj.ticket': {
'Meta': {'unique_together': "(('user', 'ticket_hash'),)", 'object_name': 'Ticket'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ticket_hash': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'time_issued': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'udj.vote': {
'Meta': {'unique_together': "(('user', 'playlist_entry'),)", 'object_name': 'Vote'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'playlist_entry': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['udj.ActivePlaylistEntry']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'weight': ('django.db.models.fields.IntegerField', [], {})
}
}
complete_apps = ['udj']
symmetrical = True
| gpl-2.0 | -3,212,786,209,966,275,000 | 67.433333 | 182 | 0.54301 | false | 3.659537 | false | false | false |
astrodroid/functionrefactor | tests/test_runs.py | 1 | 1134 | from functionrefactor.commands import launch_case
from functionrefactor.formatter import Formatter
from functionrefactor.settings import *
from tests.common_functions import *
import json
class TestRuns():
def test_runs(self):
with open("tests/cases/test_cases.json", "r") as tcf:
test_launcher = json.load(tcf)
settings.update_global_settings(test_launcher)
test_cases = test_launcher["launcher"]
for case in test_cases:
if case["active"]:
[hpp_result, cpp_result] = launch_case("tests/cases",case)
self.check_result("tests/cases/" + case["hpp_out"],
"tests/cases/" + case["cpp_out"], hpp_result,
cpp_result)
def check_result(self, hpp_path, cpp_path, hpp_result, cpp_result):
clang_formatter = Formatter()
hpp_expected_result = clang_formatter.open_and_launch(hpp_path)
cpp_expected_result = clang_formatter.open_and_launch(cpp_path)
are_equal(hpp_result, hpp_expected_result)
are_equal(cpp_result, cpp_expected_result)
| mit | -1,969,767,511,287,521,000 | 35.580645 | 79 | 0.623457 | false | 3.78 | true | false | false |
levilucio/SyVOLT | mbeddr2C_MM/transformation_from_mps/Hlayer1rule1.py | 1 | 3561 | from core.himesis import Himesis
import uuid
class Hlayer1rule1(Himesis):
def __init__(self):
"""
Creates the himesis graph representing the DSLTrans rule layer1rule1.
"""
# Flag this instance as compiled now
self.is_compiled = True
super(Hlayer1rule1, self).__init__(name='Hlayer1rule1', num_nodes=0, edges=[])
# Set the graph attributes
self["mm__"] = ['HimesisMM']
self["name"] = """layer1rule1"""
self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'layer1rule1')
# match model. We only support one match model
self.add_node()
self.vs[0]["mm__"] = """MatchModel"""
# apply model node
self.add_node()
self.vs[1]["mm__"] = """ApplyModel"""
# paired with relation between match and apply models
self.add_node()
self.vs[2]["mm__"] = """paired_with"""
self.vs[2]["attr1"] = """layer1rule1"""
# match class ClientServerInterface(layer1rule1class0ClientServerInterface) node
self.add_node()
self.vs[3]["mm__"] = """ClientServerInterface"""
self.vs[3]["attr1"] = """+"""
# match class Operation(layer1rule1class1Operation) node
self.add_node()
self.vs[4]["mm__"] = """Operation"""
self.vs[4]["attr1"] = """+"""
# apply class StructDeclaration(layer1rule1class2StructDeclaration) node
self.add_node()
self.vs[5]["mm__"] = """StructDeclaration"""
self.vs[5]["attr1"] = """1"""
# apply class CFunctionPointerStructMember(layer1rule1class3CFunctionPointerStructMember) node
self.add_node()
self.vs[6]["mm__"] = """CFunctionPointerStructMember"""
self.vs[6]["attr1"] = """1"""
# match association ClientServerInterface--contents-->Operation node
self.add_node()
self.vs[7]["attr1"] = """contents"""
self.vs[7]["mm__"] = """directLink_S"""
# apply association StructDeclaration--members-->CFunctionPointerStructMember node
self.add_node()
self.vs[8]["attr1"] = """members"""
self.vs[8]["mm__"] = """directLink_T"""
# backward association CFunctionPointerStructMember-->Operationnode
self.add_node()
self.vs[9]["mm__"] = """backward_link"""
# backward association StructDeclaration-->ClientServerInterfacenode
self.add_node()
self.vs[10]["mm__"] = """backward_link"""
# Add the edges
self.add_edges([
(0,3), # matchmodel -> match_class ClientServerInterface(layer1rule1class0ClientServerInterface)
(0,4), # matchmodel -> match_class Operation(layer1rule1class1Operation)
(1,5), # applymodel -> apply_classStructDeclaration(layer1rule1class2StructDeclaration)
(1,6), # applymodel -> apply_classCFunctionPointerStructMember(layer1rule1class3CFunctionPointerStructMember)
(3,7), # match classClientServerInterface(layer1rule1class0ClientServerInterface) -> association contents
(7,4), # associationcontents -> match_classClientServerInterface(layer1rule1class1Operation)
(5,8), # apply class StructDeclaration(layer1rule1class2StructDeclaration) -> association members
(8,6), # associationmembers -> apply_classCFunctionPointerStructMember(layer1rule1class3CFunctionPointerStructMember)
(6,9), # apply class CFunctionPointerStructMember(layer1rule1class1Operation) -> backward_association
(9,4), # backward_associationOperation -> match_class Operation(layer1rule1class1Operation)
(5,10), # apply class StructDeclaration(layer1rule1class0ClientServerInterface) -> backward_association
(10,3), # backward_associationClientServerInterface -> match_class ClientServerInterface(layer1rule1class0ClientServerInterface)
(0,2), # matchmodel -> pairedwith
(2,1) # pairedwith -> applyModel
])
self["equations"] = []
| mit | -1,731,512,285,974,052,900 | 39.465909 | 131 | 0.711598 | false | 3.249088 | false | false | false |
rshelans/GeneRing | src/TraceModel.py | 1 | 5221 | import time
import os.path
import Trace
import scipy
__version__="01.00.00"
__author__ ="Robert Shelansky"
class TraceModel:
"""
A model/controller of data which knows how to do specific analysis and maintenance of settings etc.
Can be used as a data dump for communication with the TraceView.
"""
def __init__(self,imgres=scipy.NAN,directory=None,user=None,out_path=None,title=None,version=None,files=None,length=None,threshold=None,smooth=None,**kw):
self.settings = {
'length':length,
'threshold':threshold,
'smooth':smooth,
'end':None,
'midpoint':None}
self.context = {
'files' :files,
'version':version,
'date':time.strftime("%d/%m/%Y %H:%M:%S"),
'path':None,
'trace':None,
'coordinates':None,
'index':None,
'domain':None,
'basename':None,
'title':title,
'scale':None,
'user':user,
'resolution':None,
'steadiness':None,
'saved':['' for i in range(len(files))],
'out_path':out_path,
'imgres':imgres,
'pimgres':None,
'directory':directory}
self.molecule = {
'smoothed':None,
'fr':None,
'fl':None,
'rr':None,
'rl':None,
'zipped':None,
'zlabels':None,
'symmerty':None,
'starts':None,
'bubs':None}
self.seek(0)
self.find_midpoint()
self.analyze()
def find_midpoint(self):
self.settings['end' ]=self.context['trace'].edgebuffer(
threshold=self.settings['threshold'],
smooth =self.settings['smooth'])
self.settings['midpoint' ]=self.context['trace'].midpoint(
threshold =self.settings['threshold'],
smooth =self.settings['smooth'],
end =self.settings['end'])
def analyze(self):
midpoint,smoothed,(fr,fl),(rr,rl),(regions,labels)=self.context['trace'].solve_molecule(
self.settings['midpoint'],
self.settings['threshold'],
self.settings['smooth'],
self.settings['end'])
self.molecule['segments'] =len(fr) if len(fr) == len(rr) else float('Nan')
self.molecule['smoothed'] =smoothed
self.molecule['fr' ] =fr
self.molecule['fl' ] =fl
self.molecule['rr' ] =rr
self.molecule['rl' ] =rl
self.molecule['zipped' ] =regions
self.molecule['zlabels' ] =labels
self.molecule['rlabels' ] =self.context['trace'].label(fl,rl)
self.molecule['molecule'] =self.context['trace'].moleculify(
fr,
fl,
rr,
rl,
self.settings['length'])
self.molecule['symmetry'] =self.context['trace'].sd_segment_size (fr,fl,rr,rl) * self.context['scale']**2
self.molecule['starts' ] =self.context['trace'].msd_of_linker_end_points(fr,fl,rr,rl) * self.context['scale']**2
self.molecule['bubs' ] =self.context['trace'].msd_of_region_sizes (fr,fl,rr,rl) * self.context['scale']**2
def seek(self, index):
BASE_TO_NM_CONVERSION_FACTOR=0.34#nm/bp
self.context['path' ] =self.context['files'][index]
self.context['index'] =index
self.context['basename']=os.path.basename(self.context['path'])
##reads _trace file into scipy.array
self.context['coordinates'] = scipy.genfromtxt(
self.context['path' ],
delimiter='\t')
self.context['trace'] = Trace.Trace(self.context['coordinates'])
self.context['scale'] = self.context['trace'].scale(self.settings['length'])
self.context['resolution'] = self.context['trace']._ld.mean() * self.context['scale']
self.context['steadiness'] = scipy.sqrt(self.context['trace']._ld.var()) * self.context['scale']
self.context['pimgres'] = self.context['scale'] * BASE_TO_NM_CONVERSION_FACTOR
self.context['domain'] = scipy.array(range(len(self.context['trace'])))
def write_comments(self,file):
print("""
##Image_Resolution\t{:.2f} nm/px
##Predicted_Image_Resolution\t{:>.2f} nm/px
##Tracer\t{}
##Length\t{} bp
##Edgebuffer\t{} Coordinates
##Threshold\t{:.2f} bp
##Smooth\t{} Coordinates
##Midpoint\t{} Coordinate
##Scale\t{:.2f} bp/AU
##Resolution\t{:.2f} bp
##Steadiness\t{:.2f} bp
##Segments\t{:} #
##Symmetry\t{:.2f} bp
##Linker\t{:.2f} bp
##Region\t{:.2f} bp""".format(
self.context['imgres'],
self.context['pimgres'],
self.context['user'],
self.settings['length'],
self.settings['end'],
self.settings['threshold'] * self.context['scale'],
self.settings['smooth'],
self.settings['midpoint'],
self.context['scale'],
self.context['resolution'],
self.context['steadiness'],
self.molecule['segments'],
self.molecule['symmetry'],
self.molecule['starts'],
self.molecule['bubs']),file=file)
def save(self):
base=os.path.basename(self.context['path']).split('.')[0]
if self.context['out_path'] is not None:
path=self.context['out_path']
else:
path= os.path.dirname(self.context['path'])
if self.context['directory']:
path=path+'\\'+base
if not os.path.exists(path):
os.makedirs(path)
mol_file='{}\\{}.mol'.format(path,base)
reg_file='{}\\{}.reg'.format(path,base)
with open(mol_file, 'w') as file:
self.write_comments(file)
self.molecule['molecule'].write(file)
with open(reg_file,'w') as file:
self.write_comments(file)
reg="\n".join(['{}\t{}\t{}'.format(l,s,e) for (s,e),l in zip(self.molecule['zipped'],self.molecule['zlabels'])])
print(reg,file=file)
self.context['saved'][self.context['index']] = 'Saved.'
| mit | 5,411,831,733,201,479,000 | 31.428571 | 155 | 0.64298 | false | 2.856127 | false | false | false |
OzFlux/OzFluxQC | OzFluxQC.py | 1 | 51206 | import ast
import copy
import datetime
import logging
import matplotlib
matplotlib.use('TkAgg')
#matplotlib.use('Qt4Agg')
import numpy
import ntpath
import time
import Tkinter as tk
import tkMessageBox
import os
import sys
# The Lindsay Trap: check the scripts directory is present
if not os.path.exists("./scripts/"):
print "OzFluxQC: the scripts directory is missing"
sys.exit()
# since the scripts directory is there, try importing the modules
sys.path.append('scripts')
import cfg
import qcclim
import qccpd
import qcgf
import qcio
import qcls
import qcplot
import qcrp
import qcts
import qcutils
# now check the logfiles and plots directories are present
dir_list = ["./logfiles/","./plots/"]
for item in dir_list:
if not os.path.exists(item): os.makedirs(item)
# now check the solo/inf, solo/input, solo/log and solo/output directories are present
dir_list = ["./solo/inf","./solo/input","./solo/log","./solo/output"]
for item in dir_list:
if not os.path.exists(item): os.makedirs(item)
logging.basicConfig(filename='logfiles/OzFluxQC.log',level=logging.DEBUG)
console = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s', '%H:%M:%S')
console.setFormatter(formatter)
console.setLevel(logging.INFO)
logging.getLogger('').addHandler(console)
class qcgui(tk.Tk):
"""
QC Data Main GUI
Used to access read, save, and data processing (qcls) prodecures
Columns: Data levels:
1: L1 Raw Data (read excel into NetCDF)
2: L2 QA/QC (general QA/QC algorithms, site independent)
3: L3 Corrections (Flux data corrections, site dependent based on ancillary measurements available and technical issues)
4: L4 Gap Filling (Used for fill met data gaps and ingesting SOLO-ANN Gap Filled fluxes from external processes)
Rows: function access
1: Ingest excel dataset into NetCDF files
2: Process data from previous level and generate NetCDF file(s) at current level
3-6: Show Timestamp range of dataset and accept date range for graphical plots
7: Export excel dataset from NetCDF file
"""
def __init__(self, parent):
tk.Tk.__init__(self,parent)
self.parent = parent
self.initialise()
def option_not_implemented(self):
self.do_progress(text='Option not implemented yet ...')
logging.info(' Option not implemented yet ...')
def initialise(self):
self.org_frame = tk.Frame(self)
self.org_frame.grid()
# things in the first row of the GUI
L1Label = tk.Label(self.org_frame,text='L1: Raw data')
L1Label.grid(row=0,column=0,columnspan=2)
L2Label = tk.Label(self.org_frame,text='L2: QA/QC')
L2Label.grid(row=0,column=2,columnspan=2)
L3Label = tk.Label(self.org_frame,text='L3: Process')
L3Label.grid(row=0,column=4,columnspan=2)
# things in the second row of the GUI
doL1Button = tk.Button (self.org_frame, text="Read L1 file", command=self.do_l1qc )
doL1Button.grid(row=1,column=0,columnspan=2)
doL2Button = tk.Button (self.org_frame, text="Do L2 QA/QC", command=self.do_l2qc )
doL2Button.grid(row=1,column=2,columnspan=2)
doL3Button = tk.Button (self.org_frame, text="Do L3 processing", command=self.do_l3qc )
doL3Button.grid(row=1,column=4,columnspan=2)
# things in the third row of the GUI
filestartLabel = tk.Label(self.org_frame,text='File start date')
filestartLabel.grid(row=2,column=0,columnspan=3)
fileendLabel = tk.Label(self.org_frame,text='File end date')
fileendLabel.grid(row=2,column=3,columnspan=3)
# things in the fourth row of the GUI
self.filestartValue = tk.Label(self.org_frame,text='No file loaded ...')
self.filestartValue.grid(row=3,column=0,columnspan=3)
self.fileendValue = tk.Label(self.org_frame,text='No file loaded ...')
self.fileendValue.grid(row=3,column=3,columnspan=3)
# things in the fifth row of the GUI
plotstartLabel = tk.Label(self.org_frame, text='Start date (YYYY-MM-DD)')
plotstartLabel.grid(row=4,column=0,columnspan=3)
self.plotstartEntry = tk.Entry(self.org_frame)
self.plotstartEntry.grid(row=4,column=3,columnspan=3)
# things in row sixth of the GUI
plotendLabel = tk.Label(self.org_frame, text='End date (YYYY-MM-DD)')
plotendLabel.grid(row=5,column=0,columnspan=3)
self.plotendEntry = tk.Entry(self.org_frame)
self.plotendEntry.grid(row=5,column=3,columnspan=3)
# things in the seventh row of the GUI
closeplotwindowsButton = tk.Button (self.org_frame, text="Close plot windows", command=self.do_closeplotwindows )
closeplotwindowsButton.grid(row=6,column=0,columnspan=2)
plotL1L2Button = tk.Button (self.org_frame, text="Plot L1 & L2 Data", command=self.do_plotL1L2 )
plotL1L2Button.grid(row=6,column=2,columnspan=2)
plotL3L3Button = tk.Button (self.org_frame, text="Plot L3 Data", command=self.do_plotL3L3 )
plotL3L3Button.grid(row=6,column=4,columnspan=2)
# things in the eigth row of the GUI
quitButton = tk.Button (self.org_frame, text='Quit', command=self.do_quit )
quitButton.grid(row=7,column=0,columnspan=2)
savexL2Button = tk.Button (self.org_frame, text='Write L2 Excel file', command=self.do_savexL2 )
savexL2Button.grid(row=7,column=2,columnspan=2)
savexL3Button = tk.Button (self.org_frame, text='Write L3 Excel file', command=self.do_savexL3 )
savexL3Button.grid(row=7,column=4,columnspan=2)
# other things in the GUI
self.progress = tk.Label(self.org_frame, text='Waiting for input ...')
self.progress.grid(row=8,column=0,columnspan=6,sticky="W")
# now we put together the menu, "File" first
menubar = tk.Menu(self)
filemenu = tk.Menu(menubar,tearoff=0)
filemenu.add_command(label="Concatenate netCDF",command=self.do_ncconcat)
filemenu.add_command(label="Split netCDF",command=self.do_ncsplit)
filemenu.add_command(label="List netCDF contents",command=self.option_not_implemented)
fileconvertmenu = tk.Menu(menubar,tearoff=0)
#fileconvertmenu.add_command(label="V2.7 to V2.8",command=self.do_v27tov28)
fileconvertmenu.add_command(label="nc to EddyPro (biomet)",command=self.do_nc2ep_biomet)
fileconvertmenu.add_command(label="nc to FluxNet",command=self.do_nc2fn)
fileconvertmenu.add_command(label="nc to REddyProc",command=self.do_nc2reddyproc)
fileconvertmenu.add_command(label="nc to SMAP",command=self.do_nc2smap)
fileconvertmenu.add_command(label="nc to xls",command=self.do_nc2xls)
fileconvertmenu.add_command(label="xls to nc",command=self.option_not_implemented)
filemenu.add_cascade(label="Convert",menu=fileconvertmenu)
filemenu.add_separator()
filemenu.add_command(label="Quit",command=self.do_quit)
menubar.add_cascade(label="File",menu=filemenu)
# now the "Run" menu
runmenu = tk.Menu(menubar,tearoff=0)
runmenu.add_command(label="Read L1 file",command=self.do_l1qc)
runmenu.add_command(label="Do L2 QA/QC",command=self.do_l2qc)
runmenu.add_command(label="Do L3 processing",command=self.do_l3qc)
runmenu.add_command(label="Do L4 gap fill (drivers)",command=self.do_l4qc)
runmenu.add_command(label="Do L5 gap fill (fluxes)",command=self.do_l5qc)
runmenu.add_command(label="Do L6 partitioning",command=self.do_l6qc)
menubar.add_cascade(label="Run",menu=runmenu)
# then the "Plot" menu
plotmenu = tk.Menu(menubar,tearoff=0)
plotmenu.add_command(label="Plot L1 & L2",command=self.do_plotL1L2)
plotmenu.add_command(label="Plot L3",command=self.do_plotL3L3)
plotmenu.add_command(label="Plot L4",command=self.do_plotL3L4)
plotmenu.add_command(label="Plot L5",command=self.option_not_implemented)
plotmenu.add_command(label="Plot L6 summary",command=self.do_plotL6_summary)
fnmenu = tk.Menu(menubar,tearoff=0)
fnmenu.add_command(label="Standard",command=lambda:self.do_plotfluxnet(mode="standard"))
fnmenu.add_command(label="Custom",command=lambda:self.do_plotfluxnet(mode="custom"))
plotmenu.add_cascade(label="30 minute",menu=fnmenu)
#plotmenu.add_command(label="FluxNet",command=self.do_plotfluxnet)
fpmenu = tk.Menu(menubar,tearoff=0)
fpmenu.add_command(label="Standard",command=lambda:self.do_plotfingerprint(mode="standard"))
fpmenu.add_command(label="Custom",command=lambda:self.do_plotfingerprint(mode="custom"))
plotmenu.add_cascade(label="Fingerprint",menu=fpmenu)
plotmenu.add_command(label="Quick check",command=self.do_plotquickcheck)
plotmenu.add_command(label="Years check",command=self.option_not_implemented)
plotmenu.add_separator()
plotmenu.add_command(label="Close plots",command=self.do_closeplotwindows)
menubar.add_cascade(label="Plot",menu=plotmenu)
# and the "Utilities" menu
utilsmenu = tk.Menu(menubar,tearoff=0)
climatologymenu = tk.Menu(menubar,tearoff=0)
climatologymenu.add_command(label="Standard",command=lambda:self.do_climatology(mode="standard"))
climatologymenu.add_command(label="Custom",command=lambda:self.do_climatology(mode="custom"))
utilsmenu.add_cascade(label="Climatology",menu=climatologymenu)
utilsmenu.add_command(label="Compare Ah",command=self.option_not_implemented)
utilsmenu.add_command(label="Compare EP",command=self.do_compare_eddypro)
ustarmenu = tk.Menu(menubar,tearoff=0)
ustarmenu.add_command(label="Reichstein",command=self.option_not_implemented)
ustarmenu.add_command(label="Change Point Detection",command=self.do_cpd)
utilsmenu.add_cascade(label="u* threshold",menu=ustarmenu)
menubar.add_cascade(label="Utilities",menu=utilsmenu)
# and the "Help" menu
helpmenu = tk.Menu(menubar,tearoff=0)
helpmenu.add_command(label="Contents",command=self.do_helpcontents)
helpmenu.add_command(label="About",command=self.option_not_implemented)
menubar.add_cascade(label="Help",menu=helpmenu)
self.config(menu=menubar)
def do_climatology(self,mode="standard"):
"""
Calls qcclim.climatology
"""
logging.info(' Starting climatology')
self.do_progress(text='Doing climatology ...')
if mode=="standard":
stdname = "controlfiles/standard/climatology.txt"
if os.path.exists(stdname):
cf = qcio.get_controlfilecontents(stdname)
self.do_progress(text='Opening input file ...')
filename = qcio.get_filename_dialog(path='../Sites',title='Choose a netCDF file')
if len(filename)==0:
logging.info( " Climatology: no input file chosen")
self.do_progress(text='Waiting for input ...')
return
if "Files" not in dir(cf): cf["Files"] = {}
cf["Files"]["file_path"] = ntpath.split(filename)[0]+"/"
cf["Files"]["in_filename"] = ntpath.split(filename)[1]
else:
self.do_progress(text='Loading control file ...')
cf = qcio.load_controlfile(path='controlfiles')
if len(cf)==0: self.do_progress(text='Waiting for input ...'); return
else:
self.do_progress(text='Loading control file ...')
cf = qcio.load_controlfile(path='controlfiles')
if len(cf)==0: self.do_progress(text='Waiting for input ...'); return
self.do_progress(text='Doing the climatology')
qcclim.climatology(cf)
self.do_progress(text='Finished climatology')
logging.info(' Finished climatology')
logging.info("")
def do_closeplotwindows(self):
"""
Close plot windows
"""
self.do_progress(text='Closing plot windows ...') # tell the user what we're doing
logging.info(' Closing plot windows ...')
matplotlib.pyplot.close('all')
#fig_numbers = [n.num for n in matplotlib._pylab_helpers.Gcf.get_all_fig_managers()]
##logging.info(' Closing plot windows: '+str(fig_numbers))
#for n in fig_numbers:
#matplotlib.pyplot.close(n)
self.do_progress(text='Waiting for input ...') # tell the user what we're doing
logging.info(' Waiting for input ...')
def do_compare_eddypro(self):
"""
Calls qcclim.compare_ep
Compares the results OzFluxQC (L3) with those from EddyPro (full output).
"""
self.do_progress(text='Comparing EddyPro and OzFlux results ...')
qcclim.compare_eddypro()
self.do_progress(text='Finished comparing EddyPro and OzFlux')
logging.info(' Finished comparing EddyPro and OzFlux')
def do_cpd(self):
"""
Calls qccpd.cpd_main
Compares the results OzFluxQC (L3) with those from EddyPro (full output).
"""
logging.info(' Starting estimation u* threshold using CPD')
self.do_progress(text='Estimating u* threshold using CPD ...')
stdname = "controlfiles/standard/cpd.txt"
if os.path.exists(stdname):
cf = qcio.get_controlfilecontents(stdname)
filename = qcio.get_filename_dialog(path='../Sites',title='Choose an input nc file')
if len(filename)==0: self.do_progress(text='Waiting for input ...'); return
if "Files" not in dir(cf): cf["Files"] = {}
cf["Files"]["file_path"] = ntpath.split(filename)[0]+"/"
cf["Files"]["in_filename"] = ntpath.split(filename)[1]
else:
cf = qcio.load_controlfile(path='controlfiles')
if len(cf)==0: self.do_progress(text='Waiting for input ...'); return
if "Options" not in cf: cf["Options"]={}
cf["Options"]["call_mode"] = "interactive"
qccpd.cpd_main(cf)
self.do_progress(text='Finished estimating u* threshold')
logging.info(' Finished estimating u* threshold')
logging.info("")
def do_helpcontents(self):
tkMessageBox.showinfo("Obi Wan says ...","Read the source, Luke!")
def do_l1qc(self):
"""
Calls qcls.l1qc
"""
logging.info(" Starting L1 processing ...")
self.do_progress(text='Load L1 Control File ...')
cf = qcio.load_controlfile(path='controlfiles')
if len(cf)==0:
logging.info( " L1: no control file chosen")
self.do_progress(text='Waiting for input ...')
return
self.do_progress(text='Doing L1 ...')
ds1 = qcls.l1qc(cf)
if ds1.returncodes["value"] == 0:
outfilename = qcio.get_outfilenamefromcf(cf)
ncFile = qcio.nc_open_write(outfilename)
qcio.nc_write_series(ncFile,ds1)
self.do_progress(text='Finished L1')
logging.info(' Finished L1')
logging.info("")
else:
msg = 'An error occurred, check the console ...'
self.do_progress(text=msg)
def do_l2qc(self):
"""
Call qcls.l2qc function
Performs L2 QA/QC processing on raw data
Outputs L2 netCDF file to ncData folder
ControlFiles:
L2_year.txt
or
L2.txt
ControlFile contents (see ControlFile/Templates/L2.txt for example):
[General]:
Enter list of functions to be performed
[Files]:
L1 input file name and path
L2 output file name and path
[Variables]:
Variable names and parameters for:
Range check to set upper and lower rejection limits
Diurnal check to reject observations by time of day that
are outside specified standard deviation limits
Timestamps for excluded dates
Timestamps for excluded hours
[Plots]:
Variable lists for plot generation
"""
logging.info(" Starting L2 processing ...")
self.do_progress(text='Load L2 Control File ...')
self.cf = qcio.load_controlfile(path='controlfiles')
if len(self.cf)==0:
logging.info( " L2: no control file chosen")
self.do_progress(text='Waiting for input ...')
return
infilename = qcio.get_infilenamefromcf(self.cf)
if not qcutils.file_exists(infilename): self.do_progress(text='An error occurred, check the console ...'); return
self.do_progress(text='Doing L2 QC ...')
self.ds1 = qcio.nc_read_series(infilename)
if len(self.ds1.series.keys())==0: self.do_progress(text='An error occurred, check the console ...'); del self.ds1; return
self.update_startenddate(str(self.ds1.series['DateTime']['Data'][0]),
str(self.ds1.series['DateTime']['Data'][-1]))
self.ds2 = qcls.l2qc(self.cf,self.ds1)
logging.info(' Finished L2 QC process')
self.do_progress(text='Finished L2 QC process')
self.do_progress(text='Saving L2 QC ...') # put up the progress message
outfilename = qcio.get_outfilenamefromcf(self.cf)
if len(outfilename)==0: self.do_progress(text='An error occurred, check the console ...'); return
ncFile = qcio.nc_open_write(outfilename)
qcio.nc_write_series(ncFile,self.ds2) # save the L2 data
self.do_progress(text='Finished saving L2 QC data') # tdo_progressell the user we are done
logging.info(' Finished saving L2 QC data')
logging.info("")
def do_l3qc(self):
"""
Call qcls.l3qc_sitename function
Performs L3 Corrections and QA/QC processing on L2 data
Outputs L3 netCDF file to ncData folder
Outputs L3 netCDF file to OzFlux folder
Available corrections:
* corrections requiring ancillary measurements or samples
marked with an asterisk
Linear correction
fixed slope
linearly shifting slope
Conversion of virtual temperature to actual temperature
2D Coordinate rotation
Massman correction for frequency attenuation*
Webb, Pearman and Leuning correction for flux effects on density
measurements
Conversion of virtual heat flux to actual heat flux
Correction of soil moisture content to empirical calibration
curve*
Addition of soil heat storage to ground ground heat flux*
ControlFiles:
L3_year.txt
or
L3a.txt
ControlFile contents (see ControlFile/Templates/L3.txt for example):
[General]:
Python control parameters
[Files]:
L2 input file name and path
L3 output file name and ncData folder path
L3 OzFlux output file name and OzFlux folder path
[Massman] (where available):
Constants used in frequency attenuation correction
zmd: instrument height (z) less zero-plane displacement
height (d), m
z0: aerodynamic roughness length, m
angle: angle from CSAT mounting point between CSAT and
IRGA mid-path, degrees
CSATarm: distance from CSAT mounting point to CSAT
mid-path, m
IRGAarm: distance from CSAT mounting point to IRGA
mid-path, m
[Soil]:
Constants used in correcting Fg for storage and in empirical
corrections of soil water content
FgDepth: Heat flux plate depth, m
BulkDensity: Soil bulk density, kg/m3
OrganicContent: Soil organic content, fraction
SwsDefault
Constants for empirical corrections using log(sensor)
and exp(sensor) functions (SWC_a0, SWC_a1, SWC_b0,
SWC_b1, SWC_t, TDR_a0, TDR_a1, TDR_b0, TDR_b1,
TDR_t)
Variable and attributes lists (empSWCin, empSWCout,
empTDRin, empTDRout, linTDRin, SWCattr, TDRattr)
[Output]:
Variable subset list for OzFlux output file
[Variables]:
Variable names and parameters for:
Range check to set upper and lower rejection limits
Diurnal check to reject observations by time of day that
are outside specified standard deviation limits
Timestamps, slope, and offset for Linear correction
[Plots]:
Variable lists for plot generation
"""
logging.info(" Starting L3 processing ...")
self.cf = qcio.load_controlfile(path='controlfiles')
if len(self.cf)==0:
logging.info( " L3: no control file chosen")
self.do_progress(text='Waiting for input ...')
return
infilename = qcio.get_infilenamefromcf(self.cf)
if not qcutils.file_exists(infilename): self.do_progress(text='An error occurred, check the console ...'); return
self.ds2 = qcio.nc_read_series(infilename)
if len(self.ds2.series.keys())==0: self.do_progress(text='An error occurred, check the console ...'); del self.ds2; return
self.update_startenddate(str(self.ds2.series['DateTime']['Data'][0]),
str(self.ds2.series['DateTime']['Data'][-1]))
self.do_progress(text='Doing L3 QC & Corrections ...')
self.ds3 = qcls.l3qc(self.cf,self.ds2)
self.do_progress(text='Finished L3')
txtstr = ' Finished L3: Standard processing for site: '
txtstr = txtstr+self.ds3.globalattributes['site_name'].replace(' ','')
logging.info(txtstr)
self.do_progress(text='Saving L3 QC & Corrected NetCDF data ...') # put up the progress message
outfilename = qcio.get_outfilenamefromcf(self.cf)
if len(outfilename)==0: self.do_progress(text='An error occurred, check the console ...'); return
ncFile = qcio.nc_open_write(outfilename)
outputlist = qcio.get_outputlistfromcf(self.cf,'nc')
qcio.nc_write_series(ncFile,self.ds3,outputlist=outputlist) # save the L3 data
self.do_progress(text='Finished saving L3 QC & Corrected NetCDF data') # tell the user we are done
logging.info(' Finished saving L3 QC & Corrected NetCDF data')
logging.info("")
def do_l4qc(self):
"""
Call qcls.l4qc_gapfill function
Performs L4 gap filling on L3 met data
or
Ingests L4 gap filled fluxes performed in external SOLO-ANN and c
omputes daily sums
Outputs L4 netCDF file to ncData folder
Outputs L4 netCDF file to OzFlux folder
ControlFiles:
L4_year.txt
or
L4b.txt
ControlFile contents (see ControlFile/Templates/L4.txt and
ControlFile/Templates/L4b.txt for examples):
[General]:
Python control parameters (SOLO)
Site characteristics parameters (Gap filling)
[Files]:
L3 input file name and path (Gap filling)
L4 input file name and path (SOLO)
L4 output file name and ncData folder path (both)
L4 OzFlux output file name and OzFlux folder path
[Variables]:
Variable subset list for OzFlux output file (where
available)
"""
logging.info(" Starting L4 processing ...")
cf = qcio.load_controlfile(path='controlfiles')
if len(cf)==0: self.do_progress(text='Waiting for input ...'); return
infilename = qcio.get_infilenamefromcf(cf)
if len(infilename)==0: self.do_progress(text='An error occurred, check the console ...'); return
if not qcutils.file_exists(infilename): self.do_progress(text='An error occurred, check the console ...'); return
ds3 = qcio.nc_read_series(infilename)
if len(ds3.series.keys())==0: self.do_progress(text='An error occurred, check the console ...'); del ds3; return
ds3.globalattributes['controlfile_name'] = cf['controlfile_name']
self.update_startenddate(str(ds3.series['DateTime']['Data'][0]),
str(ds3.series['DateTime']['Data'][-1]))
sitename = ds3.globalattributes['site_name']
self.do_progress(text='Doing L4 gap filling drivers: '+sitename+' ...')
if "Options" not in cf: cf["Options"]={}
cf["Options"]["call_mode"] = "interactive"
ds4 = qcls.l4qc(cf,ds3)
if ds4.returncodes["alternate"]=="quit" or ds4.returncodes["solo"]=="quit":
self.do_progress(text='Quitting L4: '+sitename)
logging.info(' Quitting L4: '+sitename)
else:
self.do_progress(text='Finished L4: '+sitename)
logging.info(' Finished L4: '+sitename)
self.do_progress(text='Saving L4 gap filled data ...') # put up the progress message
outfilename = qcio.get_outfilenamefromcf(cf)
if len(outfilename)==0: self.do_progress(text='An error occurred, check the console ...'); return
ncFile = qcio.nc_open_write(outfilename)
outputlist = qcio.get_outputlistfromcf(cf,'nc')
qcio.nc_write_series(ncFile,ds4,outputlist=outputlist) # save the L4 data
self.do_progress(text='Finished saving L4 gap filled data') # tell the user we are done
logging.info(' Finished saving L4 gap filled data')
logging.info("")
def do_l5qc(self):
"""
Call qcls.l5qc function to gap fill the fluxes.
"""
logging.info(" Starting L5 processing ...")
cf = qcio.load_controlfile(path='controlfiles')
if len(cf)==0: self.do_progress(text='Waiting for input ...'); return
infilename = qcio.get_infilenamefromcf(cf)
if len(infilename)==0: self.do_progress(text='An error occurred, check the console ...'); return
if not qcutils.file_exists(infilename): self.do_progress(text='An error occurred, check the console ...'); return
ds4 = qcio.nc_read_series(infilename)
if len(ds4.series.keys())==0: self.do_progress(text='An error occurred, check the console ...'); del ds4; return
ds4.globalattributes['controlfile_name'] = cf['controlfile_name']
self.update_startenddate(str(ds4.series['DateTime']['Data'][0]),
str(ds4.series['DateTime']['Data'][-1]))
sitename = ds4.globalattributes['site_name']
self.do_progress(text='Doing L5 gap filling fluxes: '+sitename+' ...')
if "Options" not in cf: cf["Options"]={}
cf["Options"]["call_mode"] = "interactive"
ds5 = qcls.l5qc(cf,ds4)
if ds5.returncodes["solo"]=="quit":
self.do_progress(text='Quitting L5: '+sitename)
logging.info(' Quitting L5: '+sitename)
else:
self.do_progress(text='Finished L5: '+sitename)
logging.info(' Finished L5: '+sitename)
self.do_progress(text='Saving L5 gap filled data ...') # put up the progress message
outfilename = qcio.get_outfilenamefromcf(cf)
if len(outfilename)==0: self.do_progress(text='An error occurred, check the console ...'); return
ncFile = qcio.nc_open_write(outfilename)
outputlist = qcio.get_outputlistfromcf(cf,'nc')
qcio.nc_write_series(ncFile,ds5,outputlist=outputlist) # save the L5 data
self.do_progress(text='Finished saving L5 gap filled data') # tell the user we are done
logging.info(' Finished saving L5 gap filled data')
logging.info("")
def do_l6qc(self):
"""
Call qcls.l6qc function to partition NEE into GPP and ER.
"""
logging.info(" Starting L6 processing ...")
cf = qcio.load_controlfile(path='controlfiles')
if len(cf)==0: self.do_progress(text='Waiting for input ...'); return
infilename = qcio.get_infilenamefromcf(cf)
if len(infilename)==0: self.do_progress(text='An error occurred, check the console ...'); return
if not qcutils.file_exists(infilename): self.do_progress(text='An error occurred, check the console ...'); return
ds5 = qcio.nc_read_series(infilename)
if len(ds5.series.keys())==0: self.do_progress(text='An error occurred, check the console ...'); del ds5; return
ds5.globalattributes['controlfile_name'] = cf['controlfile_name']
self.update_startenddate(str(ds5.series['DateTime']['Data'][0]),
str(ds5.series['DateTime']['Data'][-1]))
sitename = ds5.globalattributes['site_name']
self.do_progress(text='Doing L6 partitioning: '+sitename+' ...')
if "Options" not in cf: cf["Options"]={}
cf["Options"]["call_mode"] = "interactive"
ds6 = qcls.l6qc(cf,ds5)
self.do_progress(text='Finished L6: '+sitename)
logging.info(' Finished L6: '+sitename)
self.do_progress(text='Saving L6 partitioned data ...') # put up the progress message
outfilename = qcio.get_outfilenamefromcf(cf)
if len(outfilename)==0: self.do_progress(text='An error occurred, check the console ...'); return
ncFile = qcio.nc_open_write(outfilename)
outputlist = qcio.get_outputlistfromcf(cf,'nc')
qcio.nc_write_series(ncFile,ds6,outputlist=outputlist) # save the L6 data
self.do_progress(text='Finished saving L6 partitioned data') # tell the user we are done
logging.info(' Finished saving L6 partitioned data')
logging.info("")
def do_nc2ep_biomet(self):
""" Calls qcio.ep_biomet_write_csv. """
logging.info(' Starting conversion to EddyPro biomet file')
self.do_progress(text='Load control file ...')
self.cf = qcio.load_controlfile(path='controlfiles')
if len(self.cf)==0: self.do_progress(text='Waiting for input ...'); return
self.do_progress(text='Converting nc to EddyPro biomet CSV ...')
return_code = qcio.ep_biomet_write_csv(self.cf)
if return_code==0:
self.do_progress(text='An error occurred, check the console ...');
return
else:
logging.info(' Finished conversion to EddyPro biomet format')
self.do_progress(text='Finished conversion to EddyPro biomet format')
logging.info("")
def do_nc2fn(self):
""" Calls qcio.fn_write_csv. """
logging.info(' Starting conversion to FluxNet CSV file')
self.do_progress(text='Load control file ...')
self.cf = qcio.load_controlfile(path='controlfiles')
if len(self.cf)==0: self.do_progress(text='Waiting for input ...'); return
self.do_progress(text='Converting nc to FluxNet CSV ...')
qcio.fn_write_csv(self.cf)
logging.info(' Finished conversion')
self.do_progress(text='Finished conversion')
logging.info("")
def do_nc2reddyproc(self):
""" Calls qcio.reddyproc_write_csv."""
logging.info(' Starting conversion to REddyProc CSV file')
self.do_progress(text="Choosing netCDF file ...")
ncfilename = qcio.get_filename_dialog(path="../Sites",title="Choose a netCDF file")
if len(ncfilename)==0 or not os.path.exists(ncfilename):
self.do_progress(text="Waiting for input ..."); return
self.do_progress(text='Converting nc to REddyProc CSV ...')
qcio.reddyproc_write_csv(ncfilename)
logging.info(' Finished conversion')
self.do_progress(text='Finished conversion')
logging.info("")
def do_nc2smap(self):
""" Calls qcio.smap_write_csv. """
logging.info(' Starting conversion to SMAP CSV file')
self.do_progress(text='Load control file ...')
self.cf = qcio.load_controlfile(path='controlfiles')
if len(self.cf)==0: self.do_progress(text='Waiting for input ...'); return
self.do_progress(text='Converting nc to SMAP CSV ...')
qcio.smap_write_csv(self.cf)
logging.info(' Finished conversion')
self.do_progress(text='Finished conversion')
logging.info("")
def do_nc2xls(self):
""" Calls qcio.nc_2xls. """
logging.info(" Starting conversion to Excel file")
self.do_progress(text="Choosing netCDF file ...")
ncfilename = qcio.get_filename_dialog(path="../Sites",title="Choose a netCDF file")
if len(ncfilename)==0: self.do_progress(text="Waiting for input ..."); return
self.do_progress(text="Converting netCDF file to Excel file")
qcio.nc_2xls(ncfilename,outputlist=None)
self.do_progress(text="Finished converting netCDF file")
logging.info(" Finished converting netCDF file")
logging.info("")
def do_ncconcat(self):
"""
Calls qcio.nc_concatenate
"""
logging.info(' Starting concatenation of netCDF files')
self.do_progress(text='Loading control file ...')
cf = qcio.load_controlfile(path='controlfiles')
if len(cf)==0: self.do_progress(text='Waiting for input ...'); return
self.do_progress(text='Concatenating files')
qcio.nc_concatenate(cf)
self.do_progress(text='Finished concatenating files')
logging.info(' Finished concatenating files')
logging.info("")
def do_ncsplit(self):
"""
Calls qcio.nc_split
"""
logging.info(' Starting split of netCDF file')
self.do_progress(text='Splitting file')
qcio.nc_split()
self.do_progress(text='Finished splitting file')
logging.info(' Finished splitting file')
logging.info("")
def do_plotfingerprint(self,mode="standard"):
""" Plot fingerprint"""
logging.info(' Starting fingerprint plot')
self.do_progress(text='Doing fingerprint plot ...')
if mode=="standard":
stdname = "controlfiles/standard/fingerprint.txt"
if os.path.exists(stdname):
cf = qcio.get_controlfilecontents(stdname)
filename = qcio.get_filename_dialog(path='../Sites',title='Choose a netCDF file')
if len(filename)==0 or not os.path.exists(filename):
self.do_progress(text='Waiting for input ...'); return
if "Files" not in dir(cf): cf["Files"] = {}
cf["Files"]["file_path"] = ntpath.split(filename)[0]+"/"
cf["Files"]["in_filename"] = ntpath.split(filename)[1]
else:
self.do_progress(text='Loading control file ...')
cf = qcio.load_controlfile(path='controlfiles')
if len(cf)==0: self.do_progress(text='Waiting for input ...'); return
else:
self.do_progress(text='Loading control file ...')
cf = qcio.load_controlfile(path='controlfiles')
if len(cf)==0: self.do_progress(text='Waiting for input ...'); return
if "Options" not in cf: cf["Options"]={}
cf["Options"]["call_mode"] = "interactive"
self.do_progress(text='Plotting fingerprint ...')
qcplot.plot_fingerprint(cf)
self.do_progress(text='Finished plotting fingerprint')
logging.info(' Finished plotting fingerprint')
logging.info("")
def do_plotfluxnet(self,mode="standard"):
""" Plot FluxNet style time series of data."""
self.do_progress(text='Doing FluxNet plots ...')
if mode=="standard":
stdname = "controlfiles/standard/fluxnet.txt"
if os.path.exists(stdname):
cf = qcio.get_controlfilecontents(stdname)
filename = qcio.get_filename_dialog(path='../Sites',title='Choose a netCDF file')
if len(filename)==0 or not os.path.exists(filename):
self.do_progress(text='Waiting for input ...'); return
if "Files" not in dir(cf): cf["Files"] = {}
cf["Files"]["file_path"] = ntpath.split(filename)[0]+"/"
cf["Files"]["in_filename"] = ntpath.split(filename)[1]
else:
self.do_progress(text='Loading control file ...')
cf = qcio.load_controlfile(path='controlfiles')
if len(cf)==0: self.do_progress(text='Waiting for input ...'); return
else:
self.do_progress(text='Loading control file ...')
cf = qcio.load_controlfile(path='controlfiles')
if len(cf)==0: self.do_progress(text='Waiting for input ...'); return
self.do_progress(text='Plotting FluxNet style plots ...')
qcplot.plot_fluxnet(cf)
self.do_progress(text='Finished FluxNet plotting')
logging.info(' Finished FluxNet plotting')
def do_plotquickcheck(self):
""" Plot quickcheck"""
self.do_progress(text='Loading control file ...')
stdname = "controlfiles/standard/quickcheck.txt"
if os.path.exists(stdname):
cf = qcio.get_controlfilecontents(stdname)
filename = qcio.get_filename_dialog(path='../Sites',title='Choose an input file')
if len(filename)==0: self.do_progress(text='Waiting for input ...'); return
if "Files" not in dir(cf): cf["Files"] = {}
cf["Files"]["file_path"] = ntpath.split(filename)[0]+"/"
cf["Files"]["in_filename"] = ntpath.split(filename)[1]
else:
cf = qcio.load_controlfile(path='controlfiles')
if len(cf)==0: self.do_progress(text='Waiting for input ...'); return
self.do_progress(text='Plotting quickcheck ...')
qcplot.plot_quickcheck(cf)
self.do_progress(text='Finished plotting quickcheck')
logging.info(' Finished plotting quickcheck')
def do_plotL1L2(self):
"""
Plot L1 (raw) and L2 (QA/QC) data in blue and red, respectively
Control File for do_l2qc function used.
If L2 Control File not loaded, requires control file selection.
"""
if 'ds1' not in dir(self) or 'ds2' not in dir(self):
self.cf = qcio.load_controlfile(path='controlfiles')
if len(self.cf)==0: self.do_progress(text='Waiting for input ...'); return
l1filename = qcio.get_infilenamefromcf(self.cf)
if not qcutils.file_exists(l1filename): self.do_progress(text='An error occurred, check the console ...'); return
self.ds1 = qcio.nc_read_series(l1filename)
if len(self.ds1.series.keys())==0: self.do_progress(text='An error occurred, check the console ...'); del self.ds1; return
l2filename = qcio.get_outfilenamefromcf(self.cf)
self.ds2 = qcio.nc_read_series(l2filename)
if len(self.ds2.series.keys())==0: self.do_progress(text='An error occurred, check the console ...'); del self.ds2; return
self.update_startenddate(str(self.ds1.series['DateTime']['Data'][0]),
str(self.ds1.series['DateTime']['Data'][-1]))
self.do_progress(text='Plotting L1 & L2 QC ...')
cfname = self.ds2.globalattributes['controlfile_name']
self.cf = qcio.get_controlfilecontents(cfname)
for nFig in self.cf['Plots'].keys():
si = qcutils.GetDateIndex(self.ds1.series['DateTime']['Data'],self.plotstartEntry.get(),
ts=self.ds1.globalattributes['time_step'],default=0,match='exact')
ei = qcutils.GetDateIndex(self.ds1.series['DateTime']['Data'],self.plotendEntry.get(),
ts=self.ds1.globalattributes['time_step'],default=-1,match='exact')
plt_cf = self.cf['Plots'][str(nFig)]
if 'Type' in plt_cf.keys():
if str(plt_cf['Type']).lower() =='xy':
self.do_progress(text='Plotting L1 and L2 XY ...')
qcplot.plotxy(self.cf,nFig,plt_cf,self.ds1,self.ds2,si,ei)
else:
self.do_progress(text='Plotting L1 and L2 QC ...')
qcplot.plottimeseries(self.cf,nFig,self.ds1,self.ds2,si,ei)
else:
self.do_progress(text='Plotting L1 and L2 QC ...')
qcplot.plottimeseries(self.cf,nFig,self.ds1,self.ds2,si,ei)
self.do_progress(text='Finished plotting L1 and L2')
logging.info(' Finished plotting L1 and L2, check the GUI')
def do_plotL3L3(self):
"""
Plot L3 (QA/QC and Corrected) data
Control File for do_l3qc function used.
If L3 Control File not loaded, requires control file selection.
"""
if 'ds3' not in dir(self):
self.cf = qcio.load_controlfile(path='controlfiles')
if len(self.cf)==0: self.do_progress(text='Waiting for input ...'); return
l3filename = qcio.get_outfilenamefromcf(self.cf)
self.ds3 = qcio.nc_read_series(l3filename)
if len(self.ds3.series.keys())==0: self.do_progress(text='An error occurred, check the console ...'); del self.ds3; return
self.update_startenddate(str(self.ds3.series['DateTime']['Data'][0]),
str(self.ds3.series['DateTime']['Data'][-1]))
self.do_progress(text='Plotting L3 QC ...')
cfname = self.ds3.globalattributes['controlfile_name']
self.cf = qcio.get_controlfilecontents(cfname)
for nFig in self.cf['Plots'].keys():
si = qcutils.GetDateIndex(self.ds3.series['DateTime']['Data'],self.plotstartEntry.get(),
ts=self.ds3.globalattributes['time_step'],default=0,match='exact')
ei = qcutils.GetDateIndex(self.ds3.series['DateTime']['Data'],self.plotendEntry.get(),
ts=self.ds3.globalattributes['time_step'],default=-1,match='exact')
plt_cf = self.cf['Plots'][str(nFig)]
if 'Type' in plt_cf.keys():
if str(plt_cf['Type']).lower() =='xy':
self.do_progress(text='Plotting L3 XY ...')
qcplot.plotxy(self.cf,nFig,plt_cf,self.ds3,self.ds3,si,ei)
else:
self.do_progress(text='Plotting L3 QC ...')
SeriesList = ast.literal_eval(plt_cf['Variables'])
qcplot.plottimeseries(self.cf,nFig,self.ds3,self.ds3,si,ei)
else:
self.do_progress(text='Plotting L3 QC ...')
qcplot.plottimeseries(self.cf,nFig,self.ds3,self.ds3,si,ei)
self.do_progress(text='Finished plotting L3')
logging.info(' Finished plotting L3, check the GUI')
def do_plotL3L4(self):
"""
Plot L3 (QA/QC and Corrected) and L4 (Gap Filled) data in blue and
red, respectively
Control File for do_l4qc function used.
If L4 Control File not loaded, requires control file selection.
"""
if 'ds3' not in dir(self) or 'ds4' not in dir(self):
self.cf = qcio.load_controlfile(path='controlfiles')
if len(self.cf)==0:
self.do_progress(text='Waiting for input ...')
return
l3filename = qcio.get_infilenamefromcf(self.cf)
if not qcutils.file_exists(l3filename): self.do_progress(text='An error occurred, check the console ...'); return
self.ds3 = qcio.nc_read_series(l3filename)
if len(self.ds3.series.keys())==0: self.do_progress(text='An error occurred, check the console ...'); del self.ds3; return
l4filename = qcio.get_outfilenamefromcf(self.cf)
self.ds4 = qcio.nc_read_series(l4filename)
if len(self.ds4.series.keys())==0: self.do_progress(text='An error occurred, check the console ...'); del self.ds4; return
self.update_startenddate(str(self.ds3.series['DateTime']['Data'][0]),
str(self.ds3.series['DateTime']['Data'][-1]))
self.do_progress(text='Plotting L3 and L4 QC ...')
cfname = self.ds4.globalattributes['controlfile_name']
self.cf = qcio.get_controlfilecontents(cfname)
for nFig in self.cf['Plots'].keys():
si = qcutils.GetDateIndex(self.ds3.series['DateTime']['Data'],self.plotstartEntry.get(),
ts=self.ds3.globalattributes['time_step'],default=0,match='exact')
ei = qcutils.GetDateIndex(self.ds3.series['DateTime']['Data'],self.plotendEntry.get(),
ts=self.ds3.globalattributes['time_step'],default=-1,match='exact')
qcplot.plottimeseries(self.cf,nFig,self.ds3,self.ds4,si,ei)
self.do_progress(text='Finished plotting L4')
logging.info(' Finished plotting L4, check the GUI')
def do_plotL4L5(self):
"""
Plot L4 (Gap filled) and L5 (Partitioned) data.
"""
pass
def do_plotL6_summary(self):
"""
Plot L6 summary.
"""
cf = qcio.load_controlfile(path='controlfiles')
if len(cf)==0:
self.do_progress(text='Waiting for input ...')
return
if "Options" not in cf: cf["Options"]={}
cf["Options"]["call_mode"] = "interactive"
l6filename = qcio.get_outfilenamefromcf(cf)
if not qcutils.file_exists(l6filename): self.do_progress(text='An error occurred, check the console ...'); return
ds6 = qcio.nc_read_series(l6filename)
if len(ds6.series.keys())==0: self.do_progress(text='An error occurred, check the console ...'); del ds6; return
self.update_startenddate(str(ds6.series['DateTime']['Data'][0]),
str(ds6.series['DateTime']['Data'][-1]))
self.do_progress(text='Plotting L6 summary ...')
qcgf.ImportSeries(cf,ds6)
qcrp.L6_summary(cf,ds6)
self.do_progress(text='Finished plotting L6 summary')
logging.info(' Finished plotting L6 summary, check the GUI')
def do_progress(self,text):
"""
Update progress message in QC Data GUI
"""
self.progress.destroy()
self.progress = tk.Label(self.org_frame, text=text)
self.progress.grid(row=8,column=0,columnspan=6,sticky="W")
self.update()
def do_quit(self):
"""
Close plot windows and quit QC Data GUI
"""
self.do_progress(text='Closing plot windows ...') # tell the user what we're doing
logging.info(' Closing plot windows ...')
matplotlib.pyplot.close('all')
self.do_progress(text='Quitting ...') # tell the user what we're doing
logging.info(' Quitting ...')
self.quit()
def do_savexL2(self):
"""
Call nc2xl function
Exports excel data from NetCDF file
Outputs L2 Excel file containing Data and Flag worksheets
"""
self.do_progress(text='Exporting L2 NetCDF -> Xcel ...') # put up the progress message
# get the output filename
outfilename = qcio.get_outfilenamefromcf(self.cf)
# get the output list
outputlist = qcio.get_outputlistfromcf(self.cf,'xl')
qcio.nc_2xls(outfilename,outputlist=outputlist)
self.do_progress(text='Finished L2 Data Export') # tell the user we are done
logging.info(' Finished saving L2 data')
def do_savexL3(self):
"""
Call nc2xl function
Exports excel data from NetCDF file
Outputs L3 Excel file containing Data and Flag worksheets
"""
self.do_progress(text='Exporting L3 NetCDF -> Xcel ...') # put up the progress message
# get the output filename
outfilename = qcio.get_outfilenamefromcf(self.cf)
# get the output list
outputlist = qcio.get_outputlistfromcf(self.cf,'xl')
qcio.nc_2xls(outfilename,outputlist=outputlist)
self.do_progress(text='Finished L3 Data Export') # tell the user we are done
logging.info(' Finished saving L3 data')
def do_xl2nc(self):
"""
Calls qcio.xl2nc
"""
logging.info(" Starting L1 processing ...")
self.do_progress(text='Loading control file ...')
self.cf = qcio.load_controlfile(path='controlfiles')
if len(self.cf)==0: self.do_progress(text='Waiting for input ...'); return
self.do_progress(text='Reading Excel file & writing to netCDF')
rcode = qcio.xl2nc(self.cf,"L1")
if rcode==1:
self.do_progress(text='Finished writing to netCDF ...')
logging.info(' Finished writing to netCDF ...')
else:
self.do_progress(text='An error occurred, check the console ...')
def update_startenddate(self,startstr,endstr):
"""
Read start and end timestamps from data and report in QC Data GUI
"""
self.filestartValue.destroy()
self.fileendValue.destroy()
self.filestartValue = tk.Label(self.org_frame,text=startstr)
self.filestartValue.grid(row=3,column=0,columnspan=3)
self.fileendValue = tk.Label(self.org_frame,text=endstr)
self.fileendValue.grid(row=3,column=3,columnspan=3)
self.update()
if __name__ == "__main__":
#log = qcutils.startlog('qc','logfiles/qc.log')
qcGUI = qcgui(None)
main_title = cfg.version_name+' Main GUI '+cfg.version_number
qcGUI.title(main_title)
qcGUI.mainloop()
qcGUI.destroy()
logging.info('QC: All done')
| gpl-3.0 | -7,339,864,531,312,118,000 | 51.25102 | 134 | 0.600184 | false | 3.711118 | false | false | false |
kbase/assembly | lib/assembly/pipe.py | 4 | 3878 | import itertools
import re
"""
# parameter sweep
input: ['trim_sort', 'kiki', '?k=29-30']
output: [['trim_sort', 'kiki', '?k=29], ['trim_sort', 'kiki', '?k=30]]
# parameter sweep with multiple assemblers
input: ['trim_sort', 'kiki ?k=29-30 velvet']
output: [['trim_sort', 'kiki', '?k=29], ['trim_sort', 'kiki', '?k=30], ['trim_sort', 'velvet']]
# binary parameter sweep
CLI: trim_sort kiki ?k=29-30 ?cov=20-21
input: ['trim_sort', 'kiki', '?k=29-30', ?cov=20-21]
output: [['trim_sort', 'kiki', '?k=29, '?cov=20], ['trim_sort', 'kiki', '?k=30, '?cov=20],
['trim_sort', 'kiki', '?k=29, '?cov=21], ['trim_sort', 'kiki', '?k=30, '?cov=21]]
"""
#my_pipe = ['trim_sort', '?length=10-11', 'kiki ?k=29-30 ?cov=29-30']
my_pipe = ['ma', '?k=1,5,3', 'b']
#my_pipe = ['a' , 'b ?k=1,10-11,20,30:40:2']
test=['sga_preprocess', '?min_length=29,100,150','sga_ec', 'tagdust',
'velvet ?hash_length=31:39:2 idba']
def parse_pipe(pipe):
"""
Parses modules and parameters into stages
Input: a flat (no quotes) list of modules and params
e.g. ['kiki', '?k=29-30', 'velvet']
Output: list of lists containing single modules and
parameters
e.g. [['kiki', '?k=29'], ['kiki', '?k=30'], ['velvet']]
"""
module = []
stages = []
for string in pipe:
if not string.startswith('?'):
if module:
stages.append(module) #flush
module = [string]
else:
module.append(string)
else:
module.append(string) #param
if module:
stages.append(module)
stages = [expand_sweep(m) for m in stages]
return stages
def parse_branches(pipe):
stages = []
flat_pipe = []
for i in range(len(pipe)):
if len(pipe[i].split(' ')) == 1:
flat_pipe.append(pipe[i])
try:
if len(pipe[i+1].split(' ')) != 1:
stages += parse_pipe(flat_pipe)
flat_pipe = []
except:
stages += parse_pipe(flat_pipe)
flat_pipe = []
else: # parenth
stages += [list(itertools.chain(*parse_pipe(pipe[i].split(' '))))]
cart = [list(itertools.product(*stages))]
all_pipes = []
for pipe in cart[0]:
all_pipes.append(list(itertools.chain(*pipe)))
#### Remove occurences of 'none'
for i, p in enumerate(all_pipes):
all_pipes[i] = [mod for mod in p if mod != 'none']
return all_pipes
def expand_sweep(module):
"""
[m, ?p=1-2, ?p=3-4] -> [m, p1, p3, m, p2, p3, m, p1, p4, m, p1, p4]
"""
expanded = []
has_range = False
for word in module:
if word.startswith('?'):
f = re.split('\?|=', word)[1:]
flag = f[0]
params = f[1]
sweep = []
for param in params.split(','):
s = re.split('-|:', param)
if len(s) != 1: #is range
has_range = True
delim = s[0].find('=')+1
if delim == 1:
break
srange = (int(s[0][delim:]),int(s[1]))
step_size = 1
if len(s) == 3:
step_size = int(s[2])
sweep += ['?{}={}'.format(flag, x)
for x in range(
srange[0], srange[1]+1, step_size)]
else:
sweep.append('?{}={}'.format(flag, s[0]))
has_range = True
expanded.append(sweep)
else: #mod name
expanded.append([word])
if has_range:
cart = [list(itertools.product(*expanded))]
flat = list(itertools.chain(*cart))
return flat
else:
return [module]
#print parse_branches(my_pipe)
| mit | 8,232,373,787,818,876,000 | 32.145299 | 95 | 0.47344 | false | 3.306053 | false | false | false |
RaisingTheDerp/raisingthebar | root/devtools/AddFunctionPrologue.py | 1 | 2929 |
# Assuming all functions begin with ')' followed by '{', just find the matching brace and
# add a line with 'g_pVCR->SyncToken("<random string here>");'
import dlexer
import sys
class BlankStruct:
pass
def MatchParensBack( list, iStart ):
parenCount = -1
for i in range( 0, iStart ):
if list[iStart-i].id == __TOKEN_OPENPAREN:
parenCount += 1
elif list[iStart-i].id == __TOKEN_CLOSEPAREN:
parenCount -= 1
if parenCount == 0:
return iStart - i
return -1
if len( sys.argv ) >= 2:
# Setup the parser.
parser = dlexer.DLexer( 0 )
__TOKEN_NEWLINE = parser.AddToken( '\n' )
__TOKEN_WHITESPACE = parser.AddToken( '[ \\t\\f\\v]+' )
__TOKEN_OPENBRACE = parser.AddToken( '{' )
__TOKEN_CLOSEBRACE = parser.AddToken( '}' )
__TOKEN_OPENPAREN = parser.AddToken( '\(' )
__TOKEN_CLOSEPAREN = parser.AddToken( '\)' )
__TOKEN_COMMENT = parser.AddToken( r"\/\/.*" )
__TOKEN_CONST = parser.AddToken( "const" )
__TOKEN_IF = parser.AddToken( "if" )
__TOKEN_WHILE = parser.AddToken( "while" )
__TOKEN_FOR = parser.AddToken( "for" )
__TOKEN_SWITCH = parser.AddToken( "switch" )
validChars = r"\~\@\#\$\%\^\&\!\,\w\.-/\[\]\<\>\""
__TOKEN_IDENT = parser.AddToken( '[' + validChars + ']+' )
__TOKEN_OPERATOR = parser.AddToken( "\=|\+" )
__TOKEN_SCOPE_OPERATOR = parser.AddToken( "::" )
__TOKEN_IGNORE = parser.AddToken( r"\#|\;|\:|\||\?|\'|\\|\*|\-|\`" )
head = None
# First, read all the tokens into a list.
list = []
parser.BeginReadFile( sys.argv[1] )
while 1:
m = parser.GetToken()
if m:
list.append( m )
else:
break
# Make a list of all the non-whitespace ones.
nw = []
for token in list:
if token.id == __TOKEN_NEWLINE or token.id == __TOKEN_WHITESPACE:
token.iNonWhitespace = -2222
else:
token.iNonWhitespace = len( nw )
nw.append( token )
# Get ready to output sync tokens.
file = open( sys.argv[1], 'r' )
fileLines = file.readlines()
file.close()
curLine = 1
iCur = 0
file = open( sys.argv[1], 'w' )
# Now, search for the patterns we're interested in.
# Look for <ident>::<ident> '(' <idents...> ')' followed by a '{'. This would be a function.
for token in list:
file.write( token.val )
if token.id == __TOKEN_NEWLINE:
curLine += 1
if token.id == __TOKEN_OPENBRACE:
i = token.iNonWhitespace
if i >= 6:
if nw[i-1].id == __TOKEN_CLOSEPAREN:
pos = MatchParensBack( nw, i-2 )
if pos != -1:
if nw[pos-1].id == __TOKEN_IDENT:
#ADD PROLOGUE CODE HERE
#file.write( "\n\tg_pVCR->SyncToken( \"%d_%s\" ); // AUTO-GENERATED SYNC TOKEN\n" % (iCur, nw[pos-1].val) )
iCur += 1
# TEST CODE TO PRINT OUT FUNCTION NAMES
#if nw[pos-2].id == __TOKEN_SCOPE_OPERATOR:
# print "%d: %s::%s" % ( curLine, nw[pos-3].val, nw[pos-1].val )
#else:
# print "%d: %s" % ( curLine, nw[pos-1].val )
file.close()
else:
print "VCRMode_AddSyncTokens <filename>"
| gpl-3.0 | -2,518,166,003,032,879,600 | 24.25 | 114 | 0.592352 | false | 2.712037 | false | false | false |
ThoriumGroup/thorium | thorium/__init__.py | 1 | 8928 | #!/usr/bin/env python
"""
Thorium
=======
Thorium is a project that combines various python modules and tools originally
sourced from Nukepedia. It provides a streamlined way of managing their
versions and customizing the installation. While thorium ships as a complete
package, individual submodules can be activated and deactivated via config
files or arguments passed to thorium.
## Installation
Before we get into installation, a quick warning. Thorium is made up of many
submodules that are designed and still released to work independent of
thorium. When thorium imports those modules, it imports them into the global
namespace so that Nuke can access the modules directly, without having to go
through the thorium namespace. It does this by directly accessing and importing
straight into the `__builtin__` namespace. This is normally not recommended.
While every effort has been made to ensure that these submodules are named
uniquely, the python namespace can get very tricky and managers of facility
installations should carefully compare the modules thorium is set to import
with any global facility modules, otherwise those facility modules will
be inaccessible from within Nuke.
Installation can be done via pip (`pip install thorium`), an rpm or by manually
placing the 'thorium' folder in your .nuke directory or anywhere else within
the Nuke python path.
Then, add the following lines to your 'init.py' file:
::
import thorium
thorium.run()
And the following lines to your 'menu.py' file:
::
import thorium
thorium.run_gui()
You can turn off the usage of specific modules by passing a dictionary with the
module name and a bool.
::
import thorium
thorium.run_gui({'animatedSnap3D': False})
Now `animatedSnap3D` will not load, and every other module will. You can
reverse this behavior by passing the `default` argument `False`, which will
cause all modules not specifically listed as True to not be loaded.
::
import thorium
thorium.run_gui({'animatedSnap3D': True}, default=False)
Now `animatedSnap3D` will be the ONLY module that loads- all others will not
load, since the default is False.
## Usage
After the run functions above have executed, each submodule will be available
in it's native namespace. Modules with menu items will appear in their correct
place, and the python commands will be available for use from anywhere in Nuke.
## Classes
GlobalInjector
Injects set attributes directly into the global namespace. Thorium
uses this to import modules into '__builtin__'
## Public Functions
run()
Imports and runs the thorium submodules that should be available to
nuke and scripts at all times.
run_gui()
Imports and runs the thorium submodules that are only needed to user
interaction in the GUI.
## License
The MIT License (MIT)
Thorium
Copyright (c) 2014 Sean Wallitsch
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
# =============================================================================
# GLOBALS
# =============================================================================
__author__ = "Sean Wallitsch"
__author_email__ = "[email protected]"
__copyright__ = "Copyright 2014, Sean Wallitsch"
__credits__ = [
"Ivan Busquets",
"Philippe Huberdeau",
"Alexey Kuchinski",
"Frank Rueter",
"Sean Wallitsch",
]
__license__ = "MIT"
__version__ = "0.1b5"
__maintainer__ = "Sean Wallitsch"
__maintainer_email__ = "[email protected]"
__module_name__ = "thorium"
__short_desc__ = "Combines and manages many Nuke python packages"
__status__ = "Development"
__url__ = "https://github.com/ThoriumGroup/thorium"
# =============================================================================
# EXPORTS
# =============================================================================
__all__ = [
'run',
'run_gui'
]
# =============================================================================
# PRIVATE FUNCTIONS
# =============================================================================
def _importer(module):
"""Imports and returns the given string as a module"""
return __import__(module, globals())
# =============================================================================
# CLASSES
# =============================================================================
class GlobalInjector(object):
"""Inject into the global namespace of "__builtin__"
Assigning to variables declared global in a function, injects them only
into the module's global namespace.
>>> global_namespace = sys.modules['__builtin__'].__dict__
>>> #would need
>>> global_namespace['aname'] = 'avalue'
>>> #With
>>> global_namespace = GlobalInjector()
>>> #one can do
>>> global_namespace.bname = 'bvalue'
>>> #reading from it is simply
>>> bname
bvalue
Class is from the following stackoverflow:
http://stackoverflow.com/questions/11813287/insert-variable-into-global-namespace-from-within-a-function
"""
def __init__(self):
import sys
self.__dict__['modules'] = []
self.__dict__['builtin'] = sys.modules['__builtin__'].__dict__
def __setattr__(self, name, value):
"""Adds an object to the __builtin__ namespace under name.
While this can be used to inject any object into the __builtin__
namespace, it's particularly useful for importing.
>>> global_namespace = GlobalInjector()
>>> global_namespace.random = __import__("random", globals())
>>> random.randint(0, 100)
67
`random` has now been imported into the global namespace. This works
even when global_namespace is within a local scope.
Args:
name : (str)
The variable name the module should be added under.
value : (<module>|any other object)
The python object to be referenced by name.
Returns:
None
Raises:
N/A
"""
self.builtin[name] = value
self.modules.append(name)
def reset(self):
""" Removes the objects that GlobalInjector has placed in the namespace
Note that when used for imported modules, this does not reload, or
uncache the modules.
This is mostly useful for testing.
Args:
N/A
Returns:
None
Raises:
N/A
"""
for module in self.modules:
if module in self.builtin:
del(self.builtin[module])
self.modules = []
# =============================================================================
# PUBLIC FUNCTIONS
# =============================================================================
def run(modules=None, default=True):
"""Imports and runs the submodules that must be available at all times"""
global_namespace = GlobalInjector()
if not modules:
modules = {}
pass
# =============================================================================
def run_gui(modules=None, default=True, menu_name='Thorium'):
"""Imports and runs gui only submodules"""
global_namespace = GlobalInjector()
if not modules:
modules = {}
if modules.get('animatedSnap3D', default):
global_namespace.animatedSnap3D = _importer('animatedSnap3D')
animatedSnap3D.run()
if modules.get('cardToTrack', default):
global_namespace.cardToTrack = _importer('cardToTrack')
cardToTrack.run(menu=menu_name)
if modules.get('iconPanel', default):
global_namespace.iconPanel = _importer('iconPanel')
iconPanel.run()
if modules.get('Keying', default):
global_namespace.keying = _importer('keying')
keying.run()
if modules.get('viewerSync', default):
global_namespace.viewerSync = _importer('viewerSync')
viewerSync.run()
| mit | -3,899,632,006,197,299,700 | 31.347826 | 108 | 0.621528 | false | 4.411067 | false | false | false |
kstrauser/schematics | schematics/types/compound.py | 5 | 14905 | # -*- coding: utf-8 -*-
from __future__ import division
from collections import Iterable
import itertools
from ..exceptions import ValidationError, ConversionError, ModelValidationError, StopValidation
from ..models import Model
from ..transforms import export_loop, EMPTY_LIST, EMPTY_DICT
from .base import BaseType
from six import iteritems
from six import string_types as basestring
from six import text_type as unicode
class MultiType(BaseType):
def validate(self, value):
"""Report dictionary of errors with lists of errors as values of each
key. Used by ModelType and ListType.
"""
errors = {}
for validator in self.validators:
try:
validator(value)
except ModelValidationError as exc:
errors.update(exc.messages)
except StopValidation as exc:
errors.update(exc.messages)
break
if errors:
raise ValidationError(errors)
return value
def export_loop(self, shape_instance, field_converter,
role=None, print_none=False):
raise NotImplementedError
def init_compound_field(self, field, compound_field, **kwargs):
"""
Some of non-BaseType fields requires `field` arg.
Not avoid name conflict, provide it as `compound_field`.
Example:
comments = ListType(DictType, compound_field=StringType)
"""
if compound_field:
field = field(field=compound_field, **kwargs)
else:
field = field(**kwargs)
return field
class ModelType(MultiType):
def __init__(self, model_class, **kwargs):
self.model_class = model_class
self.fields = self.model_class.fields
validators = kwargs.pop("validators", [])
self.strict = kwargs.pop("strict", True)
def validate_model(model_instance):
model_instance.validate()
return model_instance
super(ModelType, self).__init__(validators=[validate_model] + validators, **kwargs)
def __repr__(self):
return object.__repr__(self)[:-1] + ' for %s>' % self.model_class
def to_native(self, value, mapping=None, context=None):
# We have already checked if the field is required. If it is None it
# should continue being None
if mapping is None:
mapping = {}
if value is None:
return None
if isinstance(value, self.model_class):
return value
if not isinstance(value, dict):
raise ConversionError(
u'Please use a mapping for this field or {0} instance instead of {1}.'.format(
self.model_class.__name__,
type(value).__name__))
# partial submodels now available with import_data (ht ryanolson)
model = self.model_class()
return model.import_data(value, mapping=mapping, context=context,
strict=self.strict)
def to_primitive(self, model_instance, context=None):
primitive_data = {}
for field_name, field, value in model_instance.atoms():
serialized_name = field.serialized_name or field_name
if value is None and model_instance.allow_none(field):
primitive_data[serialized_name] = None
else:
primitive_data[serialized_name] = field.to_primitive(value,
context)
return primitive_data
def export_loop(self, model_instance, field_converter,
role=None, print_none=False):
"""
Calls the main `export_loop` implementation because they are both
supposed to operate on models.
"""
if isinstance(model_instance, self.model_class):
model_class = model_instance.__class__
else:
model_class = self.model_class
shaped = export_loop(model_class, model_instance,
field_converter,
role=role, print_none=print_none)
if shaped and len(shaped) == 0 and self.allow_none():
return shaped
elif shaped:
return shaped
elif print_none:
return shaped
class ListType(MultiType):
def __init__(self, field, min_size=None, max_size=None, **kwargs):
if not isinstance(field, BaseType):
compound_field = kwargs.pop('compound_field', None)
field = self.init_compound_field(field, compound_field, **kwargs)
self.field = field
self.min_size = min_size
self.max_size = max_size
validators = [self.check_length, self.validate_items] + kwargs.pop("validators", [])
super(ListType, self).__init__(validators=validators, **kwargs)
@property
def model_class(self):
return self.field.model_class
def _force_list(self, value):
if value is None or value == EMPTY_LIST:
return []
try:
if isinstance(value, basestring):
raise TypeError()
if isinstance(value, dict):
return [value[unicode(k)] for k in sorted(map(int, value.keys()))]
return list(value)
except TypeError:
return [value]
def to_native(self, value, context=None):
items = self._force_list(value)
return [self.field.to_native(item, context) for item in items]
def check_length(self, value):
list_length = len(value) if value else 0
if self.min_size is not None and list_length < self.min_size:
message = ({
True: u'Please provide at least %d item.',
False: u'Please provide at least %d items.',
}[self.min_size == 1]) % self.min_size
raise ValidationError(message)
if self.max_size is not None and list_length > self.max_size:
message = ({
True: u'Please provide no more than %d item.',
False: u'Please provide no more than %d items.',
}[self.max_size == 1]) % self.max_size
raise ValidationError(message)
def validate_items(self, items):
errors = []
for item in items:
try:
self.field.validate(item)
except ValidationError as exc:
errors.append(exc.messages)
if errors:
raise ValidationError(errors)
def to_primitive(self, value, context=None):
return [self.field.to_primitive(item, context) for item in value]
def export_loop(self, list_instance, field_converter,
role=None, print_none=False):
"""Loops over each item in the model and applies either the field
transform or the multitype transform. Essentially functions the same
as `transforms.export_loop`.
"""
data = []
for value in list_instance:
if hasattr(self.field, 'export_loop'):
shaped = self.field.export_loop(value, field_converter,
role=role)
feels_empty = shaped and len(shaped) == 0
else:
shaped = field_converter(self.field, value)
feels_empty = shaped is None
# Print if we want empty or found a value
if feels_empty and self.field.allow_none():
data.append(shaped)
elif shaped is not None:
data.append(shaped)
elif print_none:
data.append(shaped)
# Return data if the list contains anything
if len(data) > 0:
return data
elif len(data) == 0 and self.allow_none():
return data
elif print_none:
return data
class DictType(MultiType):
def __init__(self, field, coerce_key=None, **kwargs):
if not isinstance(field, BaseType):
compound_field = kwargs.pop('compound_field', None)
field = self.init_compound_field(field, compound_field, **kwargs)
self.coerce_key = coerce_key or unicode
self.field = field
validators = [self.validate_items] + kwargs.pop("validators", [])
super(DictType, self).__init__(validators=validators, **kwargs)
@property
def model_class(self):
return self.field.model_class
def to_native(self, value, safe=False, context=None):
if value == EMPTY_DICT:
value = {}
value = value or {}
if not isinstance(value, dict):
raise ValidationError(u'Only dictionaries may be used in a DictType')
return dict((self.coerce_key(k), self.field.to_native(v, context))
for k, v in iteritems(value))
def validate_items(self, items):
errors = {}
for key, value in iteritems(items):
try:
self.field.validate(value)
except ValidationError as exc:
errors[key] = exc
if errors:
raise ValidationError(errors)
def to_primitive(self, value, context=None):
return dict((unicode(k), self.field.to_primitive(v, context))
for k, v in iteritems(value))
def export_loop(self, dict_instance, field_converter,
role=None, print_none=False):
"""Loops over each item in the model and applies either the field
transform or the multitype transform. Essentially functions the same
as `transforms.export_loop`.
"""
data = {}
for key, value in iteritems(dict_instance):
if hasattr(self.field, 'export_loop'):
shaped = self.field.export_loop(value, field_converter,
role=role)
feels_empty = shaped and len(shaped) == 0
else:
shaped = field_converter(self.field, value)
feels_empty = shaped is None
if feels_empty and self.field.allow_none():
data[key] = shaped
elif shaped is not None:
data[key] = shaped
elif print_none:
data[key] = shaped
if len(data) > 0:
return data
elif len(data) == 0 and self.allow_none():
return data
elif print_none:
return data
class PolyModelType(MultiType):
def __init__(self, model_classes, **kwargs):
if isinstance(model_classes, type) and issubclass(model_classes, Model):
self.model_classes = (model_classes,)
allow_subclasses = True
elif isinstance(model_classes, Iterable) \
and not isinstance(model_classes, basestring):
self.model_classes = tuple(model_classes)
allow_subclasses = False
else:
raise Exception("The first argument to PolyModelType.__init__() "
"must be a model or an iterable.")
validators = kwargs.pop("validators", [])
self.strict = kwargs.pop("strict", True)
self.claim_function = kwargs.pop("claim_function", None)
self.allow_subclasses = kwargs.pop("allow_subclasses", allow_subclasses)
def validate_model(model_instance):
model_instance.validate()
return model_instance
MultiType.__init__(self, validators=[validate_model] + validators, **kwargs)
def __repr__(self):
return object.__repr__(self)[:-1] + ' for %s>' % str(self.model_classes)
def is_allowed_model(self, model_instance):
if self.allow_subclasses:
if isinstance(model_instance, self.model_classes):
return True
else:
if model_instance.__class__ in self.model_classes:
return True
return False
def to_native(self, value, mapping=None, context=None):
if mapping is None:
mapping = {}
if value is None:
return None
if self.is_allowed_model(value):
return value
if not isinstance(value, dict):
if len(self.model_classes) > 1:
instanceof_msg = 'one of: {}'.format(', '.join(
cls.__name__ for cls in self.model_classes))
else:
instanceof_msg = self.model_classes[0].__name__
raise ConversionError(u'Please use a mapping for this field or '
'an instance of {}'.format(instanceof_msg))
model_class = self.find_model(value)
model = model_class()
return model.import_data(value, mapping=mapping, context=context,
strict=self.strict)
def find_model(self, data):
"""Finds the intended type by consulting potential classes or `claim_function`."""
chosen_class = None
if self.claim_function:
chosen_class = self.claim_function(self, data)
else:
candidates = self.model_classes
if self.allow_subclasses:
candidates = itertools.chain.from_iterable(
([m] + m._subclasses for m in candidates))
fallback = None
matching_classes = []
for cls in candidates:
match = None
if '_claim_polymorphic' in cls.__dict__:
match = cls._claim_polymorphic(data)
elif not fallback: # The first model that doesn't define the hook
fallback = cls # can be used as a default if there's no match.
if match:
matching_classes.append(cls)
if not matching_classes and fallback:
chosen_class = fallback
elif len(matching_classes) == 1:
chosen_class = matching_classes[0]
else:
raise Exception("Got ambiguous input for polymorphic field")
if chosen_class:
return chosen_class
else:
raise Exception("Input for polymorphic field did not match any model")
def export_loop(self, model_instance, field_converter,
role=None, print_none=False):
model_class = model_instance.__class__
if not self.is_allowed_model(model_instance):
raise Exception("Cannot export: {} is not an allowed type".format(model_class))
shaped = export_loop(model_class, model_instance,
field_converter,
role=role, print_none=print_none)
if shaped and len(shaped) == 0 and self.allow_none():
return shaped
elif shaped:
return shaped
elif print_none:
return shaped
| bsd-3-clause | -8,087,686,044,882,606,000 | 34.236407 | 95 | 0.56424 | false | 4.509834 | false | false | false |
huyang1532/learn-python | xlrd-0.6.1/xlrd/sheet.py | 1 | 56349 | # -*- coding: cp1252 -*-
##
# <p> Portions copyright © 2005-2006 Stephen John Machin, Lingfo Pty Ltd</p>
# <p>This module is part of the xlrd package, which is released under a BSD-style licence.</p>
##
# 2007-04-22 SJM Remove experimental "trimming" facility.
from biffh import *
from timemachine import *
from struct import unpack
from formula import dump_formula, decompile_formula, rangename2d
from formatting import nearest_colour_index
import time
DEBUG = 0
_WINDOW2_options = (
# Attribute names and initial values to use in case
# a WINDOW2 record is not written.
("show_formulas", 0),
("show_grid_lines", 1),
("show_sheet_headers", 1),
("panes_are_frozen", 0),
("show_zero_values", 1),
("automatic_grid_line_colour", 1),
("columns_from_right_to_left", 0),
("show_outline_symbols", 1),
("remove_splits_if_pane_freeze_is_removed", 0),
("sheet_selected", 0),
# "sheet_visible" appears to be merely a clone of "sheet_selected".
# The real thing is the visibility attribute from the BOUNDSHEET record.
("sheet_visible", 0),
("show_in_page_break_preview", 0),
)
##
# <p>Contains the data for one worksheet.</p>
#
# <p>In the cell access functions, "rowx" is a row index, counting from zero, and "colx" is a
# column index, counting from zero.
# Negative values for row/column indexes and slice positions are supported in the expected fashion.</p>
#
# <p>For information about cell types and cell values, refer to the documentation of the Cell class.</p>
#
# <p>WARNING: You don't call this class yourself. You access Sheet objects via the Book object that
# was returned when you called xlrd.open_workbook("myfile.xls").</p>
class Sheet(BaseObject):
##
# Name of sheet.
name = ''
##
# Number of rows in sheet. A row index is in range(thesheet.nrows).
nrows = 0
##
# Number of columns in sheet. A column index is in range(thesheet.ncols).
ncols = 0
##
# The map from a column index to a Colinfo object. Often there is an entry
# in COLINFO records for all column indexes in range(257).
# Note that xlrd ignores the entry for the non-existent
# 257th column. On the other hand, there may be no entry for unused columns.
# <br /> -- New in version 0.6.1
colinfo_map = {}
##
# The map from a row index to a Rowinfo object. Note that it is possible
# to have missing entries -- at least one source of XLS files doesn't
# bother writing ROW records.
# <br /> -- New in version 0.6.1
rowinfo_map = {}
##
# List of address ranges of cells containing column labels.
# These are set up in Excel by Insert > Name > Labels > Columns.
# <br> -- New in version 0.6.0
# <br>How to deconstruct the list:
# <pre>
# for crange in thesheet.col_label_ranges:
# rlo, rhi, clo, chi = crange
# for rx in xrange(rlo, rhi):
# for cx in xrange(clo, chi):
# print "Column label at (rowx=%d, colx=%d) is %r" \
# (rx, cx, thesheet.cell_value(rx, cx))
# </pre>
col_label_ranges = []
##
# List of address ranges of cells containing row labels.
# For more details, see <i>col_label_ranges</i> above.
# <br> -- New in version 0.6.0
row_label_ranges = []
##
# List of address ranges of cells which have been merged.
# These are set up in Excel by Format > Cells > Alignment, then ticking
# the "Merge cells" box.
# <br> -- New in version 0.6.1
# <br>How to deconstruct the list:
# <pre>
# for crange in thesheet.merged_cells:
# rlo, rhi, clo, chi = crange
# for rowx in xrange(rlo, rhi):
# for colx in xrange(clo, chi):
# # cell (rlo, clo) (the top left one) will carry the data
# # and formatting info; the remainder will be recorded as
# # blank cells, but a renderer will apply the formatting info
# # for the top left cell (e.g. border, pattern) to all cells in
# # the range.
# </pre>
merged_cells = []
##
# Default column width from DEFCOLWIDTH record, else None.
# From the OOo docs:<br />
# """Column width in characters, using the width of the zero character
# from default font (first FONT record in the file). Excel adds some
# extra space to the default width, depending on the default font and
# default font size. The algorithm how to exactly calculate the resulting
# column width is not known.<br />
# Example: The default width of 8 set in this record results in a column
# width of 8.43 using Arial font with a size of 10 points."""<br />
# For the default hierarchy, refer to the Colinfo class above.
# <br /> -- New in version 0.6.1
defcolwidth = None
##
# Default column width from STANDARDWIDTH record, else None.
# From the OOo docs:<br />
# """Default width of the columns in 1/256 of the width of the zero
# character, using default font (first FONT record in the file)."""<br />
# For the default hierarchy, refer to the Colinfo class above.
# <br /> -- New in version 0.6.1
standardwidth = None
##
# Default value to be used for a row if there is
# no ROW record for that row.
# From the <i>optional</i> DEFAULTROWHEIGHT record.
default_row_height = None
##
# Default value to be used for a row if there is
# no ROW record for that row.
# From the <i>optional</i> DEFAULTROWHEIGHT record.
default_row_height_mismatch = None
##
# Default value to be used for a row if there is
# no ROW record for that row.
# From the <i>optional</i> DEFAULTROWHEIGHT record.
default_row_hidden = None
##
# Default value to be used for a row if there is
# no ROW record for that row.
# From the <i>optional</i> DEFAULTROWHEIGHT record.
default_additional_space_above = None
##
# Default value to be used for a row if there is
# no ROW record for that row.
# From the <i>optional</i> DEFAULTROWHEIGHT record.
default_additional_space_below = None
##
# Visibility of the sheet. 0 = visible, 1 = hidden (can be unhidden
# by user -- Format/Sheet/Unhide), 2 = "very hidden" (can be unhidden
# only by VBA macro).
visibility = 0
##
# A 256-element tuple corresponding to the contents of the GCW record for this sheet.
# If no such record, treat as all bits zero.
# Applies to BIFF4-7 only. See docs of Colinfo class for discussion.
gcw = (0, ) * 256
def __init__(self, book, position, name, number):
self.book = book
self.biff_version = book.biff_version
self._position = position
self.logfile = book.logfile
self.pickleable = book.pickleable
self.dont_use_array = not(array_array and (CAN_PICKLE_ARRAY or not book.pickleable))
self.name = name
self.number = number
self.verbosity = book.verbosity
self.formatting_info = book.formatting_info
self._xf_index_to_xl_type_map = book._xf_index_to_xl_type_map
self.nrows = 0 # actual, including possibly empty cells
self.ncols = 0
self._maxdatarowx = -1 # highest rowx containing a non-empty cell
self._maxdatacolx = -1 # highest colx containing a non-empty cell
self._dimnrows = 0 # as per DIMENSIONS record
self._dimncols = 0
self._cell_values = []
self._cell_types = []
self._cell_xf_indexes = []
self._need_fix_ragged_rows = 0
self.defcolwidth = None
self.standardwidth = None
self.default_row_height = None
self.default_row_height_mismatch = 0
self.default_row_hidden = 0
self.default_additional_space_above = 0
self.default_additional_space_below = 0
self.colinfo_map = {}
self.rowinfo_map = {}
self.col_label_ranges = []
self.row_label_ranges = []
self.merged_cells = []
self._xf_index_stats = [0, 0, 0, 0]
self.visibility = book._sheet_visibility[number] # from BOUNDSHEET record
for attr, defval in _WINDOW2_options:
setattr(self, attr, defval)
self.first_visible_rowx = 0
self.first_visible_colx = 0
self.gridline_colour_index = 0x40
self.gridline_colour_rgb = None # pre-BIFF8
self.cached_page_break_preview_mag_factor = 0
self.cached_normal_view_mag_factor = 0
#### Don't initialise this here, use class attribute initialisation.
#### self.gcw = (0, ) * 256 ####
if self.biff_version >= 80:
self.utter_max_rows = 65536
else:
self.utter_max_rows = 16384
##
# Cell object in the given row and column.
def cell(self, rowx, colx):
if self.formatting_info:
xfx = self.cell_xf_index(rowx, colx)
else:
xfx = None
return Cell(
self._cell_types[rowx][colx],
self._cell_values[rowx][colx],
xfx,
)
##
# Value of the cell in the given row and column.
def cell_value(self, rowx, colx):
return self._cell_values[rowx][colx]
##
# Type of the cell in the given row and column.
# Refer to the documentation of the Cell class.
def cell_type(self, rowx, colx):
return self._cell_types[rowx][colx]
##
# XF index of the cell in the given row and column.
# This is an index into Book.raw_xf_list and Book.computed_xf_list.
# <br /> -- New in version 0.6.1
def cell_xf_index(self, rowx, colx):
self.req_fmt_info()
xfx = self._cell_xf_indexes[rowx][colx]
if xfx > -1:
self._xf_index_stats[0] += 1
return xfx
# Check for a row xf_index
try:
xfx = self.rowinfo_map[rowx].xf_index
if xfx > -1:
self._xf_index_stats[1] += 1
return xfx
except KeyError:
pass
# Check for a column xf_index
try:
xfx = self.colinfo_map[colx].xf_index
assert xfx > -1
self._xf_index_stats[2] += 1
return xfx
except KeyError:
# If all else fails, 15 is used as hardwired global default xf_index.
self._xf_index_stats[3] += 1
return 15
##
# Returns a sequence of the Cell objects in the given row.
def row(self, rowx):
return [
self.cell(rowx, colx)
for colx in xrange(self.ncols)
]
##
# Returns a slice of the types
# of the cells in the given row.
def row_types(self, rowx, start_colx=0, end_colx=None):
if end_colx is None:
return self._cell_types[rowx][start_colx:]
return self._cell_types[rowx][start_colx:end_colx]
##
# Returns a slice of the values
# of the cells in the given row.
def row_values(self, rowx, start_colx=0, end_colx=None):
if end_colx is None:
return self._cell_values[rowx][start_colx:]
return self._cell_values[rowx][start_colx:end_colx]
##
# Returns a slice of the Cell objects in the given row.
def row_slice(self, rowx, start_colx=0, end_colx=None):
nc = self.ncols
if start_colx < 0:
start_colx += nc
if start_colx < 0:
start_colx = 0
if end_colx is None or end_colx > nc:
end_colx = nc
elif end_colx < 0:
end_colx += nc
return [
self.cell(rowx, colx)
for colx in xrange(start_colx, end_colx)
]
##
# Returns a slice of the Cell objects in the given column.
def col_slice(self, colx, start_rowx=0, end_rowx=None):
nr = self.nrows
if start_rowx < 0:
start_rowx += nr
if start_rowx < 0:
start_rowx = 0
if end_rowx is None or end_rowx > nr:
end_rowx = nr
elif end_rowx < 0:
end_rowx += nr
return [
self.cell(rowx, colx)
for rowx in xrange(start_rowx, end_rowx)
]
##
# Returns a slice of the values of the cells in the given column.
def col_values(self, colx, start_rowx=0, end_rowx=None):
nr = self.nrows
if start_rowx < 0:
start_rowx += nr
if start_rowx < 0:
start_rowx = 0
if end_rowx is None or end_rowx > nr:
end_rowx = nr
elif end_rowx < 0:
end_rowx += nr
return [
self._cell_values[rowx][colx]
for rowx in xrange(start_rowx, end_rowx)
]
##
# Returns a slice of the types of the cells in the given column.
def col_types(self, colx, start_rowx=0, end_rowx=None):
nr = self.nrows
if start_rowx < 0:
start_rowx += nr
if start_rowx < 0:
start_rowx = 0
if end_rowx is None or end_rowx > nr:
end_rowx = nr
elif end_rowx < 0:
end_rowx += nr
return [
self._cell_types[rowx][colx]
for rowx in xrange(start_rowx, end_rowx)
]
##
# Returns a sequence of the Cell objects in the given column.
def col(self, colx):
return self.col_slice(colx)
# Above two lines just for the docs. Here's the real McCoy:
col = col_slice
# === Following methods are used in building the worksheet.
# === They are not part of the API.
def extend_cells(self, nr, nc):
# print "extend_cells_2", self.nrows, self.ncols, nr, nc
assert 1 <= nc <= 256
assert 1 <= nr <= self.utter_max_rows
if nr <= self.nrows:
# New cell is in an existing row, so extend that row (if necessary).
# Note that nr < self.nrows means that the cell data
# is not in ascending row order!!
self._need_fix_ragged_rows = 1
nrx = nr - 1
trow = self._cell_types[nrx]
tlen = len(trow)
nextra = max(nc, self.ncols) - tlen
if nextra > 0:
xce = XL_CELL_EMPTY
if self.dont_use_array:
trow.extend([xce] * nextra)
if self.formatting_info:
self._cell_xf_indexes[nrx].extend([-1] * nextra)
else:
aa = array_array
trow.extend(aa('B', [xce]) * nextra)
if self.formatting_info:
self._cell_xf_indexes[nrx].extend(aa('h', [-1]) * nextra)
self._cell_values[nrx].extend([''] * nextra)
if nc > self.ncols:
self.ncols = nc
self._need_fix_ragged_rows = 1
if nr > self.nrows:
scta = self._cell_types.append
scva = self._cell_values.append
scxa = self._cell_xf_indexes.append
fmt_info = self.formatting_info
xce = XL_CELL_EMPTY
nc = self.ncols
if self.dont_use_array:
for _unused in xrange(self.nrows, nr):
scta([xce] * nc)
scva([''] * nc)
if fmt_info:
scxa([-1] * nc)
else:
aa = array_array
for _unused in xrange(self.nrows, nr):
scta(aa('B', [xce]) * nc)
scva([''] * nc)
if fmt_info:
scxa(aa('h', [-1]) * nc)
self.nrows = nr
def fix_ragged_rows(self):
t0 = time.time()
ncols = self.ncols
xce = XL_CELL_EMPTY
aa = array_array
s_cell_types = self._cell_types
s_cell_values = self._cell_values
s_cell_xf_indexes = self._cell_xf_indexes
s_dont_use_array = self.dont_use_array
s_fmt_info = self.formatting_info
totrowlen = 0
for rowx in xrange(self.nrows):
trow = s_cell_types[rowx]
rlen = len(trow)
totrowlen += rlen
nextra = ncols - rlen
if nextra > 0:
s_cell_values[rowx][rlen:] = [''] * nextra
if s_dont_use_array:
trow[rlen:] = [xce] * nextra
if s_fmt_info:
s_cell_xf_indexes[rowx][rlen:] = [-1] * nextra
else:
trow.extend(aa('B', [xce]) * nextra)
if s_fmt_info:
s_cell_xf_indexes[rowx][rlen:] = aa('h', [-1]) * nextra
self._fix_ragged_rows_time = time.time() - t0
if 0 and self.nrows:
avgrowlen = float(totrowlen) / self.nrows
print >> self.logfile, \
"sheet %d: avg row len %.1f; max row len %d" \
% (self.number, avgrowlen, self.ncols)
def tidy_dimensions(self):
if self.verbosity >= 3:
fprintf(self.logfile,
"tidy_dimensions: nrows=%d ncols=%d _need_fix_ragged_rows=%d\n",
self.nrows, self.ncols, self._need_fix_ragged_rows,
)
if 1 and self.merged_cells:
nr = nc = 0
umaxrows = self.utter_max_rows
for crange in self.merged_cells:
rlo, rhi, clo, chi = crange
if not (0 <= rlo < rhi <= umaxrows) \
or not (0 <= clo < chi <= 256):
fprintf(self.logfile,
"*** WARNING: sheet #%d (%r), MERGEDCELLS bad range %r\n",
self.number, self.name, crange)
if rhi > nr: nr = rhi
if chi > nc: nc = chi
self.extend_cells(nr, nc)
if self.verbosity >= 1 \
and (self.nrows != self._dimnrows or self.ncols != self._dimncols):
fprintf(self.logfile,
"NOTE *** sheet %d (%r): DIMENSIONS R,C = %d,%d should be %d,%d\n",
self.number,
self.name,
self._dimnrows,
self._dimncols,
self.nrows,
self.ncols,
)
if self._need_fix_ragged_rows:
self.fix_ragged_rows()
def put_cell(self, rowx, colx, ctype, value, xf_index):
try:
self._cell_types[rowx][colx] = ctype
self._cell_values[rowx][colx] = value
if self.formatting_info:
self._cell_xf_indexes[rowx][colx] = xf_index
except IndexError:
# print >> self.logfile, "put_cell extending", rowx, colx
self.extend_cells(rowx+1, colx+1)
try:
self._cell_types[rowx][colx] = ctype
self._cell_values[rowx][colx] = value
if self.formatting_info:
self._cell_xf_indexes[rowx][colx] = xf_index
except:
print >> self.logfile, "put_cell", rowx, colx
raise
except:
print >> self.logfile, "put_cell", rowx, colx
raise
def put_blank_cell(self, rowx, colx, xf_index):
# This is used for cells from BLANK and MULBLANK records
ctype = XL_CELL_BLANK
value = ''
try:
self._cell_types[rowx][colx] = ctype
self._cell_values[rowx][colx] = value
self._cell_xf_indexes[rowx][colx] = xf_index
except IndexError:
# print >> self.logfile, "put_cell extending", rowx, colx
self.extend_cells(rowx+1, colx+1)
try:
self._cell_types[rowx][colx] = ctype
self._cell_values[rowx][colx] = value
self._cell_xf_indexes[rowx][colx] = xf_index
except:
print >> self.logfile, "put_cell", rowx, colx
raise
except:
print >> self.logfile, "put_cell", rowx, colx
raise
def put_number_cell(self, rowx, colx, value, xf_index):
ctype = self._xf_index_to_xl_type_map[xf_index]
try:
self._cell_types[rowx][colx] = ctype
self._cell_values[rowx][colx] = value
if self.formatting_info:
self._cell_xf_indexes[rowx][colx] = xf_index
except IndexError:
# print >> self.logfile, "put_number_cell extending", rowx, colx
self.extend_cells(rowx+1, colx+1)
try:
self._cell_types[rowx][colx] = ctype
self._cell_values[rowx][colx] = value
if self.formatting_info:
self._cell_xf_indexes[rowx][colx] = xf_index
except:
print >> self.logfile, "put_number_cell", rowx, colx
raise
except:
print >> self.logfile, "put_number_cell", rowx, colx
raise
# === Methods after this line neither know nor care about how cells are stored.
def read(self, bk):
global rc_stats
DEBUG = 0
blah = DEBUG or self.verbosity >= 2
blah_rows = DEBUG or self.verbosity >= 4
blah_formulas = 0 and blah
oldpos = bk._position
bk.position(self._position)
XL_SHRFMLA_ETC_ETC = (
XL_SHRFMLA, XL_ARRAY, XL_TABLEOP, XL_TABLEOP2,
XL_ARRAY2, XL_TABLEOP_B2,
)
self_put_number_cell = self.put_number_cell
self_put_cell = self.put_cell
self_put_blank_cell = self.put_blank_cell
local_unpack = unpack
bk_get_record_parts = bk.get_record_parts
bv = self.biff_version
fmt_info = self.formatting_info
eof_found = 0
while 1:
# if DEBUG: print "SHEET.READ: about to read from position %d" % bk._position
rc, data_len, data = bk_get_record_parts()
# if rc in rc_stats:
# rc_stats[rc] += 1
# else:
# rc_stats[rc] = 1
# if DEBUG: print "SHEET.READ: op 0x%04x, %d bytes %r" % (rc, data_len, data)
if rc == XL_NUMBER:
rowx, colx, xf_index, d = local_unpack('<HHHd', data)
# if xf_index == 0:
# fprintf(self.logfile,
# "NUMBER: r=%d c=%d xfx=%d %f\n", rowx, colx, xf_index, d)
self_put_number_cell(rowx, colx, d, xf_index)
elif rc == XL_LABELSST:
rowx, colx, xf_index, sstindex = local_unpack('<HHHi', data)
# print "LABELSST", rowx, colx, sstindex, bk._sharedstrings[sstindex]
self_put_cell(rowx, colx, XL_CELL_TEXT, bk._sharedstrings[sstindex], xf_index)
elif rc == XL_LABEL or rc == XL_RSTRING:
# RSTRING has extra richtext info at the end, but we ignore it.
rowx, colx, xf_index = local_unpack('<HHH', data[0:6])
if bv < BIFF_FIRST_UNICODE:
strg = unpack_string(data, 6, bk.encoding, lenlen=2)
else:
strg = unpack_unicode(data, 6, lenlen=2)
self_put_cell(rowx, colx, XL_CELL_TEXT, strg, xf_index)
elif rc == XL_RK:
rowx, colx, xf_index = local_unpack('<HHH', data[:6])
d = unpack_RK(data[6:10])
self_put_number_cell(rowx, colx, d, xf_index)
elif rc == XL_MULRK:
mulrk_row, mulrk_first = local_unpack('<HH', data[0:4])
mulrk_last, = local_unpack('<H', data[-2:])
pos = 4
for colx in xrange(mulrk_first, mulrk_last+1):
xf_index, = local_unpack('<H', data[pos:pos+2])
d = unpack_RK(data[pos+2:pos+6])
pos += 6
self_put_number_cell(mulrk_row, colx, d, xf_index)
elif rc == XL_ROW:
# Version 0.6.0a3: ROW records are just not worth using (for memory allocation).
# Version 0.6.1: now used for formatting info.
if not fmt_info: continue
rowx, bits1, bits2 = local_unpack('<H4xH4xi', data[0:16])
if not(0 <= rowx < self.utter_max_rows):
print >> self.logfile, \
"*** NOTE: ROW record has row index %d; " \
"should have 0 <= rowx < %d -- record ignored!" \
% (rowx, self.utter_max_rows)
continue
r = Rowinfo()
# Using upkbits() is far too slow on a file
# with 30 sheets each with 10K rows :-(
# upkbits(r, bits1, (
# ( 0, 0x7FFF, 'height'),
# (15, 0x8000, 'has_default_height'),
# ))
# upkbits(r, bits2, (
# ( 0, 0x00000007, 'outline_level'),
# ( 4, 0x00000010, 'outline_group_starts_ends'),
# ( 5, 0x00000020, 'hidden'),
# ( 6, 0x00000040, 'height_mismatch'),
# ( 7, 0x00000080, 'has_default_xf_index'),
# (16, 0x0FFF0000, 'xf_index'),
# (28, 0x10000000, 'additional_space_above'),
# (29, 0x20000000, 'additional_space_below'),
# ))
# So:
r.height = bits1 & 0x7fff
r.has_default_height = (bits1 >> 15) & 1
r.outline_level = bits2 & 7
r.outline_group_starts_ends = (bits2 >> 4) & 1
r.hidden = (bits2 >> 5) & 1
r.height_mismatch = (bits2 >> 6) & 1
r.has_default_xf_index = (bits2 >> 7) & 1
r.xf_index = (bits2 >> 16) & 0xfff
r.additional_space_above = (bits2 >> 28) & 1
r.additional_space_below = (bits2 >> 29) & 1
if not r.has_default_xf_index:
r.xf_index = -1
self.rowinfo_map[rowx] = r
if 0 and r.xf_index > -1:
fprintf(self.logfile,
"**ROW %d %d %d\n",
self.number, rowx, r.xf_index)
if blah_rows:
print >> self.logfile, 'ROW', rowx, bits1, bits2
r.dump(self.logfile,
header="--- sh #%d, rowx=%d ---" % (self.number, rowx))
elif rc & 0xff == XL_FORMULA: # 06, 0206, 0406
# DEBUG = 1
# if DEBUG: print "FORMULA: rc: 0x%04x data: %r" % (rc, data)
rowx, colx, xf_index, flags = local_unpack('<HHHxxxxxxxxH', data[0:16])
if blah_formulas: # testing formula dumper
fprintf(self.logfile, "FORMULA: rowx=%d colx=%d\n", rowx, colx)
fmlalen = local_unpack("<H", data[20:22])[0]
decompile_formula(bk, data[22:], fmlalen,
reldelta=0, browx=rowx, bcolx=colx, blah=1)
if data[12] == '\xff' and data[13] == '\xff':
if data[6] == '\x00':
# need to read next record (STRING)
gotstring = 0
# if flags & 8:
if 1: # "flags & 8" applies only to SHRFMLA
# actually there's an optional SHRFMLA or ARRAY etc record to skip over
rc2, data2_len, data2 = bk.get_record_parts()
if rc2 == XL_STRING:
gotstring = 1
elif rc2 == XL_ARRAY:
row1x, rownx, col1x, colnx, array_flags, tokslen = \
local_unpack("<HHBBBxxxxxH", data2[:14])
if blah_formulas:
fprintf(self.logfile, "ARRAY: %d %d %d %d %d\n",
row1x, rownx, col1x, colnx, array_flags)
dump_formula(bk, data2[14:], tokslen, bv, reldelta=0, blah=1)
elif rc2 == XL_SHRFMLA:
row1x, rownx, col1x, colnx, nfmlas, tokslen = \
local_unpack("<HHBBxBH", data2[:10])
if blah_formulas:
fprintf(self.logfile, "SHRFMLA (sub): %d %d %d %d %d\n",
row1x, rownx, col1x, colnx, nfmlas)
decompile_formula(bk, data2[10:], tokslen, reldelta=1, blah=1)
elif rc2 not in XL_SHRFMLA_ETC_ETC:
raise XLRDError(
"Expected SHRFMLA, ARRAY, TABLEOP* or STRING record; found 0x%04x" % rc2)
# if DEBUG: print "gotstring:", gotstring
# now for the STRING record
if not gotstring:
rc2, _unused_len, data2 = bk.get_record_parts()
if rc2 != XL_STRING: raise XLRDError("Expected STRING record; found 0x%04x" % rc2)
# if DEBUG: print "STRING: data=%r BIFF=%d cp=%d" % (data2, self.biff_version, bk.encoding)
if self.biff_version < BIFF_FIRST_UNICODE:
strg = unpack_string(data2, 0, bk.encoding, lenlen=2)
else:
strg = unpack_unicode(data2, 0, lenlen=2)
self.put_cell(rowx, colx, XL_CELL_TEXT, strg, xf_index)
# if DEBUG: print "FORMULA strg %r" % strg
elif data[6] == '\x01':
# boolean formula result
value = ord(data[8])
self.put_cell(rowx, colx, XL_CELL_BOOLEAN, value, xf_index)
elif data[6] == '\x02':
# Error in cell
value = ord(data[8])
self.put_cell(rowx, colx, XL_CELL_ERROR, value, xf_index)
elif data[6] == '\x03':
# empty ... i.e. empty (zero-length) string, NOT an empty cell.
self.put_cell(rowx, colx, XL_CELL_TEXT, u"", xf_index)
else:
raise XLRDError("unexpected special case (0x%02x) in FORMULA" % ord(data[6]))
else:
# it is a number
d = local_unpack('<d', data[6:14])[0]
self_put_number_cell(rowx, colx, d, xf_index)
elif rc == XL_BOOLERR:
rowx, colx, xf_index, value, is_err = local_unpack('<HHHBB', data[:8])
# Note OOo Calc 2.0 writes 9-byte BOOLERR records.
# OOo docs say 8. Excel writes 8.
cellty = (XL_CELL_BOOLEAN, XL_CELL_ERROR)[is_err]
# if DEBUG: print "XL_BOOLERR", rowx, colx, xf_index, value, is_err
self.put_cell(rowx, colx, cellty, value, xf_index)
elif rc == XL_COLINFO:
if not fmt_info: continue
c = Colinfo()
first_colx, last_colx, c.width, c.xf_index, flags \
= local_unpack("<HHHHH", data[:10])
#### Colinfo.width is denominated in 256ths of a character,
#### *not* in characters.
if not(0 <= first_colx <= last_colx <= 256):
# Note: 256 instead of 255 is a common mistake.
# We silently ignore the non-existing 257th column in that case.
print >> self.logfile, \
"*** NOTE: COLINFO record has first col index %d, last %d; " \
"should have 0 <= first <= last <= 255 -- record ignored!" \
% (first_colx, last_colx)
del c
continue
upkbits(c, flags, (
( 0, 0x0001, 'hidden'),
( 1, 0x0002, 'bit1_flag'),
# *ALL* colinfos created by Excel in "default" cases are 0x0002!!
# Maybe it's "locked" by analogy with XFProtection data.
( 8, 0x0700, 'outline_level'),
(12, 0x1000, 'collapsed'),
))
for colx in xrange(first_colx, last_colx+1):
if colx > 255: break # Excel does 0 to 256 inclusive
self.colinfo_map[colx] = c
if 0:
fprintf(self.logfile,
"**COL %d %d %d\n",
self.number, colx, c.xf_index)
if blah:
fprintf(
self.logfile,
"COLINFO sheet #%d cols %d-%d: wid=%d xf_index=%d flags=0x%04x\n",
self.number, first_colx, last_colx, c.width, c.xf_index, flags,
)
c.dump(self.logfile, header='===')
elif rc == XL_DEFCOLWIDTH:
self.defcolwidth, = local_unpack("<H", data[:2])
if 0: print >> self.logfile, 'DEFCOLWIDTH', self.defcolwidth
elif rc == XL_STANDARDWIDTH:
if data_len != 2:
print >> self.logfile, '*** ERROR *** STANDARDWIDTH', data_len, repr(data)
self.standardwidth, = local_unpack("<H", data[:2])
if 0: print >> self.logfile, 'STANDARDWIDTH', self.standardwidth
elif rc == XL_GCW:
if not fmt_info: continue # useless w/o COLINFO
assert data_len == 34
assert data[0:2] == "\x20\x00"
iguff = unpack("<8i", data[2:34])
gcw = []
for bits in iguff:
for j in xrange(32):
gcw.append(bits & 1)
bits >>= 1
self.gcw = tuple(gcw)
if 0:
showgcw = "".join(map(lambda x: "F "[x], gcw)).rstrip().replace(' ', '.')
print "GCW:", showgcw
elif rc == XL_BLANK:
if not fmt_info: continue
rowx, colx, xf_index = local_unpack('<HHH', data[:6])
if 0: print >> self.logfile, "BLANK", rowx, colx, xf_index
self_put_blank_cell(rowx, colx, xf_index)
elif rc == XL_MULBLANK: # 00BE
if not fmt_info: continue
mul_row, mul_first = local_unpack('<HH', data[0:4])
mul_last, = local_unpack('<H', data[-2:])
if 0:
print >> self.logfile, "MULBLANK", mul_row, mul_first, mul_last
pos = 4
for colx in xrange(mul_first, mul_last+1):
xf_index, = local_unpack('<H', data[pos:pos+2])
pos += 2
self_put_blank_cell(mul_row, colx, xf_index)
elif rc == XL_DIMENSION or rc == XL_DIMENSION2:
# if data_len == 10:
# Was crashing on BIFF 4.0 file w/o the two trailing unused bytes.
# Reported by Ralph Heimburger.
if bv < 80:
dim_tuple = local_unpack('<HxxH', data[2:8])
else:
dim_tuple = local_unpack('<ixxH', data[4:12])
self.nrows, self.ncols = 0, 0
self._dimnrows, self._dimncols = dim_tuple
if not self.book._xf_epilogue_done:
# Needed for bv <= 40
self.book.xf_epilogue()
if blah:
fprintf(self.logfile,
"sheet %d(%r) DIMENSIONS: ncols=%d nrows=%d\n",
self.number, self.name, self._dimncols, self._dimnrows
)
elif rc == XL_EOF:
DEBUG = 0
if DEBUG: print >> self.logfile, "SHEET.READ: EOF"
eof_found = 1
break
elif rc == XL_OBJ:
bk.handle_obj(data)
elif rc in bofcodes: ##### EMBEDDED BOF #####
version, boftype = local_unpack('<HH', data[0:4])
if boftype != 0x20: # embedded chart
print >> self.logfile, \
"*** Unexpected embedded BOF (0x%04x) at offset %d: version=0x%04x type=0x%04x" \
% (rc, bk._position - data_len - 4, version, boftype)
while 1:
code, data_len, data = bk.get_record_parts()
if code == XL_EOF:
break
if DEBUG: print >> self.logfile, "---> found EOF"
elif rc == XL_COUNTRY:
bk.handle_country(data)
elif rc == XL_LABELRANGES:
pos = 0
pos = unpack_cell_range_address_list_update_pos(
self.row_label_ranges, data, pos, bv, addr_size=8,
)
pos = unpack_cell_range_address_list_update_pos(
self.col_label_ranges, data, pos, bv, addr_size=8,
)
assert pos == data_len
elif rc == XL_ARRAY:
row1x, rownx, col1x, colnx, array_flags, tokslen = \
local_unpack("<HHBBBxxxxxH", data[:14])
if blah_formulas:
print "ARRAY:", row1x, rownx, col1x, colnx, array_flags
dump_formula(bk, data[14:], tokslen, bv, reldelta=0, blah=1)
elif rc == XL_SHRFMLA:
row1x, rownx, col1x, colnx, nfmlas, tokslen = \
local_unpack("<HHBBxBH", data[:10])
if blah_formulas:
print "SHRFMLA (main):", row1x, rownx, col1x, colnx, nfmlas
decompile_formula(bk, data[10:], tokslen, reldelta=0, blah=1)
elif rc == XL_CONDFMT:
if not fmt_info: continue
assert bv >= 80
num_CFs, needs_recalc, browx1, browx2, bcolx1, bcolx2 = \
unpack("<6H", data[0:12])
if self.verbosity >= 1:
fprintf(self.logfile,
"\n*** WARNING: Ignoring CONDFMT (conditional formatting) record\n" \
"*** in Sheet %d (%r).\n" \
"*** %d CF record(s); needs_recalc_or_redraw = %d\n" \
"*** Bounding box is %s\n",
self.number, self.name, num_CFs, needs_recalc,
rangename2d(browx1, browx2+1, bcolx1, bcolx2+1),
)
olist = [] # updated by the function
pos = unpack_cell_range_address_list_update_pos(
olist, data, 12, bv, addr_size=8)
# print >> self.logfile, repr(result), len(result)
if self.verbosity >= 1:
fprintf(self.logfile,
"*** %d individual range(s):\n" \
"*** %s\n",
len(olist),
", ".join([rangename2d(*coords) for coords in olist]),
)
elif rc == XL_CF:
if not fmt_info: continue
cf_type, cmp_op, sz1, sz2, flags = unpack("<BBHHi", data[0:10])
font_block = (flags >> 26) & 1
bord_block = (flags >> 28) & 1
patt_block = (flags >> 29) & 1
if self.verbosity >= 1:
fprintf(self.logfile,
"\n*** WARNING: Ignoring CF (conditional formatting) sub-record.\n" \
"*** cf_type=%d, cmp_op=%d, sz1=%d, sz2=%d, flags=0x%08x\n" \
"*** optional data blocks: font=%d, border=%d, pattern=%d\n",
cf_type, cmp_op, sz1, sz2, flags,
font_block, bord_block, patt_block,
)
# hex_char_dump(data, 0, data_len)
pos = 12
if font_block:
(font_height, font_options, weight, escapement, underline,
font_colour_index, two_bits, font_esc, font_underl) = \
unpack("<64x i i H H B 3x i 4x i i i 18x", data[pos:pos+118])
font_style = (two_bits > 1) & 1
posture = (font_options > 1) & 1
font_canc = (two_bits > 7) & 1
cancellation = (font_options > 7) & 1
if self.verbosity >= 1:
fprintf(self.logfile,
"*** Font info: height=%d, weight=%d, escapement=%d,\n" \
"*** underline=%d, colour_index=%d, esc=%d, underl=%d,\n" \
"*** style=%d, posture=%d, canc=%d, cancellation=%d\n",
font_height, weight, escapement, underline,
font_colour_index, font_esc, font_underl,
font_style, posture, font_canc, cancellation,
)
pos += 118
if bord_block:
pos += 8
if patt_block:
pos += 4
fmla1 = data[pos:pos+sz1]
pos += sz1
if blah and sz1:
fprintf(self.logfile,
"*** formula 1:\n",
)
dump_formula(bk, fmla1, sz1, bv, reldelta=0, blah=1)
fmla2 = data[pos:pos+sz2]
pos += sz2
assert pos == data_len
if blah and sz2:
fprintf(self.logfile,
"*** formula 2:\n",
)
dump_formula(bk, fmla2, sz2, bv, reldelta=0, blah=1)
elif rc == XL_DEFAULTROWHEIGHT:
if data_len == 4:
bits, self.default_row_height = unpack("<HH", data[:4])
elif data_len == 2:
self.default_row_height, = unpack("<H", data)
bits = 0
fprintf(self.logfile,
"*** WARNING: DEFAULTROWHEIGHT record len is 2, " \
"should be 4; assuming BIFF2 format\n")
else:
bits = 0
fprintf(self.logfile,
"*** WARNING: DEFAULTROWHEIGHT record len is %d, " \
"should be 4; ignoring this record\n",
data_len)
self.default_row_height_mismatch = bits & 1
self.default_row_hidden = (bits >> 1) & 1
self.default_additional_space_above = (bits >> 2) & 1
self.default_additional_space_below = (bits >> 3) & 1
elif rc == XL_MERGEDCELLS:
if not fmt_info: continue
pos = unpack_cell_range_address_list_update_pos(
self.merged_cells, data, 0, bv, addr_size=8)
if blah:
fprintf(self.logfile,
"MERGEDCELLS: %d ranges\n", int_floor_div(pos - 2, 8))
assert pos == data_len, \
"MERGEDCELLS: pos=%d data_len=%d" % (pos, data_len)
elif rc == XL_WINDOW2:
if bv >= 80:
(options,
self.first_visible_rowx, self.first_visible_colx,
self.gridline_colour_index,
self.cached_page_break_preview_mag_factor,
self.cached_normal_view_mag_factor
) = unpack("<HHHHxxHH", data[:14])
else: # BIFF3-7
(options,
self.first_visible_rowx, self.first_visible_colx,
) = unpack("<HHH", data[:6])
self.gridline_colour_rgb = unpack("<BBB", data[6:9])
self.gridline_colour_index = \
nearest_colour_index(
self.book.colour_map,
self.gridline_colour_rgb,
debug=0)
self.cached_page_break_preview_mag_factor = 0 # default (60%)
self.cached_normal_view_mag_factor = 0 # default (100%)
# options -- Bit, Mask, Contents:
# 0 0001H 0 = Show formula results 1 = Show formulas
# 1 0002H 0 = Do not show grid lines 1 = Show grid lines
# 2 0004H 0 = Do not show sheet headers 1 = Show sheet headers
# 3 0008H 0 = Panes are not frozen 1 = Panes are frozen (freeze)
# 4 0010H 0 = Show zero values as empty cells 1 = Show zero values
# 5 0020H 0 = Manual grid line colour 1 = Automatic grid line colour
# 6 0040H 0 = Columns from left to right 1 = Columns from right to left
# 7 0080H 0 = Do not show outline symbols 1 = Show outline symbols
# 8 0100H 0 = Keep splits if pane freeze is removed 1 = Remove splits if pane freeze is removed
# 9 0200H 0 = Sheet not selected 1 = Sheet selected (BIFF5-BIFF8)
# 10 0400H 0 = Sheet not visible 1 = Sheet visible (BIFF5-BIFF8)
# 11 0800H 0 = Show in normal view 1 = Show in page break preview (BIFF8)
# The freeze flag specifies, if a following PANE record (6.71) describes unfrozen or frozen panes.
for attr, _unused_defval in _WINDOW2_options:
setattr(self, attr, options & 1)
options >>= 1
# print "WINDOW2: visible=%d selected=%d" \
# % (self.sheet_visible, self.sheet_selected)
#### all of the following are for BIFF <= 4W
elif bv <= 45:
if rc == XL_FORMAT or rc == XL_FORMAT2:
bk.handle_format(data)
elif rc == XL_FONT or rc == XL_FONT_B3B4:
bk.handle_font(data)
elif rc == XL_STYLE:
bk.handle_style(data)
elif rc == XL_PALETTE:
bk.handle_palette(data)
elif rc == XL_BUILTINFMTCOUNT:
bk.handle_builtinfmtcount(data)
elif rc == XL_XF4 or rc == XL_XF3: #### N.B. not XL_XF
bk.handle_xf(data)
elif rc == XL_DATEMODE:
bk.handle_datemode(data)
elif rc == XL_CODEPAGE:
bk.handle_codepage(data)
elif rc == XL_FILEPASS:
bk.handle_filepass(data)
elif rc == XL_WRITEACCESS:
bk.handle_writeaccess(data)
else:
# if DEBUG: print "SHEET.READ: Unhandled record type %02x %d bytes %r" % (rc, data_len, data)
pass
if not eof_found:
raise XLRDError("Sheet %d (%r) missing EOF record" \
% (self.number, self.name))
self.tidy_dimensions()
bk.position(oldpos)
return 1
def req_fmt_info(self):
if not self.formatting_info:
raise XLRDError("Feature requires open_workbook(..., formatting_info=True)")
##
# Determine column display width.
# <br /> -- New in version 0.6.1
# <br />
# @param colx Index of the queried column, range 0 to 255.
# Note that it is possible to find out the width that will be used to display
# columns with no cell information e.g. column IV (colx=255).
# @return The column width that will be used for displaying
# the given column by Excel, in units of 1/256th of the width of a
# standard character (the digit zero in the first font).
def computed_column_width(self, colx):
self.req_fmt_info()
if self.biff_version >= 80:
colinfo = self.colinfo_map.get(colx, None)
if colinfo is not None:
return colinfo.width
if self.standardwidth is not None:
return self.standardwidth
elif self.biff_version >= 40:
if self.gcw[colx]:
if self.standardwidth is not None:
return self.standardwidth
else:
colinfo = self.colinfo_map.get(colx, None)
if colinfo is not None:
return colinfo.width
elif self.biff_version == 30:
colinfo = self.colinfo_map.get(colx, None)
if colinfo is not None:
return colinfo.width
# All roads lead to Rome and the DEFCOLWIDTH ...
if self.defcolwidth is not None:
return self.defcolwidth * 256
return 8 * 256 # 8 is what Excel puts in a DEFCOLWIDTH record
# === helpers ===
def unpack_RK(rk_str):
flags = ord(rk_str[0])
if flags & 2:
# There's a SIGNED 30-bit integer in there!
i, = unpack('<i', rk_str)
i >>= 2 # div by 4 to drop the 2 flag bits
if flags & 1:
return i / 100.0
return float(i)
else:
# It's the most significant 30 bits of an IEEE 754 64-bit FP number
d, = unpack('<d', '\0\0\0\0' + chr(flags & 252) + rk_str[1:4])
if flags & 1:
return d / 100.0
return d
##### =============== Cell ======================================== #####
cellty_from_fmtty = {
FNU: XL_CELL_NUMBER,
FUN: XL_CELL_NUMBER,
FGE: XL_CELL_NUMBER,
FDT: XL_CELL_DATE,
FTX: XL_CELL_NUMBER, # Yes, a number can be formatted as text.
}
ctype_text = {
XL_CELL_EMPTY: 'empty',
XL_CELL_TEXT: 'text',
XL_CELL_NUMBER: 'number',
XL_CELL_DATE: 'xldate',
XL_CELL_BOOLEAN: 'bool',
XL_CELL_ERROR: 'error',
}
##
# <p>Contains the data for one cell.</p>
#
# <p>WARNING: You don't call this class yourself. You access Cell objects
# via methods of the Sheet object(s) that you found in the Book object that
# was returned when you called xlrd.open_workbook("myfile.xls").</p>
# <p> Cell objects have three attributes: <i>ctype</i> is an int, <i>value</i>
# (which depends on <i>ctype</i>) and <i>xf_index</i>.
# If "formatting_info" is not enabled when the workbook is opened, xf_index will be None.
# The following table describes the types of cells and how their values
# are represented in Python.</p>
#
# <table border="1" cellpadding="7">
# <tr>
# <th>Type symbol</th>
# <th>Type number</th>
# <th>Python value</th>
# </tr>
# <tr>
# <td>XL_CELL_EMPTY</td>
# <td align="center">0</td>
# <td>empty string u''</td>
# </tr>
# <tr>
# <td>XL_CELL_TEXT</td>
# <td align="center">1</td>
# <td>a Unicode string</td>
# </tr>
# <tr>
# <td>XL_CELL_NUMBER</td>
# <td align="center">2</td>
# <td>float</td>
# </tr>
# <tr>
# <td>XL_CELL_DATE</td>
# <td align="center">3</td>
# <td>float</td>
# </tr>
# <tr>
# <td>XL_CELL_BOOLEAN</td>
# <td align="center">4</td>
# <td>int; 1 means TRUE, 0 means FALSE</td>
# </tr>
# <tr>
# <td>XL_CELL_ERROR</td>
# <td align="center">5</td>
# <td>int representing internal Excel codes; for a text representation,
# refer to the supplied dictionary error_text_from_code</td>
# </tr>
# <tr>
# <td>XL_CELL_BLANK</td>
# <td align="center">6</td>
# <td>empty string u''. Note: this type will appear only when
# open_workbook(..., formatting_info=True) is used.</td>
# </tr>
# </table>
#<p></p>
class Cell(BaseObject):
__slots__ = ['ctype', 'value', 'xf_index']
def __init__(self, ctype, value, xf_index=None):
self.ctype = ctype
self.value = value
self.xf_index = xf_index
def __repr__(self):
if self.xf_index is None:
return "%s:%r" % (ctype_text[self.ctype], self.value)
else:
return "%s:%r (XF:%r)" % (ctype_text[self.ctype], self.value, self.xf_index)
##
# There is one and only one instance of an empty cell -- it's a singleton. This is it.
# You may use a test like "acell is empty_cell".
empty_cell = Cell(XL_CELL_EMPTY, '')
##### =============== Colinfo and Rowinfo ============================== #####
##
# Width and default formatting information that applies to one or
# more columns in a sheet. Derived from COLINFO records.
#
# <p> Here is the default hierarchy for width, according to the OOo docs:
#
# <br />"""In BIFF3, if a COLINFO record is missing for a column,
# the width specified in the record DEFCOLWIDTH is used instead.
#
# <br />In BIFF4-BIFF7, the width set in this [COLINFO] record is only used,
# if the corresponding bit for this column is cleared in the GCW
# record, otherwise the column width set in the DEFCOLWIDTH record
# is used (the STANDARDWIDTH record is always ignored in this case [see footnote!]).
#
# <br />In BIFF8, if a COLINFO record is missing for a column,
# the width specified in the record STANDARDWIDTH is used.
# If this [STANDARDWIDTH] record is also missing,
# the column width of the record DEFCOLWIDTH is used instead."""
# <br />
#
# Footnote: The docs on the GCW record say this:
# """<br />
# If a bit is set, the corresponding column uses the width set in the STANDARDWIDTH
# record. If a bit is cleared, the corresponding column uses the width set in the
# COLINFO record for this column.
# <br />If a bit is set, and the worksheet does not contain the STANDARDWIDTH record, or if
# the bit is cleared, and the worksheet does not contain the COLINFO record, the DEFCOLWIDTH
# record of the worksheet will be used instead.
# <br />"""<br />
# At the moment (2007-01-17) xlrd is going with the GCW version of the story.
# Reference to the source may be useful: see the computed_column_width(colx) method
# of the Sheet class.
# <br />-- New in version 0.6.1
# </p>
class Colinfo(BaseObject):
##
# Width of the column in 1/256 of the width of the zero character,
# using default font (first FONT record in the file).
width = 0
##
# XF index to be used for formatting empty cells.
xf_index = -1
##
# 1 = column is hidden
hidden = 0
##
# Value of a 1-bit flag whose purpose is unknown
# but is often seen set to 1
bit1_flag = 0
##
# Outline level of the column, in range(7).
# (0 = no outline)
outline_level = 0
##
# 1 = column is collapsed
collapsed = 0
##
# Height and default formatting information that applies to a row in a sheet.
# Derived from ROW records.
# <br /> -- New in version 0.6.1
class Rowinfo(BaseObject):
##
# Height of the row, in twips. One twip == 1/20 of a point
height = 0
##
# 0 = Row has custom height; 1 = Row has default height
has_default_height = 0
##
# Outline level of the row
outline_level = 0
##
# 1 = Outline group starts or ends here (depending on where the
# outline buttons are located, see WSBOOL record [TODO ??]),
# <i>and</i> is collapsed
outline_group_starts_ends = 0
##
# 1 = Row is hidden (manually, or by a filter or outline group)
hidden = 0
##
# 1 = Row height and default font height do not match
height_mismatch = 0
##
# 1 = the xf_index attribute is usable; 0 = ignore it
has_default_xf_index = 0
##
# Index to default XF record for empty cells in this row.
# Don't use this if has_default_xf_index == 0.
xf_index = -9999
##
# This flag is set, if the upper border of at least one cell in this row
# or if the lower border of at least one cell in the row above is
# formatted with a thick line style. Thin and medium line styles are not
# taken into account.
additional_space_above = 0
##
# This flag is set, if the lower border of at least one cell in this row
# or if the upper border of at least one cell in the row below is
# formatted with a medium or thick line style. Thin line styles are not
# taken into account.
additional_space_below = 0
| mit | 5,134,417,725,366,508,000 | 41.399549 | 134 | 0.507622 | false | 3.681016 | false | false | false |
almey/policycompass-services | apps/referencepool/models.py | 1 | 2884 | from django.db import models
class PolicyDomain(models.Model):
title = models.CharField(max_length=100, unique=True)
description = models.TextField()
class Meta:
ordering = ['title']
verbose_name = "Policy Domain"
verbose_name_plural = "Policy Domains"
def __str__(self):
return self.title
class Language(models.Model):
code = models.CharField(max_length=2, unique=True)
title = models.CharField(max_length=100, unique=True)
class Meta:
verbose_name = "Language"
verbose_name_plural = "Languages"
def __str__(self):
return self.title
class ExternalResource(models.Model):
title = models.CharField(max_length=100, unique=True)
url = models.URLField()
api_url = models.URLField()
class Meta:
verbose_name = "External Resource"
verbose_name_plural = "External Resources"
def __str__(self):
return self.title
class UnitCategory(models.Model):
title = models.CharField(max_length=100, unique=True)
identifier = models.CharField(max_length=100, unique=True)
class Meta:
verbose_name = "Unit Category"
verbose_name_plural = "Unit Categories"
def __str__(self):
return self.title
class Unit(models.Model):
title = models.CharField(max_length=50, unique=True)
description = models.TextField()
unit_category = models.ForeignKey(UnitCategory)
identifier = models.CharField(max_length=100, unique=True)
class Meta:
verbose_name = "Unit"
verbose_name_plural = "Units"
def __str__(self):
return self.title
class DateFormat(models.Model):
"""
Holds different formats for dates
"""
# Based on https://docs.python.org/2/library/datetime.html#strftime-strptime-behavior
format = models.CharField(max_length=50, unique=True)
example = models.CharField(max_length=50)
# Based on http://en.wikipedia.org/wiki/Date_format_by_country
symbol = models.CharField(max_length=50)
class Meta:
verbose_name = "Date Format"
verbose_name_plural = "Date Formats"
def __str__(self):
return self.example
class DataClass(models.Model):
"""
Refers to a Policy Compass Class
"""
title = models.CharField(max_length=100, unique=True)
description = models.TextField()
code_type = models.CharField(max_length=30, blank=True)
class Meta:
verbose_name = "Class"
verbose_name_plural = "Classes"
def __str__(self):
return self.title
class Individual(models.Model):
title = models.CharField(max_length=100)
code = models.CharField(max_length=30, blank=True)
data_class = models.ForeignKey(DataClass)
class Meta:
verbose_name = "Individual"
verbose_name_plural = "Individuals"
def __str__(self):
return self.title
| agpl-3.0 | 1,012,833,540,433,283,100 | 24.522124 | 89 | 0.650139 | false | 3.835106 | false | false | false |
nash-x/hws | neutron/plugins/ml2/drivers/cascading/cascaded_driver.py | 1 | 10365 | # Copyright (c) 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
from oslo.config import cfg
from neutron.common import constants as const
from neutron.openstack.common import log as logging
from neutron.plugins.ml2 import driver_api as api
from neutronclient.common import exceptions
from neutron.openstack.common import local
from neutron.openstack.common import context
from neutron import context as n_context
from neutron.openstack.common import importutils
from neutron.openstack.common import excutils
from neutron.plugins.l2_proxy.agent import neutron_keystoneclient as hkc
LOG = logging.getLogger(__name__)
try:
from neutronclient.v2_0 import client as neutronclient
except ImportError:
neutronclient = None
LOG.info('neutronclient not available')
CASCADING = 'cascading'
class RequestContext(context.RequestContext):
"""
Stores information about the security context under which the user
accesses the system, as well as additional request information.
"""
def __init__(self, auth_token=None, username=None, password=None,
aws_creds=None, tenant=None,
tenant_id=None, auth_url=None, roles=None, is_admin=False,
insecure=True,region_name=None, read_only=False,
show_deleted=False,owner_is_tenant=True, overwrite=True,
trust_id=None, trustor_user_id=None,
**kwargs):
"""
:param overwrite: Set to False to ensure that the greenthread local
copy of the index is not overwritten.
:param kwargs: Extra arguments that might be present, but we ignore
because they possibly came in from older rpc messages.
"""
super(RequestContext, self).__init__(auth_token=auth_token,
user=username, tenant=tenant,
is_admin=is_admin,
read_only=read_only,
show_deleted=show_deleted,
request_id='unused')
self.username = username
self.password = password
self.aws_creds = aws_creds
self.tenant_id = tenant_id
self.auth_url = auth_url
self.roles = roles or []
self.region_name = region_name
self.insecure = insecure
self.owner_is_tenant = owner_is_tenant
if overwrite or not hasattr(local.store, 'context'):
self.update_store()
self._session = None
self.trust_id = trust_id
self.trustor_user_id = trustor_user_id
def update_store(self):
local.store.context = self
def to_dict(self):
return {'auth_token': self.auth_token,
'username': self.username,
'password': self.password,
'aws_creds': self.aws_creds,
'tenant': self.tenant,
'tenant_id': self.tenant_id,
'trust_id': self.trust_id,
'insecure': self.insecure,
'trustor_user_id': self.trustor_user_id,
'auth_url': self.auth_url,
'roles': self.roles,
'is_admin': self.is_admin,
'region_name': self.region_name}
@classmethod
def from_dict(cls, values):
return cls(**values)
@property
def owner(self):
"""Return the owner to correlate with an image."""
return self.tenant if self.owner_is_tenant else self.user
def get_admin_context(read_deleted="no"):
return RequestContext(is_admin=True)
class OpenStackClients(object):
'''
Convenience class to create and cache client instances.
'''
def __init__(self, context):
self.context = context
self._neutron = None
self._keystone = None
@property
def auth_token(self):
# if there is no auth token in the context
# attempt to get one using the context username and password
return self.context.auth_token or self.keystone().auth_token
def keystone(self):
if self._keystone:
return self._keystone
self._keystone = hkc.KeystoneClient(self.context)
return self._keystone
def url_for(self, **kwargs):
return self.keystone().url_for(**kwargs)
def neutron(self):
if neutronclient is None:
return None
if self._neutron:
return self._neutron
con = self.context
if self.auth_token is None:
LOG.error("Neutron connection failed, no auth_token!")
return None
if self.context.region_name is None:
management_url = self.url_for(service_type='network',
endpoint_type='publicURL')
else:
management_url = self.url_for(
service_type='network',
attr='region',
endpoint_type='publicURL',
filter_value=self.context.region_name)
args = {
'auth_url': con.auth_url,
'insecure': self.context.insecure,
'service_type': 'network',
'token': self.auth_token,
'endpoint_url': management_url
}
self._neutron = neutronclient.Client(**args)
return self._neutron
def get_cascading_neutron_client():
context = n_context.get_admin_context_without_session()
auth_url = 'https://%s:%s/%s/%s' %(cfg.CONF.keystone_authtoken.auth_host,
cfg.CONF.keystone_authtoken.auth_port,
cfg.CONF.keystone_authtoken.auth_admin_prefix,
cfg.CONF.keystone_authtoken.auth_version)
kwargs = {'auth_token': None,
'username': cfg.CONF.keystone_authtoken.admin_user,
'password': cfg.CONF.keystone_authtoken.admin_password,
'aws_creds': None,
'tenant': cfg.CONF.keystone_authtoken.admin_tenant_name,
'auth_url': auth_url,
'insecure': cfg.CONF.keystone_authtoken.insecure,
'roles': context.roles,
'is_admin': context.is_admin,
'region_name': cfg.CONF.cascading_os_region_name}
reqCon = RequestContext(**kwargs)
openStackClients = OpenStackClients(reqCon)
neutronClient = openStackClients.neutron()
return neutronClient
def check_neutron_client_valid(function):
@functools.wraps(function)
def decorated_function(self, method_name, *args, **kwargs):
retry = 0
while(True):
try:
return function(self, method_name, *args, **kwargs)
except exceptions.Unauthorized:
retry = retry + 1
if(retry <= 3):
self.client = get_cascading_neutron_client()
continue
else:
with excutils.save_and_reraise_exception():
LOG.error(_('Try 3 times, Unauthorized.'))
return None
return decorated_function
class CascadeNeutronClient(object):
def __init__(self):
#mode is cascading or cascaded
self.client = get_cascading_neutron_client()
@check_neutron_client_valid
def __call__(self, method_name, *args, **kwargs):
method = getattr(self.client, method_name)
if method:
return method(*args, **kwargs)
else:
raise Exception('Can not find the method')
@check_neutron_client_valid
def execute(self, method_name, *args, **kwargs):
method = getattr(self.client, method_name)
if method:
return method(*args, **kwargs)
else:
raise Exception('Can not find the method')
class Cascaded2MechanismDriver(api.MechanismDriver):
def __init__(self):
super(Cascaded2MechanismDriver, self).__init__()
self.notify_cascading = False
if cfg.CONF.cascading_os_region_name:
self.cascading_neutron_client = CascadeNeutronClient()
self.notify_cascading = True
def initialize(self):
LOG.debug(_("Experimental L2 population driver"))
self.rpc_ctx = n_context.get_admin_context_without_session()
def get_cascading_port_id(self, cascaded_port_name):
try:
return cascaded_port_name.split('@')[1]
except Exception:
return None
def update_port_postcommit(self, context):
if not self.notify_cascading:
return
cur_port = context.current
orig_port = context._original_port
LOG.debug(_("update_port_postcommit update "
"current_port:%s original:%s") % (cur_port, orig_port))
if not (context.original_host and context.host
and const.DEVICE_OWNER_COMPUTER in cur_port['device_owner']):
return
if context.host != context.original_host:
cascading_port_id = self.get_cascading_port_id(cur_port['name'])
if cascading_port_id:
update_attrs = {'port': {'binding:profile': {'refresh_notify': True}}}
for i in range(const.UPDATE_RETRY):
try:
self.cascading_neutron_client('update_port', cascading_port_id, update_attrs)
LOG.debug(_("host_id(%s -> %s) changed, notify the cascading")
%(context.original_host, context.host))
break
except Exception as e:
LOG.debug(_("Notify cascading refresh port failed(%s)! try %d") % (str(e), i))
continue
| apache-2.0 | -9,135,588,512,233,614,000 | 36.554348 | 102 | 0.58466 | false | 4.279521 | false | false | false |
huji-nlp/tupa | tupa/states/state.py | 2 | 20047 | from collections import deque
from semstr.constraints import Constraints, Direction
from semstr.util.amr import LABEL_ATTRIB
from semstr.validation import CONSTRAINTS
from ucca import core, layer0, layer1
from ucca.layer0 import NodeTags
from ucca.layer1 import EdgeTags
from .edge import Edge
from .node import Node
from ..action import Actions
from ..config import Config
class InvalidActionError(AssertionError):
def __init__(self, *args, is_type=False):
super().__init__(*args)
self.is_type = is_type
class State:
"""
The parser's state, responsible for applying actions and creating the final Passage
:param passage: a Passage object to get the tokens from, and everything else if training
"""
def __init__(self, passage):
self.args = Config().args
self.constraints = CONSTRAINTS.get(passage.extra.get("format"), Constraints)(implicit=self.args.implicit)
self.log = []
self.finished = False
self.passage = passage
try:
l0 = passage.layer(layer0.LAYER_ID)
except KeyError as e:
raise IOError("Passage %s is missing layer %s" % (passage.ID, layer0.LAYER_ID)) from e
try:
l1 = passage.layer(layer1.LAYER_ID)
except KeyError:
l1 = layer1.Layer1(passage)
self.labeled = any(n.outgoing or n.attrib.get(LABEL_ATTRIB) for n in l1.all)
self.terminals = [Node(i, orig_node=t, root=passage, text=t.text, paragraph=t.paragraph, tag=t.tag)
for i, t in enumerate(l0.all, start=1)]
self.stack = []
self.buffer = deque()
self.nodes = []
self.heads = set()
self.need_label = None # If we are waiting for label_node() to be called, which node is to be labeled by it
self.root = self.add_node(orig_node=l1.heads[0], is_root=True) # Root is not in the buffer
self.stack.append(self.root)
self.buffer += self.terminals
self.nodes += self.terminals
self.actions = [] # History of applied actions
self.type_validity_cache = {}
def is_valid_action(self, action):
"""
:param action: action to check for validity
:return: is the action (including tag) valid in the current state?
"""
valid = self.type_validity_cache.get(action.type_id)
if valid is None:
try:
self.check_valid_action(action)
valid = True
except InvalidActionError as e:
valid = False
if e.is_type:
self.type_validity_cache[action.type_id] = valid
return valid
def check_valid_action(self, action, message=False):
"""
Raise InvalidActionError if the action is invalid in the current state
:param action: action to check for validity
:param message: whether to add an informative message to the thrown exception
"""
def _check_possible_node():
self.check(self.node_ratio() < self.args.max_node_ratio,
message and "Non-terminals/terminals ratio: %.3f" % self.args.max_node_ratio, is_type=True)
for head in self.heads:
self.check(head.height <= self.args.max_height,
message and "Graph height: %d" % self.args.max_height, is_type=True)
def _check_possible_parent(node, t):
self.check(node.text is None, message and "Terminals may not have children: %s" % node.text, is_type=True)
if self.args.constraints and t is not None:
for rule in self.constraints.tag_rules:
violation = rule.violation(node, t, Direction.outgoing, message=message)
self.check(violation is None, violation)
self.check(self.constraints.allow_parent(node, t),
message and "%s may not be a '%s' parent (currently %s)" % (
node, t, ", ".join(map(str, node.outgoing)) or "childless"))
self.check(not self.constraints.require_implicit_childless or not node.implicit,
message and "Implicit nodes may not have children: %s" % s0, is_type=True)
def _check_possible_child(node, t):
self.check(node is not self.root, message and "Root may not have parents", is_type=True)
if self.args.constraints and t is not None:
self.check(not t or (node.text is None) != (t == EdgeTags.Terminal),
message and "Edge tag must be %s iff child is terminal, but node %s has edge tag %s" %
(EdgeTags.Terminal, node, t))
for rule in self.constraints.tag_rules:
violation = rule.violation(node, t, Direction.incoming, message=message)
self.check(violation is None, violation)
self.check(self.constraints.allow_child(node, t),
message and "%s may not be a '%s' child (currently %s, %s)" % (
node, t, ", ".join(map(str, node.incoming)) or "parentless",
", ".join(map(str, node.outgoing)) or "childless"))
self.check(self.constraints.possible_multiple_incoming is None or t is None or
action.remote or t in self.constraints.possible_multiple_incoming or
all(e.remote or e.tag in self.constraints.possible_multiple_incoming for e in node.incoming),
message and "Multiple non-remote '%s' parents not allowed for %s" % (t, node))
def _check_possible_edge(p, c, t):
_check_possible_parent(p, t)
_check_possible_child(c, t)
if self.args.constraints and t is not None:
if p is self.root:
self.check(self.constraints.top_level_allowed is None or not t or
t in self.constraints.top_level_allowed, message and "Root may not have %s edges" % t)
else:
self.check(self.constraints.top_level_only is None or
t not in self.constraints.top_level_only, message and "Only root may have %s edges" % t)
self.check(self.constraints.allow_root_terminal_children or p is not self.root or c.text is None,
message and "Terminal child '%s' for root" % c, is_type=True)
if self.constraints.multigraph: # Nodes may be connected by more than one edge
edge = Edge(p, c, t, remote=action.remote)
self.check(self.constraints.allow_edge(edge), message and "Edge not allowed: %s (currently: %s)" % (
edge, ", ".join(map(str, p.outgoing)) or "childless"))
else: # Simple graph, i.e., no more than one edge between the same pair of nodes
self.check(c not in p.children, message and "%s is already %s's child" % (c, p), is_type=True)
self.check(p not in c.descendants, message and "Detected cycle by edge: %s->%s" % (p, c), is_type=True)
def _check_possible_label():
self.check(self.args.node_labels, message and "Node labels disabled", is_type=True)
try:
node = self.stack[-action.tag]
except IndexError:
node = None
self.check(node is not None, message and "Labeling invalid node %s when stack size is %d" % (
action.tag, len(self.stack)))
self.check(not node.labeled, message and "Labeling already-labeled node: %s" % node, is_type=True)
self.check(node.text is None, message and "Terminals do not have labels: %s" % node, is_type=True)
if self.args.constraints:
self.check(self.constraints.allow_action(action, self.actions),
message and "Action not allowed: %s " % action + (
("after " + ", ".join("%s" % a for a in self.actions[-3:])) if self.actions else "as first"))
if action.is_type(Actions.Finish):
self.check(not self.buffer, "May only finish at the end of the input buffer", is_type=True)
if self.args.swap: # Without swap, the oracle may be incapable even of single action
self.check(self.root.outgoing or all(n is self.root or n.is_linkage or n.text for n in self.nodes),
message and "Root has no child at parse end", is_type=True)
for n in self.nodes:
self.check(not self.args.require_connected or n is self.root or n.is_linkage or n.text or
n.incoming, message and "Non-terminal %s has no parent at parse end" % n, is_type=True)
self.check(not self.args.node_labels or n.text or n.labeled,
message and "Non-terminal %s has no label at parse end" % n, is_type=True)
else:
self.check(self.action_ratio() < self.args.max_action_ratio,
message and "Actions/terminals ratio: %.3f" % self.args.max_action_ratio, is_type=True)
if action.is_type(Actions.Shift):
self.check(self.buffer, message and "Shifting from empty buffer", is_type=True)
elif action.is_type(Actions.Label):
_check_possible_label()
else: # Unary actions
self.check(self.stack, message and "%s with empty stack" % action, is_type=True)
s0 = self.stack[-1]
if action.is_type(Actions.Reduce):
if s0 is self.root:
self.check(self.root.labeled or not self.args.node_labels,
message and "Reducing root without label", is_type=True)
elif not s0.text:
self.check(not self.args.require_connected or s0.is_linkage or s0.incoming,
message and "Reducing parentless non-terminal %s" % s0, is_type=True)
self.check(not self.constraints.required_outgoing or
s0.outgoing_tags.intersection((EdgeTags.Terminal, EdgeTags.Punctuation, "")) or
s0.outgoing_tags.intersection(self.constraints.required_outgoing),
message and "Reducing non-terminal %s without %s edge" % (
s0, self.constraints.required_outgoing), is_type=True)
self.check(not self.args.node_labels or s0.text or s0.labeled,
message and "Reducing non-terminal %s without label" % s0, is_type=True)
elif action.is_type(Actions.Swap):
# A regular swap is possible since the stack has at least two elements;
# A compound swap is possible if the stack is longer than the distance
distance = action.tag or 1
self.check(1 <= distance < len(self.stack), message and "Invalid swap distance: %d" % distance)
swapped = self.stack[-distance - 1]
# To prevent swap loops: only swap if the nodes are currently in their original order
self.check(self.swappable(s0, swapped),
message and "Already swapped nodes: %s (swap index %g) <--> %s (swap index %g)"
% (swapped, swapped.swap_index, s0, s0.swap_index))
else:
pct = self.get_parent_child_tag(action)
self.check(pct, message and "%s with len(stack) = %d" % (action, len(self.stack)), is_type=True)
parent, child, tag = pct
if parent is None:
_check_possible_child(child, tag)
_check_possible_node()
elif child is None:
_check_possible_parent(parent, tag)
_check_possible_node()
else: # Binary actions
_check_possible_edge(parent, child, tag)
@staticmethod
def swappable(right, left):
return left.swap_index < right.swap_index
def is_valid_label(self, label):
"""
:param label: label to check for validity
:return: is the label valid in the current state?
"""
try:
self.check_valid_label(label)
except InvalidActionError:
return False
return True
def check_valid_label(self, label, message=False):
if self.args.constraints and label is not None:
valid = self.constraints.allow_label(self.need_label, label)
self.check(valid, message and "May not label %s as %s: %s" % (self.need_label, label, valid))
@staticmethod
def check(condition, *args, **kwargs):
if not condition:
raise InvalidActionError(*args, **kwargs)
# noinspection PyTypeChecker
def transition(self, action):
"""
Main part of the parser: apply action given by oracle or classifier
:param action: Action object to apply
"""
action.apply()
self.log = []
pct = self.get_parent_child_tag(action)
if pct:
parent, child, tag = pct
if parent is None:
parent = action.node = self.add_node(orig_node=action.orig_node)
if child is None:
child = action.node = self.add_node(orig_node=action.orig_node, implicit=True)
action.edge = self.add_edge(Edge(parent, child, tag, remote=action.remote))
if action.node:
self.buffer.appendleft(action.node)
elif action.is_type(Actions.Shift): # Push buffer head to stack; shift buffer
self.stack.append(self.buffer.popleft())
elif action.is_type(Actions.Label):
self.need_label = self.stack[-action.tag] # The parser is responsible to choose a label and set it
elif action.is_type(Actions.Reduce): # Pop stack (no more edges to create with this node)
self.stack.pop()
elif action.is_type(Actions.Swap): # Place second (or more) stack item back on the buffer
distance = action.tag or 1
s = slice(-distance - 1, -1)
self.log.append("%s <--> %s" % (", ".join(map(str, self.stack[s])), self.stack[-1]))
self.buffer.extendleft(reversed(self.stack[s])) # extendleft reverses the order
del self.stack[s]
elif action.is_type(Actions.Finish): # Nothing left to do
self.finished = True
else:
raise ValueError("Invalid action: %s" % action)
if self.args.verify:
intersection = set(self.stack).intersection(self.buffer)
assert not intersection, "Stack and buffer overlap: %s" % intersection
action.index = len(self.actions)
self.actions.append(action)
self.type_validity_cache = {}
def add_node(self, **kwargs):
"""
Called during parsing to add a new Node (not core.Node) to the temporary representation
:param kwargs: keyword arguments for Node()
"""
node = Node(len(self.nodes), swap_index=self.calculate_swap_index(), root=self.passage, **kwargs)
if self.args.verify:
assert node not in self.nodes, "Node already exists"
self.nodes.append(node)
self.heads.add(node)
self.log.append("node: %s (swap_index: %g)" % (node, node.swap_index))
if self.args.use_gold_node_labels:
self.need_label = node # Labeled the node as soon as it is created rather than applying a LABEL action
return node
def calculate_swap_index(self):
"""
Update a new node's swap index according to the nodes before and after it.
Usually the swap index is just the index, i.e., len(self.nodes).
If the buffer is not empty and its head is not a terminal, it means that it is a non-terminal created before.
In that case, the buffer head's index will be lower than the new node's index, so the new node's swap index will
be the arithmetic mean between the previous node (stack top) and the next node (buffer head).
Then, in the validity check on the SWAP action, we will correctly identify this node as always having appearing
before the current buffer head. Otherwise, we would prevent swapping them even though it should be valid
(because they have never been swapped before).
"""
if self.buffer:
b0 = self.buffer[0]
if self.stack and (b0.text is not None or b0.swap_index <= len(self.nodes)):
s0 = self.stack[-1]
return (s0.swap_index + b0.swap_index) / 2
return None
def add_edge(self, edge):
edge.add()
self.heads.discard(edge.child)
self.log.append("edge: %s" % edge)
return edge
PARENT_CHILD = (
((Actions.LeftEdge, Actions.LeftRemote), (-1, -2)),
((Actions.RightEdge, Actions.RightRemote), (-2, -1)),
((Actions.Node, Actions.RemoteNode), (None, -1)),
((Actions.Implicit,), (-1, None)),
)
def get_parent_child_tag(self, action):
try:
for types, indices in self.PARENT_CHILD:
if action.is_type(*types):
parent, child = [None if i is None else self.stack[i] for i in indices]
break
else:
return None
return parent, child, (EdgeTags.Terminal if child and child.text else
EdgeTags.Punctuation if child and child.children and all(
c.tag == NodeTags.Punct for c in child.children)
else action.tag) # In unlabeled parsing, keep a valid graph
except IndexError:
return None
def label_node(self, label):
self.need_label.label = label
self.need_label.labeled = True
self.log.append("label: %s" % self.need_label)
self.type_validity_cache = {}
self.need_label = None
def create_passage(self, verify=True, **kwargs):
"""
Create final passage from temporary representation
:param verify: fail if this results in an improper passage
:return: core.Passage created from self.nodes
"""
Config().print("Creating passage %s from state..." % self.passage.ID, level=2)
passage = core.Passage(self.passage.ID)
passage_format = kwargs.get("format") or self.passage.extra.get("format")
if passage_format:
passage.extra["format"] = passage_format
self.passage.layer(layer0.LAYER_ID).copy(passage)
l0 = passage.layer(layer0.LAYER_ID)
l1 = layer1.Layer1(passage)
self.root.node = l1.heads[0]
if self.args.node_labels:
self.root.set_node_label()
if self.labeled: # We have a reference passage
self.root.set_node_id()
Node.attach_nodes(l0, l1, self.nodes, self.labeled, self.args.node_labels, verify)
return passage
def node_ratio(self):
return (len(self.nodes) / len(self.terminals) - 1) if self.terminals else 0
def action_ratio(self):
return (len(self.actions) / len(self.terminals)) if self.terminals else 0
def str(self, sep):
return "stack: [%-20s]%sbuffer: [%s]" % (" ".join(map(str, self.stack)), sep,
" ".join(map(str, self.buffer)))
def __str__(self):
return self.str(" ")
def __eq__(self, other):
return self.stack == other.stack and self.buffer == other.buffer and \
self.nodes == other.nodes
def __hash__(self):
return hash((tuple(self.stack), tuple(self.buffer), tuple(self.nodes)))
| gpl-3.0 | -3,374,840,468,821,454,300 | 51.616798 | 120 | 0.578042 | false | 4.033602 | false | false | false |
sugarsweetrobotics/wasanbon | wasanbon/core/plugins/mgr/rtc_plugin/__init__.py | 1 | 16100 | import os, sys, signal, time, traceback, threading
import wasanbon
from wasanbon.core.plugins import PluginFunction, manifest
ev = threading.Event()
endflag = False
class Plugin(PluginFunction):
""" Manage RT-Component in Package """
def __init__(self):
#PluginFunction.__init__(self)
super(Plugin, self).__init__()
pass
def depends(self):
return ['admin.environment',
'admin.package',
'admin.rtc',
'admin.rtcconf',
'admin.rtcprofile',
'admin.builder',
'admin.systeminstaller',
'admin.systemlauncher',
'admin.editor']
#@property
#def rtc(self):
# import rtc
# return rtc
def _print_rtcs(self, args):
pack = admin.package.get_package_from_path(os.getcwd())
rtcs = admin.rtc.get_rtcs_from_package(pack)
for r in rtcs:
print r.rtcprofile.basicInfo.name
@manifest
def list(self, args):
""" List RTC in current Package """
self.parser.add_option('-l', '--long', help='Long Format (default=False)', default=False, action='store_true', dest='long_flag')
self.parser.add_option('-d', '--detail', help='Long Format (default=False)', default=False, action='store_true', dest='detail_flag')
options, argv = self.parse_args(args)
verbose = options.verbose_flag
long = options.long_flag
detail = options.detail_flag
if detail: long = True
#package = wasanbon.plugins.admin.package.package
#package = admin.package
#admin_rtc = admin.rtc.rtc
pack = admin.package.get_package_from_path(os.getcwd())
rtcs = admin.rtc.get_rtcs_from_package(pack, verbose=verbose)
for r in rtcs:
if not long:
print ' - ' + r.rtcprofile.basicInfo.name
elif long:
print r.rtcprofile.basicInfo.name + ' : '
print ' basicInfo : '
print ' description : ' + r.rtcprofile.basicInfo.description
print ' category : ' + r.rtcprofile.basicInfo.category
print ' vendor : ' + r.rtcprofile.basicInfo.vendor
if len(r.rtcprofile.dataports):
print ' dataports : '
for d in r.rtcprofile.dataports:
if not detail:
print ' - ' + d.name
else:
print ' ' + d.name + ':'
#print ' name : ' + d.name
print ' portType : ' + d.portType
print ' type : ' + d.type
if len(r.rtcprofile.serviceports):
print ' serviceports :'
for s in r.rtcprofile.serviceports:
if not detail:
print ' - ' + s.name
else:
print ' ' + s.name + ':'
#print ' name : ' + s.name
for i in s.serviceInterfaces:
print ' ' + i.name + ':'
print ' type : ' + i.type
print ' instanceName : ' + i.instanceName
if detail:
print ' language : '
print ' kind : ' + r.rtcprofile.language.kind
if long or detail:
print ''
return 0
@manifest
def build(self, args):
self.parser.add_option('-o', '--only', help='Build Only (Not Install) (default=False)', default=False, action='store_true', dest='only_flag')
self.parser.add_option('-s', '--standalone', help='Install Standalone Mode (default=False)', default=False, action='store_true', dest='standalone_flag')
options, argv = self.parse_args(args, self._print_rtcs)
verbose = options.verbose_flag
if sys.platform == 'win32':
if verbose: sys.stdout.write('# In Windows, always build with verbose option.\n')
verbose = True
only = options.only_flag
standalone = options.standalone_flag
wasanbon.arg_check(argv, 4)
pack = admin.package.get_package_from_path(os.getcwd())
if argv[3] == 'all':
rtcs = admin.rtc.get_rtcs_from_package(pack, verbose=verbose)
else:
rtcs = [admin.rtc.get_rtc_from_package(pack, argv[3], verbose=verbose)]
return_value_map = {}
retval = 0
for rtc in rtcs:
sys.stdout.write('# Building RTC (%s)\n' % rtc.rtcprofile.basicInfo.name)
ret, msg = admin.builder.build_rtc(rtc.rtcprofile, verbose=verbose)
return_value_map[rtc.rtcprofile.basicInfo.name] = ret
if not ret:
sys.stdout.write('## Failed.\n')
retval = -1
else:
sys.stdout.write('## Success.\n')
if not only:
if not standalone:
# Confirm if this rtc is
standalone_flag = admin.systeminstaller.is_installed(pack, rtc, verbose=verbose, standalone=True)
else:
standalone_flag = standalone
sys.stdout.write('## Installing RTC (standalone=%s).\n' % (standalone_flag is True))
admin.systeminstaller.install_rtc_in_package(pack, rtc, verbose=verbose, standalone=standalone_flag)
sys.stdout.write('### Success.\n')
if verbose:
sys.stdout.write('Build Summary:\n')
for key, value in return_value_map.items():
sys.stdout.write(' - Build RTC (' + key + ')' + ' '*(25-len(key)) + ('Success' if value else 'False') + '\n')
return retval
@manifest
def clean(self, args):
options, argv = self.parse_args(args, self._print_rtcs)
verbose = options.verbose_flag
if verbose: sys.stdout.write('# Cleanup RTCs\n')
wasanbon.arg_check(argv, 4)
pack = admin.package.get_package_from_path(os.getcwd(), verbose=verbose)
if argv[3] == 'all':
rtcs = admin.rtc.get_rtcs_from_package(pack, verbose=verbose)
else:
rtcs = [admin.rtc.get_rtc_from_package(pack, argv[3], verbose=verbose)]
retval = 0
for rtc in rtcs:
if verbose: sys.stdout.write('# Cleanuping RTC %s\n' % rtc.rtcprofile.basicInfo.name)
ret, msg = admin.builder.clean_rtc(rtc.rtcprofile, verbose=verbose)
if not ret:
retval = -1
return retval
@manifest
def delete(self, args):
""" Delete Package
# Usage $ wasanbon-admin.py package delete [PACK_NAME]"""
self.parser.add_option('-f', '--force', help='Force option (default=False)', default=False, action='store_true', dest='force_flag')
options, argv = self.parse_args(args[:], self._print_rtcs)
verbose = options.verbose_flag
force = options.force_flag
pack = admin.package.get_package_from_path(os.getcwd())
if argv[3] == 'all':
rtcs = admin.rtc.get_rtcs_from_package(pack, verbose=verbose)
else:
rtcs = [admin.rtc.get_rtc_from_package(pack, argv[3], verbose=verbose)]
import shutil
for rtc in rtcs:
if os.path.isdir(rtc.path):
sys.stdout.write('# Deleting RTC (%s)\n' % rtc.rtcprofile.basicInfo.name)
def remShut(*args):
import stat
func, path, _ = args
os.chmod(path, stat.S_IWRITE)
os.remove(path)
pass
shutil.rmtree(rtc.path, onerror = remShut)
@manifest
def edit(self, args):
""" Edit RTC with editor """
options, argv = self.parse_args(args[:], self._print_rtcs)
verbose = options.verbose_flag
pack = admin.package.get_package_from_path(os.getcwd())
rtc = admin.rtc.get_rtc_from_package(pack, argv[3], verbose=verbose)
admin.editor.edit_rtc(rtc, verbose=verbose)
@manifest
def run(self, args):
""" Run just one RTC """
options, argv = self.parse_args(args[:], self._print_rtcs)
verbose = options.verbose_flag
package = admin.package.get_package_from_path(os.getcwd())
rtc = admin.rtc.get_rtc_from_package(package, argv[3], verbose=verbose)
return self.run_rtc_in_package(package, rtc, verbose=verbose)
def run_rtc_in_package(self, package, rtc, verbose=False, background=False):
global endflag
endflag = False
def signal_action(num, frame):
print ' - SIGINT captured'
ev.set()
global endflag
endflag = True
pass
signal.signal(signal.SIGINT, signal_action)
if sys.platform == 'win32':
sys.stdout.write(' - Escaping SIGBREAK...\n')
signal.signal(signal.SIGBREAK, signal_action)
pass
sys.stdout.write('# Executing RTC %s\n' % rtc.rtcprofile.basicInfo.name)
rtcconf_path = package.rtcconf[rtc.rtcprofile.language.kind]
rtcconf = admin.rtcconf.RTCConf(rtcconf_path, verbose=verbose)
rtc_temp = os.path.join("conf", "rtc_temp.conf")
if os.path.isfile(rtc_temp):
os.remove(rtc_temp)
pass
rtcconf.sync(verbose=True, outfilename=rtc_temp)
admin.systeminstaller.uninstall_all_rtc_from_package(package, rtcconf_filename=rtc_temp, verbose=True)
admin.systeminstaller.install_rtc_in_package(package, rtc, rtcconf_filename=rtc_temp, copy_conf=False)
try:
admin.systemlauncher.launch_rtcd(package, rtc.rtcprofile.language.kind, rtcconf=rtc_temp, verbose=True)
if background:
return 0
while not endflag:
try:
time.sleep(0.1)
except IOError, e:
print e
pass
pass
pass
except:
traceback.print_exc()
return -1
if verbose: sys.stdout.write('## Exitting RTC Manager.\n')
admin.systemlauncher.exit_all_rtcs(package, verbose=verbose)
admin.systemlauncher.terminate_system(package, verbose=verbose)
return 0
def terminate_rtcd(self, package, verbose=False):
if verbose: sys.stdout.write('# Terminating RTCDs.\n')
admin.systemlauncher.exit_all_rtcs(package, verbose=verbose)
admin.systemlauncher.terminate_system(package, verbose=verbose)
return 0
@manifest
def download_profile(self, args):
""" Run just one RTC """
self.parser.add_option('-w', '--wakeuptimeout', help='Timeout of Sleep Function when waiting for the wakeup of RTC-Daemons', default=5, dest='wakeuptimeout', action='store', type='float')
options, argv = self.parse_args(args[:], self._print_rtcs)
verbose = options.verbose_flag
wakeuptimeout = options.wakeuptimeout
package = admin.package.get_package_from_path(os.getcwd())
rtc = admin.rtc.get_rtc_from_package(package, argv[3], verbose=verbose)
if self.run_rtc_in_package(package, rtc, verbose=verbose, background=True) != 0:
return -1
wasanbon.sleep(wakeuptimeout)
rtcp = admin.rtcprofile.create_rtcprofile(rtc, verbose=verbose)
print admin.rtcprofile.tostring(rtcp)
self.terminate_rtcd(package, verbose=verbose)
return 0
@manifest
def verify_profile(self, args):
""" Run just one RTC """
self.parser.add_option('-w', '--wakeuptimeout', help='Timeout of Sleep Function when waiting for the wakeup of RTC-Daemons', default=5, dest='wakeuptimeout', action='store', type='float')
options, argv = self.parse_args(args[:], self._print_rtcs)
verbose = options.verbose_flag
wakeuptimeout = options.wakeuptimeout
package = admin.package.get_package_from_path(os.getcwd())
sys.stdout.write('# Starting RTC.\n')
rtc = admin.rtc.get_rtc_from_package(package, argv[3], verbose=verbose)
if self.run_rtc_in_package(package, rtc, verbose=verbose, background=True) != 0:
return -1
wasanbon.sleep(wakeuptimeout)
sys.stdout.write('# Acquiring RTCProfile from Inactive RTC\n')
rtcp = admin.rtcprofile.create_rtcprofile(rtc, verbose=verbose)
self.terminate_rtcd(package, verbose=verbose)
sys.stdout.write('# Comparing Acquired RTCProfile and Existing RTCProfile.\n')
retval = admin.rtcprofile.compare_rtcprofile(rtc.rtcprofile, rtcp, verbose=verbose)
if retval:
sys.stdout.write('Failed.\n# RTCProfile must be updated.\n')
return -1
sys.stdout.write('Succeeded.\n# RTCProfile is currently matches to binary.\n')
return 0
@manifest
def update_profile(self, args):
""" Run just one RTC and compare the profile between the existing RTC.xml and launched RTC, then save RTC.xml """
self.parser.add_option('-f', '--file', help='RTCProfile filename (default="RTC.xml")', default='RTC.xml', dest='filename', action='store', type='string')
self.parser.add_option('-d', '--dryrun', help='Just output on console', default=False, dest='dry_flag', action='store_true')
self.parser.add_option('-w', '--wakeuptimeout', help='Timeout of Sleep Function when waiting for the wakeup of RTC-Daemons', default=5, dest='wakeuptimeout', action='store', type='float')
options, argv = self.parse_args(args[:], self._print_rtcs)
verbose = options.verbose_flag
dry = options.dry_flag
filename = options.filename
wakeuptimeout = options.wakeuptimeout
wasanbon.arg_check(argv, 4)
rtc_name = argv[3]
package = admin.package.get_package_from_path(os.getcwd())
sys.stdout.write('# Starting RTC.\n')
rtc = admin.rtc.get_rtc_from_package(package, rtc_name, verbose=verbose)
standalone = admin.systeminstaller.is_installed(package, rtc, standalone=True, verbose=verbose)
if standalone:
admin.systemlauncher.launch_standalone_rtc(package, rtc, stdout=True, verbose=verbose)
pass
else:
if self.run_rtc_in_package(package, rtc, verbose=verbose, background=True) != 0:
return -1
wasanbon.sleep(wakeuptimeout)
sys.stdout.write('# Acquiring RTCProfile from Inactive RTC\n')
rtcp = admin.rtcprofile.create_rtcprofile(rtc, verbose=verbose)
if standalone:
pass
else:
self.terminate_rtcd(package, verbose=verbose)
sys.stdout.write('# Comparing Acquired RTCProfile and Existing RTCProfile.\n')
retval = admin.rtcprofile.compare_rtcprofile(rtc.rtcprofile, rtcp, verbose=verbose)
if retval:
filepath = os.path.join(rtc.path, filename)
if not dry:
outstr = admin.rtcprofile.tostring(retval, pretty_print=True)
if outstr == None:
sys.stdout.write('# RTC Profile save failed.\n')
return -1
if os.path.isfile(filepath):
f = filepath + wasanbon.timestampstr()
os.rename(filepath, f)
pass
fout = open(filepath, 'w')
fout.write(outstr)
fout.close()
else:
sys.stdout.write(admin.rtcprofile.tostring(retval, pretty_print=True))
sys.stdout.write('Succeed.\n')
return 0
sys.stdout.write('Succeed.\n')
return 0
| gpl-3.0 | 968,969,645,110,266,200 | 42.631436 | 195 | 0.56354 | false | 3.994046 | false | false | false |
zathras777/atavism | atavism/http11/range.py | 1 | 2282 | class Range(object):
def __init__(self, tpl):
self.start = int(tpl[0]) if tpl[0] is not None and tpl[0] != b'' else None
self.end = int(tpl[1]) if tpl[1] is not None and tpl[1] != b'' else None
if self.start is None and self.end is not None and self.end > 0:
self.end *= -1
def __str__(self):
return "Byte Range: {} - {}".format(self.start, self.end)
def __len__(self, cl=0):
if self.start is None and self.end is not None:
return self.end * -1
elif self.end is None:
return cl - self.start
return self.end - self.start + 1
def header(self):
r = ''
if self.start is not None:
r += '{}-'.format(self.start)
if self.end is not None:
r += "{}".format(self.end)
return r
def from_content(self, content):
""" Try and get the range from the supplied content. If it isn't possible,
return None.
:param content: The content stream to extract the range from.
:return: The extracted content or None.
"""
csz = len(content)
if self.end is not None:
if self.end < 0 and csz < self.end * -1:
print("not big enough")
return None
if self.end > csz:
print("end > content length")
return None
else:
if self.start > csz:
print("start > content length")
return None
if self.end is None:
return content[self.start:]
elif self.start is None:
return content[self.end:]
return content[self.start: self.end + 1]
def absolutes(self, clen):
start = self.start
if self.start is None:
if self.end < 0:
return clen + self.end, clen - 1
start = 0
end = clen - 1
if self.end is not None:
if self.end < 0:
end = clen + self.end - 1
else:
end = self.end - 1
if end < start:
end = start
return start, end
def absolute_range(self, clen):
start, end = self.absolutes(clen)
return "{}-{}/{}".format(start, end, clen)
| unlicense | 3,471,495,143,078,376,400 | 32.558824 | 82 | 0.501753 | false | 3.900855 | false | false | false |
NaohiroTamura/python-ironicclient | ironicclient/tests/functional/test_json_response.py | 1 | 8400 | # Copyright (c) 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import jsonschema
from tempest.lib.common.utils import data_utils
from ironicclient.tests.functional import base
def _is_valid_json(json_response, schema):
"""Verify JSON is valid.
:param json_response: JSON response from CLI
:type json_response: string
:param schema: expected schema of response
:type json_response: dictionary
"""
try:
json_response = json.loads(json_response)
jsonschema.validate(json_response, schema)
except jsonschema.ValidationError:
return False
return True
class TestNodeJsonResponse(base.FunctionalTestBase):
"""Test JSON responses for node commands."""
node_schema = {
"type": "object",
"properties": {
"target_power_state": {"type": ["string", "null"]},
"extra": {"type": "object"},
"last_error": {"type": ["string", "null"]},
"updated_at": {"type": ["string", "null"]},
"maintenance_reason": {"type": ["string", "null"]},
"provision_state": {"type": "string"},
"clean_step": {"type": "object"},
"uuid": {"type": "string"},
"console_enabled": {"type": "boolean"},
"target_provision_state": {"type": ["string", "null"]},
"raid_config": {"type": "string"},
"provision_updated_at": {"type": ["string", "null"]},
"maintenance": {"type": "boolean"},
"target_raid_config": {"type": "string"},
"inspection_started_at": {"type": ["string", "null"]},
"inspection_finished_at": {"type": ["string", "null"]},
"power_state": {"type": ["string", "null"]},
"driver": {"type": "string"},
"reservation": {"type": ["string", "null"]},
"properties": {"type": "object"},
"instance_uuid": {"type": ["string", "null"]},
"name": {"type": ["string", "null"]},
"driver_info": {"type": "object"},
"created_at": {"type": "string"},
"driver_internal_info": {"type": "object"},
"chassis_uuid": {"type": ["string", "null"]},
"instance_info": {"type": "object"}
}
}
def setUp(self):
super(TestNodeJsonResponse, self).setUp()
self.node = self.create_node()
def test_node_list_json(self):
"""Test JSON response for nodes list."""
schema = {
"type": "array",
"items": {
"type": "object",
"properties": {
"instance_uuid": {"type": ["string", "null"]},
"maintenance": {"type": "boolean"},
"name": {"type": ["string", "null"]},
"power_state": {"type": ["string", "null"]},
"provision_state": {"type": "string"},
"uuid": {"type": "string"}}}
}
response = self.ironic('node-list', flags='--json',
params='', parse=False)
self.assertTrue(_is_valid_json(response, schema))
def test_node_show_json(self):
"""Test JSON response for node show."""
response = self.ironic('node-show', flags='--json', params='{0}'
.format(self.node['uuid']), parse=False)
self.assertTrue(_is_valid_json(response, self.node_schema))
def test_node_validate_json(self):
"""Test JSON response for node validation."""
schema = {
"type": "array",
"items": {
"type": "object",
"properties": {
"interface": {"type": ["string", "null"]},
"result": {"type": "boolean"},
"reason": {"type": ["string", "null"]}}}
}
response = self.ironic('node-validate', flags='--json',
params='{0}'.format(self.node['uuid']),
parse=False)
self.assertTrue(_is_valid_json(response, schema))
def test_node_show_states_json(self):
"""Test JSON response for node show states."""
schema = {
"type": "object",
"properties": {
"target_power_state": {"type": ["string", "null"]},
"target_provision_state": {"type": ["string", "null"]},
"last_error": {"type": ["string", "null"]},
"console_enabled": {"type": "boolean"},
"provision_updated_at": {"type": ["string", "null"]},
"power_state": {"type": ["string", "null"]},
"provision_state": {"type": "string"}
}
}
response = self.ironic('node-show-states', flags='--json',
params='{0}'.format(self.node['uuid']),
parse=False)
self.assertTrue(_is_valid_json(response, schema))
def test_node_create_json(self):
"""Test JSON response for node creation."""
schema = {
"type": "object",
"properties": {
"uuid": {"type": "string"},
"driver_info": {"type": "object"},
"extra": {"type": "object"},
"driver": {"type": "string"},
"chassis_uuid": {"type": ["string", "null"]},
"properties": {"type": "object"},
"name": {"type": ["string", "null"]},
}
}
response = self.ironic('node-create', flags='--json',
params='-d fake', parse=False)
self.assertTrue(_is_valid_json(response, schema))
def test_node_update_json(self):
"""Test JSON response for node update."""
node_name = data_utils.rand_name('test')
response = self.ironic('node-update', flags='--json',
params='{0} add name={1}'
.format(self.node['uuid'], node_name),
parse=False)
self.assertTrue(_is_valid_json(response, self.node_schema))
class TestDriverJsonResponse(base.FunctionalTestBase):
"""Test JSON responses for driver commands."""
def test_driver_list_json(self):
"""Test JSON response for drivers list."""
schema = {
"type": "array",
"items": {
"type": "object",
"properties": {
"name": {"type": "string"},
"hosts": {"type": "string"},
}}
}
response = self.ironic('driver-list', flags='--json', parse=False)
self.assertTrue(_is_valid_json(response, schema))
def test_driver_show_json(self):
"""Test JSON response for driver show."""
schema = {
"type": "object",
"properties": {
"name": {"type": "string"},
"hosts": {
"type": "array",
"items": {"type": "string"}}
}
}
drivers_names = self.get_drivers_names()
for driver in drivers_names:
response = self.ironic('driver-show', flags='--json',
params='{0}'.format(driver), parse=False)
self.assertTrue(_is_valid_json(response, schema))
def test_driver_properties_json(self):
"""Test JSON response for driver properties."""
schema = {
"type": "object",
"additionalProperties": {"type": "string"}
}
drivers_names = self.get_drivers_names()
for driver in drivers_names:
response = self.ironic('driver-properties', flags='--json',
params='{0}'.format(driver), parse=False)
self.assertTrue(_is_valid_json(response, schema))
| apache-2.0 | 7,119,508,323,768,416,000 | 39 | 76 | 0.498333 | false | 4.34333 | true | false | false |
jasoncorbett/slickqa.slick-python | slickqa/slickqa.py | 1 | 15630 | import logging
import time
from datetime import datetime
import types
from .micromodels.fields import ModelCollectionField
from . import SlickConnection, SlickCommunicationError, Release, Build, BuildReference, Component, ComponentReference, \
Project, Testplan, Testrun, Testcase, RunStatus, Result, ResultStatus, LogEntry, Configuration, TestrunGroup, TestrunReference
def add_log_entry(self, message, level='DEBUG', loggername='', exceptionclassname='', exceptionmessage='', stacktrace=''):
entry = LogEntry()
entry.entryTime = int(round(time.time() * 1000))
entry.message = message
entry.level = level
entry.loggerName = loggername
entry.exceptionClassName = exceptionclassname
entry.exceptionMessage = exceptionmessage
entry.exceptionStackTrace = stacktrace
if not hasattr(self, 'log'):
self.log = []
self.log.append(entry)
def update_result(self):
self.connection.results(self).update()
def update_testrun(self):
if hasattr(self, 'summary'):
del self.summary
self.connection.testruns(self).update()
def add_file_to_result(self, filename, fileobj=None):
slickfile = self.connection.files.upload_local_file(filename, fileobj)
if not hasattr(self, 'files'):
self.files = []
self.files.append(slickfile)
self.update()
def make_result_updatable(result, connection):
result.connection = connection
result.update = types.MethodType(update_result, result)
result.add_file = types.MethodType(add_file_to_result, result)
result.add_log_entry = types.MethodType(add_log_entry, result)
def make_testrun_updatable(testrun, connection):
testrun.connection = connection
testrun.update = types.MethodType(update_testrun, testrun)
testrun.add_file = types.MethodType(add_file_to_result, testrun)
class SlickQA(object):
def __init__(self, url, project_name, release_name, build_name, test_plan=None, test_run=None, environment_name=None, test_run_group_name=None):
self.logger = logging.getLogger('slick-reporter.Slick')
self.slickcon = None
self.is_connected = False
self.project = None
self.environment = environment_name
self.component = None
self.componentref = None
self.testplan = test_plan
self.releaseref = None
self.release = release_name
self.build = build_name
self.buildref = None
self.testrun = test_run
self.testrunref = None
self.testrun_group = test_run_group_name
self.logqueue = []
self.init_connection(url)
if self.is_connected:
self.logger.debug("Initializing Slick...")
self.init_project(project_name)
self.init_release()
self.init_build()
self.init_testplan()
self.init_environment()
self.init_testrun()
self.init_testrungroup()
# TODO: if you have a list of test cases, add results for each with notrun status
def init_connection(self, url):
try:
self.logger.debug("Checking connection to server...")
self.slickcon = SlickConnection(url)
successful = self.verify_connection()
if not successful:
raise SlickCommunicationError(
"Unable to verify connection to {} by trying to access the version api".format(
self.slickcon.getUrl()))
self.is_connected = True
except SlickCommunicationError as se:
self.logger.error(se.message)
def verify_connection(self):
version = self.slickcon.version.findOne()
if version:
self.logger.debug("Successfully connected. Using version {}".format(version))
return True
self.logger.debug("Unable to connect. No version available.")
return False
def init_project(self, project, create=True):
self.logger.debug("Looking for project by name '{}'.".format(project))
try:
self.project = self.slickcon.projects.findByName(project)
except SlickCommunicationError as err:
self.logger.error("Error communicating with slick: {}".format(err.args[0]))
if self.project is None and create:
self.logger.error("Unable to find project with name '{}', creating...".format(self.project))
self.project = Project()
self.project.name = project
self.project = self.slickcon.projects(self.project).create()
assert (isinstance(self.project, Project))
self.logger.info("Using project with name '{}' and id: {}.".format(self.project.name, self.project.id))
def init_release(self):
release_name = self.release
self.logger.debug("Looking for release '{}' in project '{}'".format(release_name, self.project.name))
if not hasattr(self.project, 'releases'):
self.project.releases = []
for release in self.project.releases:
assert isinstance(release, Release)
if release.name == release_name:
self.logger.info("Found Release '{}' with id '{}' in Project '{}'.".format(release.name, release.id,
self.project.id))
self.release = release
self.releaseref = release.create_reference()
break
else:
self.logger.info("Adding release {} to project {}.".format(release_name, self.project.name))
release = Release()
release.name = release_name
self.release = self.slickcon.projects(self.project).releases(release).create()
assert isinstance(self.release, Release)
self.project = self.slickcon.projects(self.project).get()
self.releaseref = self.release.create_reference()
self.logger.info("Using newly created release '{}' with id '{}' in Project '{}'.".format(self.release.name,
self.release.id, self.project.name))
def init_build(self):
build_number = self.build
if not hasattr(self.release, 'builds'):
self.release.builds = []
for build in self.release.builds:
if build.name == build_number:
self.logger.debug("Found build with name '{}' and id '{}' on release '{}'".format(build.name, build.id,
self.release.name))
self.buildref = build.create_reference()
break
else:
self.logger.info("Adding build {} to release {}.".format(build_number, self.release.name))
build = Build()
build.name = build_number
build.built = datetime.now()
self.buildref = (
self.slickcon.projects(self.project).releases(self.release).builds(build).create()).create_reference()
assert isinstance(self.buildref, BuildReference)
self.logger.info("Using newly created build '{}' with id '{}' in Release '{}' in Project '{}'.".format(
self.buildref.name, self.buildref.buildId, self.release.name, self.project.name))
def get_component(self, component_name):
self.logger.debug("Looking for component with name '{}' in project '{}'".format(component_name, self.project.name))
for comp in self.project.components:
if comp.name == component_name:
assert isinstance(comp, Component)
self.logger.info("Found component with name '{}' and id '{}' in project '{}'.".format(comp.name, comp.id,
self.project.name))
self.component = comp
self.componentref = self.component.create_reference()
assert isinstance(self.componentref, ComponentReference)
return self.component
def create_component(self, component_name):
self.logger.info("Adding component {} to project {}.".format(component_name, self.project.name))
component = Component()
component.name = component_name
component.code = component_name.replace(" ", "-")
self.component = self.slickcon.projects(self.project).components(component).create()
self.project.components.append(self.component)
self.componentref = self.component.create_reference()
self.logger.info("Using newly created component '{}' with id '{}' in project '{}'.".format(
self.component.name, self.component.id, self.project.name))
return self.component
def init_testplan(self):
if self.testplan:
testplan_name = self.testplan
testplan = self.slickcon.testplans.findOne(projectid=self.project.id, name=testplan_name)
if testplan is None:
self.logger.debug("Creating testplan with name '{}' connected to project '{}'.".format(testplan_name,
self.project.name))
testplan = Testplan()
testplan.name = testplan_name
testplan.project = self.project.create_reference()
testplan.isprivate = False
testplan.createdBy = "slickqa-python"
testplan = self.slickcon.testplans(testplan).create()
self.logger.info("Using newly create testplan '{}' with id '{}'.".format(testplan.name, testplan.id))
else:
self.logger.info("Found (and using) existing testplan '{}' with id '{}'.".format(testplan.name, testplan.id))
self.testplan = testplan
else:
self.logger.warn("No testplan specified for the testrun.")
def init_environment(self):
if self.environment is not None:
env = self.slickcon.configurations.findOne(name=self.environment, configurationType="ENVIRONMENT")
if env is None:
env = Configuration()
env.name = self.environment
env.configurationType = "ENVIRONMENT"
env = self.slickcon.configurations(env).create()
self.environment = env
def init_testrun(self):
testrun = Testrun()
if self.testrun is not None:
testrun.name = self.testrun
else:
if self.testplan is not None:
testrun.name = self.testplan.name
else:
testrun.name = 'Tests run from slick-python'
if self.testplan is not None:
testrun.testplanId = self.testplan.id
testrun.project = self.project.create_reference()
testrun.release = self.releaseref
testrun.build = self.buildref
testrun.state = RunStatus.RUNNING
testrun.runStarted = int(round(time.time() * 1000))
if self.environment is not None and isinstance(self.environment, Configuration):
testrun.config = self.environment.create_reference()
self.logger.debug("Creating testrun with name {}.".format(testrun.name))
self.testrun = self.slickcon.testruns(testrun).create()
make_testrun_updatable(self.testrun, self.slickcon)
def init_testrungroup(self):
if self.testrun_group is not None:
trg = self.slickcon.testrungroups.findOne(name=self.testrun_group)
if trg is None:
trg = TestrunGroup()
trg.name = self.testrun_group
trg.testruns = []
trg.created = datetime.now()
trg = self.slickcon.testrungroups(trg).create()
self.testrun_group = self.slickcon.testrungroups(trg).add_testrun(self.testrun)
def add_log_entry(self, message, level='DEBUG', loggername='', exceptionclassname='', exceptionmessage='', stacktrace=''):
entry = LogEntry()
entry.entryTime = int(round(time.time() * 1000))
entry.message = message
entry.level = level
entry.loggerName = loggername
entry.exceptionClassName = exceptionclassname
entry.exceptionMessage = exceptionmessage
entry.exceptionStackTrace = stacktrace
self.logqueue.append(entry)
def finish_testrun(self):
assert isinstance(self.testrun, Testrun)
testrun = Testrun()
if self.testrun.name:
testrun.name = self.testrun.name
else:
testrun.name = 'Tests run from slick-python'
testrun.id = self.testrun.id
testrun.runFinished = int(round(time.time() * 1000))
testrun.state = RunStatus.FINISHED
self.logger.debug("Finishing testrun named {}, with id {}.".format(testrun.name, testrun.id))
self.slickcon.testruns(testrun).update()
# TODO: need to add logs, files, etc. to a result
def file_result(self, name, status=ResultStatus.FAIL, reason=None, runlength=0, testdata=None, runstatus=RunStatus.FINISHED):
test = None
if testdata is not None:
assert isinstance(testdata, Testcase)
if testdata.automationId:
test = self.slickcon.testcases.findOne(projectid=self.project.id, automationId=testdata.automationId)
if test is None and hasattr(testdata, 'automationKey') and testdata.automationKey is not None:
test = self.slickcon.testcases.findOne(projectid=self.project.id, automationKey=testdata.automationId)
if test is None:
test = self.slickcon.testcases.findOne(projectid=self.project.id, name=name)
if test is None:
self.logger.debug("Creating testcase with name '{}' on project '{}'.".format(name, self.project.name))
test = Testcase()
if testdata is not None:
test = testdata
test.name = name
test.project = self.project.create_reference()
test = self.slickcon.testcases(test).create()
self.logger.info("Using newly created testcase with name '{}' and id '{}' for result.".format(name, test.id))
else:
if testdata is not None:
# update the test with the data passed in
assert isinstance(test, Testcase)
testdata.id = test.id
testdata.name = name
testdata.project = self.project.create_reference()
test = self.slickcon.testcases(testdata).update()
self.logger.info("Found testcase with name '{}' and id '{}' for result.".format(test.name, test.id))
result = Result()
result.testrun = self.testrun.create_reference()
result.testcase = test.create_reference()
result.project = self.project.create_reference()
result.release = self.releaseref
result.build = self.buildref
if self.component is not None:
result.component = self.componentref
if len(self.logqueue) > 0:
result.log = []
result.log.extend(self.logqueue)
self.logqueue[:] = []
result.reason = reason
result.runlength = runlength
result.end = int(round(time.time() * 1000))
result.started = result.end - result.runlength
result.status = status
result.runstatus = runstatus
self.logger.debug("Filing result of '{}' for test with name '{}'".format(result.status, result.testcase.name))
result = self.slickcon.results(result).create()
self.logger.info("Filed result of '{}' for test '{}', result id: {}".format(result.status, result.testcase.name,
result.id))
make_result_updatable(result, self.slickcon)
return result
| apache-2.0 | -1,363,627,592,683,206,400 | 46.944785 | 148 | 0.618554 | false | 4.175795 | true | false | false |
paulross/cpip | src/cpip/util/XmlWrite.py | 1 | 19169 | #!/usr/bin/env python
# CPIP is a C/C++ Preprocessor implemented in Python.
# Copyright (C) 2008-2017 Paul Ross
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Paul Ross: [email protected]
"""Writes XML and XHTML."""
__author__ = 'Paul Ross'
__date__ = '2009-09-15'
__rights__ = 'Copyright (c) Paul Ross'
import logging
#import traceback
import sys
#import htmlentitydefs
import base64
from cpip import ExceptionCpip
#: Global flag that sets the error behaviour
#:
#: If ``True`` then this module may raise an ``ExceptionXml`` and that might mask other
#: exceptions.
#:
#: If ``False`` no ExceptionXml will be raised but a ``logging.error(...)``
#: will be written. These will not mask other Exceptions.
RAISE_ON_ERROR = True
class ExceptionXml(ExceptionCpip):
"""Exception specialisation for the XML writer."""
pass
class ExceptionXmlEndElement(ExceptionXml):
"""Exception specialisation for end of element."""
pass
#####################################
# Section: Encoding/decoding methods.
#####################################
def encodeString(theS, theCharPrefix='_'):
"""Returns a string that is the argument encoded.
From RFC3548:
.. code-block:: text
Table 1: The Base 64 Alphabet
Value Encoding Value Encoding Value Encoding Value Encoding
0 A 17 R 34 i 51 z
1 B 18 S 35 j 52 0
2 C 19 T 36 k 53 1
3 D 20 U 37 l 54 2
4 E 21 V 38 m 55 3
5 F 22 W 39 n 56 4
6 G 23 X 40 o 57 5
7 H 24 Y 41 p 58 6
8 I 25 Z 42 q 59 7
9 J 26 a 43 r 60 8
10 K 27 b 44 s 61 9
11 L 28 c 45 t 62 +
12 M 29 d 46 u 63 /
13 N 30 e 47 v
14 O 31 f 48 w (pad) =
15 P 32 g 49 x
16 Q 33 h 50 y
See section 3 of : http://www.faqs.org/rfcs/rfc3548.html
:param theS: The string to be encoded.
:type theS: ``str``
:param theCharPrefix: A character to prefix the string.
:type theCharPrefix: ``str``
:returns: ``str`` -- Encoded string.
"""
if len(theCharPrefix) != 1:
errMsg = 'Prefix for encoding string must be a single character, not "%s"' % theCharPrefix
if RAISE_ON_ERROR:
raise ExceptionXml(errMsg)
logging.error(errMsg)
if sys.version_info[0] == 2:
myBy = bytes(theS)
retVal = base64.b64encode(myBy)
elif sys.version_info[0] == 3:
myBy = bytes(theS, 'ascii')
retVal = base64.b64encode(myBy).decode()
else:
assert 0, 'Unknown Python version %d' % sys.version_info.major
# if isinstance(theS, str):
# retVal = base64.b64encode(bytes(theS, 'ascii')).decode()
# else:
# retVal = base64.b64encode(theS)
# retVal = base64.b64encode(myBy)
# post-fix base64
retVal = retVal.replace('+', '-') \
.replace('/', '.') \
.replace('=', '_')
# Lead with prefix
return theCharPrefix + retVal
def decodeString(theS):
"""Returns a string that is the argument decoded. May raise a TypeError."""
# pre-fix base64
temp = theS[1:].replace('-', '+') \
.replace('.', '/') \
.replace('_', '=')
temp = base64.b64decode(temp)
return temp
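# Illustrative round-trip (assumed behaviour under Python 3; not part of the
# original module):
#
#   >>> encoded = encodeString('include/foo.h')
#   >>> encoded.startswith('_')
#   True
#   >>> decodeString(encoded)
#   b'include/foo.h'
#
# The prefix character guarantees the result begins with a letter or
# underscore, so the encoded form is safe to use as an XML name or ID.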
def nameFromString(theStr):
"""Returns a name from a string.
See http://www.w3.org/TR/1999/REC-html401-19991224/types.html#type-cdata
"ID and NAME tokens must begin with a letter ([A-Za-z]) and may be
followed by any number of letters, digits ([0-9]), hyphens ("-"),
underscores ("_"), colons (":"), and periods (".").
This also works for in namespaces as ':' is not used in the encoding.
:param theStr: The string to be encoded.
:type theStr: ``str``
:returns: ``str`` -- Encoded string."""
return encodeString(theStr, 'Z')
#################################
# End: Encoding/decoding methods.
#################################
#############################
# Section: XML Stream writer.
#############################
class XmlStream(object):
"""Creates and maintains an XML output stream."""
INDENT_STR = u' '
ENTITY_MAP = {
        ord('<')  : u'&lt;',
        ord('>')  : u'&gt;',
        ord('&')  : u'&amp;',
        ord("'")  : u'&apos;',
        ord('"')  : u'&quot;',
}
def __init__(self, theFout, theEnc='utf-8', theDtdLocal=None, theId=0, mustIndent=True):
"""Initialise with a writable file like object or a file path.
:param theFout: The file-like object or a path as a string. If the latter it
will be closed on __exit__.
:type theFout: ``_io.TextIOWrapper, str``
:param theEnc: The encoding to be used.
:type theEnc: ``str``
:param theDtdLocal: Any local DTD as a string.
:type theDtdLocal: ``NoneType``, ``str``
:param theId: An integer value to use as an ID string.
:type theId: ``int``
:param mustIndent: Flag, if True the elements will be indented (pretty printed).
:type mustIndent: ``bool``
:returns: ``NoneType``
"""
if isinstance(theFout, str):
self._file = open(theFout, 'w')
self._fileClose = True
else:
self._file = theFout
self._fileClose = False
self._enc = theEnc
self._dtdLocal = theDtdLocal
# Stack of strings
self._elemStk = []
self._inElem = False
self._canIndentStk = []
# An integer that represents a unique ID
self._intId = theId
self._mustIndent = mustIndent
@property
def id(self):
"""A unique ID in this stream. The ID is incremented on each call.
:returns: ``str`` -- The ID."""
self._intId += 1
return '%d' % (self._intId-1)
@property
def _canIndent(self):
"""Returns True if indentation is possible (no mixed content etc.).
:returns: ``bool`` -- True if the element can be indented."""
for b in self._canIndentStk:
if not b:
return False
return True
def _flipIndent(self, theBool):
"""Set the value at the tip of the indent stack to the given value.
:param theBool: Flag for indenting.
:type theBool: ``bool``
:returns: ``NoneType``
"""
assert(len(self._canIndentStk) > 0)
self._canIndentStk.pop()
self._canIndentStk.append(theBool)
def xmlSpacePreserve(self):
"""Suspends indentation for this element and its descendants.
:returns: ``NoneType``"""
if len(self._canIndentStk) == 0:
errMsg = 'xmlSpacePreserve() on empty stack.'
if RAISE_ON_ERROR:
raise ExceptionXml(errMsg)
logging.error(errMsg)
self._flipIndent(False)
def startElement(self, name, attrs):
"""Opens a named element with attributes.
:param name: Element name.
:type name: ``str``
:param attrs: Element attributes.
:type attrs: ``dict({str : [str]}), dict({})``
:returns: ``NoneType``"""
self._closeElemIfOpen()
self._indent()
self._file.write(u'<%s' % name)
kS = sorted(attrs.keys())
for k in kS:
self._file.write(u' %s="%s"' % (k, self._encode(attrs[k])))
self._inElem = True
self._canIndentStk.append(self._mustIndent)
self._elemStk.append(name)
def characters(self, theString):
"""Encodes the string and writes it to the output.
:param theString: The content.
:type theString: ``str``
:returns: ``NoneType``
"""
self._closeElemIfOpen()
encStr = self._encode(theString)
self._file.write(encStr)
# mixed content - don't indent
self._flipIndent(False)
def literal(self, theString):
"""Writes theString to the output without encoding.
:param theString: The content.
:type theString: ``str``
:returns: ``NoneType``
"""
self._closeElemIfOpen()
self._file.write(theString)
# mixed content - don't indent
self._flipIndent(False)
def comment(self, theS, newLine=False):
"""Writes a comment to the output stream.
:param theS: The comment.
:type theS: ``str``
:param newLine: If True the comment is written on a new line, if False it is written inline.
:type newLine: ``bool``
:returns: ``NoneType``
"""
self._closeElemIfOpen()
if newLine:
self._indent()
self._file.write('<!--%s-->' % self._encode(theS))
# mixed content - don't indent
#self._flipIndent(False)
def pI(self, theS):
"""Writes a Processing Instruction to the output stream."""
self._closeElemIfOpen()
self._file.write('<?%s?>' % self._encode(theS))
self._flipIndent(False)
def endElement(self, name):
"""Ends an element.
:param name: Element name.
:type name: ``str``
:returns: ``NoneType``
"""
if len(self._elemStk) == 0:
errMsg = 'endElement() on empty stack'
if RAISE_ON_ERROR:
raise ExceptionXmlEndElement(errMsg)
logging.error(errMsg)
if name != self._elemStk[-1]:
errMsg = 'endElement("%s") does not match "%s"' \
% (name, self._elemStk[-1])
if RAISE_ON_ERROR:
raise ExceptionXmlEndElement(errMsg)
logging.error(errMsg)
myName = self._elemStk.pop()
if self._inElem:
self._file.write(u' />')
self._inElem = False
else:
self._indent()
self._file.write(u'</%s>' % myName)
self._canIndentStk.pop()
def writeECMAScript(self, theScript):
"""Writes the ECMA script.
Example:
.. code-block:: html
<script type="text/ecmascript">
//<![CDATA[
...
// ]]>
</script>
:param theData: The ECMA script content.
:type theData: ``str``
:returns: ``NoneType``
"""
self.startElement('script', {'type' : "text/ecmascript"})
self.writeCDATA(theScript)
self.endElement('script')
def writeCDATA(self, theData):
"""Writes a CDATA section.
Example:
.. code-block:: html
<![CDATA[
...
]]>
:param theData: The CDATA content.
:type theData: ``str``
:returns: ``NoneType``
"""
self._closeElemIfOpen()
self.xmlSpacePreserve()
self._file.write(u'')
self._file.write(u'\n<![CDATA[\n')
self._file.write(theData)
self._file.write(u'\n]]>\n')
def writeCSS(self, theCSSMap):
"""Writes a style sheet as a CDATA section. Expects a dict of dicts.
Example:
.. code-block:: html
<style type="text/css"><![CDATA[
...
]]></style>
:param theCSSMap: Map of CSS elements.
:type theCSSMap: ``dict({str : [dict({str : [str]}), dict({str : [str]})]})``
:returns: ``NoneType``
"""
self.startElement('style', {'type' : "text/css"})
theLines = []
for style in sorted(theCSSMap.keys()):
theLines.append('%s {' % style)
for attr in sorted(theCSSMap[style].keys()):
theLines.append('%s : %s;' % (attr, theCSSMap[style][attr]))
theLines.append('}')
self.writeCDATA(u'\n'.join(theLines))
self.endElement('style')
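    # Illustrative input for writeCSS() (shape assumed from the type hint in
    # the docstring above):
    #
    #   xS.writeCSS({'body': {'font-family': 'sans-serif'},
    #                'h1.title': {'color': '#333'}})
    #
    # emits, inside a CDATA block:  body { font-family : sans-serif; } ...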
def _indent(self, offset=0):
"""Write out the indent string.
:param offset: The offset.
:type offset: ``int``
:returns: ``NoneType``
"""
if self._canIndent:
self._file.write(u'\n')
self._file.write(self.INDENT_STR*(len(self._elemStk)-offset))
def _closeElemIfOpen(self):
"""Close the element if open.
:returns: ``NoneType``
"""
if self._inElem:
self._file.write(u'>')
self._inElem = False
def _encode(self, theStr):
""""Apply the XML encoding such as ``'<'`` to ``'<'``
:param theStr: String to encode.
:type theStr: ``str``
:returns: ``str`` -- Encoded string.
"""
if sys.version_info.major == 2:
# Python 2 clunkiness
result = []
for c in theStr:
try:
result.append(self.ENTITY_MAP[ord(c)])
except KeyError:
result.append(c)
return u''.join(result)
else:
assert sys.version_info.major == 3
return theStr.translate(self.ENTITY_MAP)
def __enter__(self):
"""Context manager support.
:returns: ``cpip.plot.SVGWriter.SVGWriter,cpip.util.XmlWrite.XhtmlStream`` -- self"""
self._file.write(u"<?xml version='1.0' encoding=\"%s\"?>" % self._enc)
# Write local DTD?
return self
def __exit__(self, exc_type, exc_value, traceback):
"""Context manager support.
        :param exc_type: Exception type, if raised.
        :type exc_type: ``NoneType``
        :param exc_value: Exception, if raised.
        :type exc_value: ``NoneType``
        :param traceback: Traceback, if raised.
        :type traceback: ``NoneType``
:returns: ``NoneType``
"""
while len(self._elemStk):
self.endElement(self._elemStk[-1])
self._file.write(u'\n')
if self._fileClose:
self._file.close()
return False
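# Minimal usage sketch for XmlStream (illustrative, not part of the original
# module):
#
#   with XmlStream('out.xml') as xS:
#       xS.startElement('root', {'version': '1'})
#       xS.characters('a < b & c')   # written encoded as 'a &lt; b &amp; c'
#       xS.endElement('root')        # optional: __exit__() closes open elements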
#############################
# End: XML Stream writer.
#############################
###############################
# Section: XHTML Stream writer.
###############################
class XhtmlStream(XmlStream):
"""Specialisation of an XmlStream to handle XHTML."""
def __enter__(self):
"""Context manager support.
:returns: ``cpip.util.XmlWrite.XhtmlStream`` -- self
"""
super(XhtmlStream, self).__enter__()
self._file.write(u"""\n<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">""")
self.startElement(
'html',
{
'xmlns' : 'http://www.w3.org/1999/xhtml',
'xml:lang' : 'en',
'lang' : 'en',
}
)
return self
def charactersWithBr(self, sIn):
"""Writes the string replacing any ``\\n`` characters with ``<br/>`` elements.
:param sIn: The string to write.
:type sIn: ``str``
:returns: ``NoneType``
"""
while len(sIn) > 0:
i = sIn.find('\n')
if i != -1:
self.characters(sIn[:i])
with Element(self, 'br'):
pass
sIn = sIn[i+1:]
else:
self.characters(sIn)
break
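    # e.g. charactersWithBr('a\nb') writes 'a<br />b' (illustrative; the <br/>
    # comes from the empty Element defined at the bottom of this module).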
###############################
# End: XHTML Stream writer.
###############################
##################################
# Section: Element for any writer.
##################################
class Element(object):
"""Represents an element in a markup stream."""
def __init__(self, theXmlStream, theElemName, theAttrs=None):
"""Constructor.
:param theXmlStream: The XML stream.
:type theXmlStream: ``cpip.plot.SVGWriter.SVGWriter, cpip.util.XmlWrite.XhtmlStream``
:param theElemName: Element name.
:type theElemName: ``str``
:param theAttrs: Element attributes
:type theAttrs: ``NoneType, dict({str : [str]}), dict({})``
:returns: ``NoneType``
"""
self._stream = theXmlStream
self._name = theElemName
self._attrs = theAttrs or {}
def __enter__(self):
"""Context manager support.
:returns: ``cpip.plot.SVGWriter.SVGGroup,cpip.plot.SVGWriter.SVGLine,cpip.plot.SVGWriter.SVGRect,cpip.plot.SVGWriter.SVGText,cpip.util.XmlWrite.Element`` -- self
"""
# Write element and attributes to the stream
self._stream.startElement(self._name, self._attrs)
return self
def __exit__(self, excType, excValue, tb):
"""Context manager support.
TODO: Should respect RAISE_ON_ERROR here if excType is not None.
:param excType: Exception type, if raised.
:type excType: ``NoneType``
:param excValue: Exception, if raised.
:type excValue: ``NoneType``
:param tb: Traceback, if raised.
:type tb: ``NoneType``
:returns: ``NoneType``
"""
# if excType is not None:
# print('excType= ', excType)
# print('excValue= ', excValue)
# print('traceback=\n', '\n'.join(traceback.format_tb(tb)))
# Close element on the stream
self._stream.endElement(self._name)
#return True
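# Illustrative nesting of Element context managers (assumption: mirrors how
# the SVG/XHTML writers use this class; not taken from the original file):
#
#   with XhtmlStream(open('page.html', 'w')) as xS:
#       with Element(xS, 'body'):
#           with Element(xS, 'p', {'class': 'intro'}):
#               xS.charactersWithBr('line one\nline two')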
| gpl-2.0 | 1,042,945,868,849,424,800 | 31.107081 | 169 | 0.501226 | false | 4.095065 | false | false | false |
costadorione/purestream | servers/rocvideo.py | 1 | 2680 | # -*- coding: utf-8 -*-
#------------------------------------------------------------
# streamondemand - XBMC Plugin
# Conector para rocvideo
# http://www.mimediacenter.info/foro/viewforum.php?f=36
#------------------------------------------------------------
import re
from core import jsunpack
from core import logger
from core import scrapertools
def test_video_exists( page_url ):
logger.info("streamondemand.servers.rocvideo test_video_exists(page_url='%s')" % page_url)
return True,""
def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):
logger.info("streamondemand.servers.rocvideo url="+page_url)
if not "embed" in page_url:
page_url = page_url.replace("http://rocvideo.tv/","http://rocvideo.tv/embed-") + ".html"
data = scrapertools.cache_page( page_url )
data = scrapertools.find_single_match(data,"<script type='text/javascript'>(eval\(function\(p,a,c,k,e,d.*?)</script>")
data = jsunpack.unpack(data)
logger.info("data="+data)
#file:"http://s1.rocvideo.tv/files/2/aqsk8q5mjcoh1d/INT3NS4HDTS-L4T.mkv.mp4
media_url = scrapertools.get_match(data,'file:"([^"]+)"')
video_urls = []
video_urls.append( [ scrapertools.get_filename_from_url(media_url)[-4:]+" [rocvideo]",media_url])
return video_urls
# Finds videos for this server in the text passed in
def find_videos(data):
    # Manually add some known-bad ones to skip them
encontrados = set()
devuelve = []
#http://rocvideo.net/mfhpecruzj2q
#http://rocvideo.tv/mfhpecruzj2q
patronvideos = 'rocvideo.(?:tv|net)/embed-([a-z0-9A-Z]+)'
logger.info("streamondemand.servers.rocvideo find_videos #"+patronvideos+"#")
matches = re.compile(patronvideos,re.DOTALL).findall(data)
for match in matches:
titulo = "[rocvideo]"
url = "http://rocvideo.tv/embed-"+match+".html"
if url not in encontrados:
logger.info(" url="+url)
devuelve.append( [ titulo , url , 'rocvideo' ] )
encontrados.add(url)
else:
logger.info(" url duplicada="+url)
patronvideos = 'rocvideo.(?:tv|net)/([a-z0-9A-Z]+)'
logger.info("streamondemand.servers.rocvideo find_videos #"+patronvideos+"#")
matches = re.compile(patronvideos,re.DOTALL).findall(data)
for match in matches:
titulo = "[rocvideo]"
url = "http://rocvideo.tv/embed-"+match+".html"
if url not in encontrados:
logger.info(" url="+url)
devuelve.append( [ titulo , url , 'rocvideo' ] )
encontrados.add(url)
else:
logger.info(" url duplicada="+url)
return devuelve
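# Illustrative call (assumed input; the pattern match is traced by hand):
#   find_videos('see http://rocvideo.tv/abc123x')
# returns [['[rocvideo]', 'http://rocvideo.tv/embed-abc123x.html', 'rocvideo']]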
| gpl-3.0 | 6,527,898,725,564,416,000 | 36.180556 | 122 | 0.609638 | false | 3.225301 | false | false | false |
Pettythug/ForThisClass | Assignment 5/common.py | 1 | 1698 | from random import *
from time import sleep
################### MODEL #############################
def collide_boxes(box1, box2):
x1, y1, w1, h1 = box1
x2, y2, w2, h2 = box2
return x1 < x2 + w2 and y1 < y2 + h2 and x2 < x1 + w1 and y2 < y1 + h1
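# Worked example (illustrative): [x, y, w, h] boxes overlap only when they
# intersect on both axes.
#   collide_boxes([0, 0, 10, 10], [5, 5, 10, 10])   # -> True  (corners overlap)
#   collide_boxes([0, 0, 10, 10], [20, 20, 5, 5])   # -> False (disjoint)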
class Model():
cmd_directions = {'up': (0, -1),
'down': (0, 1),
'left': (-1, 0),
'right': (1, 0)}
def __init__(self):
self.borders = [[0, 0, 2, 300],
[0, 0, 400, 2],
[398, 0, 2, 300],
[0, 298, 400, 2]]
self.pellets = [ [randint(10, 380), randint(10, 280), 5, 5]
for _ in range(4) ]
self.game_over = False
self.mydir = self.cmd_directions['down'] # start direction: down
self.mybox = [200, 150, 10, 10] # start in middle of the screen
def do_cmd(self, cmd):
if cmd == 'quit':
self.game_over = True
else:
self.mydir = self.cmd_directions[cmd]
def update(self):
# move me
self.mybox[0] += self.mydir[0]
self.mybox[1] += self.mydir[1]
# potential collision with a border
for b in self.borders:
if collide_boxes(self.mybox, b):
self.mybox = [200, 150, 10, 10]
# potential collision with a pellet
for index, pellet in enumerate(self.pellets):
if collide_boxes(self.mybox, pellet):
self.mybox[2] *= 1.2
self.mybox[3] *= 1.2
self.pellets[index] = [randint(10, 380), randint(10, 280), 5, 5]
| mit | 2,167,591,220,567,360,500 | 32.313725 | 80 | 0.446996 | false | 3.362376 | false | false | false |
selam/python-vast-xml-generator | vast/ad.py | 1 | 2544 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2015 Timu Eren <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from creative import Creative
REQUIRED_INLINE = ['AdSystem', 'AdTitle']
REQUIRED_WRAPPER = ['AdSystem', 'VASTAdTagURI']
def validateSettings(settings, requireds):
keys = settings.keys()
for required in requireds:
if required not in keys:
raise Exception("Missing required settings: {required}".format(required=required))
def validateInLineSettings(settings):
validateSettings(settings, REQUIRED_INLINE)
def validateWrapperSettings(settings):
validateSettings(settings, REQUIRED_WRAPPER)
class Ad(object):
def __init__(self, settings={}):
self.errors = []
self.surveys = []
self.impressions = []
self.creatives = []
if settings["structure"].lower() == 'wrapper':
validateWrapperSettings(settings)
self.VASTAdTagURI = settings["VASTAdTagURI"]
else:
validateInLineSettings(settings)
self.id = settings["id"]
self.sequence = settings.get("sequence", None)
self.structure = settings["structure"]
self.AdSystem = settings["AdSystem"]
self.AdTitle = settings["AdTitle"]
# optional elements
self.Error = settings.get("Error", None)
self.Description = settings.get("Description", None)
self.Advertiser = settings.get("Advertiser", None)
self.Pricing = settings.get("Pricing", None)
self.Extensions = settings.get("Extensions", None)
def attachSurvey(self, settings):
        survey = {"url": settings["url"]}
if "type" in settings:
survey["type"] = settings["type"]
self.surveys.append(survey)
def attachImpression(self, settings):
self.impressions.append(settings)
return self
def attachCreative(self, _type, options):
creative = Creative(_type, options)
self.creatives.append(creative)
return creative
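# Illustrative construction (field values are assumptions, and the Creative
# options depend on the separate creative module):
#
#   ad = Ad({'structure': 'inline', 'id': 'ad-1',
#            'AdSystem': 'ExampleAdServer', 'AdTitle': 'Demo spot'})
#   ad.attachImpression({'url': 'http://example.com/imp'})
#   creative = ad.attachCreative('Linear', {'Duration': '00:00:30'})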
| apache-2.0 | -7,131,255,563,608,026,000 | 31.202532 | 94 | 0.666274 | false | 4.038095 | false | false | false |
jmchilton/galaxy-central | tools/filters/ucsc_gene_bed_to_exon_bed.py | 1 | 4444 | #!/usr/bin/env python2.3
"""
Read a table dump in the UCSC gene table format and print a tab separated
list of intervals corresponding to requested features of each gene.
usage: ucsc_gene_bed_to_exon_bed.py [options]
options:
-h, --help show this help message and exit
-rREGION, --region=REGION
Limit to region: one of coding, utr3, utr5, transcribed [default]
-e, --exons Only print intervals overlapping an exon
-i, --input=inputfile input file
-o, --output=outputfile output file
"""
import optparse, string, sys
def main():
# Parse command line
parser = optparse.OptionParser( usage="%prog [options] " )
parser.add_option( "-r", "--region", dest="region", default="transcribed",
help="Limit to region: one of coding, utr3, utr5, transcribed [default]" )
parser.add_option( "-e", "--exons", action="store_true", dest="exons",
help="Only print intervals overlapping an exon" )
parser.add_option( "-s", "--strand", action="store_true", dest="strand",
help="Print strand after interval" )
parser.add_option( "-i", "--input", dest="input", default=None,
help="Input file" )
parser.add_option( "-o", "--output", dest="output", default=None,
help="Output file" )
options, args = parser.parse_args()
assert options.region in ( 'coding', 'utr3', 'utr5', 'transcribed' ), "Invalid region argument"
try:
out_file = open (options.output,"w")
except:
print >> sys.stderr, "Bad output file."
sys.exit(0)
try:
in_file = open (options.input)
except:
print >> sys.stderr, "Bad input file."
sys.exit(0)
print "Region:", options.region+";"
print "Only overlap with Exons:",
if options.exons:
print "Yes"
else:
print "No"
# Read table and handle each gene
for line in in_file:
try:
if line[0:1] == "#":
continue
# Parse fields from gene tabls
fields = line.split( '\t' )
chrom = fields[0]
tx_start = int( fields[1] )
tx_end = int( fields[2] )
name = fields[3]
strand = fields[5].replace(" ","_")
cds_start = int( fields[6] )
cds_end = int( fields[7] )
# Determine the subset of the transcribed region we are interested in
if options.region == 'utr3':
if strand == '-': region_start, region_end = tx_start, cds_start
else: region_start, region_end = cds_end, tx_end
elif options.region == 'utr5':
if strand == '-': region_start, region_end = cds_end, tx_end
else: region_start, region_end = tx_start, cds_start
elif options.region == 'coding':
region_start, region_end = cds_start, cds_end
else:
region_start, region_end = tx_start, tx_end
# If only interested in exons, print the portion of each exon overlapping
# the region of interest, otherwise print the span of the region
if options.exons:
exon_starts = map( int, fields[11].rstrip( ',\n' ).split( ',' ) )
exon_starts = map((lambda x: x + tx_start ), exon_starts)
exon_ends = map( int, fields[10].rstrip( ',\n' ).split( ',' ) )
                exon_ends = map((lambda x, y: x + y), exon_starts, exon_ends)
for start, end in zip( exon_starts, exon_ends ):
start = max( start, region_start )
end = min( end, region_end )
if start < end:
if strand: print_tab_sep(out_file, chrom, start, end, name, "0", strand )
else: print_tab_sep(out_file, chrom, start, end )
else:
if strand: print_tab_sep(out_file, chrom, region_start, region_end, name, "0", strand )
else: print_tab_sep(out_file, chrom, region_start, region_end )
except:
continue
def print_tab_sep(out_file, *args ):
"""Print items in `l` to stdout separated by tabs"""
print >>out_file, string.join( [ str( f ) for f in args ], '\t' )
if __name__ == "__main__": main()
| mit | -1,295,368,544,682,247,000 | 40.924528 | 103 | 0.534878 | false | 3.778912 | false | false | false |
kepstin/picard | picard/ui/ui_infostatus.py | 1 | 4875 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui/infostatus.ui'
#
# Created: Thu Jun 27 19:18:14 2013
# by: PyQt4 UI code generator 4.9.3
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_InfoStatus(object):
def setupUi(self, InfoStatus):
InfoStatus.setObjectName(_fromUtf8("InfoStatus"))
InfoStatus.resize(350, 24)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(InfoStatus.sizePolicy().hasHeightForWidth())
InfoStatus.setSizePolicy(sizePolicy)
InfoStatus.setMinimumSize(QtCore.QSize(0, 0))
self.horizontalLayout = QtGui.QHBoxLayout(InfoStatus)
self.horizontalLayout.setSpacing(2)
self.horizontalLayout.setMargin(0)
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.val1 = QtGui.QLabel(InfoStatus)
self.val1.setMinimumSize(QtCore.QSize(40, 0))
self.val1.setText(_fromUtf8(""))
self.val1.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.val1.setObjectName(_fromUtf8("val1"))
self.horizontalLayout.addWidget(self.val1)
self.label1 = QtGui.QLabel(InfoStatus)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label1.sizePolicy().hasHeightForWidth())
self.label1.setSizePolicy(sizePolicy)
self.label1.setFrameShape(QtGui.QFrame.NoFrame)
self.label1.setTextFormat(QtCore.Qt.AutoText)
self.label1.setScaledContents(False)
self.label1.setMargin(1)
self.label1.setObjectName(_fromUtf8("label1"))
self.horizontalLayout.addWidget(self.label1)
self.val2 = QtGui.QLabel(InfoStatus)
self.val2.setMinimumSize(QtCore.QSize(40, 0))
self.val2.setText(_fromUtf8(""))
self.val2.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.val2.setObjectName(_fromUtf8("val2"))
self.horizontalLayout.addWidget(self.val2)
self.label2 = QtGui.QLabel(InfoStatus)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label2.sizePolicy().hasHeightForWidth())
self.label2.setSizePolicy(sizePolicy)
self.label2.setText(_fromUtf8(""))
self.label2.setObjectName(_fromUtf8("label2"))
self.horizontalLayout.addWidget(self.label2)
self.val3 = QtGui.QLabel(InfoStatus)
self.val3.setMinimumSize(QtCore.QSize(40, 0))
self.val3.setText(_fromUtf8(""))
self.val3.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.val3.setObjectName(_fromUtf8("val3"))
self.horizontalLayout.addWidget(self.val3)
self.label3 = QtGui.QLabel(InfoStatus)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label3.sizePolicy().hasHeightForWidth())
self.label3.setSizePolicy(sizePolicy)
self.label3.setText(_fromUtf8(""))
self.label3.setObjectName(_fromUtf8("label3"))
self.horizontalLayout.addWidget(self.label3)
self.val4 = QtGui.QLabel(InfoStatus)
self.val4.setMinimumSize(QtCore.QSize(40, 0))
self.val4.setText(_fromUtf8(""))
self.val4.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.val4.setObjectName(_fromUtf8("val4"))
self.horizontalLayout.addWidget(self.val4)
self.label4 = QtGui.QLabel(InfoStatus)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label4.sizePolicy().hasHeightForWidth())
self.label4.setSizePolicy(sizePolicy)
self.label4.setText(_fromUtf8(""))
self.label4.setScaledContents(False)
self.label4.setObjectName(_fromUtf8("label4"))
self.horizontalLayout.addWidget(self.label4)
self.retranslateUi(InfoStatus)
QtCore.QMetaObject.connectSlotsByName(InfoStatus)
def retranslateUi(self, InfoStatus):
InfoStatus.setWindowTitle(_("Form"))
| gpl-2.0 | 1,008,436,030,703,050,100 | 47.267327 | 99 | 0.710769 | false | 3.869048 | false | false | false |
kimvais/cryptography | tests/hazmat/primitives/twofactor/test_totp.py | 5 | 5284 | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import pytest
from cryptography.exceptions import _Reasons
from cryptography.hazmat.backends.interfaces import HMACBackend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.twofactor import InvalidToken
from cryptography.hazmat.primitives.twofactor.totp import TOTP
from ....utils import (
load_nist_vectors, load_vectors_from_file, raises_unsupported_algorithm
)
vectors = load_vectors_from_file(
"twofactor/rfc-6238.txt", load_nist_vectors)
@pytest.mark.requires_backend_interface(interface=HMACBackend)
class TestTOTP(object):
@pytest.mark.supported(
only_if=lambda backend: backend.hmac_supported(hashes.SHA1()),
skip_message="Does not support HMAC-SHA1."
)
@pytest.mark.parametrize(
"params", [i for i in vectors if i["mode"] == b"SHA1"])
def test_generate_sha1(self, backend, params):
secret = params["secret"]
time = int(params["time"])
totp_value = params["totp"]
totp = TOTP(secret, 8, hashes.SHA1(), 30, backend)
assert totp.generate(time) == totp_value
@pytest.mark.supported(
only_if=lambda backend: backend.hmac_supported(hashes.SHA256()),
skip_message="Does not support HMAC-SHA256."
)
@pytest.mark.parametrize(
"params", [i for i in vectors if i["mode"] == b"SHA256"])
def test_generate_sha256(self, backend, params):
secret = params["secret"]
time = int(params["time"])
totp_value = params["totp"]
totp = TOTP(secret, 8, hashes.SHA256(), 30, backend)
assert totp.generate(time) == totp_value
@pytest.mark.supported(
only_if=lambda backend: backend.hmac_supported(hashes.SHA512()),
skip_message="Does not support HMAC-SHA512."
)
@pytest.mark.parametrize(
"params", [i for i in vectors if i["mode"] == b"SHA512"])
def test_generate_sha512(self, backend, params):
secret = params["secret"]
time = int(params["time"])
totp_value = params["totp"]
totp = TOTP(secret, 8, hashes.SHA512(), 30, backend)
assert totp.generate(time) == totp_value
@pytest.mark.supported(
only_if=lambda backend: backend.hmac_supported(hashes.SHA1()),
skip_message="Does not support HMAC-SHA1."
)
@pytest.mark.parametrize(
"params", [i for i in vectors if i["mode"] == b"SHA1"])
def test_verify_sha1(self, backend, params):
secret = params["secret"]
time = int(params["time"])
totp_value = params["totp"]
totp = TOTP(secret, 8, hashes.SHA1(), 30, backend)
assert totp.verify(totp_value, time) is None
@pytest.mark.supported(
only_if=lambda backend: backend.hmac_supported(hashes.SHA256()),
skip_message="Does not support HMAC-SHA256."
)
@pytest.mark.parametrize(
"params", [i for i in vectors if i["mode"] == b"SHA256"])
def test_verify_sha256(self, backend, params):
secret = params["secret"]
time = int(params["time"])
totp_value = params["totp"]
totp = TOTP(secret, 8, hashes.SHA256(), 30, backend)
assert totp.verify(totp_value, time) is None
@pytest.mark.supported(
only_if=lambda backend: backend.hmac_supported(hashes.SHA512()),
skip_message="Does not support HMAC-SHA512."
)
@pytest.mark.parametrize(
"params", [i for i in vectors if i["mode"] == b"SHA512"])
def test_verify_sha512(self, backend, params):
secret = params["secret"]
time = int(params["time"])
totp_value = params["totp"]
totp = TOTP(secret, 8, hashes.SHA512(), 30, backend)
assert totp.verify(totp_value, time) is None
def test_invalid_verify(self, backend):
secret = b"12345678901234567890"
time = 59
totp = TOTP(secret, 8, hashes.SHA1(), 30, backend)
with pytest.raises(InvalidToken):
totp.verify(b"12345678", time)
def test_floating_point_time_generate(self, backend):
secret = b"12345678901234567890"
time = 59.1
totp = TOTP(secret, 8, hashes.SHA1(), 30, backend)
assert totp.generate(time) == b"94287082"
def test_get_provisioning_uri(self, backend):
secret = b"12345678901234567890"
totp = TOTP(secret, 6, hashes.SHA1(), 30, backend=backend)
assert totp.get_provisioning_uri("Alice Smith", None) == (
"otpauth://totp/Alice%20Smith?digits=6&secret=GEZDGNBVG"
"Y3TQOJQGEZDGNBVGY3TQOJQ&algorithm=SHA1&period=30")
assert totp.get_provisioning_uri("Alice Smith", 'World') == (
"otpauth://totp/World:Alice%20Smith?digits=6&secret=GEZ"
"DGNBVGY3TQOJQGEZDGNBVGY3TQOJQ&algorithm=SHA1&issuer=World"
"&period=30")
def test_invalid_backend():
secret = b"12345678901234567890"
pretend_backend = object()
with raises_unsupported_algorithm(_Reasons.BACKEND_MISSING_INTERFACE):
TOTP(secret, 8, hashes.SHA1(), 30, pretend_backend)
| bsd-3-clause | 8,574,700,208,480,949,000 | 34.463087 | 79 | 0.64383 | false | 3.47403 | true | false | false |
pantheon-systems/etl-framework | etl_framework/configs/tests/test_job.py | 1 | 1196 | """ test the environment config """
import os
import unittest
from etl_framework.configs.job import JobConfig
class JobConfigTestCases(unittest.TestCase):
""" class for test cases """
JOB_CONFIG_FILEPATH = os.path.join(
os.path.dirname(__file__),
'fixtures/job.json'
)
def setUp(self):
self.job_config = JobConfig.create_from_filepath(
self.JOB_CONFIG_FILEPATH
)
def test_get_environment_configuration(self):
""" stuff """
# This is determined by the fixtures/job.json config
# and should be the value of "environment" key
expected_output = {
"config_dir": "fixtures",
"config_filename": "environment.json"
}
output = self.job_config.get_environment_configuration()
self.assertEqual(output, expected_output)
def test_get_environment_configuration_filepath(self):
""" stuff """
# This is determined by the fixtures/job.json config
expected_filepath = 'fixtures/environment.json'
filepath = self.job_config.get_environment_configuration_filepath()
self.assertEqual(filepath, expected_filepath)
| mit | -4,390,787,021,299,247,600 | 25 | 75 | 0.63796 | false | 4.271429 | true | false | false |
bocchan/DAalgorithm | matching.py | 1 | 1405 | import copy
import numpy as np
m_prefs=np.array([[4,0,1,2,3],[1,2,0,3,4],[3,1,0,2,4]])
f_prefs=np.array([[0,1,2,3],[1,0,3,2],[1,2,0,3],[0,3,2,1]])
def array_to_dict(array):
dict = {}
for x, y in enumerate(array):
dict[x] = list(y)
return dict
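# Example (illustrative): array_to_dict(np.array([[1, 0], [0, 1]])) returns
# {0: [1, 0], 1: [0, 1]}, i.e. row index -> that agent's preference list.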
def deferred_acceptance(m_prefs,f_prefs):
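    """Men-proposing Gale-Shapley deferred-acceptance algorithm.
    m_prefs[i] and f_prefs[j] list partner indices from most to least
    preferred.  Returns a dict mapping each woman to her matched man; the
    matching is stable: no man and woman both prefer each other to their
    assigned partners.
    """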
m_prefers = array_to_dict(m_prefs)
f_prefers = array_to_dict(f_prefs)
guys = sorted(m_prefers.keys())
gals = sorted(f_prefers.keys())
guysfree = guys[:]
engaged = {}
guyprefers2 = copy.deepcopy(m_prefers)
galprefers2 = copy.deepcopy(f_prefers)
while guysfree:
guy = guysfree.pop(0)
guyslist = guyprefers2[guy]
gal = guyslist.pop(0)
fiance = engaged.get(gal)
if not fiance:
# She's free
engaged[gal] = guy
else:
# The bounder proposes to an engaged lass!
galslist = galprefers2[gal]
if galslist.index(fiance) > galslist.index(guy):
# She prefers new guy
engaged[gal] = guy
if guyprefers2[fiance]:
# Ex has more girls to try
guysfree.append(fiance)
else:
# She is faithful to old fiance
if guyslist:
# Look again
guysfree.append(guy)
return engaged
print deferred_acceptance(m_prefs, f_prefs)  # maps each woman to her matched man | gpl-3.0 | 8,446,696,807,165,299,000 | 28.914894 | 60 | 0.533808 | false | 2.983015 | false | false | false |
eliben/code-for-blog | 2017/async-socket-server/simple-client.py | 1 | 2990 | # Simple client used to interact with concurrent servers.
#
# Launches N concurrent client connections, each executing a pre-set sequence of
# sends to the server, and logs what was received back.
#
# Tested with Python 3.6
#
# Eli Bendersky [http://eli.thegreenplace.net]
# This code is in the public domain.
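#
# Example invocation (illustrative): three concurrent connections against a
# server on localhost port 9090:
#
#   $ python simple-client.py -n 3 localhost 9090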
import argparse
import logging
import socket
import sys
import threading
import time
class ReadThread(threading.Thread):
def __init__(self, name, sockobj):
super().__init__()
self.sockobj = sockobj
self.name = name
self.bufsize = 8 * 1024
def run(self):
fullbuf = b''
while True:
buf = self.sockobj.recv(self.bufsize)
logging.info('{0} received {1}'.format(self.name, buf))
fullbuf += buf
if b'1111' in fullbuf:
break
def make_new_connection(name, host, port):
"""Creates a single socket connection to the host:port.
Sets a pre-set sequence of messages to the server with pre-set delays; in
parallel, reads from the socket in a separate thread.
"""
sockobj = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sockobj.connect((host, port))
if sockobj.recv(1) != b'*':
logging.error('Something is wrong! Did not receive *')
logging.info('{0} connected...'.format(name))
rthread = ReadThread(name, sockobj)
rthread.start()
s = b'^abc$de^abte$f'
logging.info('{0} sending {1}'.format(name, s))
sockobj.send(s)
time.sleep(1.0)
s = b'xyz^123'
logging.info('{0} sending {1}'.format(name, s))
sockobj.send(s)
time.sleep(1.0)
# The 0000 sent to the server here will result in an echo of 1111, which is
# a sign for the reading thread to terminate.
# Add WXY after 0000 to enable kill-switch in some servers.
s = b'25$^ab0000$abab'
logging.info('{0} sending {1}'.format(name, s))
sockobj.send(s)
time.sleep(0.2)
rthread.join()
sockobj.close()
logging.info('{0} disconnecting'.format(name))
def main():
argparser = argparse.ArgumentParser('Simple TCP client')
argparser.add_argument('host', help='Server host name')
argparser.add_argument('port', type=int, help='Server port')
argparser.add_argument('-n', '--num_concurrent', type=int,
default=1,
help='Number of concurrent connections')
args = argparser.parse_args()
logging.basicConfig(
level=logging.DEBUG,
format='%(levelname)s:%(asctime)s:%(message)s')
t1 = time.time()
connections = []
for i in range(args.num_concurrent):
name = 'conn{0}'.format(i)
tconn = threading.Thread(target=make_new_connection,
args=(name, args.host, args.port))
tconn.start()
connections.append(tconn)
for conn in connections:
conn.join()
print('Elapsed:', time.time() - t1)
if __name__ == '__main__':
main()
| unlicense | -6,060,841,360,586,669,000 | 28.313725 | 80 | 0.616388 | false | 3.646341 | false | false | false |
nayarsystems/nxsugar-py | nxsugarpy/info.py | 1 | 1118 | # -*- coding: utf-8 -*-
##############################################################################
#
# nxsugarpy, a Python library for building nexus services with python
# Copyright (C) 2016 by the nxsugarpy team
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
wanIps = "1.2.3.4"
lanIps = ["10.0.0.1", "172.16.0.1"]
user = "root"
directory = "/my/dir"
started = time.time() | lgpl-3.0 | 5,863,487,593,986,522,000 | 38.964286 | 78 | 0.60644 | false | 4.050725 | false | false | false |
edx-solutions/edx-platform | common/djangoapps/util/tests/test_course.py | 4 | 4770 | """
Tests for course utils.
"""
import ddt
import mock
from django.conf import settings
from openedx.core.djangoapps.content.course_overviews.models import CourseOverview
from util.course import get_link_for_about_page
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
@ddt.ddt
class TestCourseSharingLinks(ModuleStoreTestCase):
"""
Tests for course sharing links.
"""
def setUp(self):
super(TestCourseSharingLinks, self).setUp()
# create test mongo course
self.course = CourseFactory.create(
org='test_org',
number='test_number',
run='test_run',
default_store=ModuleStoreEnum.Type.split,
social_sharing_url='test_social_sharing_url',
)
# load this course into course overview and set it's marketing url
self.course_overview = CourseOverview.get_from_id(self.course.id)
self.course_overview.marketing_url = 'test_marketing_url'
self.course_overview.save()
def get_course_sharing_link(self, enable_social_sharing, enable_mktg_site, use_overview=True):
"""
Get course sharing link.
Arguments:
enable_social_sharing(Boolean): To indicate whether social sharing is enabled.
enable_mktg_site(Boolean): A feature flag to decide activation of marketing site.
Keyword Arguments:
use_overview: indicates whether course overview or course descriptor should get
past to get_link_for_about_page.
Returns course sharing url.
"""
mock_settings = {
'FEATURES': {
'ENABLE_MKTG_SITE': enable_mktg_site
},
'SOCIAL_SHARING_SETTINGS': {
'CUSTOM_COURSE_URLS': enable_social_sharing
},
}
with mock.patch.multiple('django.conf.settings', **mock_settings):
course_sharing_link = get_link_for_about_page(
self.course_overview if use_overview else self.course
)
return course_sharing_link
@ddt.data(
(True, True, 'test_social_sharing_url'),
(False, True, 'test_marketing_url'),
(True, False, 'test_social_sharing_url'),
(False, False, '{}/courses/course-v1:test_org+test_number+test_run/about'.format(settings.LMS_ROOT_URL)),
)
@ddt.unpack
def test_sharing_link_with_settings(self, enable_social_sharing, enable_mktg_site, expected_course_sharing_link):
"""
Verify the method gives correct course sharing url on settings manipulations.
"""
actual_course_sharing_link = self.get_course_sharing_link(
enable_social_sharing=enable_social_sharing,
enable_mktg_site=enable_mktg_site,
)
self.assertEqual(actual_course_sharing_link, expected_course_sharing_link)
@ddt.data(
(['social_sharing_url'], 'test_marketing_url'),
(['marketing_url'], 'test_social_sharing_url'),
(
['social_sharing_url', 'marketing_url'],
'{}/courses/course-v1:test_org+test_number+test_run/about'.format(settings.LMS_ROOT_URL)
),
)
@ddt.unpack
def test_sharing_link_with_course_overview_attrs(self, overview_attrs, expected_course_sharing_link):
"""
Verify the method gives correct course sharing url when:
1. Neither marketing url nor social sharing url is set.
2. Either marketing url or social sharing url is set.
"""
for overview_attr in overview_attrs:
setattr(self.course_overview, overview_attr, None)
self.course_overview.save()
actual_course_sharing_link = self.get_course_sharing_link(
enable_social_sharing=True,
enable_mktg_site=True,
)
self.assertEqual(actual_course_sharing_link, expected_course_sharing_link)
@ddt.data(
(True, 'test_social_sharing_url'),
(
False,
'{}/courses/course-v1:test_org+test_number+test_run/about'.format(settings.LMS_ROOT_URL)
),
)
@ddt.unpack
def test_sharing_link_with_course_descriptor(self, enable_social_sharing, expected_course_sharing_link):
"""
Verify the method gives correct course sharing url on passing
course descriptor as a parameter.
"""
actual_course_sharing_link = self.get_course_sharing_link(
enable_social_sharing=enable_social_sharing,
enable_mktg_site=True,
use_overview=False,
)
self.assertEqual(actual_course_sharing_link, expected_course_sharing_link)
| agpl-3.0 | -5,378,677,625,954,462,000 | 35.976744 | 117 | 0.63522 | false | 4.042373 | true | false | false |
rdmorganiser/rdmo | rdmo/projects/views/membership.py | 1 | 4298 | import logging
from django.contrib.sites.models import Site
from django.http import (HttpResponseBadRequest, HttpResponseForbidden,
HttpResponseRedirect)
from django.shortcuts import get_object_or_404
from django.template.loader import render_to_string
from django.urls import reverse
from django.views.generic import DeleteView, UpdateView
from django.views.generic.edit import FormView
from rdmo.accounts.utils import is_site_manager
from rdmo.core.mail import send_mail
from rdmo.core.views import ObjectPermissionMixin, RedirectViewMixin
from ..forms import MembershipCreateForm
from ..models import Membership, Project
from ..utils import is_last_owner
logger = logging.getLogger(__name__)
class MembershipCreateView(ObjectPermissionMixin, RedirectViewMixin, FormView):
model = Membership
form_class = MembershipCreateForm
permission_required = 'projects.add_membership_object'
template_name = 'projects/membership_form.html'
def dispatch(self, *args, **kwargs):
self.project = get_object_or_404(Project.objects.all(), pk=self.kwargs['project_id'])
return super().dispatch(*args, **kwargs)
def get_queryset(self):
return Membership.objects.filter(project=self.project)
def get_permission_object(self):
return self.project
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['project'] = self.project
kwargs['is_site_manager'] = is_site_manager(self.request.user)
return kwargs
def get_success_url(self):
return self.project.get_absolute_url()
def form_valid(self, form):
invite = form.save()
if invite is not None:
context = {
'invite_url': self.request.build_absolute_uri(reverse('project_join', args=[invite.token])),
'invite_user': invite.user,
'project': invite.project,
'user': self.request.user,
'site': Site.objects.get_current()
}
subject = render_to_string('projects/email/project_invite_subject.txt', context)
message = render_to_string('projects/email/project_invite_message.txt', context)
# send the email
send_mail(subject, message, to=[invite.email])
return super().form_valid(form)
class MembershipUpdateView(ObjectPermissionMixin, RedirectViewMixin, UpdateView):
fields = ('role', )
permission_required = 'projects.change_membership_object'
def get_queryset(self):
return Membership.objects.filter(project_id=self.kwargs.get('project_id'))
def get_permission_object(self):
return self.get_object().project
class MembershipDeleteView(ObjectPermissionMixin, RedirectViewMixin, DeleteView):
permission_required = 'projects.delete_membership_object'
def get_queryset(self):
return Membership.objects.filter(project_id=self.kwargs.get('project_id'))
def delete(self, *args, **kwargs):
self.obj = self.get_object()
if (self.request.user in self.obj.project.owners) or is_site_manager(self.request.user):
# user is owner or site manager
if is_last_owner(self.obj.project, self.obj.user):
logger.info('User "%s" not allowed to remove last user "%s"', self.request.user.username, self.obj.user.username)
return HttpResponseBadRequest()
else:
logger.info('User "%s" deletes user "%s"', self.request.user.username, self.obj.user.username)
success_url = reverse('project', args=[self.get_object().project.id])
self.obj.delete()
return HttpResponseRedirect(success_url)
elif self.request.user == self.obj.user:
# user wants to remove him/herself
logger.info('User "%s" deletes himself.', self.request.user.username)
success_url = reverse('projects')
self.obj.delete()
return HttpResponseRedirect(success_url)
else:
logger.info('User "%s" not allowed to remove user "%s"', self.request.user.username, self.obj.user.username)
return HttpResponseForbidden()
def get_permission_object(self):
return self.get_object().project
| apache-2.0 | 2,156,405,560,474,445,600 | 37.720721 | 129 | 0.66496 | false | 4.152657 | false | false | false |
jeroendierckx/Camelot | camelot/view/controls/formview.py | 1 | 14400 | # ============================================================================
#
# Copyright (C) 2007-2012 Conceptive Engineering bvba. All rights reserved.
# www.conceptive.be / [email protected]
#
# This file is part of the Camelot Library.
#
# This file may be used under the terms of the GNU General Public
# License version 2.0 as published by the Free Software Foundation
# and appearing in the file license.txt included in the packaging of
# this file. Please review this information to ensure GNU
# General Public Licensing requirements will be met.
#
# If you are unsure which license is appropriate for your use, please
# visit www.python-camelot.com or contact [email protected]
#
# This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
# WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
#
# For use of this library in commercial applications, please contact
# [email protected]
#
# ============================================================================
"""form view"""
import functools
import logging
LOGGER = logging.getLogger('camelot.view.controls.formview')
from PyQt4 import QtGui
from PyQt4 import QtCore
from PyQt4.QtCore import Qt
from camelot.admin.action.application_action import Refresh
from camelot.admin.action.form_action import FormActionGuiContext
from camelot.view.model_thread import post
from camelot.view.controls.view import AbstractView
from camelot.view.controls.busy_widget import BusyWidget
from camelot.view import register
class FormEditors( object ):
"""A class that holds the editors used on a form
"""
option = None
bold_font = None
def __init__( self, columns, widget_mapper, delegate, admin ):
if self.option == None:
self.option = QtGui.QStyleOptionViewItem()
# set version to 5 to indicate the widget will appear on a
# a form view and not on a table view
self.option.version = 5
self.bold_font = QtGui.QApplication.font()
self.bold_font.setBold(True)
self._admin = admin
self._widget_mapper = widget_mapper
self._field_attributes = dict()
self._index = dict()
for i, (field_name, field_attributes ) in enumerate( columns):
self._field_attributes[field_name] = field_attributes
self._index[field_name] = i
def create_editor( self, field_name, parent ):
"""
:return: a :class:`QtGuiQWidget` or None if field_name is unknown
"""
index = self._index[field_name]
model = self._widget_mapper.model()
delegate = self._widget_mapper.itemDelegate()
model_index = model.index( self._widget_mapper.currentIndex(), index )
widget_editor = delegate.createEditor(
parent,
self.option,
model_index
)
widget_editor.setObjectName('%s_editor'%field_name)
delegate.setEditorData( widget_editor, model_index )
self._widget_mapper.addMapping( widget_editor, index )
return widget_editor
def create_label( self, field_name, editor, parent ):
from camelot.view.controls.field_label import FieldLabel
from camelot.view.controls.editors.wideeditor import WideEditor
field_attributes = self._field_attributes[field_name]
hide_title = field_attributes.get( 'hide_title', False )
widget_label = None
if not hide_title:
widget_label = FieldLabel(
field_name,
field_attributes['name'],
field_attributes,
self._admin
)
widget_label.setObjectName('%s_label'%field_name)
if not isinstance(editor, WideEditor):
widget_label.setAlignment(Qt.AlignVCenter | Qt.AlignRight)
# required fields font is bold
nullable = field_attributes.get( 'nullable', True )
if not nullable:
widget_label.setFont( self.bold_font )
return widget_label
class FormWidget(QtGui.QWidget):
"""A form widget comes inside a form view"""
changed_signal = QtCore.pyqtSignal( int )
def __init__(self, parent, admin):
QtGui.QWidget.__init__(self, parent)
self._admin = admin
widget_mapper = QtGui.QDataWidgetMapper(self)
widget_mapper.setObjectName('widget_mapper')
if self._admin.get_save_mode()=='on_leave':
widget_mapper.setSubmitPolicy(QtGui.QDataWidgetMapper.ManualSubmit)
widget_layout = QtGui.QHBoxLayout()
widget_layout.setSpacing(0)
widget_layout.setContentsMargins(0, 0, 0, 0)
self._index = 0
self._model = None
self._form = None
self._columns = None
self._delegate = None
self.setLayout(widget_layout)
def get_model(self):
return self._model
def set_model(self, model):
self._model = model
self._model.dataChanged.connect( self._data_changed )
self._model.layoutChanged.connect( self._layout_changed )
self._model.item_delegate_changed_signal.connect( self._item_delegate_changed )
self._model.setObjectName( 'model' )
widget_mapper = self.findChild(QtGui.QDataWidgetMapper, 'widget_mapper' )
if widget_mapper:
widget_mapper.setModel( model )
register.register( model, widget_mapper )
def get_columns_and_form():
return (self._model.getColumns(), self._admin.get_form_display())
post(get_columns_and_form, self._set_columns_and_form)
def clear_mapping(self):
widget_mapper = self.findChild(QtGui.QDataWidgetMapper, 'widget_mapper' )
if widget_mapper:
widget_mapper.clearMapping()
@QtCore.pyqtSlot( QtCore.QModelIndex, QtCore.QModelIndex )
def _data_changed(self, index_from, index_to):
#@TODO: only revert if this form is in the changed range
widget_mapper = self.findChild(QtGui.QDataWidgetMapper, 'widget_mapper' )
if widget_mapper:
widget_mapper.revert()
self.changed_signal.emit( widget_mapper.currentIndex() )
@QtCore.pyqtSlot()
def _layout_changed(self):
widget_mapper = self.findChild(QtGui.QDataWidgetMapper, 'widget_mapper' )
if widget_mapper:
widget_mapper.revert()
self.changed_signal.emit( widget_mapper.currentIndex() )
@QtCore.pyqtSlot()
def _item_delegate_changed(self):
from camelot.view.controls.delegates.delegatemanager import \
DelegateManager
self._delegate = self._model.getItemDelegate()
self._delegate.setObjectName('delegate')
assert self._delegate != None
assert isinstance(self._delegate, DelegateManager)
self._create_widgets()
@QtCore.pyqtSlot(int)
def current_index_changed( self, index ):
self.changed_signal.emit( index )
def set_index(self, index):
self._index = index
widget_mapper = self.findChild(QtGui.QDataWidgetMapper, 'widget_mapper' )
if widget_mapper:
widget_mapper.setCurrentIndex(self._index)
def get_index(self):
widget_mapper = self.findChild(QtGui.QDataWidgetMapper, 'widget_mapper' )
if widget_mapper:
return widget_mapper.currentIndex()
def submit(self):
widget_mapper = self.findChild(QtGui.QDataWidgetMapper, 'widget_mapper' )
if widget_mapper:
widget_mapper.submit()
@QtCore.pyqtSlot(tuple)
def _set_columns_and_form(self, columns_and_form ):
self._columns, self._form = columns_and_form
self._create_widgets()
def _create_widgets(self):
"""Create value and label widgets"""
#
# Dirty trick to make form views work during unit tests, since unit
# tests have no event loop running, so the delegate will never be set,
# so we get it and are sure it will be there if we are running without
# threads
#
if not self._delegate:
self._delegate = self._model.getItemDelegate()
#
# end of dirty trick
#
# only if all information is available, we can start building the form
if not (self._form and self._columns and self._delegate):
return
widgets = {}
widget_mapper = self.findChild(QtGui.QDataWidgetMapper, 'widget_mapper' )
if not widget_mapper:
return
LOGGER.debug( 'begin creating widgets' )
widget_mapper.setItemDelegate(self._delegate)
widgets = FormEditors( self._columns, widget_mapper, self._delegate, self._admin )
widget_mapper.setCurrentIndex( self._index )
LOGGER.debug( 'put widgets on form' )
self.layout().insertWidget(0, self._form.render( widgets, self, True) )
LOGGER.debug( 'done' )
#self._widget_layout.setContentsMargins(7, 7, 7, 7)
class FormView(AbstractView):
"""A FormView is the combination of a FormWidget, possible actions and menu
items
.. form_widget: The class to be used as a the form widget inside the form
view"""
form_widget = FormWidget
def __init__(self, title, admin, model, index, parent = None):
AbstractView.__init__( self, parent )
layout = QtGui.QVBoxLayout()
layout.setSpacing( 1 )
layout.setContentsMargins( 1, 1, 1, 1 )
layout.setObjectName( 'layout' )
form_and_actions_layout = QtGui.QHBoxLayout()
form_and_actions_layout.setObjectName('form_and_actions_layout')
layout.addLayout( form_and_actions_layout )
self.model = model
self.admin = admin
self.title_prefix = title
self.refresh_action = Refresh()
form = FormWidget(self, admin)
form.setObjectName( 'form' )
form.changed_signal.connect( self.update_title )
form.set_model(model)
form.set_index(index)
form_and_actions_layout.addWidget(form)
self.gui_context = FormActionGuiContext()
self.gui_context.workspace = self
self.gui_context.admin = admin
self.gui_context.view = self
self.gui_context.widget_mapper = self.findChild( QtGui.QDataWidgetMapper,
'widget_mapper' )
self.setLayout( layout )
self.change_title(title)
if hasattr(admin, 'form_size') and admin.form_size:
self.setMinimumSize(admin.form_size[0], admin.form_size[1])
self.accept_close_event = False
get_actions = admin.get_form_actions
post( functools.update_wrapper( functools.partial( get_actions,
None ),
get_actions ),
self.set_actions )
get_toolbar_actions = admin.get_form_toolbar_actions
post( functools.update_wrapper( functools.partial( get_toolbar_actions,
Qt.TopToolBarArea ),
get_toolbar_actions ),
self.set_toolbar_actions )
@QtCore.pyqtSlot()
def refresh(self):
"""Refresh the data in the current view"""
self.model.refresh()
def _get_title( self, index ):
obj = self.model._get_object( index )
return u'%s %s' % (
self.title_prefix,
self.admin.get_verbose_identifier(obj)
)
@QtCore.pyqtSlot( int )
def update_title(self, current_index ):
post( self._get_title, self.change_title, args=(current_index,) )
@QtCore.pyqtSlot(list)
def set_actions(self, actions):
form = self.findChild(QtGui.QWidget, 'form' )
layout = self.findChild(QtGui.QLayout, 'form_and_actions_layout' )
if actions and form and layout:
side_panel_layout = QtGui.QVBoxLayout()
from camelot.view.controls.actionsbox import ActionsBox
LOGGER.debug('setting Actions for formview')
actions_widget = ActionsBox( parent = self,
gui_context = self.gui_context )
actions_widget.setObjectName('actions')
actions_widget.set_actions( actions )
side_panel_layout.addWidget( actions_widget )
side_panel_layout.addStretch()
layout.addLayout( side_panel_layout )
@QtCore.pyqtSlot(list)
def set_toolbar_actions(self, actions):
layout = self.findChild( QtGui.QLayout, 'layout' )
if layout and actions:
toolbar = QtGui.QToolBar()
for action in actions:
qaction = action.render( self.gui_context, toolbar )
qaction.triggered.connect( self.action_triggered )
toolbar.addAction( qaction )
toolbar.addWidget( BusyWidget() )
layout.insertWidget( 0, toolbar, 0, Qt.AlignTop )
# @todo : this show is needed on OSX or the form window
# is hidden after the toolbar is added, maybe this can
# be solved using windowflags, since this causes some
# flicker
self.show()
@QtCore.pyqtSlot( bool )
def action_triggered( self, _checked = False ):
action_action = self.sender()
action_action.action.gui_run( self.gui_context )
@QtCore.pyqtSlot()
def validate_close( self ):
self.admin.form_close_action.gui_run( self.gui_context )
def close_view( self, accept ):
self.accept_close_event = accept
        if accept:
# clear mapping to prevent data being written again to the model,
# when the underlying object would be reverted
form = self.findChild( QtGui.QWidget, 'form' )
            if form is not None:
form.clear_mapping()
self.close()
def closeEvent(self, event):
        if self.accept_close_event:
event.accept()
else:
# make sure the next closeEvent is sent after this one
# is processed
QtCore.QTimer.singleShot( 0, self.validate_close )
event.ignore()
| gpl-2.0 | 6,573,367,326,368,849,000 | 38.130435 | 90 | 0.608542 | false | 4.194582 | false | false | false |
Kalimaha/pact-test | tests/config/config_builder.py | 1 | 2382 | import os
from pact_test.config.config_builder import Config
def test_default_consumer_tests_path():
config = Config()
assert config.consumer_tests_path == 'tests/service_consumers'
def test_default_provider_tests_path():
config = Config()
assert config.provider_tests_path == 'tests/service_providers'
def test_default_pact_broker_uri():
config = Config()
assert config.pact_broker_uri == 'http://localhost:9292/'
def test_custom_consumer_tests_path():
class TestConfig(Config):
def path_to_user_config_file(self):
return os.path.join(os.getcwd(), 'tests',
'resources', 'config',
'consumer_only.json')
config = TestConfig()
assert config.pact_broker_uri == 'http://localhost:9292/'
assert config.consumer_tests_path == 'mypath/mytests'
assert config.provider_tests_path == 'tests/service_providers'
def test_custom_provider_tests_path():
class TestConfig(Config):
def path_to_user_config_file(self):
return os.path.join(os.getcwd(), 'tests',
'resources', 'config',
'provider_only.json')
config = TestConfig()
assert config.pact_broker_uri == 'http://localhost:9292/'
assert config.provider_tests_path == 'mypath/mytests'
assert config.consumer_tests_path == 'tests/service_consumers'
def test_custom_pact_broker_uri():
class TestConfig(Config):
def path_to_user_config_file(self):
return os.path.join(os.getcwd(), 'tests',
'resources', 'config',
'pact_broker_only.json')
config = TestConfig()
assert config.pact_broker_uri == 'mypath/mytests'
assert config.provider_tests_path == 'tests/service_providers'
assert config.consumer_tests_path == 'tests/service_consumers'
def test_user_settings():
class TestConfig(Config):
def path_to_user_config_file(self):
return os.path.join(os.getcwd(), 'tests',
'resources', 'config',
'.pact.json')
config = TestConfig()
assert config.pact_broker_uri == 'mypath/mybroker'
assert config.provider_tests_path == 'mypath/myprovider'
assert config.consumer_tests_path == 'mypath/myconsumer'
| mit | 5,298,987,564,133,576,000 | 33.521739 | 66 | 0.607893 | false | 3.860616 | true | false | false |
SinnerSchraderMobileMirrors/django-cms | cms/utils/plugins.py | 5 | 6609 | # -*- coding: utf-8 -*-
from cms.exceptions import DuplicatePlaceholderWarning
from cms.models import Page
from cms.templatetags.cms_tags import Placeholder
from cms.utils.placeholder import validate_placeholder_name
from django.contrib.sites.models import Site, SITE_CACHE
from django.shortcuts import get_object_or_404
from django.template import NodeList, VariableNode, TemplateSyntaxError
from django.template.loader import get_template
from django.template.loader_tags import ConstantIncludeNode, ExtendsNode, BlockNode
import warnings
from sekizai.helpers import is_variable_extend_node
def get_page_from_plugin_or_404(cms_plugin):
return get_object_or_404(Page, placeholders=cms_plugin.placeholder)
def _extend_blocks(extend_node, blocks):
"""
Extends the dictionary `blocks` with *new* blocks in the parent node (recursive)
"""
# we don't support variable extensions
if is_variable_extend_node(extend_node):
return
parent = extend_node.get_parent(None)
# Search for new blocks
for node in parent.nodelist.get_nodes_by_type(BlockNode):
if not node.name in blocks:
blocks[node.name] = node
else:
# set this node as the super node (for {{ block.super }})
block = blocks[node.name]
seen_supers = []
while hasattr(block.super, 'nodelist') and block.super not in seen_supers:
seen_supers.append(block.super)
block = block.super
block.super = node
# search for further ExtendsNodes
for node in parent.nodelist.get_nodes_by_type(ExtendsNode):
_extend_blocks(node, blocks)
break
def _find_topmost_template(extend_node):
parent_template = extend_node.get_parent({})
for node in parent_template.nodelist.get_nodes_by_type(ExtendsNode):
        # There can only be one extends block in a template, otherwise Django raises an exception
return _find_topmost_template(node)
# No ExtendsNode
return extend_node.get_parent({})
def _extend_nodelist(extend_node):
"""
Returns a list of placeholders found in the parent template(s) of this
ExtendsNode
"""
# we don't support variable extensions
if is_variable_extend_node(extend_node):
return []
# This is a dictionary mapping all BlockNode instances found in the template that contains extend_node
blocks = extend_node.blocks
_extend_blocks(extend_node, blocks)
placeholders = []
for block in blocks.values():
placeholders += _scan_placeholders(block.nodelist, block, blocks.keys())
# Scan topmost template for placeholder outside of blocks
parent_template = _find_topmost_template(extend_node)
placeholders += _scan_placeholders(parent_template.nodelist, None, blocks.keys())
return placeholders
def _scan_placeholders(nodelist, current_block=None, ignore_blocks=None):
placeholders = []
if ignore_blocks is None:
# List of BlockNode instances to ignore.
# This is important to avoid processing overriden block nodes.
ignore_blocks = []
for node in nodelist:
# check if this is a placeholder first
if isinstance(node, Placeholder):
placeholders.append(node.get_name())
# if it's a Constant Include Node ({% include "template_name.html" %})
# scan the child template
elif isinstance(node, ConstantIncludeNode):
# if there's an error in the to-be-included template, node.template becomes None
if node.template:
placeholders += _scan_placeholders(node.template.nodelist, current_block)
# handle {% extends ... %} tags
elif isinstance(node, ExtendsNode):
placeholders += _extend_nodelist(node)
# in block nodes we have to scan for super blocks
elif isinstance(node, VariableNode) and current_block:
if node.filter_expression.token == 'block.super':
if not hasattr(current_block.super, 'nodelist'):
raise TemplateSyntaxError("Cannot render block.super for blocks without a parent.")
placeholders += _scan_placeholders(current_block.super.nodelist, current_block.super)
# ignore nested blocks which are already handled
elif isinstance(node, BlockNode) and node.name in ignore_blocks:
continue
# if the node has the newly introduced 'child_nodelists' attribute, scan
# those attributes for nodelists and recurse them
elif hasattr(node, 'child_nodelists'):
for nodelist_name in node.child_nodelists:
if hasattr(node, nodelist_name):
subnodelist = getattr(node, nodelist_name)
if isinstance(subnodelist, NodeList):
if isinstance(node, BlockNode):
current_block = node
placeholders += _scan_placeholders(subnodelist, current_block, ignore_blocks)
# else just scan the node for nodelist instance attributes
else:
for attr in dir(node):
obj = getattr(node, attr)
if isinstance(obj, NodeList):
if isinstance(node, BlockNode):
current_block = node
placeholders += _scan_placeholders(obj, current_block, ignore_blocks)
return placeholders
def get_placeholders(template):
compiled_template = get_template(template)
placeholders = _scan_placeholders(compiled_template.nodelist)
clean_placeholders = []
for placeholder in placeholders:
if placeholder in clean_placeholders:
            warnings.warn("Duplicate {{% placeholder \"{0}\" %}} "
                          "in template {1}.".format(placeholder, template),
                          DuplicatePlaceholderWarning)
else:
validate_placeholder_name(placeholder)
clean_placeholders.append(placeholder)
return clean_placeholders
SITE_VAR = "site__exact"
def current_site(request):
if SITE_VAR in request.REQUEST:
site_pk = request.REQUEST[SITE_VAR]
else:
        site_pk = request.session.get('cms_admin_site', None)
if site_pk:
try:
site = SITE_CACHE.get(site_pk) or Site.objects.get(pk=site_pk)
SITE_CACHE[site_pk] = site
return site
except Site.DoesNotExist:
return None
else:
return Site.objects.get_current()
| bsd-3-clause | 5,894,394,954,680,752,000 | 40.566038 | 110 | 0.646845 | false | 4.423695 | false | false | false |
bowen0701/algorithms_data_structures | lc0389_find_the_difference.py | 1 | 3185 | """Leetcode 389. Find the Difference
Easy
URL: https://leetcode.com/problems/find-the-difference/
Given two strings s and t which consist of only lowercase letters.
String t is generated by random shuffling string s and then
add one more letter at a random position.
Find the letter that was added in t.
Example:
Input:
s = "abcd"
t = "abcde"
Output:
e
Explanation:
'e' is the letter that was added.
"""
class SolutionSortIter(object):
def findTheDifference(self, s, t):
"""
:type s: str
:type t: str
:rtype: str
Time complexity: O(n*logn).
Space complexity: O(n).
"""
# Sort s & t.
s_ls = list(s)
t_ls = list(t)
s_ls.sort()
t_ls.sort()
# Iterate through s's chars to check mismatch.
for i, c in enumerate(s_ls):
if c != t_ls[i]:
return t_ls[i]
# If no mismatch, then the t's last char is the diff one.
return t_ls[-1]
class SolutionCharCountDict(object):
def findTheDifference(self, s, t):
"""
:type s: str
:type t: str
:rtype: str
Time complexity: O(n).
Space complexity: O(n).
"""
from collections import defaultdict
char_count_d = defaultdict(int)
# Iterate through s's chars and increment counter.
for c in s:
char_count_d[c] += 1
# Iterate through t's chars.
for c in t:
if not char_count_d[c]:
# If c is not in s, c is additional char.
return c
else:
# If c is in s, decrement its counter.
char_count_d[c] -= 1
class SolutionOrdSumDiff(object):
def findTheDifference(self, s, t):
"""
:type s: str
:type t: str
:rtype: str
Time complexity: O(n).
Space complexity: O(1).
"""
ord_sum_diff = 0
# Decrement ord_sum_diff by s's char ordinal.
for c in s:
ord_sum_diff -= ord(c)
# Increment by t's char ordinal.
for c in t:
ord_sum_diff += ord(c)
return chr(ord_sum_diff)
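# Why the XOR approach below works: x ^ x == 0 and x ^ 0 == x, so XOR-ing the
# ordinals of every character in s and t cancels each paired character and
# leaves only the ordinal of the single extra character in t.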
class SolutionXOR(object):
def findTheDifference(self, s, t):
"""
:type s: str
:type t: str
:rtype: str
Time complexity: O(n).
Space complexity: O(1).
"""
xor = 0
# XOR by s's char ord.
for c in s:
xor ^= ord(c)
# XOR by t's char ord.
for c in t:
xor ^= ord(c)
return chr(xor)
def main():
# Output: e
s = "abcd"
t = "abcde"
print SolutionSortIter().findTheDifference(s, t)
print SolutionCharCountDict().findTheDifference(s, t)
print SolutionOrdSumDiff().findTheDifference(s, t)
print SolutionXOR().findTheDifference(s, t)
# Output: a
s = ""
t = "a"
print SolutionSortIter().findTheDifference(s, t)
print SolutionCharCountDict().findTheDifference(s, t)
print SolutionOrdSumDiff().findTheDifference(s, t)
print SolutionXOR().findTheDifference(s, t)
if __name__ == '__main__':
main()
| bsd-2-clause | 2,294,694,876,237,306,000 | 21.588652 | 66 | 0.540659 | false | 3.570628 | false | false | false |
caxap/python-optimage | optimage/utils.py | 1 | 1263 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
__all__ = ['is_executable', 'find_executable_path', 'file_size']
def is_executable(path):
return os.path.isfile(path) and os.access(path, os.X_OK)
def find_executable_path(command, paths=None):
path = os.environ.get('%s_PATH' % command.upper())
if not path:
if not paths:
# Most commonly used paths to install software
paths = ['/usr/bin/%s', '/usr/local/bin/%s', '/bin/%s']
paths = [p % command for p in paths]
for p in paths:
if is_executable(p):
path = p
break
return path or command
def file_size(fp):
# File descriptor
if hasattr(fp, 'name') and os.path.exists(fp.name):
return os.path.getsize(fp.name)
# File name
if type(fp) == type('') and os.path.exists(fp):
return os.path.getsize(fp)
# File buffer
if hasattr(fp, 'seek') and hasattr(fp, 'tell'):
pos = fp.tell()
fp.seek(0, os.SEEK_END)
size = fp.tell()
fp.seek(pos)
return size
# File wrapper, e.g Django File object
if hasattr(fp, 'size'):
return fp.size
raise ValueError("Unable to determine the file's size: %s" % (fp, ))
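# Usage sketch (hypothetical file names): file_size('img.png'),
# file_size(open('img.png', 'rb')) and file_size(django_file) should all
# report the size in bytes.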
| mit | 805,107,199,800,589,600 | 27.066667 | 72 | 0.564529 | false | 3.422764 | false | false | false |
cphyc/ScriptManager | mysite/scripts/migrations/0001_initial.py | 1 | 1584 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Folderbla',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=100)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Langage',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('langage', models.CharField(max_length=16)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Script',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('Published', models.DateTimeField(auto_now_add=True)),
('Last edit', models.DateTimeField()),
('Title', models.CharField(max_length=100)),
('Content', models.TextField()),
('folder', models.ForeignKey(to='scripts.Folderbla')),
('langage', models.ForeignKey(to='scripts.Langage')),
],
options={
},
bases=(models.Model,),
),
]
| unlicense | 4,634,281,116,656,442,000 | 32 | 114 | 0.504419 | false | 4.728358 | false | false | false |
dgladkov/django-modelimport | modelimport/runners.py | 1 | 2758 | # -*- coding: utf-8 -*-
import os
from csv import excel
from django.db import transaction
from django.utils.translation import ugettext_lazy as _
from unicodecsv import DictReader
class excel_semicolon(excel):
delimiter = ';'
class RegistryMixin(object):
"""
Registers targets to uninstanciated class
"""
_registry = None
@classmethod
def register(cls, target):
if cls._registry is None:
cls._registry = {}
cls._registry[target.__name__] = target
@property
def registry(self):
registry = self._registry or {}
for line in registry.iteritems():
yield line
class BaseRunner(RegistryMixin):
"""
Base import runner class. All subclasses must define their own read() methods.
"""
NOT_STARTED = 0
IN_PROGRESS = 1
FAILED = 2
SUCCESS = 3
status_choices = {
NOT_STARTED: _('Not Started'),
IN_PROGRESS: _('In Progress'),
FAILED: _('Failed'),
SUCCESS: _('Success'),
}
_status = NOT_STARTED
def __init__(self, import_root, encoding, continue_on_error=True):
self.import_root = import_root
self.encoding = encoding
self.errors = {}
self.continue_on_error = continue_on_error
@property
def status(self):
return self.status_choices[self._status]
def _set_status(self, value):
self._status = value
def read(self, filepath):
"""
This field must be overriden by subclasses
"""
raise NotImplementedError
@transaction.atomic
def run(self):
self._set_status(self.IN_PROGRESS)
global_errors = {}
for name, cls in self.registry:
local_errors = []
filepath = os.path.join(self.import_root, cls._meta.filename)
for row in self.read(filepath):
modelimport = cls(row)
if modelimport.is_valid():
modelimport.save()
else:
local_errors.append(modelimport.errors)
self._set_status(self.FAILED)
if not self.continue_on_error:
break
global_errors[name] = local_errors
self.errors = global_errors
if self._status != self.FAILED:
self._set_status(self.SUCCESS)
class CsvRunner(BaseRunner):
def __init__(self, *args, **kwargs):
self.dialect = kwargs.pop('dialect', excel)
super(CsvRunner, self).__init__(*args, **kwargs)
def read(self, filepath):
with open(filepath, 'r') as f:
reader = DictReader(f, encoding=self.encoding, dialect=self.dialect)
for row in reader:
yield row
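# A minimal usage sketch (MyImport is a hypothetical model-import class that
# exposes the _meta.filename, is_valid() and save() members run() relies on):
#   CsvRunner.register(MyImport)
#   runner = CsvRunner('/path/to/import/dir', 'utf-8')
#   runner.run()
#   print(runner.status, runner.errors)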
| mit | 8,766,955,446,528,260,000 | 25.266667 | 82 | 0.571791 | false | 4.236559 | false | false | false |
enjaz/enjaz | events/migrations/0009_booth_vote.py | 2 | 1703 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('events', '0008_add_collection_method'),
]
operations = [
migrations.CreateModel(
name='Booth',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=100, verbose_name='\u0627\u0633\u0645 \u0627\u0644\u0631\u0643\u0646')),
('event', models.ForeignKey(verbose_name=b'\xd8\xa7\xd9\x84\xd8\xad\xd8\xaf\xd8\xab', to='events.Event')),
],
),
migrations.CreateModel(
name='Vote',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('submission_date', models.DateTimeField(auto_now_add=True, verbose_name='\u062a\u0627\u0631\u064a\u062e \u0627\u0644\u062a\u0635\u0648\u064a\u062a')),
('is_deleted', models.BooleanField(default=False, verbose_name='\u0645\u062d\u0630\u0648\u0641\u061f')),
('booth', models.ForeignKey(verbose_name=b'\xd8\xa7\xd8\xb3\xd9\x85 \xd8\xa7\xd9\x84\xd8\xb1\xd9\x83\xd9\x86', to='events.Booth')),
('submitter', models.ForeignKey(related_name='event_booth_voter', verbose_name=b'\xd8\xa7\xd8\xb3\xd9\x85 \xd8\xa7\xd9\x84\xd9\x85\xd8\xb5\xd9\x88\xd8\xaa', blank=True, to=settings.AUTH_USER_MODEL, null=True)),
],
),
]
| agpl-3.0 | -4,300,426,101,200,923,600 | 49.088235 | 226 | 0.623018 | false | 3.046512 | false | false | false |
MRtrix3/mrtrix3 | lib/mrtrix3/run.py | 1 | 26322 | # Copyright (c) 2008-2021 the MRtrix3 contributors.
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Covered Software is provided under this License on an "as is"
# basis, without warranty of any kind, either expressed, implied, or
# statutory, including, without limitation, warranties that the
# Covered Software is free of defects, merchantable, fit for a
# particular purpose or non-infringing.
# See the Mozilla Public License v. 2.0 for more details.
#
# For more details, see http://www.mrtrix.org/.
import collections, itertools, os, shlex, signal, string, subprocess, sys, tempfile, threading
from distutils.spawn import find_executable
from mrtrix3 import ANSI, BIN_PATH, COMMAND_HISTORY_STRING, EXE_LIST, MRtrixBaseError, MRtrixError
from mrtrix3.utils import STRING_TYPES
IOStream = collections.namedtuple('IOStream', 'handle filename')
class Shared(object):
# A class for storing all information related to a running process
# This includes:
# - The process itself (so that a terminate signal can be sent if required)
# - For both stdout and stderr:
# - File handle (so that if not an internal pipe, the file can be closed)
# - File name (so that if not an internal pipe, it can be deleted from the file system)
# (these are encapsulated in named tuple "IOStream"
# Note: Input "stdin" should be a stream handle only, or None;
# "stdout" and "stderr" should be of type "IOStream"
class Process(subprocess.Popen):
def __init__(self, cmd, stdin, stdout, stderr, **kwargs):
assert isinstance(stdout, IOStream) or stdout is None
assert isinstance(stderr, IOStream) or stderr is None
my_kwargs = kwargs.copy()
my_kwargs['stdin'] = stdin
my_kwargs['stdout'] = stdout.handle if stdout else None
my_kwargs['stderr'] = stderr.handle if stderr else None
super(Shared.Process, self).__init__(cmd, **my_kwargs)
self.iostreams = (stdout, stderr)
def __init__(self):
# If the main script has been executed in an SGE environment, don't allow
# sub-processes to themselves fork SGE jobs; but if the main script is
# itself not an SGE job ('JOB_ID' environment variable absent), then
# whatever run.command() executes can send out SGE jobs without a problem.
self.env = os.environ.copy()
if self.env.get('SGE_ROOT') and self.env.get('JOB_ID'):
del self.env['SGE_ROOT']
# If environment variable is set, should apply to invoked script,
# but not to any underlying invoked commands
try:
self.env.pop('MRTRIX_QUIET')
except KeyError:
pass
    self.env['MRTRIX_LOGLEVEL'] = '1'
# Flagged by calling the set_continue() function;
# run.command() and run.function() calls will be skipped until one of the inputs to
# these functions matches the given string
self._last_file = ''
self.lock = threading.Lock()
self._num_threads = None
# Store executing processes so that they can be killed appropriately on interrupt;
# e.g. by the signal handler in the mrtrix3.app module
# Each sequential execution of run.command() either selects the first index for which the value is None,
# or extends the length of the list by 1, and uses this index as a unique identifier (within its own lifetime);
# each item is then itself a list of Process class instances required for that command string
self.process_lists = [ ]
self._scratch_dir = None
self.verbosity = 1
# Acquire a unique index
# This ensures that if command() is executed in parallel using different threads, they will
# not interfere with one another; but terminate() will also have access to all relevant data
def get_command_index(self):
with self.lock:
try:
index = next(i for i, v in enumerate(self.process_lists) if v is None)
self.process_lists[index] = [ ]
except StopIteration:
index = len(self.process_lists)
self.process_lists.append([ ])
return index
def close_command_index(self, index):
with self.lock:
assert index < len(self.process_lists)
assert self.process_lists[index]
self.process_lists[index] = None
# Wrap tempfile.mkstemp() in a convenience function, which also catches the case
# where the user does not have write access to the temporary directory location
# selected by default by the tempfile module, and in that case re-runs mkstemp()
# manually specifying an alternative temporary directory
def make_temporary_file(self):
try:
return IOStream(*tempfile.mkstemp())
except OSError:
return IOStream(*tempfile.mkstemp('', 'tmp', self._scratch_dir if self._scratch_dir else os.getcwd()))
def set_continue(self, filename): #pylint: disable=unused-variable
self._last_file = filename
def get_continue(self):
return bool(self._last_file)
# New function for handling the -continue command-line option functionality
# Check to see if the last file produced in the previous script execution is
# intended to be produced by this command; if it is, this will be the last
# thing that gets skipped by the -continue option
def trigger_continue(self, entries):
assert self.get_continue()
for entry in entries:
# It's possible that the file might be defined in a '--option=XXX' style argument
# It's also possible that the filename in the command string has the file extension omitted
if entry.startswith('--') and '=' in entry:
totest = entry.split('=')[1]
else:
totest = entry
if totest in [ self._last_file, os.path.splitext(self._last_file)[0] ]:
self._last_file = ''
return True
return False
def get_num_threads(self):
return self._num_threads
def set_num_threads(self, value):
assert value is None or (isinstance(value, int) and value >= 0)
self._num_threads = value
if value is not None:
# Either -nthreads 0 or -nthreads 1 should result in disabling multi-threading
external_software_value = 1 if value <= 1 else value
self.env['ITK_GLOBAL_NUMBER_OF_THREADS'] = str(external_software_value)
self.env['OMP_NUM_THREADS'] = str(external_software_value)
def get_scratch_dir(self):
return self._scratch_dir
def set_scratch_dir(self, path):
self.env['MRTRIX_TMPFILE_DIR'] = path
self._scratch_dir = path
# Controls verbosity of invoked MRtrix3 commands, as well as whether or not the
# stderr outputs of invoked commands are propagated to the terminal instantly or
# instead written to a temporary file for read on completion
def set_verbosity(self, verbosity):
assert isinstance(verbosity, int)
self.verbosity = verbosity
self.env['MRTRIX_LOGLEVEL'] = str(max(1, verbosity-1))
# Terminate any and all running processes, and delete any associated temporary files
def terminate(self, signum): #pylint: disable=unused-variable
with self.lock:
for process_list in self.process_lists:
if process_list:
for process in process_list:
if process:
# No need to propagate signals if we're in a POSIX-compliant environment
# and SIGINT has been received; that will propagate to children automatically
if sys.platform == 'win32':
process.send_signal(getattr(signal, 'CTRL_BREAK_EVENT'))
process.communicate(timeout=1) # Flushes the I/O buffers, unlike wait()
elif signum != signal.SIGINT:
process.terminate()
process.communicate(timeout=1)
for stream in process.iostreams:
if stream:
if stream.handle != subprocess.PIPE:
try:
os.close(stream.handle)
except OSError:
pass
if stream.filename is not None:
try:
os.remove(stream.filename)
except OSError:
pass
stream = None
process = None
process_list = None
self.process_lists = [ ]
shared = Shared() #pylint: disable=invalid-name
class MRtrixCmdError(MRtrixBaseError):
def __init__(self, cmd, code, stdout, stderr):
super(MRtrixCmdError, self).__init__('Command failed')
self.command = cmd
self.returncode = code
self.stdout = stdout
self.stderr = stderr
def __str__(self):
return self.stdout + self.stderr
class MRtrixFnError(MRtrixBaseError):
def __init__(self, fn, text):
super(MRtrixFnError, self).__init__('Function failed')
self.function = fn
self.errortext = text
def __str__(self):
return self.errortext
CommandReturn = collections.namedtuple('CommandReturn', 'stdout stderr')
def command(cmd, **kwargs): #pylint: disable=unused-variable
from mrtrix3 import app, path #pylint: disable=import-outside-toplevel
global shared #pylint: disable=invalid-name
def quote_nonpipe(item):
return item if item == '|' else path.quote(item)
shell = kwargs.pop('shell', False)
show = kwargs.pop('show', True)
mrconvert_keyval = kwargs.pop('mrconvert_keyval', None)
force = kwargs.pop('force', False)
env = kwargs.pop('env', shared.env)
if kwargs:
raise TypeError('Unsupported keyword arguments passed to run.command(): ' + str(kwargs))
if shell and mrconvert_keyval:
raise TypeError('Cannot use "mrconvert_keyval=" parameter in shell mode')
subprocess_kwargs = {}
if sys.platform == 'win32':
subprocess_kwargs['creationflags'] = subprocess.CREATE_NEW_PROCESS_GROUP
if shell:
subprocess_kwargs['shell'] = True
subprocess_kwargs['env'] = env
if isinstance(cmd, list):
if shell:
raise TypeError('When using run.command() with shell=True, input must be a text string')
cmdstring = ''
cmdsplit = []
for entry in cmd:
if isinstance(entry, STRING_TYPES):
cmdstring += (' ' if cmdstring else '') + quote_nonpipe(entry)
cmdsplit.append(entry)
elif isinstance(entry, list):
assert all(isinstance(item, STRING_TYPES) for item in entry)
if len(entry) > 1:
common_prefix = os.path.commonprefix(entry)
common_suffix = os.path.commonprefix([i[::-1] for i in entry])[::-1]
if common_prefix == entry[0] and common_prefix == common_suffix:
cmdstring += (' ' if cmdstring else '') + '[' + entry[0] + ' (x' + str(len(entry)) + ')]'
else:
cmdstring += (' ' if cmdstring else '') + '[' + common_prefix + '*' + common_suffix + ' (' + str(len(entry)) + ' items)]'
else:
cmdstring += (' ' if cmdstring else '') + quote_nonpipe(entry[0])
cmdsplit.extend(entry)
else:
raise TypeError('When run.command() is provided with a list as input, entries in the list must be either strings or lists of strings')
elif isinstance(cmd, STRING_TYPES):
cmdstring = cmd
# Split the command string by spaces, preserving anything encased within quotation marks
if os.sep == '/': # Cheap POSIX compliance check
cmdsplit = shlex.split(cmd)
else: # Native Windows Python
cmdsplit = [ entry.strip('\"') for entry in shlex.split(cmd, posix=False) ]
else:
raise TypeError('run.command() function only operates on strings, or lists of strings')
if shared.get_continue():
if shared.trigger_continue(cmdsplit):
app.debug('Detected last file in command \'' + cmdstring + '\'; this is the last run.command() / run.function() call that will be skipped')
if shared.verbosity:
sys.stderr.write(ANSI.execute + 'Skipping command:' + ANSI.clear + ' ' + cmdstring + '\n')
sys.stderr.flush()
return CommandReturn('', '')
# If operating in shell=True mode, handling of command execution is significantly different:
# Unmodified command string is executed using subprocess, with the shell being responsible for its parsing
# Only a single process per run.command() invocation is possible (since e.g. any piping will be
# handled by the spawned shell)
this_process_list = [ ]
if shell:
cmdstack = [ cmdsplit ]
with shared.lock:
app.debug('To execute: ' + str(cmdsplit))
if (shared.verbosity and show) or shared.verbosity > 1:
sys.stderr.write(ANSI.execute + 'Command:' + ANSI.clear + ' ' + cmdstring + '\n')
sys.stderr.flush()
# No locking required for actual creation of new process
this_stdout = shared.make_temporary_file()
this_stderr = IOStream(subprocess.PIPE, None) if shared.verbosity > 1 else shared.make_temporary_file()
this_process_list.append(shared.Process(cmdstring, None, this_stdout, this_stderr, **subprocess_kwargs))
else: # shell=False
# Need to identify presence of list constructs && or ||, and process accordingly
try:
(index, operator) = next((i,v) for i,v in enumerate(cmdsplit) if v in [ '&&', '||' ])
# If operator is '&&', next command should be executed only if first is successful
# If operator is '||', next command should be executed only if the first is not successful
try:
pre_result = command(cmdsplit[:index])
if operator == '||':
with shared.lock:
            app.debug('Due to success of "' + ' '.join(cmdsplit[:index]) + '", "' + ' '.join(cmdsplit[index+1:]) + '" will not be run')
return pre_result
except MRtrixCmdError:
if operator == '&&':
raise
return command(cmdsplit[index+1:])
except StopIteration:
pass
# This splits the command string based on the piping character '|', such that each
# individual executable (along with its arguments) appears as its own list
cmdstack = [ list(g) for k, g in itertools.groupby(cmdsplit, lambda s : s != '|') if k ]
if mrconvert_keyval:
if cmdstack[-1][0] != 'mrconvert':
raise TypeError('Argument "mrconvert_keyval=" can only be used if the mrconvert command is being invoked')
assert not (mrconvert_keyval[0] in [ '\'', '"' ] or mrconvert_keyval[-1] in [ '\'', '"' ])
cmdstack[-1].extend([ '-copy_properties', mrconvert_keyval ])
if COMMAND_HISTORY_STRING:
cmdstack[-1].extend([ '-append_property', 'command_history', COMMAND_HISTORY_STRING ])
for line in cmdstack:
is_mrtrix_exe = line[0] in EXE_LIST
if is_mrtrix_exe:
line[0] = version_match(line[0])
if shared.get_num_threads() is not None:
line.extend( [ '-nthreads', str(shared.get_num_threads()) ] )
if force:
line.append('-force')
else:
line[0] = exe_name(line[0])
shebang = _shebang(line[0])
if shebang:
if not is_mrtrix_exe:
# If a shebang is found, and this call is therefore invoking an
# interpreter, can't rely on the interpreter finding the script
# from PATH; need to find the full path ourselves.
line[0] = find_executable(line[0])
for item in reversed(shebang):
line.insert(0, item)
with shared.lock:
app.debug('To execute: ' + str(cmdstack))
if (shared.verbosity and show) or shared.verbosity > 1:
sys.stderr.write(ANSI.execute + 'Command:' + ANSI.clear + ' ' + cmdstring + '\n')
sys.stderr.flush()
# Execute all processes for this command string
for index, to_execute in enumerate(cmdstack):
# If there's at least one command prior to this, need to receive the stdout from the prior command
# at the stdin of this command; otherwise, nothing to receive
this_stdin = this_process_list[index-1].stdout if index > 0 else None
# If this is not the last command, then stdout needs to be piped to the next command;
# otherwise, write stdout to a temporary file so that the contents can be read later
this_stdout = IOStream(subprocess.PIPE, None) if index<len(cmdstack)-1 else shared.make_temporary_file()
# If we're in debug / info mode, the contents of stderr will be read and printed to the terminal
# as the command progresses, hence this needs to go to a pipe; otherwise, write it to a temporary
# file so that the contents can be read later
this_stderr = IOStream(subprocess.PIPE, None) if shared.verbosity>1 else shared.make_temporary_file()
# Set off the process
try:
this_process_list.append(shared.Process(to_execute, this_stdin, this_stdout, this_stderr, **subprocess_kwargs))
# FileNotFoundError not defined in Python 2.7
except OSError as exception:
raise MRtrixCmdError(cmdstring, 1, '', str(exception))
# End branching based on shell=True/False
# Write process & temporary file information to globals, so that
# shared.terminate() can perform cleanup if required
this_command_index = shared.get_command_index()
with shared.lock:
shared.process_lists[this_command_index] = this_process_list
return_code = None
return_stdout = ''
return_stderr = ''
error = False
error_text = ''
# Wait for all commands to complete
# Switch how we monitor running processes / wait for them to complete
# depending on whether or not the user has specified -info or -debug option
if shared.verbosity > 1:
for process in this_process_list:
stderrdata = b''
do_indent = True
while True:
# Have to read one character at a time: Waiting for a newline character using e.g. readline() will prevent MRtrix progressbars from appearing
byte = process.stderr.read(1)
stderrdata += byte
char = byte.decode('cp1252', errors='ignore')
if not char and process.poll() is not None:
break
if do_indent and char in string.printable and char != '\r' and char != '\n':
sys.stderr.write(' ')
do_indent = False
elif char in [ '\r', '\n' ]:
do_indent = True
sys.stderr.write(char)
sys.stderr.flush()
stderrdata = stderrdata.decode('utf-8', errors='replace')
return_stderr += stderrdata
if not return_code: # Catch return code of first failed command
return_code = process.returncode
if process.returncode:
error = True
error_text += stderrdata
else:
for process in this_process_list:
process.wait()
if not return_code:
return_code = process.returncode
# For any command stdout / stderr data that wasn't either passed to another command or
# printed to the terminal during execution, read it here.
for process in this_process_list:
def finalise_temp_file(iostream):
os.close(iostream.handle)
with open(iostream.filename, 'rb') as stream:
contents = stream.read().decode('utf-8', errors='replace')
os.unlink(iostream.filename)
iostream = None
return contents
stdout_text = stderr_text = ''
if process.iostreams[0].filename is not None:
stdout_text = finalise_temp_file(process.iostreams[0])
return_stdout += stdout_text
if process.iostreams[1].filename is not None:
stderr_text = finalise_temp_file(process.iostreams[1])
return_stderr += stderr_text
if process.returncode:
error = True
error_text += stdout_text + stderr_text
# Get rid of any reference to the executed processes
shared.close_command_index(this_command_index)
this_process_list = None
if error:
raise MRtrixCmdError(cmdstring, return_code, return_stdout, return_stderr)
# Only now do we append to the script log, since the command has completed successfully
# Note: Writing the command as it was formed as the input to run.command():
# other flags may potentially change if this file is eventually used to resume the script
if shared.get_scratch_dir():
with shared.lock:
with open(os.path.join(shared.get_scratch_dir(), 'log.txt'), 'a') as outfile:
outfile.write(cmdstring + '\n')
return CommandReturn(return_stdout, return_stderr)
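# A minimal usage sketch (assumption: called from an MRtrix3 Python script in
# which the app module has already configured the shared state; file names are
# hypothetical):
#   run.command('mrconvert in.mif out.nii.gz')                  # plain command
#   run.command('mrconvert in.mif - | mrstats -')               # internal pipe handling
#   run.command('echo hello && ls', shell=True)                 # parsing left to the shell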
def function(fn_to_execute, *args, **kwargs): #pylint: disable=unused-variable
from mrtrix3 import app #pylint: disable=import-outside-toplevel
if not fn_to_execute:
raise TypeError('Invalid input to run.function()')
show = kwargs.pop('show', True)
fnstring = fn_to_execute.__module__ + '.' + fn_to_execute.__name__ + \
'(' + ', '.join(['\'' + str(a) + '\'' if isinstance(a, STRING_TYPES) else str(a) for a in args]) + \
(', ' if (args and kwargs) else '') + \
', '.join([key+'='+str(value) for key, value in kwargs.items()]) + ')'
if shared.get_continue():
if shared.trigger_continue(args) or shared.trigger_continue(kwargs.values()):
app.debug('Detected last file in function \'' + fnstring + '\'; this is the last run.command() / run.function() call that will be skipped')
if shared.verbosity:
sys.stderr.write(ANSI.execute + 'Skipping function:' + ANSI.clear + ' ' + fnstring + '\n')
sys.stderr.flush()
return None
if (shared.verbosity and show) or shared.verbosity > 1:
sys.stderr.write(ANSI.execute + 'Function:' + ANSI.clear + ' ' + fnstring + '\n')
sys.stderr.flush()
# Now we need to actually execute the requested function
try:
if kwargs:
result = fn_to_execute(*args, **kwargs)
else:
result = fn_to_execute(*args)
except Exception as exception: # pylint: disable=broad-except
raise MRtrixFnError(fnstring, str(exception))
# Only now do we append to the script log, since the function has completed successfully
if shared.get_scratch_dir():
with shared.lock:
with open(os.path.join(shared.get_scratch_dir(), 'log.txt'), 'a') as outfile:
outfile.write(fnstring + '\n')
return result
# When running on Windows, add the necessary '.exe' so that hopefully the correct
# command is found by subprocess
def exe_name(item):
from mrtrix3 import app, utils #pylint: disable=import-outside-toplevel
if not utils.is_windows():
path = item
elif item.endswith('.exe'):
path = item
elif os.path.isfile(os.path.join(BIN_PATH, item)):
path = item
elif os.path.isfile(os.path.join(BIN_PATH, item + '.exe')):
path = item + '.exe'
elif find_executable(item) is not None:
path = item
elif find_executable(item + '.exe') is not None:
path = item + '.exe'
# If it can't be found, return the item as-is; find_executable() fails to identify Python scripts
else:
path = item
app.debug(item + ' -> ' + path)
return path
# Make sure we're not accidentally running an MRtrix executable on the system that
# belongs to a different version of MRtrix3 to the script library currently being used,
# or a non-MRtrix3 command with the same name as an MRtrix3 command
# (e.g. C:\Windows\system32\mrinfo.exe; On Windows, subprocess uses CreateProcess(),
# which checks system32\ before PATH)
def version_match(item):
from mrtrix3 import app #pylint: disable=import-outside-toplevel
if not item in EXE_LIST:
app.debug('Command ' + item + ' not found in MRtrix3 bin/ directory')
return item
exe_path_manual = os.path.join(BIN_PATH, exe_name(item))
if os.path.isfile(exe_path_manual):
app.debug('Version-matched executable for ' + item + ': ' + exe_path_manual)
return exe_path_manual
exe_path_sys = find_executable(exe_name(item))
if exe_path_sys and os.path.isfile(exe_path_sys):
app.debug('Using non-version-matched executable for ' + item + ': ' + exe_path_sys)
return exe_path_sys
raise MRtrixError('Unable to find executable for MRtrix3 command ' + item)
# If the target executable is not a binary, but is actually a script, use the
# shebang at the start of the file to alter the subprocess call
def _shebang(item):
from mrtrix3 import app, utils #pylint: disable=import-outside-toplevel
# If a complete path has been provided rather than just a file name, don't perform any additional file search
if os.sep in item:
path = item
else:
path = version_match(item)
if path == item:
path = find_executable(exe_name(item))
if not path:
app.debug('File \"' + item + '\": Could not find file to query')
return []
# Read the first 1024 bytes of the file
with open(path, 'rb') as file_in:
data = file_in.read(1024)
# Try to find the shebang line
for line in data.splitlines():
# Are there any non-text characters? If so, it's a binary file, so no need to looking for a shebang
try:
line = str(line.decode('utf-8'))
    except UnicodeError:
app.debug('File \"' + item + '\": Not a text file')
return []
line = line.strip()
if len(line) > 2 and line[0:2] == '#!':
# Need to strip first in case there's a gap between the shebang symbol and the interpreter path
shebang = line[2:].strip().split(' ')
# On Windows, /usr/bin/env can't be easily found, and any direct interpreter path will have a similar issue.
# Instead, manually find the right interpreter to call using distutils
# Also if script is written in Python, try to execute it using the same interpreter as that currently running
if os.path.basename(shebang[0]) == 'env':
if len(shebang) < 2:
app.warn('Invalid shebang in script file \"' + item + '\" (missing interpreter after \"env\")')
return []
if shebang[1] == 'python':
if not sys.executable:
app.warn('Unable to self-identify Python interpreter; file \"' + item + '\" not guaranteed to execute on same version')
return []
shebang = [ sys.executable ] + shebang[2:]
app.debug('File \"' + item + '\": Using current Python interpreter')
elif utils.is_windows():
shebang = [ os.path.abspath(find_executable(exe_name(shebang[1]))) ] + shebang[2:]
elif utils.is_windows():
shebang = [ os.path.abspath(find_executable(exe_name(os.path.basename(shebang[0])))) ] + shebang[1:]
app.debug('File \"' + item + '\": string \"' + line + '\": ' + str(shebang))
return shebang
app.debug('File \"' + item + '\": No shebang found')
return []
| mpl-2.0 | 4,200,728,413,799,274,500 | 41.454839 | 149 | 0.658803 | false | 3.850497 | false | false | false |
amozie/amozie | studzie/keras_rl_agent/dqn_test.py | 1 | 2269 | import numpy as np
import matplotlib.pyplot as plt
import gym
import time
import copy
from keras.models import Sequential, Model
from keras.layers import Dense, Activation, Flatten, Lambda, Input, Reshape, concatenate, Merge
from keras.optimizers import Adam, RMSprop
from keras.callbacks import History
from keras import backend as K
import tensorflow as tf
from gym import Env, Space, spaces
from gym.utils import seeding
from rl.agents.dqn import DQNAgent
from rl.policy import BoltzmannQPolicy, EpsGreedyQPolicy
from rl.memory import SequentialMemory, EpisodeParameterMemory
from rl.agents.cem import CEMAgent
from rl.agents import SARSAAgent
from rl.callbacks import TrainEpisodeLogger, CallbackList
# env = gym.make('MountainCar-v0')
env = gym.make('CartPole-v1')
env.seed()
nb_actions = env.action_space.n
x = Input((1,) + env.observation_space.shape)
y = Flatten()(x)
y = Dense(16)(y)
y = Activation('relu')(y)
y = Dense(16)(y)
y = Activation('relu')(y)
y = Dense(16)(y)
y = Activation('relu')(y)
y = Dense(nb_actions)(y)
y = Activation('linear')(y)
model = Model(x, y)
memory = SequentialMemory(limit=10000, window_length=1)
# policy = BoltzmannQPolicy()
policy = EpsGreedyQPolicy()
dqn = DQNAgent(model=model, nb_actions=nb_actions, memory=memory, nb_steps_warmup=1000, gamma=.9,
enable_dueling_network=False, dueling_type='avg', target_model_update=1e-2, policy=policy)
# dqn = DQNAgent(model=model, nb_actions=nb_actions, memory=memory, nb_steps_warmup=10,
# enable_dueling_network=True, dueling_type='avg', target_model_update=1e-2, policy=policy)
dqn.compile(Adam(lr=.001, decay=.001), metrics=['mae'])
rewards = []
callback = [TrainEpisodeLogger(), History()]
hist = dqn.fit(env, nb_steps=10000, visualize=False, verbose=2, callbacks=None)
rewards.extend(hist.history.get('episode_reward'))
plt.plot(rewards)
dqn.test(env, nb_episodes=5, visualize=True)
state = env.reset()
action = env.action_space.sample()
print(action)
state_list= []
for i in range(300):
state_list.append(state)
# action = np.argmax(dqn.model.predict(np.expand_dims(np.expand_dims(state, 0), 0))[0])
state, reward, done, _ = env.step(2)
env.render()
env.render(close=True)
state_arr = np.array(state_list)
plt.plot(state_arr)
| apache-2.0 | -1,884,155,499,636,017,700 | 30.971831 | 106 | 0.733363 | false | 2.962141 | false | false | false
divio/django-cronjobs | cronjobs/migrations/0004_auto__add_field_cronlog_exception_message__add_field_cronlog_duration.py | 1 | 2682 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'CronLog.exception_message'
db.add_column('cronjobs_cronlog', 'exception_message', self.gf('django.db.models.fields.TextField')(default=''), keep_default=False)
# Adding field 'CronLog.duration'
db.add_column('cronjobs_cronlog', 'duration', self.gf('django.db.models.fields.DecimalField')(default=0.0, max_digits=10, decimal_places=3), keep_default=False)
def backwards(self, orm):
# Deleting field 'CronLog.exception_message'
db.delete_column('cronjobs_cronlog', 'exception_message')
# Deleting field 'CronLog.duration'
db.delete_column('cronjobs_cronlog', 'duration')
models = {
'cronjobs.cron': {
'Meta': {'object_name': 'Cron'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'next_run': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2010, 6, 7, 15, 42, 29, 846310)'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cronjobs.CronType']"})
},
'cronjobs.cronlog': {
'Meta': {'object_name': 'CronLog'},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'duration': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '10', 'decimal_places': '3'}),
'exception_message': ('django.db.models.fields.TextField', [], {'default': "''"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'success': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'cronjobs.crontype': {
'Meta': {'object_name': 'CronType'},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cache_timeout': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'run_every': ('django.db.models.fields.IntegerField', [], {'default': '86400'})
}
}
complete_apps = ['cronjobs']
| bsd-3-clause | 2,534,393,390,478,528,500 | 48.666667 | 168 | 0.579418 | false | 3.634146 | false | false | false |
comicxmz001/LeetCode | Python/304 Range Sum Query 2D - Immutable.py | 1 | 1254 | class NumMatrix(object):
def __init__(self, matrix):
"""
initialize your data structure here.
:type matrix: List[List[int]]
"""
if len(matrix) == 0:
return None
self.m = [[0 for x in xrange(len(matrix[0])+1)] for x in xrange(len(matrix)+1)]
for i in xrange(len(matrix)):
for j in xrange(len(matrix[0])):
self.m[i+1][j+1] = self.m[i][j+1] + self.m[i+1][j] - self.m[i][j] + matrix[i][j]
def sumRegion(self, row1, col1, row2, col2):
"""
sum of elements matrix[(row1,col1)..(row2,col2)], inclusive.
:type row1: int
:type col1: int
:type row2: int
:type col2: int
:rtype: int
"""
return self.m[row2+1][col2+1] - self.m[row2+1][col1] - self.m[row1][col2+1] + self.m[row1][col1]
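# The prefix-sum table above carries one extra row/column of zeros, so
# sumRegion is pure inclusion-exclusion: take the rectangle ending at
# (row2, col2), subtract the strips above row1 and left of col1, then add back
# the corner rectangle that was subtracted twice.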
if __name__ == '__main__':
matrix = [
[3, 0, 1, 4, 2],
[5, 6, 3, 2, 1],
[1, 2, 0, 1, 5],
[4, 1, 0, 1, 7],
[1, 0, 3, 0, 5]
]
numMatrix = NumMatrix(matrix)
print numMatrix.sumRegion(1, 1, 2, 2)
print numMatrix.sumRegion(2, 1, 4, 3)
    print numMatrix.sumRegion(1, 2, 2, 4)
| mit | -6,708,685,302,077,250,000 | 31.179487 | 104 | 0.464912 | false | 2.964539 | false | false | false
AlekseyLM/Python | Game_1.py | 1 | 1991 | # Определяем начальные значения переменных
min_max = [0, 1000]
vvod = (min_max[0] + min_max[1]) / 2
v_text = {1:'нижний предел', 2:'верхний предел', 3:'отгадываемое число'}
# Создаем функцию проверки ввода
def prov_vvoda(vd_text):
global vvod
while True:
sp = input('Введите ' + v_text[vd_text] + ' (от ' + str(min_max[0]) + ', до ' + str(min_max[1]) + ')\nили нажмите "Enter" для выхода:')
if sp == '':
raise SystemExit(1)
elif not sp.isdigit():
print('Ну это же не положительное число!')
else:
vvod = int(sp)
if min_max[0] <= vvod <= min_max[1]:
break
else:
print('Число не соответствует условию (от ', min_max[0], ', до ', min_max[1], ')', sep = '')
# Вводим пределы min_max, проверяем ввод
prov_vvoda(1)
min_max[0] = vvod
prov_vvoda(2)
min_max[1] = vvod
# Генерируем случайное число в диапазоне min_max
import random
r_vopros = random.randint(min_max[0], min_max[1])
# Предлагаем отгадать, проверяем ввод
print('Компьютером загадано число. Попробуйте отгадать!')
i_step = 1
while True:
print('Попытка №', i_step, sep='')
prov_vvoda(3)
if r_vopros == vvod:
print('Отлично! Загаданное число "', vvod, '" Вы угадали с ', i_step, '-й попытки!', sep = '')
input('Нажмите "Enter" для выхода.')
raise SystemExit(1)
elif r_vopros > vvod:
if vvod > min_max[0]: min_max[0] = vvod
else:
if vvod < min_max[1]: min_max[1] = vvod
i_step += 1
input('Нажмите "Enter" для выхода.')
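# Note: narrowing min_max after each guess makes the optimal strategy a binary
# search, so a 0..1000 range is solvable in at most 10 guesses.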
| gpl-2.0 | -6,841,831,948,513,184,000 | 32.680851 | 143 | 0.576121 | false | 1.877817 | false | false | false |
ddanier/django_price | django_price/currency.py | 1 | 1149 | # coding: utf-8
from .currencies import CURRENCIES
class Currency(object):
CURRENCIES = dict([(c[0], c[1:]) for c in CURRENCIES])
def __init__(self, iso_code):
if not iso_code in self.CURRENCIES:
raise TypeError('unknown currency (%s)' % iso_code)
self.iso_code = iso_code
self.iso_num, self.decimal_places, self.rounding, self.name, self.symbol = self.CURRENCIES[iso_code]
def __str__(self):
from django.utils.encoding import smart_str
return smart_str(unicode(self))
def __unicode__(self):
return self.iso_code
def __eq__(self, other):
if not isinstance(other, Currency):
# allow direct comparision to iso codes
if other in self.CURRENCIES:
return self.iso_code == other
return False
return self.iso_code == other.iso_code
def __ne__(self, other):
return not self == other
# django_ajax hook
def ajax_data(self):
return {
'iso_code': self.iso_code,
'name': self.name,
'symbol': self.symbol,
}
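# Usage sketch (assumes 'EUR' is a key in CURRENCIES):
#   eur = Currency('EUR')
#   eur == 'EUR' # True; direct comparison against ISO code strings is supported
#   print eur.name, eur.symbol, eur.decimal_places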
| bsd-3-clause | 3,699,915,892,193,529,300 | 27.725 | 108 | 0.563098 | false | 3.779605 | false | false | false |
linyiqun/minos | supervisor/supervisor/medusa/thread/pi_module.py | 5 | 1800 | # -*- Mode: Python -*-
# [reworking of the version in Python-1.5.1/Demo/scripts/pi.py]
# Print digits of pi forever.
#
# The algorithm, using Python's 'long' integers ("bignums"), works
# with continued fractions, and was conceived by Lambert Meertens.
#
# See also the ABC Programmer's Handbook, by Geurts, Meertens & Pemberton,
# published by Prentice-Hall (UK) Ltd., 1990.
import string
class StopException(Exception): pass
def go (file):
try:
k, a, b, a1, b1 = 2L, 4L, 1L, 12L, 4L
while 1:
# Next approximation
p, q, k = k*k, 2L*k+1L, k+1L
a, b, a1, b1 = a1, b1, p*a+q*a1, p*b+q*b1
# Print common digits
d, d1 = a/b, a1/b1
while d == d1:
if file.write (str(int(d))):
raise StopException
a, a1 = 10L*(a%b), 10L*(a1%b1)
d, d1 = a/b, a1/b1
except StopException:
return
class line_writer:
"partition the endless line into 80-character ones"
def __init__ (self, file, digit_limit=10000):
self.file = file
self.buffer = ''
self.count = 0
self.digit_limit = digit_limit
def write (self, data):
self.buffer = self.buffer + data
if len(self.buffer) > 80:
line, self.buffer = self.buffer[:80], self.buffer[80:]
self.file.write (line+'\r\n')
self.count = self.count + 80
if self.count > self.digit_limit:
return 1
else:
return 0
def main (env, stdin, stdout):
parts = string.split (env['REQUEST_URI'], '/')
if len(parts) >= 3:
ndigits = string.atoi (parts[2])
else:
ndigits = 5000
stdout.write ('Content-Type: text/plain\r\n\r\n')
go (line_writer (stdout, ndigits))
| apache-2.0 | 5,644,802,356,304,090,000 | 28.032258 | 74 | 0.546111 | false | 3.152364 | false | false | false |
nichollyn/libspark | extension_system/test/manual/pluginview/plugins/plugin1/plugin1.py | 1 | 1337 | __author__ = 'kevin'
from PySide.QtCore import QObject
from _plugin1__iplugin_test__dev_ import easy_import_ as ei
if ei.initialized:
from iplugin import IPlugin
from pluginmanager import PluginManager
class MyPlugin1(IPlugin):
def __init__(self, manager):
super(MyPlugin1, self).__init__(manager)
self.initializeCalled = False
def initialize(self, arguments):
        self.initializeCalled = True
obj = QObject(self)
obj.setObjectName("MyPlugin1")
self.addAutoReleaseObject(obj)
found2 = False
found3 = False
for otherPluginObj in PluginManager.getInstance().allObjects():
if otherPluginObj.objectName() == "MyPlugin2":
found2 = True
elif otherPluginObj.objectName() == "MyPlugin3":
found3 = True
if found2 and found3:
return True, "No error"
errorString = "object(s) missing from plugin(s):"
if not found2:
errorString += "plugin2"
if not found3:
errorString += "plugin3"
return False, errorString
def extensionsInitialized(self):
if not self.initializeCalled:
return
obj = QObject(self)
obj.setObjectName("MyPlugin1_running")
self.addAutoReleaseObject(obj)
| gpl-2.0 | 5,226,707,147,472,492,000 | 26.854167 | 71 | 0.612565 | false | 4.178125 | false | false | false |
djtaylor/cloudscape-DEPRECATED | python/cloudscape/engine/api/app/agent/models.py | 2 | 7921 | import copy
from collections import OrderedDict
# Django Libraries
from django.conf import settings
from django.db import connections
# CloudScape Libraries
from cloudscape.common import logger
from cloudscape.common import config
from cloudscape.engine.api.app.host.models import DBHostDetails
# Maximum Rows per Table
MAX_ROWS = 360
class DBHostStats:
"""
Main database model for storing polling statistics for managed hosts. Each host has its own
table in the host statistics database.
"""
def __init__(self, uuid=None):
self.uuid = uuid
self.db = settings.DATABASES['host_stats']
self.dbh = connections['host_stats'].cursor()
# Configuration and logger
self.conf = config.parse()
self.log = logger.create(__name__, self.conf.server.log)
# Parameters validation flag
self.pv = True
# Create an ordered dictionary for the column names
self.columns = OrderedDict([
('uptime', 'VARCHAR(48)'),
('cpu_use', 'TEXT'),
('memory_use', 'TEXT'),
('memory_top', 'TEXT'),
('disk_use', 'TEXT'),
('disk_io', 'TEXT'),
('network_io', 'TEXT')])
# Make sure a UUID is specified
if not self.uuid:
self.pv = False
# Make sure the UUID is mapped to an existing host
if not DBHostDetails.objects.filter(uuid=self.uuid).count():
self.pv = False
def _table_init(self):
"""
Initialize the host statistics table.
"""
# Construct the columns string
col_str = ''
for name, data_type in self.columns.iteritems():
col_str += ',%s %s' % (name, data_type)
# Construct the table query
timestamp = 'created TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP'
init_query = 'CREATE TABLE IF NOT EXISTS `%s`.`%s`(%s%s)' % (self.db['NAME'], self.uuid, timestamp, col_str)
# Create the table
try:
self.dbh.execute(init_query)
self.log.info('Initialized host \'%s\' statistics table' % self.uuid)
return True
except Warning:
return True
except Exception as e:
self.log.error('Failed to initialize host \'%s\' statistics table: %s' % (self.uuid, e))
return False
def _construct_poll_query(self, params):
"""
Construct the polling data column query.
"""
col_names = ''
col_values = ''
for key, data_type in self.columns.iteritems():
col_names += '%s,' % key
col_values += '\'%s\',' % params[key]
col_names = col_names[:-1]
col_values = col_values[:-1]
# Construct and return the poll query
return 'INSERT INTO `%s`.`%s`(%s) VALUES(%s)' % (self.db['NAME'], self.uuid, col_names, col_values)
def _fetch_all(self, cursor):
"""
Retrieve all rows from a raw SQL query.
"""
desc = cursor.description
return [
dict(zip([col[0] for col in desc], row))
for row in cursor.fetchall()
]
def _count(self):
"""
Count the number of statistic rows being returned.
"""
self.dbh.execute('SELECT COUNT(*) FROM `%s`' % self.uuid)
result = self.dbh.fetchone()
return result[0]
def get(self, range=None, latest=None):
"""
Retrieve host statistics.
"""
# If getting the latest N rows
if latest:
query = 'SELECT * FROM `%s`.`%s` ORDER BY created DESC LIMIT %s' % (self.db['NAME'], self.uuid, latest)
self.dbh.execute(query)
rows = self._fetch_all(self.dbh)
# If returning a single row
if latest == 1:
return rows[0]
else:
return rows
# If provided a start/end range
        if 'start' not in range or 'end' not in range:
return False
# Default range - last 60 entries
if not range['start'] and not range['end']:
query = 'SELECT * FROM `%s`.`%s` ORDER BY created DESC LIMIT 30' % (self.db['NAME'], self.uuid)
# Select all up until end
elif not range['start'] and range['end']:
query = 'SELECT * FROM `%s`.`%s` WHERE created < \'%s\'' % (self.db['NAME'], self.uuid, range['end'])
# Select from start date to end
elif range['start'] and not range['end']:
query = 'SELECT * FROM `%s`.`%s` WHERE created > \'%s\'' % (self.db['NAME'], self.uuid, range['start'])
# Select between two date ranges
else:
            query = 'SELECT * FROM `%s`.`%s` WHERE (created BETWEEN \'%s\' AND \'%s\')' % (self.db['NAME'], self.uuid, range['start'], range['end'])
# Select host statistics
try:
# Get the unsorted results
self.dbh.execute(query)
rows = self._fetch_all(self.dbh)
# Convert to a dictionary with date as the key
results_dict = {}
for row in rows:
key = row['created'].strftime('%Y-%m-%d %H:%M:%S')
stats = copy.copy(row)
del stats['created']
results_dict[row['created']] = stats
# Order the dictionary by date
results_sort = OrderedDict()
for key, value in sorted(results_dict.iteritems(), key=lambda t: t[0]):
results_sort[key] = value
            # Return the ordered results
return results_sort
except Exception as e:
self.log.error('Failed to retrieve host statistics for \'%s\': %s' % (self.uuid, e))
return False
def create(self, params):
"""
Create a new host statistics row.
"""
# If any parameters are invalid
if self.pv == False:
self.log.error('Host UUID \'%s\' is invalid or is not a managed host' % self.uuid)
return False
# Require a row parameters dictionary
if not params or not isinstance(params, dict):
self.log.error('Missing required dictionary of column names/values')
return False
        # Make sure all required columns are present
for key, data_type in self.columns.iteritems():
if key not in params:
self.log.error('Missing required column key \'%s\'' % key)
return False
# Make sure the host table exists
table_status = self._table_init()
if table_status != True:
return False
# Construct the polling query
poll_query = self._construct_poll_query(params)
# Create the statistics row
try:
self.dbh.execute(poll_query)
return True
except Exception as e:
            self.log.error('Failed to create statistics row for host \'%s\': %s' % (self.uuid, e))
return False
def delete(self):
"""
Delete a host statistics table.
"""
# If any parameters are invalid
if self.pv == False:
return False
# Construct the drop table syntax
drop_query = 'DROP TABLE IF EXISTS `%s`.`%s`' % (self.db['NAME'], self.uuid)
# Drop the host statistics table
try:
self.dbh.execute(drop_query)
return True
except Exception as e:
self.log.error('Failed to delete host \'%s\' statistics table: %s' % (self.uuid, e))
return False | gpl-3.0 | -944,699,862,993,960,000 | 33.745614 | 144 | 0.526322 | false | 4.373827 | false | false | false |
trep/opentrep | gui/django/webapps/opentrep/search/por_service.py | 1 | 2628 | #
import math, re
#
def get_random_airport (openTrepLibrary, travelPB2):
# OpenTREP
result = openTrepLibrary.getRandom()
# Protobuf
place = travelPB2.Place()
place.ParseFromString (result)
return place
#
def get_lat_lon (place):
return place.coord.latitude, place.coord.longitude
#
def get_lon_lat (place):
return place.coord.longitude, place.coord.latitude
#
def get_country_code (place):
return place.country_code.code
#
def get_country_name (place):
return place.country_name
#
def get_city_code (place):
return place.city_code.code
#
def get_city_names (place):
return place.city_name_utf, place.city_name_ascii
#
def get_airport_names (place, nice=False):
ugly_name_utf = place.name_utf
    ugly_name_ascii = place.name_ascii
if not nice:
return ugly_name_utf, ugly_name_ascii
# "Ugly" names typically contain the country code (2 uppercase letters)
# followed by a truncated repetition of the POR name. For instance:
# - Cagnes Sur Mer FR Cagnes Sur M
# - Antibes FR Antibes
# We just truncate the name and keep the part before the country code.
nice_name_utf = re.sub (r"(.*)([ ])([A-Z]{2})([ ])(.*)", "\\1", ugly_name_utf)
nice_name_ascii = re.sub (r"(.*)([ ])([A-Z]{2})([ ])(.*)", "\\1",
ugly_name_ascii)
return nice_name_utf, nice_name_ascii
#
def get_continent_code (place):
return place.continent_code.code
#
def get_continent_name (place):
return place.continent_name
#
def get_country_and_continent (place):
return place.country_code.code, place.continent_code.code
#
def great_circle_distance (lat1, lon1, lat2, lon2, degrees=True):
if degrees:
lat1 = lat1/180.0*math.pi
lon1 = lon1/180.0*math.pi
lat2 = lat2/180.0*math.pi
lon2 = lon2/180.0*math.pi
diameter = 12742.0
lat_diff = (lat2-lat1) / 2.0
lat_diff_sin = math.sin (lat_diff)
lon_diff = (lon2-lon1) / 2.0
lon_diff_sin = math.sin (lon_diff)
lat_cos = math.cos (lat1) * math.cos (lat2)
proj_dist = lat_diff_sin**2.0 + lat_cos * lon_diff_sin**2.0
gcd = diameter * math.asin (math.sqrt (proj_dist))
return gcd
#
def get_distance_km (place1, place2):
lat1, lon1 = get_lat_lon (place1)
lat2, lon2 = get_lat_lon (place2)
dist = great_circle_distance (lat1, lon1, lat2, lon2)
return dist
#
def get_local_local_flight_duration_hr (place1, place2):
lat1, lon1 = get_lat_lon (place1)
lat2, lon2 = get_lat_lon (place2)
dist = great_circle_distance (lat1, lon1, lat2, lon2)
travel_hr = 0.5 + dist/800.0
    time_diff_hr = (lon2 - lon1) / 15.0
    return travel_hr + time_diff_hr
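#
# Minimal usage sketch (hedged): the coordinates below are rough
# approximations for CDG and JFK, used only to exercise the great
# circle formula above; expect a result of roughly 5.8e3 km.
if __name__ == '__main__':
    d = great_circle_distance (49.01, 2.55, 40.64, -73.78)
    print 'CDG-JFK great circle distance: %.0f km' % d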
| lgpl-2.1 | 9,082,942,870,864,124,000 | 25.545455 | 80 | 0.6621 | false | 2.763407 | false | false | false |
laxect/scale | modules/bilibili_spider.py | 1 | 2474 | import re
import json
import gevent
import requests
from gevent.queue import Empty
# my module
from . import stand_task
from modules import database
class bilibili_spider(stand_task.task):
    'a spider especially designed for bilibili bangumi'
def __init__(self, aim=None):
        'aim identifies which bangumi (season) you want to watch'
super().__init__()
self.id = 'laxect.bilibili_spider'
self.inbox = self.id
self.version = 1
self.mode = 'from_database'
self.aims = []
def _url(self, aim):
        url = ('http://bangumi.bilibili.com/jsonp/seasoninfo/'
               f'{aim}.ver?callback=seasonListCallback')
return url
def _handle(self, text, aim):
'the fun to handle the text spider return'
        dica = json.loads(re.findall(r'\w*\((.*)\);', text)[0])
title = dica['result']['bangumi_title']
eps = dica['result']['episodes']
res = (
title,
eps[0]['index'],
eps[0]['index_title'],
eps[0]['webplay_url'])
        fres = '%s updated episode %s %s\n%s' % res  # format string
with database.database(self.id) as db:
if db.check_up_to_date(aim, str(res)):
return fres
return None
def _aim_run(self, aim, res):
try:
ts = self._handle(requests.get(self._url(aim), timeout=5).text, aim)
except requests.exceptions.RequestException as err:
return
if ts:
res.append(ts)
def _run(self, targets):
if self.mode == 'from_inbox':
aims = self.aims
else:
aims = targets
res = []
pool = []
for aim in aims:
if aim:
pool.append(gevent.spawn(self._aim_run, aim, res))
gevent.joinall(pool)
if self.debug:
msg = f'the res of run is:\n{str(res)}'
self.debug_information_format(msg)
return res
def _inbox_handle(self, inbox):
aims = []
try:
while True:
item = inbox.get(block=False)
if item:
aims = item['msg']
except Empty:
pass
if self.debug:
msg = f'the argv recv from inbox is:\n{str(aims)}'
self.debug_information_format(msg)
if aims:
self.aims = aims
self.mode = 'from_inbox'
def mod_init(aim):
return bilibili_spider(aim=aim)
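# Minimal usage sketch (hedged): poll two season ids once, outside the
# usual task scheduler. The ids are placeholders, ``targets`` normally
# comes from the task runner, and this assumes the stand_task base
# class initializes ``self.debug``.
if __name__ == '__main__':
    spider = mod_init(aim=None)
    for update in spider._run(['3461', '5978']):
        print(update)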
| mit | -6,638,228,883,525,679,000 | 27.651163 | 80 | 0.530438 | false | 3.5 | false | false | false |
rootsdev/python-gedcom-parser | gedcom.py | 1 | 19905 | #
# Python GEDCOM Parser
#
# This is a basic parser for the GEDCOM 5.5 format. For documentation of
# this format, see
#
# http://homepages.rootsweb.com/~pmcbride/gedcom/55gctoc.htm
# Copyright (C) 2012 Daniel Zappala (daniel.zappala [at] gmail.com)
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
__all__ = ["Gedcom", "Element", "GedcomParseError"]
# Global imports
import string
class Gedcom:
"""Gedcom parser
This parser reads a GEDCOM file and parses it into a set of
elements. These elements can be accessed via a list (the order of
the list is the same as the order of the elements in the GEDCOM
file), or a dictionary (the key to the dictionary is a unique
identifier that one element can use to point to another element).
"""
def __init__(self,file):
"""Initialize a Gedcom parser. You must supply a Gedcom file."""
self.__element_list = []
self.__element_dict = {}
self.__element_top = Element(-1,"","TOP","",self.__element_dict)
self.__current_level = -1
self.__current_element = self.__element_top
self.__individuals = 0
self.__parse(file)
def element_list(self):
"""Return a list of all the elements in the Gedcom file. The
elements are in the same order as they appeared in the file.
"""
return self.__element_list
def element_dict(self):
"""Return a dictionary of elements from the Gedcom file. Only
elements identified by a pointer are listed in the dictionary.
The key for the dictionary is the pointer.
"""
return self.__element_dict
# Private methods
def __parse(self,file):
# open file
# go through the lines
f = open(file)
number = 1
for line in f.readlines():
# Skip over some junk that Rootsmagic puts in gedcom files.
if number == 1 and ord(line[0]) == 239:
line = line[3:]
self.__parse_line(number,line)
number += 1
self.__count()
def __parse_line(self,number,line):
# each line should have: Level SP (Pointer SP)? Tag (SP Value)? (SP)? NL
# parse the line
parts = string.split(line)
place = 0
l = self.__level(number,parts,place)
place += 1
p = self.__pointer(number,parts,place)
if p != '':
place += 1
t = self.__tag(number,parts,place)
place += 1
v = self.__value(number,parts,place)
# create the element
if l > self.__current_level + 1:
self.__error(number,"Structure of GEDCOM file is corrupted")
e = Element(l,p,t,v,self.element_dict())
self.__element_list.append(e)
if p != '':
self.__element_dict[p] = e
if l > self.__current_level:
self.__current_element.add_child(e)
e.add_parent(self.__current_element)
else:
            # l <= self.__current_level
while (self.__current_element.level() != l - 1):
self.__current_element = self.__current_element.parent()
self.__current_element.add_child(e)
e.add_parent(self.__current_element)
# finish up
self.__current_level = l
self.__current_element = e
def __level(self,number,parts,place):
if len(parts) <= place:
self.__error(number,"Empty line")
try:
l = int(parts[place])
except ValueError:
self.__error(number,"Line must start with an integer level")
if (l < 0):
self.__error(number,"Line must start with a positive integer")
return l
def __pointer(self,number,parts,place):
if len(parts) <= place:
self.__error(number,"Incomplete Line")
p = ''
        part = parts[place]
if part[0] == '@':
if part[len(part)-1] == '@':
p = part
# could strip the pointer to remove the @ with
# string.strip(part,'@')
# but it may be useful to identify pointers outside this class
else:
self.__error(number,"Pointer element must start and end with @")
return p
def __tag(self,number,parts,place):
if len(parts) <= place:
self.__error(number,"Incomplete line")
return parts[place]
def __value(self,number,parts,place):
if len(parts) <= place:
return ''
p = self.__pointer(number,parts,place)
if p != '':
# rest of the line should be empty
if len(parts) > place + 1:
self.__error(number,"Too many elements")
return p
else:
# rest of the line should be ours
vlist = []
while place < len(parts):
vlist.append(parts[place])
place += 1
v = string.join(vlist)
return v
def __error(self,number,text):
error = "Gedcom format error on line " + str(number) + ': ' + text
        raise GedcomParseError(error)
def __count(self):
# Count number of individuals
self.__individuals = 0
for e in self.__element_list:
if e.individual():
self.__individuals += 1
def __print(self):
        for e in self.element_list():
print string.join([str(e.level()),e.pointer(),e.tag(),e.value()])
class GedcomParseError(Exception):
"""Exception raised when a Gedcom parsing error occurs."""
def __init__(self, value):
self.value = value
def __str__(self):
        return repr(self.value)
class Element:
"""Gedcom element
Each line in a Gedcom file is an element with the format
level [pointer] tag [value]
where level and tag are required, and pointer and value are
optional. Elements are arranged hierarchically according to their
level, and elements with a level of zero are at the top level.
Elements with a level greater than zero are children of their
parent.
A pointer has the format @pname@, where pname is any sequence of
characters and numbers. The pointer identifies the object being
pointed to, so that any pointer included as the value of any
element points back to the original object. For example, an
element may have a FAMS tag whose value is @F1@, meaning that this
element points to the family record in which the associated person
is a spouse. Likewise, an element with a tag of FAMC has a value
that points to a family record in which the associated person is a
child.
See a Gedcom file for examples of tags and their values.
"""
def __init__(self,level,pointer,tag,value,dict):
"""Initialize an element. You must include a level, pointer,
tag, value, and global element dictionary. Normally
initialized by the Gedcom parser, not by a user.
"""
# basic element info
self.__level = level
self.__pointer = pointer
self.__tag = tag
self.__value = value
self.__dict = dict
# structuring
self.__children = []
self.__parent = None
def level(self):
"""Return the level of this element."""
return self.__level
def pointer(self):
"""Return the pointer of this element."""
return self.__pointer
def tag(self):
"""Return the tag of this element."""
return self.__tag
def value(self):
"""Return the value of this element."""
return self.__value
def children(self):
"""Return the child elements of this element."""
return self.__children
def parent(self):
"""Return the parent element of this element."""
return self.__parent
def add_child(self,element):
"""Add a child element to this element."""
self.children().append(element)
def add_parent(self,element):
"""Add a parent element to this element."""
self.__parent = element
def individual(self):
"""Check if this element is an individual."""
return self.tag() == "INDI"
# criteria matching
def criteria_match(self,criteria):
"""Check in this element matches all of the given criteria.
The criteria is a colon-separated list, where each item in the
list has the form [name]=[value]. The following criteria are supported:
surname=[name]
Match a person with [name] in any part of the surname.
name=[name]
Match a person with [name] in any part of the given name.
birth=[year]
Match a person whose birth year is a four-digit [year].
birthrange=[year1-year2]
Match a person whose birth year is in the range of years from
[year1] to [year2], including both [year1] and [year2].
death=[year]
deathrange=[year1-year2]
marriage=[year]
marriagerange=[year1-year2]
"""
# error checking on the criteria
try:
for crit in criteria.split(':'):
key,value = crit.split('=')
except:
return False
match = True
for crit in criteria.split(':'):
key,value = crit.split('=')
if key == "surname" and not self.surname_match(value):
match = False
elif key == "name" and not self.given_match(value):
match = False
elif key == "birth":
try:
year = int(value)
if not self.birth_year_match(year):
match = False
except:
match = False
elif key == "birthrange":
try:
year1,year2 = value.split('-')
year1 = int(year1)
year2 = int(year2)
if not self.birth_range_match(year1,year2):
match = False
except:
match = False
elif key == "death":
try:
year = int(value)
if not self.death_year_match(year):
match = False
except:
match = False
elif key == "deathrange":
try:
year1,year2 = value.split('-')
year1 = int(year1)
year2 = int(year2)
if not self.death_range_match(year1,year2):
match = False
except:
match = False
elif key == "marriage":
try:
year = int(value)
if not self.marriage_year_match(year):
match = False
except:
match = False
elif key == "marriagerange":
try:
year1,year2 = value.split('-')
year1 = int(year1)
year2 = int(year2)
if not self.marriage_range_match(year1,year2):
match = False
except:
match = False
return match
def surname_match(self,name):
"""Match a string with the surname of an individual."""
(first,last) = self.name()
return last.find(name) >= 0
def given_match(self,name):
"""Match a string with the given names of an individual."""
(first,last) = self.name()
return first.find(name) >= 0
def birth_year_match(self,year):
"""Match the birth year of an individual. Year is an integer."""
return self.birth_year() == year
def birth_range_match(self,year1,year2):
"""Check if the birth year of an individual is in a given
range. Years are integers.
"""
year = self.birth_year()
if year >= year1 and year <= year2:
return True
return False
def death_year_match(self,year):
"""Match the death year of an individual. Year is an integer."""
return self.death_year() == year
def death_range_match(self,year1,year2):
"""Check if the death year of an individual is in a given range.
Years are integers.
"""
year = self.death_year()
if year >= year1 and year <= year2:
return True
return False
def marriage_year_match(self,year):
"""Check if one of the marriage years of an individual matches
the supplied year. Year is an integer.
"""
years = self.marriage_years()
return year in years
def marriage_range_match(self,year1,year2):
"""Check if one of the marriage year of an individual is in a
given range. Years are integers.
"""
years = self.marriage_years()
for year in years:
if year >= year1 and year <= year2:
return True
return False
def families(self):
"""Return a list of all of the family elements of a person."""
results = []
for e in self.children():
if e.tag() == "FAMS":
f = self.__dict.get(e.value(),None)
if f != None:
results.append(f)
return results
def name(self):
"""Return a person's names as a tuple: (first,last)."""
first = ""
last = ""
if not self.individual():
return (first,last)
for e in self.children():
if e.tag() == "NAME":
# some older Gedcom files don't use child tags but instead
# place the name in the value of the NAME tag
if e.value() != "":
                    name = string.split(e.value(),'/')
                    first = string.strip(name[0])
                    if len(name) > 1:
                        last = string.strip(name[1])
else:
for c in e.children():
if c.tag() == "GIVN":
first = c.value()
if c.tag() == "SURN":
last = c.value()
return (first,last)
def birth(self):
"""Return the birth tuple of a person as (date,place)."""
date = ""
place = ""
if not self.individual():
return (date,place)
for e in self.children():
if e.tag() == "BIRT":
for c in e.children():
if c.tag() == "DATE":
date = c.value()
if c.tag() == "PLAC":
place = c.value()
return (date,place)
def birth_year(self):
"""Return the birth year of a person in integer format."""
date = ""
if not self.individual():
return date
for e in self.children():
if e.tag() == "BIRT":
for c in e.children():
if c.tag() == "DATE":
datel = string.split(c.value())
date = datel[len(datel)-1]
if date == "":
return -1
try:
return int(date)
except:
return -1
def death(self):
"""Return the death tuple of a person as (date,place)."""
date = ""
place = ""
if not self.individual():
return (date,place)
for e in self.children():
if e.tag() == "DEAT":
for c in e.children():
if c.tag() == "DATE":
date = c.value()
if c.tag() == "PLAC":
place = c.value()
return (date,place)
def death_year(self):
"""Return the death year of a person in integer format."""
date = ""
if not self.individual():
return date
for e in self.children():
if e.tag() == "DEAT":
for c in e.children():
if c.tag() == "DATE":
datel = string.split(c.value())
date = datel[len(datel)-1]
if date == "":
return -1
try:
return int(date)
except:
return -1
def deceased(self):
"""Check if a person is deceased."""
if not self.individual():
return False
for e in self.children():
if e.tag() == "DEAT":
return True
return False
def marriage(self):
"""Return a list of marriage tuples for a person, each listing
(date,place).
"""
date = ""
place = ""
if not self.individual():
return (date,place)
for e in self.children():
if e.tag() == "FAMS":
f = self.__dict.get(e.value(),None)
if f == None:
return (date,place)
for g in f.children():
if g.tag() == "MARR":
for h in g.children():
if h.tag() == "DATE":
date = h.value()
if h.tag() == "PLAC":
place = h.value()
return (date,place)
def marriage_years(self):
"""Return a list of marriage years for a person, each in integer
format.
"""
dates = []
if not self.individual():
return dates
for e in self.children():
if e.tag() == "FAMS":
f = self.__dict.get(e.value(),None)
if f == None:
return dates
for g in f.children():
if g.tag() == "MARR":
for h in g.children():
if h.tag() == "DATE":
datel = string.split(h.value())
date = datel[len(datel)-1]
try:
dates.append(int(date))
except:
pass
return dates
def get_individual(self):
"""Return this element and all of its sub-elements."""
result = [self]
for e in self.children():
result.append(e)
return result
def get_family(self):
"""Return this element any all elements in its families."""
result = [self]
for e in self.children():
if e.tag() == "HUSB" or e.tag() == "WIFE" or e.tag() == "CHIL":
f = self.__dict.get(e.value())
if f != None:
result.append(f)
return result
def __str__(self):
"""Format this element as its original string."""
result = str(self.level())
if self.pointer() != "":
result += ' ' + self.pointer()
result += ' ' + self.tag()
if self.value() != "":
result += ' ' + self.value()
return result
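# Minimal usage sketch (hedged): the file name comes from the command
# line and the criteria string is illustrative; element_list,
# individual, criteria_match, name and birth_year are all defined above.
if __name__ == "__main__":
    import sys
    if len(sys.argv) != 2:
        print "usage: gedcom.py FILE.ged"
        sys.exit(1)
    g = Gedcom(sys.argv[1])
    for e in g.element_list():
        if e.individual() and e.criteria_match("birthrange=1850-1900"):
            print e.name(), e.birth_year()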
| gpl-3.0 | -3,179,788,084,274,490,000 | 32.341709 | 80 | 0.507059 | false | 4.384361 | false | false | false |
trojkat/doner | doner/project/access_control_views.py | 1 | 2976 | from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.views.generic.base import View
from django.shortcuts import render
from .models import Project, Ticket, Log
class LoginRequiredView(View):
'''
This view can be visited only by authenticated users.
'''
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(LoginRequiredView, self).dispatch(*args, **kwargs)
class UserPrivateView(View):
'''
This view can be visited only by single user (view owner).
'''
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
if not self.request.user == self.get_object():
return render(self.request, 'access-denied.html')
return super(UserPrivateView, self).dispatch(*args, **kwargs)
class SuperUserView(View):
'''
This view can be visited only by superusers.
'''
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
if not self.request.user.is_superuser:
return render(self.request, 'access-denied.html')
return super(SuperUserView, self).dispatch(*args, **kwargs)
class ProjectReletedView(View):
url_pk_related_model = Project
project = None
def get_project(self):
'''
Based on self.url_pk_related_model get project instance and set it as self.project.
'''
if self.project:
# project is already available
return
model_instance = self.url_pk_related_model.objects.get(pk=self.kwargs['pk'])
if isinstance(model_instance, Project):
self.project = model_instance
elif isinstance(model_instance, Ticket):
self.project = model_instance.project
elif isinstance(model_instance, Log):
self.project = model_instance.ticket.project
else:
            raise ValueError('unsupported url_pk_related_model instance: %r'
                             % model_instance)
def is_project_member(self):
self.get_project()
return self.request.user.is_superuser or self.request.user in self.project.members.all()
class ProjectView(ProjectReletedView):
'''
If project IS PRIVATE give access to:
- project members
- superusers
'''
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
self.get_project()
if self.project.is_private and not self.is_project_member():
return render(self.request, 'access-denied.html')
return super(ProjectView, self).dispatch(*args, **kwargs)
class MembersOnlyView(ProjectReletedView):
'''
This view can be visited only by:
- project members
- superusers
'''
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
if not self.is_project_member():
return render(self.request, 'access-denied.html')
return super(MembersOnlyView, self).dispatch(*args, **kwargs)
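# Minimal usage sketch (hedged): a ticket page visible to project
# members only. The template name is illustrative; ``pk`` in the URL
# identifies the Ticket, from which MembersOnlyView resolves the
# project before dispatch.
class TicketDetailView(MembersOnlyView):
    url_pk_related_model = Ticket

    def get(self, request, pk):
        ticket = Ticket.objects.get(pk=pk)
        return render(request, 'ticket-detail.html', {'ticket': ticket})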
| mit | -4,608,560,247,374,047,000 | 27.075472 | 96 | 0.650538 | false | 4.116183 | false | false | false |
forgeservicelab/forge.benchmark | cas_log_in_and_measure.py | 1 | 4515 | #!/usr/bin/env python
# Script for measuring response time for redmine urls.
# It attempts to authenticate via html form which is it given on first GET.
# It should work generally for anything that forwards properly, and has
# "username" and "password" fields in the form.
#
# I tested it for CASino login and standard Redmine login
#
# Exmaple invocation:
# ./redmine.py -u tkarasek -b http://193.166.24.110:8080 \
# -l /rb/master_backlog/digile -c 5
import os
import sys
import getpass
import argparse
import mechanize
import cookielib
import logging
import time
import prettytable
logger = logging.getLogger("mechanize")
logger.addHandler(logging.StreamHandler(sys.stdout))
logger.setLevel(logging.DEBUG)
DESCRIPTION = "FORGE benchmark for services behind CAS"
REDMINE_URL = 'https://support.forgeservicelab.fi/redmine'
MEASURED_URLS = ['/rb/taskboards/50',
'/rb/master_backlog/digile']
def getUser():
user = os.environ.get('USER')
if user and user != 'root':
print "Using username: %s" % user
else:
user = raw_input('Give username: ')
return user
def getPassword(user):
dot_file = os.path.join(os.environ['HOME'], '.ldappass')
pw = None
if os.path.isfile(dot_file):
with open(dot_file) as f:
pw = f.read().strip()
print "Using password from %s" % dot_file
if not pw:
pw = getpass.getpass(
prompt="Give password for username %s: " % user)
return pw
def getAuthenticatedHandle(baseurl, cookiejar, user, password, debug=False):
br = mechanize.Browser()
if debug:
br.set_debug_http(True)
br.set_debug_responses(True)
br.set_debug_redirects(True)
br.set_cookiejar(cookiejar)
br.set_handle_equiv(True)
br.set_handle_redirect(True)
br.set_handle_referer(True)
br.set_handle_robots(False)
br.addheaders = [
('User-agent', ('Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.1) '
'Gecko/2008071615 Fedora/3.0.1-1.fc9 Firefox/3.0.1')),
('Accept', ('text/html,application/xhtml+xml,application/xml;q=0.9,'
'*/*;q=0.8'))
]
br.open(baseurl)
br.select_form(nr=0)
br.form['username'] = user
br.form['password'] = password
br.submit()
return br
def measureGet(browser, url):
start_time = time.time()
print "Getting %s .." % url
browser.open(url)
d = time.time() - start_time
print ".. took %.2f secs" % d
return d
def printResults(l):
x = prettytable.PrettyTable(['URL', 'avg time [sec]'])
for r in l:
x.add_row(r)
print x
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description=DESCRIPTION,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
help_count = 'how many times to measure timeout for each url'
help_baseurl = 'base url, e.g. https://auth.forgeservicelab.fi'
help_locations = ('locations to measure. full url is baseurl + locations. '
' e.g. /sessions')
parser.add_argument('-d','--debug', help='show debug output',
action='store_true')
parser.add_argument('-u','--user', help='user for CAS')
    parser.add_argument('-t','--test', help='print a sample results table and exit',
                        action='store_true')
parser.add_argument('-b','--baseurl', help=help_baseurl,
default=REDMINE_URL)
parser.add_argument('-c','--count', help=help_count, default=2, type=int)
parser.add_argument('-l','--locations', help=help_locations, nargs='+',
default=MEASURED_URLS)
args = parser.parse_args()
if args.test:
printResults([['a', '1.3'], ['b', '1.5']])
sys.exit(0)
if args.user:
print "Using the username from args: %s" % args.user
user = args.user
else:
user = getUser()
password = getPassword(user)
cookiejar = cookielib.LWPCookieJar()
print ('Trying to authenticate via html form with given username and '
'password ..')
browser = getAuthenticatedHandle(args.baseurl, cookiejar, user, password,
args.debug)
print ".. authenticated"
res = []
for l in args.locations:
url = args.baseurl + l
tmp = []
for i in range(args.count):
d = measureGet(browser, url)
tmp.append(d)
res.append([url, "%.2f" % (sum(tmp) / float(len(tmp)))])
printResults(res)
| mit | 3,756,557,213,109,138,000 | 28.129032 | 79 | 0.604651 | false | 3.552321 | false | false | false |
novafloss/bag8 | bag8/tests/test_yaml.py | 1 | 4636 | from __future__ import absolute_import, division, print_function
import os
from bag8.project import Project
from bag8.yaml import Yaml
CURR_DIR = os.path.realpath('.')
def test_data():
# normal
project = Project('busybox')
assert Yaml(project).data == {
'busybox': {
'dockerfile': os.path.join(project.build_path, 'Dockerfile'),
'environment': {
'BAG8_LINKS': 'link',
'DNSDOCK_ALIAS': 'busybox.docker',
'DNSDOCK_IMAGE': '',
'DUMMY': 'nothing here',
'NGINX_UPSTREAM_SERVER_DOMAIN': 'link.docker',
},
'image': 'bag8/busybox',
'links': [
'link:link'
]
},
'link': {
'environment': {
'BAG8_LINKS': '',
'DNSDOCK_ALIAS': 'link.docker',
'DNSDOCK_IMAGE': '',
'DUMMY': 'nothing here too'
},
'expose': [1234],
'image': 'bag8/link'
}
}
# develop
project = Project('busybox', develop=True)
assert Yaml(project).data == {
'busybox': {
'dockerfile': os.path.join(project.build_path, 'Dockerfile'),
'environment': {
'BAG8_LINKS': 'link',
'DNSDOCK_ALIAS': 'busybox.docker',
'DNSDOCK_IMAGE': '',
'DUMMY': 'yo',
'NGINX_UPSTREAM_SERVER_DOMAIN': 'link.docker',
},
'image': 'bag8/busybox',
'links': [
'link:link'
],
'volumes': [
'{}:/tmp'.format(CURR_DIR)
]
},
'link': {
'environment': {
'BAG8_LINKS': '',
'DNSDOCK_ALIAS': 'link.docker',
'DNSDOCK_IMAGE': '',
'DUMMY': 'nothing here too'
},
'expose': [1234],
'image': 'bag8/link'
}
}
def test_service_dicts():
# normal
project = Project('busybox')
assert sorted(Yaml(project).service_dicts) == sorted([
{
'name': 'busybox',
'bag8_name': 'busybox',
'dockerfile': os.path.join(project.build_path, 'Dockerfile'),
'environment': {
'BAG8_LINKS': 'link',
'DNSDOCK_ALIAS': 'busybox.docker',
'DNSDOCK_IMAGE': '',
'DUMMY': 'nothing here',
'NGINX_UPSTREAM_SERVER_DOMAIN': 'link.docker',
},
'image': 'bag8/busybox',
'links': [
'link:link'
]
},
{
'name': 'link',
'bag8_name': 'link',
'environment': {
'BAG8_LINKS': '',
'DNSDOCK_ALIAS': 'link.docker',
'DNSDOCK_IMAGE': '',
'DUMMY': 'nothing here too'
},
'expose': [1234],
'image': 'bag8/link'
}
])
# develop
project = Project('busybox', develop=True)
assert sorted(Yaml(project).service_dicts) == sorted([
{
'name': 'busybox',
'bag8_name': 'busybox',
'dockerfile': os.path.join(project.build_path, 'Dockerfile'),
'environment': {
'BAG8_LINKS': 'link',
'DNSDOCK_ALIAS': 'busybox.docker',
'DNSDOCK_IMAGE': '',
'DUMMY': 'yo',
'NGINX_UPSTREAM_SERVER_DOMAIN': 'link.docker',
},
'image': 'bag8/busybox',
'links': [
'link:link'
],
'volumes': [
'{}:/tmp'.format(CURR_DIR)
]
},
{
'name': 'link',
'bag8_name': 'link',
'environment': {
'BAG8_LINKS': '',
'DNSDOCK_ALIAS': 'link.docker',
'DNSDOCK_IMAGE': '',
'DUMMY': 'nothing here too'
},
'expose': [1234],
'image': 'bag8/link'
}
])
# complex name
project = Project('link.2')
assert sorted(Yaml(project).service_dicts) == sorted([
{
'name': 'link2',
'bag8_name': 'link.2',
'dockerfile': os.path.join(project.build_path, 'Dockerfile'),
'environment': {
'BAG8_LINKS': '',
'DNSDOCK_ALIAS': 'link2.docker',
'DNSDOCK_IMAGE': '',
},
'image': 'bag8/busybox',
'links': []
}
])
| mit | 437,238,647,660,886,600 | 27.617284 | 73 | 0.410915 | false | 3.982818 | false | false | false |
lmacken/leafy-miracle | leafymiracle/views.py | 1 | 1661 | # Copyright (C) 2011 Luke Macken <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from pyramid.httpexceptions import HTTPFound
from tw2.jqplugins.ui.base import set_ui_theme_name
from widgets import LeafyGraph
from widgets import LeafyDialog
from widgets import LeafySearchbar
import simplejson
import webob
def view_root(context, request):
return HTTPFound(location='/1')
def view_model(context, request):
# TODO -- we need a fedora jquery-ui theme sitting around.
set_ui_theme_name('hot-sneaks')
return {'item':context, 'project':'leafymiracle',
'jitwidget': LeafyGraph(rootObject=context),
'dialogwidget': LeafyDialog,
'searchbarwidget': LeafySearchbar,
}
def view_search(context, request):
term = request.params['term']
cats = request.params.get('cats', 'Category,Group,Package')
data = context.search(term, cats)
resp = webob.Response(request=request, content_type="application/json")
resp.body = simplejson.dumps(data)
return resp
| agpl-3.0 | -1,192,856,243,058,274,600 | 35.108696 | 75 | 0.731487 | false | 3.749436 | false | false | false |
debugger06/MiroX | lib/test/widgetstateconstantstest.py | 1 | 2572 | from miro.test.framework import MiroTestCase
from miro.frontends.widgets.widgetstatestore import WidgetStateStore
from miro.frontends.widgets.itemlist import SORT_KEY_MAP
class WidgetStateConstants(MiroTestCase):
def setUp(self):
MiroTestCase.setUp(self)
self.display_types = set(WidgetStateStore.get_display_types())
def test_view_types(self):
# test that all view types are different
view_types = (WidgetStateStore.get_list_view_type(),
WidgetStateStore.get_standard_view_type(),
WidgetStateStore.get_album_view_type())
for i in range(len(view_types)):
for j in range(i + 1, len(view_types)):
self.assertNotEqual(view_types[i], view_types[j])
def test_default_view_types(self):
display_types = set(WidgetStateStore.DEFAULT_VIEW_TYPE)
self.assertEqual(self.display_types, display_types)
def test_default_column_widths(self):
# test that all available columns have widths set for them
# calculate all columns that available for some display/view
# combination
available_columns = set()
display_id = None # this isn't used yet, just set it to a dummy value
for display_type in self.display_types:
for view_type in (WidgetStateStore.get_list_view_type(),
WidgetStateStore.get_standard_view_type(),
WidgetStateStore.get_album_view_type()):
available_columns.update(
WidgetStateStore.get_columns_available(
display_type, display_id, view_type))
# make sure that we have widths for those columns
self.assertEqual(available_columns,
set(WidgetStateStore.DEFAULT_COLUMN_WIDTHS.keys()))
def test_default_sort_column(self):
display_types = set(WidgetStateStore.DEFAULT_SORT_COLUMN)
self.assertEqual(self.display_types, display_types)
def test_default_columns(self):
display_types = set(WidgetStateStore.DEFAULT_COLUMNS)
self.assertEqual(self.display_types, display_types)
def test_available_columns(self):
# Currently what get_display_types() uses. Testing it anyway.
display_types = set(WidgetStateStore.AVAILABLE_COLUMNS)
self.assertEqual(self.display_types, display_types)
def test_sort_key_map(self):
columns = set(WidgetStateStore.DEFAULT_COLUMN_WIDTHS)
sort_keys = set(SORT_KEY_MAP)
self.assertEqual(sort_keys, columns)
| gpl-2.0 | 728,278,717,984,210,300 | 42.59322 | 77 | 0.657076 | false | 4.07607 | true | false | false |
hzlf/openbroadcast.org | website/apps/statistics/apiv2/views.py | 2 | 1269 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from datetime import date, datetime, timedelta
from dateutil.relativedelta import relativedelta
from django.apps import apps
from django.core.exceptions import ObjectDoesNotExist
from django.http import Http404
from rest_framework.response import Response
from rest_framework.generics import GenericAPIView
from ..utils.usage_statistics import get_usage_statistics
class UsageStatisticsView(GenericAPIView):
def get_object(self, obj_ct, obj_uuid):
try:
obj = apps.get_model(*obj_ct.split(".")).objects.get(uuid=obj_uuid)
return obj
except ObjectDoesNotExist:
raise Http404
def get(self, request, obj_ct, obj_uuid):
obj = self.get_object(obj_ct, obj_uuid)
# for the moment the range defaults to the last 12 months (including current)
today = datetime.now()
end = date(today.year + today.month // 12, today.month % 12 + 1, 1) - timedelta(1)
start = end - relativedelta(years=1, days=-1)
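        # e.g. today = 2021-03-15 -> end = 2021-03-31 (last day of the
        # current month) and start = 2020-04-01: a full 12-month window.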
usage_statistics = get_usage_statistics(obj=obj, start=start, end=end)
return Response(usage_statistics)
# return super(UsageStatisticsView, self).list(request, *args, **kwargs)
| gpl-3.0 | -3,544,083,552,119,572,000 | 36.323529 | 90 | 0.693459 | false | 4.028571 | false | false | false |
Anaconda-Platform/anaconda-client | binstar_client/commands/channel.py | 1 | 3490 | """
Manage your Anaconda repository channels.
"""
from __future__ import unicode_literals, print_function
from binstar_client.utils import get_server_api
import functools
import logging
import argparse
logger = logging.getLogger('binstar.channel')
def main(args, name, deprecated=False):
aserver_api = get_server_api(args.token, args.site)
if args.organization:
owner = args.organization
else:
current_user = aserver_api.user()
owner = current_user['login']
if deprecated:
logger.warning('channel command is deprecated in favor of label')
if args.copy:
aserver_api.copy_channel(args.copy[0], owner, args.copy[1])
logger.info("Copied {} {} to {}".format(name, *tuple(args.copy)))
elif args.remove:
aserver_api.remove_channel(args.remove, owner)
logger.info("Removed {} {}".format(name, args.remove))
elif args.list:
logger.info('{}s'.format(name.title()))
for channel, info in aserver_api.list_channels(owner).items():
if isinstance(info, int): # OLD API
logger.info((' + %s ' % channel))
else:
logger.info((' + %s ' % channel) + ('[locked]' if info['is_locked'] else ''))
elif args.show:
info = aserver_api.show_channel(args.show, owner)
logger.info('{} {} {}'.format(
name.title(),
args.show,
('[locked]' if info['is_locked'] else '')
))
for f in info['files']:
logger.info(' + %(full_name)s' % f)
elif args.lock:
aserver_api.lock_channel(args.lock, owner)
logger.info("{} {} is now locked".format(name.title(), args.lock))
elif args.unlock:
aserver_api.unlock_channel(args.unlock, owner)
logger.info("{} {} is now unlocked".format(name.title(), args.unlock))
else:
raise NotImplementedError()
def _add_parser(subparsers, name, deprecated=False):
deprecated_warn = ""
if deprecated:
deprecated_warn = "[DEPRECATED in favor of label] \n"
subparser = subparsers.add_parser(
name,
help='{}Manage your Anaconda repository {}s'.format(deprecated_warn, name),
formatter_class=argparse.RawDescriptionHelpFormatter,
description=__doc__)
subparser.add_argument('-o', '--organization',
help="Manage an organizations {}s".format(name))
group = subparser.add_mutually_exclusive_group(required=True)
group.add_argument('--copy', nargs=2, metavar=name.upper())
group.add_argument(
'--list',
action='store_true',
help="{}list all {}s for a user".format(deprecated_warn, name)
)
group.add_argument(
'--show',
metavar=name.upper(),
help="{}Show all of the files in a {}".format(deprecated_warn, name)
)
group.add_argument(
'--lock',
metavar=name.upper(),
help="{}Lock a {}".format(deprecated_warn, name))
group.add_argument(
'--unlock',
metavar=name.upper(),
help="{}Unlock a {}".format(deprecated_warn, name)
)
group.add_argument(
'--remove',
metavar=name.upper(),
help="{}Remove a {}".format(deprecated_warn, name)
)
subparser.set_defaults(main=functools.partial(main, name=name, deprecated=deprecated))
def add_parser(subparsers):
_add_parser(subparsers, name="label")
_add_parser(subparsers, name="channel", deprecated=True)
| bsd-3-clause | 7,761,409,914,497,489,000 | 32.238095 | 93 | 0.602865 | false | 3.852097 | false | false | false |
qpython-android/QPython3-core | pybuild/env.py | 1 | 1550 | import os
target_arch = 'arm64'
android_api_level = os.getenv('ANDROID_VER')
# Python optional modules.
# Available:
# tinycc - Tiny cc compiler
# bzip2 - enable the bz2 module and the bzip2 codec
# xz - enable the lzma module and the lzma codec
# openssl - enable the ssl module and SSL/TLS support for sockets
# readline - enable the readline module and command history/the like in the REPL
# ncurses - enable the curses module
# sqlite - enable the sqlite3 module
# gdbm - enable the dbm/gdbm modules
# libffi - enable the ctypes module
# zlib - enable the zlib module
# expat - enable the pyexpat module
# tools - some handy utility scripts from ./devscripts
packages = ('openssl', 'ncurses', 'readline', 'bzip2', 'xz', 'zlib', 'sqlite', 'gdbm', 'libffi', 'expat', 'tools')
# 3rd Python modules.
py_packages = ('pycryptodome', ) #'openblas', 'numpy', 'scipy', 'pandas', 'kiwisolver', 'matplotlib', 'theano', 'scikitlearn')
#py_packages = ('libzmq','pyzmq')
py_packages2 = ('scikitlearn2',) #'libzmq','pyzmq2', 'numpy2','scipy2','pandas2','matplotlib2','kiwisolver2','theano2','pillow2','dropbear','dulwich2'
#'pyjnius2','android2','pygamesdl2','kivy2','libxml2','libxslt','lxml2','cryptography2'
#'pyopenssl2'
skip_build_py = os.path.exists(".skip_build_py")
skip_build_py2 = os.path.exists(".skip_build_py2")
skip_build_py_module = os.path.exists(".skip_build_py_module")
skip_build_py2_module = os.path.exists(".skip_build_py2_module")
use_bintray = False
bintray_username = 'qpython-android'
bintray_repo = 'qpython3-core'
| apache-2.0 | -740,139,808,360,140,300 | 43.285714 | 150 | 0.706452 | false | 2.958015 | false | false | false |
bdaroz/the-blue-alliance | database/team_query.py | 3 | 4402 | from google.appengine.ext import ndb
from consts.district_type import DistrictType
from database.dict_converters.district_converter import DistrictConverter
from database.dict_converters.team_converter import TeamConverter
from database.database_query import DatabaseQuery
from models.district import District
from models.district_team import DistrictTeam
from models.event import Event
from models.event_team import EventTeam
from models.team import Team
class TeamQuery(DatabaseQuery):
CACHE_VERSION = 2
CACHE_KEY_FORMAT = 'team_{}' # (team_key)
DICT_CONVERTER = TeamConverter
@ndb.tasklet
def _query_async(self):
team_key = self._query_args[0]
team = yield Team.get_by_id_async(team_key)
raise ndb.Return(team)
class TeamListQuery(DatabaseQuery):
CACHE_VERSION = 2
CACHE_KEY_FORMAT = 'team_list_{}' # (page_num)
PAGE_SIZE = 500
DICT_CONVERTER = TeamConverter
@ndb.tasklet
def _query_async(self):
page_num = self._query_args[0]
start = self.PAGE_SIZE * page_num
end = start + self.PAGE_SIZE
teams = yield Team.query(Team.team_number >= start, Team.team_number < end).fetch_async()
raise ndb.Return(teams)
class TeamListYearQuery(DatabaseQuery):
CACHE_VERSION = 2
CACHE_KEY_FORMAT = 'team_list_year_{}_{}' # (year, page_num)
DICT_CONVERTER = TeamConverter
@ndb.tasklet
def _query_async(self):
year = self._query_args[0]
page_num = self._query_args[1]
event_team_keys_future = EventTeam.query(EventTeam.year == year).fetch_async(keys_only=True)
teams_future = TeamListQuery(page_num).fetch_async()
year_team_keys = set()
for et_key in event_team_keys_future.get_result():
team_key = et_key.id().split('_')[1]
year_team_keys.add(team_key)
teams = filter(lambda team: team.key.id() in year_team_keys, teams_future.get_result())
raise ndb.Return(teams)
class DistrictTeamsQuery(DatabaseQuery):
CACHE_VERSION = 3
CACHE_KEY_FORMAT = 'district_teams_{}' # (district_key)
DICT_CONVERTER = TeamConverter
@ndb.tasklet
def _query_async(self):
district_key = self._query_args[0]
district_teams = yield DistrictTeam.query(
DistrictTeam.district_key == ndb.Key(District, district_key)).fetch_async()
team_keys = map(lambda district_team: district_team.team, district_teams)
teams = yield ndb.get_multi_async(team_keys)
raise ndb.Return(teams)
class EventTeamsQuery(DatabaseQuery):
CACHE_VERSION = 2
CACHE_KEY_FORMAT = 'event_teams_{}' # (event_key)
DICT_CONVERTER = TeamConverter
@ndb.tasklet
def _query_async(self):
event_key = self._query_args[0]
event_teams = yield EventTeam.query(EventTeam.event == ndb.Key(Event, event_key)).fetch_async()
team_keys = map(lambda event_team: event_team.team, event_teams)
teams = yield ndb.get_multi_async(team_keys)
raise ndb.Return(teams)
class EventEventTeamsQuery(DatabaseQuery):
CACHE_VERSION = 2
CACHE_KEY_FORMAT = 'event_eventteams_{}' # (event_key)
@ndb.tasklet
def _query_async(self):
event_key = self._query_args[0]
event_teams = yield EventTeam.query(EventTeam.event == ndb.Key(Event, event_key)).fetch_async()
raise ndb.Return(event_teams)
class TeamParticipationQuery(DatabaseQuery):
CACHE_VERSION = 1
CACHE_KEY_FORMAT = 'team_participation_{}' # (team_key)
@ndb.tasklet
def _query_async(self):
team_key = self._query_args[0]
event_teams = yield EventTeam.query(EventTeam.team == ndb.Key(Team, team_key)).fetch_async(keys_only=True)
years = map(lambda event_team: int(event_team.id()[:4]), event_teams)
raise ndb.Return(set(years))
class TeamDistrictsQuery(DatabaseQuery):
CACHE_VERSION = 2
CACHE_KEY_FORMAT = 'team_districts_{}' # (team_key)
DICT_CONVERTER = DistrictConverter
@ndb.tasklet
def _query_async(self):
team_key = self._query_args[0]
district_team_keys = yield DistrictTeam.query(DistrictTeam.team == ndb.Key(Team, team_key)).fetch_async(keys_only=True)
districts = yield ndb.get_multi_async([ndb.Key(District, dtk.id().split('_')[0]) for dtk in district_team_keys])
raise ndb.Return(filter(lambda x: x is not None, districts))
| mit | 1,297,470,170,798,805,200 | 34.216 | 127 | 0.66697 | false | 3.253511 | false | false | false |
eniosp/google-python-exercises | copyspecial/copyspecial.py | 1 | 2312 | #!/usr/bin/python
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
# Problem description:
# https://developers.google.com/edu/python/exercises/copy-special
import sys
import re
import os
import shutil
import subprocess
import zipfile
"""Copy Special exercise
"""
def is_special(name):
if re.search(r"__.*__", name):
print(name + ' is special')
return True
print(name + ' not special')
return False
def get_special_paths(d):
special = []
for f in os.listdir(d):
file_to_check = d + '/' + f
if os.path.isfile(file_to_check) and is_special(f):
special.append(file_to_check)
return special
def copy_to_dir(paths, dest_dir):
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
for path in paths:
        filename = os.path.basename(path)
        shutil.copyfile(path, dest_dir + '/' + filename)
def copy_to_zip(paths, zip_fname):
with zipfile.ZipFile(zip_fname, 'w') as myzip:
for p in paths:
myzip.write(p)
def main():
# This basic command line argument parsing code is provided.
# Add code to call your functions below.
# Make a list of command line arguments, omitting the [0] element
# which is the script itself.
args = sys.argv[1:]
if not args:
print("usage: [--todir dir][--tozip zipfile] dir [dir ...]")
sys.exit(1)
# todir and tozip are either set from command line
# or left as the empty string.
# The args array is left just containing the dirs.
todir = ''
if args[0] == '--todir':
todir = args[1]
del args[0:2]
tozip = ''
if args[0] == '--tozip':
tozip = args[1]
del args[0:2]
if len(args) == 0:
print("error: must specify one or more dirs")
sys.exit(1)
#check each directory given in the input
special_paths = []
for d in args:
print('processing ' + d)
special_paths.extend(get_special_paths(d))
if todir:
copy_to_dir(special_paths, todir)
if tozip:
copy_to_zip(special_paths, tozip)
if __name__ == "__main__":
main()
| apache-2.0 | 8,945,271,836,969,937,000 | 21.891089 | 69 | 0.604239 | false | 3.430267 | false | false | false |
chrsbats/similarities | similarities/hashes/feature_hash.py | 1 | 1632 |
import xxhash
from similarities.distances.sparse_vector import cosine_sparse_vector
class FeatureHash(object):
def __init__(self, tokens, length=100000):
"""Calculates a Charikar simhash with appropriate bitlength.
Input can be any iterable, but for strings it will automatically
break it into words first, assuming you don't want to iterate
over the individual characters. Returns nothing.
"""
if isinstance(tokens,basestring):
tokens = tokens.split()
v = {}
if isinstance(tokens,dict):
for value,w in tokens.iteritems():
k = xxhash.xxh64(value).intdigest()
x = v.get(k%length,0)
if k & 1 << 63:
v[k%length] = x + w
else:
v[k%length] = x - w
else:
for value in tokens:
k = xxhash.xxh64(value).intdigest()
x = v.get(k%length,0)
if k & 1 << 63:
v[k%length] = x + 1
else:
v[k%length] = x - 1
self.hash = v
self.vector = v
def similarity(self, other_hash):
"""Calculate how different this hash is from another simhash.
Returns a float from 0.0 to 1.0 (inclusive)
"""
return 1.0 - self.distance(other_hash)
def distance(self,other_hash):
return cosine_sparse_vector(self.hash, other_hash.hash)
def digest(self):
return self.hash
def __eq__(self, other):
return self.hash == other.hash
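# Minimal usage sketch (hedged): the phrases below are illustrative;
# any iterable of tokens, or a dict of token -> weight, works the same
# way. Similarity is 1.0 minus the cosine distance of the hash vectors.
if __name__ == '__main__':
    a = FeatureHash('the quick brown fox jumps over the lazy dog')
    b = FeatureHash('the quick brown fox leaps over a sleepy dog')
    print 'similarity: %.3f' % a.similarity(b)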
| mit | -4,595,390,786,045,579,300 | 28.142857 | 72 | 0.525735 | false | 4.184615 | false | false | false |
sujoykroy/motion-picture | editor/MotionPicture/time_line_boxes/prop_time_line_box.py | 1 | 5800 | from ..commons import OrderedDict
from ..commons.draw_utils import *
from .box import Box
from .sizes import *
from .time_slice_box import TimeSliceBox
from .. import settings
class PropTimeLineBox(Box):
TOTAL_LABEL_WIDTH = PROP_NAME_LABEL_WIDTH + PROP_VALUE_LABEL_WIDTH + 6*PROP_NAME_LABEL_RIGHT_PADDING
def __init__(self, prop_time_line, shape_time_line_box):
Box.__init__(self, shape_time_line_box)
self.prop_time_line = prop_time_line
self.time_slice_boxes = OrderedDict()
self.y_per_value = 1
self.min_value = -1
self.max_value = 1
self.slices_container_box = Box(self)
self.slices_container_box.left = self.TOTAL_LABEL_WIDTH
self.vertical_zoom = 1.
self.time_line_width = 0.
self.update()
def get_multi_shape_time_line(self):
return self.parent_box.get_multi_shape_time_line()
def get_time_slice_box_at_index(self, index):
return self.time_slice_boxes.get_item_at_index(index)
def set_time_multiplier(self, scale):
self.slices_container_box.scale_x = scale
def get_time_multiplier(self):
return self.slices_container_box.scale_x
def set_vertical_multiplier(self, scale):
self.slices_container_box.scale_y *= scale
def update(self):
min_value, max_value = self.prop_time_line.get_min_max_value()
if max_value == min_value:
max_value = min_value + 1
diff = float(max_value-min_value)
self.max_value = max_value + diff*.05
self.min_value = min_value - diff*.05
self.y_per_value = HEIGHT_PER_TIME_SLICE/(self.max_value-self.min_value)
for time_slice in self.time_slice_boxes.keys:
if not self.prop_time_line.time_slices.key_exists(time_slice):
self.time_slice_boxes.remove(time_slice)
scaled_width = 0
width = 0
height = 0
horiz_index = 0
for time_slice in self.prop_time_line.time_slices:
if not self.time_slice_boxes.key_exists(time_slice):
time_slice_box = TimeSliceBox(time_slice, self)
self.time_slice_boxes.insert(horiz_index, time_slice, time_slice_box)
else:
time_slice_box = self.time_slice_boxes[time_slice]
time_slice_box.set_index(horiz_index)
time_slice_box.update()
time_slice_box.left = width
time_slice_box.top = 0
outline = time_slice_box.get_rel_outline()
width += outline.width
if height<outline.height:
height = outline.height#*self.slices_container_box.scale_y
horiz_index += 1
self.width = width*self.slices_container_box.scale_x + self.slices_container_box.left
self.height = height*self.slices_container_box.scale_y + PROP_TIME_LINE_VERTICAL_PADDING
self.time_line_width = width
def draw(self, ctx, visible_time_span):
ctx.save()
self.pre_draw(ctx)
ctx.rectangle(0, 0, 2000, self.height-PROP_TIME_LINE_VERTICAL_PADDING)
ctx.restore()
draw_stroke(ctx, 1, "aaaaaa")
time_elapsed = 0
for time_slice in self.prop_time_line.time_slices:
if time_elapsed>visible_time_span.end:
break
if time_elapsed+time_slice.duration<visible_time_span.start:
time_elapsed += time_slice.duration
continue
time_slice_box = self.time_slice_boxes[time_slice]
ctx.save()
rel_visible_time_span = visible_time_span.copy()
rel_visible_time_span.start-= time_elapsed
rel_visible_time_span.end -= time_elapsed
if rel_visible_time_span.start<time_elapsed:
rel_visible_time_span.start =0
if rel_visible_time_span.end>time_slice.duration:
rel_visible_time_span.end = time_slice.duration
time_slice_box.draw(ctx, rel_visible_time_span)
ctx.restore()
time_elapsed += time_slice.duration
ctx.save()
self.pre_draw(ctx)
ctx.rectangle(-SHAPE_LINE_LEFT_PADDING-2, -5,
PropTimeLineBox.TOTAL_LABEL_WIDTH+SHAPE_LINE_LEFT_PADDING, self.height+5)
ctx.restore()
draw_fill(ctx, PROP_LEFT_BACK_COLOR)
draw_text(ctx,
self.prop_time_line.prop_name, 0, 0, font_name=settings.TIME_LINE_FONT,
width=PROP_NAME_LABEL_WIDTH,
text_color = PROP_NAME_TEXT_COLOR, padding=PROP_NAME_LABEL_RIGHT_PADDING,
border_color = PROP_NAME_BORDER_COLOR, border_width=2,
back_color = PROP_NAME_BACK_COLOR, pre_draw=self.pre_draw)
value_x_pos = PROP_NAME_LABEL_WIDTH + 3*PROP_NAME_LABEL_RIGHT_PADDING + PROP_VALUE_LABEL_WIDTH
draw_text(ctx, "{0:.2f}".format(self.max_value), font_name="7",
x=value_x_pos, align="right bottom-center",
width=PROP_VALUE_LABEL_WIDTH, fit_width=True,
y=0, text_color="000000", pre_draw=self.pre_draw)
draw_text(ctx, "{0:02.2f}".format(self.min_value), font_name="7",
x=value_x_pos, align="right bottom",
width=PROP_VALUE_LABEL_WIDTH, fit_width=True,
y=self.height, text_color="000000", pre_draw=self.pre_draw)
ctx.save()
self.pre_draw(ctx)
draw_straight_line(ctx, value_x_pos, 0, value_x_pos+2*PROP_NAME_LABEL_RIGHT_PADDING, 0)
draw_stroke(ctx, 1)
draw_straight_line(ctx, value_x_pos, self.height-END_POINT_HEIGHT*.5,
value_x_pos+2*PROP_NAME_LABEL_RIGHT_PADDING,
self.height-END_POINT_HEIGHT*.5)
draw_stroke(ctx, 1)
ctx.restore()
| gpl-3.0 | -3,127,707,642,595,551,000 | 40.428571 | 104 | 0.6 | false | 3.389831 | false | false | false |
atiro/nikola | nikola/plugins/task/py3_switch.py | 2 | 3618 | # -*- coding: utf-8 -*-
# Copyright © 2012-2016 Roberto Alsina and others.
# Permission is hereby granted, free of charge, to any
# person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice
# shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Beg the user to switch to python 3."""
import datetime
import os
import random
import sys
import doit.tools
from nikola.utils import get_logger, STDERR_HANDLER
from nikola.plugin_categories import LateTask
PY2_AND_NO_PY3_WARNING = """Nikola is going to deprecate Python 2 support in 2016. Your current
version will continue to work, but please consider upgrading to Python 3.
Please check http://bit.ly/1FKEsiX for details.
"""
PY2_WARNING = """Nikola is going to deprecate Python 2 support in 2016. You already have Python 3
available in your system. Why not switch?
Please check http://bit.ly/1FKEsiX for details.
"""
PY2_BARBS = [
"Python 2 has been deprecated for years. Stop clinging to your long gone youth and switch to Python3.",
"Python 2 is the safety blanket of languages. Be a big kid and switch to Python 3",
"Python 2 is old and busted. Python 3 is the new hotness.",
"Nice unicode you have there, would be a shame something happened to it.. switch to python 3!.",
"Don’t get in the way of progress! Upgrade to Python 3 and save a developer’s mind today!",
"Winners don't use Python 2 -- Signed: The FBI",
"Python 2? What year is it?",
"I just wanna tell you how I'm feeling\n"
"Gotta make you understand\n"
"Never gonna give you up [But Python 2 has to go]",
"The year 2009 called, and they want their Python 2.7 back.",
]
LOGGER = get_logger('Nikola', STDERR_HANDLER)
def has_python_3():
"""Check if python 3 is available."""
if 'win' in sys.platform:
py_bin = 'py.exe'
else:
py_bin = 'python3'
for path in os.environ["PATH"].split(os.pathsep):
if os.access(os.path.join(path, py_bin), os.X_OK):
return True
return False
class Py3Switch(LateTask):
"""Beg the user to switch to python 3."""
name = "_switch to py3"
def gen_tasks(self):
"""Beg the user to switch to python 3."""
def give_warning():
if sys.version_info[0] == 3:
return
if has_python_3():
LOGGER.warn(random.choice(PY2_BARBS))
LOGGER.warn(PY2_WARNING)
else:
LOGGER.warn(PY2_AND_NO_PY3_WARNING)
task = {
'basename': self.name,
'name': 'please!',
'actions': [give_warning],
'clean': True,
'uptodate': [doit.tools.timeout(datetime.timedelta(days=3))]
}
return task
| mit | 1,485,140,476,722,904,300 | 34.07767 | 107 | 0.674509 | false | 3.736298 | false | false | false |
michalkurka/h2o-3 | h2o-py/tests/testdir_misc/pyunit_upload_import.py | 10 | 1083 | import sys
sys.path.insert(1,"../../")
import h2o
from tests import pyunit_utils
def upload_import_small():
# Connect to a pre-existing cluster
various_datasets = ["smalldata/iris/iris.csv", "smalldata/iris/iris_wheader.csv", "smalldata/prostate/prostate.csv",
"smalldata/prostate/prostate_woheader.csv.gz"]
for dataset in various_datasets:
uploaded_frame = h2o.upload_file(pyunit_utils.locate(dataset))
imported_frame = h2o.import_file(pyunit_utils.locate(dataset))
rows_u, cols_u = uploaded_frame.dim
rows_i, cols_i = imported_frame.dim
assert rows_u == rows_i, "Expected same number of rows regardless of method. upload: {0}, import: " \
"{1}.".format(rows_u, rows_i)
assert cols_u == cols_i, "Expected same number of cols regardless of method. upload: {0}, import: " \
"{1}.".format(cols_u, cols_i)
if __name__ == "__main__":
pyunit_utils.standalone_test(upload_import_small)
else:
upload_import_small()
| apache-2.0 | -7,980,365,775,883,682,000 | 30.852941 | 120 | 0.611265 | false | 3.516234 | false | false | false |
ray-project/ray | dashboard/modules/actor/actor_utils.py | 1 | 2183 | import time
import re
from collections import defaultdict
PYCLASSNAME_RE = re.compile(r"(.+?)\(")
def construct_actor_groups(actors):
"""actors is a dict from actor id to an actor or an
actor creation task The shared fields currently are
"actorClass", "actorId", and "state" """
actor_groups = _group_actors_by_python_class(actors)
stats_by_group = {
name: _get_actor_group_stats(group)
for name, group in actor_groups.items()
}
summarized_actor_groups = {}
for name, group in actor_groups.items():
summarized_actor_groups[name] = {
"entries": group,
"summary": stats_by_group[name]
}
return summarized_actor_groups
def actor_classname_from_task_spec(task_spec):
return task_spec.get("functionDescriptor", {})\
.get("pythonFunctionDescriptor", {})\
.get("className", "Unknown actor class").split(".")[-1]
def _group_actors_by_python_class(actors):
groups = defaultdict(list)
for actor in actors.values():
actor_class = actor["actorClass"]
groups[actor_class].append(actor)
return dict(groups)
def _get_actor_group_stats(group):
state_to_count = defaultdict(lambda: 0)
executed_tasks = 0
min_timestamp = None
num_timestamps = 0
sum_timestamps = 0
now = time.time() * 1000 # convert S -> MS
for actor in group:
state_to_count[actor["state"]] += 1
if "timestamp" in actor:
if not min_timestamp or actor["timestamp"] < min_timestamp:
min_timestamp = actor["timestamp"]
num_timestamps += 1
sum_timestamps += now - actor["timestamp"]
if "numExecutedTasks" in actor:
executed_tasks += actor["numExecutedTasks"]
if num_timestamps > 0:
avg_lifetime = int((sum_timestamps / num_timestamps) / 1000)
max_lifetime = int((now - min_timestamp) / 1000)
else:
avg_lifetime = 0
max_lifetime = 0
return {
"stateToCount": state_to_count,
"avgLifetime": avg_lifetime,
"maxLifetime": max_lifetime,
"numExecutedTasks": executed_tasks,
}
| apache-2.0 | -3,236,785,072,096,983,600 | 31.102941 | 71 | 0.607879 | false | 3.803136 | false | false | false |
variar/klogg | 3rdparty/hyperscan/tools/hsbench/scripts/pcapCorpus.py | 1 | 9875 | #!/usr/bin/env python
'''
Script to convert a pcap file containing UDP and TCP packets to a corpus file.
'''
import sys, getopt, pprint, os
from sqlite3 import dbapi2 as sqlite
import pcap
from optparse import OptionParser
from socket import AF_INET, IPPROTO_UDP, IPPROTO_TCP, inet_ntop, ntohs, ntohl, inet_ntoa
import struct
from CorpusBuilder import CorpusBuilder
ETHERTYPE_IP = 0x0800 # IP protocol
ETHERTYPE_ARP = 0x0806 # Addr. resolution protocol
ETHERTYPE_REVARP = 0x8035 # reverse Addr. resolution protocol
ETHERTYPE_VLAN = 0x8100 # IEEE 802.1Q VLAN tagging
ETHERTYPE_IPV6 = 0x86dd # IPv6
#
# A dictionary of active TCP streams
#
tcp_streams = {}
#
# A dictionary of UDP streams
#
udp_streams = {}
#
# Current stream id
cur_stream_id = 0
def usage(exeName) :
errmsg = "Usage: %s -i <pcap-file> -o <sqlite-file>"
errmsg = errmsg % exeName
print >> sys.stderr, errmsg
sys.exit(-1)
class FiveTuple(object):
def __init__(self, protocol, src_addr, src_port, dst_addr, dst_port):
self.protocol = protocol
self.src_addr = src_addr
self.src_port = src_port
self.dst_addr = dst_addr
self.dst_port = dst_port
def __str__(self):
return "%d,%s,%d,%s,%d" % (self.protocol, self.src_addr, self.src_port, self.dst_addr, self.dst_port)
class UdpSegment:
"""Definition of a UDP segment
"""
def __init__(self, five_tuple, header, payload):
self.five_tuple = five_tuple
self.udp_header = header
self.udp_payload = payload
class TcpSegment:
"""Definition of a TCP segment
"""
def __init__(self, five_tuple, header, payload):
self.five_tuple = five_tuple
self.tcp_header = header
self.tcp_payload = payload
self.tcp_sequence_number, self.tcp_acknowledgement_number = struct.unpack('!LL', header[4:12])
def opt_isset_FIN(self):
opts = ord(self.tcp_header[13]) & 0x3F
return (opts & 0x01)
def opt_isset_SYN(self):
opts = ord(self.tcp_header[13]) & 0x3F
return (opts & 0x02)
def get_sequence_number(self):
return self.tcp_sequence_number
def __cmp__(self, other):
return cmp(self.tcp_sequence_number, other.tcp_sequence_number)
class TcpStream:
"""Definition of a TCP stream.
"""
TCP_STREAM_ACTIVE = 0x1
TCP_STREAM_CLOSED = 0x02
def __init__(self, five_tuple):
self.five_tuple = five_tuple
self.initial_sequence_number = 0
self.segments = []
def reset_stream(self):
self.segments = []
self.initial_sequence_number = 0
def set_initial_sequence_number(self, sequence_number):
self.initial_sequence_number = sequence_number
def append_segment(self, tcp_segment):
if len(self.segments) == 0:
self.set_initial_sequence_number(tcp_segment.get_sequence_number())
self.segments.append(tcp_segment)
def get_segments_sorted(self):
return sorted(self.segments)
class UdpStream:
"""A container for UDP packets that share the same 5-tuple
"""
def __init__(self, five_tuple):
self.five_tuple = five_tuple
self.segments = []
def append_segment(self, udp_segment):
self.segments.append(udp_segment)
def newStream(five_tuple):
'''
Create a new stream using the arguments passed-in and return its ID.
'''
global cur_stream_id
stream_id = cur_stream_id
cur_stream_id += 1
return stream_id
def process_tcp_segment(builder, segment):
"""Process a tcp segment. It checks for SYN and FIN segments are
if set modifies the associated stream.
"""
segment_id = str(segment.five_tuple)
if segment_id in tcp_streams:
m_tcp_stream = tcp_streams[segment_id]
m_tcp_stream.append_segment(segment)
else:
m_tcp_stream = TcpStream(segment.five_tuple)
m_tcp_stream.append_segment(segment)
tcp_streams[segment_id] = m_tcp_stream
if segment.opt_isset_SYN():
m_tcp_stream.segments = []
if segment.opt_isset_FIN():
#
# Finished with the stream - add the segments in the
# stream to db allowing the stream to be reused.
#
db_add_tcp_stream_segments(builder, m_tcp_stream)
del tcp_streams[segment_id]
def process_udp_segment(builder, segment):
""" Process a UDP segment. Given the connectionless nature of the UDP
protocol we simple accumulate the segment for later processing
when all the packets have been read
"""
segment_id = str(segment.five_tuple)
if segment_id in udp_streams:
m_udp_stream = udp_streams[segment_id]
m_udp_stream.append_segment(segment)
else:
m_udp_stream = UdpStream(segment.five_tuple)
m_udp_stream.append_segment(segment)
udp_streams[segment_id] = m_udp_stream
def db_add_tcp_stream_segments(builder, tcp_stream):
"""Add the contents of a tcp stream to the database
"""
tcp_segments = tcp_stream.get_segments_sorted()
last_sequence_num = 0
streamID = None
for tcp_segment in tcp_segments:
if (len(tcp_segment.tcp_payload) > 0) and (tcp_segment.tcp_sequence_number > last_sequence_num):
#
# Segment with an actual payload - add it to the stream's
# list of chunks.
#
# Note: delay creating the stream until we have a via chunk to
# commit to it
#
if streamID == None:
streamID = newStream(tcp_stream.five_tuple)
builder.add_chunk(streamID, tcp_segment.tcp_payload)
last_sequence_num = tcp_segment.tcp_sequence_number
def db_add_udp_stream_segments(builder, udp_stream):
"""Add the contents of a UDP stream to the database. Since UDP is
connection-less, a UDP stream object is really just an accumulation
of all the packets associated with a given 5-tuple.
"""
udp_segments = udp_stream.segments
streamID = None
for udp_segment in udp_segments:
if len(udp_segment.udp_payload) > 0:
if streamID == None:
streamID = newStream(udp_stream.five_tuple)
builder.add_chunk(streamID, udp_segment.udp_payload)
def enchunk_pcap(pcapFN, sqliteFN):
"""Read the contents of a pcap file with name @pcapFN and produce
a sqlite db with name @sqliteFN. It will contain chunks of data
from TCP and UDP streams,
"""
if not os.path.exists(pcapFN):
print >> sys.stderr, "Input file '%s' does not exist. Exiting." % pcapFN
sys.exit(-1)
builder = CorpusBuilder(sqliteFN)
#
# Read in the contents of the pcap file, adding stream segments as found
#
pkt_cnt = 0
ip_pkt_cnt = 0
ip_pkt_off = 0
unsupported_ip_protocol_cnt = 0
pcap_ref = pcap.pcap(pcapFN)
done = False
while not done:
try:
ts, packet = pcap_ref.next()
except:
break
pkt_cnt += 1
linkLayerType = struct.unpack('!H', packet[(pcap_ref.dloff - 2):pcap_ref.dloff])[0]
#
# We're only interested in IP packets
#
if linkLayerType == ETHERTYPE_VLAN:
linkLayerType = struct.unpack('!H', packet[(pcap_ref.dloff + 2):(pcap_ref.dloff + 4)])[0]
if linkLayerType != ETHERTYPE_IP:
continue
else:
ip_pkt_off = pcap_ref.dloff + 4
elif linkLayerType == ETHERTYPE_IP:
ip_pkt_off = pcap_ref.dloff
else:
continue
ip_pkt_cnt += 1
ip_pkt_total_len = struct.unpack('!H', packet[ip_pkt_off + 2: ip_pkt_off + 4])[0]
ip_pkt = packet[ip_pkt_off:ip_pkt_off + ip_pkt_total_len]
pkt_protocol = struct.unpack('B', ip_pkt[9])[0]
if (pkt_protocol != IPPROTO_UDP) and (pkt_protocol != IPPROTO_TCP):
#
# we're only interested in UDP and TCP packets at the moment
#
continue
pkt_src_addr = inet_ntoa(ip_pkt[12:16])
pkt_dst_addr = inet_ntoa(ip_pkt[16:20])
ip_hdr_len_offset = (ord(ip_pkt[0]) & 0x0f) * 4
ip_payload = ip_pkt[ip_hdr_len_offset:len(ip_pkt)]
pkt_src_port, pkt_dst_port = struct.unpack('!HH', ip_payload[0:4])
five_tuple = FiveTuple(pkt_protocol, pkt_src_addr, pkt_src_port, pkt_dst_addr, pkt_dst_port)
five_tuple_id = str(five_tuple)
if pkt_protocol == IPPROTO_UDP:
udp_payload_len = struct.unpack('!H', ip_payload[4:6])[0] - 8
udp_header = ip_payload[0:8]
udp_payload = ip_payload[8:len(ip_payload)]
udp_segment = UdpSegment(five_tuple, udp_header, udp_payload)
process_udp_segment(builder, udp_segment)
elif pkt_protocol == IPPROTO_TCP:
tcp_hdr_len = (ord(ip_payload[12]) >> 4) * 4
tcp_header = ip_payload[0:tcp_hdr_len]
tcp_payload = ip_payload[tcp_hdr_len:len(ip_payload)]
segment = TcpSegment(five_tuple, tcp_header, tcp_payload)
process_tcp_segment(builder, segment)
#
# Having read the contents of the pcap, we fill the database with any
# remaining TCP and UDP segments
#
for tcp_stream in tcp_streams.itervalues():
db_add_tcp_stream_segments(builder, tcp_stream)
for udp_stream in udp_streams.itervalues():
db_add_udp_stream_segments(builder, udp_stream)
#
# We've finished with the database
#
builder.finish()
if __name__ == '__main__' :
args = getopt.getopt(sys.argv[1:], 'i:o:')
args = dict(args[0])
requiredKeys = [ '-i', '-o']
for k in requiredKeys :
if not args.has_key(k) :
usage(os.path.basename(sys.argv[0]))
fnArgs = tuple([ args[k] for k in requiredKeys ])
enchunk_pcap(*fnArgs)
| gpl-3.0 | 7,155,665,015,608,640,000 | 30.854839 | 109 | 0.613873 | false | 3.462482 | false | false | false |
argonnexraydetector/detectorMPI | XPCS/immcheck.py | 1 | 3549 |
"""
#How to use
#run ipython
execfile('immcheck.py')
iname = '/local/testa_00200-01199.imm'
checkFile(iname)
#to read headers of images
fp = open(iname,'r')
h = readHeader(fp)
print h
#call if you are skipping iamge data, and not reading iamge data.
getNextHeaderPos(fp,h)
h = readHeader(fp)
print h
getNextHeaderPos(fp,h)
clf()
h = readHeader(fp)
img = getImage(fp,h)
#do not call getNextHeader. getImage seeks to next header already
print h
#displauy image
figimage(img)
clf()
h = readHeader(fp)
img = getImage(fp,h)
#do not call getNextHeader. getImage seeks to next header already
print h
#displauy image
figimage(img)
#etc ..\
#raw IMM
iname = '/local/testyraw_00100-00199.imm'
checkFile(iname)
fp.close()
"""
import struct
#comment these ouit of not drawing images, but only reading headers"
import numpy as np
import matplotlib.pyplot as plt
imm_headformat = "ii32s16si16siiiiiiiiiiiiiddiiIiiI40sf40sf40sf40sf40sf40sf40sf40sf40sf40sfffiiifc295s84s12s"
imm_fieldnames = [
'mode',
'compression',
'date',
'prefix',
'number',
'suffix',
'monitor',
'shutter',
'row_beg',
'row_end',
'col_beg',
'col_end',
'row_bin',
'col_bin',
'rows',
'cols',
'bytes',
'kinetics',
'kinwinsize',
'elapsed',
'preset',
'topup',
'inject',
'dlen',
'roi_number',
'buffer_number',
'systick',
'pv1',
'pv1VAL',
'pv2',
'pv2VAL',
'pv3',
'pv3VAL',
'pv4',
'pv4VAL',
'pv5',
'pv5VAL',
'pv6',
'pv6VAL',
'pv7',
'pv7VAL',
'pv8',
'pv8VAL',
'pv9',
'pv9VAL',
'pv10',
'pv10VAL',
'imageserver',
'CPUspeed',
'immversion',
'corecotick',
'cameratype',
'threshhold',
'byte632',
'empty_space',
'ZZZZ',
'FFFF'
]
iname = '/local/testa_00200-01199.imm'
def checkFile(fname):
fp = open(fname,'rb')
lastcor=-1
lastbn = -1;
n_corerror = 0
n_bnerror = 0
while True:
h = readHeader(fp)
if h!='eof':
print 'buffer number %d'%h['buffer_number']
print 'corecotick %d'%h['corecotick']
if lastbn==-1: lastbn = h['buffer_number']-1
if lastcor==-1: lastcor = h['corecotick']-1
dbn = h['buffer_number'] - lastbn
dcor = h['corecotick'] - lastcor
if dbn>1: n_bnerror=n_bnerror+1
if dcor>1: n_corerror = n_corerror+1
lastbn = h['buffer_number']
lastcor = h['corecotick']
getNextHeaderPos(fp,h)
else: break
print "Skipped Buffer numbers %d"%n_bnerror
print "Skipped Corecoticks %d"%n_corerror
fp.close()
def readHeader(fp):
bindata = fp.read(1024)
if bindata=='':
return('eof')
imm_headerdat = struct.unpack(imm_headformat,bindata)
imm_header ={}
for k in range(len(imm_headerdat)):
imm_header[imm_fieldnames[k]]=imm_headerdat[k]
return(imm_header)
def getNextHeaderPos(fp,header):
dlen = header['dlen']
if header['compression']==6:
fp.seek(dlen*6,1)
else:
fp.seek(dlen*2,1)
#getImage requres numpy, comment out of no numpy
def getImage(fp,h):
dlen = h['dlen']
if h['compression']==6:
loc_b = fp.read(4*dlen)
pixloc = struct.unpack('%di'%dlen,loc_b)
val_b = fp.read(2*dlen)
pixval = struct.unpack('%dH'%dlen,val_b)
imgdata = np.array( [0] * (h['rows'] * h['cols']))
for k in range(dlen):
imgdata[ pixloc[k] ] = pixval[k]
else:
pixdat=fp.read(2*dlen)
pixvals=struct.unpack('%dH'%dlen,pixdat)
imgdata=np.array(pixvals)
imgdata = imgdata.reshape(h['rows'], h['cols'])
return(imgdata)
| gpl-2.0 | 4,197,411,314,741,586,000 | 14.773333 | 109 | 0.614258 | false | 2.674454 | false | false | false |
nuagenetworks/vspk-python | vspk/v5_0/nuautodiscoveredgateway.py | 1 | 12767 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc, 2017 Nokia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .fetchers import NUWANServicesFetcher
from .fetchers import NUMetadatasFetcher
from .fetchers import NUWirelessPortsFetcher
from .fetchers import NUGlobalMetadatasFetcher
from .fetchers import NUPortsFetcher
from .fetchers import NUNSPortsFetcher
from .fetchers import NUEventLogsFetcher
from bambou import NURESTObject
class NUAutoDiscoveredGateway(NURESTObject):
""" Represents a AutoDiscoveredGateway in the VSD
Notes:
Represents Auto discovered Gateway.
"""
__rest_name__ = "autodiscoveredgateway"
__resource_name__ = "autodiscoveredgateways"
## Constants
CONST_PERSONALITY_HARDWARE_VTEP = "HARDWARE_VTEP"
CONST_PERSONALITY_VSA = "VSA"
CONST_PERSONALITY_VSG = "VSG"
CONST_ENTITY_SCOPE_GLOBAL = "GLOBAL"
CONST_PERSONALITY_OTHER = "OTHER"
CONST_PERSONALITY_VRSB = "VRSB"
CONST_PERSONALITY_NSG = "NSG"
CONST_PERSONALITY_VRSG = "VRSG"
CONST_PERSONALITY_DC7X50 = "DC7X50"
CONST_ENTITY_SCOPE_ENTERPRISE = "ENTERPRISE"
def __init__(self, **kwargs):
""" Initializes a AutoDiscoveredGateway instance
Notes:
You can specify all parameters while calling this methods.
A special argument named `data` will enable you to load the
object from a Python dictionary
Examples:
>>> autodiscoveredgateway = NUAutoDiscoveredGateway(id=u'xxxx-xxx-xxx-xxx', name=u'AutoDiscoveredGateway')
>>> autodiscoveredgateway = NUAutoDiscoveredGateway(data=my_dict)
"""
super(NUAutoDiscoveredGateway, self).__init__()
# Read/Write Attributes
self._name = None
self._last_updated_by = None
self._gateway_id = None
self._peer = None
self._personality = None
self._description = None
self._entity_scope = None
self._controllers = None
self._vtep = None
self._external_id = None
self._system_id = None
self.expose_attribute(local_name="name", remote_name="name", attribute_type=str, is_required=True, is_unique=False)
self.expose_attribute(local_name="last_updated_by", remote_name="lastUpdatedBy", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="gateway_id", remote_name="gatewayID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="peer", remote_name="peer", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="personality", remote_name="personality", attribute_type=str, is_required=True, is_unique=False, choices=[u'DC7X50', u'HARDWARE_VTEP', u'NSG', u'OTHER', u'VRSB', u'VRSG', u'VSA', u'VSG'])
self.expose_attribute(local_name="description", remote_name="description", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="entity_scope", remote_name="entityScope", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL'])
self.expose_attribute(local_name="controllers", remote_name="controllers", attribute_type=list, is_required=False, is_unique=False)
self.expose_attribute(local_name="vtep", remote_name="vtep", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True)
self.expose_attribute(local_name="system_id", remote_name="systemID", attribute_type=str, is_required=False, is_unique=False)
# Fetchers
self.wan_services = NUWANServicesFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.wireless_ports = NUWirelessPortsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.global_metadatas = NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.ports = NUPortsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.ns_ports = NUNSPortsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.event_logs = NUEventLogsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self._compute_args(**kwargs)
# Properties
@property
def name(self):
""" Get name value.
Notes:
Name of the Gateway
"""
return self._name
@name.setter
def name(self, value):
""" Set name value.
Notes:
Name of the Gateway
"""
self._name = value
@property
def last_updated_by(self):
""" Get last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
return self._last_updated_by
@last_updated_by.setter
def last_updated_by(self, value):
""" Set last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
self._last_updated_by = value
@property
def gateway_id(self):
""" Get gateway_id value.
Notes:
The Gateway associated with this Auto Discovered Gateway. This is a read only attribute
This attribute is named `gatewayID` in VSD API.
"""
return self._gateway_id
@gateway_id.setter
def gateway_id(self, value):
""" Set gateway_id value.
Notes:
The Gateway associated with this Auto Discovered Gateway. This is a read only attribute
This attribute is named `gatewayID` in VSD API.
"""
self._gateway_id = value
@property
def peer(self):
""" Get peer value.
Notes:
The System ID of the peer gateway associated with this Gateway instance when it is discovered by the network manager (VSD) as being redundant.
"""
return self._peer
@peer.setter
def peer(self, value):
""" Set peer value.
Notes:
The System ID of the peer gateway associated with this Gateway instance when it is discovered by the network manager (VSD) as being redundant.
"""
self._peer = value
@property
def personality(self):
""" Get personality value.
Notes:
Personality of the Gateway - VSG,VRSG,NONE,OTHER, cannot be changed after creation.
"""
return self._personality
@personality.setter
def personality(self, value):
""" Set personality value.
Notes:
Personality of the Gateway - VSG,VRSG,NONE,OTHER, cannot be changed after creation.
"""
self._personality = value
@property
def description(self):
""" Get description value.
Notes:
A description of the Gateway
"""
return self._description
@description.setter
def description(self, value):
""" Set description value.
Notes:
A description of the Gateway
"""
self._description = value
@property
def entity_scope(self):
""" Get entity_scope value.
Notes:
Specify if scope of entity is Data center or Enterprise level
This attribute is named `entityScope` in VSD API.
"""
return self._entity_scope
@entity_scope.setter
def entity_scope(self, value):
""" Set entity_scope value.
Notes:
Specify if scope of entity is Data center or Enterprise level
This attribute is named `entityScope` in VSD API.
"""
self._entity_scope = value
@property
def controllers(self):
""" Get controllers value.
Notes:
Controllers to which this gateway instance is associated with.
"""
return self._controllers
@controllers.setter
def controllers(self, value):
""" Set controllers value.
Notes:
Controllers to which this gateway instance is associated with.
"""
self._controllers = value
@property
def vtep(self):
""" Get vtep value.
Notes:
Represent the system ID or the Virtual IP of a service used by a Gateway (VSG for now) to establish a tunnel with a remote VSG or hypervisor. The format of this field is consistent with an IP address.
"""
return self._vtep
@vtep.setter
def vtep(self, value):
""" Set vtep value.
Notes:
Represent the system ID or the Virtual IP of a service used by a Gateway (VSG for now) to establish a tunnel with a remote VSG or hypervisor. The format of this field is consistent with an IP address.
"""
self._vtep = value
@property
def external_id(self):
""" Get external_id value.
Notes:
External object ID. Used for integration with third party systems
This attribute is named `externalID` in VSD API.
"""
return self._external_id
@external_id.setter
def external_id(self, value):
""" Set external_id value.
Notes:
External object ID. Used for integration with third party systems
This attribute is named `externalID` in VSD API.
"""
self._external_id = value
@property
def system_id(self):
""" Get system_id value.
Notes:
Identifier of the Gateway
This attribute is named `systemID` in VSD API.
"""
return self._system_id
@system_id.setter
def system_id(self, value):
""" Set system_id value.
Notes:
Identifier of the Gateway
This attribute is named `systemID` in VSD API.
"""
self._system_id = value
| bsd-3-clause | 4,147,512,493,940,058,600 | 28.487298 | 228 | 0.594658 | false | 4.509714 | false | false | false |
kolibre/libkolibre-clientcore | tests/fakesoapserver.py | 1 | 6179 | #!/usr/bin/python
"""
Copyright (C) 2012 Kolibre
This file is part of kolibre-clientcore.
Kolibre-clientcore is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 2.1 of the License, or
(at your option) any later version.
Kolibre-clientcore is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with kolibre-clientcore. If not, see <http://www.gnu.org/licenses/>.
"""
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
import getopt, sys, os, re, ssl
input = None
orderfile = None
class FakeSoapServer(BaseHTTPRequestHandler):
# override log request method
def log_request(self, code=None, size=None):
pass
def do_GET(self):
self.send_error(404,'File Not Found: %s' % self.path)
def do_POST(self):
response = self.getResponse()
self.send_response(200)
self.send_header('Content-Length', len(response))
self.send_header('Content-Type', 'text/xml; charset=utf-8')
self.end_headers()
self.wfile.write(response)
self.incrementOrder()
return
def getSoapAction(self):
action = None
for name, value in sorted(self.headers.items()):
if name.lower() == "soapaction":
action = value.strip()
if action is None:
self.sendInternalError('SOAPAction not found in header')
return
return action.lstrip('"/').rstrip('"')
def getOrder(self):
f = open(orderfile, 'r')
order = int(f.read())
self.order = order
f.close()
return order
def incrementOrder(self):
f = open(orderfile, 'w')
order = self.order + 1
f.write(str(order))
f.close()
def getResponse(self):
SoapAction = self.getSoapAction()
responsefile = input + '/' + str(self.getOrder()) + '_' + SoapAction
if not os.path.exists(responsefile):
self.sendInternalError('input file ' + responsefile + ' not found')
return
f = open(responsefile)
content = f.read()
f.close()
# pattern for finding beginning of soap envelope "<SOAP-ENV:Envelope"
pattern = re.compile('<[\w-]*:Envelope', re.IGNORECASE)
# find last position of pattern and substring from there
matches = pattern.findall(content)
start = content.rfind(matches[len(matches)-1])
body = content[start:]
# manipulate response if SOAPAction is logOn
if SoapAction == 'logOn':
request_len = int(self.headers.getheader('content-length'))
request = self.rfile.read(request_len)
if 'incorrect' in request:
body = body.replace('logOnResult>true', 'logOnResult>false')
return body
def sendInternalError(self, faultstring):
soapfault = '<SOAP-ENV:Envelope xmlns:SOAP-ENV="http://schemas.xmlsoap.org/soap/envelope/" xmlns:ns1="http://www.daisy.org/ns/daisy-online/"><SOAP-ENV:Body><SOAP-ENV:Fault><faultcode>SOAP-ENV:Server</faultcode><faultstring>' + faultstring + '</faultstring><faultactor></faultactor><detail><ns1:internalServerErrorFault/></detail></SOAP-ENV:Fault></SOAP-ENV:Body></SOAP-ENV:Envelope>'
self.send_response(500)
self.send_header('Content-Length', len(soapfault))
self.send_header('Content-Type', 'text/xml; charset=utf-8')
self.end_headers()
self.wfile.write(soapfault)
return
def usage():
print ''
print 'usage: python ' + sys.argv[0] + ' -i <dir> -o <file>'
print
print 'required arguments:'
print ' -i, --input <dir>\t\tpath to folder containing soap responses'
print ' -o, --order <file>\t\tpath to file controlling the order'
print
print 'optional arguments:'
print ' -p, --port <port>\t\tport to listen on [default: 8080]'
print ' -s, --ssl\t\t\tuse ssl'
print ' -c, --cert <cert>\t\tpath to certificate'
print ' -h, --help\t\t\tshow this help message and exit'
if __name__ == '__main__':
host = 'localhost'
port = 8080
secure = False
cert = None
# parse command line options
try:
opts, args = getopt.getopt(sys.argv[1:], 'hp:i:o:sc:', ['help', 'port', 'input', 'order', 'ssl', 'cert'])
except getopt.GetoptError, err:
sys.stderr.write(str(err)) # will print something like "option -a not recognized"
usage()
sys.exit(2)
for opt, value in opts:
if opt in ('-h', '--help'):
usage()
sys.exit()
elif opt in ('-p', '--port'):
port = int(value)
elif opt in ('-i', '--input'):
input = value
elif opt in ('-o', '--order'):
orderfile = value
elif opt in ('-s', '--ssl'):
secure = True
elif opt in ('-c', '--cert'):
cert = value
# check if input exists
if input is None:
usage()
sys.exit()
if not os.path.exists(input):
sys.stderr.write("error: input '" + input + "' does not exists\n")
sys.exit(2)
if orderfile is None:
usage()
sys.exit()
if not os.path.exists(orderfile):
sys.stderr.write("error: orderfile '" + orderfile + "' does not exists\n")
sys.exit(2)
if secure and cert is None:
sys.stderr.write("error: specify a certificate to use with ssl\n")
sys.exit(2)
if secure and not os.path.exists(cert):
sys.stderr.write("error: certificate '" + cert + "' does not exists\n")
sys.exit(2)
# start server
try:
server = HTTPServer(('', port), FakeSoapServer)
if secure:
server.socket = ssl.wrap_socket(server.socket, certfile=cert, server_side=True)
server.serve_forever()
except KeyboardInterrupt:
server.socket.close()
| lgpl-2.1 | -8,709,200,525,714,554,000 | 34.107955 | 391 | 0.611264 | false | 3.756231 | false | false | false |
osrf/opensplice | build/docs/GPBTutorial/source/conf.py | 2 | 8778 | # -*- coding: utf-8 -*-
#
# DDS_Cpp_GPB_Tutorial build configuration file, created by
# ReST Editor on 27-Apr-2015
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
import time
# import liteconfig
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.todo']
# Add any paths that contain templates here, relative to this directory.
templates_path = [u'_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
source_encoding = u'utf-8-sig'
# The master toctree document.
master_doc = u'index'
# General information about the project.
project = u'OpenSplice GPB Tutorial'
this_year = time.strftime( '%Y' )
copyright = u'{y}, ADLINK Technology Limited'.format( y = this_year )
print 'Copyright string is:', copyright
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
#version = u's'
#version = liteconfig.version
version = u'6.x'
# The full version, including alpha/beta/rc tags.
#release = u's'
release = version
#release = u'00'
print 'Short version string is:', version
print 'Full version string is:', release
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
language = u'en'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# Force blank date with today = ' ' (space, not empty string)
today = ' '
# ***************
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_theme = u'sphinxdoc'
html_theme = u'vortextheme'
# LINUX PATH:
html_theme_path = ['../../.']
# WINDOWS PATH:
# html_theme_path = ['..\..\.']
#build theme directory in lite using environment variable, so shared amongst books
# insight team can delete,
#html_theme_path = [os.environ['VL_HOME'] + '/build/docs']
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
html_title = 'OpenSplice GPB Tutorial'
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
#html_short_title = 'HTML short Title conf.py'
#html_short_title = ' '
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# ???????????????????????????????????????????????????????????????????
html_logo = './images/Vortex_logo_2014.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = []
html_static_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'OpenSpliceGPBTutorial'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
latex_paper_size = u'a4'
# The font size ('10pt', '11pt' or '12pt').
latex_font_size = u'10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
# latex_documents = [('index', 'OpenSpliceGettingStartedGuide.tex', u'OpenSplice Getting Started Guide', u'', 'manual', True)]
latex_documents = [('index', 'OpenSplice_GPBTutorial.tex', u'OpenSplice GPB Tutorial', u'', 'manual', True)]
# ***************
# Note 'author' field empty
# Added 'True' to end of generated line to suppress 'Index & Tables'
# A dictionary that contains LaTeX snippets that override those Sphinx usually
# puts into the generated .tex files.
latex_elements = { 'babel': '\\usepackage[english]{babel}' }
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
latex_logo = 'images/Vortex-OpenSplice-Cover.png'
# ***************
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# ***************
# * THIS GETS RID OF BLANK PAGES AT ENDS OF CHAPTERS & ToC
latex_elements = {
'classoptions': ',openany, oneside',
'babel': '\\usepackage[english]{babel}'
}
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [('index', 'OpenSpliceGPBTutorial', u'OpenSplice GPB Tutorial', [u'ADLINK Technology Limited'], 1)]
# * NOT TESTED
# -- Additional options --------------------------------------------------------
todo_include_todos = True
# * NOT TESTED
| apache-2.0 | 1,067,068,664,074,144,000 | 30.57554 | 126 | 0.692755 | false | 3.600492 | true | false | false |
andreasbastian/pyControl | 2d.py | 1 | 4951 |
##### NEED TO FIX DIVISION OPERATOR!!!!!
from __future__ import division
#http://docs.python.org/release/2.2.3/whatsnew/node7.html
#The most controversial change in Python 2.2 heralds the start of an effort to fix an old design flaw that's been in Python from the beginning. Currently Python's division operator, /, behaves like C's division operator when presented with two integer arguments: it returns an integer result that's truncated down when there would be a fractional part. For example, 3/2 is 1, not 1.5, and (-1)/2 is -1, not -0.5. This means that the results of divison can vary unexpectedly depending on the type of the two operands and because Python is dynamically typed, it can be difficult to determine the possible types of the operands.
import time
from time import gmtime, strftime, localtime
import datetime
def now():
# now = strftime("%I:%M:%S %p", gmtime())
now = strftime("%A, %B %d, %Y @ %I:%M:%S %p", localtime())
return now
#G1 Code Object
class G1Code:
def __init__(self, X=0, Y=0, Z=0, F=0):
self.X =X
self.Y =Y
self.Z = Z
self.F = F
def __str__(self):
string = "G1 X" + str(self.X) + " Y" + str(self.Y) + " Z" + str(self.Z) + " F" + str(self.F)
return string
################## CHECK IF NUMBER IS EVEN OR ODD ###################
def checkIfEvenOrOdd(number):
if number%2==0:
return "even"
else:
return "odd"
class StartSintering:
def __init__(self, PWM=0, pause_msec=1000): #default to PWM=0 for safety
self.PWM = PWM
self.pause_msec = pause_msec
def __str__(self):
if self.pause_msec != 0:
string = "M128 S" + str(self.PWM) + " ; EXTRUSION pressure set\nG4 P4000 ; wait for 4 sec to get up to pressure\nM126 ; Start Extrusion at this PWM\nG4 P" + str(self.pause_msec) + " ; wait for " + str(self.pause_msec) + " milliseconds before movement\n"
else:
string = "M128 S" + str(self.PWM) + " ; EXTRUSION pressure set\nG4 P4000 ; wait for 4 sec to get up to pressure\nM126 ; Start Extrusion at this PWM WITH NO DELAY FOR NEXT MOVEMENT\n"
return string
class StopSintering:
def __init__(self, PWM=0, pause_msec=1000): #changed PWM=255 to PWM=0-- off better than full power.
self.PWM = PWM
self.pause_msec = pause_msec
def __str__(self):
if self.pause_msec != 0:
string = "M127 ; (Stop Sugar Extruding)\nM128 S255 ; (Vent EtoP and stop current)\nG4 P" + str(self.pause_msec) + " ; pause for " + str(self.pause_msec) + " milliseconds\n"
else:
string = "M127 ; (Stop Sugar Extruding)\nM128 S255 ; (Vent EtoP and stop current)\n"
return string
#DEFINE VARIABLES:
regularSpeed = 600 #mm/s? in/min?
defaultPWM = 30
# PHYSICAL REGION IN WHICH TO GENERATE LINES FOR TESTING POWER PARAMETER SPACE
SquareSize = 80
numLines = 10 #number of test lines to sinter in target area
ThisGCode = G1Code(X=0, Y=0, Z=0, F=regularSpeed)
SweetStart = StartSintering(PWM=defaultPWM)
SweetStop = StopSintering()
fname = "2d.gcode"
print "Preparing to output: " + fname
#Open the output f and paste on the "headers"
f = open(fname,"w")
f.writelines(";(***************SPEED/POWER TRAVERSALS*********************)\n")
f.writelines(";(*** " + str(now()) + " ***)\n")
#f.writelines(""";(Copyright 2012 Jordan Miller, [email protected], All Rights Reserved)
#;(*** Using significantly modified/customized Marlin Firmware, RAMBo ***)
#M127 ; Laser Off
#M129 ; Laser PWM set to zero
#""")
f.writelines("G92 X0 Y0 Z0 ; you are now at 0,0,0\n")
f.writelines("""G90 ; absolute coordinates
;(***************End of Beginning*********************)
""")
# GO TO 0,0
#ThisGCode.X = 0
#ThisGCode.Y = 0
#ThisGCode.F = regularSpeed
#f.writelines(str(ThisGCode)+ "\n")
linSpacing = SquareSize/numLines
#laserPWM = 30
#for i in range(0, numLines):
# ThisGCode.X = 0
# ThisGCode.Y = i*linSpacing
# ThisGCode.F = regularSpeed
# f.writelines(str(ThisGCode) + "\n")
#messy, but functional:
#def G1(targetX, targetY, speed):
#cmd = "G1 X" + targetX + " Y" + targetY + " E" + speed
#return cmd
currX = 0
lineLength = 10
laserSpeed = 25 #mm/s
laserSpeed *= 60 #mm/min
linSpacing = 0.2
#for y in range(0,50,linSpacing):
# f.writelines("M701 S40\n")
#
# f.writelines("G1 X" + str(lineLength) + " Y" + str(y) + " F" + str(laserSpeed) + "\n") #give user heads up about the impending laser blast
# f.writelines("M701 S0\n")
# f.writelines("G1 X0 Y" + str(y+linSpacing) + " F5000\n\n")
# laserSpeed -= 60
for x in range(0,50,1):
f.writelines("M128 S100\n")
f.writelines("G1 X" + str(x/5) + " Y" + str(lineLength) + " F960" + "\n")
f.writelines("M128 S0\n")
f.writelines("G1 X" + str(x/5+linSpacing) + " Y0 F5000\n\n")
f.writelines("M128 S0 \n")
f.writelines("""
;(end of the file, shutdown routines)
M127 ; Laser Off
M701 S0 ; Laser PWM set to zero
M84 ; motors off
""")
f.close
| gpl-3.0 | 7,031,432,962,849,804,000 | 27.646707 | 625 | 0.633003 | false | 2.724821 | false | false | false |
blackye/luscan-devel | thirdparty_libs/shodan/export.py | 7 | 2288 | import sys
from datetime import datetime
from xml.sax import make_parser, handler
# Type conversion helper functions
def parse_date(args):
return datetime.strptime(args, '%d.%m.%Y')
class ExportSaxParser(handler.ContentHandler):
"""Parses Shodan's export XML file and executes the callback for each
entry.
"""
# Callbacks
entry_cb = None
# Keep track of where we're at
_in_host = False
_in_data = False
_host = None
_data = u''
# Conversion schemas
_host_attr_schema = {
'port': int,
'updated': parse_date,
}
def __init__(self, entry_cb=None):
# Define the callbacks
self.entry_cb = entry_cb
# ContentHandler methods
def startElement(self, name, attrs):
if name =='host':
# Extract all the attribute information
self._host = {}
for (name, value) in attrs.items():
# Convert the field to a native type if it's defined in the schema
self._host[name] = self._host_attr_schema.get(name, lambda x: x)(value)
# Update the state machine
self._in_host = True
elif name == 'data':
self._in_data = True
self._data = u''
def endElement(self, name):
if name == 'host':
# Execute the callback
self.entry_cb(self._host)
# Update the state machine
self._in_host = False
elif name == 'data':
self._host['data'] = self._data
self._in_data = False
def characters(self, content):
if self._in_data:
self._data += content
class ExportParser(object):
entry_cb = None
def __init__(self, entry_cb=None):
self.entry_cb = entry_cb
def parse(self, filename):
parser = make_parser()
parser.setContentHandler(ExportSaxParser(self.entry_cb))
parser.parse(filename)
if __name__ == '__main__':
def test_cb(entry):
print entry
import sys
parser = ExportParser(test_cb)
parser.parse(sys.argv[1])
| gpl-2.0 | 6,024,183,163,331,204,000 | 24.298851 | 87 | 0.521416 | false | 4.292683 | false | false | false |
HIIT/hybra-core | hybra/wordcloud/module_wordclouds.py | 1 | 1879 | #!/usr/local/bin/python
# -*- coding: utf-8 -*-
import os
import time
from string import Template
import random
import re
import helpers.urls as urls
from IPython.core.display import HTML, display
from collections import Counter
path = os.path.dirname(os.path.abspath(__file__))
def create_wordcloud( data, plt, stopwords = ["the", "a", "or", "tai", "and", "ja", "to", "on", "in", "of", "for", "is", "i", "this"], width=850, height=350 ):
import codecs
html_template = Template( codecs.open( path + '/wordcloud.html', 'r').read() )
js_template = Template(codecs.open(path + '/wordcloud.js', 'r').read())
css_text = codecs.open(path + '/wordcloud.css', 'r').read()
texts = ""
for node in data:
text = encode_utf8(node['text_content']).lower()
## clean up: remove URLs non-alphabet characters
text = re.sub( urls.URL_REGEXP, ' ', text )
text = re.sub('[^a-zöä\s]+', ' ', text)
for word in text.split(" "): ## todo? should we use nltk?
if word not in stopwords:
texts += word + " "
frequencies = Counter(texts.split())
freqs_list = []
colors = ["#A5E6BA", "#9AC6C5", "#7785AC", "#248757", "#360568", "#F0544F", "#e07f9f", "#1d7059", "#3e6282"]
for key, value in frequencies.iteritems():
freqs_list.append({"text":encode_utf8(key),"size":str(value), "color": random.choice(colors)})
graph_div_id = int(time.time() * 1000)
js_text = js_template.substitute({'frequencies': str(freqs_list), 'graph_div_id': graph_div_id, 'width': width, 'height': height})
html_template = html_template.substitute({'js': js_text, 'graph_div_id': graph_div_id, 'css': css_text})
display( HTML( html_template ) )
return None
def encode_utf8( string ):
try:
return string.encode('utf8')
except UnicodeDecodeError:
return string
| mit | -5,407,360,248,777,718,000 | 32.517857 | 159 | 0.609483 | false | 3.236207 | false | false | false |
InContextSolutions/flask-goat | setup.py | 1 | 1244 | """
Flask-Goat
-------------
Flask-Goat is a plugin for security and user administration via GitHub OAuth2 & team structure within your organization.
"""
from setuptools import setup
setup(
name='Flask-Goat',
version='0.2.1',
url='http://incontextsolutions.github.io/flask-goat/',
license='MIT',
author='Tristan Wietsma',
author_email='[email protected]',
description='Flask plugin for security and user administration via GitHub OAuth & organization',
long_description=__doc__,
packages=['flask_goat'],
zip_safe=False,
include_package_data=True,
platforms='any',
tests_requires=[
'coverage',
'nose',
'httmock',
'pep8',
'pyflakes',
],
install_requires=[
'Flask',
'redis',
'simplejson',
'requests',
],
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
| mit | -8,015,692,140,014,328,000 | 26.043478 | 120 | 0.603698 | false | 4.216949 | false | false | false |
ak110/pytoolkit | pytoolkit/od_test.py | 1 | 5662 | import numpy as np
import pytest
import pytoolkit as tk
def test_to_str():
class_names = ["class00", "class01", "class02"]
y = tk.od.ObjectsAnnotation(
"path/to/dummy.jpg",
100,
100,
np.array([1, 2]),
np.array([[0.900, 0.900, 1.000, 1.000], [0.000, 0.000, 0.200, 0.200]]),
)
p = tk.od.ObjectsPrediction(
np.array([1, 0, 2]),
np.array([0.8, 0.1, 0.8]),
np.array(
[[0.900, 0.900, 1.000, 1.000], [0, 0, 1, 1], [0.000, 0.000, 0.200, 0.200]]
),
)
assert (
y.to_str(class_names)
== "(0, 0) [20 x 20]: class02\n(90, 90) [10 x 10]: class01"
)
assert (
p.to_str(100, 100, class_names, 0.5)
== "(0, 0) [20 x 20]: class02\n(90, 90) [10 x 10]: class01"
)
def test_plot_objects(data_dir, check_dir):
img_path = data_dir / "od" / "JPEGImages" / "無題.png"
class_names = ["~", "〇"]
ann = tk.od.ObjectsAnnotation(
img_path, 768, 614, [0], [[203 / 768, 255 / 614, 601 / 768, 355 / 614]]
)
img = tk.od.plot_objects(img_path, ann.classes, None, ann.bboxes, None)
tk.ndimage.save(check_dir / "plot_objects1.png", img)
img = tk.od.plot_objects(img_path, ann.classes, None, ann.bboxes, class_names)
tk.ndimage.save(check_dir / "plot_objects2.png", img)
img = tk.od.plot_objects(
img_path, ann.classes, np.array([0.5]), ann.bboxes, class_names
)
tk.ndimage.save(check_dir / "plot_objects3.png", img)
def test_iou():
bboxes_a = np.array([[0, 0, 200, 200], [1000, 1000, 1001, 1001]])
bboxes_b = np.array([[100, 100, 300, 300]])
iou = tk.od.compute_iou(bboxes_a, bboxes_b)
assert iou.shape == (2, 1)
assert iou[0, 0] == pytest.approx(100 * 100 / (200 * 200 * 2 - 100 * 100))
assert iou[1, 0] == 0
def test_is_in_box():
boxes_a = np.array([[100, 100, 300, 300]])
boxes_b = np.array(
[
[150, 150, 250, 250],
[100, 100, 300, 300],
[50, 50, 350, 350],
[150, 150, 350, 350],
]
)
is_in = tk.od.is_in_box(boxes_a, boxes_b)
assert is_in.shape == (len(boxes_a), len(boxes_b))
assert (is_in == [[False, True, True, False]]).all()
def test_od_accuracy():
y_true = np.tile(
np.array(
[
tk.od.ObjectsAnnotation(
path=".",
width=100,
height=100,
classes=[0, 1],
bboxes=[[0.00, 0.00, 0.05, 0.05], [0.25, 0.25, 0.75, 0.75]],
)
]
),
6,
)
y_pred = np.array(
[
# 一致
tk.od.ObjectsPrediction(
classes=[1, 0],
confs=[1, 1],
bboxes=[[0.25, 0.25, 0.75, 0.75], [0.00, 0.00, 0.05, 0.05]],
),
# conf低
tk.od.ObjectsPrediction(
classes=[1, 0, 0],
confs=[1, 0, 1],
bboxes=[
[0.25, 0.25, 0.75, 0.75],
[0.00, 0.00, 0.05, 0.05],
[0.00, 0.00, 0.05, 0.05],
],
),
# クラス違い
tk.od.ObjectsPrediction(
classes=[1, 1],
confs=[1, 1],
bboxes=[[0.25, 0.25, 0.75, 0.75], [0.00, 0.00, 0.05, 0.05]],
),
# 重複
tk.od.ObjectsPrediction(
classes=[1, 0, 0],
confs=[1, 1, 1],
bboxes=[
[0.25, 0.25, 0.75, 0.75],
[0.00, 0.00, 0.05, 0.05],
[0.00, 0.00, 0.05, 0.05],
],
),
# 不足
tk.od.ObjectsPrediction(
classes=[1], confs=[1], bboxes=[[0.25, 0.25, 0.75, 0.75]]
),
# IoU低
tk.od.ObjectsPrediction(
classes=[1, 0],
confs=[1, 1],
bboxes=[[0.25, 0.25, 0.75, 0.75], [0.90, 0.90, 0.95, 0.95]],
),
]
)
is_match_expected = [True, True, False, False, False, False]
for yt, yp, m in zip(y_true, y_pred, is_match_expected):
assert yp.is_match(yt.classes, yt.bboxes, conf_threshold=0.5) == m
assert tk.od.od_accuracy(y_true, y_pred, conf_threshold=0.5) == pytest.approx(2 / 6)
def test_confusion_matrix():
y_true = np.array([])
y_pred = np.array([])
cm_actual = tk.od.confusion_matrix(y_true, y_pred, num_classes=3)
assert (cm_actual == np.zeros((4, 4), dtype=np.int32)).all()
y_true = np.array(
[
tk.od.ObjectsAnnotation(
path=".",
width=100,
height=100,
classes=[1],
bboxes=[[0.25, 0.25, 0.75, 0.75]],
)
]
)
y_pred = np.array(
[
tk.od.ObjectsPrediction(
classes=[0, 2, 1, 1, 2],
confs=[0, 1, 1, 1, 1],
bboxes=[
[0.25, 0.25, 0.75, 0.75], # conf低
[0.25, 0.25, 0.75, 0.75], # クラス違い
[0.25, 0.25, 0.75, 0.75], # 検知
[0.25, 0.25, 0.75, 0.75], # 重複
[0.95, 0.95, 0.99, 0.99], # IoU低
],
)
]
)
cm_actual = tk.od.confusion_matrix(
y_true, y_pred, conf_threshold=0.5, num_classes=3
)
cm_expected = np.array(
[[0, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 0], [0, 1, 2, 0]], dtype=np.int32
)
assert (cm_actual == cm_expected).all()
| mit | -8,270,628,438,416,416,000 | 29.972376 | 88 | 0.430075 | false | 2.866053 | true | false | false |
WarrenWeckesser/scikits-image | doc/examples/plot_ssim.py | 2 | 2340 | """
===========================
Structural similarity index
===========================
When comparing images, the mean squared error (MSE)--while simple to
implement--is not highly indicative of perceived similarity. Structural
similarity aims to address this shortcoming by taking texture into account
[1]_, [2]_.
The example shows two modifications of the input image, each with the same MSE,
but with very different mean structural similarity indices.
.. [1] Zhou Wang; Bovik, A.C.; ,"Mean squared error: Love it or leave it? A new
look at Signal Fidelity Measures," Signal Processing Magazine, IEEE,
vol. 26, no. 1, pp. 98-117, Jan. 2009.
.. [2] Z. Wang, A. C. Bovik, H. R. Sheikh and E. P. Simoncelli, "Image quality
assessment: From error visibility to structural similarity," IEEE
Transactions on Image Processing, vol. 13, no. 4, pp. 600-612,
Apr. 2004.
"""
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from skimage import data, img_as_float
from skimage.measure import structural_similarity as ssim
matplotlib.rcParams['font.size'] = 9
img = img_as_float(data.camera())
rows, cols = img.shape
noise = np.ones_like(img) * 0.2 * (img.max() - img.min())
noise[np.random.random(size=noise.shape) > 0.5] *= -1
def mse(x, y):
return np.linalg.norm(x - y)
img_noise = img + noise
img_const = img + abs(noise)
fig, (ax0, ax1, ax2) = plt.subplots(nrows=1, ncols=3, figsize=(8, 4), sharex=True, sharey=True, subplot_kw={'adjustable':'box-forced'})
mse_none = mse(img, img)
ssim_none = ssim(img, img, dynamic_range=img.max() - img.min())
mse_noise = mse(img, img_noise)
ssim_noise = ssim(img, img_noise,
dynamic_range=img_const.max() - img_const.min())
mse_const = mse(img, img_const)
ssim_const = ssim(img, img_const,
dynamic_range=img_noise.max() - img_noise.min())
label = 'MSE: %2.f, SSIM: %.2f'
ax0.imshow(img, cmap=plt.cm.gray, vmin=0, vmax=1)
ax0.set_xlabel(label % (mse_none, ssim_none))
ax0.set_title('Original image')
ax1.imshow(img_noise, cmap=plt.cm.gray, vmin=0, vmax=1)
ax1.set_xlabel(label % (mse_noise, ssim_noise))
ax1.set_title('Image with noise')
ax2.imshow(img_const, cmap=plt.cm.gray, vmin=0, vmax=1)
ax2.set_xlabel(label % (mse_const, ssim_const))
ax2.set_title('Image plus constant')
plt.show()
| bsd-3-clause | 6,255,014,518,788,584,000 | 30.2 | 135 | 0.669231 | false | 2.853659 | false | false | false |
Alwnikrotikz/stoqs | loaders/HABLoader.py | 4 | 23608 | #!/usr/bin/env python
__author__ = "Danelle Cline"
__copyright__ = "Copyright 2012, MBARI"
__license__ = "GPL"
__maintainer__ = "Danelle Cline"
__email__ = "dcline at mbari.org"
__status__ = "Development"
__doc__ = '''
This loader loads water sample data from the Southern California Coastal Ocean Observing System (SCCOOS)
Harmful Algal Bloom project into the STOQS database. Each row is saved as a Sample, and each
sample measurement (column), e.g. nitrate or chlorophyll, is saved as a MeasuredParameter.
To run the loader
    1. Download a CSV from http://www.sccoos.org/query/?project=Harmful%20Algal%20Blooms&
       selecting some or all of the desired measurements, and save it in CSV file format
2. Create a stoqs database called stoqs_habs
3. Load with:
HABLoader.py <filename.csv> stoqs_habs
@undocumented: __doc__ parser
@author: __author__
@status: __status__
@license: __license__
'''
# Force lookup of models to THE specific stoqs module.
import os
import sys
os.environ['DJANGO_SETTINGS_MODULE']='settings'
project_dir = os.path.dirname(__file__)
# Add parent dir to pythonpath so that we can see the loaders and stoqs modules
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "../") )
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned
from stoqs import models as m
from loaders import STOQS_Loader, SkipRecord, ParameterNotFound
from datetime import datetime, timedelta
from pydap.model import BaseType
from django.contrib.gis.geos import fromstr, Point, LineString
import time
import numpy
import csv
import urllib2
import logging
from glob import glob
from tempfile import NamedTemporaryFile
import re
import pprint
import pytz
# Set up logging
logger = logging.getLogger('loaders')
logger.setLevel(logging.ERROR)
# When settings.DEBUG is True Django will fill up a hash with stats on every insert done to the database.
# "Monkey patch" the CursorWrapper to prevent this. Otherwise we can't load large amounts of data.
# See http://stackoverflow.com/questions/7768027/turn-off-sql-logging-while-keeping-settings-debug
from django.db.backends import BaseDatabaseWrapper
from django.db.backends.util import CursorWrapper
if settings.DEBUG:
BaseDatabaseWrapper.make_debug_cursor = lambda self, cursor: CursorWrapper(cursor, self)
class ClosestTimeNotFoundException(Exception):
pass
class SingleActivityNotFound(Exception):
pass
def get_closest_instantpoint(aName, tv, dbAlias):
    '''
    Start with a tolerance of 1 second and double it until the query returns a
    non-zero count, then pick the value with the minimum absolute time difference.
    '''
tol = 1
num_timevalues = 0
logger.debug('Looking for tv = %s', tv)
while tol < 86400: # Fail if not found within 24 hours
qs = m.InstantPoint.objects.using(dbAlias).filter( activity__name__contains = aName,
timevalue__gte = (tv-timedelta(seconds=tol)),
timevalue__lte = (tv+timedelta(seconds=tol))
).order_by('timevalue')
if qs.count():
num_timevalues = qs.count()
break
tol = tol * 2
if not num_timevalues:
raise ClosestTimeNotFoundException
logger.debug('Found %d time values with tol = %d', num_timevalues, tol)
timevalues = [q.timevalue for q in qs]
logger.debug('timevalues = %s', timevalues)
i = 0
i_min = 0
secdiff = []
minsecdiff = tol
for t in timevalues:
secdiff.append(abs(t - tv).seconds)
if secdiff[i] < minsecdiff:
minsecdiff = secdiff[i]
i_min = i
logger.debug('i = %d, secdiff = %d', i, secdiff[i])
i = i + 1
logger.debug('i_min = %d', i_min)
return qs[i_min], secdiff[i_min]
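# Usage sketch (activity name, timestamp, and database alias are hypothetical):
#   ip, seconds_off = get_closest_instantpoint('Sample', datetime(2011, 10, 5, 13, 0), 'stoqs_habs')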
class HABLoader(STOQS_Loader):
'''
    Inherit database loading functions from STOQS_Loader and use its constructor
'''
parameter_dict={} # used to cache parameter objects
standard_names = {} # should be defined for each child class
include_names=[] # names to include, if set it is used in conjunction with ignored_names
# Note: if a name is both in include_names and ignored_names it is ignored.
ignored_names=[] # Should be defined for each child class
loaded = 0
mindepth = 8000.0
maxdepth = -8000.0
parmCount = {}
parameterCount = {}
def __init__(self, activityName, platformName, dbAlias='default', campaignName=None,
activitytypeName=None, platformColor=None, platformTypeName=None,
startDatetime=None, endDatetime=None, dataStartDatetime=None ):
'''
Build a set of standard names using the dataset.
The activity is saved, as all the data loaded will be a set of instantpoints
that use the specified activity.
@param activityName: A string describing this activity
@param platformName: A string that is the name of the platform. If that name for a Platform exists in the DB, it will be used.
        @param platformColor: An RGB hex string representing the color of the platform.
@param dbAlias: The name of the database alias as defined in settings.py
@param campaignName: A string describing the Campaign in which this activity belongs, If that name for a Campaign exists in the DB, it will be used.
@param activitytypeName: A string such as 'mooring deployment' or 'AUV mission' describing type of activity, If that name for a ActivityType exists in the DB, it will be used.
@param platformTypeName: A string describing the type of platform, e.g.: 'mooring', 'auv'. If that name for a PlatformType exists in the DB, it will be used.
        @param startDatetime: A Python datetime.datetime object specifying the start date time of data to load
        @param endDatetime: A Python datetime.datetime object specifying the end date time of data to load
        @param dataStartDatetime: A Python datetime.datetime object specifying the start date time of data to append to an existing Activity
'''
self.campaignName = campaignName
self.activitytypeName = activitytypeName
self.platformName = platformName
self.platformColor = platformColor
self.dbAlias = dbAlias
self.platformTypeName = platformTypeName
self.activityName = activityName
self.startDatetime = startDatetime
self.endDatetime = endDatetime
self.dataStartDatetime = dataStartDatetime # For when we append data to an existing Activity
def initDB(self):
        '''Do the initial database activities that are required before the data are processed: getPlatform and createActivity.
'''
self.platform = self.getPlatform(self.platformName, self.platformTypeName)
self.createActivity()
self.addParameters(self.ds)
#self.addResources()
def load_measurement(self, lat, lon, depth, time, parmNameValues):
'''
        Load the data values recorded for each location
@parmNameValues is a list of 2-tuples of (ParameterName, Value) measured at the time and location specified by
@lat decimal degrees
@lon decimal degrees
@time Python datetime.datetime object
@depth in meters
'''
mt = None
try:
mt = self.createMeasurement(time = time,
depth = depth,
lat = lat,
long = lon)
logger.info("measurement._state.db = %s", mt._state.db)
if depth < self.mindepth:
self.mindepth = depth
if depth > self.maxdepth:
self.maxdepth = depth
except SkipRecord, e:
logger.info(e)
except Exception, e:
logger.error(e)
sys.exit(-1)
else:
logger.info("longitude = %s, latitude = %s, time = %s, depth = %s", lon, lat, time, depth)
for pn,value in parmNameValues:
logger.info("pn = %s", pn)
logger.info("parameter._state.db = %s", self.getParameterByName(pn)._state.db)
mp = m.MeasuredParameter(measurement = mt,
parameter = self.getParameterByName(pn),
datavalue = value)
try:
mp.save(using=self.dbAlias)
except ParameterNotFound:
logger.error("Unable to locate parameter for %s, skipping", pn)
continue
except Exception, e:
logger.error('Exception %s. Skipping this record.', e)
logger.error("Bad value (id=%(id)s) for %(pn)s = %(value)s", {'pn': pn, 'value': value, 'id': mp.pk})
continue
else:
self.loaded += 1
logger.info("Inserted value (id=%(id)s) for %(pn)s = %(value)s", {'pn': pn, 'value': value, 'id': mp.pk})
self.parmCount[pn] += 1
if self.parameterCount.has_key(self.getParameterByName(pn)):
self.parameterCount[self.getParameterByName(pn)] += 1
else:
self.parameterCount[self.getParameterByName(pn)] = 0
def load_sample(self, lon, lat, depth, timevalue, bottleName):
'''
Load a single water sample
'''
# Get the Activity from the Database
try:
activity = m.Activity.objects.using(self.dbAlias).get(name__contains=self.activityName)
logger.debug('Got activity = %s', activity)
except ObjectDoesNotExist:
logger.warn('Failed to find Activity with name like %s. Skipping load.', self.activityName)
return
except MultipleObjectsReturned:
logger.warn('Multiple objects returned for name__contains = %s. Selecting one by random and continuing...', self.activityName)
activity = m.Activity.objects.using(self.dbAlias).filter(name__contains=self.activityName)[0]
# Get or create SampleType
(sample_type, created) = m.SampleType.objects.using(self.dbAlias).get_or_create(name = 'Pier')
logger.debug('sampletype %s, created = %s', sample_type, created)
# Get or create SamplePurpose
(sample_purpose, created) = m.SamplePurpose.objects.using(self.dbAlias).get_or_create(name = 'StandardDepth')
logger.debug('samplepurpose %s, created = %s', sample_purpose, created)
try:
ip, seconds_diff = get_closest_instantpoint(self.activityName, timevalue, self.dbAlias)
point = 'POINT(%s %s)' % (lon, lat)
stuple = m.Sample.objects.using(self.dbAlias).get_or_create( name = bottleName,
depth = str(depth), # Must be str to convert to Decimal
geom = point,
instantpoint = ip,
sampletype = sample_type,
samplepurpose = sample_purpose,
volume = 20000.0
)
rtuple = m.Resource.objects.using(self.dbAlias).get_or_create( name = 'Seconds away from InstantPoint',
value = seconds_diff
)
            # 2nd item of tuples will be True or False depending on whether the object was created or gotten
logger.info('Loaded Sample %s with Resource: %s', stuple, rtuple)
except ClosestTimeNotFoundException:
logger.info('ClosestTimeNotFoundException: A match for %s not found for %s', timevalue, activity)
else:
logger.info('Loaded Bottle name = %s', bottleName)
def process_csv_file(self, fh):
'''
Iterate through lines of iterator to csv file and pull out data for loading into STOQS
'''
ds = {}
DA = BaseType()
DA.attributes = {'units': 'ng ml-1 ' ,
'long_name': 'Domoic Acid',
'standard_name': 'domoic_acid',
'type': 'float',
'description': 'Domoic acid' ,
'origin': 'www.sccoos.org' }
PD = BaseType()
PD.attributes = {'units': 'cells l-1',
'long_name': 'Pseudo-nitzschia delicatissima group',
'standard_name': 'pseudo_nitzschia_delicatissima',
'name': 'pseudo_nitzschia_delicatissima' ,
'type': 'float' ,
'description': 'Pseudo-nitzschia delicatissima group (cells/L)' ,
'origin': 'www.sccoos.org'
}
PA = BaseType()
PA.attributes = {'units': 'cells l-1',
'long_name': 'Pseudo-nitzschia seriata group',
'standard_name': 'pseudo_nitzschia_seriata',
'name': 'pseudo_nitzschia_seriata' ,
'type': 'float' ,
'description': 'Pseudo-nitzschia seriata group (cells/L)' ,
'origin': 'www.sccoos.org'
}
alexandrium = BaseType()
alexandrium.attributes = {'units': 'cells l-1',
'long_name': 'Alexandrium',
'standard_name': 'alexandrium',
'name': 'alexandrium' ,
'type': 'float' ,
'description': 'Alexandrium spp. (cells/L)' ,
'origin': 'www.sccoos.org'
}
phosphate = BaseType()
phosphate.attributes = {'units': 'm-3 mol l-1',
'long_name': 'Phosphate',
'standard_name': 'phosphate_dissolved_in_seawater',
'name': 'Phosphate' ,
'type': 'float' ,
'description': 'Phosphate (uM)' ,
'origin': 'www.sccoos.org'
}
ammonia = BaseType()
ammonia.attributes = {'units': 'm-3 mol l-1',
'long_name': 'Ammonia',
'standard_name': 'ammonia_dissolved_in_seawater',
'name': 'ammonia_dissolved_in_sewater' ,
'type': 'float' ,
'description': 'Ammonia (uM)' ,
'origin': 'www.sccoos.org'
}
silicate = BaseType()
silicate.attributes = {'units': 'm-3 mol l-1',
'long_name': 'Silicate',
'standard_name': 'silicate_dissolved_in_seawater',
'name': 'silicate_dissolved_in_seawater' ,
'type': 'float' ,
'description': 'Silicate (uM)' ,
'origin': 'www.sccoos.org'
}
chlorophyll = BaseType()
chlorophyll.attributes = {'units': 'kg m-3',
'long_name': 'Chlorophyll',
'standard_name': 'mass_concentration_of_chlorophyll_in_sea_water',
'name': 'mass_concentration_of_chlorophyll_in_sea_water' ,
'type': 'float' ,
'description': 'Chlorophyll (kg/m3)' ,
'origin': 'www.sccoos.org'
}
prorocentrum = BaseType()
prorocentrum.attributes = {'units': 'cells l-1',
'long_name': 'Prorocentrum',
'standard_name': 'mass_concentration_of_prorocentrum_in_sea_water',
'name': 'mass_concentration_of_prorocentrum_in_sea_water' ,
'type': 'float' ,
'description': 'Prorocentrum spp. (cells/L)' ,
'origin': 'www.sccoos.org'
}
self.ds = { 'Domoic Acid (ng/mL)': DA, 'Pseudo-nitzschia seriata group (cells/L)': PA,
'Pseudo-nitzschia delicatissima group (cells/L)': PD,
'Phosphate (uM)': phosphate,
'Silicate (uM)': silicate, 'Ammonia (uM)': ammonia,
'Chlorophyll (mg/m3)': chlorophyll, 'Chlorophyll 1 (mg/m3)': chlorophyll,
'Chlorophyll 2 (mg/m3)': chlorophyll ,
'Alexandrium spp. (cells/L)': alexandrium
}
self.include_names = ['Pseudo-nitzschia seriata group (cells/L)',
'Pseudo-nitzschia delicatissima group (cells/L)',
'Domoic Acid (ng/mL)',
'Chlorophyll (mg/m3)', 'Chlorophyll 1 (mg/m3)', 'Chlorophyll 2 (mg/m3)',
'Prorocentrum spp. (cells/L)', 'Silicate (uM)', 'Ammonia (uM)',
'Nitrate (uM)', 'Phosphate (uM)',
'Alexandrium spp. (cells/L)']
self.initDB()
for pn in self.include_names:
self.parmCount[pn] = 0
        for line in fh:
            # Skip the quoted metadata lines; the first line that begins with
            # neither '"' nor ' ' holds the column titles, so parse it directly
            # and hand the remaining rows to csv.DictReader
            if not line.startswith('"') and not line.startswith(' '):
                titles = [t.strip() for t in line.split(',')]
                reader = csv.DictReader(fh, titles)
for r in reader:
year = int(r['year'])
month = int(r['month'])
day = int(r['day'])
time = r['time']
lat = float(r['latitude'])
lon = float(r['longitude'])
depth = float(r['depth (m)'])
location = r['location']
hours = int(time.split(':')[0])
mins = int(time.split(':')[1])
secs = int(time.split(':')[2])
parmNameValues = []
for name in self.ds.keys():
if name.startswith('Chlorophyll'):
parmNameValues.append((name, 1e-5*float(r[name])))
else:
parmNameValues.append((name, float(r[name])))
# Check to make sure all data from this file are from the same location.
# The program could be modified to read data in one file from multiple locations by reading data into a hash keyed by location name
# and then stepping through each key of the hash saving the data for each location into it's own activity. For now just require
# each data file to have data from just one location.
try:
if lat != lastlat or lon != lastlon:
logger.error("lat and lon are not the same for location = %s and lastlocation = %s. The input data should have just one location." % (location, lastlocation))
sys.exit(-1)
except NameError, e:
# Expected first time through when lastlon & lastlat don't yet exist
pass
# Load data
dt = datetime(year, month, day, hours, mins, secs)
self.load_measurement(lon, lat, depth, dt, parmNameValues)
# Load sample
bName = dt.isoformat()
self.load_sample(lon, lat, depth, dt, bName)
lastlat = lat
lastlon = lon
lastlocation = location
logger.info("Data load complete, %d records loaded.", self.loaded)
fh.close()
# Update the Activity with information we now have following the load
# Careful with the structure of this comment. It is parsed in views.py to give some useful links in showActivities()
newComment = "%d MeasuredParameters loaded. Loaded on %sZ" % (self.loaded, datetime.utcnow())
logger.info("runHABLoader(): Updating its comment with newComment = %s", newComment)
aName = location
num_updated = m.Activity.objects.using(self.dbAlias).filter(id = self.activity.id).update(
name = aName,
comment = newComment,
maptrack = None,
mappoint = 'POINT(%s %s)' % (lon, lat),
mindepth = self.mindepth,
maxdepth = self.maxdepth,
num_measuredparameters = self.loaded,
loaded_date = datetime.utcnow())
self.updateActivityParameterStats(self.parameterCount)
self.updateCampaignStartEnd()
def process(self, file):
'''
Insert a Sample record to the database for each location in csv file.
Assumes that *.csv file exists on the local filesystem
*.csv file look like:
"Project: Harmful Algal Blooms"
"Calibrated: No"
"Requested start: 2011-10-05 13:00:00"
"Requested end: 2012-11-28 21:03:00"
"Request time: Wed, 28 Nov 2012 21:03:31 +0000"
"Other Notes: All times provided in UTC"
year,month,day,time,latitude,longitude,depth (m),location,Domoic Acid (ng/mL),Pseudo-nitzschia delicatissima group (cells/L),Pseudo-nitzschia seriata group (cells/L)
2011,10,05,13:00:00,36.958,-122.017,0.0,Santa Cruz Wharf,0.92 ,NaN,47200.0000
2011,10,12,12:55:00,36.958,-122.017,0.0,Santa Cruz Wharf,0.06 ,NaN,0.0000
2011,10,19,13:09:00,36.958,-122.017,0.0,Santa Cruz Wharf,0.26 ,NaN,13450.0000
2011,10,26,14:10:00,36.958,-122.017,0.0,Santa Cruz Wharf,0.05 ,NaN,900.0000
'''
fh = open(file)
try:
self.process_csv_file(fh)
except SingleActivityNotFound:
logger.error('Invalid csv file %s', file)
exit(-1)
if __name__ == '__main__':
_debug = True
try:
file = sys.argv[1]
except IndexError:
logger.error('Must specify csv file as first argument')
exit(-1)
try:
dbAlias = sys.argv[2]
except IndexError:
dbAlias = 'stoqs_habs'
#datetime.now(pytz.utc)
campaignName = 'SCCOS HABS 2011-2012'
activityName = 'Sample'
activitytypeName = 'WaterAnalysis'
platformName = 'Pier'
platformColor = '11665e'
platformTypeName = 'pier'
start = datetime(2011, 01, 01)
end = datetime(2012,12,31)
    sl = HABLoader(activityName, platformName, dbAlias, campaignName,
                   activitytypeName, platformColor, platformTypeName, start, end)
sl.process(file)
| gpl-3.0 | 1,748,290,724,237,543,000 | 45.381139 | 187 | 0.543926 | false | 4.102172 | false | false | false |
Azure/azure-sdk-for-python | sdk/cognitiveservices/azure-cognitiveservices-personalizer/azure/cognitiveservices/personalizer/models/personalizer_error.py | 1 | 2146 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class PersonalizerError(Model):
"""The error object.
All required parameters must be populated in order to send to Azure.
:param code: Required. High level error code. Possible values include:
'BadRequest', 'ResourceNotFound', 'InternalServerError'
:type code: str or ~azure.cognitiveservices.personalizer.models.ErrorCode
:param message: Required. A message explaining the error reported by the
service.
:type message: str
:param target: Error source element.
:type target: str
:param details: An array of details about specific errors that led to this
reported error.
:type details:
list[~azure.cognitiveservices.personalizer.models.PersonalizerError]
:param inner_error: Finer error details.
:type inner_error:
~azure.cognitiveservices.personalizer.models.InternalError
"""
_validation = {
'code': {'required': True},
'message': {'required': True},
}
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'target': {'key': 'target', 'type': 'str'},
'details': {'key': 'details', 'type': '[PersonalizerError]'},
'inner_error': {'key': 'innerError', 'type': 'InternalError'},
}
def __init__(self, **kwargs):
super(PersonalizerError, self).__init__(**kwargs)
self.code = kwargs.get('code', None)
self.message = kwargs.get('message', None)
self.target = kwargs.get('target', None)
self.details = kwargs.get('details', None)
self.inner_error = kwargs.get('inner_error', None)
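# Usage sketch: like other msrest models, instances are built from keyword args:
#   err = PersonalizerError(code='BadRequest', message='The event ID is malformed')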
| mit | 9,055,293,772,262,861,000 | 37.321429 | 78 | 0.610904 | false | 4.224409 | false | false | false |
levelrf/level_basestation | grextras/python/extras_pmt.py | 1 | 2377 | # Copyright 2011-2012 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
import numpy
import extras_swig
#hacky way so we can import in-tree
try: import pmt
except ImportError: from gruel import pmt
for name in dir(extras_swig):
if 'pmt' in name:
setattr(pmt, name, getattr(extras_swig, name))
setattr(pmt, name.replace('ext_blob', 'blob'), getattr(extras_swig, name))
#this function knows how to convert an address to a numpy array
def __pointer_to_ndarray(addr, nitems):
dtype = numpy.dtype(numpy.uint8)
class array_like:
__array_interface__ = {
'data' : (int(addr), False),
'typestr' : dtype.base.str,
'descr' : dtype.base.descr,
'shape' : (nitems,) + dtype.shape,
'strides' : None,
'version' : 3
}
return numpy.asarray(array_like()).view(dtype.base)
#re-create the blob data functions, but yield a numpy array instead
def pmt_blob_data(blob):
return __pointer_to_ndarray(extras_swig.pmt_blob_rw_data(blob), extras_swig.pmt_blob_length(blob))
#re-create mgr acquire by calling into python GIL-safe version
class pmt_mgr:
def __init__(self): self._mgr = extras_swig.pmt_mgr()
def set(self, x): self._mgr.set(x)
def reset(self, x): self._mgr.reset(x)
def acquire(self, block = True): return extras_swig.pmt_mgr_acquire_safe(self._mgr, block)
#inject it into the pmt namespace
pmt.pmt_make_blob = extras_swig.pmt_make_blob
pmt.pmt_blob_data = pmt_blob_data #both call rw data, numpy isnt good with const void *
pmt.pmt_blob_rw_data = pmt_blob_data
pmt.pmt_blob_resize = extras_swig.pmt_blob_resize
pmt.pmt_mgr = pmt_mgr
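# Usage sketch (how the blob is first created depends on the swig build and is
# left out; only the calls patched in above are used):
#   mgr = pmt.pmt_mgr()
#   mgr.set(some_blob)              # hand a preallocated blob to the manager
#   blob = mgr.acquire(block=True)  # GIL-safe acquire
#   data = pmt.pmt_blob_data(blob)  # numpy uint8 view of the blob payload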
| gpl-3.0 | 6,004,448,685,671,345,000 | 37.33871 | 102 | 0.694994 | false | 3.381223 | false | false | false |
mikemcfarlane/Code_sprints | ThingSpeak/NAO_to_ThingSpeak.py | 1 | 1254 | """ A simple example to test if NAO can connect to thingspeak.com.
"""
from naoqi import ALProxy
import httplib, urllib
import time
# NAO_IP = "mistcalf.local"
NAO_IP = "192.168.0.13"
tts = ALProxy("ALTextToSpeech", NAO_IP, 9559)
def main():
tts.say("I'm going to connect to the internet now!")
for i in range (5):
params = urllib.urlencode({'field1': i, 'key':'9PBYIQ1RWXJ6XZBO'}) # use your API key generated in the thingspeak channels for the value of 'key'
headers = {"Content-typZZe": "application/x-www-form-urlencoded","Accept": "text/plain"}
conn = httplib.HTTPConnection("api.thingspeak.com:80")
try:
conn.request("POST", "/update", params, headers)
response = conn.getresponse()
            print i, params
print response.status, response.reason
data = response.read()
conn.close()
except:
print "connection failed"
tts.say("I sent some stuff to the internet!")
# If using ThingSpeak web service there is a 15s limit. Install locally or on own webserver for faster usage.
time.sleep(16)
tts.say("Yay, all sent!")
if __name__ == "__main__":
main() | gpl-2.0 | 467,417,381,829,688,060 | 32.026316 | 157 | 0.600478 | false | 3.645349 | false | false | false |
lengstrom/dotfiles | .emacs.d/elpa/anaconda-mode-20150626.441/anaconda_mode.py | 1 | 4228 | """
anaconda_mode
~~~~~~~~~~~~~
This is anaconda_mode autocompletion server.
:copyright: (c) 2013-2015 by Artem Malyshev.
:license: GPL3, see LICENSE for more details.
"""
from __future__ import (
absolute_import, unicode_literals, division, print_function)
import sys
from functools import wraps
from os.path import abspath, dirname
from pkg_resources import get_distribution, DistributionNotFound
from subprocess import Popen
project_path = dirname(abspath(__file__))
sys.path.insert(0, project_path)
missing_dependencies = []
try:
from jedi import Script, NotFoundError
except ImportError:
missing_dependencies.append('jedi')
try:
from service_factory import service_factory
except ImportError:
missing_dependencies.append('service_factory')
if missing_dependencies:
command = ['pip', 'install', '-t', project_path] + missing_dependencies
pip = Popen(command)
pip.communicate()
    assert pip.returncode == 0, 'PyPI installation failed.'
from jedi import Script, NotFoundError
from service_factory import service_factory
print('Python executable:', sys.executable)
for package in ['jedi', 'service_factory']:
try:
version = get_distribution(package).version
except DistributionNotFound:
print('Unable to find {package} version'.format(package=package))
else:
print('{package} version: {version}'.format(
package=package, version=version))
def script_method(f):
"""Create jedi.Script instance and apply f to it."""
@wraps(f)
def wrapper(source, line, column, path):
try:
return f(Script(source, line, column, path))
except NotFoundError:
return []
return wrapper
@script_method
def complete(script):
"""Select auto-complete candidates for source position."""
def first_line(text):
"""Return text first line."""
return text.strip().split('\n', 1)[0]
return [{'name': comp.name,
'doc': comp.docstring() or None,
'info': first_line(comp.docstring(raw=True)) or None,
'type': comp.type,
'path': comp.module_path or None,
'line': comp.line}
for comp in script.completions()]
@script_method
def doc(script):
"""Documentation for all definitions at point."""
docs = ['\n'.join([d.module_name + ' - ' + d.description,
'=' * 40,
d.docstring() or "- No docstring -"]).strip()
for d in script.goto_definitions()]
return ('\n' + '-' * 40 + '\n').join(docs)
def process_definitions(f):
@wraps(f)
def wrapper(script):
cache = {script.path: script.source.splitlines()}
def get_description(d):
if d.module_path not in cache:
with open(d.module_path, 'r') as file:
cache[d.module_path] = file.read().splitlines()
return cache[d.module_path][d.line - 1]
return [{'line': d.line,
'column': d.column,
'name': d.name,
'description': get_description(d),
'module': d.module_name,
'type': d.type,
'path': d.module_path}
for d in f(script) if not d.in_builtin_module()]
return wrapper
@script_method
@process_definitions
def goto_definitions(script):
return script.goto_definitions()
@script_method
@process_definitions
def goto_assignments(script):
return script.goto_assignments()
@script_method
@process_definitions
def usages(script):
return script.usages()
@script_method
def eldoc(script):
"""Return eldoc format documentation string or ''."""
signatures = script.call_signatures()
if len(signatures) == 1:
sgn = signatures[0]
return {
'name': sgn.name,
'index': sgn.index,
'params': [p.description for p in sgn.params]
}
return {}
app = [complete, doc, goto_definitions, goto_assignments, usages, eldoc]
if __name__ == '__main__':
host = sys.argv[1] if len(sys.argv) == 2 else '127.0.0.1'
service_factory(app, host, 'auto', 'anaconda_mode port {port}')
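# Example request (a sketch: JSON-RPC over HTTP POST, which is what
# service_factory serves; the positional params follow the
# (source, line, column, path) order enforced by script_method):
#   {"jsonrpc": "2.0", "id": 1, "method": "complete",
#    "params": ["import os\nos.pa", 2, 5, "example.py"]}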
| gpl-3.0 | -1,771,323,944,087,753,500 | 25.425 | 75 | 0.609508 | false | 3.929368 | false | false | false |
varapp/varapp-backend-py | tests/samples/test_samples.py | 2 | 2081 | #!/usr/bin/env python3
import unittest
from django.test.client import RequestFactory
from varapp.samples.samples_factory import *
from varapp.constants.tests import NSAMPLES
from varapp.samples.samples_service import samples_selection_from_request
class TestSample(unittest.TestCase):
def test_sample(self):
s = Sample('A', sample_id=0)
self.assertEqual(s.sample_id, 0)
self.assertEqual(s.name, 'A')
def test_expose(self):
s = Sample('A', sample_id=0)
self.assertIsInstance(s.expose(), dict)
self.assertEqual(s.expose()['name'], 'A')
class TestSamplesSelectionFactory(unittest.TestCase):
def test_sample_factory(self):
"""Convert one Django `Samples` object to a `Sample`."""
django_s = Samples.objects.using('test').all()[0]
model_s = sample_factory(django_s)
self.assertIsInstance(model_s, Sample)
def test_samples_list_from_db(self):
samples = samples_list_from_db(db='test')
self.assertIsInstance(samples, list)
self.assertIsInstance(samples[0], Sample)
def test_samples_selection_factory(self):
"""Create a `SamplesSelection` from the database content."""
ss = samples_selection_factory(db='test')
self.assertIsInstance(ss, SamplesSelection)
self.assertEqual(list.__len__(ss.samples), NSAMPLES)
def test_samples_selection_from_samples_list(self):
"""Create a SamplesSelection from a list of `Sample`s."""
samples = [Sample('a'), Sample('b')]
ss = SamplesSelection(samples)
self.assertEqual(len(ss.samples), 2)
def test_samples_selection_from_request(self):
"""Create a SamplesSelection from groups dessribed in URL."""
request = RequestFactory().get('', [('samples','group1=09818'),
('samples','group2=09819'), ('samples','group3=09960,09961')])
ss = samples_selection_from_request(request, db='test')
self.assertIsInstance(ss, SamplesSelection)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | -2,647,727,179,012,845,600 | 37.537037 | 106 | 0.649207 | false | 3.897004 | true | false | false |
ejyue/2.009PinkSpot | pysimpledmx.py | 1 | 2325 | # MAKE SURE THAT THIS IS IN SAME DIRECTORY AS MASTER PYTHON FILE
import serial, sys
START_VAL = 0x7E
END_VAL = 0xE7
COM_BAUD = 57600
COM_TIMEOUT = 1
COM_PORT = 7
DMX_SIZE = 512
LABELS = {
'GET_WIDGET_PARAMETERS' :3, #unused
'SET_WIDGET_PARAMETERS' :4, #unused
'RX_DMX_PACKET' :5, #unused
'TX_DMX_PACKET' :6,
'TX_RDM_PACKET_REQUEST' :7, #unused
'RX_DMX_ON_CHANGE' :8, #unused
}
class DMXConnection(object):
def __init__(self, comport = None):
'''
On Windows, the only argument is the port number. On *nix, it's the path to the serial device.
For example:
DMXConnection(4) # Windows
DMXConnection('/dev/tty2') # Linux
DMXConnection("/dev/ttyUSB0") # Linux
'''
    self.dmx_frame = [0] * (DMX_SIZE + 1)  # byte 0 is the DMX start code; channels occupy 1..DMX_SIZE
try:
self.com = serial.Serial(comport, baudrate = COM_BAUD, timeout = COM_TIMEOUT)
except:
com_name = 'COM%s' % (comport + 1) if type(comport) == int else comport
print "Could not open device %s. Quitting application." % com_name
sys.exit(0)
print "Opened %s." % (self.com.portstr)
def setChannel(self, chan, val, autorender = False):
'''
Takes channel and value arguments to set a channel level in the local
DMX frame, to be rendered the next time the render() method is called.
'''
if not 1 <= chan <= DMX_SIZE:
print 'Invalid channel specified: %s' % chan
return
# clamp value
val = max(0, min(val, 255))
self.dmx_frame[chan] = val
if autorender: self.render()
def clear(self, chan = 0):
'''
Clears all channels to zero. blackout.
With optional channel argument, clears only one channel.
'''
    if chan == 0:
      self.dmx_frame = [0] * (DMX_SIZE + 1)
    else:
      self.dmx_frame[chan] = 0  # index channels directly, matching setChannel
def render(self):
    '''
Updates the DMX output from the USB DMX Pro with the values from self.dmx_frame.
'''
packet = [
START_VAL,
LABELS['TX_DMX_PACKET'],
len(self.dmx_frame) & 0xFF,
(len(self.dmx_frame) >> 8) & 0xFF,
]
packet += self.dmx_frame
packet.append(END_VAL)
packet = map(chr, packet)
self.com.write(''.join(packet))
def close(self):
self.com.close()
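# Usage sketch (the serial device path is an assumption; substitute your own):
#   dmx = DMXConnection('/dev/ttyUSB0')
#   dmx.setChannel(1, 255)  # channel 1 to full intensity
#   dmx.render()            # push the local frame out to the widget
#   dmx.close()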
| mit | 132,626,769,375,859,730 | 26.678571 | 98 | 0.578065 | false | 3.202479 | false | false | false |
Serg09/socorro | socorro/external/postgresql/skiplist.py | 8 | 3829 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import logging
import psycopg2
from socorro.external import DatabaseError, MissingArgumentError
from socorro.external.postgresql.base import PostgreSQLBase
from socorro.lib import external_common
logger = logging.getLogger("webapi")
class SkipList(PostgreSQLBase):
filters = [
("category", None, ["str"]),
("rule", None, ["str"]),
]
def get(self, **kwargs):
params = external_common.parse_arguments(self.filters, kwargs)
sql_params = []
sql = """
/* socorro.external.postgresql.skiplist.SkipList.get */
SELECT category,
rule
FROM skiplist
WHERE 1=1
"""
        if params.category:
            sql += ' AND category=%s'
            sql_params.append(params.category)
        if params.rule:
            sql += ' AND rule=%s'
            sql_params.append(params.rule)
# Use `UPPER()` to make the sort case insensitive
# which makes it more user-friendly on the UI later
sql += """
ORDER BY UPPER(category), UPPER(rule)
"""
error_message = "Failed to retrieve skip list data from PostgreSQL"
sql_results = self.query(sql, sql_params, error_message=error_message)
results = [dict(zip(("category", "rule"), x)) for x in sql_results]
return {'hits': results, 'total': len(results)}
def post(self, **kwargs):
params = external_common.parse_arguments(self.filters, kwargs)
if not params.category:
raise MissingArgumentError('category')
if not params.rule:
raise MissingArgumentError('rule')
sql = """
/* socorro.external.postgresql.skiplist.SkipList.post */
INSERT INTO skiplist (category, rule)
VALUES (%s, %s);
"""
sql_params = [params.category, params.rule]
connection = self.database.connection()
try:
with connection.cursor() as cur:
cur.execute(sql, sql_params)
connection.commit()
except psycopg2.Error:
connection.rollback()
error_message = "Failed updating skip list in PostgreSQL"
logger.error(error_message)
raise DatabaseError(error_message)
finally:
connection.close()
return True
def delete(self, **kwargs):
params = external_common.parse_arguments(self.filters, kwargs)
if not params.category:
raise MissingArgumentError('category')
if not params.rule:
raise MissingArgumentError('rule')
sql_params = [params.category, params.rule]
count_sql = """
/* socorro.external.postgresql.skiplist.SkipList.delete */
SELECT COUNT(*) FROM skiplist
WHERE category=%s AND rule=%s
"""
sql = """
/* socorro.external.postgresql.skiplist.SkipList.delete */
DELETE FROM skiplist
WHERE category=%s AND rule=%s
"""
connection = self.database.connection()
try:
cur = connection.cursor()
count = self.count(count_sql, sql_params, connection=connection)
if not count:
return False
cur.execute(sql, sql_params)
connection.commit()
except psycopg2.Error:
connection.rollback()
error_message = "Failed delete skip list in PostgreSQL"
logger.error(error_message)
raise DatabaseError(error_message)
finally:
connection.close()
return True
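# Usage sketch (instantiation via the middleware's config object is assumed):
#   skiplist = SkipList(config=config)
#   skiplist.post(category='prefix', rule='arena_.*')
#   skiplist.get(category='prefix')
#   skiplist.delete(category='prefix', rule='arena_.*')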
| mpl-2.0 | -3,424,378,661,777,563,600 | 32.295652 | 78 | 0.583181 | false | 4.504706 | false | false | false |
z0rr0/eshop | shop/shop/cart.py | 1 | 2967 | from django.conf import settings
from django.shortcuts import get_object_or_404
from sales.models import Product
import logging
LOGGER = logging.getLogger(__name__)
class Cart(object):
"""Customers cart
It uses cookie NAME.
Cookie template is "product_id1::number1;product_id2::number2".
"""
NAME = "cart"
SEP_NUM, SEP_PR = '::', ';'
def __init__(self, request=None):
super(Cart, self).__init__()
self._request = request
self._storage = {}
def is_empty(self):
if self._request is None:
return True
try:
value = self._request.get_signed_cookie(
self.NAME,
None,
settings.COOKIE_SALT,
max_age=settings.COOKIE_EXPIRE
)
if not value:
return True
except KeyError:
return True
return False
def get(self, force=False):
if self._storage or self.is_empty():
return self._storage
try:
value = self._request.get_signed_cookie(
self.NAME,
None,
settings.COOKIE_SALT,
max_age=settings.COOKIE_EXPIRE
)
for pair in value.split(self.SEP_PR):
product_id, number = pair.split(self.SEP_NUM)
product = get_object_or_404(Product, pk=product_id)
self._storage[product] = number
except (KeyError, ValueError) as err:
LOGGER.error(err)
return self._storage
def set(self, response):
pairs = []
for product in self._storage:
pairs.append("{}{}{}".format(product.id, self.SEP_NUM, self._storage[product]))
value = self.SEP_PR.join(pairs)
response.set_signed_cookie(
self.NAME,
value,
settings.COOKIE_SALT,
max_age=settings.COOKIE_EXPIRE,
)
return response
def add_or_update(self, product, number, reset=False):
if reset:
products = {}
else:
products = self.get()
products[product] = number
self._storage = products
def delete(self, product):
products = self.get()
try:
products.pop(product)
except KeyError:
LOGGER.debug("ignore missing product")
self._storage = products
def count(self):
return len(self.get())
def total(self):
value = 0
products = self.get()
try:
for product in products:
value += product.price * int(products[product])
except ValueError:
return 0
return value
def has(self, product):
products = self.get()
if products.get(product):
return True
return False
def clean(self, response):
response.delete_cookie(self.NAME)
return response
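# Usage sketch (the view and template names are hypothetical):
#   cart = Cart(request)
#   cart.add_or_update(product, 2)
#   response = render(request, 'cart.html', {'total': cart.total()})
#   return cart.set(response)  # re-sign and persist the cookie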
| mit | -8,872,298,837,236,488,000 | 26.220183 | 91 | 0.532861 | false | 4.389053 | false | false | false |