| repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated |
|---|---|---|---|---|---|---|---|---|---|---|
| stringlengths 5–92 | stringlengths 4–232 | stringclasses 19 values | stringlengths 4–7 | stringlengths 721–1.04M | stringclasses 15 values | int64 -9,223,277,421,539,062,000–9,223,102,107B | float64 6.51–99.9 | int64 15–997 | float64 0.25–0.97 | bool 1 class |
NickrenREN/kubernetes | hack/verify-publishing-bot.py | 1 | 4023 |
#!/usr/bin/env python
# Copyright 2019 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import fnmatch
import os
import sys
import json
def get_gomod_dependencies(rootdir, components):
all_dependencies = {}
for component in components:
with open(os.path.join(rootdir, component, "go.mod")) as f:
print(component + " dependencies")
all_dependencies[component] = []
lines = list(set(f))
lines.sort()
for line in lines:
for dep in components:
if dep == component:
continue
if ("k8s.io/" + dep + " v0.0.0") not in line:
continue
print("\t"+dep)
if dep not in all_dependencies[component]:
all_dependencies[component].append(dep)
return all_dependencies
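# To make the substring test above concrete: a hypothetical staging go.mod
# (illustrative only, not taken from a real file) might contain requirement
# lines like the following, and the "k8s.io/<dep> v0.0.0" check is what
# picks them up:
#
#     require (
#         k8s.io/api v0.0.0
#         k8s.io/apimachinery v0.0.0
#     )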
def get_rules_dependencies(rules_file):
import yaml
with open(rules_file) as f:
data = yaml.load(f)
return data
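# For orientation, main() below assumes rules.yaml is shaped roughly like the
# hedged sketch that follows (reconstructed from the fields accessed in the
# loop, not copied from the real upstream file): each rule names a
# "destination" repo and a "branches" list whose master entry carries a
# source branch and optional dependencies.
#
#     rules:
#       - destination: api
#         branches:
#           - name: master
#             source:
#               branch: master
#             dependencies:
#               - repository: apimachinery
#                 branch: master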
def main():
rootdir = os.path.dirname(__file__) + "/../"
rootdir = os.path.abspath(rootdir)
components = []
for component in os.listdir(rootdir + '/staging/src/k8s.io/'):
components.append(component)
components.sort()
rules_file = "/staging/publishing/rules.yaml"
try:
import yaml
except ImportError:
print("Please install missing pyyaml module and re-run %s" % sys.argv[0])
sys.exit(1)
rules_dependencies = get_rules_dependencies(rootdir + rules_file)
gomod_dependencies = get_gomod_dependencies(rootdir + '/staging/src/k8s.io/', components)
processed_repos = []
for rule in rules_dependencies["rules"]:
branch = rule["branches"][0]
if branch["name"] != "master":
raise Exception("cannot find master branch for destination %s" % rule["destination"])
if branch["source"]["branch"] != "master":
raise Exception("cannot find master source branch for destination %s" % rule["destination"])
print("processing : %s" % rule["destination"])
if rule["destination"] not in gomod_dependencies:
raise Exception("missing go.mod for %s" % rule["destination"])
processed_repos.append(rule["destination"])
for dep in set(gomod_dependencies[rule["destination"]]):
found = False
if "dependencies" in branch:
for dep2 in branch["dependencies"]:
if dep2["branch"] != "master":
raise Exception("Looking for master branch and found : %s for destination", dep2,
rule["destination"])
if dep2["repository"] == dep:
found = True
else:
raise Exception(
"Please add %s as dependencies under destination %s in %s" % (gomod_dependencies[rule["destination"]], rule["destination"], rules_file))
if not found:
raise Exception("Please add %s as a dependency under destination %s in %s" % (dep, rule["destination"], rules_file))
else:
print(" found dependency %s" % dep)
items = set(gomod_dependencies.keys()) - set(processed_repos)
if len(items) > 0:
raise Exception("missing rules for %s" % ','.join(str(s) for s in items))
print("Done.")
if __name__ == "__main__":
sys.exit(main())
| apache-2.0 | -2,821,874,619,000,023,000 | 36.95283 | 156 | 0.594581 | false |
slek120/param_fit | plot.py | 1 | 2014 |
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
def plot(params):
# Extract parameters
t1 = 1.
t2, mu, x1, x2, epsf, V = params
# Format filename
filename = 't1_%.2f_t2_%.2f_mu_%.2f_x1_%.2f_x2_%.2f_epsf_%.2f_V_%.2f.png'%(t1,t2,mu,x1,x2,epsf,V)
# Define function to plot
def energy(k):
epsk = -2*t1*(np.cos(k[0])+np.cos(k[1]))-4*t2*np.cos(k[0])*np.cos(k[1])-mu
x0 = -2*x1*(np.cos(k[0])+np.cos(k[1]))-4*x2*np.cos(k[0])*np.cos(k[1])-epsf
det = np.sqrt(0.25*(epsk-x0)*(epsk-x0)+V*V)
return 0.5*(epsk+x0)-det, 0.5*(epsk+x0)+det
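# Restating what energy() computes, for clarity (same quantities as the code):
#   eps_c(k) = -2*t1*(cos kx + cos ky) - 4*t2*cos kx*cos ky - mu
#   eps_f(k) = -2*x1*(cos kx + cos ky) - 4*x2*cos kx*cos ky - epsf
#   E_-(k), E_+(k) = (eps_c + eps_f)/2 -/+ sqrt( ((eps_c - eps_f)/2)**2 + V**2 )
# i.e. the two eigenvalues of a 2x2 matrix with diagonal (eps_c, eps_f) and
# off-diagonal hybridization V; energy() returns (E_-, E_+).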
# Set steps
delta = 0.025
# Set bounds
x = np.arange(-3.14, 3.14, delta)
y = np.arange(-3.14, 3.14, delta)
# Set values
X, Y = np.meshgrid(x, y)
Z1, Z2 = energy([X,Y])
# Textbox
props = dict(boxstyle='round', facecolor='wheat', alpha=1)
textstr = (
'$t=%.2e$\n'
'$t\'=%.2e$\n'
'$\mu=%.2e$\n'
'$X_0=%.2e$\n'
'$X_0\'=%.2e$\n'
'$\epsilon_f=%.2e$\n'
'$V=%.2e$'
)%(t1,t2,mu,x1,x2,epsf,V)
# Heatmap
extent = [-3.14, 3.14, -3.14, 3.14]
plt.clf()
plt.text(-2.9, 2.9 , textstr, fontsize=14,
verticalalignment='top', bbox=props)
plt.imshow(Z1, extent=extent)
plt.savefig("Ea/heatmaps/"+filename)
plt.imshow(Z2, extent=extent)
plt.savefig("Eb/heatmaps/"+filename)
# Contour plots
plt.figure()
CS1 = plt.contour(X, Y, Z1)
plt.clabel(CS1, inline=1, fontsize=10)
plt.text(-2.9, 2.9 , textstr, fontsize=14,
verticalalignment='top', bbox=props)
plt.savefig("Ea/contours/"+filename)
plt.figure()
CS2 = plt.contour(X, Y, Z2)
plt.clabel(CS2, inline=1, fontsize=10)
plt.text(-2.9, 2.9 , textstr, fontsize=14,
verticalalignment='top', bbox=props)
plt.savefig("Eb/contours/"+filename)
# plot([2.29663137949, 0.00702885711482, 1.80269022673e-07, -7.64627539075e-08, -2.39035812789e-07, -0.000417686911034])
| mit | -8,066,709,127,629,451,000 | 28.202899 | 120 | 0.559086 | false |
fabsx00/chucky-old | tools/AttackSurfaceRanker.py | 1 | 7803 |
from tools.functionAnomaly.RankingEntry import RankingEntry
from tools.functionAnomaly.Ranker import Ranker
from sourceutils.misc.NameToDictMap import NameToDictMap
from mlutils.anomalyDetection.anomalyCalculator import AnomalyCalculator
import pickle
import numpy
import sys
def determineUncheckedIdentifiers():
pass
def calculateF(CAllSubtrees):
totalNumberOfChecks = numpy.sum((c for c in CAllSubtrees.d.itervalues()))
print 'Total number of checks: %d' % (totalNumberOfChecks)
# normalized F
F = CAllSubtrees.d
for k in F.keys():
F[k] = float(F[k]) / totalNumberOfChecks
# F[k] = 1
return F
def calculateCheckVectors(WFuncs, CFuncs, F, binary=True, alpha=1, weighByF = False):
WDict = NameToDictMap()
for (functionLocation, symbols) in WFuncs.d.iteritems():
if not functionLocation in CFuncs.d:
# The function does not contain any check,
# thus, projected onto the check-space, it's
# the NULL-vector
WDict.d[functionLocation] = {}
continue
CFunc = CFuncs.d[functionLocation]
for (s,occurrences) in symbols.iteritems():
if binary: occurrences = 1
if (not s in F):
# This symbol is never checked
WDict.setItem(s, functionLocation, 0)
elif (s in CFunc):
w = 1.0
if weighByF: w = F[s]
nChecks = CFunc[s]
if binary: nChecks = 1
WDict.setItem(s, functionLocation, (occurrences - alpha*nChecks)*w)
else:
w = 1.0
if weighByF: w = F[s]
WDict.setItem(s, functionLocation, occurrences*w)
return WDict
def relevancyWeighting(checkVectors, featureDir):
k = 20
termDocMatrix = pickle.load(file(featureDir + 'termDocMatrix.pickl'))
functionLocations = termDocMatrix.index2Doc
# it doesn't make much sense that we use euclidean distances here,
# should be L1, but I can't calculate L1 on the sparse matrices for now.
from scipy.spatial.distance import squareform
D = squareform(pickle.load(file(featureDir + 'D_euclidean.pickl')))
anomalyCalculator = AnomalyCalculator()
(NNV, NNI) = anomalyCalculator.calculateNearestNeighbours(k, D)
WDict = NameToDictMap()
for i in xrange(len(functionLocations)):
location = functionLocations[i]
if not location in checkVectors.d:
continue
WDict.d[location] = checkVectors.d[location]
indices = NNI[:,i]
gamma = float(numpy.sum(NNV[:,i]))/k
locations = [functionLocations[j] for j in indices]
V = [checkVectors.d[l] for l in locations if l in checkVectors.d]
distances = [NNV[j,i] for j in xrange(len(locations)) if locations[j] in checkVectors.d]
# len(V) may be unequal to k if at least one of the nearest neighbours has no checks.
# It is then a null-vector, so we're implicitly adding it in mean-calculation
meanVector = {}
for (v,d) in zip(V,distances):
for (name, score) in v.iteritems():
try:
meanVector[name] += (1-d)* (float(score)/k)
except KeyError:
meanVector[name] = (1-d)* (float(score)/k)
for (name, score) in checkVectors.d[location].iteritems():
if meanVector.has_key(name):
score -= meanVector[name]
if score < 0: score = 0
WDict.setItem(name, location, score)
return WDict
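# A toy illustration of the weighting above (hypothetical numbers, only to show
# the arithmetic): with k = 2 neighbours at distances d = 0.1 and 0.3 that both
# leave symbol "len" unchecked (score 1.0 each), the neighbour mean for "len"
# is (1-0.1)*(1.0/2) + (1-0.3)*(1.0/2) = 0.8, so a function whose own score for
# "len" is 1.0 keeps only 1.0 - 0.8 = 0.2 -- a missing check that neighbours
# also skip is discounted. If both neighbours do check "len" (score 0.0), the
# mean is 0 and the full score of 1.0 is kept.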
def scoresFromCheckVectors(checkVectors):
scores = []
for (functionLocation, symbols) in checkVectors.iteritems():
if len(symbols) == 0:
score = 0
else:
X = [s for s in symbols.itervalues()]
score = float(numpy.sum(X))
score /= len(X)
scores.append((score, functionLocation))
return scores
def main(projectRoot):
embedDir = projectRoot + 'embeddings/'
waterOnlyDir = embedDir + 'WaterOnly_1.pickl/'
identifiersInCondDir = embedDir + 'IdentifiersInConditions_1.pickl/'
apiUsageDir = embedDir + 'APISymbols_1.pickl/'
Wfunc2SubtreesFilename = waterOnlyDir + 'func2SubtreesMap.pickl'
Cfunc2SubtreesFilename = identifiersInCondDir + 'func2SubtreesMap.pickl'
CAllSubtreesFilename = identifiersInCondDir + 'allSubtreesDict.pickl'
CAllSubtrees = pickle.load(file(CAllSubtreesFilename))
CFuncs = pickle.load(file(Cfunc2SubtreesFilename))
WFuncs = pickle.load(file(Wfunc2SubtreesFilename))
if (len(WFuncs.d) < len(CFuncs.d)):
print 'Error'
print len(WFuncs.d)
F = calculateF(CAllSubtrees)
checkVectors = calculateCheckVectors(WFuncs, CFuncs, F)
# checkVectors = relevancyWeighting(checkVectors, apiUsageDir)
checkVectors = relevancyWeighting(checkVectors, waterOnlyDir)
ranking = scoresFromCheckVectors(checkVectors.d)
ranking.sort(reverse=True)
"""
ranking = []
for (functionLocation, symbols) in WFuncs.d.iteritems():
# number of _distinct_ symbols
nSymbols = numpy.sum([1 for v in symbols.itervalues()])
if not functionLocation in CFuncs.d:
CFunc = []
else:
CFunc = CFuncs.d[functionLocation]
score = 0.0
for (s,occurrences) in symbols.iteritems():
# This performs the projection onto the subspace
if not s in F:
# This is not a symbol ever used in a check
continue
occurrences = 1
score += occurrences * F[s]
if s in CFunc:
# symbol occurs in check
o = CFunc[s]
o = 1
score -= alpha*(o * F[s])
score /= nSymbols
ranking.append((score, functionLocation))
ranking.sort(reverse=True)
# Min-Max normalization of check-scores
checkScoreMax = numpy.max([r[0] for r in ranking])
checkScoreMin = numpy.min([r[0] for r in ranking])
ranking = [ ((r[0]- checkScoreMin)/(checkScoreMax - checkScoreMin),r[1]) for r in ranking]
termDocMatrix = pickle.load(file(waterOnlyDir + 'termDocMatrix.pickl'))
functionLocations = termDocMatrix.index2Doc
anomalyRanker = Ranker(waterOnlyDir)
anomalyRanker.loadDistanceMatrix()
anomalyScores = anomalyRanker.determineAnomaliesFromDistanceMatrix('gamma', 10)
# Min-Max normalization of anomaly-scores
anomalyScoresMax = numpy.max(anomalyScores)
anomalyScoresMin = numpy.min(anomalyScores)
anomalyScores = [(float(x) - anomalyScoresMin)/(anomalyScoresMax - anomalyScoresMin) for x in anomalyScores]
anomalyTuples = zip(anomalyScores, functionLocations)
anomalyDict = {}
for (score, location) in anomalyTuples:
anomalyDict[location] = score
beta = 0.15
combinedRanking = []
for (score, functionLocation) in ranking:
newScore = score
if anomalyDict.has_key(functionLocation):
anomalyScore = anomalyDict[functionLocation]
newScore = beta*score + (1-beta)*anomalyScore
combinedRanking.append((newScore, functionLocation))
"""
ranking = [RankingEntry(r[0], r[1]) for r in ranking]
pickle.dump(ranking, file(projectRoot + '../attackSurfaceRanking.pickl', 'w'))
if __name__ == '__main__':
import sys
projectRoot = sys.argv[1]
if projectRoot[-1] != '/': projectRoot += '/'
main(projectRoot)
| gpl-3.0 | 1,255,731,039,337,383,700 | 34.153153 | 112 | 0.609125 | false |
sdelaughter/SmartHome | v0.3/resources/bulb.py | 1 | 3442 |
# Samuel DeLaughter
# 5/8/15
from SimpleXMLRPCServer import SimpleXMLRPCServer
from threading import Thread
import logging
import time
import iot
class bulb(iot.device):
def __init__(self):
iot.device.__init__(self)
self.name = 'bulb'
self.state = 0
self.category = 'device'
self.last_on = time.time()
self.shutoff_interval = 300
#Set up logging
iot.setup_log(self.name, time.localtime())
#Register with the gateway
self.register()
'''
#Initialize and start daemon thread for serving as the clock synchronization leader
leader_thread=Thread(target=self.lead, name='Bulb Leader Thread')
leader_thread.daemon = True
leader_thread.start()
'''
#Initialize and start daemon thread to auotmatically shut off the bulb after a certain time interval
shutoff_thread=Thread(target=self.auto_shutoff, name='Bulb Auto-Shutoff Thread')
shutoff_thread.daemon=True
shutoff_thread.start()
#Start listening for requests
self.serve()
def serve(self):
self.server = SimpleXMLRPCServer((self.ip, self.port), logRequests=False, allow_none=True)
self.server.register_function(self.ping)
self.server.register_function(self.serve)
self.server.register_function(self.register)
self.server.register_function(self.timestamp)
#self.server.register_function(self.start_election)
#self.server.register_function(self.lead)
#self.server.register_function(self.get_time)
#self.server.register_function(self.set_time)
self.server.register_function(self.get_attr)
self.server.register_function(self.set_attr)
self.server.register_function(self.db_get_state)
self.server.register_function(self.db_get_history)
#self.server.register_function(self.set_leader)
self.server.register_function(self.device_by_name)
self.server.register_function(self.devices_by_name)
self.server.register_function(self.update_device_list)
self.server.register_function(self.auto_shutoff)
self.clock += 1
try:
print '\nStarting Server'
print 'Use Control-C to exit'
logging.info(str(self.clock) + ' | ' + str(self.timestamp()) + ': ' + 'Starting Server')
self.server.serve_forever()
except KeyboardInterrupt:
logging.info(str(self.clock) + ' | ' + str(self.timestamp()) + ': ' + 'Received keyboard interrupt, stopping server')
print 'Exiting'
def auto_shutoff(self):
while 1:
if self.state == 1:
duration = time.time() - self.last_on
if(duration >= self.shutoff_interval):
logging.info(str(self.clock) + ' | ' + str(self.timestamp()) + ': ' + 'Bulb has been on for ' + str(duration) + ' seconds - shutting off now')
print('Bulb has been on for ' + str(duration) + ' seconds - shutting off now')
self.state = 0
self.clock += 1
else:
time.sleep(self.shutoff_interval - duration)
else:
time.sleep(self.shutoff_interval)
def set_attr(self, c, attr, val):
self.update_clock(c)
if(attr == 'state'):
self.state = val
self.clock += 1
self.last_on = time.time()
self.clock += 1
self.db_put(self.name, val)
self.clock += 1
else:
setattr(self, attr, val)
self.clock += 1
logging.info(str(self.clock) + ' | ' + str(self.timestamp()) + ': ' + 'Attribute ' + str(attr) + ' was set to: ' + str(val))
print('Attribute ' + str(attr) + ' was set to: ' + str(val))
return self.clock
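# Client-side sketch (hedged; host and port are placeholders -- the real values
# are whatever iot.device picked when registering with the gateway):
#
#     import xmlrpclib
#     bulb = xmlrpclib.ServerProxy('http://<bulb-ip>:<bulb-port>')
#     new_clock = bulb.set_attr(0, 'state', 1)   # turn the bulb on; returns the device clock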
def main():
#Create a new instance of the bulb object
d = bulb()
if __name__ == '__main__':
main()
| gpl-2.0 | -1,670,638,216,775,200,500 | 30.587156 | 147 | 0.687101 | false |
MissionCriticalCloud/marvin | marvin/cloudstackAPI/createTags.py | 1 | 1232 |
"""Creates resource tag(s)"""
from baseCmd import *
from baseResponse import *
class createTagsCmd (baseCmd):
typeInfo = {}
def __init__(self):
self.isAsync = "true"
"""list of resources to create the tags for"""
"""Required"""
self.resourceids = []
self.typeInfo['resourceids'] = 'list'
"""type of the resource"""
"""Required"""
self.resourcetype = None
self.typeInfo['resourcetype'] = 'string'
"""Map of tags (key/value pairs)"""
"""Required"""
self.tags = []
self.typeInfo['tags'] = 'map'
"""identifies client specific tag. When the value is not null, the tag can't be used by cloudStack code internally"""
self.customer = None
self.typeInfo['customer'] = 'string'
self.required = ["resourceids", "resourcetype", "tags", ]
class createTagsResponse (baseResponse):
typeInfo = {}
def __init__(self):
"""any text associated with the success or failure"""
self.displaytext = None
self.typeInfo['displaytext'] = 'string'
"""true if operation is executed successfully"""
self.success = None
self.typeInfo['success'] = 'boolean'
| apache-2.0 | 7,633,278,320,166,446,000 | 30.589744 | 125 | 0.586851 | false |
Penguin7z/docx | settings.py | 1 | 9271 |
# -*- coding: utf-8 -*-
"""
eve-demo settings
~~~~~~~~~~~~~~~~~
Settings file for our little demo.
PLEASE NOTE: We don't need to create the two collections in MongoDB.
Actually, we don't even need to create the database: GET requests on an
empty/non-existant DB will be served correctly ('200' OK with an empty
collection); DELETE/PATCH will receive appropriate responses ('404' Not
Found), and POST requests will create database and collections when needed.
Keep in mind however that such an auto-managed database will most likely
perform poorly since it lacks any sort of optimized index.
:copyright: (c) 2016 by Nicola Iarocci.
:license: BSD, see LICENSE for more details.
"""
# We want to seamlessly run our API both locally and on Heroku. If running on
# Heroku, sensible DB connection settings are stored in environment variables.
MONGO_HOST = "localhost"
MONGO_PORT = 27017
MONGO_USERNAME = ''
MONGO_PASSWORD = ''
MONGO_DBNAME = 'docx'
URL_PREFIX = 'api'
API_VERSION = 'v1'
# X_DOMAINS = ['http://localhost:8000', # The domain where Swagger UI is running
# 'http://editor.swagger.io',
# 'http://petstore.swagger.io']
X_HEADERS = ['Content-Type', 'If-Match'] # Needed for the "Try it out" buttons
# Enable reads (GET), inserts (POST) and DELETE for resources/collections
# (if you omit this line, the API will default to ['GET'] and provide
# read-only access to the endpoint).
RESOURCE_METHODS = ['GET', 'POST']
# Enable reads (GET), edits (PATCH) and deletes of individual items
# (defaults to read-only item access).
ITEM_METHODS = ['GET', 'PATCH', 'DELETE']
# We enable standard client cache directives for all resources exposed by the
# API. We can always override these global settings later.
CACHE_CONTROL = 'max-age=20'
CACHE_EXPIRES = 20
# Our API will expose two resources (MongoDB collections): 'people' and
# 'works'. In order to allow for proper data validation, we define behaviour
# and structure.
people = {
# 'title' tag used in item links.
'item_title': 'person',
# by default the standard item entry point is defined as
# '/people/<ObjectId>/'. We leave it untouched, and we also enable an
# additional read-only entry point. This way consumers can also perform GET
# requests at '/people/<lastname>/'.
'additional_lookup': {
'url': 'regex("[\w]+")',
'field': 'lastname'
},
# Schema definition, based on Cerberus grammar. Check the Cerberus project
# (https://github.com/pyeve/cerberus) for details.
'schema': {
'firstname': {
'type': 'string',
'minlength': 1,
'maxlength': 10,
},
'lastname': {
'type': 'string',
'minlength': 1,
'maxlength': 15,
'required': True,
# talk about hard constraints! For the purpose of the demo
# 'lastname' is an API entry-point, so we need it to be unique.
'unique': True,
},
# 'role' is a list, and can only contain values from 'allowed'.
'role': {
'type': 'list',
'allowed': ["author", "contributor", "copy"],
},
# An embedded 'strongly-typed' dictionary.
'location': {
'type': 'dict',
'schema': {
'address': {'type': 'string'},
'city': {'type': 'string'}
},
},
'born': {
'type': 'datetime',
},
}
}
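# For illustration, a document that would validate against the 'people' schema
# above (all values are made up):
#
# {
#     "firstname": "John",
#     "lastname": "Doe",
#     "role": ["author"],
#     "location": {"address": "Elm Street 1", "city": "Springfield"},
#     "born": "Thu, 27 Aug 1970 14:37:13 GMT"
# }
#
# POSTed to /api/v1/people it creates the person, which can then also be read
# back at /api/v1/people/Doe thanks to the 'additional_lookup' on 'lastname'.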
works = {
# if 'item_title' is not provided Eve will just strip the final
# 's' from resource name, and use it as the item_title.
#'item_title': 'work',
# We choose to override global cache-control directives for this resource.
'cache_control': 'max-age=10,must-revalidate',
'cache_expires': 10,
'schema': {
'title': {
'type': 'string',
'required': True,
},
'description': {
'type': 'string',
},
'owner': {
'type': 'objectid',
'required': True,
# referential integrity constraint: value must exist in the
# 'people' collection. Since we aren't declaring a 'field' key,
# will default to `people._id` (or, more precisely, to whatever
# ID_FIELD value is).
'data_relation': {
'resource': 'people',
# make the owner embeddable with ?embedded={"owner":1}
'embeddable': True
},
},
}
}
# Module table
module_x = {
'cache_control': 'max-age=10,must-revalidate',
'cache_expires': 10,
'schema': {
# Module name
'name': {
'type': 'string',
'required': True,
},
}
}
# Field table
field_type = {
'cache_control': 'max-age=10,must-revalidate',
'cache_expires': 10,
'schema': {
# Field name
'name': {
'type': 'string',
'required': True,
},
# Field alias
'alias': {
'type': 'string',
'required': True,
},
}
}
# API table
api_x = {
'cache_control': 'max-age=10,must-revalidate',
'cache_expires': 10,
'schema': {
'name': {
'type': 'string',
'required': True,
},
'alias': {
'type': 'string',
'required': True,
},
}
}
# Input parameter table
api_in_x = {
'cache_control': 'max-age=10,must-revalidate',
'cache_expires': 10,
'schema': {
# API id
'api_id': {
'type': 'objectid',
'required': True,
'data_relation': {
'resource': 'api_x',
'embeddable': True
},
},
# Field type id
'filed_type_id': {
'type': 'objectid',
'required': True,
'data_relation': {
'resource': 'field_type',
'embeddable': True
},
},
# Field name
'name': {
'type': 'string',
'required': True,
},
# Field description
'desc': {
'type': 'string',
'required': True,
},
# Additional information
'memo': {
'type': 'string',
},
# Minimum value
'min_v': {
'type': 'number',
},
# Maximum value
'max_v': {
'type': 'number',
},
# Minimum length
'min_len': {
'type': 'number',
},
# Maximum length
'max_len': {
'type': 'number',
},
# Whether required
'is_required': {
'type': 'string',
},
# Whether unique
'is_unique': {
'type': 'string',
},
# Allowed value range
'allow_list': {
'type': 'list',
},
# Regular expression
'regex_str': {
'type': 'string',
},
# Author
'author': {
'type': 'string',
},
# Version
'version': {
'type': 'string',
},
}
}
# Output parameter table
api_out_x = {
'cache_control': 'max-age=10,must-revalidate',
'cache_expires': 10,
'schema': {
# API id
'api_id': {
'type': 'objectid',
'required': True,
'data_relation': {
'resource': 'api_x',
'embeddable': True
},
},
# Field type id
'filed_type_id': {
'type': 'objectid',
'required': True,
'data_relation': {
'resource': 'field_type',
'embeddable': True
},
},
# Field name
'name': {
'type': 'string',
'required': True,
},
# Field description
'desc': {
'type': 'string',
'required': True,
},
# Additional information
'memo': {
'type': 'string',
},
# Minimum value
'min_v': {
'type': 'number',
},
# Maximum value
'max_v': {
'type': 'number',
},
# Minimum length
'min_len': {
'type': 'number',
},
# Maximum length
'max_len': {
'type': 'number',
},
# Whether required
'is_required': {
'type': 'string',
},
# Whether unique
'is_unique': {
'type': 'string',
},
# Allowed value range
'allow_list': {
'type': 'list',
},
# Regular expression
'regex_str': {
'type': 'string',
},
# Author
'author': {
'type': 'string',
},
# Version
'version': {
'type': 'string',
},
}
}
# The DOMAIN dict explains which resources will be available and how they will
# be accessible to the API consumer.
DOMAIN = {
'people': people,
'works': works,
'module_x': module_x,
'field_type': field_type,
'api_x': api_x,
'api_in_x': api_in_x,
'api_out_x': api_out_x,
}
| mit | 4,056,091,271,714,653,000 | 23.947368 | 81 | 0.466408 | false |
LitleCo/litle-sdk-for-python | litleSdkPythonTest/unit/TestCreateFromDom.py | 1 | 4937 |
#Copyright (c) 2017 Vantiv eCommerce
#
#Permission is hereby granted, free of charge, to any person
#obtaining a copy of this software and associated documentation
#files (the "Software"), to deal in the Software without
#restriction, including without limitation the rights to use,
#copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the
#Software is furnished to do so, subject to the following
#conditions:
#
#The above copyright notice and this permission notice shall be
#included in all copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
#EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
#OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
#NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
#HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
#WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
#FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
#OTHER DEALINGS IN THE SOFTWARE.
import os, sys
lib_path = os.path.abspath('../all')
sys.path.append(lib_path)
from SetupTest import *
import unittest
class TestCreateFromDom(unittest.TestCase):
def testSimpleExtraField(self):
xml_text = "<litleOnlineResponse version='8.13' response='0' message='Valid Format' \
xmlns='http://www.litle.com/schema'><captureGivenAuthResponse id='' \
reportGroup='DefaultReportGroup' customerId=''><litleTxnId>057484783403434000</litleTxnId>\
<orderId>12344</orderId><response>000</response><responseTime>2012-06-05T16:36:39</responseTime>\
<message>Approved</message><authCode>83307</authCode></captureGivenAuthResponse>\
</litleOnlineResponse>"
xml_object = litleXmlFields.CreateFromDocument(xml_text)
self.assertEquals("Approved", xml_object.transactionResponse.message)
def test_simpleExtraFieldEmbeddedExtraField(self):
xml_text = "<litleOnlineResponse version='8.13' response='0' message='Valid Format' \
xmlns='http://www.litle.com/schema'><captureGivenAuthResponse id='' \
reportGroup='DefaultReportGroup' customerId=''><litleTxnId>057484783403434000</litleTxnId>\
<orderId>12344</orderId><response>000</response><responseTime>2012-06-05T16:36:39</responseTime>\
<message>Approved</message><authCode><extraField>extra</extraField></authCode>\
</captureGivenAuthResponse></litleOnlineResponse>"
xml_object = litleXmlFields.CreateFromDocument(xml_text)
self.assertEquals("Approved", xml_object.transactionResponse.message)
def test_simple_EmbeddedField(self):
xml_text = "<litleOnlineResponse version='8.13' response='0' message='Valid Format' \
xmlns='http://www.litle.com/schema'><authorizationResponse id='' reportGroup='DefaultReportGroup' \
customerId=''><litleTxnId>057484783403434000</litleTxnId><orderId>12344</orderId>\
<response>000</response><responseTime>2012-06-05T16:36:39</responseTime><message>Approved</message>\
<tokenResponse><litleToken>4242424242424242</litleToken><tokenResponseCode>111</tokenResponseCode>\
<tokenMessage>Message</tokenMessage><bin>bin</bin></tokenResponse></authorizationResponse>\
</litleOnlineResponse>"
xml_object = litleXmlFields.CreateFromDocument(xml_text)
self.assertEquals("bin", xml_object.transactionResponse.tokenResponse.bin)
self.assertEquals("Message", xml_object.transactionResponse.tokenResponse.tokenMessage)
def test_simple_ExtraEmbeddedField(self):
xml_text = "<litleOnlineResponse version='8.13' response='0' message='Valid Format' \
xmlns='http://www.litle.com/schema'><authorizationResponse id='' reportGroup='DefaultReportGroup' \
customerId=''><litleTxnId>057484783403434000</litleTxnId><orderId>12344</orderId><response>000</response>\
<responseTime>2012-06-05T16:36:39</responseTime><message>Approved</message><tokenResponse>\
<litleToken>4242424242424242</litleToken><tokenResponseCode>111</tokenResponseCode>\
<tokenMessage>Message</tokenMessage><bin>bin</bin><extra>extra</extra></tokenResponse></authorizationResponse>\
</litleOnlineResponse>"
xml_object = litleXmlFields.CreateFromDocument(xml_text)
self.assertEquals("bin", xml_object.transactionResponse.tokenResponse.bin)
self.assertEquals("Message", xml_object.transactionResponse.tokenResponse.tokenMessage)
def suite():
suite = unittest.TestSuite()
suite = unittest.TestLoader().loadTestsFromTestCase(TestCreateFromDom)
return suite
if __name__ =='__main__':
unittest.main()
| mit | 1,783,835,340,079,371,500 | 58.493976 | 120 | 0.710553 | false |
Z-Tool/ztool-backhend-mongo | app/models.py | 1 | 2961 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# author: Kun Jia
# date: 2/23/17
# email: [email protected]
from datetime import datetime
from bson import ObjectId
from flask import current_app
from flask_login import UserMixin, AnonymousUserMixin
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from werkzeug.security import generate_password_hash, check_password_hash
from app import db
from app import login_manager
class BaseDocument(db.Document):
created_at = db.DateTimeField(verbose_name='create_time', required=True)
updated_at = db.DateTimeField(verbose_name='update_time', required=True)
meta = {'abstract': True}
def save(self, *args, **kwargs):
if not self.created_at:
self.created_at = datetime.now()
self.updated_at = datetime.now()
return super(BaseDocument, self).save(*args, **kwargs)
class Jalpc_pv_count(BaseDocument):
count = db.IntField(verbose_name='pv_count', required=True)
@staticmethod
def init_db(count=1):
s = Jalpc_pv_count(count=count)
s.save()
return s
@staticmethod
def access():
if Jalpc_pv_count.objects.all():
s = Jalpc_pv_count.objects.all()[0]
s.count += 1
s.save()
return s.count
else:
s = Jalpc_pv_count.init_db(295500)
return s.count
class Hacker_news_cache(BaseDocument):
stype = db.StringField(verbose_name='cache_type', required=True)
data_list = db.DictField(verbose_name='data_list', required=True)
data_content = db.DictField(verbose_name='data_content', required=True)
class User(UserMixin, BaseDocument):
email = db.EmailField(verbose_name='email', required=True)
username = db.StringField(verbose_name='username', required=True)
password_hash = db.StringField(verbose_name='password', required=True)
@property
def password(self):
raise AttributeError('password is not a readable attribute')
@password.setter
def password(self, password):
self.password_hash = generate_password_hash(password)
def verify_password(self, password):
return check_password_hash(self.password_hash, password)
def generate_auth_token(self, expiration):
s = Serializer(current_app.config['SECRET_KEY'], expires_in=expiration)
return s.dumps({'id': str(self.id)})
@staticmethod
def verify_auth_token(token):
s = Serializer(current_app.config['SECRET_KEY'])
try:
data = s.loads(token)
except:
return None
return User.objects.get_or_404(id=ObjectId(data['id']))
def __repr__(self):
return '<User %r>' % self.username
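# A minimal sketch of the token round trip defined above (assumes a Flask
# application context so current_app.config['SECRET_KEY'] is available):
#
#     user = User(email='alice@example.com', username='alice')
#     user.password = 'secret'                 # hashed by the property setter
#     user.save()
#     token = user.generate_auth_token(3600)
#     same_user = User.verify_auth_token(token)   # None if the token is invalid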
class AnonymousUser(AnonymousUserMixin):
def __init__(self):
pass
login_manager.anonymous_user = AnonymousUser
@login_manager.user_loader
def load_user(user_id):
return User.objects.get_or_404(id=ObjectId(user_id))
| mit | 7,437,980,100,336,140,000 | 28.61 | 79 | 0.66464 | false |
sillydan1/WhatEverEngine | openglcsharp/Lib/xml/etree/ElementTree.py | 1 | 56165 |
#
# ElementTree
# $Id: ElementTree.py 3440 2008-07-18 14:45:01Z fredrik $
#
# light-weight XML support for Python 2.3 and later.
#
# history (since 1.2.6):
# 2005-11-12 fl added tostringlist/fromstringlist helpers
# 2006-07-05 fl merged in selected changes from the 1.3 sandbox
# 2006-07-05 fl removed support for 2.1 and earlier
# 2007-06-21 fl added deprecation/future warnings
# 2007-08-25 fl added doctype hook, added parser version attribute etc
# 2007-08-26 fl added new serializer code (better namespace handling, etc)
# 2007-08-27 fl warn for broken /tag searches on tree level
# 2007-09-02 fl added html/text methods to serializer (experimental)
# 2007-09-05 fl added method argument to tostring/tostringlist
# 2007-09-06 fl improved error handling
# 2007-09-13 fl added itertext, iterfind; assorted cleanups
# 2007-12-15 fl added C14N hooks, copy method (experimental)
#
# Copyright (c) 1999-2008 by Fredrik Lundh. All rights reserved.
#
# [email protected]
# http://www.pythonware.com
#
# --------------------------------------------------------------------
# The ElementTree toolkit is
#
# Copyright (c) 1999-2008 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
# Licensed to PSF under a Contributor Agreement.
# See http://www.python.org/psf/license for licensing details.
__all__ = [
# public symbols
"Comment",
"dump",
"Element", "ElementTree",
"fromstring", "fromstringlist",
"iselement", "iterparse",
"parse", "ParseError",
"PI", "ProcessingInstruction",
"QName",
"SubElement",
"tostring", "tostringlist",
"TreeBuilder",
"VERSION",
"XML",
"XMLParser", "XMLTreeBuilder",
]
VERSION = "1.3.0"
##
# The <b>Element</b> type is a flexible container object, designed to
# store hierarchical data structures in memory. The type can be
# described as a cross between a list and a dictionary.
# <p>
# Each element has a number of properties associated with it:
# <ul>
# <li>a <i>tag</i>. This is a string identifying what kind of data
# this element represents (the element type, in other words).</li>
# <li>a number of <i>attributes</i>, stored in a Python dictionary.</li>
# <li>a <i>text</i> string.</li>
# <li>an optional <i>tail</i> string.</li>
# <li>a number of <i>child elements</i>, stored in a Python sequence</li>
# </ul>
#
# To create an element instance, use the {@link #Element} constructor
# or the {@link #SubElement} factory function.
# <p>
# The {@link #ElementTree} class can be used to wrap an element
# structure, and convert it from and to XML.
##
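# A short usage sketch of the interface described above (illustration only,
# not part of the original module):
#
#     root = Element("catalog")
#     item = SubElement(root, "item", {"id": "1"})
#     item.text = "hello"
#     ElementTree(root).write("catalog.xml")
#     # -> <catalog><item id="1">hello</item></catalog>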
import sys
import re
import warnings
class _SimpleElementPath(object):
# emulate pre-1.2 find/findtext/findall behaviour
def find(self, element, tag, namespaces=None):
for elem in element:
if elem.tag == tag:
return elem
return None
def findtext(self, element, tag, default=None, namespaces=None):
elem = self.find(element, tag)
if elem is None:
return default
return elem.text or ""
def iterfind(self, element, tag, namespaces=None):
if tag[:3] == ".//":
for elem in element.iter(tag[3:]):
yield elem
for elem in element:
if elem.tag == tag:
yield elem
def findall(self, element, tag, namespaces=None):
return list(self.iterfind(element, tag, namespaces))
try:
from . import ElementPath
except ImportError:
ElementPath = _SimpleElementPath()
##
# Parser error. This is a subclass of <b>SyntaxError</b>.
# <p>
# In addition to the exception value, an exception instance contains a
# specific exception code in the <b>code</b> attribute, and the line and
# column of the error in the <b>position</b> attribute.
class ParseError(SyntaxError):
pass
# --------------------------------------------------------------------
##
# Checks if an object appears to be a valid element object.
#
# @param An element instance.
# @return A true value if this is an element object.
# @defreturn flag
def iselement(element):
# FIXME: not sure about this; might be a better idea to look
# for tag/attrib/text attributes
return isinstance(element, Element) or hasattr(element, "tag")
##
# Element class. This class defines the Element interface, and
# provides a reference implementation of this interface.
# <p>
# The element name, attribute names, and attribute values can be
# either ASCII strings (ordinary Python strings containing only 7-bit
# ASCII characters) or Unicode strings.
#
# @param tag The element name.
# @param attrib An optional dictionary, containing element attributes.
# @param **extra Additional attributes, given as keyword arguments.
# @see Element
# @see SubElement
# @see Comment
# @see ProcessingInstruction
class Element(object):
# <tag attrib>text<child/>...</tag>tail
##
# (Attribute) Element tag.
tag = None
##
# (Attribute) Element attribute dictionary. Where possible, use
# {@link #Element.get},
# {@link #Element.set},
# {@link #Element.keys}, and
# {@link #Element.items} to access
# element attributes.
attrib = None
##
# (Attribute) Text before first subelement. This is either a
# string or the value None. Note that if there was no text, this
# attribute may be either None or an empty string, depending on
# the parser.
text = None
##
# (Attribute) Text after this element's end tag, but before the
# next sibling element's start tag. This is either a string or
# the value None. Note that if there was no text, this attribute
# may be either None or an empty string, depending on the parser.
tail = None # text after end tag, if any
# constructor
def __init__(self, tag, attrib={}, **extra):
attrib = attrib.copy()
attrib.update(extra)
self.tag = tag
self.attrib = attrib
self._children = []
def __repr__(self):
return "<Element %s at 0x%x>" % (repr(self.tag), id(self))
##
# Creates a new element object of the same type as this element.
#
# @param tag Element tag.
# @param attrib Element attributes, given as a dictionary.
# @return A new element instance.
def makeelement(self, tag, attrib):
return self.__class__(tag, attrib)
##
# (Experimental) Copies the current element. This creates a
# shallow copy; subelements will be shared with the original tree.
#
# @return A new element instance.
def copy(self):
elem = self.makeelement(self.tag, self.attrib)
elem.text = self.text
elem.tail = self.tail
elem[:] = self
return elem
##
# Returns the number of subelements. Note that this only counts
# full elements; to check if there's any content in an element, you
# have to check both the length and the <b>text</b> attribute.
#
# @return The number of subelements.
def __len__(self):
return len(self._children)
def __nonzero__(self):
warnings.warn(
"The behavior of this method will change in future versions. "
"Use specific 'len(elem)' or 'elem is not None' test instead.",
FutureWarning, stacklevel=2
)
return len(self._children) != 0 # emulate old behaviour, for now
##
# Returns the given subelement, by index.
#
# @param index What subelement to return.
# @return The given subelement.
# @exception IndexError If the given element does not exist.
def __getitem__(self, index):
return self._children[index]
##
# Replaces the given subelement, by index.
#
# @param index What subelement to replace.
# @param element The new element value.
# @exception IndexError If the given element does not exist.
def __setitem__(self, index, element):
# if isinstance(index, slice):
# for elt in element:
# assert iselement(elt)
# else:
# assert iselement(element)
self._children[index] = element
##
# Deletes the given subelement, by index.
#
# @param index What subelement to delete.
# @exception IndexError If the given element does not exist.
def __delitem__(self, index):
del self._children[index]
##
# Adds a subelement to the end of this element. In document order,
# the new element will appear after the last existing subelement (or
# directly after the text, if it's the first subelement), but before
# the end tag for this element.
#
# @param element The element to add.
def append(self, element):
# assert iselement(element)
self._children.append(element)
##
# Appends subelements from a sequence.
#
# @param elements A sequence object with zero or more elements.
# @since 1.3
def extend(self, elements):
# for element in elements:
# assert iselement(element)
self._children.extend(elements)
##
# Inserts a subelement at the given position in this element.
#
# @param index Where to insert the new subelement.
def insert(self, index, element):
# assert iselement(element)
self._children.insert(index, element)
##
# Removes a matching subelement. Unlike the <b>find</b> methods,
# this method compares elements based on identity, not on tag
# value or contents. To remove subelements by other means, the
# easiest way is often to use a list comprehension to select what
# elements to keep, and use slice assignment to update the parent
# element.
#
# @param element What element to remove.
# @exception ValueError If a matching element could not be found.
def remove(self, element):
# assert iselement(element)
self._children.remove(element)
##
# (Deprecated) Returns all subelements. The elements are returned
# in document order.
#
# @return A list of subelements.
# @defreturn list of Element instances
def getchildren(self):
warnings.warn(
"This method will be removed in future versions. "
"Use 'list(elem)' or iteration over elem instead.",
DeprecationWarning, stacklevel=2
)
return self._children
##
# Finds the first matching subelement, by tag name or path.
#
# @param path What element to look for.
# @keyparam namespaces Optional namespace prefix map.
# @return The first matching element, or None if no element was found.
# @defreturn Element or None
def find(self, path, namespaces=None):
return ElementPath.find(self, path, namespaces)
##
# Finds text for the first matching subelement, by tag name or path.
#
# @param path What element to look for.
# @param default What to return if the element was not found.
# @keyparam namespaces Optional namespace prefix map.
# @return The text content of the first matching element, or the
# default value no element was found. Note that if the element
# is found, but has no text content, this method returns an
# empty string.
# @defreturn string
def findtext(self, path, default=None, namespaces=None):
return ElementPath.findtext(self, path, default, namespaces)
##
# Finds all matching subelements, by tag name or path.
#
# @param path What element to look for.
# @keyparam namespaces Optional namespace prefix map.
# @return A list or other sequence containing all matching elements,
# in document order.
# @defreturn list of Element instances
def findall(self, path, namespaces=None):
return ElementPath.findall(self, path, namespaces)
##
# Finds all matching subelements, by tag name or path.
#
# @param path What element to look for.
# @keyparam namespaces Optional namespace prefix map.
# @return An iterator or sequence containing all matching elements,
# in document order.
# @defreturn a generated sequence of Element instances
def iterfind(self, path, namespaces=None):
return ElementPath.iterfind(self, path, namespaces)
##
# Resets an element. This function removes all subelements, clears
# all attributes, and sets the <b>text</b> and <b>tail</b> attributes
# to None.
def clear(self):
self.attrib.clear()
self._children = []
self.text = self.tail = None
##
# Gets an element attribute. Equivalent to <b>attrib.get</b>, but
# some implementations may handle this a bit more efficiently.
#
# @param key What attribute to look for.
# @param default What to return if the attribute was not found.
# @return The attribute value, or the default value, if the
# attribute was not found.
# @defreturn string or None
def get(self, key, default=None):
return self.attrib.get(key, default)
##
# Sets an element attribute. Equivalent to <b>attrib[key] = value</b>,
# but some implementations may handle this a bit more efficiently.
#
# @param key What attribute to set.
# @param value The attribute value.
def set(self, key, value):
self.attrib[key] = value
##
# Gets a list of attribute names. The names are returned in an
# arbitrary order (just like for an ordinary Python dictionary).
# Equivalent to <b>attrib.keys()</b>.
#
# @return A list of element attribute names.
# @defreturn list of strings
def keys(self):
return self.attrib.keys()
##
# Gets element attributes, as a sequence. The attributes are
# returned in an arbitrary order. Equivalent to <b>attrib.items()</b>.
#
# @return A list of (name, value) tuples for all attributes.
# @defreturn list of (string, string) tuples
def items(self):
return self.attrib.items()
##
# Creates a tree iterator. The iterator loops over this element
# and all subelements, in document order, and returns all elements
# with a matching tag.
# <p>
# If the tree structure is modified during iteration, new or removed
# elements may or may not be included. To get a stable set, use the
# list() function on the iterator, and loop over the resulting list.
#
# @param tag What tags to look for (default is to return all elements).
# @return An iterator containing all the matching elements.
# @defreturn iterator
def iter(self, tag=None):
if tag == "*":
tag = None
if tag is None or self.tag == tag:
yield self
for e in self._children:
for e in e.iter(tag):
yield e
# compatibility
def getiterator(self, tag=None):
# Change for a DeprecationWarning in 1.4
warnings.warn(
"This method will be removed in future versions. "
"Use 'elem.iter()' or 'list(elem.iter())' instead.",
PendingDeprecationWarning, stacklevel=2
)
return list(self.iter(tag))
##
# Creates a text iterator. The iterator loops over this element
# and all subelements, in document order, and returns all inner
# text.
#
# @return An iterator containing all inner text.
# @defreturn iterator
def itertext(self):
tag = self.tag
if not isinstance(tag, basestring) and tag is not None:
return
if self.text:
yield self.text
for e in self:
for s in e.itertext():
yield s
if e.tail:
yield e.tail
# compatibility
_Element = _ElementInterface = Element
##
# Subelement factory. This function creates an element instance, and
# appends it to an existing element.
# <p>
# The element name, attribute names, and attribute values can be
# either 8-bit ASCII strings or Unicode strings.
#
# @param parent The parent element.
# @param tag The subelement name.
# @param attrib An optional dictionary, containing element attributes.
# @param **extra Additional attributes, given as keyword arguments.
# @return An element instance.
# @defreturn Element
def SubElement(parent, tag, attrib={}, **extra):
attrib = attrib.copy()
attrib.update(extra)
element = parent.makeelement(tag, attrib)
parent.append(element)
return element
##
# Comment element factory. This factory function creates a special
# element that will be serialized as an XML comment by the standard
# serializer.
# <p>
# The comment string can be either an 8-bit ASCII string or a Unicode
# string.
#
# @param text A string containing the comment string.
# @return An element instance, representing a comment.
# @defreturn Element
def Comment(text=None):
element = Element(Comment)
element.text = text
return element
##
# PI element factory. This factory function creates a special element
# that will be serialized as an XML processing instruction by the standard
# serializer.
#
# @param target A string containing the PI target.
# @param text A string containing the PI contents, if any.
# @return An element instance, representing a PI.
# @defreturn Element
def ProcessingInstruction(target, text=None):
element = Element(ProcessingInstruction)
element.text = target
if text:
element.text = element.text + " " + text
return element
PI = ProcessingInstruction
##
# QName wrapper. This can be used to wrap a QName attribute value, in
# order to get proper namespace handling on output.
#
# @param text A string containing the QName value, in the form {uri}local,
# or, if the tag argument is given, the URI part of a QName.
# @param tag Optional tag. If given, the first argument is interpreted as
# an URI, and this argument is interpreted as a local name.
# @return An opaque object, representing the QName.
class QName(object):
def __init__(self, text_or_uri, tag=None):
if tag:
text_or_uri = "{%s}%s" % (text_or_uri, tag)
self.text = text_or_uri
def __str__(self):
return self.text
def __hash__(self):
return hash(self.text)
def __cmp__(self, other):
if isinstance(other, QName):
return cmp(self.text, other.text)
return cmp(self.text, other)
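# Usage sketch (illustrative): both forms below denote the same qualified name.
#
#     q1 = QName("{http://example.com/ns}tag")
#     q2 = QName("http://example.com/ns", "tag")
#     elem = Element(q1)   # serialized with a registered or auto-generated prefix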
# --------------------------------------------------------------------
##
# ElementTree wrapper class. This class represents an entire element
# hierarchy, and adds some extra support for serialization to and from
# standard XML.
#
# @param element Optional root element.
# @keyparam file Optional file handle or file name. If given, the
# tree is initialized with the contents of this XML file.
class ElementTree(object):
def __init__(self, element=None, file=None):
# assert element is None or iselement(element)
self._root = element # first node
if file:
self.parse(file)
##
# Gets the root element for this tree.
#
# @return An element instance.
# @defreturn Element
def getroot(self):
return self._root
##
# Replaces the root element for this tree. This discards the
# current contents of the tree, and replaces it with the given
# element. Use with care.
#
# @param element An element instance.
def _setroot(self, element):
# assert iselement(element)
self._root = element
##
# Loads an external XML document into this element tree.
#
# @param source A file name or file object. If a file object is
# given, it only has to implement a <b>read(n)</b> method.
# @keyparam parser An optional parser instance. If not given, the
# standard {@link XMLParser} parser is used.
# @return The document root element.
# @defreturn Element
# @exception ParseError If the parser fails to parse the document.
def parse(self, source, parser=None):
if not hasattr(source, "read"):
source = open(source, "rb")
if not parser:
parser = XMLParser(target=TreeBuilder())
while 1:
data = source.read(65536)
if not data:
break
parser.feed(data)
self._root = parser.close()
return self._root
##
# Creates a tree iterator for the root element. The iterator loops
# over all elements in this tree, in document order.
#
# @param tag What tags to look for (default is to return all elements)
# @return An iterator.
# @defreturn iterator
def iter(self, tag=None):
# assert self._root is not None
return self._root.iter(tag)
# compatibility
def getiterator(self, tag=None):
# Change for a DeprecationWarning in 1.4
warnings.warn(
"This method will be removed in future versions. "
"Use 'tree.iter()' or 'list(tree.iter())' instead.",
PendingDeprecationWarning, stacklevel=2
)
return list(self.iter(tag))
##
# Finds the first toplevel element with given tag.
# Same as getroot().find(path).
#
# @param path What element to look for.
# @keyparam namespaces Optional namespace prefix map.
# @return The first matching element, or None if no element was found.
# @defreturn Element or None
def find(self, path, namespaces=None):
# assert self._root is not None
if path[:1] == "/":
path = "." + path
warnings.warn(
"This search is broken in 1.3 and earlier, and will be "
"fixed in a future version. If you rely on the current "
"behaviour, change it to %r" % path,
FutureWarning, stacklevel=2
)
return self._root.find(path, namespaces)
##
# Finds the element text for the first toplevel element with given
# tag. Same as getroot().findtext(path).
#
# @param path What toplevel element to look for.
# @param default What to return if the element was not found.
# @keyparam namespaces Optional namespace prefix map.
# @return The text content of the first matching element, or the
# default value no element was found. Note that if the element
# is found, but has no text content, this method returns an
# empty string.
# @defreturn string
def findtext(self, path, default=None, namespaces=None):
# assert self._root is not None
if path[:1] == "/":
path = "." + path
warnings.warn(
"This search is broken in 1.3 and earlier, and will be "
"fixed in a future version. If you rely on the current "
"behaviour, change it to %r" % path,
FutureWarning, stacklevel=2
)
return self._root.findtext(path, default, namespaces)
##
# Finds all toplevel elements with the given tag.
# Same as getroot().findall(path).
#
# @param path What element to look for.
# @keyparam namespaces Optional namespace prefix map.
# @return A list or iterator containing all matching elements,
# in document order.
# @defreturn list of Element instances
def findall(self, path, namespaces=None):
# assert self._root is not None
if path[:1] == "/":
path = "." + path
warnings.warn(
"This search is broken in 1.3 and earlier, and will be "
"fixed in a future version. If you rely on the current "
"behaviour, change it to %r" % path,
FutureWarning, stacklevel=2
)
return self._root.findall(path, namespaces)
##
# Finds all matching subelements, by tag name or path.
# Same as getroot().iterfind(path).
#
# @param path What element to look for.
# @keyparam namespaces Optional namespace prefix map.
# @return An iterator or sequence containing all matching elements,
# in document order.
# @defreturn a generated sequence of Element instances
def iterfind(self, path, namespaces=None):
# assert self._root is not None
if path[:1] == "/":
path = "." + path
warnings.warn(
"This search is broken in 1.3 and earlier, and will be "
"fixed in a future version. If you rely on the current "
"behaviour, change it to %r" % path,
FutureWarning, stacklevel=2
)
return self._root.iterfind(path, namespaces)
##
# Writes the element tree to a file, as XML.
#
# @def write(file, **options)
# @param file A file name, or a file object opened for writing.
# @param **options Options, given as keyword arguments.
# @keyparam encoding Optional output encoding (default is US-ASCII).
# @keyparam method Optional output method ("xml", "html", "text" or
# "c14n"; default is "xml").
# @keyparam xml_declaration Controls if an XML declaration should
# be added to the file. Use False for never, True for always,
# None for only if not US-ASCII or UTF-8. None is default.
def write(self, file_or_filename,
# keyword arguments
encoding=None,
xml_declaration=None,
default_namespace=None,
method=None):
# assert self._root is not None
if not method:
method = "xml"
elif method not in _serialize:
# FIXME: raise an ImportError for c14n if ElementC14N is missing?
raise ValueError("unknown method %r" % method)
if hasattr(file_or_filename, "write"):
file = file_or_filename
else:
file = open(file_or_filename, "wb")
write = file.write
if not encoding:
if method == "c14n":
encoding = "utf-8"
else:
encoding = "us-ascii"
elif xml_declaration or (xml_declaration is None and
encoding not in ("utf-8", "us-ascii")):
if method == "xml":
write("<?xml version='1.0' encoding='%s'?>\n" % encoding)
if method == "text":
_serialize_text(write, self._root, encoding)
else:
qnames, namespaces = _namespaces(
self._root, encoding, default_namespace
)
serialize = _serialize[method]
serialize(write, self._root, encoding, qnames, namespaces)
if file_or_filename is not file:
file.close()
def write_c14n(self, file):
# lxml.etree compatibility. use output method instead
return self.write(file, method="c14n")
# --------------------------------------------------------------------
# serialization support
def _namespaces(elem, encoding, default_namespace=None):
# identify namespaces used in this tree
# maps qnames to *encoded* prefix:local names
qnames = {None: None}
# maps uri:s to prefixes
namespaces = {}
if default_namespace:
namespaces[default_namespace] = ""
def encode(text):
return text.encode(encoding)
def add_qname(qname):
# calculate serialized qname representation
try:
if qname[:1] == "{":
uri, tag = qname[1:].rsplit("}", 1)
prefix = namespaces.get(uri)
if prefix is None:
prefix = _namespace_map.get(uri)
if prefix is None:
prefix = "ns%d" % len(namespaces)
if prefix != "xml":
namespaces[uri] = prefix
if prefix:
qnames[qname] = encode("%s:%s" % (prefix, tag))
else:
qnames[qname] = encode(tag) # default element
else:
if default_namespace:
# FIXME: can this be handled in XML 1.0?
raise ValueError(
"cannot use non-qualified names with "
"default_namespace option"
)
qnames[qname] = encode(qname)
except TypeError:
_raise_serialization_error(qname)
# populate qname and namespaces table
try:
iterate = elem.iter
except AttributeError:
iterate = elem.getiterator # cET compatibility
for elem in iterate():
tag = elem.tag
if isinstance(tag, QName):
if tag.text not in qnames:
add_qname(tag.text)
elif isinstance(tag, basestring):
if tag not in qnames:
add_qname(tag)
elif tag is not None and tag is not Comment and tag is not PI:
_raise_serialization_error(tag)
for key, value in elem.items():
if isinstance(key, QName):
key = key.text
if key not in qnames:
add_qname(key)
if isinstance(value, QName) and value.text not in qnames:
add_qname(value.text)
text = elem.text
if isinstance(text, QName) and text.text not in qnames:
add_qname(text.text)
return qnames, namespaces
def _serialize_xml(write, elem, encoding, qnames, namespaces):
tag = elem.tag
text = elem.text
if tag is Comment:
write("<!--%s-->" % _encode(text, encoding))
elif tag is ProcessingInstruction:
write("<?%s?>" % _encode(text, encoding))
else:
tag = qnames[tag]
if tag is None:
if text:
write(_escape_cdata(text, encoding))
for e in elem:
_serialize_xml(write, e, encoding, qnames, None)
else:
write("<" + tag)
items = elem.items()
if items or namespaces:
if namespaces:
for v, k in sorted(namespaces.items(),
key=lambda x: x[1]): # sort on prefix
if k:
k = ":" + k
write(" xmlns%s=\"%s\"" % (
k.encode(encoding),
_escape_attrib(v, encoding)
))
for k, v in sorted(items): # lexical order
if isinstance(k, QName):
k = k.text
if isinstance(v, QName):
v = qnames[v.text]
else:
v = _escape_attrib(v, encoding)
write(" %s=\"%s\"" % (qnames[k], v))
if text or len(elem):
write(">")
if text:
write(_escape_cdata(text, encoding))
for e in elem:
_serialize_xml(write, e, encoding, qnames, None)
write("</" + tag + ">")
else:
write(" />")
if elem.tail:
write(_escape_cdata(elem.tail, encoding))
HTML_EMPTY = ("area", "base", "basefont", "br", "col", "frame", "hr",
"img", "input", "isindex", "link", "meta" "param")
try:
HTML_EMPTY = set(HTML_EMPTY)
except NameError:
pass
def _serialize_html(write, elem, encoding, qnames, namespaces):
tag = elem.tag
text = elem.text
if tag is Comment:
write("<!--%s-->" % _escape_cdata(text, encoding))
elif tag is ProcessingInstruction:
write("<?%s?>" % _escape_cdata(text, encoding))
else:
tag = qnames[tag]
if tag is None:
if text:
write(_escape_cdata(text, encoding))
for e in elem:
_serialize_html(write, e, encoding, qnames, None)
else:
write("<" + tag)
items = elem.items()
if items or namespaces:
if namespaces:
for v, k in sorted(namespaces.items(),
key=lambda x: x[1]): # sort on prefix
if k:
k = ":" + k
write(" xmlns%s=\"%s\"" % (
k.encode(encoding),
_escape_attrib(v, encoding)
))
for k, v in sorted(items): # lexical order
if isinstance(k, QName):
k = k.text
if isinstance(v, QName):
v = qnames[v.text]
else:
v = _escape_attrib_html(v, encoding)
# FIXME: handle boolean attributes
write(" %s=\"%s\"" % (qnames[k], v))
write(">")
tag = tag.lower()
if text:
if tag == "script" or tag == "style":
write(_encode(text, encoding))
else:
write(_escape_cdata(text, encoding))
for e in elem:
_serialize_html(write, e, encoding, qnames, None)
if tag not in HTML_EMPTY:
write("</" + tag + ">")
if elem.tail:
write(_escape_cdata(elem.tail, encoding))
def _serialize_text(write, elem, encoding):
for part in elem.itertext():
write(part.encode(encoding))
if elem.tail:
write(elem.tail.encode(encoding))
_serialize = {
"xml": _serialize_xml,
"html": _serialize_html,
"text": _serialize_text,
# this optional method is imported at the end of the module
# "c14n": _serialize_c14n,
}
##
# Registers a namespace prefix. The registry is global, and any
# existing mapping for either the given prefix or the namespace URI
# will be removed.
#
# @param prefix Namespace prefix.
# @param uri Namespace uri. Tags and attributes in this namespace
# will be serialized with the given prefix, if at all possible.
# @exception ValueError If the prefix is reserved, or is otherwise
# invalid.
def register_namespace(prefix, uri):
if re.match("ns\d+$", prefix):
raise ValueError("Prefix format reserved for internal use")
for k, v in _namespace_map.items():
if k == uri or v == prefix:
del _namespace_map[k]
_namespace_map[uri] = prefix
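# Usage sketch (added for illustration; not part of the original module).
# Registering a prefix makes the serializer emit it instead of an
# auto-generated "ns0", "ns1", ... prefix:
#
#     register_namespace("svg", "http://www.w3.org/2000/svg")
#     elem = Element("{http://www.w3.org/2000/svg}rect")
#     tostring(elem)
#     # -> '<svg:rect xmlns:svg="http://www.w3.org/2000/svg" />'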
_namespace_map = {
# "well-known" namespace prefixes
"http://www.w3.org/XML/1998/namespace": "xml",
"http://www.w3.org/1999/xhtml": "html",
"http://www.w3.org/1999/02/22-rdf-syntax-ns#": "rdf",
"http://schemas.xmlsoap.org/wsdl/": "wsdl",
# xml schema
"http://www.w3.org/2001/XMLSchema": "xs",
"http://www.w3.org/2001/XMLSchema-instance": "xsi",
# dublin core
"http://purl.org/dc/elements/1.1/": "dc",
}
def _raise_serialization_error(text):
raise TypeError(
"cannot serialize %r (type %s)" % (text, type(text).__name__)
)
def _encode(text, encoding):
try:
return text.encode(encoding, "xmlcharrefreplace")
except (TypeError, AttributeError):
_raise_serialization_error(text)
def _escape_cdata(text, encoding):
# escape character data
try:
# it's worth avoiding do-nothing calls for strings that are
        # shorter than 500 characters, or so.  assume that's, by far,
# the most common case in most applications.
if "&" in text:
text = text.replace("&", "&")
if "<" in text:
text = text.replace("<", "<")
if ">" in text:
text = text.replace(">", ">")
return text.encode(encoding, "xmlcharrefreplace")
except (TypeError, AttributeError):
_raise_serialization_error(text)
def _escape_attrib(text, encoding):
# escape attribute value
try:
if "&" in text:
text = text.replace("&", "&")
if "<" in text:
text = text.replace("<", "<")
if ">" in text:
text = text.replace(">", ">")
if "\"" in text:
text = text.replace("\"", """)
if "\n" in text:
text = text.replace("\n", " ")
return text.encode(encoding, "xmlcharrefreplace")
except (TypeError, AttributeError):
_raise_serialization_error(text)
def _escape_attrib_html(text, encoding):
# escape attribute value
try:
if "&" in text:
text = text.replace("&", "&")
if ">" in text:
text = text.replace(">", ">")
if "\"" in text:
text = text.replace("\"", """)
return text.encode(encoding, "xmlcharrefreplace")
except (TypeError, AttributeError):
_raise_serialization_error(text)
# --------------------------------------------------------------------
##
# Generates a string representation of an XML element, including all
# subelements.
#
# @param element An Element instance.
# @keyparam encoding Optional output encoding (default is US-ASCII).
# @keyparam method Optional output method ("xml", "html", "text" or
# "c14n"; default is "xml").
# @return An encoded string containing the XML data.
# @defreturn string
def tostring(element, encoding=None, method=None):
class dummy:
pass
data = []
file = dummy()
file.write = data.append
ElementTree(element).write(file, encoding, method=method)
return "".join(data)
##
# Generates a string representation of an XML element, including all
# subelements. The string is returned as a sequence of string fragments.
#
# @param element An Element instance.
# @keyparam encoding Optional output encoding (default is US-ASCII).
# @keyparam method Optional output method ("xml", "html", "text" or
# "c14n"; default is "xml").
# @return A sequence object containing the XML data.
# @defreturn sequence
# @since 1.3
def tostringlist(element, encoding=None, method=None):
class dummy:
pass
data = []
file = dummy()
file.write = data.append
ElementTree(element).write(file, encoding, method=method)
# FIXME: merge small fragments into larger parts
return data
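# Note (added for illustration; not part of the original module): the
# fragments join up to exactly what tostring() would return:
#
#     "".join(tostringlist(elem)) == tostring(elem)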
##
# Writes an element tree or element structure to sys.stdout. This
# function should be used for debugging only.
# <p>
# The exact output format is implementation dependent. In this
# version, it's written as an ordinary XML file.
#
# @param elem An element tree or an individual element.
def dump(elem):
# debugging
if not isinstance(elem, ElementTree):
elem = ElementTree(elem)
elem.write(sys.stdout)
tail = elem.getroot().tail
if not tail or tail[-1] != "\n":
sys.stdout.write("\n")
# --------------------------------------------------------------------
# parsing
##
# Parses an XML document into an element tree.
#
# @param source A filename or file object containing XML data.
# @param parser An optional parser instance. If not given, the
# standard {@link XMLParser} parser is used.
# @return An ElementTree instance
def parse(source, parser=None):
tree = ElementTree()
tree.parse(source, parser)
return tree
##
# Parses an XML document into an element tree incrementally, and reports
# what's going on to the user.
#
# @param source A filename or file object containing XML data.
# @param events A list of events to report back. If omitted, only "end"
# events are reported.
# @param parser An optional parser instance. If not given, the
# standard {@link XMLParser} parser is used.
# @return A (event, elem) iterator.
def iterparse(source, events=None, parser=None):
if sys.platform == 'cli':
raise NotImplementedError('iterparse is not supported on IronPython. (CP #31923)')
if not hasattr(source, "read"):
source = open(source, "rb")
if not parser:
parser = XMLParser(target=TreeBuilder())
return _IterParseIterator(source, events, parser)
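# Usage sketch (added for illustration; not part of the original module).
# Stream-parses a document and clears each processed subtree to bound memory
# use; "large.xml" and the "record" tag are arbitrary example names:
#
#     for event, elem in iterparse("large.xml", events=("start", "end")):
#         if event == "end" and elem.tag == "record":
#             # ... handle the fully built <record> element here ...
#             elem.clear()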
class _IterParseIterator(object):
def __init__(self, source, events, parser):
self._file = source
self._events = []
self._index = 0
self.root = self._root = None
self._parser = parser
# wire up the parser for event reporting
parser = self._parser._parser
append = self._events.append
if events is None:
events = ["end"]
for event in events:
if event == "start":
try:
parser.ordered_attributes = 1
parser.specified_attributes = 1
def handler(tag, attrib_in, event=event, append=append,
start=self._parser._start_list):
append((event, start(tag, attrib_in)))
parser.StartElementHandler = handler
except AttributeError:
def handler(tag, attrib_in, event=event, append=append,
start=self._parser._start):
append((event, start(tag, attrib_in)))
parser.StartElementHandler = handler
elif event == "end":
def handler(tag, event=event, append=append,
end=self._parser._end):
append((event, end(tag)))
parser.EndElementHandler = handler
elif event == "start-ns":
def handler(prefix, uri, event=event, append=append):
try:
uri = (uri or "").encode("ascii")
except UnicodeError:
pass
append((event, (prefix or "", uri or "")))
parser.StartNamespaceDeclHandler = handler
elif event == "end-ns":
def handler(prefix, event=event, append=append):
append((event, None))
parser.EndNamespaceDeclHandler = handler
else:
raise ValueError("unknown event %r" % event)
def next(self):
while 1:
try:
item = self._events[self._index]
except IndexError:
if self._parser is None:
self.root = self._root
raise StopIteration
# load event buffer
del self._events[:]
self._index = 0
data = self._file.read(16384)
if data:
self._parser.feed(data)
else:
self._root = self._parser.close()
self._parser = None
else:
self._index = self._index + 1
return item
def __iter__(self):
return self
##
# Parses an XML document from a string constant. This function can
# be used to embed "XML literals" in Python code.
#
# @param source A string containing XML data.
# @param parser An optional parser instance. If not given, the
# standard {@link XMLParser} parser is used.
# @return An Element instance.
# @defreturn Element
def XML(text, parser=None):
if not parser:
parser = XMLParser(target=TreeBuilder())
parser.feed(text)
return parser.close()
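# Usage sketch (added for illustration; not part of the original module):
#
#     elem = XML("<root><child>text</child></root>")
#     elem.tag         # -> 'root'
#     elem[0].text     # -> 'text'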
##
# Parses an XML document from a string constant, and also returns
# a dictionary which maps from element id:s to elements.
#
# @param source A string containing XML data.
# @param parser An optional parser instance. If not given, the
# standard {@link XMLParser} parser is used.
# @return A tuple containing an Element instance and a dictionary.
# @defreturn (Element, dictionary)
def XMLID(text, parser=None):
if not parser:
parser = XMLParser(target=TreeBuilder())
parser.feed(text)
tree = parser.close()
ids = {}
for elem in tree.iter():
id = elem.get("id")
if id:
ids[id] = elem
return tree, ids
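# Usage sketch (added for illustration; not part of the original module):
#
#     tree, ids = XMLID('<doc><item id="a1">first</item></doc>')
#     ids["a1"].text   # -> 'first'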
##
# Parses an XML document from a string constant. Same as {@link #XML}.
#
# @def fromstring(text)
# @param source A string containing XML data.
# @return An Element instance.
# @defreturn Element
fromstring = XML
##
# Parses an XML document from a sequence of string fragments.
#
# @param sequence A list or other sequence containing XML data fragments.
# @param parser An optional parser instance. If not given, the
# standard {@link XMLParser} parser is used.
# @return An Element instance.
# @defreturn Element
# @since 1.3
def fromstringlist(sequence, parser=None):
if not parser:
parser = XMLParser(target=TreeBuilder())
for text in sequence:
parser.feed(text)
return parser.close()
# --------------------------------------------------------------------
##
# Generic element structure builder. This builder converts a sequence
# of {@link #TreeBuilder.start}, {@link #TreeBuilder.data}, and {@link
# #TreeBuilder.end} method calls to a well-formed element structure.
# <p>
# You can use this class to build an element structure using a custom XML
# parser, or a parser for some other XML-like format.
#
# @param element_factory Optional element factory. This factory
# is called to create new Element instances, as necessary.
class TreeBuilder(object):
def __init__(self, element_factory=None):
self._data = [] # data collector
self._elem = [] # element stack
self._last = None # last element
self._tail = None # true if we're after an end tag
if element_factory is None:
element_factory = Element
self._factory = element_factory
##
# Flushes the builder buffers, and returns the toplevel document
# element.
#
# @return An Element instance.
# @defreturn Element
def close(self):
assert len(self._elem) == 0, "missing end tags"
assert self._last is not None, "missing toplevel element"
return self._last
def _flush(self):
if self._data:
if self._last is not None:
text = "".join(self._data)
if self._tail:
assert self._last.tail is None, "internal error (tail)"
self._last.tail = text
else:
assert self._last.text is None, "internal error (text)"
self._last.text = text
self._data = []
##
# Adds text to the current element.
#
# @param data A string. This should be either an 8-bit string
# containing ASCII text, or a Unicode string.
def data(self, data):
self._data.append(data)
##
# Opens a new element.
#
# @param tag The element name.
# @param attrib A dictionary containing element attributes.
# @return The opened element.
# @defreturn Element
def start(self, tag, attrs):
self._flush()
self._last = elem = self._factory(tag, attrs)
if self._elem:
self._elem[-1].append(elem)
self._elem.append(elem)
self._tail = 0
return elem
##
# Closes the current element.
#
# @param tag The element name.
# @return The closed element.
# @defreturn Element
def end(self, tag):
self._flush()
self._last = self._elem.pop()
assert self._last.tag == tag,\
"end tag mismatch (expected %s, got %s)" % (
self._last.tag, tag)
self._tail = 1
return self._last
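# Usage sketch (added for illustration; not part of the original module).
# Building a small tree by hand with the start/data/end protocol described
# above:
#
#     builder = TreeBuilder()
#     builder.start("root", {})
#     builder.start("child", {"id": "1"})
#     builder.data("hello")
#     builder.end("child")
#     builder.end("root")
#     elem = builder.close()    # Element 'root' with one 'child' subelement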
##
# Element structure builder for XML source data, based on the
# <b>expat</b> parser.
#
# @keyparam target Target object. If omitted, the builder uses an
# instance of the standard {@link #TreeBuilder} class.
# @keyparam html Predefine HTML entities. This flag is not supported
# by the current implementation.
# @keyparam encoding Optional encoding. If given, the value overrides
# the encoding specified in the XML file.
# @see #ElementTree
# @see #TreeBuilder
class XMLParser(object):
def __init__(self, html=0, target=None, encoding=None):
try:
from xml.parsers import expat
except ImportError:
try:
import pyexpat as expat
except ImportError:
raise ImportError(
"No module named expat; use SimpleXMLTreeBuilder instead"
)
parser = expat.ParserCreate(encoding, "}")
if target is None:
target = TreeBuilder()
# underscored names are provided for compatibility only
self.parser = self._parser = parser
self.target = self._target = target
self._error = expat.error
self._names = {} # name memo cache
# callbacks
parser.DefaultHandlerExpand = self._default
parser.StartElementHandler = self._start
parser.EndElementHandler = self._end
parser.CharacterDataHandler = self._data
# optional callbacks
parser.CommentHandler = self._comment
parser.ProcessingInstructionHandler = self._pi
# let expat do the buffering, if supported
try:
self._parser.buffer_text = 1
except AttributeError:
pass
# use new-style attribute handling, if supported
try:
self._parser.ordered_attributes = 1
self._parser.specified_attributes = 1
parser.StartElementHandler = self._start_list
except AttributeError:
pass
self._doctype = None
self.entity = {}
try:
self.version = "Expat %d.%d.%d" % expat.version_info
except AttributeError:
pass # unknown
def _raiseerror(self, value):
err = ParseError(value)
err.code = value.code
        err.position = value.lineno, value.offset
raise err
def _fixtext(self, text):
# convert text string to ascii, if possible
try:
return text.encode("ascii")
except UnicodeError:
return text
def _fixname(self, key):
# expand qname, and convert name string to ascii, if possible
try:
name = self._names[key]
except KeyError:
name = key
if "}" in name:
name = "{" + name
self._names[key] = name = self._fixtext(name)
return name
def _start(self, tag, attrib_in):
fixname = self._fixname
fixtext = self._fixtext
tag = fixname(tag)
attrib = {}
for key, value in attrib_in.items():
attrib[fixname(key)] = fixtext(value)
return self.target.start(tag, attrib)
def _start_list(self, tag, attrib_in):
fixname = self._fixname
fixtext = self._fixtext
tag = fixname(tag)
attrib = {}
if attrib_in:
for i in range(0, len(attrib_in), 2):
attrib[fixname(attrib_in[i])] = fixtext(attrib_in[i+1])
return self.target.start(tag, attrib)
def _data(self, text):
return self.target.data(self._fixtext(text))
def _end(self, tag):
return self.target.end(self._fixname(tag))
def _comment(self, data):
try:
comment = self.target.comment
except AttributeError:
pass
else:
return comment(self._fixtext(data))
def _pi(self, target, data):
try:
pi = self.target.pi
except AttributeError:
pass
else:
return pi(self._fixtext(target), self._fixtext(data))
def _default(self, text):
prefix = text[:1]
if prefix == "&":
# deal with undefined entities
try:
self.target.data(self.entity[text[1:-1]])
except KeyError:
from xml.parsers import expat
err = expat.error(
"undefined entity %s: line %d, column %d" %
(text, self._parser.ErrorLineNumber,
self._parser.ErrorColumnNumber)
)
err.code = 11 # XML_ERROR_UNDEFINED_ENTITY
err.lineno = self._parser.ErrorLineNumber
err.offset = self._parser.ErrorColumnNumber
raise err
elif prefix == "<" and text[:9] == "<!DOCTYPE":
self._doctype = [] # inside a doctype declaration
elif self._doctype is not None:
# parse doctype contents
if prefix == ">":
self._doctype = None
return
text = text.strip()
if not text:
return
self._doctype.append(text)
n = len(self._doctype)
if n > 2:
type = self._doctype[1]
if type == "PUBLIC" and n == 4:
name, type, pubid, system = self._doctype
elif type == "SYSTEM" and n == 3:
name, type, system = self._doctype
pubid = None
else:
return
if pubid:
pubid = pubid[1:-1]
if hasattr(self.target, "doctype"):
self.target.doctype(name, pubid, system[1:-1])
elif self.doctype is not self._XMLParser__doctype:
# warn about deprecated call
self._XMLParser__doctype(name, pubid, system[1:-1])
self.doctype(name, pubid, system[1:-1])
self._doctype = None
##
# (Deprecated) Handles a doctype declaration.
#
# @param name Doctype name.
# @param pubid Public identifier.
# @param system System identifier.
def doctype(self, name, pubid, system):
"""This method of XMLParser is deprecated."""
warnings.warn(
"This method of XMLParser is deprecated. Define doctype() "
"method on the TreeBuilder target.",
DeprecationWarning,
)
# sentinel, if doctype is redefined in a subclass
__doctype = doctype
##
# Feeds data to the parser.
#
# @param data Encoded data.
def feed(self, data):
try:
self._parser.Parse(data, 0)
except self._error, v:
self._raiseerror(v)
##
# Finishes feeding data to the parser.
#
# @return An element structure.
# @defreturn Element
def close(self):
try:
self._parser.Parse("", 1) # end of data
except self._error, v:
self._raiseerror(v)
tree = self.target.close()
del self.target, self._parser # get rid of circular references
return tree
if sys.platform == 'cli':
from . import SimpleXMLTreeBuilder
XMLParser = SimpleXMLTreeBuilder.TreeBuilder
# compatibility
XMLTreeBuilder = XMLParser
# workaround circular import.
try:
from ElementC14N import _serialize_c14n
_serialize["c14n"] = _serialize_c14n
except ImportError:
pass
| apache-2.0 | -2,277,228,194,437,559,800 | 32.916063 | 90 | 0.589887 | false |
justintweaver/mtchi-cert-game | makahiki/apps/managers/score_mgr/score_mgr.py | 1 | 16356 | """The manager for defining and managing scores."""
import datetime
from django.db.models import Q
from django.core.exceptions import ObjectDoesNotExist
from django.db.models.aggregates import Sum, Max
from apps.managers.challenge_mgr import challenge_mgr
from apps.managers.score_mgr.models import ScoreboardEntry, PointsTransaction, ScoreSetting, \
ReferralSetting
from apps.managers.cache_mgr import cache_mgr
def info():
"""returns the score_mgr info."""
s = score_setting()
return "signup_points: %d \n" \
"setup_points: %d \n" \
"noshow_penalty_points: %d \n" \
"quest_points: %d" % (s.signup_bonus_points,
s.setup_points, s.noshow_penalty_points, s.quest_bonus_points)
def score_setting():
"""returns the score settings."""
score = cache_mgr.get_cache('score_setting')
if not score:
score, _ = ScoreSetting.objects.get_or_create(pk=1)
cache_mgr.set_cache('score_setting', score, 2592000)
return score
def referral_setting():
"""returns the referral settings."""
referral = cache_mgr.get_cache('referral_setting')
if not referral:
referral, _ = ReferralSetting.objects.get_or_create(pk=1)
cache_mgr.set_cache('referral_setting', referral, 2592000)
return referral
def referral_points(referral):
"""returns the referral point amount from referral settings, depends on if the dynamic bonus
starts and the participation rate of the referral's team.
"""
points, _ = referral_points_and_type(referral)
return points
def referral_points_and_type(referral):
"""returns the referral point amount and type from referral settings, depends on if the
dynamic bonus starts and the participation rate of the referral's team.
"""
rs = referral_setting()
if referral:
team = referral.team
if rs.start_dynamic_bonus and team:
participation = referral.team.teamparticipation_set.all()
if participation:
rate = participation[0].participation
if rate < 20:
return rs.mega_referral_points, "mega"
elif rate <= 40:
return rs.super_referral_points, "super"
# everything else, return the normal referral points
return rs.normal_referral_points, ""
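# Tier summary (added for illustration; not part of the original module).
# With the dynamic bonus enabled, a team participation rate below 20% yields
# the "mega" bonus, 20-40% the "super" bonus, and anything higher (or a
# disabled dynamic bonus) the normal referral points, e.g.:
#
#     points, bonus_type = referral_points_and_type(referral)
#     # rate 15% -> (mega_referral_points, "mega"); rate 55% -> (normal, "")
#
# See award_referral_bonus() below for the call site that applies the points.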
def active_threshold_points():
"""returns the referral point amount from settings."""
return score_setting().active_threshold_points
def setup_points():
"""returns the setup point amount from settings."""
return score_setting().setup_points
def signup_points():
"""returns the signup point amount from settings."""
return score_setting().signup_bonus_points
def noshow_penalty_points():
"""returns the noshow penalty point amount from settings."""
return score_setting().noshow_penalty_points
def quest_points():
"""returns the signup point amount from settings."""
return score_setting().quest_bonus_points
def feedback_points():
"""returns the action feedback point amount from settings."""
return score_setting().feedback_bonus_points
def player_rank(profile, round_name=None):
"""user round overall rank"""
if not round_name:
round_name = challenge_mgr.get_round_name()
entry = None
try:
entry = ScoreboardEntry.objects.get(profile=profile, round_name=round_name)
except ObjectDoesNotExist:
pass
# Check if the user has done anything.
if entry and entry.last_awarded_submission:
return ScoreboardEntry.objects.filter(
Q(points__gt=entry.points) |
Q(points=entry.points,
last_awarded_submission__gt=entry.last_awarded_submission),
round_name=round_name,
).count() + 1
if entry:
points = entry.points
else:
points = 0
# Users who have not done anything yet are assumed to be last.
return ScoreboardEntry.objects.filter(
points__gt=points,
round_name=round_name,
).count() + 1
def player_rank_in_team(profile, round_name=None):
"""Returns user's rank in his team."""
if not round_name:
round_name = challenge_mgr.get_round_name()
team = profile.team
entry = None
try:
entry = ScoreboardEntry.objects.get(profile=profile, round_name=round_name)
except ObjectDoesNotExist:
pass
if entry and entry.last_awarded_submission:
return ScoreboardEntry.objects.filter(
Q(points__gt=entry.points) |
Q(points=entry.points,
last_awarded_submission__gt=entry.last_awarded_submission),
profile__team=team,
round_name=round_name,
).count() + 1
if entry:
points = entry.points
else:
points = 0
return ScoreboardEntry.objects.filter(
points__gt=points,
profile__team=team,
round_name=round_name,
).count() + 1
def player_points(profile, round_name=None):
"""Returns the amount of points the user has in the round."""
if not round_name:
round_name = challenge_mgr.get_round_name()
entry = ScoreboardEntry.objects.filter(profile=profile, round_name=round_name)
if entry:
return entry[0].points
else:
return 0
def player_points_leader(round_name=None):
"""Returns the points leader (the first place) out of all users, as a Profile object."""
if not round_name:
round_name = challenge_mgr.get_round_name()
entries = ScoreboardEntry.objects.filter(round_name=round_name,).order_by(
"-points",
"-last_awarded_submission")
if entries:
return entries[0].profile
else:
return None
def player_points_leaders(num_results=None, round_name=None):
"""Returns the points leaders out of all users, as a dictionary object
with profile__name and points.
"""
if not round_name:
round_name = challenge_mgr.get_round_name()
entries = ScoreboardEntry.objects.filter(round_name=round_name,).select_related(
'profile', 'user__is_staff').filter(profile__user__is_staff=False).order_by(
"-points",
"-last_awarded_submission").values('profile', 'profile__name', 'points')
if entries:
if num_results:
entries = entries[:num_results]
return entries
else:
return None
def player_last_awarded_submission(profile):
"""Returns the last awarded submission date for the profile."""
entry = profile.scoreboardentry_set.order_by("-last_awarded_submission")
if entry:
return entry[0].last_awarded_submission
else:
return None
def player_add_points(profile, points, transaction_date, message, related_object=None):
"""Adds points based on the point value of the submitted object."""
# player won't get points if outside of the competitions.
# ignore the transaction
if not challenge_mgr.in_competition(transaction_date):
return
# Create a transaction first.
transaction = PointsTransaction(
user=profile.user,
points=points,
transaction_date=transaction_date,
message=message,
)
if related_object:
transaction.related_object = related_object
transaction.save()
# update the scoreboard entry
_update_scoreboard_entry(profile, points, transaction_date)
# Invalidate info bar cache.
cache_mgr.invalidate_template_cache("RIB", profile.user.username)
def player_remove_points(profile, points, transaction_date, message, related_object=None):
"""Removes points from the user.
If the submission date is the same as the last_awarded_submission
field, we rollback to a previously completed task.
"""
if not challenge_mgr.in_competition(transaction_date):
return
# update the scoreboard entry
_update_scoreboard_entry(profile, points * -1, transaction_date)
# Log the transaction.
transaction = PointsTransaction(
user=profile.user,
points=points * -1,
transaction_date=transaction_date,
message=message,
)
if related_object:
transaction.related_object = related_object
transaction.save()
# Invalidate info bar cache.
cache_mgr.invalidate_template_cache("RIB", profile.user.username)
def player_remove_related_points(profile, related_object):
"""Removes all points transaction related to the related_object."""
txns = related_object.pointstransactions.all()
points = 0
last_awarded_submission = None
for txn in txns:
points += txn.points
# find the latest transaction date
if not last_awarded_submission or (
txn.points > 0 and last_awarded_submission < txn.transaction_date):
last_awarded_submission = txn.transaction_date
txn.delete()
if last_awarded_submission:
_update_scoreboard_entry(profile, points * -1, last_awarded_submission)
def _update_scoreboard_entry(profile, points, transaction_date):
"""Update the scoreboard entry for the associated round."""
current_round = challenge_mgr.get_round_name(transaction_date)
_update_round_scoreboard_entry(profile, current_round, points, transaction_date)
# also update for the overall round
_update_round_scoreboard_entry(profile, "Overall", points, transaction_date)
def _update_round_scoreboard_entry(profile, round_name, points, transaction_date):
"""update the round scoreboard entry for the transaction."""
entry, _ = ScoreboardEntry.objects.get_or_create(profile=profile, round_name=round_name)
entry.points += points
# update the last_awarded_submission
if points > 0:
if not entry.last_awarded_submission or transaction_date > entry.last_awarded_submission:
entry.last_awarded_submission = transaction_date
else:
if entry.last_awarded_submission == transaction_date:
# Need to find the previous update.
entry.last_awarded_submission = _last_submitted_before(profile.user, transaction_date)
entry.save()
def _last_submitted_before(user, transaction_date):
"""Time of the last task that was completed before the submission date.
:returns None if there are no other tasks.
"""
try:
return PointsTransaction.objects.filter(
user=user,
transaction_date__lt=transaction_date).latest("transaction_date").transaction_date
except ObjectDoesNotExist:
return None
def player_has_points(profile, points, round_name=None):
"""Returns True if the user has at least the requested number of points."""
if not round_name:
round_name = challenge_mgr.get_round_name()
entry = ScoreboardEntry.objects.filter(profile=profile, round_name=round_name)
if entry:
return entry[0].points >= points
else:
return False
def player_points_leaders_in_team(team, num_results=None, round_name=None):
"""Gets the individual points leaders for the team, as Profile objects and
scoreboardentry_points"""
if not round_name:
round_name = challenge_mgr.get_round_name()
results = team.profile_set.select_related('scoreboardentry').filter(
scoreboardentry__round_name=round_name
).order_by("-scoreboardentry__points",
"-scoreboardentry__last_awarded_submission", ).annotate(
scoreboardentry_points=Sum("scoreboardentry__points"))
if num_results:
results = results[:num_results]
return results
def team_rank(team, round_name=None):
"""Returns the rank of the team across all groups."""
if not round_name:
round_name = challenge_mgr.get_round_name()
aggregate = ScoreboardEntry.objects.filter(
profile__team=team,
round_name=round_name).aggregate(points=Sum("points"), last=Max("last_awarded_submission"))
points = aggregate["points"] or 0
last_awarded_submission = aggregate["last"]
# Group by teams, filter out other rounds, and annotate.
annotated_teams = ScoreboardEntry.objects.values("profile__team").filter(
round_name=round_name).annotate(team_points=Sum("points"),
last_awarded=Max("last_awarded_submission"))
count = annotated_teams.filter(team_points__gt=points).count()
# If there was a submission, tack that on to the count.
if last_awarded_submission:
count = count + annotated_teams.filter(
team_points=points,
last_awarded_submission__gt=last_awarded_submission
).count()
return count + 1
def team_points(team, round_name=None):
"""Returns the total number of points for the team. Optional parameter for a round."""
if not round_name:
round_name = challenge_mgr.get_round_name()
dictionary = ScoreboardEntry.objects.filter(profile__team=team,
round_name=round_name).aggregate(Sum("points"))
return dictionary["points__sum"] or 0
def group_points(group, round_name=None):
"""Returns the total number of points for the team. Optional parameter for a round."""
if not round_name:
round_name = challenge_mgr.get_round_name()
group_total_points = 0
scores = ScoreboardEntry.objects.all().filter(round_name=round_name)
for score in scores:
if score.profile.team.group == group:
group_total_points += score.points
return group_total_points
def team_points_leader(round_name=None):
"""Returns the team points leader (the first place) across all groups, as a Team ID."""
if not round_name:
round_name = challenge_mgr.get_round_name()
entry = ScoreboardEntry.objects.values("profile__team").filter(round_name=round_name).annotate(
points=Sum("points"),
last=Max("last_awarded_submission")).order_by("-points", "-last")
if entry:
return entry[0]["profile__team"]
else:
return None
def team_points_leaders(num_results=None, round_name=None):
"""Returns the team points leaders across all groups, as a dictionary profile__team__name
and points.
"""
if not round_name:
round_name = challenge_mgr.get_round_name()
entries = ScoreboardEntry.objects.filter(
round_name=round_name, profile__team__isnull=False).values(
"profile__team__name").annotate(
points=Sum("points"),
last=Max("last_awarded_submission")).order_by("-points", "-last")
if entries:
if num_results:
entries = entries[:num_results]
return entries
else:
return None
def team_points_leaders_in_group(group, num_results=None, round_name=None):
"""Returns the top points leaders for the given group."""
if not round_name:
round_name = challenge_mgr.get_round_name()
results = group.team_set.filter(
profile__scoreboardentry__round_name=round_name).annotate(
points=Sum("profile__scoreboardentry__points"),
last=Max("profile__scoreboardentry__last_awarded_submission")).order_by(
"-points", "-last")
if num_results:
results = results[:num_results]
return results
def award_referral_bonus(referral, referrer):
"""award the referral bonus to both party."""
#depends on the referred's team's participation, the bonus point could be different.
points, ref_type = referral_points_and_type(referral)
player_add_points(referral, points, datetime.datetime.today(),
'%s Referred by %s' % (ref_type.capitalize(), referrer.name),
referral)
player_add_points(referrer, points, datetime.datetime.today(),
'%s Referred %s' % (ref_type.capitalize(), referral.name), referrer)
def copy_scoreboard_entry(previous_round, current_round):
"""copy the scoreboardentry to the new round."""
for entry in ScoreboardEntry.objects.filter(round_name=previous_round):
ScoreboardEntry.objects.create(
profile=entry.profile,
round_name=current_round,
points=entry.points,
last_awarded_submission=entry.last_awarded_submission)
| gpl-3.0 | 2,256,365,725,695,180,500 | 33.146138 | 99 | 0.657129 | false |
nitely/Spirit | spirit/comment/tags.py | 1 | 2310 | # -*- coding: utf-8 -*-
from django.utils.translation import gettext_lazy as _
from django.utils.html import mark_safe, format_html
from django.contrib.humanize.templatetags import humanize
from django.template.defaultfilters import date as date_format
from spirit.core.conf import settings
from spirit.core.tags.registry import register
from .poll.utils.render import render_polls
from .forms import CommentForm
from .models import Comment
@register.inclusion_tag('spirit/comment/_form.html', takes_context=True)
def render_comments_form(context, topic, next=None):
form = CommentForm()
return {
'form': form,
'topic_id': topic.pk,
'next': next,
# fixes #249
'user': context['request'].user,
}
@register.simple_tag()
def get_allowed_file_types():
return ", ".join(
'.%s' % ext
for ext in sorted(settings.ST_ALLOWED_UPLOAD_FILE_MEDIA_TYPE.keys()))
@register.simple_tag()
def get_allowed_image_types():
return ", ".join(
'.%s' % ext
for ext in sorted(settings.ST_ALLOWED_UPLOAD_IMAGE_FORMAT))
ACTIONS = {
Comment.MOVED: _("{user} moved this {time_ago}"),
Comment.CLOSED: _("{user} closed this {time_ago}"),
Comment.UNCLOSED: _("{user} reopened this {time_ago}"),
Comment.PINNED: _("{user} pinned this {time_ago}"),
Comment.UNPINNED: _("{user} unpinned this {time_ago}"),
}
@register.simple_tag()
def get_comment_action_text(comment):
user_frag = '<a href="{url}">{user}</a>'
date_frag = '<span title="{title}">{date}</span>'
text_frag = ACTIONS.get(comment.action, "{user} unknown action {time_ago}")
if comment.is_removed:
text_frag = _("{user}'s comment was removed {time_ago}")
return format_html(
text_frag,
user=format_html(
user_frag,
url=comment.user.st.get_absolute_url(),
user=comment.user.st.nickname),
time_ago=format_html(
date_frag,
title=date_format(comment.date, "DATETIME_FORMAT"),
date=humanize.naturaltime(comment.date)))
@register.simple_tag(takes_context=True)
def post_render_comment(context, comment):
request = context['request']
csrf_token = context['csrf_token']
return mark_safe(render_polls(comment, request, csrf_token))
| mit | -5,524,012,494,408,355,000 | 30.643836 | 79 | 0.649351 | false |
DemocracyClub/UK-Polling-Stations | polling_stations/apps/data_importers/management/commands/import_bolton.py | 1 | 2487 | from data_importers.management.commands import BaseXpressDemocracyClubCsvImporter
class Command(BaseXpressDemocracyClubCsvImporter):
council_id = "BOL"
addresses_name = "2021-05-01T18:52:07.783562/Democracy_Club__06May2021.CSV"
stations_name = "2021-05-01T18:52:07.783562/Democracy_Club__06May2021.CSV"
elections = ["2021-05-06"]
csv_delimiter = ","
def station_record_to_dict(self, record):
if record.polling_place_id == "4291":
            # Trinity Methodist Hall (postcode geocode puts this quite far from the actual location, making error spotting
# more difficult)
record = record._replace(
polling_place_easting=374156, polling_place_northing=405696
)
return super().station_record_to_dict(record)
def address_record_to_dict(self, record):
uprn = record.property_urn.strip().lstrip("0")
if uprn in [
"100012434533", # RATCLIFFES FARM HOUSE, WINGATES LANE, WESTHOUGHTON, BOLTON
"10070916825", # CURLEYS FISHERY, TOP O TH WALLSUCHES, HORWICH, BOLTON
"100012431797", # 321 DERBY STREET, BOLTON
"10001244960", # FLAT 3, 115-117 DERBY STREET, BOLTON
"100012556511", # 152 LONGSIGHT, BOLTON
]:
return None
# FLAT 1 290 ST HELENS ROAD, BOLTON
if uprn == "10001244221":
record = record._replace(property_urn="", post_code="BL1 4JU")
if record.addressline6 in [
"BL2 4JU",
"BL2 3EL",
"BL2 3BQ",
"BL2 6DZ",
"BL1 3QW",
"BL2 2JU",
"BL4 8JA",
"BL1 5DB",
"BL1 3AU",
"BL1 5HP",
"BL1 3SJ",
"BL1 2HZ",
"BL3 2DP",
"BL4 0LW",
"BL4 7PQ",
"BL5 2DL",
"BL4 7BB",
"BL3 1BA",
"BL6 4ED",
"BL6 6PX",
"BL6 6HN",
"BL3 6ST",
"BL4 0HU",
"BL5 3LT",
"BL5 2JX",
"BL5 2DJ",
]:
return None
rec = super().address_record_to_dict(record)
if record.addressline6.strip() == "BL7 OHR":
rec["postcode"] = "BL7 0HR"
if record.addressline6.strip() == "BL4 ONX":
rec["postcode"] = "BL4 0NX"
if record.addressline6.strip() == "BL4 ONY":
rec["postcode"] = "BL4 0NY"
return rec
| bsd-3-clause | 5,023,162,688,096,483,000 | 30.884615 | 119 | 0.524729 | false |
bwesterb/claviger | src/config.py | 1 | 6883 | """ Reads claviger's configuration file. """
import yaml
import logging
import os.path
import textwrap
import itertools
import collections
import six
import tarjan
import jsonschema
import claviger.authorized_keys
class ConfigError(Exception):
pass
ParsedServerKey = collections.namedtuple('ParsedServerKey',
('hostname', 'user', 'port', 'abstract'))
l = logging.getLogger(__name__)
# Schema for the configuration file.
_SCHEMA = None
def get_schema():
global _SCHEMA
if not _SCHEMA:
l.debug('loading scheme ...')
with open(os.path.join(os.path.dirname(os.path.realpath(__file__)),
'config.schema.yml')) as f:
_SCHEMA = yaml.load(f)
l.debug(' ... done!')
return _SCHEMA
class ConfigurationError(Exception):
pass
def parse_server_key(key):
""" Converts a server key like (root@host:1234 or just host)
to a triplet (user, port, hostname, abstract) """
port = None
user = None
abstract = False
hostname = None
if key.startswith('$'):
abstract = True
else:
hostname = key
if ':' in hostname:
hostname, _port = hostname.rsplit(':', 1)
port = int(_port)
if '@' in hostname:
user, hostname = hostname.split('@', 1)
return ParsedServerKey(user=user, port=port, hostname=hostname,
abstract=abstract)
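# Illustrative examples (added; not part of the original source):
#
#     parse_server_key("root@example.org:2222")
#     # -> ParsedServerKey(hostname='example.org', user='root', port=2222,
#     #                    abstract=False)
#     parse_server_key("$default")
#     # -> ParsedServerKey(hostname=None, user=None, port=None, abstract=True)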
def load(path):
""" Loads the configuration file.
A lot of the work is done by YAML. We validate the easy bits with
a JSON schema. The rest by hand. """
# TODO Cache schema and configuration file
l.debug('loading configuration file ...')
with open(path) as f:
cfg = yaml.load(f)
if not isinstance(cfg, dict):
raise ConfigurationError('Configuration file is empty')
l.debug(' - checking schema')
# First small fixes which the schema can't handle
cfg.setdefault('servers', {})
cfg['servers'].setdefault('$default', {})
for key in cfg['servers']:
if cfg['servers'][key] is None:
cfg['servers'][key] = dict()
# Now check the schema
jsonschema.validate(cfg, get_schema())
# TODO format into pretty error message
l.debug(' - processing keys')
new_keys = {}
cfg.setdefault('keys', {})
for key_name, key in six.iteritems(cfg['keys']):
# TODO handle error
entry = claviger.authorized_keys.Entry.parse(key)
new_key = {'key': entry.key,
'options': entry.options,
'comment': entry.comment,
'keytype': entry.keytype}
new_keys[key_name] = new_key
cfg['keys'] = new_keys
l.debug(' - processing server stanza short-hands')
new_servers = {}
for server_key, server in six.iteritems(cfg['servers']):
parsed_server_key = parse_server_key(server_key)
server.setdefault('name', server_key)
server_name = server['name']
server.setdefault('port', parsed_server_key.port)
server.setdefault('user', parsed_server_key.user)
server.setdefault('hostname', parsed_server_key.hostname)
server.setdefault('ssh_user', server['user'])
server.setdefault('present', [])
server.setdefault('absent', [])
server.setdefault('allow', [])
server.setdefault('keepOtherKeys')
server.setdefault('like', '$default' if server_key != '$default'
else None)
server.setdefault('abstract', parsed_server_key.abstract)
prabsent = frozenset(server['present']) & frozenset(server['absent'])
if prabsent:
raise ConfigurationError(
"Keys {0} are required to be both present and absent on {1}"
.format(tuple(prabsent), server_name))
ablow = frozenset(server['allow']) & frozenset(server['absent'])
if ablow:
raise ConfigurationError(
"Keys {0} are listed allowed and absent on {1}"
.format(tuple(ablow), server_name))
for key_name in itertools.chain(server['present'], server['absent'],
server['allow']):
            if key_name not in cfg['keys']:
                raise ConfigurationError(
                    "Key {0} (on {1}) does not exist".format(
                        key_name, server_name))
if server_name in new_servers:
raise ConfigurationError(
"Duplicate server name {0}".format(server_name))
new_servers[server_name] = server
cfg['servers'] = new_servers
l.debug(' - resolving server stanza inheritance')
    # create dependency graph and use Tarjan's algorithm to find a possible
# order to evaluate the server stanzas.
server_dg = {server_name: [server['like']] if server['like'] else []
for server_name, server in six.iteritems(cfg['servers'])}
for server_cycle_names in tarjan.tarjan(server_dg):
if len(server_cycle_names) != 1:
raise ConfigurationError(
"There is a cyclic dependacy among the servers {0}".format(
server_cycle_names))
target_server = cfg['servers'][server_cycle_names[0]]
if not target_server['like']:
continue
        if target_server['like'] not in cfg['servers']:
            raise ConfigurationError(
                "Server {0} is `like` unknown server {1}".format(
                    server_cycle_names[0], target_server['like']))
source_server = cfg['servers'][target_server['like']]
# First the simple attributes
for attr in ('port', 'user', 'hostname', 'ssh_user',
'keepOtherKeys'):
if attr in source_server:
if target_server[attr] is None:
target_server[attr] = source_server[attr]
# Now, the present/absent/allow lists
for key in source_server['present']:
if key in target_server['absent']:
continue
if key not in target_server['present']:
target_server['present'].append(key)
for key in source_server['absent']:
if (key in target_server['present']
or key in target_server['allow']):
continue
if key not in target_server['absent']:
target_server['absent'].append(key)
for key in source_server['allow']:
if key in target_server['absent']:
continue
if key not in target_server['allow']:
target_server['allow'].append(key)
l.debug(' - setting defaults on server stanzas')
for server in six.itervalues(cfg['servers']):
for attr, dflt in (('port', 22),
('user', 'root'),
('keepOtherKeys', True)):
if server[attr] is None:
server[attr] = dflt
l.debug(' ... done')
return cfg
| gpl-3.0 | -7,095,990,264,709,830,000 | 36.205405 | 79 | 0.568212 | false |
Micronaet/micronaet-bom | order_bom/report/bom_status_parser.py | 1 | 4509 | #!/usr/bin/python
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2010-2012 Associazione OpenERP Italia
# (<http://www.openerp-italia.org>).
# Copyright(c)2008-2010 SIA "KN dati".(http://kndati.lv) All Rights Reserved.
# General contacts <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import os
import sys
import logging
import erppeek
import pickle
from datetime import datetime
from openerp.report import report_sxw
from openerp.report.report_sxw import rml_parse
from openerp.tools.translate import _
from openerp.tools import (DEFAULT_SERVER_DATE_FORMAT,
DEFAULT_SERVER_DATETIME_FORMAT,
DATETIME_FORMATS_MAP,
float_compare)
_logger = logging.getLogger(__name__)
class Parser(report_sxw.rml_parse):
# Constructor:
def __init__(self, cr, uid, name, context):
super(Parser, self).__init__(cr, uid, name, context)
self.localcontext.update({
'load_bom': self.load_bom,
'get_filter': self.get_filter,
})
# Method
def get_filter(self, data):
if data is None:
data = {}
start_code = data.get('start_code', '')
only = data.get('only', 'all')
from_order = data.get('from_order')
description = _('Line: %s') % only
if start_code:
            description += _('Code starts with: %s ') % start_code
if from_order:
description += _('From open order ')
if not description:
            description = _('All products ')
return description
def load_bom(self, data):
        ''' Master function for generating data
'''
if data is None:
data = {}
cr = self.cr
uid = self.uid
context = {}
product_pool = self.pool.get('product.product')
start_code = data.get('start_code', '') # mandatory field
only = data.get('only', 'all')
from_order = data.get('from_order')
product_ids = product_pool.search(cr, uid, [
('default_code', '=ilike', '%s%%' % start_code),
], context=context)
# Intersect with order for login AND operation
if from_order:
sol_product_ids = self.pool.get(
'res.company').mrp_domain_sale_order_line_ids(
cr, uid, context=context)
product_ids = list(
sol_product_ids.intersection(
set(product_ids)))
res = []
for item in product_pool.browse(cr, uid, product_ids, context=context):
record = (item, [])
double_check = [] # save component id
for component in item.dynamic_bom_line_ids:
placeholder = component.product_id.bom_placeholder
# Test if yet present:
if component.product_id.id in double_check:
double = True
else:
double = False
double_check.append(component.product_id.id)
# Consider ph error, double error and override if required:
if not placeholder and not double and only=='override' and not\
component.dynamic_mask:
continue
# If only error jump placeholder and double both false
if only=='error' and not placeholder and not double:
continue
record[1].append((component, double, placeholder))
res.append(record)
return res
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 4,114,346,447,587,470,300 | 35.362903 | 79 | 0.550676 | false |
cloudify-cosmo/softlayer-python | SoftLayer/managers/ssl.py | 1 | 2128 | """
SoftLayer.ssl
~~~~~~~~~~~~~
SSL Manager/helpers
:license: MIT, see LICENSE for more details.
"""
class SSLManager(object):
"""Manages SSL certificates.
:param SoftLayer.API.Client client: an API client instance
"""
def __init__(self, client):
self.client = client
self.ssl = self.client['Security_Certificate']
def list_certs(self, method='all'):
"""List all certificates.
:param string method: The type of certificates to list. Options are
'all', 'expired', and 'valid'.
:returns: A list of dictionaries representing the requested SSL certs.
"""
ssl = self.client['Account']
methods = {
'all': 'getSecurityCertificates',
'expired': 'getExpiredSecurityCertificates',
'valid': 'getValidSecurityCertificates'
}
mask = "mask[id, commonName, validityDays, notes]"
func = getattr(ssl, methods[method])
return func(mask=mask)
def add_certificate(self, certificate):
"""Creates a new certificate.
:param dict certificate: A dictionary representing the parts of the
certificate. See SLDN for more information.
"""
return self.ssl.createObject(certificate)
def remove_certificate(self, cert_id):
"""Removes a certificate.
:param integer cert_id: a certificate ID to remove
"""
return self.ssl.deleteObject(id=cert_id)
def edit_certificate(self, certificate):
"""Updates a certificate with the included options.
The provided dict must include an 'id' key and value corresponding to
the certificate ID that should be updated.
:param dict certificate: the certificate to update.
"""
return self.ssl.editObject(certificate, id=certificate['id'])
def get_certificate(self, cert_id):
"""Gets a certificate with the ID specified.
:param integer cert_id: the certificate ID to retrieve
"""
return self.ssl.getObject(id=cert_id)
| mit | 1,883,408,280,363,804,700 | 28.150685 | 78 | 0.609492 | false |
q2apro/graph-padowan | Plugins/Statistics.py | 1 | 2105 | # Example plugin for Graph for showing statistics information for a point series.
# The plugin creates an action and adds it to the main menu and the context menu of the function list.
# The plugin hooks into the function list. Every time a new element is selected, the action is updated to be enabled if the element is a point series.
# At the same time the visibility of the context menu item is updated, so it only is visible if enabled.
# When the action is executed, a dialog form is shown with statistics data calculated from the points in the point series.
import Graph
import vcl
import Gui
def Execute(Sender):
import math
yList = sorted(p[1] for p in Graph.Selected.Points)
N = len(yList)
Sum = math.fsum(yList)
Mean = Sum / N
Form = Gui.SimpleDialog(Caption="Statistics", Height=220, ShowCancel=False)
Lines = "Statistics for " + Graph.Selected.LegendText + ":"
Lines += "\n Mean:\t\t" + format(Mean, ".8g")
Lines += "\n Sum:\t\t" + str(Sum)
Lines += "\n Median:\t\t" + format(yList[N//2] if N % 2 else (yList[N//2-1] + yList[N//2]) / 2, ".8g")
Lines += "\n Standard deviation: " + format( math.sqrt(math.fsum([(y-Mean)**2 for y in yList]) / N), ".8g")
Lines += "\n 1st quartile:\t" + str(yList[N//4] if (N//2) % 2 else (yList[N//4-1] + yList[N//4]) / 2)
Lines += "\n 3rd quartile:\t" + str(yList[math.ceil(N/2)+N//4] if (N//2) % 2 else (yList[math.ceil(N/2)+N//4-1] + yList[math.ceil(N/2)+N//4]) / 2)
Lines += "\n Min:\t\t" + str(min(yList))
Lines += "\n Max:\t\t" + str(max(yList))
Memo = vcl.TMemo(None, Parent = Form.panel, ReadOnly=True, Align="alClient", Color=-16777201, WantReturns=False)
Memo.Font.Size = 10
Memo.Lines.Text = Lines
Form.ShowModal()
def OnSelect(Item):
Action.Enabled = isinstance(Item, Graph.TPointSeries)
ContextMenuItem.Visible = Action.Enabled
Action = Graph.CreateAction(Caption="Statistics", OnExecute=Execute, Hint="Statistics on a point series.")
Graph.AddActionToMainMenu(Action)
ContextMenuItem = Graph.AddActionToContextMenu(Action)
Graph.OnSelect.append(OnSelect)
| gpl-2.0 | 683,708,309,037,313,500 | 52.974359 | 151 | 0.675059 | false |
claudep/pootle | tests/views/tp.py | 1 | 10456 | # -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
from collections import OrderedDict
import pytest
from pytest_pootle.suite import view_context_test
from django.urls import reverse
from pootle_app.models import Directory
from pootle_app.models.permissions import check_permission
from pootle.core.browser import get_parent
from pootle.core.delegate import scores
from pootle.core.helpers import (
SIDEBAR_COOKIE_NAME, get_sidebar_announcements_context)
from pootle.core.url_helpers import get_previous_url, get_path_parts
from pootle.core.utils.stats import (
TOP_CONTRIBUTORS_CHUNK_SIZE, get_translation_states)
from pootle.core.views.display import ChecksDisplay
from pootle_misc.checks import (
CATEGORY_IDS, check_names,
get_qualitychecks, get_qualitycheck_schema)
from pootle.core.views.browse import StatsDisplay
from pootle_misc.forms import make_search_form
from pootle_store.models import Store
from virtualfolder.delegate import vfolders_data_view
def _test_browse_view(tp, request, response, kwargs):
assert (request.user.is_anonymous
or "announcements/projects/%s" % tp.project.code in request.session)
assert (request.user.is_anonymous
or "announcements/%s" % tp.language.code in request.session)
assert (request.user.is_anonymous
or "announcements/%s/%s" % (tp.language.code, tp.project.code)
in request.session)
ctx = response.context
kwargs["project_code"] = tp.project.code
kwargs["language_code"] = tp.language.code
resource_path = "%(dir_path)s%(filename)s" % kwargs
pootle_path = "%s%s" % (tp.pootle_path, resource_path)
if not (kwargs["dir_path"] or kwargs.get("filename")):
obj = tp.directory
elif not kwargs.get("filename"):
obj = Directory.objects.get(
pootle_path=pootle_path)
else:
obj = Store.objects.get(
pootle_path=pootle_path)
if obj.tp_path == "/":
data_obj = obj.tp
else:
data_obj = obj
stats = StatsDisplay(
data_obj,
stats=data_obj.data_tool.get_stats(user=request.user)).stats
if not kwargs.get("filename"):
vfolders = True
else:
vfolders = None
filters = {}
if vfolders:
filters['sort'] = 'priority'
checks = ChecksDisplay(obj).checks_by_category
del stats["children"]
score_data = scores.get(tp.__class__)(tp)
chunk_size = TOP_CONTRIBUTORS_CHUNK_SIZE
top_scorers = score_data.top_scorers
def scores_to_json(score):
score["user"] = score["user"].to_dict()
return score
top_scorers = score_data.display(
limit=chunk_size,
formatter=scores_to_json)
top_scorer_data = dict(
items=list(top_scorers),
has_more_items=len(score_data.top_scorers) > chunk_size)
assertions = dict(
page="browse",
object=obj,
translation_project=tp,
language=tp.language,
project=tp.project,
has_admin_access=check_permission('administrate', request),
is_store=(kwargs.get("filename") and True or False),
browser_extends="translation_projects/base.html",
pootle_path=pootle_path,
resource_path=resource_path,
resource_path_parts=get_path_parts(resource_path),
translation_states=get_translation_states(obj),
checks=checks,
top_scorers=top_scorer_data,
url_action_continue=obj.get_translate_url(
state='incomplete', **filters),
url_action_fixcritical=obj.get_critical_url(**filters),
url_action_review=obj.get_translate_url(
state='suggestions', **filters),
url_action_view_all=obj.get_translate_url(state='all'),
stats=stats,
parent=get_parent(obj))
sidebar = get_sidebar_announcements_context(
request, (tp.project, tp.language, tp))
for k in ["has_sidebar", "is_sidebar_open", "announcements"]:
assertions[k] = sidebar[k]
view_context_test(ctx, **assertions)
assert (('display_download' in ctx and ctx['display_download']) ==
(request.user.is_authenticated
and check_permission('translate', request)))
def _test_translate_view(tp, request, response, kwargs, settings):
ctx = response.context
obj = ctx["object"]
kwargs["project_code"] = tp.project.code
kwargs["language_code"] = tp.language.code
resource_path = "%(dir_path)s%(filename)s" % kwargs
request_path = "%s%s" % (tp.pootle_path, resource_path)
checks = get_qualitychecks()
schema = {sc["code"]: sc for sc in get_qualitycheck_schema()}
check_data = obj.data_tool.get_checks()
_checks = {}
for check, checkid in checks.items():
if check not in check_data:
continue
_checkid = schema[checkid]["name"]
_checks[_checkid] = _checks.get(
_checkid, dict(checks=[], title=schema[checkid]["title"]))
_checks[_checkid]["checks"].append(
dict(
code=check,
title=check_names[check],
count=check_data[check]))
_checks = OrderedDict(
(k, _checks[k])
for k in CATEGORY_IDS.keys()
if _checks.get(k))
current_vfolder_pk = ""
display_priority = False
if not kwargs["filename"]:
vf_view = vfolders_data_view.get(obj.__class__)(obj, request.user)
display_priority = vf_view.has_data
unit_api_root = "/xhr/units/"
assertions = dict(
page="translate",
translation_project=tp,
language=tp.language,
project=tp.project,
has_admin_access=check_permission('administrate', request),
ctx_path=tp.pootle_path,
pootle_path=request_path,
resource_path=resource_path,
resource_path_parts=get_path_parts(resource_path),
editor_extends="translation_projects/base.html",
checks=_checks,
previous_url=get_previous_url(request),
current_vfolder_pk=current_vfolder_pk,
display_priority=display_priority,
cantranslate=check_permission("translate", request),
cansuggest=check_permission("suggest", request),
canreview=check_permission("review", request),
search_form=make_search_form(request=request),
unit_api_root=unit_api_root,
POOTLE_MT_BACKENDS=settings.POOTLE_MT_BACKENDS,
AMAGAMA_URL=settings.AMAGAMA_URL)
view_context_test(ctx, **assertions)
@pytest.mark.pootle_vfolders
@pytest.mark.django_db
def test_views_tp(tp_views, settings):
test_type, tp, request, response, kwargs = tp_views
if test_type == "browse":
_test_browse_view(tp, request, response, kwargs)
elif test_type == "translate":
_test_translate_view(tp, request, response, kwargs, settings)
@pytest.mark.django_db
def test_view_tp_browse_sidebar_cookie(client, member):
# - ensure that when a sidebar cookie is sent the session is changed
# - ensure that the cookie is deleted
from pootle_translationproject.models import TranslationProject
tp = TranslationProject.objects.first()
args = [tp.language.code, tp.project.code]
client.login(username=member.username, password=member.password)
response = client.get(reverse("pootle-tp-browse", args=args))
assert SIDEBAR_COOKIE_NAME not in response
assert client.session.get('is_sidebar_open', True) is True
client.cookies[SIDEBAR_COOKIE_NAME] = 1
response = client.get(reverse("pootle-tp-browse", args=args))
assert SIDEBAR_COOKIE_NAME not in response
assert client.session.get('is_sidebar_open', True) is True
del client.cookies[SIDEBAR_COOKIE_NAME]
response = client.get(reverse("pootle-tp-browse", args=args))
assert SIDEBAR_COOKIE_NAME not in response
assert client.session.get('is_sidebar_open', True) is True
client.cookies[SIDEBAR_COOKIE_NAME] = 0
response = client.get(reverse("pootle-tp-browse", args=args))
assert SIDEBAR_COOKIE_NAME not in response
assert client.session.get('is_sidebar_open', True) is False
@pytest.mark.django_db
def test_view_tp_browse_sidebar_cookie_nonsense(client, member):
# - ensure that sending nonsense in a cookie does the right thing
from pootle_translationproject.models import TranslationProject
tp = TranslationProject.objects.first()
args = [tp.language.code, tp.project.code]
client.login(username=member.username, password=member.password)
client.cookies[SIDEBAR_COOKIE_NAME] = "complete jibberish"
client.get(reverse("pootle-tp-browse", args=args))
assert client.session.get('is_sidebar_open', True) is True
@pytest.mark.django_db
def test_view_tp_browse_sidebar_openness_in_anonymous_session(client):
from pootle_translationproject.models import TranslationProject
tp = TranslationProject.objects.first()
args = [tp.language.code, tp.project.code]
client.cookies[SIDEBAR_COOKIE_NAME] = 1
response = client.get(reverse("pootle-tp-browse", args=args))
session = response.wsgi_request.session
assert "announcements/projects/%s" % tp.project.code not in session
assert "announcements/%s" % tp.language.code not in session
assert (
"announcements/%s/%s" % (tp.language.code, tp.project.code)
not in session)
assert "is_sidebar_open" in session
@pytest.mark.django_db
def test_view_user_choice(client):
client.cookies["user-choice"] = "language"
response = client.get("/foo/bar/baz")
assert response.status_code == 302
assert response.get("location") == "/foo/"
assert "user-choice" not in response
client.cookies["user-choice"] = "project"
response = client.get("/foo/bar/baz")
assert response.status_code == 302
assert response.get("location") == "/projects/bar/"
assert "user-choice" not in response
client.cookies["user-choice"] = "foo"
response = client.get("/foo/bar/baz")
assert response.status_code == 404
assert "user-choice" not in response
@pytest.mark.django_db
def test_uploads_tp(revision, tp_uploads):
tp_, request_, response, kwargs_, errors = tp_uploads
assert response.status_code == 200
assert errors.keys() == response.context['upload_form'].errors.keys()
| gpl-3.0 | 8,016,228,546,327,207,000 | 37.021818 | 80 | 0.672533 | false |
09zwcbupt/ryu | ryu/tests/unit/packet/test_tcp.py | 1 | 4277 | # Copyright (C) 2012 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# vim: tabstop=4 shiftwidth=4 softtabstop=4
import unittest
import logging
import struct
import netaddr
from struct import *
from nose.tools import *
from nose.plugins.skip import Skip, SkipTest
from ryu.ofproto import ether, inet
from ryu.lib import mac
from ryu.lib.packet.ethernet import ethernet
from ryu.lib.packet.packet import Packet
from ryu.lib.packet.tcp import tcp
from ryu.lib.packet.ipv4 import ipv4
from ryu.lib.packet import packet_utils
LOG = logging.getLogger('test_tcp')
class Test_tcp(unittest.TestCase):
""" Test case for tcp
"""
src_port = 6431
dst_port = 8080
seq = 5
ack = 1
offset = 6
bits = 0b101010
window_size = 2048
csum = 12345
urgent = 128
option = '\x01\x02\x03\x04'
t = tcp(src_port, dst_port, seq, ack, offset, bits,
window_size, csum, urgent, option)
buf = pack(tcp._PACK_STR, src_port, dst_port, seq, ack,
offset << 4, bits, window_size, csum, urgent)
buf += option
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self):
eq_(self.src_port, self.t.src_port)
eq_(self.dst_port, self.t.dst_port)
eq_(self.seq, self.t.seq)
eq_(self.ack, self.t.ack)
eq_(self.offset, self.t.offset)
eq_(self.bits, self.t.bits)
eq_(self.window_size, self.t.window_size)
eq_(self.csum, self.t.csum)
eq_(self.urgent, self.t.urgent)
eq_(self.option, self.t.option)
def test_parser(self):
r1, r2 = self.t.parser(self.buf)
eq_(self.src_port, r1.src_port)
eq_(self.dst_port, r1.dst_port)
eq_(self.seq, r1.seq)
eq_(self.ack, r1.ack)
eq_(self.offset, r1.offset)
eq_(self.bits, r1.bits)
eq_(self.window_size, r1.window_size)
eq_(self.csum, r1.csum)
eq_(self.urgent, r1.urgent)
eq_(self.option, r1.option)
eq_(None, r2)
def test_serialize(self):
offset = 5
csum = 0
src_ip = int(netaddr.IPAddress('192.168.10.1'))
dst_ip = int(netaddr.IPAddress('192.168.100.1'))
prev = ipv4(4, 5, 0, 0, 0, 0, 0, 64,
inet.IPPROTO_UDP, 0, src_ip, dst_ip)
t = tcp(self.src_port, self.dst_port, self.seq, self.ack,
offset, self.bits, self.window_size, csum, self.urgent)
buf = t.serialize(bytearray(), prev)
res = struct.unpack(tcp._PACK_STR, str(buf))
eq_(res[0], self.src_port)
eq_(res[1], self.dst_port)
eq_(res[2], self.seq)
eq_(res[3], self.ack)
eq_(res[4], offset << 4)
eq_(res[5], self.bits)
eq_(res[6], self.window_size)
eq_(res[8], self.urgent)
# checksum
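        # The TCP checksum is taken over a pseudo-header (source address,
        # destination address, a zero byte, protocol number 6 and the TCP
        # segment length) followed by the segment itself, so re-summing a
        # correctly checksummed segment together with that pseudo-header
        # must yield 0, which is what the assertion below verifies.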
ph = struct.pack('!IIBBH', src_ip, dst_ip, 0, 6, offset * 4)
d = ph + buf + bytearray()
s = packet_utils.checksum(d)
eq_(0, s)
def test_serialize_option(self):
offset = 6
csum = 0
option = '\x01\x02'
src_ip = int(netaddr.IPAddress('192.168.10.1'))
dst_ip = int(netaddr.IPAddress('192.168.100.1'))
prev = ipv4(4, 5, 0, 0, 0, 0, 0, 64,
inet.IPPROTO_UDP, 0, src_ip, dst_ip)
t = tcp(self.src_port, self.dst_port, self.seq, self.ack,
offset, self.bits, self.window_size, csum, self.urgent,
option)
buf = t.serialize(bytearray(), prev)
r_option = buf[tcp._MIN_LEN:tcp._MIN_LEN + len(option)]
eq_(option, r_option)
@raises(Exception)
def test_malformed_tcp(self):
m_short_buf = self.buf[1:tcp._MIN_LEN]
tcp.parser(m_short_buf)
| apache-2.0 | -6,133,757,123,314,086,000 | 29.55 | 71 | 0.595277 | false |
gem/oq-engine | openquake/hazardlib/tests/calc/disagg_test.py | 1 | 8628 | # The Hazard Library
# Copyright (C) 2012-2021 GEM Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
import os.path
import numpy
from openquake.hazardlib.nrml import to_python
from openquake.hazardlib.calc import disagg
from openquake.hazardlib import nrml
from openquake.hazardlib.sourceconverter import SourceConverter
from openquake.hazardlib.gsim.campbell_2003 import Campbell2003
from openquake.hazardlib.geo import Point
from openquake.hazardlib.imt import PGA, SA
from openquake.hazardlib.site import Site
from openquake.hazardlib.gsim.bradley_2013 import Bradley2013
from openquake.hazardlib import sourceconverter
DATA_PATH = os.path.dirname(__file__)
class BuildDisaggDataTestCase(unittest.TestCase):
def test_magnitude_bins(self):
""" Testing build disaggregation matrix """
fname = os.path.join(DATA_PATH, 'data', 'ssm.xml')
converter = sourceconverter.SourceConverter(50., 1., 10, 0.1, 10)
groups = to_python(fname, converter)
sources = []
for g in groups:
sources += g.sources
site = Site(Point(172.63, -43.53), vs30=250, vs30measured=False,
z1pt0=330)
imt = SA(3.0)
iml = 0.25612220
gsim_by_trt = {"Active Shallow Crust": Bradley2013()}
truncation_level = 3.0
n_epsilons = 1
mag_bin_width = 0.1
dist_bin_width = 100.
coord_bin_width = 100.
# Compute the disaggregation matrix
edges, mtx = disagg.disaggregation(sources, site, imt, iml,
gsim_by_trt, truncation_level,
n_epsilons, mag_bin_width,
dist_bin_width, coord_bin_width)
tm = disagg.mag_pmf(mtx[:, :, :, :, :, 0])
numpy.testing.assert_array_less(numpy.zeros_like(tm[2:]), tm[2:])
class DigitizeLonsTestCase(unittest.TestCase):
def setUp(self):
# First test
self.lons1 = numpy.array([179.2, 179.6, 179.8, -179.9, -179.7, -179.1])
self.bins1 = numpy.array([179.0, 179.5, 180.0, -179.5, -179])
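        # These longitudes straddle the +/-180 degree antimeridian, so the bin
        # edges are deliberately non-monotonic and _digitize_lons has to handle
        # the wrap-around.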
# Second test
self.lons2 = numpy.array([90.0, 90.3, 90.5, 90.7, 91.3])
self.bins2 = numpy.array([90.0, 90.5, 91.0, 91.5])
def test1(self):
idx = disagg._digitize_lons(self.lons1, self.bins1)
expected = numpy.array([0, 1, 1, 2, 2, 3], dtype=int)
numpy.testing.assert_equal(idx, expected)
def test2(self):
idx = disagg._digitize_lons(self.lons2, self.bins2)
expected = numpy.array([0, 0, 1, 1, 2], dtype=int)
numpy.testing.assert_equal(idx, expected)
class DisaggregateTestCase(unittest.TestCase):
def setUp(self):
d = os.path.dirname(os.path.dirname(__file__))
source_model = os.path.join(d, 'source_model/multi-point-source.xml')
[self.sources] = nrml.to_python(source_model, SourceConverter(
investigation_time=50., rupture_mesh_spacing=2.))
self.site = Site(Point(0.1, 0.1), 800, z1pt0=100., z2pt5=1.)
self.imt = PGA()
self.iml = 0.1
self.truncation_level = 1
self.trt = 'Stable Continental Crust'
gsim = Campbell2003()
self.gsims = {self.trt: gsim}
def test(self):
# a test sensitive to gsim.minimum_distance
bin_edges, matrix = disagg.disaggregation(
self.sources, self.site, self.imt, self.iml, self.gsims,
self.truncation_level, n_epsilons=3,
mag_bin_width=3, dist_bin_width=4, coord_bin_width=2.4)
mag_bins, dist_bins, lon_bins, lat_bins, eps_bins, trt_bins = bin_edges
aaae = numpy.testing.assert_array_almost_equal
aaae(mag_bins, [3, 6, 9])
aaae(dist_bins, [4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52,
56, 60, 64, 68, 72, 76, 80, 84, 88, 92, 96, 100, 104,
108, 112])
aaae(lon_bins, [-0.904195, 0.1, 1.104195])
aaae(lat_bins, [-0.904194, 0.1, 1.104194])
aaae(eps_bins, [-1, -0.3333333, 0.3333333, 1])
self.assertEqual(trt_bins, [self.trt])
aaae(matrix.shape, (2, 27, 2, 2, 3, 1))
aaae(matrix.sum(), 6.14179818e-11)
class PMFExtractorsTestCase(unittest.TestCase):
def setUp(self):
super().setUp()
self.aae = numpy.testing.assert_almost_equal
# test matrix is not normalized, but that's fine for test
self.matrix = numpy.array(
[ # magnitude
[ # distance
[ # longitude
[ # latitude
[ # epsilon
0.10, 0.11, 0.12],
[
0.00, 0.10, 0.20]],
[
[
0.74, 0.20, 0.95],
[
0.52, 0.49, 0.21]]],
[
[
[
0.16, 0.61, 0.53],
[
0.95, 0.34, 0.31]],
[
[
0.5, 0.61, 0.7],
[
0.40, 0.84, 0.24]]]],
[
[
[
[
0.40, 0.32, 0.06],
[
0.47, 0.93, 0.70]],
[
[
0.03, 0.94, 0.12],
[
0.93, 0.13, 0.23]]],
[
[
[
0.11, 0.85, 0.85],
[
0.67, 0.84, 0.41]],
[
[
0.39, 0.88, 0.20],
[
0.14, 0.61, 0.67]]]]])
def test_mag(self):
pmf = disagg.mag_pmf(self.matrix)
self.aae(pmf, [1.0, 1.0])
def test_dist(self):
pmf = disagg.dist_pmf(self.matrix)
self.aae(pmf, [1.0, 1.0])
def test_trt(self):
pmf = disagg.trt_pmf(self.matrix[None])
# NB: self.matrix.shape -> (2, 2, 2, 2, 3)
# self.matrix[None].shape -> (1, 2, 2, 2, 2, 3)
self.aae(pmf, [1.0])
def test_mag_dist(self):
pmf = disagg.mag_dist_pmf(self.matrix)
self.aae(pmf, [[0.9989792, 0.999985], [0.9999897, 0.999996]])
def test_mag_dist_eps(self):
pmf = disagg.mag_dist_eps_pmf(self.matrix)
self.aae(pmf, [[[0.88768, 0.673192, 0.972192],
[0.9874, 0.98393824, 0.9260596]],
[[0.9784078, 0.99751528, 0.8089168],
[0.84592498, 0.9988768, 0.976636]]])
def test_lon_Lat(self):
pmf = disagg.lon_lat_pmf(self.matrix)
self.aae(pmf, [[0.9991665, 0.9999943],
[0.9999982, 0.9999268]])
def test_mag_lon_lat(self):
pmf = disagg.mag_lon_lat_pmf(self.matrix)
self.aae(pmf, [[[0.89146822, 0.9836056],
[0.9993916, 0.98589012]],
[[0.99232001, 0.99965328],
[0.99700079, 0.99480979]]])
def test_mean(self):
# for doc purposes: the mean of PMFs is not the PMF of the mean
numpy.random.seed(42)
matrix = numpy.random.random(self.matrix.shape)
pmf1 = disagg.mag_pmf(self.matrix)
pmf2 = disagg.mag_pmf(matrix)
mean = (matrix + self.matrix) / 2
numpy.testing.assert_allclose(
(pmf1 + pmf2) / 2, [1, 1])
numpy.testing.assert_allclose(
disagg.mag_pmf(mean), [0.99999944, 0.99999999])
| agpl-3.0 | -6,164,884,161,192,352,000 | 38.218182 | 79 | 0.5 | false |
bigfatnoob/optima | measures/hypervolume.py | 1 | 9397 | # Original Author : Simon Wessing
# From : TU Dortmund University
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function,division
import sys, os
sys.path.append(os.path.abspath("."))
__author__ = 'george'
def gt(a, b): return a>b
def lt(a, b): return a<b
def gte(a, b): return a>=b
def lte(a, b): return a<=b
class HyperVolume:
"""
Hypervolume computation based on variant 3 of the algorithm in the paper:
C. M. Fonseca, L. Paquete, and M. Lopez-Ibanez. An improved dimension-sweep
algorithm for the hypervolume indicator. In IEEE Congress on Evolutionary
Computation, pages 1157-1163, Vancouver, Canada, July 2006.
Minimization is implicitly assumed here!
"""
def __init__(self, reference):
self.reference = reference
self.list = None
def compute(self, front):
"""
Returns the hyper-volume that is dominated by a non-dominated front.
Before the HV computation, front and reference point are translated, so
that the reference point is [0, ..., 0].
    :param front: iterable of objective vectors forming the non-dominated front
:return: hyper-volume value
"""
def weak_dominate(one, two):
"""
Check if one dominates two
:param one: First set of objectives
:param two: Second set of objectives
:return:
"""
for i in xrange(len(one)):
if one[i] > two[i]:
return False
return True
relevants = []
reference = self.reference
d = len(reference)
for point in front:
if weak_dominate(point, reference): relevants.append(point)
if any(reference):
for j in xrange(len(relevants)):
relevants[j] = [relevants[j][i] - reference[i] for i in xrange(d)]
self.pre_process(relevants)
bounds = [-1.0e308]*d
return self.recurse(d-1, len(relevants), bounds)
def recurse(self, d, length, bounds):
"""
Recursive call for hyper volume calculation.
In contrast to the paper, the code assumes that the reference point
is [0, ..., 0]. This allows the avoidance of a few operations.
:param d: Dimension Index
:param length: Number of relevant points
:param bounds: Bounding Values
:return: hyper-volume
"""
hvol = 0.0
sentinel = self.list.sentinel
if length == 0:
return hvol
elif d == 0:
# Single Dimension
return -sentinel.next[0].value[0]
elif d == 1:
# 2 dimensional problem
q = sentinel.next[1]
h = q.value[0]
p = q.next[1]
while p is not sentinel:
p_value = p.value
hvol += h * (q.value[1] - p_value[1])
if p_value[0] < h:
h = p_value[0]
q = p
p = q.next[1]
hvol += h * q.value[1]
return hvol
else:
remove = MultiList.remove
reinsert = MultiList.reinsert
recurse = self.recurse
p = sentinel
q = p.prev[d]
while q.value is not None:
if q.ignore < d:
q.ignore = 0
q = q.prev[d]
q = p.prev[d]
while length > 1 and (q.value[d] > bounds[d] or q.prev[d].value[d] >= bounds[d]):
p = q
remove(p, d, bounds)
q = p.prev[d]
length -= 1
q_area = q.area
q_value = q.value
q_prev_d = q.prev[d]
if length > 1:
hvol = q_prev_d.volume[d] + q_prev_d.area[d] * (q_value[d] - q_prev_d.value[d])
else:
q_area[0] = 1
q_area[1:d+1] = [q_area[i] * -q_value[i] for i in xrange(d)]
q.volume[d] = hvol
if q.ignore >= d:
q_area[d] = q_prev_d.area[d]
else:
q_area[d] = recurse(d-1, length, bounds)
if q_area[d] < q_prev_d.area[d]:
q.ignore = d
while p is not sentinel:
p_value_d = p.value[d]
hvol += q.area[d] * (p_value_d - q.value[d])
bounds[d] = p_value_d
reinsert(p, d, bounds)
length += 1
q = p
p = p.next[d]
q.volume[d] = hvol
if q.ignore >= d:
q.area[d] = q.prev[d].area[d]
else:
q.area[d] = recurse(d-1, length, bounds)
if q.area[d] <= q.prev[d].area[d]:
q.ignore = d
      hvol -= q.area[d] * q.value[d]
return hvol
def pre_process(self, front):
d = len(self.reference)
multi_list = MultiList(d)
nodes = [MultiList.Node(d, point) for point in front]
for i in xrange(d):
HyperVolume.dimension_sort(nodes, i)
multi_list.extend(nodes, i)
self.list = multi_list
@staticmethod
def dimension_sort(nodes, i):
decorated = [(node.value[i], node) for node in nodes]
decorated.sort()
nodes[:] = [node for (_, node) in decorated]
@staticmethod
def get_reference_point(problem, points):
reference = [-sys.maxint if obj.to_minimize else sys.maxint for obj in problem.objectives]
for point in points:
for i, obj in enumerate(problem.objectives):
if obj.to_minimize:
if point[i] > reference[i]:
reference[i] = point[i]
else:
if point[i] < reference[i]:
reference[i] = point[i]
for i, obj in enumerate(problem.objectives):
if obj.to_minimize:
reference[i] += 1
else:
reference[i] -= 1
return reference
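# Illustration of get_reference_point with made-up numbers: for two objectives
# that are both minimised and points [[1.0, 4.0], [3.0, 2.0]], the worst values
# seen are 3.0 and 4.0, so the returned reference point is [4.0, 5.0], one unit
# beyond the worst observed value on every objective.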
class MultiList:
"""A special data structure needed by FonsecaHyperVolume.
It consists of several doubly linked lists that share common nodes. So,
every node has multiple predecessors and successors, one in every list.
"""
class Node:
def __init__(self, count, value=None):
self.value = value
self.next = [None] * count
self.prev = [None] * count
self.ignore = 0
self.area = [0.0] * count
self.volume = [0.0] * count
def __str__(self):
return str(self.value)
def __init__(self, count):
"""
Build 'count' number of doubly linked lists.
:param count: Number of doubly linked lists
:return:
"""
self.count = count
self.sentinel = MultiList.Node(count)
self.sentinel.next = [self.sentinel] * count
self.sentinel.prev = [self.sentinel] * count
def __str__(self):
strings = []
for i in xrange(self.count):
current_list = []
node = self.sentinel.next[i]
while node != self.sentinel:
current_list.append(str(node))
node = node.next[i]
strings.append(str(current_list))
string_repr = ""
for string in strings:
string_repr += string + "\n"
return string_repr
def __len__(self):
"""
Returns the number of lists that are included in this MultiList.
"""
return self.count
def size(self, index):
"""
Returns the length of the i-th list.
"""
length = 0
sentinel = self.sentinel
node = sentinel.next[index]
while node != sentinel:
length += 1
node = node.next[index]
return length
def append(self, node, index):
"""
Appends a node to the end of the list at the given index.
:param node: Node to be appended
:param index: Index of list to be appended into
"""
penultimate = self.sentinel.prev[index]
node.next[index] = self.sentinel
node.prev[index] = penultimate
self.sentinel.prev[index] = node
penultimate.next[index] = node
def extend(self, nodes, index):
"""
Extend the list at the given index with nodes
:param nodes: Nodes to be appended
:param index: Index of list to be extended
"""
sentinel = self.sentinel
for node in nodes:
penultimate = sentinel.prev[index]
node.next[index] = sentinel
node.prev[index] = penultimate
sentinel.prev[index] = node
penultimate.next[index]= node
@staticmethod
def remove(node, index, bounds):
"""
    Removes and returns 'node' from all lists in [0, 'index'[
:param node: Node to be removed
:param index: Index to be removed till
:param bounds:
:return: Removed node
"""
for i in xrange(index):
pred = node.prev[i]
succ = node.next[i]
pred.next[i] = succ
succ.prev[i] = pred
if bounds[i] > node.value[i]:
bounds[i] = node.value[i]
return node
@staticmethod
def reinsert(node, index, bounds):
"""
Inserts 'node' at the position it had in all lists in [0, 'index'[
before it was removed. This method assumes that the next and previous
nodes of the node that is reinserted are in the list.
:param node: Node to be reinserted
:param index: Index to be reinserted at
:param bounds:
"""
for i in xrange(index):
node.prev[i].next[i] = node
node.next[i].prev[i] = node
if bounds[i] > node.value[i]:
bounds[i] = node.value[i]
def _test():
reference_point = [2,2,2]
hv = HyperVolume(reference_point)
front = [[1,0,1], [0,1,0]]
print(hv.compute(front))
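  # Expected output is 5.0 by inclusion-exclusion: the box dominated by
  # [1,0,1] has volume 1*2*1 = 2, the box dominated by [0,1,0] has volume
  # 2*1*2 = 4, and their overlap (from [1,1,1] to the reference point) is 1,
  # so the union is 2 + 4 - 1 = 5.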
if __name__ == "__main__":
_test()
| mit | -6,270,421,406,375,879,000 | 28.550314 | 94 | 0.601362 | false |
Dih5/physdata | tests/TestStar.py | 1 | 1839 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
TestStar.py: Tests for the `star` module.
"""
import unittest
from physdata import star
class TestStar(unittest.TestCase):
def test_fetch_star_type(self):
# Test type of return
for data in [star.fetch_estar(13), star.fetch_astar(13), star.fetch_pstar(13)]:
self.assertTrue(type(data) is list)
for i in data[0]:
self.assertTrue(type(i) is float)
def test_fetch_star_input(self):
# Test formats for argument
for f in [star.fetch_estar, star.fetch_astar, star.fetch_pstar]:
self.assertEqual(f(1), f("1"))
self.assertEqual(f(13), f("13"))
self.assertEqual(f(101), f("101"))
def test_fetch_star_density(self):
# Test density scaling
for f in [star.fetch_estar, star.fetch_astar, star.fetch_pstar]:
no_density = f(13)
density_2 = f(13, density=2.0)
auto_density = f(13, density=True)
for x, y, z in zip(no_density, density_2, auto_density):
# Energy is not scaled
self.assertAlmostEqual(x[0], y[0])
self.assertAlmostEqual(x[0], z[0])
# Last value is not scaled
self.assertAlmostEqual(x[-1], y[-1])
self.assertAlmostEqual(x[-1], z[-1])
# Stopping power (of a certain kind) is scaled
self.assertAlmostEqual(x[1] * 2, y[1])
self.assertAlmostEqual(x[1] * 2.6989, z[1]) # The Aluminium density in the website
# Ranges are scaled by the inverse
self.assertAlmostEqual(x[4] / 2, y[4])
self.assertAlmostEqual(x[4] / 2.6989, z[4]) # The Aluminium density in the website
if __name__ == "__main__":
unittest.main()
| lgpl-3.0 | 6,090,021,977,719,950,000 | 35.78 | 99 | 0.557368 | false |
flyhigher139/mayblog | blog/main/views.py | 1 | 25917 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.http import HttpResponse, Http404, HttpResponseForbidden
from django.views.generic import View
from django.shortcuts import render, redirect
from django.core.urlresolvers import reverse
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
# from django.utils.encoding import smart_text
from django.db.models import Count, Q
from django.contrib.auth.models import User
from guardian.shortcuts import assign_perm, get_perms
from guardian.core import ObjectPermissionChecker
from guardian.decorators import permission_required
# import markdown2
from . import models, forms, misc
# Create your views here.
PER_PAGE = settings.MAY_BLOG['PER_PAGE']
PER_PAGE_ADMIN = settings.MAY_BLOG['PER_PAGE_ADMIN']
def get_site_meta():
seo = {}
try:
record = models.BlogMeta.objects.get(key='blog_name')
seo['title'] = record.value
except models.BlogMeta.DoesNotExist:
pass
try:
record = models.BlogMeta.objects.get(key='blog_desc')
seo['desc'] = record.value
except models.BlogMeta.DoesNotExist:
pass
try:
record = models.BlogMeta.objects.get(key='owner')
seo['author'] = record.value
except models.BlogMeta.DoesNotExist:
pass
try:
record = models.BlogMeta.objects.get(key='keywords')
seo['keywords'] = record.value
except models.BlogMeta.DoesNotExist:
pass
try:
record = models.BlogMeta.objects.get(key='blog_subtitle')
seo['subtitle'] = record.value
except models.BlogMeta.DoesNotExist:
pass
try:
record = models.BlogMeta.objects.get(key='google_verify')
seo['google_verify'] = record.value
except models.BlogMeta.DoesNotExist:
pass
try:
record = models.BlogMeta.objects.get(key='baidu_verify')
seo['baidu_verify'] = record.value
except models.BlogMeta.DoesNotExist:
pass
return seo
def get_user_info(user):
try:
data = {
'username': user.username,
'display_name': user.account.display_name,
'biography': user.account.biography,
'homepage': user.account.homepage,
'weixin': user.account.weixin,
'douban': user.account.douban,
'twitter': user.account.twitter,
'github': user.account.github,
'weibo': user.account.weibo,
}
except:
data = None
return data
class Index(View):
template_name = 'main/index.html'
def get(self, request):
data = {}
tag = request.GET.get('tag')
category = request.GET.get('category')
keywords = request.GET.get('keywords')
try:
tag = int(tag) if tag else 0
category = int(category) if category else 0
except:
raise Http404
if tag:
posts = filter_posts_by_tag(tag)
elif category:
posts = filter_posts_by_category(category)
else:
posts = models.Post.objects.all()
posts = posts.filter(is_draft=False).order_by('-id')
if keywords:
posts = posts.filter(Q(title__contains=keywords) | Q(raw__contains=keywords))
data['keywords'] = keywords
post_pages = models.Page.objects.filter(is_draft=False)
paginator = Paginator(posts, PER_PAGE)
page = request.GET.get('page')
try:
posts = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
posts = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
posts = paginator.page(paginator.num_pages)
tags = models.Tag.objects.all()
catagories = models.Category.objects.annotate(num_posts=Count('post'))
data['posts'] = posts
data['pages'] = post_pages
data['tags'] = tags
data['catagories'] = catagories
data['category_id'] = category
data['tag_id'] = tag
data['seo'] = get_site_meta()
return render(request, self.template_name, data)
class Post(View):
template_name = 'main/post.html'
def get(self, request, pk):
try:
pk = int(pk)
post = models.Post.objects.get(pk=pk)
except models.Post.DoesNotExist:
raise Http404
data = {'post':post}
tags = post.tags.all()
data['tags'] = tags
comment_type = settings.MAY_BLOG['COMMENT_TYPE']
comment_type_id = settings.MAY_BLOG['COMMENT_OPT'].get(comment_type)
if not comment_type_id:
comment_script = 'no comment script for {0}'.format(comment_type)
else:
comment_func = misc.get_comment_func(comment_type)
# url_partial = [request.META['SERVER_NAME'], ':', request.META['SERVER_PORT'], request.path]
# post_url = ''.join(url_partial)
post_url = request.build_absolute_uri()
comment_script = comment_func(request, comment_type_id, post.id, post.title, post_url)
data['comment_script'] = comment_script
data['jiathis_share'] = misc.jiathis_share(request)
data['allow_donate'] = settings.MAY_BLOG['ALLOW_DONATE']
seo = {
'title': post.title,
'desc': post.abstract,
'author': post.author.username,
'keywords': ', '.join([tag.name for tag in tags])
}
data['seo'] = seo
post_pages = models.Page.objects.filter(is_draft=False)
data['pages'] = post_pages
return render(request, self.template_name, data)
class Page(View):
template_name = 'main/page.html'
def get(self, request, pk):
try:
pk = int(pk)
page = models.Page.objects.get(pk=pk)
except models.Page.DoesNotExist:
raise Http404
data = {'page':page}
data['seo'] = get_site_meta()
post_pages = models.Page.objects.filter(is_draft=False)
data['pages'] = post_pages
return render(request, self.template_name, data)
class Archive(View):
template_name = 'main/archive.html'
def get(self, request):
data = {}
data['seo'] = get_site_meta()
posts = models.Post.objects.filter(is_draft=False)
paginator = Paginator(posts, PER_PAGE)
page = request.GET.get('page')
try:
posts = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
posts = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
posts = paginator.page(paginator.num_pages)
data['posts'] = posts
post_pages = models.Page.objects.filter(is_draft=False)
data['pages'] = post_pages
return render(request, self.template_name, data)
class Author(View):
template_name = 'main/author.html'
def get(self, request, pk):
data = {}
data['seo'] = get_site_meta()
try:
user = User.objects.get(pk=pk)
except User.DoesNotExist:
raise Http404
data['user'] = user
data['account_info'] = user.account
posts = models.Post.objects.filter(is_draft=False, author=user)
paginator = Paginator(posts, PER_PAGE)
page = request.GET.get('page')
try:
posts = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
posts = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
posts = paginator.page(paginator.num_pages)
data['posts'] = posts
return render(request, self.template_name, data)
class AdminIndex(View):
template_name = 'blog_admin/index.html'
@method_decorator(login_required)
def get(self, request):
data = {'site_info':get_site_meta(), 'account_info':get_user_info(request.user)}
return render(request, self.template_name, data)
class AdminBlogMeta(View):
template_name = 'main/simple_form.html'
@method_decorator(permission_required('main.add_blogmeta', accept_global_perms=True))
def get(self, request, form=None):
if not form:
form = forms.BlogMetaForm(initial=get_site_meta())
data = {'form':form}
return render(request, self.template_name, data)
@method_decorator(permission_required('main.add_blogmeta', accept_global_perms=True))
def post(self, request):
form = forms.BlogMetaForm(request.POST)
if form.is_valid():
record = models.BlogMeta.objects.get(key='blog_name')
record.value = form.cleaned_data['title']
record.save()
record = models.BlogMeta.objects.get(key='blog_desc')
record.value = form.cleaned_data['desc']
record.save()
record = models.BlogMeta.objects.get(key='owner')
record.value = form.cleaned_data['author']
record.save()
record = models.BlogMeta.objects.get(key='keywords')
record.value = form.cleaned_data['keywords']
record.save()
record = models.BlogMeta.objects.get(key='blog_subtitle')
record.value = form.cleaned_data['subtitle']
record.save()
record = models.BlogMeta.objects.get(key='google_verify')
record.value = form.cleaned_data['google_verify']
record.save()
record = models.BlogMeta.objects.get(key='baidu_verify')
record.value = form.cleaned_data['baidu_verify']
record.save()
msg = 'Succeed to update blog meta'
messages.add_message(request, messages.SUCCESS, msg)
url = reverse('main:admin_index')
return redirect(url)
return self.get(request, form)
class AdminPosts(View):
template_name_posts = 'blog_admin/posts.html'
template_name_pages = 'blog_admin/pages.html'
@method_decorator(permission_required('main.add_post', accept_global_perms=True))
def get(self, request, is_blog_page=False):
data = {}
draft = request.GET.get('draft')
if draft and draft.lower()=='true':
flag = True
else:
flag = False
if is_blog_page:
if not request.user.has_perm('main.change_page'):
return HttpResponseForbidden()
posts = models.Page.objects.all()
template_name = self.template_name_pages
else:
posts = models.Post.objects.all()
if not request.user.has_perm('main.change_post'):
posts = posts.filter(author=request.user)
template_name = self.template_name_posts
posts = posts.filter(is_draft=flag)
key = request.GET.get('key')
if key:
posts = posts.filter(Q(title__icontains=key)|Q(raw__icontains=key))
posts = posts.order_by('-update_time')
paginator = Paginator(posts, PER_PAGE_ADMIN)
page = request.GET.get('page')
try:
posts = paginator.page(page)
except PageNotAnInteger:
posts = paginator.page(1)
except EmptyPage:
posts = paginator.page(paginator.num_pages)
data['posts'] = posts
data['is_blog_page'] = is_blog_page
data['allow_search'] = True
return render(request, template_name, data)
class AdminPost(View):
template_name = 'blog_admin/post.html'
# @method_decorator(login_required)
@method_decorator(permission_required('main.add_post', accept_global_perms=True))
def get(self, request, pk=0, form=None):
data = {}
form_data = {}
if pk:
try:
pk = int(pk)
post = models.Post.objects.get(pk=pk)
#############################
# It works!
#############################
# if not 'change_post' in get_perms(request.user, post):
# raise HttpResponseForbidden()
#############################
# It works, too!
#############################
checker = ObjectPermissionChecker(request.user)
if not request.user.has_perm('main.change_post') \
and not checker.has_perm('change_post', post):
return HttpResponse('Forbidden')
form_data['title'] = post.title
form_data['content'] = post.raw
form_data['abstract'] = post.abstract
form_data['author_id'] = post.author.id
data['edit_flag'] = True
except models.Post.DoesNotExist:
raise Http404
else:
post = None
if not form:
form = forms.NewPost(initial=form_data)
data['form'] = form
data['posted_tags'] = [tag for tag in post.tags.all()] if post else None
data['posted_category'] = post.category if post else None
tags = models.Tag.objects.all()
data['tags'] = tags
catagories = models.Category.objects.all()
data['catagories'] = catagories
data['pk'] = pk
return render(request, self.template_name, data)
@method_decorator(permission_required('main.add_post', accept_global_perms=True))
def post(self, request, pk=0, form=None):
form = forms.NewPost(request.POST)
if form.is_valid():
if not pk:
cur_post = models.Post()
else:
try:
pk = int(pk)
cur_post = models.Post.objects.get(pk=pk)
checker = ObjectPermissionChecker(request.user)
if not checker.has_perm('change_post', cur_post) \
and not request.user.has_perm('main.change_post'):
return HttpResponseForbidden('forbidden1')
except models.Post.DoesNotExist:
raise Http404
cur_post.title = form.cleaned_data['title']
cur_post.raw = form.cleaned_data['content']
cur_post.abstract = form.cleaned_data['abstract']
if not cur_post.abstract:
cur_post.abstract = cur_post.raw[0:140]
# html = markdown2.markdown(cur_post.raw, extras=['code-friendly', 'fenced-code-blocks'])
# cur_post.content_html = smart_text(html)
cur_post.author = User.objects.get(pk=form.cleaned_data['author_id']) if form.cleaned_data['author_id'] else request.user
# cur_post.author = request.user
tag_ids = request.POST.getlist('tags')
category_id = request.POST.get('category', None)
# return HttpResponse(len(tag_ids))
if request.POST.get('publish'):
cur_post.is_draft = False
                msg = 'Post has been published!'
messages.add_message(request, messages.SUCCESS, msg)
url = reverse('main:admin_posts')
else:
cur_post.is_draft=True
if request.POST.get('preview'):
cur_post.save()
return HttpResponse(cur_post.id)
msg = 'Draft has been saved!'
messages.add_message(request, messages.SUCCESS, msg)
url = '{0}?draft=true'.format(reverse('main:admin_posts'))
cur_post.category_id = category_id
cur_post.save()
cur_post.tags.clear()
cur_post.tags.add(*tag_ids)
assign_perm('main.change_post', request.user, cur_post)
assign_perm('main.delete_post', request.user, cur_post)
if request.POST.get('preview'):
url = reverse('main:post', kwargs={'pk':cur_post.id})
return redirect(url)
        return self.get(request, pk, form)
class AdminPage(View):
template_name = 'blog_admin/page.html'
@method_decorator(permission_required('main.add_page', accept_global_perms=True))
def get(self, request, pk=0, form=None):
data = {}
form_data = {}
if pk:
try:
pk = int(pk)
page = models.Page.objects.get(pk=pk)
form_data['title'] = page.title
form_data['content'] = page.raw
form_data['slug'] = page.slug
form_data['author_id'] = page.author.id
data['edit_flag'] = True
            except models.Page.DoesNotExist:
raise Http404
else:
page = None
if not form:
form = forms.NewPage(initial=form_data)
data['form'] = form
return render(request, self.template_name, data)
@method_decorator(permission_required('main.add_page', accept_global_perms=True))
def post(self, request, pk=0, form=None):
form = forms.NewPage(request.POST)
if form.is_valid():
if not pk:
cur_post = models.Page()
else:
try:
pk = int(pk)
cur_post = models.Page.objects.get(pk=pk)
except models.Page.DoesNotExist:
raise Http404
cur_post.title = form.cleaned_data['title']
cur_post.raw = form.cleaned_data['content']
cur_post.slug = form.cleaned_data['slug']
# html = markdown2.markdown(cur_post.raw, extras=['code-friendly', 'fenced-code-blocks'])
# cur_post.content_html = smart_text(html)
# cur_post.author = request.user
cur_post.author = User.objects.get(pk=form.cleaned_data['author_id']) if form.cleaned_data['author_id'] else request.user
if request.POST.get('publish'):
cur_post.is_draft = False
                msg = 'Page has been published!'
messages.add_message(request, messages.SUCCESS, msg)
url = reverse('main:admin_pages')
else:
cur_post.is_draft=True
msg = 'Draft has been saved!'
messages.add_message(request, messages.SUCCESS, msg)
url = '{0}?draft=true'.format(reverse('main:admin_pages'))
cur_post.save()
return redirect(url)
        return self.get(request, pk, form)
class DeletePost(View):
@method_decorator(permission_required('main.delete_post', (models.Post, 'id', 'pk'), accept_global_perms=True))
def get(self, request, pk):
try:
pk = int(pk)
cur_post = models.Post.objects.get(pk=pk)
is_draft = cur_post.is_draft
# checker = ObjectPermissionChecker(request.user)
# if not request.user.has_perm('main.delete_post') \
# and not checker.has_perm('delete_post', cur_post):
# return HttpResponse('forbidden')
url = reverse('main:admin_posts')
if is_draft:
url = '{0}?draft=true'.format(url)
cur_post.delete()
except models.Post.DoesNotExist:
raise Http404
return redirect(url)
class DeletePage(View):
@method_decorator(permission_required('main.delete_page', accept_global_perms=True))
def get(self, request, pk):
try:
pk = int(pk)
cur_post = models.Page.objects.get(pk=pk)
is_draft = cur_post.is_draft
checker = ObjectPermissionChecker(request.user)
if not checker.has_perm('delete_page', cur_post):
# return HttpResponseForbidden('forbidden')
return HttpResponse('forbidden')
url = reverse('main:admin_pages')
if is_draft:
url = '{0}?draft=true'.format(url)
cur_post.delete()
except models.Page.DoesNotExist:
raise Http404
return redirect(url)
class AdminTags(View):
template_name = 'blog_admin/tags.html'
@method_decorator(permission_required('main.add_tag', accept_global_perms=True))
def get(self, request, form=None):
if not form:
form = forms.TagForm()
tags = models.Tag.objects.all()
paginator = Paginator(tags, PER_PAGE_ADMIN)
page = request.GET.get('page')
try:
tags = paginator.page(page)
except PageNotAnInteger:
tags = paginator.page(1)
except EmptyPage:
tags = paginator.page(paginator.num_pages)
data = {'tags':tags, 'form':form}
return render(request, self.template_name, data)
@method_decorator(permission_required('main.add_tag', accept_global_perms=True))
def post(self, request, form=None):
form = forms.TagForm(request.POST)
if form.is_valid():
tags = form.cleaned_data['tags'].split(',')
for tag in tags:
tag_model, created = models.Tag.objects.get_or_create(name=tag.strip())
msg = 'Succeed to create tags'
messages.add_message(request, messages.SUCCESS, msg)
url = reverse('main:admin_tags')
return redirect(url)
else:
return self.get(request, form=form)
class AdminCategory(View):
template_name = 'blog_admin/category.html'
@method_decorator(permission_required('main.add_category', accept_global_perms=True))
def get(self, request, form=None):
if not form:
form = forms.CategoryForm()
catagories = models.Category.objects.all()
paginator = Paginator(catagories, PER_PAGE_ADMIN)
page = request.GET.get('page')
try:
catagories = paginator.page(page)
except PageNotAnInteger:
catagories = paginator.page(1)
except EmptyPage:
catagories = paginator.page(paginator.num_pages)
data = {'catagories':catagories, 'form':form}
return render(request, self.template_name, data)
@method_decorator(permission_required('main.add_category', accept_global_perms=True))
def post(self, request, form=None):
form = forms.CategoryForm(request.POST)
if form.is_valid():
category = models.Category()
category.name = form.cleaned_data['name']
category.save()
msg = 'Succeed to create new category'
messages.add_message(request, messages.SUCCESS, msg)
url = reverse('main:admin_category')
return redirect(url)
else:
return self.get(request, form=form)
class AdminFilterPosts(View):
template_name = 'blog_admin/posts.html'
@method_decorator(permission_required('main.add_post', accept_global_perms=True))
def get(self, request):
tag_id = request.GET.get('tag')
category_id = request.GET.get('category')
if tag_id:
posts = filter_posts_by_tag(tag_id)
elif category_id:
posts = filter_posts_by_category(category_id)
else:
url = reverse('main:admin_posts')
return redirect(url)
if posts == None:
raise Http404
paginator = Paginator(posts, PER_PAGE_ADMIN)
page = request.GET.get('page')
try:
posts = paginator.page(page)
except PageNotAnInteger:
posts = paginator.page(1)
except EmptyPage:
posts = paginator.page(paginator.num_pages)
data = {'posts':posts}
return render(request, self.template_name, data)
def filter_posts_by_tag(pk):
try:
tag = models.Tag.objects.get(pk=pk)
except models.Tag.DoesNotExist:
return None
posts = tag.post_set.all()
return posts
def filter_posts_by_category(pk):
try:
category = models.Category.objects.get(pk=pk)
except models.Category.DoesNotExist:
return None
posts = category.post_set.all()
return posts
# In MayBlog's permission system, if you can change tags,
# you can also change categories
@permission_required('main.change_tag', accept_global_perms=True)
def simple_update(request, pk, flag=None):
# flag = request.GET.get('flag', '')
if not flag:
raise Http404
if flag.lower() == 'tag':
model = models.Tag
elif flag.lower() == 'category':
model = models.Category
else:
        raise Http404
name = request.GET.get('name', '')
if not name:
return HttpResponse('Please post the correct name')
record = model.objects.get(pk=pk)
record.name = name
record.save()
return HttpResponse('Succeed to update {0}'.format(flag))
# In MayBlog's permission system, if you can delete tags,
# you can also delete categories
@permission_required('main.delete_tag', accept_global_perms=True)
def simple_delete(request, pk, flag=None):
# flag = request.GET.get('flag', '')
if not flag:
raise Http404
if flag.lower() == 'tag':
model = models.Tag
elif flag.lower() == 'category':
model = models.Category
else:
raise Http404
record = model.objects.get(pk=pk)
record.delete()
return HttpResponse('Succeed to delete {0}'.format(flag))
| gpl-2.0 | -1,485,444,318,215,554,600 | 33.056505 | 133 | 0.574681 | false |
datastreaming/mflow_nodes | mflow_nodes/config.py | 1 | 1687 | # Configuration for the m_manage.py, where to look for config files.
MANAGE_MACHINE_FILENAME = "/etc/mflow_nodes.json"
MANAGE_USER_FILENAME = "~/.mflow_nodes_rc.json"
MANAGE_PWD_FILENAME = "mflow_nodes.json"
LOG_MACHINE_FILENAME = "/etc/mflow_nodes_logging.json"
LOG_USER_FILENAME = "~/.mflow_nodes_rc_logging.json"
LOG_PWD_FILENAME = "mflow_nodes_logging.json"
# Stream node defaults.
DEFAULT_CONNECT_ADDRESS = "tcp://127.0.0.1:40000"
DEFAULT_REST_HOST = "http://0.0.0.0"
DEFAULT_REST_PORT = 41000
DEFAULT_DATA_QUEUE_LENGTH = 16
DEFAULT_N_RECEIVING_THREADS = 1
DEFAULT_STATISTICS_BUFFER_LENGTH = 100
DEFAULT_STARTUP_TIMEOUT = 5
# Default logging level
DEFAULT_LOGGING_LEVEL = "DEBUG"
# How much time we allow for the processor to shut down, before we terminate it.
DEFAULT_SHUTDOWN_TIMEOUT = 5
# Time it takes for the multiprocess exchange to happen.
DEFAULT_IPC_TIMEOUT = 2
# Time to use for polling the ipc communication.
DEFAULT_IPC_POLL_TIMEOUT = 0.5
# Number of ZMQ io threads
ZMQ_IO_THREADS = 1
# Node thread defaults.
DEFAULT_RECEIVE_TIMEOUT = 1000
DEFAULT_QUEUE_READ_INTERVAL = 0
DEFAULT_ZMQ_QUEUE_LENGTH = 32
# REST Interface defaults.
API_PATH_FORMAT = "/api/v1/{instance_name}/{{url}}"
HTML_PATH_FORMAT = "/{instance_name}/{{url}}"
# Client defaults.
DEFAULT_CLIENT_INSTANCE = '{variable_name} = NodeClient(address="{address}", instance_name="{instance_name}")'
# Process parameters.
PARAMETER_PROCESS_UID = "process_uid"
PARAMETER_PROCESS_GID = "process_gid"
PARAMETER_N_MESSAGES = "n_messages"
PARAMETER_DISABLE_PROCESSING = "disable_processing"
PROCESS_PARAMETERS = [PARAMETER_PROCESS_UID, PARAMETER_PROCESS_GID, PARAMETER_N_MESSAGES, PARAMETER_DISABLE_PROCESSING]
| gpl-3.0 | -4,524,829,192,184,026,600 | 34.893617 | 119 | 0.75163 | false |
pandeydivesh15/AI_lab-codes | POS-tagging(HunPos and CRF++)/confusion_matrix_calc.py | 1 | 1525 | import sys
import numpy as np
import pandas as pd
from sklearn.metrics import confusion_matrix, classification_report
if len(sys.argv) != 2:
print("Enter the model option.")
exit()
if sys.argv[1] == "--hunpos":
check = 0;
elif sys.argv[1] == "--crf++":
check = 1
else:
print "Enter correct option."
exit()
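# Expected input layouts, inferred from the parsing code below: with --hunpos
# the gold tags come one per line from en_test_labels.txt and en_pos_result.txt
# holds "token<TAB>predicted_tag" lines; with --crf++ every line of the result
# file is expected to carry "token gold_tag predicted_tag" columns.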
dir_path = "./"
if not check:
test_data_labels_file = dir_path + "en_test_labels.txt"
predicted_labels_file = dir_path + "en_pos_result.txt"
labels = set()
predicted_labels = []
test_data_labels = []
if not check:
with open(test_data_labels_file, "r") as file1, open(predicted_labels_file, "r") as file2:
for line in (l.rstrip() for l in file1):
test_data_labels.append(line)
labels.add(line)
for line in (l.rstrip() for l in file2):
if line:
predicted_labels.append(line.split()[1])
else:
with open(predicted_labels_file, "r") as file:
for line in (l.rstrip() for l in file):
if line:
predicted_labels.append(line.split()[2])
test_data_labels.append(line.split()[1])
labels.add(line.split()[1])
labels = sorted(list(labels))
predicted_labels = np.array(predicted_labels)
test_data_labels = np.array(test_data_labels)
simple_conf_matrix = confusion_matrix(test_data_labels,predicted_labels)
conf_matrix = pd.DataFrame(columns = labels, index = labels)
for x,y in zip(simple_conf_matrix,labels):
conf_matrix[y] = x
conf_matrix = conf_matrix.transpose()
print conf_matrix
print "Classification Report: \n" + classification_report(test_data_labels, predicted_labels) | mit | -7,945,343,165,054,343,000 | 25.310345 | 93 | 0.697705 | false |
tensorflow/probability | tensorflow_probability/python/internal/backend/numpy/test_lib.py | 1 | 2890 | # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Numpy implementations of TensorFlow functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import re
# Dependency imports
from absl import logging
from absl.testing import absltest
import numpy as onp # Avoid JAX rewrite. # pylint: disable=reimported
try:
# If TF is not imported, we return dummy `TestCase` and `Benchmark` classes
# because if we aren't testing, we shouldn't need these classes. Thus, tests
# that need `nptf.test.TestCase` should also import TF.
import tensorflow.compat.v2 as tf # pylint: disable=g-import-not-at-top
have_tf = True
except ImportError:
have_tf = False
__all__ = [
'is_gpu_available',
'Benchmark',
'TestCase',
]
# --- Begin Public Functions --------------------------------------------------
is_gpu_available = lambda: False
if have_tf:
class Benchmark(tf.test.Benchmark):
pass
class TestCase(tf.test.TestCase):
"""Wrapper of `tf.test.TestCase`."""
def evaluate(self, x):
return tf.nest.map_structure(onp.array, x)
def _GetNdArray(self, a):
return onp.array(a)
@contextlib.contextmanager
def assertRaisesOpError(self, msg):
# Numpy backend doesn't raise OpErrors.
try:
yield
self.fail('No exception raised. Expected exception similar to '
'tf.errors.OpError with message: %s' % msg)
except Exception as e: # pylint: disable=broad-except
err_str = str(e)
if re.search(msg, err_str):
return
logging.error('Expected exception to match `%s`!', msg)
raise
def assertEqual(self, first, second, msg=None):
if isinstance(first, list) and isinstance(second, tuple):
first = tuple(first)
if isinstance(first, tuple) and isinstance(second, list):
second = tuple(second)
return super(TestCase, self).assertEqual(first, second, msg)
def assertShapeEqual(self, first, second, msg=None):
self.assertTupleEqual(first.shape, second.shape, msg=msg)
main = tf.test.main
else:
class Benchmark(object):
pass
class TestCase(absltest.TestCase):
pass
main = None
| apache-2.0 | 6,533,194,326,772,202,000 | 28.191919 | 79 | 0.659862 | false |
apdjustino/DRCOG_Urbansim | src/opus_core/ndimage.py | 1 | 12168 | # Opus/UrbanSim urban simulation software.
# Copyright (C) 2010-2011 University of California, Berkeley, 2005-2009 University of Washington
# See opus_core/LICENSE
# Functions to work around numpy/scipy bug -- need to fix the char and num attributes of arrays with dtype==int32
# to get them to work with ndimage.sum etc. TODO: remove this if scipy bug gets fixed.
# The problem is that under some circumstances dtype.char and dtype.num have the wrong values (for intc
# rather than int_, the native python int 32 type).
# See comment at the end of this file for more details.
from numpy import int32, int64, ndarray, ones, array, asarray
import scipy, scipy.ndimage
import numpy
# *** ndimage.measurements functions ***
def sum(input, labels=None, index=None):
# work around for sum() method of scipy.ndimage not allowing numpy.int64 index type
# this won't be needed if scipy ticket #1162 is fixed: http://projects.scipy.org/scipy/ticket/1162
if index is not None and getattr(index, "dtype", int32) == int64 and index.max() <= numpy.iinfo(int32).max:
index = index.astype(int32)
_fix_dtype(input)
if labels is not None:
_fix_dtype(labels)
if index is not None:
_fix_dtype(index)
if index is not None and (labels is None or len(labels) ==0):
return scipy.ndimage.sum(input, labels=None, index=index) * ones(array(len(index)))
return scipy.ndimage.sum(input, labels, index)
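# Illustrative use of the wrapper above (summing values grouped by a zone id):
#
#   from numpy import array
#   sum(array([10, 20, 30, 40]),
#       labels=array([1, 1, 2, 2]),
#       index=array([1, 2]))          # -> array([ 30.,  70.])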
def mean(input, labels=None, index=None):
_fix_dtype(input)
_fix_dtype(labels)
_fix_dtype(index)
if index is not None and (labels is None or len(labels) ==0):
results = scipy.ndimage.mean(input, labels=None, index=index) * ones(array(len(index)))
else:
results = scipy.ndimage.mean(input, labels, index)
## scipy 0.7.0 may return a list instead of an array
results = asarray(results)
## scipy 0.8.0 returns NaN for 0 counts
if numpy.any(numpy.isnan(results)):
results[numpy.isnan(results)] = 0
return results
def labeled_comprehension(input, labels, index, func, out_dtype, default, pass_positions=False):
'''
Taken from the trunk version of scipy/ndimage/measurements.py
Roughly equivalent to [func(input[labels == i]) for i in index].
Special cases:
- index a scalar: returns a single value
- index is None: returns func(inputs[labels > 0])
func will be called with linear indices as a second argument if
pass_positions is True.
'''
as_scalar = numpy.isscalar(index)
input = numpy.asarray(input)
if pass_positions:
positions = numpy.arange(input.size).reshape(input.shape)
if labels is None:
if index is not None:
raise ValueError, "index without defined labels"
if not pass_positions:
return func(input.ravel())
else:
return func(input.ravel(), positions.ravel())
try:
input, labels = numpy.broadcast_arrays(input, labels)
except ValueError:
raise ValueError, "input and labels must have the same shape (excepting dimensions with width 1)"
if index is None:
if not pass_positions:
return func(input[labels > 0])
else:
return func(input[labels > 0], positions[labels > 0])
index = numpy.atleast_1d(index)
if numpy.any(index.astype(labels.dtype).astype(index.dtype) != index):
raise ValueError, "Cannot convert index values from <%s> to <%s> (labels' type) without loss of precision"%(index.dtype, labels.dtype)
index = index.astype(labels.dtype)
# optimization: find min/max in index, and select those parts of labels, input, and positions
lo = index.min()
hi = index.max()
mask = (labels >= lo) & (labels <= hi)
# this also ravels the arrays
labels = labels[mask]
input = input[mask]
if pass_positions:
positions = positions[mask]
# sort everything by labels
label_order = labels.argsort()
labels = labels[label_order]
input = input[label_order]
if pass_positions:
positions = positions[label_order]
index_order = index.argsort()
sorted_index = index[index_order]
def do_map(inputs, output):
'''labels must be sorted'''
nlabels = labels.size
nidx = sorted_index.size
# Find boundaries for each stretch of constant labels
# This could be faster, but we already paid N log N to sort labels.
lo = numpy.searchsorted(labels, sorted_index, side='left')
hi = numpy.searchsorted(labels, sorted_index, side='right')
for i, l, h in zip(range(nidx), lo, hi):
if l == h:
continue
idx = sorted_index[i]
output[i] = func(*[inp[l:h] for inp in inputs])
temp = numpy.empty(index.shape, out_dtype)
temp[:] = default
if not pass_positions:
do_map([input], temp)
else:
do_map([input, positions], temp)
output = numpy.zeros(index.shape, out_dtype)
output[index_order] = temp
if as_scalar:
output = output[0]
return output
def median(input, labels = None, index = None):
"""
Calculate the median of the input array by label.
Parameters
----------
input : array_like
median of the values of `input` inside the regions defined by `labels`
        is calculated.
labels : array of integers, same shape as input
Assign labels to the values of the array.
index : scalar or array
A single label number or a sequence of label numbers of
the objects to be measured.
Returns
-------
output : array
An array of the median of the values of `input` inside the regions
defined by `labels`.
See also
--------
mean
Examples
--------
>>> median(array([1,2,8,5, 2,4,6, 7]), labels=array([1,1,1,1, 2,2,2, 5]))
4.5
>>> median(array([1,2,8,5, 2,4,6, 7]), labels=array([1,1,1,1, 2,2,2, 5]), index=2)
4
>>> median(array([1,2,8,5, 2,4,6, 7]), labels=array([1,1,1,1, 2,2,2, 5]), index=array([1,5]))
array([3.5, 7])
>>> median(array([1,2,8,5, 2,4,6, 7]), labels=None, index=None))
4.5
"""
return labeled_comprehension(input, labels, index, numpy.median, numpy.float, 0.0, pass_positions=False)
def variance(input, labels=None, index=None):
_fix_dtype(input)
_fix_dtype(labels)
_fix_dtype(index)
return scipy.ndimage.variance(input, labels, index)
def standard_deviation(input, labels=None, index=None):
_fix_dtype(input)
_fix_dtype(labels)
_fix_dtype(index)
return scipy.ndimage.standard_deviation(input, labels, index)
def minimum(input, labels=None, index=None):
_fix_dtype(input)
_fix_dtype(labels)
_fix_dtype(index)
return scipy.ndimage.minimum(input, labels, index)
def maximum(input, labels=None, index=None):
_fix_dtype(input)
_fix_dtype(labels)
_fix_dtype(index)
return scipy.ndimage.maximum(input, labels, index)
def minimum_position(input, labels=None, index=None):
_fix_dtype(input)
_fix_dtype(labels)
_fix_dtype(index)
return scipy.ndimage.minimum_position(input, labels, index)
def maximum_position(input, labels=None, index=None):
_fix_dtype(input)
_fix_dtype(labels)
_fix_dtype(index)
return scipy.ndimage.maximum_position(input, labels, index)
def extrema(input, labels=None, index=None):
_fix_dtype(input)
_fix_dtype(labels)
_fix_dtype(index)
return scipy.ndimage.extrema(input, labels, index)
def center_of_mass(input, labels=None, index=None):
_fix_dtype(input)
_fix_dtype(labels)
_fix_dtype(index)
return scipy.ndimage.center_of_mass(input, labels, index)
# *** ndimage.filters functions ***
def correlate(input, weights, output=None, mode='reflect', cval=0.0, origin=0):
_fix_dtype(input)
_fix_dtype(weights)
return scipy.ndimage.correlate(input, weights, output, mode, cval, origin)
# *** private functions (to fix dtype) ***
def _fix_dtype(a):
if isinstance(a, ndarray) and a.dtype == int32:
a.dtype = int32
## lmwang: this seems to have been fixed as of numpy 1.4.0 and scipy 0.7.2
## the below code doesn't emit any error for me
# More details on the numpy/scipy bug:
# Running the following test code will trigger the error:
# from numpy import array, ma, int32
# from scipy import ndimage
# a = array([1,2,3,4])
# m = ma.masked_array(10-a, mask=False)
# ndimage.sum(ma.filled(m, 0))
# if you put this line before the ndimage.sum command then it works:
# m.dtype = int32
    # Even though numpy thinks the dtype of m is int32, there is a difference from the standard one:
# before changing dtype:
# m.dtype.char = 'i'
# m.dtype.num = 5
# afterwards:
# m.dtype.char = 'l'
# m.dtype.num = 7
# The former one is for numpy type intc; the latter is for int_ (corresponding to the builtin Python type int)
from opus_core.tests import opus_unittest
class ndimageTests(opus_unittest.OpusTestCase):
def test_median(self):
from numpy import array, all
input = array([1,2,8,5, 2,4,6, 7, 19])
labels=array([1,1,1,1, 2,2,2, 5, 0])
index = None
expected = 4.5
self.assert_(all(median(input, labels=labels, index=index)==expected))
index = 2
expected = 4
self.assert_(all(median(input, labels=labels, index=index)==expected))
index = array([1,5])
expected = array([3.5,7])
self.assert_(all(median(input, labels=labels, index=index)==expected))
index = array([1,2,5])
expected = array([3.5,4,7])
self.assert_(all(median(input, labels=labels, index=index)==expected))
labels = None
index = None
expected = 5.0
self.assert_(all(median(input, labels=labels, index=index)==expected))
def test_empty_array_zero_identity_error1(self):
"""test fix for scipy 0.8.0
"""
from numpy import array, all, int64, int32
input = array([],dtype=int32)
labels=array([], dtype=int32)
index = None
expected = 0
results = sum(input, labels=labels, index=index)
self.assert_(all(results==expected))
def test_empty_array_zero_identity_error2(self):
""" test fix for scipy 0.8.0
"""
from numpy import array, all, int64, int32
input = array([],dtype=int32)
labels=array([], dtype=int32)
index = array([1,2,3],dtype=int32)
expected = array([0, 0, 0], dtype=int32)
results = sum(input, labels=labels, index=index)
self.assert_(all(results==expected))
self.assert_(len(results)==len(expected))
index = [1,2,3]
expected = array([0, 0, 0], dtype=int32)
results = sum(input, labels=labels, index=index)
self.assert_(all(results==expected))
self.assert_(len(results)==len(expected))
def test_ndimage_mean_nan(self):
""" test fix for ndimage.mean for scipy 0.8.0
"""
from numpy import array, all, int64, int32
input = array([],dtype=int32)
labels=array([], dtype=int32)
index = array([1,2,3],dtype=int32)
expected = array([0, 0, 0], dtype=int32)
results = mean(input, labels=labels, index=index)
self.assert_(all(results==expected))
self.assert_(len(results)==len(expected))
index = [1,2,3]
expected = array([0, 0, 0], dtype=int32)
results = mean(input, labels=labels, index=index)
self.assert_(all(results==expected))
self.assert_(len(results)==len(expected))
def MASKED_test_empty_array_memory_error(self):
""" weird error introduced in scipy 0.8.0
"""
from numpy import array, all, int64, int32
input = array([], dtype=int64)
labels=array([]) ## default to float64
print labels.dtype
index = array([1,2,3])
expected = 0
self.assert_(all(sum(input, labels=labels, index=index)==expected))
if __name__ == "__main__":
opus_unittest.main()
| agpl-3.0 | -6,871,191,045,195,117,000 | 32.428571 | 142 | 0.629849 | false |
jucimarjr/IPC_2017-1 | lista06/lista06_lista01_questao20.py | 1 | 2874 | #---------------------------------------------------------------------------
# Introdução a Programação de Computadores - IPC
# Universidade do Estado do Amazonas - UEA
# Prof. Jucimar Jr
#
# Ulisses Antonio Antonino da Costa 1515090555
# Walter Nobre da Silva conceição 1715310057
# Jandinne Duarte de Oliveira 1015070265
# Vitor Summer Oliveira Pantaleão 1715310042
# Reinaldo vargas 1715310054
#
# Organizações Tabajara decided to give a bonus (abono) to their employees in recognition of the good
# results achieved during the past year, and hired you to develop the application that will serve as a
# projection of how much will be spent on paying this bonus.
# After meetings involving the executive board, the finance board and the labour union representatives,
# the following calculation rule was agreed:
# a. Each employee receives the equivalent of 20% of their gross December salary;
# b. The bonus floor is 100 reais, i.e. employees whose salary is very low receive this minimum amount.
# For now, do not worry about employees with less time at the company, deductions, taxes or other
# particularities. The program must allow an indefinite (unknown) number of salaries to be typed in;
# a salary equal to 0 (zero) ends the input. After all data has been entered, the program must compute
# the bonus granted to each employee according to the rule above. At the end, the program must present:
# - the salary of each employee, together with the bonus amount;
# - the total number of employees processed;
# - the total amount to be spent on paying the bonus;
# - the number of employees that receive the minimum amount of 100 reais;
# - the highest amount paid as a bonus.
# The screen below is an example run of the program, for illustration only; the values may change on
# each execution.
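# The reference sketch below is a minimal, hypothetical illustration of the bonus rule just described
# (20% of the gross salary, with a floor of 100 reais); the helper name `projetar_abono` is made up for
# illustration and is not used by the exercise solution that follows.
def projetar_abono(salarios):
    """Illustrative only: apply the stated bonus rule to a list of salaries."""
    abonos = [max(0.20 * salario, 100.0) for salario in salarios]
    return {
        'abonos': abonos,                                    # bonus per employee
        'processados': len(salarios),                        # employees processed
        'total_gasto': sum(abonos),                          # total spent on bonuses
        'no_minimo': sum(1 for a in abonos if a == 100.0),   # employees paid the minimum
        'maior_abono': max(abonos) if abonos else 0.0,       # highest bonus paid
    }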
vetor_salario =[]
abono = 100
contador = 0
montante = 1
soma_abono = 0
maior = []
while contador < montante:
salario = int(input("Digite o salário : "))
vetor_salario.append(salario)
if salario == 0:
vetor_salario.remove(0)
print('todos os salários',vetor_salario)
len(vetor_salario)
print("=================================================")
contador+=1
for salario in vetor_salario:
if salario < 1000:
soma = abono+salario
maior.append(soma)
len(maior)
print(salario,"salário + abono = ",soma)
print("Foram processados ", len(vetor_salario)," colaboradores")
print("O total gasto com abono foi de :", sum(maior))
print("Maior valor de abono pago foi: ", 'R$',max(maior))
print("valor minimo pago a: ",len(maior), "funcionários")
| apache-2.0 | 4,224,848,726,915,924,000 | 56.428571 | 693 | 0.702203 | false |
MSeifert04/nddata | nddata/nddata/mixins/ndfilter.py | 1 | 10473 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
from ..nduncertainty_var import VarianceUncertainty
from ..nduncertainty_stddev import StdDevUncertainty
from ...utils.numbautils import convolve, interpolate
from ...utils.numbautils import convolve_median, interpolate_median
from ...deps import OPT_DEPS
__all__ = ['NDFilterMixin']
if not OPT_DEPS['NUMBA']: # pragma: no cover
__doctest_skip__ = ['*']
class NDFilterMixin(object):
"""Mixin to allow some filtering (convolution, interpolation).
"""
def interpolate_average(self, kernel):
"""Interpolate masked pixel in the instance by applying an average \
filter.
.. note::
This function requires ``Numba`` and that the ``data`` has only 1,
2 or 3 dimensions.
Parameters
----------
kernel : `numpy.ndarray`, `astropy.convolution.Kernel`
The kernel (or footprint) for the interpolation. The sum of the
``kernel`` must not be 0 (or very close to it). Each axis of the
kernel must be odd.
Returns
-------
nothing : `None`
The interpolation works in-place!
Examples
--------
>>> from nddata.nddata import NDData
>>> import numpy as np
>>> ndd = NDData([1,100,1], mask=np.array([0,1,0], dtype=bool))
>>> ndd.interpolate_average([1,1,1])
>>> ndd
NDData([ 1., 1., 1.])
>>> ndd.mask
array([False, False, False], dtype=bool)
"""
self.data = interpolate(self.data, kernel, self._get_mask_numpylike())
self.mask = np.isnan(self.data)
def interpolate_median(self, kernel):
"""Interpolate masked pixel in the instance by applying an median \
filter.
.. note::
This function requires ``Numba`` and that the ``data`` has only 1,
2 or 3 dimensions.
Parameters
----------
kernel : `numpy.ndarray`, `astropy.convolution.Kernel`
            The kernel for the convolution. One difference from normal
            convolution is that the actual values of the kernel do not matter;
            however, elements where the kernel is zero are excluded from the
            median computation. Each axis of the kernel must be odd.
Returns
-------
nothing : `None`
The interpolation works in-place!
Examples
--------
>>> from nddata.nddata import NDData
>>> import numpy as np
>>> ndd = NDData([1,100,1], mask=np.array([0,1,0], dtype=bool))
>>> ndd.interpolate_median([1,1,1])
>>> ndd
NDData([ 1., 1., 1.])
>>> ndd.mask
array([False, False, False], dtype=bool)
"""
self.data = interpolate_median(self.data, kernel,
self._get_mask_numpylike())
self.mask = np.isnan(self.data)
def filter_average(self, kernel, uncertainty=False):
"""Filter the instance data by applying a weighted average filter.
.. note::
This function requires ``Numba`` and that the ``data`` has only 1,
2 or 3 dimensions.
Parameters
----------
kernel : `numpy.ndarray`, `astropy.convolution.Kernel`
The kernel (or footprint) for the interpolation. The sum of the
``kernel`` must not be 0 (or very close to it). Each axis of the
kernel must be odd.
uncertainty : `bool`, optional
Create a new uncertainty by calculating the variance of the
filtered values. This at least doubles the total runtime!
Default is ``False``.
Returns
-------
filtered : `~nddata.nddata.NDDataBase` instance
Same class as self but new instance containing the convolved data,
mask (and uncertainty). All other attributes remain the same.
Examples
--------
>>> from nddata.nddata import NDData
>>> import numpy as np
>>> ndd = NDData([1,100,1], mask=np.array([0,1,0], dtype=bool))
>>> ndd2 = ndd.filter_average([1,1,1])
>>> ndd2
NDData([ 1., 1., 1.])
>>> ndd2.mask
array([False, False, False], dtype=bool)
>>> ndd = NDData([1,2,3,100], mask=np.array([0,0,0,1], dtype=bool))
>>> ndd2 = ndd.filter_average([1,1,1], True)
>>> ndd2
NDData([ 1.5, 2. , 2.5, 3. ])
>>> ndd2.uncertainty
VarianceUncertainty([ 0.25 , 0.66666667, 0.25 , \
0. ])
"""
kwargs = {'kernel': kernel, 'rescale': False, 'var': uncertainty}
return self._filter_convolve(**kwargs)
def filter_sum(self, kernel, uncertainty=False):
"""Filter the instance data by applying a weighted sum filter.
.. note::
This function requires ``Numba`` and that the ``data`` has only 1,
2 or 3 dimensions.
Parameters
----------
kernel : `numpy.ndarray`, `astropy.convolution.Kernel`
The kernel (or footprint) for the interpolation. The sum of the
``kernel`` must not be 0 (or very close to it). Each axis of the
kernel must be odd.
uncertainty : `bool`, optional
Create a new uncertainty by calculating the variance of the
filtered values. This at least doubles the total runtime!
Default is ``False``.
Returns
-------
filtered : `~nddata.nddata.NDDataBase` instance
Same class as self but new instance containing the convolved data,
mask (and uncertainty). All other attributes remain the same.
Examples
--------
>>> from nddata.nddata import NDData
>>> import numpy as np
>>> ndd = NDData([1,2,3,100])
>>> ndd2 = ndd.filter_median([1,1,1], 'robust')
>>> ndd2
NDData([ 1.5, 2. , 3. , 51.5])
>>> ndd2.uncertainty
StdDevUncertainty([ 0. , 1.48260222, 1.48260222, \
71.16490649])
>>> ndd = NDData([1,100,1], mask=np.array([0,1,0], dtype=bool))
>>> ndd2 = ndd.filter_sum([1,1,1])
>>> ndd2
NDData([ 3., 3., 3.])
>>> ndd2.mask
array([False, False, False], dtype=bool)
>>> ndd = NDData([1,2,3,100], mask=np.array([0,0,0,1], dtype=bool))
>>> ndd2 = ndd.filter_sum([1,1,1], True)
>>> ndd2
NDData([ 4.5, 6. , 7.5, 9. ])
>>> ndd2.uncertainty
VarianceUncertainty([ 2.25, 6. , 2.25, 0. ])
"""
kwargs = {'kernel': kernel, 'rescale': True, 'var': uncertainty}
return self._filter_convolve(**kwargs)
def filter_median(self, kernel, uncertainty=False):
"""Filter the instance data by applying a median filter.
.. note::
This function requires ``Numba`` and that the ``data`` has only 1,
2 or 3 dimensions.
Parameters
----------
kernel : `numpy.ndarray`, `astropy.convolution.Kernel`
The kernel (or footprint) for the interpolation. Each axis of the
kernel must be odd.
uncertainty : `bool` or ``"robust"``, optional
Create a new uncertainty by calculating the median absolute
deviation of the filtered values. This at least doubles the total
runtime! If ``"robust"`` the result is multiplied a correction
factor of approximatly ``1.428``.
Default is ``False``.
Returns
-------
filtered : `~nddata.nddata.NDDataBase` instance
Same class as self but new instance containing the convolved data,
mask (and uncertainty). All other attributes remain the same.
Examples
--------
>>> from nddata.nddata import NDData
>>> import numpy as np
>>> ndd = NDData([1,100,1], mask=np.array([0,1,0], dtype=bool))
>>> ndd2 = ndd.filter_median([1,1,1])
>>> ndd2
NDData([ 1., 1., 1.])
>>> ndd2.mask
array([False, False, False], dtype=bool)
>>> ndd = NDData([1,2,3,100], mask=np.array([0,0,0,1], dtype=bool))
>>> ndd2 = ndd.filter_median([1,1,1], True)
>>> ndd2
NDData([ 1.5, 2. , 2.5, 3. ])
>>> ndd2.uncertainty
StdDevUncertainty([ 0., 1., 0., 0.])
>>> ndd2 = ndd.filter_median([1,1,1], 'robust')
>>> ndd2.uncertainty
StdDevUncertainty([ 0. , 1.48260222, 0. , 0. ])
"""
kwargs = self._filter_get_invariants()
if uncertainty:
c_data, c_uncert = convolve_median(self.data, kernel,
self._get_mask_numpylike(),
mad=uncertainty)
kwargs['uncertainty'] = StdDevUncertainty(c_uncert, copy=False)
else:
c_data = convolve_median(self.data, kernel,
self._get_mask_numpylike(), mad=False)
kwargs['uncertainty'] = None
kwargs['mask'] = np.isnan(c_data)
return self.__class__(c_data, **kwargs)
def _filter_convolve(self, kernel, rescale, var):
"""Average and sum convolution are the same function so this internal
method sets it up correctly.
"""
kwargs = self._filter_get_invariants()
if var:
c_data, c_uncert = convolve(self.data, kernel,
self._get_mask_numpylike(),
rescale=rescale, var=True)
kwargs['uncertainty'] = VarianceUncertainty(c_uncert,
copy=False)
else:
c_data = convolve(self.data, kernel, self._get_mask_numpylike(),
rescale=rescale, var=False)
kwargs['uncertainty'] = None
kwargs['mask'] = np.isnan(c_data)
return self.__class__(c_data, **kwargs)
def _filter_get_invariants(self):
"""Attributes that do not change during convolution.
"""
return {'wcs': self.wcs, 'meta': self.meta, 'unit': self.unit,
'flags': self.flags}
| bsd-3-clause | 8,424,937,322,362,398,000 | 35.747368 | 79 | 0.537191 | false |
michaeljones/packed | tests/test_translate.py | 1 | 7361 |
from __future__ import unicode_literals, print_function
from unittest import TestCase
from packed import translate
class TestTranslate(TestCase):
def test_whitespace(self):
code = """ """
expected = code
result = translate(code)
self.assertMultiLineEqual(expected, result)
def test_simple_code(self):
code = """return True"""
expected = code
result = translate(code)
self.assertMultiLineEqual(expected, result)
def test_comment(self):
code = """
# This is a comment
# This is commented out 'packed' syntax:
# <a attr="value"></a>
# This is commented out 'packed' syntax after valid code
return True # <a attr="value"></a>
"""
expected = code
result = translate(code)
self.assertMultiLineEqual(expected, result)
def test_simple_element(self):
code = """
@packed
def tag(self):
twitter_share = ""
return <a></a>
"""
expected = """
@packed
def tag(self):
twitter_share = ""
return Elem('a')
"""
result = translate(code)
self.assertMultiLineEqual(expected, result)
def test_empty_element(self):
code = """
@packed
def tag(self):
twitter_share = ""
return <a />
"""
expected = """
@packed
def tag(self):
twitter_share = ""
return Elem('a')
"""
result = translate(code)
self.assertMultiLineEqual(expected, result)
def test_single_child_no_attributes(self):
code = """
@packed
def tag(self):
twitter_share = ""
return <a><i></i></a>
"""
expected = """
@packed
def tag(self):
twitter_share = ""
return Elem(
'a',
{},
Elem('i'),
)
"""
result = translate(code)
self.assertMultiLineEqual(expected, result)
def test_single_child(self):
code = """
@packed
def tag(self):
twitter_share = ""
return <a href={twitter_share}><i class="fa fa-twitter-square large-icon"></i></a>
"""
expected = """
@packed
def tag(self):
twitter_share = ""
return Elem(
'a',
{
'href': twitter_share,
},
Elem(
'i',
{
'class': 'fa fa-twitter-square large-icon',
},
),
)
"""
result = translate(code)
self.assertMultiLineEqual(expected, result)
def test_simple_multiple_children(self):
code = """
@packed
def tag(self):
twitter_share = ""
return <a> <i></i> <b></b> </a>
"""
expected = """
@packed
def tag(self):
twitter_share = ""
return Elem(
'a',
{},
' ',
Elem('i'),
' ',
Elem('b'),
' ',
)
"""
result = translate(code)
self.assertMultiLineEqual(expected, result)
def test_multiple_children(self):
code = """
@packed
def tag(self):
twitter_share = ""
return <a href={twitter_share}>
<i class="fa fa-twitter-square large-icon"></i>
<i class="fa fa-facebook-square large-icon"></i>
</a>
"""
expected = """
@packed
def tag(self):
twitter_share = ""
return Elem(
'a',
{
'href': twitter_share,
},
' ',
Elem(
'i',
{
'class': 'fa fa-twitter-square large-icon',
},
),
' ',
Elem(
'i',
{
'class': 'fa fa-facebook-square large-icon',
},
),
' ',
)
"""
result = translate(code)
self.assertMultiLineEqual(expected, result)
def test_empty_tag_translate(self):
code = """
@packed
def tag(self):
twitter_share = ""
return <a href={twitter_share}><i class="fa fa-twitter-square large-icon" /></a>
"""
expected = """
@packed
def tag(self):
twitter_share = ""
return Elem(
'a',
{
'href': twitter_share,
},
Elem(
'i',
{
'class': 'fa fa-twitter-square large-icon',
},
),
)
"""
result = translate(code)
self.assertMultiLineEqual(expected, result)
def test_empty_text(self):
code = ""
expected = code
result = translate(code)
self.assertEqual(expected, result)
def test_single_empty_line(self):
code = "\n"
expected = code
result = translate(code)
self.assertEqual(expected, result)
def test_pure_text(self):
code = """
@packed
def tag(self):
twitter_share = ""
return "This is a test of text"
"""
expected = code
result = translate(code)
self.assertMultiLineEqual(expected, result)
def test_text_in_tag(self):
code = """
return <a href={twitter_share}>My link text</a>
"""
expected = """
return Elem(
'a',
{
'href': twitter_share,
},
'My link text',
)
"""
result = translate(code)
self.assertMultiLineEqual(expected, result)
def test_value_in_text(self):
code = """
return <p>My paragraph with {target} and {anotherTarget}</p>
"""
expected = """
return Elem(
'p',
{},
'My paragraph with ',
target,
' and ',
anotherTarget,
)
"""
result = translate(code)
self.assertMultiLineEqual(expected, result)
def test_double_attribute(self):
code = """
link = <a href={link.url} rel="nofollow">{link.display}</a>
"""
expected = """
link = Elem(
'a',
{
'href': link.url,
'rel': 'nofollow',
},
link.display,
)
"""
result = translate(code)
self.assertMultiLineEqual(expected, result)
def test_indentation(self):
"""Make sure we're attempting to pick up the indentation from the code we're reading"""
code = """
link = <a href={link.url} rel="nofollow">{link.display}</a>
"""
expected = """
link = Elem(
'a',
{
'href': link.url,
'rel': 'nofollow',
},
link.display,
)
"""
result = translate(code)
self.assertMultiLineEqual(expected, result)
class TestComponentTranslate(TestCase):
def test_simple_component(self):
code = """
return <ExampleComponent />
"""
expected = """
return Elem(ExampleComponent)
"""
result = translate(code)
self.assertMultiLineEqual(expected, result)
def test_single_character_name_component(self):
code = """
return <E />
"""
expected = """
return Elem(E)
"""
result = translate(code)
self.assertMultiLineEqual(expected, result)
def test_mixed_children(self):
code = """
return <a><b></b><ExampleComponent /></a>
"""
expected = """
return Elem(
'a',
{},
Elem('b'),
Elem(ExampleComponent),
)
"""
result = translate(code)
self.assertMultiLineEqual(expected, result)
| gpl-2.0 | -3,769,752,905,414,495,700 | 17.220297 | 95 | 0.496807 | false |
govdata/APyMongo | doc/examples/insert.py | 1 | 1177 | import json
import tornado.web
import apymongo
from apymongo import json_util
import base
class InsertHandler(tornado.web.RequestHandler):
"""
Inserts a test record, and shows a record count.
"""
@tornado.web.asynchronous
def get(self):
self.connection = apymongo.Connection()
coll = self.connection['testdb']['testcollection']
to_insert = {"testkey1":22,
"testkey2":[2,3],
"testkey3":{"inner1":2,
"inner2":'testval'}}
coll.insert(to_insert,callback=self.count_handler)
def count_handler(self,response):
def callback(r):
self.final_handler(response,r)
coll = self.connection['testdb']['testcollection']
coll.count(callback = callback)
def final_handler(self,rec_id, response):
msg = "You just inserted record " + str(rec_id) + '. There are now ' + str(response) + ' records.'
self.write(msg)
self.finish()
if __name__ == "__main__":
base.main(InsertHandler)
| apache-2.0 | 4,683,641,974,698,362,000 | 22.098039 | 107 | 0.53441 | false |
rwl/PyCIM | CIM14/CDPSM/GIS_Connectivity/IEC61970/Wires/Junction.py | 1 | 1604 | # Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM14.CDPSM.GIS_Connectivity.IEC61970.Core.ConductingEquipment import ConductingEquipment
class Junction(ConductingEquipment):
"""A point where one or more conducting equipments are connected with zero resistance.
"""
def __init__(self, *args, **kw_args):
"""Initialises a new 'Junction' instance.
"""
super(Junction, self).__init__(*args, **kw_args)
_attrs = []
_attr_types = {}
_defaults = {}
_enums = {}
_refs = []
_many_refs = []
| mit | 5,917,236,307,994,077,000 | 40.128205 | 94 | 0.727556 | false |
bracket/ratchet | examples/005_voice.py | 1 | 5114 | from math import pi
import numpy as np
import pyaudio
import time
from ratchet.scales import *
from functools import lru_cache
memoize = lru_cache()
CHANNELS = 1
RATE = 44100
def make_ramp(time):
return np.linspace(0., time, time * RATE, dtype=np.float32)
def read_sample():
with open('data/test.dat', 'rb') as fd:
return np.frombuffer(fd.read(), dtype=np.int16).astype(dtype=np.float32) / 32768.
def plot_sample():
import matplotlib.pyplot as plt
from scipy import signal
# sample = read_sample()[:RATE]
sample = shifted_sample(0)[:RATE]
f, t, Sxx = signal.spectrogram(
sample,
fs=44100,
nperseg=2048,
)
plt.pcolormesh(t, f, Sxx)
plt.ylabel('Frequencies [Hz]')
plt.xlabel('Time [sec]')
plt.show()
def low_c(time):
from ratchet.scales import middle_major_octave
c = middle_major_octave['c']
low_c = c / 2.
return np.sin(2. * pi * low_c * make_ramp(time))
def shifted_sample(steps):
from scipy.interpolate import interp1d
sample = read_sample()
transform = np.fft.rfft(sample)
smooth = interp1d(
x = np.linspace(0., 1., len(transform), endpoint=True),
y = transform,
kind = 'linear',
bounds_error = False,
fill_value = complex(0, 0),
)
out = smooth(np.linspace(0., pow(2., -steps/12.), len(transform), endpoint=True))
return np.fft.irfft(out, len(sample)).astype(dtype=np.float32)
def make_sample_scale():
from ratchet.scales import c_major
return {
c : shifted_sample(steps)
for c, steps
in c_major.items()
}
def test_note_parser():
notes = r'''
e4/4 d4/4 c4/4 d4/4
e4/4 e4/4 e4/2
d4/4 d4/4 d4/2
e4/4 g4/4 g4/2
e4/4 d4/4 c4/4 d4/4
e4/4 e4/4 e4/4 e4/4
d4/4 d4/4 e4/4 d4/4
c4/1
'''
for note in parse_notes(notes):
print(note)
@memoize
def make_adsr(duration):
from scipy.interpolate import interp1d
points = [
(0., 0.),
(.1, 1.),
(.15, .75),
((duration - .1), .75),
(duration, 0.),
]
adsr = interp1d(
x = [ p[0] for p in points ],
y = [ p[1] for p in points ],
kind = 'linear',
)
return adsr(np.linspace(0, duration, duration * RATE, endpoint=True))
def make_mary():
from ratchet.scales import c_major
notes = r'''
e1 d1 c1 d1
e1 e1 e2
d1 d1 d2
e1 g1 g2
e1 d1 c1 d1
e1 e1 e1 e1
d1 d1 e1 d1
c4
'''
quarter_note = .375
notes = [
(n[0], int(n[1]) * quarter_note)
for n
in notes.split()
]
sample_scale = make_sample_scale()
out = np.zeros(RATE * sum(d for _, d in notes), dtype=np.float32)
env = make_adsr(4 * quarter_note) * (.5 / 3.)
c_major = (sample_scale['c'] + sample_scale['e'] + sample_scale['g'])
g_major = (sample_scale['g'] + sample_scale['b'] + sample_scale['d'])
c_major = c_major[:len(env)] * env
g_major = g_major[:len(env)] * env
chords = [
c_major,
c_major,
g_major,
c_major,
c_major,
c_major,
g_major,
c_major
]
offset = 0
for chord in chords:
start = offset
end = offset = min(start + len(chord), len(out))
out[start:end] += chord
if end >= len(out): break
offset = 0
for note, duration in notes:
env = make_adsr(duration)
sample = sample_scale[note][:len(env)] * env
out[offset:offset + len(sample)] += sample
offset += len(sample)
return out
def write_mary():
import aifc
sample = make_mary()
final_sample = (sample * (2 ** 15 - 1)).astype(dtype='>i2')
with aifc.open('mary.caff', 'wb') as out:
out.setframerate(44100)
out.setnchannels(1)
out.setsampwidth(2)
out.writeframes(final_sample.tobytes())
def test_aiff():
import aifc
with aifc.open('mary.caff', 'rb') as fd:
print(fd.getparams())
def main():
import aifc
# test_aiff()
# return
write_mary()
return
pa = pyaudio.PyAudio()
# sample = shifted_sample(0.)
sample = make_mary()
# with aifc.open('test.caff', 'rb') as fd:
# sample = np.frombuffer(fd.readframes(fd.getnframes()), dtype='>i2')
# sample = sample.astype(np.float32) / (2 ** 15)
# print(sample.shape)
current_offset = 0
def callback(in_data, frame_count, time_info, status):
nonlocal current_offset
start = current_offset
current_offset = start + frame_count
return (sample[start:current_offset], pyaudio.paContinue)
stream = pa.open(
format = pyaudio.paFloat32,
channels = CHANNELS,
rate = int(RATE / 2),
stream_callback = callback,
output = True,
)
stream.start_stream()
while stream.is_active():
time.sleep(0.1)
stream.stop_stream()
stream.close()
pa.terminate()
if __name__ == '__main__':
main()
| mit | 2,182,637,683,714,768,600 | 19.538153 | 89 | 0.551623 | false |
ZeitOnline/zeit.content.article | src/zeit/content/article/edit/browser/tests/test_html.py | 1 | 4542 | # coding: utf8
import zeit.content.article.edit.browser.testing
class HTMLConvertTest(
zeit.content.article.edit.browser.testing.EditorTestCase):
def setUp(self):
super(HTMLConvertTest, self).setUp()
self.add_article()
def convert(self):
self.eval(
"window.zeit.content.article.html.to_xml("
"window.jQuery('.editable')[0])")
def test_h3_is_translated_to_intertitle(self):
s = self.selenium
self.create('<h3>foo</h3>')
self.convert()
s.assertElementPresent('jquery=.editable intertitle:contains(foo)')
def test_div_is_translated_to_p(self):
s = self.selenium
self.create('<div>foo</div>')
self.convert()
s.assertElementPresent('jquery=.editable p:contains(foo)')
def test_b_is_translated_to_strong(self):
s = self.selenium
self.create('<p><b>foo</b></p>')
self.convert()
s.assertElementPresent('jquery=.editable p strong:contains(foo)')
def test_i_is_translated_to_em(self):
s = self.selenium
self.create('<p><i>foo</i></p>')
self.convert()
s.assertElementPresent('jquery=.editable p em:contains(foo)')
def test_double_p_is_removed(self):
s = self.selenium
self.create('<p><p>foo</p>')
self.convert()
s.assertElementPresent('jquery=.editable p:contains(foo)')
s.assertXpathCount('//*[@class="editable"]//p', 1)
def test_all_double_brs_are_translated_to_p(self):
s = self.selenium
self.create('<p>foo<br><br>bar<br><br>baz</p>')
self.convert()
s.waitForCssCount('css=.editable p', 3)
self.assertEqual('<p>foo</p><p>bar</p><p>baz</p>',
self.eval('window.jQuery(".editable")[0].innerHTML'))
s.assertXpathCount('//*[@class="editable"]//br', 0)
def test_single_br_is_conserved(self):
s = self.selenium
self.create('<p>foo<br>bar</p>')
self.convert()
s.assertXpathCount('//*[@class="editable"]//p', 1)
s.assertXpathCount('//*[@class="editable"]//br', 1)
def test_separate_single_brs_are_conserved(self):
s = self.selenium
self.create('<p>foo<br>bar<br>baz</p>')
self.convert()
s.assertXpathCount('//*[@class="editable"]//p', 1)
s.assertXpathCount('//*[@class="editable"]//br', 2)
def test_a_witout_href_should_be_escaped(self):
s = self.selenium
self.create('<p>A stupid <a>link</a>.</p>')
self.convert()
s.assertAttribute('css=.editable p a@href', '*#')
def test_a_with_href_should_be_allowed(self):
s = self.selenium
self.create('<p>A working <a href="foo">link</a>.</p>')
self.convert()
s.assertAttribute('css=.editable p a@href', '*foo')
def test_a_target_should_be_allowed(self):
s = self.selenium
self.create('<p>A working <a href="foo" target="_blank">link</a>.</p>')
self.convert()
s.assertAttribute('css=.editable p a@target', '_blank')
def test_text_nodes_should_become_paragraphs(self):
s = self.selenium
self.create('Mary<p>had a little</p>')
self.convert()
s.assertElementPresent('jquery=.editable p:contains(Mary)')
def test_top_level_inline_styles_are_joined_to_paragraph(self):
s = self.selenium
self.create('Mary <strong>had</strong> a little lamb.')
self.convert()
s.assertElementPresent('jquery=.editable p:contains(Mary had a)')
s.assertElementPresent('jquery=.editable p > strong:contains(had)')
def test_top_level_inline_styles_are_not_joined_to_existing_p(self):
s = self.selenium
self.create(
'<p>foo</p>Mary <strong>had</strong> a little lamb. <p>bar</p>')
self.convert()
s.assertElementPresent('jquery=.editable p:contains(Mary had a)')
s.assertElementPresent('jquery=.editable p > strong:contains(had)')
# XXX jQuery's contains() yields incorrect results here, why?
s.assertElementNotPresent(
'//*[contains(@class, "editable")]//p[contains(., "foo Mary")]')
s.assertElementNotPresent(
'//*[contains(@class, "editable")]//p[contains(., "lamb. bar")]')
def test_quotation_marks_are_normalized(self):
s = self.selenium
self.create(u'<p>“up” and „down‟ and «around»</p>')
self.convert()
s.assertText('css=.editable p', '"up" and "down" and "around"')
| bsd-3-clause | -6,725,216,459,648,159,000 | 37.084034 | 79 | 0.59466 | false |
dc3-plaso/plaso | tests/formatters/skype.py | 1 | 3621 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests for the Skype main database event formatter."""
import unittest
from plaso.formatters import skype
from tests.formatters import test_lib
class SkypeAccountFormatterTest(test_lib.EventFormatterTestCase):
"""Tests for the Skype account event formatter."""
def testInitialization(self):
"""Tests the initialization."""
event_formatter = skype.SkypeAccountFormatter()
self.assertIsNotNone(event_formatter)
def testGetFormatStringAttributeNames(self):
"""Tests the GetFormatStringAttributeNames function."""
event_formatter = skype.SkypeAccountFormatter()
expected_attribute_names = [
u'username',
u'email',
u'country']
self._TestGetFormatStringAttributeNames(
event_formatter, expected_attribute_names)
# TODO: add test for GetMessages.
class SkypeChatFormatterTest(test_lib.EventFormatterTestCase):
"""Tests for the Skype chat event formatter."""
def testInitialization(self):
"""Tests the initialization."""
event_formatter = skype.SkypeChatFormatter()
self.assertIsNotNone(event_formatter)
def testGetFormatStringAttributeNames(self):
"""Tests the GetFormatStringAttributeNames function."""
event_formatter = skype.SkypeChatFormatter()
expected_attribute_names = [
u'from_account',
u'to_account',
u'title',
u'text']
self._TestGetFormatStringAttributeNames(
event_formatter, expected_attribute_names)
# TODO: add test for GetMessages.
class SkypeSMSFormatterTest(test_lib.EventFormatterTestCase):
"""Tests for the Skype SMS event formatter."""
def testInitialization(self):
"""Tests the initialization."""
event_formatter = skype.SkypeSMSFormatter()
self.assertIsNotNone(event_formatter)
def testGetFormatStringAttributeNames(self):
"""Tests the GetFormatStringAttributeNames function."""
event_formatter = skype.SkypeSMSFormatter()
expected_attribute_names = [
u'number',
u'text']
self._TestGetFormatStringAttributeNames(
event_formatter, expected_attribute_names)
# TODO: add test for GetMessages.
class SkypeCallFormatterTest(test_lib.EventFormatterTestCase):
"""Tests for the Skype call event formatter."""
def testInitialization(self):
"""Tests the initialization."""
event_formatter = skype.SkypeCallFormatter()
self.assertIsNotNone(event_formatter)
def testGetFormatStringAttributeNames(self):
"""Tests the GetFormatStringAttributeNames function."""
event_formatter = skype.SkypeCallFormatter()
expected_attribute_names = [
u'src_call',
u'dst_call',
u'call_type']
self._TestGetFormatStringAttributeNames(
event_formatter, expected_attribute_names)
# TODO: add test for GetMessages.
class SkypeTransferFileFormatterTest(test_lib.EventFormatterTestCase):
"""Tests for the Skype transfer file event formatter."""
def testInitialization(self):
"""Tests the initialization."""
event_formatter = skype.SkypeTransferFileFormatter()
self.assertIsNotNone(event_formatter)
def testGetFormatStringAttributeNames(self):
"""Tests the GetFormatStringAttributeNames function."""
event_formatter = skype.SkypeTransferFileFormatter()
expected_attribute_names = [
u'source',
u'destination',
u'transferred_filename',
u'action_type']
self._TestGetFormatStringAttributeNames(
event_formatter, expected_attribute_names)
# TODO: add test for GetMessages.
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 3,280,944,007,784,335,400 | 27.069767 | 70 | 0.715272 | false |
materialsproject/MPContribs | mpcontribs-api/mpcontribs/api/notebooks/__init__.py | 1 | 2672 | # -*- coding: utf-8 -*-
import os
from tornado.escape import json_encode, json_decode, url_escape
from websocket import create_connection
from notebook.utils import url_path_join
from notebook.gateway.managers import GatewayClient
def run_cells(kernel_id, cid, cells):
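    """Run a notebook's code cells on the given kernel via the Kernel Gateway websocket.

    For each code cell an ``execute_request`` is sent over the kernel's websocket
    channel (the message ids are derived from ``cid``), and the resulting
    ``stream``/``display_data``/``execute_result`` messages are collected into a
    dict keyed by cell index. A ``ValueError`` carrying the traceback is raised
    if a cell reports an error.
    """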
print(f"running {cid} on {kernel_id}")
gw_client = GatewayClient.instance()
url = url_path_join(
gw_client.ws_url, gw_client.kernels_endpoint, url_escape(kernel_id), "channels",
)
outputs = {}
ws = create_connection(url)
for idx, cell in enumerate(cells):
if cell["cell_type"] == "code":
ws.send(
json_encode(
{
"header": {
"username": cid,
"version": "5.3",
"session": "",
"msg_id": f"{cid}-{idx}-{os.getpid()}",
"msg_type": "execute_request",
},
"parent_header": {},
"channel": "shell",
"content": {
"code": cell["source"],
"silent": False,
"store_history": False,
"user_expressions": {},
"allow_stdin": False,
"stop_on_error": True,
},
"metadata": {},
"buffers": {},
}
)
)
outputs[idx] = []
status = None
while status is None or status == "busy" or not len(outputs[idx]):
msg = ws.recv()
msg = json_decode(msg)
msg_type = msg["msg_type"]
if msg_type == "status":
status = msg["content"]["execution_state"]
elif msg_type in ["stream", "display_data", "execute_result"]:
# display_data/execute_result required fields:
# "output_type", "data", "metadata"
# stream required fields: "output_type", "name", "text"
output = msg["content"]
output.pop("transient", None)
output["output_type"] = msg_type
msg_idx = msg["parent_header"]["msg_id"].split("-")[1]
outputs[int(msg_idx)].append(output)
elif msg_type == "error":
tb = msg["content"]["traceback"]
raise ValueError(tb)
ws.close()
return outputs
| mit | -2,232,988,239,722,105,000 | 38.294118 | 88 | 0.421781 | false |
solus-project/evolve-sc | xng/plugins/os_release.py | 2 | 2969 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
#
# This file is part of solus-sc
#
# Copyright © 2017-2018 Ikey Doherty <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
import os.path
class OsRelease:
mapping = None
def __init__(self):
self.mapping = dict()
paths = [
"/etc/os-release",
"/usr/lib64/os-release",
"/usr/lib/os-release",
]
# Follow paths in stateless order
for p in paths:
if not os.path.exists(p):
continue
try:
self._parse_blob(p)
break
except Exception as e:
print(e)
continue
def _parse_blob(self, path):
""" Read the key/value sh-source syntax os-release file. It might
break typical INI constraints so we can't use ConfigObj here """
with open(path, "r") as inblob:
for line in inblob.readlines():
line = line.strip()
if '=' not in line:
continue
splits = line.split('=')
key = splits[0]
val = '='.join(splits[1:]).strip()
if val.startswith('"'):
val = val[1:]
if val.endswith('"'):
val = val[0:-1]
self.mapping[key.lower()] = val
def _keyval(self, key, fallback=""):
""" Simple helper to not panic when reading a value """
if key not in self.mapping:
return fallback
return self.mapping[key]
def id(self):
""" Return the main os-family """
return self._keyval("id", "<unknown OS type>")
def id_like(self):
""" Return the ID_LIKE field """
return self._keyval("id_like", "<unknown OS type>")
def from_family(self, family):
""" Return True if the OS is from the given family """
if self.id() == family:
return True
if self.id_like() == family:
return True
return False
def pretty_name(self):
return self._keyval("pretty_name", "<unknown OS>")
def version_id(self):
return self._keyval("version_id")
def version(self):
return self._keyval("version")
def name(self):
""" Return the OS name """
return self._keyval("name")
def home_url(self):
""" Return the homepage """
return self._keyval("home_url", None)
def support_url(self):
""" Return the main support URL """
return self._keyval("support_url", None)
def bug_report_url(self):
""" Return the bug report URL """
return self._keyval("bug_report_url", None)
| gpl-2.0 | -1,150,402,107,070,261,200 | 28.386139 | 76 | 0.528639 | false |
aldeka/herring | herring/puzzles/tasks.py | 1 | 3926 | import celery
from celery import shared_task
from puzzles.models import Puzzle
from puzzles.spreadsheets import make_sheet
import slacker
import time
import sys
import logging
try:
from herring.secrets import SECRETS
# A token logged in as a legitimate user. Turns out that "bots" can't
# do the things we want to automate!
SLACK = slacker.Slacker(SECRETS['slack-user-token'])
except KeyError:
print(
"Couldn't find the SECRETS environment variable. This server won't be able "
"to use Slack and Google Drive integrations."
)
SLACK = None
def post_local_and_global(local_channel, local_message, global_message):
logging.warning("tasks: post_local_and_global(%s, %s, %s)", local_channel, local_message, global_message)
try:
response = SLACK.channels.join(local_channel)
channel_id = response.body['channel']['id']
SLACK.chat.post_message(channel_id, local_message, link_names=True, as_user=True)
except Exception:
# Probably the channel's already archived. Don't worry too much about it.
logging.warning("tasks: failed to post to local channel (probably archived)", exc_info=True)
response = SLACK.channels.join('puzzle-status')
global_channel_id = response.body['channel']['id']
SLACK.chat.post_message(global_channel_id, global_message, link_names=True, as_user=True)
@shared_task(rate_limit=0.5)
def post_answer(slug, answer):
logging.warning("tasks: post_answer(%s, %s)", slug, answer)
puzzle = Puzzle.objects.get(slug=slug)
answer = answer.upper()
local_message = ":tada: Confirmed answer: {}".format(answer)
global_message = ':tada: Puzzle "{name}" (#{slug}) was solved! The answer is: {answer}'.format(
answer=answer,
slug=slug,
name=puzzle.name
)
post_local_and_global(slug, local_message, global_message)
@shared_task(rate_limit=0.5)
def post_update(slug, updated_field, value):
logging.warning("tasks: post_update(%s, %s, %s)", slug, updated_field, value)
try:
puzzle = Puzzle.objects.get(slug=slug)
except Puzzle.DoesNotExist:
return
local_message = '{} set to: {}'.format(updated_field, value)
global_message = '"{name}" (#{slug}) now has these {field}: {value}'.format(
field=updated_field,
value=value,
slug=slug,
name=puzzle.name
)
post_local_and_global(slug, local_message, global_message)
@shared_task(bind=True, max_retries=10, default_retry_delay=5, rate_limit=0.25) # rate_limit is in tasks/sec
def create_puzzle_sheet_and_channel(self, slug):
logging.warning("tasks: create_puzzle_sheet_and_channel(%s)", slug)
try:
puzzle = Puzzle.objects.get(slug=slug)
except Exception as e:
logging.error("tasks: Failed to retrieve puzzle when creating sheet and channel (may be retried) - %s", slug, exc_info=True)
raise self.retry(exc=e)
sheet_title = '{} {}'.format(puzzle.identifier(), puzzle.name)
sheet_url = make_sheet(sheet_title).rsplit('?', 1)[0]
puzzle.url = sheet_url
puzzle.save()
try:
created = SLACK.channels.create(slug)
except slacker.Error:
created = SLACK.channels.join(slug)
channel_id = created.body['channel']['id']
puzzle_name = puzzle.name
if len(puzzle_name) >= 30:
puzzle_name = puzzle_name[:29] + '\N{HORIZONTAL ELLIPSIS}'
topic = "{name} - Sheet: {sheet} - Puzzle: {url}".format(
name=puzzle_name,
url=puzzle.hunt_url,
sheet=sheet_url
)
SLACK.channels.set_topic(channel_id, topic)
response = SLACK.channels.join('puzzle-status')
status_channel_id = response.body['channel']['id']
new_channel_msg = 'New puzzle created: "{name}" (#{slug})'.format(
slug=slug, name=puzzle.name
)
SLACK.chat.post_message(status_channel_id, new_channel_msg, link_names=True, as_user=True)
| mit | 1,425,608,479,827,831,800 | 34.053571 | 132 | 0.662506 | false |
david-hoffman/scripts | mip.py | 1 | 2599 | # ! /usr/bin/env python
# -*- coding: utf-8 -*-
"""mip.py
A utility to call mip from the command line
Usage:
mip.py <myfile> [--PDF --log]
mip.py -h | --help
Options:
-h --help Show this screen.
--PDF Print PDF to current directory
--log Take log of data first
"""
from docopt import docopt
import matplotlib.pyplot as plt
from dphplotting.mip import mip
# check to see if we're being run from the command line
if __name__ == "__main__":
# if we want to update this to take more arguments we'll need to use one of the
# argument parsing packages
# grab the arguments from the command line
arg = docopt(__doc__)
# a little output so that the user knows whats going on
print("Running mip on", arg["<myfile>"])
# Need to take the first system argument as the filename for a TIF file
# test if filename has tiff in it
filename = arg["<myfile>"]
# try our main block
try:
if ".tif" in filename or ".tiff" in filename:
# Import skimage so we have access to tiff loading
import tifffile as tif
# here's the real danger zone, did the user give us a real file?
try:
data = tif.imread(filename)
except FileNotFoundError as er:
raise er
if arg["--log"]:
import numpy as np
if data.min() > 0:
data = np.log(data)
else:
print(filename, "had negative numbers, log not taken")
# Trying to set the cmap here opens a new figure window
# need to set up kwargs for efficient argument passing
# plt.set_cmap('gnuplot2')
# plot the data
fig, ax = mip(data)
# readjust the white space (maybe move this into main code later)
fig.subplots_adjust(top=0.85, hspace=0.3, wspace=0.3)
# add an overall figure title that's the file name
fig.suptitle(filename, fontsize=16)
# check to see if we should make a PDF
if arg["--PDF"]:
fig.savefig(filename.replace(".tiff", ".pdf").replace(".tif", ".pdf"))
else:
# I still don't know why fig.show() doesn't work
# I think it has something to do with pyplot's backend
plt.show()
else:
# this is our own baby error handling, it avoids loading the
# skimage package
print("You didn't give me a TIFF")
except Exception as er:
print(er)
| apache-2.0 | -1,941,892,218,879,892,500 | 31.4875 | 86 | 0.565987 | false |
gregpechiro/dndtools | dndtools/dnd/skills/views.py | 1 | 5380 | # -*- coding: utf-8 -*-
from django.shortcuts import get_object_or_404, render_to_response
from django.template.context import RequestContext
from dndtools.dnd.menu import MenuItem, menu_item, submenu_item
from dndtools.dnd.views import permanent_redirect_object, permanent_redirect_view, is_3e_edition
from dndtools.dnd.dnd_paginator import DndPaginator
from dndtools.dnd.filters import SkillFilter
from dndtools.dnd.models import Rulebook, Skill, SkillVariant
@menu_item(MenuItem.CHARACTER_OPTIONS)
@submenu_item(MenuItem.CharacterOptions.SKILLS)
def skill_list(request):
f = SkillFilter(request.GET, queryset=Skill.objects.all())
form_submitted = 1 if '_filter' in request.GET else 0
paginator = DndPaginator(f.qs, request)
return render_to_response('dnd/skills/skill_list.html',
{
'request': request,
'skill_list': paginator.items(),
'paginator': paginator,
'filter': f,
'form_submitted': form_submitted,
}, context_instance=RequestContext(request), )
@menu_item(MenuItem.CHARACTER_OPTIONS)
@submenu_item(MenuItem.CharacterOptions.SKILLS)
def skill_detail(request, skill_slug, rulebook_slug=None,
rulebook_id=None):
# fetch the class
skill = get_object_or_404(Skill.objects.select_related(
'skill_variant', 'skill_variant__rulebook'), slug=skill_slug)
# fetch primary variant, this is independent of rulebook selected
try:
primary_variant = SkillVariant.objects.select_related(
'rulebook', 'rulebook__dnd_edition',
).filter(
skill=skill,
).order_by('-rulebook__dnd_edition__core', '-rulebook__published')[0]
except Exception:
primary_variant = None
# if rulebook is supplied, select find this variant
if rulebook_slug and rulebook_id:
# use canonical link in head as this is more or less duplicated content
use_canonical_link = True
selected_variant = get_object_or_404(
SkillVariant.objects.select_related(
'rulebook', 'skill', 'rulebook__dnd_edition'),
skill__slug=skill_slug,
rulebook__pk=rulebook_id)
# possible malformed/changed slug
if rulebook_slug != selected_variant.rulebook.slug:
return permanent_redirect_object(request, selected_variant)
# selected variant is primary! Redirect to canonical url
if selected_variant == primary_variant:
return permanent_redirect_view(
request, skill_detail, kwargs={
'skill_slug': skill_slug}
)
else:
# this is canonical, no need to specify it
use_canonical_link = False
selected_variant = primary_variant
other_variants = [
variant
for variant
in skill.skillvariant_set.select_related(
'rulebook', 'rulebook__dnd_edition', 'skill').all()
if variant != selected_variant
]
if selected_variant:
display_3e_warning = is_3e_edition(selected_variant.rulebook.dnd_edition)
else:
display_3e_warning = False
feat_list = skill.required_by_feats.select_related('rulebook').all()
feat_paginator = DndPaginator(feat_list, request)
return render_to_response('dnd/skills/skill_detail.html',
{
'skill': skill,
'feat_list': feat_paginator.items(),
'feat_paginator': feat_paginator,
'request': request,
'i_like_it_url': request.build_absolute_uri(),
'inaccurate_url': request.build_absolute_uri(),
'selected_variant': selected_variant,
'other_variants': other_variants,
'use_canonical_link': use_canonical_link,
'display_3e_warning': display_3e_warning,
}, context_instance=RequestContext(request), )
@menu_item(MenuItem.CHARACTER_OPTIONS)
@submenu_item(MenuItem.CharacterOptions.SKILLS)
def skills_in_rulebook(request, rulebook_slug, rulebook_id):
rulebook = get_object_or_404(Rulebook, pk=rulebook_id)
if not rulebook.slug == rulebook_slug:
return permanent_redirect_view(request, 'skills_in_rulebook',
kwargs={
'rulebook_slug': rulebook.slug,
'rulebook_id': rulebook_id, })
skill_list = [
skill_variant.skill
for skill_variant
in rulebook.skillvariant_set.all()
]
return render_to_response('dnd/skills/skill_in_rulebook.html',
{
'rulebook': rulebook,
'skill_list': skill_list,
'request': request,
'display_3e_warning': is_3e_edition(rulebook.dnd_edition),
}, context_instance=RequestContext(request), ) | mit | -8,548,592,003,951,022,000 | 41.706349 | 96 | 0.565985 | false |
tsybulkin/dor | dor.py | 1 | 11481 | #!/usr/bin/python
#
# dor-bug robot
#
#
#######################################################
from math import sin,cos,radians,degrees,pi,asin,atan,sqrt
import numpy as np
from tools import *
from time import sleep
alpha_max = pi/2
beta_max = pi
phi_max = pi/5
small_angles = [-0.01, 0.0, 0.01 ]
big_angles = [ -0.5, -0.02, 0, 0.02, 0.5]
MAX_ATTEMPTS = 20
class Dor():
def __init__(self,X=0,Y=0):
self.actions = []
self.Ox = X
self.Oy = Y
self.Oz = 0
self.R = 5
self.legs = [ Leg(i,pi/2*i) for i in range(4) ]
self.raised_leg = 0
self.legs[0].alpha += -0.01
self.legs[2].alpha += -0.01
self.feet_distances = self.get_dist_between_feet()
self.legs[0].xy = self.get_foot(0,(0,0,0))[:2]
self.legs[1].xy = self.get_foot(1,(0,0,0))[:2]
self.legs[3].xy = self.get_foot(3,(0,0,0))[:2]
self.legs[2].xy = self.get_ground_xy(2)
self.CoM = np.array([0,0])
self.orientation = 0.0
_, self.CoM, _q = self.is_stable()
def get_foot(self, leg_index, (da,db,dp)):
Leg = self.legs[leg_index].get_copy()
Leg.alpha += da
Leg.beta += db
Leg.phi += dp
return np.array([self.R * cos(Leg.aa) + Leg.get_foot()[0] * cos(Leg.aa+Leg.phi),
self.R * sin(Leg.aa) + Leg.get_foot()[0] * sin(Leg.aa+Leg.phi),
Leg.get_foot()[1]])
def get_feet(self):
"""returns a list of foot coordinates in its own reference system
"""
return [ np.array([ self.R * cos(Leg.aa) + Leg.get_foot()[0] * cos(Leg.aa+Leg.phi),
self.R * sin(Leg.aa) + Leg.get_foot()[0] * sin(Leg.aa+Leg.phi),
Leg.get_foot()[1] ]) for Leg in self.legs]
def get_ground_xy(self,leg_index):
"""returns xy coordinates for the foot having given foot_index
in the global frame of references
"""
Matrix = np.array([[0, 1],[-1, 0]])
#print "leg:",leg_index,
#raised = self.raised_leg
leg1 = (leg_index+3)%4
leg2 = (leg_index+1)%4
#print "Legs:", leg1,leg2
feet = self.get_feet()
v1 = feet[leg2]-feet[leg1]
e1 = v1 / np.linalg.norm(v1)
#e2 = Matrix.dot(e1)
#v2 = feet[leg_index]-feet[leg1]
d3 = self.feet_distances[(leg1,leg2)]
d1 = self.feet_distances[(leg1,leg_index)]
d2 = self.feet_distances[(leg_index,leg2)]
q = (d3**2+d1**2-d2**2)/d3**2/2
h = sqrt(d1**2-(q*d3)**2)
#print "q,h:",q,h
#print "v1,v2:",v1,v2
#print q*v1+h*e2, "=", feet[leg_index]
#print "C:",C
v11 = self.legs[leg2].xy - self.legs[leg1].xy
e11 = v11/np.linalg.norm(v11)
e22 = Matrix.dot(e11)
#print "e11,e22:",e11,e22
res = self.legs[leg1].xy + q*v11 + h*e22
#print "xy =",res
return res
def get_dist_between_feet(self):
distances = {}
feet = self.get_feet()
for i in range(len(feet)):
for j in range(len(feet)-1):
d = np.linalg.norm(feet[i]-feet[j])
distances[(i,j)] = d
distances[(j,i)] = d
return distances
def find_phi(self,Action):
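        """Solve for the phi increment of the second standing leg (index i+2).

        Given the proposed increments for the other legs in ``Action``, a secant
        iteration searches the allowed phi range for the ``dp`` that keeps the
        distance between the feet of legs (i+1) and (i+2) equal to the stored
        value; ``None`` is returned when no root is bracketed within the limits.
        """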
i = self.raised_leg
xyz1 = self.get_foot((i+1)%4,Action[1])
(da,db,_) = Action[2]
#print "\nLeg2: da,db:",da,db
p = self.legs[(i+2)%4].phi
A = -phi_max - p
B = phi_max - p
#print "A,B:",A,B
def fun(dp):
xyz2 = self.get_foot((i+2)%4,(da,db,dp))
return np.linalg.norm(xyz2-xyz1) - self.feet_distances[((i+1)%4,(i+2)%4)]
if fun(A)*fun(B) > 0:
#print "Leg2: Phi cannot be found. Fa=%.1g, Fb=%.1g" % (fun(A),fun(B))
return None
else:
return secant(A,B,fun(A),fun(B),fun)
def take_action(self, action):
"""changes state taking the given action
Assumed that action is legal
"""
self.actions.append((self.raised_leg,action[0],action[1],action[2][:2],action[3][2]))
old_raised = self.raised_leg
#print action
for k in range(4):
self.legs[(old_raised+k)%4].move_leg(action[k])
if old_raised in self.get_raised_leg():
# check CoM
stable,CoM,qs = self.is_stable()
if stable:
# no changes
self.legs[self.raised_leg].xy = self.get_ground_xy(self.raised_leg)
self.CoM = CoM
else:
# raised goes to the opposite leg
self.raised_leg = (old_raised + 2) % 4
#print "Opposite leg is raised:", self.raised_leg
self.legs[old_raised].xy = self.get_ground_xy(old_raised)
stable,CoM,qs1 = self.is_stable()
if not stable: print "qs:%s, qs1:%s" % (str(qs), str(qs1))
self.CoM = CoM
else:
# raised goes to the next leg
self.raised_leg = (old_raised + 1) % 4
self.legs[old_raised].xy = self.get_ground_xy(old_raised)
stable,CoM1,qs1 = self.is_stable()
if not stable:
                # the opposite leg is raised
self.raised_leg = (old_raised + 3) % 4
#self.legs[i].xy = self.get_ground_xy(i)
stable,CoM2,qs = self.is_stable()
if not stable: print "q1:%s, q2:%s" % (qs1,qs)
self.CoM = CoM2
else:
# both could be stable
self.raised_leg = (old_raised + 3) % 4
#self.legs[i].xy = self.get_ground_xy(i)
stable,CoM2,_ = self.is_stable()
if not stable:
# the first option is true
self.raised_leg = (old_raised + 1) % 4
#self.legs[i].xy = self.get_ground_xy(i)
self.CoM = CoM1
stable = True
else:
# both stable
if np.linalg.norm(CoM1 - self.CoM) < np.linalg.norm(CoM2 - self.CoM):
self.raised_leg = (old_raised + 1) % 4
#self.legs[i].xy = self.get_ground_xy(i)
self.CoM = CoM1
else:
self.raised_leg = (old_raised + 3) % 4
#self.legs[i].xy = self.get_ground_xy(i)
self.CoM = CoM2
self.update_orientation()
self.feet_distances = self.get_dist_between_feet()
if not stable:
print "Fell"
return stable
def get_move(self, Action):
i = self.raised_leg
Res = {0:Action[0], 1:Action[1], 2:Action[2]+(0,)}
dp = self.find_phi(Res)
if dp == None: return self.get_random_action()
Res[2] = Action[2]+(dp,)
foot3 = find_foot3(self,i,Action[3],Res)
if foot3 == None:
return self.get_random_action()
a_b = find_alpha_beta(self,i,foot3)
if a_b == None:
return self.get_random_action()
alpha, beta = a_b
leg = self.legs[(i+3)%4]
da = alpha - leg.alpha
db = beta - leg.beta
if leg.alpha_is_legal(da) and leg.beta_is_legal(db):
Res[3] = (da,db,Action[3])
else:
return self.get_random_action()
return Res
def get_random_action(self):
Action = {}
i = self.raised_leg
Action[0] = get_random_action(self.legs[i],big_angles)
N = 0
while True:
if N > MAX_ATTEMPTS:
#print "Cannot move any standing leg at the current state"
Action[1] = (0,0,0)
Action[2] = (0,0,0)
Action[3] = (0,0,0)
return Action
Action[1] = get_random_action(self.legs[(i+1)%4],small_angles)
Action[2] = get_random_action(self.legs[(i+2)%4],small_angles)
# find phi
dp = self.find_phi(Action)
if not self.legs[(i+2)%4].phi_is_legal(dp):
#print "dPhi =",dp
#print "Phi found is illegal"
N += 1
continue
da,db,_ = Action[2]
Action[2] = (da,db,dp)
attempts = 5
while attempts > 0:
dp3 = choose_randomly(big_angles)
foot3 = find_foot3(self, i, dp3, Action)
if foot3 != None: break
else: attempts -= 1
else: # no solution found
#print "This random action is illegal\n"
N += 1
continue
# find alpha and beta
a_b = find_alpha_beta(self,i,foot3)
if a_b == None:
N += 1
continue
else:
alpha, beta = a_b
leg = self.legs[(i+3)%4]
da = alpha - leg.alpha
db = beta - leg.beta
if leg.alpha_is_legal(da) and leg.beta_is_legal(db):
Action[3] = (da,db,dp3)
break
else:
#print "legal da or db cannot be found\nda,da:",da,db
#print "leg3: a, b, phi:", leg.alpha, leg.beta, leg.phi
N += 1
continue
return Action
def get_raised_leg(self):
feet = self.get_feet()
self.feet = feet
v1 = feet[-1]-feet[0]
v2 = feet[1]-feet[0]
v3 = feet[2]-feet[0]
dot = np.dot(v3, np.cross(v1,v2) )
if dot == 0:
#print "all legs touch the surface\n"
raise
return []
elif dot > 0:
#print "1st and 3rd legs can be raised\n"
return [1, 3]
else:
#print "0th and 2nd legs can be raised\n"
return [0, 2]
def is_stable(self):
"""returns tuple. First element is True or Flase if robot can stand on its tree legs respectively
the second element is a projection of centre of mass onto the plane of three feet
the third is a tuple of three q - a load factor on each leg
"""
raised = self.raised_leg
feet = self.get_feet()
f1,f2,f3 = tuple([ feet[i] for i in range(4) if i != raised])
v1 = f1-f2
v2 = f3-f2
ez1 = np.cross(v2,v1)
ez1 = ez1 / np.linalg.norm(ez1)
#print "sin:",ez1
X0,Y0 = (ez1 * np.dot(ez1,f1))[:2]
#print "X0,Y0",X0,Y0
X1,Y1 = f1[:2]
X2,Y2 = f2[:2]
X3,Y3 = f3[:2]
#print "Feet:", f1[:2],f2[:2],f3[:2]
TX0 = (X0-X3)/(X1-X3)
TX2 = (X2-X3)/(X1-X3)
q2 = ( TX0 - (Y0-Y3)/(Y1-Y3) )/( TX2 - (Y2-Y3)/(Y1-Y3) )
q1 = TX0 - TX2 * q2
q3 = 1 - (q1 + q2)
xy = [ self.legs[i].xy for i in range(4) if raised != i]
CoM = xy[0]*q1+xy[1]*q2+xy[2]*q3
if q1>0 and q2>0 and q3>0: return (True, CoM, (q1,q2,q3))
else: return (False,CoM,(q1,q2,q3))
def update_orientation(self):
#print "Raised leg:", self.raised_leg
#print "CoM:",self.CoM
if self.raised_leg != 0:
f0 = self.get_foot(0,(0,0,0))
niu = atan(f0[1]/f0[0])
#print "niu:",niu,self.legs[0].xy
self.orientation = atan((self.legs[0].xy[1]-self.CoM[1])/(self.legs[0].xy[0]-self.CoM[0])) - niu
else:
f2 = self.get_foot(2,(0,0,0))
niu = atan(f2[1]/f2[0])
#print "niu3:", niu,self.legs[2].xy
self.orientation = atan((self.legs[2].xy[1]-self.CoM[1])/(self.legs[2].xy[0]-self.CoM[0])) - niu
#print "orientation:",self.orientation
#if abs(self.orientation)>1:
# raise
def measure_output(self,mode):
if mode == "forward":
raised = self.raised_leg
opposite = (raised+2)%4
return np.array([sum([l.xy[0] for l in self.legs if not l.index in [raised,opposite] ])/2,
sum([l.xy[1] for l in self.legs if not l.index in [raised,opposite] ])/2])
elif mode == "right" or mode == "left":
return float(self.orientation)
else:
return None
def draw(self,plt):
ps = plt.gca().patches
while len(ps) >1: ps.pop()
circle = plt.Circle(tuple(self.CoM), radius=self.R, fc='r')
plt.gca().add_patch(circle)
raised = self.raised_leg
for i in range(4):
f = self.legs[(raised+i)%4].xy
if i == 0:
foot = plt.Circle(tuple(f), radius=self.R/5, fc='r')
else:
foot = plt.Circle(tuple(f), radius=self.R/5, fc='b')
plt.gca().add_patch(foot)
plt.draw()
sleep(0.5)
class Leg():
def __init__(self,index,attach_angle,alpha=radians(30),beta=radians(45),phi=0,L=10):
self.index = index
self.aa = attach_angle
self.alpha = alpha
self.beta = beta
self.phi = phi
self.L = L
def get_copy(self):
copy = Leg(self.index,self.aa, self.alpha, self.beta, self.phi, self.L)
return copy
def move_leg(self,(da,db,dp)):
self.alpha += da
self.beta += db
self.phi += dp
def get_foot(self):
"""returns a xz coordinate of a foot in leg's own reference system
"""
return np.array([ self.L * ( sin(self.alpha) + sin(self.beta-self.alpha) ),
self.L * ( cos(self.alpha) - cos(self.beta-self.alpha) ) ])
def alpha_is_legal(self,da):
a = self.alpha + da
return a >= pi/15 and a < pi/2
def beta_is_legal(self,db):
b = self.beta + db
return b >= pi/9 and b < 2 * self.alpha
def phi_is_legal(self,dp):
if dp == None:
return False
p = self.phi + dp
return abs(p) < phi_max
| apache-2.0 | 7,863,833,608,741,897,000 | 23.221519 | 99 | 0.587579 | false |
h-friederich/lpm | run-server.py | 1 | 3227 | #!/usr/bin/env python
import sys
import os
from flask import Flask, redirect, url_for
# ensure lpm is found and can be directly imported from this file
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import lpm
app = Flask(__name__)
app.config.update(
DEBUG=True,
SECRET_KEY=b'2E\x9d\xe8"\xb5\xa3\x1b\xc5a6\xd8\x12:\x1ea\xf6\x91N[\xe4X\x8e\x8a',
MONGO_DBNAME='lpm',
LPM_AUTH_SRC='simple',
LPM_AUTH_USERS={
'worker': dict(
name='Worker',
password='1234',
roles={'login', 'component_edit'},
active=True,
),
'admin': dict(
name='Admin',
password='1234',
roles={
'login', 'request_login',
'component_edit', 'component_admin',
'stock_admin', 'item_admin', 'db_debug',
},
active=True,
),
'viewer': dict(
name='Viewer',
password='1234',
roles={'login'},
active=True,
),
'disabled': dict(
name='Disabled User',
password='1234',
roles={'login', 'component_admin'},
active=False,
),
'ext': dict(
name='External Scripts',
password='1234',
roles={'request_login', 'component_admin'},
active=True,
)
},
LPM_PARTNO_PREFIX='LP',
LPM_COMPONENT_FILES_DIR='/tmp',
LPM_COMPONENT_CATEGORIES=['category 1', 'category 2'],
LPM_ITEM_VIEW_MAP={
'LP0002': 'LP0002.html',
},
LPM_ITEM_IMPORT_MAP={
'LP0002': dict(
required_fields=['param1'],
integer_fields=['param2'],
),
'LP0001a': dict(
required_fields=['param5'],
date_fields=['param5'],
integer_fields=['param2', 'param6'],
floating_point_fields=['param7'],
boolean_fields=['param8']
)
},
LPM_ITEM_STATUS_MAP={
'LP0002': {
'tested': dict(origins=[''], unavailable=False, role='item_admin'),
'reserved': dict(origins=['', 'tested'], unavailable=False),
'shipped': dict(origins=['reserved'], unavailable=True, require_admin=False),
'obsolete': dict(origins=['', 'tested', 'reserved', 'shipped'],
unavailable=True, role='item_admin')
},
'default': {
'obsolete': dict(origins=[''], unavailable=True, role='item_admin')
},
},
LPM_EXT_UPDATE_FIELDS={
'default': set(),
},
)
lpm.init(app)
@app.route('/')
def main():
"""
main entry point, redirect to the items overview
"""
return redirect(url_for('items.overview'))
app.run()
| bsd-3-clause | 508,047,208,766,367,200 | 31.27 | 93 | 0.438798 | false |
bmihelac/django-cruds | setup.py | 1 | 1592 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import cruds
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
version = cruds.__version__
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
print("You probably want to also tag the version now:")
print(" git tag -a %s -m 'version %s'" % (version, version))
print(" git push --tags")
sys.exit()
readme = open('README.rst').read()
history = open('HISTORY.rst').read().replace('.. :changelog:', '')
setup(
name='django-cruds',
version=version,
description="""django-cruds is simple drop-in django app that creates CRUD for faster prototyping.""", # noqa
long_description=readme + '\n\n' + history,
author='Bojan Mihelac',
author_email='[email protected]',
url='https://github.com/bmihelac/django-cruds',
packages=[
'cruds',
],
include_package_data=True,
install_requires=[
],
license="BSD",
zip_safe=False,
keywords='django-cruds',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
)
| bsd-3-clause | -6,656,825,327,990,374,000 | 27.428571 | 114 | 0.608668 | false |
akx/shoop | shoop/core/methods/base.py | 1 | 7131 | # -*- coding: utf-8 -*-
# This file is part of Shoop.
#
# Copyright (c) 2012-2016, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import unicode_literals
from decimal import Decimal
import six
from django import forms
from django.core.exceptions import ValidationError
from django.http.response import HttpResponseRedirect
from django.utils.encoding import force_bytes
from django.utils.translation import ugettext_lazy as _
from shoop.core.models import PaymentStatus
from shoop.core.pricing import PriceInfo
class BaseMethodModule(object):
"""
Base method module implementation.
"""
checkout_phase_class = None
identifier = None
name = None
admin_detail_view_class = None
option_fields = [
("price", forms.DecimalField(
initial=0,
label=_('Price'),
)),
("price_waiver_product_minimum", forms.DecimalField(
label=_('Waiver minimum'),
help_text=_(
'Waive the price when the total price of products '
'in the order reaches this amount'
),
initial=0
)),
]
def __init__(self, method, options):
"""
:type method: shoop.core.models.Method
:type options: dict
"""
self.method = method
self.options = options
def get_options(self):
data = self.options
if self.option_fields:
# If we have declared `option_fields`, use them to create a faux form that provides data validation and
# transformation over the string-form data stored in the database.
class_name = "%sOptionForm" % self.__class__.__name__
if six.PY2: # pragma: no cover
class_name = force_bytes(class_name, errors="ignore")
form = (
type(
class_name,
(forms.BaseForm,),
{"base_fields": dict(self.option_fields)}
)
)(data=data)
form.full_clean()
data.update(getattr(form, "cleaned_data", {}))
return data
def get_validation_errors(self, source, **kwargs):
"""
Return an iterable of human-readable errors (either Django's `ValidationError`s
or just plain old strings) if there are any errors that would prevent using
this method with a given `source`.
This (instead of raising an error) broadly follows the Notification pattern.
http://martinfowler.com/eaaDev/Notification.html
:param source: source object
:param kwargs: Other kwargs for future expansion
:return: Iterable of errors
:rtype: Iterable[str]
"""
return ()
def _is_price_waived(self, source):
"""
Figure out whether any price should be waived for the given source.
Meant for internal use by other module impls, hence the underscore.
:param source: source
:type source: shoop.core.order_creator.OrderSource
:return: Boolean of waiver
:rtype: bool
"""
options = self.get_options()
waive_limit_value = options.get("price_waiver_product_minimum")
if waive_limit_value and waive_limit_value > 0:
assert isinstance(waive_limit_value, Decimal)
waive_limit = source.create_price(waive_limit_value)
product_total = source.total_price_of_products
if not product_total:
return False
return (product_total >= waive_limit)
return False
def get_effective_price_info(self, source, **kwargs):
"""
Get price of this method for given OrderSource.
:param source: source object
:type source: shoop.core.order_creator.OrderSource
:param kwargs: Other kwargs for future expansion
:rtype: shoop.core.pricing.PriceInfo
"""
price_value = self.get_options().get("price", 0)
normal_price = source.shop.create_price(price_value)
if self._is_price_waived(source):
return PriceInfo(source.shop.create_price(0), normal_price, 1)
return PriceInfo(normal_price, normal_price, 1)
def get_effective_name(self, source, **kwargs):
"""
Return the effective name for this method. Useful to add shipping mode ("letter", "parcel") for instance.
:param source: source object
:type source: shoop.core.order_creator.OrderSource
:param kwargs: Other kwargs for future expansion
:return: name
:rtype: unicode
"""
try:
return self.method.name
except:
return six.text_type(self)
def get_source_lines(self, source):
from shoop.core.order_creator import SourceLine
price_info = self.get_effective_price_info(source)
assert price_info.quantity == 1
yield SourceLine(
source=source,
quantity=1,
type=self.method.line_type,
text=self.get_effective_name(source),
base_unit_price=price_info.base_unit_price,
discount_amount=price_info.discount_amount,
tax_class=self.method.tax_class,
)
class BaseShippingMethodModule(BaseMethodModule):
"""
Base shipping method module implementation.
"""
no_lower_limit_text = _('0 or below: no lower limit')
option_fields = BaseMethodModule.option_fields + [
("min_weight", forms.DecimalField(label=_('minimum weight'), initial=0, help_text=no_lower_limit_text)),
("max_weight", forms.DecimalField(label=_('maximum weight'), initial=0, help_text=no_lower_limit_text)),
]
def get_validation_errors(self, source, **kwargs):
weight = sum(((l.get("weight") or 0) for l in source.get_lines()), 0)
options = self.get_options()
min_weight = options.get("min_weight")
if min_weight:
assert isinstance(min_weight, Decimal)
if min_weight > 0 and weight < min_weight:
yield ValidationError(_("Minimum weight not met."), code="min_weight")
max_weight = options.get("max_weight")
if max_weight:
assert isinstance(max_weight, Decimal)
if max_weight > 0 and weight > max_weight:
yield ValidationError(_("Maximum weight exceeded."), code="max_weight")
class BasePaymentMethodModule(BaseMethodModule):
"""
Base payment method module implementation.
"""
def get_payment_process_response(self, order, urls):
return HttpResponseRedirect(urls["return"]) # Directly return to wherever we want to.
def process_payment_return_request(self, order, request):
if order.payment_status == PaymentStatus.NOT_PAID:
order.payment_status = PaymentStatus.DEFERRED
order.add_log_entry("Payment status set to deferred by %s" % self.method)
order.save(update_fields=("payment_status",))
| agpl-3.0 | 6,226,013,237,256,783,000 | 33.616505 | 115 | 0.615902 | false |
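# A brief sketch of how a concrete module might build on the base classes above.
# The subclass name and option values are illustrative assumptions, not part of
# Shoop; `method` would normally be a shoop.core.models.Method instance.
class FixedFeePaymentModule(BasePaymentMethodModule):
    identifier = "fixed_fee_payment"
    name = "Fixed fee payment"

# Options are stored as strings and cleaned through the faux form built from
# option_fields, so get_options() on an instance constructed with
# {"price": "10.00"} should return the price as Decimal("10.00"), which
# get_effective_price_info() then wraps in a PriceInfo.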
j-towns/pymanopt | pymanopt/tools/autodiff/_autograd.py | 1 | 2575 | """
Module containing functions to differentiate functions using autograd.
"""
try:
import autograd.numpy as np
from autograd.core import grad
except ImportError:
np = None
grad = None
from ._backend import Backend, assert_backend_available
class AutogradBackend(Backend):
def __str__(self):
return "autograd"
def is_available(self):
return np is not None and grad is not None
@assert_backend_available
def is_compatible(self, objective, argument):
return callable(objective)
@assert_backend_available
def compile_function(self, objective, argument):
def func(x):
if type(x) in (list, tuple):
return objective([np.array(xi) for xi in x])
else:
return objective(np.array(x))
return func
@assert_backend_available
def compute_gradient(self, objective, argument):
"""
Compute the gradient of 'objective' with respect to the first
argument and return as a function.
"""
g = grad(objective)
# Sometimes x will be some custom type, e.g. with the FixedRankEmbedded
# manifold. Therefore cast it to a numpy.array.
def gradient(x):
if type(x) in (list, tuple):
return g([np.array(xi) for xi in x])
else:
return g(np.array(x))
return gradient
@assert_backend_available
def compute_hessian(self, objective, argument):
h = _hessian_vector_product(objective)
def hess_vec_prod(x, a):
return h(x, a)
return hess_vec_prod
def _hessian_vector_product(fun, argnum=0):
"""Builds a function that returns the exact Hessian-vector product.
The returned function has arguments (*args, vector, **kwargs). Note,
this function will be incorporated into autograd, with name
hessian_vector_product. Once it has been this function can be
deleted."""
fun_grad = grad(fun, argnum)
def vector_dot_grad(*args, **kwargs):
args, vector = args[:-1], args[-1]
try:
return np.tensordot(fun_grad(*args, **kwargs), vector,
axes=vector.ndim)
except AttributeError:
# Assume we are on the product manifold.
return np.sum([np.tensordot(fun_grad(*args, **kwargs)[k],
vector[k], axes=vector[k].ndim)
for k in range(len(vector))])
# Grad wrt original input.
return grad(vector_dot_grad, argnum)
| bsd-3-clause | 5,524,637,587,608,100,000 | 31.1875 | 79 | 0.599612 | false |
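# A short usage sketch for the backend above (assumes the autograd package is
# installed; the quadratic cost function is an arbitrary example).
import autograd.numpy as np

backend = AutogradBackend()
if backend.is_available():
    objective = lambda x: np.sum(x ** 2)
    gradient = backend.compute_gradient(objective, None)
    hess_vec = backend.compute_hessian(objective, None)
    x = np.ones(3)
    print(gradient(x))              # [2. 2. 2.]
    print(hess_vec(x, np.ones(3)))  # Hessian-vector product, here [2. 2. 2.]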
npawelek/rpc-maas | playbooks/files/rax-maas/plugins/disk_utilisation.py | 1 | 2054 | #!/usr/bin/env python
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import shlex
import subprocess
from maas_common import metric
from maas_common import print_output
from maas_common import status_err
from maas_common import status_ok
def utilisation(args, time):
output = subprocess.check_output(
shlex.split('iostat {device} -x -d {time} 2'
.format(device=args.device,
time=time)
))
device_lines = output.split('\nDevice:')[-1].strip().split('\n')[1:]
devices = [d.split() for d in device_lines]
utils = [(d[0], d[-1]) for d in devices if d]
return utils
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Disk utilisation checks')
parser.add_argument('device',
type=str,
help='Device we gather metric from')
parser.add_argument('--telegraf-output',
action='store_true',
default=False,
help='Set the output format to telegraf')
args = parser.parse_args()
with print_output(print_telegraf=args.telegraf_output):
try:
utils = utilisation(args, 5)
except Exception as e:
status_err(e, m_name='maas_disk_utilisation')
else:
status_ok(m_name='maas_disk_utilisation')
for util in utils:
metric('disk_utilisation_%s' % util[0], 'double', util[1], '%')
| apache-2.0 | 1,663,741,820,486,070,800 | 35.035088 | 79 | 0.621227 | false |
amit0701/rally | tests/unit/common/io/test_subunit_v2.py | 1 | 4862 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import mock
from rally.common.io import subunit_v2
from tests.unit import test
class SubunitParserTestCase(test.TestCase):
fake_stream = os.path.join(os.path.dirname(os.path.abspath(__file__)),
"subunit_v2.stream")
def test_parse_results_file(self):
result = subunit_v2.parse_results_file(self.fake_stream)
self.assertEqual({"skipped": 1,
"success": 2,
"time": "5.00732",
"failures": 3,
"expected_failures": 0,
"tests": 7,
"unexpected_success": 1}, result.total)
self.assertEqual(len(result.tests), result.total["tests"])
skipped_tests = result.filter_tests("skip")
skipped_test = "test_foo.SimpleTestCase.test_skip_something"
self.assertEqual(result.total["skipped"], len(skipped_tests))
self.assertSequenceEqual([skipped_test], skipped_tests.keys())
self.assertEqual(
{"status": "skip", "reason": "This should be skipped.",
"time": "0.00007", "name": skipped_test},
skipped_tests[skipped_test])
failed_tests = result.filter_tests("fail")
failed_test = "test_foo.SimpleTestCaseWithBrokenSetup.test_something"
self.assertEqual(result.total["failures"], len(failed_tests))
self.assertIn(failed_test, failed_tests)
trace = """Traceback (most recent call last):
File "test_foo.py", line 34, in setUp
raise RuntimeError("broken setUp method")
RuntimeError: broken setUp method
"""
self.assertEqual({"status": "fail", "traceback": trace,
"time": "0.00005", "name": failed_test},
failed_tests[failed_test])
def test_filter_results(self):
results = subunit_v2.SubunitV2StreamResult()
results._tests = {
"failed_test_1": {"status": "fail"},
"failed_test_2": {"status": "fail"},
"passed_test_1": {"status": "success"},
"passed_test_2": {"status": "success"},
"passed_test_3": {"status": "success"}}
self.assertEqual({"failed_test_1": results.tests["failed_test_1"],
"failed_test_2": results.tests["failed_test_2"]},
results.filter_tests("fail"))
self.assertEqual({"passed_test_1": results.tests["passed_test_1"],
"passed_test_2": results.tests["passed_test_2"],
"passed_test_3": results.tests["passed_test_3"]},
results.filter_tests("success"))
def test_property_test(self):
results = subunit_v2.SubunitV2StreamResult()
results._tests = {
"SkippedTestCase.test_1": {"status": "init"},
"SkippedTestCase.test_2": {"status": "init"}}
results._unknown_entities = {"SkippedTestCase": {"status": "skip",
"reason": ":("}}
self.assertFalse(results._is_parsed)
self.assertEqual(
{"SkippedTestCase.test_1": {"status": "skip", "reason": ":("},
"SkippedTestCase.test_2": {"status": "skip", "reason": ":("}},
results.tests)
self.assertTrue(results._is_parsed)
def test_preparse_input_args(self):
some_mock = mock.MagicMock()
@subunit_v2.preparse_input_args
def some_a(self_, test_id, test_status, test_tags, file_name,
file_bytes, mime_type, timestamp, charset):
some_mock(test_id, test_tags)
some_a("", "setUpClass (some_test[tag1,tag2])")
some_mock.assert_called_once_with(
"some_test[tag1,tag2]", ["tag1", "tag2"])
some_mock.reset_mock()
some_a("", "tearDown (some_test[tag1,tag2])")
some_mock.assert_called_once_with(
"some_test[tag1,tag2]", ["tag1", "tag2"])
def test_no_status_called(self):
self.assertEqual({"tests": 0, "time": 0, "failures": 0, "skipped": 0,
"success": 0, "unexpected_success": 0,
"expected_failures": 0},
subunit_v2.SubunitV2StreamResult().total) | apache-2.0 | -3,721,387,347,412,954,600 | 40.922414 | 78 | 0.564377 | false |
Colorless-Green-Ideas/docker-stature | test_stature.py | 1 | 5452 | import unittest
import json
import random
import subprocess
import logging
try:
import unittest.mock as mock
except ImportError:
import mock
import docker
import attr
from cachet import Cachet
from stature import main
@attr.s
class FakeContainer(object):
name = attr.ib()
labels = attr.ib()
status = attr.ib()
def container_mock():
"make a fake docker.Container"
name = "foo_{}".format(random.randint(0, 25))
cont = FakeContainer(name=name, labels={}, status=u"running")
return cont
class TestOneShotMode(unittest.TestCase):
# containerz = json.load(open("fixtures/containers.json")
settings = {
"cachet": {"api_key": "afancyapikey", "url": "http://localhost/api/v1"},
"containers": {"cachetdocker_cachet_1": 1},
}
@mock.patch("cachet.Cachet.postComponents")
def test_registers_containers(self, cachet_mock):
# containers_mock.side_effect = self.containerz
cli_mock = mock.Mock()
cli_mock.containers.list.return_value = [FakeContainer(name="cachetdocker_cachet_1", labels={}, status=u"running")]
cachet = mock.Mock()
# cachet.putComponentsByID = mock.Mock()
main(cli_mock, cachet, self.settings)
# cli_mock.containers.assert_called()
cachet.putComponentsByID.assert_called_once_with(1, status=1)
# cachet_mock.assert_called()
@mock.patch("stature.logging.error")
@mock.patch("sys.exit")
def test_no_containers(self, patched_exit, patched_error):
cli_mock = mock.Mock()
cli_mock.containers.list.return_value = []
cachet = mock.Mock(spec=Cachet)
main(cli_mock, cachet, self.settings)
patched_exit.assert_called_with(4)
patched_error.assert_called_with("No containers running!")
@mock.patch("stature.logging.error")
def test_no_containers_section(self, patched_error):
cli_mock = mock.MagicMock()
cachet_mock = mock.Mock()
lsettings = {"cachet": {"api_key": "fjdjkfhsdfh", "url": "http://localhost/api/v1"},}
main(cli_mock, cachet_mock, lsettings)
patched_error.assert_called_with("Write out your toml file! Try an empty containers section.")
def test_exited_container(self):
pass
def test_tag_annotations(self):
pass
class IntegrationHCTest(unittest.TestCase):
settings = {
"cachet": {"api_key": "afancyapikey", "url": "http://localhost/api/v1"},
"containers": {}
}
def setUp(self):
self.client = docker.from_env()
labels = {"org.cachet.name": "Python Test Container", "org.cachet.link": "http://localhost:1337/", "org.cachet.description": "This tests the cachet integrations!"}
self.container = self.client.containers.run("python:2", "python -m SimpleHTTPServer 1337", detach=True, healthcheck={"test": ["CMD", "curl", "localhost:1337"]}, labels=labels)
def tearDown(self):
self.client.close()
self.container.kill()
self.container.remove(force=True)
@mock.patch("cachet.Cachet.postComponents")
def test_with_healthceck(self, fake_cachet):
main(self.client, fake_cachet, self.settings)
print(fake_cachet.mock_calls)
fake_cachet.postComponents.assert_called_with(description='This tests the cachet integrations!', link='http://localhost:1337/', name='Python Test Container', status=mock.ANY)
# One or two???? wtf
def seed_cachet():
subprocess.call(["docker-compose", "run", "--rm", "cachet", "php7", "artisan", "cachet:seed"], cwd="fixtures")
# https://github.com/CachetHQ/Cachet/blob/b431ee3702831df88a669c1909cba02d863b4cef/app/Console/Commands/DemoSeederCommand.php#L441
class IntegrationCachetTest(unittest.TestCase):
def setUp(self):
self.settings = {
"cachet":{
"api_key" : "9yMHsdioQosnyVK4iCVR",
"url": "http://localhost:3666/api/v1"
},
"containers": {}
}
subprocess.call(["docker-compose", "up", "-d"], cwd="fixtures")
seed_cachet()
self.client = docker.from_env()
self.cachet = Cachet("http://localhost:3666/api/v1", "9yMHsdioQosnyVK4iCVR")
labels = {"org.cachet.name": "Python Test Container", "org.cachet.link": "http://localhost:1337/", "org.cachet.description": "This tests the cachet integrations!"}
self.container = self.client.containers.run("python:2", "python -m SimpleHTTPServer 1337", detach=True, healthcheck={"test": ["CMD", "curl", "localhost:1337"]}, labels=labels)
def tearDown(self):
self.client.close()
subprocess.call(["docker-compose", "stop"], cwd="fixtures")
self.container.kill()
self.container.remove(force=True)
def test_cachet_integration(self):
main(self.client, self.cachet, self.settings)
ret = self.cachet.getComponents()
comps = ret.json()['data']
# print(comps[-1])
self.assertEquals('http://localhost:1337/', comps[-1]['link'])
self.assertEquals('Python Test Container', comps[-1]['name'])
# self.assertIn({'enabled': True, 'status': 1, 'link': , 'deleted_at': None, 'group_id': 0, 'name': 'Python Test Container', 'order': 0, 'created_at': '2018-08-11 10:36:41', 'id': 8, 'description': 'This tests the cachet integrations!', 'tags': [], 'status_name': 'Operational', 'updated_at': '2018-08-11 10:36:41'} , comps)
| agpl-3.0 | -6,533,127,517,157,104,000 | 39.088235 | 332 | 0.639032 | false |
rochacbruno/dynaconf | tests/test_json_loader.py | 1 | 5842 | import json
import pytest
from dynaconf import LazySettings
from dynaconf.loaders.json_loader import DynaconfEncoder
from dynaconf.loaders.json_loader import load
settings = LazySettings(environments=True, ENV_FOR_DYNACONF="PRODUCTION")
JSON = """
{
"a": "a,b",
"default": {
"password": "@int 99999",
"host": "server.com",
"port": "@int 8080",
"alist": ["item1", "item2", 23],
"service": {
"url": "service.com",
"port": 80,
"auth": {
"password": "qwerty",
"test": 1234
}
}
},
"development": {
"password": "@int 88888",
"host": "devserver.com"
},
"production": {
"password": "@int 11111",
"host": "prodserver.com"
},
"global": {
"global_value": "global"
}
}
"""
# the @float is not needed in JSON but kept to ensure it works
JSON2 = """
{
"global": {
"secret": "@float 42",
"password": 123456,
"host": "otherjson.com"
}
}
"""
JSONS = [JSON, JSON2]
def test_load_from_json():
"""Assert loads from JSON string"""
load(settings, filename=JSON)
assert settings.HOST == "prodserver.com"
assert settings.PORT == 8080
assert settings.ALIST == ["item1", "item2", 23]
assert settings.SERVICE["url"] == "service.com"
assert settings.SERVICE.url == "service.com"
assert settings.SERVICE.port == 80
assert settings.SERVICE.auth.password == "qwerty"
assert settings.SERVICE.auth.test == 1234
load(settings, filename=JSON, env="DEVELOPMENT")
assert settings.HOST == "devserver.com"
load(settings, filename=JSON)
assert settings.HOST == "prodserver.com"
def test_load_from_multiple_json():
"""Assert loads from JSON string"""
load(settings, filename=JSONS)
assert settings.HOST == "otherjson.com"
assert settings.PASSWORD == 123456
assert settings.SECRET == 42.0
assert settings.PORT == 8080
assert settings.SERVICE["url"] == "service.com"
assert settings.SERVICE.url == "service.com"
assert settings.SERVICE.port == 80
assert settings.SERVICE.auth.password == "qwerty"
assert settings.SERVICE.auth.test == 1234
load(settings, filename=JSONS, env="DEVELOPMENT")
assert settings.PORT == 8080
assert settings.HOST == "otherjson.com"
load(settings, filename=JSONS)
assert settings.HOST == "otherjson.com"
assert settings.PASSWORD == 123456
load(settings, filename=JSON, env="DEVELOPMENT")
assert settings.PORT == 8080
assert settings.HOST == "devserver.com"
load(settings, filename=JSON)
assert settings.HOST == "prodserver.com"
assert settings.PASSWORD == 11111
def test_no_filename_is_none():
"""Assert if passed no filename return is None"""
assert load(settings) is None
def test_key_error_on_invalid_env():
"""Assert error raised if env is not found in JSON"""
with pytest.raises(KeyError):
load(settings, filename=JSON, env="FOOBAR", silent=False)
def test_no_key_error_on_invalid_env():
"""Assert error raised if env is not found in JSON"""
load(settings, filename=JSON, env="FOOBAR", silent=True)
def test_load_single_key():
"""Test loading a single key"""
_JSON = """
{
"foo": {
"bar": "blaz",
"zaz": "naz"
}
}
"""
load(settings, filename=_JSON, env="FOO", key="bar")
assert settings.BAR == "blaz"
assert settings.exists("BAR") is True
assert settings.exists("ZAZ") is False
def test_empty_value():
load(settings, filename="")
def test_multiple_filenames():
load(settings, filename="a.json,b.json,c.json,d.json")
def test_cleaner():
load(settings, filename=JSON)
assert settings.HOST == "prodserver.com"
assert settings.PORT == 8080
assert settings.ALIST == ["item1", "item2", 23]
assert settings.SERVICE["url"] == "service.com"
assert settings.SERVICE.url == "service.com"
assert settings.SERVICE.port == 80
assert settings.SERVICE.auth.password == "qwerty"
assert settings.SERVICE.auth.test == 1234
load(settings, filename=JSON, env="DEVELOPMENT")
assert settings.HOST == "devserver.com"
load(settings, filename=JSON)
assert settings.HOST == "prodserver.com"
settings.clean()
with pytest.raises(AttributeError):
assert settings.HOST == "prodserver.com"
def test_using_env(tmpdir):
load(settings, filename=JSON)
assert settings.HOST == "prodserver.com"
tmpfile = tmpdir.mkdir("sub").join("test_using_env.json")
tmpfile.write(JSON)
with settings.using_env("DEVELOPMENT", filename=str(tmpfile)):
assert settings.HOST == "devserver.com"
assert settings.HOST == "prodserver.com"
def test_load_dunder():
"""Test loading with dunder settings"""
_JSON = """
{
"foo": {
"colors__yellow__code": "#FFCC00",
"COLORS__yellow__name": "Yellow"
}
}
"""
load(settings, filename=_JSON, env="FOO")
assert settings.COLORS.yellow.code == "#FFCC00"
assert settings.COLORS.yellow.name == "Yellow"
def test_dynaconf_encoder():
class Dummy:
def _dynaconf_encode(self):
return "Dummy"
class DummyNotSerializable:
_dynaconf_encode = 42
data = {"dummy": Dummy()}
data_error = {"dummy": DummyNotSerializable()}
assert json.dumps(data, cls=DynaconfEncoder) == '{"dummy": "Dummy"}'
with pytest.raises(TypeError):
json.dumps(data_error, cls=DynaconfEncoder)
def test_envless():
settings = LazySettings()
_json = """
{
"colors__yellow__code": "#FFCC00",
"COLORS__yellow__name": "Yellow"
}
"""
load(settings, filename=_json)
assert settings.COLORS.yellow.code == "#FFCC00"
assert settings.COLORS.yellow.name == "Yellow"
| mit | -7,556,519,038,617,148,000 | 26.299065 | 73 | 0.625471 | false |
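# A compact, standalone usage sketch of the loader exercised above; the inline
# JSON string is an arbitrary example rather than one of the test fixtures.
from dynaconf import LazySettings
from dynaconf.loaders.json_loader import load

s = LazySettings(environments=True, ENV_FOR_DYNACONF="PRODUCTION")
load(s, filename='{"default": {"host": "server.com"}, "production": {"host": "prod.com"}}')
assert s.HOST == "prod.com"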
Katello/katello-cli | test/katello/tests/core/system/system_data.py | 1 | 2883 |
SYSTEM_GROUPS = [
{
"description" : "This is my first system group.",
"updated_at" : "2012-04-26T19:59:46Z",
"pulp_id" : "ACME_Corporation-Test System Group 1-0cdaf879",
"created_at" : "2012-04-26T19:59:23Z",
"name" : "Test System Group 1",
"id" : 1,
"organization_id" : 1
},
{
"description" : "This is another system group.",
"updated_at" : "2012-04-26T19:59:46Z",
"pulp_id" : "ACME_Corporation-Test System Group 3-0adcf897",
"created_at" : "2012-04-27T19:59:23Z",
"name" : "Test System Group 2",
"id" : 2,
"organization_id" : 1
}
]
SYSTEM_GROUP_HISTORY = [
{
"task_type": "package_install",
"created_at": "2012-05-22T20:04:15Z",
"parameters": {
"packages": [
"foo"
]
},
"tasks": [
{
"result": {
"errors": [
"('c8574ddd-b2f8-41f9-b47a-cedeb3c670ad', 0)",
"RequestTimeout('c8574ddd-b2f8-41f9-b47a-cedeb3c670ad', 0)"
]
},
"uuid": "4e2f2dde-a449-11e1-9dbe-0019b90d1d4e",
"progress": None,
"id": 4,
"finish_time": "2012-05-22T20:04:25Z",
"state": "error",
"start_time": "2012-05-22T20:04:14Z"
}
],
"id": 1
}
]
SYSTEM_GROUP_SYSTEMS = [
{
"id": "d49f6d91-0bb3-43f0-9881-dc051fa818c7",
"name": "FakeSystem345"
},
{
"id": "92eb02a6-0d33-4f89-885c-55aebedaf0e1",
"name": "Winterfell"
}
]
SYSTEMS = [
{
"guests": [
],
"created_at": "2012-04-26T20:00:38Z",
"serviceLevel": "",
"name": "FakeSystem345",
"description": "Initial Registration Params",
"location": "None",
"updated_at": "2012-04-26T20:00:38Z",
"id": 1,
"environment": {
"created_at": "2012-04-20T14:01:22Z",
"name": "Dev",
"description": "",
"updated_at": "2012-04-20T14:01:22Z",
"prior_id": 1,
"organization": "ACME_Corporation",
"id": 5,
"library": False,
"organization_id": 1,
"prior": "Library"
},
"uuid": "d49f6d91-0bb3-43f0-9881-dc051fa818c7",
"activation_key": [
],
"environment_id": 5,
},
{
"guests": [
],
"created_at": "2012-04-30T19:05:14Z",
"serviceLevel": "",
"name": "Winterfell",
"description": "Initial Registration Params",
"location": "None",
"updated_at": "2012-04-30T19:05:14Z",
"id": 2,
"environment": {
"created_at": "2012-04-20T14:01:22Z",
"name": "Dev",
"description": "",
"updated_at": "2012-04-20T14:01:22Z",
"prior_id": 1,
"organization": "ACME_Corporation",
"id": 5,
"library": False,
"organization_id": 1,
"prior": "Library"
},
"uuid": "92eb02a6-0d33-4f89-885c-55aebedaf0e1",
"activation_key": [
],
"environment_id": 5,
}
]
| gpl-2.0 | -8,146,273,098,658,856,000 | 22.826446 | 71 | 0.512314 | false |
Microvellum/Fluid-Designer | win64-vc/2.78/Python/bin/2.78/scripts/modules/rna_prop_ui.py | 1 | 5611 | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
import bpy
def rna_idprop_ui_get(item, create=True):
try:
return item['_RNA_UI']
except:
if create:
item['_RNA_UI'] = {}
return item['_RNA_UI']
else:
return None
def rna_idprop_ui_del(item):
try:
del item['_RNA_UI']
except KeyError:
pass
def rna_idprop_ui_prop_update(item, prop):
prop_rna = item.path_resolve("[\"%s\"]" % prop.replace("\"", "\\\""), False)
if isinstance(prop_rna, bpy.types.bpy_prop):
prop_rna.update()
def rna_idprop_ui_prop_get(item, prop, create=True):
rna_ui = rna_idprop_ui_get(item, create)
if rna_ui is None:
return None
try:
return rna_ui[prop]
except:
rna_ui[prop] = {}
return rna_ui[prop]
def rna_idprop_ui_prop_clear(item, prop, remove=True):
rna_ui = rna_idprop_ui_get(item, False)
if rna_ui is None:
return
try:
del rna_ui[prop]
except KeyError:
pass
if remove and len(item.keys()) == 1:
rna_idprop_ui_del(item)
def rna_idprop_context_value(context, context_member, property_type):
space = context.space_data
if space is None or isinstance(space, bpy.types.SpaceProperties):
pin_id = space.pin_id
else:
pin_id = None
if pin_id and isinstance(pin_id, property_type):
rna_item = pin_id
context_member = "space_data.pin_id"
else:
rna_item = eval("context." + context_member)
return rna_item, context_member
def rna_idprop_has_properties(rna_item):
keys = rna_item.keys()
nbr_props = len(keys)
return (nbr_props > 1) or (nbr_props and '_RNA_UI' not in keys)
def draw(layout, context, context_member, property_type, use_edit=True):
def assign_props(prop, val, key):
prop.data_path = context_member
prop.property = key
try:
prop.value = str(val)
except:
pass
rna_item, context_member = rna_idprop_context_value(context, context_member, property_type)
# poll should really get this...
if not rna_item:
return
from bpy.utils import escape_identifier
if rna_item.id_data.library is not None:
use_edit = False
assert(isinstance(rna_item, property_type))
items = rna_item.items()
items.sort()
if use_edit:
row = layout.row()
props = row.operator("wm.properties_add", text="Add")
props.data_path = context_member
del row
rna_properties = {prop.identifier for prop in rna_item.bl_rna.properties if prop.is_runtime} if items else None
for key, val in items:
if key == '_RNA_UI':
continue
row = layout.row()
to_dict = getattr(val, "to_dict", None)
to_list = getattr(val, "to_list", None)
# val_orig = val # UNUSED
if to_dict:
val = to_dict()
val_draw = str(val)
elif to_list:
val = to_list()
val_draw = str(val)
else:
val_draw = val
box = row.box()
if use_edit:
split = box.split(percentage=0.75)
row = split.row()
else:
row = box.row()
row.label(text=key, translate=False)
# explicit exception for arrays
is_rna = (key in rna_properties)
if to_dict or to_list:
row.label(text=val_draw, translate=False)
else:
if is_rna:
row.prop(rna_item, key, text="")
else:
row.prop(rna_item, '["%s"]' % escape_identifier(key), text="")
if use_edit:
row = split.row(align=True)
if not is_rna:
props = row.operator("wm.properties_edit", text="Edit")
assign_props(props, val_draw, key)
props = row.operator("wm.properties_remove", text="", icon='ZOOMOUT')
assign_props(props, val_draw, key)
else:
row.label(text="API Defined")
class PropertyPanel:
"""
The subclass should have its own poll function
and the variable '_context_path' MUST be set.
"""
bl_label = "Custom Properties"
bl_options = {'DEFAULT_CLOSED'}
@classmethod
def poll(cls, context):
rna_item, context_member = rna_idprop_context_value(context, cls._context_path, cls._property_type)
return bool(rna_item)
"""
def draw_header(self, context):
rna_item, context_member = rna_idprop_context_value(context, self._context_path, self._property_type)
tot = len(rna_item.keys())
if tot:
self.layout().label("%d:" % tot)
"""
def draw(self, context):
draw(self.layout, context, self._context_path, self._property_type)
| gpl-3.0 | 6,176,873,088,856,123,000 | 26.10628 | 115 | 0.586705 | false |
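# A minimal sketch of a panel subclass built on the PropertyPanel mixin above.
# It only runs inside Blender; the class name is an illustrative assumption.
import bpy
from rna_prop_ui import PropertyPanel

class OBJECT_PT_custom_props_example(PropertyPanel, bpy.types.Panel):
    bl_space_type = 'PROPERTIES'
    bl_region_type = 'WINDOW'
    bl_context = "object"
    _context_path = "object"
    _property_type = bpy.types.Object

# bpy.utils.register_class(OBJECT_PT_custom_props_example) would register the
# panel so draw() above lists the object's custom properties.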
vallemrv/tpvB3 | tpv/controllers/pedido.py | 1 | 12375 | # -*- coding: utf-8 -*-
# @Author: Manuel Rodriguez <valle>
# @Date: 10-May-2017
# @Email: [email protected]
# @Last modified by: valle
# @Last modified time: 12-Feb-2018
# @License: Apache license vesion 2.0
from kivy.uix.boxlayout import BoxLayout
from kivy.storage.jsonstore import JsonStore
from kivy.properties import ObjectProperty, NumericProperty, StringProperty
from kivy.lang import Builder
from controllers.lineawidget import LineaWidget
from modals import Sugerencias, Efectivo
from models.pedido import Pedido
Builder.load_file('view/pedido.kv')
class Wrap():
def __init__(self, obj):
self.tag = obj
class PedidoController(BoxLayout):
tpv = ObjectProperty(None, allownone=True)
pedido = ObjectProperty(None, allownone=True)
total = NumericProperty(0.0)
des = StringProperty("Pedido {0: >10} articulos".format(0))
def __init__(self, **kargs):
super(PedidoController, self).__init__(**kargs)
self.clase = None
self.puntero = 0
self.pilaDeStados = []
self.linea_editable = None
self.tipo_cobro = "Efectivo"
self.dbCliente = None
self.promocion = None
self.modal = Sugerencias(onExit=self.exit_sug)
self.efectivo = Efectivo(onExit=self.salir_efectivo)
def on_pedido(self, key, value):
self.pedido.bind(total=self.show_total)
def show_total(self, key, value):
self.total = self.pedido.total
self.des = "Pedido {0: >10} articulos".format(
self.pedido.getNumArt())
def pedido_domicilio(self, db):
self.dbCliente = db
self.lista.rm_all_widgets()
self.pedido = Pedido()
self.btnPedido.disabled = True
self.btnAtras.disabled = False
self.total = 0
self.des = "Pedido {0: >10} articulos".format(0)
self.linea_nueva()
def onPress(self, botones):
for i in range(len(botones)):
btn = botones[i]
tipo = btn.tag.get('tipo')
if tipo == 'cobros':
self.pedido.modo_pago = btn.tag.get("text")
if self.pedido.modo_pago == "Efectivo":
self.mostrar_efectivo()
else:
self.pedido.cambio = 0.00
self.pedido.efectivo = 0.00
self.tpv.imprimirTicket(self.pedido.guardar_pedido())
self.tpv.mostrar_inicio()
elif tipo == 'llevar':
self.show_botonera('../db/privado/num_avisador.json')
self.pedido.para_llevar = btn.tag.get('text')
if self.pedido.para_llevar == "Para recoger":
self.pedido.para_llevar = "Para llevar"
self.pedido.num_avisador = "Para recoger"
self.pedido.modo_pago = "Efectivo"
self.pedido.cambio = 0.00
self.pedido.efectivo = 0.00
self.pedido.guardar_pedido()
self.tpv.mostrar_inicio()
elif tipo == 'num':
self.show_botonera('../db/privado/cobrar.json')
self.pedido.num_avisador = btn.tag.get("text")
elif tipo == 'clase':
self.clase = btn.tag
if "promocion" in self.clase:
self.promocion = self.clase["promocion"]
self.puntero = 0
name = self.clase.get('productos')
db = "../db/productos/%s.json" % name
self.show_botonera(db)
self.linea_editable = None
self.pilaDeStados = []
self.pilaDeStados.append({'db': db, 'punt': 0,
'pass': 1})
self.btnAtras.disabled = False
else:
if "precio" in self.clase:
btn.tag["precio"] = btn.tag["precio"] * self.clase["precio"]
db = self.pedido.add_modificador(btn.tag)
if not self.linea_editable:
self.add_linea()
self.refresh_linea()
num = len(self.clase.get('preguntas')) if self.clase else 0
ps = len(botones) - 1
if db != None:
self.show_botonera(db)
elif db == None and self.puntero < num and i == ps:
db = None
igDb = False
while self.puntero < num:
name = self.clase.get('preguntas')[self.puntero]
db = "../db/preguntas/%s.json" % name
self.puntero += 1
if 'ignore' in btn.tag:
if db not in btn.tag.get('ignore'):
igDb = False
break
else:
igDb = True
db = None
else:
break
if not igDb:
self.show_botonera(db)
else:
self.puntero += 1
if not db and self.puntero >= num and i == ps:
self.linea_nueva()
if i == ps:
self.pilaDeStados.append({'db': db, 'punt': self.puntero,
'pass': len(botones)})
def mostrar_efectivo(self):
self.efectivo.total = str(self.total)
self.efectivo.open()
def salir_efectivo(self, cancelar=True):
self.efectivo.dismiss()
if cancelar:
self.show_botonera('../db/privado/cobrar.json')
else:
self.pedido.efectivo = self.efectivo.efectivo.replace("€", "")
self.pedido.cambio = self.efectivo.cambio.replace("€", "")
self.tpv.imprimirTicket(self.pedido.guardar_pedido())
self.tpv.abrir_cajon()
self.tpv.mostrar_inicio()
self.tpv.mostrar_men_cobro("Cambio "+ self.efectivo.cambio)
def exit_sug(self, key, w, txt, ln):
if txt != "":
if "sug" not in ln.obj:
ln.obj["sug"] = []
ln.obj["sug"].append(txt)
db = JsonStore("../db/sugerencias.json")
sug = self.modal.sug
db.put(ln.obj.get("text").lower(), db=sug)
self.rf_parcial(w, ln)
self.modal.dismiss()
def sugerencia(self, w, linea):
try:
name = linea.obj.get('text').lower()
db = JsonStore("../db/sugerencias.json")
if not db.exists(name):
db.put(name, db=[])
self.modal.sug = db[name].get("db")
self.modal.des = "{0} {1:.2f}".format(linea.getTexto(),
linea.getTotal())
self.modal.clear_text()
self.modal.tag = linea
self.modal.content = w
self.modal.open()
except:
self.modal.content = None
def atras(self):
num = len(self.pilaDeStados)
if num == 1:
self.linea_nueva()
if num == 2:
self.pilaDeStados.pop()
pr = self.pilaDeStados[-1]
self.show_botonera(pr['db'])
self.puntero = pr['punt']
self.pedido.rm_estado()
if self.linea_editable:
self.lista.rm_linea(self.linea_editable)
self.linea_editable = None
if num > 2:
sc = self.pilaDeStados.pop()
pr = self.pilaDeStados[-1]
self.show_botonera(pr['db'])
self.puntero = pr['punt']
if sc['pass'] > 1:
for i in range(int(sc['pass'])):
self.pedido.rm_estado()
else:
self.pedido.rm_estado()
self.refresh_linea()
def linea_nueva(self):
db = "../db/clases.json"
self.show_botonera(db)
self.clase = None
self.promocion = None
self.linea_editable = None
if len(self.pedido.lineas_pedido) > 0:
self.btnPedido.disabled = False
self.btnAtras.disabled = True
self.pedido.finaliza_linea()
self.pilaDeStados = []
def add_linea(self):
self.btnPedido.disabled = True
self.btnAtras.disabled = False
if self.pedido.linea:
self.linea_editable = LineaWidget(tag=self.pedido.linea,
borrar=self.borrar,
sumar=self.sumar,
sugerencia=self.sugerencia)
if self.promocion is not None:
self.linea_editable.mostar_btnpromo()
self.linea_editable.aplicar = self.aplicar_promo
self.linea_editable.promocion = self.promocion
else:
self.linea_editable.mostar_btnpromo(False)
self.lista.add_linea(self.linea_editable)
def aplicar_promo(self, btn):
self.rf_parcial(btn, btn.tag)
def refresh_linea(self):
if self.pedido and self.pedido.linea:
self.linea_editable.texto = self.pedido.linea.getTexto()
self.linea_editable.total = self.pedido.linea.getTotal()
if len(self.pedido.lineas_pedido) == 0:
self.btnPedido.disabled = True
def rf_parcial(self, w, ln):
w.texto = ln.getTexto()
w.total = ln.getTotal()
if self.pedido:
self.pedido.actualizar_total()
def sumar(self, w, tag):
self.pedido.sumar(tag)
self.rf_parcial(w, tag)
def borrar(self, widget, tag):
if self.pedido.borrar(tag):
self.linea_nueva()
self.pedido.borrar(tag)
self.lista.rm_linea(widget)
self.refresh_linea()
else:
self.rf_parcial(widget, tag)
def show_botonera(self, db):
self.storage = JsonStore(db)
if self.storage.exists('db'):
if self.storage.exists('selectable'):
self.botonera.selectable = True
else:
self.botonera.selectable = False
lista = self.storage['db'].get('lista')
num = len(lista)
if num <= 4:
self.botonera.cols = 1
elif num > 4 and num <= 12:
self.botonera.cols = 3
else:
self.botonera.cols = 4
title = 'None'
if self.clase != None:
title = str(self.clase['text'])
self.botonera.title = title
self.botonera.buttons = []
self.botonera.buttons = self.storage['db'].get('lista')
def nuevo_pedido(self, clase):
self.onPress([Wrap(clase)])
self.clear_pedidos()
def clear_pedidos(self):
self.lista.rm_all_widgets()
self.pedido = Pedido()
self.btnPedido.disabled = True
self.btnAtras.disabled = False
self.total = 0
self.des = "Pedido {0: >10} articulos".format(0)
self.dbCliente = None
def aparcar_pedido(self):
if self.dbCliente == None:
self.pedido.aparcar_pedido()
self.tpv.mostrar_inicio()
def recuperar_pedido(self, db):
self.clear_pedidos()
self.pedido.cargar_pedido(db)
lineas = db.get("db")['lineas']
for linea in lineas:
self.pedido.add_linea(linea)
self.add_linea()
self.refresh_linea()
self.linea_nueva()
def hacer_pedido(self):
if not self.dbCliente and self.total > 0:
self.btnPedido.disabled = True
self.btnAtras.disabled = True
self.show_botonera('../db/privado/llevar.json')
else:
if self.dbCliente:
self.pedido.para_llevar = "Domicilio"
self.pedido.dbCliente = self.dbCliente
self.pedido.num_avisador = "Domicilio"
self.pedido.modo_pago = "Efectivo"
self.tpv.imprimirTicket(self.pedido.guardar_pedido())
self.tpv.mostrar_inicio()
else:
self.show_botonera("../db/privado/num_avisador.json")
self.pedido.modo_pago = "Efectivo"
| apache-2.0 | -6,857,285,848,387,114,000 | 34.548851 | 80 | 0.509579 | false |
OpenSecurityResearch/clipcaptcha | clipcaptcha/ProviderInfo.py | 1 | 5374 | # Copyright (c) 2012 Gursev Singh Kalra McAfee, Foundstone
#
# This class contains information for all CAPTCHA providers that this tool targets
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
#
import sys
from xml.etree.ElementTree import ElementTree
class ProviderInfo:
_bypassString = None
_providers = []
def __init__(self, name, hostname, path):
self.name = name
self.hostname = hostname
self.path = path
@staticmethod
def exxit(msg):
print msg
sys.exit()
def addSuccessFailure(self, responseType, sOrF):
responseType = responseType[0]
rcode = responseType.findall("rcode")
rcodestr = responseType.findall("rcodestr")
if(len(rcode) == 0 or len(rcodestr) == 0):
ProviderInfo.exxit("[-] Success response codes not found for a CAPTCHA provider. Exiting")
rcode = rcode[0]
rcodestr = rcodestr[0]
if(rcode.text == None or rcode.text.strip() == ''or rcodestr.text == None or rcodestr.text.strip() == ''):
ProviderInfo.exxit("[-] Invalid rcode or rcodestr elements. Exiting")
rbody = responseType.findall("rbody")
if(len(rbody) == 0):
rbody = ''
else:
rbody = rbody[0]
if(rbody.text == None):
rbody = ''
else:
rbody = rbody.text.strip()
rbody = rbody.replace("\\n","\n")
rheaders = responseType.findall("rheaders")
headerDict = {}
if(len(rheaders) != 0):
rheaders = rheaders[0]
headers = rheaders.findall("header")
for header in headers:
name = header.findall("name")
value = header.findall("value")
if(len(name) !=0 and len(value) != 0 and name[0].text != None and name[0].text.strip() != '' and value[0].text != None and value[0].text.strip() != '' ):
headerDict[name[0].text.strip()] = value[0].text.strip()
try:
if(sOrF == "success"):
self.setSuccessResp(int(rcode.text.strip()), rcodestr.text.strip(), headerDict, rbody)
elif(sOrF == "failure"):
self.setFailureResp(int(rcode.text.strip()), rcodestr.text.strip(), headerDict, rbody)
except ValueError:
ProviderInfo.exxit("[-] Invalid Response code in config XML")
def setSuccessResp(self, sCode, sCodeStr, sHeaders, sBody):
self.sCode = sCode
self.sCodeStr = sCodeStr
self.sHeaders = sHeaders
self.sBody = sBody
def setFailureResp(self, fCode, fCodeStr, fHeaders, fBody):
self.fCode = fCode
self.fCodeStr = fCodeStr
self.fHeaders = fHeaders
self.fBody = fBody
@staticmethod
def getProviders():
return ProviderInfo._providers
@staticmethod
def setBypassString(bypass):
ProviderInfo._bypassString = bypass
@staticmethod
def getBypassString():
return ProviderInfo._bypassString
@staticmethod
def initProviders(configFile = "config.xml"):
if(configFile == None):
temp = ProviderInfo('reCAPTCHA', 'www.google.com', '/recaptcha/api/verify')
temp.setSuccessResp(200, "OK", {}, "true")
temp.setFailureResp(200, "OK", {}, "false\nincorrect-captcha-sol")
ProviderInfo._providers.append(temp)
temp = ProviderInfo('OpenCAPTCHA', 'www.opencaptcha.com', '/validate.php')
temp.setSuccessResp(200, "OK", {}, "pass")
temp.setFailureResp(200, "OK", {}, "fail")
ProviderInfo._providers.append(temp)
temp = ProviderInfo('Captchator', 'captchator.com', '/captcha/check_answer/')
temp.setSuccessResp(200, "OK", {}, "1")
temp.setFailureResp(200, "OK", {}, "0")
ProviderInfo._providers.append(temp)
else:
try:
with open(configFile) as f: pass
except IOError as e:
ProviderInfo.exxit("[-] Configuration file not found. Exiting")
tree = ElementTree()
tree.parse(configFile)
providers = tree.findall("provider")
if( len(providers) == 0):
ProviderInfo.exxit("[-] No CAPTCHA providers found in config file")
for provider in providers:
name = provider.findall("name")
hostname = provider.findall("hostname")
path = provider.findall("path")
success = provider.findall("success")
failure = provider.findall("failure")
if(len(name) == 0 or len(hostname) == 0 or len(path) == 0 or len(success) == 0 or len(failure) == 0 ):
ProviderInfo.exxit("[-] One among name, hostname, path, success or failure elements not found for a CAPTCHA provider. Exiting")
name = name[0]
hostname = hostname[0]
path = path[0]
if(name.text == None or name.text.strip() == '' or hostname.text == None or hostname.text.strip() == '' or path.text == None or path.text.strip() == ''):
ProviderInfo.exxit("[-] One or more of name, hostname or path elements has a blank value")
tprovider = ProviderInfo(name.text.strip(), hostname.text.strip(), path.text.strip())
tprovider.addSuccessFailure(success, "success")
tprovider.addSuccessFailure(failure, "failure")
ProviderInfo._providers.append(tprovider)
| gpl-3.0 | 4,164,429,859,710,945,300 | 33.448718 | 157 | 0.689058 | false |
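# A small usage sketch of the class above: with no config file the built-in
# provider list is used; the output formatting is illustrative only.
ProviderInfo.initProviders(configFile=None)
for p in ProviderInfo.getProviders():
    print "%s -> http://%s%s" % (p.name, p.hostname, p.path)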
valentin-krasontovitsch/ansible | lib/ansible/modules/cloud/google/gcp_compute_target_vpn_gateway_facts.py | 1 | 6089 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_compute_target_vpn_gateway_facts
description:
- Gather facts for GCP TargetVpnGateway
short_description: Gather facts for GCP TargetVpnGateway
version_added: 2.7
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
filters:
description:
    - A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters).
    - Each additional filter in the list will be added as an AND condition (filter1
      and filter2).
region:
description:
- The region this gateway should sit in.
required: true
extends_documentation_fragment: gcp
'''
EXAMPLES = '''
- name: a target vpn gateway facts
gcp_compute_target_vpn_gateway_facts:
region: us-west1
filters:
- name = test_object
project: test_project
auth_kind: serviceaccount
service_account_file: "/tmp/auth.pem"
'''
RETURN = '''
items:
description: List of items
returned: always
type: complex
contains:
creationTimestamp:
description:
- Creation timestamp in RFC3339 text format.
returned: success
type: str
description:
description:
- An optional description of this resource.
returned: success
type: str
name:
description:
- Name of the resource. Provided by the client when the resource is created.
The name must be 1-63 characters long, and comply with RFC1035. Specifically,
the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`
which means the first character must be a lowercase letter, and all following
characters must be a dash, lowercase letter, or digit, except the last character,
which cannot be a dash.
returned: success
type: str
id:
description:
- The unique identifier for the resource.
returned: success
type: int
network:
description:
- The network this VPN gateway is accepting traffic for.
returned: success
type: str
tunnels:
description:
- A list of references to VpnTunnel resources associated with this VPN gateway.
returned: success
type: list
forwardingRules:
description:
- A list of references to the ForwardingRule resources associated with this
VPN gateway.
returned: success
type: list
region:
description:
- The region this gateway should sit in.
returned: success
type: str
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest
import json
################################################################################
# Main
################################################################################
def main():
module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str'), region=dict(required=True, type='str')))
if not module.params['scopes']:
module.params['scopes'] = ['https://www.googleapis.com/auth/compute']
items = fetch_list(module, collection(module), query_options(module.params['filters']))
if items.get('items'):
items = items.get('items')
else:
items = []
return_value = {'items': items}
module.exit_json(**return_value)
def collection(module):
return "https://www.googleapis.com/compute/v1/projects/{project}/regions/{region}/targetVpnGateways".format(**module.params)
def fetch_list(module, link, query):
auth = GcpSession(module, 'compute')
response = auth.get(link, params={'filter': query})
return return_if_object(module, response)
def query_options(filters):
if not filters:
return ''
if len(filters) == 1:
return filters[0]
else:
queries = []
for f in filters:
# For multiple queries, all queries should have ()
if f[0] != '(' and f[-1] != ')':
queries.append("(%s)" % ''.join(f))
else:
queries.append(f)
return ' '.join(queries)
def return_if_object(module, response):
# If not found, return nothing.
if response.status_code == 404:
return None
# If no content, return nothing.
if response.status_code == 204:
return None
try:
module.raise_for_status(response)
result = response.json()
except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
module.fail_json(msg="Invalid JSON response with error: %s" % inst)
if navigate_hash(result, ['error', 'errors']):
module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
return result
if __name__ == "__main__":
main()
| gpl-3.0 | -9,125,704,389,823,794,000 | 30.066327 | 136 | 0.572672 | false |
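# A small illustration of the filter handling above, meant to be run with the
# module's query_options(); the filter strings are arbitrary examples.
print(query_options(["name = test_object"]))               # name = test_object
print(query_options(["name = a", "zone = us-west1-a"]))    # (name = a) (zone = us-west1-a)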
gitcoinco/web | app/gas/management/commands/output_gas_viz.py | 1 | 5506 | import datetime
import os
from django.conf import settings
from django.core.management.base import BaseCommand
from django.utils import timezone
import boto
from boto.s3.key import Key
from gas.models import GasProfile
from numpy import array
from perftools.models import JSONStore
def convert_to_movie():
command = "ffmpeg -framerate 30 -pattern_type glob -i 'cache/frames/*.jpg' -c:v libx264 -pix_fmt yuv420p cache/out.mp4"
print("converting to movie")
os.system(command)
def clear_cache():
    # TODO: This whole method and its usage need to be modified to use S3 storage, not local storage.
# We can't be using local storage moving forward.
command = "mkdir cache"
os.system(command)
command = "mkdir cache/frames"
os.system(command)
command = "rm cache/frames/*.jpg"
os.system(command)
command = "rm cache/*.jpg"
os.system(command)
command = "rm cache/*.mp4"
os.system(command)
def upload_to_s3():
def percent_cb(complete, total):
import sys
sys.stdout.write('.')
sys.stdout.flush()
filepath = 'cache/out.mp4'
s3 = boto.connect_s3(settings.AWS_ACCESS_KEY_ID, settings.AWS_SECRET_ACCESS_KEY)
bucket = s3.get_bucket(settings.AWS_STORAGE_BUCKET_NAME)
k = Key(bucket)
k.key = 'gas_price_viz.mp4'
k.set_contents_from_filename(filepath, cb=percent_cb, num_cb=10)
k.set_acl('public-read')
return k.generate_url(expires_in=0, query_auth=False)
def get_color(j, k, num_items_to_show_at_a_time):
c1 = (3 * ((k/num_items_to_show_at_a_time) / 10.0)) + 0.6
jsub = j % 100 if int(j / 100) % 2 == 0 else 100 - (j % 100)
c2 = (3 * (((jsub)/100) / 10.0)) + 0.6
color = [c1, c2, c2]
return color
def sub_array(val, i):
return [[x[i], x[i], x[i], x[i]] for x in val]
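# Illustrative sketch (added for clarity, not part of the original command): for one
# minute's observations val = [[gas_price, -unixtime, confirm_minutes], ...],
#   sub_array(val, 0) -> [[gas_price, gas_price, gas_price, gas_price], ...]
# i.e. the i-th component of each observation repeated four times; handle() later
# mirrors this list and feeds it to plot_wireframe() as one ribbon face.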
class Command(BaseCommand):
help = 'gets observations and visualizes them in 3d'
def handle(self, *args, **options):
from mpl_toolkits.mplot3d import Axes3D
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
vantage_point = 'med'
rotate = True
last_degrees = 275
invert_colors = False
max_gas = 50
package = {}
for gp in GasProfile.objects.filter(gas_price__lt=max_gas, created_on__gt=timezone.now() - timezone.timedelta(days=7)).order_by('created_on'):
key = gp.created_on.strftime("%Y-%m-%dT%H:%M:00")
new_time = datetime.datetime.strptime(key, "%Y-%m-%dT%H:%M:00")
if key not in package.keys():
package[key] = []
new_pkg = [float(gp.gas_price), -1 * int(new_time.strftime("%s")), float(gp.mean_time_to_confirm_minutes)]
package[key].append(new_pkg)
clear_cache()
# Divide into X, Y, Z
keys = list(package.keys())
arr = list(package.values())
num_items_to_show_at_a_time = 50
for j in range(num_items_to_show_at_a_time, len(arr)):
print(j)
key = f"Ethereum Mainnet Gas Tradeoffs\n Gas Prices (x axis) vs Time to Confirm (y axis) vs Time (z axis) \n {keys[j]}\n\nMade with <3 at Gitcoin.\nhttps://gitcoin.co/gas"
facecolor = '#274150' if invert_colors else 'white'
fig = plt.figure(figsize=(16, 9), dpi=200, facecolor=facecolor, edgecolor='k')
ax = fig.add_subplot(111, projection='3d', title=key, facecolor=facecolor)
if invert_colors:
axiscolor = 'white'
ax.spines['bottom'].set_color(axiscolor)
ax.spines['top'].set_color(axiscolor)
ax.spines['right'].set_color(axiscolor)
ax.spines['left'].set_color(axiscolor)
ax.tick_params(axis='x', colors=axiscolor)
ax.tick_params(axis='y', colors=axiscolor)
ax.tick_params(axis='z', colors=axiscolor)
ax.yaxis.label.set_color(axiscolor)
ax.xaxis.label.set_color(axiscolor)
ax.zaxis.label.set_color(axiscolor)
ax.title.set_color(axiscolor)
ax.set_ylabel('Time (unixtime)')
ax.set_xlabel('Gas Price (gwei)')
ax.set_zlabel('Time To Confirm (min)')
X = []
Y = []
Z = []
for k in range(0, num_items_to_show_at_a_time):
val = arr[j-k]
tmp = []
for i in range(0, 3):
sa = sub_array(val, i)
tmp.append(sa + [x for x in reversed(sa)])
X = tmp[0]
Y = tmp[1]
Z = tmp[2]
color = get_color(j, k, num_items_to_show_at_a_time)
ax.plot_wireframe(array(X), array(Y), array(Z), rstride=10, cstride=10, color=color, alpha=1-(k/num_items_to_show_at_a_time))
if rotate:
delta = 1 if int(j / 350) % 2 != 0 else -1
degrees = last_degrees + (delta * 1.0 / 20.0)
last_degrees = degrees
if vantage_point == 'low':
z_angle = 2
if vantage_point == 'med':
z_angle = 5
if vantage_point == 'high':
z_angle = 10
ax.view_init(z_angle, degrees)
filename = str(j).rjust(10, '0')
png_file = f'cache/frames/{filename}.jpg'
plt.savefig(png_file)
plt.close()
convert_to_movie()
url = upload_to_s3()
print(url)
| agpl-3.0 | -7,824,083,365,066,125,000 | 35.95302 | 183 | 0.560298 | false |
texas/tx_people | example/usage/tests/fields.py | 1 | 4000 | import random
from django.core.exceptions import ValidationError
from django.db.models import CharField, ManyToManyField
from django.test import TestCase
from tx_people import fields
from tx_people import models
from tx_people import utils
from ._utils import RandomDatesMixin
class ReducedDateFieldTestCase(RandomDatesMixin, TestCase):
def test_defaults_to_max_length_of_ten(self):
field = fields.ReducedDateField()
self.assertEqual(10, field.max_length)
def test_custom_max_length_can_be_used(self):
random_max_length = random.randint(11, 20)
field = fields.ReducedDateField(max_length=random_max_length)
self.assertEqual(random_max_length, field.max_length)
def test_includes_valid_reduced_date_validator(self):
field = fields.ReducedDateField()
self.assert_(utils.valid_reduced_date in field.validators)
def test_validates_valid_dates(self):
field = fields.ReducedDateField()
valid_dates = [
'-'.join([self.random_year, self.random_month, self.random_day]),
'-'.join([self.random_year, self.random_month]),
self.random_year,
'2012-02-29', # leap year
]
for date in valid_dates:
field.run_validators(date)
def test_raises_on_invalid_dates(self):
field = fields.ReducedDateField()
invalid_dates = [
'-'.join([self.random_day, self.random_month, self.random_year]),
'-'.join([self.random_month, self.random_year]),
'2013-02-29', # not a leap year
]
for invalid_date in invalid_dates:
with self.assertRaises(ValidationError):
field.run_validators(invalid_date)
def test_valid_reduced_date_added_to_existing_validators(self):
field = fields.ReducedDateField(validators=[self, ])
self.assert_(self in field.validators)
self.assert_(utils.valid_reduced_date in field.validators)
class OptionalCharField(TestCase):
def generate_random_field(self, **kwargs):
r = random.randint(10, 250)
return fields.OptionalCharField(max_length=r, **kwargs)
def setUp(self):
self.field = self.generate_random_field()
def test_is_a_charfield_subclass(self):
self.assert_(CharField in self.field.__class__.__mro__)
def test_null_defaults_to_true(self):
self.assertTrue(self.field.null)
self.assertFalse(self.generate_random_field(null=False).null)
def test_blank_defaults_to_true(self):
self.assertTrue(self.field.blank)
self.assertFalse(self.generate_random_field(blank=False).blank)
class OptionalReducedDateFieldTestCase(ReducedDateFieldTestCase):
def generate_random_field(self, **kwargs):
return fields.OptionalReducedDateField(**kwargs)
def setUp(self):
self.field = self.generate_random_field()
def test_is_a_charfield_subclass(self):
self.assert_(fields.ReducedDateField in self.field.__class__.__mro__)
def test_null_defaults_to_true(self):
self.assertTrue(self.field.null)
self.assertFalse(self.generate_random_field(null=False).null)
def test_blank_defaults_to_true(self):
self.assertTrue(self.field.blank)
self.assertFalse(self.generate_random_field(blank=False).blank)
class OptionalManyToManyField(TestCase):
def generate_random_field(self, **kwargs):
return fields.OptionalManyToManyField(models.Person, **kwargs)
def setUp(self):
self.field = self.generate_random_field()
def test_is_a_many_to_many_subclass(self):
self.assert_(ManyToManyField in self.field.__class__.__mro__)
def test_null_defaults_to_true(self):
self.assertTrue(self.field.null)
self.assertFalse(self.generate_random_field(null=False).null)
def test_blank_defaults_to_true(self):
self.assertTrue(self.field.blank)
self.assertFalse(self.generate_random_field(blank=False).blank)
| apache-2.0 | 4,979,684,764,089,112,000 | 34.714286 | 77 | 0.67675 | false |
bockthom/codeface | codeface/test/unit/test_getFeatureLines.py | 3 | 17058 | # This file is part of Codeface. Codeface is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Copyright 2014, by Matthias Dittrich <[email protected]>
# All Rights Reserved.
import unittest
from codeface.VCS import (get_feature_lines, parse_feature_line,
parse_line, parse_sep_line, ParseError, LineType)
from operator import eq
import logging
logging.basicConfig()
class TestFeatureLineParsing(unittest.TestCase):
"""Tests for the getFeatureLines function"""
def testsepline(self):
"""Check that we can parse the header"""
self.assertEqual(",", parse_sep_line("\"sep=,\""))
self.assertEqual("-", parse_sep_line("\"sep=-\""))
self.assertEqual(",", parse_sep_line("\"sep=,\"\r\n"))
self.assertEqual(",", parse_sep_line("\"sep=,\"\n\r"))
self.assertEqual(",", parse_sep_line("\"sep=,\"\n"))
self.assertRaises(ParseError, parse_sep_line, "\"sp=,\"")
self.assertRaises(ParseError, parse_sep_line, "\"sep=,")
pass
def testline(self):
"""Check that we can parse the first header line"""
self.assertListEqual(
["FILENAME", "LINE_START", "LINE_END", "TYPE", "EXPRESSION",
"CONSTANTS"],
parse_line(
",",
"FILENAME,LINE_START,LINE_END,TYPE,EXPRESSION,CONSTANTS"))
self.assertListEqual(
["FILENAME", "LINE_START", "LINE_END", "TYPE", "EXPRESSION",
"CONSTANTS"],
parse_line(
"-",
"FILENAME-LINE_START-LINE_END-TYPE-EXPRESSION-CONSTANTS"))
self.assertListEqual(
["/tmp/tmpVemX4s_cppstats_featurelocations/_cppstats_featurelocations/tmpuAFx3b.xml",
"1", "8", "#if", "defined(A)", "A"],
parse_line(",", "/tmp/tmpVemX4s_cppstats_featurelocations/_cppstats_featurelocations/tmpuAFx3b.xml,1,8,#if,defined(A),A"))
self.assertListEqual(
["/tmp/tmpVemX4s_cppstats_featurelocations/_cppstats_featurelocations/tmpuAFx3b.xml",
"3", "5", "#if", "(defined(A)) && ((defined(C) || defined(D)))", "A;C;D"],
parse_line(",", "/tmp/tmpVemX4s_cppstats_featurelocations/_cppstats_featurelocations/tmpuAFx3b.xml,3,5,#if,(defined(A)) && ((defined(C) || defined(D))),A;C;D"))
self.assertListEqual(["FILENAME", "LINE_START"],
parse_line(",", "FILENAME,LINE_START\r\n"))
self.assertListEqual(["FILENAME", "LINE_START"],
parse_line(",", "FILENAME,LINE_START\n"))
self.assertListEqual(["FILE", "LINE_\"START"],
parse_line(",", "FILE,\"LINE_\"\"START\""))
pass
def testfeatureline(self):
"""Check that we can parse the first header line"""
startline, endline, line_type, featurelist, feature_expression = \
parse_feature_line(",", "/tmp/tmpVemX4s_cppstats_featurelocations/_cppstats_featurelocations/tmpuAFx3b.xml,3,5,#if,(defined(A)) && ((defined(C) || defined(D))),A;C;D")
self.assertEqual(3, startline)
self.assertEqual(5, endline)
self.assertEqual(LineType.IF, line_type)
self.assertListEqual(["A", "C", "D"], featurelist)
self.assertEqual("(defined(A)) && ((defined(C) || defined(D)))", feature_expression)
startline, endline, line_type, featurelist, feature_expression = \
parse_feature_line(",", "/tmp/tmpVemX4s_cppstats_featurelocations/_cppstats_featurelocations/tmpuAFx3b.xml,1,8,#if,defined(A),A")
self.assertEqual(1, startline)
self.assertEqual(8, endline)
self.assertEqual(LineType.IF, line_type)
self.assertListEqual(["A"], featurelist)
self.assertEqual("defined(A)", feature_expression)
startline, endline, line_type, featurelist, feature_expression = \
parse_feature_line(",", "/tmp/tmpbPbqDy_cppstats_featurelocations/_cppstats_featurelocations/tmp5pTBQ4.xml,324,335,#if,0,")
self.assertEqual(324, startline)
self.assertEqual(335, endline)
self.assertEqual(LineType.IF, line_type)
self.assertListEqual([], featurelist)
self.assertEqual("0", feature_expression)
startline, endline, line_type, featurelist, feature_expression = \
parse_feature_line(",", "/tmp/tmpY5XZci_cppstats_featurelocations/_cppstats_featurelocations/tmpWwrMnP.xml,941,943,#else,\"!(GTK_CHECK_VERSION(3, 0, 0))\",")
self.assertEqual(941, startline)
self.assertEqual(943, endline)
self.assertEqual(LineType.ELSE, line_type)
self.assertListEqual([], featurelist)
self.assertEqual("!(GTK_CHECK_VERSION(3, 0, 0))", feature_expression)
pass
class TestFeatureLines(unittest.TestCase):
"""Tests for the getFeatureLines function"""
def testsingleline(self):
"""Check that a single line is split as expected"""
feature_dict, fexpr_dict = \
get_feature_lines([(3, 5, LineType.IF, ["A", "B"], "defined(A) && defined(B)")],
"unittest.c")
self.assertSetEqual(feature_dict.get_line_info(2), set(["Base_Feature"]))
self.assertSetEqual(feature_dict.get_line_info(3), set(["A", "B"]))
self.assertSetEqual(feature_dict.get_line_info(4), set(["A", "B"]))
self.assertSetEqual(feature_dict.get_line_info(5), set(["A", "B"]))
self.assertSetEqual(feature_dict.get_line_info(6), set(["Base_Feature"]))
self.assertSetEqual(fexpr_dict.get_line_info(2), set(["Base_Feature"]))
self.assertSetEqual(fexpr_dict.get_line_info(3), set(["defined(A) && defined(B)"]))
self.assertSetEqual(fexpr_dict.get_line_info(4), set(["defined(A) && defined(B)"]))
self.assertSetEqual(fexpr_dict.get_line_info(5), set(["defined(A) && defined(B)"]))
self.assertSetEqual(fexpr_dict.get_line_info(6), set(["Base_Feature"]))
pass
def testfolllowingline(self):
"""Check that a #ifdef can follow another #ifdef"""
feature_dict, fexpr_dict = \
get_feature_lines(
[(3, 5, LineType.IF, ["A", "B"], "defined(A) && defined(B)"),
(6, 8, LineType.IF, ["C", "D"], "defined(C) && defined(D)")],
"unittest.c")
self.assertSetEqual(feature_dict.get_line_info(2), set(["Base_Feature"]))
self.assertSetEqual(feature_dict.get_line_info(3), set(["A", "B"]))
self.assertSetEqual(feature_dict.get_line_info(4), set(["A", "B"]))
self.assertSetEqual(feature_dict.get_line_info(5), set(["A", "B"]))
self.assertSetEqual(feature_dict.get_line_info(6), set(["C", "D"]))
self.assertSetEqual(feature_dict.get_line_info(7), set(["C", "D"]))
self.assertSetEqual(feature_dict.get_line_info(8), set(["C", "D"]))
self.assertSetEqual(feature_dict.get_line_info(9), set(["Base_Feature"]))
self.assertSetEqual(fexpr_dict.get_line_info(2), set(["Base_Feature"]))
self.assertSetEqual(fexpr_dict.get_line_info(3), set(["defined(A) && defined(B)"]))
self.assertSetEqual(fexpr_dict.get_line_info(4), set(["defined(A) && defined(B)"]))
self.assertSetEqual(fexpr_dict.get_line_info(5), set(["defined(A) && defined(B)"]))
self.assertSetEqual(fexpr_dict.get_line_info(6), set(["defined(C) && defined(D)"]))
self.assertSetEqual(fexpr_dict.get_line_info(7), set(["defined(C) && defined(D)"]))
self.assertSetEqual(fexpr_dict.get_line_info(8), set(["defined(C) && defined(D)"]))
self.assertSetEqual(fexpr_dict.get_line_info(9), set(["Base_Feature"]))
pass
def testorderdoesntmatter(self):
"""Check that a #ifdef can follow another #ifdef"""
feature_dict, fexpr_dict = \
get_feature_lines(
[(6, 8, LineType.IF, ["C", "D"], "defined(C) && defined(D)"),
(3, 5, LineType.IF, ["A", "B"], "defined(A) && defined(B)")],
"unittest.c")
self.assertSetEqual(feature_dict.get_line_info(2), set(["Base_Feature"]))
self.assertSetEqual(feature_dict.get_line_info(3), set(["A", "B"]))
self.assertSetEqual(feature_dict.get_line_info(4), set(["A", "B"]))
self.assertSetEqual(feature_dict.get_line_info(5), set(["A", "B"]))
self.assertSetEqual(feature_dict.get_line_info(6), set(["C", "D"]))
self.assertSetEqual(feature_dict.get_line_info(7), set(["C", "D"]))
self.assertSetEqual(feature_dict.get_line_info(8), set(["C", "D"]))
self.assertSetEqual(feature_dict.get_line_info(9), set(["Base_Feature"]))
self.assertSetEqual(fexpr_dict.get_line_info(2), set(["Base_Feature"]))
self.assertSetEqual(fexpr_dict.get_line_info(3), set(["defined(A) && defined(B)"]))
self.assertSetEqual(fexpr_dict.get_line_info(4), set(["defined(A) && defined(B)"]))
self.assertSetEqual(fexpr_dict.get_line_info(5), set(["defined(A) && defined(B)"]))
self.assertSetEqual(fexpr_dict.get_line_info(6), set(["defined(C) && defined(D)"]))
self.assertSetEqual(fexpr_dict.get_line_info(7), set(["defined(C) && defined(D)"]))
self.assertSetEqual(fexpr_dict.get_line_info(8), set(["defined(C) && defined(D)"]))
self.assertSetEqual(fexpr_dict.get_line_info(9), set(["Base_Feature"]))
pass
def testnesting(self):
"""Check that a #ifdef can be nested in an another #ifdef"""
feature_dict, fexpr_dict = \
get_feature_lines(
[(3, 9, LineType.IF, ["A", "B"], "defined(A) && defined(B)"),
(6, 8, LineType.IF, ["C", "D"],
"(defined(A) && defined(B)) && (defined(C) && defined(D))")],
"unittest.c")
self.assertSetEqual(feature_dict.get_line_info(2), set(["Base_Feature"]))
self.assertSetEqual(feature_dict.get_line_info(3), set(["A", "B"]))
self.assertSetEqual(feature_dict.get_line_info(4), set(["A", "B"]))
self.assertSetEqual(feature_dict.get_line_info(5), set(["A", "B"]))
self.assertSetEqual(feature_dict.get_line_info(6),
set(["A", "B", "C", "D"]))
self.assertSetEqual(feature_dict.get_line_info(7),
set(["A", "B", "C", "D"]))
self.assertSetEqual(feature_dict.get_line_info(8),
set(["A", "B", "C", "D"]))
self.assertSetEqual(feature_dict.get_line_info(9), set(["A", "B"]))
self.assertSetEqual(feature_dict.get_line_info(10), set(["Base_Feature"]))
self.assertSetEqual(fexpr_dict.get_line_info(2), set(["Base_Feature"]))
self.assertSetEqual(fexpr_dict.get_line_info(3), set(["defined(A) && defined(B)"]))
self.assertSetEqual(fexpr_dict.get_line_info(4), set(["defined(A) && defined(B)"]))
self.assertSetEqual(fexpr_dict.get_line_info(5), set(["defined(A) && defined(B)"]))
self.assertSetEqual(fexpr_dict.get_line_info(6),
set(["(defined(A) && defined(B)) && (defined(C) && defined(D))"]))
self.assertSetEqual(fexpr_dict.get_line_info(7),
set(["(defined(A) && defined(B)) && (defined(C) && defined(D))"]))
self.assertSetEqual(fexpr_dict.get_line_info(8),
set(["(defined(A) && defined(B)) && (defined(C) && defined(D))"]))
self.assertSetEqual(fexpr_dict.get_line_info(9), set(["defined(A) && defined(B)"]))
self.assertSetEqual(fexpr_dict.get_line_info(10), set(["Base_Feature"]))
pass
def testnestingwithsamefeatures(self):
"""Check that a #ifdef can be nested in another
#ifdef but have the same feature"""
feature_dict, fexpr_dict = \
get_feature_lines(
[(3, 9, LineType.IF, ["A", "B"], "defined(A) && defined(B)"),
(6, 8, LineType.IF, ["A", "D"],
"(defined(A) && defined(B)) && (defined(D))")],
"unittest.c")
self.assertSetEqual(feature_dict.get_line_info(2), set(["Base_Feature"]),
"line 2 should contain the Base_Feature")
self.assertSetEqual(feature_dict.get_line_info(3), set(["A", "B"]))
self.assertSetEqual(feature_dict.get_line_info(4), set(["A", "B"]))
self.assertSetEqual(feature_dict.get_line_info(5), set(["A", "B"]))
self.assertSetEqual(feature_dict.get_line_info(6),
set(["A", "B", "D"]))
self.assertSetEqual(feature_dict.get_line_info(7),
set(["A", "B", "D"]))
self.assertSetEqual(feature_dict.get_line_info(8),
set(["A", "B", "D"]))
self.assertSetEqual(feature_dict.get_line_info(9), set(["A", "B"]))
self.assertSetEqual(feature_dict.get_line_info(10), set(["Base_Feature"]))
self.assertSetEqual(fexpr_dict.get_line_info(2), set(["Base_Feature"]),
"line 2 should contain the Base_Feature")
self.assertSetEqual(fexpr_dict.get_line_info(3), set(["defined(A) && defined(B)"]))
self.assertSetEqual(fexpr_dict.get_line_info(4), set(["defined(A) && defined(B)"]))
self.assertSetEqual(fexpr_dict.get_line_info(5), set(["defined(A) && defined(B)"]))
self.assertSetEqual(fexpr_dict.get_line_info(6),
set(["(defined(A) && defined(B)) && (defined(D))"]))
self.assertSetEqual(fexpr_dict.get_line_info(7),
set(["(defined(A) && defined(B)) && (defined(D))"]))
self.assertSetEqual(fexpr_dict.get_line_info(8),
set(["(defined(A) && defined(B)) && (defined(D))"]))
self.assertSetEqual(fexpr_dict.get_line_info(9), set(["defined(A) && defined(B)"]))
self.assertSetEqual(fexpr_dict.get_line_info(10), set(["Base_Feature"]))
pass
def testinvalidstartend(self):
"""Check we throw when end is before start"""
self.assertRaises(ParseError, get_feature_lines,
[(5, 3, LineType.IF, ["A", "B"], "defined(A) && defined(B)")], "unittest.c")
pass
def testoverlapping(self):
"""Check we throw when line is used multiple times"""
self.assertRaises(ParseError, get_feature_lines,
[(3, 5, LineType.IF, ["A", "B"], "defined(A) && defined(B)"),
(3, 6, LineType.IF, ["C"], "defined(C)")],
"unittest.c")
pass
def testoverlapping_2(self):
"""Check we throw when line is used multiple times"""
self.assertRaises(ParseError, get_feature_lines,
[(3, 5, LineType.IF, ["A", "B"], "defined(A) && defined(B)"),
(5, 6, LineType.IF, ["C"], "defined(C)")],
"unittest.c")
pass
def testelif(self):
"""Check we throw when line is used multiple times"""
# for example #elif "C" on line 5
feature_dict, fexpr_dict = \
get_feature_lines(
[(3, 5, LineType.IF, ["A", "B"], "defined(A) && defined(B)"),
(5, 6, LineType.ELIF, ["A", "B", "C"], "(!(defined(A)) && (!(defined(B)) && defined(C)")],
"unittest.c")
self.assertSetEqual(feature_dict.get_line_info(2), set(["Base_Feature"]))
self.assertSetEqual(feature_dict.get_line_info(3), set(["A", "B"]))
self.assertSetEqual(feature_dict.get_line_info(4), set(["A", "B"]))
self.assertSetEqual(feature_dict.get_line_info(5),
set(["A", "B", "C"]))
self.assertSetEqual(feature_dict.get_line_info(6),
set(["A", "B", "C"]))
self.assertSetEqual(feature_dict.get_line_info(7), set(["Base_Feature"]))
self.assertSetEqual(fexpr_dict.get_line_info(2), set(["Base_Feature"]))
self.assertSetEqual(fexpr_dict.get_line_info(3), set(["defined(A) && defined(B)"]), fexpr_dict.get_line_info(3))
self.assertSetEqual(fexpr_dict.get_line_info(4), set(["defined(A) && defined(B)"]))
self.assertSetEqual(fexpr_dict.get_line_info(5),
set(["(!(defined(A)) && (!(defined(B)) && defined(C)"]))
self.assertSetEqual(fexpr_dict.get_line_info(6),
set(["(!(defined(A)) && (!(defined(B)) && defined(C)"]))
self.assertSetEqual(fexpr_dict.get_line_info(7), set(["Base_Feature"]))
pass
| gpl-2.0 | 849,802,686,914,083,300 | 54.563518 | 179 | 0.579025 | false |
projectatomic/atomic-reactor | atomic_reactor/plugins/build_source_container.py | 1 | 3923 | """
Copyright (c) 2019 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from __future__ import print_function, unicode_literals, absolute_import
import os
import subprocess
import tempfile
from atomic_reactor.build import BuildResult
from atomic_reactor.constants import (PLUGIN_SOURCE_CONTAINER_KEY, EXPORTED_SQUASHED_IMAGE_NAME,
IMAGE_TYPE_DOCKER_ARCHIVE, PLUGIN_FETCH_SOURCES_KEY)
from atomic_reactor.plugin import BuildStepPlugin
from atomic_reactor.util import get_exported_image_metadata
class SourceContainerPlugin(BuildStepPlugin):
"""
Build source container image using
https://github.com/containers/BuildSourceImage
"""
key = PLUGIN_SOURCE_CONTAINER_KEY
def export_image(self, image_output_dir):
output_path = os.path.join(tempfile.mkdtemp(), EXPORTED_SQUASHED_IMAGE_NAME)
cmd = ['skopeo', 'copy']
source_img = 'oci:{}'.format(image_output_dir)
dest_img = 'docker-archive:{}'.format(output_path)
cmd += [source_img, dest_img]
self.log.info("Calling: %s", ' '.join(cmd))
try:
subprocess.check_output(cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
self.log.error("failed to save docker-archive :\n%s", e.output)
raise
img_metadata = get_exported_image_metadata(output_path, IMAGE_TYPE_DOCKER_ARCHIVE)
self.workflow.exported_image_sequence.append(img_metadata)
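    # Illustrative sketch (added for clarity, not part of the original plugin):
    # with hypothetical temporary paths, export_image() above effectively runs
    #   skopeo copy oci:<image_output_dir> docker-archive:<tmpdir>/<EXPORTED_SQUASHED_IMAGE_NAME>
    # and then appends the docker-archive metadata to workflow.exported_image_sequence.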
def run(self):
"""Build image inside current environment.
Returns:
BuildResult
"""
fetch_sources_result = self.workflow.prebuild_results.get(PLUGIN_FETCH_SOURCES_KEY, {})
source_data_dir = fetch_sources_result.get('image_sources_dir')
remote_source_data_dir = fetch_sources_result.get('remote_sources_dir')
source_exists = source_data_dir and os.path.isdir(source_data_dir)
remote_source_exists = remote_source_data_dir and os.path.isdir(remote_source_data_dir)
if not source_exists and not remote_source_exists:
err_msg = "No SRPMs directory '{}' available".format(source_data_dir)
err_msg += "\nNo Remote source directory '{}' available".format(remote_source_data_dir)
self.log.error(err_msg)
return BuildResult(logs=err_msg, fail_reason=err_msg)
if source_exists and not os.listdir(source_data_dir):
self.log.warning("SRPMs directory '%s' is empty", source_data_dir)
if remote_source_exists and not os.listdir(remote_source_data_dir):
self.log.warning("Remote source directory '%s' is empty", remote_source_data_dir)
image_output_dir = tempfile.mkdtemp()
cmd = ['bsi', '-d']
drivers = []
if source_exists:
drivers.append('sourcedriver_rpm_dir')
cmd.append('-s')
cmd.append('{}'.format(source_data_dir))
if remote_source_exists:
drivers.append('sourcedriver_extra_src_dir')
cmd.append('-e')
cmd.append('{}'.format(remote_source_data_dir))
driver_str = ','.join(drivers)
cmd.insert(2, driver_str)
cmd.append('-o')
cmd.append('{}'.format(image_output_dir))
try:
output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
self.log.error("BSI failed with output:\n%s", e.output)
return BuildResult(logs=e.output, fail_reason='BSI utility failed build source image')
self.log.debug("Build log:\n%s\n", output)
self.export_image(image_output_dir)
return BuildResult(
logs=output,
oci_image_path=image_output_dir,
skip_layer_squash=True
)
| bsd-3-clause | -2,101,025,110,635,851,300 | 36.721154 | 99 | 0.641091 | false |
pnwairfire/eflookup | test/unit/eflookup/fccs2ef/test_import.py | 1 | 18894 | """test_import.py: Functional tests for code that imports raw data from
scientists and writes data files formatted to be included in the package
distributions
"""
import re
__author__ = "Joel Dubowy"
from eflookup.fccs2ef.importer import (
Fccs2CoverTypeImporter, CoverType2EfGroupImporter,
EfGroup2EfImporter, CatPhase2EFGroupImporter
)
# TODO: put this in base class, add base class 'test_import' method, remove
# each class' test_import method, and add IMPORTER_CLASS class variable to
# each child class. This will only work if we can somehow tell py.test
# not to run test_import if the current class is the base class
def run_test(tmpdir, input_content, importer_class, expected_output):
input_file = tmpdir.join("input.csv")
input_file.write(input_content)
input_filename = str(input_file)
output_filename = input_filename.replace('input', 'output')
importer_class(input_filename).write(output_file_name=output_filename)
assert len(tmpdir.listdir()) == 2
# TODO: assert that output_filename exists
output_content = open(output_filename, 'r').read()
var_name = re.compile('([^=]+)=').search(output_content).group(1).strip()
exec(output_content)
output = locals()[var_name]
assert expected_output == output
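# Note (added for clarity, not part of the original file): each importer writes a
# Python snippet of the form "<VAR_NAME> = {...}", e.g. (hypothetical variable name)
#   FCCS_2_COVER_TYPE = {"0": "404", "1": "13"}
# run_test() exec()s that snippet and reads <VAR_NAME> back out of locals() to
# compare it against EXPECTED_OUTPUT.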
class TestFccs2CoverTypeImporter(object):
"""Top level functional test for importing fccs id to cover type mappings
"""
INPUT_CONTENT = """fccs_id,cover_type_id,,,
0,404,,,
1,13,,,
2,131,,,
3,136,,,
4,118,,,
"""
EXPECTED_OUTPUT = {
"0":"404",
"1":"13",
"2":"131",
"3":"136",
"4":"118"
}
def test_import(self, tmpdir):
run_test(tmpdir, self.INPUT_CONTENT, Fccs2CoverTypeImporter,
self.EXPECTED_OUTPUT)
class TestCoverType2EfGroupImporter(object):
"""Top level functional test for importing cover type to ef group mappings
"""
INPUT_CONTENT = """MapID,Cover type,WF,Rx,RegionalRx,RegionalWF
1,SRM 101: Bluebunch Wheatgrass,6: Grass,6: Grass,24-26: W Grass,24-26: W Grass
2,SRM 102: Idaho Fescue,6: Grass,6: Grass,24-26: W Grass,24-26: W Grass
3,SRM 103: Green Fescue,6: Grass,6: Grass,24-26: W Grass,24-26: W Grass
4,SRM 104: Antelope Bitterbrush-Bluebunch Wheatgrass,6: Grass,6: Grass,24-26: W Grass,24-26: W Grass
5,SRM 105: Antelope Bitterbrush-Idaho Fescue,6: Grass,6: Grass,24-26: W Grass,24-26: W Grass
6,SRM 106: Bluegrass Scabland,6: Grass,6: Grass,24-26: W Grass,24-26: W Grass
7,SRM 107: Western Juniper-Big Sagebrush-Bluebunch Wheatgrass,5: Shrub,5: Shrub,30-32: W Shrub,30-32: W Shrub
13,SRM 203: Riparian Woodland,4: WF NW Conifer,3: Rx NW Conifer,27-29: W Hdwd,
"""
# Note: the output is ordered by FCCS Id
EXPECTED_OUTPUT = {
"1": {"wf": "6", "rx": "6", "regrx": "24-26", "regwf": "24-26"},
"2": {"wf": "6", "rx": "6", "regrx": "24-26", "regwf": "24-26"},
"3": {"wf": "6", "rx": "6", "regrx": "24-26", "regwf": "24-26"},
"4": {"wf": "6", "rx": "6", "regrx": "24-26", "regwf": "24-26"},
"5": {"wf": "6", "rx": "6", "regrx": "24-26", "regwf": "24-26"},
"6": {"wf": "6", "rx": "6", "regrx": "24-26", "regwf": "24-26"},
"7": {"wf": "5", "rx": "5", "regrx": "30-32", "regwf": "30-32"},
"13": {"wf": "4", "rx": "3", "regrx": "27-29", "regwf": None},
}
def test_import(self, tmpdir):
run_test(tmpdir, self.INPUT_CONTENT, CoverType2EfGroupImporter,
self.EXPECTED_OUTPUT)
class TestCatPhase2EFGroupImporter(object):
"""Top level functional test for importing
"""
INPUT_CONTENT = """,,,,Note: This mapping should be used along with EF Group by FB to assign EFs.,,,,"CO2, CH4","CO, NOx, NH3, SO2, PM25","CO2, CO, CH4","NOx, NH3, SO2, PM25","CO2, CO, CH4, NH3, PM2.5","NOx, SO2","CO2, CO, CH4","NOx, NH3, SO2, PM25","CO2, CO, CH4, PM2.5","NOx, NH3, SO2","CO2, CO, CH4","NOx, NH3, SO2, PM25","CO2, CO, CH4, PM25","NOx, NH3, SO2","CO2, CO, CH4, NH3, PM25","NOx, SO2","CO2, CO, CH4, NH3, PM25","NOx, SO2","CO2, CO, CH4",,"Most of the time, the emissions module will use these rules (but see exceptions)",,,These are just for reference purposes.,,,,,,,,,,EF Group,CO2,CO,CH4,NOx,NH3,SO2,PM2.5,
"Duff = Ground fuels: upper duff, lower duff, basal accumulations (BA), squirrel middens (SM)",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
"CWD = Class 2 and 3 snags, coarse wood under woody",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
Follow this table for where residual emissions are expected (only N/A are not),,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
,,,Duff/CWD,Consume output variable,Category,CombustionPhase,Generic Assignment,9-11: SE Grass,9-11: SE Grass,12-14: SE Hdwd,12-14: SE Hdwd,15-17: SE Pine,15-17: SE Pine,18-20: SE Shrub,18-20: SE Shrub,21-23: W MC,21-23: W MC,24-26: W Grass,24-26: W Grass,27-29: W Hdwd,27-29: W Hdwd,30-32: W Shrub,30-32: W Shrub,30-32: W Shrub,30-32: W Shrub,33-35: Boreal,,Simplified Rules,EF Group,,Group #,# Cover Type,Note,,,,,,,SE grass F/S,9,1700,70.2,2.67,3.26,1.2,0.97,12.08,
C_over_crown,canopy,overstory,,C_over_crown_F,Overstory tree crowns,Flaming,General (1-6),10,9,13,12,16,15,19,18,22,21,25,24,28,27,31,30,31,30,33,,All outputs except for below:,Flaming/Short-term smoldering EF Groups 1-6,,1,Southeastern Forest,Assigned by fuelbed,,,,,,,SE Grass F,10,1710,,2.42,,,,,
,,,,C_over_crown_S,Overstory tree crowns,Smoldering,General (1-6),11,9,14,12,17,15,20,18,23,21,26,24,29,27,32,30,32,30,34,,,,,2,Boreal Forest,Assigned by fuelbed,,,,,,,SE Grass S,11,1538,,5.4,,,,,
,,,,C_over_crown_R,Overstory tree crowns,Residual,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,,C_wood_S1000hr_R,Woody RSC (7),,3,Western Forest - Rx,Assigned by fuelbed and burn type (prescribed or wildfire),,,,,,,SE Hdwd F/S,12,1688,78.9,2.42,2.43,1.79,0.63,14.32,
C_mid_crown,canopy,midstory,,C_mid_crown_F,Midstory tree crowns,Flaming,General (1-6),10,9,13,12,16,15,19,18,22,21,25,24,28,27,31,30,31,30,33,,C_wood_R1000hr_R,Woody RSC (7),,4,Western Forest - WF,Assigned by fuelbed and burn type (prescribed or wildfire),,,,,,,SE Hdwd F,13,1702,68.6,1.92,,,,,
,,,CWD,C_snagc3_R,Class 3 snag wood,Residual,Woody RSC (7),7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,,"SE Hdwd (F, S)","CO2, CO, CH4",,Shrub,,,,,,,,,,,,,,,,,,
,,,,C_herb_1live_R,Herbs - live primary layer,Residual,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,,,,,LLM,,,,,,,,,,,,,,,,,,
"""
EXPECTED_OUTPUT = {
'9-11': {
'canopy': {
'overstory': {
'smoldering': {'CO': '9', 'NH3': '9', 'NOx': '9', 'PM2.5': '9', 'SO2': '9'},
'flaming': {'CO': '9', 'NH3': '9', 'NOx': '9', 'PM2.5': '9', 'SO2': '9'},
'residual': {'CO': None, 'NH3': None, 'NOx': None, 'PM2.5': None, 'SO2': None},
},
'midstory': {
'flaming': {'CO': '9', 'NH3': '9', 'NOx': '9', 'PM2.5': '9', 'SO2': '9'}
},
'snags class 3': {
'residual': {'CO': '7', 'NH3': '7', 'NOx': '7', 'PM2.5': '7', 'SO2': '7'}
}
},
'nonwoody': {
'primary live': {
'residual': {'CO': None, 'NH3': None, 'NOx': None, 'PM2.5': None, 'SO2': None}
}
}
},
'12-14': {
'canopy': {
'overstory': {
'smoldering': {'NH3': '12', 'NOx': '12', 'PM2.5': '12', 'SO2': '12'},
'flaming': {'NH3': '12', 'NOx': '12', 'PM2.5': '12', 'SO2': '12'},
'residual': {'NH3': None, 'NOx': None, 'PM2.5': None, 'SO2': None},
},
'midstory': {
'flaming': {'NH3': '12', 'NOx': '12', 'PM2.5': '12', 'SO2': '12'}
},
'snags class 3': {
'residual': {'NH3': '7', 'NOx': '7', 'PM2.5': '7', 'SO2': '7'}
}
},
'nonwoody': {
'primary live': {
'residual': {'NH3': None, 'NOx': None, 'PM2.5': None, 'SO2': None}
}
}
},
'15-17': {
'canopy': {
'overstory': {
'smoldering': {'NOx': '15', 'SO2': '15'},
'flaming': {'NOx': '15', 'SO2': '15'},
'residual': {'NOx': None, 'SO2': None},
},
'midstory': {
'flaming': {'NOx': '15', 'SO2': '15'}
},
'snags class 3': {
'residual': {'NOx': '7', 'SO2': '7'}
}
},
'nonwoody': {
'primary live': {
'residual': {'NOx': None, 'SO2': None}
}
}
},
'18-20': {
'canopy': {
'overstory': {
'smoldering': {'NH3': '18', 'NOx': '18', 'PM2.5': '18', 'SO2': '18'},
'flaming': {'NH3': '18', 'NOx': '18', 'PM2.5': '18', 'SO2': '18'},
'residual': {'NH3': None, 'NOx': None, 'PM2.5': None, 'SO2': None},
},
'midstory': {
'flaming': {'NH3': '18', 'NOx': '18', 'PM2.5': '18', 'SO2': '18'}
},
'snags class 3': {
'residual': {'NH3': '7', 'NOx': '7', 'PM2.5': '7', 'SO2': '7'}
}
},
'nonwoody': {
'primary live': {
'residual': {'NH3': None, 'NOx': None, 'PM2.5': None, 'SO2': None}
}
}
},
'21-23': {
'canopy': {
'overstory': {
'smoldering': {'NH3': '21', 'NOx': '21', 'SO2': '21'},
'flaming': {'NH3': '21', 'NOx': '21', 'SO2': '21'},
'residual': {'NH3': None, 'NOx': None, 'SO2': None},
},
'midstory': {
'flaming': {'NH3': '21', 'NOx': '21', 'SO2': '21'}
},
'snags class 3': {
'residual': {'NH3': '7', 'NOx': '7', 'SO2': '7'}
}
},
'nonwoody': {
'primary live': {
'residual': {'NH3': None, 'NOx': None, 'SO2': None}
}
}
},
'24-26': {
'canopy': {
'overstory': {
'smoldering': {'NH3': '24', 'NOx': '24', 'PM2.5': '24', 'SO2': '24'},
'flaming': {'NH3': '24', 'NOx': '24', 'PM2.5': '24', 'SO2': '24'},
'residual': {'NH3': None, 'NOx': None, 'PM2.5': None, 'SO2': None},
},
'midstory': {
'flaming': {'NH3': '24', 'NOx': '24', 'PM2.5': '24', 'SO2': '24'}
},
'snags class 3': {
'residual': {'NH3': '7', 'NOx': '7', 'PM2.5': '7', 'SO2': '7'}
}
},
'nonwoody': {
'primary live': {
'residual': {'NH3': None, 'NOx': None, 'PM2.5': None, 'SO2': None}
}
}
},
'27-29': {
'canopy': {
'overstory': {
'smoldering': {'NH3': '27', 'NOx': '27', 'SO2': '27'},
'flaming': {'NH3': '27', 'NOx': '27', 'SO2': '27'},
'residual': {'NH3': None, 'NOx': None, 'SO2': None},
},
'midstory': {
'flaming': {'NH3': '27', 'NOx': '27', 'SO2': '27'}
},
'snags class 3': {
'residual': {'NH3': '7', 'NOx': '7', 'SO2': '7'}
}
},
'nonwoody': {
'primary live': {
'residual': {'NH3': None, 'NOx': None, 'SO2': None}
}
}
},
'30-32': {
'canopy': {
'overstory': {
'smoldering': {'NOx': '30', 'SO2': '30'},
'flaming': {'NOx': '30', 'SO2': '30'},
'residual': {'NOx': None, 'SO2': None},
},
'midstory': {
'flaming': {'NOx': '30', 'SO2': '30'}
},
'snags class 3': {
'residual': {'NOx': '7', 'SO2': '7'}
}
},
'nonwoody': {
'primary live': {
'residual': {'NOx': None, 'SO2': None}
}
}
},
'33-35': {
'canopy': {
'overstory': {
'smoldering': {'CO': '34', 'CH4': '34', 'CO2': '34'},
'flaming': {'CO': '33', 'CH4': '33', 'CO2': '33'},
'residual': {'CO': None, 'CH4': None, 'CO2': None},
},
'midstory': {
'flaming': {'CO': '33', 'CH4': '33', 'CO2': '33'}
},
'snags class 3': {
'residual': {'CO': '7', 'CH4': '7', 'CO2': '7'}
}
},
'nonwoody': {
'primary live': {
'residual': {'CO': None, 'CH4': None, 'CO2': None}
}
}
}
}
def test_import(self, tmpdir):
run_test(tmpdir, self.INPUT_CONTENT, CatPhase2EFGroupImporter,
self.EXPECTED_OUTPUT)
class TestEfGroup2EfImporter(object):
"""Top level functional test for importing ef group to emission factors mappings.
"""
INPUT_CONTENT = """g/kg,,Urbanski + Liu (1-8),,,,,,,,Revised (9-32),,,,,,,,,,,,,,,,,,,,,,,,,,
,,SE pine,Boreal,Rx NW Conifer,WF NW Conifer,W Shrub,Grass,Residual CWD,Residual Duff,SE grass F/S,SE Grass F,SE Grass S,SE Hdwd F/S,SE Hdwd F,SE Hdwd S,SE Pine F/S,SE Pine F,SE Pine S,SE Shrub F/S,SE Shrub F,SE Shrub S,W MC F/S,W MC F,W MC S,W Grass F/S,W Grass F,W Grass S,W Hdwd F/S,W Hdwd F,W Hdwd S,W Shrub F/S,W Shrub F,W Shrub S,Boreal F/S,Boreal F,Boreal S
Pollutant,Formula,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35
Carbon Dioxide,CO2,1703,1641,1598,1454,1674,1705,1408,1371,1700,1710,1538,1688,1702,1580,1606,1677,1530,1703,1743,1461,1603.16,1665.93,1592.10,1531,1638,1102,1577,1711,1489,1570,1696,1549,1606,1690,1570
Carbon Monoxide,CO,76,95,105,89.3,74,61,229,257,70.2,,,78.9,68.6,129.5,94.6,72.4,156.2,74.3,72.4,93.8,108.47,83.61,139.83,55.8,45,115.3,109.3,55.3,150.6,107.2,66.4,101.6,117,73,154
Methane,CH4,2.32,3.38,4.86,4.9,3.69,1.95,13.94,7.945,2.67,2.42,5.4,2.42,1.92,5.9,3.74,2.38,8.72,2.44,2.24,3.3,5.63,3.88,7.58,1.98,1.67,4.2,5.79,1.89,6.85,2.51,2.02,4.44,5.25,2.19,7.9
Nitrogen Oxides,NOx,1.7,1,2.06,0.49,2.18,2.18,0,0.67,3.26,,,2.43,,,1.96,,,4.23,,,3.22,,,3.26,,,3.25,,,3.57,,,2.33,,
Ammonia,NH3,0.14,0.79,1.53,1.5,1.5,1.5,0.48,2.67,1.2,,,1.79,,,0.7,0.48,1.15,2.21,,,1.07,,,0.3,,,0.58,,,1.48,1.45,2.12,2.07,,
Sulfur Dioxide,SO2,1.06,1.06,1.06,0.32,0.68,0.68,0,1.76,0.97,,,0.63,,,0.79,,,0.87,,,0.88,,,0.97,,,0.52,,,0.53,,,0.15,,
PM2.5,PM2.5,12.58,21.5,17.57,26,7.06,8.51,33,35.3,12.08,,,14.32,,,29.43,17.56,49.72,12.03,,,15.30,13.73,25.38,9.89,,,10.77,6.36,11.54,7.99,6.97,9.39,21.5,,
"""
EXPECTED_OUTPUT = {
"1": {"CH4": "2.32", "CO": "76", "CO2": "1703", "NH3": "0.14", "NOx": "1.7", "PM2.5": "12.58", "SO2": "1.06"},
"10": {"CH4": "2.42", "CO2": "1710"},
"11": {"CH4": "5.4", "CO2": "1538"},
"12": {"CH4": "2.42", "CO": "78.9", "CO2": "1688", "NH3": "1.79", "NOx": "2.43", "PM2.5": "14.32", "SO2": "0.63"},
"13": {"CH4": "1.92", "CO": "68.6", "CO2": "1702"},
"14": {"CH4": "5.9", "CO": "129.5", "CO2": "1580"},
"15": {"CH4": "3.74", "CO": "94.6", "CO2": "1606", "NH3": "0.7", "NOx": "1.96", "PM2.5": "29.43", "SO2": "0.79"},
"16": {"CH4": "2.38", "CO": "72.4", "CO2": "1677", "NH3": "0.48", "PM2.5": "17.56"},
"17": {"CH4": "8.72", "CO": "156.2", "CO2": "1530", "NH3": "1.15", "PM2.5": "49.72"},
"18": {"CH4": "2.44", "CO": "74.3", "CO2": "1703", "NH3": "2.21", "NOx": "4.23", "PM2.5": "12.03", "SO2": "0.87"},
"19": {"CH4": "2.24", "CO": "72.4", "CO2": "1743"},
"2": {"CH4": "3.38", "CO": "95", "CO2": "1641", "NH3": "0.79", "NOx": "1", "PM2.5": "21.5", "SO2": "1.06"},
"20": {"CH4": "3.3", "CO": "93.8", "CO2": "1461"},
"21": {"CH4": "5.63", "CO": "108.47", "CO2": "1603.16", "NH3": "1.07", "NOx": "3.22", "PM2.5": "15.30", "SO2": "0.88"},
"22": {"CH4": "3.88", "CO": "83.61", "CO2": "1665.93", "PM2.5": "13.73"},
"23": {"CH4": "7.58", "CO": "139.83", "CO2": "1592.10", "PM2.5": "25.38"},
"24": {"CH4": "1.98", "CO": "55.8", "CO2": "1531", "NH3": "0.3", "NOx": "3.26", "PM2.5": "9.89", "SO2": "0.97"},
"25": {"CH4": "1.67", "CO": "45", "CO2": "1638"},
"26": {"CH4": "4.2", "CO": "115.3", "CO2": "1102"},
"27": {"CH4": "5.79", "CO": "109.3", "CO2": "1577", "NH3": "0.58", "NOx": "3.25", "PM2.5": "10.77", "SO2": "0.52"},
"28": {"CH4": "1.89", "CO": "55.3", "CO2": "1711", "PM2.5": "6.36"},
"29": {"CH4": "6.85", "CO": "150.6", "CO2": "1489", "PM2.5": "11.54"},
"3": {"CH4": "4.86", "CO": "105", "CO2": "1598", "NH3": "1.53", "NOx": "2.06", "PM2.5": "17.57", "SO2": "1.06"},
"30": {"CH4": "2.51", "CO": "107.2", "CO2": "1570", "NH3": "1.48", "NOx": "3.57", "PM2.5": "7.99", "SO2": "0.53"},
"31": {"CH4": "2.02", "CO": "66.4", "CO2": "1696", "NH3": "1.45", "PM2.5": "6.97"},
"32": {"CH4": "4.44", "CO": "101.6", "CO2": "1549", "NH3": "2.12", "PM2.5": "9.39"},
"33": {"CH4": "5.25", "CO": "117", "CO2": "1606", "NH3": "2.07", "NOx": "2.33", "PM2.5": "21.5", "SO2": "0.15"},
"34": {"CH4": "2.19", "CO": "73", "CO2": "1690"},
"35": {"CH4": "7.9", "CO": "154", "CO2": "1570"},
"4": {"CH4": "4.9", "CO": "89.3", "CO2": "1454", "NH3": "1.5", "NOx": "0.49", "PM2.5": "26", "SO2": "0.32"},
"5": {"CH4": "3.69", "CO": "74", "CO2": "1674", "NH3": "1.5", "NOx": "2.18", "PM2.5": "7.06", "SO2": "0.68"},
"6": {"CH4": "1.95", "CO": "61", "CO2": "1705", "NH3": "1.5", "NOx": "2.18", "PM2.5": "8.51", "SO2": "0.68"},
"7": {"CH4": "13.94", "CO": "229", "CO2": "1408", "NH3": "0.48", "NOx": "0", "PM2.5": "33", "SO2": "0"},
"8": {"CH4": "7.945", "CO": "257", "CO2": "1371", "NH3": "2.67", "NOx": "0.67", "PM2.5": "35.3", "SO2": "1.76"},
"9": {"CH4": "2.67", "CO": "70.2", "CO2": "1700", "NH3": "1.2", "NOx": "3.26", "PM2.5": "12.08", "SO2": "0.97"}
}
def test_import(self, tmpdir):
run_test(tmpdir, self.INPUT_CONTENT, EfGroup2EfImporter,
self.EXPECTED_OUTPUT)
| gpl-3.0 | 4,152,691,517,601,990,700 | 52.676136 | 627 | 0.453319 | false |
matheus-fonseca/acidentes-em-rodovias | acidentes_em_rodovias/app/tests/tests_controller_consultabasica_regiao.py | 1 | 1570 | # -*- coding: utf-8 -*-
import sys, os, inspect
current_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parent_dir = os.path.dirname(current_dir)
sys.path.append(parent_dir)
from django.test import SimpleTestCase
from django.template import RequestContext, TemplateDoesNotExist, Context
from controller import consultabasica_regiao_controller as ctrl
from _mysql_exceptions import *
from nose import with_setup
from mock import MagicMock,patch,Mock
from django.utils.datastructures import MultiValueDictKeyError
class Test_Regiao(SimpleTestCase):
"""docstring for Test_Regiao"""
    def setUp(self):  # set up the environment for the test
self.request = Context()
self.request.GET = dict()
self.request.GET['uf_id'] = 'DF'
self.request.GET['municipio_id'] = 'Brasilia'
        # find out which test method will be called and format the output
func = str(self.id).split('=')[-1][:-2]
func = func.split('test_')[-1]
func = func.replace('_',' ')
        out = '\rTest of ' + func + ' '
out = out.ljust(65,'-')
sys.stderr.write(out)
self.shortDescription()
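        # Illustrative sketch (added for clarity, not part of the original file):
        # for a test method named test_consulta_por_regiao the banner written to
        # stderr looks roughly like
        #   "\rTest of consulta por regiao ---------------------------------"
        # padded with '-' to 65 characters.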
def tearDown(self):
        # report that the test has finished
#print 'Done'
sys.stderr.write('Done\n')
def shortDescription(self):
return "Teste da classe Test_Regiao"
def test_consulta_por_regiao(self):
ctrl.consulta_por_regiao(None)
def test_consulta_municipios_na_regiao(self):
ctrl.consulta_municipios_na_regiao(self.request)
def test_consulta_ocorrencias_por_municipio(self):
ctrl.consulta_ocorrencias_por_municipio(self.request)
| gpl-3.0 | -4,648,626,602,068,074,000 | 33.086957 | 87 | 0.716837 | false |
ericholscher/pinax | pinax/apps/tagging_utils/widgets.py | 1 | 1693 | from django import forms
from django.utils.safestring import mark_safe
from django.core.urlresolvers import reverse
from django.conf import settings
class TagAutoCompleteInput(forms.TextInput):
class Media:
css = {
'all': (settings.STATIC_URL + 'pinax/css/jquery.autocomplete.css',)
}
js = (
settings.STATIC_URL + 'pinax/js/jquery-1.3.2.min.js',
settings.STATIC_URL + 'pinax/js/jquery.bgiframe.min.js',
settings.STATIC_URL + 'pinax/js/jquery.ajaxQueue.js',
settings.STATIC_URL + 'pinax/js/jquery.autocomplete.min.js'
)
def __init__(self, app_label, model, *args, **kwargs):
self.app_label = app_label
self.model = model
super(TagAutoCompleteInput, self).__init__(*args, **kwargs)
def render(self, name, value, attrs=None):
output = super(TagAutoCompleteInput, self).render(name, value, attrs)
return output + mark_safe(u'''<script type="text/javascript">
jQuery("#id_%s").autocomplete('%s', {
max: 10,
highlight: false,
multiple: true,
multipleSeparator: " ",
scroll: true,
scrollHeight: 300,
matchContains: true,
autoFill: true
});
</script>''' % (
name,
reverse(
'tagging_utils_autocomplete',
args=[],
kwargs={
'app_label': self.app_label,
'model': self.model
}
)
)
)
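# Illustrative usage sketch (added for clarity, not part of the original file);
# the app_label and model below are hypothetical:
#   class BookmarkForm(forms.Form):
#       tags = forms.CharField(
#           widget=TagAutoCompleteInput(app_label='bookmarks', model='bookmark'))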
| mit | -465,793,748,839,148,160 | 35.021277 | 79 | 0.502067 | false |
shagi/guifiadmin | vpn/conffiles.py | 1 | 2893 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
from shutil import make_archive
from tempfile import mkdtemp
from django.template.loader import render_to_string
def tinc_gateway_conf(gateway):
return render_to_string(
'vpn/tinc/gateway_tinc.conf', {
'gateway': gateway,
}
)
def tinc_gateway_host(gateway):
return render_to_string(
'vpn/tinc/hosts/gateway.txt', {
'gateway': gateway,
}
)
def tinc_client_tinc_up(client):
return render_to_string(
'vpn/tinc/client_tinc_up.sh', {
'client': client,
}
)
def tinc_client_tinc_down(client):
return render_to_string(
'vpn/tinc/client_tinc_down.sh', {
'client': client,
}
)
def tinc_client_conf(client):
return render_to_string(
'vpn/tinc/client_tinc.conf', {
'client': client,
}
)
def tinc_client_openwrt_tinc_config(client):
return render_to_string(
'vpn/openwrt/config/tinc.txt', {
'client': client,
}
)
def tinc_client_openwrt_firewall_config(client):
return render_to_string(
'vpn/openwrt/config/firewall.txt', {
'client': client,
}
)
def tinc_client_host(client):
return render_to_string(
'vpn/tinc/hosts/client.txt', {
'client': client,
}
)
def tinc_client_openwrt_config_tar(client):
basedir = mkdtemp()
tinc_config_base = os.path.join(basedir, 'etc', 'tinc', client.gateway.nickname)
os.makedirs(tinc_config_base)
os.makedirs(os.path.join(tinc_config_base, 'hosts'))
with open(os.path.join(tinc_config_base, 'tinc.conf'), 'w') as conffile:
conffile.write(tinc_client_conf(client))
with open(os.path.join(tinc_config_base, 'tinc_up'), 'w') as conffile:
conffile.write(tinc_client_tinc_up(client))
with open(os.path.join(tinc_config_base, 'tinc_down'), 'w') as conffile:
conffile.write(tinc_client_tinc_down(client))
with open(os.path.join(tinc_config_base, 'hosts', client.gateway.nickname), 'w') as conffile:
conffile.write(tinc_gateway_host(client.gateway))
with open(os.path.join(tinc_config_base, 'hosts', client.member.username), 'w') as conffile:
conffile.write(tinc_client_host(client))
openwrt_config_base = os.path.join(basedir, 'etc', 'config')
os.makedirs(openwrt_config_base)
with open(os.path.join(openwrt_config_base, 'firewall'), 'w') as conffile:
conffile.write(tinc_client_openwrt_firewall_config(client))
with open(os.path.join(openwrt_config_base, 'tinc'), 'w') as conffile:
conffile.write(tinc_client_openwrt_tinc_config(client))
tarfile = make_archive('openwrt_config', 'gztar', root_dir=basedir)
with open(tarfile, 'rb') as tarfile:
return tarfile.read()
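# Illustrative sketch (added for clarity, not part of the original module): for a
# gateway nicknamed "gw0" and a member "alice" (both hypothetical), the archive
# built above contains
#   etc/tinc/gw0/{tinc.conf, tinc_up, tinc_down, hosts/gw0, hosts/alice}
#   etc/config/{firewall, tinc}
# packed by make_archive('openwrt_config', 'gztar', root_dir=basedir).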
| agpl-3.0 | 2,186,271,358,143,906,800 | 26.037383 | 97 | 0.625648 | false |
tjanez/ansible | lib/ansible/modules/cloud/openstack/os_stack.py | 1 | 9095 | #!/usr/bin/python
#coding: utf-8 -*-
# (c) 2016, Mathieu Bultel <[email protected]>
# (c) 2016, Steve Baker <[email protected]>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: os_stack
short_description: Add/Remove Heat Stack
extends_documentation_fragment: openstack
version_added: "2.2"
author: "Mathieu Bultel (matbu), Steve Baker (steveb)"
description:
- Add or Remove a Stack to an OpenStack Heat
options:
state:
description:
- Indicate desired state of the resource
choices: ['present', 'absent']
required: false
default: present
name:
description:
- Name of the stack that should be created, name could be char and digit, no space
required: true
template:
description:
- Path of the template file to use for the stack creation
required: false
default: None
environment:
description:
- List of environment files that should be used for the stack creation
required: false
default: None
parameters:
description:
- Dictionary of parameters for the stack creation
required: false
default: None
rollback:
description:
- Rollback stack creation
required: false
default: false
timeout:
description:
- Maximum number of seconds to wait for the stack creation
required: false
default: 3600
requirements:
- "python >= 2.6"
- "shade"
'''
EXAMPLES = '''
---
- name: create stack
ignore_errors: True
register: stack_create
os_stack:
name: "{{ stack_name }}"
state: present
template: "/path/to/my_stack.yaml"
environment:
- /path/to/resource-registry.yaml
- /path/to/environment.yaml
parameters:
bmc_flavor: m1.medium
bmc_image: CentOS
key_name: default
private_net: "{{ private_net_param }}"
node_count: 2
name: undercloud
image: CentOS
my_flavor: m1.large
external_net: "{{ external_net_param }}"
'''
RETURN = '''
id:
description: Stack ID.
type: string
sample: "97a3f543-8136-4570-920e-fd7605c989d6"
stack:
action:
description: Action, could be Create or Update.
type: string
sample: "CREATE"
creation_time:
description: Time when the action has been made.
type: string
sample: "2016-07-05T17:38:12Z"
description:
description: Description of the Stack provided in the heat template.
type: string
sample: "HOT template to create a new instance and networks"
id:
description: Stack ID.
type: string
sample: "97a3f543-8136-4570-920e-fd7605c989d6"
name:
description: Name of the Stack
type: string
sample: "test-stack"
identifier:
description: Identifier of the current Stack action.
type: string
sample: "test-stack/97a3f543-8136-4570-920e-fd7605c989d6"
links:
description: Links to the current Stack.
type: list of dict
sample: "[{'href': 'http://foo:8004/v1/7f6a/stacks/test-stack/97a3f543-8136-4570-920e-fd7605c989d6']"
outputs:
description: Output returned by the Stack.
type: list of dict
sample: "{'description': 'IP address of server1 in private network',
'output_key': 'server1_private_ip',
'output_value': '10.1.10.103'}"
parameters:
description: Parameters of the current Stack
type: dict
sample: "{'OS::project_id': '7f6a3a3e01164a4eb4eecb2ab7742101',
'OS::stack_id': '97a3f543-8136-4570-920e-fd7605c989d6',
'OS::stack_name': 'test-stack',
'stack_status': 'CREATE_COMPLETE',
'stack_status_reason': 'Stack CREATE completed successfully',
'status': 'COMPLETE',
'template_description': 'HOT template to create a new instance and networks',
'timeout_mins': 60,
'updated_time': null}"
'''
from time import sleep
from distutils.version import StrictVersion
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
def _create_stack(module, stack, cloud):
try:
stack = cloud.create_stack(module.params['name'],
template_file=module.params['template'],
environment_files=module.params['environment'],
timeout=module.params['timeout'],
wait=True,
rollback=module.params['rollback'],
**module.params['parameters'])
stack = cloud.get_stack(stack.id, None)
if stack.stack_status == 'CREATE_COMPLETE':
return stack
        else:
            module.fail_json(msg="Failure in creating stack: {0}".format(stack))
except shade.OpenStackCloudException as e:
module.fail_json(msg=str(e))
def _update_stack(module, stack, cloud):
try:
stack = cloud.update_stack(
module.params['name'],
template_file=module.params['template'],
environment_files=module.params['environment'],
timeout=module.params['timeout'],
rollback=module.params['rollback'],
wait=module.params['wait'],
**module.params['parameters'])
if stack['stack_status'] == 'UPDATE_COMPLETE':
return stack
else:
module.fail_json(msg = "Failure in updating stack: %s" %
stack['stack_status_reason'])
except shade.OpenStackCloudException as e:
module.fail_json(msg=str(e))
def _system_state_change(module, stack, cloud):
state = module.params['state']
if state == 'present':
if not stack:
return True
if state == 'absent' and stack:
return True
return False
def main():
argument_spec = openstack_full_argument_spec(
name=dict(required=True),
template=dict(default=None),
environment=dict(default=None, type='list'),
parameters=dict(default={}, type='dict'),
rollback=dict(default=False, type='bool'),
timeout=dict(default=3600, type='int'),
state=dict(default='present', choices=['absent', 'present']),
)
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(argument_spec,
supports_check_mode=True,
**module_kwargs)
# stack API introduced in 1.8.0
if not HAS_SHADE or (StrictVersion(shade.__version__) < StrictVersion('1.8.0')):
module.fail_json(msg='shade 1.8.0 or higher is required for this module')
state = module.params['state']
name = module.params['name']
# Check for required parameters when state == 'present'
if state == 'present':
for p in ['template']:
if not module.params[p]:
module.fail_json(msg='%s required with present state' % p)
try:
cloud = shade.openstack_cloud(**module.params)
stack = cloud.get_stack(name)
if module.check_mode:
module.exit_json(changed=_system_state_change(module, stack,
cloud))
if state == 'present':
if not stack:
stack = _create_stack(module, stack, cloud)
else:
stack = _update_stack(module, stack, cloud)
changed = True
module.exit_json(changed=changed,
stack=stack,
id=stack.id)
elif state == 'absent':
if not stack:
changed = False
else:
changed = True
if not cloud.delete_stack(name, wait=module.params['wait']):
module.fail_json(msg='delete stack failed for stack: %s' % name)
module.exit_json(changed=changed)
except shade.OpenStackCloudException as e:
module.fail_json(msg=str(e))
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()
| gpl-3.0 | 5,956,852,945,441,309,000 | 32.936567 | 109 | 0.587026 | false |
craighiller/serendipity | imgur.py | 1 | 1275 | #!/usr/bin/env python3
import json, sys, time as dt, pprint, math
import urllib
import imgur_config
from Imgur.Factory import Factory
from Imgur.Auth.Expired import Expired
try:
    from urllib.request import urlopen as UrlLibOpen
    from urllib.request import HTTPError
    from urllib.parse import urlencode
except ImportError:
    from urllib2 import urlopen as UrlLibOpen
    from urllib2 import HTTPError
    from urllib import urlencode
def center_pad(s, length):
num_dashes = float(length - len(s) - 2) / 2
num_dashes_left = math.floor(num_dashes)
num_dashes_right = math.ceil(num_dashes)
return ('=' * num_dashes_left) + ' ' + s + ' ' + ('=' * num_dashes_right)
def two_column_with_period(left, right, length):
num_periods = (length - (len(left) + len(right) + 2))
return left + ' ' + ('.' * num_periods) + ' ' + right
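# Illustrative sketch (added for clarity, not part of the original script),
# with hypothetical inputs:
#   center_pad("title", 11)                   -> '== title =='
#   two_column_with_period("name", "42", 12)  -> 'name .... 42'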
def upload(image, name):
#config = imgur_config.config()
#factory = Factory(config)
#
# action = "upload"
#
# #handle_unauthorized_commands(factory, "upload")
#
# imgur = factory.build_api()
#
# req = factory.build_request_upload_from_data(image, name)
# res = imgur.retrieve(req)
# return(res['link'])
    data = urlencode({"image": image, "name": name}).encode("utf-8")
    u = UrlLibOpen("https://api.imgur.com/3/image", data)
return u | mit | -5,959,340,610,859,695,000 | 28 | 77 | 0.644706 | false |
ProfessorX/Config | .PyCharm30/system/python_stubs/-1247971765/pwd.py | 1 | 4787 | # encoding: utf-8
# module pwd
# from (built-in)
# by generator 1.135
"""
This module provides access to the Unix password database.
It is available on all Unix versions.
Password database entries are reported as 7-tuples containing the following
items from the password database (see `<pwd.h>'), in order:
pw_name, pw_passwd, pw_uid, pw_gid, pw_gecos, pw_dir, pw_shell.
The uid and gid items are integers, all others are strings. An
exception is raised if the entry asked for cannot be found.
"""
# no imports
# functions
def getpwall(): # real signature unknown; restored from __doc__
"""
getpwall() -> list_of_entries
Return a list of all available password database entries, in arbitrary order.
See help(pwd) for more on password database entries.
"""
pass
def getpwnam(name): # real signature unknown; restored from __doc__
"""
getpwnam(name) -> (pw_name,pw_passwd,pw_uid,
pw_gid,pw_gecos,pw_dir,pw_shell)
Return the password database entry for the given user name.
See help(pwd) for more on password database entries.
"""
pass
def getpwuid(uid): # real signature unknown; restored from __doc__
"""
getpwuid(uid) -> (pw_name,pw_passwd,pw_uid,
pw_gid,pw_gecos,pw_dir,pw_shell)
Return the password database entry for the given numeric user ID.
See help(pwd) for more on password database entries.
"""
pass
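# Illustrative usage (added for clarity, not part of the generated stub);
# the values shown are hypothetical:
#   import pwd
#   entry = pwd.getpwnam("root")
#   entry.pw_uid, entry.pw_dir   # -> 0, "/root"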
# classes
from .tuple import tuple
class struct_passwd(tuple):
"""
pwd.struct_passwd: Results from getpw*() routines.
This object may be accessed either as a tuple of
(pw_name,pw_passwd,pw_uid,pw_gid,pw_gecos,pw_dir,pw_shell)
or via the object attributes as named in the above tuple.
"""
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
def __reduce__(self, *args, **kwargs): # real signature unknown
pass
def __repr__(self, *args, **kwargs): # real signature unknown
""" Return repr(self). """
pass
pw_dir = property(lambda self: '')
"""home directory
:type: string
"""
pw_gecos = property(lambda self: '')
"""real name
:type: string
"""
pw_gid = property(lambda self: 0)
"""group id
:type: int
"""
pw_name = property(lambda self: '')
"""user name
:type: string
"""
pw_passwd = property(lambda self: '')
"""password
:type: string
"""
pw_shell = property(lambda self: '')
"""shell program
:type: string
"""
pw_uid = property(lambda self: 0)
"""user id
:type: int
"""
n_fields = 7
n_sequence_fields = 7
n_unnamed_fields = 0
from .object import object
class __loader__(object):
"""
Meta path import for built-in modules.
All methods are either class or static methods to avoid the need to
instantiate the class.
"""
@classmethod
def find_module(cls, *args, **kwargs): # real signature unknown
"""
Find the built-in module.
If 'path' is ever specified then the search is considered a failure.
This method is deprecated. Use find_spec() instead.
"""
pass
@classmethod
def find_spec(cls, *args, **kwargs): # real signature unknown
pass
@classmethod
def get_code(cls, *args, **kwargs): # real signature unknown
""" Return None as built-in modules do not have code objects. """
pass
@classmethod
def get_source(cls, *args, **kwargs): # real signature unknown
""" Return None as built-in modules do not have source code. """
pass
@classmethod
def is_package(cls, *args, **kwargs): # real signature unknown
""" Return False as built-in modules are never packages. """
pass
@classmethod
def load_module(cls, *args, **kwargs): # real signature unknown
""" Load a built-in module. """
pass
def module_repr(module): # reliably restored by inspect
"""
Return repr for the module.
The method is deprecated. The import machinery does the job itself.
"""
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
__dict__ = None # (!) real value is ''
# variables with complex values
__spec__ = None # (!) real value is ''
| gpl-2.0 | -8,021,968,845,025,423,000 | 24.875676 | 101 | 0.607061 | false |
4thgen/DCGAN-CIFAR10 | main.py | 1 | 5341 | import os
from GAN import GAN
'''
from CGAN import CGAN
from infoGAN import infoGAN
from ACGAN import ACGAN
from EBGAN import EBGAN
from WGAN import WGAN
from DRAGAN import DRAGAN
from LSGAN import LSGAN
from BEGAN import BEGAN
'''
from utils import show_all_variables
import tensorflow as tf
import argparse
"""parsing and configuration"""
def parse_args():
desc = "Tensorflow implementation of GAN collections"
parser = argparse.ArgumentParser(description=desc)
parser.add_argument('--gan_type', type=str, default='GAN',
#choices=['GAN', 'CGAN', 'infoGAN', 'ACGAN', 'EBGAN', 'BEGAN', 'WGAN', 'DRAGAN', 'LSGAN'],
choices=['GAN'],
help='The type of GAN', required=True)
parser.add_argument('--dataset', type=str, default='mnist', choices=['mnist', 'fashion-mnist', 'celebA','cifar10'], #add cifar10
help='The name of dataset')
parser.add_argument('--epoch', type=int, default=20, help='The number of epochs to run')
parser.add_argument('--batch_size', type=int, default=100, help='The size of batch')
parser.add_argument('--checkpoint_dir', type=str, default='checkpoint',
help='Directory name to save the checkpoints')
parser.add_argument('--result_dir', type=str, default='results',
help='Directory name to save the generated images')
parser.add_argument('--log_dir', type=str, default='logs',
help='Directory name to save training logs')
return check_args(parser.parse_args())
"""checking arguments"""
def check_args(args):
# --checkpoint_dir
if not os.path.exists(args.checkpoint_dir):
os.makedirs(args.checkpoint_dir)
# --result_dir
if not os.path.exists(args.result_dir):
os.makedirs(args.result_dir)
# --result_dir
if not os.path.exists(args.log_dir):
os.makedirs(args.log_dir)
    # --epoch
    try:
        assert args.epoch >= 1
    except AssertionError:
        print('number of epochs must be larger than or equal to one')
        return None
    # --batch_size
    try:
        assert args.batch_size >= 1
    except AssertionError:
        print('batch size must be larger than or equal to one')
        return None
    return args
"""main"""
def main():
# parse arguments
args = parse_args()
if args is None:
exit()
# open session
with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
# declare instance for GAN
if args.gan_type == 'GAN':
gan = GAN(sess, epoch=args.epoch, batch_size=args.batch_size, dataset_name=args.dataset,
checkpoint_dir=args.checkpoint_dir, result_dir=args.result_dir, log_dir=args.log_dir)
'''
elif args.gan_type == 'CGAN':
gan = CGAN(sess, epoch=args.epoch, batch_size=args.batch_size, dataset_name=args.dataset,
checkpoint_dir=args.checkpoint_dir, result_dir=args.result_dir, log_dir=args.log_dir)
elif args.gan_type == 'ACGAN':
gan = ACGAN(sess, epoch=args.epoch, batch_size=args.batch_size, dataset_name=args.dataset,
checkpoint_dir=args.checkpoint_dir, result_dir=args.result_dir, log_dir=args.log_dir)
elif args.gan_type == 'infoGAN':
gan = infoGAN(sess, epoch=args.epoch, batch_size=args.batch_size, dataset_name=args.dataset,
checkpoint_dir=args.checkpoint_dir, result_dir=args.result_dir, log_dir=args.log_dir)
elif args.gan_type == 'EBGAN':
gan = EBGAN(sess, epoch=args.epoch, batch_size=args.batch_size, dataset_name=args.dataset,
checkpoint_dir=args.checkpoint_dir, result_dir=args.result_dir, log_dir=args.log_dir)
elif args.gan_type == 'WGAN':
gan = WGAN(sess, epoch=args.epoch, batch_size=args.batch_size, dataset_name=args.dataset,
checkpoint_dir=args.checkpoint_dir, result_dir=args.result_dir, log_dir=args.log_dir)
elif args.gan_type == 'DRAGAN':
gan = DRAGAN(sess, epoch=args.epoch, batch_size=args.batch_size, dataset_name=args.dataset,
checkpoint_dir=args.checkpoint_dir, result_dir=args.result_dir, log_dir=args.log_dir)
elif args.gan_type == 'LSGAN':
gan = LSGAN(sess, epoch=args.epoch, batch_size=args.batch_size, dataset_name=args.dataset,
checkpoint_dir=args.checkpoint_dir, result_dir=args.result_dir, log_dir=args.log_dir)
elif args.gan_type == 'BEGAN':
gan = BEGAN(sess, epoch=args.epoch, batch_size=args.batch_size, dataset_name=args.dataset,
checkpoint_dir=args.checkpoint_dir, result_dir=args.result_dir, log_dir=args.log_dir)
'''
else:
raise Exception("[!] There is no option for " + args.gan_type)
# build graph
gan.build_model()
# show network architecture
show_all_variables()
# launch the graph in a session
gan.train()
print(" [*] Training finished!")
# visualize learned generator
gan.visualize_results(args.epoch-1)
print(" [*] Testing finished!")
if __name__ == '__main__':
main()
| apache-2.0 | 5,915,934,955,121,080,000 | 41.055118 | 132 | 0.604568 | false |
rschaniel/BM25F | test_score.py | 1 | 4792 | #!/usr/bin/env python
# coding: utf-8
from BM25F.core import batch
from BM25F.core import bm25f
from BM25F.core import entropy
from BM25F.core import param_dict
from BM25F.core import weight
from BM25F.exp import bag_dict
from BM25F.exp import bag_jag
from BM25F.exp import bag_of_words
from BM25F.ja import Tokenizer
from math import log
import unittest
class TestScore(unittest.TestCase):
@classmethod
def setUpClass(cls):
tokenizer = Tokenizer()
cls.bj = bag_jag()
cls.bd0 = bag_dict().read(tokenizer, {
'_id': '0',
'title': 'テストデータ',
'body': 'テスト',
'anchor': 'モニタ',
})
cls.bj.append(cls.bd0)
cls.bd1 = bag_dict().read(tokenizer, {
'_id': '1',
'title': 'テストデータ',
'body': 'テスト',
})
cls.bj.append(cls.bd1)
cls.bd2 = bag_dict().read(tokenizer, {
'_id': '2',
'body': 'テスト',
})
cls.bj.append(cls.bd2)
cls.bd3 = bag_dict().read(tokenizer, {
'_id': '3',
})
cls.bj.append(cls.bd3)
cls.query = bag_of_words()
cls.query['テスト'] = 1
cls.query['モニタ'] = 1
def test_weight(self):
self.assertEqual(
(1 * 1.0) / ((1 - 0.75) + 0.75 * 2 / (4 / 4)) +
(1 * 1.0) / ((1 - 0.75) + 0.75 * 1 / (3 / 4)) +
(0 * 1.0) / ((1 - 0.75) + 0.75 * 1 / (1 / 4)),
# ~ 1.3714285714285714
weight('テスト', self.bd0, self.bj))
self.assertEqual(
(0 * 1.0) / ((1 - 0.75) + 0.75 * 2 / (4 / 4)) +
(0 * 1.0) / ((1 - 0.75) + 0.75 * 1 / (3 / 4)) +
(1 * 1.0) / ((1 - 0.75) + 0.75 * 1 / (1 / 4)),
# ~ 0.3076923076923077
weight('モニタ', self.bd0, self.bj))
def test_weight_continuous(self):
tokenizer = Tokenizer()
bj = bag_jag()
bd0 = bag_dict().read(tokenizer, {'~pv': 1})
bj.append(bd0)
bd1 = bag_dict().read(tokenizer, {'~pv': 10})
bj.append(bd1)
bd2 = bag_dict().read(tokenizer, {'~pv': 100})
bj.append(bd2)
self.assertEqual((1 * 1.0), weight('ダミー', bd0, bj))
self.assertEqual((10 * 1.0), weight('ダミー', bd1, bj))
self.assertEqual((100 * 1.0), weight('ダミー', bd2, bj))
def test_boost(self):
boost = param_dict(default=1.0)
boost['title'] = 100
boost['body'] = 0.1
self.assertEqual(
(1 * 100) / ((1 - 0.75) + 0.75 * 2 / (4 / 4)) +
(1 * 0.1) / ((1 - 0.75) + 0.75 * 1 / (3 / 4)) +
(0 * 1.0) / ((1 - 0.75) + 0.75 * 1 / (1 / 4)),
weight('テスト', self.bd0, self.bj, boost=boost))
def test_b(self):
b = param_dict(default=0.75)
b['title'] = 0.50
b['body'] = 1.00
self.assertEqual(
(1 * 1.0) / ((1 - 0.50) + 0.50 * 2 / (4 / 4)) +
(1 * 1.0) / ((1 - 1.00) + 1.00 * 1 / (3 / 4)) +
(0 * 1.0) / ((1 - 0.75) + 0.75 * 1 / (1 / 4)),
weight('テスト', self.bd0, self.bj, b=b))
def test_entropy(self):
self.assertEqual(
log((4 - 3 + 0.5) / (3 + 0.5)),
# ~ -0.8472978603872037
entropy('テスト', self.bj))
self.assertEqual(
log((4 - 1 + 0.5) / (1 + 0.5)),
# ~ 0.8472978603872037
entropy('モニタ', self.bj))
def test_entropy_cache(self):
obj = batch('_id', self.query, self.bj)
self.assertEqual(
log((4 - 3 + 0.5) / (3 + 0.5)),
obj.entropy_cache['テスト'])
self.assertEqual(
log((4 - 1 + 0.5) / (1 + 0.5)),
obj.entropy_cache['モニタ'])
def test_bm25f(self):
self.assertAlmostEqual(
1.37142857142857 / (1.2 + 1.37142857142857) * -0.84729786038720 +
0.30769230769230 / (1.2 + 0.30769230769230) * 0.84729786038720,
bm25f(self.query, self.bd0, self.bj))
def test_bm25f_batch(self):
obj = batch('_id', self.query, self.bj)
bds = [self.bd0, self.bd1, self.bd2, self.bd3]
expected1 = ['3']
self.assertEqual(expected1, obj.top(1, bds))
expected3 = ['3', '0', '2']
self.assertEqual(expected3, obj.top(3, bds))
expected5 = ['3', '0', '2', '1']
self.assertEqual(expected5, obj.top(5, bds))
def test_k1(self):
self.assertAlmostEqual(
1.37142857142857 / (2.0 + 1.37142857142857) * -0.84729786038720 +
0.30769230769230 / (2.0 + 0.30769230769230) * 0.84729786038720,
bm25f(self.query, self.bd0, self.bj, k1=2.0))
if __name__ == '__main__':
unittest.main()
| mit | 1,584,833,428,379,031,000 | 32.811594 | 77 | 0.475139 | false |
josephmfaulkner/stoqs | stoqs/loaders/MolecularEcology/loadSIMZ_oct2014.py | 1 | 5630 | #!/usr/bin/env python
__author__ = 'Mike McCann'
__copyright__ = '2013'
__license__ = 'GPL v3'
__contact__ = 'mccann at mbari.org'
__doc__ = '''
Master loader for all October 2014 SIMZ activities.
Mike McCann
MBARI 24 October 2013
@var __date__: Date of last svn commit
@undocumented: __doc__ parser
@status: production
@license: GPL
'''
import os
import sys
import datetime
parentDir = os.path.join(os.path.dirname(__file__), "../")
sys.path.insert(0, parentDir) # So that CANON is found
from CANON import CANONLoader
cl = CANONLoader('stoqs_simz_oct2014', 'Sampling and Identification of Marine Zooplankton - October 2014',
description = 'Rachel Carson and Dorado surveys in Northern Monterey Bay',
# Assign a GeoOrigin in the middle of the terrain that is an appropriate
# location for rotateYUp, making fly navigation work well. All other GCC
# positions are in the GeoOrigin with rotateYUp='true' coordinate system
x3dTerrains = {
'http://dods.mbari.org/terrain/x3d/Monterey25_10x/Monterey25_10x_scene.x3d': {
'position': '-5334.11754 77527.85269 57495.84643',
'orientation': '-0.99840 0.05415 0.01651 0.88794',
'centerOfRotation': '1973.702 -553.761 -10885.8333',
'VerticalExaggeration': '10',
'geoOrigin': '36.75 -122 0',
'speed': '0.1',
}
},
grdTerrain = os.path.join(parentDir, 'Monterey25.grd')
)
startDatetime = datetime.datetime(2014, 10, 15)
endDatetime = datetime.datetime(2014, 10, 23)
# Aboard the Carson use zuma:
##cl.tdsBase = 'http://zuma.rc.mbari.org/thredds/'
# On shore, use the odss server:
cl.tdsBase = 'http://odss.mbari.org/thredds/'
cl.dodsBase = cl.tdsBase + 'dodsC/'
# 2-second decimated dorado data
cl.dorado_base = 'http://dods.mbari.org/opendap/data/auvctd/surveys/2014/netcdf/' # Dorado archive
cl.dorado_files = [
'Dorado389_2014_289_04_289_04_decim.nc',
'Dorado389_2014_290_00_290_00_decim.nc',
'Dorado389_2014_293_00_293_00_decim.nc',
'Dorado389_2014_294_00_294_00_decim.nc',
'Dorado389_2014_295_00_295_00_decim.nc',
]
cl.dorado_parms = [ 'temperature', 'oxygen', 'nitrate', 'bbp420', 'bbp700',
'fl700_uncorr', 'salinity', 'biolume',
'sepCountList', 'mepCountList' ]
# Rachel Carson Underway CTD
cl.rcuctd_base = cl.dodsBase + 'SIMZ/2014_Oct/carson/uctd/'
cl.rcuctd_files = [
'28914plm01.nc', '29014plm01.nc', '29314plm01.nc', '29414plm01.nc', '29514plm01.nc',
]
cl.rcuctd_parms = [ 'TEMP', 'PSAL', 'xmiss', 'wetstar' ]
# Rachel Carson Profile CTD
cl.pctdDir = 'SIMZ/2014_Oct/carson/pctd/'
cl.rcpctd_base = cl.dodsBase + cl.pctdDir
cl.rcpctd_files = [
'SIMZ2014C40.nc', 'SIMZ2014C41.nc', 'SIMZ2014C42.nc', 'SIMZ2014C43.nc', 'SIMZ2014C44.nc',
'SIMZ2014C45.nc', 'SIMZ2014C46.nc', 'SIMZ2014C47.nc', 'SIMZ2014C48.nc', 'SIMZ2014C49.nc',
'SIMZ2014C50.nc', 'SIMZ2014C51.nc', 'SIMZ2014C52.nc', 'SIMZ2014C53.nc', 'SIMZ2014C54.nc',
'SIMZ2014C55.nc', 'SIMZ2014C56.nc', 'SIMZ2014C57.nc', 'SIMZ2014C58.nc', 'SIMZ2014C59.nc',
'SIMZ2014C60.nc', 'SIMZ2014C61.nc', 'SIMZ2014C62.nc', 'SIMZ2014C63.nc', 'SIMZ2014C64.nc',
'SIMZ2014C65.nc', 'SIMZ2014C66.nc', 'SIMZ2014C67.nc', 'SIMZ2014C68.nc', 'SIMZ2014C69.nc',
]
cl.rcpctd_parms = [ 'TEMP', 'PSAL', 'xmiss', 'ecofl', 'oxygen' ]
# Mooring M1 Combined file produced by DPforSSDS processing - for just the duration of the campaign
cl.m1_base = 'http://dods.mbari.org/opendap/data/ssdsdata/deployments/m1/201407/'
cl.m1_files = ['OS_M1_20140716hourly_CMSTV.nc']
cl.m1_parms = [ 'eastward_sea_water_velocity_HR', 'northward_sea_water_velocity_HR',
'SEA_WATER_SALINITY_HR', 'SEA_WATER_TEMPERATURE_HR', 'SW_FLUX_HR', 'AIR_TEMPERATURE_HR',
'EASTWARD_WIND_HR', 'NORTHWARD_WIND_HR', 'WIND_SPEED_HR'
]
cl.m1_startDatetime = startDatetime
cl.m1_endDatetime = endDatetime
# SubSample data files from /mbari/BOG_Archive/ReportsForSTOQS/GOC12/ copied to local GOC12 dir
cl.subsample_csv_base = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'SIMZOct2013')
cl.subsample_csv_files = [
#'2013_SIMZ_AUV_STOQS.csv',
#'2013_SIMZ_Niskins_STOQS.csv',
##'2013_SIMZ_TowNets_STOQS.csv',
]
# Execute the load
cl.process_command_line()
if cl.args.test:
##cl.loadL_662(stride=1)
cl.loadDorado(stride=100)
cl.loadRCuctd(stride=100)
cl.loadRCpctd(stride=1)
cl.loadM1(stride=10)
#cl.loadSubSamples()
elif cl.args.optimal_stride:
##cl.loadL_662(stride=1)
cl.loadDorado(stride=1)
cl.loadRCuctd(stride=1)
cl.loadRCpctd(stride=1)
cl.loadM1(stride=1)
#cl.loadSubSamples()
else:
cl.stride = cl.args.stride
##cl.loadL_662()
cl.loadDorado()
cl.loadRCuctd()
cl.loadRCpctd()
cl.loadM1()
#cl.loadSubSamples()
# Add any X3D Terrain information specified in the constructor to the database - must be done after a load is executed
cl.addTerrainResources()
print "All Done."
| gpl-3.0 | 6,166,183,970,358,253,000 | 39.214286 | 118 | 0.594849 | false |
voidpp/vcp | vcp/project.py | 1 | 12036 | import os
import shutil
import logging
from voidpp_tools.terminal import get_size
from collections import OrderedDict
import re
from datetime import timedelta, datetime
from .repository_command_result_box import RepositoryCommandResultBox
from .exceptions import ProjectException, RepositoryCommandException
from .project_languages import LanguageFactory
logger = logging.getLogger(__name__)
class TopologicalSorter(object):
"""
Implements Tarjan's algorithm.
"""
def __init__(self, nodes):
self.nodes = nodes
self.marks = {n: None for n in self.nodes}
self.result = []
def get_mark(self, node):
return self.marks[node.name]
def has_temp_mark(self, node):
return self.get_mark(node) == 'temp'
def has_permanent_mark(self, node):
return self.get_mark(node) == 'permanent'
def sort(self):
while True:
has_unmarked = False
for name, node in list(self.nodes.items()):
if not self.has_permanent_mark(node):
has_unmarked = True
self.__visit(node)
if not has_unmarked:
return self.result
def __visit(self, node):
if self.has_temp_mark(node):
return
if not self.has_permanent_mark(node):
self.marks[node.name] = 'temp'
for dep in list(node.get_dependent_projects().values()):
self.__visit(dep)
self.marks[node.name] = 'permanent'
self.result.insert(0, node)
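def _topological_sorter_example():
    """Hedged usage sketch added for illustration; not part of the original
    vcp module. TopologicalSorter only needs objects exposing ``name`` and
    ``get_dependent_projects()``, so the invented ``_DemoNode`` stand-in is
    enough to show the ordering it produces."""
    class _DemoNode(object):
        def __init__(self, name, deps=()):
            self.name = name
            self._deps = deps
        def get_dependent_projects(self):
            return {d.name: d for d in self._deps}
    lib = _DemoNode('lib')
    app = _DemoNode('app', deps=(lib,))
    order = TopologicalSorter({'app': app, 'lib': lib}).sort()
    # Dependents come first: 'app' precedes the 'lib' it depends on.
    return [node.name for node in order]  # -> ['app', 'lib']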
class Project(object):
def __init__(self, name, vcp, data = None):
self.name = name
self.description = None
self.languages = []
self.repo = dict(
url = None,
type = None,
)
self.dependencies = {}
self.system_dependencies = {}
self.vcp = vcp
if data:
self.data = data
@property
def last_status(self):
for lang in self.languages:
if not lang.env.last_status:
return False
return True
@property
def repositories(self):
projects = list(self.get_dependent_projects().values()) + [self]
return [p.name for p in projects]
@property
def path(self):
try:
return self.vcp.repositories[self.name].path
except KeyError:
return None
@property
def initialized(self):
return self.name in self.vcp.repositories
@property
def data(self):
return OrderedDict([
('description', self.description),
('dependencies', self.dependencies),
('repo', self.repo),
('languages', [l.name for l in self.languages]),
('system_dependencies', self.system_dependencies),
])
@data.setter
def data(self, value):
self.description = value['description']
self.repo = value['repo']
self.dependencies = value['dependencies']
self.system_dependencies = value['system_dependencies']
self.languages = self.vcp.language_factory.create(self, value['languages'])
def set_dependencies_state(self):
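        """Set each direct dependency's repository to the ref recorded for it;
        repository command failures are logged rather than raised."""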
for name in self.get_dependent_projects(recursive = False):
ref = self.dependencies[name]
try:
self.vcp.repositories[name].set_ref(ref)
logger.info("Set ref '%s' for '%s'", ref, name)
except RepositoryCommandException as e:
logger.error(e.output)
def search_for_ref_in_deps(self, project_name, projects):
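        """Return the ref that the given projects record for ``project_name``,
        raising if none of them records one or if two of them disagree."""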
ref = None
refprj = None
for prj in projects:
depref = prj.dependencies.get(project_name)
if depref is None:
continue
if ref is not None and ref != depref:
raise Exception("Found multiple refs for dep '{}', {}:{} and {}:{} ".format(project_name, prj.name, depref, refprj, ref))
ref = depref
refprj = prj.name
if ref is None:
raise Exception("Cannot find ref for '{}' but it's impossible!".format(project_name))
return ref
def init(self, base_path, status, force = False, install_deps = True, init_languages = True, ref = 'master'):
repo_exists = self.name in self.vcp.repositories
logger.debug("Start project init: '%s', force = %s, install_deps = %s, init_languages = %s", self.name, force, install_deps, init_languages)
if repo_exists and not force:
logger.debug("Project '%s' has been initialized already, skipping...", self.name)
return True
repo_dir = os.path.join(base_path, self.name)
if install_deps:
projects = self.get_sorted_dependencies()
logger.info("Dependencies of %s: %s", self.name, [p.name for p in projects])
for project in reversed(projects):
try:
ref = self.dependencies[project.name]
except KeyError as e:
ref = self.search_for_ref_in_deps(project.name, projects)
if not project.init(base_path, status, force, install_deps = False, init_languages = init_languages, ref = ref):
return False
if not repo_exists or init_languages:
tw = get_size()['cols']
label = "< Initializie {} >".format(self.name)
pre_len = 10
logger.info('\n' + '-' * pre_len + label + '-' * (tw - pre_len - len(label)))
logger.info("URL: '{}', path: '{}'".format(self.repo['url'], repo_dir))
status[self.name] = True
try:
if self.vcp.system_package_manager_handler:
packages = self.vcp.system_package_manager_handler.get_not_installed_packages(self)
if len(packages):
logger.error("Need to install these system packages: %s", ', '.join(packages))
return False
# create repo config
repo = self.vcp.repo_factory.create(repo_dir, self.repo['type'], self.name)
if repo_exists:
repo.update()
else:
# create folder
os.mkdir(repo_dir)
repo.init(self.repo['url'], ref)
self.vcp.repositories[self.name] = repo
# initialize language specific stuffs
if not repo_exists or init_languages:
for lang in self.languages:
lang.init()
if not lang.env.get_status():
status[self.name] = False
return True
except (Exception, KeyboardInterrupt) as e:
logger.exception("Error during initialize '{}'. Reverting all the work. Traceback:".format(self.name))
self.purge()
return False
def purge(self):
path = self.path
logger.info("Purge project '{}'".format(self.name))
for lang in self.languages:
lang.purge()
if self.name in self.vcp.repositories:
del self.vcp.repositories[self.name]
logger.debug("Delete repository config")
if path and os.path.isdir(path):
shutil.rmtree(path)
logger.debug("Delete repository directory '%s'", path)
def install_to(self, project, env):
for lang in self.languages:
if type(env) == type(lang.env):
lang.install_to(project, env)
def get_sorted_dependencies(self, remove_indirect_deps = False):
topo = TopologicalSorter(self.get_dependent_projects())
all_dep = topo.sort()
        if remove_indirect_deps:
            # Build a new list rather than removing items from all_dep while
            # iterating over it, which would skip elements.
            all_dep = [prj for prj in all_dep if prj.name in self.dependencies]
return all_dep
def get_dependent_projects(self, recursive = True):
"""Get the dependencies of the Project
Args:
recursive (bool): add the dependant project's dependencies too
Returns:
dict of project name and project instances
"""
projects = {}
for name, ref in list(self.dependencies.items()):
try:
prj = self.vcp.projects[name]
except KeyError:
logger.error("Unknown project '%s' in project '%s' dependencies!", name, self.name)
continue
projects[name] = prj
if recursive:
projects.update(prj.get_dependent_projects())
return projects
def news(self, fromcache):
for name in self.repositories:
repo = self.vcp.repositories[name]
if not fromcache:
repo.fetch()
commits = repo.get_new_commits()
if len(commits):
yield RepositoryCommandResultBox(repo, "\n".join(commits))
def unreleased(self):
for name in self.repositories:
repo = self.vcp.repositories[name]
commits = repo.get_commits_from_last_tag()
if len(commits):
yield RepositoryCommandResultBox(repo, "\n".join(commits))
def diff(self):
for name in self.repositories:
repo = self.vcp.repositories[name]
diff = repo.diff()
if len(diff):
yield RepositoryCommandResultBox(repo, diff)
def pushables(self, remote):
for name in self.repositories:
repo = self.vcp.repositories[name]
commits = repo.pushables(remote)
if len(commits):
yield RepositoryCommandResultBox(repo, "\n".join(commits))
def untracked(self):
for name in self.repositories:
repo = self.vcp.repositories[name]
files = repo.get_untracked_files()
if len(files):
yield RepositoryCommandResultBox(repo, "\n".join(files))
def dirty(self):
for name in self.repositories:
repo = self.vcp.repositories[name]
files = repo.get_dirty_files()
if len(files):
yield RepositoryCommandResultBox(repo, "\n".join(files))
def fetch(self):
for name in self.repositories:
repo = self.vcp.repositories[name]
res = repo.fetch()
if len(res):
yield RepositoryCommandResultBox(repo, res)
def standup(self, length):
lengths = OrderedDict([
('w', 60 * 24 * 7),
('d', 60 * 24),
('h', 60),
('m', 1),
])
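        # A length spec combines optional week/day/hour/minute parts in that
        # order, e.g. '1w2d3h4m' or just '45m'; each part is converted to
        # minutes using the multipliers above.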
decode_pattern = re.compile(''.join(['([\d]+{})?'.format(k) for k in lengths]))
        res = decode_pattern.match(length)
        # Every group in the pattern is optional, so the match itself always
        # succeeds; require at least one component for a valid length spec.
        if not res or not any(res.groups()):
            raise Exception("Invalid length '%s', expected something like '1w2d3h4m'" % length)
length_items = list(lengths)
value = 0
for idx, grp in enumerate(res.groups()):
if grp is None:
continue
abbr = length_items[idx]
val = int(grp[:-1])
value += val * lengths[abbr]
time_len = timedelta(minutes = value)
since = datetime.now() - time_len
for name in self.repositories:
repo = self.vcp.repositories[name]
res = repo.get_own_commits_since(since.isoformat())
if len(res):
yield RepositoryCommandResultBox(repo, res)
def status(self):
for name in self.repositories:
repo = self.vcp.repositories[name]
yield RepositoryCommandResultBox(repo, repo.status())
def reset(self):
for name in self.repositories:
repo = self.vcp.repositories[name]
yield RepositoryCommandResultBox(repo, repo.reset())
def cmd(self, command):
for repo_name in self.repositories:
repo = self.vcp.repositories[repo_name]
res = repo.cmd(command)
if len(res):
yield RepositoryCommandResultBox(repo, res)
def __repr__(self):
return "<Project: %s>" % self.__dict__
| mit | 7,502,242,462,261,825,000 | 32.34072 | 148 | 0.558657 | false |
rlutz/xorn | tests/gaf/complex.py | 1 | 3924 | # Copyright (C) 2013-2019 Roland Lutz
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import StringIO
import xorn.proxy
import xorn.storage
import gaf.clib
import gaf.fileformat
import gaf.read
import gaf.ref
SYMBOL_SYM = """v 20140308 2
P 100 200 200 200 1 0 0
{
T 25 250 5 8 0 1 0 0 1
pinnumber=1
T 25 250 5 8 0 1 0 0 1
pinseq=1
T 25 250 5 8 0 1 0 0 1
pinlabel=1
}
B 200 0 400 400 3 10 1 0 -1 -1 0 -1 -1 -1 -1 -1
"""
COMPONENT0_SCH = """v 20140308 2
C 0 0 1 0 0 symbol.sym
{
}
"""
COMPONENT1_SCH = """v 20140308 2
C 0 0 1 0 0 EMBEDDEDsymbol.sym
[
P 100 200 200 200 1 0 0
{
T 25 250 5 8 0 1 0 0 1
pinnumber=1
T 25 250 5 8 0 1 0 0 1
pinseq=1
T 25 250 5 8 0 1 0 0 1
pinlabel=1
}
B 200 0 400 400 3 10 1 0 -1 -1 0 -1 -1 -1 -1 -1
]
{
}
"""
PICTURE0_SCH = """v 20140308 2
G 0 0 1000 1000 0 0 0
hello-world
"""
PICTURE1_SCH = """v 20140308 2
G 0 0 1000 1000 0 0 1
hello-world
SGVsbG8gd29ybGQK
.
"""
class MockupSource:
def list(self):
return ['symbol.sym']
def get(self, symbol):
if symbol != 'symbol.sym':
raise ValueError
rev = gaf.read.read_file(
StringIO.StringIO(SYMBOL_SYM), '<test data>',
gaf.fileformat.FORMAT_SYM)
assert rev.is_transient()
return rev
gaf.clib.add_source(MockupSource(), '<test source>')
for data, load_symbols, embedded in [(COMPONENT0_SCH, False, False),
(COMPONENT1_SCH, False, True),
(COMPONENT0_SCH, True, False),
(COMPONENT1_SCH, True, True)]:
rev = gaf.read.read_file(StringIO.StringIO(data), '<test data>',
gaf.fileformat.FORMAT_SCH,
load_symbols = load_symbols)
ob, = rev.toplevel_objects()
symbol = ob.data().symbol
assert isinstance(symbol, gaf.ref.Symbol)
assert symbol.basename == 'symbol.sym'
assert symbol.embedded == embedded
if not load_symbols and not embedded:
assert symbol.prim_objs is None
continue
assert isinstance(symbol.prim_objs, xorn.storage.Revision)
assert symbol.prim_objs.is_transient() == embedded
pin, box = xorn.proxy.RevisionProxy(symbol.prim_objs).toplevel_objects()
assert isinstance(pin.data(), xorn.storage.Net)
assert isinstance(box.data(), xorn.storage.Box)
assert len(pin.attached_objects()) == 3
for data in [COMPONENT0_SCH.replace('symbol.sym', 'EMBEDDEDsymbol.sym'),
COMPONENT1_SCH.replace('EMBEDDEDsymbol.sym', 'symbol.sym')]:
# Test if inconsistencies trigger an exception
try:
gaf.read.read_file(StringIO.StringIO(data), '<test data>',
gaf.fileformat.FORMAT_SCH)
except gaf.read.ParseError:
pass
else:
raise AssertionError
for data, embedded in [(PICTURE0_SCH, False),
(PICTURE1_SCH, True)]:
rev = gaf.read.read_file(StringIO.StringIO(data), '<test data>',
gaf.fileformat.FORMAT_SCH)
ob, = rev.toplevel_objects()
pixmap = ob.data().pixmap
assert isinstance(pixmap, gaf.ref.Pixmap)
assert pixmap.filename == 'hello-world'
assert pixmap.data == ('Hello world\n' if embedded else None)
assert pixmap.embedded == embedded
| gpl-2.0 | -2,673,034,951,242,729,500 | 28.727273 | 76 | 0.638634 | false |
windskyer/nova | nova/tests/functional/api_sample_tests/test_instance_usage_audit_log.py | 1 | 1995 | # Copyright 2012 Nebula, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import urllib
from oslo_config import cfg
from nova.tests.functional.api_sample_tests import api_sample_base
CONF = cfg.CONF
CONF.import_opt('osapi_compute_extension',
'nova.api.openstack.compute.legacy_v2.extensions')
class InstanceUsageAuditLogJsonTest(api_sample_base.ApiSampleTestBaseV21):
ADMIN_API = True
extension_name = "os-instance-usage-audit-log"
def _get_flags(self):
f = super(InstanceUsageAuditLogJsonTest, self)._get_flags()
f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
f['osapi_compute_extension'].append('nova.api.openstack.compute.'
'contrib.instance_usage_audit_log.'
'Instance_usage_audit_log')
return f
def test_show_instance_usage_audit_log(self):
response = self._do_get('os-instance_usage_audit_log/%s' %
urllib.quote('2012-07-05 10:00:00'))
subs = self._get_regexes()
self._verify_response('inst-usage-audit-log-show-get-resp',
subs, response, 200)
def test_index_instance_usage_audit_log(self):
response = self._do_get('os-instance_usage_audit_log')
subs = self._get_regexes()
self._verify_response('inst-usage-audit-log-index-get-resp',
subs, response, 200)
| gpl-2.0 | 5,870,727,372,018,153,000 | 38.9 | 78 | 0.653634 | false |
msabramo/github3.py | tests/unit/test_repos_repo.py | 3 | 5882 | from github3.repos.repo import Repository
from .helper import UnitHelper
class TestRepository(UnitHelper):
described_class = Repository
example_data = {
"id": 1296269,
"owner": {
"login": "octocat",
"id": 1,
"avatar_url": "https://github.com/images/error/octocat_happy.gif",
"gravatar_id": "somehexcode",
"url": "https://api.github.com/users/octocat"
},
"name": "Hello-World",
"full_name": "octocat/Hello-World",
"description": "This your first repo!",
"private": False,
"fork": False,
"url": "https://api.github.com/repos/octocat/Hello-World",
"html_url": "https://github.com/octocat/Hello-World",
"clone_url": "https://github.com/octocat/Hello-World.git",
"git_url": "git://github.com/octocat/Hello-World.git",
"ssh_url": "[email protected]:octocat/Hello-World.git",
"svn_url": "https://svn.github.com/octocat/Hello-World",
"mirror_url": "git://git.example.com/octocat/Hello-World",
"homepage": "https://github.com",
"language": None,
"forks": 9,
"forks_count": 9,
"watchers": 80,
"watchers_count": 80,
"size": 108,
"master_branch": "master",
"open_issues": 0,
"open_issues_count": 0,
"pushed_at": "2011-01-26T19:06:43Z",
"created_at": "2011-01-26T19:01:12Z",
"updated_at": "2011-01-26T19:14:43Z",
"organization": {
"login": "octocat",
"id": 1,
"avatar_url": "https://github.com/images/error/octocat_happy.gif",
"gravatar_id": "somehexcode",
"url": "https://api.github.com/users/octocat",
"type": "Organization"
},
"parent": {
"id": 1296269,
"owner": {
"login": "octocat",
"id": 1,
"avatar_url": "https://github.com/images/error/octocat.gif",
"gravatar_id": "somehexcode",
"url": "https://api.github.com/users/octocat"
},
"name": "Hello-World",
"full_name": "octocat/Hello-World",
"description": "This your first repo!",
"private": False,
"fork": True,
"url": "https://api.github.com/repos/octocat/Hello-World",
"html_url": "https://github.com/octocat/Hello-World",
"clone_url": "https://github.com/octocat/Hello-World.git",
"git_url": "git://github.com/octocat/Hello-World.git",
"ssh_url": "[email protected]:octocat/Hello-World.git",
"svn_url": "https://svn.github.com/octocat/Hello-World",
"mirror_url": "git://git.example.com/octocat/Hello-World",
"homepage": "https://github.com",
"language": None,
"forks": 9,
"forks_count": 9,
"watchers": 80,
"watchers_count": 80,
"size": 108,
"master_branch": "master",
"open_issues": 0,
"open_issues_count": 0,
"pushed_at": "2011-01-26T19:06:43Z",
"created_at": "2011-01-26T19:01:12Z",
"updated_at": "2011-01-26T19:14:43Z"
},
"source": {
"id": 1296269,
"owner": {
"login": "octocat",
"id": 1,
"avatar_url": "https://github.com/images/error/octocat.gif",
"gravatar_id": "somehexcode",
"url": "https://api.github.com/users/octocat"
},
"name": "Hello-World",
"full_name": "octocat/Hello-World",
"description": "This your first repo!",
"private": False,
"fork": True,
"url": "https://api.github.com/repos/octocat/Hello-World",
"html_url": "https://github.com/octocat/Hello-World",
"clone_url": "https://github.com/octocat/Hello-World.git",
"git_url": "git://github.com/octocat/Hello-World.git",
"ssh_url": "[email protected]:octocat/Hello-World.git",
"svn_url": "https://svn.github.com/octocat/Hello-World",
"mirror_url": "git://git.example.com/octocat/Hello-World",
"homepage": "https://github.com",
"language": None,
"forks": 9,
"forks_count": 9,
"watchers": 80,
"watchers_count": 80,
"size": 108,
"master_branch": "master",
"open_issues": 0,
"open_issues_count": 0,
"pushed_at": "2011-01-26T19:06:43Z",
"created_at": "2011-01-26T19:01:12Z",
"updated_at": "2011-01-26T19:14:43Z"
},
"has_issues": True,
"has_wiki": True,
"has_downloads": True
}
def test_asset(self):
"""Test retrieving an asset uses the right headers
The Releases section of the API is still in Beta and uses custom
headers
"""
assert self.instance.asset(0) is None
assert self.session.get.call_count == 0
self.instance.asset(1)
url = self.example_data['url'] + '/releases/assets/1'
self.session.get.assert_called_once_with(
url, headers={'Accept': 'application/vnd.github.manifold-preview'}
)
def test_latest_pages_build(self):
"""Test retrieving the most recent pages build."""
url = self.example_data['url'] + '/pages/builds/latest'
self.instance.latest_pages_build()
self.session.get.assert_called_once_with(url)
def test_pages(self):
"""Test retrieving information about a repository's page."""
url = self.example_data['url'] + '/pages'
self.instance.pages()
self.session.get.assert_called_once_with(url)
| bsd-3-clause | 6,162,640,146,678,408,000 | 38.213333 | 78 | 0.516321 | false |
mwillsey/crossbot | crossbot/migrations/0011_manage_predictor.py | 1 | 4482 | # Generated by Django 2.1.2 on 2018-11-04 20:09
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('crossbot', '0010_fix_time_model'),
]
operations = [
migrations.AlterModelOptions(
name='MiniCrosswordModel',
options={'managed': True},
),
migrations.AlterModelOptions(
name='ModelUser',
options={'managed': True},
),
migrations.AlterModelOptions(
name='ModelDate',
options={'managed': True},
),
migrations.AlterModelOptions(
name='ModelParams',
options={'managed': True},
),
migrations.DeleteModel('MiniCrosswordModel'),
migrations.DeleteModel('ModelUser'),
migrations.DeleteModel('ModelDate'),
migrations.DeleteModel('ModelParams'),
migrations.CreateModel(
name='Prediction',
fields=[
(
'id',
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID'
)
),
(
'user',
models.ForeignKey(
on_delete=models.deletion.CASCADE,
to='crossbot.CBUser'
)
),
('date', models.DateField()),
('prediction', models.FloatField()),
('residual', models.FloatField()),
],
),
migrations.CreateModel(
name='PredictionDate',
fields=[
(
'id',
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID'
)
),
('date', models.DateField()),
('difficulty', models.FloatField()),
('difficulty_25', models.FloatField()),
('difficulty_75', models.FloatField()),
],
),
migrations.CreateModel(
name='PredictionParameter',
fields=[
(
'id',
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID'
)
),
('time', models.FloatField()),
('time_25', models.FloatField()),
('time_75', models.FloatField()),
('satmult', models.FloatField()),
('satmult_25', models.FloatField()),
('satmult_75', models.FloatField()),
('bgain', models.FloatField()),
('bgain_25', models.FloatField()),
('bgain_75', models.FloatField()),
('bdecay', models.FloatField()),
('bdecay_25', models.FloatField()),
('bdecay_75', models.FloatField()),
('skill_dev', models.FloatField()),
('date_dev', models.FloatField()),
('sigma', models.FloatField()),
('lp', models.FloatField()),
('when_run', models.DateTimeField()),
],
),
migrations.CreateModel(
name='PredictionUser',
fields=[
(
'id',
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID'
)
),
(
'user',
models.ForeignKey(
on_delete=models.deletion.CASCADE,
to='crossbot.CBUser'
)
),
('skill', models.FloatField()),
('skill_25', models.FloatField()),
('skill_75', models.FloatField()),
],
),
migrations.AlterUniqueTogether(
name='prediction',
unique_together={('user', 'date')},
),
]
| gpl-3.0 | 4,466,042,734,457,438,000 | 32.447761 | 58 | 0.404284 | false |
concrete-mixer/concrete-mixer | soundcloud-fetch.py | 1 | 3207 | import random
import re
import sys
import os
import shutil
from multiprocessing import Pool
import requests
import soundcloud
from pydub import AudioSegment
import OSC
streams = []
filename = 'concrete.conf'
lines = open(filename, 'r').read().split('\n')
for line in lines:
matches = re.match(r'^stream(\d+)Url=(.*)$', line)
if matches:
stream = matches.group(1)
streams.append({
'stream': matches.group(1),
'url': matches.group(2),
'tmp_path': '/tmp/concreteMixerStream' + stream
})
if not len(streams):
sys.exit('No streams found to download')
osc_client = OSC.OSCClient()
# this is Concrete Mixer's own app key so please use it nicely
client_id = '11bab725274cff587d5908c18cd501c2'
def download_stream_files(stream_data):
# because of Soundcloud API's T&Cs we can't store files we download
# so purge whatever might have been there
tmp_path = stream_data['tmp_path']
url = stream_data['url']
try:
shutil.rmtree(tmp_path)
except Exception as e:
print(e)
if not os.path.isdir(tmp_path):
os.mkdir(tmp_path)
sc_client = soundcloud.Client(client_id=client_id)
result = sc_client.get(
'/resolve', url=url
)
tracks = []
if len(result.tracks):
for track in result.tracks:
if track['downloadable']:
tracks.append({
'id': track['id'],
'ext': track['original_format'],
})
else:
sys.exit('Could not download stream files: ' + stream_data['url'])
random.shuffle(tracks)
if not len(tracks):
sys.exit("NO SOUND FILES FOR STREAM {}".format(stream_data['url']))
for track_in in tracks:
strid = str(track_in['id'])
ext = track_in['ext']
path_id = tmp_path + '/' + strid + '.'
file_in = path_id + ext
print("Got file " + file_in)
needs_conversion = ext not in ['aiff', 'wav']
if needs_conversion:
file_out = path_id + 'wav'
else:
file_out = file_in
if not os.path.isfile(file_out):
with open(file_in, 'wb') as handle:
response = requests.get(
'https://api.soundcloud.com/tracks/{}'.format(strid) +
'/download?client_id={}'.format(client_id)
)
for block in response.iter_content(1024):
handle.write(block)
handle.close()
if needs_conversion:
track_out = AudioSegment.from_file(file_in, ext)
track_out.export(file_out, format="wav")
print("Got " + file_out + ", notifying")
notify_wav(file_out)
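# Added note: notify_wav posts a '/notifyfile' OSC message to whatever is
# listening on 127.0.0.1:2424 (presumably the Concrete Mixer audio engine),
# passing a literal '1' and the path of the freshly converted file.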
def notify_wav(file_out):
osc_client.connect(('127.0.0.1', 2424))
oscmsg = OSC.OSCMessage()
oscmsg.setAddress('/notifyfile')
oscmsg.append('1')
oscmsg.append(file_out)
osc_client.send(oscmsg)
if __name__ == '__main__':
pool = Pool(processes=len(streams))
pool.map(download_stream_files, streams)
print("SOUNDCLOUD FILES DOWNLOAD COMPLETE")
| gpl-2.0 | 5,974,928,612,801,205,000 | 23.860465 | 75 | 0.566573 | false |
CitrineInformatics/refkit | util/arxivid.py | 1 | 5282 | """
Functions for working with arXiv identifier strings.
"""
import re
from sets import Set
def extract(value):
"""
Attempt to extract an arXiv identifier from a string.
:param value: String to extract arXiv identifier from
:raises ValueError: If value does not contain an arXiv identifier
:returns: String with the arXiv identifier that was extracted
"""
res = _extractNewFormat(value)
if res is None:
res = _extractOldFormat(value)
if res is None:
raise ValueError('arXiv identifier could not be extracted from string')
return res
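# Illustrative check (added; not part of the original refkit module). The
# sample strings are made up, but they exercise both identifier formats
# handled by extract() above.
def _extract_examples():
    assert extract('see http://arxiv.org/abs/1207.0580v1') == '1207.0580'
    assert extract('cond-mat/0506438 discusses phonons') == 'cond-mat/0506438'
    return True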
def _extractNewFormat(value):
"""
Attempt to extract a new format arXiv identifier from a string.
:param value: String to extract arXiv identifier from
:returns: String with the arXiv identifier that was extracted
:returns: None if an arXiv identifier was not found
"""
matches = _newPattern.finditer(value)
for i in matches:
if _validateStartOfFormat(value, i) and _validateEndOfFormat(value, i):
return i.group('id')
return None
def _validateStartOfFormat(value, match):
"""
Make sure that characters preceding a matched arXiv identifier do not invalidate the match. The match is
considered invalid if it is preceded by any alphanumeric character.
:param value: String that was being searched
:param match: MatchObject returned from regular expression function
:returns: True if characters before match are valid
:returns: False if characters before match are not valid
"""
if match.start() > 0:
preString = value[match.start()-1:match.start()]
if re.match('\w', preString) is not None:
return False
return True
def _validateEndOfFormat(value, match):
"""
Make sure that characters following a matched arXiv identifier do not invalidate the match. The match is
considered invalid if it is followed by any alphanumeric character, unless that matches the pattern vI where
I is an integer.
:param value: String that was being searched
:param match: MatchObject returned from regular expression function
:returns: True if characters after match are valid
:returns: False if characters after match are not valid
"""
if match.end() < len(value):
postString = value[match.end():]
if re.match('\w', postString) is not None and re.match('[vV][0-9]', postString) is None:
return False
return True
def _extractOldFormat(value):
"""
Attempt to extract an old format arXiv identifier from a string.
:param value: String to extract arXiv identifier from
:returns: String with the arXiv identifier that was extracted
:returns: None if an arXiv identifier was not found
"""
    match = _oldPattern.search(value)
    if match is not None:
        id = match.group('id')
        if id.split('/')[0] in _subjects and _validateEndOfFormat(value, match):
            return id
    return None
# Regular expression to match new format of arXiv identifier
# This finds strings of the form IIII.IIII (where I are all integers) and saves the matching string as 'id'
_newPattern = re.compile('(?P<id>[0-9]{4}\.[0-9]{4})')
# Regular expression to match old format of arXiv identifier
# This find strings of the form [letters]letters/numbers where numbers is of length 7
_oldPattern = re.compile('(?P<id>[a-zA-Z][a-zA-Z\-\.]+/[0-9]{7})')
# List of arxiv subject areas
_subjects = Set([\
'stat', 'stat.AP', 'stat.CO', 'stat.ML', 'stat.ME', 'stat.TH', 'q-bio', 'q-bio.BM', 'q-bio.CB', 'q-bio.GN', \
'q-bio.MN', 'q-bio.NC', 'q-bio.OT', 'q-bio.PE', 'q-bio.QM', 'q-bio.SC', 'q-bio.TO', 'cs', 'cs.AR', 'cs.AI', \
'cs.CL', 'cs.CC', 'cs.CE', 'cs.CG', 'cs.GT', 'cs.CV', 'cs.CY', 'cs.CR', 'cs.DS', 'cs.DB', 'cs.DL', 'cs.DM', \
'cs.DC', 'cs.GL', 'cs.GR', 'cs.HC', 'cs.IR', 'cs.IT', 'cs.LG', 'cs.LO', 'cs.MS', 'cs.MA', 'cs.MM', 'cs.NI', \
'cs.NE', 'cs.NA', 'cs.OS', 'cs.OH', 'cs.PF', 'cs.PL', 'cs.RO', 'cs.SE', 'cs.SD', 'cs.SC', 'nlin', 'nlin.AO', \
'nlin.CG', 'nlin.CD', 'nlin.SI', 'nlin.PS', 'math', 'math.AG', 'math.AT', 'math.AP', 'math.CT', 'math.CA', \
'math.CO', 'math.AC', 'math.CV', 'math.DG', 'math.DS', 'math.FA', 'math.GM', 'math.GN', 'math.GT', 'math.GR', \
'math.HO', 'math.IT', 'math.KT', 'math.LO', 'math.MP', 'math.MG', 'math.NT', 'math.NA', 'math.OA', 'math.OC', \
'math.PR', 'math.QA', 'math.RT', 'math.RA', 'math.SP', 'math.ST', 'math.SG', 'astro-ph', 'cond-mat', \
'cond-mat.dis-nn', 'cond-mat.mes-hall', 'cond-mat.mtrl-sci', 'cond-mat.other', 'cond-mat.soft', \
'cond-mat.stat-mech', 'cond-mat.str-el', 'cond-mat.supr-con', 'gr-qc', 'hep-ex', 'hep-lat', 'hep-ph', \
'hep-th', 'math-ph', 'nucl-ex', 'nucl-th', 'physics', 'physics.acc-ph', 'physics.ao-ph', 'physics.atom-ph', \
'physics.atm-clus', 'physics.bio-ph', 'physics.chem-ph', 'physics.class-ph', 'physics.comp-ph', \
'physics.data-an', 'physics.flu-dyn', 'physics.gen-ph', 'physics.geo-ph', 'physics.hist-ph', 'physics.ins-det', \
'physics.med-ph', 'physics.optics', 'physics.ed-ph', 'physics.soc-ph', 'physics.plasm-ph', 'physics.pop-ph', \
'physics.space-ph', 'quant-ph'])
| mit | -8,739,828,537,139,025,000 | 45.743363 | 117 | 0.633283 | false |
rohangoel96/IRCLogParser | IRCLogParser/lib/validate.py | 1 | 1058 | def validate_RT_RL_CRT(in_data, ranges, fileName):
"""
Validates the values of curve fit parameters
Args:
in_data(list): calculated values of curve fit parameters
ranges(list of list): expected values of curve fit parameters
fileName(str): fileName
Returns:
null
"""
for i in xrange(len(in_data)):
if not ranges[i][0] <= in_data[i] <= ranges[i][1]:
            errorMessage(i, ranges[i], in_data[i], fileName)
def errorMessage(value_number, expected_range, actual_value, fileName):
"""
    Prints an error message if a value is not as expected
Args:
value_number(int): index of the value in in_data which is not as expected
expected_range(list): expected values of curve fit parameters
actual_value(int): calculated value of curve fit parameters
fileName(str): fileName
Returns:
null
"""
print "[Unexpected Value] of Arg", value_number, " @", fileName, "| EXPECTED_RANGE:", \
expected_range, "| GOT:", actual_value | mit | 4,672,944,337,430,084,000 | 30.147059 | 91 | 0.625709 | false |
schets/scikit-learn | sklearn/manifold/spectral_embedding_.py | 1 | 19647 | """Spectral Embedding"""
# Author: Gael Varoquaux <[email protected]>
# Wei LI <[email protected]>
# License: BSD 3 clause
import warnings
import numpy as np
from scipy import sparse
from scipy.linalg import eigh
from scipy.sparse.linalg import lobpcg
from ..base import BaseEstimator
from ..externals import six
from ..utils import check_random_state, check_array, check_symmetric
from ..utils.extmath import _deterministic_vector_sign_flip
from ..utils.graph import graph_laplacian
from ..utils.sparsetools import connected_components
from ..utils.arpack import eigsh
from ..metrics.pairwise import rbf_kernel
from ..neighbors import kneighbors_graph
def _graph_connected_component(graph, node_id):
"""Find the largest graph connected components that contains one
given node
Parameters
----------
graph : array-like, shape: (n_samples, n_samples)
adjacency matrix of the graph, non-zero weight means an edge
between the nodes
node_id : int
The index of the query node of the graph
Returns
-------
connected_components_matrix : array-like, shape: (n_samples,)
An array of bool value indicating the indexes of the nodes
belonging to the largest connected components of the given query
node
"""
connected_components_matrix = np.zeros(
shape=(graph.shape[0]), dtype=np.bool)
connected_components_matrix[node_id] = True
n_node = graph.shape[0]
for i in range(n_node):
last_num_component = connected_components_matrix.sum()
_, node_to_add = np.where(graph[connected_components_matrix] != 0)
connected_components_matrix[node_to_add] = True
if last_num_component >= connected_components_matrix.sum():
break
return connected_components_matrix
def _graph_is_connected(graph):
""" Return whether the graph is connected (True) or Not (False)
Parameters
----------
graph : array-like or sparse matrix, shape: (n_samples, n_samples)
adjacency matrix of the graph, non-zero weight means an edge
between the nodes
Returns
-------
is_connected : bool
True means the graph is fully connected and False means not
"""
if sparse.isspmatrix(graph):
# sparse graph, find all the connected components
n_connected_components, _ = connected_components(graph)
return n_connected_components == 1
else:
# dense graph, find all connected components start from node 0
return _graph_connected_component(graph, 0).sum() == graph.shape[0]
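# Illustrative note (added): for a dense adjacency such as
# np.array([[0, 1], [1, 0]]) _graph_is_connected returns True, while
# np.eye(2) (two isolated nodes with self-loops) yields False.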
def _set_diag(laplacian, value):
"""Set the diagonal of the laplacian matrix and convert it to a
sparse format well suited for eigenvalue decomposition
Parameters
----------
laplacian : array or sparse matrix
The graph laplacian
value : float
The value of the diagonal
Returns
-------
laplacian : array or sparse matrix
An array of matrix in a form that is well suited to fast
eigenvalue decomposition, depending on the band width of the
matrix.
"""
n_nodes = laplacian.shape[0]
# We need all entries in the diagonal to values
if not sparse.isspmatrix(laplacian):
laplacian.flat[::n_nodes + 1] = value
else:
laplacian = laplacian.tocoo()
diag_idx = (laplacian.row == laplacian.col)
laplacian.data[diag_idx] = value
# If the matrix has a small number of diagonals (as in the
# case of structured matrices coming from images), the
# dia format might be best suited for matvec products:
n_diags = np.unique(laplacian.row - laplacian.col).size
if n_diags <= 7:
# 3 or less outer diagonals on each side
laplacian = laplacian.todia()
else:
# csr has the fastest matvec and is thus best suited to
# arpack
laplacian = laplacian.tocsr()
return laplacian
def spectral_embedding(adjacency, n_components=8, eigen_solver=None,
random_state=None, eigen_tol=0.0,
norm_laplacian=True, drop_first=True):
"""Project the sample on the first eigenvectors of the graph Laplacian.
The adjacency matrix is used to compute a normalized graph Laplacian
whose spectrum (especially the eigenvectors associated to the
smallest eigenvalues) has an interpretation in terms of minimal
number of cuts necessary to split the graph into comparably sized
components.
This embedding can also 'work' even if the ``adjacency`` variable is
not strictly the adjacency matrix of a graph but more generally
an affinity or similarity matrix between samples (for instance the
heat kernel of a euclidean distance matrix or a k-NN matrix).
    However, care must be taken to always make the affinity matrix symmetric
so that the eigenvector decomposition works as expected.
Parameters
----------
adjacency : array-like or sparse matrix, shape: (n_samples, n_samples)
The adjacency matrix of the graph to embed.
n_components : integer, optional, default 8
The dimension of the projection subspace.
eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}, default None
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems,
but may also lead to instabilities.
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used for the initialization of the
lobpcg eigenvectors decomposition when eigen_solver == 'amg'.
By default, arpack is used.
eigen_tol : float, optional, default=0.0
Stopping criterion for eigendecomposition of the Laplacian matrix
when using arpack eigen_solver.
drop_first : bool, optional, default=True
Whether to drop the first eigenvector. For spectral embedding, this
should be True as the first eigenvector should be constant vector for
connected graph, but for spectral clustering, this should be kept as
False to retain the first eigenvector.
norm_laplacian : bool, optional, default=True
If True, then compute normalized Laplacian.
Returns
-------
embedding : array, shape=(n_samples, n_components)
The reduced samples.
Notes
-----
Spectral embedding is most useful when the graph has one connected
    component. If the graph has many components, the first few eigenvectors
will simply uncover the connected components of the graph.
References
----------
* http://en.wikipedia.org/wiki/LOBPCG
* Toward the Optimal Preconditioned Eigensolver: Locally Optimal
Block Preconditioned Conjugate Gradient Method
Andrew V. Knyazev
http://dx.doi.org/10.1137%2FS1064827500366124
"""
adjacency = check_symmetric(adjacency)
try:
from pyamg import smoothed_aggregation_solver
except ImportError:
if eigen_solver == "amg":
raise ValueError("The eigen_solver was set to 'amg', but pyamg is "
"not available.")
if eigen_solver is None:
eigen_solver = 'arpack'
elif eigen_solver not in ('arpack', 'lobpcg', 'amg'):
raise ValueError("Unknown value for eigen_solver: '%s'."
"Should be 'amg', 'arpack', or 'lobpcg'"
% eigen_solver)
random_state = check_random_state(random_state)
n_nodes = adjacency.shape[0]
# Whether to drop the first eigenvector
if drop_first:
n_components = n_components + 1
if not _graph_is_connected(adjacency):
warnings.warn("Graph is not fully connected, spectral embedding"
" may not work as expected.")
laplacian, dd = graph_laplacian(adjacency,
normed=norm_laplacian, return_diag=True)
if (eigen_solver == 'arpack'
or eigen_solver != 'lobpcg' and
(not sparse.isspmatrix(laplacian)
or n_nodes < 5 * n_components)):
# lobpcg used with eigen_solver='amg' has bugs for low number of nodes
# for details see the source code in scipy:
# https://github.com/scipy/scipy/blob/v0.11.0/scipy/sparse/linalg/eigen
# /lobpcg/lobpcg.py#L237
# or matlab:
# http://www.mathworks.com/matlabcentral/fileexchange/48-lobpcg-m
laplacian = _set_diag(laplacian, 1)
# Here we'll use shift-invert mode for fast eigenvalues
# (see http://docs.scipy.org/doc/scipy/reference/tutorial/arpack.html
# for a short explanation of what this means)
# Because the normalized Laplacian has eigenvalues between 0 and 2,
# I - L has eigenvalues between -1 and 1. ARPACK is most efficient
# when finding eigenvalues of largest magnitude (keyword which='LM')
# and when these eigenvalues are very large compared to the rest.
# For very large, very sparse graphs, I - L can have many, many
# eigenvalues very near 1.0. This leads to slow convergence. So
# instead, we'll use ARPACK's shift-invert mode, asking for the
# eigenvalues near 1.0. This effectively spreads-out the spectrum
# near 1.0 and leads to much faster convergence: potentially an
# orders-of-magnitude speedup over simply using keyword which='LA'
# in standard mode.
try:
# We are computing the opposite of the laplacian inplace so as
# to spare a memory allocation of a possibly very large array
laplacian *= -1
lambdas, diffusion_map = eigsh(laplacian, k=n_components,
sigma=1.0, which='LM',
tol=eigen_tol)
embedding = diffusion_map.T[n_components::-1] * dd
except RuntimeError:
# When submatrices are exactly singular, an LU decomposition
# in arpack fails. We fallback to lobpcg
eigen_solver = "lobpcg"
# Revert the laplacian to its opposite to have lobpcg work
laplacian *= -1
if eigen_solver == 'amg':
# Use AMG to get a preconditioner and speed up the eigenvalue
# problem.
if not sparse.issparse(laplacian):
warnings.warn("AMG works better for sparse matrices")
laplacian = laplacian.astype(np.float) # lobpcg needs native floats
laplacian = _set_diag(laplacian, 1)
ml = smoothed_aggregation_solver(check_array(laplacian, 'csr'))
M = ml.aspreconditioner()
X = random_state.rand(laplacian.shape[0], n_components + 1)
X[:, 0] = dd.ravel()
lambdas, diffusion_map = lobpcg(laplacian, X, M=M, tol=1.e-12,
largest=False)
embedding = diffusion_map.T * dd
if embedding.shape[0] == 1:
raise ValueError
elif eigen_solver == "lobpcg":
laplacian = laplacian.astype(np.float) # lobpcg needs native floats
if n_nodes < 5 * n_components + 1:
# see note above under arpack why lobpcg has problems with small
# number of nodes
# lobpcg will fallback to eigh, so we short circuit it
if sparse.isspmatrix(laplacian):
laplacian = laplacian.toarray()
lambdas, diffusion_map = eigh(laplacian)
embedding = diffusion_map.T[:n_components] * dd
else:
# lobpcg needs native floats
laplacian = laplacian.astype(np.float)
laplacian = _set_diag(laplacian, 1)
# We increase the number of eigenvectors requested, as lobpcg
# doesn't behave well in low dimension
X = random_state.rand(laplacian.shape[0], n_components + 1)
X[:, 0] = dd.ravel()
lambdas, diffusion_map = lobpcg(laplacian, X, tol=1e-15,
largest=False, maxiter=2000)
embedding = diffusion_map.T[:n_components] * dd
if embedding.shape[0] == 1:
raise ValueError
embedding = _deterministic_vector_sign_flip(embedding)
if drop_first:
return embedding[1:n_components].T
else:
return embedding[:n_components].T
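def _spectral_embedding_demo():
    """Hedged usage sketch (added for illustration; not part of scikit-learn).
    A tiny symmetric affinity matrix with two weakly linked pairs is embedded
    in one dimension; exact values depend on the eigensolver, so only the
    sign structure (one pair positive, the other negative) is meaningful.
    """
    affinity = np.array([[1.0, 0.9, 0.1, 0.0],
                         [0.9, 1.0, 0.0, 0.1],
                         [0.1, 0.0, 1.0, 0.9],
                         [0.0, 0.1, 0.9, 1.0]])
    return spectral_embedding(affinity, n_components=1, drop_first=True,
                              random_state=0)  # shape (4, 1)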
class SpectralEmbedding(BaseEstimator):
"""Spectral embedding for non-linear dimensionality reduction.
Forms an affinity matrix given by the specified function and
applies spectral decomposition to the corresponding graph laplacian.
The resulting transformation is given by the value of the
eigenvectors for each data point.
Parameters
-----------
n_components : integer, default: 2
The dimension of the projected subspace.
eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems,
but may also lead to instabilities.
random_state : int seed, RandomState instance, or None, default : None
A pseudo random number generator used for the initialization of the
lobpcg eigenvectors decomposition when eigen_solver == 'amg'.
affinity : string or callable, default : "nearest_neighbors"
How to construct the affinity matrix.
- 'nearest_neighbors' : construct affinity matrix by knn graph
- 'rbf' : construct affinity matrix by rbf kernel
- 'precomputed' : interpret X as precomputed affinity matrix
- callable : use passed in function as affinity
          the function takes in a data matrix (n_samples, n_features)
          and returns an affinity matrix (n_samples, n_samples).
gamma : float, optional, default : 1/n_features
Kernel coefficient for rbf kernel.
n_neighbors : int, default : max(n_samples/10 , 1)
Number of nearest neighbors for nearest_neighbors graph building.
Attributes
----------
embedding_ : array, shape = (n_samples, n_components)
Spectral embedding of the training matrix.
affinity_matrix_ : array, shape = (n_samples, n_samples)
        Affinity matrix constructed from samples or precomputed.
References
----------
- A Tutorial on Spectral Clustering, 2007
Ulrike von Luxburg
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323
    - On Spectral Clustering: Analysis and an algorithm, 2001
Andrew Y. Ng, Michael I. Jordan, Yair Weiss
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.19.8100
- Normalized cuts and image segmentation, 2000
Jianbo Shi, Jitendra Malik
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
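    Examples
    --------
    A minimal usage sketch (illustrative only; ``X`` is assumed to be a
    numeric array of shape (n_samples, n_features))::
        embedder = SpectralEmbedding(n_components=2, random_state=0)
        X_embedded = embedder.fit_transform(X)  # shape (n_samples, 2)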
"""
def __init__(self, n_components=2, affinity="nearest_neighbors",
gamma=None, random_state=None, eigen_solver=None,
n_neighbors=None):
self.n_components = n_components
self.affinity = affinity
self.gamma = gamma
self.random_state = random_state
self.eigen_solver = eigen_solver
self.n_neighbors = n_neighbors
@property
def _pairwise(self):
return self.affinity == "precomputed"
def _get_affinity_matrix(self, X, Y=None):
"""Calculate the affinity matrix from data
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples
and n_features is the number of features.
If affinity is "precomputed"
X : array-like, shape (n_samples, n_samples),
Interpret X as precomputed adjacency graph computed from
samples.
Returns
-------
affinity_matrix, shape (n_samples, n_samples)
"""
if self.affinity == 'precomputed':
self.affinity_matrix_ = X
return self.affinity_matrix_
if self.affinity == 'nearest_neighbors':
if sparse.issparse(X):
warnings.warn("Nearest neighbors affinity currently does "
"not support sparse input, falling back to "
"rbf affinity")
self.affinity = "rbf"
else:
self.n_neighbors_ = (self.n_neighbors
if self.n_neighbors is not None
else max(int(X.shape[0] / 10), 1))
self.affinity_matrix_ = kneighbors_graph(X, self.n_neighbors_,
include_self=True)
# currently only symmetric affinity_matrix supported
self.affinity_matrix_ = 0.5 * (self.affinity_matrix_ +
self.affinity_matrix_.T)
return self.affinity_matrix_
if self.affinity == 'rbf':
self.gamma_ = (self.gamma
if self.gamma is not None else 1.0 / X.shape[1])
self.affinity_matrix_ = rbf_kernel(X, gamma=self.gamma_)
return self.affinity_matrix_
self.affinity_matrix_ = self.affinity(X)
return self.affinity_matrix_
def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples
and n_features is the number of features.
If affinity is "precomputed"
X : array-like, shape (n_samples, n_samples),
Interpret X as precomputed adjacency graph computed from
samples.
Returns
-------
self : object
Returns the instance itself.
"""
random_state = check_random_state(self.random_state)
if isinstance(self.affinity, six.string_types):
if self.affinity not in set(("nearest_neighbors", "rbf",
"precomputed")):
raise ValueError(("%s is not a valid affinity. Expected "
"'precomputed', 'rbf', 'nearest_neighbors' "
"or a callable.") % self.affinity)
elif not callable(self.affinity):
raise ValueError(("'affinity' is expected to be an an affinity "
"name or a callable. Got: %s") % self.affinity)
affinity_matrix = self._get_affinity_matrix(X)
self.embedding_ = spectral_embedding(affinity_matrix,
n_components=self.n_components,
eigen_solver=self.eigen_solver,
random_state=random_state)
return self
def fit_transform(self, X, y=None):
"""Fit the model from data in X and transform X.
Parameters
----------
        X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples
and n_features is the number of features.
If affinity is "precomputed"
X : array-like, shape (n_samples, n_samples),
Interpret X as precomputed adjacency graph computed from
samples.
Returns
-------
        X_new : array-like, shape (n_samples, n_components)
"""
self.fit(X)
return self.embedding_
| bsd-3-clause | -3,809,747,056,958,255,600 | 39.509278 | 79 | 0.616532 | false |
pannal/Subliminal.bundle | Contents/Code/support/activities.py | 2 | 5022 | # coding=utf-8
from wraptor.decorators import throttle
from config import config
from items import get_item, get_item_kind_from_item, refresh_item
Activity = None
try:
from plex_activity import Activity
except ImportError:
pass
class PlexActivityManager(object):
def start(self):
activity_sources_enabled = None
if not Activity:
return
if config.plex_token:
from plex import Plex
Plex.configuration.defaults.authentication(config.plex_token)
activity_sources_enabled = ["websocket"]
Activity.on('websocket.playing', self.on_playing)
if activity_sources_enabled:
Activity.start(activity_sources_enabled)
@throttle(5, instance_method=True)
def on_playing(self, info):
# ignore non-playing states and anything too far in
if info["state"] != "playing" or info["viewOffset"] > 60000:
return
# don't trigger on the first hit ever
if "last_played_items" not in Dict:
Dict["last_played_items"] = []
Dict.Save()
return
rating_key = info["ratingKey"]
# only use integer based rating keys
try:
int(rating_key)
except ValueError:
return
if rating_key in Dict["last_played_items"] and rating_key != Dict["last_played_items"][0]:
# shift last played
Dict["last_played_items"].insert(0,
Dict["last_played_items"].pop(Dict["last_played_items"].index(rating_key)))
Dict.Save()
elif rating_key not in Dict["last_played_items"]:
# new playing; store last X recently played items
Dict["last_played_items"].insert(0, rating_key)
Dict["last_played_items"] = Dict["last_played_items"][:config.store_recently_played_amount]
Dict.Save()
if not config.react_to_activities:
return
debug_msg = "Started playing %s. Refreshing it." % rating_key
# todo: cleanup debug messages for hybrid-plus
keys_to_refresh = []
if config.activity_mode in ["refresh", "next_episode", "hybrid", "hybrid-plus"]:
# next episode or next episode and current movie
if config.activity_mode in ["next_episode", "hybrid", "hybrid-plus"]:
plex_item = get_item(rating_key)
if not plex_item:
Log.Warn("Can't determine media type of %s, skipping" % rating_key)
return
if get_item_kind_from_item(plex_item) == "episode":
next_ep = self.get_next_episode(rating_key)
if config.activity_mode == "hybrid-plus":
keys_to_refresh.append(rating_key)
if next_ep:
keys_to_refresh.append(next_ep.rating_key)
debug_msg = "Started playing %s. Refreshing next episode (%s, S%02iE%02i)." % \
(rating_key, next_ep.rating_key, int(next_ep.season.index), int(next_ep.index))
else:
if config.activity_mode in ("hybrid", "hybrid-plus"):
keys_to_refresh.append(rating_key)
elif config.activity_mode == "refresh":
keys_to_refresh.append(rating_key)
if keys_to_refresh:
Log.Debug(debug_msg)
Log.Debug("Refreshing %s", keys_to_refresh)
for key in keys_to_refresh:
refresh_item(key)
def get_next_episode(self, rating_key):
plex_item = get_item(rating_key)
if not plex_item:
return
if get_item_kind_from_item(plex_item) == "episode":
# get season
season = get_item(plex_item.season.rating_key)
if not season:
return
# determine next episode
# next episode is in the same season
if plex_item.index < season.episode_count:
# get next ep
for ep in season.children():
if ep.index == plex_item.index + 1:
return ep
# it's not, try getting the first episode of the next season
else:
# get show
show = get_item(plex_item.show.rating_key)
# is there a next season?
if season.index < show.season_count:
for other_season in show.children():
if other_season.index == season.index + 1:
next_season = other_season
for ep in next_season.children():
if ep.index == 1:
return ep
activity = PlexActivityManager()
| mit | -1,330,420,517,494,388,200 | 37.045455 | 120 | 0.521705 | false |
dongweiming/flask_reveal | social/strategies/webpy_strategy.py | 1 | 2142 | import web
from social.strategies.base import BaseStrategy, BaseTemplateStrategy
class WebpyTemplateStrategy(BaseTemplateStrategy):
def render_template(self, tpl, context):
return web.template.render(tpl)(**context)
def render_string(self, html, context):
return web.template.Template(html)(**context)
class WebpyStrategy(BaseStrategy):
def __init__(self, *args, **kwargs):
self.session = web.web_session
kwargs.setdefault('tpl', WebpyTemplateStrategy)
super(WebpyStrategy, self).__init__(*args, **kwargs)
def get_setting(self, name):
return getattr(web.config, name)
def request_data(self, merge=True):
if merge:
data = web.input(_method='both')
elif web.ctx.method == 'POST':
data = web.input(_method='post')
else:
data = web.input(_method='get')
return data
def request_host(self):
return self.request.host
def redirect(self, url):
return web.seeother(url)
def html(self, content):
web.header('Content-Type', 'text/html;charset=UTF-8')
return content
def render_html(self, tpl=None, html=None, context=None):
if not tpl and not html:
raise ValueError('Missing template or html parameters')
context = context or {}
if tpl:
tpl = web.template.frender(tpl)
else:
tpl = web.template.Template(html)
return tpl(**context)
def session_get(self, name, default=None):
return self.session.get(name, default)
def session_set(self, name, value):
self.session[name] = value
def session_pop(self, name):
self.session.pop(name, None)
def session_setdefault(self, name, value):
return self.session.setdefault(name, value)
def build_absolute_uri(self, path=None):
path = path or ''
if path.startswith('http://') or path.startswith('https://'):
return path
return web.ctx.protocol + '://' + web.ctx.host + path
def is_response(self, value):
return isinstance(value, web.Storage)
| bsd-3-clause | -1,485,324,687,506,974,500 | 29.169014 | 69 | 0.617647 | false |
lttng/lttng-scope | ttt/src/main/python/debuginfo_synth_exec.py | 2 | 1472 | from debuginfo_trace_writer import DebugInfoTraceWriter
import sys
'''
Generate a trace simulating an exec. When an exec happens, the address space
of the process is reset (previously loaded libraries are not there anymore,
and the main executable is replaced), so any known mapping should be forgotten.
In the trace, this is represented by a new statedump (when the liblttng-ust.so
library is loaded again in the new address space, its constructor is called
again, which initiates a new statedump).
'''
if len(sys.argv) < 2:
print("Please provide trace output path.", file=sys.stderr)
sys.exit(1)
def timestamp_generator():
ts = 1
while True:
yield ts
ts += 1
vpid = 1337
ts = timestamp_generator()
gen = DebugInfoTraceWriter(sys.argv[1])
baddr = 0x400000
memsz = 0x10000
gen.write_lttng_ust_statedump_start(next(ts), 0, vpid)
gen.write_lttng_ust_statedump_bin_info(next(ts), 0, vpid, baddr, memsz, "/tmp/foo", 0, 0, 0)
gen.write_lttng_ust_statedump_end(next(ts), 0, vpid)
gen.write_dummy_event(next(ts), 0, vpid, 0x400100)
baddr = 0x500000
memsz = 0x10000
gen.write_lttng_ust_statedump_start(next(ts), 0, vpid)
gen.write_lttng_ust_statedump_bin_info(next(ts), 0, vpid, baddr, memsz, "/tmp/bar", 0, 0, 0)
gen.write_lttng_ust_statedump_end(next(ts), 0, vpid)
# This event should not map to anything currently loaded.
gen.write_dummy_event(next(ts), 0, vpid, 0x400100)
gen.write_dummy_event(next(ts), 0, vpid, 0x500100)
gen.flush()
| epl-1.0 | 5,882,135,464,082,170,000 | 31.711111 | 92 | 0.726902 | false |
PeterRochford/SkillMetrics | skill_metrics/write_taylor_stats.py | 1 | 5287 | import os
import xlsxwriter
def write_taylor_stats(filename,data,**kwargs):
'''
    Write statistics used in a Taylor diagram to an Excel file.
This function writes to an Excel file FILENAME the statistics
used to create a Taylor diagram for each of the dictionaries
contained in DATA. The first 2 arguments must be the inputs as
described below followed by keyword arguments in the format of
OPTION = VALUE.
INPUTS:
filename : name for statistics Excel file
data : a dictionary containing the statistics
data['sdev'] : Standard deviations (sigma)
data['crmsd'] : Centered Root Mean Square Difference (CRMSD)
data['ccoef'] : Correlation Coefficient (r)
OUTPUTS:
None.
LIST OF OPTIONS:
A title description for each dictionary (TITLE) can be
optionally provided as well as a LABEL for each data point in
the diagram.
    label = label : label for each data point in the Taylor diagram, e.g.
'OC445 (CB)'
overwrite = boolean : true/false flag to overwrite Excel file
    title = title : title descriptor for data set, e.g. 'Expt. 01.0'
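    EXAMPLE:
    A minimal, illustrative call; the file name, labels and statistic values
    below are placeholders rather than results from a real analysis:
        data = {'sdev': [1.0, 0.8], 'crmsd': [0.0, 0.5], 'ccoef': [1.0, 0.7]}
        write_taylor_stats('taylor_stats.xlsx', data,
                           title=['Expt. 01.0'],
                           label=['Observed', 'Model A'])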
Author: Peter A. Rochford
Symplectic, LLC
www.thesymplectic.com
[email protected]
Created on Dec 12, 2016
'''
option = get_write_taylor_stats_options(**kwargs)
# Check for existence of file
if os.path.isfile(filename):
if option['overwrite']:
os.remove(filename)
else:
raise ValueError('File already exists: ' + filename)
    # Convert data to a list if necessary
if not type(data) is list: data = [data]
    # Create the Excel workbook and add a worksheet
workbook = xlsxwriter.Workbook(filename)
worksheet = workbook.add_worksheet()
# Write title information to file
worksheet.write(1, 0, 'Taylor Statistics')
# Determine number of dictionaries in data variable
ncell = len(data)
# Write data for each dictionary
row = 2
headers = ['Description','Standard Deviation','CRMSD','Correlation Coeff.']
for i in range(ncell):
row += 1
if len(option['title']) > 0:
worksheet.write(row, 0, option['title'][i])
# Write column headers
row += 1
for j,h in enumerate(headers):
worksheet.write(row, j, h)
# Retrieve input values as list
try: iter(data[i]['sdev'])
except TypeError:
sdev = [data[i]['sdev']]
crmsd = [data[i]['crmsd']]
ccoef = [data[i]['ccoef']]
else:
sdev = data[i]['sdev']
crmsd = data[i]['crmsd']
ccoef = data[i]['ccoef']
ndata = len(sdev)
# Write each row of data
row += 1
for j in range(ndata):
if len(option['label']) > 0:
worksheet.write(row, 0, option['label'][j])
worksheet.write(row, 1, sdev[j])
worksheet.write(row, 2, crmsd[j])
worksheet.write(row, 3, ccoef[j])
row += 1
workbook.close()
def get_write_taylor_stats_options(**kwargs):
'''
Get optional arguments for write_taylor_stats function.
Retrieves the keywords supplied to the WRITE_TARGET_STATS
function (**KWARGS), and returns the values in a OPTION dictionary.
Default values are assigned to selected optional arguments. The
function will terminate with an error if an unrecognized optional
argument is supplied.
INPUTS:
**kwargs : keyword argument list
OUTPUTS:
option : data structure containing option values.
        option['label'] : label for each data point in the Taylor diagram.
        option['title'] : title descriptor for data set.
        option['overwrite'] : boolean to overwrite Excel file.
LIST OF OPTIONS:
A title description for each dataset TITLE can be optionally
provided as well as an overwrite option if the file name currently
exists.
    label = label : label for each data point in the Taylor diagram, e.g.
'OC445 (CB)'
overwrite = boolean : true/false flag to overwrite Excel file
title = title : title descriptor for each data set in data, e.g.
'Expt. 01.0'
Author: Peter A. Rochford
Acorn Science & Innovation
[email protected]
Created on Dec 10, 2016
@author: rochfordp
'''
from skill_metrics import check_on_off
nargin = len(kwargs)
# Set default parameters
option = {}
option['overwrite'] = False
option['label'] = []
option['title'] = ''
if nargin == 0:
# No options requested, so return with only defaults
return option
# Load custom options, storing values in option data structure
# Check for valid keys and values in dictionary
for optname, optvalue in kwargs.items():
optname = optname.lower()
if not optname in option:
raise ValueError('Unrecognized option: ' + optname)
else:
# Replace option value with that from arguments
option[optname] = optvalue
# Check values for specific options
if optname == 'overwrite':
option['overwrite'] = check_on_off(option['overwrite'])
return option
| gpl-3.0 | -8,094,325,165,340,151,000 | 30.658683 | 79 | 0.611122 | false |
josiah-wolf-oberholtzer/consort | test/test_DependentTimespanMaker.py | 1 | 57277 | import abjad
import consort
import collections
from abjad.tools import systemtools
from abjad.tools import timespantools
def _make_timespan_inventory():
timespan_inventory = abjad.TimespanList([
consort.PerformedTimespan(
start_offset=0,
stop_offset=20,
voice_name='A',
),
consort.PerformedTimespan(
music_specifier=consort.MusicSpecifier(
labels=('labeled',),
),
start_offset=20,
stop_offset=40,
voice_name='A',
),
consort.PerformedTimespan(
music_specifier=consort.MusicSpecifier(
labels=('labeled',),
),
start_offset=25,
stop_offset=50,
voice_name='B',
),
consort.PerformedTimespan(
music_specifier=consort.MusicSpecifier(
labels=('labeled',),
),
start_offset=60,
stop_offset=80,
voice_name='A',
),
consort.PerformedTimespan(
start_offset=65,
stop_offset=100,
voice_name='B',
),
])
timespan_inventory.sort()
return timespan_inventory
def test_DependentTimespanMaker_01():
timespan_inventory = _make_timespan_inventory()
assert format(timespan_inventory) == abjad.String.normalize(
r'''
abjad.TimespanList(
[
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(0, 1),
stop_offset=abjad.Offset(20, 1),
voice_name='A',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(20, 1),
stop_offset=abjad.Offset(40, 1),
music_specifier=consort.tools.MusicSpecifier(
labels=('labeled',),
),
voice_name='A',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(25, 1),
stop_offset=abjad.Offset(50, 1),
music_specifier=consort.tools.MusicSpecifier(
labels=('labeled',),
),
voice_name='B',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(60, 1),
stop_offset=abjad.Offset(80, 1),
music_specifier=consort.tools.MusicSpecifier(
labels=('labeled',),
),
voice_name='A',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(65, 1),
stop_offset=abjad.Offset(100, 1),
voice_name='B',
),
]
)
''')
def test_DependentTimespanMaker_02():
music_specifiers = collections.OrderedDict([
('C', None),
])
target_timespan = abjad.Timespan(0, 100)
timespan_maker = consort.DependentTimespanMaker(
voice_names=(
'A',
'B',
),
)
timespan_inventory = timespan_maker(
target_timespan=target_timespan,
music_specifiers=music_specifiers,
timespan_inventory=abjad.TimespanList()
)
assert format(timespan_inventory) == abjad.String.normalize(
r'''
abjad.TimespanList(
[]
)
''')
def test_DependentTimespanMaker_03():
music_specifiers = collections.OrderedDict([
('C', None),
])
target_timespan = abjad.Timespan(0, 100)
timespan_maker = consort.DependentTimespanMaker(
hysteresis=(1, 8),
voice_names=(
'A',
'B',
),
)
timespan_inventory = timespan_maker(
target_timespan=target_timespan,
music_specifiers=music_specifiers,
timespan_inventory=abjad.TimespanList()
)
assert format(timespan_inventory) == abjad.String.normalize(
r'''
abjad.TimespanList(
[]
)
''')
def test_DependentTimespanMaker_04():
music_specifiers = collections.OrderedDict([
('C', None),
])
target_timespan = abjad.Timespan(0, 100)
timespan_maker = consort.DependentTimespanMaker(
voice_names=(
'A',
'B',
),
)
timespan_inventory = timespan_maker(
target_timespan=target_timespan,
music_specifiers=music_specifiers,
timespan_inventory=_make_timespan_inventory(),
)
assert format(timespan_inventory) == abjad.String.normalize(
r'''
abjad.TimespanList(
[
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(0, 1),
stop_offset=abjad.Offset(20, 1),
voice_name='A',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(0, 1),
stop_offset=abjad.Offset(50, 1),
voice_name='C',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(20, 1),
stop_offset=abjad.Offset(40, 1),
music_specifier=consort.tools.MusicSpecifier(
labels=('labeled',),
),
voice_name='A',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(25, 1),
stop_offset=abjad.Offset(50, 1),
music_specifier=consort.tools.MusicSpecifier(
labels=('labeled',),
),
voice_name='B',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(60, 1),
stop_offset=abjad.Offset(80, 1),
music_specifier=consort.tools.MusicSpecifier(
labels=('labeled',),
),
voice_name='A',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(60, 1),
stop_offset=abjad.Offset(100, 1),
voice_name='C',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(65, 1),
stop_offset=abjad.Offset(100, 1),
voice_name='B',
),
]
)
''')
def test_DependentTimespanMaker_05():
music_specifiers = collections.OrderedDict([
('C', None),
('D', None),
])
target_timespan = abjad.Timespan(0, 100)
timespan_maker = consort.DependentTimespanMaker(
voice_names=(
'A',
'B',
),
)
timespan_inventory = timespan_maker(
target_timespan=target_timespan,
music_specifiers=music_specifiers,
timespan_inventory=_make_timespan_inventory(),
)
assert format(timespan_inventory) == abjad.String.normalize(
r'''
abjad.TimespanList(
[
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(0, 1),
stop_offset=abjad.Offset(20, 1),
voice_name='A',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(0, 1),
stop_offset=abjad.Offset(50, 1),
voice_name='C',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(0, 1),
stop_offset=abjad.Offset(50, 1),
voice_name='D',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(20, 1),
stop_offset=abjad.Offset(40, 1),
music_specifier=consort.tools.MusicSpecifier(
labels=('labeled',),
),
voice_name='A',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(25, 1),
stop_offset=abjad.Offset(50, 1),
music_specifier=consort.tools.MusicSpecifier(
labels=('labeled',),
),
voice_name='B',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(60, 1),
stop_offset=abjad.Offset(80, 1),
music_specifier=consort.tools.MusicSpecifier(
labels=('labeled',),
),
voice_name='A',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(60, 1),
stop_offset=abjad.Offset(100, 1),
voice_name='C',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(60, 1),
stop_offset=abjad.Offset(100, 1),
voice_name='D',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(65, 1),
stop_offset=abjad.Offset(100, 1),
voice_name='B',
),
]
)
''')
def test_DependentTimespanMaker_06():
music_specifiers = collections.OrderedDict([
('C', None),
])
target_timespan = abjad.Timespan(10, 90)
timespan_maker = consort.DependentTimespanMaker(
voice_names=(
'A',
'B',
),
)
timespan_inventory = timespan_maker(
target_timespan=target_timespan,
music_specifiers=music_specifiers,
timespan_inventory=_make_timespan_inventory(),
)
assert format(timespan_inventory) == abjad.String.normalize(
r'''
abjad.TimespanList(
[
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(0, 1),
stop_offset=abjad.Offset(20, 1),
voice_name='A',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(10, 1),
stop_offset=abjad.Offset(50, 1),
voice_name='C',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(20, 1),
stop_offset=abjad.Offset(40, 1),
music_specifier=consort.tools.MusicSpecifier(
labels=('labeled',),
),
voice_name='A',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(25, 1),
stop_offset=abjad.Offset(50, 1),
music_specifier=consort.tools.MusicSpecifier(
labels=('labeled',),
),
voice_name='B',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(60, 1),
stop_offset=abjad.Offset(80, 1),
music_specifier=consort.tools.MusicSpecifier(
labels=('labeled',),
),
voice_name='A',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(60, 1),
stop_offset=abjad.Offset(90, 1),
voice_name='C',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(65, 1),
stop_offset=abjad.Offset(100, 1),
voice_name='B',
),
]
)
''')
def test_DependentTimespanMaker_07():
music_specifiers = collections.OrderedDict([
('C', None),
])
target_timespan = abjad.Timespan(10, 90)
timespan_maker = consort.DependentTimespanMaker(
voice_names=(
'A',
),
)
timespan_inventory = timespan_maker(
target_timespan=target_timespan,
music_specifiers=music_specifiers,
timespan_inventory=_make_timespan_inventory(),
)
assert format(timespan_inventory) == abjad.String.normalize(
r'''
abjad.TimespanList(
[
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(0, 1),
stop_offset=abjad.Offset(20, 1),
voice_name='A',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(10, 1),
stop_offset=abjad.Offset(40, 1),
voice_name='C',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(20, 1),
stop_offset=abjad.Offset(40, 1),
music_specifier=consort.tools.MusicSpecifier(
labels=('labeled',),
),
voice_name='A',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(25, 1),
stop_offset=abjad.Offset(50, 1),
music_specifier=consort.tools.MusicSpecifier(
labels=('labeled',),
),
voice_name='B',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(60, 1),
stop_offset=abjad.Offset(80, 1),
music_specifier=consort.tools.MusicSpecifier(
labels=('labeled',),
),
voice_name='A',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(60, 1),
stop_offset=abjad.Offset(80, 1),
voice_name='C',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(65, 1),
stop_offset=abjad.Offset(100, 1),
voice_name='B',
),
]
)
''')
def test_DependentTimespanMaker_08():
music_specifiers = collections.OrderedDict([
('C', None),
])
target_timespan = abjad.Timespan(0, 100)
timespan_maker = consort.DependentTimespanMaker(
include_inner_starts=True,
voice_names=(
'A',
'B',
),
)
timespan_inventory = timespan_maker(
target_timespan=target_timespan,
music_specifiers=music_specifiers,
timespan_inventory=_make_timespan_inventory(),
)
assert format(timespan_inventory) == abjad.String.normalize(
r'''
abjad.TimespanList(
[
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(0, 1),
stop_offset=abjad.Offset(20, 1),
voice_name='A',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(0, 1),
stop_offset=abjad.Offset(20, 1),
voice_name='C',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(20, 1),
stop_offset=abjad.Offset(25, 1),
voice_name='C',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(20, 1),
stop_offset=abjad.Offset(40, 1),
music_specifier=consort.tools.MusicSpecifier(
labels=('labeled',),
),
voice_name='A',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(25, 1),
stop_offset=abjad.Offset(50, 1),
music_specifier=consort.tools.MusicSpecifier(
labels=('labeled',),
),
voice_name='B',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(25, 1),
stop_offset=abjad.Offset(50, 1),
voice_name='C',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(60, 1),
stop_offset=abjad.Offset(65, 1),
voice_name='C',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(60, 1),
stop_offset=abjad.Offset(80, 1),
music_specifier=consort.tools.MusicSpecifier(
labels=('labeled',),
),
voice_name='A',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(65, 1),
stop_offset=abjad.Offset(100, 1),
voice_name='B',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(65, 1),
stop_offset=abjad.Offset(100, 1),
voice_name='C',
),
]
)
''')
def test_DependentTimespanMaker_09():
music_specifiers = collections.OrderedDict([
('C', None),
])
target_timespan = abjad.Timespan(0, 100)
timespan_maker = consort.DependentTimespanMaker(
include_inner_stops=True,
voice_names=(
'A',
'B',
),
)
timespan_inventory = timespan_maker(
target_timespan=target_timespan,
music_specifiers=music_specifiers,
timespan_inventory=_make_timespan_inventory(),
)
assert format(timespan_inventory) == abjad.String.normalize(
r'''
abjad.TimespanList(
[
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(0, 1),
stop_offset=abjad.Offset(20, 1),
voice_name='A',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(0, 1),
stop_offset=abjad.Offset(20, 1),
voice_name='C',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(20, 1),
stop_offset=abjad.Offset(40, 1),
music_specifier=consort.tools.MusicSpecifier(
labels=('labeled',),
),
voice_name='A',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(20, 1),
stop_offset=abjad.Offset(40, 1),
voice_name='C',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(25, 1),
stop_offset=abjad.Offset(50, 1),
music_specifier=consort.tools.MusicSpecifier(
labels=('labeled',),
),
voice_name='B',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(40, 1),
stop_offset=abjad.Offset(50, 1),
voice_name='C',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(60, 1),
stop_offset=abjad.Offset(80, 1),
music_specifier=consort.tools.MusicSpecifier(
labels=('labeled',),
),
voice_name='A',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(60, 1),
stop_offset=abjad.Offset(80, 1),
voice_name='C',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(65, 1),
stop_offset=abjad.Offset(100, 1),
voice_name='B',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(80, 1),
stop_offset=abjad.Offset(100, 1),
voice_name='C',
),
]
)
''')
def test_DependentTimespanMaker_10():
music_specifiers = collections.OrderedDict([
('C', None),
])
target_timespan = abjad.Timespan(0, 100)
timespan_maker = consort.DependentTimespanMaker(
include_inner_starts=True,
include_inner_stops=True,
voice_names=(
'A',
'B',
),
)
timespan_inventory = timespan_maker(
target_timespan=target_timespan,
music_specifiers=music_specifiers,
timespan_inventory=_make_timespan_inventory(),
)
assert format(timespan_inventory) == abjad.String.normalize(
r'''
abjad.TimespanList(
[
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(0, 1),
stop_offset=abjad.Offset(20, 1),
voice_name='A',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(0, 1),
stop_offset=abjad.Offset(20, 1),
voice_name='C',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(20, 1),
stop_offset=abjad.Offset(25, 1),
voice_name='C',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(20, 1),
stop_offset=abjad.Offset(40, 1),
music_specifier=consort.tools.MusicSpecifier(
labels=('labeled',),
),
voice_name='A',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(25, 1),
stop_offset=abjad.Offset(40, 1),
voice_name='C',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(25, 1),
stop_offset=abjad.Offset(50, 1),
music_specifier=consort.tools.MusicSpecifier(
labels=('labeled',),
),
voice_name='B',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(40, 1),
stop_offset=abjad.Offset(50, 1),
voice_name='C',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(60, 1),
stop_offset=abjad.Offset(65, 1),
voice_name='C',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(60, 1),
stop_offset=abjad.Offset(80, 1),
music_specifier=consort.tools.MusicSpecifier(
labels=('labeled',),
),
voice_name='A',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(65, 1),
stop_offset=abjad.Offset(80, 1),
voice_name='C',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(65, 1),
stop_offset=abjad.Offset(100, 1),
voice_name='B',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(80, 1),
stop_offset=abjad.Offset(100, 1),
voice_name='C',
),
]
)
''')
def test_DependentTimespanMaker_11():
music_specifiers = collections.OrderedDict([
('C', None),
])
target_timespan = abjad.Timespan(0, 100)
timespan_maker = consort.DependentTimespanMaker(
include_inner_starts=True,
include_inner_stops=True,
rotation_indices=(1,),
voice_names=(
'A',
'B',
),
)
timespan_inventory = timespan_maker(
target_timespan=target_timespan,
music_specifiers=music_specifiers,
timespan_inventory=_make_timespan_inventory(),
)
assert format(timespan_inventory) == abjad.String.normalize(
r'''
abjad.TimespanList(
[
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(0, 1),
stop_offset=abjad.Offset(10, 1),
voice_name='C',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(0, 1),
stop_offset=abjad.Offset(20, 1),
voice_name='A',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(10, 1),
stop_offset=abjad.Offset(30, 1),
voice_name='C',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(20, 1),
stop_offset=abjad.Offset(40, 1),
music_specifier=consort.tools.MusicSpecifier(
labels=('labeled',),
),
voice_name='A',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(25, 1),
stop_offset=abjad.Offset(50, 1),
music_specifier=consort.tools.MusicSpecifier(
labels=('labeled',),
),
voice_name='B',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(30, 1),
stop_offset=abjad.Offset(35, 1),
voice_name='C',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(35, 1),
stop_offset=abjad.Offset(50, 1),
voice_name='C',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(60, 1),
stop_offset=abjad.Offset(80, 1),
music_specifier=consort.tools.MusicSpecifier(
labels=('labeled',),
),
voice_name='A',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(60, 1),
stop_offset=abjad.Offset(80, 1),
voice_name='C',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(65, 1),
stop_offset=abjad.Offset(100, 1),
voice_name='B',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(80, 1),
stop_offset=abjad.Offset(85, 1),
voice_name='C',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(85, 1),
stop_offset=abjad.Offset(100, 1),
voice_name='C',
),
]
)
''')
def test_DependentTimespanMaker_12():
music_specifiers = collections.OrderedDict([
('C', None),
])
target_timespan = abjad.Timespan(0, 100)
timespan_maker = consort.DependentTimespanMaker(
include_inner_starts=True,
include_inner_stops=True,
rotation_indices=(0, 1),
voice_names=(
'A',
'B',
),
)
timespan_inventory = timespan_maker(
target_timespan=target_timespan,
music_specifiers=music_specifiers,
timespan_inventory=_make_timespan_inventory(),
)
assert format(timespan_inventory) == abjad.String.normalize(
r'''
abjad.TimespanList(
[
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(0, 1),
stop_offset=abjad.Offset(20, 1),
voice_name='A',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(0, 1),
stop_offset=abjad.Offset(20, 1),
voice_name='C',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(20, 1),
stop_offset=abjad.Offset(25, 1),
voice_name='C',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(20, 1),
stop_offset=abjad.Offset(40, 1),
music_specifier=consort.tools.MusicSpecifier(
labels=('labeled',),
),
voice_name='A',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(25, 1),
stop_offset=abjad.Offset(40, 1),
voice_name='C',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(25, 1),
stop_offset=abjad.Offset(50, 1),
music_specifier=consort.tools.MusicSpecifier(
labels=('labeled',),
),
voice_name='B',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(40, 1),
stop_offset=abjad.Offset(50, 1),
voice_name='C',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(60, 1),
stop_offset=abjad.Offset(80, 1),
music_specifier=consort.tools.MusicSpecifier(
labels=('labeled',),
),
voice_name='A',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(60, 1),
stop_offset=abjad.Offset(80, 1),
voice_name='C',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(65, 1),
stop_offset=abjad.Offset(100, 1),
voice_name='B',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(80, 1),
stop_offset=abjad.Offset(85, 1),
voice_name='C',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(85, 1),
stop_offset=abjad.Offset(100, 1),
voice_name='C',
),
]
)
''')
def test_DependentTimespanMaker_13():
music_specifiers = collections.OrderedDict([
('C', None),
])
target_timespan = abjad.Timespan(0, 100)
timespan_maker = consort.DependentTimespanMaker(
include_inner_starts=True,
include_inner_stops=True,
labels=('labeled',),
voice_names=(
'A',
'B',
),
)
timespan_inventory = timespan_maker(
target_timespan=target_timespan,
music_specifiers=music_specifiers,
timespan_inventory=_make_timespan_inventory(),
)
assert format(timespan_inventory) == abjad.String.normalize(
r'''
abjad.TimespanList(
[
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(0, 1),
stop_offset=abjad.Offset(20, 1),
voice_name='A',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(20, 1),
stop_offset=abjad.Offset(25, 1),
voice_name='C',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(20, 1),
stop_offset=abjad.Offset(40, 1),
music_specifier=consort.tools.MusicSpecifier(
labels=('labeled',),
),
voice_name='A',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(25, 1),
stop_offset=abjad.Offset(40, 1),
voice_name='C',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(25, 1),
stop_offset=abjad.Offset(50, 1),
music_specifier=consort.tools.MusicSpecifier(
labels=('labeled',),
),
voice_name='B',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(40, 1),
stop_offset=abjad.Offset(50, 1),
voice_name='C',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(60, 1),
stop_offset=abjad.Offset(80, 1),
music_specifier=consort.tools.MusicSpecifier(
labels=('labeled',),
),
voice_name='A',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(60, 1),
stop_offset=abjad.Offset(80, 1),
voice_name='C',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(65, 1),
stop_offset=abjad.Offset(100, 1),
voice_name='B',
),
]
)
''')
def test_DependentTimespanMaker_14():
music_specifiers = collections.OrderedDict([
('C', None),
])
target_timespan = abjad.Timespan(0, 100)
timespan_maker = consort.DependentTimespanMaker(
include_inner_starts=True,
include_inner_stops=True,
rotation_indices=(-1,),
labels=('labeled',),
voice_names=(
'A',
'B',
),
)
timespan_inventory = timespan_maker(
target_timespan=target_timespan,
music_specifiers=music_specifiers,
timespan_inventory=_make_timespan_inventory(),
)
assert format(timespan_inventory) == abjad.String.normalize(
r'''
abjad.TimespanList(
[
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(0, 1),
stop_offset=abjad.Offset(20, 1),
voice_name='A',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(20, 1),
stop_offset=abjad.Offset(35, 1),
voice_name='C',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(20, 1),
stop_offset=abjad.Offset(40, 1),
music_specifier=consort.tools.MusicSpecifier(
labels=('labeled',),
),
voice_name='A',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(25, 1),
stop_offset=abjad.Offset(50, 1),
music_specifier=consort.tools.MusicSpecifier(
labels=('labeled',),
),
voice_name='B',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(35, 1),
stop_offset=abjad.Offset(45, 1),
voice_name='C',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(45, 1),
stop_offset=abjad.Offset(50, 1),
voice_name='C',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(60, 1),
stop_offset=abjad.Offset(80, 1),
music_specifier=consort.tools.MusicSpecifier(
labels=('labeled',),
),
voice_name='A',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(60, 1),
stop_offset=abjad.Offset(80, 1),
voice_name='C',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(65, 1),
stop_offset=abjad.Offset(100, 1),
voice_name='B',
),
]
)
''')
def test_DependentTimespanMaker_15():
music_specifiers = collections.OrderedDict([
('C', None),
])
target_timespan = abjad.Timespan(25, 75)
timespan_maker = consort.DependentTimespanMaker(
include_inner_starts=True,
include_inner_stops=True,
rotation_indices=(-1,),
labels=('labeled',),
voice_names=(
'A',
'B',
),
)
timespan_inventory = timespan_maker(
target_timespan=target_timespan,
music_specifiers=music_specifiers,
timespan_inventory=_make_timespan_inventory(),
)
assert format(timespan_inventory) == abjad.String.normalize(
r'''
abjad.TimespanList(
[
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(0, 1),
stop_offset=abjad.Offset(20, 1),
voice_name='A',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(20, 1),
stop_offset=abjad.Offset(40, 1),
music_specifier=consort.tools.MusicSpecifier(
labels=('labeled',),
),
voice_name='A',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(25, 1),
stop_offset=abjad.Offset(35, 1),
voice_name='C',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(25, 1),
stop_offset=abjad.Offset(50, 1),
music_specifier=consort.tools.MusicSpecifier(
labels=('labeled',),
),
voice_name='B',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(35, 1),
stop_offset=abjad.Offset(50, 1),
voice_name='C',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(60, 1),
stop_offset=abjad.Offset(75, 1),
voice_name='C',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(60, 1),
stop_offset=abjad.Offset(80, 1),
music_specifier=consort.tools.MusicSpecifier(
labels=('labeled',),
),
voice_name='A',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(65, 1),
stop_offset=abjad.Offset(100, 1),
voice_name='B',
),
]
)
''')
def test_DependentTimespanMaker_16():
music_specifiers = collections.OrderedDict([
('C', None),
])
target_timespan = abjad.Timespan(25, 75)
timespan_maker = consort.DependentTimespanMaker(
include_inner_starts=True,
include_inner_stops=True,
rotation_indices=(-1,),
labels=('no-such-label',),
voice_names=(
'A',
'B',
),
)
timespan_inventory = timespan_maker(
target_timespan=target_timespan,
music_specifiers=music_specifiers,
timespan_inventory=_make_timespan_inventory(),
)
assert format(timespan_inventory) == abjad.String.normalize(
r'''
abjad.TimespanList(
[
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(0, 1),
stop_offset=abjad.Offset(20, 1),
voice_name='A',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(20, 1),
stop_offset=abjad.Offset(40, 1),
music_specifier=consort.tools.MusicSpecifier(
labels=('labeled',),
),
voice_name='A',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(25, 1),
stop_offset=abjad.Offset(50, 1),
music_specifier=consort.tools.MusicSpecifier(
labels=('labeled',),
),
voice_name='B',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(60, 1),
stop_offset=abjad.Offset(80, 1),
music_specifier=consort.tools.MusicSpecifier(
labels=('labeled',),
),
voice_name='A',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(65, 1),
stop_offset=abjad.Offset(100, 1),
voice_name='B',
),
]
)
''')
def test_DependentTimespanMaker_17():
music_specifiers = collections.OrderedDict([
('C', None),
])
target_timespan = abjad.Timespan(0, 100)
timespan_maker = consort.DependentTimespanMaker(
voice_names=(
'A',
),
)
timespan_inventory = _make_timespan_inventory()
timespan_inventory.extend([
consort.SilentTimespan(40, 50, voice_name='A'),
consort.SilentTimespan(55, 60, voice_name='A'),
consort.SilentTimespan(80, 90, voice_name='A'),
])
timespan_inventory.sort()
assert format(timespan_inventory) == abjad.String.normalize(
r'''
abjad.TimespanList(
[
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(0, 1),
stop_offset=abjad.Offset(20, 1),
voice_name='A',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(20, 1),
stop_offset=abjad.Offset(40, 1),
music_specifier=consort.tools.MusicSpecifier(
labels=('labeled',),
),
voice_name='A',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(25, 1),
stop_offset=abjad.Offset(50, 1),
music_specifier=consort.tools.MusicSpecifier(
labels=('labeled',),
),
voice_name='B',
),
consort.tools.SilentTimespan(
start_offset=abjad.Offset(40, 1),
stop_offset=abjad.Offset(50, 1),
voice_name='A',
),
consort.tools.SilentTimespan(
start_offset=abjad.Offset(55, 1),
stop_offset=abjad.Offset(60, 1),
voice_name='A',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(60, 1),
stop_offset=abjad.Offset(80, 1),
music_specifier=consort.tools.MusicSpecifier(
labels=('labeled',),
),
voice_name='A',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(65, 1),
stop_offset=abjad.Offset(100, 1),
voice_name='B',
),
consort.tools.SilentTimespan(
start_offset=abjad.Offset(80, 1),
stop_offset=abjad.Offset(90, 1),
voice_name='A',
),
]
)
''')
timespan_inventory = timespan_maker(
target_timespan=target_timespan,
music_specifiers=music_specifiers,
timespan_inventory=timespan_inventory,
)
assert format(timespan_inventory) == abjad.String.normalize(
r'''
abjad.TimespanList(
[
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(0, 1),
stop_offset=abjad.Offset(20, 1),
voice_name='A',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(0, 1),
stop_offset=abjad.Offset(40, 1),
voice_name='C',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(20, 1),
stop_offset=abjad.Offset(40, 1),
music_specifier=consort.tools.MusicSpecifier(
labels=('labeled',),
),
voice_name='A',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(25, 1),
stop_offset=abjad.Offset(50, 1),
music_specifier=consort.tools.MusicSpecifier(
labels=('labeled',),
),
voice_name='B',
),
consort.tools.SilentTimespan(
start_offset=abjad.Offset(40, 1),
stop_offset=abjad.Offset(50, 1),
voice_name='A',
),
consort.tools.SilentTimespan(
start_offset=abjad.Offset(55, 1),
stop_offset=abjad.Offset(60, 1),
voice_name='A',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(60, 1),
stop_offset=abjad.Offset(80, 1),
music_specifier=consort.tools.MusicSpecifier(
labels=('labeled',),
),
voice_name='A',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(60, 1),
stop_offset=abjad.Offset(80, 1),
voice_name='C',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(65, 1),
stop_offset=abjad.Offset(100, 1),
voice_name='B',
),
consort.tools.SilentTimespan(
start_offset=abjad.Offset(80, 1),
stop_offset=abjad.Offset(90, 1),
voice_name='A',
),
]
)
''')
def test_DependentTimespanMaker_18():
music_specifiers = collections.OrderedDict([
('C', None),
])
target_timespan = abjad.Timespan(0, 100)
timespan_maker = consort.DependentTimespanMaker(
padding=1,
voice_names=(
'A',
),
)
timespan_inventory = _make_timespan_inventory()
timespan_inventory.extend([
consort.SilentTimespan(40, 50, voice_name='A'),
consort.SilentTimespan(55, 60, voice_name='A'),
consort.SilentTimespan(80, 90, voice_name='A'),
])
timespan_inventory.sort()
timespan_inventory = timespan_maker(
target_timespan=target_timespan,
music_specifiers=music_specifiers,
timespan_inventory=timespan_inventory,
)
assert format(timespan_inventory) == abjad.String.normalize(
r'''
abjad.TimespanList(
[
consort.tools.SilentTimespan(
start_offset=abjad.Offset(-1, 1),
stop_offset=abjad.Offset(0, 1),
voice_name='C',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(0, 1),
stop_offset=abjad.Offset(20, 1),
voice_name='A',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(0, 1),
stop_offset=abjad.Offset(40, 1),
voice_name='C',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(20, 1),
stop_offset=abjad.Offset(40, 1),
music_specifier=consort.tools.MusicSpecifier(
labels=('labeled',),
),
voice_name='A',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(25, 1),
stop_offset=abjad.Offset(50, 1),
music_specifier=consort.tools.MusicSpecifier(
labels=('labeled',),
),
voice_name='B',
),
consort.tools.SilentTimespan(
start_offset=abjad.Offset(40, 1),
stop_offset=abjad.Offset(41, 1),
voice_name='C',
),
consort.tools.SilentTimespan(
start_offset=abjad.Offset(40, 1),
stop_offset=abjad.Offset(50, 1),
voice_name='A',
),
consort.tools.SilentTimespan(
start_offset=abjad.Offset(55, 1),
stop_offset=abjad.Offset(60, 1),
voice_name='A',
),
consort.tools.SilentTimespan(
start_offset=abjad.Offset(59, 1),
stop_offset=abjad.Offset(60, 1),
voice_name='C',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(60, 1),
stop_offset=abjad.Offset(80, 1),
music_specifier=consort.tools.MusicSpecifier(
labels=('labeled',),
),
voice_name='A',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(60, 1),
stop_offset=abjad.Offset(80, 1),
voice_name='C',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(65, 1),
stop_offset=abjad.Offset(100, 1),
voice_name='B',
),
consort.tools.SilentTimespan(
start_offset=abjad.Offset(80, 1),
stop_offset=abjad.Offset(81, 1),
voice_name='C',
),
consort.tools.SilentTimespan(
start_offset=abjad.Offset(80, 1),
stop_offset=abjad.Offset(90, 1),
voice_name='A',
),
]
)
''')
def test_DependentTimespanMaker_19():
music_specifiers = collections.OrderedDict([
('C', None),
])
target_timespan = abjad.Timespan(0, 100)
timespan_maker = consort.DependentTimespanMaker(
hysteresis=10,
voice_names=['A'],
)
timespan_inventory = abjad.TimespanList([
consort.PerformedTimespan(0, 10, voice_name='A'),
consort.PerformedTimespan(5, 15, voice_name='A'),
consort.PerformedTimespan(20, 30, voice_name='A'),
consort.PerformedTimespan(40, 50, voice_name='A'),
consort.PerformedTimespan(65, 75, voice_name='A'),
consort.PerformedTimespan(80, 100, voice_name='A'),
])
timespan_inventory = timespan_maker(
target_timespan=target_timespan,
music_specifiers=music_specifiers,
timespan_inventory=timespan_inventory,
)
assert format(timespan_inventory) == abjad.String.normalize(
r'''
abjad.TimespanList(
[
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(0, 1),
stop_offset=abjad.Offset(10, 1),
voice_name='A',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(0, 1),
stop_offset=abjad.Offset(30, 1),
voice_name='C',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(5, 1),
stop_offset=abjad.Offset(15, 1),
voice_name='A',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(20, 1),
stop_offset=abjad.Offset(30, 1),
voice_name='A',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(40, 1),
stop_offset=abjad.Offset(50, 1),
voice_name='A',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(40, 1),
stop_offset=abjad.Offset(50, 1),
voice_name='C',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(65, 1),
stop_offset=abjad.Offset(75, 1),
voice_name='A',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(65, 1),
stop_offset=abjad.Offset(100, 1),
voice_name='C',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(80, 1),
stop_offset=abjad.Offset(100, 1),
voice_name='A',
),
]
)
''')
| mit | -452,016,342,722,227,500 | 36.534076 | 65 | 0.441696 | false |
lscsoft/ligotimegps | docs/conf.py | 1 | 2535 | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# http://www.sphinx-doc.org/en/master/config
import os.path
from urllib.request import urlretrieve
from ligotimegps import __version__ as VERSION
# -- Project information -----------------------------------------------------
project = 'ligotimegps'
copyright = "2010-2016, Kipp Cannon; 2017-2018 Duncan Macleod"
author = 'Duncan Macleod'
# The full version, including alpha/beta/rc tags
release = VERSION
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.intersphinx',
'sphinx.ext.napoleon',
'sphinx_automodapi.automodapi',
'sphinx_tabs.tabs',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'monokai'
# The reST default role (used for this markup: `text`) to use for all
# documents.
default_role = 'obj'
# Default file type
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# Epilogue
rst_epilog = """
.. |lal.LIGOTimeGPS| replace:: `lal.LIGOTimeGPS`
.. _lal.LIGOTimeGPS: https://lscsoft.docs.ligo.org/lalsuite/lal/struct_l_i_g_o_time_g_p_s.html
"""
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Extensions --------------------------------------------------------------
# Intersphinx directory
intersphinx_mapping = {
'https://docs.python.org/': None, # python
}
# Don't inherit in automodapi
numpydoc_show_class_members = False
automodapi_inherited_members = False
| gpl-3.0 | 5,281,640,337,460,886,000 | 28.476744 | 94 | 0.668245 | false |
patricksnape/imageio | tests/test_freeimage.py | 1 | 16073 | """ Tests for imageio's freeimage plugin
"""
import os
import sys
import numpy as np
from pytest import raises, skip
from imageio.testing import run_tests_if_main, get_test_dir, need_internet
import imageio
from imageio import core
from imageio.core import get_remote_file, IS_PYPY
test_dir = get_test_dir()
# Create test images LUMINANCE
im0 = np.zeros((42, 32), np.uint8)
im0[:16, :] = 200
im1 = np.zeros((42, 32, 1), np.uint8)
im1[:16, :] = 200
# Create test image RGB
im3 = np.zeros((42, 32, 3), np.uint8)
im3[:16, :, 0] = 250
im3[:, :16, 1] = 200
im3[50:, :16, 2] = 100
# Create test image RGBA
im4 = np.zeros((42, 32, 4), np.uint8)
im4[:16, :, 0] = 250
im4[:, :16, 1] = 200
im4[50:, :16, 2] = 100
im4[:, :, 3] = 255
im4[20:, :, 3] = 120
fnamebase = os.path.join(test_dir, 'test')
def get_ref_im(colors, crop, float):
""" Get reference image with
* colors: 0, 1, 3, 4
* cropping: 0-> none, 1-> crop, 2-> crop with non-contiguous data
* float: False, True
"""
assert colors in (0, 1, 3, 4)
assert crop in (0, 1, 2)
assert float in (False, True)
rim = [im0, im1, None, im3, im4][colors]
if float:
rim = rim.astype(np.float32) / 255.0
if crop == 1:
rim = rim[:-1, :-1].copy()
elif crop == 2:
rim = rim[:-1, :-1]
return rim
def assert_close(im1, im2, tol=0.0):
if im1.ndim == 3 and im1.shape[-1] == 1:
im1 = im1.reshape(im1.shape[:-1])
if im2.ndim == 3 and im2.shape[-1] == 1:
im2 = im2.reshape(im2.shape[:-1])
assert im1.shape == im2.shape
diff = im1.astype('float32') - im2.astype('float32')
diff[15:17, :] = 0 # Mask edge artifacts
diff[:, 15:17] = 0
assert np.abs(diff).max() <= tol
# import visvis as vv
# vv.subplot(121); vv.imshow(im1); vv.subplot(122); vv.imshow(im2)
def test_get_ref_im():
""" A test for our function to get test images """
crop = 0
for f in (False, True):
for colors in (0, 1, 3, 4):
            rim = get_ref_im(colors, crop, f)
assert rim.flags.c_contiguous is True
assert rim.shape[:2] == (42, 32)
crop = 1
for f in (False, True):
for colors in (0, 1, 3, 4):
            rim = get_ref_im(colors, crop, f)
assert rim.flags.c_contiguous is True
assert rim.shape[:2] == (41, 31)
if IS_PYPY:
return 'PYPY cannot have non-contiguous data'
crop = 2
for f in (False, True):
for colors in (0, 1, 3, 4):
            rim = get_ref_im(colors, crop, f)
assert rim.flags.c_contiguous is False
assert rim.shape[:2] == (41, 31)
def test_get_fi_lib():
need_internet()
from imageio.plugins._freeimage import get_freeimage_lib
lib = get_freeimage_lib()
assert os.path.isfile(lib)
def test_freeimage_format():
# Format
F = imageio.formats['PNG']
# Reader
R = F.get_reader(core.Request('chelsea.png', 'ri'))
assert len(R) == 1
assert isinstance(R.get_meta_data(), dict)
assert isinstance(R.get_meta_data(0), dict)
raises(IndexError, R.get_data, 2)
raises(IndexError, R.get_meta_data, 2)
# Writer
W = F.get_writer(core.Request(fnamebase + '.png', 'wi'))
W.append_data(im0)
W.set_meta_data({'foo': 3})
raises(RuntimeError, W.append_data, im0)
def test_freeimage_lib():
fi = imageio.plugins.freeimage.fi
# Error messages
imageio.plugins._freeimage.fi._messages.append('this is a test')
assert imageio.plugins._freeimage.fi.get_output_log()
imageio.plugins._freeimage.fi._show_any_warnings()
imageio.plugins._freeimage.fi._get_error_message()
# Test getfif
raises(ValueError, fi.getFIF, 'foo.png', 'x') # mode must be r or w
raises(ValueError, fi.getFIF, 'foo.notvalid', 'w') # invalid ext
raises(ValueError, fi.getFIF, 'foo.iff', 'w') # We cannot write iff
def test_png():
for float in (False, True):
for crop in (0, 1, 2):
for colors in (0, 1, 3, 4):
fname = fnamebase + '%i.%i.%i.png' % (float, crop, colors)
rim = get_ref_im(colors, crop, float)
imageio.imsave(fname, rim)
im = imageio.imread(fname)
mul = 255 if float else 1
assert_close(rim * mul, im, 0.1) # lossless
# Run exact same test, but now in pypy backup mode
try:
imageio.plugins._freeimage.TEST_NUMPY_NO_STRIDES = True
for float in (False, True):
for crop in (0, 1, 2):
for colors in (0, 1, 3, 4):
fname = fnamebase + '%i.%i.%i.png' % (float, crop, colors)
rim = get_ref_im(colors, crop, float)
imageio.imsave(fname, rim)
im = imageio.imread(fname)
mul = 255 if float else 1
assert_close(rim * mul, im, 0.1) # lossless
finally:
imageio.plugins._freeimage.TEST_NUMPY_NO_STRIDES = False
# Parameters
im = imageio.imread('chelsea.png', ignoregamma=True)
imageio.imsave(fnamebase + '.png', im, interlaced=True)
# Parameter fail
raises(TypeError, imageio.imread, 'chelsea.png', notavalidkwarg=True)
raises(TypeError, imageio.imsave, fnamebase + '.png', im, notavalidk=True)
# Compression
imageio.imsave(fnamebase + '1.png', im, compression=0)
imageio.imsave(fnamebase + '2.png', im, compression=9)
s1 = os.stat(fnamebase + '1.png').st_size
s2 = os.stat(fnamebase + '2.png').st_size
assert s2 < s1
# Fail
raises(ValueError, imageio.imsave, fnamebase + '.png', im, compression=12)
# Quantize
if sys.platform.startswith('darwin'):
return # quantization segfaults on my osx VM
imageio.imsave(fnamebase + '1.png', im, quantize=256)
imageio.imsave(fnamebase + '2.png', im, quantize=4)
im = imageio.imread(fnamebase + '2.png') # touch palette read code
s1 = os.stat(fnamebase + '1.png').st_size
s2 = os.stat(fnamebase + '2.png').st_size
assert s1 > s2
# Fail
fname = fnamebase + '1.png'
raises(ValueError, imageio.imsave, fname, im[:, :, :3], quantize=300)
raises(ValueError, imageio.imsave, fname, im[:, :, 0], quantize=100)
def test_png_dtypes():
# See issue #44
# Two images, one 0-255, one 0-200
im1 = np.zeros((100, 100, 3), dtype='uint8')
im2 = np.zeros((100, 100, 3), dtype='uint8')
im1[20:80, 20:80, :] = 255
im2[20:80, 20:80, :] = 200
fname = fnamebase + '.dtype.png'
# uint8
imageio.imsave(fname, im1)
assert_close(im1, imageio.imread(fname))
imageio.imsave(fname, im2)
assert_close(im2, imageio.imread(fname))
# float scaled
imageio.imsave(fname, im1 / 255.0)
assert_close(im1, imageio.imread(fname))
imageio.imsave(fname, im2 / 255.0)
assert_close(im2, imageio.imread(fname))
# float not scaled
imageio.imsave(fname, im1 * 1.0)
assert_close(im1, imageio.imread(fname))
imageio.imsave(fname, im2 * 1.0)
assert_close(im1, imageio.imread(fname)) # scaled
# int16
imageio.imsave(fname, im1.astype('int16'))
assert_close(im1, imageio.imread(fname))
imageio.imsave(fname, im2.astype('int16'))
assert_close(im1, imageio.imread(fname)) # scaled
def test_jpg():
for float in (False, True):
for crop in (0, 1, 2):
for colors in (0, 1, 3):
fname = fnamebase + '%i.%i.%i.jpg' % (float, crop, colors)
rim = get_ref_im(colors, crop, float)
imageio.imsave(fname, rim)
im = imageio.imread(fname)
mul = 255 if float else 1
assert_close(rim * mul, im, 1.1) # lossy
# No alpha in JPEG
raises(Exception, imageio.imsave, fname, im4)
# Parameters
imageio.imsave(fnamebase + '.jpg', im3, progressive=True, optimize=True,
baseline=True)
# Parameter fail
raises(TypeError, imageio.imread, fnamebase + '.jpg', notavalidkwarg=True)
raises(TypeError, imageio.imsave, fnamebase + '.jpg', im, notavalidk=True)
# Compression
imageio.imsave(fnamebase + '1.jpg', im3, quality=10)
imageio.imsave(fnamebase + '2.jpg', im3, quality=90)
s1 = os.stat(fnamebase + '1.jpg').st_size
s2 = os.stat(fnamebase + '2.jpg').st_size
assert s2 > s1
raises(ValueError, imageio.imsave, fnamebase + '.jpg', im, quality=120)
def test_jpg_more():
need_internet()
# Test broken JPEG
fname = fnamebase + '_broken.jpg'
open(fname, 'wb').write(b'this is not an image')
raises(Exception, imageio.imread, fname)
#
bb = imageio.imsave(imageio.RETURN_BYTES, get_ref_im(3, 0, 0), 'JPEG')
with open(fname, 'wb') as f:
f.write(bb[:400])
f.write(b' ')
f.write(bb[400:])
raises(Exception, imageio.imread, fname)
# Test EXIF stuff
fname = get_remote_file('images/rommel.jpg')
im = imageio.imread(fname)
assert im.shape[0] > im.shape[1]
im = imageio.imread(fname, exifrotate=False)
assert im.shape[0] < im.shape[1]
im = imageio.imread(fname, exifrotate=2) # Rotation in Python
assert im.shape[0] > im.shape[1]
# Write the jpg and check that exif data is maintained
if sys.platform.startswith('darwin'):
return # segfaults on my osx VM, why?
imageio.imsave(fnamebase + 'rommel.jpg', im)
im = imageio.imread(fname)
assert im.meta.EXIF_MAIN
def test_bmp():
for float in (False, True):
for crop in (0, 1, 2):
for colors in (0, 1, 3, 4):
fname = fnamebase + '%i.%i.%i.bmp' % (float, crop, colors)
rim = get_ref_im(colors, crop, float)
imageio.imsave(fname, rim)
im = imageio.imread(fname)
mul = 255 if float else 1
assert_close(rim * mul, im, 0.1) # lossless
# Compression
imageio.imsave(fnamebase + '1.bmp', im3, compression=False)
imageio.imsave(fnamebase + '2.bmp', im3, compression=True)
s1 = os.stat(fnamebase + '1.bmp').st_size
s2 = os.stat(fnamebase + '2.bmp').st_size
assert s1 + s2 # todo: bug in FreeImage? assert s1 < s2
# Parameter fail
raises(TypeError, imageio.imread, fnamebase + '1.bmp', notavalidkwarg=True)
raises(TypeError, imageio.imsave, fnamebase + '1.bmp', im, notavalidk=True)
def test_gif():
# The not-animated gif
for float in (False, True):
for crop in (0, 1, 2):
for colors in (0, 3, 4):
if colors > 1 and sys.platform.startswith('darwin'):
continue # quantize fails, see also png
fname = fnamebase + '%i.%i.%i.gif' % (float, crop, colors)
rim = get_ref_im(colors, crop, float)
imageio.imsave(fname, rim)
im = imageio.imread(fname)
mul = 255 if float else 1
if colors in (0, 1):
im = im[:, :, 0]
else:
im = im[:, :, :3]
rim = rim[:, :, :3]
assert_close(rim * mul, im, 1.1) # lossless
# Parameter fail
raises(TypeError, imageio.imread, fname, notavalidkwarg=True)
raises(TypeError, imageio.imsave, fnamebase + '1.gif', im, notavalidk=True)
def test_animated_gif():
if sys.platform.startswith('darwin'):
skip('On OSX quantization of freeimage is unstable')
# Get images
im = get_ref_im(4, 0, 0)
ims = []
for i in range(10):
im = im.copy()
im[:, -5:, 0] = i * 20
ims.append(im)
# Store - animated GIF always poops out RGB
for float in (False, True):
for colors in (3, 4):
ims1 = ims[:]
if float:
ims1 = [x.astype(np.float32) / 256 for x in ims1]
ims1 = [x[:, :, :colors] for x in ims1]
fname = fnamebase + '.animated.%i.gif' % colors
imageio.mimsave(fname, ims1, duration=0.2)
# Retrieve
ims2 = imageio.mimread(fname)
ims1 = [x[:, :, :3] for x in ims] # fresh ref
    ims2 = [x[:, :, :3] for x in ims2] # discard alpha
for im1, im2 in zip(ims1, ims2):
assert_close(im1, im2, 1.1)
# We can also store grayscale
fname = fnamebase + '.animated.%i.gif' % 1
imageio.mimsave(fname, [x[:, :, 0] for x in ims], duration=0.2)
imageio.mimsave(fname, [x[:, :, :1] for x in ims], duration=0.2)
    # Irregular duration. You probably want to check this manually (I did)
duration = [0.1 for i in ims]
for i in [2, 5, 7]:
duration[i] = 0.5
imageio.mimsave(fnamebase + '.animated_irr.gif', ims, duration=duration)
# Other parameters
imageio.mimsave(fnamebase + '.animated.loop2.gif', ims, loop=2, fps=20)
R = imageio.read(fnamebase + '.animated.loop2.gif')
W = imageio.save(fnamebase + '.animated.palettes100.gif', palettesize=100)
assert W._palettesize == 128
# Fail
raises(IndexError, R.get_meta_data, -1)
raises(ValueError, imageio.mimsave, fname, ims, palettesize=300)
raises(ValueError, imageio.mimsave, fname, ims, quantizer='foo')
raises(ValueError, imageio.mimsave, fname, ims, duration='foo')
    # Add one duplicate image to ims to touch the subrectangle code with no change
ims.append(ims[-1])
# Test subrectangles
imageio.mimsave(fnamebase + '.subno.gif', ims, subrectangles=False)
imageio.mimsave(fnamebase + '.subyes.gif', ims, subrectangles=True)
s1 = os.stat(fnamebase + '.subno.gif').st_size
s2 = os.stat(fnamebase + '.subyes.gif').st_size
assert s2 < s1
    # Meta (dummy, because always {})
assert isinstance(imageio.read(fname).get_meta_data(), dict)
def test_ico():
if os.getenv('TRAVIS', '') == 'true' and sys.version_info >= (3, 4):
skip('Freeimage ico is unstable for this Travis build')
for float in (False, True):
for crop in (0, ):
for colors in (1, 3, 4):
fname = fnamebase + '%i.%i.%i.ico' % (float, crop, colors)
rim = get_ref_im(colors, crop, float)
rim = rim[:32, :32] # ico needs nice size
imageio.imsave(fname, rim)
im = imageio.imread(fname)
mul = 255 if float else 1
assert_close(rim * mul, im, 0.1) # lossless
# Meta data
R = imageio.read(fnamebase + '0.0.1.ico')
assert isinstance(R.get_meta_data(0), dict)
    assert isinstance(R.get_meta_data(None), dict) # But this prints a warning
R.close()
writer = imageio.save(fnamebase + 'I.ico')
writer.set_meta_data({})
writer.close()
# Parameters. Note that with makealpha, RGBA images are read in incorrectly
im = imageio.imread(fnamebase + '0.0.1.ico', makealpha=True)
assert im.ndim == 3 and im.shape[-1] == 4
# Parameter fail
raises(TypeError, imageio.imread, fname, notavalidkwarg=True)
raises(TypeError, imageio.imsave, fnamebase + '1.gif', im, notavalidk=True)
if sys.platform.startswith('win'): # issue #21
skip('Windows has a known issue with multi-icon files')
# Multiple images
im = get_ref_im(4, 0, 0)[:32, :32]
ims = [np.repeat(np.repeat(im, i, 1), i, 0) for i in (1, 2)] # SegF on win
ims = im, np.column_stack((im, im)), np.row_stack((im, im)) # error on win
imageio.mimsave(fnamebase + 'I2.ico', ims)
ims2 = imageio.mimread(fnamebase + 'I2.ico')
for im1, im2 in zip(ims, ims2):
assert_close(im1, im2, 0.1)
def test_mng():
pass # MNG seems broken in FreeImage
#ims = imageio.imread(get_remote_file('images/mngexample.mng'))
def test_other():
# Cannot save float
im = get_ref_im(3, 0, 1)
raises(Exception, imageio.imsave, fnamebase + '.jng', im, 'JNG')
if __name__ == '__main__':
#test_animated_gif()
run_tests_if_main()
| bsd-2-clause | 7,109,370,580,232,880,000 | 32.696017 | 79 | 0.577117 | false |
caspervg/geontology | tests/test.py | 1 | 1981 | from geontology import GeoOntology
ont = GeoOntology("tests/geo_ontology.ttl", frmt='n3')
ont.add_geo_column("http://linkedgeodata.org/ontology/OilPlatform", name="OilPlatformLocation", field1="lat",
field2="lon", type='PointFeature', desc="Oil platforms")
ont.add_info_column("http://linkedgeodata.org/ontology/maxspeed", name="MaxSpeed", field="speed", type="integer",
desc="Maximum allowed speed")
serialized = ont.serialize()
assert """col:OilPlatformLocation a :DuoGeoColumn ;
prop:defines <http://linkedgeodata.org/ontology/OilPlatform> ;
prop:description "Oil platforms" ;
prop:field1 "lat" ;
prop:field2 "lon" ;
prop:name "oilplatformlocation" ;
prop:type "http://move.ugent.be/geodata/ontology/PointFeature" .""" in serialized
assert """col:MaxSpeed a :InfoColumn ;
prop:defines <http://linkedgeodata.org/ontology/maxspeed> ;
prop:description "Maximum allowed speed" ;
prop:field "speed" ;
prop:name "maxspeed" ;
prop:type "integer" .""" in serialized
ont.add_info_column("http://linkedgeodata.org/ontology/maxspeed", name="MaxSpeed2", field="speed1", type="integer",
desc="Maximum allowed speed 2", unit="http://purl.obolibrary.org/obo/UO_0000094")
serialized = ont.serialize()
assert """col:MaxSpeed2 a :InfoColumn ;
prop:defines <http://linkedgeodata.org/ontology/maxspeed> ;
prop:description "Maximum allowed speed 2" ;
prop:field "speed1" ;
prop:name "maxspeed2" ;
prop:type "integer" ;
prop:unit [ a <http://purl.obolibrary.org/obo/UO_0000094> ] .""" in serialized
ont.set_fields('Accuracy', field='my_acc_field')
serialized = ont.serialize()
assert """col:Accuracy a :AccuracyColumn ;
prop:defines <http://sensorml.com/ont/swe/property/PositionalAccuracy> ;
prop:description "Accuracy of the measurement at this point" ;
prop:field "my_acc_field" ;
prop:name "accuracy" ;
prop:type "real" .""" in serialized | mit | -8,410,615,971,626,329,000 | 40.291667 | 115 | 0.688541 | false |
baohaojun/ibus-sdim | engine/table.py | 1 | 10818 | # -*- coding: utf-8 -*-
# vim:et sts=4 sw=4
#
# ibus-sdim - The Tables engine for IBus
#
# Copyright (c) 2008-2009 Yu Yuwei <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# $Id: $
#
__all__ = (
"tabengine",
)
def _str_percent_decode(str):
return str.replace("%20", " ").replace("%25", "%")
import os
from gi import require_version
require_version('IBus', '1.0')
from gi.repository import IBus
import keysyms
import re
from gettext import dgettext
_ = lambda a : dgettext ("ibus-sdim", a)
N_ = lambda a : a
import dbus
import socket
import errno
import traceback
class KeyEvent:
all_mods = [
'', #no modifier
'A', # alt
'AC',# alt ctrl
'ACS', #alt ctrl shift
'AS',
'C',
'CS',
'S'
]
def __init__(self, keyval, is_press, state):
self.code = keyval
self.mask = state
self.name = ''
if not is_press:
self.mask |= IBus.ModifierType.RELEASE_MASK
return
try:
if self.code < 0x80:
self.name = chr(self.code)
if self.name == ' ':
self.name = 'space'
else:
self.mask &= ~IBus.ModifierType.SHIFT_MASK
else:
self.name = keysyms.keycode_to_name(self.code).lower()
except:
print("bad key: code is %x\n", self.code)
traceback.print_exc()
self.name = keysyms.keycode_to_name(self.code).lower()
if self.name in ("control_l",
"control_r",
"alt_l",
"alt_r",
"shift_l",
"shift_r",
):
self.name = ""
return
mods = ''
if self.mask & IBus.ModifierType.MOD1_MASK:
mods += 'A'
if self.mask & IBus.ModifierType.CONTROL_MASK:
mods += 'C'
if self.mask & IBus.ModifierType.SHIFT_MASK:
mods += 'S'
if mods != '':
self.name = mods + ' ' + self.name
def __str__(self):
return self.name
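# Added illustration (not part of the original engine): a hedged sketch of the
# key names produced by KeyEvent above. The key codes and modifier masks used
# here are assumptions chosen for demonstration.
def _example_key_names():
    # ASCII 'a' (0x61) pressed while Control is held -> "C a"
    ctrl_a = KeyEvent(0x61, True, IBus.ModifierType.CONTROL_MASK)
    # Space pressed while Alt and Shift are held -> "AS space"
    alt_shift_space = KeyEvent(0x20, True,
                               IBus.ModifierType.MOD1_MASK |
                               IBus.ModifierType.SHIFT_MASK)
    return str(ctrl_a), str(alt_shift_space)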
class tabengine (IBus.Engine):
'''The IM Engine for Tables'''
_page_size = 10
def do_connect(self):
if self.sock:
self.do_disconnect()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
self.sock.connect(("localhost", 31415))
except socket.error as serr:
if serr.errno != errno.ECONNREFUSED:
raise serr
print("Failed to connect to sdim server\n")
os.system("~/system-config/gcode/ime-py/ime-server.py >/dev/null 2>&1&")
import time
for i in range(1, 30):
time.sleep(.1)
try:
self.sock.connect(("localhost", 31415))
break
except socket.error as serr:
if serr.errno != errno.ECONNREFUSED:
raise serr
print("Still not connected to sdim server @" + str(i), "\n")
self.sock = self.sock.makefile("rwb", 0)
def do_disconnect(self):
if self.sock:
self.sock.close()
self.sock = None
def __init__ (self, bus, obj_path):
print('obj_path is', obj_path, "\n")
super(tabengine,self).__init__ (connection=bus.get_connection(),
object_path=obj_path)
self._bus = bus
self.sock = None
self.do_connect()
self.clear_data()
self._lookup_table = IBus.LookupTable ()
self._lookup_table.set_page_size(tabengine._page_size)
self._name = 'sdim'
print('name is', self._name, "\n")
self._config_section = "engine/%s" % self._name
# config module
self._config = self._bus.get_config ()
self._on = True
self.reset ()
def clear_data(self):
self._preedit_str = ''
self._cands = []
self._aux_str = ''
self._commit_str = ''
self._cands_str = ''
self._cand_idx = '0'
self._active = ''
def reset (self):
self._update_ui ()
def do_destroy(self):
self.reset ()
self.do_focus_out ()
self.do_disconnect()
super(tabengine,self).destroy()
def _update_preedit (self):
'''Update Preedit String in UI'''
_str = self._preedit_str
if _str == '':
super(tabengine, self).update_preedit_text(IBus.Text.new_from_string(''), 0, False)
else:
# because ibus now can only insert preedit into txt, so...
attrs = IBus.AttrList()
attrs.append(IBus.attr_underline_new(IBus.AttrUnderline.SINGLE, 0, len(_str)))
text = IBus.Text.new_from_string(_str)
i = 0
while attrs.get(i) != None:
attr = attrs.get(i)
text.append_attribute(attr.get_attr_type(),
attr.get_value(),
attr.get_start_index(),
attr.get_end_index())
i += 1
super(tabengine, self).update_preedit_text(text, len(_str), True)
def _update_aux (self):
'''Update Aux String in UI'''
_aux = self._aux_str
if _aux:
attrs = IBus.AttrList()
attrs.append(IBus.attr_foreground_new(0x9515b5, 0, len(_aux)))
text = IBus.Text.new_from_string(_aux)
i = 0
while attrs.get(i) != None:
attr = attrs.get(i)
text.append_attribute(attr.get_attr_type(),
attr.get_value(),
attr.get_start_index(),
attr.get_end_index())
i += 1
super(tabengine, self).update_auxiliary_text(text, True)
else:
self.hide_auxiliary_text()
def _update_lookup_table (self):
        '''Update Lookup Table in UI'''
if self._cands_str == '':
self.hide_lookup_table()
return
_cands = self._cands_str.split()
_cands = [_str_percent_decode(str) for str in _cands]
if hasattr(self._lookup_table, "clean"):
self._lookup_table.clean()
else:
self._lookup_table.clear()
for cand in _cands:
self._lookup_table.append_candidate(IBus.Text.new_from_string(cand))
index = int(self._cand_idx) % 10
self._lookup_table.set_cursor_pos(index)
if hasattr(self._lookup_table, 'show_cursor'):
self._lookup_table.show_cursor(True)
else:
self._lookup_table.set_cursor_visible(True)
self.update_lookup_table ( self._lookup_table, True)
def _update_ui (self):
'''Update User Interface'''
self._update_lookup_table ()
self._update_preedit ()
self._update_aux ()
self.commit_string()
def commit_string (self):
if self._commit_str == '':
return
commit = self._commit_str
self._commit_str = ''
super(tabengine,self).commit_text(IBus.Text.new_from_string(commit))
def do_process_key_event(self, keyval, keycode, state):
'''Process Key Events
        Key Events include Key Press and Key Release;
        a state without the IBus.ModifierType.RELEASE_MASK bit set means Key Pressed.
'''
key = KeyEvent(keyval, state & IBus.ModifierType.RELEASE_MASK == 0, state)
# ignore NumLock mask
key.mask &= ~IBus.ModifierType.MOD2_MASK
result = self._process_key_event (key)
return result
def _process_key_event (self, key):
'''Internal method to process key event'''
key = str(key)
if key == '':
return False
if self._preedit_str == '' and len(key) != 1:
return False
self._really_process_key(key)
self._update_ui()
return True
def _really_process_key (self, key):
try:
self.sock.write(("keyed " + key + "\n").encode('UTF-8'))
except:
traceback.print_exc()
self.do_connect()
return
self.clear_data()
while True:
try:
line = self.sock.readline().decode('UTF-8')
except:
self.do_connect()
self._aux_str = "Error with sock connection"
break
if not line:
break
line = line[:-1]
if line.find('commit: ') == 0:
self._commit_str = line[len('commit: '):]
elif line.find('hint: ') == 0:
self._aux_str = line[len('hint: '):]
elif line.find('comp: ') == 0:
self._preedit_str = line[len('comp: '):]
elif line.find('cands: ') == 0:
self._cands_str = line[len('cands: '):]
elif line.find('cand_index: ') == 0:
self._cand_idx = line[len('cand_index: '):]
elif line.find('active: ') == 0:
self._active = line[len('active: '):]
elif line == "end:":
break
else:
self._aux_str = line
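    # Added note (hedged, not part of the original engine): a hypothetical
    # exchange with the sdim server, as parsed by _really_process_key above.
    #   request:  b"keyed C a\n"
    #   reply:    "comp: ni"          (preedit string)
    #             "cands: ni3 ni2"    (percent-encoded candidate list)
    #             "cand_index: 0"     (highlighted candidate)
    #             "hint: example"     (auxiliary text)
    #             "end:"              (terminates the reply)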
def do_focus_in (self):
if self._on:
self._update_ui ()
def do_focus_out (self):
pass
def do_enable (self):
self._on = True
if not self.sock:
self.do_connect()
self.do_focus_in()
def do_disable (self):
self.reset()
self.do_disconnect()
self._on = False
# for further implementation :)
@classmethod
def CONFIG_VALUE_CHANGED(cls, bus, section, name, value):
config = bus.get_config()
if section != self._config_section:
return
@classmethod
def CONFIG_RELOADED(cls, bus):
config = bus.get_config()
if section != self._config_section:
return
| lgpl-2.1 | 8,023,913,865,966,862,000 | 29.732955 | 95 | 0.509983 | false |
Stargrazer82301/CAAPR | CAAPR/CAAPR_AstroMagic/PTS/pts/evolve/Crossovers.py | 1 | 22844 | #!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.evolve.crossovers In this module we have the genetic operators of crossover (or recombination)
# for each chromosome representation.
# -----------------------------------------------------------------
# Import standard modules
import math
# Import other evolve modules
import utils
import constants
# Import the relevant PTS classes and modules
from ..core.tools.random import prng
# -----------------------------------------------------------------
def G1DBinaryStringXSinglePoint(genome, **args):
"""
The crossover of 1D Binary String, Single Point
.. warning:: You can't use this crossover method for binary strings with length of 1.
"""
sister = None
brother = None
gMom = args["mom"]
gDad = args["dad"]
if len(gMom) == 1:
utils.raiseException("The Binary String have one element, can't use the Single Point Crossover method !", TypeError)
cut = prng.randint(1, len(gMom))
if args["count"] >= 1:
sister = gMom.clone()
sister.resetStats()
sister[cut:] = gDad[cut:]
if args["count"] == 2:
brother = gDad.clone()
brother.resetStats()
brother[cut:] = gMom[cut:]
return (sister, brother)
# -----------------------------------------------------------------
def G1DBinaryStringXTwoPoint(genome, **args):
"""
The 1D Binary String crossover, Two Point
.. warning:: You can't use this crossover method for binary strings with length of 1.
"""
sister = None
brother = None
gMom = args["mom"]
gDad = args["dad"]
if len(gMom) == 1:
utils.raiseException("The Binary String have one element, can't use the Two Point Crossover method !", TypeError)
cuts = [prng.randint(1, len(gMom)), prng.randint(1, len(gMom))]
if cuts[0] > cuts[1]:
utils.listSwapElement(cuts, 0, 1)
if args["count"] >= 1:
sister = gMom.clone()
sister.resetStats()
sister[cuts[0]:cuts[1]] = gDad[cuts[0]:cuts[1]]
if args["count"] == 2:
brother = gDad.clone()
brother.resetStats()
brother[cuts[0]:cuts[1]] = gMom[cuts[0]:cuts[1]]
return (sister, brother)
# -----------------------------------------------------------------
def G1DBinaryStringXUniform(genome, **args):
"""
The G1DList Uniform Crossover
"""
sister = None
brother = None
gMom = args["mom"]
gDad = args["dad"]
sister = gMom.clone()
brother = gDad.clone()
sister.resetStats()
brother.resetStats()
for i in xrange(len(gMom)):
if utils.randomFlipCoin(constants.CDefG1DBinaryStringUniformProb):
temp = sister[i]
sister[i] = brother[i]
brother[i] = temp
return (sister, brother)
# -----------------------------------------------------------------
def G1DListCrossoverSinglePoint(genome, **args):
"""
The crossover of G1DList, Single Point
.. warning:: You can't use this crossover method for lists with just one element.
"""
sister = None
brother = None
gMom = args["mom"]
gDad = args["dad"]
if len(gMom) == 1:
utils.raiseException("The 1D List have one element, can't use the Single Point Crossover method !", TypeError)
cut = prng.randint(1, len(gMom))
if args["count"] >= 1:
sister = gMom.clone()
sister.resetStats()
sister[cut:] = gDad[cut:]
if args["count"] == 2:
brother = gDad.clone()
brother.resetStats()
brother[cut:] = gMom[cut:]
return (sister, brother)
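# Added illustration (not part of the original module): crossover operators in
# this file are normally invoked by the GA engine, which passes the parents as
# the "mom"/"dad" keyword arguments; the first positional argument is unused by
# this operator. The parent objects here are assumed to be G1DList instances.
def _example_single_point(mom, dad):
    sister, brother = G1DListCrossoverSinglePoint(None, mom=mom, dad=dad, count=2)
    return sister, brother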
# -----------------------------------------------------------------
def G1DListCrossoverTwoPoint(genome, **args):
"""
The G1DList crossover, Two Point
.. warning:: You can't use this crossover method for lists with just one element.
"""
sister = None
brother = None
gMom = args["mom"]
gDad = args["dad"]
if len(gMom) == 1:
utils.raiseException("The 1D List have one element, can't use the Two Point Crossover method !", TypeError)
cuts = [prng.randint(1, len(gMom)), prng.randint(1, len(gMom))]
if cuts[0] > cuts[1]:
utils.listSwapElement(cuts, 0, 1)
if args["count"] >= 1:
sister = gMom.clone()
sister.resetStats()
sister[cuts[0]:cuts[1]] = gDad[cuts[0]:cuts[1]]
if args["count"] == 2:
brother = gDad.clone()
brother.resetStats()
brother[cuts[0]:cuts[1]] = gMom[cuts[0]:cuts[1]]
return (sister, brother)
# -----------------------------------------------------------------
def G1DListCrossoverUniform(genome, **args):
"""
The G1DList Uniform Crossover
Each gene has a 50% chance of being swapped between mom and dad
"""
sister = None
brother = None
gMom = args["mom"]
gDad = args["dad"]
sister = gMom.clone()
brother = gDad.clone()
sister.resetStats()
brother.resetStats()
for i in xrange(len(gMom)):
if utils.randomFlipCoin(constants.CDefG1DListCrossUniformProb):
temp = sister[i]
sister[i] = brother[i]
brother[i] = temp
return (sister, brother)
# -----------------------------------------------------------------
def G1DListCrossoverOX(genome, **args):
""" The OX Crossover for G1DList (order crossover) """
sister = None
brother = None
gMom = args["mom"]
gDad = args["dad"]
listSize = len(gMom)
c1, c2 = [prng.randint(1, len(gMom)), prng.randint(1, len(gMom))]
while c1 == c2:
c2 = prng.randint(1, len(gMom))
if c1 > c2:
h = c1
c1 = c2
c2 = h
if args["count"] >= 1:
sister = gMom.clone()
sister.resetStats()
P1 = [c for c in gMom[c2:] + gMom[:c2] if c not in gDad[c1:c2]]
sister.genomeList = P1[listSize - c2:] + gDad[c1:c2] + P1[:listSize - c2]
if args["count"] == 2:
brother = gDad.clone()
brother.resetStats()
P2 = [c for c in gDad[c2:] + gDad[:c2] if c not in gMom[c1:c2]]
brother.genomeList = P2[listSize - c2:] + gMom[c1:c2] + P2[:listSize - c2]
assert listSize == len(sister)
assert listSize == len(brother)
return (sister, brother)
# -----------------------------------------------------------------
def G1DListCrossoverEdge(genome, **args):
""" THe Edge Recombination crossover for G1DList (widely used for TSP problem)
See more information in the `Edge Recombination Operator <http://en.wikipedia.org/wiki/Edge_recombination_operator>`_
Wikipedia entry.
"""
gMom, sisterl = args["mom"], []
gDad, brotherl = args["dad"], []
mom_edges, dad_edges, merge_edges = utils.G1DListGetEdgesComposite(gMom, gDad)
for c, u in (sisterl, set(gMom)), (brotherl, set(gDad)):
curr = None
for i in xrange(len(gMom)):
curr = prng.choice(tuple(u)) if not curr else curr
c.append(curr)
u.remove(curr)
d = [v for v in merge_edges.get(curr, []) if v in u]
if d:
curr = prng.choice(d)
else:
s = [v for v in mom_edges.get(curr, []) if v in u]
s += [v for v in dad_edges.get(curr, []) if v in u]
curr = prng.choice(s) if s else None
sister = gMom.clone()
brother = gDad.clone()
sister.resetStats()
brother.resetStats()
sister.genomeList = sisterl
brother.genomeList = brotherl
return (sister, brother)
# -----------------------------------------------------------------
def G1DListCrossoverCutCrossfill(genome, **args):
"""
The crossover of G1DList, Cut and crossfill, for permutations
"""
sister = None
brother = None
gMom = args["mom"]
gDad = args["dad"]
    if len(gMom) == 1: utils.raiseException("The 1D List has only one element, can't use the Cut and Crossfill Crossover method !", TypeError)
cut = prng.randint(1, len(gMom))
if args["count"] >= 1:
sister = gMom.clone()
mother_part = gMom[0:cut]
sister.resetStats()
i = (len(sister) - cut)
x = 0
for v in gDad:
if v in mother_part:
continue
if x >= i:
break
sister[cut + x] = v
x += 1
if args["count"] == 2:
brother = gDad.clone()
father_part = gDad[0:cut]
brother.resetStats()
i = (len(brother) - cut)
x = 0
for v in gMom:
if v in father_part:
continue
if x >= i:
break
brother[cut + x] = v
x += 1
return (sister, brother)
# -----------------------------------------------------------------
def G1DListCrossoverRealSBX(genome, **args):
"""
    Experimental SBX Implementation - Follows the implementation in NSGA-II (Deb et al.)
Some implementation `reference <http://vision.ucsd.edu/~sagarwal/icannga.pdf>`_.
And another reference to the `Simulated Binary Crossover <http://www.mitpressjournals.org/doi/abs/10.1162/106365601750190406>`_.
    .. warning:: This crossover method is Data Type Dependent, which means that
                 it must be used for 1D genomes of real values.
"""
EPS = constants.CDefG1DListSBXEPS
# Crossover distribution index
eta_c = constants.CDefG1DListSBXEtac
gMom = args["mom"]
gDad = args["dad"]
# Get the variable bounds ('gDad' could have been used; but I love Mom:-))
lb = gMom.getParam("rangemin", constants.CDefRangeMin)
ub = gMom.getParam("rangemax", constants.CDefRangeMax)
sister = gMom.clone()
brother = gDad.clone()
sister.resetStats()
brother.resetStats()
for i in range(0, len(gMom)):
if math.fabs(gMom[i] - gDad[i]) > EPS:
if gMom[i] > gDad[i]:
#swap
temp = gMom[i]
gMom[i] = gDad[i]
gDad[i] = temp
            # random number between 0 and 1
u = prng.random_sample()
beta = 1.0 + 2 * (gMom[i] - lb) / (1.0 * (gDad[i] - gMom[i]))
alpha = 2.0 - beta ** (-(eta_c + 1.0))
if u <= (1.0 / alpha):
beta_q = (u * alpha) ** (1.0 / ((eta_c + 1.0) * 1.0))
else:
beta_q = (1.0 / (2.0 - u * alpha)) ** (1.0 / (1.0 * (eta_c + 1.0)))
brother[i] = 0.5 * ((gMom[i] + gDad[i]) - beta_q * (gDad[i] - gMom[i]))
beta = 1.0 + 2.0 * (ub - gDad[i]) / (1.0 * (gDad[i] - gMom[i]))
alpha = 2.0 - beta ** (-(eta_c + 1.0))
if u <= (1.0 / alpha):
beta_q = (u * alpha) ** (1.0 / ((eta_c + 1) * 1.0))
else:
beta_q = (1.0 / (2.0 - u * alpha)) ** (1.0 / (1.0 * (eta_c + 1.0)))
sister[i] = 0.5 * ((gMom[i] + gDad[i]) + beta_q * (gDad[i] - gMom[i]))
if brother[i] > ub:
brother[i] = ub
if brother[i] < lb:
brother[i] = lb
if sister[i] > ub:
sister[i] = ub
if sister[i] < lb:
sister[i] = lb
if prng.random_sample() > 0.5:
# Swap
temp = sister[i]
sister[i] = brother[i]
brother[i] = temp
else:
sister[i] = gMom[i]
brother[i] = gDad[i]
return (sister, brother)
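# Added illustration (not part of the original module): SBX works on real-valued
# genomes and reads the "rangemin"/"rangemax" genome parameters via getParam
# above (falling back to the library defaults), so the parents are assumed to be
# real-valued G1DList instances.
def _example_sbx(mom, dad):
    sister, brother = G1DListCrossoverRealSBX(None, mom=mom, dad=dad, count=2)
    return sister, brother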
# -----------------------------------------------------------------
def G2DListCrossoverUniform(genome, **args):
"""
The G2DList Uniform Crossover
"""
sister = None
brother = None
gMom = args["mom"]
gDad = args["dad"]
sister = gMom.clone()
brother = gDad.clone()
sister.resetStats()
brother.resetStats()
h, w = gMom.getSize()
for i in xrange(h):
for j in xrange(w):
if utils.randomFlipCoin(constants.CDefG2DListCrossUniformProb):
temp = sister.getItem(i, j)
sister.setItem(i, j, brother.getItem(i, j))
brother.setItem(i, j, temp)
return (sister, brother)
# -----------------------------------------------------------------
def G2DListCrossoverSingleVPoint(genome, **args):
""" The crossover of G2DList, Single Vertical Point """
sister = None
brother = None
gMom = args["mom"]
gDad = args["dad"]
cut = prng.randint(1, gMom.getWidth())
if args["count"] >= 1:
sister = gMom.clone()
sister.resetStats()
for i in xrange(sister.getHeight()):
sister[i][cut:] = gDad[i][cut:]
if args["count"] == 2:
brother = gDad.clone()
brother.resetStats()
for i in xrange(brother.getHeight()):
brother[i][cut:] = gMom[i][cut:]
return (sister, brother)
# -----------------------------------------------------------------
def G2DListCrossoverSingleHPoint(genome, **args):
""" The crossover of G2DList, Single Horizontal Point """
sister = None
brother = None
gMom = args["mom"]
gDad = args["dad"]
cut = prng.randint(1, gMom.getHeight())
if args["count"] >= 1:
sister = gMom.clone()
sister.resetStats()
for i in xrange(cut, sister.getHeight()):
sister[i][:] = gDad[i][:]
if args["count"] == 2:
brother = gDad.clone()
brother.resetStats()
        for i in xrange(cut, brother.getHeight()):
brother[i][:] = gMom[i][:]
return (sister, brother)
# -----------------------------------------------------------------
def G2DBinaryStringXUniform(genome, **args):
"""
The G2DBinaryString Uniform Crossover
.. versionadded:: 0.6
The *G2DBinaryStringXUniform* function
"""
sister = None
brother = None
gMom = args["mom"]
gDad = args["dad"]
sister = gMom.clone()
brother = gDad.clone()
sister.resetStats()
brother.resetStats()
h, w = gMom.getSize()
for i in xrange(h):
for j in xrange(w):
if utils.randomFlipCoin(constants.CDefG2DBinaryStringUniformProb):
temp = sister.getItem(i, j)
sister.setItem(i, j, brother.getItem(i, j))
brother.setItem(i, j, temp)
return (sister, brother)
# -----------------------------------------------------------------
def G2DBinaryStringXSingleVPoint(genome, **args):
"""
The crossover of G2DBinaryString, Single Vertical Point
.. versionadded:: 0.6
The *G2DBinaryStringXSingleVPoint* function
"""
sister = None
brother = None
gMom = args["mom"]
gDad = args["dad"]
cut = prng.randint(1, gMom.getWidth())
if args["count"] >= 1:
sister = gMom.clone()
sister.resetStats()
for i in xrange(sister.getHeight()):
sister[i][cut:] = gDad[i][cut:]
if args["count"] == 2:
brother = gDad.clone()
brother.resetStats()
for i in xrange(brother.getHeight()):
brother[i][cut:] = gMom[i][cut:]
return (sister, brother)
# -----------------------------------------------------------------
def G2DBinaryStringXSingleHPoint(genome, **args):
"""
The crossover of G2DBinaryString, Single Horizontal Point
.. versionadded:: 0.6
The *G2DBinaryStringXSingleHPoint* function
"""
sister = None
brother = None
gMom = args["mom"]
gDad = args["dad"]
cut = prng.randint(1, gMom.getHeight())
if args["count"] >= 1:
sister = gMom.clone()
sister.resetStats()
for i in xrange(cut, sister.getHeight()):
sister[i][:] = gDad[i][:]
if args["count"] == 2:
brother = gDad.clone()
brother.resetStats()
        for i in xrange(cut, brother.getHeight()):
brother[i][:] = gMom[i][:]
return (sister, brother)
# -----------------------------------------------------------------
def GTreeCrossoverSinglePoint(genome, **args):
"""
The crossover for GTree, Single Point
"""
sister = None
brother = None
gMom = args["mom"].clone()
gDad = args["dad"].clone()
gMom.resetStats()
gDad.resetStats()
node_mom_stack = []
all_mom_nodes = []
node_mom_tmp = None
node_dad_stack = []
all_dad_nodes = []
node_dad_tmp = None
node_mom_stack.append(gMom.getRoot())
node_dad_stack.append(gDad.getRoot())
while (len(node_mom_stack) > 0) and (len(node_dad_stack) > 0):
node_mom_tmp = node_mom_stack.pop()
node_dad_tmp = node_dad_stack.pop()
if node_mom_tmp != gMom.getRoot():
all_mom_nodes.append(node_mom_tmp)
all_dad_nodes.append(node_dad_tmp)
node_mom_stack.extend(node_mom_tmp.getChilds())
node_dad_stack.extend(node_dad_tmp.getChilds())
if len(all_mom_nodes) == 0 or len(all_dad_nodes) == 0:
return (gMom, gDad)
if len(all_dad_nodes) == 1:
nodeDad = all_dad_nodes[0]
else:
nodeDad = prng.choice(all_dad_nodes)
if len(all_mom_nodes) == 1:
nodeMom = all_mom_nodes[0]
else:
nodeMom = prng.choice(all_mom_nodes)
nodeMom_parent = nodeMom.getParent()
nodeDad_parent = nodeDad.getParent()
# Sister
if args["count"] >= 1:
sister = gMom
nodeDad.setParent(nodeMom_parent)
nodeMom_parent.replaceChild(nodeMom, nodeDad)
sister.processNodes()
# Brother
if args["count"] == 2:
brother = gDad
nodeMom.setParent(nodeDad_parent)
nodeDad_parent.replaceChild(nodeDad, nodeMom)
brother.processNodes()
return (sister, brother)
# -----------------------------------------------------------------
def GTreeCrossoverSinglePointStrict(genome, **args):
"""
The crossover of Tree, Strict Single Point
    .. note:: This crossover method creates offspring with the restriction of the
              *max_depth* parameter.
    Accepts the *max_attempt* parameter, *max_depth* (required), and
    *distr_leaf* (>= 0.0 and <= 1.0), which represents the probability
    of leaf selection when finding random nodes for crossover.
"""
sister = None
brother = None
gMom = args["mom"].clone()
gDad = args["dad"].clone()
gMom.resetStats()
gDad.resetStats()
max_depth = gMom.getParam("max_depth", None)
max_attempt = gMom.getParam("max_attempt", 10)
distr_leaf = gMom.getParam("distr_leaf", None)
if max_depth is None:
utils.raiseException("You must specify the max_depth genome parameter !", ValueError)
if max_depth < 0:
utils.raiseException("The max_depth must be >= 1, if you want to use GTreeCrossoverSinglePointStrict crossover !", ValueError)
momRandom = None
dadRandom = None
for i in xrange(max_attempt):
if distr_leaf is None:
dadRandom = gDad.getRandomNode()
momRandom = gMom.getRandomNode()
else:
if utils.randomFlipCoin(distr_leaf):
momRandom = gMom.getRandomNode(1)
else:
momRandom = gMom.getRandomNode(2)
if utils.randomFlipCoin(distr_leaf):
dadRandom = gDad.getRandomNode(1)
else:
dadRandom = gDad.getRandomNode(2)
assert momRandom is not None
assert dadRandom is not None
# Optimize here
mH = gMom.getNodeHeight(momRandom)
dH = gDad.getNodeHeight(dadRandom)
mD = gMom.getNodeDepth(momRandom)
dD = gDad.getNodeDepth(dadRandom)
        # Accept this pair only if swapping the subtrees keeps both offspring within max_depth
if (dD + mH <= max_depth) and (mD + dH <= max_depth):
break
if i == (max_attempt - 1):
assert gMom.getHeight() <= max_depth
return (gMom, gDad)
else:
nodeMom, nodeDad = momRandom, dadRandom
nodeMom_parent = nodeMom.getParent()
nodeDad_parent = nodeDad.getParent()
# Sister
if args["count"] >= 1:
sister = gMom
nodeDad.setParent(nodeMom_parent)
if nodeMom_parent is None:
sister.setRoot(nodeDad)
else:
nodeMom_parent.replaceChild(nodeMom, nodeDad)
sister.processNodes()
assert sister.getHeight() <= max_depth
# Brother
if args["count"] == 2:
brother = gDad
nodeMom.setParent(nodeDad_parent)
if nodeDad_parent is None:
brother.setRoot(nodeMom)
else:
nodeDad_parent.replaceChild(nodeDad, nodeMom)
brother.processNodes()
assert brother.getHeight() <= max_depth
return (sister, brother)
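# Added illustration (not part of the original module): this operator raises
# unless the genome carries a "max_depth" parameter (see the checks above).
# Assuming the tree genome exposes setParams()/getParam() as in Pyevolve, a
# caller would configure it roughly like this before evolving:
def _example_configure_strict_tree_crossover(tree_genome):
    tree_genome.setParams(max_depth=5, max_attempt=10, distr_leaf=0.5)
    return tree_genome.getParam("max_depth")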
# -----------------------------------------------------------------
def GTreeGPCrossoverSinglePoint(genome, **args):
"""
The crossover of the GTreeGP, Single Point for Genetic Programming
    .. note:: This crossover method creates offspring with the restriction of the
*max_depth* parameter.
Accepts the *max_attempt* parameter, *max_depth* (required).
"""
sister = None
brother = None
gMom = args["mom"].clone()
gDad = args["dad"].clone()
gMom.resetStats()
gDad.resetStats()
max_depth = gMom.getParam("max_depth", None)
max_attempt = gMom.getParam("max_attempt", 15)
if max_depth is None:
utils.raiseException("You must specify the max_depth genome parameter !", ValueError)
if max_depth < 0:
utils.raiseException("The max_depth must be >= 1, if you want to use GTreeCrossoverSinglePointStrict crossover !", ValueError)
momRandom = None
dadRandom = None
for i in xrange(max_attempt):
dadRandom = gDad.getRandomNode()
if dadRandom.getType() == constants.nodeType["TERMINAL"]:
momRandom = gMom.getRandomNode(1)
elif dadRandom.getType() == constants.nodeType["NONTERMINAL"]:
momRandom = gMom.getRandomNode(2)
mD = gMom.getNodeDepth(momRandom)
dD = gDad.getNodeDepth(dadRandom)
# Two nodes are root
if mD == 0 and dD == 0:
continue
mH = gMom.getNodeHeight(momRandom)
if dD + mH > max_depth:
continue
dH = gDad.getNodeHeight(dadRandom)
if mD + dH > max_depth:
continue
break
if i == (max_attempt - 1):
assert gMom.getHeight() <= max_depth
return (gMom, gDad)
else:
nodeMom, nodeDad = momRandom, dadRandom
nodeMom_parent = nodeMom.getParent()
nodeDad_parent = nodeDad.getParent()
# Sister
if args["count"] >= 1:
sister = gMom
nodeDad.setParent(nodeMom_parent)
if nodeMom_parent is None:
sister.setRoot(nodeDad)
else:
nodeMom_parent.replaceChild(nodeMom, nodeDad)
sister.processNodes()
assert sister.getHeight() <= max_depth
# Brother
if args["count"] == 2:
brother = gDad
nodeMom.setParent(nodeDad_parent)
if nodeDad_parent is None:
brother.setRoot(nodeMom)
else:
nodeDad_parent.replaceChild(nodeDad, nodeMom)
brother.processNodes()
assert brother.getHeight() <= max_depth
return (sister, brother)
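# Added note (hedged): the same "max_depth"/"max_attempt" genome parameters
# shown in the sketch after GTreeCrossoverSinglePointStrict apply here as well;
# the GP variant additionally matches node types (terminal vs. non-terminal)
# when picking the crossover points above.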
# -----------------------------------------------------------------
| mit | -3,296,907,227,072,843,000 | 25.779601 | 133 | 0.550366 | false |
spaceone/tehbot | tehbot/plugins/xkcd/__init__.py | 1 | 1749 | from tehbot.plugins import *
import urllib2
import urllib
import lxml.html
import lxml.etree
import json
# patch XPath 2.0 function into XPath 1.0 API oO
etree_funcs = lxml.etree.FunctionNamespace(None)
etree_funcs["lower-case"] = lambda ctx, x: x[0].text_content().lower() if x else ""
url = "http://xkcd.com%s"
# http://stackoverflow.com/questions/6937525/escaping-xpath-literal-with-python
def toXPathStringLiteral(s):
if "'" not in s: return "'%s'" % s
if '"' not in s: return '"%s"' % s
return "concat('%s')" % s.replace("'", "',\"'\",'")
class XkcdPlugin(StandardPlugin):
def __init__(self):
StandardPlugin.__init__(self)
self.parser.add_argument("search_term", nargs="?")
def execute(self, connection, event, extra, dbconn):
self.parser.set_defaults(user=event.source.nick)
try:
pargs = self.parser.parse_args(extra["args"])
if self.parser.help_requested:
return self.parser.format_help().strip()
except Exception as e:
return u"Error: %s" % str(e)
txt = "\x0303[xkcd]\x03 "
try:
tree = lxml.html.parse(urllib2.urlopen(url % "/archive/"))
res = [(e.text_content(), e.attrib["href"]) for e in tree.xpath("//a[contains(lower-case(.), %s)]" % toXPathStringLiteral(pargs.search_term))]
if not res:
txt += "No results."
else:
txt += ", ".join("%s (%s)" % (a, url % b) for a, b in res[:3])
except:
info = json.load(urllib2.urlopen(url % "/info.0.json"))
p = "/%d/" % info["num"]
txt += "%s - %s" % (url % p, info["safe_title"])
return txt
register_plugin("xkcd", XkcdPlugin())
| mit | 3,080,110,359,981,649,000 | 32.634615 | 154 | 0.568897 | false |
lucperkins/heron | heron/tools/tracker/tests/python/mock_proto.py | 1 | 5403 | ''' mock_proto.py '''
from heronpy.api import api_constants
import heron.proto.execution_state_pb2 as protoEState
import heron.proto.physical_plan_pb2 as protoPPlan
import heron.proto.tmaster_pb2 as protoTmaster
import heron.proto.topology_pb2 as protoTopology
# pylint: disable=no-self-use, missing-docstring
class MockProto(object):
''' Mocking Proto'''
topology_name = "mock_topology_name"
topology_id = "mock_topology_id"
cluster = "mock_topology_cluster"
environ = "mock_topology_environ"
def create_mock_spout(self,
spout_name,
output_streams,
spout_parallelism):
spout = protoTopology.Spout()
spout.comp.name = spout_name
kv = spout.comp.config.kvs.add()
kv.key = api_constants.TOPOLOGY_COMPONENT_PARALLELISM
kv.type = protoTopology.ConfigValueType.Value('STRING_VALUE')
kv.value = str(spout_parallelism)
for stream in output_streams:
spout.outputs.add().stream.CopyFrom(stream)
return spout
def create_mock_bolt(self,
bolt_name,
input_streams,
output_streams,
bolt_parallelism):
bolt = protoTopology.Bolt()
bolt.comp.name = bolt_name
kv = bolt.comp.config.kvs.add()
kv.key = api_constants.TOPOLOGY_COMPONENT_PARALLELISM
kv.type = protoTopology.ConfigValueType.Value('STRING_VALUE')
kv.value = str(bolt_parallelism)
for stream in input_streams:
bolt.inputs.add().stream.CopyFrom(stream)
for stream in output_streams:
bolt.outputs.add().stream.CopyFrom(stream)
return bolt
def create_mock_simple_topology(
self,
spout_parallelism=1,
bolt_parallelism=1):
"""
Simple topology contains one spout and one bolt.
"""
topology = protoTopology.Topology()
topology.id = MockProto.topology_id
topology.name = MockProto.topology_name
# Stream1
stream1 = protoTopology.StreamId()
stream1.id = "mock_stream1"
stream1.component_name = "mock_spout"
# Spout1
spout = self.create_mock_spout("mock_spout", [stream1], spout_parallelism)
topology.spouts.extend([spout])
# Bolt1
bolt = self.create_mock_bolt("mock_bolt", [stream1], [], bolt_parallelism)
topology.bolts.extend([bolt])
return topology
def create_mock_medium_topology(
self,
spout_parallelism=1,
bolt1_parallelism=1,
bolt2_parallelism=1,
bolt3_parallelism=1):
"""
Medium topology is a three stage topology
with one spout, two mid stage bolts, and one
last stage bolt.
S -str1-> B1 -str3-> B3
S -str2-> B2 -str4-> B3
"""
topology = protoTopology.Topology()
topology.id = "mock_topology_id"
topology.name = "mock_topology_name"
# Streams
stream1 = protoTopology.StreamId()
stream1.id = "mock_stream1"
stream1.component_name = "mock_spout1"
stream2 = protoTopology.StreamId()
stream2.id = "mock_stream2"
stream2.component_name = "mock_spout1"
stream3 = protoTopology.StreamId()
stream3.id = "mock_stream3"
stream3.component_name = "mock_bolt1"
stream4 = protoTopology.StreamId()
stream4.id = "mock_stream4"
stream4.component_name = "mock_bolt2"
# Spouts
spout1 = self.create_mock_spout("mock_spout1",
[stream1, stream2],
spout_parallelism)
topology.spouts.extend([spout1])
# Bolts
bolt1 = self.create_mock_bolt("mock_bolt1",
[stream1],
[stream3],
bolt1_parallelism)
bolt2 = self.create_mock_bolt("mock_bolt2",
[stream2],
[stream4],
bolt2_parallelism)
bolt3 = self.create_mock_bolt("mock_bolt3",
[stream3, stream4],
[],
bolt3_parallelism)
topology.bolts.extend([bolt1, bolt2, bolt3])
return topology
def create_mock_simple_physical_plan(
self,
spout_parallelism=1,
bolt_parallelism=1):
pplan = protoPPlan.PhysicalPlan()
pplan.topology.CopyFrom(self.create_mock_simple_topology(
spout_parallelism,
bolt_parallelism))
return pplan
def create_mock_medium_physical_plan(
self,
spout_parallelism=1,
bolt1_parallelism=1,
bolt2_parallelism=1,
bolt3_parallelism=1):
pplan = protoPPlan.PhysicalPlan()
pplan.topology.CopyFrom(self.create_mock_medium_topology(
spout_parallelism,
bolt1_parallelism,
bolt2_parallelism,
bolt3_parallelism))
return pplan
def create_mock_execution_state(self):
estate = protoEState.ExecutionState()
estate.topology_name = MockProto.topology_name
estate.topology_id = MockProto.topology_id
estate.cluster = MockProto.cluster
estate.environ = MockProto.environ
return estate
def create_mock_tmaster(self):
tmaster = protoTmaster.TMasterLocation()
return tmaster
def add_topology_config(self, topology, key, value):
kv = topology.topology_config.kvs.add()
kv.key = key
kv.type = protoTopology.ConfigValueType.Value('STRING_VALUE')
kv.value = str(value)
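# Added illustration (not part of the original helpers): a hedged sketch of how
# the tracker tests would build mock protobufs with the class above.
def _example_mock_plan():
    mock = MockProto()
    pplan = mock.create_mock_simple_physical_plan(spout_parallelism=2,
                                                  bolt_parallelism=3)
    estate = mock.create_mock_execution_state()
    return pplan, estate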
| apache-2.0 | -6,319,299,497,972,068,000 | 30.596491 | 78 | 0.618915 | false |
gavlyukovskiy/p6spy | docs/conf.py | 1 | 11931 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# p6spy documentation build configuration file, created by
# sphinx-quickstart on Wed Sep 7 22:33:59 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
from recommonmark.parser import CommonMarkParser
source_parsers = {
'.md': CommonMarkParser,
}
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = ['.rst', '.md']
# source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'p6spy'
copyright = '2018, p6spy team'
author = 'p6spy team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '3.8'
# The full version, including alpha/beta/rc tags.
release = '3.8.2-SNAPSHOT'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = 'p6spy v3.0'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'p6spydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'p6spy.tex', 'p6spy Documentation',
'p6spy team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# If false, will not define \strong, \code, \titleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'p6spy', 'p6spy Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'p6spy', 'p6spy Documentation',
author, 'p6spy', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
# epub_basename = project
# The HTML theme for the epub output. Since the default themes are not
# optimized for small screen space, using the same theme for HTML and epub
# output is usually not wise. This defaults to 'epub', a theme designed to save
# visual space.
#
# epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or 'en' if the language is not set.
#
# epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
# epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#
# epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#
# epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#
# epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#
# epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#
# epub_tocdepth = 3
# Allow duplicate toc entries.
#
# epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#
# epub_tocscope = 'default'
# Fix unsupported image types using Pillow.
#
# epub_fix_images = False
# Scale large images.
#
# epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# epub_show_urls = 'inline'
# If false, no index is generated.
#
# epub_use_index = True
| apache-2.0 | -6,399,739,008,555,051,000 | 26.746512 | 80 | 0.69156 | false |
vintasoftware/django-role-permissions | rolepermissions/roles.py | 1 | 7914 | from __future__ import unicode_literals
import inspect
from six import add_metaclass
from django.contrib.auth.models import Group, Permission
from django.contrib.auth import get_user_model
from django.contrib.contenttypes.models import ContentType
from rolepermissions.utils import camelToSnake, camel_or_snake_to_title
from rolepermissions.exceptions import RoleDoesNotExist
registered_roles = {}
class RolesManager(object):
def __iter__(cls):
return iter(registered_roles)
@classmethod
def retrieve_role(cls, role_name):
if role_name in registered_roles:
return registered_roles[role_name]
@classmethod
def get_roles_names(cls):
return registered_roles.keys()
@classmethod
def get_roles(cls):
return registered_roles.values()
class RolesClassRegister(type):
def __new__(cls, name, parents, dct):
role_class = super(RolesClassRegister, cls).__new__(cls, name, parents, dct)
if object not in parents:
registered_roles[role_class.get_name()] = role_class
return role_class
@add_metaclass(RolesClassRegister)
class AbstractUserRole(object):
@classmethod
def get_name(cls):
if hasattr(cls, 'role_name'):
return cls.role_name
return camelToSnake(cls.__name__)
@classmethod
def assign_role_to_user(cls, user):
"""
Assign this role to a user.
:returns: :py:class:`django.contrib.auth.models.Group` The group for the
new role.
"""
group, _created = Group.objects.get_or_create(name=cls.get_name())
user.groups.add(group)
permissions_to_add = cls.get_default_true_permissions()
user.user_permissions.add(*permissions_to_add)
return group
@classmethod
def _get_adjusted_true_permissions(cls, user):
"""
Get all true permissions for a user excluding ones that
have been explicitly revoked.
"""
from rolepermissions.permissions import available_perm_status
default_true_permissions = set()
user_permission_states = available_perm_status(user)
adjusted_true_permissions = set()
# Grab the default true permissions from each of the user's roles
for role in get_user_roles(user):
default_true_permissions.update(role.get_default_true_permissions())
# For each of those default true permissions, only keep ones
# that haven't been explicitly revoked
for permission in default_true_permissions:
if user_permission_states[permission.codename]:
adjusted_true_permissions.add(permission)
return adjusted_true_permissions
@classmethod
def remove_role_from_user(cls, user):
"""
Remove this role from a user.
WARNING: Any permissions that were explicitly granted to the user
that are also defined to be granted by this role will be revoked
when this role is revoked.
Example:
>>> class Doctor(AbstractUserRole):
... available_permissions = {
... "operate": False,
... }
>>>
>>> class Surgeon(AbstractUserRole):
... available_permissions = {
... "operate": True,
... }
>>>
>>> grant_permission(user, "operate")
>>> remove_role(user, Surgeon)
>>>
>>> has_permission(user, "operate")
False
>>>
In the example, the user no longer has the ``"operate"`` permission,
even though it was set explicitly before the ``Surgeon`` role was removed.
"""
# Grab the adjusted true permissions before the removal
current_adjusted_true_permissions = cls._get_adjusted_true_permissions(user)
group, _created = cls.get_or_create_group()
user.groups.remove(group)
# Grab the adjusted true permissions after the removal
new_adjusted_true_permissions = cls._get_adjusted_true_permissions(user)
# Remove true permissions that were default granted only by the removed role
permissions_to_remove = (current_adjusted_true_permissions
.difference(new_adjusted_true_permissions))
user.user_permissions.remove(*permissions_to_remove)
return group
@classmethod
def permission_names_list(cls):
available_permissions = getattr(cls, 'available_permissions', {})
return available_permissions.keys()
@classmethod
def get_all_permissions(cls):
permission_names = list(cls.permission_names_list())
if permission_names:
return cls.get_or_create_permissions(permission_names)
return []
@classmethod
def get_default_true_permissions(cls):
if hasattr(cls, 'available_permissions'):
permission_names = [
key for (key, default) in
cls.available_permissions.items() if default]
return cls.get_or_create_permissions(permission_names)
return []
@classmethod
def get_or_create_permissions(cls, permission_names):
user_ct = ContentType.objects.get_for_model(get_user_model())
permissions = list(Permission.objects.filter(
content_type=user_ct, codename__in=permission_names).all())
missing_permissions = set(permission_names) - set((p.codename for p in permissions))
if len(missing_permissions) > 0:
for permission_name in missing_permissions:
permission, created = get_or_create_permission(permission_name)
if created: # assert created is True
permissions.append(permission)
return permissions
@classmethod
def get_default(cls, permission_name):
return cls.available_permissions[permission_name]
@classmethod
def get_or_create_group(cls):
return Group.objects.get_or_create(name=cls.get_name())
def get_or_create_permission(codename, name=camel_or_snake_to_title):
"""
    Get or create a Permission object from a permission code name.
    :param codename: permission code name
    :param name: human-readable permission name (str) or a callable that takes the
        codename as argument and returns a str
"""
user_ct = ContentType.objects.get_for_model(get_user_model())
return Permission.objects.get_or_create(content_type=user_ct, codename=codename,
defaults={'name': name(codename) if callable(name) else name})
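# Illustrative usage of the helper above (a sketch, not part of the original
# module; the "export_reports" codename is hypothetical):
#
#     perm, created = get_or_create_permission("export_reports")
#     # perm.name is a human-readable title derived from the codename by
#     # camel_or_snake_to_title; a plain string (or another callable) can be
#     # passed explicitly instead:
#     perm, created = get_or_create_permission("export_reports", name="Export the reports")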
def retrieve_role(role_name):
"""Get a Role object from a role name."""
return RolesManager.retrieve_role(role_name)
def get_user_roles(user):
"""Get a list of a users's roles."""
if user:
groups = user.groups.all() # Important! all() query may be cached on User with prefetch_related.
roles = (RolesManager.retrieve_role(group.name) for group in groups if group.name in RolesManager.get_roles_names())
return sorted(roles, key=lambda r: r.get_name() )
else:
return []
def _assign_or_remove_role(user, role, method_name):
role_cls = role
if not inspect.isclass(role):
role_cls = retrieve_role(role)
if not role_cls:
raise RoleDoesNotExist
getattr(role_cls, method_name)(user)
return role_cls
def assign_role(user, role):
"""Assign a role to a user."""
return _assign_or_remove_role(user, role, "assign_role_to_user")
def remove_role(user, role):
"""Remove a role from a user."""
return _assign_or_remove_role(user, role, "remove_role_from_user")
def clear_roles(user):
"""Remove all roles from a user."""
roles = get_user_roles(user)
for role in roles:
role.remove_role_from_user(user)
return roles
| mit | -2,105,976,347,229,885,400 | 31.040486 | 124 | 0.633687 | false |
pulinagrawal/nupic | tests/swarming/nupic/swarming/experiments/legacy_cla_multistep/description.py | 1 | 13364 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by the OPF Experiment Generator to generate the actual
description.py file by replacing $XXXXXXXX tokens with desired values.
This description.py file was generated by:
'/Users/ronmarianetti/nupic/eng/lib/python2.6/site-packages/nupicengine/frameworks/opf/expGenerator/ExpGenerator.pyc'
"""
from nupic.frameworks.opf.expdescriptionapi import ExperimentDescriptionAPI
from nupic.frameworks.opf.expdescriptionhelpers import (
updateConfigFromSubConfig,
applyValueGettersToContainer
)
from nupic.frameworks.opf.clamodelcallbacks import *
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.opfutils import (InferenceType,
InferenceElement)
from nupic.support import aggregationDivide
from nupic.frameworks.opf.opftaskdriver import (
IterationPhaseSpecLearnOnly,
IterationPhaseSpecInferOnly,
IterationPhaseSpecLearnAndInfer)
# Model Configuration Dictionary:
#
# Define the model parameters and adjust for any modifications if imported
# from a sub-experiment.
#
# These fields might be modified by a sub-experiment; this dict is passed
# between the sub-experiment and base experiment
#
#
config = {
# Type of model that the rest of these parameters apply to.
'model': "CLA",
# Version that specifies the format of the config.
'version': 1,
# Intermediate variables used to compute fields in modelParams and also
# referenced from the control section.
'aggregationInfo': { 'days': 0,
'fields': [ (u'timestamp', 'first'),
(u'consumption', 'sum'),
],
'hours': 0,
'microseconds': 0,
'milliseconds': 0,
'minutes': 0,
'months': 0,
'seconds': 0,
'weeks': 0,
'years': 0},
'predictAheadTime': None,
# Model parameter dictionary.
'modelParams': {
# The type of inference that this model will perform
'inferenceType': 'TemporalMultiStep',
'sensorParams': {
# Sensor diagnostic output verbosity control;
# if > 0: sensor region will print out on screen what it's sensing
# at each step 0: silent; >=1: some info; >=2: more info;
# >=3: even more info (see compute() in py/regions/RecordSensor.py)
'verbosity' : 0,
# Example:
# 'encoders': {'field1': {'fieldname': 'field1', 'n':100,
# 'name': 'field1', 'type': 'AdaptiveScalarEncoder',
# 'w': 21}}
#
'encoders': {
'consumption': {
'clipInput': True,
'fieldname': u'consumption',
'n': 100,
'name': u'consumption',
'type': 'AdaptiveScalarEncoder',
'w': 21},
'timestamp_timeOfDay': {
'fieldname': u'timestamp',
'name': u'timestamp_timeOfDay',
'type': 'DateEncoder',
'timeOfDay': (21, 1)},
'timestamp_dayOfWeek': {
'fieldname': u'timestamp',
'name': u'timestamp_dayOfWeek',
'type': 'DateEncoder',
'dayOfWeek': (21, 1)},
},
# A dictionary specifying the period for automatically-generated
# resets from a RecordSensor;
#
# None = disable automatically-generated resets (also disabled if
# all of the specified values evaluate to 0).
      # Valid keys are any desired combination of the following:
# days, hours, minutes, seconds, milliseconds, microseconds, weeks
#
# Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),
#
# (value generated from SENSOR_AUTO_RESET)
'sensorAutoReset' : { u'days': 0, u'hours': 0},
},
'spEnable': True,
'spParams': {
# SP diagnostic output verbosity control;
# 0: silent; >=1: some info; >=2: more info;
'spVerbosity' : 0,
'globalInhibition': 1,
# Number of cell columns in the cortical region (same number for
# SP and TP)
# (see also tpNCellsPerCol)
'columnCount': 2048,
'inputWidth': 0,
# SP inhibition control (absolute value);
# Maximum number of active columns in the SP region's output (when
# there are more, the weaker ones are suppressed)
'numActiveColumnsPerInhArea': 40,
'seed': 1956,
# potentialPct
# What percent of the columns's receptive field is available
# for potential synapses. At initialization time, we will
# choose potentialPct * (2*potentialRadius+1)^2
'potentialPct': 0.5,
# The default connected threshold. Any synapse whose
# permanence value is above the connected threshold is
# a "connected synapse", meaning it can contribute to the
# cell's firing. Typical value is 0.10. Cells whose activity
# level before inhibition falls below minDutyCycleBeforeInh
# will have their own internal synPermConnectedCell
# threshold set below this default value.
# (This concept applies to both SP and TP and so 'cells'
# is correct here as opposed to 'columns')
'synPermConnected': 0.1,
'synPermActiveInc': 0.1,
'synPermInactiveDec': 0.01,
},
# Controls whether TP is enabled or disabled;
# TP is necessary for making temporal predictions, such as predicting
# the next inputs. Without TP, the model is only capable of
# reconstructing missing sensor inputs (via SP).
'tmEnable' : True,
'tmParams': {
# TP diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
# (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py)
'verbosity': 0,
# Number of cell columns in the cortical region (same number for
# SP and TP)
# (see also tpNCellsPerCol)
'columnCount': 2048,
# The number of cells (i.e., states), allocated per column.
'cellsPerColumn': 32,
'inputWidth': 2048,
'seed': 1960,
# Temporal Pooler implementation selector (see _getTPClass in
# CLARegion.py).
'temporalImp': 'cpp',
# New Synapse formation count
# NOTE: If None, use spNumActivePerInhArea
#
# TODO: need better explanation
'newSynapseCount': 20,
# Maximum number of synapses per segment
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TP
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSynapsesPerSegment': 32,
# Maximum number of segments per cell
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TP
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSegmentsPerCell': 128,
# Initial Permanence
# TODO: need better explanation
'initialPerm': 0.21,
# Permanence Increment
'permanenceInc': 0.1,
# Permanence Decrement
# If set to None, will automatically default to tpPermanenceInc
# value.
'permanenceDec' : 0.1,
'globalDecay': 0.0,
'maxAge': 0,
# Minimum number of active synapses for a segment to be considered
# during search for the best-matching segments.
# None=use default
# Replaces: tpMinThreshold
'minThreshold': 12,
# Segment activation threshold.
# A segment is active if it has >= tpSegmentActivationThreshold
# connected synapses that are active due to infActiveState
# None=use default
# Replaces: tpActivationThreshold
'activationThreshold': 16,
'outputType': 'normal',
# "Pay Attention Mode" length. This tells the TP how many new
# elements to append to the end of a learned sequence at a time.
# Smaller values are better for datasets with short sequences,
# higher values are better for datasets with long sequences.
'pamLength': 1,
},
'clParams': {
'regionName' : 'SDRClassifierRegion',
# Classifier diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
'verbosity' : 0,
# This controls how fast the classifier learns/forgets. Higher values
# make it adapt faster and forget older patterns faster.
'alpha': 0.001,
# This is set after the call to updateConfigFromSubConfig and is
# computed from the aggregationInfo and predictAheadTime.
'steps': '1',
},
'anomalyParams': { u'anomalyCacheRecords': None,
u'autoDetectThreshold': None,
u'autoDetectWaitRecords': None},
'trainSPNetOnlyIfRequested': False,
},
}
# end of config dictionary
# Adjust base config dictionary for any modifications if imported from a
# sub-experiment
updateConfigFromSubConfig(config)
# Compute predictionSteps based on the predictAheadTime and the aggregation
# period, which may be permuted over.
if config['predictAheadTime'] is not None:
predictionSteps = int(round(aggregationDivide(
config['predictAheadTime'], config['aggregationInfo'])))
assert (predictionSteps >= 1)
config['modelParams']['clParams']['steps'] = str(predictionSteps)
# Adjust config by applying ValueGetterBase-derived
# futures. NOTE: this MUST be called after updateConfigFromSubConfig() in order
# to support value-getter-based substitutions from the sub-experiment (if any)
applyValueGettersToContainer(config)
control = {
# The environment that the current model is being run in
"environment": 'nupic',
# Input stream specification per py/nupic/frameworks/opf/jsonschema/stream_def.json.
#
'dataset' : {
u'info': u'test_hotgym',
u'streams': [ { u'columns': [u'*'],
u'info': u'test data',
u'source': u'file://swarming/test_data.csv'}],
u'version': 1},
# Iteration count: maximum number of iterations. Each iteration corresponds
# to one record from the (possibly aggregated) dataset. The task is
# terminated when either number of iterations reaches iterationCount or
# all records in the (possibly aggregated) database have been processed,
# whichever occurs first.
#
# iterationCount of -1 = iterate over the entire dataset
'iterationCount' : -1,
# A dictionary containing all the supplementary parameters for inference
"inferenceArgs":{u'predictedField': u'consumption', u'predictionSteps': [1]},
# Metrics: A list of MetricSpecs that instantiate the metrics that are
# computed for this experiment
'metrics':[
MetricSpec(field=u'consumption', metric='multiStep',
inferenceElement='multiStepBestPredictions',
params={'window': 1000, 'steps': [1], 'errorMetric': 'altMAPE'}),
],
# Logged Metrics: A sequence of regular expressions that specify which of
# the metrics from the Inference Specifications section MUST be logged for
# every prediction. The regex's correspond to the automatically generated
# metric labels. This is similar to the way the optimization metric is
# specified in permutations.py.
'loggedMetrics': ['.*'],
}
descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
control=control)
| agpl-3.0 | 6,866,481,523,454,306,000 | 36.225627 | 117 | 0.601691 | false |
terranum-ch/GraphLink | test/test_gk_node.py | 1 | 2935 | #!/urs/bin/python
import os
import pytest
import graphviz
from .context import graphlink
from .context import OUTPUT_TEST_PATH
from graphlink.core.gk_node import GKNode
from graphlink.core.gk_node import GK_SHAPE_TYPE
@pytest.fixture()
def get_test_data_folder():
return os.path.join(os.path.dirname(os.path.realpath(__file__)), "data")
def test_graphviz():
dot = graphviz.Digraph(comment='test_graph')
dot.node("A")
dot.node("B")
dot.node("C")
dot.edges(['AB', 'BC'])
dot.render(filename=os.path.join(OUTPUT_TEST_PATH, "test_graph1.gv"))
assert os.path.exists(os.path.join(OUTPUT_TEST_PATH, "test_graph1.gv"))
def test_gknode():
mym1 = GKNode("test1")
mym2 = GKNode("test2")
dot = graphviz.Graph(comment='test_graph2')
mym1.create_node(dot)
mym2.create_node(dot)
dot.edge('test1', 'test2')
dot.render(filename=os.path.join(OUTPUT_TEST_PATH, "test_graph2.gv"))
assert os.path.exists(os.path.join(OUTPUT_TEST_PATH, "test_graph2.gv"))
def test_gknode_init():
mym1 = GKNode()
assert mym1.m_name is None
assert mym1.m_description is None
assert mym1.m_shapetype == GK_SHAPE_TYPE[0]
assert mym1.m_external_link is None
mym2 = GKNode("coucou", shape=GK_SHAPE_TYPE[2])
assert mym2.m_name == "coucou"
assert mym2.m_shapetype == GK_SHAPE_TYPE[2]
assert mym2.m_external_link is None
assert mym2.m_description is None
def test_gknode_image(get_test_data_folder):
mym1 = GKNode("Chief")
assert mym1.set_image("not_existing_picture.png") is False
img_path = os.path.join(get_test_data_folder, "person-icon.jpg")
assert mym1.set_image(img_path) is True
assert mym1.m_shapetype == GK_SHAPE_TYPE[4] # image
assert mym1.m_external_link == img_path
mym3 = GKNode("John", img_path)
assert mym3.m_external_link == img_path
mym2 = GKNode("Bob", img_path)
assert mym2.m_external_link == img_path
mym4 = GKNode("+41791234567")
# test printing with image
dot = graphviz.Graph()
mym1.create_node(dot)
mym2.create_node(dot)
mym3.create_node(dot)
dot.edge(mym1.m_name, mym2.m_name)
dot.edge(mym1.m_name, mym3.m_name)
dot.edge(mym3.m_name, mym4.m_name)
dot.edge(mym4.m_name, mym2.m_name)
dot.render(filename=os.path.join(OUTPUT_TEST_PATH, "test_img1.gv"))
assert os.path.exists(os.path.join(OUTPUT_TEST_PATH, "test_img1.gv"))
def test_gknode_save():
testfile = os.path.join(OUTPUT_TEST_PATH, "save_test_file.gkn")
os.remove(testfile) if os.path.exists(testfile) else None
mym1 = GKNode("John", shape=GK_SHAPE_TYPE[2])
assert mym1.save_to_file(testfile) is True
def test_gknode_load():
mym2 = GKNode()
assert mym2.load_from_file("toto") is False
assert mym2.load_from_file(os.path.join(OUTPUT_TEST_PATH, "save_test_file.gkn")) is True
assert mym2.m_name == "John"
assert mym2.m_shapetype == GK_SHAPE_TYPE[2]
| apache-2.0 | 7,626,252,318,791,831,000 | 28.35 | 92 | 0.672913 | false |
romanalexander/opendota | dotastats/tasks.py | 1 | 4185 | import traceback
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.core.cache import cache
from djcelery import celery
from dotastats.models import MatchHistoryQueue, MatchDetails, SteamPlayer
from dotastats.json.steamapi import GetMatchDetails, GetMatchHistory, GetPlayerNames, GetMatchHistoryBySequenceNum
from celery.utils.log import get_task_logger
MATCH_FRESHNESS = settings.DOTA_MATCH_REFRESH
LOCK_EXPIRE = 20 * 1
logger = get_task_logger(__name__)
@celery.task(name='tasks.poll_match_sequence')
def poll_match_sequence():
"""Celery task that handles the background loading of new matches in bulk.
Returns True if work was handled, None if there was no work to be done.
"""
lock_id = "poll_match_sequence_lock"
success_value = True
acquire_lock = lambda: cache.add(lock_id, "true", LOCK_EXPIRE)
release_lock = lambda: cache.delete(lock_id)
if acquire_lock():
logger.debug("Queue locked.")
try:
if GetMatchHistoryBySequenceNum() == None: # No work was handled.
success_value = None
except Exception, e:
success_value = False
logger.error(traceback.format_exc())
logger.error("Error creating object.")
finally:
logger.debug("Lock released.")
release_lock()
return success_value
@celery.task(name='tasks.poll_steamplayers_queue')
def poll_steamplayers_queue():
"""Celery task that handles the constant background refreshing of SteamPlayers.
This task will take up to 100 old SteamPlayers and update them.
Returns True if work was handled; None if no work to be done.
"""
account_list = []
accounts = SteamPlayer.get_refresh()
for account in accounts:
account_list.append(account.pk)
if len(account_list) > 0:
GetPlayerNames(account_list)
return True
return None
@celery.task(name='tasks.poll_match_history_queue')
def poll_match_history_queue():
"""Celery task that handles the constant background loading of matches.
This task will first empty the MatchHistoryQueue, or look for more matches if nothing in queue.
If there is no work at all, it will refresh old MatchDetails according to staleness.
Returns True if work was handled; False if there was an error; None if no work to be done.
"""
lock_id = "poll_match_history_queue_lock"
success_value = True
acquire_lock = lambda: cache.add(lock_id, "true", LOCK_EXPIRE)
release_lock = lambda: cache.delete(lock_id)
if acquire_lock():
logger.debug("Queue locked.")
queue_object = None
force_refresh = False
try:
if cache.get(lock_id + '_time') == None:
GetMatchHistory()
logger.debug("Ran out of work. Attempting more from history..")
cache.set(lock_id + '_time', True, LOCK_EXPIRE)
else:
try:
queue_object = MatchHistoryQueue.objects.latest()
logger.debug("Got work from MatchHistoryQueue")
except ObjectDoesNotExist:
queue_object = None
if queue_object == None:
queue_object = MatchDetails.get_refresh()
if queue_object:
force_refresh = True
logger.debug("Got work from stale MatchDetails.")
if queue_object:
logger.debug("Attempting to retreive match_id: " + str(queue_object.pk))
GetMatchDetails(queue_object.pk, force_refresh=force_refresh)
logger.debug("Retreived and set match_id: " + str(queue_object.pk))
else:
logger.debug("No work to be done. Sleeping.")
success_value = None
except Exception, e:
success_value = False
logger.error(traceback.format_exc())
logger.error("Error creating object.")
finally:
logger.debug("Lock released.")
release_lock()
return success_value
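# Scheduling sketch (an assumption for illustration; the project's actual
# celerybeat configuration is not part of this module). The polling tasks
# above are meant to be kicked off periodically, e.g.:
#
#     from datetime import timedelta
#     CELERYBEAT_SCHEDULE = {
#         'poll-match-history': {
#             'task': 'tasks.poll_match_history_queue',
#             'schedule': timedelta(seconds=30),
#         },
#     }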
| mit | -4,770,386,650,370,292,000 | 38.11215 | 114 | 0.624851 | false |
vincentlooi/FCIS | fcis/test.py | 1 | 5377 | # --------------------------------------------------------
# Fully Convolutional Instance-aware Semantic Segmentation
# Copyright (c) 2016 by Contributors
# Copyright (c) 2017 Microsoft
# Licensed under The Apache-2.0 License [see LICENSE for details]
# Modified by Guodong Zhang, Haozhi Qi
# --------------------------------------------------------
import _init_paths
import argparse
import os
import os.path as osp
import sys
import cv2
import pprint
import logging
import mxnet as mx
# from function.test_fcis import test_fcis
from dataset import *
from symbols import resnet_v1_101_fcis
from utils.load_model import load_param, load_param_file
from utils.load_data import load_gt_sdsdb
from utils.create_logger import create_logger
from core.loader import TestLoader
from core.tester import Predictor, pred_eval
from config.config import config, update_config
os.environ['PYTHONUNBUFFERED'] = '1'
os.environ['MXNET_CUDNN_AUTOTUNE_DEFAULT'] = '0'
os.environ['MXNET_ENABLE_GPU_P2P'] = '0'
curr_path = os.path.abspath(os.path.dirname(__file__))
sys.path.append('.')
# sys.path.insert(0, os.path.join(curr_path, '../external/mxnet', config.MXNET_VERSION))
def parse_args():
    parser = argparse.ArgumentParser(description='Test a FCIS network')
# general
# configuration file is required
parser.add_argument('--cfg', help='experiment configure file name', required=True, type=str)
# model file is required
parser.add_argument('--model', dest='model', help='path to trained model (.params file)',
required=True, type=str)
# rcnn
parser.add_argument('--vis', help='turn on visualization', action='store_true')
parser.add_argument('--ignore_cache', help='ignore cached results boxes', action='store_true')
parser.add_argument('--thresh', help='valid detection threshold', default=1e-3, type=float)
parser.add_argument('--shuffle', help='shuffle data on visualization', action='store_true')
args = parser.parse_args()
return args
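# Example invocation (a sketch; the config and model paths are hypothetical and
# depend on how the network was trained):
#
#   python fcis/test.py \
#       --cfg experiments/fcis/cfgs/resnet_v1_101_coco_fcis_end2end.yaml \
#       --model output/fcis/fcis_coco-0008 --vis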
def test_net(args):
# init config
cfg_path = args.cfg
update_config(cfg_path)
# test parameters
has_rpn = config.TEST.HAS_RPN
if not has_rpn:
raise NotImplementedError, "Network without RPN is not implemented"
# load model
model_path = args.model
if '.params' not in model_path:
model_path += ".params"
assert osp.exists(model_path), ("Could not find model path %s"%(model_path))
arg_params, aux_params = load_param_file(model_path, process=True)
print("\nLoaded model %s\n"%(model_path))
# gpu stuff
ctx = [mx.gpu(int(i)) for i in config.gpus.split(',')]
# load test dataset
cfg_ds = config.dataset
ds_name = cfg_ds.dataset
ds_path = cfg_ds.dataset_path
test_image_set = cfg_ds.test_image_set
# logger
logger, output_path = create_logger(config.output_path, args.cfg, config.dataset.test_image_set)
logger.info('testing config:{}\n'.format(pprint.pformat(config)))
if ds_name.lower() == "labelme":
# from utils.load_data import load_labelme_gt_sdsdb
imdb = labelme(test_image_set, ds_path, cfg_ds.root_path, mask_size=config.MASK_SIZE,
binary_thresh=config.BINARY_THRESH, classes=cfg_ds.CLASSES)
else:
imdb = eval(ds_name)(test_image_set, cfg_ds.root_path, ds_path, result_path=output_path,
binary_thresh=config.BINARY_THRESH, mask_size=config.MASK_SIZE)
sdsdb = imdb.gt_sdsdb()
# load network
network = resnet_v1_101_fcis()
sym = network.get_symbol(config, is_train=False)
# get test data iter
test_data = TestLoader(sdsdb, config, batch_size=len(ctx), shuffle=args.shuffle, has_rpn=has_rpn)
# infer shape
data_shape_dict = dict(test_data.provide_data_single)
network.infer_shape(data_shape_dict)
network.check_parameter_shapes(arg_params, aux_params, data_shape_dict, is_train=False)
# decide maximum shape
data_names = [k[0] for k in test_data.provide_data_single]
label_names = []
max_data_shape = [[('data', (1, 3, max([v[0] for v in config.SCALES]), max([v[1] for v in config.SCALES])))]]
# # create predictor
predictor = Predictor(sym, data_names, label_names,
context=ctx, max_data_shapes=max_data_shape,
provide_data=test_data.provide_data, provide_label=test_data.provide_label,
arg_params=arg_params, aux_params=aux_params)
# print(test_data.provide_data_single[0][1])
# print(test_data.provide_label)
# start detection
pred_eval(predictor, test_data, imdb, config, vis=args.vis, ignore_cache=args.ignore_cache, thresh=args.thresh, logger=logger)
def main():
args = parse_args()
print('Called with argument:', args)
test_net(args)
# test_fcis(config, config.dataset.dataset, config.dataset.test_image_set, config.dataset.root_path, config.dataset.dataset_path,
# ctx, os.path.join(final_output_path, '..', '_'.join([iset for iset in config.dataset.image_set.split('+')]), config.TRAIN.model_prefix), config.TEST.test_epoch,
# args.vis, args.ignore_cache, args.shuffle, config.TEST.HAS_RPN, config.dataset.proposal, args.thresh, logger=logger, output_path=final_output_path)
if __name__ == '__main__':
main()
| apache-2.0 | -7,227,037,302,334,648,000 | 35.578231 | 176 | 0.657616 | false |
drallensmith/neat-python | examples/picture2d/evolve_interactive.py | 1 | 9101 | """
This is an example that amounts to an offline picbreeder.org without any nice features. :)
Left-click on thumbnails to pick images to breed for next generation, right-click to
render a high-resolution version of an image. Genomes and images chosen for breeding
and rendering are saved to disk.
This example also demonstrates how to customize species stagnation.
"""
import math
import os
import pickle
import pygame
from multiprocessing import Pool
import neat
from neat.six_util import itervalues
from common import eval_mono_image, eval_gray_image, eval_color_image
class InteractiveStagnation(object):
"""
This class is used as a drop-in replacement for the default species stagnation scheme.
A species is only marked as stagnant if the user has not selected one of its output images
within the last `max_stagnation` generations.
"""
def __init__(self, config, reporters):
self.max_stagnation = int(config.get('max_stagnation'))
self.reporters = reporters
@classmethod
def parse_config(cls, param_dict):
config = {'max_stagnation': 15}
config.update(param_dict)
return config
@classmethod
def write_config(cls, f, config):
f.write('max_stagnation = {}\n'.format(config['max_stagnation']))
def update(self, species_set, generation):
result = []
for s in itervalues(species_set.species):
# If any member of the species is selected (i.e., has a fitness above zero),
# mark the species as improved.
for m in s.members.values():
if m.fitness > 0:
s.last_improved = generation
break
stagnant_time = generation - s.last_improved
is_stagnant = stagnant_time >= self.max_stagnation
result.append((s.key, s, is_stagnant))
return result
class PictureBreeder(object):
def __init__(self, thumb_width, thumb_height, full_width, full_height,
window_width, window_height, scheme, num_workers):
"""
:param thumb_width: Width of preview image
:param thumb_height: Height of preview image
:param full_width: Width of full rendered image
:param full_height: Height of full rendered image
:param window_width: Width of the view window
:param window_height: Height of the view window
:param scheme: Image type to generate: mono, gray, or color
"""
self.generation = 0
self.thumb_width = thumb_width
self.thumb_height = thumb_height
self.full_width = full_width
self.full_height = full_height
self.window_width = window_width
self.window_height = window_height
assert scheme in ('mono', 'gray', 'color')
self.scheme = scheme
# Compute the number of thumbnails we can show in the viewer window, while
# leaving one row to handle minor variations in the population size.
self.num_cols = int(math.floor((window_width - 16) / (thumb_width + 4)))
self.num_rows = int(math.floor((window_height - 16) / (thumb_height + 4)))
self.pool = Pool(num_workers)
def make_image_from_data(self, image_data):
# For mono and grayscale, we need a palette because the evaluation function
# only returns a single integer instead of an (R, G, B) tuple.
if self.scheme == 'color':
image = pygame.Surface((self.thumb_width, self.thumb_height))
else:
image = pygame.Surface((self.thumb_width, self.thumb_height), depth=8)
palette = tuple([(i, i, i) for i in range(256)])
image.set_palette(palette)
for r, row in enumerate(image_data):
for c, color in enumerate(row):
image.set_at((r, c), color)
return image
def make_thumbnails(self, genomes, config):
img_func = eval_mono_image
if self.scheme == 'gray':
img_func = eval_gray_image
elif self.scheme == 'color':
img_func = eval_color_image
jobs = []
for genome_id, genome in genomes:
jobs.append(self.pool.apply_async(img_func, (genome, config, self.thumb_width, self.thumb_height)))
thumbnails = []
for j in jobs:
# TODO: This code currently generates the image data using the multiprocessing
# pool, and then does the actual image construction here because pygame complained
# about not being initialized if the pool workers tried to construct an image.
# Presumably there is some way to fix this, but for now this seems fast enough
# for the purposes of a demo.
image_data = j.get()
thumbnails.append(self.make_image_from_data(image_data))
return thumbnails
def make_high_resolution(self, genome, config):
genome_id, genome = genome
# Make sure the output directory exists.
if not os.path.isdir('rendered'):
os.mkdir('rendered')
if self.scheme == 'gray':
image_data = eval_gray_image(genome, config, self.full_width, self.full_height)
elif self.scheme == 'color':
image_data = eval_color_image(genome, config, self.full_width, self.full_height)
else:
image_data = eval_mono_image(genome, config, self.full_width, self.full_height)
image = self.make_image_from_data(image_data)
pygame.image.save(image, "rendered/rendered-{}-{}.png".format(os.getpid(), genome_id))
with open("rendered/genome-{}-{}.bin".format(os.getpid(), genome_id), "wb") as f:
pickle.dump(genome, f, 2)
def eval_fitness(self, genomes, config):
selected = []
rects = []
for n, (genome_id, genome) in enumerate(genomes):
selected.append(False)
row, col = divmod(n, self.num_cols)
rects.append(pygame.Rect(4 + (self.thumb_width + 4) * col,
4 + (self.thumb_height + 4) * row,
self.thumb_width, self.thumb_height))
pygame.init()
screen = pygame.display.set_mode((self.window_width, self.window_height))
pygame.display.set_caption("Interactive NEAT-python generation {0}".format(self.generation))
buttons = self.make_thumbnails(genomes, config)
running = True
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
running = False
break
if event.type == pygame.MOUSEBUTTONDOWN:
clicked_button = -1
for n, button in enumerate(buttons):
if rects[n].collidepoint(pygame.mouse.get_pos()):
clicked_button = n
break
                    # Skip clicks that did not land on any thumbnail.
                    if clicked_button < 0:
                        continue
                    if event.button == 1:
                        selected[clicked_button] = not selected[clicked_button]
                    else:
                        self.make_high_resolution(genomes[clicked_button], config)
if running:
screen.fill((128, 128, 192))
for n, button in enumerate(buttons):
screen.blit(button, rects[n])
if selected[n]:
pygame.draw.rect(screen, (255, 0, 0), rects[n], 3)
pygame.display.flip()
for n, (genome_id, genome) in enumerate(genomes):
if selected[n]:
genome.fitness = 1.0
pygame.image.save(buttons[n], "image-{}.{}.png".format(os.getpid(), genome_id))
with open("genome-{}-{}.bin".format(os.getpid(), genome_id), "wb") as f:
pickle.dump(genome, f, 2)
else:
genome.fitness = 0.0
def run():
# 128x128 thumbnails, 1500x1500 rendered images, 1100x810 viewer, grayscale images, 4 worker processes.
pb = PictureBreeder(128, 128, 1500, 1500, 1100, 810, 'gray', 4)
# Determine path to configuration file.
local_dir = os.path.dirname(__file__)
config_path = os.path.join(local_dir, 'interactive_config')
# Note that we provide the custom stagnation class to the Config constructor.
config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,
neat.DefaultSpeciesSet, InteractiveStagnation,
config_path)
# Make sure the network has the expected number of outputs.
if pb.scheme == 'color':
config.output_nodes = 3
else:
config.output_nodes = 1
config.pop_size = pb.num_cols * pb.num_rows
pop = neat.Population(config)
# Add a stdout reporter to show progress in the terminal.
pop.add_reporter(neat.StdOutReporter(True))
stats = neat.StatisticsReporter()
pop.add_reporter(stats)
while 1:
pb.generation = pop.generation + 1
pop.run(pb.eval_fitness, 1)
if __name__ == '__main__':
run()
| bsd-3-clause | 7,536,118,259,714,346,000 | 37.400844 | 111 | 0.59466 | false |
bitmovin/bitmovin-python | bitmovin/bitmovin.py | 1 | 1234 | from .bitmovin_object import BitmovinObject
from .rest import BitmovinHttpClient
from .services import InputService, OutputService, FilterService, CodecConfigurationService, EncodingService, \
ManifestService, InfrastructureService, AnalyticsService
class Bitmovin(BitmovinObject):
def __init__(self, api_key, api_base_url=None, tenant_org_id=None):
super().__init__()
self._api_key = api_key
self._api_base_url = api_base_url
self._tenant_org_id = tenant_org_id
self._http_client = BitmovinHttpClient(api_key=self._api_key, base_url=self._api_base_url, tenant_org_id=self._tenant_org_id)
self.inputs = InputService(http_client=self._http_client)
self.outputs = OutputService(http_client=self._http_client)
self.filters = FilterService(http_client=self._http_client)
self.codecConfigurations = CodecConfigurationService(http_client=self._http_client)
self.encodings = EncodingService(http_client=self._http_client)
self.manifests = ManifestService(http_client=self._http_client)
self.infrastructures = InfrastructureService(http_client=self._http_client)
self.analytics = AnalyticsService(http_client=self._http_client)
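# Illustrative usage (a sketch, not part of the original module; the API key is
# a placeholder):
#
#     bitmovin = Bitmovin(api_key='<YOUR_API_KEY>')
#     # each REST service is exposed as an attribute of the client, e.g.
#     # bitmovin.encodings, bitmovin.inputs, bitmovin.outputs, bitmovin.manifests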
| unlicense | 7,958,707,509,641,524,000 | 57.761905 | 133 | 0.726094 | false |
apache/allura | Allura/allura/model/timeline.py | 2 | 4850 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import unicode_literals
from __future__ import absolute_import
import bson
import logging
from ming.odm import Mapper
from tg import tmpl_context as c
from activitystream import ActivityDirector
from activitystream.base import NodeBase, ActivityObjectBase
from activitystream.managers import Aggregator as BaseAggregator
from allura.lib import security
from allura.tasks.activity_tasks import create_timelines
log = logging.getLogger(__name__)
class Director(ActivityDirector):
"""Overrides the default ActivityDirector to kick off background
timeline aggregations after an activity is created.
"""
def create_activity(self, actor, verb, obj, target=None,
related_nodes=None, tags=None):
if c.project and c.project.notifications_disabled:
return
from allura.model.project import Project
super(Director, self).create_activity(actor, verb, obj,
target=target,
related_nodes=related_nodes,
tags=tags)
# aggregate actor and follower's timelines
if actor.node_id:
create_timelines.post(actor.node_id)
# aggregate project and follower's timelines
for node in [obj, target] + (related_nodes or []):
if isinstance(node, Project):
create_timelines.post(node.node_id)
class Aggregator(BaseAggregator):
pass
class ActivityNode(NodeBase):
@property
def node_id(self):
return "%s:%s" % (self.__class__.__name__, self._id)
class ActivityObject(ActivityObjectBase):
'''
Allura's base activity class.
'''
@property
def activity_name(self):
"""Override this for each Artifact type."""
return "%s %s" % (self.__mongometa__.name.capitalize(), self._id)
@property
def activity_url(self):
return self.url()
@property
def activity_extras(self):
"""Return a BSON-serializable dict of extra stuff to store on the
activity.
"""
return {"allura_id": self.allura_id}
@property
def allura_id(self):
"""Return a string which uniquely identifies this object and which can
be used to retrieve the object from mongo.
"""
return "%s:%s" % (self.__class__.__name__, self._id)
def has_activity_access(self, perm, user, activity):
"""Return True if user has perm access to this object, otherwise
return False.
"""
if self.project is None or getattr(self, 'deleted', False):
return False
return security.has_access(self, perm, user, self.project)
class TransientActor(NodeBase, ActivityObjectBase):
"""An activity actor which is not a persistent Node in the network.
"""
def __init__(self, activity_name):
NodeBase.__init__(self)
ActivityObjectBase.__init__(self)
self.activity_name = activity_name
def get_activity_object(activity_object_dict):
"""Given a BSON-serialized activity object (e.g. activity.obj dict in a
timeline), return the corresponding :class:`ActivityObject`.
"""
extras_dict = activity_object_dict.activity_extras
if not extras_dict:
return None
allura_id = extras_dict.get('allura_id')
if not allura_id:
return None
classname, _id = allura_id.split(':', 1)
cls = Mapper.by_classname(classname).mapped_class
try:
_id = bson.ObjectId(_id)
except bson.errors.InvalidId:
pass
return cls.query.get(_id=_id)
def perm_check(user):
"""
Return a function that returns True if ``user`` has 'read' access to a given activity,
otherwise returns False.
"""
def _perm_check(activity):
obj = get_activity_object(activity.obj)
return obj is None or obj.has_activity_access('read', user, activity)
return _perm_check
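# Illustrative usage of perm_check (a sketch, not original code; `user` is
# assumed to be an Allura user object and `timeline` an iterable of activities):
#
#     check = perm_check(user)
#     visible = [activity for activity in timeline if check(activity)]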
| apache-2.0 | 6,539,335,614,294,883,000 | 31.77027 | 90 | 0.643505 | false |
ZeitOnline/zeit.today | src/zeit/today/clickimport.py | 1 | 1330 | from cStringIO import StringIO
import argparse
import gocept.runner
import logging
import os.path
import requests
import zeit.connector.interfaces
import zeit.connector.resource
import zope.app.appsetup.product
import zope.component
log = logging.getLogger(__name__)
@gocept.runner.once(principal=gocept.runner.from_config(
'zeit.today', 'principal'))
def import_to_dav():
parser = argparse.ArgumentParser(
description='Download from URL, upload to DAV')
parser.add_argument('--source', help='Source URL')
parser.add_argument('--target', help='Target uniqueId')
args = parser.parse_args()
if not (args.source and args.target):
raise Exception('Both --source and --target are required')
log.info('Importing %s to %s', args.source, args.target)
config = zope.app.appsetup.product.getProductConfiguration(
'zeit.today')
content = requests.get(args.source, stream=True, auth=(
config['clickcounter-username'], config['clickcounter-password'])).text
connector = zope.component.getUtility(zeit.connector.interfaces.IConnector)
resource = zeit.connector.resource.Resource(
args.target, os.path.basename(args.target), 'rawxml',
data=StringIO(content),
contentType='application/xml')
connector.add(resource, verify_etag=False)
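# Example arguments (a sketch; both URLs are hypothetical): whatever console
# script wraps import_to_dav would be invoked as
#
#   <runner> --source http://clicktracker.example.com/today.xml \
#            --target http://xml.zeit.de/import/today.xml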
| bsd-3-clause | -5,760,793,349,174,709,000 | 32.25 | 79 | 0.718045 | false |
emiller/terminal.sublime-package | Terminal/open_terminal.py | 1 | 1066 | """
@author emiller
@date 01/22/2013
@description Simple Sublime plugin that enables the familiar
Open Terminal Here support within the editor.
"""
import sublime, sublime_plugin, os, sys
class OpenTerminalCommand(sublime_plugin.TextCommand):
def run(self, edit):
path = os.path.dirname(self.view.file_name())
if not os.path.exists(path):
sublime.error_message('Hmm, that path doesn\'t exist anymore.')
return
platform = sys.platform.lower()
terminal = None
if platform.startswith('linux'):
terminal = 'cd "%s"; nohup x-terminal-emulator 1>/dev/null 2>&1' % path
elif platform.startswith('osx') or platform.startswith('darwin'):
terminal = 'nohup open -a Terminal "%s" 1>/dev/null 2>&1' % path
if terminal is None:
sublime.message_dialog('Sorry, only Linux and OSX are supported currently.')
return
try:
os.system("%s &" % terminal)
except Exception, e:
sublime.error_message('Unable to open terminal (%s) because: %s' % (terminal, str(e)))
| mit | -2,399,296,585,747,105,000 | 29.457143 | 92 | 0.65197 | false |
talha81/TACTIC-DEV | src/pyasm/security/security.py | 1 | 62804 | ###########################################################
#
# Copyright (c) 2005, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
#
__all__ = ["Login", "LoginInGroup", "LoginGroup", "Site", "Ticket", "Security", "NoDatabaseSecurity", "License", "LicenseException", "get_security_version"]
import hashlib, os, sys, types
from pyasm.common import *
from pyasm.search import *
from access_manager import *
from access_rule import *
def get_security_version():
version = Config.get_value("security", "version")
if version:
version = int(version)
else:
# default to version 2 beginning in 4.0
version = 2
return version
class LoginInGroupException(Exception):
pass
class Login(SObject):
SEARCH_TYPE = "sthpw/login"
def get_defaults(my):
'''specifies the defaults for this sobject'''
defaults = super(Login, my).get_defaults()
# add the password "tactic" to any user that does not have one
# specified
defaults['password']= "39195b0707436a7ecb92565bf3411ab1"
defaults['code'] = my.get_value('login')
return defaults
def update_trigger(my):
# the login groups are cached, so when an update has been made,
# this cache has to be refreshed
security = Environment.get_security()
security.reset_access_manager()
def get_primary_key(my):
return "login"
def get_foreign_key(my):
return "login"
def get_icon_context(my, context=None):
return "icon"
def get_description(my):
return my.get_full_name()
def get_login(my):
return my.get_value("login")
def get_full_name(my):
return "%s %s" % (my.get_value("first_name"), my.get_value("last_name"))
def get_full_email(my):
return "%s %s <%s>" % (my.get_value("first_name"), \
my.get_value("last_name"), my.get_value("email") )
def has_user_license(my):
'''determines if this user has a user level license'''
license_type = my.get_value('license_type', no_exception=True)
return license_type in ['', 'user']
def add_to_group(my, group_name):
'''Adds a user to the specified group'''
if type(group_name) in types.StringTypes:
my.__validate_group(group_name)
else:
group_name = group_name.get_value("login_group")
# use the sobject as well
LoginInGroup.create_by_name( my.get_value("login"), group_name )
def remove_from_group(my, group_name):
        '''removes the user from the specified group'''
if type(group_name) in types.StringTypes:
my.__validate_group(group_name)
else:
group_name = group_name.get_value("login_group")
# use the sobject as well
login_in_group = LoginInGroup.get_by_names( \
my.get_value("login"), group_name)
if login_in_group != None:
login_in_group.delete()
def remove_all_groups(my, except_list=[]):
        '''removes the user from all groups, except those listed in except_list'''
connectors = LoginInGroup.get_by_login_name(my.get_value("login"))
remaining = []
for login_in_group in connectors:
if login_in_group.get_value("login_group") in except_list:
remaining.append(login_in_group)
continue
login_in_group.delete()
return remaining
def __validate_group(my, group_name):
group = LoginGroup.get_by_group_name(group_name)
if not group:
raise LoginInGroupException('This group [%s] does not exist' % group_name)
def get_sub_group_names(my):
        '''Returns the names of the groups this login belongs to,
        as a list of strings'''
connectors = LoginInGroup.get_by_login_name(my.get_login() )
group_names = [ x.get_value("login_group") for x in connectors ]
return group_names
def get_sub_groups(my):
sub_group_names = my.get_sub_group_names()
if not sub_group_names or sub_group_names == ['']:
return []
tmp = ["'%s'" % x for x in sub_group_names ]
# add the default group
#tmp.append("'default'")
tmp = ", ".join(tmp)
search = Search("sthpw/login_group")
search.add_where("\"login_group\" in (%s)" % tmp )
groups = search.get_sobjects()
# check to see if the default is there
"""
has_default = False
for group in groups:
if group.get_login_group() == 'default':
has_default = True
if not has_default:
default_group = SearchType.create("sthpw/login_group")
default_group.set_value("login_group", "default")
groups.append(default_group)
"""
(NONE, MIN, LOW, MED, HI) = range(5)
access_level = NONE
project_codes = set()
for group in groups:
group_access_level = group.get_value("access_level", no_exception=True)
project_code = group.get_value("project_code")
if project_code:
project_codes.add(project_code)
if group_access_level == 'high':
group_access_level = HI
elif group_access_level == 'medium':
group_access_level = MED
elif group_access_level == 'low':
group_access_level = LOW
elif group_access_level == 'min':
group_access_level = MIN
elif group_access_level == 'none':
group_access_level = NONE
else:
group_access_level = LOW
if group_access_level > access_level:
access_level = group_access_level
groups.append(my.get_security_level_group(access_level, project_codes))
return groups
def get_default_security_level():
        '''this should match the default used in get_sub_groups()'''
return "low"
get_default_security_level = staticmethod(get_default_security_level)
def get_security_level_group(access_level, project_codes=[]):
(NONE, MIN, LOW, MED, HI) = range(5)
assert access_level in [NONE, MIN, LOW, MED, HI]
xml = []
xml.append('''<rules>''')
if access_level == HI:
if project_codes:
for project_code in project_codes:
xml.append('''<rule group="project" code="%s" access="allow"/>''' % project_code)
else:
xml.append('''<rule group="project" code="*" access="allow"/>''')
xml.append('''<rule group="search_type" code="*" access="allow"/>''')
xml.append('''<rule group="link" element="*" access="allow"/>''')
xml.append('''<rule group="process" process="*" access="allow"/>''')
xml.append('''<rule group="process" process="*" pipeline="*" access="allow"/>''')
elif access_level == MED:
if project_codes:
for project_code in project_codes:
xml.append('''<rule group="project" code="%s" access="allow"/>''' % project_code)
else:
xml.append('''<rule group="project" code="*" access="allow"/>''')
xml.append('''<rule group="search_type" code="*" access="allow"/>''')
xml.append('''<rule group="process" process="*" access="allow"/>''')
xml.append('''<rule group="process" process="*" pipeline="*" access="allow"/>''')
elif access_level == LOW:
if project_codes:
for project_code in project_codes:
xml.append('''<rule group="project" code="%s" access="allow"/>''' % project_code)
xml.append('''<rule group="search_type" code="*" access="allow"/>''')
xml.append('''<rule group="process" process="*" access="allow"/>''')
xml.append('''<rule group="process" process="*" pipeline="*" access="allow"/>''')
elif access_level == MIN:
if project_codes:
for project_code in project_codes:
xml.append('''<rule group="project" code="%s" access="allow"/>''' % project_code)
xml.append('''<rule group="search_type" code="*" access="allow"/>''')
else: # no security access
if project_codes:
for project_code in project_codes:
xml.append('''<rule group="project" code="%s" access="allow"/>''' % project_code)
xml.append('''</rules>''')
xml = "\n".join(xml)
default_group = SearchType.create("sthpw/login_group")
default_group.set_value("login_group", "default")
default_group.set_value("access_rules", xml)
return default_group
get_security_level_group = staticmethod(get_security_level_group)
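    # Illustrative sketch (not original code): the generated virtual group keeps
    # its rules as XML.  For a "low" access level (2 in the enumeration above)
    # scoped to a single, hypothetical project code:
    #
    #     group = Login.get_security_level_group(2, project_codes=['sample'])
    #     group.get_value("access_rules")
    #     # -> '<rules> ... <rule group="project" code="sample" access="allow"/> ... </rules>'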
# static methods
def get_search():
return Search("sthpw/login")
get_search = staticmethod(get_search)
def get_by_code(code):
return Login.get_by_login(code)
get_by_code = staticmethod(get_by_code)
def get_by_login(login_name, namespace=None):
if not login_name:
return None
# find the ticket in the database
cached = Login.get_cached_obj(login_name)
if cached:
return cached
case_insensitive = False
        if Config.get_value("security", "case_insensitive_login", no_exception=True) == 'true':
            login_name = login_name.lower()
            case_insensitive = True
        # handle admin as a special virtual user
        if login_name == "admin":
search = Search("sthpw/login")
search.set_show_retired(True)
if case_insensitive:
search.add_regex_filter("login", '^%s'%login_name, op='EQI')
search.add_regex_filter("login", '%s$'%login_name, op='EQI')
else:
search.add_filter("login", login_name)
login = search.get_sobject()
if not login:
login = SearchType.create("sthpw/login")
login.set_force_insert()
# MySQL does not support table ID's at 0
if Sql.get_default_database_type() != 'MySQL':
login.set_value("id", 0)
login.set_id(0)
login.set_value("login", "admin")
login.set_value("first_name", "Adminstrator")
login.set_value("last_name", "")
login.set_value("display_name", "Administrator")
password = Config.get_value("security", "password")
if not password:
password = "39195b0707436a7ecb92565bf3411ab1"
login.set_value("password", password)
elif not login.get("password"):
password = Config.get_value("security", "password")
if not password:
password = "39195b0707436a7ecb92565bf3411ab1"
login.set_value("password", password)
else:
search = Search("sthpw/login")
# make sure it's case insensitive
if case_insensitive:
search.add_regex_filter("login", '^%s'%login_name, op='EQI')
search.add_regex_filter("login", '%s$'%login_name, op='EQI')
else:
search.add_filter("login", login_name)
if namespace:
search.add_filter("namespace", namespace)
search.set_show_retired(True)
login = search.get_sobject()
dict = Container.get(SObject._get_cached_key(Login.SEARCH_TYPE))
dict[login_name] = login
return login
get_by_login = staticmethod(get_by_login)
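    # Illustrative usage (a sketch): results are cached in the Container, the
    # optional security/case_insensitive_login setting is honored, and "admin"
    # resolves to a virtual login even if no row exists in the database.
    #
    #     admin = Login.get_by_login("admin")
    #     user = Login.get_by_login("jsmith")    # "jsmith" is a hypothetical login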
def set_password(my, password):
encrypted = hashlib.md5(password).hexdigest()
my.set_value("password", encrypted)
my.commit()
def create(user_name, password, first_name, last_name, groups=None, namespace=None):
login = SearchType.create("sthpw/login")
login.set_value("login", user_name)
# encrypt the password
encrypted = hashlib.md5(password).hexdigest()
login.set_value("password", encrypted)
login.set_value("first_name", first_name)
login.set_value("last_name", last_name)
if groups != None:
login.set_value("groups", groups)
if namespace != None:
login.set_value("namespace", namespace)
login.commit()
return login
create = staticmethod(create)
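    # Illustrative usage (a sketch; the account details are hypothetical):
    #
    #     login = Login.create("jsmith", "tactic", "John", "Smith",
    #                          groups="artist", namespace="sample_project")
    #     # the password is stored as an md5 hex digest, not in plain text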
def get_default_encrypted_password():
return "39195b0707436a7ecb92565bf3411ab1"
get_default_encrypted_password = staticmethod(get_default_encrypted_password)
def get_default_password():
return "tactic"
get_default_password = staticmethod(get_default_password)
def encrypt_password(password):
encrypted = hashlib.md5(password).hexdigest()
return encrypted
encrypt_password = staticmethod(encrypt_password)
class LoginGroup(Login):
SEARCH_TYPE = "sthpw/login_group"
def get_defaults(my):
defaults = {}
defaults['code'] = my.get_value("login_group")
# LoginGroupTrigger handles the update event
return defaults
def is_admin(my):
group = my.get_value("login_group")
return group == "admin"
def get_primary_key(my):
return "login_group"
def get_foreign_key(my):
return "login_group"
def get_description(my):
return my.get_value('description')
def get_login_group(my):
return my.get_value("login_group")
def get_sub_group_names(my):
sub_groups_str = my.get_value("sub_groups")
return sub_groups_str.split("|")
def get_access_rules(my):
return my.get_value("access_rules")
def get_xml_root(my, name):
if name == "access_rules":
return "rules"
def get_logins(my):
connectors = LoginInGroup.get_by_group_name(my.get_login_group())
if not connectors:
return []
tmp = ["'%s'" % x.get_value("login") for x in connectors ]
tmp = ", ".join(tmp)
search = Search("sthpw/login")
search.add_where( "\"login\" in (%s)" % tmp )
logins = search.get_sobjects()
return logins
# static methods
def get_by_code(code):
return LoginGroup.get_by_group_name(code)
get_by_code = staticmethod(get_by_code)
def get_by_group_name(group_name):
# find the group in the database
search = Search("sthpw/login_group")
search.add_filter("login_group", group_name)
group = search.get_sobject()
return group
get_by_group_name = staticmethod(get_by_group_name)
def get(namespace, group_names):
assert isinstance(group_names, list)
search = Search("sthpw/login_group")
search.add_filter("namespace", namespace)
search.add_filters("login_group", group_names)
return search.get_sobjects()
get = staticmethod(get)
def get_group_names(cls, login_name=''):
if not login_name:
login_name = Environment.get_user_name()
group_names = []
login_in_groups = LoginInGroup.get_by_login_name(login_name)
if login_in_groups:
group_names = SObject.get_values(login_in_groups, 'login_group')
return group_names
get_group_names = classmethod(get_group_names)
def get_login_codes_in_group(cls, login_name=None):
if not login_name:
login_name = Environment.get_user_name()
key = "LoginGroup:Groups_in_login"
groups_dict = Container.get(key)
if groups_dict == None:
groups_dict = {}
Container.put(key, groups_dict)
results = groups_dict.get(login_name)
if results != None:
return results
group_names = cls.get_group_names(login_name)
login_codes = set()
for group_name in group_names:
group = LoginGroup.get_by_code(group_name)
if group:
logins = group.get_logins()
for login in logins:
login_code = login.get_value("login")
login_codes.add(login_code)
else:
print "This group [%s] no longer exists" %group_name
results = list(login_codes)
groups_dict[login_name] = results
return results
get_login_codes_in_group = classmethod(get_login_codes_in_group)
def get_by_project(project_code=None):
if not project_code:
from pyasm.biz import Project
project = Project.get()
project_code = project.get_code()
# at the moment, the only way to tell if a group is "in" a project
# is by the security rules
search = Search("sthpw/login_group")
groups = search.get_sobjects()
project_groups = []
for group in groups:
access_level = group.get_value("access_level")
group_project_code = group.get_value("project_code")
if group_project_code:
if project_code == group_project_code:
project_groups.append(group)
continue
elif access_level in ['high', 'medium']:
project_groups.append(group)
continue
access_rules = group.get_xml_value("access_rules")
node = access_rules.get_node("rules/rule[@group='project' and @code='%s']" % project_code)
if node is not None:
project_groups.append( group )
else:
node = access_rules.get_node("rules/rule[@group='project' and @code='*']")
if node is not None:
project_groups.append( group )
return project_groups
get_by_project = staticmethod(get_by_project)
class LoginInGroup(SObject):
SEARCH_TYPE = "sthpw/login_in_group"
def get_by_names(login_name, group_name):
search = Search( LoginInGroup.SEARCH_TYPE )
search.add_filter("login", login_name)
search.add_filter("login_group", group_name)
sobject = search.get_sobject()
return sobject
get_by_names = staticmethod(get_by_names)
def get_by_login_name(cls, login_name):
search = Search( LoginInGroup.SEARCH_TYPE )
search.add_filter("login", login_name)
sobjects = cls.get_by_search(search, "%s|%s" %(cls.SEARCH_TYPE, login_name), is_multi=True)
return sobjects
get_by_login_name = classmethod(get_by_login_name)
def get_by_group_name(login_name):
search = Search( LoginInGroup.SEARCH_TYPE )
search.add_filter("login_group", login_name)
sobjects = search.get_sobjects()
return sobjects
get_by_group_name = staticmethod(get_by_group_name)
def create(login, login_group):
return LoginInGroup.create_by_name( \
login.get_value("login"), login_group.get_value("login_group") )
create = staticmethod(create)
def create_by_name(login_name, group_name):
sobject = SearchType.create( LoginInGroup.SEARCH_TYPE )
sobject.set_value( "login", login_name)
sobject.set_value( "login_group", group_name)
sobject.commit()
return sobject
create_by_name = staticmethod(create_by_name)
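# Hedged usage sketch (hypothetical login and group names):
#
#   connector = LoginInGroup.create_by_name("fred", "artist")
#   LoginInGroup.get_by_login_name("fred")    # would now include this connector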
class Site(object):
'''This is used to manage various "sites" (databases) within a single
TACTIC installation. Tickets are scoped by site which determines
the location of the database.'''
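# A deployment can swap in a custom Site subclass through the value read in
# Site.get() below, i.e. Config.get_value("security", "site_class"); e.g. the
# (hypothetical) "spt.modules.portal.PortalSite" hinted at in that method.
# The methods on this base class are mostly virtual no-ops that such a
# subclass is expected to override.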
def get_max_users(my):
return
#
# Virtual methods
#
def get_by_login(cls, login):
return ""
get_by_login = classmethod(get_by_login)
def build_ticket(cls, ticket):
return ticket
build_ticket = classmethod(build_ticket)
def get_by_ticket(cls, ticket):
return ""
get_by_ticket = classmethod(get_by_ticket)
def get_connect_data(cls, site):
return {}
get_connect_data = classmethod(get_connect_data)
def get_asset_dir(cls, file_object=None, alias=None):
return
get_asset_dir = classmethod(get_asset_dir)
def get_web_dir(cls, file_object=None, alias=None):
return
get_web_dir = classmethod(get_web_dir)
def get_default_project(cls):
return
#######################
def get(cls):
class_name = Config.get_value("security", "site_class")
if not class_name:
class_name = "pyasm.security.Site"
#class_name = "spt.modules.portal.PortalSite"
try:
site = Common.create_from_class_path(class_name)
except Exception, e:
site = Site()
return site
get = classmethod(get)
def get_site(cls):
'''Get the global site for this "session"'''
site = Container.get("site")
if not site:
return ""
return site
get_site = classmethod(get_site)
def set_site(cls, site):
'''Set the global site for this "session"'''
if not site:
return
Container.put("site", site)
set_site = classmethod(set_site)
def get_db_resource(cls, site, database):
if not site:
return None
site_obj = cls.get()
data = site_obj.get_connect_data(site)
if data:
host = data.get('host')
port = data.get('port')
vendor = data.get('vendor')
user = data.get('user')
password = data.get('password')
else:
return None
db_resource = DbResource(database, host=host, port=port, vendor=vendor, user=user, password=password)
return db_resource
get_db_resource = classmethod(get_db_resource)
class Ticket(SObject):
'''When a user logins, a ticket is created. This ticket is stored in the
database as a long unique string of alpha-numeric characters. It is stored
on the browser as a cookie and allows the user to log back in without re-entering a password.
The ticket has an expiry date set in the Tactic config file'''
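# Sketch of the ticket lifecycle (illustrative only): the expiry interval is
# read from Config.get_value("security", "ticket_expiry") in create() below,
# falling back to "10 hour" when unset.
#
#   ticket = Ticket.create(key, "admin", interval="5 day")   # issue a ticket
#   Ticket.get_by_valid_key(key)    # returns the ticket, or None once expired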
def get_key(my):
'''get the alphanumeric unique code for the session'''
return my.get_value("ticket")
def get_by_key(key):
'''static method to get a Ticket sobject by its key'''
# find the ticket in the database
search = Search("sthpw/ticket")
search.add_filter("ticket", key)
ticket = search.get_sobject()
return ticket
get_by_key = staticmethod(get_by_key)
def get_by_valid_key(key):
'''static method to get a Ticket sobject by its key. The key must be
valid in that it has not yet expired.'''
# find the ticket in the database
search = Search("sthpw/ticket")
search.add_filter("ticket", key)
now = search.get_database_impl().get_timestamp_now()
search.add_where('("expiry" > %s or "expiry" is NULL)' % now)
ticket = search.get_sobject()
return ticket
get_by_valid_key = staticmethod(get_by_valid_key)
def create(key, login, expiry=None, interval=None, category=None):
'''creation function to create a new ticket
@keyparam:
expiry: exact expiry timestamp
interval: offset from now, e.g. "5 day" or "24 hour"
category: type of ticket
'''
# For now, tickets always come from the default database
impl = Sql.get_default_database_impl()
now = impl.get_timestamp_now()
# an expiry of -1 means the ticket never expires
if expiry == -1:
expiry = "NULL"
elif not expiry:
if not interval:
interval = Config.get_value("security","ticket_expiry")
if not interval:
interval = "10 hour"
#expiry = "%s + '%s'" % (now, interval)
offset, type = interval.split(" ")
expiry = impl.get_timestamp_now(offset=offset, type=type)
ticket = SearchType.create("sthpw/ticket")
ticket.set_value("ticket", key)
ticket.set_value("login", login)
ticket.set_value("timestamp", now, quoted=0)
if category:
ticket.set_value("category", category)
"""
if category == 'gui':
search = Search("sthpw/ticket")
search.add_filter("login", login)
search.add_filter("category", category)
cur_tickets = search.get_sobjects()
for cur_ticket in cur_tickets:
#cur_ticket.set_value("expiry", "now()", quoted=False)
#cur_ticket.commit()
cur_ticket.delete()
"""
# it makes no sense for Sqlite sessions to expire
# FIXME: this is a bit of a hack until we figure out how
# timestamps work in sqlite (all are converted to GMT?!)
if impl.get_database_type() in ['Sqlite', 'MySQL']:
print "WARNING: no expiry on ticket for Sqlite and MySQL"
ticket.set_value("expiry", 'NULL', quoted=0)
else:
ticket.set_value("expiry", expiry, quoted=0)
ticket.commit(triggers="none")
return ticket
create = staticmethod(create)
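# Hedged examples of how create() interprets its expiry arguments:
#
#   Ticket.create(key, "admin")                       # default interval from config
#   Ticket.create(key, "admin", interval="24 hour")   # explicit offset from now
#   Ticket.create(key, "admin", expiry=-1)            # never expires (stored as NULL)
#   Ticket.create(key, "admin", category="gui")       # tag the ticket's purpose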
def update_session_expiry():
security = Environment.get_security()
login_ticket = security.get_ticket()
impl = Sql.get_default_database_impl()
timeout = Config.get_value("security","inactive_ticket_expiry")
if not timeout:
return
offset,type = timeout.split(" ")
expiry = impl.get_timestamp_now(offset=offset, type=type)
Ticket.update_expiry(login_ticket,expiry)
update_session_expiry = staticmethod(update_session_expiry)
def update_expiry(ticket,expiry):
ticket.set_value("expiry", expiry, quoted=0)
ticket.commit(triggers="none")
update_expiry = staticmethod(update_expiry)
# DEPRECATED
class NoDatabaseSecurity(Base):
def __init__(my):
#my._login = SearchType.create("sthpw/login")
my._access_manager = AccessManager()
my.is_logged_in_flag = False
pass
def is_logged_in(my):
return my.is_logged_in_flag
def get_license(my):
return License()
def login_with_ticket(my, key, add_access_rules=True):
return None
def login_user(my, login_name, password, expiry=None, domain=None):
my.is_logged_in_flag = True
def get_login(my):
return None
def get_user_name(my):
return None
def get_group_names(my):
return ['admin']
def add_access_rules(my):
pass
def get_ticket(my):
return None
def check_access(my, group, key, access, value=None, is_match=False, default="edit"):
return True
def get_access(my, group, key, default=None):
return "edit"
def alter_search(my, search):
pass
def get_access_manager(my):
return my._access_manager
class Security(Base):
'''main class dealing with user identification'''
def __init__(my, verify_license=False):
my._login_var = None
my._is_logged_in = 0
my._groups = []
my._group_names = []
my._ticket = None
my.add_access_rules_flag = True
# define an access manager object
my._access_manager = AccessManager()
my.license = License.get(verify=verify_license)
my.login_cache = None
def _get_my_login(my):
return my._login_var
def _set_my_login(my, login):
my._login_var = login
if my._login_var and my._login_var.get_value("login") == 'admin':
my._access_manager.set_admin(True)
else:
my._access_manager.set_admin(False)
_login = property(_get_my_login, _set_my_login)
def get_version(cls):
return get_security_version()
get_version = classmethod(get_version)
def is_logged_in(my):
return my._is_logged_in
def is_admin(my):
return my._access_manager.is_admin()
def set_admin(my, flag):
return my._access_manager.set_admin(flag)
def get_license(my):
return my.license
def reread_license(my):
my.license = License.get()
return my.license
def get_groups(my):
return my._groups
def get_group_names(my):
group_names = my._group_names
group_names.sort()
return group_names
def is_in_group(my, group_name):
return group_name in my._group_names
def get_login(my):
return my._login
def get_user_name(my):
return my._login.get_login()
def get_ticket(my):
return my._ticket
def get_ticket_key(my):
if my._ticket:
return my._ticket.get_key()
else:
return ""
def clear_ticket(my):
my._ticket = None
def get_access_manager(my):
return my._access_manager
def reset_access_manager(my):
my._access_manager = AccessManager()
my.add_access_rules()
def sign_out(my):
my._is_logged_in = 0
my._login = None
my._groups = []
my._group_names = []
my._ticket = None
def get_start_link(my):
for group in my._groups:
start_link = group.get_value("start_link")
if start_link:
return start_link
def _do_login(my):
'''function to actually log in the user'''
# get from cache
#from pyasm.biz import LoginCache
#my.login_cache = LoginCache.get("logins")
# find all of the groups for this login
#login = my._login.get_login()
#my._groups = my.login_cache.get_attr("%s:groups" % login)
#my._groups = my.login_cache.get_attr("%s:groups" % login)
#my._groups = my.login_cache.get_attr("%s:groups" % login)
#my._group_names = my.login_cache.get_attr("%s:group_names" % login)
my._groups = None
if my._groups == None:
#print "recaching!!!!"
my._groups = []
my._group_names = []
my._find_all_login_groups()
# set the results to the cache
#my.login_cache.set_attr("%s:groups" % login, my._groups)
#my.login_cache.set_attr("%s:group_names" % login, my._group_names)
# go through all of the group names and add their respective
# rules to the access manager
if my.add_access_rules_flag:
my.add_access_rules()
# record that the login is logged in
my._is_logged_in = 1
def login_as_batch(my, login_name=None):
'''function that logs in through a batch command'''
# default to admin. Generally batch is run as admin.
if not login_name:
login_name = "admin"
# login must exist in the database
my._login = Login.get_by_login(login_name)
if not my._login:
raise SecurityException("Security failed: Unrecognized user: '%s'" % login_name)
# create a new ticket for the user
my._ticket = my._generate_ticket(login_name)
my._do_login()
def login_as_guest(my):
'''function that logs in as guest'''
login_name = "guest"
group_name = "guest"
search = Search("sthpw/login")
search.add_filter("login", login_name)
my._login = search.get_sobject()
if not my._login:
# login must exist in the database
my._login = SearchType.create("sthpw/login")
my._login.set_value("code", login_name)
my._login.set_value("login", login_name)
my._login.set_value("first_name", "Guest")
my._login.set_value("last_name", "User")
my._login.set_value("display_name", "Guest")
my._login.commit()
# create a login group
search = Search("sthpw/login_group")
search.add_filter("login_group", login_name)
group = search.get_sobject()
if not group:
group = SearchType.create("sthpw/login_group")
group.set_value("login_group", group_name)
group.commit()
login_in_group = SearchType.create("sthpw/login_in_group")
login_in_group.set_value("login", login_name)
login_in_group.set_value("login_group", group_name)
login_in_group.commit()
# clear the login_in_group cache
LoginInGroup.clear_cache()
my._find_all_login_groups()
# create a new ticket for the user
my._ticket = my._generate_ticket(login_name)
my._do_login()
access_manager = my.get_access_manager()
xml = Xml()
xml.read_string('''
<rules>
<rule column="login" value="cow" search_type="sthpw/login" op="!=" group="search_filter"/>
</rules>
''')
access_manager.add_xml_rules(xml)
def login_with_ticket(my, key, add_access_rules=True):
'''login with the alphanumeric ticket key found in the Ticket
sobject.'''
if key == "":
return None
# set the site if the key has one
site = Site.get().get_by_ticket(key)
Site.get().set_site(site)
my.add_access_rules_flag = add_access_rules
#from pyasm.biz import CacheContainer
#cache = CacheContainer.get("sthpw/ticket")
#cache.build_cache_by_column("ticket")
#ticket = cache.get_sobject_by_key("ticket", key)
ticket = Ticket.get_by_valid_key(key)
if ticket is None:
# if ticket does not exist, make sure we are signed out and leave
return None
# try getting from global cache
from pyasm.biz import CacheContainer
login_code = ticket.get_value("login")
cache = CacheContainer.get("sthpw/login")
if cache:
my._login = cache.get_sobject_by_key("login", login_code)
# if it doesn't exist, try the old method
if not my._login:
my._login = Login.get_by_login( ticket.get_value("login") )
if my._login is None:
return None
# store the ticket
my._ticket = ticket
my._do_login()
#print "done: ", time.time() - start
#print "--- end security - login_with_ticket"
if my._login.get("login") == "guest":
access_manager = my.get_access_manager()
xml = Xml()
xml.read_string('''
<rules>
<rule column="login" value="$LOGIN" search_type="sthpw/login" group="search_filter"/>
</rules>
''')
access_manager.add_xml_rules(xml)
return my._login
def login_with_session(my, sid, add_access_rules):
from tactic_client_lib import TacticServerStub
server = TacticServerStub.get()
# TEST: this is a test authentication with Drupal
my.add_access_rules_flag = add_access_rules
from pyasm.security import Sudo
sudo = Sudo()
# authenticate use some external method
if sid:
expr = '''@SOBJECT(table/sessions?project=drupal['sid','%s'])''' % sid
#print "expr: ", expr
session = server.eval(expr, single=True)
else:
session = {}
if session:
uid = session.get("uid")
expr = '''@SOBJECT(table/users?project=drupal['uid','%s'])''' % uid
drupal_user = server.eval(expr, single=True)
else:
drupal_user = {}
if not drupal_user:
return None
sudo.exit()
# at this point, the user is authenticated
user_name = drupal_user.get("name")
#print "login: ", user_name
# if the user doesn't exist, then autocreate one
my._login = Search.get_by_code("sthpw/login", user_name)
if not my._login:
my._login = SearchType.create("sthpw/login")
my._login.set_value("code", user_name)
my._login.set_value("login", user_name)
my._login.set_value("first_name", user_name)
my._login.set_value("password", drupal_user.get('pass'))
my._login.set_value("email", drupal_user.get('mail'))
my._login.commit()
# do we need a tactic ticket as well ...?
#my._ticket = my._generate_ticket(user_name, expiry=None)
search = Search("sthpw/ticket")
search.add_filter("ticket", sid)
my._ticket = search.get_sobject()
if not my._ticket:
my._ticket = SearchType.create("sthpw/ticket")
my._ticket.set_value("login", user_name)
my._ticket.set_value("ticket", sid)
my._ticket.commit()
my._do_login()
def login_user_without_password(my, login_name, expiry=None):
'''login a user without a password. This should be used sparingly'''
search = Search("sthpw/login")
search.add_filter("login", login_name)
my._login = search.get_sobject()
# user still has to exist
if not my._login:
raise SecurityException("Login [%s] does not exist" % login_name)
my._ticket = my._generate_ticket(login_name, expiry)
my._do_login()
def login_user(my, login_name, password, expiry=None, domain=None):
'''login user with a name and password combination
The login has the following modes:
autocreate : this autocreates the user if it does not exist
cache : this caches the user in the login table, but information
is always pulled from the source when this method is called
'''
# check for backwards compatibility
authenticate_version = Config.get_value(
"security", "authenticate_version", no_exception=True)
if authenticate_version == '1':
return login_user_version_1(login_name, password, expiry)
# admin always uses the standard authenticate class
auth_class = None
if login_name == 'admin':
auth_class = "pyasm.security.TacticAuthenticate"
# verify using the specified authenticate class
if not auth_class:
auth_class = Config.get_value("security", "authenticate_class",
no_exception=True)
if not auth_class:
auth_class = "pyasm.security.TacticAuthenticate"
# handle the windows domain, manually typed in domain overrides
if login_name.find('\\') != -1:
domain, login_name = login_name.split('\\', 1)
if domain and login_name !='admin':
auth_login_name = "%s\\%s" % (domain, login_name)
else:
auth_login_name = login_name
authenticate = Common.create_from_class_path(auth_class)
is_authenticated = authenticate.verify(auth_login_name, password)
if is_authenticated != True:
raise SecurityException("Login/Password combination incorrect")
mode = authenticate.get_mode()
if not mode:
mode = Config.get_value( "security", "authenticate_mode", no_exception=True)
if not mode:
mode = 'default'
# lowercase name if case-insensitive is set to true
if Config.get_value("security", "case_insensitive_login", no_exception=True) == 'true':
login_name = login_name.lower()
# when mode is autocreate, then the user entry is created automatically
# on first entry. Future verifies will use the login stored in the
# database.
if mode == 'autocreate':
# get the login from the authentication class
my._login = Login.get_by_login(login_name)
if not my._login:
my._login = SearchType.create("sthpw/login")
my._login.set_value('login', login_name)
authenticate.add_user_info( my._login, password)
my._login.commit(triggers=False)
# when mode is cache, it does autocreate and update user_info every time
# this is called
elif mode == 'cache':
# get the login from the authentication class
my._login = Login.get_by_login(login_name)
if not my._login:
my._login = SearchType.create("sthpw/login")
my._login.set_value('login', login_name)
try:
authenticate.add_user_info( my._login, password)
except Exception, e:
raise SecurityException("Error updating user info: %s" % e.__str__())
# verify that this won't create too many users. Floating licenses
# can have any number of users
if my._login.has_user_license():
num_left = my.license.get_num_licenses_left()
if num_left <= 0:
raise SecurityException("Number of active users exceeds licenses")
my._login.commit()
else:
# get the login from database and don't bother updating
my._login = Login.get_by_login(login_name)
# if it doesn't exist, then the login fails
if not my._login:
raise SecurityException("Login/Password combination incorrect")
# if the user is disabled, then they cannot log in
license_type = my._login.get_value("license_type", no_exception=True)
if license_type == "disabled":
raise SecurityException("User [%s] is disabled" % my._login.get_value('login'))
# check if the user has a floating license
elif license_type == 'float':
try:
my.license.verify_floating(login_name)
except LicenseException, e:
raise SecurityException(str(e))
# create a new ticket for the user
my._ticket = my._generate_ticket(login_name, expiry, category="gui")
# clear the login_in_group cache
LoginInGroup.clear_cache()
my._do_login()
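# Hedged summary of the config keys consulted by login_user() above (values
# are examples, not necessarily shipped defaults):
#
#   security / authenticate_version     "1" selects the deprecated path below
#   security / authenticate_class       e.g. "pyasm.security.TacticAuthenticate" (the fallback)
#   security / authenticate_mode        "autocreate", "cache" or "default"
#   security / case_insensitive_login   "true" lowercases the login name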
# DEPRECATED as 2.5
def login_user_version_1(my, login_name, password, expiry=None):
'''login user with a name and password combination
The login has the following modes:
autocreate : this autocreates the user if it does not exist
cache : this caches the user in the login table, but information
is always pulled from the source when this method is called
'''
# check to see if this user exists
test_login = Login.get_by_login(login_name)
if test_login:
autocreate = False
else:
# if the user does not already exist, check to see if the user
# is autocreated
autocreate = Config.get_value("security", "auto_create_user", no_exception=True)
if autocreate == 'true':
autocreate = True
else:
autocreate = False
auth_class = Config.get_value("security", "authenticate_class", no_exception=True)
if not auth_class:
auth_class = "pyasm.security.TacticAuthenticate"
# get once again (why??)
my._login = Login.get_by_login(login_name)
if not my._login:
if autocreate:
# if autocreate is on, create a "virtual" user
my._login = SearchType.create("sthpw/login")
my._login.set_value("login", login_name)
else:
raise SecurityException("Login/Password combination incorrect")
authenticate = Common.create_from_class_path(auth_class)
is_authenticated = authenticate.authenticate(my._login, password)
if is_authenticated != True:
raise SecurityException("Login/Password combination incorrect")
# if the user is disabled, then they cannot log in
if my._login.get_value("license_type", no_exception=True) == "disabled":
raise SecurityException("User [%s] is disabled" % my._login.get_value('login'))
# if no exception has occurred, the user is authenticated.
# If autocreate is on, then the user is created in Tactic as well
if autocreate:
# put this in a transaction
from pyasm.command import Command
class CreateUser(Command):
def execute(my):
# FIXME: should probably centralize password encryption
#encrypted = md5.new(password).hexdigest()
encrypted = hashlib.md5(password).hexdigest()
my._login.set_value("password", encrypted)
# provide the opportunity for authenticate to set values
# on creation
authenticate.add_user_info(my._login)
my._login.commit()
cmd = CreateUser()
cmd._login = my._login
Command.execute_cmd(cmd)
else:
# allow the authentication class to add specific user info
authenticate.add_user_info(my._login)
# create a new ticket for the user
my._ticket = my._generate_ticket(login_name, expiry)
my._do_login()
def _generate_ticket(my, login_name, expiry=None, category=None):
# create a new ticket for the user
ticket_key = Common.generate_random_key()
ticket_key = Site.get().build_ticket(ticket_key)
ticket = Ticket.create(ticket_key,login_name, expiry, category=category)
return ticket
def compare_access(my, user_access, required_access):
return my._access_manager.compare_access(user_access, required_access)
def check_access(my, group, key, access, value=None, is_match=False, default="edit"):
'''convenience function to check the security level to the access
manager'''
return my._access_manager.check_access(group, key, access, value, is_match, default=default)
def get_access(my, group, key, default=None):
return my._access_manager.get_access(group, key, default=default)
def alter_search(my, search):
'''convenience function to alter a search for security reasons'''
# set that the security filter has been added
search.set_security_filter()
return my._access_manager.alter_search(search)
def is_login_in_group(my, group):
'''returns whether the user is in the given group'''
if group in my._group_names:
return True
else:
return False
def _find_all_login_groups(my, group=None):
if not group:
groups = my._login.get_sub_groups()
for group in groups:
# recurse on each group; the else branch below appends the group
# (guarding against circular loops) and then walks its subgroups.
# Appending here first would trip that guard and skip the subgroups.
my._find_all_login_groups(group)
else:
# break any circular loops
group_name = group.get_login_group()
if group_name in my._group_names:
return
my._groups.append(group)
my._group_names.append(group.get_login_group())
# go through the subgroups
sub_groups = group.get_sub_groups()
for sub_group in sub_groups:
my._find_all_login_groups(sub_group)
# make sure my._groups is an array
if my._groups == None:
my._groups = []
#for x in my._groups:
# print x.get_login_group()
def add_access_rules(my):
if my._login and my._login.get_value("login") == 'admin':
my._access_manager.set_admin(True)
return
for group in my._groups:
login_group = group.get_value("login_group")
if login_group == "admin":
my._access_manager.set_admin(True)
return
# go through all of the groups and add access rules
for group in my._groups:
access_rules_xml = group.get_xml_value("access_rules")
my._access_manager.add_xml_rules(access_rules_xml)
# DEPRECATED
# get all of the security rules
#security_rules = AccessRule.get_by_groups(my._groups)
#for rule in security_rules:
# access_rules_xml = rule.get_xml_value("rule")
# my._access_manager.add_xml_rules(access_rules_xml)
import pickle, os, base64
from Crypto.PublicKey import RSA
from Crypto.Hash import MD5
# HACK: From PyCrypto-2.0.1 to PyCrypt-2.3, the install datastructure: RSAobj
# was changed to _RSAobj. This means that the unwrapped key, which is basically
# a pickled tuple, has the RSAobj in it. This causes a stack trace when
# doing a pickle.loads. The following remaps the module to have the old
# RSAobj so that everything maps correctly.
#
try:
RSA.RSAobj = RSA._RSAobj
except:
# Skipping
pass
class LicenseKey(object):
def __init__(my, public_key):
# unwrap the public key (for backwards compatibility)
unwrapped_key = my.unwrap("Key", public_key)
try:
# get the size and key object
haspass, my.size, keyobj = pickle.loads(unwrapped_key)
my.algorithm, my.keyobj = pickle.loads(keyobj)
except Exception, e:
raise LicenseException("License key corrupt. Please verify license file. %s" %e.__str__())
def verify_string(my, raw, signature):
# unwrap the signature
unwrapped_signature = my.unwrap("Signature", signature)
# deconstruct the signature
algorithm, raw_signature = pickle.loads(unwrapped_signature)
assert my.algorithm == algorithm
# MD5 the raw text
m = MD5.new()
m.update(raw)
d = m.digest()
if my.keyobj.verify(d, raw_signature):
return True
else:
return False
def unwrap(my, type, msg):
msg = msg.replace("<StartPycrypto%s>" % type, "")
msg = msg.replace("<EndPycrypto%s>" % type, "")
binary = base64.decodestring(msg)
return binary
class LicenseException(Exception):
pass
class License(object):
license_path = "%s/tactic-license.xml" % Environment.get_license_dir()
NO_LICENSE = 'no_license'
def __init__(my, path=None, verify=True):
my.message = ""
my.status = "NOT FOUND"
my.licensed = False
my.xml = None
if path:
my.license_path = path
my.verify_flag = verify
try:
my.parse_license()
except LicenseException, e:
my.message = e.__str__()
print "WARNING: ", my.message
my.licensed = False
# this is the minimal acceptable data for my.xml; don't set it to None
# this should be the place to redefine it if applicable
if not my.xml:
my.xml = Xml('<%s/>'%my.NO_LICENSE)
else:
my.licensed = True
def parse_license(my, check=False):
'''check = True is only used for creation verification'''
if not os.path.exists(my.license_path):
raise LicenseException("Cannot find license file [%s]" % my.license_path )
my.xml = Xml()
try:
my.xml.read_file(my.license_path, cache=False)
except XmlException, e:
my.xml.read_string("<license/>")
raise LicenseException("Error parsing license file: malformed xml license file [%s] e: %s" % (my.license_path, e))
# verify signature
signature = str(my.xml.get_value("license/signature"))
signature = signature.strip()
data_node = my.xml.get_node("license/data")
data = my.xml.to_string(data_node).strip()
public_key = str(my.xml.get_value("license/public_key"))
# the data requires very specific spacing. 4Suite puts out a
# different dump than lxml and, unfortunately, the license key is
# dependent on the spacing.
#print "data: [%s]" % data
data = data.replace(" ", " ")
data = data.replace(" </data>", "</data>")
#print "data: [%s]" % data
# verify the signature
if my.verify_flag:
key = LicenseKey(public_key)
if not key.verify_string(data, signature):
# will be redefined in constructor
my.xml = None
if check == True:
raise TacticException("Data and signature in license file do not match in [%s]" % my.license_path)
else:
raise LicenseException("Data and signature in license file do not match in [%s]" % my.license_path)
my.verify_license()
#my.verify()
def is_licensed(my):
return my.licensed
def get_message(my):
return my.message
def verify(my):
try:
my.verify_license()
my.licensed = True
return True
except LicenseException, e:
my.message = e.__str__()
my.licensed = False
my.LICENSE = None
return False
def verify_floating(my, login_name=None):
# check if the user has a floating license
floating_max = my.get_max_floating_users()
if not floating_max:
raise LicenseException("No floating licenses are available")
floating_current_users = my.get_current_floating_users()
floating_current = len(floating_current_users)
#print "foating_max: ", floating_max
#print "foating_current: ", floating_current
#print "login_name: ", login_name
#print "floating_current_users: ", floating_current_users
# if the user is in the list, then this user is already logged in
if login_name and login_name in floating_current_users:
return True
if floating_current >= floating_max:
raise LicenseException("Too many users. Please try again later")
def get_data(my, key):
value = my.xml.get_value("license/data/%s" % key)
return value
def get_max_users(my):
site_obj = Site.get()
value = site_obj.get_max_users()
if not value:
value = my.xml.get_value("license/data/max_users")
try:
value = int(value)
except ValueError:
value = 10
return value
def get_max_floating_users(my):
value = my.xml.get_value("license/data/max_floating_users")
try:
value = int(value)
except ValueError:
value = 0
return value
def get_num_licenses_left(my):
max_users = my.get_max_users()
current_users = my.get_current_users()
left = max_users - current_users
return left
# leftover code (unreachable after the return above):
#floating_current_users = my.get_current_floating_users()
#floating_current = len(floating_current_users)
def get_expiry_date(my):
value = my.xml.get_value("license/data/expiry_date")
return value
def get_current_users(my):
sql = DbContainer.get("sthpw")
select = Select()
select.set_database("sthpw")
#select.set_database(db_resource)
select.add_table("login")
columns = sql.get_column_info("login").keys()
if 'license_type' in columns:
select.add_op('begin')
select.add_filter("license_type", 'user')
select.add_filter("license_type", "NULL", quoted=False, op="is")
select.add_op('or')
select.add_op('begin')
select.add_filter("s_status", "NULL", quoted=False, op="is")
select.add_filter("s_status", "retired", op="!=")
select.add_op('or')
#select.add_filter("login", "('admin','guest')", quoted=False, op="not in")
select.add_filters("login", ['admin','guest'], op="not in")
num_users = select.execute_count()
#statement = select.get_count()
#print "statement: ", statement
#num_users = sql.get_value(statement)
#num_users = int(num_users)
return num_users
def get_current_floating_users(my):
'''Get the current floating licenses used
The definition of a used floating license is a user who has an
unexpired ticket
'''
#import time
#start = time.time()
sql = DbContainer.get("sthpw")
impl = sql.get_database_impl()
# use users
select = Select()
select.set_database(sql)
select.add_table("login")
select.add_join("ticket", column="login", column2="login", join="INNER")
select.add_where('"expiry" is not NULL')
select.add_filter("expiry", impl.get_timestamp_now(), quoted=False, op=">")
select.add_column("login", distinct=True)
# only count float licenses
columns = sql.get_columns("login")
if 'license_type' in columns:
select.add_where("\"license_type\" = 'float'")
#statement = select.get_count()
statement = select.get_statement()
#print "statement: ", statement
login_names = sql.do_query(statement)
login_names = [x[0] for x in login_names]
#num_float = len(login_names)
return login_names
#return num_float
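# The statement built above corresponds roughly to the following SQL (a
# sketch only; the exact form depends on the database implementation):
#
#   SELECT DISTINCT "login"."login" FROM "login"
#   INNER JOIN "ticket" ON "login"."login" = "ticket"."login"
#   WHERE "expiry" is not NULL AND "expiry" > {now} AND "license_type" = 'float'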
def verify_license(my):
'''Verifies the contents of the parsed license file.'''
# go through the checks
if not my.xml:
raise LicenseException(my.message)
#raise LicenseException("Parsing of licensing file [%s] failed. Renew it in the Projects tab." % my.license_path )
node = my.xml.get_node("license")
if node is None:
no_lic_node = my.xml.get_node(my.NO_LICENSE)
if no_lic_node is not None:
raise LicenseException(my.message)
else:
raise LicenseException("Parsing of license file [%s] failed." % my.license_path )
version = my.xml.get_value("license/version")
# for now, there is only one version of the license
if 1:
# check for mac address, if it exists in license
license_mac = my.xml.get_value("license/data/mac_address")
license_mac = license_mac.strip()
if license_mac:
mac = my.get_mac_address()
if mac != license_mac:
raise LicenseException("License mac address do not match")
# check for expiry date, if it exists
license_expiry = my.xml.get_value("license/data/expiry_date")
license_expiry = license_expiry.strip()
if license_expiry:
current = Date().get_db_time()
if current > license_expiry:
raise LicenseException("License expired on [%s] in [%s]" % (license_expiry, my.license_path))
# check for tactic version
license_version = my.xml.get_value("license/data/tactic_version")
release_version = Environment.get_release_version()
if not license_version:
raise LicenseException("License file not locked to a specific version of TACTIC")
try:
if license_version in ["EPL", "ALL"]:
# really big
license_version = 10**6
else:
parts = license_version.split(".")
license_version = float("%s.%s" % (parts[0],parts[1]))
parts = release_version.split(".")
release_version = float("%s.%s" % (parts[0],parts[1]))
except:
raise LicenseException("Incorrect format for version in license file")
else:
if release_version > license_version:
raise LicenseException("License not valid for this version of TACTIC. License is for v%s" % license_version)
# check for max users
license_users = my.get_max_users()
if license_users:
license_users = int(license_users)
try:
current = my.get_current_users()
except DatabaseException:
# set it to zero. If there is a database error, then
# it doesn't really matter because nobody can use the
# software anyway
current = 0
if current > license_users:
raise LicenseException("Too many users for license [%s]" % my.license_path)
#print "License verified ... "
def get_mac_address(my):
'''copied from Newsgroup somewhere'''
if sys.platform == 'win32':
for line in os.popen("ipconfig /all"):
if line.lstrip().startswith('Physical Address'):
mac = line.split(':')[1].strip().replace('-',':')
break
else:
for line in os.popen("/sbin/ifconfig"):
if line.find('Ether') > -1:
mac = line.split()[4]
break
return mac
# global license variable
LICENSE = None
LAST_CHECK = None
LAST_MTIME = None
def get(cls, verify=True):
# reparse every hour
now = Date()
now = now.get_db_time()
last = Date(db=cls.LAST_CHECK)
last.add_hours(1)
next = last.get_db_time()
# reparse if the license file has been modified
exists = os.path.exists(cls.license_path)
if exists:
mtime = os.path.getmtime(cls.license_path)
else:
mtime = None
if not exists or not cls.LICENSE \
or not cls.LAST_CHECK \
or now > next \
or not cls.LAST_MTIME \
or mtime > cls.LAST_MTIME:
cls.LICENSE = License()
else:
if verify:
cls.LICENSE.verify()
cls.LAST_CHECK = now
cls.LAST_MTIME = mtime
return cls.LICENSE
get = classmethod(get)
| epl-1.0 | -4,296,692,532,009,081,000 | 30.528112 | 156 | 0.568244 | false |