repo_name stringlengths 6 61 | path stringlengths 4 230 | copies stringlengths 1 3 | size stringlengths 4 6 | text stringlengths 1.01k 850k | license stringclasses 15 values | hash int64 -9,220,477,234,079,998,000 9,219,060,020B | line_mean float64 11.6 96.6 | line_max int64 32 939 | alpha_frac float64 0.26 0.9 | autogenerated bool 1 class | ratio float64 1.62 6.1 | config_test bool 2 classes | has_no_keywords bool 2 classes | few_assignments bool 1 class |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
maria-msu-seclab/mpotrDevelopment | libraries/dbus-python-1.2.0/test/test-signals.py | 1 | 6184 | #!/usr/bin/env python
# Copyright (C) 2004 Red Hat Inc. <http://www.redhat.com/>
# Copyright (C) 2005-2007 Collabora Ltd. <http://www.collabora.co.uk/>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use, copy,
# modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import sys
import os
import unittest
import time
import logging
builddir = os.path.normpath(os.environ["DBUS_TOP_BUILDDIR"])
pydir = os.path.normpath(os.environ["DBUS_TOP_SRCDIR"])
import dbus
import _dbus_bindings
import dbus.glib
import dbus.service
try:
from gi.repository import GObject as gobject
except ImportError:
raise SystemExit(77)
logging.basicConfig()
logging.getLogger().setLevel(1)
logger = logging.getLogger('test-signals')
pkg = dbus.__file__
if not pkg.startswith(pydir):
raise Exception("DBus modules (%s) are not being picked up from the package"%pkg)
if not _dbus_bindings.__file__.startswith(builddir):
raise Exception("DBus modules (%s) are not being picked up from the package"%_dbus_bindings.__file__)
NAME = "org.freedesktop.DBus.TestSuitePythonService"
IFACE = "org.freedesktop.DBus.TestSuiteInterface"
OBJECT = "/org/freedesktop/DBus/TestSuitePythonObject"
class TestSignals(unittest.TestCase):
def setUp(self):
logger.info('setUp()')
self.bus = dbus.SessionBus()
self.remote_object = self.bus.get_object(NAME, OBJECT)
self.remote_object_fallback_trivial = self.bus.get_object(NAME,
OBJECT + '/Fallback')
self.remote_object_fallback = self.bus.get_object(NAME,
OBJECT + '/Fallback/Badger')
self.remote_object_follow = self.bus.get_object(NAME, OBJECT,
follow_name_owner_changes=True)
self.iface = dbus.Interface(self.remote_object, IFACE)
self.iface_follow = dbus.Interface(self.remote_object_follow, IFACE)
self.fallback_iface = dbus.Interface(self.remote_object_fallback, IFACE)
self.fallback_trivial_iface = dbus.Interface(
self.remote_object_fallback_trivial, IFACE)
self.in_test = None
def signal_test_impl(self, iface, name, test_removal=False):
self.in_test = name
# using append rather than assignment here to avoid scoping issues
result = []
def _timeout_handler():
logger.debug('_timeout_handler for %s: current state %s', name, self.in_test)
if self.in_test == name:
main_loop.quit()
def _signal_handler(s, sender, path):
logger.debug('_signal_handler for %s: current state %s', name, self.in_test)
if self.in_test not in (name, name + '+removed'):
return
logger.info('Received signal from %s:%s, argument is %r',
sender, path, s)
result.append('received')
main_loop.quit()
def _rm_timeout_handler():
logger.debug('_timeout_handler for %s: current state %s', name, self.in_test)
if self.in_test == name + '+removed':
main_loop.quit()
logger.info('Testing %s', name)
match = iface.connect_to_signal('SignalOneString', _signal_handler,
sender_keyword='sender',
path_keyword='path')
logger.info('Waiting for signal...')
iface.EmitSignal('SignalOneString', 0)
source_id = gobject.timeout_add(1000, _timeout_handler)
main_loop.run()
if not result:
raise AssertionError('Signal did not arrive within 1 second')
logger.debug('Removing match')
match.remove()
gobject.source_remove(source_id)
if test_removal:
self.in_test = name + '+removed'
logger.info('Testing %s', name)
result = []
iface.EmitSignal('SignalOneString', 0)
source_id = gobject.timeout_add(1000, _rm_timeout_handler)
main_loop.run()
if result:
raise AssertionError('Signal should not have arrived, but did')
gobject.source_remove(source_id)
def testFallback(self):
self.signal_test_impl(self.fallback_iface, 'Fallback')
def testFallbackTrivial(self):
self.signal_test_impl(self.fallback_trivial_iface, 'FallbackTrivial')
def testSignal(self):
self.signal_test_impl(self.iface, 'Signal')
def testRemoval(self):
self.signal_test_impl(self.iface, 'Removal', True)
def testSignalAgain(self):
self.signal_test_impl(self.iface, 'SignalAgain')
def testRemovalAgain(self):
self.signal_test_impl(self.iface, 'RemovalAgain', True)
def testSignalF(self):
self.signal_test_impl(self.iface_follow, 'Signal')
def testRemovalF(self):
self.signal_test_impl(self.iface_follow, 'Removal', True)
def testSignalAgainF(self):
self.signal_test_impl(self.iface_follow, 'SignalAgain')
def testRemovalAgainF(self):
self.signal_test_impl(self.iface_follow, 'RemovalAgain', True)
if __name__ == '__main__':
main_loop = gobject.MainLoop()
gobject.threads_init()
dbus.glib.init_threads()
logger.info('Starting unit test')
unittest.main()
| gpl-2.0 | -5,020,675,400,546,335,000 | 36.93865 | 105 | 0.654754 | false | 3.860175 | true | false | false |
sergiohr/NeuroDB | test/test_nodos.py | 1 | 7625 | '''
Created on Apr 5, 2015
@author: sergio
'''
import numpy as np
import ctypes
import numpy.ctypeslib as npct
import matplotlib.pyplot as plt
import psycopg2
import time
import neurodb.neodb.core
from math import e, pow
from scipy.optimize import leastsq
import neurodb
import random
from sklearn.cluster import KMeans, AgglomerativeClustering, MiniBatchKMeans
from neurodb.cfsfdp import libcd
array_1d_double = npct.ndpointer(dtype=np.double, ndim=1, flags='CONTIGUOUS')
array_1d_int = npct.ndpointer(dtype=np.int64, ndim=1, flags='CONTIGUOUS')
array_2d_double = npct.ndpointer(dtype=np.double, ndim=2, flags='CONTIGUOUS')
def get_points(id_block, channel):
username = 'postgres'
password = 'postgres'
host = '172.16.162.128'
dbname = 'demo'
url = 'postgresql://%s:%s@%s/%s'%(username, password, host, dbname)
dbconn = psycopg2.connect('dbname=%s user=%s password=%s host=%s'%(dbname, username, password, host))
query = """SELECT spike.p1, spike.p2, spike.p3 from SPIKE
JOIN segment ON id_segment = segment.id
JOIN recordingchannel ON id_recordingchannel = recordingchannel.id
WHERE segment.id_block = %s
AND recordingchannel.index = %s"""%(id_block, channel)
cursor = dbconn.cursor()
cursor.execute(query)
results = cursor.fetchall()
points = []
for i in range(len(results)):
p1 = results[i][0]
p2 = results[i][1]
p3 = results[i][2]
points.append([p1,p2,p3])
return np.array(points)
def ajuste(local_density, coeficientes):
vajuste = np.zeros(len(local_density))
for j in range(len(local_density)):
vajuste[j] = np.polynomial.polynomial.polyval(local_density[j], coeficientes)
return vajuste
def select_nodes(id_project, id_session, channel, n_nodos):
project = neurodb.project.get_from_db(id_project)
session = project.get_session(int(id_session))
channels = session.get_channels()
for ch in channels:
if ch['channel']==int(channel):
rc = session.get_channel(ch['id'])
spikes = rc.get_spikes()
random.shuffle(spikes)
len_spikes = len(spikes)
len_nodo = np.ceil(float(len_spikes)/float(n_nodos))
nodos = []
for i in range(n_nodos):
nodo = []
j = 0
while(spikes != [] and j<len_nodo):
nodo.append(spikes.pop())
j = j + 1
nodos.append(nodo)
return nodos
def select_nodes_r(id_project, id_session, channel, n_nodos):
project = neurodb.project.get_from_db(id_project)
session = project.get_session(int(id_session))
channels = session.get_channels()
for ch in channels:
if ch['channel']==int(channel):
rc = session.get_channel(ch['id'])
spikes = rc.get_spikes()
len_spikes = len(spikes)
len_nodo = np.ceil(float(len_spikes)/float(n_nodos))
nodos = []
for i in range(n_nodos):
nodo = random.sample(spikes,int(len_nodo))
nodos.append(nodo)
return nodos
def get_centers(nodos, nnodos, points):
centersT = []
rho = np.array([], np.float64)
delta = np.array([], np.float64)
ncenters = 0
spikes = np.array([], np.float64)
cl = np.array([], np.float64)
for i in range(nnodos):
spikes_id = nodos[i]
spikes_id = np.array(spikes_id, np.float64)
nspikes = len(spikes_id)
local_density = np.empty(nspikes)
distance_to_higher_density = np.empty(nspikes)
cluster_index = np.empty(nspikes)
nneigh = np.empty(nspikes)
centers = np.empty(nspikes)
dc = libcd.get_dc(connect, spikes_id, nspikes, np.float(1.8), points)
libcd.cluster_dp(connect, local_density, distance_to_higher_density, spikes_id,
cluster_index, nneigh, centers, dc, points, nspikes, "gaussian")
print "nodo %s procesado. ncenters:%s"%(i,int(centers[0]))
ncenters = centers[0] + ncenters
for j in range(int(centers[0])):
centersT.append([local_density[int(centers[j+1])], distance_to_higher_density[int(centers[j+1])]])
rho = np.concatenate((rho,local_density))
delta = np.concatenate((delta, distance_to_higher_density))
spikes = np.concatenate((spikes, spikes_id))
cl = np.concatenate((cl, cluster_index))
# plt.plot(local_density, distance_to_higher_density, 'o')
# plt.show()
ncenters = np.ceil(ncenters/nnodos)
plt.plot(rho, delta, 'ro')
plt.show()
centersT = np.array(centersT)
return centersT, ncenters, spikes, cl
if __name__ == '__main__':
username = 'postgres'
password = 'postgres'
host = '172.16.162.128'
dbname = 'demo'
url = 'postgresql://%s:%s@%s/%s'%(username, password, host, dbname)
dbconn = psycopg2.connect('dbname=%s user=%s password=%s host=%s'%(dbname, username, password, host))
connect = "dbname=demo host=172.16.162.128 user=postgres password=postgres"
project = 19
id_block = "76"
#id_block = "76"
channel = "1"
points = 3
nnodos = 1
nodos = select_nodes_r(project, id_block, channel, nnodos)
color = ['bo', 'ro', 'go', 'co', 'ko', 'mo', 'b^', 'r^', 'g^', 'c^', 'k^', 'm^', 'bx', 'rx', 'gx', 'cx', 'kx', 'mx']
centers, nclusters, spikes, cl= get_centers(nodos, nnodos, points)
print "clusters: %s"%nclusters
km = KMeans(n_clusters = int(nclusters))
#km = MiniBatchKMeans(n_clusters = int(ncenters))
aw = AgglomerativeClustering(linkage='ward', n_clusters=int(nclusters))
km.fit(centers)
aw.fit(centers)
# plt.plot(km.cluster_centers_[0][0], km.cluster_centers_[0][1], 'kx')
# plt.plot(km.cluster_centers_[1][0], km.cluster_centers_[1][1], 'kx')
# plt.plot(km.cluster_centers_[2][0], km.cluster_centers_[2][1], 'kx')
# c = np.array(centers, np.float64)
#
# centersC = np.empty(len(c[:,1]))
# labels = np.empty(len(c[:,1]))
# x = np.array(c[:,0], np.float64)
# y = np.array(c[:,1], np.float64)
# libcd.dp(x, y, len(c[:,1]), labels, centersC, "gaussian")
for i in range(len(centers)):
plt.plot(centers[i][0], centers[i][1], color[int(aw.labels_[i])])
plt.show()
pass
#
# local_density = np.empty(nspikes)
# distance_to_higher_density = np.empty(nspikes)
# cluster_index = np.empty(nspikes)
# nneigh = np.empty(nspikes)
# centers = np.empty(nspikes)
#
# dc = libcd.get_dc(connect, spikes_id, nspikes, np.float(1.8), points)
# libcd.cluster_dp(connect, local_density, distance_to_higher_density, spikes_id,
# cluster_index, nneigh, centers, dc, points, nspikes, "gaussian")
#
# plt.plot(local_density, distance_to_higher_density, 'bo')
# plt.show()
#
# for i in range(int(cluster_index.max())+1):
# plt.subplot(int(cluster_index.max())+1,1,i+1)
# k = 0
# for j in range(nspikes):
# if cluster_index[j] == i:
# spikes = neurodb.neodb.core.spikedb.get_from_db(dbconn, id_block = id_block, channel = channel, id = int(spikes_id[j]))
# signal = spikes[0].waveform
# plt.plot(signal)
# k = 1 + k
#
# title = str(i) +": "+ str(k)
# plt.title(title)
# plt.show()
#
# pass
| gpl-3.0 | -619,141,681,476,917,100 | 30.508264 | 137 | 0.582951 | false | 3.094562 | false | false | false |
dandanvidi/catalytic-rates | scripts/get_data_subunits_composition.py | 1 | 1713 | from bs4 import BeautifulSoup
import pandas as pd
from scripts.kapp import CACHABLE
import urllib
C = CACHABLE()
reactions = C.map_model_reaction_to_genes().set_index(0)
genes = {row[0:5]:row[56:63] for row in open('data/all_ecoli_genes.txt', 'r')
if row[0:5] in reactions.values}
new_dict = {}
for j,(b, EG) in enumerate(genes.iteritems()):
sock = urllib.urlopen("http://ecocyc.org/ECOLI/NEW-IMAGE?type=GENE&object=%s" %EG)
html = sock.read()
doc = BeautifulSoup(html)
classes = doc.findAll('p')
subunits = 1
for item in classes:
title = item.contents[0].strip()
if title == 'Subunit composition of':
for s in item.findAll('sub'):
try:
subunits = int(s.contents[0].strip())
except ValueError:
continue
break
print j, b, "->", subunits, " subunits"
new_dict[b] = subunits
subunits = pd.DataFrame(new_dict.items())
subunits.to_csv("cache/subunits.csv")
#
#
#
#
#
#
#
#
#
#
#
#
#
# m = 0
# try:
# a = doc.findAll('p')[4]
# except:
# continue
# if 'Subunit composition of' in str(a):
# try:
# a = doc.findAll('sub')
# except:
# continue
#
# print a
# if j >2:
# break
# if 'Subunit composition of' in str(a):
# m = int(str(a.sub).split('<sub>')[1][0])
# break
# if m == 0:
# m =1
# new_dict[b] = m
# print j, EG, "->", m, " subunits"
subunits = pd.DataFrame(new_dict.items())
subunits.to_csv("cache/subunits.csv") | mit | 7,014,327,657,803,190,000 | 21.25974 | 87 | 0.502627 | false | 3.137363 | false | false | false |
Silvenga/LinuxScripts | solusvm/emailed-status.py | 1 | 5038 | #!/usr/bin/env python
######################################################################
#
# Email Status of SolusVM VPS's
# Designed as a Cron script
#
######################################################################
#
# Example
#
#Node0
# bw:
# 15.5GB/1.0TB
# [#---------------------------------------] 2%
#
#Node1
# bw:
# 2.6GB/1000.0GB
# [----------------------------------------] 0%
#
#Node2
# hdd:
# 4.9GB/30.0GB
# [######----------------------------------] 16%
# bw:
# 8.3GB/1.0TB
# [----------------------------------------] 1%
#
#Node3
# hdd:
# 23.7GB/50.0GB
# [###################---------------------] 47%
# bw:
# 372.8GB/500.0GB
# [##############################----------] 75%
#
#
######################################################################
###### Settings start ################################################
######################################################################
# Hosts to check the status of (in order)
# Put as many as your want
HOSTS = [
{
'key': "XXXXX-00000-XXXXX", # API Key
'hash': "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX", # API hash
'url': "https://usadmin.inceptionhosting.com", # API host
'name': "Node0" # Name
},
{
'key': "XXXXX-00000-XXXXX", # API Key
'hash': "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX", # API hash
'url': "https://solus.fliphost.net", # API host
'name': "Node1" # Name
},
{
'key': "XXXXX-00000-XXXXX", # API Key
'hash': "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX", # API hash
'url': "http://master.weloveservers.net", # API host
'name': "Node2" # Name
},
{
'key': "XXXXX-00000-XXXXX", # API Key
'hash': "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX", # API hash
'url': "https://vpscp.ramnode.com", # API host
'name': "Node3" # Name
}
]
# Email Settings
# Uses the local email SMTP server, so watch out
EMAIL_FROM = "[email protected]"
EMAIL_TO = "[email protected]"
EMAIL_SUBJECT = "Server Status Report"
# Possible values: "bw", "hdd", "mem" (in order)
CHECK = ["hdd", "bw", "mem"]
# Do not show blank values (Usually due to the server being a KVM/XEN)
REMOVE_BLANKS = True
# Steps (the size of the status bars)
STEPS = 40
######################################################################
###### Settings end ##################################################
######################################################################
import subprocess
import re
import os
import smtplib
from email.mime.text import MIMEText
######################################################################
###### Functions start ###############################################
######################################################################
def run(args):
proc = subprocess.Popen([args], stdout=subprocess.PIPE, shell=True)
(out, err) = proc.communicate()
return out
def parseStatus(str):
parser = re.compile(r'</*\w+>')
array = parser.split(str)
array = filter(None, array)
lookup = {
'status': array[0],
'statusmsg': array[1],
'vmstat': array[2],
'hostname': array[3],
'ipaddress': array[4],
'hdd': parseType(array[5]),
'bw': parseType(array[6]),
'mem': parseType(array[7])
}
return lookup
def parseType(str):
parser = re.compile(r',')
array = parser.split(str)
array = filter(None, array)
lookup = {
'max': sizeOf(array[0]),
'used': sizeOf(array[1]),
'left': sizeOf(array[2]),
'precent': array[3]
}
return lookup
def pullStatus(host):
result = run(
"curl -s \"" + host['url'] +
"/api/client/command.php?key=" + host['key'] +
"&hash=" + host['hash'] +
"&action=status&bw=true&mem=true&hdd=true\""
)
return parseStatus(result)
def sizeOf(str):
# http://stackoverflow.com/a/1094933/2001966
num = float(str)
for x in ['bytes','KB','MB','GB']:
if num < 1024.0:
return "%3.1f%s" % (num, x)
num /= 1024.0
return "%3.1f%s" % (num, 'TB')
def saveHost(host):
status = pullStatus(host)
str = ""
for type in CHECK:
if(not(REMOVE_BLANKS) or (status[type]['used'] != "0.0bytes")):
str += " " + type + ":" + "\n"
str += " " + status[type]['used'] + "/" + status[type]['max'] + "\n"
str += " " + statusBar(status[type]['precent']) + " " + status[type]['precent'] + "%" + "\n"
return str
def statusBar(precent):
value = float(precent)
value = STEPS * (value / 100)
value = round(value)
value = int(value)
str = ""
for x in range(0, value):
str += "#"
for x in range(value, STEPS):
str += "-"
return "[" + str + "]"
######################################################################
###### Functions end #################################################
######################################################################
str = ""
for host in HOSTS:
str += (host['name'] + "\n")
str += (saveHost(host) + "\n")
msg = MIMEText(str)
msg['Subject'] = EMAIL_SUBJECT
msg['From'] = EMAIL_FROM
msg['To'] = EMAIL_TO
server = smtplib.SMTP( "localhost", 25 )
server.sendmail( EMAIL_FROM, EMAIL_TO, msg.as_string() )
server.quit()
| gpl-2.0 | -8,179,583,440,252,265,000 | 25.239583 | 101 | 0.462485 | false | 3.275683 | false | false | false |
eonpatapon/nova | nova/api/openstack/__init__.py | 18 | 17376 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
WSGI middleware for OpenStack API controllers.
"""
from oslo_config import cfg
from oslo_log import log as logging
import routes
import six
import stevedore
import webob.dec
import webob.exc
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import exception
from nova.i18n import _
from nova.i18n import _LC
from nova.i18n import _LE
from nova.i18n import _LI
from nova.i18n import _LW
from nova.i18n import translate
from nova import notifications
from nova import utils
from nova import wsgi as base_wsgi
api_opts = [
cfg.BoolOpt('enabled',
default=False,
help='Whether the V3 API is enabled or not'),
cfg.ListOpt('extensions_blacklist',
default=[],
help='A list of v3 API extensions to never load. '
'Specify the extension aliases here.'),
cfg.ListOpt('extensions_whitelist',
default=[],
help='If the list is not empty then a v3 API extension '
'will only be loaded if it exists in this list. Specify '
'the extension aliases here.')
]
api_opts_group = cfg.OptGroup(name='osapi_v3', title='API v3 Options')
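# Hedged illustration (not part of the original module): with oslo.config,
# the options above map to a nova.conf section roughly like the following.
# The extension aliases shown are examples only.
#
#   [osapi_v3]
#   enabled = True
#   extensions_blacklist = os-flavor-manage
#   extensions_whitelist =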
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.register_group(api_opts_group)
CONF.register_opts(api_opts, api_opts_group)
# List of v3 API extensions which are considered to form
# the core API and so must be present
# TODO(cyeoh): Expand this list as the core APIs are ported to V3
API_V3_CORE_EXTENSIONS = set(['os-consoles',
'extensions',
'os-flavor-extra-specs',
'os-flavor-manage',
'flavors',
'ips',
'os-keypairs',
'os-flavor-access',
'server-metadata',
'servers',
'versions'])
class FaultWrapper(base_wsgi.Middleware):
"""Calls down the middleware stack, making exceptions into faults."""
_status_to_type = {}
@staticmethod
def status_to_type(status):
if not FaultWrapper._status_to_type:
for clazz in utils.walk_class_hierarchy(webob.exc.HTTPError):
FaultWrapper._status_to_type[clazz.code] = clazz
return FaultWrapper._status_to_type.get(
status, webob.exc.HTTPInternalServerError)()
def _error(self, inner, req):
LOG.exception(_LE("Caught error: %s"), six.text_type(inner))
safe = getattr(inner, 'safe', False)
headers = getattr(inner, 'headers', None)
status = getattr(inner, 'code', 500)
if status is None:
status = 500
msg_dict = dict(url=req.url, status=status)
LOG.info(_LI("%(url)s returned with HTTP %(status)d"), msg_dict)
outer = self.status_to_type(status)
if headers:
outer.headers = headers
# NOTE(johannes): We leave the explanation empty here on
# purpose. It could possibly have sensitive information
# that should not be returned back to the user. See
# bugs 868360 and 874472
# NOTE(eglynn): However, it would be over-conservative and
# inconsistent with the EC2 API to hide every exception,
# including those that are safe to expose, see bug 1021373
if safe:
user_locale = req.best_match_language()
inner_msg = translate(inner.message, user_locale)
outer.explanation = '%s: %s' % (inner.__class__.__name__,
inner_msg)
notifications.send_api_fault(req.url, status, inner)
return wsgi.Fault(outer)
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
try:
return req.get_response(self.application)
except Exception as ex:
return self._error(ex, req)
class APIMapper(routes.Mapper):
def routematch(self, url=None, environ=None):
if url == "":
result = self._match("", environ)
return result[0], result[1]
return routes.Mapper.routematch(self, url, environ)
def connect(self, *args, **kargs):
# NOTE(vish): Default the format part of a route to only accept json
# and xml so it doesn't eat all characters after a '.'
# in the url.
kargs.setdefault('requirements', {})
if not kargs['requirements'].get('format'):
kargs['requirements']['format'] = 'json|xml'
return routes.Mapper.connect(self, *args, **kargs)
class ProjectMapper(APIMapper):
def resource(self, member_name, collection_name, **kwargs):
if 'parent_resource' not in kwargs:
kwargs['path_prefix'] = '{project_id}/'
else:
parent_resource = kwargs['parent_resource']
p_collection = parent_resource['collection_name']
p_member = parent_resource['member_name']
kwargs['path_prefix'] = '{project_id}/%s/:%s_id' % (p_collection,
p_member)
routes.Mapper.resource(self, member_name,
collection_name,
**kwargs)
class PlainMapper(APIMapper):
def resource(self, member_name, collection_name, **kwargs):
if 'parent_resource' in kwargs:
parent_resource = kwargs['parent_resource']
p_collection = parent_resource['collection_name']
p_member = parent_resource['member_name']
kwargs['path_prefix'] = '%s/:%s_id' % (p_collection, p_member)
routes.Mapper.resource(self, member_name,
collection_name,
**kwargs)
class APIRouter(base_wsgi.Router):
"""Routes requests on the OpenStack API to the appropriate controller
and method.
"""
ExtensionManager = None # override in subclasses
@classmethod
def factory(cls, global_config, **local_config):
"""Simple paste factory, :class:`nova.wsgi.Router` doesn't have one."""
return cls()
def __init__(self, ext_mgr=None, init_only=None):
if ext_mgr is None:
if self.ExtensionManager:
ext_mgr = self.ExtensionManager()
else:
raise Exception(_("Must specify an ExtensionManager class"))
mapper = ProjectMapper()
self.resources = {}
self._setup_routes(mapper, ext_mgr, init_only)
self._setup_ext_routes(mapper, ext_mgr, init_only)
self._setup_extensions(ext_mgr)
super(APIRouter, self).__init__(mapper)
def _setup_ext_routes(self, mapper, ext_mgr, init_only):
for resource in ext_mgr.get_resources():
LOG.debug('Extending resource: %s',
resource.collection)
if init_only is not None and resource.collection not in init_only:
continue
inherits = None
if resource.inherits:
inherits = self.resources.get(resource.inherits)
if not resource.controller:
resource.controller = inherits.controller
wsgi_resource = wsgi.Resource(resource.controller,
inherits=inherits)
self.resources[resource.collection] = wsgi_resource
kargs = dict(
controller=wsgi_resource,
collection=resource.collection_actions,
member=resource.member_actions)
if resource.parent:
kargs['parent_resource'] = resource.parent
mapper.resource(resource.collection, resource.collection, **kargs)
if resource.custom_routes_fn:
resource.custom_routes_fn(mapper, wsgi_resource)
def _setup_extensions(self, ext_mgr):
for extension in ext_mgr.get_controller_extensions():
collection = extension.collection
controller = extension.controller
msg_format_dict = {'collection': collection,
'ext_name': extension.extension.name}
if collection not in self.resources:
LOG.warning(_LW('Extension %(ext_name)s: Cannot extend '
'resource %(collection)s: No such resource'),
msg_format_dict)
continue
LOG.debug('Extension %(ext_name)s extended resource: '
'%(collection)s',
msg_format_dict)
resource = self.resources[collection]
resource.register_actions(controller)
resource.register_extensions(controller)
def _setup_routes(self, mapper, ext_mgr, init_only):
raise NotImplementedError()
class APIRouterV21(base_wsgi.Router):
"""Routes requests on the OpenStack v2.1 API to the appropriate controller
and method.
"""
@classmethod
def factory(cls, global_config, **local_config):
"""Simple paste factory, :class:`nova.wsgi.Router` doesn't have one."""
return cls()
@staticmethod
def api_extension_namespace():
# TODO(oomichi): This namespaces will be changed after moving all v3
# APIs to v2.1.
return 'nova.api.v3.extensions'
def __init__(self, init_only=None, v3mode=False):
# TODO(cyeoh): bp v3-api-extension-framework. Currently load
# all extensions but eventually should be able to exclude
# based on a config file
# TODO(oomichi): We can remove v3mode argument after moving all v3 APIs
# to v2.1.
def _check_load_extension(ext):
if (self.init_only is None or ext.obj.alias in
self.init_only) and isinstance(ext.obj,
extensions.V3APIExtensionBase):
# Check whitelist is either empty or if not then the extension
# is in the whitelist
if (not CONF.osapi_v3.extensions_whitelist or
ext.obj.alias in CONF.osapi_v3.extensions_whitelist):
# Check the extension is not in the blacklist
if ext.obj.alias not in CONF.osapi_v3.extensions_blacklist:
return self._register_extension(ext)
return False
if not CONF.osapi_v3.enabled:
LOG.info(_LI("V3 API has been disabled by configuration"))
return
self.init_only = init_only
LOG.debug("v3 API Extension Blacklist: %s",
CONF.osapi_v3.extensions_blacklist)
LOG.debug("v3 API Extension Whitelist: %s",
CONF.osapi_v3.extensions_whitelist)
in_blacklist_and_whitelist = set(
CONF.osapi_v3.extensions_whitelist).intersection(
CONF.osapi_v3.extensions_blacklist)
if len(in_blacklist_and_whitelist) != 0:
LOG.warning(_LW("Extensions in both blacklist and whitelist: %s"),
list(in_blacklist_and_whitelist))
self.api_extension_manager = stevedore.enabled.EnabledExtensionManager(
namespace=self.api_extension_namespace(),
check_func=_check_load_extension,
invoke_on_load=True,
invoke_kwds={"extension_info": self.loaded_extension_info})
if v3mode:
mapper = PlainMapper()
else:
mapper = ProjectMapper()
self.resources = {}
# NOTE(cyeoh) Core API support is rewritten as extensions
# but conceptually still have core
if list(self.api_extension_manager):
# NOTE(cyeoh): Stevedore raises an exception if there are
# no plugins detected. I wonder if this is a bug.
self._register_resources_check_inherits(mapper)
self.api_extension_manager.map(self._register_controllers)
missing_core_extensions = self.get_missing_core_extensions(
self.loaded_extension_info.get_extensions().keys())
if not self.init_only and missing_core_extensions:
LOG.critical(_LC("Missing core API extensions: %s"),
missing_core_extensions)
raise exception.CoreAPIMissing(
missing_apis=missing_core_extensions)
LOG.info(_LI("Loaded extensions: %s"),
sorted(self.loaded_extension_info.get_extensions().keys()))
super(APIRouterV21, self).__init__(mapper)
def _register_resources_list(self, ext_list, mapper):
for ext in ext_list:
self._register_resources(ext, mapper)
def _register_resources_check_inherits(self, mapper):
ext_has_inherits = []
ext_no_inherits = []
for ext in self.api_extension_manager:
for resource in ext.obj.get_resources():
if resource.inherits:
ext_has_inherits.append(ext)
break
else:
ext_no_inherits.append(ext)
self._register_resources_list(ext_no_inherits, mapper)
self._register_resources_list(ext_has_inherits, mapper)
@staticmethod
def get_missing_core_extensions(extensions_loaded):
extensions_loaded = set(extensions_loaded)
missing_extensions = API_V3_CORE_EXTENSIONS - extensions_loaded
return list(missing_extensions)
@property
def loaded_extension_info(self):
raise NotImplementedError()
def _register_extension(self, ext):
raise NotImplementedError()
def _register_resources(self, ext, mapper):
"""Register resources defined by the extensions
Extensions define what resources they want to add through a
get_resources function
"""
handler = ext.obj
LOG.debug("Running _register_resources on %s", ext.obj)
for resource in handler.get_resources():
LOG.debug('Extended resource: %s', resource.collection)
inherits = None
if resource.inherits:
inherits = self.resources.get(resource.inherits)
if not resource.controller:
resource.controller = inherits.controller
wsgi_resource = wsgi.ResourceV21(resource.controller,
inherits=inherits)
self.resources[resource.collection] = wsgi_resource
kargs = dict(
controller=wsgi_resource,
collection=resource.collection_actions,
member=resource.member_actions)
if resource.parent:
kargs['parent_resource'] = resource.parent
# non core-API plugins use the collection name as the
# member name, but the core-API plugins use the
# singular/plural convention for member/collection names
if resource.member_name:
member_name = resource.member_name
else:
member_name = resource.collection
mapper.resource(member_name, resource.collection,
**kargs)
if resource.custom_routes_fn:
resource.custom_routes_fn(mapper, wsgi_resource)
def _register_controllers(self, ext):
"""Register controllers defined by the extensions
Extensions define what resources they want to add through
a get_controller_extensions function
"""
handler = ext.obj
LOG.debug("Running _register_controllers on %s", ext.obj)
for extension in handler.get_controller_extensions():
ext_name = extension.extension.name
collection = extension.collection
controller = extension.controller
if collection not in self.resources:
LOG.warning(_LW('Extension %(ext_name)s: Cannot extend '
'resource %(collection)s: No such resource'),
{'ext_name': ext_name, 'collection': collection})
continue
LOG.debug('Extension %(ext_name)s extending resource: '
'%(collection)s',
{'ext_name': ext_name, 'collection': collection})
resource = self.resources[collection]
resource.register_actions(controller)
resource.register_extensions(controller)
| apache-2.0 | -6,131,045,606,676,235,000 | 38.312217 | 79 | 0.587132 | false | 4.500389 | true | false | false |
mlperf/training_results_v0.7 | Fujitsu/benchmarks/resnet/implementations/implementation_open/mxnet/3rdparty/tvm/python/tvm/relay/testing/mlp.py | 2 | 2916 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
a simple multilayer perceptron
"""
from __future__ import absolute_import
from tvm import relay
from .init import create_workload
def get_net(batch_size,
num_classes=10,
image_shape=(1, 28, 28),
dtype="float32"):
"""Get network a simple multilayer perceptron.
batch_size : int
The batch size used in the model
num_classes : int, optional
Number of claseses
image_shape : tuple, optional
The input image shape
dtype : str, optional
The data type
Returns
-------
net : relay.Function
The dataflow.
"""
data_shape = (batch_size,) + image_shape
data = relay.var("data",
shape=data_shape,
dtype=dtype)
data = relay.nn.batch_flatten(data)
fc1 = relay.nn.dense(data, relay.var("fc1_weight"), units=128)
fc1 = relay.nn.bias_add(fc1, relay.var("fc1_bias"), axis=-1)
act1 = relay.nn.relu(fc1)
fc2 = relay.nn.dense(act1, relay.var("fc2_weight"), units=64)
fc2 = relay.nn.bias_add(fc2, relay.var("fc2_bias"), axis=-1)
act2 = relay.nn.relu(fc2)
fc3 = relay.nn.dense(act2, relay.var("fc3_weight"), units=num_classes)
fc3 = relay.nn.bias_add(fc3, relay.var("fc3_bias"), axis=-1)
mlp = relay.nn.softmax(data=fc3)
args = relay.analysis.free_vars(mlp)
return relay.Function(args, mlp)
def get_workload(batch_size,
num_classes=10,
image_shape=(1, 28, 28),
dtype="float32"):
"""Get benchmark workload for a simple multilayer perceptron.
Parameters
----------
batch_size : int
The batch size used in the model
num_classes : int, optional
Number of classes
image_shape : tuple, optional
The input image shape
dtype : str, optional
The data type
Returns
-------
mod : tvm.relay.Module
The relay module that contains a mlp network.
params : dict of str to NDArray
The parameters.
"""
net = get_net(batch_size, num_classes, image_shape, dtype)
return create_workload(net)
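# Hedged usage sketch (added for illustration; assumes a working TVM/Relay
# installation and is not part of the original file):
def _example_build_mlp_workload():
    # Build the Relay module plus randomly initialised parameters for a
    # batch-1, 10-class MLP and return them for inspection or compilation.
    mod, params = get_workload(batch_size=1, num_classes=10)
    return mod, params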
| apache-2.0 | 4,633,262,919,590,435,000 | 30.021277 | 74 | 0.64369 | false | 3.71465 | false | false | false |
Starch/paperwork | src/paperwork/frontend/util/canvas/animators.py | 3 | 3783 | # Paperwork - Using OCR to grep dead trees the easy way
# Copyright (C) 2014 Jerome Flesch
#
# Paperwork is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Paperwork is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Paperwork. If not, see <http://www.gnu.org/licenses/>.
from gi.repository import GObject
from paperwork.frontend.util.canvas import Canvas
class Animator(GObject.GObject):
__gsignals__ = {
'animator-start': (GObject.SignalFlags.RUN_LAST, None, ()),
'animator-end': (GObject.SignalFlags.RUN_LAST, None, ()),
}
def __init__(self,
drawer,
attr_name, attr_values, # one value per canvas tick
canvas=None):
GObject.GObject.__init__(self)
self.drawer = drawer
self.attr_name = attr_name
self.attr_values = attr_values
self.canvas = canvas
self.started = False
self.stopped = False
self.previous_pos = self.drawer.relative_position
self.previous_size = self.drawer.relative_size
def set_canvas(self, canvas):
self.canvas = canvas
def on_tick(self):
if len(self.attr_values) <= 0:
if not self.stopped:
self.stopped = True
self.emit('animator-end')
return
if not self.started:
self.started = True
self.emit('animator-start')
setattr(self.drawer, self.attr_name, self.attr_values[0])
self.attr_values = self.attr_values[1:]
self.canvas.redraw((self.previous_pos, self.previous_size))
self.previous_pos = self.drawer.relative_position
self.previous_size = self.drawer.relative_size
self.canvas.redraw((self.previous_pos, self.previous_size))
class LinearSimpleAnimator(Animator):
def __init__(self, drawer,
target_value,
time_length, # ms
attr_name='angle',
canvas=None):
nb_values = int(time_length / Canvas.TICK_INTERVAL)
assert(nb_values)
value_intervals = (
(target_value - getattr(drawer, attr_name)) / nb_values
)
values = [
getattr(drawer, attr_name) + (i * value_intervals)
for i in range(0, nb_values + 1)
]
if values[-1] != target_value:
values.append(target_value)
Animator.__init__(self, drawer, attr_name, values, canvas)
GObject.type_register(LinearSimpleAnimator)
class LinearCoordAnimator(Animator):
def __init__(self, drawer,
target_coord,
time_length, # ms
attr_name='position',
canvas=None):
nb_coords = int(time_length / Canvas.TICK_INTERVAL)
assert(nb_coords)
pos_intervals = (
(target_coord[0] - getattr(drawer, attr_name)[0]) / nb_coords,
(target_coord[1] - getattr(drawer, attr_name)[1]) / nb_coords,
)
coords = [
(getattr(drawer, attr_name)[0] + (i * pos_intervals[0]),
getattr(drawer, attr_name)[1] + (i * pos_intervals[1]))
for i in range(0, nb_coords + 1)
]
Animator.__init__(self, drawer, attr_name, coords, canvas)
GObject.type_register(LinearCoordAnimator)
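# Hedged usage sketch (not part of the original module; `drawer` and `canvas`
# stand for real Paperwork drawer/canvas objects created elsewhere):
def _example_slide_to_origin(drawer, canvas):
    # Slide the drawer's 'position' attribute to (0, 0) over roughly 500 ms,
    # one interpolated step per canvas tick.
    animator = LinearCoordAnimator(drawer, (0, 0), 500,
                                   attr_name='position', canvas=canvas)
    return animator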
| gpl-3.0 | 8,807,668,755,939,996,000 | 33.706422 | 74 | 0.595559 | false | 3.760437 | false | false | false |
daviddoria/itkHoughTransform | Wrapping/WrapITK/Languages/SwigInterface/pygccxml-1.0.0/pygccxml/declarations/container_traits.py | 13 | 20892 | # Copyright 2004-2008 Roman Yakovenko.
# Distributed under the Boost Software License, Version 1.0. (See
# accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
"""
defines a few algorithms that deal with different properties of std containers
"""
import types
import string
import calldef
import cpptypes
import namespace
import templates
import type_traits
import class_declaration
std_namespaces = ( 'std', 'stdext', '__gnu_cxx' )
class defaults_eraser:
@staticmethod
def normalize( type_str ):
return type_str.replace( ' ', '' )
@staticmethod
def replace_basic_string( cls_name ):
strings = {
'std::string' : ( 'std::basic_string<char,std::char_traits<char>,std::allocator<char> >'
, 'std::basic_string<char, std::char_traits<char>, std::allocator<char> >' )
, 'std::wstring' : ( 'std::basic_string<wchar_t,std::char_traits<wchar_t>,std::allocator<wchar_t> >'
, 'std::basic_string<wchar_t, std::char_traits<wchar_t>, std::allocator<wchar_t> >' ) }
new_name = cls_name
for short_name, long_names in strings.iteritems():
for lname in long_names:
new_name = new_name.replace( lname, short_name )
return new_name
class recursive_impl:
@staticmethod
def decorated_call_prefix( cls_name, text, doit ):
has_text = cls_name.startswith( text )
if has_text:
cls_name = cls_name[ len( text ): ]
answer = doit( cls_name )
if has_text:
answer = text + answer
return answer
@staticmethod
def decorated_call_suffix( cls_name, text, doit ):
has_text = cls_name.endswith( text )
if has_text:
cls_name = cls_name[: len( text )]
answer = doit( cls_name )
if has_text:
answer = answer + text
return answer
@staticmethod
def erase_call( cls_name ):
global find_container_traits
c_traits = find_container_traits( cls_name )
if not c_traits:
return cls_name
return c_traits.remove_defaults( cls_name )
@staticmethod
def erase_recursive( cls_name ):
ri = defaults_eraser.recursive_impl
no_std = lambda cls_name: ri.decorated_call_prefix( cls_name, 'std::', ri.erase_call )
no_stdext = lambda cls_name: ri.decorated_call_prefix( cls_name, 'stdext::', no_std )
no_gnustd = lambda cls_name: ri.decorated_call_prefix( cls_name, '__gnu_cxx::', no_stdext )
no_const = lambda cls_name: ri.decorated_call_prefix( cls_name, 'const ', no_gnustd )
no_end_const = lambda cls_name: ri.decorated_call_suffix( cls_name, ' const', no_const )
return no_end_const( cls_name )
@staticmethod
def erase_recursive( cls_name ):
return defaults_eraser.recursive_impl.erase_recursive( cls_name )
@staticmethod
def erase_allocator( cls_name, default_allocator='std::allocator' ):
cls_name = defaults_eraser.replace_basic_string( cls_name )
c_name, c_args = templates.split( cls_name )
if 2 != len( c_args ):
return
value_type = c_args[0]
tmpl = string.Template( "$container< $value_type, $allocator<$value_type> >" )
tmpl = tmpl.substitute( container=c_name, value_type=value_type, allocator=default_allocator )
if defaults_eraser.normalize( cls_name ) == defaults_eraser.normalize( tmpl ):
return templates.join( c_name, [defaults_eraser.erase_recursive( value_type )] )
@staticmethod
def erase_container( cls_name, default_container_name='std::deque' ):
cls_name = defaults_eraser.replace_basic_string( cls_name )
c_name, c_args = templates.split( cls_name )
if 2 != len( c_args ):
return
value_type = c_args[0]
dc_no_defaults = defaults_eraser.erase_recursive( c_args[1] )
if defaults_eraser.normalize( dc_no_defaults ) \
!= defaults_eraser.normalize( templates.join( default_container_name, [value_type] ) ):
return
return templates.join( c_name, [defaults_eraser.erase_recursive( value_type )] )
@staticmethod
def erase_container_compare( cls_name, default_container_name='std::vector', default_compare='std::less' ):
cls_name = defaults_eraser.replace_basic_string( cls_name )
c_name, c_args = templates.split( cls_name )
if 3 != len( c_args ):
return
dc_no_defaults = defaults_eraser.erase_recursive( c_args[1] )
if defaults_eraser.normalize( dc_no_defaults ) \
!= defaults_eraser.normalize( templates.join( default_container_name, [c_args[0]] ) ):
return
dcomp_no_defaults = defaults_eraser.erase_recursive( c_args[2] )
if defaults_eraser.normalize( dcomp_no_defaults ) \
!= defaults_eraser.normalize( templates.join( default_compare, [c_args[0]] ) ):
return
value_type = defaults_eraser.erase_recursive( c_args[0] )
return templates.join( c_name, [value_type] )
@staticmethod
def erase_compare_allocator( cls_name, default_compare='std::less', default_allocator='std::allocator' ):
cls_name = defaults_eraser.replace_basic_string( cls_name )
c_name, c_args = templates.split( cls_name )
if 3 != len( c_args ):
return
value_type = c_args[0]
tmpl = string.Template( "$container< $value_type, $compare<$value_type>, $allocator<$value_type> >" )
tmpl = tmpl.substitute( container=c_name
, value_type=value_type
, compare=default_compare
, allocator=default_allocator )
if defaults_eraser.normalize( cls_name ) == defaults_eraser.normalize( tmpl ):
return templates.join( c_name, [defaults_eraser.erase_recursive( value_type )] )
@staticmethod
def erase_map_compare_allocator( cls_name, default_compare='std::less', default_allocator='std::allocator' ):
cls_name = defaults_eraser.replace_basic_string( cls_name )
c_name, c_args = templates.split( cls_name )
if 4 != len( c_args ):
return
key_type = c_args[0]
mapped_type = c_args[1]
tmpls = [
string.Template( "$container< $key_type, $mapped_type, $compare<$key_type>, $allocator< std::pair< const $key_type, $mapped_type> > >" )
, string.Template( "$container< $key_type, $mapped_type, $compare<$key_type>, $allocator< std::pair< $key_type const, $mapped_type> > >" )
, string.Template( "$container< $key_type, $mapped_type, $compare<$key_type>, $allocator< std::pair< $key_type, $mapped_type> > >" )]
for tmpl in tmpls:
tmpl = tmpl.substitute( container=c_name
, key_type=key_type
, mapped_type=mapped_type
, compare=default_compare
, allocator=default_allocator )
if defaults_eraser.normalize( cls_name ) == defaults_eraser.normalize( tmpl ):
return templates.join( c_name
, [ defaults_eraser.erase_recursive( key_type )
, defaults_eraser.erase_recursive( mapped_type )] )
@staticmethod
def erase_hash_allocator( cls_name ):
cls_name = defaults_eraser.replace_basic_string( cls_name )
c_name, c_args = templates.split( cls_name )
if len( c_args ) < 3:
return
default_hash=None
default_less='std::less'
default_equal_to='std::equal_to'
default_allocator='std::allocator'
tmpl = None
if 3 == len( c_args ):
default_hash='hash_compare'
tmpl = "$container< $value_type, $hash<$value_type, $less<$value_type> >, $allocator<$value_type> >"
elif 4 == len( c_args ):
default_hash='hash'
tmpl = "$container< $value_type, $hash<$value_type >, $equal_to<$value_type >, $allocator<$value_type> >"
else:
return
value_type = c_args[0]
tmpl = string.Template( tmpl )
for ns in std_namespaces:
inst = tmpl.substitute( container=c_name
, value_type=value_type
, hash= ns + '::' + default_hash
, less=default_less
, equal_to=default_equal_to
, allocator=default_allocator )
if defaults_eraser.normalize( cls_name ) == defaults_eraser.normalize( inst ):
return templates.join( c_name, [defaults_eraser.erase_recursive( value_type )] )
@staticmethod
def erase_hashmap_compare_allocator( cls_name ):
cls_name = defaults_eraser.replace_basic_string( cls_name )
c_name, c_args = templates.split( cls_name )
default_hash=None
default_less='std::less'
default_allocator='std::allocator'
default_equal_to = 'std::equal_to'
tmpl = None
key_type = None
mapped_type = None
if 2 < len( c_args ):
key_type = c_args[0]
mapped_type = c_args[1]
else:
return
if 4 == len( c_args ):
default_hash = 'hash_compare'
tmpl = string.Template( "$container< $key_type, $mapped_type, $hash<$key_type, $less<$key_type> >, $allocator< std::pair< const $key_type, $mapped_type> > >" )
if key_type.startswith( 'const ' ) or key_type.endswith( ' const' ):
tmpl = string.Template( "$container< $key_type, $mapped_type, $hash<$key_type, $less<$key_type> >, $allocator< std::pair< $key_type, $mapped_type> > >" )
elif 5 == len( c_args ):
default_hash = 'hash'
tmpl = string.Template( "$container< $key_type, $mapped_type, $hash<$key_type >, $equal_to<$key_type>, $allocator< $mapped_type> >" )
if key_type.startswith( 'const ' ) or key_type.endswith( ' const' ):
tmpl = string.Template( "$container< $key_type, $mapped_type, $hash<$key_type >, $equal_to<$key_type>, $allocator< $mapped_type > >" )
else:
return
for ns in std_namespaces:
inst = tmpl.substitute( container=c_name
, key_type=key_type
, mapped_type=mapped_type
, hash=ns + '::' + default_hash
, less=default_less
, equal_to = default_equal_to
, allocator=default_allocator )
if defaults_eraser.normalize( cls_name ) == defaults_eraser.normalize( inst ):
return templates.join( c_name
, [ defaults_eraser.erase_recursive( key_type )
, defaults_eraser.erase_recursive( mapped_type )] )
class container_traits_impl_t:
"""this class implements the functionality needed for convinient work with
STD container classes.
Implemented functionality:
- find out whether a declaration is STD container or not
- find out container value( mapped ) type
This class tries to be useful as much, as possible. For example, for class
declaration( and not definition ) it parsers the class name in order to
extract all the information.
"""
def __init__( self
, container_name
, element_type_index
, element_type_typedef
, defaults_remover
, key_type_index=None
, key_type_typedef=None ):
"""
container_name - std container name
element_type_index - position of value\\mapped type within template
arguments list
element_type_typedef - class typedef to the value\\mapped type
key_type_index - position of key type within template arguments list
key_type_typedef - class typedef to the key type
"""
self._name = container_name
self.remove_defaults_impl = defaults_remover
self.element_type_index = element_type_index
self.element_type_typedef = element_type_typedef
self.key_type_index = key_type_index
self.key_type_typedef = key_type_typedef
def name(self):
return self._name
def get_container_or_none( self, type ):
"""returns reference to the class declaration or None"""
type = type_traits.remove_alias( type )
type = type_traits.remove_cv( type )
cls = None
if isinstance( type, cpptypes.declarated_t ):
cls = type_traits.remove_alias( type.declaration )
elif isinstance( type, class_declaration.class_t ):
cls = type
elif isinstance( type, class_declaration.class_declaration_t ):
cls = type
else:
return
if not cls.name.startswith( self.name() + '<' ):
return
for ns in std_namespaces:
if type_traits.impl_details.is_defined_in_xxx( ns, cls ):
return cls
def is_my_case( self, type ):
"""checks, whether type is STD container or not"""
return bool( self.get_container_or_none( type ) )
def class_declaration( self, type ):
"""returns reference to the class declaration"""
cls = self.get_container_or_none( type )
if not cls:
raise TypeError( 'Type "%s" is not instantiation of std::%s' % ( type.decl_string, self.name() ) )
return cls
def is_sequence( self, type ):
#raise exception if type is not container
unused = self.class_declaration( type )
return self.key_type_index is None
def is_mapping( self, type ):
return not self.is_sequence( type )
def __find_xxx_type( self, type, xxx_index, xxx_typedef, cache_property_name ):
cls = self.class_declaration( type )
result = getattr( cls.cache, cache_property_name )
if not result:
if isinstance( cls, class_declaration.class_t ):
xxx_type = cls.typedef( xxx_typedef, recursive=False ).type
result = type_traits.remove_declarated( xxx_type )
else:
xxx_type_str = templates.args( cls.name )[xxx_index]
result = type_traits.impl_details.find_value_type( cls.top_parent, xxx_type_str )
if None is result:
raise RuntimeError( "Unable to find out %s '%s' key\\value type."
% ( self.name(), cls.decl_string ) )
setattr( cls.cache, cache_property_name, result )
return result
def element_type( self, type ):
"""returns reference to the class value\\mapped type declaration"""
return self.__find_xxx_type( type
, self.element_type_index
, self.element_type_typedef
, 'container_element_type')
def key_type( self, type ):
"""returns reference to the class key type declaration"""
if not self.is_mapping( type ):
raise TypeError( 'Type "%s" is not "mapping" container' % str( type ) )
return self.__find_xxx_type( type
, self.key_type_index
, self.key_type_typedef
, 'container_key_type' )
def remove_defaults( self, type_or_string ):
"""remove template defaults from a template class instantiation
For example:
std::vector< int, std::allocator< int > >
will become
std::vector< int >
"""
name = type_or_string
if not isinstance( type_or_string, types.StringTypes ):
name = self.class_declaration( type_or_string ).name
if not self.remove_defaults_impl:
return name
no_defaults = self.remove_defaults_impl( name )
if not no_defaults:
return name
else:
return no_defaults
create_traits = container_traits_impl_t
list_traits = create_traits( 'list'
, 0
, 'value_type'
, defaults_eraser.erase_allocator )
deque_traits = create_traits( 'deque'
, 0
, 'value_type'
, defaults_eraser.erase_allocator )
queue_traits = create_traits( 'queue'
, 0
, 'value_type'
, defaults_eraser.erase_container )
priority_queue_traits = create_traits( 'priority_queue'
, 0
, 'value_type'
, defaults_eraser.erase_container_compare )
vector_traits = create_traits( 'vector'
, 0
, 'value_type'
, defaults_eraser.erase_allocator )
stack_traits = create_traits( 'stack'
, 0
, 'value_type'
, defaults_eraser.erase_container )
map_traits = create_traits( 'map'
, 1
, 'mapped_type'
, defaults_eraser.erase_map_compare_allocator
, key_type_index=0
, key_type_typedef='key_type')
multimap_traits = create_traits( 'multimap'
, 1
, 'mapped_type'
, defaults_eraser.erase_map_compare_allocator
, key_type_index=0
, key_type_typedef='key_type')
hash_map_traits = create_traits( 'hash_map'
, 1
, 'mapped_type'
, defaults_eraser.erase_hashmap_compare_allocator
, key_type_index=0
, key_type_typedef='key_type')
hash_multimap_traits = create_traits( 'hash_multimap'
, 1
, 'mapped_type'
, defaults_eraser.erase_hashmap_compare_allocator
, key_type_index=0
, key_type_typedef='key_type')
set_traits = create_traits( 'set'
, 0
, 'value_type'
, defaults_eraser.erase_compare_allocator)
multiset_traits = create_traits( 'multiset'
, 0
, 'value_type'
, defaults_eraser.erase_compare_allocator )
hash_set_traits = create_traits( 'hash_set'
, 0
, 'value_type'
, defaults_eraser.erase_hash_allocator )
hash_multiset_traits = create_traits( 'hash_multiset'
, 0
, 'value_type'
, defaults_eraser.erase_hash_allocator )
container_traits = (
list_traits
, deque_traits
, queue_traits
, priority_queue_traits
, vector_traits
, stack_traits
, map_traits
, multimap_traits
, hash_map_traits
, hash_multimap_traits
, set_traits
, hash_set_traits
, multiset_traits
, hash_multiset_traits )
"""tuple of all STD container traits classes"""
def find_container_traits( cls_or_string ):
if isinstance( cls_or_string, types.StringTypes ):
if not templates.is_instantiation( cls_or_string ):
return None
name = templates.name( cls_or_string )
if name.startswith( 'std::' ):
name = name[ len( 'std::' ): ]
for cls_traits in container_traits:
if cls_traits.name() == name:
return cls_traits
else:
for cls_traits in container_traits:
if cls_traits.is_my_case( cls_or_string ):
return cls_traits
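# Hedged usage sketch (added for illustration, not part of the original
# module): container traits can be looked up directly from a class name
# string; the vector instantiation below is only an example.
def _example_remove_defaults():
    name = 'std::vector<int, std::allocator<int> >'
    traits = find_container_traits(name)
    if traits:
        # expected to drop the default allocator, e.g. 'std::vector< int >'
        return traits.remove_defaults(name)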
| apache-2.0 | 8,086,877,949,867,987,000 | 42.076289 | 171 | 0.531543 | false | 4.150179 | false | false | false |
mcfletch/AutobahnPython | examples/twisted/wamp/auth/persona/server.py | 7 | 9085 | ###############################################################################
##
## Copyright (C) 2011-2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
import datetime
from autobahn.twisted.wamp import ApplicationSession
class TimeService(ApplicationSession):
"""
A simple time service application component.
"""
def __init__(self, realm = "realm1"):
ApplicationSession.__init__(self)
self._realm = realm
def onConnect(self):
self.join(self._realm)
def onJoin(self, details):
def utcnow():
now = datetime.datetime.utcnow()
return now.strftime("%Y-%m-%dT%H:%M:%SZ")
self.register(utcnow, 'com.timeservice.now')
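# Hedged illustration (not part of the original example): a client-side
# ApplicationSession that has joined the same realm could call the
# procedure registered above roughly like this:
#
#   from twisted.internet.defer import inlineCallbacks
#
#   @inlineCallbacks
#   def onJoin(self, details):
#       now = yield self.call('com.timeservice.now')
#       print "current UTC time:", now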
from twisted.python import log
from autobahn.twisted.websocket import WampWebSocketServerProtocol, WampWebSocketServerFactory
from twisted.internet.defer import Deferred
import json
import urllib
import Cookie
from autobahn.util import newid, utcnow
from autobahn.websocket import http
class ServerProtocol(WampWebSocketServerProtocol):
## authid -> cookie -> set(connection)
def onConnect(self, request):
protocol, headers = WampWebSocketServerProtocol.onConnect(self, request)
## our cookie tracking ID
self._cbtid = None
## see if there already is a cookie set ..
if request.headers.has_key('cookie'):
try:
cookie = Cookie.SimpleCookie()
cookie.load(str(request.headers['cookie']))
except Cookie.CookieError:
pass
else:
if cookie.has_key('cbtid'):
cbtid = cookie['cbtid'].value
if self.factory._cookies.has_key(cbtid):
self._cbtid = cbtid
log.msg("Cookie already set: %s" % self._cbtid)
## if no cookie is set, create a new one ..
if self._cbtid is None:
self._cbtid = newid()
maxAge = 86400
cbtData = {'created': utcnow(),
'authenticated': None,
'maxAge': maxAge,
'connections': set()}
self.factory._cookies[self._cbtid] = cbtData
## do NOT add the "secure" cookie attribute! "secure" refers to the
## scheme of the Web page that triggered the WS, not WS itself!!
##
headers['Set-Cookie'] = 'cbtid=%s;max-age=%d' % (self._cbtid, maxAge)
log.msg("Setting new cookie: %s" % self._cbtid)
## add this WebSocket connection to the set of connections
## associated with the same cookie
self.factory._cookies[self._cbtid]['connections'].add(self)
self._authenticated = self.factory._cookies[self._cbtid]['authenticated']
## accept the WebSocket connection, speaking subprotocol `protocol`
## and setting HTTP headers `headers`
return (protocol, headers)
from autobahn.twisted.wamp import RouterSession
from autobahn.wamp import types
class MyRouterSession(RouterSession):
def onOpen(self, transport):
RouterSession.onOpen(self, transport)
print "transport authenticated: {}".format(self._transport._authenticated)
def onHello(self, realm, details):
print "onHello: {} {}".format(realm, details)
if self._transport._authenticated is not None:
return types.Accept(authid = self._transport._authenticated)
else:
return types.Challenge("mozilla-persona")
def onLeave(self, details):
if details.reason == "wamp.close.logout":
cookie = self._transport.factory._cookies[self._transport._cbtid]
cookie['authenticated'] = None
for proto in cookie['connections']:
proto.sendClose()
def onAuthenticate(self, signature, extra):
print "onAuthenticate: {} {}".format(signature, extra)
dres = Deferred()
## The client did its Mozilla Persona authentication thing
## and now wants to verify the authentication and login.
assertion = signature
audience = 'http://127.0.0.1:8080/'
## To verify the authentication, we need to send a HTTP/POST
## to Mozilla Persona. When successful, Persona will send us
## back something like:
# {
# "audience": "http://192.168.1.130:8080/",
# "expires": 1393681951257,
# "issuer": "gmail.login.persona.org",
# "email": "[email protected]",
# "status": "okay"
# }
headers = {'Content-Type': 'application/x-www-form-urlencoded'}
body = urllib.urlencode({'audience': audience, 'assertion': assertion})
from twisted.web.client import getPage
d = getPage(url = "https://verifier.login.persona.org/verify",
method = 'POST',
postdata = body,
headers = headers)
log.msg("Authentication request sent.")
def done(res):
res = json.loads(res)
try:
if res['status'] == 'okay':
## Mozilla Persona successfully authenticated the user
## remember the user's email address. this marks the cookie as
## authenticated
self._transport.factory._cookies[self._transport._cbtid]['authenticated'] = res['email']
log.msg("Authenticated user {}".format(res['email']))
dres.callback(types.Accept(authid = res['email']))
else:
log.msg("Authentication failed!")
dres.callback(types.Deny())
except Exception as e:
print "ERRR", e
def error(err):
log.msg("Authentication request failed: {}".format(err.value))
dres.callback(types.Deny())
d.addCallbacks(done, error)
return dres
if __name__ == '__main__':
import sys, argparse
from twisted.python import log
from twisted.internet.endpoints import serverFromString
## parse command line arguments
##
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--debug", action = "store_true",
help = "Enable debug output.")
parser.add_argument("-c", "--component", type = str, default = None,
help = "Start WAMP-WebSocket server with this application component, e.g. 'timeservice.TimeServiceBackend', or None.")
parser.add_argument("--websocket", type = str, default = "tcp:8080",
help = 'WebSocket server Twisted endpoint descriptor, e.g. "tcp:9000" or "unix:/tmp/mywebsocket".')
parser.add_argument("--wsurl", type = str, default = "ws://localhost:8080",
help = 'WebSocket URL (must suit the endpoint), e.g. "ws://localhost:9000".')
args = parser.parse_args()
## start Twisted logging to stdout
##
if True or args.debug:
log.startLogging(sys.stdout)
## we use an Autobahn utility to install the "best" available Twisted reactor
##
from autobahn.twisted.choosereactor import install_reactor
reactor = install_reactor()
if args.debug:
print("Running on reactor {}".format(reactor))
## create a WAMP router factory
##
from autobahn.wamp.router import RouterFactory
router_factory = RouterFactory()
## create a WAMP router session factory
##
from autobahn.twisted.wamp import RouterSessionFactory
session_factory = RouterSessionFactory(router_factory)
session_factory.session = MyRouterSession
## start an embedded application component ..
##
session_factory.add(TimeService())
## create a WAMP-over-WebSocket transport server factory
##
from autobahn.twisted.websocket import WampWebSocketServerFactory
transport_factory = WampWebSocketServerFactory(session_factory, args.wsurl, debug_wamp = args.debug)
transport_factory.protocol = ServerProtocol
transport_factory._cookies = {}
transport_factory.setProtocolOptions(failByDrop = False)
from twisted.web.server import Site
from twisted.web.static import File
from autobahn.twisted.resource import WebSocketResource
## we serve static files under "/" ..
root = File(".")
## .. and our WebSocket server under "/ws"
resource = WebSocketResource(transport_factory)
root.putChild("ws", resource)
## run both under one Twisted Web Site
site = Site(root)
## start the WebSocket server from an endpoint
##
server = serverFromString(reactor, args.websocket)
server.listen(site)
## now enter the Twisted reactor loop
##
reactor.run()
| apache-2.0 | -6,196,339,740,024,204,000 | 29.901361 | 141 | 0.628398 | false | 4.255269 | false | false | false |
Pakketeretet2/lammps | python/lammps.py | 1 | 38678 | # ----------------------------------------------------------------------
# LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
# http://lammps.sandia.gov, Sandia National Laboratories
# Steve Plimpton, [email protected]
#
# Copyright (2003) Sandia Corporation. Under the terms of Contract
# DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
# certain rights in this software. This software is distributed under
# the GNU General Public License.
#
# See the README file in the top-level LAMMPS directory.
# -------------------------------------------------------------------------
# Python wrappers on LAMMPS library via ctypes
# for python3 compatibility
from __future__ import print_function
# imports for simple LAMMPS python wrapper module "lammps"
import sys,traceback,types
from ctypes import *
from os.path import dirname,abspath,join
from inspect import getsourcefile
# imports for advanced LAMMPS python wrapper modules "PyLammps" and "IPyLammps"
from collections import namedtuple
import os
import select
import re
import sys
def get_ctypes_int(size):
if size == 4:
return c_int32
elif size == 8:
return c_int64
return c_int
class MPIAbortException(Exception):
def __init__(self, message):
self.message = message
def __str__(self):
return repr(self.message)
class lammps(object):
# detect if Python is using version of mpi4py that can pass a communicator
has_mpi4py = False
try:
from mpi4py import MPI
from mpi4py import __version__ as mpi4py_version
if mpi4py_version.split('.')[0] in ['2','3']: has_mpi4py = True
except:
pass
# create instance of LAMMPS
def __init__(self,name="",cmdargs=None,ptr=None,comm=None):
self.comm = comm
self.opened = 0
# determine module location
modpath = dirname(abspath(getsourcefile(lambda:0)))
self.lib = None
# if a pointer to a LAMMPS object is handed in,
# all symbols should already be available
try:
if ptr: self.lib = CDLL("",RTLD_GLOBAL)
except:
self.lib = None
# load liblammps.so unless name is given
# if name = "g++", load liblammps_g++.so
# try loading the LAMMPS shared object from the location
# of lammps.py with an absolute path,
# so that LD_LIBRARY_PATH does not need to be set for regular install
# fall back to loading with a relative path,
# typically requires LD_LIBRARY_PATH to be set appropriately
if any([f.startswith('liblammps') and f.endswith('.dylib') for f in os.listdir(modpath)]):
lib_ext = ".dylib"
else:
lib_ext = ".so"
if not self.lib:
try:
if not name: self.lib = CDLL(join(modpath,"liblammps" + lib_ext),RTLD_GLOBAL)
else: self.lib = CDLL(join(modpath,"liblammps_%s" % name + lib_ext),
RTLD_GLOBAL)
except:
if not name: self.lib = CDLL("liblammps" + lib_ext,RTLD_GLOBAL)
else: self.lib = CDLL("liblammps_%s" % name + lib_ext,RTLD_GLOBAL)
# define ctypes API for each library method
# NOTE: should add one of these for each lib function
self.lib.lammps_extract_box.argtypes = \
[c_void_p,POINTER(c_double),POINTER(c_double),
POINTER(c_double),POINTER(c_double),POINTER(c_double),
POINTER(c_int),POINTER(c_int)]
self.lib.lammps_extract_box.restype = None
self.lib.lammps_reset_box.argtypes = \
[c_void_p,POINTER(c_double),POINTER(c_double),c_double,c_double,c_double]
self.lib.lammps_reset_box.restype = None
self.lib.lammps_gather_atoms.argtypes = \
[c_void_p,c_char_p,c_int,c_int,c_void_p]
self.lib.lammps_gather_atoms.restype = None
self.lib.lammps_gather_atoms_concat.argtypes = \
[c_void_p,c_char_p,c_int,c_int,c_void_p]
self.lib.lammps_gather_atoms_concat.restype = None
self.lib.lammps_gather_atoms_subset.argtypes = \
[c_void_p,c_char_p,c_int,c_int,c_int,POINTER(c_int),c_void_p]
self.lib.lammps_gather_atoms_subset.restype = None
self.lib.lammps_scatter_atoms.argtypes = \
[c_void_p,c_char_p,c_int,c_int,c_void_p]
self.lib.lammps_scatter_atoms.restype = None
self.lib.lammps_scatter_atoms_subset.argtypes = \
[c_void_p,c_char_p,c_int,c_int,c_int,POINTER(c_int),c_void_p]
self.lib.lammps_scatter_atoms_subset.restype = None
# if no ptr provided, create an instance of LAMMPS
# don't know how to pass an MPI communicator from PyPar
# but we can pass an MPI communicator from mpi4py v2.0.0 and later
# no_mpi call lets LAMMPS use MPI_COMM_WORLD
# cargs = array of C strings from args
# if ptr, then are embedding Python in LAMMPS input script
# ptr is the desired instance of LAMMPS
# just convert it to ctypes ptr and store in self.lmp
if not ptr:
# with mpi4py v2, can pass MPI communicator to LAMMPS
# need to adjust for type of MPI communicator object
# allow for int (like MPICH) or void* (like OpenMPI)
if comm:
if not lammps.has_mpi4py:
raise Exception('Python mpi4py version is not 2 or 3')
if lammps.MPI._sizeof(lammps.MPI.Comm) == sizeof(c_int):
MPI_Comm = c_int
else:
MPI_Comm = c_void_p
narg = 0
cargs = 0
if cmdargs:
cmdargs.insert(0,"lammps.py")
narg = len(cmdargs)
for i in range(narg):
if type(cmdargs[i]) is str:
cmdargs[i] = cmdargs[i].encode()
cargs = (c_char_p*narg)(*cmdargs)
self.lib.lammps_open.argtypes = [c_int, c_char_p*narg, \
MPI_Comm, c_void_p()]
else:
self.lib.lammps_open.argtypes = [c_int, c_int, \
MPI_Comm, c_void_p()]
self.lib.lammps_open.restype = None
self.opened = 1
self.lmp = c_void_p()
comm_ptr = lammps.MPI._addressof(comm)
comm_val = MPI_Comm.from_address(comm_ptr)
self.lib.lammps_open(narg,cargs,comm_val,byref(self.lmp))
else:
if lammps.has_mpi4py:
from mpi4py import MPI
self.comm = MPI.COMM_WORLD
self.opened = 1
if cmdargs:
cmdargs.insert(0,"lammps.py")
narg = len(cmdargs)
for i in range(narg):
if type(cmdargs[i]) is str:
cmdargs[i] = cmdargs[i].encode()
cargs = (c_char_p*narg)(*cmdargs)
self.lmp = c_void_p()
self.lib.lammps_open_no_mpi(narg,cargs,byref(self.lmp))
else:
self.lmp = c_void_p()
self.lib.lammps_open_no_mpi(0,None,byref(self.lmp))
# could use just this if LAMMPS lib interface supported it
# self.lmp = self.lib.lammps_open_no_mpi(0,None)
else:
# magic to convert ptr to ctypes ptr
if sys.version_info >= (3, 0):
# Python 3 (uses PyCapsule API)
pythonapi.PyCapsule_GetPointer.restype = c_void_p
pythonapi.PyCapsule_GetPointer.argtypes = [py_object, c_char_p]
self.lmp = c_void_p(pythonapi.PyCapsule_GetPointer(ptr, None))
else:
# Python 2 (uses PyCObject API)
pythonapi.PyCObject_AsVoidPtr.restype = c_void_p
pythonapi.PyCObject_AsVoidPtr.argtypes = [py_object]
self.lmp = c_void_p(pythonapi.PyCObject_AsVoidPtr(ptr))
# optional numpy support (lazy loading)
self._numpy = None
# set default types
self.c_bigint = get_ctypes_int(self.extract_setting("bigint"))
self.c_tagint = get_ctypes_int(self.extract_setting("tagint"))
self.c_imageint = get_ctypes_int(self.extract_setting("imageint"))
self._installed_packages = None
# add way to insert Python callback for fix external
self.callback = {}
self.FIX_EXTERNAL_CALLBACK_FUNC = CFUNCTYPE(None, c_void_p, self.c_bigint, c_int, POINTER(self.c_tagint), POINTER(POINTER(c_double)), POINTER(POINTER(c_double)))
self.lib.lammps_set_fix_external_callback.argtypes = [c_void_p, c_char_p, self.FIX_EXTERNAL_CALLBACK_FUNC, c_void_p]
self.lib.lammps_set_fix_external_callback.restype = None
# shut-down LAMMPS instance
def __del__(self):
if self.lmp and self.opened:
self.lib.lammps_close(self.lmp)
self.opened = 0
def close(self):
if self.opened: self.lib.lammps_close(self.lmp)
self.lmp = None
self.opened = 0
def version(self):
return self.lib.lammps_version(self.lmp)
def file(self,file):
if file: file = file.encode()
self.lib.lammps_file(self.lmp,file)
# send a single command
def command(self,cmd):
if cmd: cmd = cmd.encode()
self.lib.lammps_command(self.lmp,cmd)
if self.has_exceptions and self.lib.lammps_has_error(self.lmp):
sb = create_string_buffer(100)
error_type = self.lib.lammps_get_last_error_message(self.lmp, sb, 100)
error_msg = sb.value.decode().strip()
if error_type == 2:
raise MPIAbortException(error_msg)
raise Exception(error_msg)
# send a list of commands
def commands_list(self,cmdlist):
cmds = [x.encode() for x in cmdlist if type(x) is str]
args = (c_char_p * len(cmdlist))(*cmds)
self.lib.lammps_commands_list(self.lmp,len(cmdlist),args)
# send a string of commands
def commands_string(self,multicmd):
if type(multicmd) is str: multicmd = multicmd.encode()
self.lib.lammps_commands_string(self.lmp,c_char_p(multicmd))
# extract lammps type byte sizes
def extract_setting(self, name):
if name: name = name.encode()
self.lib.lammps_extract_setting.restype = c_int
return int(self.lib.lammps_extract_setting(self.lmp,name))
# extract global info
def extract_global(self,name,type):
if name: name = name.encode()
if type == 0:
self.lib.lammps_extract_global.restype = POINTER(c_int)
elif type == 1:
self.lib.lammps_extract_global.restype = POINTER(c_double)
else: return None
ptr = self.lib.lammps_extract_global(self.lmp,name)
return ptr[0]
# extract global info
def extract_box(self):
boxlo = (3*c_double)()
boxhi = (3*c_double)()
xy = c_double()
yz = c_double()
xz = c_double()
periodicity = (3*c_int)()
box_change = c_int()
self.lib.lammps_extract_box(self.lmp,boxlo,boxhi,
byref(xy),byref(yz),byref(xz),
periodicity,byref(box_change))
boxlo = boxlo[:3]
boxhi = boxhi[:3]
xy = xy.value
yz = yz.value
xz = xz.value
periodicity = periodicity[:3]
box_change = box_change.value
return boxlo,boxhi,xy,yz,xz,periodicity,box_change
# extract per-atom info
# NOTE: need to ensure we are converting to/from the correct Python type
# e.g. for Python list or NumPy or ctypes
def extract_atom(self,name,type):
if name: name = name.encode()
if type == 0:
self.lib.lammps_extract_atom.restype = POINTER(c_int)
elif type == 1:
self.lib.lammps_extract_atom.restype = POINTER(POINTER(c_int))
elif type == 2:
self.lib.lammps_extract_atom.restype = POINTER(c_double)
elif type == 3:
self.lib.lammps_extract_atom.restype = POINTER(POINTER(c_double))
else: return None
ptr = self.lib.lammps_extract_atom(self.lmp,name)
return ptr
@property
def numpy(self):
if not self._numpy:
import numpy as np
class LammpsNumpyWrapper:
def __init__(self, lmp):
self.lmp = lmp
def _ctype_to_numpy_int(self, ctype_int):
if ctype_int == c_int32:
return np.int32
elif ctype_int == c_int64:
return np.int64
return np.intc
def extract_atom_iarray(self, name, nelem, dim=1):
if name in ['id', 'molecule']:
c_int_type = self.lmp.c_tagint
elif name in ['image']:
c_int_type = self.lmp.c_imageint
else:
c_int_type = c_int
np_int_type = self._ctype_to_numpy_int(c_int_type)
if dim == 1:
tmp = self.lmp.extract_atom(name, 0)
ptr = cast(tmp, POINTER(c_int_type * nelem))
else:
tmp = self.lmp.extract_atom(name, 1)
ptr = cast(tmp[0], POINTER(c_int_type * nelem * dim))
a = np.frombuffer(ptr.contents, dtype=np_int_type)
a.shape = (nelem, dim)
return a
def extract_atom_darray(self, name, nelem, dim=1):
if dim == 1:
tmp = self.lmp.extract_atom(name, 2)
ptr = cast(tmp, POINTER(c_double * nelem))
else:
tmp = self.lmp.extract_atom(name, 3)
ptr = cast(tmp[0], POINTER(c_double * nelem * dim))
a = np.frombuffer(ptr.contents)
a.shape = (nelem, dim)
return a
self._numpy = LammpsNumpyWrapper(self)
return self._numpy
# extract compute info
def extract_compute(self,id,style,type):
if id: id = id.encode()
if type == 0:
if style > 0: return None
self.lib.lammps_extract_compute.restype = POINTER(c_double)
ptr = self.lib.lammps_extract_compute(self.lmp,id,style,type)
return ptr[0]
if type == 1:
self.lib.lammps_extract_compute.restype = POINTER(c_double)
ptr = self.lib.lammps_extract_compute(self.lmp,id,style,type)
return ptr
if type == 2:
if style == 0:
self.lib.lammps_extract_compute.restype = POINTER(c_int)
ptr = self.lib.lammps_extract_compute(self.lmp,id,style,type)
return ptr[0]
else:
self.lib.lammps_extract_compute.restype = POINTER(POINTER(c_double))
ptr = self.lib.lammps_extract_compute(self.lmp,id,style,type)
return ptr
return None
# extract fix info
# in case of global datum, free memory for 1 double via lammps_free()
# double was allocated by library interface function
def extract_fix(self,id,style,type,i=0,j=0):
if id: id = id.encode()
if style == 0:
self.lib.lammps_extract_fix.restype = POINTER(c_double)
ptr = self.lib.lammps_extract_fix(self.lmp,id,style,type,i,j)
result = ptr[0]
self.lib.lammps_free(ptr)
return result
elif (style == 1) or (style == 2):
if type == 1:
self.lib.lammps_extract_fix.restype = POINTER(c_double)
elif type == 2:
self.lib.lammps_extract_fix.restype = POINTER(POINTER(c_double))
else:
return None
ptr = self.lib.lammps_extract_fix(self.lmp,id,style,type,i,j)
return ptr
else:
return None
# extract variable info
# free memory for 1 double or 1 vector of doubles via lammps_free()
# for vector, must copy nlocal returned values to local c_double vector
# memory was allocated by library interface function
def extract_variable(self,name,group,type):
if name: name = name.encode()
if group: group = group.encode()
if type == 0:
self.lib.lammps_extract_variable.restype = POINTER(c_double)
ptr = self.lib.lammps_extract_variable(self.lmp,name,group)
result = ptr[0]
self.lib.lammps_free(ptr)
return result
if type == 1:
self.lib.lammps_extract_global.restype = POINTER(c_int)
nlocalptr = self.lib.lammps_extract_global(self.lmp,"nlocal".encode())
nlocal = nlocalptr[0]
result = (c_double*nlocal)()
self.lib.lammps_extract_variable.restype = POINTER(c_double)
ptr = self.lib.lammps_extract_variable(self.lmp,name,group)
for i in range(nlocal): result[i] = ptr[i]
self.lib.lammps_free(ptr)
return result
return None
# return current value of thermo keyword
def get_thermo(self,name):
if name: name = name.encode()
self.lib.lammps_get_thermo.restype = c_double
return self.lib.lammps_get_thermo(self.lmp,name)
# return total number of atoms in system
def get_natoms(self):
return self.lib.lammps_get_natoms(self.lmp)
# set variable value
# value is converted to string
# returns 0 for success, -1 if failed
def set_variable(self,name,value):
if name: name = name.encode()
if value: value = str(value).encode()
return self.lib.lammps_set_variable(self.lmp,name,value)
# reset simulation box size
def reset_box(self,boxlo,boxhi,xy,yz,xz):
cboxlo = (3*c_double)(*boxlo)
cboxhi = (3*c_double)(*boxhi)
self.lib.lammps_reset_box(self.lmp,cboxlo,cboxhi,xy,yz,xz)
# return vector of atom properties gathered across procs
# 3 variants to match src/library.cpp
# name = atom property recognized by LAMMPS in atom->extract()
# type = 0 for integer values, 1 for double values
# count = number of per-atom values, 1 for type or charge, 3 for x or f
# returned data is a 1d vector - doc how it is ordered?
# NOTE: need to ensure we are converting to/from the correct Python type
# e.g. for Python list or NumPy or ctypes
def gather_atoms(self,name,type,count):
if name: name = name.encode()
natoms = self.lib.lammps_get_natoms(self.lmp)
if type == 0:
data = ((count*natoms)*c_int)()
self.lib.lammps_gather_atoms(self.lmp,name,type,count,data)
elif type == 1:
data = ((count*natoms)*c_double)()
self.lib.lammps_gather_atoms(self.lmp,name,type,count,data)
else: return None
return data
def gather_atoms_concat(self,name,type,count):
if name: name = name.encode()
natoms = self.lib.lammps_get_natoms(self.lmp)
if type == 0:
data = ((count*natoms)*c_int)()
self.lib.lammps_gather_atoms_concat(self.lmp,name,type,count,data)
elif type == 1:
data = ((count*natoms)*c_double)()
self.lib.lammps_gather_atoms_concat(self.lmp,name,type,count,data)
else: return None
return data
def gather_atoms_subset(self,name,type,count,ndata,ids):
if name: name = name.encode()
if type == 0:
data = ((count*ndata)*c_int)()
self.lib.lammps_gather_atoms_subset(self.lmp,name,type,count,ndata,ids,data)
elif type == 1:
data = ((count*ndata)*c_double)()
self.lib.lammps_gather_atoms_subset(self.lmp,name,type,count,ndata,ids,data)
else: return None
return data
# scatter vector of atom properties across procs
# 2 variants to match src/library.cpp
# name = atom property recognized by LAMMPS in atom->extract()
# type = 0 for integer values, 1 for double values
# count = number of per-atom values, 1 for type or charge, 3 for x or f
# assume data is of correct type and length, as created by gather_atoms()
# NOTE: need to ensure we are converting to/from the correct Python type
# e.g. for Python list or NumPy or ctypes
def scatter_atoms(self,name,type,count,data):
if name: name = name.encode()
self.lib.lammps_scatter_atoms(self.lmp,name,type,count,data)
def scatter_atoms_subset(self,name,type,count,ndata,ids,data):
if name: name = name.encode()
self.lib.lammps_scatter_atoms_subset(self.lmp,name,type,count,ndata,ids,data)
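# Illustrative usage sketch (not part of the original wrapper). Assumes a
# LAMMPS instance "lmp" with atoms defined; "in.melt" is a hypothetical input
# script, and "x"/"type" are standard per-atom properties.
#
#   lmp = lammps()
#   lmp.file("in.melt")
#   coords = lmp.gather_atoms("x", 1, 3)      # doubles, 3 values per atom
#   types  = lmp.gather_atoms("type", 0, 1)   # ints, 1 value per atom
#   lmp.scatter_atoms("x", 1, 3, coords)      # push (possibly modified) coords back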
# create N atoms on all procs
# N = global number of atoms
# id = ID of each atom (optional, can be None)
# type = type of each atom (1 to Ntypes) (required)
# x = coords of each atom as (N,3) array (required)
# v = velocity of each atom as (N,3) array (optional, can be None)
# NOTE: how could we ensure we are passing the correct type to LAMMPS
# e.g. for Python list or NumPy, etc
# ditto for gather_atoms() above
def create_atoms(self,n,id,type,x,v,image=None,shrinkexceed=False):
if id:
id_lmp = (c_int * n)()
id_lmp[:] = id
else:
id_lmp = id
if image:
image_lmp = (c_int * n)()
image_lmp[:] = image
else:
image_lmp = image
type_lmp = (c_int * n)()
type_lmp[:] = type
self.lib.lammps_create_atoms(self.lmp,n,id_lmp,type_lmp,x,v,image_lmp,
shrinkexceed)
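# Illustrative sketch (not part of the original wrapper): creating two atoms
# with create_atoms(). One possible way to pass the (N,3) coordinates is a
# flat ctypes array of length 3*N; this layout is an assumption.
#
#   n = 2
#   ids   = [1, 2]
#   types = [1, 1]
#   xs    = (c_double * (3 * n))(0.0, 0.0, 0.0,  1.0, 0.0, 0.0)
#   lmp.create_atoms(n, ids, types, xs, None)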
@property
def has_exceptions(self):
""" Return whether the LAMMPS shared library was compiled with C++ exceptions handling enabled """
return self.lib.lammps_config_has_exceptions() != 0
@property
def has_gzip_support(self):
return self.lib.lammps_config_has_gzip_support() != 0
@property
def has_png_support(self):
return self.lib.lammps_config_has_png_support() != 0
@property
def has_jpeg_support(self):
return self.lib.lammps_config_has_jpeg_support() != 0
@property
def has_ffmpeg_support(self):
return self.lib.lammps_config_has_ffmpeg_support() != 0
@property
def installed_packages(self):
if self._installed_packages is None:
self._installed_packages = []
npackages = self.lib.lammps_config_package_count()
sb = create_string_buffer(100)
for idx in range(npackages):
self.lib.lammps_config_package_name(idx, sb, 100)
self._installed_packages.append(sb.value.decode())
return self._installed_packages
def set_fix_external_callback(self, fix_name, callback, caller=None):
import numpy as np
def _ctype_to_numpy_int(ctype_int):
if ctype_int == c_int32:
return np.int32
elif ctype_int == c_int64:
return np.int64
return np.intc
def callback_wrapper(caller_ptr, ntimestep, nlocal, tag_ptr, x_ptr, fext_ptr):
if cast(caller_ptr,POINTER(py_object)).contents:
pyCallerObj = cast(caller_ptr,POINTER(py_object)).contents.value
else:
pyCallerObj = None
tptr = cast(tag_ptr, POINTER(self.c_tagint * nlocal))
tag = np.frombuffer(tptr.contents, dtype=_ctype_to_numpy_int(self.c_tagint))
tag.shape = (nlocal)
xptr = cast(x_ptr[0], POINTER(c_double * nlocal * 3))
x = np.frombuffer(xptr.contents)
x.shape = (nlocal, 3)
fptr = cast(fext_ptr[0], POINTER(c_double * nlocal * 3))
f = np.frombuffer(fptr.contents)
f.shape = (nlocal, 3)
callback(pyCallerObj, ntimestep, nlocal, tag, x, f)
cFunc = self.FIX_EXTERNAL_CALLBACK_FUNC(callback_wrapper)
cCaller = cast(pointer(py_object(caller)), c_void_p)
self.callback[fix_name] = { 'function': cFunc, 'caller': caller }
self.lib.lammps_set_fix_external_callback(self.lmp, fix_name.encode(), cFunc, cCaller)
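# Illustrative sketch (not part of the original wrapper): a force callback for
# "fix external". Assumes the input script defines something like
#   fix ext all external pf/callback 1 1
#
#   def force_callback(caller, ntimestep, nlocal, tag, x, f):
#       # x and f arrive as (nlocal, 3) NumPy views; write forces into f in place
#       f[:, :] = 0.0
#
#   lmp.set_fix_external_callback("ext", force_callback, caller=None)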
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
################################################################################
# Alternative Python Wrapper
# Written by Richard Berger <[email protected]>
################################################################################
class OutputCapture(object):
""" Utility class to capture LAMMPS library output """
def __init__(self):
self.stdout_pipe_read, self.stdout_pipe_write = os.pipe()
self.stdout_fd = 1
def __enter__(self):
self.stdout = os.dup(self.stdout_fd)
os.dup2(self.stdout_pipe_write, self.stdout_fd)
return self
def __exit__(self, type, value, traceback):
os.dup2(self.stdout, self.stdout_fd)
os.close(self.stdout)
os.close(self.stdout_pipe_read)
os.close(self.stdout_pipe_write)
# check if we have more to read from the pipe
def more_data(self, pipe):
r, _, _ = select.select([pipe], [], [], 0)
return bool(r)
# read the whole pipe
def read_pipe(self, pipe):
out = ""
while self.more_data(pipe):
out += os.read(pipe, 1024).decode()
return out
@property
def output(self):
return self.read_pipe(self.stdout_pipe_read)
class Variable(object):
def __init__(self, lammps_wrapper_instance, name, style, definition):
self.wrapper = lammps_wrapper_instance
self.name = name
self.style = style
self.definition = definition.split()
@property
def value(self):
if self.style == 'atom':
return list(self.wrapper.lmp.extract_variable(self.name, "all", 1))
else:
value = self.wrapper.lmp_print('"${%s}"' % self.name).strip()
try:
return float(value)
except ValueError:
return value
class AtomList(object):
def __init__(self, lammps_wrapper_instance):
self.lmp = lammps_wrapper_instance
self.natoms = self.lmp.system.natoms
self.dimensions = self.lmp.system.dimensions
def __getitem__(self, index):
if self.dimensions == 2:
return Atom2D(self.lmp, index + 1)
return Atom(self.lmp, index + 1)
class Atom(object):
def __init__(self, lammps_wrapper_instance, index):
self.lmp = lammps_wrapper_instance
self.index = index
@property
def id(self):
return int(self.lmp.eval("id[%d]" % self.index))
@property
def type(self):
return int(self.lmp.eval("type[%d]" % self.index))
@property
def mol(self):
return self.lmp.eval("mol[%d]" % self.index)
@property
def mass(self):
return self.lmp.eval("mass[%d]" % self.index)
@property
def position(self):
return (self.lmp.eval("x[%d]" % self.index),
self.lmp.eval("y[%d]" % self.index),
self.lmp.eval("z[%d]" % self.index))
@position.setter
def position(self, value):
self.lmp.set("atom", self.index, "x", value[0])
self.lmp.set("atom", self.index, "y", value[1])
self.lmp.set("atom", self.index, "z", value[2])
@property
def velocity(self):
return (self.lmp.eval("vx[%d]" % self.index),
self.lmp.eval("vy[%d]" % self.index),
self.lmp.eval("vz[%d]" % self.index))
@velocity.setter
def velocity(self, value):
self.lmp.set("atom", self.index, "vx", value[0])
self.lmp.set("atom", self.index, "vy", value[1])
self.lmp.set("atom", self.index, "vz", value[2])
@property
def force(self):
return (self.lmp.eval("fx[%d]" % self.index),
self.lmp.eval("fy[%d]" % self.index),
self.lmp.eval("fz[%d]" % self.index))
@property
def charge(self):
return self.lmp.eval("q[%d]" % self.index)
class Atom2D(Atom):
def __init__(self, lammps_wrapper_instance, index):
super(Atom2D, self).__init__(lammps_wrapper_instance, index)
@property
def position(self):
return (self.lmp.eval("x[%d]" % self.index),
self.lmp.eval("y[%d]" % self.index))
@position.setter
def position(self, value):
self.lmp.set("atom", self.index, "x", value[0])
self.lmp.set("atom", self.index, "y", value[1])
@property
def velocity(self):
return (self.lmp.eval("vx[%d]" % self.index),
self.lmp.eval("vy[%d]" % self.index))
@velocity.setter
def velocity(self, value):
self.lmp.set("atom", self.index, "vx", value[0])
self.lmp.set("atom", self.index, "vy", value[1])
@property
def force(self):
return (self.lmp.eval("fx[%d]" % self.index),
self.lmp.eval("fy[%d]" % self.index))
class variable_set:
def __init__(self, name, variable_dict):
self._name = name
array_pattern = re.compile(r"(?P<arr>.+)\[(?P<index>[0-9]+)\]")
for key, value in variable_dict.items():
m = array_pattern.match(key)
if m:
g = m.groupdict()
varname = g['arr']
idx = int(g['index'])
if varname not in self.__dict__:
self.__dict__[varname] = {}
self.__dict__[varname][idx] = value
else:
self.__dict__[key] = value
def __str__(self):
return "{}({})".format(self._name, ','.join(["{}={}".format(k, self.__dict__[k]) for k in self.__dict__.keys() if not k.startswith('_')]))
def __repr__(self):
return self.__str__()
def get_thermo_data(output):
""" traverse output of runs and extract thermo data columns """
if isinstance(output, str):
lines = output.splitlines()
else:
lines = output
runs = []
columns = []
in_run = False
current_run = {}
for line in lines:
if line.startswith("Per MPI rank memory allocation"):
in_run = True
elif in_run and len(columns) == 0:
# first line after memory usage are column names
columns = line.split()
current_run = {}
for col in columns:
current_run[col] = []
elif line.startswith("Loop time of "):
in_run = False
columns = []
thermo_data = variable_set('ThermoData', current_run)
r = {'thermo' : thermo_data }
runs.append(namedtuple('Run', list(r.keys()))(*list(r.values())))
elif in_run and len(columns) > 0:
values = [float(x) for x in line.split()]
for i, col in enumerate(columns):
current_run[col].append(values[i])
return runs
class PyLammps(object):
"""
More Python-like wrapper for LAMMPS (e.g., for iPython)
See examples/ipython for usage
"""
def __init__(self,name="",cmdargs=None,ptr=None,comm=None):
if ptr:
if isinstance(ptr,PyLammps):
self.lmp = ptr.lmp
elif isinstance(ptr,lammps):
self.lmp = ptr
else:
self.lmp = lammps(name=name,cmdargs=cmdargs,ptr=ptr,comm=comm)
else:
self.lmp = lammps(name=name,cmdargs=cmdargs,ptr=None,comm=comm)
print("LAMMPS output is captured by PyLammps wrapper")
self._cmd_history = []
self.runs = []
def __del__(self):
if self.lmp: self.lmp.close()
self.lmp = None
def close(self):
if self.lmp: self.lmp.close()
self.lmp = None
def version(self):
return self.lmp.version()
def file(self,file):
self.lmp.file(file)
def write_script(self,filename):
""" Write LAMMPS script file containing all commands executed up until now """
with open(filename, "w") as f:
for cmd in self._cmd_history:
f.write("%s\n" % cmd)
def command(self,cmd):
self.lmp.command(cmd)
self._cmd_history.append(cmd)
def run(self, *args, **kwargs):
output = self.__getattr__('run')(*args, **kwargs)
if(lammps.has_mpi4py):
output = self.lmp.comm.bcast(output, root=0)
self.runs += get_thermo_data(output)
return output
@property
def last_run(self):
if len(self.runs) > 0:
return self.runs[-1]
return None
@property
def atoms(self):
return AtomList(self)
@property
def system(self):
output = self.info("system")
d = self._parse_info_system(output)
return namedtuple('System', d.keys())(*d.values())
@property
def communication(self):
output = self.info("communication")
d = self._parse_info_communication(output)
return namedtuple('Communication', d.keys())(*d.values())
@property
def computes(self):
output = self.info("computes")
return self._parse_element_list(output)
@property
def dumps(self):
output = self.info("dumps")
return self._parse_element_list(output)
@property
def fixes(self):
output = self.info("fixes")
return self._parse_element_list(output)
@property
def groups(self):
output = self.info("groups")
return self._parse_groups(output)
@property
def variables(self):
output = self.info("variables")
vars = {}
for v in self._parse_element_list(output):
vars[v['name']] = Variable(self, v['name'], v['style'], v['def'])
return vars
def eval(self, expr):
value = self.lmp_print('"$(%s)"' % expr).strip()
try:
return float(value)
except ValueError:
return value
def _split_values(self, line):
return [x.strip() for x in line.split(',')]
def _get_pair(self, value):
return [x.strip() for x in value.split('=')]
def _parse_info_system(self, output):
lines = output[6:-2]
system = {}
for line in lines:
if line.startswith("Units"):
system['units'] = self._get_pair(line)[1]
elif line.startswith("Atom style"):
system['atom_style'] = self._get_pair(line)[1]
elif line.startswith("Atom map"):
system['atom_map'] = self._get_pair(line)[1]
elif line.startswith("Atoms"):
parts = self._split_values(line)
system['natoms'] = int(self._get_pair(parts[0])[1])
system['ntypes'] = int(self._get_pair(parts[1])[1])
system['style'] = self._get_pair(parts[2])[1]
elif line.startswith("Kspace style"):
system['kspace_style'] = self._get_pair(line)[1]
elif line.startswith("Dimensions"):
system['dimensions'] = int(self._get_pair(line)[1])
elif line.startswith("Orthogonal box"):
system['orthogonal_box'] = [float(x) for x in self._get_pair(line)[1].split('x')]
elif line.startswith("Boundaries"):
system['boundaries'] = self._get_pair(line)[1]
elif line.startswith("xlo"):
keys, values = [self._split_values(x) for x in self._get_pair(line)]
for key, value in zip(keys, values):
system[key] = float(value)
elif line.startswith("ylo"):
keys, values = [self._split_values(x) for x in self._get_pair(line)]
for key, value in zip(keys, values):
system[key] = float(value)
elif line.startswith("zlo"):
keys, values = [self._split_values(x) for x in self._get_pair(line)]
for key, value in zip(keys, values):
system[key] = float(value)
elif line.startswith("Molecule type"):
system['molecule_type'] = self._get_pair(line)[1]
elif line.startswith("Bonds"):
parts = self._split_values(line)
system['nbonds'] = int(self._get_pair(parts[0])[1])
system['nbondtypes'] = int(self._get_pair(parts[1])[1])
system['bond_style'] = self._get_pair(parts[2])[1]
elif line.startswith("Angles"):
parts = self._split_values(line)
system['nangles'] = int(self._get_pair(parts[0])[1])
system['nangletypes'] = int(self._get_pair(parts[1])[1])
system['angle_style'] = self._get_pair(parts[2])[1]
elif line.startswith("Dihedrals"):
parts = self._split_values(line)
system['ndihedrals'] = int(self._get_pair(parts[0])[1])
system['ndihedraltypes'] = int(self._get_pair(parts[1])[1])
system['dihedral_style'] = self._get_pair(parts[2])[1]
elif line.startswith("Impropers"):
parts = self._split_values(line)
system['nimpropers'] = int(self._get_pair(parts[0])[1])
system['nimpropertypes'] = int(self._get_pair(parts[1])[1])
system['improper_style'] = self._get_pair(parts[2])[1]
return system
def _parse_info_communication(self, output):
lines = output[6:-3]
comm = {}
for line in lines:
if line.startswith("MPI library"):
comm['mpi_version'] = line.split(':')[1].strip()
elif line.startswith("Comm style"):
parts = self._split_values(line)
comm['comm_style'] = self._get_pair(parts[0])[1]
comm['comm_layout'] = self._get_pair(parts[1])[1]
elif line.startswith("Processor grid"):
comm['proc_grid'] = [int(x) for x in self._get_pair(line)[1].split('x')]
elif line.startswith("Communicate velocities for ghost atoms"):
comm['ghost_velocity'] = (self._get_pair(line)[1] == "yes")
elif line.startswith("Nprocs"):
parts = self._split_values(line)
comm['nprocs'] = int(self._get_pair(parts[0])[1])
comm['nthreads'] = int(self._get_pair(parts[1])[1])
return comm
def _parse_element_list(self, output):
lines = output[6:-3]
elements = []
for line in lines:
element_info = self._split_values(line.split(':')[1].strip())
element = {'name': element_info[0]}
for key, value in [self._get_pair(x) for x in element_info[1:]]:
element[key] = value
elements.append(element)
return elements
def _parse_groups(self, output):
lines = output[6:-3]
groups = []
group_pattern = re.compile(r"(?P<name>.+) \((?P<type>.+)\)")
for line in lines:
m = group_pattern.match(line.split(':')[1].strip())
group = {'name': m.group('name'), 'type': m.group('type')}
groups.append(group)
return groups
def lmp_print(self, s):
""" needed for Python2 compatibility, since print is a reserved keyword """
return self.__getattr__("print")(s)
def __dir__(self):
return ['angle_coeff', 'angle_style', 'atom_modify', 'atom_style', 'atom_style',
'bond_coeff', 'bond_style', 'boundary', 'change_box', 'communicate', 'compute',
'create_atoms', 'create_box', 'delete_atoms', 'delete_bonds', 'dielectric',
'dihedral_coeff', 'dihedral_style', 'dimension', 'dump', 'fix', 'fix_modify',
'group', 'improper_coeff', 'improper_style', 'include', 'kspace_modify',
'kspace_style', 'lattice', 'mass', 'minimize', 'min_style', 'neighbor',
'neigh_modify', 'newton', 'nthreads', 'pair_coeff', 'pair_modify',
'pair_style', 'processors', 'read', 'read_data', 'read_restart', 'region',
'replicate', 'reset_timestep', 'restart', 'run', 'run_style', 'thermo',
'thermo_modify', 'thermo_style', 'timestep', 'undump', 'unfix', 'units',
'variable', 'velocity', 'write_restart']
def __getattr__(self, name):
def handler(*args, **kwargs):
cmd_args = [name] + [str(x) for x in args]
with OutputCapture() as capture:
self.command(' '.join(cmd_args))
output = capture.output
if 'verbose' in kwargs and kwargs['verbose']:
print(output)
lines = output.splitlines()
if len(lines) > 1:
return lines
elif len(lines) == 1:
return lines[0]
return None
return handler
class IPyLammps(PyLammps):
"""
iPython wrapper for LAMMPS which adds embedded graphics capabilities
"""
def __init__(self,name="",cmdargs=None,ptr=None,comm=None):
super(IPyLammps, self).__init__(name=name,cmdargs=cmdargs,ptr=ptr,comm=comm)
def image(self, filename="snapshot.png", group="all", color="type", diameter="type",
size=None, view=None, center=None, up=None, zoom=1.0):
cmd_args = [group, "image", filename, color, diameter]
if size:
width = size[0]
height = size[1]
cmd_args += ["size", width, height]
if view:
theta = view[0]
phi = view[1]
cmd_args += ["view", theta, phi]
if center:
flag = center[0]
Cx = center[1]
Cy = center[2]
Cz = center[3]
cmd_args += ["center", flag, Cx, Cy, Cz]
if up:
Ux = up[0]
Uy = up[1]
Uz = up[2]
cmd_args += ["up", Ux, Uy, Uz]
if zoom:
cmd_args += ["zoom", zoom]
cmd_args.append("modify backcolor white")
self.write_dump(*cmd_args)
from IPython.core.display import Image
return Image(filename)
def video(self, filename):
from IPython.display import HTML
return HTML("<video controls><source src=\"" + filename + "\"></video>")
| gpl-2.0 | 8,843,681,301,166,624,000 | 31.917447 | 165 | 0.60939 | false | 3.292866 | false | false | false |
datalogics/scons | test/Errors/preparation.py | 2 | 2136 | #!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Verify that we print a useful message (and exit non-zero) if an external
error occurs while deciding if a Node is current or not.
"""
import sys
import TestSCons
test = TestSCons.TestSCons()
install = test.workpath('install')
install_file = test.workpath('install', 'file')
work_file = test.workpath('work', 'file')
test.subdir('install', 'work')
test.write(['work', 'SConstruct'], """\
Alias("install", Install(r"%(install)s", File('file')))
# Make a directory where we expect the File() to be. This causes an
# IOError or OSError when we try to open it to read its signature.
import os
os.mkdir(r'%(work_file)s')
""" % locals())
if sys.platform == 'win32':
error_message = "Permission denied"
else:
error_message = "Is a directory"
expect = """\
scons: *** [%(install_file)s] %(work_file)s: %(error_message)s
""" % locals()
test.run(chdir = 'work',
arguments = 'install',
status = 2,
stderr = expect)
test.pass_test()
| mit | -6,140,333,870,987,321,000 | 30.880597 | 73 | 0.711142 | false | 3.753954 | true | false | false |
duncant/stupid_python_tricks | primes.py | 1 | 8937 | # This file is part of stupid_python_tricks written by Duncan Townsend.
#
# stupid_python_tricks is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# stupid_python_tricks is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with stupid_python_tricks. If not, see <http://www.gnu.org/licenses/>.
from itertools import *
from fractions import gcd
from operator import itemgetter
def simple():
"""A simple prime generator using the Sieve of Eratosthenes.
This is not intended to be fast, but is instead intended to be so
simple that its correctness is obvious.
"""
stream = count(2)
while True:
prime = next(stream)
sieve = (lambda n: lambda i: i % n)(prime)
stream = ifilter(sieve, stream)
yield prime
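# Illustrative check (not part of the original module): the first few primes
# from simple(), using the take() helper defined below.
#
#   >>> list(take(6, simple()))
#   [2, 3, 5, 7, 11, 13]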
def take(n, stream):
return islice(stream, None, n, None)
def drop(n, stream):
return islice(stream, n, None, None)
def nth(n, stream):
try:
return next(drop(n, stream))
except StopIteration:
raise IndexError("Can't get element off the end of generator")
class Wheel(object):
class Spokes(object):
def __init__(self, iterator, length, last):
self.iterator = take(length, iterator)
self.length = length
self.last = last
self.cache = []
def __len__(self):
return self.length
def __getitem__(self, key):
cache = self.cache
if key >= len(cache):
try:
it_next = self.iterator.next
append = cache.append
while key >= len(cache):
append(it_next())
except StopIteration:
raise IndexError("%s index out of range or iterator ended early" % type(self).__name__)
return cache[key % self.length]
def index(self, needle):
left = 0
left_value = self[left]
right = self.length-1
right_value = self.last
while True:
guess = ((right - left) * max(needle - left_value, 0) \
// max(right_value - left_value, 1)) + left
guess_value = self[guess]
if guess_value == needle:
# base case; needle is found
return guess
elif guess_value < needle:
left = guess + 1
left_value = self[left]
elif guess-1 < 0 or self[guess-1] < needle:
# base case; needle isn't present; return the
# index of the next-largest element
return guess
else:
right = guess - 1
right_value = self[right]
def __init__(self, smaller, prime):
if smaller is None and prime is None:
self.modulus = 1
self.spokes = self.Spokes((1,), 1, 1)
else:
self.modulus = smaller.modulus * prime
self.spokes = self.Spokes(ifilter(lambda x: x % prime,
smaller),
len(smaller.spokes)*(prime-1),
self.modulus)
def _index_unsafe(self, elem):
cycle, raw_spoke = divmod(elem, self.modulus)
spoke = self.spokes.index(raw_spoke)
return (cycle, spoke)
def index(self, elem):
ret = self._index_unsafe(elem)
if self[ret] != elem:
raise IndexError("%d is not in %s" % (elem, type(self).__name__))
return ret
def __getitem__(self, (cycle, spoke)):
return cycle*self.modulus + self.spokes[spoke]
def __contains__(self, elem):
return gcd(elem, self.modulus) == 1
def __iter__(self):
spokes = self.spokes
modulus = self.modulus
for i in count():
for j in spokes:
yield i*modulus + j
def roll(self, cycles, sieve=None):
modulus = self.modulus
spokes = self.spokes
# populate the sieve if it's not supplied
if sieve is None:
sieve = {}
for p in takewhile(lambda p: p < modulus, simple()):
if p in self:
for q in dropwhile(lambda q: q < p,
takewhile(lambda q: q < modulus,
simple())):
hazard = p*q
if hazard > modulus and hazard in self:
sieve[hazard] = (p, None, None)
break
# update the sieve for our wheel size
to_delete = set()
to_insert = set()
for hazard, (prime, _, __) in sieve.iteritems():
if hazard in self:
cycle, spoke = self._index_unsafe(hazard // prime)
sieve[hazard] = (prime, cycle, spoke)
else:
to_delete.add(hazard)
if prime in self:
cycle, spoke = self._index_unsafe(hazard // prime)
to_insert.add((prime, cycle, spoke))
for hazard in to_delete:
del sieve[hazard]
for prime, cycle, spoke in sorted(to_insert):
hazard = prime * self[(cycle, spoke)]
while hazard in sieve:
spoke += 1
cycle_incr, spoke = divmod(spoke, len(spokes))
cycle += cycle_incr
hazard = prime * self[(cycle, spoke)]
sieve[hazard] = (prime, cycle, spoke)
del to_insert
del to_delete
# assert len(frozenset(imap(itemgetter(0), \
# sieve.itervalues()))) \
# == len(sieve)
# assert all(imap(lambda hazard: hazard in self, sieve.iterkeys()))
# perform the wheel factorization
candidate_stream = drop(len(spokes), self)
if cycles is not None:
candidate_stream = take(len(spokes)*cycles, candidate_stream)
# sieve the result
for candidate in candidate_stream:
if candidate in sieve:
hazard = candidate
prime, cycle, spoke = sieve[hazard]
# assert hazard == prime * self[(cycle, spoke)]
while hazard in sieve:
spoke += 1
cycle_incr, spoke = divmod(spoke, len(spokes))
cycle += cycle_incr
hazard = prime * self[(cycle, spoke)]
# assert hazard in self
del sieve[candidate]
sieve[hazard] = (prime, cycle, spoke)
else:
cycle, spoke = self._index_unsafe(candidate)
sieve[candidate**2] = (candidate, cycle, spoke)
yield candidate
# assert all(imap(lambda h: h > candidate, sieve.iterkeys()))
class __metaclass__(type):
def __iter__(cls):
last = cls(None, None)
yield last
for prime in simple():
last = cls(last, prime)
yield last
def __repr__(self):
return "<%s.%s with modulus %d>" % \
(__name__, type(self).__name__, self.modulus)
def fixed_wheel(index):
w = nth(index, Wheel)
return chain(takewhile(lambda p: p < w.modulus, simple()),
w.roll(None))
def variable_wheel():
sieve = {}
return chain.from_iterable( ( wheel.roll(prime-1, sieve)
for wheel, prime in izip(Wheel, simple()) ) )
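# Illustrative usage (not part of the original module): both wheel-based
# generators are intended to be drop-in prime streams, e.g.
#
#   primes = variable_wheel()
#   print [next(primes) for _ in range(8)]   # expected: first 8 primes
#
# _check_fixed()/_check_variable() below compare them against a reference sieve.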
def _check_fixed(index, up_to):
try:
import pyprimes.sieves
good_stream = pyprimes.sieves.best_sieve()
except ImportError:
good_stream = simple()
for i, (a, b) in enumerate(take(up_to,
izip(fixed_wheel(index),
good_stream))):
if a != b:
return i
def _check_variable(up_to):
try:
import pyprimes.sieves
good_stream = pyprimes.sieves.best_sieve()
except ImportError:
good_stream = simple()
for i, (a, b) in enumerate(take(up_to,
izip(variable_wheel(),
good_stream))):
if a != b:
return i
if __name__ == '__main__':
import sys
print nth(int(sys.argv[1]), variable_wheel())
| lgpl-3.0 | 2,632,453,963,812,360,700 | 32.347015 | 107 | 0.511805 | false | 4.164492 | false | false | false |
gangadhar-kadam/sapphire_app | accounts/doctype/shipping_rule/test_shipping_rule.py | 1 | 2126 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd.
# License: GNU General Public License v3. See license.txt
import webnotes
import unittest
from accounts.doctype.shipping_rule.shipping_rule import FromGreaterThanToError, ManyBlankToValuesError, OverlappingConditionError
class TestShippingRule(unittest.TestCase):
def test_from_greater_than_to(self):
shipping_rule = webnotes.bean(copy=test_records[0])
shipping_rule.doclist[1].from_value = 101
self.assertRaises(FromGreaterThanToError, shipping_rule.insert)
def test_many_zero_to_values(self):
shipping_rule = webnotes.bean(copy=test_records[0])
shipping_rule.doclist[1].to_value = 0
self.assertRaises(ManyBlankToValuesError, shipping_rule.insert)
def test_overlapping_conditions(self):
for range_a, range_b in [
((50, 150), (0, 100)),
((50, 150), (100, 200)),
((50, 150), (75, 125)),
((50, 150), (25, 175)),
((50, 150), (50, 150)),
]:
shipping_rule = webnotes.bean(copy=test_records[0])
shipping_rule.doclist[1].from_value = range_a[0]
shipping_rule.doclist[1].to_value = range_a[1]
shipping_rule.doclist[2].from_value = range_b[0]
shipping_rule.doclist[2].to_value = range_b[1]
self.assertRaises(OverlappingConditionError, shipping_rule.insert)
test_records = [
[
{
"doctype": "Shipping Rule",
"label": "_Test Shipping Rule",
"calculate_based_on": "Net Total",
"company": "_Test Company",
"account": "_Test Account Shipping Charges - _TC",
"cost_center": "_Test Cost Center - _TC"
},
{
"doctype": "Shipping Rule Condition",
"parentfield": "shipping_rule_conditions",
"from_value": 0,
"to_value": 100,
"shipping_amount": 50.0
},
{
"doctype": "Shipping Rule Condition",
"parentfield": "shipping_rule_conditions",
"from_value": 101,
"to_value": 200,
"shipping_amount": 100.0
},
{
"doctype": "Shipping Rule Condition",
"parentfield": "shipping_rule_conditions",
"from_value": 201,
"shipping_amount": 0.0
},
{
"doctype": "Applicable Territory",
"parentfield": "valid_for_territories",
"territory": "_Test Territory"
}
]
] | agpl-3.0 | -1,303,449,078,409,612,000 | 29.385714 | 130 | 0.674036 | false | 2.940526 | true | false | false |
zstars/weblabdeusto | experiments/unmanaged/vm_services/unix_passwd/launch.py | 4 | 2236 | #!/usr/bin/env python
#-*-*- encoding: utf-8 -*-*-
#
# Copyright (C) 2005-2009 University of Deusto
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
# This software consists of contributions made by many individuals,
# listed below:
#
# Author: Pablo Orduña <[email protected]>
#
####################################################
#
# This script must be run as root in a UNIX system.
# Any call to http://(this-host):PORT/?sessionid=foo
# will cause the user USERNAME to have "foo" as
# password. This is useful for sharing the session
# with the user through SSH or other systems based
# on the system's password.
#
PORT = 18080
USERNAME = 'weblab'
PASSWD_PATH = "/usr/bin/passwd"
####################################################
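# Illustrative usage sketch (not part of the original script): once launched
# (as root), the password can be set from another host roughly like this:
#
#   curl "http://vm-address:18080/?sessionid=foo"
#
# which should answer "ok" and set USERNAME's password to "foo".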
import pexpect
import time
import urllib
import traceback
import BaseHTTPServer
def change_password(new_passwd):
passwd = pexpect.spawn("%s %s" % (PASSWD_PATH, USERNAME))
for _ in range(2):
# wait for password: to come out of passwd's stdout
passwd.expect("password: ")
# send pass to passwd's stdin
passwd.sendline(new_passwd)
time.sleep(0.1)
class Handler(BaseHTTPServer.BaseHTTPRequestHandler):
def do_GET(self):
_, query_args = urllib.splitquery(self.path)
arguments = dict([ urllib.splitvalue(query_arg) for query_arg in query_args.split('&') ])
session_id = arguments.get('sessionid')
if session_id is None:
self.send_error(400)
self.end_headers()
self.wfile.write("fail: sessionid argument is required")
else:
try:
change_password(session_id)
except Exception, e:
traceback.print_exc()
self.send_error(500)
self.end_headers()
self.wfile.write("Internal error: %s" % str(e))
else:
self.send_response(200)
self.end_headers()
self.wfile.write("ok")
self.wfile.close()
server = BaseHTTPServer.HTTPServer(('',PORT), RequestHandlerClass = Handler)
server.serve_forever()
| bsd-2-clause | -8,197,825,213,115,346,000 | 29.616438 | 97 | 0.600447 | false | 3.907343 | false | false | false |
NORDUnet/niscanner | ni_scanner.py | 1 | 1971 | from configparser import ConfigParser
from utils.cli import CLI
from api.queue import Queue
from api.nerds import NerdsApi
from scanner.host import HostScanner
from scanner.exceptions import ScannerExeption
from utils.url import url_concat
import logging
FORMAT = '%(name)s - %(levelname)s - %(message)s'
logging.basicConfig(format=FORMAT)
logger = logging.getLogger('ni_scanner')
def process_host(queue, nerds_api):
item = queue.next("Host")
while item:
try:
queue.processing(item)
scanner = HostScanner(item)
nerds = scanner.process()
if not nerds:
# Error occured :(
logger.error("Unable to scan item %s", str(item))
queue.failed(item)
else:
logger.debug("Posting nerds data")
nerds_api.send(nerds)
queue.done(item)
except ScannerExeption as e:
logger.error("%s", e)
failed(queue, item)
except Exception as e:
logger.error("Unable to process host %s got error: %s", item, str(e))
failed(queue, item)
item = queue.next("Host")
def failed(queue, item):
try:
queue.failed(item)
except Exception as e:
logger.error("Problem with reaching NI, got error: %s", e)
def main():
args = CLI().options()
try:
config = ConfigParser()
config.readfp(open(args.config))
except IOError:
logger.error("Config file '%s' is missing", args.config)
return None
# ready :)
api_user = config.get("NI", "api_user")
api_key = config.get("NI", "api_key")
queue_url = url_concat(config.get("NI", "url"), "scan_queue/")
queue = Queue(queue_url, api_user, api_key)
nerds_url = url_concat(config.get("NI", "url"), "nerds/")
nerds_api = NerdsApi(nerds_url, api_user, api_key)
process_host(queue, nerds_api)
if __name__ == "__main__":
main()
| bsd-3-clause | -1,758,669,678,059,107,800 | 28.41791 | 81 | 0.593607 | false | 3.60989 | true | false | false |
onedata/web-client | bamboos/docker/environment/docker.py | 1 | 6982 | # coding=utf-8
"""Author: Konrad Zemek
Copyright (C) 2015 ACK CYFRONET AGH
This software is released under the MIT license cited in 'LICENSE.txt'
Functions wrapping capabilities of docker binary.
"""
import json
import os
import subprocess
import sys
# noinspection PyDefaultArgument
def run(image, docker_host=None, detach=False, dns_list=[], add_host={},
envs={}, hostname=None, interactive=False, link={}, tty=False, rm=False,
reflect=[], volumes=[], name=None, workdir=None, user=None, group=None,
group_add=[], cpuset_cpus=None, privileged=False, run_params=[], command=None,
output=False, stdin=None, stdout=None, stderr=None):
cmd = ['docker']
if docker_host:
cmd.extend(['-H', docker_host])
cmd.append('run')
if detach:
cmd.append('-d')
for addr in dns_list:
cmd.extend(['--dns', addr])
for key, value in add_host.iteritems():
cmd.extend(['--add-host', '{0}:{1}'.format(key, value)])
for key in envs:
cmd.extend(['-e', '{0}={1}'.format(key, envs[key])])
if hostname:
cmd.extend(['-h', hostname])
if detach or sys.__stdin__.isatty():
if interactive:
cmd.append('-i')
if tty:
cmd.append('-t')
for container, alias in link.items():
cmd.extend(['--link', '{0}:{1}'.format(container, alias)])
if name:
cmd.extend(['--name', name])
if rm:
cmd.append('--rm')
for path, read in reflect:
vol = '{0}:{0}:{1}'.format(os.path.abspath(path), read)
cmd.extend(['-v', vol])
# Volume can be in one of three forms
# 1. 'path_on_docker'
# 2. ('path_on_host', 'path_on_docker', 'ro'/'rw')
# 3. {'volumes_from': 'volume name'}
for entry in volumes:
if isinstance(entry, tuple):
path, bind, readable = entry
vol = '{0}:{1}:{2}'.format(os.path.abspath(path), bind, readable)
cmd.extend(['-v', vol])
elif isinstance(entry, dict):
volume_name = entry['volumes_from']
cmd.extend(['--volumes-from', volume_name])
else:
cmd.extend(['-v', entry])
if workdir:
cmd.extend(['-w', os.path.abspath(workdir)])
if user:
user_group = '{0}:{1}'.format(user, group) if group else user
cmd.extend(['-u', user_group])
for g in group_add:
cmd.extend(['--group-add', g])
if privileged:
cmd.append('--privileged')
if cpuset_cpus:
cmd.extend(['--cpuset-cpus', cpuset_cpus])
cmd.extend(run_params)
cmd.append(image)
if isinstance(command, basestring):
cmd.extend(['sh', '-c', command])
elif isinstance(command, list):
cmd.extend(command)
elif command is not None:
raise ValueError('{0} is not a string nor list'.format(command))
if detach or output:
return subprocess.check_output(cmd, stdin=stdin, stderr=stderr).decode(
'utf-8').strip()
return subprocess.call(cmd, stdin=stdin, stderr=stderr, stdout=stdout)
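# Illustrative sketch (not part of the original module): starting a throwaway
# container with run(). The image name and paths are assumptions.
#
#   container_id = run('ubuntu:16.04', detach=True, tty=True, interactive=True,
#                      hostname='worker1', volumes=[('/tmp/data', '/data', 'ro')],
#                      name='example', command=['sleep', 'infinity'])
#   remove([container_id], force=True, volumes=True)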
def exec_(container, command, docker_host=None, user=None, group=None,
detach=False, interactive=False, tty=False, privileged=False,
output=False, stdin=None, stdout=None, stderr=None):
cmd = ['docker']
if docker_host:
cmd.extend(['-H', docker_host])
cmd.append('exec')
if user:
user_group = '{0}:{1}'.format(user, group) if group else user
cmd.extend(['-u', user_group])
if detach:
cmd.append('-d')
if detach or sys.__stdin__.isatty():
if interactive:
cmd.append('-i')
if tty:
cmd.append('-t')
if privileged:
cmd.append('--privileged')
cmd.append(container)
if isinstance(command, basestring):
cmd.extend(['sh', '-c', command])
elif isinstance(command, list):
cmd.extend(command)
else:
raise ValueError('{0} is not a string nor list'.format(command))
if detach or output:
return subprocess.check_output(cmd, stdin=stdin, stderr=stderr).decode(
'utf-8').strip()
return subprocess.call(cmd, stdin=stdin, stderr=stderr, stdout=stdout)
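# Illustrative sketch (example container and command are assumptions): running a
# one-off command inside an existing container and capturing its output.
#
#   uname = exec_('my_container', 'uname -a', output=True)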
def inspect(container, docker_host=None):
cmd = ['docker']
if docker_host:
cmd.extend(['-H', docker_host])
cmd.extend(['inspect', container])
out = subprocess.check_output(cmd, universal_newlines=True)
return json.loads(out)[0]
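# Illustrative sketch (the field names assume the usual `docker inspect` JSON layout):
# inspect() returns the parsed document for one container, so nested values can be
# read directly.
#
#   info = inspect('my_container')
#   ip = info['NetworkSettings']['IPAddress']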
def logs(container, docker_host=None):
cmd = ['docker']
if docker_host:
cmd.extend(['-H', docker_host])
cmd.extend(['logs', container])
return subprocess.check_output(cmd, universal_newlines=True,
stderr=subprocess.STDOUT)
def remove(containers, docker_host=None, force=False,
link=False, volumes=False):
cmd = ['docker']
if docker_host:
cmd.extend(['-H', docker_host])
cmd.append('rm')
if force:
cmd.append('-f')
if link:
cmd.append('-l')
if volumes:
cmd.append('-v')
cmd.extend(containers)
subprocess.check_call(cmd)
def cp(container, src_path, dest_path, to_container=False):
"""Copying file between docker container and host
:param container: str, docker id or name
:param src_path: str
:param dest_path: str
:param to_container: bool, if True file will be copied from host to
container, otherwise from docker container to host
"""
cmd = ["docker", "cp"]
if to_container:
cmd.extend([src_path, "{0}:{1}".format(container, dest_path)])
else:
cmd.extend(["{0}:{1}".format(container, src_path), dest_path])
subprocess.check_call(cmd)
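# Illustrative sketch (paths and container name are example assumptions):
#
#   cp('my_container', '/var/log/app.log', '/tmp/app.log')                   # container -> host
#   cp('my_container', '/tmp/app.conf', '/etc/app.conf', to_container=True)  # host -> container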
def login(user, password, repository='hub.docker.com'):
"""Logs into docker repository."""
subprocess.check_call(['docker', 'login', '-u', user, '-p', password,
repository])
def build_image(image, build_args):
"""Builds and tags docker image."""
subprocess.check_call(['docker', 'build', '--no-cache', '--force-rm', '-t',
image] + build_args)
def tag_image(image, tag):
"""Tags docker image."""
subprocess.check_call(['docker', 'tag', image, tag])
def push_image(image):
"""Pushes docker image to the repository."""
subprocess.check_call(['docker', 'push', image])
def pull_image(image):
"""Pulls docker image from the repository."""
subprocess.check_call(['docker', 'pull', image])
def remove_image(image):
"""Removes docker image."""
subprocess.check_call(['docker', 'rmi', '-f', image])
def create_volume(path, name, image, command):
cmd = ['docker']
cmd.append('create')
cmd.append('-v')
cmd.append(path)
cmd.append('--name')
cmd.append(name)
cmd.append(image)
cmd.append(command)
return subprocess.check_output(cmd, universal_newlines=True,
stderr=subprocess.STDOUT)
| mit | 4,307,532,115,299,974,000 | 25.24812 | 86 | 0.589229 | false | 3.755783 | false | false | false |
youlanhai/directory_cache | gen_dir_list.py | 1 | 1422 | # -*- coding: utf-8 -*-
import os
import sys
import struct
USAGE = """
python gen_dir_list.py src_path [dst_path]
"""
def usage(): print USAGE
def format_path(path):
if len(path) == 0: return "./"
path = path.replace('\\', '/')
if path[-1] != '/': path += '/'
return path
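# Example (illustrative): format_path('') returns './', and format_path('a\\b')
# returns 'a/b/' - backslashes are normalized and a trailing slash is guaranteed.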
def list_all_files(path):
ret = []
files = os.listdir(path)
files.sort()
for fname in files:
fpath = path + fname
if os.path.isdir(fpath):
fpath += '/'
child_ret = list_all_files(fpath)
ret.append( (fname, child_ret) )
else:
ret.append( (fname, None) )
return ret
def output_dirs(path, handle):
name, children = path
handle.write( struct.pack("H", len(name)) )
handle.write( name )
nchild = 0xffff if children is None else len(children)
handle.write( struct.pack("H", nchild) )
if children is not None:
for child in children:
output_dirs(child, handle)
return
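# A matching reader for the format written by output_dirs(), added here only as an
# illustrative sketch (it is not used by this script). Each entry is serialized as
# <u16 name length><name bytes><u16 child count>, with 0xffff marking a plain file.
def read_dir_entry(handle):
    nlen, = struct.unpack("H", handle.read(2))
    name = handle.read(nlen)
    nchild, = struct.unpack("H", handle.read(2))
    if nchild == 0xffff:
        return (name, None)
    return (name, [read_dir_entry(handle) for _ in xrange(nchild)])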
def gen_dir_list(src_path, dst_path):
src_path = format_path(src_path)
dst_path = format_path(dst_path)
print "collect files: ", src_path
paths = list_all_files(src_path)
filename = dst_path + "ora.dir"
print "write fo file: ", filename
handle = open(filename, "wb")
output_dirs((".", paths), handle)
handle.close()
def main():
if len(sys.argv) < 2:
return usage()
src_path = sys.argv[1]
dst_path = sys.argv[2] if len(sys.argv) > 2 else src_path
gen_dir_list(src_path, dst_path)
if __name__ == "__main__":
main()
| mit | 8,937,673,335,269,717,000 | 17.710526 | 58 | 0.635021 | false | 2.729367 | false | false | false |
pojdrovic/Miscellaneous_Files | sentiment_price.py | 1 | 5415 | import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import datetime
import csv
from numpy import *
from scipy.stats.kde import gaussian_kde
import os
##############################
# Petar Ojdrovic
##############################
# parses sentiment data
# compares with underlying prices
import bisect # for bisection search
def find_le(a, x):
'Find rightmost value less than or equal to x'
i = bisect.bisect_right(a, x)
if i:
return i-1#a[i-1]
raise ValueError
def find_ge(a, x):
'Find leftmost item greater than or equal to x'
i = bisect.bisect_left(a, x)
if i != len(a):
return i#a[i]
raise ValueError
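# Illustrative note (added, not in the original script): both helpers return an
# *index* into the sorted sequence, which is how they are used below to address
# price rows. For example:
#
#   find_le([1, 3, 5, 7], 6)   # -> 2, the index of 5
#   find_ge([1, 3, 5, 7], 6)   # -> 3, the index of 7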
#### load prices ####
data = loadtxt('Prices_NYSE100_2011-2013_5.csv', delimiter=',', dtype='string')
tickers = data[0,1:]
times = data[1:,0]
times = [datetime.datetime.strptime(t, '%Y%m%d %H:%M') for t in times] # datetime object
ords = [t.toordinal() for t in times] # ordinals (integer for each day)
P = data[1:,1:].astype(float)
#### compare with news data ####
# load news data
with open('Alex_Top100_2011to2013/Alex_Top100_2011.xml') as input:
lines = input.readlines()
with open('Alex_Top100_2011to2013/Alex_Top100_2012.xml') as input:
for l in input:
lines.append(l)
with open('Alex_Top100_2011to2013/Alex_Top100_2013.xml') as input:
for l in input:
lines.append(l)
# loop through tickers...
for tick in tickers[:10]:
n = where(array(tickers)==tick)[0][0]
newsdat = []
newstime = []
for i in range(len(lines)):
if '<Row>' in lines[i] and '>'+tick+'<' in lines[i+3]:
day = lines[i+5].split('Type="String">')[1].split('</Data>')[0]
minute = lines[i+6].split('Type="String">')[1].split('</Data>')[0][:8]
sentiment = float(lines[i+7].split('Type="Number">')[1].split('</Data>')[0])
confidence = float(lines[i+8].split('Type="Number">')[1].split('</Data>')[0])
novelty = float(lines[i+9].split('Type="Number">')[1].split('</Data>')[0])
relevance = float(lines[i+11].split('Type="Number">')[1].split('</Data>')[0])
newsdat.append([sentiment, confidence, novelty, relevance])
newstime.append([day, minute])
newsdat = array(newsdat)
if len(newsdat)==0: # no events for this ticker
continue
X = [] # high quality events
for i in range(len(newsdat)):
if newsdat[i,0]!=0.0 and newsdat[i,1]>0.95 and newsdat[i,2]==1.0 and newsdat[i,3]==1.0:
event_time = datetime.datetime.strptime(newstime[i][0]+' '+newstime[i][1],'%Y-%m-%d %H:%M:%S')
X.append([event_time, newsdat[i,0]])
L = [] # check to see if news anticipates (intraday)
F = [] # check to see if news follows (intraday)
L_o = [] # overnight
F_o = [] # overnight
for x in X:
if x[0].toordinal() in ords:
# intraday
if (x[0].time() >= datetime.time(9,30)) and (x[0].time() <= datetime.time(16,00)):
close_p = P[find_le(ords, x[0].toordinal()),n] # close price that day
open_p = P[find_ge(ords, x[0].toordinal()),n]
recent_p = P[find_le(times, x[0]),n] # most recent price before news
L.append([x[1], (close_p-recent_p)/recent_p])
F.append([x[1], (recent_p-open_p)/open_p])
# overnight
else:
close_p = P[find_le(ords, x[0].toordinal()),n] # close price that day
open_p = P[find_ge(ords, x[0].toordinal()),n]
recent_p = P[find_le(times, x[0]),n] # most recent price before news
next_close_p = P[find_le(ords, x[0].toordinal()+1),n] # should revise to handle Fridays...
L_o.append([x[1], (next_close_p - recent_p)/recent_p])
F_o.append([x[1], (close_p - open_p)/open_p])
L = array(L)
F = array(F)
print(tick+': '+str(sum(L[:,0]==1))+' positive, '+str(sum(L[:,0]==-1))+' negative')
# make KDE plots
b = 1.5*max(abs(array([min(L[:,1]), max(L[:,1]), min(F[:,1]), max(F[:,1])])))
xs = arange(-b, b, 2*b/1000.0)
kde_L_p = gaussian_kde([L[i,1] for i in range(len(L)) if L[i,0]>0]) # leading, positive
y_L_p = kde_L_p.evaluate(xs)
kde_L_n = gaussian_kde([L[i,1] for i in range(len(L)) if L[i,0]<0]) # leading, negative
y_L_n = kde_L_n.evaluate(xs)
kde_F_p = gaussian_kde([F[i,1] for i in range(len(F)) if F[i,0]>0]) # following, positive
y_F_p = kde_F_p.evaluate(xs)
kde_F_n = gaussian_kde([F[i,1] for i in range(len(F)) if F[i,0]<0]) # following, negative
y_F_n = kde_F_n.evaluate(xs)
fig = plt.figure(num=None, figsize=(8, 6), dpi=80, facecolor='w', edgecolor='k')
ax = fig.add_subplot(111)
ax.plot(xs, y_L_p, linewidth=2, color='r')
ax.plot(xs, y_L_n, linewidth=2, color='b')
ax.fill_between(xs, y_L_p, color='r', alpha=0.2)
ax.fill_between(xs, y_L_n, color='b', alpha=0.2)
ax.legend(('Positive', 'Negative'), loc='upper left')
top = (int(max([max(y_L_p), max(y_L_n)]))/10)*10+10
ax.plot([0, 0], [0, top], color='k', linewidth=2)
ax.grid()
plt.title(tick,size=20)
pdf = PdfPages(tick+'_leading_intraday.pdf')
pdf.savefig()
pdf.close()
plt.close()
fig = plt.figure(num=None, figsize=(8, 6), dpi=80, facecolor='w', edgecolor='k')
ax = fig.add_subplot(111)
ax.plot(xs, y_F_p, linewidth=2, color='r')
ax.plot(xs, y_F_n, linewidth=2, color='b')
ax.fill_between(xs, y_F_p, color='r', alpha=0.2)
ax.fill_between(xs, y_F_n, color='b', alpha=0.2)
ax.legend(('Positive', 'Negative'), loc='upper left')
top = (int(max([max(y_F_p), max(y_F_n)]))/10)*10+10
ax.plot([0, 0], [0, top], color='k', linewidth=2)
ax.grid()
plt.title(tick,size=20)
pdf = PdfPages(tick+'_following_intraday.pdf')
pdf.savefig()
pdf.close()
plt.close() | apache-2.0 | -7,030,200,346,733,347,000 | 36.611111 | 97 | 0.618283 | false | 2.464725 | false | false | false |
pandegroup/vs-utils | vs_utils/utils/rdkit_utils/tests/test_conformers.py | 2 | 4580 | """
Tests for conformers.py.
"""
import numpy as np
import unittest
from rdkit import Chem
from vs_utils.utils.rdkit_utils import conformers
class TestConformerGenerator(unittest.TestCase):
"""
Tests for ConformerGenerator.
"""
def setUp(self):
"""
Set up tests.
"""
aspirin_smiles = 'CC(=O)OC1=CC=CC=C1C(=O)O aspirin'
self.mol = Chem.MolFromSmiles(aspirin_smiles.split()[0])
self.mol.SetProp('_Name', 'aspirin')
assert self.mol.GetNumConformers() == 0
self.engine = conformers.ConformerGenerator()
def test_generate_conformers(self):
"""
Generate molecule conformers using default parameters.
"""
mol = self.engine.generate_conformers(self.mol)
assert mol.GetNumConformers() > 0
# check that molecule names are retained
assert self.mol.GetProp('_Name') == mol.GetProp('_Name')
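    # Illustrative sketch (assumption, not part of the original tests): the engine can
    # also be used outside the unittest harness, e.g.
    #
    #   mol = Chem.MolFromSmiles('CCO')
    #   mol = conformers.ConformerGenerator(max_conformers=5).generate_conformers(mol)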
def test_mmff94_minimization(self):
"""
Generate conformers and minimize with MMFF94 force field.
"""
engine = conformers.ConformerGenerator(force_field='mmff94')
mol = engine.generate_conformers(self.mol)
assert mol.GetNumConformers() > 0
def test_mmff94s_minimization(self):
"""
Generate conformers and minimize with MMFF94s force field.
"""
engine = conformers.ConformerGenerator(force_field='mmff94s')
mol = engine.generate_conformers(self.mol)
assert mol.GetNumConformers() > 0
def test_embed_molecule(self):
"""
Test ConformerGenerator.embed_molecule.
"""
mol = self.engine.embed_molecule(self.mol)
assert mol.GetNumConformers() > 0
def test_minimize_conformers(self):
"""
Test ConformerGenerator.minimize_conformers.
"""
mol = self.engine.embed_molecule(self.mol)
assert mol.GetNumConformers() > 0
start = self.engine.get_conformer_energies(mol)
self.engine.minimize_conformers(mol)
finish = self.engine.get_conformer_energies(mol)
# check that all minimized energies are lower
assert np.all(start > finish), (start, finish)
def test_get_conformer_energies(self):
"""
Test ConformerGenerator.get_conformer_energies.
"""
mol = self.engine.embed_molecule(self.mol)
assert mol.GetNumConformers() > 0
energies = self.engine.get_conformer_energies(mol)
# check that the number of energies matches the number of
# conformers
assert len(energies) == mol.GetNumConformers()
def test_prune_conformers(self):
"""
Test ConformerGenerator.prune_conformers.
"""
engine = conformers.ConformerGenerator(max_conformers=10)
mol = engine.embed_molecule(self.mol)
# check that there is more than one conformer
assert mol.GetNumConformers() > 1
engine.minimize_conformers(mol)
energies = engine.get_conformer_energies(mol)
pruned = engine.prune_conformers(mol)
pruned_energies = engine.get_conformer_energies(pruned)
# check that the number of conformers is not to large
assert pruned.GetNumConformers() <= engine.max_conformers
# check that the number of conformers has not increased
assert pruned.GetNumConformers() <= mol.GetNumConformers()
# check that lowest energy conformer was selected
assert np.allclose(min(energies), min(pruned_energies))
# check that pruned energies are taken from the original set
for energy in pruned_energies:
assert np.allclose(min(np.fabs(energies - energy)), 0)
# check that conformers are in order of increasing energy
sort = np.argsort(pruned_energies)
assert np.array_equal(sort, np.arange(len(pruned_energies))), sort
def test_get_conformer_rmsd(self):
"""
Test ConformerGenerator.get_conformer_rmsd.
"""
engine = conformers.ConformerGenerator(max_conformers=10)
mol = engine.embed_molecule(self.mol)
# check that there is more than one conformer
assert mol.GetNumConformers() > 1
rmsd = engine.get_conformer_rmsd(mol)
# check for a valid distance matrix
assert rmsd.shape[0] == rmsd.shape[1] == mol.GetNumConformers()
assert np.allclose(np.diag(rmsd), 0)
assert np.array_equal(rmsd, rmsd.T)
# check for non-zero off-diagonal values
assert np.all(rmsd[np.triu_indices_from(rmsd, k=1)] > 0), rmsd
| gpl-3.0 | -8,888,732,276,072,261,000 | 33.69697 | 74 | 0.641048 | false | 3.788255 | true | false | false |
tonygalmiche/is_mrp | wizard/generate_previsions.py | 1 | 30993 | # -*- coding: utf-8 -*-
import time
import datetime
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp import netsvc
class mrp_generate_previsions(osv.osv_memory):
_name = "mrp.previsions.generate"
_description = "Generate previsions"
_columns = {
'max_date': fields.date('Date Max', required=True),
'company_id': fields.many2one('res.company', 'Company', required=True),
}
_defaults = {
'company_id': lambda s, cr, uid, c: s.pool.get('res.company')._company_default_get(cr, uid, 'mrp.previsions.generate', context=c),
}
def _check_date_max(self, cr, uid, ids, context=None):
obj = self.browse(cr, uid, ids[0], context=context)
if obj.max_date <= time.strftime('%Y-%m-%d'):
return False
return True
_constraints = [
(_check_date_max, u'La date max doit être supérieure à la date de jour', ['max_date']),
]
    # Compute the list of dates between two different dates
def list_dates_availables(self, cr, uid, date_max, context=None):
list_dates = []
date = time.strftime('%Y-%m-%d')
from_dt = datetime.datetime.strptime(date, '%Y-%m-%d')
to_dt = datetime.datetime.strptime(date_max, '%Y-%m-%d')
timedelta = to_dt - from_dt
diff_day = timedelta.days + float(timedelta.seconds) / 86400
j=0
while(j <= diff_day):
d = datetime.datetime.strptime(date, '%Y-%m-%d') + datetime.timedelta(days=j)
j +=1
list_dates.append(d.strftime('%Y-%m-%d'))
list_dates.sort()
return list_dates
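    # Example (illustrative): with today being '2015-03-10' and date_max='2015-03-12',
    # the helper returns ['2015-03-10', '2015-03-11', '2015-03-12'], i.e. every
    # calendar day from today up to and including the max date.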
    # Build the list of products used in sales orders, manufacturing orders and suggested previsions
def list_products_availables(self, cr, uid, niveau, context=None):
lst_products = []
if niveau == 0:
            # Search in the sales orders
order_obj = self.pool.get('sale.order.line')
sale_ids = order_obj.search(cr, uid, [], context=context)
if sale_ids:
for sale in order_obj.browse(cr, uid, sale_ids, context=context):
if sale.product_id not in lst_products:
lst_products.append(sale.product_id)
else:
continue
            # Search in the manufacturing orders
mrp_obj = self.pool.get('mrp.production')
mrp_ids = mrp_obj.search(cr, uid, [], context=context)
if mrp_ids:
for mrp in mrp_obj.browse(cr, uid, mrp_ids, context=context):
if mrp.product_id not in lst_products:
lst_products.append(mrp.product_id)
else:
continue
        else:  # search in the 'suggested requirement' previsions of the appropriate level
prevision_obj = self.pool.get('mrp.prevision')
prevision_ids = prevision_obj.search(cr, uid, [('type','=','ft'),('niveau','=',niveau),], context=context)
if prevision_ids:
for prev in prevision_obj.browse(cr, uid, prevision_ids, context=context):
if prev.product_id not in lst_products:
lst_products.append(prev.product_id)
else:
continue
return lst_products
    # Compute the total quantity of the sales orders
def sum_qty_cmd(self, cr, uid, date, product, context=None):
sale_obj = self.pool.get('sale.order')
sale_line_obj = self.pool.get('sale.order.line')
procurement_obj = self.pool.get('procurement.order')
stock_move_obj = self.pool.get('stock.move')
if date == time.strftime('%Y-%m-%d'):
sale_ids = sale_obj.search(cr, uid, [('date_expedition','<=', date)], context=context)
line_ids = sale_line_obj.search(cr, uid, [('state','not in',('done', 'cancel')), ('order_id','in', sale_ids), ('product_id','=',product)])
else:
sale_ids = sale_obj.search(cr, uid, [('date_expedition','=', date)], context=context)
line_ids = sale_line_obj.search(cr, uid, [('state','not in',('done', 'cancel')), ('order_id','in', sale_ids), ('product_id','=',product)])
qty = 0
if line_ids:
draft_line_ids = sale_line_obj.search(cr, uid, [('id','in', line_ids), ('state','=', 'draft')], context=context)
if draft_line_ids:
for line in sale_line_obj.read(cr, uid, draft_line_ids, ['product_uom_qty'], context=context):
qty += line['product_uom_qty']
confirm_line_ids = sale_line_obj.search(cr, uid, [('id','in', line_ids), ('state','!=', 'draft')], context=context)
proc_line_ids = procurement_obj.search(cr, uid, [('sale_line_id','in',confirm_line_ids)], context=context)
if proc_line_ids:
for line_id in proc_line_ids:
proc_line = procurement_obj.browse(cr, uid, line_id, context=context)
deliv_line_ids = stock_move_obj.search(cr, uid, [('procurement_id','=',line_id)], context=context)
product_qty = proc_line.sale_line_id.product_uom_qty
if deliv_line_ids:
for deliv_line in stock_move_obj.read(cr, uid, deliv_line_ids, ['product_uom_qty', 'state'], context=context):
if deliv_line['state'] == 'done':
product_qty -= deliv_line['product_uom_qty']
else:
continue
qty += product_qty
return qty
    # Return the quantity of products not yet manufactured
def sum_qty_of(self, cr, uid, lst_mrp, context=None):
qty_of = 0
if lst_mrp:
for prod in self.pool.get('mrp.production').browse(cr, uid, lst_mrp, context=context):
done = 0.0
for move in prod.move_created_ids2:
if move.product_id == prod.product_id:
if not move.scrapped:
done += move.product_qty
qty_prod = (prod.product_qty - done)
qty_of += qty_prod
return qty_of
    # Compute the total quantity of the launched manufacturing orders
def list_mrp_prod(self, cr, uid, date, product, context=None):
date1 = time.strftime('%Y-%m-%d %H:%M:%S', time.strptime(date + ' 00:00:00', '%Y-%m-%d %H:%M:%S'))
date2 = time.strftime('%Y-%m-%d %H:%M:%S', time.strptime(date + ' 23:59:59', '%Y-%m-%d %H:%M:%S'))
if date == time.strftime('%Y-%m-%d'):
cr.execute("SELECT DISTINCT(id) FROM mrp_production WHERE product_id = %s AND date_planned <= %s AND state not in ('cancel', 'done')", (product, date2,))
else:
cr.execute("SELECT DISTINCT(id) FROM mrp_production WHERE product_id = %s AND date_planned <= %s AND date_planned >= %s AND state not in ('cancel', 'done')", (product, date2, date1,))
lst_mrp = [t[0] for t in cr.fetchall()]
if lst_mrp:
return self.sum_qty_of(cr, uid, lst_mrp, context=context)
else:
return 0
    # Compute the total quantity of the suggestion previsions
def sum_qty_prevision_sug(self, cr, uid, prevision, context=None):
if prevision:
return prevision.quantity
else:
return 0
    # Compute the total quantity of the 'suggested requirement' previsions
def sum_qty_besoin_sugg(self, cr, uid, date, product, niveau, context=None):
if date == time.strftime('%Y-%m-%d'):
cr.execute("SELECT SUM(quantity) FROM mrp_prevision " \
"WHERE type = 'ft' AND start_date <= %s AND product_id = %s AND niveau = %s", (date, product, niveau,))
else:
cr.execute("SELECT SUM(quantity) FROM mrp_prevision " \
"WHERE type = 'ft' AND start_date = %s AND product_id = %s AND niveau = %s", (date, product, niveau))
qty_ft = cr.fetchone()
if qty_ft[0] is None:
return 0
else:
return qty_ft[0]
    # Determine the max date of the suggested requirements
def date_max_suggestion(self, cr, uid, product, niveau, context=None):
cr.execute("SELECT max(start_date) FROM mrp_prevision " \
"WHERE product_id = %s and niveau = %s ", (product, niveau,))
max_date = cr.fetchone()
if max_date[0] is None:
return ''
else:
return max_date[0]
    # Compute the actual quantity in stock
def calcul_qty_stock_reel(self, cr, uid, product, context=None):
inventory_obj = self.pool.get('stock.inventory.line')
cr.execute('SELECT MAX(inv.id) FROM stock_inventory inv ' \
'JOIN stock_inventory_line inv_line ON inv.id = inv_line.inventory_id ' \
'WHERE inv_line.product_id = %s ', (product.id, ))
last_inventory = cr.fetchone()
inventory_ids = inventory_obj.search(cr, uid, [('product_id','=',product.id), ('inventory_id','=',last_inventory[0])], context=context)
qty_stock = - product.is_stock_secu
if inventory_ids:
for inv in inventory_obj.browse(cr, uid, inventory_ids, context=context):
qty_stock += inv.product_qty
return qty_stock
    # Compute the theoretical stock
def calcul_stock_theorique(self, cr, uid, qty_stock, qty, qty_mrp, qty_prev, qty_four, context=None):
qty_th = qty_stock - qty + qty_mrp + qty_prev + qty_four
return qty_th
    # Compute the prevision quantity based on the minimum lot size and the 'multiple of' value
def calcul_prevision_qty(self, cr, uid, stock_th, product, context=None):
if -(stock_th) <= product.lot_mini:
prev_qty = product.lot_mini + (product.lot_mini * product.is_perte / 100)
return prev_qty
        else:  # the absolute value of stock_th is greater than lot_mini
qty1 = -(stock_th) - product.lot_mini
qty2 = qty1 / product.multiple
if int(qty2) < qty2:
qty2 = int(qty2) + 1
qty = product.lot_mini + (qty2 * product.multiple)
prev_qty = qty + (qty * product.is_perte / 100)
return prev_qty
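    # Worked example (illustrative, assumed float-valued fields): with stock_th=-130,
    # lot_mini=100, multiple=25 and is_perte=10, the shortfall above the minimum lot is
    # 30, which rounds up to 2 multiples, so qty = 100 + 2*25 = 150 and the returned
    # prevision quantity is 150 + 10% = 165.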
    # Compute the total quantity of the supplier (purchase) orders
def sum_qty_cmd_four(self, cr, uid, date, product, context=None):
purchase_line_obj = self.pool.get('purchase.order.line')
stock_move_obj = self.pool.get('stock.move')
if date == time.strftime('%Y-%m-%d'):
line_ids = purchase_line_obj.search(cr, uid, [('state','not in',('done', 'cancel')), ('date_planned','<=', date), ('product_id','=',product)])
else:
line_ids = purchase_line_obj.search(cr, uid, [('state','not in',('done', 'cancel')), ('date_planned','=', date), ('product_id','=',product)])
qty = 0
if line_ids:
draft_line_ids = purchase_line_obj.search(cr, uid, [('id','in', line_ids), ('state','=', 'draft')], context=context)
if draft_line_ids:
for line in purchase_line_obj.read(cr, uid, draft_line_ids, ['product_qty'], context=context):
qty += line['product_qty']
confirm_line_ids = purchase_line_obj.search(cr, uid, [('id','in', line_ids), ('state','!=', 'draft')], context=context)
if confirm_line_ids:
for line_id in confirm_line_ids:
recept_line_ids = stock_move_obj.search(cr, uid, [('purchase_line_id','=',line_id)], context=context)
line = purchase_line_obj.read(cr, uid, line_id, ['product_qty'], context=context)
product_qty = line['product_qty']
if recept_line_ids:
for recept_line in stock_move_obj.read(cr, uid, recept_line_ids, ['product_uom_qty', 'state'], context=context):
if recept_line['state'] == 'done':
product_qty -= recept_line['product_uom_qty']
else:
continue
qty += product_qty
return qty
def prevision_fournisseur(self, cr, uid, product, context=None):
cr.execute("SELECT MAX(id) FROM mrp_prevision " \
"WHERE type = 'sa' AND product_id = %s ", (product,))
prevision_id = cr.fetchone()
if prevision_id[0] is None:
return False
else:
return True
    # Return True if a supplier prevision must be created
def create_prevision_sug_cmd_four(self, cr, uid, product, date, stock_four, context=None):
cr.execute("SELECT MAX(id) FROM mrp_prevision " \
"WHERE type = 'sa' AND product_id = %s ", (product,))
prevision_id = cr.fetchone()
if prevision_id[0] is None:
return True
else:
prevision_obj = self.pool.get('mrp.prevision')
prevision = prevision_obj.browse(cr, uid, prevision_id[0], context=context)
if self.calcul_prevision_qty(cr, uid, (prevision.stock_th - stock_four), prevision.product_id, context=context) <= prevision.quantity:
return False
else:
return True
    # Compute the start date of the prevision
def calcul_date_prevision(self, cr, uid, date, quantity, product, type, company, context=None):
time_production = quantity * product.temps_realisation
delai = product.produce_delay + product.delai_cq
start_date = datetime.datetime.strptime(date, '%Y-%m-%d') - datetime.timedelta(days=delai) - datetime.timedelta(seconds=time_production)
start_time = start_date.strftime('%H:%M:%S')
if start_time > '01:00:00':
start_date = datetime.datetime.strptime(date, '%Y-%m-%d') - datetime.timedelta(days=(delai + 1)) - datetime.timedelta(seconds=time_production)
start_date = start_date.strftime('%Y-%m-%d')
partner = False
if type == 'fs':
partner = company
if type == 'sa':
if product.seller_ids:
partner = product.seller_ids[0].name
start_date = self.format_start_date(cr, uid, start_date, partner, context)
return start_date
    # Determine the start date, taking into account the closing days of the plant and of the supplier
def format_start_date(self, cr, uid, date, partner, context=None):
is_api = self.pool.get('is.api')
if partner:
            # weekly closing days of the company
jours_fermes = is_api.num_closing_days(cr, uid, partner, context=context)
            # leave/holiday days of the company
leave_dates = is_api.get_leave_dates(cr, uid, partner, context=context)
num_day = time.strftime('%w', time.strptime(date, '%Y-%m-%d'))
date = is_api.get_working_day(cr, uid, date, num_day, jours_fermes, leave_dates, context=context)
return date
def chiffre_texte(self, cr, uid, num_od, context=None):
if len(num_od) == 1:
return '0000'
elif len(num_od) == 2:
return '000'
elif len(num_od) == 3:
return '00'
elif len(num_od) == 4:
return '0'
else:
return ''
    # Build the prevision name
def formater_nom_prevision(self, cr, uid, type, num_od, context=None):
part = self.chiffre_texte(cr, uid, str(num_od), context) + str(num_od)
if type == 'fs':
return 'FS-' + part
elif type == 'ft':
return 'FT-' + part
else:
return 'SA-' + part
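    # Example (illustrative): num_od=42 is zero padded to five digits, so a 'fs'
    # prevision is named 'FS-00042', a 'ft' one 'FT-00042' and a 'sa' one 'SA-00042'.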
    # Create a prevision
def create_prevision(self, cr, uid, product, quantity, start_date, end_date, type, niveau, stock_th, num_od_fs, num_od_sa, note, context=None):
prevision_obj = self.pool.get('mrp.prevision')
if type in ('fs', 'ft'):
num_od = num_od_fs
if type == 'sa':
num_od = num_od_sa
prevision_values = {
'num_od': num_od,
'name': self.formater_nom_prevision(cr, uid, type, num_od, context),
'type': type,
'product_id': product,
'quantity': quantity,
'start_date': start_date,
'end_date': end_date,
'niveau': niveau,
'stock_th': stock_th,
'note': note,
}
prevision = prevision_obj.create(cr, uid, prevision_values, context=context)
return prevision
    # Determine the first level of a product's bill of materials
def get_product_boms(self, cr, uid, product, context=None):
boms = []
bom_obj = self.pool.get('mrp.bom')
template_id = product.product_tmpl_id and product.product_tmpl_id.id or False
if template_id:
bom_ids = bom_obj.search(cr, uid, [('product_tmpl_id','=',template_id),], context=context)
if bom_ids:
for line in bom_obj.browse(cr, uid, bom_ids[0], context=context).bom_line_ids:
boms.append(line.id)
return boms
    # Check whether a product has a bill of materials or not
def product_nomenclature(self, cr, uid, product, context=None):
bom_obj = self.pool.get('mrp.bom')
product = self.pool.get('product.product').read(cr, uid, product, ['product_tmpl_id'], context=context)
template_id = product['product_tmpl_id'] and product['product_tmpl_id'][0] or False
if template_id:
bom_ids = bom_obj.search(cr, uid, [('product_tmpl_id','=',template_id),], context=context)
if bom_ids:
if bom_obj.browse(cr, uid, bom_ids[0], context=context).bom_line_ids :
return True
return False
def generate_previsions(self, cr, uid, ids, context=None):
prevision_obj = self.pool.get('mrp.prevision')
bom_line_obj = self.pool.get('mrp.bom.line')
company_obj = self.pool.get('res.company')
result = []
if context is None:
context = {}
data = self.read(cr, uid, ids)[0]
company = company_obj.browse(cr, uid, data['company_id'][0], context=context)
if data:
            # Compute the dates between today and the max date
dates = self.list_dates_availables(cr, uid, data['max_date'], context=context)
print 'dates *******', dates
            # delete the existing previsions of type "manufacturing suggestion"
prevision_ids = prevision_obj.search(cr, uid, [('active','=',True),], context=context)
prevision_obj.unlink(cr, uid, prevision_ids, context=context)
niveau = 0
lst_items = []
num_od_fs = 0
num_od_sa = 0
while (niveau < 10):
                # Create FS previsions for products that have sales orders or manufacturing orders when niveau = 0
                # Create FS previsions for products that have 'suggested requirement' previsions when niveau > 1
lst_products = self.list_products_availables(cr, uid, niveau, context=context)
print 'lst_products ******', lst_products
if lst_products:
res_fs = []
for product in lst_products:
                        # Initialize the prevision and the theoretical stock
prevision = None
stock_theor = 0
exist_item = False
if lst_items:
for item in lst_items:
if item['product_id'] == product.id:
exist_item = True
else:
continue
if not lst_items or not exist_item:
lst_items.append({'product_id':product.id, 'stock_reel':0, 'date_max_ft': '', 'qty_four':0, 'niv_four':10, 'sum_stock_th':0, 'sum_qty_prev':0 })
print 'lst_items******', lst_items
for date in dates:
                            # Compute the total sales order quantity if niveau = 0
                            # Compute the total 'suggested requirement' prevision quantity if niveau > 0
qty = 0
if niveau == 0:
qty = self.sum_qty_cmd(cr, uid, date, product.id, context=context)
else:
qty = self.sum_qty_besoin_sugg(cr, uid, date, product.id, niveau, context=context)
                            # Compute the total quantity of the manufacturing orders
qty_mrp = self.list_mrp_prod(cr, uid, date, product.id, context=None)
                            # Compute the total quantity of the suggestion previsions
qty_prev = self.sum_qty_prevision_sug(cr, uid, prevision, context=context)
                            # Compute the total quantity of the supplier orders
qty_four = 0
for item in lst_items:
if item['product_id'] == product.id:
if niveau < item['niv_four']:
item['niv_four'] = niveau
date_max = self.date_max_suggestion(cr, uid, product.id, niveau, context=context)
item['date_max_ft'] = date_max
qty_four = self.sum_qty_cmd_four(cr, uid, date, product.id, context=context)
if niveau == item['niv_four'] and date <= item['date_max_ft']:
item['qty_four'] += qty_four
else:
if date == time.strftime('%Y-%m-%d'):
qty_four = item['qty_four']
else:
qty_four = 0
else:
continue
                            # Compute the theoretical stock
                            if date == time.strftime('%Y-%m-%d'):  # first iteration
qty_stock = self.calcul_qty_stock_reel(cr, uid, product, context=context)
for item in lst_items:
if item['product_id'] == product.id:
if niveau == item['niv_four']:
item['stock_reel'] = qty_stock
else:
qty_stock = item['stock_reel']
else:
continue
stock_th = self.calcul_stock_theorique(cr, uid, qty_stock, qty, qty_mrp, qty_prev, qty_four, context=context)
                            else:  # remaining iterations
stock_th = self.calcul_stock_theorique(cr, uid, stock_theor, qty, qty_mrp, qty_prev, qty_four, context=context)
                            # Update the actual stock and the supplier order quantity
for item in lst_items:
if item['product_id'] == product.id:
if stock_th <= 0:
item['stock_reel'] = 0
item['qty_four'] = 0
else:
sum_items = item['stock_reel'] + item['qty_four']
if sum_items > stock_th:
diff = sum_items - stock_th
if item['stock_reel'] >= diff:
item['stock_reel'] -= diff
else:
qty = diff - item['stock_reel']
item['qty_four'] -= qty
item['stock_reel'] = 0
else:
pass
                            # If the theoretical stock is negative, create a suggestion prevision
if stock_th < 0:
                                # Compute the prevision quantity based on the minimum lot size and the 'multiple of' value
quantity = self.calcul_prevision_qty(cr, uid, stock_th, product, context=context)
                                # If existing previsions can satisfy the quantity to be created, do not create a new prevision
create_prev = True
if not self.product_nomenclature(cr, uid, product.id, context=context):
for item in lst_items:
if item['product_id'] == product.id:
sum_qty = self.calcul_prevision_qty(cr, uid, item['sum_stock_th'] + stock_th, product, context=context)
if sum_qty <= item['sum_qty_prev']:
item['sum_stock_th'] += stock_th
stock_th = 0
create_prev = False
else:
create_prev = True
else:
continue
type_prev = 'sa'
num_od_sa += 1
else:
type_prev = 'fs'
num_od_fs += 1
                                # Compute the start date of the prevision
start_date = self.calcul_date_prevision(cr, uid, date, quantity, product, type_prev, company.partner_id, context=context)
if create_prev:
prevision_id = self.create_prevision(cr, uid, product.id, quantity, start_date, date, type_prev, niveau, stock_th, num_od_fs, num_od_sa, '', context=context)
result.append(prevision_id)
res_fs.append(prevision_id)
prevision_init = prevision_obj.browse(cr, uid, prevision_id, context=context)
prevision = prevision_init
stock_theor = stock_th
for elem in lst_items:
if elem['product_id'] == product.id and type_prev == 'sa':
elem['sum_stock_th'] += stock_th
elem['sum_qty_prev'] += quantity
else:
continue
else:
prevision = None
stock_theor = stock_th
else:
prevision = None
stock_theor = stock_th
                # Create 'suggested requirement' previsions
if res_fs:
niveau += 1
res_ft = []
for prevision in prevision_obj.browse(cr, uid, res_fs, context=context):
bom_ids = self.get_product_boms(cr, uid, prevision.product_id, context=context)
if bom_ids:
for bom in bom_line_obj.browse(cr, uid, bom_ids, context=context):
qty = prevision.quantity * bom.product_qty
note = 'Prevision: ' + str(prevision.name) + '\n' + 'Produit: ' + str(prevision.product_id.default_code)
prev_bes_sug_id = self.create_prevision(cr, uid, bom.product_id.id, qty, prevision.start_date, prevision.end_date, 'ft', niveau, 0, prevision.num_od, num_od_sa, note, context=context)
res_ft.append(prev_bes_sug_id)
result.append(prev_bes_sug_id)
if not res_ft:
niveau = 10
else:
niveau = 10
action_model = False
data_pool = self.pool.get('ir.model.data')
action = {}
action_model,action_id = data_pool.get_object_reference(cr, uid, 'is_mrp', "action_mrp_prevision_form")
if action_model:
action_pool = self.pool.get(action_model)
action = action_pool.read(cr, uid, action_id, context=context)
action['domain'] = "[('id','in', ["+','.join(map(str,result))+"])]"
return action
mrp_generate_previsions()
| lgpl-3.0 | -4,707,348,792,026,300,000 | 50.4 | 219 | 0.478328 | false | 4.033925 | false | false | false |
USGSDenverPychron/pychron | pychron/dvc/dvc.py | 1 | 41939 | # ===============================================================================
# Copyright 2015 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
import json
import os
import shutil
import time
from datetime import datetime
from itertools import groupby
from math import isnan
from apptools.preferences.preference_binding import bind_preference
from git import Repo
from traits.api import Instance, Str, Set, List, provides
from uncertainties import nominal_value, std_dev
from pychron.core.helpers.filetools import remove_extension, list_subdirectories
from pychron.core.i_datastore import IDatastore
from pychron.core.progress import progress_loader, progress_iterator
from pychron.database.interpreted_age import InterpretedAge
from pychron.dvc import dvc_dump, dvc_load, analysis_path, repository_path, AnalysisNotAnvailableError
from pychron.dvc.defaults import TRIGA, HOLDER_24_SPOKES, LASER221, LASER65
from pychron.dvc.dvc_analysis import DVCAnalysis, PATH_MODIFIERS
from pychron.dvc.dvc_database import DVCDatabase
from pychron.dvc.func import find_interpreted_age_path, GitSessionCTX, push_repositories
from pychron.dvc.meta_repo import MetaRepo, Production
from pychron.envisage.browser.record_views import InterpretedAgeRecordView
from pychron.git.hosts import IGitHost, CredentialException
from pychron.git_archive.repo_manager import GitRepoManager, format_date, get_repository_branch
from pychron.globals import globalv
from pychron.loggable import Loggable
from pychron.paths import paths, r_mkdir
from pychron.pychron_constants import RATIO_KEYS, INTERFERENCE_KEYS
TESTSTR = {'blanks': 'auto update blanks', 'iso_evo': 'auto update iso_evo'}
class DVCException(BaseException):
def __init__(self, attr):
self._attr = attr
def __repr__(self):
return 'DVCException: neither DVCDatabase or MetaRepo have {}'.format(self._attr)
def __str__(self):
return self.__repr__()
class Tag(object):
name = None
path = None
@classmethod
def from_analysis(cls, an):
tag = cls()
tag.name = an.tag
tag.record_id = an.record_id
tag.repository_identifier = an.repository_identifier
tag.path = analysis_path(an.record_id, an.repository_identifier, modifier='tags')
return tag
def dump(self):
obj = {'name': self.name}
if not self.path:
self.path = analysis_path(self.record_id, self.repository_identifier, modifier='tags', mode='w')
# with open(self.path, 'w') as wfile:
# json.dump(obj, wfile, indent=4)
dvc_dump(obj, self.path)
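# Illustrative sketch (not part of the original module): tagging an analysis simply
# serializes {"name": <tag>} into the analysis' tags entry, e.g.
#
#   tag = Tag.from_analysis(analysis)   # any object with tag/record_id/repository_identifier
#   tag.dump()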
class DVCInterpretedAge(InterpretedAge):
def from_json(self, obj):
for a in ('age', 'age_err', 'kca', 'kca_err', 'age_kind', 'kca_kind', 'mswd',
'sample', 'material', 'identifier', 'nanalyses', 'irradiation'):
setattr(self, a, obj[a])
@provides(IDatastore)
class DVC(Loggable):
"""
main interface to DVC backend. Delegates responsibility to DVCDatabase and MetaRepo
"""
db = Instance('pychron.dvc.dvc_database.DVCDatabase')
meta_repo = Instance('pychron.dvc.meta_repo.MetaRepo')
meta_repo_name = Str
organization = Str
default_team = Str
current_repository = Instance(GitRepoManager)
auto_add = True
pulled_repositories = Set
selected_repositories = List
def __init__(self, bind=True, *args, **kw):
super(DVC, self).__init__(*args, **kw)
if bind:
self._bind_preferences()
# self.synchronize()
# self._defaults()
def initialize(self, inform=False):
self.debug('Initialize DVC')
if not self.meta_repo_name:
self.warning_dialog('Need to specify Meta Repository name in Preferences')
return
self.open_meta_repo()
# update meta repo.
self.meta_pull()
if self.db.connect():
# self._defaults()
return True
def open_meta_repo(self):
mrepo = self.meta_repo
root = os.path.join(paths.dvc_dir, self.meta_repo_name)
self.debug('open meta repo {}'.format(root))
if os.path.isdir(os.path.join(root, '.git')):
self.debug('Opening Meta Repo')
mrepo.open_repo(root)
else:
url = self.make_url(self.meta_repo_name)
self.debug('cloning meta repo url={}'.format(url))
path = os.path.join(paths.dvc_dir, self.meta_repo_name)
self.meta_repo.clone(url, path)
def synchronize(self, pull=True):
"""
pull meta_repo changes
:return:
"""
if pull:
self.meta_repo.pull()
else:
self.meta_repo.push()
def load_analysis_backend(self, ln, isotope_group):
db = self.db
with db.session_ctx():
ip = db.get_identifier(ln)
dblevel = ip.level
irrad = dblevel.irradiation.name
level = dblevel.name
pos = ip.position
fd = self.meta_repo.get_flux(irrad, level, pos)
prodname, prod = self.meta_repo.get_production(irrad, level)
cs = self.meta_repo.get_chronology(irrad)
x = datetime.now()
now = time.mktime(x.timetuple())
if fd['lambda_k']:
isotope_group.arar_constants.lambda_k = fd['lambda_k']
isotope_group.trait_set(j=fd['j'],
# lambda_k=lambda_k,
production_ratios=prod.to_dict(RATIO_KEYS),
interference_corrections=prod.to_dict(INTERFERENCE_KEYS),
chron_segments=cs.get_chron_segments(x),
irradiation_time=cs.irradiation_time,
timestamp=now)
return True
def repository_db_sync(self, reponame):
repo = self._get_repository(reponame, as_current=False)
ps = []
ans = self.db.repository_analyses(reponame)
for ai in ans:
p = analysis_path(ai.record_id, reponame)
obj = dvc_load(p)
sample = None
project = None
material = None
changed = False
for attr, v in (('sample', sample),
('project', project),
('material', material)):
if obj.get(attr) != v:
obj[attr] = v
changed = True
if changed:
ps.append(p)
dvc_dump(obj, p)
if ps:
repo.pull()
repo.add_paths(ps)
repo.commit('Synced repository with database {}'.format(self.db.datasource_url))
repo.push()
def repository_transfer(self, ans, dest):
def key(x):
return x.repository_identifier
destrepo = self._get_repository(dest, as_current=False)
for src, ais in groupby(sorted(ans, key=key), key=key):
repo = self._get_repository(src, as_current=False)
for ai in ais:
ops, nps = self._transfer_analysis_to(dest, src, ai.runid)
repo.add_paths(ops)
destrepo.add_paths(nps)
# update database
dbai = self.db.get_analysis_uuid(ai.uuid)
for ri in dbai.repository_associations:
if ri.repository == src:
ri.repository = dest
# commit src changes
repo.commit('Transferred analyses to {}'.format(dest))
            destrepo.commit('Transferred analyses from {}'.format(src))
def _transfer_analysis_to(self, dest, src, rid):
p = analysis_path(rid, src)
np = analysis_path(rid, dest)
obj = dvc_load(p)
obj['repository_identifier'] = dest
dvc_dump(obj, p)
ops = [p]
nps = [np]
shutil.move(p, np)
for modifier in ('baselines', 'blanks', 'extraction',
'intercepts', 'icfactors', 'peakcenter', '.data'):
p = analysis_path(rid, src, modifier=modifier)
np = analysis_path(rid, dest, modifier=modifier)
shutil.move(p, np)
ops.append(p)
nps.append(np)
return ops, nps
def get_flux(self, irrad, level, pos):
fd = self.meta_repo.get_flux(irrad, level, pos)
return fd['j']
def freeze_flux(self, ans):
self.info('freeze flux')
def ai_gen():
key = lambda x: x.irradiation
lkey = lambda x: x.level
rkey = lambda x: x.repository_identifier
for irrad, ais in groupby(sorted(ans, key=key), key=key):
for level, ais in groupby(sorted(ais, key=lkey), key=lkey):
p = self.get_level_path(irrad, level)
obj = dvc_load(p)
if isinstance(obj, list):
positions = obj
else:
positions = obj['positions']
for repo, ais in groupby(sorted(ais, key=rkey), key=rkey):
yield repo, irrad, level, {ai.irradiation_position: positions[ai.irradiation_position] for ai in
ais}
added = []
def func(x, prog, i, n):
repo, irrad, level, d = x
if prog:
prog.change_message('Freezing Flux {}{} Repository={}'.format(irrad, level, repo))
root = os.path.join(paths.repository_dataset_dir, repo, 'flux', irrad)
r_mkdir(root)
p = os.path.join(root, level)
if os.path.isfile(p):
dd = dvc_load(p)
                dd.update(d)
                d = dd
            dvc_dump(d, p)
added.append((repo, p))
progress_loader(ai_gen(), func, threshold=1)
self._commit_freeze(added, '<FLUX_FREEZE>')
def freeze_production_ratios(self, ans):
self.info('freeze production ratios')
def ai_gen():
key = lambda x: x.irradiation
lkey = lambda x: x.level
for irrad, ais in groupby(sorted(ans, key=key), key=key):
for level, ais in groupby(sorted(ais, key=lkey), key=lkey):
pr = self.meta_repo.get_production(irrad, level)
for ai in ais:
yield pr, ai
added = []
def func(x, prog, i, n):
pr, ai = x
if prog:
prog.change_message('Freezing Production {}'.format(ai.runid))
p = analysis_path(ai.runid, ai.repository_identifier, 'productions', mode='w')
pr.dump(path=p)
added.append((ai.repository_identifier, p))
progress_loader(ai_gen(), func, threshold=1)
self._commit_freeze(added, '<PR_FREEZE>')
def _commit_freeze(self, added, msg):
key = lambda x: x[0]
rr = sorted(added, key=key)
for repo, ps in groupby(rr, key=key):
rm = GitRepoManager()
rm.open_repo(repo, paths.repository_dataset_dir)
rm.add_paths(ps)
rm.smart_pull()
rm.commit(msg)
# database
# analysis manual edit
# def manual_intercepts(self, runid, experiment_identifier, values, errors):
# return self._manual_edit(runid, experiment_identifier, values, errors, 'intercepts')
#
# def manual_blanks(self, runid, experiment_identifier, values, errors):
# return self._manual_edit(runid, experiment_identifier, values, errors, 'blanks')
#
# def manual_baselines(self, runid, experiment_identifier, values, errors):
# return self._manual_edit(runid, experiment_identifier, values, errors, 'baselines')
def manual_edit(self, runid, repository_identifier, values, errors, modifier):
self.debug('manual edit {} {} {}'.format(runid, repository_identifier, modifier))
self.debug('values {}'.format(values))
self.debug('errors {}'.format(errors))
path = analysis_path(runid, repository_identifier, modifier=modifier)
with open(path, 'r') as rfile:
obj = json.load(rfile)
for k, v in values.iteritems():
o = obj[k]
o['manual_value'] = v
o['use_manual_value'] = True
for k, v in errors.iteritems():
o = obj[k]
o['manual_error'] = v
o['use_manual_error'] = True
dvc_dump(obj, path)
return path
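    # Illustrative call (runid, repository and isotope keys are example assumptions):
    # override the Ar40 intercept value and flag a manual Ar39 error, then commit the
    # change with commit_manual_edits().
    #
    #   p = dvc.manual_edit('12345-01A', 'SomeRepo', {'Ar40': 12.3}, {'Ar39': 0.02}, 'intercepts')
    #   dvc.commit_manual_edits('SomeRepo', [p], '<MANUAL> edited intercepts')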
def revert_manual_edits(self, runid, repository_identifier):
ps = []
for mod in ('intercepts', 'blanks', 'baselines', 'icfactors'):
path = analysis_path(runid, repository_identifier, modifier=mod)
with open(path, 'r') as rfile:
obj = json.load(rfile)
for item in obj.itervalues():
if isinstance(item, dict):
item['use_manual_value'] = False
item['use_manual_error'] = False
ps.append(path)
dvc_dump(obj, path)
msg = '<MANUAL> reverted to non manually edited'
self.commit_manual_edits(repository_identifier, ps, msg)
def commit_manual_edits(self, repository_identifier, ps, msg):
if self.repository_add_paths(repository_identifier, ps):
self.repository_commit(repository_identifier, msg)
# analysis processing
def analysis_has_review(self, ai, attr):
return True
# test_str = TESTSTR[attr]
# repo = self._get_experiment_repo(ai.experiment_id)
# for l in repo.get_log():
# if l.message.startswith(test_str):
# self.debug('{} {} reviewed'.format(ai, attr))
# return True
# else:
# self.debug('{} {} not reviewed'.format(ai, attr))
def update_analyses(self, ans, modifier, msg):
key = lambda x: x.repository_identifier
ans = sorted(ans, key=key)
mod_repositories = []
for expid, ais in groupby(ans, key=key):
paths = map(lambda x: analysis_path(x.record_id, x.repository_identifier, modifier=modifier), ais)
# print expid, modifier, paths
if self.repository_add_paths(expid, paths):
self.repository_commit(expid, msg)
mod_repositories.append(expid)
# ais = map(analysis_path, ais)
# if self.experiment_add_analyses(exp, ais):
# self.experiment_commit(exp, msg)
# mod_experiments.append(exp)
return mod_repositories
def update_tag(self, an):
tag = Tag.from_analysis(an)
tag.dump()
expid = an.repository_identifier
return self.repository_add_paths(expid, tag.path)
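    # Illustrative flow (sketch, the commit message is only an example): after
    # retagging, the staged tag file still needs a commit in its repository.
    #
    #   if dvc.update_tag(analysis):
    #       dvc.repository_commit(analysis.repository_identifier, '<TAG> updated tag')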
def save_icfactors(self, ai, dets, fits, refs):
if fits and dets:
self.info('Saving icfactors for {}'.format(ai))
ai.dump_icfactors(dets, fits, refs, reviewed=True)
def save_blanks(self, ai, keys, refs):
if keys:
self.info('Saving blanks for {}'.format(ai))
ai.dump_blanks(keys, refs, reviewed=True)
def save_fits(self, ai, keys):
if keys:
self.info('Saving fits for {}'.format(ai))
ai.dump_fits(keys, reviewed=True)
def save_flux(self, identifier, j, e):
self.meta_pull()
with self.session_ctx(use_parent_session=False):
irp = self.get_identifier(identifier)
if irp:
level = irp.level
irradiation = level.irradiation
self.save_j(irradiation.name, level.name, irp.position, identifier, j, e, 0, 0, None, None)
self.meta_commit('User manual edited flux')
self.meta_push()
def save_j(self, irradiation, level, pos, identifier, j, e, mj, me, decay, analyses, add=True):
self.info('Saving j for {}{}:{} {}, j={} +/-{}'.format(irradiation, level,
pos, identifier, j, e))
self.meta_repo.update_flux(irradiation, level, pos, identifier, j, e, mj, me, decay, analyses, add)
with self.session_ctx(use_parent_session=False):
ip = self.get_identifier(identifier)
ip.j = j
ip.j_err = e
def remove_irradiation_position(self, irradiation, level, hole):
db = self.db
dbpos = db.get_irradiation_position(irradiation, level, hole)
if dbpos:
db.delete(dbpos)
self.meta_repo.remove_irradiation_position(irradiation, level, hole)
def find_interpreted_ages(self, identifiers, repositories):
ias = []
for idn in identifiers:
path = find_interpreted_age_path(idn, repositories)
if path:
obj = dvc_load(path)
name = obj.get('name')
ias.append(InterpretedAgeRecordView(idn, path, name))
return ias
def find_references(self, ans, atypes, hours, exclude=None, make_records=True, **kw):
records = self.db.find_references(ans, atypes, hours, exclude=exclude, **kw)
if records:
if make_records:
records = self.make_analyses(records)
return records
def make_interpreted_ages(self, ias):
def func(x, prog, i, n):
if prog:
prog.change_message('Making Interpreted age {}'.format(x.name))
obj = dvc_load(x.path)
ia = DVCInterpretedAge()
ia.from_json(obj)
return ia
return progress_loader(ias, func, step=25)
def get_analysis(self, uuid):
an = self.db.get_analysis_uuid(uuid)
if an:
return self.make_analyses(an.record_views)
def make_analysis(self, record, *args, **kw):
a = self.make_analyses((record,), *args, **kw)
if a:
return a[0]
def make_analyses(self, records, calculate_f_only=False):
if not records:
return
globalv.active_analyses = records
# load repositories
st = time.time()
def func(xi, prog, i, n):
if prog:
prog.change_message('Syncing repository= {}'.format(xi))
self.sync_repo(xi, use_progress=False)
exps = {r.repository_identifier for r in records}
progress_iterator(exps, func, threshold=1)
# for ei in exps:
make_record = self._make_record
def func(*args):
# t = time.time()
try:
r = make_record(calculate_f_only=calculate_f_only, *args)
# print 'make time {}'.format(time.time()-t)
return r
except BaseException:
pass
ret = progress_loader(records, func, threshold=1, step=25)
et = time.time() - st
n = len(records)
self.debug('Make analysis time, total: {}, n: {}, average: {}'.format(et, n, et / float(n)))
return ret
# repositories
def repository_add_paths(self, repository_identifier, paths):
repo = self._get_repository(repository_identifier)
return repo.add_paths(paths)
def repository_commit(self, repository, msg):
self.debug('Experiment commit: {} msg: {}'.format(repository, msg))
repo = self._get_repository(repository)
repo.commit(msg)
def remote_repositories(self):
rs = []
gs = self.application.get_services(IGitHost)
if gs:
for gi in gs:
ri = gi.get_repos(self.organization)
rs.extend(ri)
else:
self.warning_dialog('GitLab or GitHub plugin is required')
return rs
def remote_repository_names(self):
rs = []
gs = self.application.get_services(IGitHost)
if gs:
for gi in gs:
ri = gi.get_repository_names(self.organization)
rs.extend(ri)
else:
self.warning_dialog('GitLab or GitHub plugin is required')
return rs
def check_githost_connection(self):
git_service = self.application.get_service(IGitHost)
return git_service.test_connection(self.organization)
def make_url(self, name):
git_service = self.application.get_service(IGitHost)
return git_service.make_url(name, self.organization)
def git_session_ctx(self, repository_identifier, message):
return GitSessionCTX(self, repository_identifier, message)
def sync_repo(self, name, use_progress=True):
"""
        pull or clone a repo
"""
root = os.path.join(paths.repository_dataset_dir, name)
exists = os.path.isdir(os.path.join(root, '.git'))
self.debug('sync repository {}. exists={}'.format(name, exists))
if exists:
repo = self._get_repository(name)
repo.pull(use_progress=use_progress)
return True
else:
self.debug('getting repository from remote')
names = self.remote_repository_names()
service = self.application.get_service(IGitHost)
if name in names:
service.clone_from(name, root, self.organization)
return True
else:
self.debug('name={} not in available repos from service={}, organization={}'.format(name,
service.remote_url,
self.organization))
for ni in names:
self.debug('available repo== {}'.format(ni))
def rollback_repository(self, expid):
repo = self._get_repository(expid)
cpaths = repo.get_local_changes()
        # convert changed paths to a list of analyses
# select paths to revert
rpaths = ('.',)
repo.cmd('checkout', '--', ' '.join(rpaths))
for p in rpaths:
self.debug('revert changes for {}'.format(p))
head = repo.get_head(hexsha=False)
msg = 'Changes to {} reverted to Commit: {}\n' \
'Date: {}\n' \
'Message: {}'.format(expid, head.hexsha[:10],
format_date(head.committed_date),
head.message)
self.information_dialog(msg)
def push_repository(self, repo):
self.debug('push repository {}'.format(repo))
for gi in self.application.get_services(IGitHost):
self.debug('pushing to remote={}, url={}'.format(gi.default_remote_name, gi.remote_url))
repo.push(remote=gi.default_remote_name)
def push_repositories(self, changes):
for gi in self.application.get_services(IGitHost):
push_repositories(changes, gi.default_remote_name, quiet=False)
# IDatastore
def get_greatest_aliquot(self, identifier):
return self.db.get_greatest_aliquot(identifier)
def get_greatest_step(self, identifier, aliquot):
return self.db.get_greatest_step(identifier, aliquot)
def is_connected(self):
return self.db.connected
def connect(self, *args, **kw):
return self.db.connect(*args, **kw)
# meta repo
def update_flux(self, *args, **kw):
self.meta_repo.update_flux(*args, **kw)
def set_identifier(self, *args):
self.meta_repo.set_identifier(*args)
def update_chronology(self, name, doses):
self.meta_repo.update_chronology(name, doses)
self.meta_commit('updated chronology for {}'.format(name))
def meta_pull(self, **kw):
return self.meta_repo.smart_pull(**kw)
def meta_push(self):
self.meta_repo.push()
def meta_add_all(self):
self.meta_repo.add_unstaged(paths.meta_root, add_all=True)
def meta_commit(self, msg):
changes = self.meta_repo.has_staged()
if changes:
self.debug('meta repo has changes: {}'.format(changes))
self.meta_repo.report_status()
self.meta_repo.commit(msg)
self.meta_repo.clear_cache = True
else:
self.debug('no changes to meta repo')
def add_production(self, irrad, name, prod):
self.meta_repo.add_production_to_irradiation(irrad, name, prod)
def get_production(self, irrad, name):
return self.meta_repo.get_production(irrad, name)
# get
def get_local_repositories(self):
return list_subdirectories(paths.repository_dataset_dir)
def get_repository(self, exp):
return self._get_repository(exp)
def get_meta_head(self):
return self.meta_repo.get_head()
def get_irradiation_geometry(self, irrad, level):
dblevel = self.db.get_irradiation_level(irrad, level)
return self.meta_repo.get_irradiation_holder_holes(dblevel.holder)
def get_irradiation_names(self):
irrads = self.db.get_irradiations()
return [i.name for i in irrads]
# add
def add_interpreted_age(self, ia):
a = ia.get_ma_scaled_age()
mswd = ia.preferred_mswd
if isnan(mswd):
mswd = 0
d = dict(age=float(nominal_value(a)),
age_err=float(std_dev(a)),
display_age_units=ia.age_units,
age_kind=ia.preferred_age_kind,
kca_kind=ia.preferred_kca_kind,
kca=float(ia.preferred_kca_value),
kca_err=float(ia.preferred_kca_error),
mswd=float(mswd),
include_j_error_in_mean=ia.include_j_error_in_mean,
include_j_error_in_plateau=ia.include_j_error_in_plateau,
include_j_error_in_individual_analyses=ia.include_j_error_in_individual_analyses,
sample=ia.sample,
material=ia.material,
identifier=ia.identifier,
nanalyses=ia.nanalyses,
irradiation=ia.irradiation)
d['analyses'] = [dict(uuid=ai.uuid, tag=ai.tag, plateau_step=ia.get_is_plateau_step(ai))
for ai in ia.all_analyses]
self._add_interpreted_age(ia, d)
def add_repository_association(self, expid, runspec):
db = self.db
dban = db.get_analysis_uuid(runspec.uuid)
if dban:
for e in dban.repository_associations:
if e.repository == expid:
break
else:
db.add_repository_association(expid, dban)
src_expid = runspec.repository_identifier
if src_expid != expid:
repo = self._get_repository(expid)
for m in PATH_MODIFIERS:
src = analysis_path(runspec.record_id, src_expid, modifier=m)
dest = analysis_path(runspec.record_id, expid, modifier=m, mode='w')
shutil.copyfile(src, dest)
repo.add(dest, commit=False)
repo.commit('added repository association')
else:
self.warning('{} not in the database {}'.format(runspec.runid, self.db.name))
def add_material(self, name, grainsize=None):
db = self.db
added = False
if not db.get_material(name, grainsize):
added = True
db.add_material(name, grainsize)
return added
def add_project(self, name, pi=None, **kw):
added = False
db = self.db
if not db.get_project(name, pi):
added = True
db.add_project(name, pi, **kw)
return added
def add_sample(self, name, project, material, grainsize=None, note=None):
added = False
db = self.db
if not db.get_sample(name, project, material, grainsize):
added = True
db.add_sample(name, project, material, grainsize, note=note)
return added
def add_principal_investigator(self, name):
added = False
db = self.db
if not db.get_principal_investigator(name):
db.add_principal_investigator(name)
added = True
return added
def add_irradiation_position(self, irrad, level, pos, identifier=None, **kw):
db = self.db
added = False
if not db.get_irradiation_position(irrad, level, pos):
db.add_irradiation_position(irrad, level, pos, identifier, **kw)
self.meta_repo.add_position(irrad, level, pos)
added = True
return added
def add_irradiation_level(self, name, irradiation, holder, production_name, **kw):
added = False
dblevel = self.get_irradiation_level(irradiation, name)
if dblevel is None:
added = True
self.db.add_irradiation_level(name, irradiation, holder, production_name, **kw)
self.meta_repo.add_level(irradiation, name)
self.meta_repo.update_level_production(irradiation, name, production_name)
return added
def clone_repository(self, identifier):
root = os.path.join(paths.repository_dataset_dir, identifier)
if not os.path.isdir(root):
self.debug('cloning {}'.format(root))
url = self.make_url(identifier)
Repo.clone_from(url, root)
else:
self.debug('{} already exists'.format(identifier))
def add_repository(self, identifier, principal_investigator, inform=True):
self.debug('trying to add repository identifier={}, pi={}'.format(identifier, principal_investigator))
root = os.path.join(paths.repository_dataset_dir, identifier)
if os.path.isdir(root):
self.debug('already a directory {}'.format(identifier))
return True
names = self.remote_repository_names()
if identifier in names:
# make sure also in the database
self.db.add_repository(identifier, principal_investigator)
if inform:
self.warning_dialog('Repository "{}" already exists'.format(identifier))
return True
else:
if os.path.isdir(root):
self.db.add_repository(identifier, principal_investigator)
if inform:
self.warning_dialog('{} already exists.'.format(root))
else:
gs = self.application.get_services(IGitHost)
ret = False
for i, gi in enumerate(gs):
self.info('Creating repository at {}. {}'.format(gi.name, identifier))
if gi.create_repo(identifier, organization=self.organization, auto_init=True):
ret = True
if self.default_team:
gi.set_team(self.default_team, self.organization, identifier,
permission='push')
url = gi.make_url(identifier, self.organization)
if i == 0:
try:
repo = Repo.clone_from(url, root)
except BaseException, e:
self.debug('failed cloning repo. {}'.format(e))
ret = False
self.db.add_repository(identifier, principal_investigator)
else:
repo.create_remote(gi.default_remote_name or 'origin', url)
return ret
def add_irradiation(self, name, doses=None, add_repo=False, principal_investigator=None):
if self.db.get_irradiation(name):
self.warning('irradiation {} already exists'.format(name))
return
self.db.add_irradiation(name)
self.meta_repo.add_irradiation(name)
self.meta_repo.add_chronology(name, doses)
root = os.path.join(paths.meta_root, name)
p = os.path.join(root, 'productions')
if not os.path.isdir(p):
os.mkdir(p)
with open(os.path.join(root, 'productions.json'), 'w') as wfile:
json.dump({}, wfile)
if add_repo and principal_investigator:
self.add_repository('Irradiation-{}'.format(name), principal_investigator)
return True
def add_load_holder(self, name, path_or_txt):
self.db.add_load_holder(name)
self.meta_repo.add_load_holder(name, path_or_txt)
def copy_production(self, pr):
"""
@param pr: irrad_ProductionTable object
@return:
"""
pname = pr.name.replace(' ', '_')
path = os.path.join(paths.meta_root, 'productions', '{}.json'.format(pname))
if not os.path.isfile(path):
obj = {}
for attr in INTERFERENCE_KEYS + RATIO_KEYS:
obj[attr] = [getattr(pr, attr), getattr(pr, '{}_err'.format(attr))]
dvc_dump(obj, path)
# private
def _add_interpreted_age(self, ia, d):
p = analysis_path(ia.identifier, ia.repository_identifier, modifier='ia', mode='w')
dvc_dump(d, p)
def _load_repository(self, expid, prog, i, n):
if prog:
prog.change_message('Loading repository {}. {}/{}'.format(expid, i, n))
self.sync_repo(expid)
def _make_record(self, record, prog, i, n, calculate_f_only=False):
meta_repo = self.meta_repo
if prog:
# this accounts for ~85% of the time!!!
prog.change_message('Loading analysis {}. {}/{}'.format(record.record_id, i, n))
expid = record.repository_identifier
if not expid:
exps = record.repository_ids
self.debug('Analysis {} is associated multiple repositories '
'{}'.format(record.record_id, ','.join(exps)))
expid = None
if self.selected_repositories:
rr = [si for si in self.selected_repositories if si in exps]
if rr:
if len(rr) > 1:
expid = self._get_requested_experiment_id(rr)
else:
expid = rr[0]
if expid is None:
expid = self._get_requested_experiment_id(exps)
if isinstance(record, DVCAnalysis):
a = record
else:
# self.debug('use_repo_suffix={} record_id={}'.format(record.use_repository_suffix, record.record_id))
try:
rid = record.record_id
if record.use_repository_suffix:
rid = '-'.join(rid.split('-')[:-1])
a = DVCAnalysis(rid, expid)
a.group_id = record.group_id
except AnalysisNotAnvailableError:
self.info('Analysis {} not available. Trying to clone repository "{}"'.format(rid, expid))
try:
self.sync_repo(expid)
except CredentialException:
self.warning_dialog('Invalid credentials for GitHub/GitLab')
return
try:
a = DVCAnalysis(rid, expid)
except AnalysisNotAnvailableError:
self.warning_dialog('Analysis {} not in repository {}'.format(rid, expid))
return
# get repository branch
a.branch = get_repository_branch(os.path.join(paths.repository_dataset_dir, expid))
# a.set_tag(record.tag)
# load irradiation
if a.irradiation and a.irradiation not in ('NoIrradiation',):
# self.debug('Irradiation {}'.format(a.irradiation))
chronology = meta_repo.get_chronology(a.irradiation)
a.set_chronology(chronology)
frozen_production = self._get_frozen_production(rid, a.repository_identifier)
if frozen_production:
pname, prod = frozen_production.name, frozen_production
else:
pname, prod = meta_repo.get_production(a.irradiation, a.irradiation_level)
a.set_production(pname, prod)
fd = meta_repo.get_flux(record.irradiation,
record.irradiation_level,
record.irradiation_position_position)
a.j = fd['j']
if fd['lambda_k']:
a.arar_constants.lambda_k = fd['lambda_k']
a.standard_age = fd['standard_age']
a.standard_name = fd['standard_name']
a.standard_material = fd['standard_material']
if calculate_f_only:
a.calculate_F()
else:
a.calculate_age()
return a
def _get_frozen_production(self, rid, repo):
path = analysis_path(rid, repo, 'productions')
if path:
return Production(path)
def _get_repository(self, repository_identifier, as_current=True):
repo = None
if as_current:
repo = self.current_repository
path = repository_path(repository_identifier)
if repo is None or repo.path != path:
self.debug('make new repomanager for {}'.format(path))
repo = GitRepoManager()
repo.path = path
repo.open_repo(path)
if as_current:
self.current_repository = repo
return repo
def _bind_preferences(self):
prefid = 'pychron.dvc'
for attr in ('meta_repo_name', 'organization', 'default_team'):
bind_preference(self, attr, '{}.{}'.format(prefid, attr))
prefid = 'pychron.dvc.db'
for attr in ('username', 'password', 'name', 'host', 'kind', 'path'):
bind_preference(self.db, attr, '{}.{}'.format(prefid, attr))
self._meta_repo_name_changed()
def _meta_repo_name_changed(self):
paths.meta_root = os.path.join(paths.dvc_dir, self.meta_repo_name)
def _defaults(self):
self.debug('writing defaults')
# self.db.create_all(Base.metadata)
self.db.add_save_user()
for tag, func in (('irradiation holders', self._add_default_irradiation_holders),
('productions', self._add_default_irradiation_productions),
('load holders', self._add_default_load_holders)):
d = os.path.join(self.meta_repo.path, tag.replace(' ', '_'))
if not os.path.isdir(d):
os.mkdir(d)
if self.auto_add:
func()
elif self.confirmation_dialog('You have no {}. Would you like to add some defaults?'.format(tag)):
func()
def _add_default_irradiation_productions(self):
ds = (('TRIGA.txt', TRIGA),)
self._add_defaults(ds, 'productions')
def _add_default_irradiation_holders(self):
ds = (('24Spokes.txt', HOLDER_24_SPOKES),)
self._add_defaults(ds, 'irradiation_holders', )
def _add_default_load_holders(self):
ds = (('221.txt', LASER221),
('65.txt', LASER65))
self._add_defaults(ds, 'load_holders', self.db.add_load_holder)
def _add_defaults(self, defaults, root, dbfunc=None):
commit = False
repo = self.meta_repo
for name, txt in defaults:
p = os.path.join(repo.path, root, name)
if not os.path.isfile(p):
with open(p, 'w') as wfile:
wfile.write(txt)
repo.add(p, commit=False)
commit = True
if dbfunc:
name = remove_extension(name)
dbfunc(name)
if commit:
repo.commit('added default {}'.format(root.replace('_', ' ')))
def __getattr__(self, item):
try:
return getattr(self.db, item)
except AttributeError:
try:
return getattr(self.meta_repo, item)
except AttributeError, e:
print e, item
# raise DVCException(item)
# defaults
def _db_default(self):
return DVCDatabase(kind='mysql',
username='root',
password='Argon',
host='localhost',
name='pychronmeta')
def _meta_repo_default(self):
return MetaRepo()
if __name__ == '__main__':
paths.build('_dev')
idn = '24138'
exps = ['Irradiation-NM-272']
print find_interpreted_age_path(idn, exps)
# d = DVC(bind=False)
# with open('/Users/ross/Programming/githubauth.txt') as rfile:
# usr = rfile.readline().strip()
# pwd = rfile.readline().strip()
# d.github_user = usr
# d.github_password = pwd
# d.organization = 'NMGRLData'
# d.add_experiment('Irradiation-NM-273')
# ============= EOF =============================================
| apache-2.0 | 502,340,480,352,728,260 | 35.853251 | 120 | 0.557023 | false | 3.910032 | false | false | false |
alfa-addon/addon | plugin.video.alfa/channels/pornxbit.py | 1 | 4893 | # -*- coding: utf-8 -*-
#------------------------------------------------------------
import sys
PY3 = False
if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int
if PY3:
import urllib.parse as urlparse # Es muy lento en PY2. En PY3 es nativo
else:
import urlparse # Usamos el nativo de PY2 que es más rápido
import re
from platformcode import config, logger
from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools
from bs4 import BeautifulSoup
host = 'https://www.pornxbit.com'
# gounlimited, woolf, openload
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(item.clone(title="Peliculas" , action="lista", url=host + "/full-movie/?asgtbndr=1"))
itemlist.append(item.clone(title="Nuevos" , action="lista", url=host + "/porn-videos/?filter=latest&asgtbndr=1"))
itemlist.append(item.clone(title="Mas vistos" , action="lista", url=host + "/porn-videos/?filter=most-viewed&asgtbndr=1"))
itemlist.append(item.clone(title="Mejor valorado" , action="lista", url=host + "/porn-videos/?filter=popular&asgtbndr=1"))
itemlist.append(item.clone(title="Mas largo" , action="lista", url=host + "/porn-videos/?filter=longest&asgtbndr=1"))
itemlist.append(item.clone(title="PornStar" , action="categorias", url=host + "/actors/"))
# itemlist.append(item.clone(title="Canal" , action="categorias", url=host + "/sites/?sort_by=avg_videos_popularity&from=01"))
itemlist.append(item.clone(title="Categorias" , action="categorias", url=host + "/categories/"))
itemlist.append(item.clone(title="Buscar", action="search"))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = "%s/?s=%s" % (host,texto)
try:
return lista(item)
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def categorias(item):
logger.info()
itemlist = []
soup = create_soup(item.url).find('div', class_='videos-list')
matches = soup.find_all('article', id=re.compile(r"^post-\d+"))
for elem in matches:
url = elem.a['href']
title = elem.a['title']
thumbnail = elem.img['src']
plot = ""
itemlist.append(item.clone(action="lista", title=title, url=url,
thumbnail=thumbnail , plot=plot) )
next_page = soup.find('a', class_='current')
if next_page:
next_page = next_page.parent.find_next_sibling("li").a['href']
itemlist.append(item.clone(action="categorias", title="[COLOR blue]Página Siguiente >>[/COLOR]", url=next_page) )
return itemlist
def create_soup(url, referer=None, unescape=False):
logger.info()
if referer:
data = httptools.downloadpage(url, headers={'Referer': referer}).data
else:
data = httptools.downloadpage(url).data
if unescape:
data = scrapertools.unescape(data)
soup = BeautifulSoup(data, "html5lib", from_encoding="utf-8")
return soup
def lista(item):
logger.info()
itemlist = []
soup = create_soup(item.url).find('main')
matches = soup.find_all('article', class_=re.compile(r"^post-\d+"))
for elem in matches:
url = elem.a['href']
title = elem.a['title'].replace("–", "-")
thumbnail = elem.img['data-src']
time = elem.find('span', class_='duration')
quality = elem.find('span', class_='hd-video')
if time:
time = time.text.strip()
else:
time = ""
if quality:
quality = quality.text
title = "[COLOR yellow]%s[/COLOR] [COLOR red]%s[/COLOR] %s" % (time,quality,title)
else:
title = "[COLOR yellow]%s[/COLOR] %s" % (time,title)
plot = ""
itemlist.append(item.clone(action="play", title=title, url=url, thumbnail=thumbnail,
plot=plot, fanart=thumbnail, contentTitle=title ))
next_page = soup.find('a', class_='current')
if next_page:
next_page = next_page.parent.find_next_sibling("li").a['href']
next_page = urlparse.urljoin(item.url,next_page)
itemlist.append(item.clone(action="lista", title="[COLOR blue]Página Siguiente >>[/COLOR]", url=next_page) )
return itemlist
def play(item):
logger.info()
itemlist = []
soup = create_soup(item.url).find('div', class_='responsive-player')
matches = soup.find_all('iframe')
for elem in matches:
url = elem['src']
itemlist.append(item.clone(action="play", title= "%s", contentTitle = item.title, url=url))
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
# logger.debug(url)
return itemlist
| gpl-3.0 | -1,498,140,840,486,068,000 | 36.899225 | 130 | 0.608713 | false | 3.426069 | false | false | false |
rnowotniak/qclib | gpent3.py | 1 | 7110 | #!/usr/bin/python
#
# Genetic Programming algorithm for for evolving
# 3-qubit entanglement production quantum circuit
#
# Copyright (C) 2006 Robert Nowotniak <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# based on:
# [Rub00] Ben I. P. Rubinstein. Evolving quantum circuits using genetic programming
#
from random import choice,randint
from qclib import *
from copy import deepcopy as dc
import sys
class Node:
''' Genetic Programming Tree Node '''
def __init__(self, type, target, control):
self.type = type # T, H, I lub CNot
# T -- Pi/8 gates (shifts the phase with the Pi/4 angle)
self.target = target
self.control = control
def __repr__(self):
return '(%s, %s, %s)' % (self.type, self.target, self.control)
def randNode(qubits = 3):
''' Generate random GP Tree Node '''
return Node(
choice(('I', 'H', 'T', 'CNot')),
''.join([choice(['0', '1']) for x in xrange(qubits)]),
''.join([choice(['0', '1']) for x in xrange(qubits)]))
def randGenotype(qubits = 3, length = 4):
''' Generate random genotype (GP Tree) '''
result = []
for i in xrange(length):
result.append(randNode(qubits))
return result
def phenotype(genotype):
''' Transforms genotype into phenotypes (QCircuits) space '''
stages = []
for n in genotype:
qubits = len(n.target)
trgt = int(n.target, 2) % qubits
ctrl = int(n.control, 2) % qubits
if n.type == 'CNot' and ctrl != trgt:
cnot = CNot(ctrl, trgt)
gates = [cnot]
gates += [I] * (qubits - cnot.size)
gates.reverse()
else:
gates = [I] * (qubits - trgt - 1)
if n.type == 'H':
gates.append(h)
elif n.type == 'I':
gates.append(I)
elif n.type == 'CNot':
gates.append(Not())
elif n.type == 'T':
gates.append(T)
else:
raise Exception()
gates += [I] * (qubits - len(gates))
s = Stage(*gates)
stages.append(s)
return QCircuit(*stages)
input = Ket(0, 3) # |000>
expected = s2 * Ket(0, 3) + s2 * Ket(7, 3)
qubits = 3
def fitness(indiv):
output = indiv(input)
return sum(abs(output.matrix - expected.matrix))
poplen = 100
elitism = 5
nstages = 5
Ngen = 100
pc = 0.7
pm = 0.03
nm = 2
# Generate random population
population = []
for i in xrange(poplen):
population.append(randGenotype(qubits = qubits, length = nstages))
f = open('log.txt', 'w')
print population
best = None
best_val = None
for epoch in xrange(Ngen):
print 'epoch ' + str(epoch)
fvalues = []
for i in xrange(poplen):
fvalues.append(fitness(phenotype(population[i])))
# for roulette selection
sects = [-v for v in fvalues]
m = min(sects)
if m < 0:
sects = [s - m + (0.01 * abs(m)) for s in sects]
sects /= sum(sects)
# accumulated probabilities
for i in xrange(1, poplen):
sects[i] = sects[i - 1] + sects[i]
sects[-1] = 1.0
if best == None or min(fvalues) < best_val:
best_val = min(fvalues)
best = population[fvalues.index(best_val)]
f.write('%d %f %f %f %f\n' % (epoch, best_val, min(fvalues), max(fvalues), sum(fvalues) / len(fvalues)))
newpop = []
# elitism
if elitism > 0:
ranking = {}
for i in xrange(poplen):
ranking[i] = fvalues[i]
kvs = ranking.items()
kvs = [(v,k) for (k,v) in kvs]
kvs.sort()
kvs = [(k,v) for (v,k) in kvs]
for e in xrange(elitism):
newpop.append(dc(population[kvs[e][0]]))
while len(newpop) < poplen:
# select genetic operation probabilistically
r = random()
if r <= pm:
op = 'mutation'
elif r <= pm + pc:
op = 'crossover'
else:
op = 'reproduction'
# select two individuals by roulette
r = random()
for j in xrange(len(sects)):
if r <= sects[j]:
indiv1 = j
break
r = random()
for j in xrange(len(sects)):
if r <= sects[j]:
indiv2 = j
break
if op == 'reproduction':
newpop.append(dc(population[indiv1]))
elif op == 'crossover':
par1 = indiv1
par2 = indiv2
# crossover type
crosstype = choice(('gate', 'target', 'control'))
if crosstype == 'gate':
cp = randint(1, nstages - 1)
child1 = dc(population[par1][:cp] + population[par2][cp:])
child2 = dc(population[par2][:cp] + population[par1][cp:])
elif crosstype == 'target':
child1 = dc(population[par1])
child2 = dc(population[par2])
g1 = choice(child1)
g2 = choice(child2)
cp = randint(0, len(g1.target))
# crossover target qubit binary strings
control1 = g1.target[:cp] + g2.target[cp:]
control2 = g2.target[:cp] + g1.target[cp:]
g1.target = control1
g2.target = control2
elif crosstype == 'control':
child1 = dc(population[par1])
child2 = dc(population[par2])
g1 = choice(child1)
g2 = choice(child2)
cp = randint(0, len(g1.control))
# crossover control qubit binary strings
target1 = g1.target[:cp] + g2.target[cp:]
target2 = g2.target[:cp] + g1.target[cp:]
g1.target = target1
g2.target = target2
else:
assert(False)
# add the offspring to new population
newpop.append(child1)
newpop.append(child2)
elif op == 'mutation':
# mutation
child = dc(population[indiv1])
done = []
for i in xrange(nm):
while True:
gi = choice(xrange(len(child)))
if gi not in done:
break
done.append(gi)
child[gi] = randNode(qubits = qubits)
newpop.append(child)
else:
# NOT REACHABLE
assert(False)
population = newpop
print best_val
print best
f.close()
| gpl-3.0 | 8,001,447,439,096,344,000 | 28.625 | 108 | 0.533333 | false | 3.535554 | false | false | false |
Trion/nightsky | client/Communicator.py | 1 | 3683 | """
Communication class
"""
import sys
import glob
import serial
import time
class Communicator:
"""
Class that's responsible for communication with the arduino
"""
class CommunicationFaultException(Exception):
"""
Exception for communication problems
"""
def __init__(self, expectedMsg, msg):
"""
constructor
@param expectedMsg excepted content of the message
@param msg the content of the message you got
"""
super().__init__(self,
'Expected content: "{0}" | Content you got: "{1}"'
.format(expectedMsg, msg))
class CompressedClipTooLong(Exception):
"""
Exception for too long clips
"""
def __init__(self):
"""
constructor
"""
super().__init__(self, 'Compressed clip is too long!')
serialPort = None
@classmethod
def getPorts(cls):
"""
returns a list of available serial ports
@return a list with available ports
@see https://stackoverflow.com/questions/12090503/listing-available-
com-ports-with-python
"""
if sys.platform.startswith('win'):
ports = ['COM' + str(i + 1) for i in range(256)]
elif sys.platform.startswith('linux') or sys.platform.startswith(
'cygwin'):
# this is to exclude your current terminal "/dev/tty"
ports = glob.glob('/dev/tty[A-Za-z]*')
elif sys.platform.startswith('darwin'):
ports = glob.glob('/dev/tty.*')
else:
raise EnvironmentError('Unsupported platform')
possiblePorts = []
for port in ports:
try:
s = serial.Serial(port)
s.close()
possiblePorts.append(port)
except (OSError, serial.SerialException):
pass
result = []
for port in possiblePorts:
try:
s = serial.Serial(port, 9600, timeout=2, writeTimeout=2)
time.sleep(2) # Sleep for windows
s.write(b'ping')
pong = s.read(4)
if pong == b'nsd1':
result.append(port)
except serial.SerialTimeoutException:
pass
return result
@classmethod
def start(cls, port):
"""
Starts a transmission.
@param port port of the Nightsky device
@raise CommunicationFaultException when the helo response is wrong
"""
cls.serialPort = serial.Serial(port)
time.sleep(2) # Sleep for windows
cls.serialPort.write(b'helo')
heloResp = cls.serialPort.read(4)
if heloResp != b'helo':
cls.serialPort.close()
raise cls.CommunicationFaultException(b'helo', heloResp)
@classmethod
def transmitFrame(cls, frame):
"""
Transmits a frame.
@param frame compressed frame as bytes
"""
cls.serialPort.write(frame)
resp = cls.serialPort.read(4)
if resp == b'done':
raise cls.CompressedClipTooLong()
if resp != b'ok ':
raise cls.CommunicationFaultException(b'ok ', resp)
@classmethod
def end(cls):
"""
Ends the transmission.
"""
cls.serialPort.write(b'\x00\x00')
doneResp = cls.serialPort.read(4) # Wait for "done" from device
cls.serialPort.close()
if doneResp != b'done':
raise cls.CommunicationFaultException(b'done', doneResp)
| mit | -2,156,457,496,349,875,000 | 27.550388 | 79 | 0.537877 | false | 4.475091 | false | false | false |
nardorb/OneStop | handlers/editor.py | 1 | 3213 | from webapp2_extras import security
from handlers import base
from library import messages
from models.profile import Profile
from forms.profile import ProfileForm
from forms.profile_update import ProfileUpdateForm
class EditorHandler(base.BaseHandler):
def create(self):
form = ProfileForm(self.request.POST)
if self.request.method == 'POST' and form.validate():
name = ' '.join([form.first_name.data,
form.last_name.data])
# Create the webapp2_extras.auth user.
model = self.auth.store.user_model
ok, user = model.create_user(form.data['email'],
password_raw=form.data['password'])
if not ok:
self.session.add_flash(messages.EDITOR_CREATE_ERROR,
level='error')
return self.redirect_to('editors.list')
# Create the profile.
profile = Profile(name=name,
email=form.data['email'],
is_editor=True,
auth_user_id=user.key.id())
profile.put()
# Force reload of profile object
Profile.get(profile.key())
self.session.add_flash(messages.EDITOR_CREATE_SUCCESS)
return self.redirect_to('editors.list')
return self.render_to_response('editors/form.haml', {'form': form})
def delete(self, id):
editor = Profile.get_by_id(int(id))
if not editor or not editor.is_editor:
self.session.add_flash(messages.EDITOR_NOT_FOUND, level='error')
return self.redirect_to('editors.list')
editor.delete()
self.session.add_flash(messages.EDITOR_DELETE_SUCCESS)
return self.redirect_to('editors.list')
def list(self):
editors = Profile.all().filter('is_editor = ', True)
return self.render_to_response('editors/list.haml', {'editors': editors})
def update(self, id):
editor = Profile.get_by_id(int(id))
if not editor or not editor.is_editor:
self.session.add_flash(messages.EDITOR_NOT_FOUND, level='error')
self.redirect_to('editors.list')
form = ProfileUpdateForm(self.request.POST, obj=editor)
form.user_id = editor.key().id()
if self.request.method == 'GET':
names = editor.name.split(' ')
form.first_name.data = names[0]
form.last_name.data = names[1]
form.profile_id = editor.key().id()
if self.request.method == 'POST' and form.validate():
# Access to the user model is only needed in this section.
user = editor.get_auth_user()
editor.name = ' '.join([form.first_name.data, form.last_name.data])
if form.email.data != editor.email:
user.auth_ids.remove(editor.email)
user.auth_ids.append(form.email.data)
editor.email = form.email.data
if form.password.data:
user.password = security.generate_password_hash(form.password.data,
length=12)
editor.put()
user.put()
# Force reload of profile object
Profile.get(editor.key())
self.session.add_flash(messages.EDITOR_UPDATE_SUCCESS)
return self.redirect_to('editors.list')
return self.render_to_response('editors/form.haml', {'form': form})
| gpl-2.0 | -8,371,113,525,092,969,000 | 32.123711 | 77 | 0.626517 | false | 3.705882 | false | false | false |
odoo-turkiye/odoo | addons/l10n_tr/res_partner.py | 1 | 1403 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2013-2014 7Gates Interactive Technologies
# <http://www.7gates.co>
# @author Erdem Uney
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class res_partner(osv.osv):
_inherit = "res.partner"
_columns = {
'vat_dept': fields.char('Tax Department', size=32, help="Tax Identification Department."),
}
def _commercial_fields(self, cr, uid, context=None):
return super(res_partner, self)._commercial_fields(cr, uid, context=context) + ['vat_dept'] | agpl-3.0 | 8,542,456,350,436,746,000 | 40.294118 | 99 | 0.597292 | false | 4.200599 | false | false | false |
qilicun/python | python2/diveintopythonzh-cn-5.4b/roman/stage2/roman2.py | 4 | 1300 | """Convert to and from Roman numerals
This program is part of "Dive Into Python", a free Python book for
experienced programmers. Visit http://diveintopython.org/ for the
latest version.
"""
__author__ = "Mark Pilgrim ([email protected])"
__version__ = "$Revision: 1.2 $"
__date__ = "$Date: 2004/05/05 21:57:20 $"
__copyright__ = "Copyright (c) 2001 Mark Pilgrim"
__license__ = "Python"
#Define exceptions
class RomanError(Exception): pass
class OutOfRangeError(RomanError): pass
class NotIntegerError(RomanError): pass
class InvalidRomanNumeralError(RomanError): pass
#Define digit mapping
romanNumeralMap = (('M', 1000),
('CM', 900),
('D', 500),
('CD', 400),
('C', 100),
('XC', 90),
('L', 50),
('XL', 40),
('X', 10),
('IX', 9),
('V', 5),
('IV', 4),
('I', 1))
def toRoman(n):
"""convert integer to Roman numeral"""
result = ""
for numeral, integer in romanNumeralMap:
while n >= integer:
result += numeral
n -= integer
return result
def fromRoman(s):
"""convert Roman numeral to integer"""
pass
| gpl-3.0 | 6,807,275,561,479,470,000 | 27.26087 | 66 | 0.52 | false | 3.68272 | false | false | false |
anatolikalysch/VMAttack | setup.py | 1 | 1739 | # coding=utf-8
__author__ = 'Anatoli Kalysch'
import pip
import sys
from os import getcwd, system, remove
from shutil import copyfile
def do(action, dependency):
return pip.main([action, dependency])
def usage():
print "Usage: python setup.py <install | uninstall>"
dependencies = ["distorm3", 'idacute']
if __name__ == '__main__':
print '[*] Starting dependency handling!'
stub_name = 'VMAttack_plugin_stub.py'
for dependency in dependencies:
try:
if sys.argv[1] in ["install", "uninstall"]:
retval = do(sys.argv[1], dependency)
else:
retval = do("install", dependency)
if retval == 0:
continue
else:
print '[!] An error occured! Please resolve issues with dependencies and try again.'
except IndexError:
usage()
sys.exit(1)
try:
if sys.argv[1] == 'uninstall':
with open('install_dir') as f:
ida_dir = f.read()
if ida_dir:
remove(ida_dir + stub_name)
sys.exit(0)
except:
pass
print '[*] Setting up environment and installing Plugin.'
# set up environment variable on Windows: setx Framework C:\path\to\Framework\
plugin_dir = getcwd()
system('setx VMAttack %s' % plugin_dir)
# copy stub into the IDA PRO Plugin directory
ida_dir = raw_input('Please input full path to the IDA *plugin* folder (e.g. X:\IDA\plugins\): ')
if not ida_dir.endswith(r'\\'):
ida_dir += r'\\'
with open('install_dir', 'w') as f:
f.write(ida_dir)
copyfile(stub_name, ida_dir+stub_name)
print '[*] Install complete. All Done!'
| mit | 2,384,027,443,882,719,000 | 27.048387 | 101 | 0.570443 | false | 3.772234 | false | false | false |
janol77/flask-app | app/modules/user/models.py | 2 | 1267 | from app.db import db
from werkzeug import generate_password_hash
from libs.tools import code_generator
class User(db.Document):
name = db.StringField(required=True)
state = db.StringField(required=True)
code = db.StringField(required=True)
active = db.BooleanField(required=True, default=True)
password = db.StringField()
email = db.StringField(required=True)
rol = db.StringField(required=True)
protected = db.BooleanField(required=True, default=False)
deleted = db.BooleanField(required=True, default=False)
def generate_password(self):
"""Calculate the password."""
self.password = generate_password_hash(self.password)
def generate_code(self):
"""Calculate the password."""
self.code = code_generator(size=30, hexdigits=True)
def is_active(self):
"""True, as all users are active."""
return True
def get_id(self):
"""Return the email address to satisfy Flask-Login's requirements."""
return self.id.__str__()
def is_authenticated(self):
"""Return True if the user is authenticated."""
return self.authenticated
def is_anonymous(self):
"""False, as anonymous users aren't supported."""
return False
| gpl-3.0 | 185,565,925,932,048,540 | 31.487179 | 77 | 0.66693 | false | 4.251678 | false | false | false |
GNOME/libical-glib | tests/recurrence-type.py | 1 | 3366 | #!/usr/bin/env python3
#GI_TYPELIB_PATH=$PREFIX/lib/girepository-1.0/ ./recurrence-type.py
###############################################################################
#
# Copyright (C) 2015 William Yu <[email protected]>
#
# This library is free software: you can redistribute it and/or modify it
# under the terms of version 2.1. of the GNU Lesser General Public License
# as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this library. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from gi.repository import ICalGLib
weekday = ICalGLib.RecurrenceType.day_day_of_week (0);
assert (weekday == ICalGLib.RecurrenceTypeWeekday.NO_WEEKDAY);
weekday = ICalGLib.RecurrenceType.day_day_of_week (1);
assert (weekday == ICalGLib.RecurrenceTypeWeekday.SUNDAY_WEEKDAY);
assert (ICalGLib.RecurrenceType.day_position(15) == 1);
assert (ICalGLib.RecurrenceType.day_position(16) == 2);
assert (ICalGLib.RecurrenceType.day_position(25) == 3);
string = "COUNT=10;FREQ=DAILY";
recurrence = ICalGLib.RecurrenceType.from_string (string);
assert (recurrence.as_string_r() == "FREQ=DAILY;COUNT=10");
by_second = recurrence.get_by_second();
# The value is dependent on the libical version.
assert len(by_second) == 61 or len(by_second) == 62;
by_minute = recurrence.get_by_minute();
assert len(by_minute) == 61;
by_hour = recurrence.get_by_hour();
assert len(by_hour) == 25;
by_day = recurrence.get_by_day();
# The value is dependent on the libical version.
assert len(by_day) == 364 or len(by_day) == 386;
by_month_day = recurrence.get_by_month_day();
assert len(by_month_day) == 32;
by_year_day = recurrence.get_by_year_day();
# The value is dependent on the libical version.
assert len(by_year_day) == 367 or len(by_year_day) == 386;
by_week_no = recurrence.get_by_week_no();
# The value is dependent on the libical version.
assert len(by_week_no) == 54 or len(by_week_no) == 56;
by_month = recurrence.get_by_month();
# The value is dependent on the libical version.
assert len(by_month) == 13 or len(by_month) == 14;
by_set_pos = recurrence.get_by_set_pos();
# The value is dependent on the libical version.
assert len(by_set_pos) == 367 or len(by_set_pos) == 386;
recurrence.set_by_second(0, 1);
by_second = recurrence.get_by_second();
assert by_second[0] == 1;
recurrence = ICalGLib.RecurrenceType.from_string (string);
assert (ICalGLib.recur_string_to_weekday ("MO") == ICalGLib.RecurrenceTypeWeekday.MONDAY_WEEKDAY);
start = 100000;
result = ICalGLib.recur_expand_recurrence (string, start, 10);
secs_per_day = 24*60*60;
for i in range (0, 9):
assert (result[i] == start + i*secs_per_day);
string = "19970101T183248Z/19970102T071625Z";
period = ICalGLib.PeriodType.from_string (string);
start = period.get_start();
iter = ICalGLib.RecurIterator.new (recurrence, start);
timetype = iter.next();
day = timetype.get_day();
ref = 1;
while day != 0:
assert (day == ref);
ref += 1;
timetype = iter.next();
day = timetype.get_day();
| lgpl-2.1 | -7,128,049,963,211,403,000 | 36.820225 | 98 | 0.68776 | false | 3.024259 | false | false | false |
RhubarbSin/arin-whois-rws | payload/poc.py | 1 | 114862 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2011,2012,2013 American Registry for Internet Numbers
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR
# IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
# Generated Sat Aug 31 15:00:09 2013 by generateDS.py version 2.10a.
#
import sys
import getopt
import re as re_
import base64
import datetime as datetime_
etree_ = None
Verbose_import_ = False
(
XMLParser_import_none, XMLParser_import_lxml,
XMLParser_import_elementtree
) = range(3)
XMLParser_import_library = None
try:
# lxml
from lxml import etree as etree_
XMLParser_import_library = XMLParser_import_lxml
if Verbose_import_:
print("running with lxml.etree")
except ImportError:
try:
# cElementTree from Python 2.5+
import xml.etree.cElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with cElementTree on Python 2.5+")
except ImportError:
try:
# ElementTree from Python 2.5+
import xml.etree.ElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with ElementTree on Python 2.5+")
except ImportError:
try:
# normal cElementTree install
import cElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with cElementTree")
except ImportError:
try:
# normal ElementTree install
import elementtree.ElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with ElementTree")
except ImportError:
raise ImportError(
"Failed to import ElementTree from any known place")
def parsexml_(*args, **kwargs):
if (XMLParser_import_library == XMLParser_import_lxml and
'parser' not in kwargs):
# Use the lxml ElementTree compatible parser so that, e.g.,
# we ignore comments.
kwargs['parser'] = etree_.ETCompatXMLParser()
doc = etree_.parse(*args, **kwargs)
return doc
#
# User methods
#
# Calls to the methods in these classes are generated by generateDS.py.
# You can replace these methods by re-implementing the following class
# in a module named generatedssuper.py.
try:
from generatedssuper import GeneratedsSuper
except ImportError, exp:
    class GeneratedsSuper(object):
        """Fallback gds_* format/validate helpers used when no local generatedssuper module is available."""
tzoff_pattern = re_.compile(r'(\+|-)((0\d|1[0-3]):[0-5]\d|14:00)$')
class _FixedOffsetTZ(datetime_.tzinfo):
def __init__(self, offset, name):
self.__offset = datetime_.timedelta(minutes=offset)
self.__name = name
def utcoffset(self, dt):
return self.__offset
def tzname(self, dt):
return self.__name
def dst(self, dt):
return None
def gds_format_string(self, input_data, input_name=''):
return input_data
def gds_validate_string(self, input_data, node, input_name=''):
return input_data
def gds_format_base64(self, input_data, input_name=''):
return base64.b64encode(input_data)
def gds_validate_base64(self, input_data, node, input_name=''):
return input_data
def gds_format_integer(self, input_data, input_name=''):
return '%d' % input_data
def gds_validate_integer(self, input_data, node, input_name=''):
return input_data
def gds_format_integer_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_integer_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
try:
                    int(value)
except (TypeError, ValueError):
raise_parse_error(node, 'Requires sequence of integers')
return input_data
def gds_format_float(self, input_data, input_name=''):
return '%f' % input_data
def gds_validate_float(self, input_data, node, input_name=''):
return input_data
def gds_format_float_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_float_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
try:
float(value)
except (TypeError, ValueError):
raise_parse_error(node, 'Requires sequence of floats')
return input_data
def gds_format_double(self, input_data, input_name=''):
return '%e' % input_data
def gds_validate_double(self, input_data, node, input_name=''):
return input_data
def gds_format_double_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_double_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
try:
float(value)
except (TypeError, ValueError):
raise_parse_error(node, 'Requires sequence of doubles')
return input_data
def gds_format_boolean(self, input_data, input_name=''):
return ('%s' % input_data).lower()
def gds_validate_boolean(self, input_data, node, input_name=''):
return input_data
def gds_format_boolean_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_boolean_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
if value not in ('true', '1', 'false', '0', ):
raise_parse_error(
node,
'Requires sequence of booleans '
'("true", "1", "false", "0")')
return input_data
def gds_validate_datetime(self, input_data, node, input_name=''):
return input_data
def gds_format_datetime(self, input_data, input_name=''):
if input_data.microsecond == 0:
_svalue = '%04d-%02d-%02dT%02d:%02d:%02d' % (
input_data.year,
input_data.month,
input_data.day,
input_data.hour,
input_data.minute,
input_data.second,
)
else:
_svalue = '%04d-%02d-%02dT%02d:%02d:%02d.%s' % (
input_data.year,
input_data.month,
input_data.day,
input_data.hour,
input_data.minute,
input_data.second,
('%f' % (float(input_data.microsecond) / 1000000))[2:],
)
if input_data.tzinfo is not None:
tzoff = input_data.tzinfo.utcoffset(input_data)
if tzoff is not None:
total_seconds = tzoff.seconds + (86400 * tzoff.days)
if total_seconds == 0:
_svalue += 'Z'
else:
if total_seconds < 0:
_svalue += '-'
total_seconds *= -1
else:
_svalue += '+'
hours = total_seconds // 3600
minutes = (total_seconds - (hours * 3600)) // 60
_svalue += '{0:02d}:{1:02d}'.format(hours, minutes)
return _svalue
@classmethod
def gds_parse_datetime(cls, input_data):
tz = None
if input_data[-1] == 'Z':
tz = GeneratedsSuper._FixedOffsetTZ(0, 'GMT')
input_data = input_data[:-1]
else:
results = GeneratedsSuper.tzoff_pattern.search(input_data)
if results is not None:
tzoff_parts = results.group(2).split(':')
tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
if results.group(1) == '-':
tzoff *= -1
tz = GeneratedsSuper._FixedOffsetTZ(
tzoff, results.group(0))
input_data = input_data[:-6]
if len(input_data.split('.')) > 1:
dt = datetime_.datetime.strptime(
input_data, '%Y-%m-%dT%H:%M:%S.%f')
else:
dt = datetime_.datetime.strptime(
input_data, '%Y-%m-%dT%H:%M:%S')
dt = dt.replace(tzinfo=tz)
return dt
def gds_validate_date(self, input_data, node, input_name=''):
return input_data
def gds_format_date(self, input_data, input_name=''):
_svalue = '%04d-%02d-%02d' % (
input_data.year,
input_data.month,
input_data.day,
)
try:
if input_data.tzinfo is not None:
tzoff = input_data.tzinfo.utcoffset(input_data)
if tzoff is not None:
total_seconds = tzoff.seconds + (86400 * tzoff.days)
if total_seconds == 0:
_svalue += 'Z'
else:
if total_seconds < 0:
_svalue += '-'
total_seconds *= -1
else:
_svalue += '+'
hours = total_seconds // 3600
minutes = (total_seconds - (hours * 3600)) // 60
_svalue += '{0:02d}:{1:02d}'.format(hours, minutes)
except AttributeError:
pass
return _svalue
@classmethod
def gds_parse_date(cls, input_data):
tz = None
if input_data[-1] == 'Z':
tz = GeneratedsSuper._FixedOffsetTZ(0, 'GMT')
input_data = input_data[:-1]
else:
results = GeneratedsSuper.tzoff_pattern.search(input_data)
if results is not None:
tzoff_parts = results.group(2).split(':')
tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
if results.group(1) == '-':
tzoff *= -1
tz = GeneratedsSuper._FixedOffsetTZ(
tzoff, results.group(0))
input_data = input_data[:-6]
dt = datetime_.datetime.strptime(input_data, '%Y-%m-%d')
dt = dt.replace(tzinfo=tz)
return dt.date()
def gds_validate_time(self, input_data, node, input_name=''):
return input_data
def gds_format_time(self, input_data, input_name=''):
if input_data.microsecond == 0:
_svalue = '%02d:%02d:%02d' % (
input_data.hour,
input_data.minute,
input_data.second,
)
else:
_svalue = '%02d:%02d:%02d.%s' % (
input_data.hour,
input_data.minute,
input_data.second,
('%f' % (float(input_data.microsecond) / 1000000))[2:],
)
if input_data.tzinfo is not None:
tzoff = input_data.tzinfo.utcoffset(input_data)
if tzoff is not None:
total_seconds = tzoff.seconds + (86400 * tzoff.days)
if total_seconds == 0:
_svalue += 'Z'
else:
if total_seconds < 0:
_svalue += '-'
total_seconds *= -1
else:
_svalue += '+'
hours = total_seconds // 3600
minutes = (total_seconds - (hours * 3600)) // 60
_svalue += '{0:02d}:{1:02d}'.format(hours, minutes)
return _svalue
@classmethod
def gds_parse_time(cls, input_data):
tz = None
if input_data[-1] == 'Z':
tz = GeneratedsSuper._FixedOffsetTZ(0, 'GMT')
input_data = input_data[:-1]
else:
results = GeneratedsSuper.tzoff_pattern.search(input_data)
if results is not None:
tzoff_parts = results.group(2).split(':')
tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
if results.group(1) == '-':
tzoff *= -1
tz = GeneratedsSuper._FixedOffsetTZ(
tzoff, results.group(0))
input_data = input_data[:-6]
if len(input_data.split('.')) > 1:
dt = datetime_.datetime.strptime(input_data, '%H:%M:%S.%f')
else:
dt = datetime_.datetime.strptime(input_data, '%H:%M:%S')
dt = dt.replace(tzinfo=tz)
return dt.time()
def gds_str_lower(self, instring):
return instring.lower()
def get_path_(self, node):
path_list = []
self.get_path_list_(node, path_list)
path_list.reverse()
path = '/'.join(path_list)
return path
Tag_strip_pattern_ = re_.compile(r'\{.*\}')
def get_path_list_(self, node, path_list):
if node is None:
return
tag = GeneratedsSuper.Tag_strip_pattern_.sub('', node.tag)
if tag:
path_list.append(tag)
self.get_path_list_(node.getparent(), path_list)
def get_class_obj_(self, node, default_class=None):
class_obj1 = default_class
if 'xsi' in node.nsmap:
classname = node.get('{%s}type' % node.nsmap['xsi'])
if classname is not None:
names = classname.split(':')
if len(names) == 2:
classname = names[1]
class_obj2 = globals().get(classname)
if class_obj2 is not None:
class_obj1 = class_obj2
return class_obj1
def gds_build_any(self, node, type_name=None):
return None
@classmethod
def gds_reverse_node_mapping(cls, mapping):
return dict(((v, k) for k, v in mapping.iteritems()))
#
# If you have installed IPython you can uncomment and use the following.
# IPython is available from http://ipython.scipy.org/.
#
## from IPython.Shell import IPShellEmbed
## args = ''
## ipshell = IPShellEmbed(args,
## banner = 'Dropping into IPython',
## exit_msg = 'Leaving Interpreter, back to program.')
# Then use the following line where and when you want to drop into the
# IPython shell:
# ipshell('<some message> -- Entering ipshell.\nHit Ctrl-D to exit')
#
# Globals
#
ExternalEncoding = 'ascii'
Tag_pattern_ = re_.compile(r'({.*})?(.*)')
String_cleanup_pat_ = re_.compile(r"[\n\r\s]+")
Namespace_extract_pat_ = re_.compile(r'{(.*)}(.*)')
#
# Support/utility functions.
#
def showIndent(outfile, level, pretty_print=True):
if pretty_print:
for idx in range(level):
outfile.write(' ')
def quote_xml(inStr):
if not inStr:
return ''
s1 = (isinstance(inStr, basestring) and inStr or
'%s' % inStr)
s1 = s1.replace('&', '&')
s1 = s1.replace('<', '<')
s1 = s1.replace('>', '>')
return s1
def quote_attrib(inStr):
s1 = (isinstance(inStr, basestring) and inStr or
'%s' % inStr)
s1 = s1.replace('&', '&')
s1 = s1.replace('<', '<')
s1 = s1.replace('>', '>')
if '"' in s1:
if "'" in s1:
s1 = '"%s"' % s1.replace('"', """)
else:
s1 = "'%s'" % s1
else:
s1 = '"%s"' % s1
return s1
def quote_python(inStr):
s1 = inStr
if s1.find("'") == -1:
if s1.find('\n') == -1:
return "'%s'" % s1
else:
return "'''%s'''" % s1
else:
if s1.find('"') != -1:
s1 = s1.replace('"', '\\"')
if s1.find('\n') == -1:
return '"%s"' % s1
else:
return '"""%s"""' % s1
def get_all_text_(node):
if node.text is not None:
text = node.text
else:
text = ''
for child in node:
if child.tail is not None:
text += child.tail
return text
def find_attr_value_(attr_name, node):
attrs = node.attrib
attr_parts = attr_name.split(':')
value = None
if len(attr_parts) == 1:
value = attrs.get(attr_name)
elif len(attr_parts) == 2:
prefix, name = attr_parts
namespace = node.nsmap.get(prefix)
if namespace is not None:
value = attrs.get('{%s}%s' % (namespace, name, ))
return value
class GDSParseError(Exception):
pass
def raise_parse_error(node, msg):
if XMLParser_import_library == XMLParser_import_lxml:
msg = '%s (element %s/line %d)' % (
msg, node.tag, node.sourceline, )
else:
msg = '%s (element %s)' % (msg, node.tag, )
raise GDSParseError(msg)
class MixedContainer:
    """Holds one piece of mixed XML content (text, a simple value, or a complex element) and knows how to export it."""
# Constants for category:
CategoryNone = 0
CategoryText = 1
CategorySimple = 2
CategoryComplex = 3
# Constants for content_type:
TypeNone = 0
TypeText = 1
TypeString = 2
TypeInteger = 3
TypeFloat = 4
TypeDecimal = 5
TypeDouble = 6
TypeBoolean = 7
TypeBase64 = 8
def __init__(self, category, content_type, name, value):
self.category = category
self.content_type = content_type
self.name = name
self.value = value
def getCategory(self):
return self.category
def getContenttype(self, content_type):
return self.content_type
def getValue(self):
return self.value
def getName(self):
return self.name
def export(self, outfile, level, name, namespace, pretty_print=True):
if self.category == MixedContainer.CategoryText:
# Prevent exporting empty content as empty lines.
if self.value.strip():
outfile.write(self.value)
elif self.category == MixedContainer.CategorySimple:
self.exportSimple(outfile, level, name)
else: # category == MixedContainer.CategoryComplex
self.value.export(outfile, level, namespace, name, pretty_print)
def exportSimple(self, outfile, level, name):
if self.content_type == MixedContainer.TypeString:
outfile.write('<%s>%s</%s>' % (
self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeInteger or \
self.content_type == MixedContainer.TypeBoolean:
outfile.write('<%s>%d</%s>' % (
self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeFloat or \
self.content_type == MixedContainer.TypeDecimal:
outfile.write('<%s>%f</%s>' % (
self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeDouble:
outfile.write('<%s>%g</%s>' % (
self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeBase64:
outfile.write('<%s>%s</%s>' % (
self.name, base64.b64encode(self.value), self.name))
def to_etree(self, element):
if self.category == MixedContainer.CategoryText:
# Prevent exporting empty content as empty lines.
if self.value.strip():
if len(element) > 0:
if element[-1].tail is None:
element[-1].tail = self.value
else:
element[-1].tail += self.value
else:
if element.text is None:
element.text = self.value
else:
element.text += self.value
elif self.category == MixedContainer.CategorySimple:
subelement = etree_.SubElement(element, '%s' % self.name)
subelement.text = self.to_etree_simple()
else: # category == MixedContainer.CategoryComplex
self.value.to_etree(element)
def to_etree_simple(self):
if self.content_type == MixedContainer.TypeString:
text = self.value
elif (self.content_type == MixedContainer.TypeInteger or
self.content_type == MixedContainer.TypeBoolean):
text = '%d' % self.value
elif (self.content_type == MixedContainer.TypeFloat or
self.content_type == MixedContainer.TypeDecimal):
text = '%f' % self.value
elif self.content_type == MixedContainer.TypeDouble:
text = '%g' % self.value
elif self.content_type == MixedContainer.TypeBase64:
text = '%s' % base64.b64encode(self.value)
return text
def exportLiteral(self, outfile, level, name):
if self.category == MixedContainer.CategoryText:
showIndent(outfile, level)
outfile.write(
'model_.MixedContainer(%d, %d, "%s", "%s"),\n' % (
self.category, self.content_type, self.name, self.value))
elif self.category == MixedContainer.CategorySimple:
showIndent(outfile, level)
outfile.write(
'model_.MixedContainer(%d, %d, "%s", "%s"),\n' % (
self.category, self.content_type, self.name, self.value))
else: # category == MixedContainer.CategoryComplex
showIndent(outfile, level)
outfile.write(
'model_.MixedContainer(%d, %d, "%s",\n' % (
self.category, self.content_type, self.name,))
self.value.exportLiteral(outfile, level + 1)
showIndent(outfile, level)
outfile.write(')\n')
class MemberSpec_(object):
def __init__(self, name='', data_type='', container=0):
self.name = name
self.data_type = data_type
self.container = container
def set_name(self, name): self.name = name
def get_name(self): return self.name
def set_data_type(self, data_type): self.data_type = data_type
def get_data_type_chain(self): return self.data_type
def get_data_type(self):
if isinstance(self.data_type, list):
if len(self.data_type) > 0:
return self.data_type[-1]
else:
return 'xs:string'
else:
return self.data_type
def set_container(self, container): self.container = container
def get_container(self): return self.container
def _cast(typ, value):
if typ is None or value is None:
return value
return typ(value)
#
# Data representation classes.
#
class poc(GeneratedsSuper):
    """Generated data class for an ARIN Whois-RWS point of contact (POC) payload."""
subclass = None
superclass = None
def __init__(self, termsOfUse=None, registrationDate=None, ref=None, note=None, asns=None, city=None, companyName=None, iso3166_1=None, firstName=None, handle=None, lastName=None, emails=None, middleName=None, nets=None, orgs=None, phones=None, postalCode=None, comment=None, iso3166_2=None, streetAddress=None, updateDate=None, anytypeobjs_=None):
self.termsOfUse = _cast(None, termsOfUse)
if isinstance(registrationDate, basestring):
initvalue_ = datetime_.datetime.strptime(registrationDate, '%Y-%m-%dT%H:%M:%S')
else:
initvalue_ = registrationDate
self.registrationDate = initvalue_
self.ref = ref
self.note = note
self.asns = asns
self.city = city
self.companyName = companyName
self.iso3166_1 = iso3166_1
self.firstName = firstName
self.handle = handle
self.lastName = lastName
self.emails = emails
self.middleName = middleName
self.nets = nets
self.orgs = orgs
self.phones = phones
self.postalCode = postalCode
self.comment = comment
self.iso3166_2 = iso3166_2
self.streetAddress = streetAddress
if isinstance(updateDate, basestring):
initvalue_ = datetime_.datetime.strptime(updateDate, '%Y-%m-%dT%H:%M:%S')
else:
initvalue_ = updateDate
self.updateDate = initvalue_
self.anytypeobjs_ = anytypeobjs_
def factory(*args_, **kwargs_):
if poc.subclass:
return poc.subclass(*args_, **kwargs_)
else:
return poc(*args_, **kwargs_)
factory = staticmethod(factory)
def get_registrationDate(self): return self.registrationDate
def set_registrationDate(self, registrationDate): self.registrationDate = registrationDate
def get_ref(self): return self.ref
def set_ref(self, ref): self.ref = ref
def get_note(self): return self.note
def set_note(self, note): self.note = note
def get_asns(self): return self.asns
def set_asns(self, asns): self.asns = asns
def get_city(self): return self.city
def set_city(self, city): self.city = city
def get_companyName(self): return self.companyName
def set_companyName(self, companyName): self.companyName = companyName
def get_iso3166_1(self): return self.iso3166_1
def set_iso3166_1(self, iso3166_1): self.iso3166_1 = iso3166_1
def get_firstName(self): return self.firstName
def set_firstName(self, firstName): self.firstName = firstName
def get_handle(self): return self.handle
def set_handle(self, handle): self.handle = handle
def get_lastName(self): return self.lastName
def set_lastName(self, lastName): self.lastName = lastName
def get_emails(self): return self.emails
def set_emails(self, emails): self.emails = emails
def get_middleName(self): return self.middleName
def set_middleName(self, middleName): self.middleName = middleName
def get_nets(self): return self.nets
def set_nets(self, nets): self.nets = nets
def get_orgs(self): return self.orgs
def set_orgs(self, orgs): self.orgs = orgs
def get_phones(self): return self.phones
def set_phones(self, phones): self.phones = phones
def get_postalCode(self): return self.postalCode
def set_postalCode(self, postalCode): self.postalCode = postalCode
def get_comment(self): return self.comment
def set_comment(self, comment): self.comment = comment
def get_iso3166_2(self): return self.iso3166_2
def set_iso3166_2(self, iso3166_2): self.iso3166_2 = iso3166_2
def get_streetAddress(self): return self.streetAddress
def set_streetAddress(self, streetAddress): self.streetAddress = streetAddress
def get_updateDate(self): return self.updateDate
def set_updateDate(self, updateDate): self.updateDate = updateDate
def get_anytypeobjs_(self): return self.anytypeobjs_
def set_anytypeobjs_(self, anytypeobjs_): self.anytypeobjs_ = anytypeobjs_
def get_termsOfUse(self): return self.termsOfUse
def set_termsOfUse(self, termsOfUse): self.termsOfUse = termsOfUse
def hasContent_(self):
if (
self.registrationDate is not None or
self.ref is not None or
self.note is not None or
self.asns is not None or
self.city is not None or
self.companyName is not None or
self.iso3166_1 is not None or
self.firstName is not None or
self.handle is not None or
self.lastName is not None or
self.emails is not None or
self.middleName is not None or
self.nets is not None or
self.orgs is not None or
self.phones is not None or
self.postalCode is not None or
self.comment is not None or
self.iso3166_2 is not None or
self.streetAddress is not None or
self.updateDate is not None or
self.anytypeobjs_ is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='v1:', name_='poc', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='poc')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='v1:', name_='poc'):
if self.termsOfUse is not None and 'termsOfUse' not in already_processed:
already_processed.add('termsOfUse')
outfile.write(' termsOfUse=%s' % (self.gds_format_string(quote_attrib(self.termsOfUse).encode(ExternalEncoding), input_name='termsOfUse'), ))
def exportChildren(self, outfile, level, namespace_='v1:', name_='poc', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.registrationDate is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sregistrationDate>%s</%sregistrationDate>%s' % (namespace_, self.gds_format_datetime(self.registrationDate, input_name='registrationDate'), namespace_, eol_))
if self.ref is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sref>%s</%sref>%s' % (namespace_, self.gds_format_string(quote_xml(self.ref).encode(ExternalEncoding), input_name='ref'), namespace_, eol_))
if self.note is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%snote>%s</%snote>%s' % (namespace_, self.gds_format_string(quote_xml(self.note).encode(ExternalEncoding), input_name='note'), namespace_, eol_))
if self.asns is not None:
self.asns.export(outfile, level, namespace_, name_='asns', pretty_print=pretty_print)
if self.city is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%scity>%s</%scity>%s' % (namespace_, self.gds_format_string(quote_xml(self.city).encode(ExternalEncoding), input_name='city'), namespace_, eol_))
if self.companyName is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%scompanyName>%s</%scompanyName>%s' % (namespace_, self.gds_format_string(quote_xml(self.companyName).encode(ExternalEncoding), input_name='companyName'), namespace_, eol_))
if self.iso3166_1 is not None:
self.iso3166_1.export(outfile, level, namespace_, name_='iso3166-1', pretty_print=pretty_print)
if self.firstName is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sfirstName>%s</%sfirstName>%s' % (namespace_, self.gds_format_string(quote_xml(self.firstName).encode(ExternalEncoding), input_name='firstName'), namespace_, eol_))
if self.handle is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%shandle>%s</%shandle>%s' % (namespace_, self.gds_format_string(quote_xml(self.handle).encode(ExternalEncoding), input_name='handle'), namespace_, eol_))
if self.lastName is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%slastName>%s</%slastName>%s' % (namespace_, self.gds_format_string(quote_xml(self.lastName).encode(ExternalEncoding), input_name='lastName'), namespace_, eol_))
if self.emails is not None:
self.emails.export(outfile, level, namespace_, name_='emails', pretty_print=pretty_print)
if self.middleName is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%smiddleName>%s</%smiddleName>%s' % (namespace_, self.gds_format_string(quote_xml(self.middleName).encode(ExternalEncoding), input_name='middleName'), namespace_, eol_))
if self.nets is not None:
self.nets.export(outfile, level, namespace_, name_='nets', pretty_print=pretty_print)
if self.orgs is not None:
self.orgs.export(outfile, level, namespace_, name_='orgs', pretty_print=pretty_print)
if self.phones is not None:
self.phones.export(outfile, level, namespace_, name_='phones', pretty_print=pretty_print)
if self.postalCode is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%spostalCode>%s</%spostalCode>%s' % (namespace_, self.gds_format_string(quote_xml(self.postalCode).encode(ExternalEncoding), input_name='postalCode'), namespace_, eol_))
if self.comment is not None:
self.comment.export(outfile, level, namespace_, name_='comment', pretty_print=pretty_print)
if self.iso3166_2 is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%siso3166-2>%s</%siso3166-2>%s' % (namespace_, self.gds_format_string(quote_xml(self.iso3166_2).encode(ExternalEncoding), input_name='iso3166-2'), namespace_, eol_))
if self.streetAddress is not None:
self.streetAddress.export(outfile, level, namespace_, name_='streetAddress', pretty_print=pretty_print)
if self.updateDate is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%supdateDate>%s</%supdateDate>%s' % (namespace_, self.gds_format_datetime(self.updateDate, input_name='updateDate'), namespace_, eol_))
if self.anytypeobjs_ is not None:
self.anytypeobjs_.export(outfile, level, namespace_, pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='poc'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.termsOfUse is not None and 'termsOfUse' not in already_processed:
already_processed.add('termsOfUse')
showIndent(outfile, level)
outfile.write('termsOfUse="%s",\n' % (self.termsOfUse,))
def exportLiteralChildren(self, outfile, level, name_):
if self.registrationDate is not None:
showIndent(outfile, level)
outfile.write('registrationDate=model_.GeneratedsSuper.gds_parse_datetime("%s"),\n' % self.gds_format_datetime(self.registrationDate, input_name='registrationDate'))
if self.ref is not None:
showIndent(outfile, level)
outfile.write('ref=%s,\n' % quote_python(self.ref).encode(ExternalEncoding))
if self.note is not None:
showIndent(outfile, level)
outfile.write('note=%s,\n' % quote_python(self.note).encode(ExternalEncoding))
if self.asns is not None:
showIndent(outfile, level)
outfile.write('asns=model_.asns(\n')
self.asns.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
if self.city is not None:
showIndent(outfile, level)
outfile.write('city=%s,\n' % quote_python(self.city).encode(ExternalEncoding))
if self.companyName is not None:
showIndent(outfile, level)
outfile.write('companyName=%s,\n' % quote_python(self.companyName).encode(ExternalEncoding))
if self.iso3166_1 is not None:
showIndent(outfile, level)
outfile.write('iso3166_1=model_.iso3166_1(\n')
self.iso3166_1.exportLiteral(outfile, level, name_='iso3166_1')
showIndent(outfile, level)
outfile.write('),\n')
if self.firstName is not None:
showIndent(outfile, level)
outfile.write('firstName=%s,\n' % quote_python(self.firstName).encode(ExternalEncoding))
if self.handle is not None:
showIndent(outfile, level)
outfile.write('handle=%s,\n' % quote_python(self.handle).encode(ExternalEncoding))
if self.lastName is not None:
showIndent(outfile, level)
outfile.write('lastName=%s,\n' % quote_python(self.lastName).encode(ExternalEncoding))
if self.emails is not None:
showIndent(outfile, level)
outfile.write('emails=model_.emails(\n')
self.emails.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
if self.middleName is not None:
showIndent(outfile, level)
outfile.write('middleName=%s,\n' % quote_python(self.middleName).encode(ExternalEncoding))
if self.nets is not None:
showIndent(outfile, level)
outfile.write('nets=model_.nets(\n')
self.nets.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
if self.orgs is not None:
showIndent(outfile, level)
outfile.write('orgs=model_.orgs(\n')
self.orgs.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
if self.phones is not None:
showIndent(outfile, level)
outfile.write('phones=model_.phones(\n')
self.phones.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
if self.postalCode is not None:
showIndent(outfile, level)
outfile.write('postalCode=%s,\n' % quote_python(self.postalCode).encode(ExternalEncoding))
if self.comment is not None:
showIndent(outfile, level)
outfile.write('comment=model_.comment(\n')
self.comment.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
if self.iso3166_2 is not None:
showIndent(outfile, level)
outfile.write('iso3166_2=%s,\n' % quote_python(self.iso3166_2).encode(ExternalEncoding))
if self.streetAddress is not None:
showIndent(outfile, level)
outfile.write('streetAddress=model_.streetAddress(\n')
self.streetAddress.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
if self.updateDate is not None:
showIndent(outfile, level)
outfile.write('updateDate=model_.GeneratedsSuper.gds_parse_datetime("%s"),\n' % self.gds_format_datetime(self.updateDate, input_name='updateDate'))
if self.anytypeobjs_ is not None:
showIndent(outfile, level)
outfile.write('anytypeobjs_=model_.anytypeobjs_(\n')
self.anytypeobjs_.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('termsOfUse', node)
if value is not None and 'termsOfUse' not in already_processed:
already_processed.add('termsOfUse')
self.termsOfUse = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'registrationDate':
sval_ = child_.text
dval_ = self.gds_parse_datetime(sval_)
self.registrationDate = dval_
elif nodeName_ == 'ref':
ref_ = child_.text
ref_ = self.gds_validate_string(ref_, node, 'ref')
self.ref = ref_
elif nodeName_ == 'note':
note_ = child_.text
note_ = self.gds_validate_string(note_, node, 'note')
self.note = note_
elif nodeName_ == 'asns':
obj_ = asns.factory()
obj_.build(child_)
self.set_asns(obj_)
elif nodeName_ == 'city':
city_ = child_.text
city_ = self.gds_validate_string(city_, node, 'city')
self.city = city_
elif nodeName_ == 'companyName':
companyName_ = child_.text
companyName_ = self.gds_validate_string(companyName_, node, 'companyName')
self.companyName = companyName_
elif nodeName_ == 'iso3166-1':
obj_ = iso3166_1.factory()
obj_.build(child_)
self.set_iso3166_1(obj_)
elif nodeName_ == 'firstName':
firstName_ = child_.text
firstName_ = self.gds_validate_string(firstName_, node, 'firstName')
self.firstName = firstName_
elif nodeName_ == 'handle':
handle_ = child_.text
handle_ = self.gds_validate_string(handle_, node, 'handle')
self.handle = handle_
elif nodeName_ == 'lastName':
lastName_ = child_.text
lastName_ = self.gds_validate_string(lastName_, node, 'lastName')
self.lastName = lastName_
elif nodeName_ == 'emails':
obj_ = emails.factory()
obj_.build(child_)
self.set_emails(obj_)
elif nodeName_ == 'middleName':
middleName_ = child_.text
middleName_ = self.gds_validate_string(middleName_, node, 'middleName')
self.middleName = middleName_
elif nodeName_ == 'nets':
obj_ = nets.factory()
obj_.build(child_)
self.set_nets(obj_)
elif nodeName_ == 'orgs':
obj_ = orgs.factory()
obj_.build(child_)
self.set_orgs(obj_)
elif nodeName_ == 'phones':
obj_ = phone.factory()
obj_.build(child_)
self.set_phones(obj_)
elif nodeName_ == 'postalCode':
postalCode_ = child_.text
postalCode_ = self.gds_validate_string(postalCode_, node, 'postalCode')
self.postalCode = postalCode_
elif nodeName_ == 'comment':
obj_ = comment.factory()
obj_.build(child_)
self.set_comment(obj_)
elif nodeName_ == 'iso3166-2':
iso3166_2_ = child_.text
iso3166_2_ = self.gds_validate_string(iso3166_2_, node, 'iso3166_2')
self.iso3166_2 = iso3166_2_
elif nodeName_ == 'streetAddress':
obj_ = streetAddress.factory()
obj_.build(child_)
self.set_streetAddress(obj_)
elif nodeName_ == 'updateDate':
sval_ = child_.text
dval_ = self.gds_parse_datetime(sval_)
self.updateDate = dval_
else:
obj_ = self.gds_build_any(child_, 'poc')
if obj_ is not None:
self.set_anytypeobjs_(obj_)
# end class poc
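# Generated binding for the <asns> child of a poc record: an optional
# <limitExceeded> marker, an <asnRef> reference, arbitrary extension
# content (anytypeobjs_), and a string termsOfUse attribute.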
class asns(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, termsOfUse=None, limitExceeded=None, asnRef=None, anytypeobjs_=None):
self.termsOfUse = _cast(None, termsOfUse)
self.limitExceeded = limitExceeded
self.asnRef = asnRef
self.anytypeobjs_ = anytypeobjs_
def factory(*args_, **kwargs_):
if asns.subclass:
return asns.subclass(*args_, **kwargs_)
else:
return asns(*args_, **kwargs_)
factory = staticmethod(factory)
def get_limitExceeded(self): return self.limitExceeded
def set_limitExceeded(self, limitExceeded): self.limitExceeded = limitExceeded
def get_asnRef(self): return self.asnRef
def set_asnRef(self, asnRef): self.asnRef = asnRef
def get_anytypeobjs_(self): return self.anytypeobjs_
def set_anytypeobjs_(self, anytypeobjs_): self.anytypeobjs_ = anytypeobjs_
def get_termsOfUse(self): return self.termsOfUse
def set_termsOfUse(self, termsOfUse): self.termsOfUse = termsOfUse
def hasContent_(self):
if (
self.limitExceeded is not None or
self.asnRef is not None or
self.anytypeobjs_ is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='v1:', name_='asns', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='asns')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='v1:', name_='asns'):
if self.termsOfUse is not None and 'termsOfUse' not in already_processed:
already_processed.add('termsOfUse')
outfile.write(' termsOfUse=%s' % (self.gds_format_string(quote_attrib(self.termsOfUse).encode(ExternalEncoding), input_name='termsOfUse'), ))
def exportChildren(self, outfile, level, namespace_='v1:', name_='asns', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.limitExceeded is not None:
self.limitExceeded.export(outfile, level, namespace_, name_='limitExceeded', pretty_print=pretty_print)
if self.asnRef is not None:
self.asnRef.export(outfile, level, namespace_, name_='asnRef', pretty_print=pretty_print)
if self.anytypeobjs_ is not None:
self.anytypeobjs_.export(outfile, level, namespace_, pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='asns'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.termsOfUse is not None and 'termsOfUse' not in already_processed:
already_processed.add('termsOfUse')
showIndent(outfile, level)
outfile.write('termsOfUse="%s",\n' % (self.termsOfUse,))
def exportLiteralChildren(self, outfile, level, name_):
if self.limitExceeded is not None:
showIndent(outfile, level)
outfile.write('limitExceeded=model_.limitExceeded(\n')
self.limitExceeded.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
if self.asnRef is not None:
showIndent(outfile, level)
outfile.write('asnRef=model_.asnRef(\n')
self.asnRef.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
if self.anytypeobjs_ is not None:
showIndent(outfile, level)
outfile.write('anytypeobjs_=model_.anytypeobjs_(\n')
self.anytypeobjs_.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('termsOfUse', node)
if value is not None and 'termsOfUse' not in already_processed:
already_processed.add('termsOfUse')
self.termsOfUse = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'limitExceeded':
obj_ = limitExceeded.factory()
obj_.build(child_)
self.set_limitExceeded(obj_)
elif nodeName_ == 'asnRef':
obj_ = asnRef.factory()
obj_.build(child_)
self.set_asnRef(obj_)
else:
obj_ = self.gds_build_any(child_, 'asns')
if obj_ is not None:
self.set_anytypeobjs_(obj_)
# end class asns
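# Generated binding for the <iso3166-1> element: two- and three-letter
# country codes (code2/code3), the country name, and an e164 value
# (presumably the telephone country code).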
class iso3166_1(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, code2=None, code3=None, name=None, e164=None):
self.code2 = code2
self.code3 = code3
self.name = name
self.e164 = e164
def factory(*args_, **kwargs_):
if iso3166_1.subclass:
return iso3166_1.subclass(*args_, **kwargs_)
else:
return iso3166_1(*args_, **kwargs_)
factory = staticmethod(factory)
def get_code2(self): return self.code2
def set_code2(self, code2): self.code2 = code2
def get_code3(self): return self.code3
def set_code3(self, code3): self.code3 = code3
def get_name(self): return self.name
def set_name(self, name): self.name = name
def get_e164(self): return self.e164
def set_e164(self, e164): self.e164 = e164
def hasContent_(self):
if (
self.code2 is not None or
self.code3 is not None or
self.name is not None or
self.e164 is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='v1:', name_='iso3166-1', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='iso3166-1')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='v1:', name_='iso3166-1'):
pass
def exportChildren(self, outfile, level, namespace_='v1:', name_='iso3166-1', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.code2 is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%scode2>%s</%scode2>%s' % (namespace_, self.gds_format_string(quote_xml(self.code2).encode(ExternalEncoding), input_name='code2'), namespace_, eol_))
if self.code3 is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%scode3>%s</%scode3>%s' % (namespace_, self.gds_format_string(quote_xml(self.code3).encode(ExternalEncoding), input_name='code3'), namespace_, eol_))
if self.name is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sname>%s</%sname>%s' % (namespace_, self.gds_format_string(quote_xml(self.name).encode(ExternalEncoding), input_name='name'), namespace_, eol_))
if self.e164 is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%se164>%s</%se164>%s' % (namespace_, self.gds_format_string(quote_xml(self.e164).encode(ExternalEncoding), input_name='e164'), namespace_, eol_))
def exportLiteral(self, outfile, level, name_='iso3166-1'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
if self.code2 is not None:
showIndent(outfile, level)
outfile.write('code2=%s,\n' % quote_python(self.code2).encode(ExternalEncoding))
if self.code3 is not None:
showIndent(outfile, level)
outfile.write('code3=%s,\n' % quote_python(self.code3).encode(ExternalEncoding))
if self.name is not None:
showIndent(outfile, level)
outfile.write('name=%s,\n' % quote_python(self.name).encode(ExternalEncoding))
if self.e164 is not None:
showIndent(outfile, level)
outfile.write('e164=%s,\n' % quote_python(self.e164).encode(ExternalEncoding))
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'code2':
code2_ = child_.text
code2_ = self.gds_validate_string(code2_, node, 'code2')
self.code2 = code2_
elif nodeName_ == 'code3':
code3_ = child_.text
code3_ = self.gds_validate_string(code3_, node, 'code3')
self.code3 = code3_
elif nodeName_ == 'name':
name_ = child_.text
name_ = self.gds_validate_string(name_, node, 'name')
self.name = name_
elif nodeName_ == 'e164':
e164_ = child_.text
e164_ = self.gds_validate_string(e164_, node, 'e164')
self.e164 = e164_
# end class iso3166_1
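# Generated binding for the <emails> container: a plain list of <email>
# text children.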
class emails(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, email=None):
if email is None:
self.email = []
else:
self.email = email
def factory(*args_, **kwargs_):
if emails.subclass:
return emails.subclass(*args_, **kwargs_)
else:
return emails(*args_, **kwargs_)
factory = staticmethod(factory)
def get_email(self): return self.email
def set_email(self, email): self.email = email
def add_email(self, value): self.email.append(value)
    def insert_email(self, index, value): self.email[index] = value  # despite the name, this overwrites the item at index rather than inserting

def hasContent_(self):
if (
self.email
):
return True
else:
return False
def export(self, outfile, level, namespace_='v1:', name_='emails', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='emails')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='v1:', name_='emails'):
pass
def exportChildren(self, outfile, level, namespace_='v1:', name_='emails', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for email_ in self.email:
showIndent(outfile, level, pretty_print)
outfile.write('<%semail>%s</%semail>%s' % (namespace_, self.gds_format_string(quote_xml(email_).encode(ExternalEncoding), input_name='email'), namespace_, eol_))
def exportLiteral(self, outfile, level, name_='emails'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('email=[\n')
level += 1
for email_ in self.email:
showIndent(outfile, level)
outfile.write('%s,\n' % quote_python(email_).encode(ExternalEncoding))
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'email':
email_ = child_.text
email_ = self.gds_validate_string(email_, node, 'email')
self.email.append(email_)
# end class emails
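# Generated binding for the <nets> child of a poc record: same shape as
# asns above, but referencing networks through a <netRef> child.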
class nets(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, termsOfUse=None, limitExceeded=None, netRef=None, anytypeobjs_=None):
self.termsOfUse = _cast(None, termsOfUse)
self.limitExceeded = limitExceeded
self.netRef = netRef
self.anytypeobjs_ = anytypeobjs_
def factory(*args_, **kwargs_):
if nets.subclass:
return nets.subclass(*args_, **kwargs_)
else:
return nets(*args_, **kwargs_)
factory = staticmethod(factory)
def get_limitExceeded(self): return self.limitExceeded
def set_limitExceeded(self, limitExceeded): self.limitExceeded = limitExceeded
def get_netRef(self): return self.netRef
def set_netRef(self, netRef): self.netRef = netRef
def get_anytypeobjs_(self): return self.anytypeobjs_
def set_anytypeobjs_(self, anytypeobjs_): self.anytypeobjs_ = anytypeobjs_
def get_termsOfUse(self): return self.termsOfUse
def set_termsOfUse(self, termsOfUse): self.termsOfUse = termsOfUse
def hasContent_(self):
if (
self.limitExceeded is not None or
self.netRef is not None or
self.anytypeobjs_ is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='v1:', name_='nets', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='nets')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='v1:', name_='nets'):
if self.termsOfUse is not None and 'termsOfUse' not in already_processed:
already_processed.add('termsOfUse')
outfile.write(' termsOfUse=%s' % (self.gds_format_string(quote_attrib(self.termsOfUse).encode(ExternalEncoding), input_name='termsOfUse'), ))
def exportChildren(self, outfile, level, namespace_='v1:', name_='nets', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.limitExceeded is not None:
self.limitExceeded.export(outfile, level, namespace_, name_='limitExceeded', pretty_print=pretty_print)
if self.netRef is not None:
self.netRef.export(outfile, level, namespace_, name_='netRef', pretty_print=pretty_print)
if self.anytypeobjs_ is not None:
self.anytypeobjs_.export(outfile, level, namespace_, pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='nets'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.termsOfUse is not None and 'termsOfUse' not in already_processed:
already_processed.add('termsOfUse')
showIndent(outfile, level)
outfile.write('termsOfUse="%s",\n' % (self.termsOfUse,))
def exportLiteralChildren(self, outfile, level, name_):
if self.limitExceeded is not None:
showIndent(outfile, level)
outfile.write('limitExceeded=model_.limitExceeded(\n')
self.limitExceeded.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
if self.netRef is not None:
showIndent(outfile, level)
outfile.write('netRef=model_.netRef(\n')
self.netRef.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
if self.anytypeobjs_ is not None:
showIndent(outfile, level)
outfile.write('anytypeobjs_=model_.anytypeobjs_(\n')
self.anytypeobjs_.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('termsOfUse', node)
if value is not None and 'termsOfUse' not in already_processed:
already_processed.add('termsOfUse')
self.termsOfUse = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'limitExceeded':
obj_ = limitExceeded.factory()
obj_.build(child_)
self.set_limitExceeded(obj_)
elif nodeName_ == 'netRef':
obj_ = netRef.factory()
obj_.build(child_)
self.set_netRef(obj_)
else:
obj_ = self.gds_build_any(child_, 'nets')
if obj_ is not None:
self.set_anytypeobjs_(obj_)
# end class nets
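# Generated binding for the <orgs> container: an optional <limitExceeded>
# marker plus a list of <orgPocLinkRef> children linking the poc to
# organisations.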
class orgs(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, termsOfUse=None, limitExceeded=None, orgPocLinkRef=None):
self.termsOfUse = _cast(None, termsOfUse)
self.limitExceeded = limitExceeded
if orgPocLinkRef is None:
self.orgPocLinkRef = []
else:
self.orgPocLinkRef = orgPocLinkRef
def factory(*args_, **kwargs_):
if orgs.subclass:
return orgs.subclass(*args_, **kwargs_)
else:
return orgs(*args_, **kwargs_)
factory = staticmethod(factory)
def get_limitExceeded(self): return self.limitExceeded
def set_limitExceeded(self, limitExceeded): self.limitExceeded = limitExceeded
def get_orgPocLinkRef(self): return self.orgPocLinkRef
def set_orgPocLinkRef(self, orgPocLinkRef): self.orgPocLinkRef = orgPocLinkRef
def add_orgPocLinkRef(self, value): self.orgPocLinkRef.append(value)
    def insert_orgPocLinkRef(self, index, value): self.orgPocLinkRef[index] = value  # despite the name, this overwrites the item at index rather than inserting
def get_termsOfUse(self): return self.termsOfUse
def set_termsOfUse(self, termsOfUse): self.termsOfUse = termsOfUse
def hasContent_(self):
if (
self.limitExceeded is not None or
self.orgPocLinkRef
):
return True
else:
return False
def export(self, outfile, level, namespace_='v1:', name_='orgs', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='orgs')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='v1:', name_='orgs'):
if self.termsOfUse is not None and 'termsOfUse' not in already_processed:
already_processed.add('termsOfUse')
outfile.write(' termsOfUse=%s' % (self.gds_format_string(quote_attrib(self.termsOfUse).encode(ExternalEncoding), input_name='termsOfUse'), ))
def exportChildren(self, outfile, level, namespace_='v1:', name_='orgs', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.limitExceeded is not None:
self.limitExceeded.export(outfile, level, namespace_, name_='limitExceeded', pretty_print=pretty_print)
for orgPocLinkRef_ in self.orgPocLinkRef:
orgPocLinkRef_.export(outfile, level, namespace_, name_='orgPocLinkRef', pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='orgs'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.termsOfUse is not None and 'termsOfUse' not in already_processed:
already_processed.add('termsOfUse')
showIndent(outfile, level)
outfile.write('termsOfUse="%s",\n' % (self.termsOfUse,))
def exportLiteralChildren(self, outfile, level, name_):
if self.limitExceeded is not None:
showIndent(outfile, level)
outfile.write('limitExceeded=model_.limitExceeded(\n')
self.limitExceeded.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
showIndent(outfile, level)
outfile.write('orgPocLinkRef=[\n')
level += 1
for orgPocLinkRef_ in self.orgPocLinkRef:
showIndent(outfile, level)
outfile.write('model_.orgPocLinkRef(\n')
orgPocLinkRef_.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('termsOfUse', node)
if value is not None and 'termsOfUse' not in already_processed:
already_processed.add('termsOfUse')
self.termsOfUse = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'limitExceeded':
obj_ = limitExceeded.factory()
obj_.build(child_)
self.set_limitExceeded(obj_)
elif nodeName_ == 'orgPocLinkRef':
obj_ = orgPocLinkRef.factory()
obj_.build(child_)
self.orgPocLinkRef.append(obj_)
# end class orgs
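# Generated binding for the <comment> element: an ordered list of <line>
# children (see the line class below).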
class comment(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, line=None):
if line is None:
self.line = []
else:
self.line = line
def factory(*args_, **kwargs_):
if comment.subclass:
return comment.subclass(*args_, **kwargs_)
else:
return comment(*args_, **kwargs_)
factory = staticmethod(factory)
def get_line(self): return self.line
def set_line(self, line): self.line = line
def add_line(self, value): self.line.append(value)
    def insert_line(self, index, value): self.line[index] = value  # despite the name, this overwrites the item at index rather than inserting
def hasContent_(self):
if (
self.line
):
return True
else:
return False
def export(self, outfile, level, namespace_='v1:', name_='comment', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='comment')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='v1:', name_='comment'):
pass
def exportChildren(self, outfile, level, namespace_='v1:', name_='comment', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for line_ in self.line:
line_.export(outfile, level, namespace_, name_='line', pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='comment'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('line=[\n')
level += 1
for line_ in self.line:
showIndent(outfile, level)
outfile.write('model_.line(\n')
line_.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'line':
obj_ = line.factory()
obj_.build(child_)
self.line.append(obj_)
# end class comment
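# Generated binding for the <streetAddress> element: structurally the same
# as comment, an ordered list of <line> children.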
class streetAddress(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, line=None):
if line is None:
self.line = []
else:
self.line = line
def factory(*args_, **kwargs_):
if streetAddress.subclass:
return streetAddress.subclass(*args_, **kwargs_)
else:
return streetAddress(*args_, **kwargs_)
factory = staticmethod(factory)
def get_line(self): return self.line
def set_line(self, line): self.line = line
def add_line(self, value): self.line.append(value)
    def insert_line(self, index, value): self.line[index] = value  # despite the name, this overwrites the item at index rather than inserting
def hasContent_(self):
if (
self.line
):
return True
else:
return False
def export(self, outfile, level, namespace_='v1:', name_='streetAddress', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='streetAddress')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='v1:', name_='streetAddress'):
pass
def exportChildren(self, outfile, level, namespace_='v1:', name_='streetAddress', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for line_ in self.line:
line_.export(outfile, level, namespace_, name_='line', pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='streetAddress'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('line=[\n')
level += 1
for line_ in self.line:
showIndent(outfile, level)
outfile.write('model_.line(\n')
line_.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'line':
obj_ = line.factory()
obj_.build(child_)
self.line.append(obj_)
# end class streetAddress
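# Generated binding for the <limitExceeded> element: its text content is
# kept verbatim in valueOf_, and the integer "limit" attribute presumably
# records the cut-off that was applied.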
class limitExceeded(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, limit=None, valueOf_=None):
self.limit = _cast(int, limit)
self.valueOf_ = valueOf_
def factory(*args_, **kwargs_):
if limitExceeded.subclass:
return limitExceeded.subclass(*args_, **kwargs_)
else:
return limitExceeded(*args_, **kwargs_)
factory = staticmethod(factory)
def get_limit(self): return self.limit
def set_limit(self, limit): self.limit = limit
def get_valueOf_(self): return self.valueOf_
def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
def hasContent_(self):
if (
self.valueOf_
):
return True
else:
return False
def export(self, outfile, level, namespace_='v1:', name_='limitExceeded', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='limitExceeded')
if self.hasContent_():
outfile.write('>')
outfile.write(str(self.valueOf_).encode(ExternalEncoding))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='v1:', name_='limitExceeded'):
if self.limit is not None and 'limit' not in already_processed:
already_processed.add('limit')
outfile.write(' limit="%s"' % self.gds_format_integer(self.limit, input_name='limit'))
def exportChildren(self, outfile, level, namespace_='v1:', name_='limitExceeded', fromsubclass_=False, pretty_print=True):
pass
def exportLiteral(self, outfile, level, name_='limitExceeded'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('valueOf_ = """%s""",\n' % (self.valueOf_,))
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.limit is not None and 'limit' not in already_processed:
already_processed.add('limit')
showIndent(outfile, level)
outfile.write('limit=%d,\n' % (self.limit,))
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
self.valueOf_ = get_all_text_(node)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('limit', node)
if value is not None and 'limit' not in already_processed:
already_processed.add('limit')
try:
self.limit = int(value)
except ValueError, exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class limitExceeded
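# Generated binding for <asnRef>: text content (presumably the reference
# URL) with handle and name attributes.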
class asnRef(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, handle=None, name=None, valueOf_=None):
self.handle = _cast(None, handle)
self.name = _cast(None, name)
self.valueOf_ = valueOf_
def factory(*args_, **kwargs_):
if asnRef.subclass:
return asnRef.subclass(*args_, **kwargs_)
else:
return asnRef(*args_, **kwargs_)
factory = staticmethod(factory)
def get_handle(self): return self.handle
def set_handle(self, handle): self.handle = handle
def get_name(self): return self.name
def set_name(self, name): self.name = name
def get_valueOf_(self): return self.valueOf_
def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
def hasContent_(self):
if (
self.valueOf_
):
return True
else:
return False
def export(self, outfile, level, namespace_='v1:', name_='asnRef', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='asnRef')
if self.hasContent_():
outfile.write('>')
outfile.write(str(self.valueOf_).encode(ExternalEncoding))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='v1:', name_='asnRef'):
if self.handle is not None and 'handle' not in already_processed:
already_processed.add('handle')
outfile.write(' handle=%s' % (self.gds_format_string(quote_attrib(self.handle).encode(ExternalEncoding), input_name='handle'), ))
if self.name is not None and 'name' not in already_processed:
already_processed.add('name')
outfile.write(' name=%s' % (self.gds_format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), ))
def exportChildren(self, outfile, level, namespace_='v1:', name_='asnRef', fromsubclass_=False, pretty_print=True):
pass
def exportLiteral(self, outfile, level, name_='asnRef'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('valueOf_ = """%s""",\n' % (self.valueOf_,))
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.handle is not None and 'handle' not in already_processed:
already_processed.add('handle')
showIndent(outfile, level)
outfile.write('handle="%s",\n' % (self.handle,))
if self.name is not None and 'name' not in already_processed:
already_processed.add('name')
showIndent(outfile, level)
outfile.write('name="%s",\n' % (self.name,))
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
self.valueOf_ = get_all_text_(node)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('handle', node)
if value is not None and 'handle' not in already_processed:
already_processed.add('handle')
self.handle = value
value = find_attr_value_('name', node)
if value is not None and 'name' not in already_processed:
already_processed.add('name')
self.name = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class asnRef
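# Generated binding for <netRef>: like asnRef, with additional
# startAddress/endAddress attributes describing the referenced network range.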
class netRef(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, startAddress=None, endAddress=None, handle=None, name=None, valueOf_=None):
self.startAddress = _cast(None, startAddress)
self.endAddress = _cast(None, endAddress)
self.handle = _cast(None, handle)
self.name = _cast(None, name)
self.valueOf_ = valueOf_
def factory(*args_, **kwargs_):
if netRef.subclass:
return netRef.subclass(*args_, **kwargs_)
else:
return netRef(*args_, **kwargs_)
factory = staticmethod(factory)
def get_startAddress(self): return self.startAddress
def set_startAddress(self, startAddress): self.startAddress = startAddress
def get_endAddress(self): return self.endAddress
def set_endAddress(self, endAddress): self.endAddress = endAddress
def get_handle(self): return self.handle
def set_handle(self, handle): self.handle = handle
def get_name(self): return self.name
def set_name(self, name): self.name = name
def get_valueOf_(self): return self.valueOf_
def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
def hasContent_(self):
if (
self.valueOf_
):
return True
else:
return False
def export(self, outfile, level, namespace_='v1:', name_='netRef', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='netRef')
if self.hasContent_():
outfile.write('>')
outfile.write(str(self.valueOf_).encode(ExternalEncoding))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='v1:', name_='netRef'):
if self.startAddress is not None and 'startAddress' not in already_processed:
already_processed.add('startAddress')
outfile.write(' startAddress=%s' % (self.gds_format_string(quote_attrib(self.startAddress).encode(ExternalEncoding), input_name='startAddress'), ))
if self.endAddress is not None and 'endAddress' not in already_processed:
already_processed.add('endAddress')
outfile.write(' endAddress=%s' % (self.gds_format_string(quote_attrib(self.endAddress).encode(ExternalEncoding), input_name='endAddress'), ))
if self.handle is not None and 'handle' not in already_processed:
already_processed.add('handle')
outfile.write(' handle=%s' % (self.gds_format_string(quote_attrib(self.handle).encode(ExternalEncoding), input_name='handle'), ))
if self.name is not None and 'name' not in already_processed:
already_processed.add('name')
outfile.write(' name=%s' % (self.gds_format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), ))
def exportChildren(self, outfile, level, namespace_='v1:', name_='netRef', fromsubclass_=False, pretty_print=True):
pass
def exportLiteral(self, outfile, level, name_='netRef'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('valueOf_ = """%s""",\n' % (self.valueOf_,))
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.startAddress is not None and 'startAddress' not in already_processed:
already_processed.add('startAddress')
showIndent(outfile, level)
outfile.write('startAddress="%s",\n' % (self.startAddress,))
if self.endAddress is not None and 'endAddress' not in already_processed:
already_processed.add('endAddress')
showIndent(outfile, level)
outfile.write('endAddress="%s",\n' % (self.endAddress,))
if self.handle is not None and 'handle' not in already_processed:
already_processed.add('handle')
showIndent(outfile, level)
outfile.write('handle="%s",\n' % (self.handle,))
if self.name is not None and 'name' not in already_processed:
already_processed.add('name')
showIndent(outfile, level)
outfile.write('name="%s",\n' % (self.name,))
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
self.valueOf_ = get_all_text_(node)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('startAddress', node)
if value is not None and 'startAddress' not in already_processed:
already_processed.add('startAddress')
self.startAddress = value
value = find_attr_value_('endAddress', node)
if value is not None and 'endAddress' not in already_processed:
already_processed.add('endAddress')
self.endAddress = value
value = find_attr_value_('handle', node)
if value is not None and 'handle' not in already_processed:
already_processed.add('handle')
self.handle = value
value = find_attr_value_('name', node)
if value is not None and 'name' not in already_processed:
already_processed.add('name')
self.name = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class netRef
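# Generated binding for <orgPocLinkRef>: text content plus attributes
# describing the org/poc relationship (handle, name and the relPoc* fields).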
class orgPocLinkRef(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, relPocDescription=None, handle=None, name=None, relPocHandle=None, relPocName=None, relPocFunction=None, valueOf_=None):
self.relPocDescription = _cast(None, relPocDescription)
self.handle = _cast(None, handle)
self.name = _cast(None, name)
self.relPocHandle = _cast(None, relPocHandle)
self.relPocName = _cast(None, relPocName)
self.relPocFunction = _cast(None, relPocFunction)
self.valueOf_ = valueOf_
def factory(*args_, **kwargs_):
if orgPocLinkRef.subclass:
return orgPocLinkRef.subclass(*args_, **kwargs_)
else:
return orgPocLinkRef(*args_, **kwargs_)
factory = staticmethod(factory)
def get_relPocDescription(self): return self.relPocDescription
def set_relPocDescription(self, relPocDescription): self.relPocDescription = relPocDescription
def get_handle(self): return self.handle
def set_handle(self, handle): self.handle = handle
def get_name(self): return self.name
def set_name(self, name): self.name = name
def get_relPocHandle(self): return self.relPocHandle
def set_relPocHandle(self, relPocHandle): self.relPocHandle = relPocHandle
def get_relPocName(self): return self.relPocName
def set_relPocName(self, relPocName): self.relPocName = relPocName
def get_relPocFunction(self): return self.relPocFunction
def set_relPocFunction(self, relPocFunction): self.relPocFunction = relPocFunction
def get_valueOf_(self): return self.valueOf_
def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
def hasContent_(self):
if (
self.valueOf_
):
return True
else:
return False
def export(self, outfile, level, namespace_='v1:', name_='orgPocLinkRef', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='orgPocLinkRef')
if self.hasContent_():
outfile.write('>')
outfile.write(str(self.valueOf_).encode(ExternalEncoding))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='v1:', name_='orgPocLinkRef'):
if self.relPocDescription is not None and 'relPocDescription' not in already_processed:
already_processed.add('relPocDescription')
outfile.write(' relPocDescription=%s' % (self.gds_format_string(quote_attrib(self.relPocDescription).encode(ExternalEncoding), input_name='relPocDescription'), ))
if self.handle is not None and 'handle' not in already_processed:
already_processed.add('handle')
outfile.write(' handle=%s' % (self.gds_format_string(quote_attrib(self.handle).encode(ExternalEncoding), input_name='handle'), ))
if self.name is not None and 'name' not in already_processed:
already_processed.add('name')
outfile.write(' name=%s' % (self.gds_format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), ))
if self.relPocHandle is not None and 'relPocHandle' not in already_processed:
already_processed.add('relPocHandle')
outfile.write(' relPocHandle=%s' % (self.gds_format_string(quote_attrib(self.relPocHandle).encode(ExternalEncoding), input_name='relPocHandle'), ))
if self.relPocName is not None and 'relPocName' not in already_processed:
already_processed.add('relPocName')
outfile.write(' relPocName=%s' % (self.gds_format_string(quote_attrib(self.relPocName).encode(ExternalEncoding), input_name='relPocName'), ))
if self.relPocFunction is not None and 'relPocFunction' not in already_processed:
already_processed.add('relPocFunction')
outfile.write(' relPocFunction=%s' % (self.gds_format_string(quote_attrib(self.relPocFunction).encode(ExternalEncoding), input_name='relPocFunction'), ))
def exportChildren(self, outfile, level, namespace_='v1:', name_='orgPocLinkRef', fromsubclass_=False, pretty_print=True):
pass
def exportLiteral(self, outfile, level, name_='orgPocLinkRef'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('valueOf_ = """%s""",\n' % (self.valueOf_,))
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.relPocDescription is not None and 'relPocDescription' not in already_processed:
already_processed.add('relPocDescription')
showIndent(outfile, level)
outfile.write('relPocDescription="%s",\n' % (self.relPocDescription,))
if self.handle is not None and 'handle' not in already_processed:
already_processed.add('handle')
showIndent(outfile, level)
outfile.write('handle="%s",\n' % (self.handle,))
if self.name is not None and 'name' not in already_processed:
already_processed.add('name')
showIndent(outfile, level)
outfile.write('name="%s",\n' % (self.name,))
if self.relPocHandle is not None and 'relPocHandle' not in already_processed:
already_processed.add('relPocHandle')
showIndent(outfile, level)
outfile.write('relPocHandle="%s",\n' % (self.relPocHandle,))
if self.relPocName is not None and 'relPocName' not in already_processed:
already_processed.add('relPocName')
showIndent(outfile, level)
outfile.write('relPocName="%s",\n' % (self.relPocName,))
if self.relPocFunction is not None and 'relPocFunction' not in already_processed:
already_processed.add('relPocFunction')
showIndent(outfile, level)
outfile.write('relPocFunction="%s",\n' % (self.relPocFunction,))
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
self.valueOf_ = get_all_text_(node)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('relPocDescription', node)
if value is not None and 'relPocDescription' not in already_processed:
already_processed.add('relPocDescription')
self.relPocDescription = value
value = find_attr_value_('handle', node)
if value is not None and 'handle' not in already_processed:
already_processed.add('handle')
self.handle = value
value = find_attr_value_('name', node)
if value is not None and 'name' not in already_processed:
already_processed.add('name')
self.name = value
value = find_attr_value_('relPocHandle', node)
if value is not None and 'relPocHandle' not in already_processed:
already_processed.add('relPocHandle')
self.relPocHandle = value
value = find_attr_value_('relPocName', node)
if value is not None and 'relPocName' not in already_processed:
already_processed.add('relPocName')
self.relPocName = value
value = find_attr_value_('relPocFunction', node)
if value is not None and 'relPocFunction' not in already_processed:
already_processed.add('relPocFunction')
self.relPocFunction = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class orgPocLinkRef
class phone(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, phone=None):
self.phone = phone
def factory(*args_, **kwargs_):
if phone.subclass:
return phone.subclass(*args_, **kwargs_)
else:
return phone(*args_, **kwargs_)
factory = staticmethod(factory)
def get_phone(self): return self.phone
def set_phone(self, phone): self.phone = phone
def hasContent_(self):
if (
self.phone is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='v1:', name_='phone', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='phone')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='v1:', name_='phone'):
pass
def exportChildren(self, outfile, level, namespace_='v1:', name_='phone', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.phone is not None:
self.phone.export(outfile, level, namespace_, name_='phone', pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='phone'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
if self.phone is not None:
showIndent(outfile, level)
outfile.write('phone=model_.phone(\n')
self.phone.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'phone':
obj_ = phone.factory()
obj_.build(child_)
self.set_phone(obj_)
# end class phone
class line(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, number=None, valueOf_=None, mixedclass_=None, content_=None):
self.number = _cast(int, number)
self.valueOf_ = valueOf_
if mixedclass_ is None:
self.mixedclass_ = MixedContainer
else:
self.mixedclass_ = mixedclass_
if content_ is None:
self.content_ = []
else:
self.content_ = content_
self.valueOf_ = valueOf_
def factory(*args_, **kwargs_):
if line.subclass:
return line.subclass(*args_, **kwargs_)
else:
return line(*args_, **kwargs_)
factory = staticmethod(factory)
def get_number(self): return self.number
def set_number(self, number): self.number = number
def get_valueOf_(self): return self.valueOf_
def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
def hasContent_(self):
if (
self.valueOf_
):
return True
else:
return False
def export(self, outfile, level, namespace_='v1:', name_='line', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='line')
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
def exportAttributes(self, outfile, level, already_processed, namespace_='v1:', name_='line'):
if self.number is not None and 'number' not in already_processed:
already_processed.add('number')
outfile.write(' number="%s"' % self.gds_format_integer(self.number, input_name='number'))
def exportChildren(self, outfile, level, namespace_='v1:', name_='line', fromsubclass_=False, pretty_print=True):
pass
def exportLiteral(self, outfile, level, name_='line'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('valueOf_ = """%s""",\n' % (self.valueOf_,))
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.number is not None and 'number' not in already_processed:
already_processed.add('number')
showIndent(outfile, level)
outfile.write('number=%d,\n' % (self.number,))
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
self.valueOf_ = get_all_text_(node)
if node.text is not None:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
MixedContainer.TypeNone, '', node.text)
self.content_.append(obj_)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('number', node)
if value is not None and 'number' not in already_processed:
already_processed.add('number')
try:
self.number = int(value)
except ValueError, exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if not fromsubclass_ and child_.tail is not None:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
MixedContainer.TypeNone, '', child_.tail)
self.content_.append(obj_)
pass
# end class line
class type_(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, description=None, code=None):
self.description = description
self.code = code
def factory(*args_, **kwargs_):
if type_.subclass:
return type_.subclass(*args_, **kwargs_)
else:
return type_(*args_, **kwargs_)
factory = staticmethod(factory)
def get_description(self): return self.description
def set_description(self, description): self.description = description
def get_code(self): return self.code
def set_code(self, code): self.code = code
def hasContent_(self):
if (
self.description is not None or
self.code is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='v1:', name_='type', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='type')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='v1:', name_='type'):
pass
def exportChildren(self, outfile, level, namespace_='v1:', name_='type', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.description is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sdescription>%s</%sdescription>%s' % (namespace_, self.gds_format_string(quote_xml(self.description).encode(ExternalEncoding), input_name='description'), namespace_, eol_))
if self.code is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%scode>%s</%scode>%s' % (namespace_, self.gds_format_string(quote_xml(self.code).encode(ExternalEncoding), input_name='code'), namespace_, eol_))
def exportLiteral(self, outfile, level, name_='type'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
if self.description is not None:
showIndent(outfile, level)
outfile.write('description=%s,\n' % quote_python(self.description).encode(ExternalEncoding))
if self.code is not None:
showIndent(outfile, level)
outfile.write('code=%s,\n' % quote_python(self.code).encode(ExternalEncoding))
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'description':
description_ = child_.text
description_ = self.gds_validate_string(description_, node, 'description')
self.description = description_
elif nodeName_ == 'code':
code_ = child_.text
code_ = self.gds_validate_string(code_, node, 'code')
self.code = code_
# end class type_
GDSClassesMapping = {
'phones': phone,
}
USAGE_TEXT = """
Usage: python <Parser>.py [ -s ] <in_xml_file>
"""
def usage():
print USAGE_TEXT
sys.exit(1)
def get_root_tag(node):
tag = Tag_pattern_.match(node.tag).groups()[-1]
rootClass = GDSClassesMapping.get(tag)
if rootClass is None:
rootClass = globals().get(tag)
return tag, rootClass
def parse(inFileName):
doc = parsexml_(inFileName)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'poc'
rootClass = poc
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
sys.stdout.write('<?xml version="1.0" ?>\n')
rootObj.export(
sys.stdout, 0, name_=rootTag,
        namespacedef_='xmlns="http://www.arin.net/whoisrws/core/v1"',
pretty_print=True)
return rootObj
def parseEtree(inFileName):
doc = parsexml_(inFileName)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'poc'
rootClass = poc
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
mapping = {}
rootElement = rootObj.to_etree(None, name_=rootTag, mapping_=mapping)
reverse_mapping = rootObj.gds_reverse_node_mapping(mapping)
content = etree_.tostring(
rootElement, pretty_print=True,
xml_declaration=True, encoding="utf-8")
sys.stdout.write(content)
sys.stdout.write('\n')
return rootObj, rootElement, mapping, reverse_mapping
def parseString(inString):
from StringIO import StringIO
doc = parsexml_(StringIO(inString))
rootNode = doc.getroot()
roots = get_root_tag(rootNode)
rootClass = roots[1]
if rootClass is None:
rootClass = poc
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
sys.stdout.write('<?xml version="1.0" ?>\n')
rootObj.export(
sys.stdout, 0, name_="poc",
        namespacedef_='xmlns="http://www.arin.net/whoisrws/core/v1"')
return rootObj
def parseLiteral(inFileName):
doc = parsexml_(inFileName)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'poc'
rootClass = poc
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
sys.stdout.write('#from poc import *\n\n')
sys.stdout.write('import poc as model_\n\n')
sys.stdout.write('rootObj = model_.rootTag(\n')
rootObj.exportLiteral(sys.stdout, 0, name_=rootTag)
sys.stdout.write(')\n')
return rootObj
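# Illustrative usage sketch (an assumption, not part of the generated module);
# the input file name below is hypothetical:
#
#     rootObj = parse('poc.xml')          # echoes the document back as XML
#     rootObj = parseLiteral('poc.xml')   # echoes it back as Python literal code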
def main():
args = sys.argv[1:]
if len(args) == 1:
parse(args[0])
else:
usage()
if __name__ == '__main__':
#import pdb; pdb.set_trace()
main()
__all__ = [
"asnRef",
"asns",
"comment",
"emails",
"iso3166_1",
"limitExceeded",
"line",
"netRef",
"nets",
"orgPocLinkRef",
"orgs",
"phone",
"poc",
"streetAddress",
"type_"
]
| mit | -6,584,860,182,229,985,000 | 43.763055 | 352 | 0.592276 | false | 4.060019 | false | false | false |
wagnerand/zamboni | mkt/monolith/resources.py | 2 | 1858 | import json
import logging
from django.db import transaction
from rest_framework import mixins, serializers, status, viewsets
from rest_framework.response import Response
from mkt.api.authentication import RestOAuthAuthentication
from mkt.api.authorization import GroupPermission
from mkt.api.base import CORSMixin
from .forms import MonolithForm
from .models import MonolithRecord
logger = logging.getLogger('z.monolith')
class MonolithSerializer(serializers.ModelSerializer):
class Meta:
model = MonolithRecord
def transform_value(self, obj, value):
return json.loads(value)
class MonolithViewSet(CORSMixin, mixins.DestroyModelMixin,
mixins.ListModelMixin, mixins.RetrieveModelMixin,
viewsets.GenericViewSet):
cors_allowed_methods = ('get', 'delete')
permission_classes = [GroupPermission('Monolith', 'API')]
authentication_classes = [RestOAuthAuthentication]
serializer_class = MonolithSerializer
def get_queryset(self):
form = MonolithForm(self.request.QUERY_PARAMS)
if not form.is_valid():
return Response(form.errors, status=status.HTTP_400_BAD_REQUEST)
key = form.cleaned_data['key']
start = form.cleaned_data['start']
end = form.cleaned_data['end']
qs = MonolithRecord.objects.all()
if key:
qs = qs.filter(key=key)
if start is not None:
qs = qs.filter(recorded__gte=start)
if end is not None:
qs = qs.filter(recorded__lt=end)
return qs
@transaction.commit_on_success
def delete(self, request, *args, **kwargs):
qs = self.filter_queryset(self.get_queryset())
logger.info('Deleting %d monolith resources' % qs.count())
qs.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
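# Illustrative sketch (an assumption, not part of the original module): the
# query parameters accepted here come from MonolithForm (key, start, end); the
# URL prefix below is hypothetical.
#
#     GET    /api/monolith/data/?key=apps_added&start=2014-01-01&end=2014-02-01
#     DELETE /api/monolith/data/?end=2014-02-01   # purge rows already exported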
| bsd-3-clause | 2,453,797,273,857,822,000 | 31.034483 | 76 | 0.678149 | false | 3.928118 | false | false | false |
diafygi/ascentsolar | server.py | 1 | 2596 | import reads
import writes
import re
import MySQLdb
#Run in the command line: "python server.py"
#Open browser and go to http://localhost:8888/
#Go to http://localhost:8888/recreate/ to reset database to example
def URLS():
yield ("^/$", writes.login)
yield ("^/logout/$", writes.logout)
yield ("^/manufacturer/$", reads.manufacturer)
yield ("^/manufacturer/[0-9]+/$", reads.manufacturer_id)
yield ("^/manufacturer/[0-9]+/edit/$", writes.manufacturer_id_edit)
yield ("^/manufacturer/add/$", writes.manufacturer_add)
yield ("^/product/$", reads.product)
yield ("^/product/[0-9]+/$", reads.product_id)
yield ("^/product/[0-9]+/edit/$", writes.product_id_edit)
yield ("^/product/add/$", writes.product_add)
yield ("^/product/mass-assign/$", writes.product_mass_assign)
yield ("^/distributor/$", reads.distributor)
yield ("^/distributor/[0-9]+/$", reads.distributor_id)
yield ("^/distributor/[0-9]+/edit/$", writes.distributor_id_edit)
yield ("^/distributor/add/$", writes.distributor_add)
yield ("^/seller/$", reads.seller)
yield ("^/seller/[0-9]+/$", reads.seller_id)
yield ("^/seller/[0-9]+/edit/$", writes.seller_id_edit)
yield ("^/seller/[0-9]+/commission/$", writes.seller_id_commission)
yield ("^/seller/add/$", writes.seller_add)
yield ("^/order/$", reads.order)
yield ("^/order/[0-9]+/$", reads.order_id)
yield ("^/order/[0-9]+/edit/$", writes.order_id_edit)
yield ("^/order/[0-9]+/fulfill/$", writes.order_id_fulfill)
yield ("^/order/[0-9]+/unfulfill/$", writes.order_id_unfulfill)
yield ("^/order/[0-9]+/commission/$", writes.order_id_commission)
yield ("^/order/add/$", writes.order_add)
yield ("^/pay/$", writes.pay)
yield ("^/customer/$", reads.customer)
yield ("^/customer/[0-9]+/$", reads.customer_id)
yield ("^/recreate/$", writes.recreate)
yield ("^/reset/$", writes.recreate)
yield ("^/reload/$", writes.recreate)
def app(req, resp):
for url, page in URLS():
if re.match(url, req['PATH_INFO']):
req['db'] = MySQLdb.connect(host="localhost", user="solaruser", passwd="solarpassword", db="solar")
req['cur'] = req['db'].cursor()
status, headers, data = page(req)
resp(status, headers)
req['cur'].close()
req['db'].commit()
req['db'].close()
return [data]
resp('404 Not Found', [('Content-type', 'text/plain')])
return ["404 Not Found"]
from wsgiref.simple_server import make_server
make_server('', 8888, app).serve_forever()
| gpl-2.0 | 2,646,338,254,056,790,000 | 38.333333 | 111 | 0.604777 | false | 3.131484 | false | false | false |
ntoll/mu | mu/virtual_environment.py | 1 | 25044 | import os
import sys
import datetime
from collections import namedtuple
import functools
import glob
import logging
import subprocess
import time
from PyQt5.QtCore import (
QObject,
QProcess,
pyqtSignal,
QTimer,
QProcessEnvironment,
)
from . import wheels
from . import settings
from . import config
wheels_dirpath = os.path.dirname(wheels.__file__)
logger = logging.getLogger(__name__)
class SplashLogHandler(logging.NullHandler):
"""
A simple log handler that does only one thing: use the referenced Qt signal
to emit the log.
"""
def __init__(self, emitter):
"""
Returns an instance of the class that will use the Qt signal passed in
as emitter.
"""
super().__init__()
self.setLevel(logging.DEBUG)
self.emitter = emitter
def emit(self, record):
"""
Emits a record via the Qt signal.
"""
timestamp = datetime.datetime.fromtimestamp(record.created)
messages = record.getMessage().splitlines()
for msg in messages:
output = "[{level}]({timestamp}) - {message}".format(
level=record.levelname, timestamp=timestamp, message=msg
)
self.emitter.emit(output)
def handle(self, record):
"""
Handles the log record.
"""
self.emit(record)
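# Illustrative sketch (an assumption, not part of the original module): wiring
# SplashLogHandler to any QObject that exposes a pyqtSignal(str); the Splash
# class below is hypothetical.
#
#     class Splash(QObject):
#         text = pyqtSignal(str)
#
#     splash = Splash()
#     logger.addHandler(SplashLogHandler(splash.text))
#     logger.info("Forwarded to the splash screen as well as the normal log")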
class Process(QObject):
"""
Use the QProcess mechanism to run a subprocess asynchronously
This will interact well with Qt Gui objects, eg by connecting the
`output` signals to an `QTextEdit.append` method and the `started`
and `finished` signals to a `QPushButton.setEnabled`.
eg::
import sys
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
class Example(QMainWindow):
def __init__(self):
super().__init__()
textEdit = QTextEdit()
self.setCentralWidget(textEdit)
self.setGeometry(300, 300, 350, 250)
self.setWindowTitle('Main window')
self.show()
self.process = Process()
self.process.output.connect(textEdit.append)
self.process.run(sys.executable, ["-u", "-m", "pip", "list"])
def main():
app = QApplication(sys.argv)
ex = Example()
sys.exit(app.exec_())
"""
started = pyqtSignal()
output = pyqtSignal(str)
finished = pyqtSignal()
Slots = namedtuple("Slots", ["started", "output", "finished"])
Slots.__new__.__defaults__ = (None, None, None)
def __init__(self):
super().__init__()
#
# Always run unbuffered and with UTF-8 IO encoding
#
self.environment = QProcessEnvironment.systemEnvironment()
self.environment.insert("PYTHONUNBUFFERED", "1")
self.environment.insert("PYTHONIOENCODING", "utf-8")
def _set_up_run(self, **envvars):
"""Run the process with the command and args"""
self.process = QProcess()
environment = QProcessEnvironment(self.environment)
for k, v in envvars.items():
environment.insert(k, v)
self.process.setProcessEnvironment(environment)
self.process.setProcessChannelMode(QProcess.MergedChannels)
def run_blocking(self, command, args, wait_for_s=30.0, **envvars):
self._set_up_run(**envvars)
self.process.start(command, args)
self.wait(wait_for_s=wait_for_s)
output = self.data()
return output
def run(self, command, args, **envvars):
logger.info(
"About to run %s with args %s and envvars %s",
command,
args,
envvars,
)
self._set_up_run(**envvars)
self.process.readyRead.connect(self._readyRead)
self.process.started.connect(self._started)
self.process.finished.connect(self._finished)
partial = functools.partial(self.process.start, command, args)
QTimer.singleShot(
1,
partial,
)
def wait(self, wait_for_s=30.0):
finished = self.process.waitForFinished(1000 * wait_for_s)
#
        # If finished is False, it could be because of an error
# or because we've already finished before starting to wait!
#
if (
not finished
and self.process.exitStatus() == self.process.CrashExit
):
raise VirtualEnvironmentError("Some error occurred")
def data(self):
return self.process.readAll().data().decode("utf-8")
def _started(self):
self.started.emit()
def _readyRead(self):
self.output.emit(self.data().strip())
def _finished(self):
self.finished.emit()
class Pip(object):
"""
Proxy for various pip commands
While this is a fairly useful abstraction in its own right, it's at
least initially to assist in testing, so we can mock out various
commands
"""
def __init__(self, pip_executable):
self.executable = pip_executable
self.process = Process()
def run(
self, command, *args, wait_for_s=30.0, slots=Process.Slots(), **kwargs
):
"""
        Run a pip command with args, treating kwargs as Posix switches.
        eg run("freeze")
        run("install", "flask", upgrade=True)
"""
#
# Any keyword args are treated as command-line switches
# As a special case, a boolean value indicates that the flag
# is a yes/no switch
#
params = [command, "--disable-pip-version-check"]
for k, v in kwargs.items():
switch = k.replace("_", "-")
if v is False:
switch = "no-" + switch
params.append("--" + switch)
if v is not True and v is not False:
params.append(str(v))
params.extend(args)
if slots.output is None:
result = self.process.run_blocking(
self.executable, params, wait_for_s=wait_for_s
)
return result
else:
if slots.started:
self.process.started.connect(slots.started)
self.process.output.connect(slots.output)
if slots.finished:
self.process.finished.connect(slots.finished)
self.process.run(self.executable, params)
def install(self, packages, slots=Process.Slots(), **kwargs):
"""
Use pip to install a package or packages.
If the first parameter is a string one package is installed; otherwise
it is assumed to be an iterable of package names.
        Any kwargs are passed as command-line switches. A value of True
        indicates a switch without a value (eg upgrade=True becomes --upgrade)
"""
if isinstance(packages, str):
return self.run(
"install", packages, wait_for_s=180.0, slots=slots, **kwargs
)
else:
return self.run(
"install", *packages, wait_for_s=180.0, slots=slots, **kwargs
)
def uninstall(self, packages, slots=Process.Slots(), **kwargs):
"""
Use pip to uninstall a package or packages
If the first parameter is a string one package is uninstalled;
otherwise it is assumed to be an iterable of package names.
        Any kwargs are passed as command-line switches. A value of True
        indicates a switch without a value (eg upgrade=True becomes --upgrade)
"""
if isinstance(packages, str):
return self.run(
"uninstall",
packages,
wait_for_s=180.0,
slots=slots,
yes=True,
**kwargs
)
else:
return self.run(
"uninstall",
*packages,
wait_for_s=180.0,
slots=slots,
yes=True,
**kwargs
)
def freeze(self):
"""
Use pip to return a list of installed packages
NB this is fairly trivial but is pulled out principally for
testing purposes
"""
return self.run("freeze")
def list(self):
"""
Use pip to return a list of installed packages
NB this is fairly trivial but is pulled out principally for
testing purposes
"""
return self.run("list")
def installed(self):
"""
Yield tuples of (package_name, version)
pip list gives a more consistent view of name/version
than pip freeze which uses different annotations for
file-installed wheels and editable (-e) installs
"""
lines = self.list().splitlines()
iterlines = iter(lines)
#
# The first two lines are headers
#
try:
next(iterlines)
next(iterlines)
#
# cf https://lgtm.com/rules/11000086/
#
except StopIteration:
raise VirtualEnvironmentError("Unable to parse installed packages")
for line in iterlines:
#
# Some lines have a third location element
#
name, version = line.split()[:2]
yield name, version
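# Illustrative sketch (an assumption, not part of the original module): driving
# the Pip proxy synchronously; the executable path below is hypothetical.
#
#     pip = Pip("/path/to/venv/bin/pip")
#     pip.install("flask", upgrade=True)    # roughly: pip install --upgrade flask
#     for name, version in pip.installed():
#         print(name, version)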
class VirtualEnvironmentError(Exception):
pass
class VirtualEnvironment(object):
"""
Represents and contains methods for manipulating a virtual environment.
"""
Slots = Process.Slots
def __init__(self, dirpath=None):
self.process = Process()
self._is_windows = sys.platform == "win32"
self._bin_extension = ".exe" if self._is_windows else ""
self.settings = settings.VirtualEnvironmentSettings()
self.settings.init()
dirpath_to_use = (
dirpath or self.settings.get("dirpath") or self._generate_dirpath()
)
logger.info("Using dirpath: %s", dirpath_to_use)
self.relocate(dirpath_to_use)
def __str__(self):
return "<%s at %s>" % (self.__class__.__name__, self.path)
@staticmethod
def _generate_dirpath():
"""
Construct a unique virtual environment folder
To avoid clashing with previously-created virtual environments,
construct one which includes the Python version and a timestamp
"""
return "%s-%s-%s" % (
config.VENV_DIR,
"%s%s" % sys.version_info[:2],
time.strftime("%Y%m%d-%H%M%S"),
)
def reset_pip(self):
self.pip = Pip(self.pip_executable)
def relocate(self, dirpath):
"""
Relocate sets up variables for, eg, the expected location and name of
the Python and Pip binaries, but doesn't access the file system. That's
done by code in or called from `create`
"""
self.path = str(dirpath)
self.name = os.path.basename(self.path)
self._bin_directory = os.path.join(
self.path, "scripts" if self._is_windows else "bin"
)
#
# Pip and the interpreter will be set up when the virtualenv is created
#
self.interpreter = os.path.join(
self._bin_directory, "python" + self._bin_extension
)
self.pip_executable = os.path.join(
self._bin_directory, "pip" + self._bin_extension
)
self.reset_pip()
logger.debug(
"Virtual environment set up %s at %s", self.name, self.path
)
self.settings["dirpath"] = self.path
def run_python(self, *args, slots=Process.Slots()):
"""
Run the referenced Python interpreter with the passed in args
If slots are supplied for the starting, output or finished signals
        they will be used; otherwise it will be assumed that this is running
        headless, the process will be run synchronously, and the collected
        output will be returned when the process is complete
"""
if slots.output:
if slots.started:
self.process.started.connect(slots.started)
self.process.output.connect(slots.output)
if slots.finished:
self.process.finished.connect(slots.finished)
self.process.run(self.interpreter, args)
return self.process
else:
return self.process.run_blocking(self.interpreter, args)
def _directory_is_venv(self):
"""
Determine whether a directory appears to be an existing venv
        There appears to be no canonical way to achieve this. Often the
        presence of a pyvenv.cfg file is enough, but it isn't always there.
        Specifically, on Debian it is absent when the venv was created by
        virtualenv. So we fall back to looking for an executable python
        command where we expect it.
"""
if os.path.isfile(os.path.join(self.path, "pyvenv.cfg")):
return True
#
# On windows os.access X_OK is close to meaningless, but it will
# succeed for executable files (and everything else). On Posix it
# does distinguish executable files
#
if os.access(self.interpreter, os.X_OK):
return True
return False
def ensure_and_create(self, emitter=None):
"""
If an emitter is provided, this will be used by a custom log handler
to display logging events onto a splash screen.
"""
splash_handler = None
if emitter:
splash_handler = SplashLogHandler(emitter)
logger.addHandler(splash_handler)
logger.info("Added log handler.")
n_retries = 3
for n in range(n_retries):
try:
logger.debug(
"Checking virtual environment; attempt #%d.", 1 + n
)
self.ensure()
except VirtualEnvironmentError:
new_dirpath = self._generate_dirpath()
logger.debug(
"Creating new virtual environment at %s.", new_dirpath
)
self.relocate(new_dirpath)
self.create()
else:
logger.info("Virtual environment already exists.")
return
# If we get here, there's a problem creating the virtual environment,
# so attempt to signal this via the logger, wait for the log to be
# displayed in the splash screen and then exit via the exception.
logger.error("Unable to create a working virtual environment.")
if emitter and splash_handler:
logger.removeHandler(splash_handler)
raise VirtualEnvironmentError(
"Unable to create a working virtual environment."
)
def ensure(self):
"""
Ensure that virtual environment exists and is in a good state.
"""
self.ensure_path()
self.ensure_interpreter()
self.ensure_interpreter_version()
self.ensure_pip()
self.ensure_key_modules()
def ensure_path(self):
"""
Ensure that the virtual environment path exists and is a valid venv.
"""
if not os.path.exists(self.path):
message = "%s does not exist." % self.path
logger.error(message)
raise VirtualEnvironmentError(message)
elif not os.path.isdir(self.path):
message = "%s exists but is not a directory." % self.path
logger.error(message)
raise VirtualEnvironmentError(message)
elif not self._directory_is_venv():
message = "Directory %s exists but is not a venv." % self.path
logger.error(message)
raise VirtualEnvironmentError(message)
logger.info("Virtual Environment found at: %s", self.path)
def ensure_interpreter(self):
"""
Ensure there is an interpreter of the expected name at the expected
location, given the platform and naming conventions.
NB if the interpreter is present as a symlink to a system interpreter
(likely for a venv) but the link is broken, then os.path.isfile will
fail as though the file wasn't there. Which is what we want in these
circumstances.
"""
if os.path.isfile(self.interpreter):
logger.info("Interpreter found at: %s", self.interpreter)
else:
message = (
"Interpreter not found where expected at: %s"
% self.interpreter
)
logger.error(message)
raise VirtualEnvironmentError(message)
def ensure_interpreter_version(self):
"""
Ensure that the venv interpreter matches the version of Python running
Mu.
This is necessary because otherwise we'll have mismatched wheels etc.
"""
current_version = "%s%s" % sys.version_info[:2]
#
# Can't use self.run_python as we're not yet within the Qt UI loop
#
process = subprocess.run(
[
self.interpreter,
"-c",
'import sys; print("%s%s" % sys.version_info[:2])',
],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
check=True,
)
venv_version = process.stdout.decode("utf-8").strip()
if current_version == venv_version:
logger.info("Both interpreters at version %s", current_version)
else:
message = (
"Mu interpreter at version %s; venv interpreter at version %s."
% (current_version, venv_version)
)
logger.error(message)
raise VirtualEnvironmentError(message)
def ensure_key_modules(self):
"""
Ensure that the venv interpreter is able to load key modules.
"""
for module, *_ in wheels.mode_packages:
logger.debug("Verifying import of: %s", module)
try:
subprocess.run(
[self.interpreter, "-c", "import %s" % module],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
check=True,
)
except subprocess.CalledProcessError:
message = "Failed to import: %s" % module
logger.error(message)
raise VirtualEnvironmentError(message)
def ensure_pip(self):
"""
Ensure that pip is available.
"""
if os.path.isfile(self.pip_executable):
logger.info("Pip found at: %s", self.pip_executable)
else:
message = (
"Pip not found where expected at: %s" % self.pip_executable
)
logger.error(message)
raise VirtualEnvironmentError(message)
def create(self):
"""
Create a new virtualenv at the referenced path.
"""
logger.info("Creating virtualenv: {}".format(self.path))
logger.info("Virtualenv name: {}".format(self.name))
env = dict(os.environ)
subprocess.run(
[
sys.executable,
"-m",
"virtualenv",
"-p",
sys.executable,
"-q",
self.path,
],
check=True,
env=env,
)
# Set the path to the interpreter
self.install_baseline_packages()
self.register_baseline_packages()
self.install_jupyter_kernel()
def install_jupyter_kernel(self):
"""
Install a Jupyter kernel for Mu (the name of the kernel indicates this
is a Mu related kernel).
"""
kernel_name = '"Python/Mu ({})"'.format(self.name)
logger.info("Installing Jupyter Kernel: %s", kernel_name)
return self.run_python(
"-m",
"ipykernel",
"install",
"--user",
"--name",
self.name,
"--display-name",
kernel_name,
)
def install_baseline_packages(self):
"""
Install all packages needed for non-core activity.
Each mode needs one or more packages to be able to run: pygame zero
mode needs pgzero and its dependencies; web mode needs Flask and so on.
We intend to ship with all the necessary wheels for those packages so
no network access is needed. But if the wheels aren't found, because
we're not running from an installer, then just pip install in the
usual way.
        --upgrade is currently used with future upgrade releases of Mu in mind.
"""
logger.info("Installing baseline packages.")
logger.info(
"%s %s",
wheels_dirpath,
"exists" if os.path.isdir(wheels_dirpath) else "does not exist",
)
#
# This command should install the baseline packages, picking up the
# precompiled wheels from the wheels path
#
# For dev purposes (where we might not have the wheels) warn where
# the wheels are not already present and download them
#
wheel_filepaths = glob.glob(os.path.join(wheels_dirpath, "*.whl"))
if not wheel_filepaths:
logger.warn(
"No wheels found in %s; downloading...", wheels_dirpath
)
wheels.download()
wheel_filepaths = glob.glob(os.path.join(wheels_dirpath, "*.whl"))
if not wheel_filepaths:
raise VirtualEnvironmentError(
"No wheels in %s; try `python -mmu.wheels`" % wheels_dirpath
)
self.reset_pip()
logger.debug(self.pip.install(wheel_filepaths))
def register_baseline_packages(self):
"""
Keep track of the baseline packages installed into the empty venv.
"""
self.reset_pip()
packages = list(self.pip.installed())
self.settings["baseline_packages"] = packages
def baseline_packages(self):
"""
Return the list of baseline packages.
"""
return self.settings.get("baseline_packages")
def install_user_packages(self, packages, slots=Process.Slots()):
"""
Install user defined packages.
"""
logger.info("Installing user packages: %s", ", ".join(packages))
self.reset_pip()
self.pip.install(
packages,
slots=slots,
upgrade=True,
)
def remove_user_packages(self, packages, slots=Process.Slots()):
"""
Remove user defined packages.
"""
logger.info("Removing user packages: %s", ", ".join(packages))
self.reset_pip()
self.pip.uninstall(
packages,
slots=slots,
)
def installed_packages(self):
"""
List all the third party modules installed by the user in the venv
containing the referenced Python interpreter.
"""
logger.info("Discovering installed third party modules in venv.")
#
# FIXME: Basically we need a way to distinguish between installed
# baseline packages and user-added packages. The baseline_packages
# in this class (or, later, from modes) are just the top-level classes:
# flask, pgzero etc. But they bring in many others further down. So:
# we either need to keep track of what's installed as part of the
# baseline install; or to keep track of what's installed by users.
# And then we have to hold those in the settings file
# The latter is probably easier.
#
baseline_packages = [
name for name, version in self.baseline_packages()
]
user_packages = []
self.reset_pip()
for package, version in self.pip.installed():
if package not in baseline_packages:
user_packages.append(package)
logger.info(user_packages)
return baseline_packages, user_packages
#
# Create a singleton virtual environment to be used throughout
# the application
#
venv = VirtualEnvironment()
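# Illustrative sketch (an assumption, not part of the original module): the
# typical calls Mu-like code would make against the singleton above; the extra
# package name is hypothetical.
#
#     venv.ensure_and_create()                   # validate or (re)build the venv
#     venv.install_user_packages(["arrow"])
#     baseline, user = venv.installed_packages()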
| gpl-3.0 | 5,771,718,422,164,241,000 | 32.259918 | 79 | 0.554903 | false | 4.636061 | false | false | false |
josiah-wolf-oberholtzer/consort | consort/tools/MusicSpecifierSequence.py | 1 | 5595 | import abjad
import collections
from abjad.tools import abctools
from abjad.tools import mathtools
from abjad.tools import rhythmmakertools
class MusicSpecifierSequence(abctools.AbjadValueObject):
r'''A music specifier sequence.
::
>>> sequence_a = consort.MusicSpecifierSequence(
... music_specifiers='music',
... )
>>> print(format(sequence_a))
consort.tools.MusicSpecifierSequence(
music_specifiers=('music',),
)
::
>>> sequence_b = consort.MusicSpecifierSequence(
... application_rate='phrase',
... music_specifiers=['one', 'two', 'three'],
... )
>>> print(format(sequence_b))
consort.tools.MusicSpecifierSequence(
application_rate='phrase',
music_specifiers=('one', 'two', 'three'),
)
'''
### CLASS VARIABLES ###
__slots__ = (
'_application_rate',
'_music_specifiers',
)
### INITIALIZER ###
def __init__(
self,
application_rate=None,
music_specifiers=None,
):
if application_rate is not None:
application_rate = application_rate or 'phrase'
assert application_rate in ('division', 'phrase')
if music_specifiers is None:
music_specifiers = [None]
if not isinstance(music_specifiers, collections.Sequence) or \
isinstance(music_specifiers, str):
music_specifiers = [music_specifiers]
music_specifiers = tuple(music_specifiers)
#music_specifiers = abjad.CyclicTuple(music_specifiers)
assert len(music_specifiers)
self._application_rate = application_rate
self._music_specifiers = music_specifiers
### SPECIAL METHODS ###
def __call__(
self,
durations=None,
layer=None,
division_mask_seed=0,
division_masks=None,
padding=None,
seed=None,
start_offset=None,
timespan_specifier=None,
voice_name=None,
):
import consort
timespans = abjad.TimespanList()
timespan_specifier = timespan_specifier or \
consort.TimespanSpecifier()
seed = seed or 0
division_mask_seed = division_mask_seed or 0
durations = [_ for _ in durations if _]
offsets = mathtools.cumulative_sums(durations, start_offset)
if not offsets:
return timespans
offset_pair_count = len(offsets) - 1
if offset_pair_count == 1:
offset_pair_count = 2 # make patterns happy
iterator = consort.iterate_nwise(offsets)
for i, offset_pair in enumerate(iterator):
start_offset, stop_offset = offset_pair
music_specifier = self[seed % len(self)]
timespan = consort.PerformedTimespan(
forbid_fusing=timespan_specifier.forbid_fusing,
forbid_splitting=timespan_specifier.forbid_splitting,
layer=layer,
minimum_duration=timespan_specifier.minimum_duration,
music_specifier=music_specifier,
start_offset=start_offset,
stop_offset=stop_offset,
voice_name=voice_name,
)
if not division_masks:
timespans.append(timespan)
else:
output_mask = division_masks.get_matching_pattern(
i, offset_pair_count + 1, rotation=division_mask_seed)
if output_mask is None:
timespans.append(timespan)
elif isinstance(output_mask, rhythmmakertools.SustainMask):
timespans.append(timespan)
elif isinstance(output_mask, rhythmmakertools.SilenceMask):
pass
division_mask_seed += 1
if self.application_rate == 'division':
seed += 1
if padding:
silent_timespans = abjad.TimespanList()
for shard in timespans.partition(True):
silent_timespan_one = consort.SilentTimespan(
layer=layer,
start_offset=shard.start_offset - padding,
stop_offset=shard.start_offset,
voice_name=voice_name,
)
silent_timespans.append(silent_timespan_one)
silent_timespan_two = consort.SilentTimespan(
layer=layer,
start_offset=shard.stop_offset,
stop_offset=shard.stop_offset + padding,
voice_name=voice_name,
)
silent_timespans.append(silent_timespan_two)
silent_timespans.compute_logical_or()
for timespan in timespans:
silent_timespans - timespan
timespans.extend(silent_timespans)
timespans.sort()
return timespans
def __getitem__(self, item):
return self._music_specifiers[item]
def __len__(self):
return len(self._music_specifiers)
### PUBLIC METHODS ###
def transpose(self, expr):
music_specifiers = [_.transpose(expr) for _ in self.music_specifiers]
return abjad.new(
self,
music_specifiers=music_specifiers,
)
### PUBLIC PROPERTIES ###
@property
def application_rate(self):
return self._application_rate
@property
def music_specifiers(self):
return self._music_specifiers
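# Illustrative sketch (an assumption, not part of the original module): applying
# a sequence to concrete durations; all values below are hypothetical.
#
#     import abjad
#     sequence = consort.MusicSpecifierSequence(
#         music_specifiers=['one', 'two', 'three'],
#         )
#     timespans = sequence(
#         durations=[abjad.Duration(1, 4), abjad.Duration(1, 2)],
#         start_offset=abjad.Offset(0),
#         voice_name='Violin 1 Voice',
#         )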
| mit | -2,756,695,303,295,140,000 | 32.502994 | 77 | 0.560858 | false | 4.320463 | false | false | false |
nysan/yocto-autobuilder | lib/python2.6/site-packages/buildbot-0.8.4p1-py2.6.egg/buildbot/steps/source.py | 4 | 58316 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from warnings import warn
from email.Utils import formatdate
from twisted.python import log
from zope.interface import implements
from buildbot.process.buildstep import LoggingBuildStep, LoggedRemoteCommand
from buildbot.interfaces import BuildSlaveTooOldError, IRenderable
from buildbot.status.builder import SKIPPED
class _ComputeRepositoryURL(object):
implements(IRenderable)
def __init__(self, repository):
self.repository = repository
def getRenderingFor(self, build):
'''
        Helper function that computes the repository URL based on the parameter
        the source step took and the Change 'repository' property
'''
s = build.getSourceStamp()
repository = self.repository
if not repository:
return str(s.repository)
else:
if callable(repository):
return str(build.render(repository(s.repository)))
elif isinstance(repository, dict):
return str(build.render(repository.get(s.repository)))
elif isinstance(repository, str) or isinstance(repository, unicode):
try:
return str(repository % s.repository)
except TypeError:
# that's the backward compatibility case
return build.render(repository)
else:
return str(build.render(repository))
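# Illustrative sketch (an assumption, not part of the original module): forms a
# repository/URL argument may take, as handled above; all URLs are hypothetical.
#
#     SVN(svnurl="svn+ssh://svn.example.org/repo/trunk")   # plain string
#     SVN(svnurl="svn+ssh://mirror.example.org/%s")        # '%s' filled with Change.repository
#     SVN(svnurl=lambda repo: "svn+ssh://mirror/" + repo)  # callable of Change.repository
#     SVN(svnurl={"main": "svn+ssh://mirror/main"})        # dict keyed by Change.repository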
class Source(LoggingBuildStep):
"""This is a base class to generate a source tree in the buildslave.
Each version control system has a specialized subclass, and is expected
to override __init__ and implement computeSourceRevision() and
startVC(). The class as a whole builds up the self.args dictionary, then
starts a LoggedRemoteCommand with those arguments.
"""
renderables = [ 'workdir' ]
# if the checkout fails, there's no point in doing anything else
haltOnFailure = True
flunkOnFailure = True
notReally = False
branch = None # the default branch, should be set in __init__
def __init__(self, workdir=None, mode='update', alwaysUseLatest=False,
timeout=20*60, retry=None, **kwargs):
"""
@type workdir: string
@param workdir: local directory (relative to the Builder's root)
where the tree should be placed
@type mode: string
@param mode: the kind of VC operation that is desired:
- 'update': specifies that the checkout/update should be
performed directly into the workdir. Each build is performed
in the same directory, allowing for incremental builds. This
minimizes disk space, bandwidth, and CPU time. However, it
may encounter problems if the build process does not handle
dependencies properly (if you must sometimes do a 'clean
build' to make sure everything gets compiled), or if source
files are deleted but generated files can influence test
behavior (e.g. python's .pyc files), or when source
directories are deleted but generated files prevent CVS from
removing them. When used with a patched checkout, from a
previous buildbot try for instance, it will try to "revert"
the changes first and will do a clobber if it is unable to
get a clean checkout. The behavior is SCM-dependent.
- 'copy': specifies that the source-controlled workspace
should be maintained in a separate directory (called the
'copydir'), using checkout or update as necessary. For each
build, a new workdir is created with a copy of the source
tree (rm -rf workdir; cp -R -P -p copydir workdir). This
doubles the disk space required, but keeps the bandwidth low
(update instead of a full checkout). A full 'clean' build
is performed each time. This avoids any generated-file
build problems, but is still occasionally vulnerable to
problems such as a CVS repository being manually rearranged
(causing CVS errors on update) which are not an issue with
a full checkout.
- 'clobber': specifies that the working directory should be
deleted each time, necessitating a full checkout for each
build. This insures a clean build off a complete checkout,
avoiding any of the problems described above, but is
bandwidth intensive, as the whole source tree must be
pulled down for each build.
- 'export': is like 'clobber', except that e.g. the 'cvs
export' command is used to create the working directory.
This command removes all VC metadata files (the
CVS/.svn/{arch} directories) from the tree, which is
sometimes useful for creating source tarballs (to avoid
including the metadata in the tar file). Not all VC systems
support export.
@type alwaysUseLatest: boolean
@param alwaysUseLatest: whether to always update to the most
recent available sources for this build.
Normally the Source step asks its Build for a list of all
Changes that are supposed to go into the build, then computes a
'source stamp' (revision number or timestamp) that will cause
exactly that set of changes to be present in the checked out
tree. This is turned into, e.g., 'cvs update -D timestamp', or
'svn update -r revnum'. If alwaysUseLatest=True, bypass this
computation and always update to the latest available sources
for each build.
The source stamp helps avoid a race condition in which someone
commits a change after the master has decided to start a build
but before the slave finishes checking out the sources. At best
this results in a build which contains more changes than the
buildmaster thinks it has (possibly resulting in the wrong
person taking the blame for any problems that result), at worst
            it can result in an incoherent set of sources (splitting a
non-atomic commit) which may not build at all.
@type retry: tuple of ints (delay, repeats) (or None)
@param retry: if provided, VC update failures are re-attempted up
to REPEATS times, with DELAY seconds between each
attempt. Some users have slaves with poor connectivity
to their VC repository, and they say that up to 80% of
their build failures are due to transient network
failures that could be handled by simply retrying a
couple times.
"""
LoggingBuildStep.__init__(self, **kwargs)
self.addFactoryArguments(workdir=workdir,
mode=mode,
alwaysUseLatest=alwaysUseLatest,
timeout=timeout,
retry=retry,
)
assert mode in ("update", "copy", "clobber", "export")
if retry:
delay, repeats = retry
assert isinstance(repeats, int)
assert repeats > 0
self.args = {'mode': mode,
'timeout': timeout,
'retry': retry,
'patch': None, # set during .start
}
# This will get added to args later, after properties are rendered
self.workdir = workdir
self.alwaysUseLatest = alwaysUseLatest
# Compute defaults for descriptions:
description = ["updating"]
descriptionDone = ["update"]
if mode == "clobber":
description = ["checkout"]
            # because "checking out" takes too much space
descriptionDone = ["checkout"]
elif mode == "export":
description = ["exporting"]
descriptionDone = ["export"]
self.description = description
self.descriptionDone = descriptionDone
def setStepStatus(self, step_status):
LoggingBuildStep.setStepStatus(self, step_status)
def setDefaultWorkdir(self, workdir):
self.workdir = self.workdir or workdir
def describe(self, done=False):
if done:
return self.descriptionDone
return self.description
def computeSourceRevision(self, changes):
"""Each subclass must implement this method to do something more
precise than -rHEAD every time. For version control systems that use
repository-wide change numbers (SVN, P4), this can simply take the
maximum such number from all the changes involved in this build. For
systems that do not (CVS), it needs to create a timestamp based upon
the latest Change, the Build's treeStableTimer, and an optional
self.checkoutDelay value."""
return None
def start(self):
if self.notReally:
log.msg("faking %s checkout/update" % self.name)
self.step_status.setText(["fake", self.name, "successful"])
self.addCompleteLog("log",
"Faked %s checkout/update 'successful'\n" \
% self.name)
return SKIPPED
# Allow workdir to be WithProperties
self.args['workdir'] = self.workdir
# what source stamp would this build like to use?
s = self.build.getSourceStamp()
# if branch is None, then use the Step's "default" branch
branch = s.branch or self.branch
# if revision is None, use the latest sources (-rHEAD)
revision = s.revision
if not revision and not self.alwaysUseLatest:
revision = self.computeSourceRevision(s.changes)
# the revision property is currently None, so set it to something
# more interesting
if revision is not None:
self.setProperty('revision', str(revision), "Source")
# if patch is None, then do not patch the tree after checkout
# 'patch' is None or a tuple of (patchlevel, diff, root)
# root is optional.
patch = s.patch
if patch:
self.addCompleteLog("patch", patch[1])
if self.alwaysUseLatest:
revision = None
self.startVC(branch, revision, patch)
def commandComplete(self, cmd):
if cmd.updates.has_key("got_revision"):
got_revision = cmd.updates["got_revision"][-1]
if got_revision is not None:
self.setProperty("got_revision", str(got_revision), "Source")
class BK(Source):
"""I perform BitKeeper checkout/update operations."""
name = 'bk'
renderables = [ 'bkurl', 'baseURL' ]
def __init__(self, bkurl=None, baseURL=None,
directory=None, extra_args=None, **kwargs):
"""
@type bkurl: string
@param bkurl: the URL which points to the BitKeeper server.
@type baseURL: string
@param baseURL: if branches are enabled, this is the base URL to
which a branch name will be appended. It should
probably end in a slash. Use exactly one of
C{bkurl} and C{baseURL}.
"""
self.bkurl = _ComputeRepositoryURL(bkurl)
self.baseURL = _ComputeRepositoryURL(baseURL)
self.extra_args = extra_args
Source.__init__(self, **kwargs)
self.addFactoryArguments(bkurl=bkurl,
baseURL=baseURL,
directory=directory,
extra_args=extra_args,
)
if bkurl and baseURL:
raise ValueError("you must use exactly one of bkurl and baseURL")
def computeSourceRevision(self, changes):
return changes.revision
def startVC(self, branch, revision, patch):
warnings = []
slavever = self.slaveVersion("bk")
if not slavever:
m = "slave does not have the 'bk' command"
raise BuildSlaveTooOldError(m)
if self.bkurl:
assert not branch # we need baseURL= to use branches
self.args['bkurl'] = self.bkurl
else:
self.args['bkurl'] = self.baseURL + branch
self.args['revision'] = revision
self.args['patch'] = patch
self.args['branch'] = branch
if self.extra_args is not None:
self.args['extra_args'] = self.extra_args
revstuff = []
revstuff.append("[branch]")
if revision is not None:
revstuff.append("r%s" % revision)
if patch is not None:
revstuff.append("[patch]")
self.description.extend(revstuff)
self.descriptionDone.extend(revstuff)
cmd = LoggedRemoteCommand("bk", self.args)
self.startCommand(cmd, warnings)
class CVS(Source):
"""I do CVS checkout/update operations.
Note: if you are doing anonymous/pserver CVS operations, you will need
to manually do a 'cvs login' on each buildslave before the slave has any
    hope of success. XXX: fix this: take a cvs password as an argument and
figure out how to do a 'cvs login' on each build
"""
name = "cvs"
renderables = [ "cvsroot" ]
#progressMetrics = ('output',)
#
# additional things to track: update gives one stderr line per directory
# (starting with 'cvs server: Updating ') (and is fairly stable if files
# is empty), export gives one line per directory (starting with 'cvs
# export: Updating ') and another line per file (starting with U). Would
# be nice to track these, requires grepping LogFile data for lines,
# parsing each line. Might be handy to have a hook in LogFile that gets
# called with each complete line.
def __init__(self, cvsroot=None, cvsmodule="",
global_options=[], branch=None, checkoutDelay=None,
checkout_options=[], export_options=[], extra_options=[],
login=None,
**kwargs):
"""
@type cvsroot: string
@param cvsroot: CVS Repository from which the source tree should
be obtained. '/home/warner/Repository' for local
or NFS-reachable repositories,
':pserver:[email protected]:/cvs' for anonymous CVS,
'[email protected]:/cvs' for non-anonymous CVS or
CVS over ssh. Lots of possibilities, check the
CVS documentation for more.
@type cvsmodule: string
@param cvsmodule: subdirectory of CVS repository that should be
retrieved
@type login: string or None
@param login: if not None, a string which will be provided as a
password to the 'cvs login' command, used when a
:pserver: method is used to access the repository.
This login is only needed once, but must be run
each time (just before the CVS operation) because
there is no way for the buildslave to tell whether
it was previously performed or not.
@type branch: string
@param branch: the default branch name, will be used in a '-r'
argument to specify which branch of the source tree
should be used for this checkout. Defaults to None,
which means to use 'HEAD'.
@type checkoutDelay: int or None
@param checkoutDelay: if not None, the number of seconds to put
between the last known Change and the
timestamp given to the -D argument. This
defaults to exactly half of the parent
Build's .treeStableTimer, but it could be
set to something else if your CVS change
notification has particularly weird
latency characteristics.
@type global_options: list of strings
@param global_options: these arguments are inserted in the cvs
command line, before the
'checkout'/'update' command word. See
'cvs --help-options' for a list of what
may be accepted here. ['-r'] will make
the checked out files read only. ['-r',
'-R'] will also assume the repository is
read-only (I assume this means it won't
use locks to insure atomic access to the
,v files).
@type checkout_options: list of strings
@param checkout_options: these arguments are inserted in the cvs
command line, after 'checkout' but before
branch or revision specifiers.
@type export_options: list of strings
@param export_options: these arguments are inserted in the cvs
command line, after 'export' but before
branch or revision specifiers.
@type extra_options: list of strings
@param extra_options: these arguments are inserted in the cvs
command line, after 'checkout' or 'export' but before
branch or revision specifiers.
"""
self.checkoutDelay = checkoutDelay
self.branch = branch
self.cvsroot = _ComputeRepositoryURL(cvsroot)
Source.__init__(self, **kwargs)
self.addFactoryArguments(cvsroot=cvsroot,
cvsmodule=cvsmodule,
global_options=global_options,
checkout_options=checkout_options,
export_options=export_options,
extra_options=extra_options,
branch=branch,
checkoutDelay=checkoutDelay,
login=login,
)
self.args.update({'cvsmodule': cvsmodule,
'global_options': global_options,
'checkout_options':checkout_options,
'export_options':export_options,
'extra_options':extra_options,
'login': login,
})
def computeSourceRevision(self, changes):
if not changes:
return None
lastChange = max([c.when for c in changes])
if self.checkoutDelay is not None:
when = lastChange + self.checkoutDelay
else:
lastSubmit = max([br.submittedAt for br in self.build.requests])
when = (lastChange + lastSubmit) / 2
return formatdate(when)
def startVC(self, branch, revision, patch):
if self.slaveVersionIsOlderThan("cvs", "1.39"):
# the slave doesn't know to avoid re-using the same sourcedir
# when the branch changes. We have no way of knowing which branch
# the last build used, so if we're using a non-default branch and
# either 'update' or 'copy' modes, it is safer to refuse to
# build, and tell the user they need to upgrade the buildslave.
if (branch != self.branch
and self.args['mode'] in ("update", "copy")):
m = ("This buildslave (%s) does not know about multiple "
"branches, and using mode=%s would probably build the "
"wrong tree. "
"Refusing to build. Please upgrade the buildslave to "
"buildbot-0.7.0 or newer." % (self.build.slavename,
self.args['mode']))
log.msg(m)
raise BuildSlaveTooOldError(m)
if self.slaveVersionIsOlderThan("cvs", "2.10"):
if self.args['extra_options'] or self.args['export_options']:
m = ("This buildslave (%s) does not support export_options "
"or extra_options arguments to the CVS step."
% (self.build.slavename))
log.msg(m)
raise BuildSlaveTooOldError(m)
# the unwanted args are empty, and will probably be ignored by
# the slave, but delete them just to be safe
del self.args['export_options']
del self.args['extra_options']
if branch is None:
branch = "HEAD"
self.args['cvsroot'] = self.cvsroot
self.args['branch'] = branch
self.args['revision'] = revision
self.args['patch'] = patch
if self.args['branch'] == "HEAD" and self.args['revision']:
# special case. 'cvs update -r HEAD -D today' gives no files
# TODO: figure out why, see if it applies to -r BRANCH
self.args['branch'] = None
# deal with old slaves
warnings = []
slavever = self.slaveVersion("cvs", "old")
if slavever == "old":
# 0.5.0
if self.args['mode'] == "export":
self.args['export'] = 1
elif self.args['mode'] == "clobber":
self.args['clobber'] = 1
elif self.args['mode'] == "copy":
self.args['copydir'] = "source"
self.args['tag'] = self.args['branch']
assert not self.args['patch'] # 0.5.0 slave can't do patch
cmd = LoggedRemoteCommand("cvs", self.args)
self.startCommand(cmd, warnings)
class SVN(Source):
"""I perform Subversion checkout/update operations."""
name = 'svn'
branch_placeholder = '%%BRANCH%%'
renderables = [ 'svnurl', 'baseURL' ]
def __init__(self, svnurl=None, baseURL=None, defaultBranch=None,
directory=None, username=None, password=None,
extra_args=None, keep_on_purge=None, ignore_ignores=None,
always_purge=None, depth=None, **kwargs):
"""
@type svnurl: string
@param svnurl: the URL which points to the Subversion server,
combining the access method (HTTP, ssh, local file),
the repository host/port, the repository path, the
sub-tree within the repository, and the branch to
check out. Use exactly one of C{svnurl} and C{baseURL}.
@param baseURL: if branches are enabled, this is the base URL to
which a branch name will be appended. It should
probably end in a slash. Use exactly one of
C{svnurl} and C{baseURL}.
@param defaultBranch: if branches are enabled, this is the branch
to use if the Build does not specify one
explicitly. It will simply be appended
to C{baseURL} and the result handed to
the SVN command.
@type username: string
@param username: username to pass to svn's --username
@type password: string
@param password: password to pass to svn's --password
"""
if not 'workdir' in kwargs and directory is not None:
# deal with old configs
warn("Please use workdir=, not directory=", DeprecationWarning)
kwargs['workdir'] = directory
self.svnurl = svnurl and _ComputeRepositoryURL(svnurl)
self.baseURL = _ComputeRepositoryURL(baseURL)
self.branch = defaultBranch
self.username = username
self.password = password
self.extra_args = extra_args
self.keep_on_purge = keep_on_purge
self.ignore_ignores = ignore_ignores
self.always_purge = always_purge
self.depth = depth
Source.__init__(self, **kwargs)
self.addFactoryArguments(svnurl=svnurl,
baseURL=baseURL,
defaultBranch=defaultBranch,
directory=directory,
username=username,
password=password,
extra_args=extra_args,
keep_on_purge=keep_on_purge,
ignore_ignores=ignore_ignores,
always_purge=always_purge,
depth=depth,
)
if svnurl and baseURL:
raise ValueError("you must use either svnurl OR baseURL")
def computeSourceRevision(self, changes):
if not changes or None in [c.revision for c in changes]:
return None
lastChange = max([int(c.revision) for c in changes])
return lastChange
def checkCompatibility(self):
''' Handle compatibility between old slaves/svn clients '''
slavever = self.slaveVersion("svn", "old")
if not slavever:
m = "slave does not have the 'svn' command"
raise BuildSlaveTooOldError(m)
if self.slaveVersionIsOlderThan("svn", "1.39"):
# the slave doesn't know to avoid re-using the same sourcedir
# when the branch changes. We have no way of knowing which branch
# the last build used, so if we're using a non-default branch and
# either 'update' or 'copy' modes, it is safer to refuse to
# build, and tell the user they need to upgrade the buildslave.
if (self.args['branch'] != self.branch
and self.args['mode'] in ("update", "copy")):
m = ("This buildslave (%s) does not know about multiple "
"branches, and using mode=%s would probably build the "
"wrong tree. "
"Refusing to build. Please upgrade the buildslave to "
"buildbot-0.7.0 or newer." % (self.build.slavename,
self.args['mode']))
raise BuildSlaveTooOldError(m)
if (self.depth is not None) and self.slaveVersionIsOlderThan("svn","2.9"):
m = ("This buildslave (%s) does not support svn depth "
"arguments. Refusing to build. "
"Please upgrade the buildslave." % (self.build.slavename))
raise BuildSlaveTooOldError(m)
if (self.username is not None or self.password is not None) \
and self.slaveVersionIsOlderThan("svn", "2.8"):
m = ("This buildslave (%s) does not support svn usernames "
"and passwords. "
"Refusing to build. Please upgrade the buildslave to "
"buildbot-0.7.10 or newer." % (self.build.slavename,))
raise BuildSlaveTooOldError(m)
def getSvnUrl(self, branch, revision, patch):
''' Compute the svn url that will be passed to the svn remote command '''
if self.svnurl:
return self.svnurl
else:
if branch is None:
m = ("The SVN source step belonging to builder '%s' does not know "
"which branch to work with. This means that the change source "
"did not specify a branch and that defaultBranch is None." \
% self.build.builder.name)
raise RuntimeError(m)
computed = self.baseURL
if self.branch_placeholder in self.baseURL:
return computed.replace(self.branch_placeholder, branch)
else:
return computed + branch
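    # Illustrative sketch of how getSvnUrl resolves a branch (hypothetical
    # values): with baseURL="http://svn.example.org/proj/%%BRANCH%%/src" and
    # branch="branches/1.0", the placeholder form yields
    # "http://svn.example.org/proj/branches/1.0/src"; without the placeholder,
    # the branch name is simply appended to baseURL.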
def startVC(self, branch, revision, patch):
warnings = []
self.checkCompatibility()
self.args['svnurl'] = self.getSvnUrl(branch, revision, patch)
self.args['revision'] = revision
self.args['patch'] = patch
self.args['always_purge'] = self.always_purge
#Set up depth if specified
if self.depth is not None:
self.args['depth'] = self.depth
if self.username is not None:
self.args['username'] = self.username
if self.password is not None:
self.args['password'] = self.password
if self.extra_args is not None:
self.args['extra_args'] = self.extra_args
revstuff = []
#revstuff.append(self.args['svnurl'])
if self.args['svnurl'].find('trunk') == -1:
revstuff.append("[branch]")
if revision is not None:
revstuff.append("r%s" % revision)
if patch is not None:
revstuff.append("[patch]")
self.description.extend(revstuff)
self.descriptionDone.extend(revstuff)
cmd = LoggedRemoteCommand("svn", self.args)
self.startCommand(cmd, warnings)
class Darcs(Source):
"""Check out a source tree from a Darcs repository at 'repourl'.
Darcs has no concept of file modes. This means the eXecute-bit will be
cleared on all source files. As a result, you may need to invoke
configuration scripts with something like:
C{s(step.Configure, command=['/bin/sh', './configure'])}
"""
name = "darcs"
renderables = [ 'repourl', 'baseURL' ]
def __init__(self, repourl=None, baseURL=None, defaultBranch=None,
**kwargs):
"""
@type repourl: string
@param repourl: the URL which points at the Darcs repository. This
is used as the default branch. Using C{repourl} does
not enable builds of alternate branches: use
C{baseURL} to enable this. Use either C{repourl} or
C{baseURL}, not both.
@param baseURL: if branches are enabled, this is the base URL to
which a branch name will be appended. It should
probably end in a slash. Use exactly one of
C{repourl} and C{baseURL}.
@param defaultBranch: if branches are enabled, this is the branch
to use if the Build does not specify one
explicitly. It will simply be appended to
C{baseURL} and the result handed to the
'darcs pull' command.
"""
self.repourl = _ComputeRepositoryURL(repourl)
self.baseURL = _ComputeRepositoryURL(baseURL)
self.branch = defaultBranch
Source.__init__(self, **kwargs)
self.addFactoryArguments(repourl=repourl,
baseURL=baseURL,
defaultBranch=defaultBranch,
)
assert self.args['mode'] != "export", \
"Darcs does not have an 'export' mode"
if repourl and baseURL:
raise ValueError("you must provide exactly one of repourl and"
" baseURL")
def startVC(self, branch, revision, patch):
slavever = self.slaveVersion("darcs")
if not slavever:
m = "slave is too old, does not know about darcs"
raise BuildSlaveTooOldError(m)
if self.slaveVersionIsOlderThan("darcs", "1.39"):
if revision:
# TODO: revisit this once we implement computeSourceRevision
m = "0.6.6 slaves can't handle args['revision']"
raise BuildSlaveTooOldError(m)
# the slave doesn't know to avoid re-using the same sourcedir
# when the branch changes. We have no way of knowing which branch
# the last build used, so if we're using a non-default branch and
# either 'update' or 'copy' modes, it is safer to refuse to
# build, and tell the user they need to upgrade the buildslave.
if (branch != self.branch
and self.args['mode'] in ("update", "copy")):
m = ("This buildslave (%s) does not know about multiple "
"branches, and using mode=%s would probably build the "
"wrong tree. "
"Refusing to build. Please upgrade the buildslave to "
"buildbot-0.7.0 or newer." % (self.build.slavename,
self.args['mode']))
raise BuildSlaveTooOldError(m)
if self.repourl:
assert not branch # we need baseURL= to use branches
self.args['repourl'] = self.repourl
else:
self.args['repourl'] = self.baseURL + branch
self.args['revision'] = revision
self.args['patch'] = patch
revstuff = []
if branch is not None and branch != self.branch:
revstuff.append("[branch]")
self.description.extend(revstuff)
self.descriptionDone.extend(revstuff)
cmd = LoggedRemoteCommand("darcs", self.args)
self.startCommand(cmd)
class Git(Source):
"""Check out a source tree from a git repository 'repourl'."""
name = "git"
renderables = [ 'repourl' ]
def __init__(self, repourl=None,
branch="master",
submodules=False,
ignore_ignores=None,
reference=None,
shallow=False,
progress=False,
**kwargs):
"""
@type repourl: string
@param repourl: the URL which points at the git repository
@type branch: string
@param branch: The branch or tag to check out by default. If
a build specifies a different branch, it will
be used instead of this.
@type submodules: boolean
@param submodules: Whether or not to update (and initialize)
git submodules.
@type reference: string
@param reference: The path to a reference repository to obtain
objects from, if any.
@type shallow: boolean
        @param shallow: Use a shallow clone, if possible
@type progress: boolean
@param progress: Pass the --progress option when fetching. This
can solve long fetches getting killed due to
lack of output, but requires Git 1.7.2+.
"""
Source.__init__(self, **kwargs)
self.repourl = _ComputeRepositoryURL(repourl)
self.branch = branch
self.addFactoryArguments(repourl=repourl,
branch=branch,
submodules=submodules,
ignore_ignores=ignore_ignores,
reference=reference,
shallow=shallow,
progress=progress,
)
self.args.update({'submodules': submodules,
'ignore_ignores': ignore_ignores,
'reference': reference,
'shallow': shallow,
'progress': progress,
})
def computeSourceRevision(self, changes):
if not changes:
return None
return changes[-1].revision
def startVC(self, branch, revision, patch):
self.args['branch'] = branch
self.args['repourl'] = self.repourl
self.args['revision'] = revision
self.args['patch'] = patch
# check if there is any patchset we should fetch from Gerrit
try:
# GerritChangeSource
self.args['gerrit_branch'] = self.build.getProperty("event.patchSet.ref")
self.setProperty("gerrit_branch", self.args['gerrit_branch'])
except KeyError:
try:
# forced build
change = self.build.getProperty("gerrit_change").split('/')
if len(change) == 2:
self.args['gerrit_branch'] = "refs/changes/%2.2d/%d/%d" \
% (int(change[0]) % 100, int(change[0]), int(change[1]))
self.setProperty("gerrit_branch", self.args['gerrit_branch'])
except:
pass
slavever = self.slaveVersion("git")
if not slavever:
raise BuildSlaveTooOldError("slave is too old, does not know "
"about git")
cmd = LoggedRemoteCommand("git", self.args)
self.startCommand(cmd)
class Repo(Source):
"""Check out a source tree from a repo repository described by manifest."""
name = "repo"
renderables = [ "manifest_url" ]
def __init__(self,
manifest_url=None,
manifest_branch="master",
manifest_file="default.xml",
tarball=None,
**kwargs):
"""
@type manifest_url: string
@param manifest_url: The URL which points at the repo manifests repository.
@type manifest_branch: string
@param manifest_branch: The manifest branch to check out by default.
@type manifest_file: string
@param manifest_file: The manifest to use for sync.
"""
Source.__init__(self, **kwargs)
self.manifest_url = _ComputeRepositoryURL(manifest_url)
self.addFactoryArguments(manifest_url=manifest_url,
manifest_branch=manifest_branch,
manifest_file=manifest_file,
tarball=tarball,
)
self.args.update({'manifest_branch': manifest_branch,
'manifest_file': manifest_file,
'tarball': tarball,
})
def computeSourceRevision(self, changes):
if not changes:
return None
return changes[-1].revision
def parseDownloadProperty(self, s):
"""
        Try to be lenient about the format accepted here.
        Supports several instances of "repo download proj number/patch"
        (a direct copy/paste from the Gerrit web site) or several instances
        of "proj number/patch" (a simpler version).
        This feature allows an integrator to build with several pending
        interdependent changes.
        Returns the list of repo downloads sent to the buildslave.
"""
import re
        if s is None:
return []
re1 = re.compile("repo download ([^ ]+) ([0-9]+/[0-9]+)")
re2 = re.compile("([^ ]+) ([0-9]+/[0-9]+)")
re3 = re.compile("([^ ]+)/([0-9]+/[0-9]+)")
ret = []
for cur_re in [re1, re2, re3]:
res = cur_re.search(s)
while res:
ret.append("%s %s" % (res.group(1), res.group(2)))
s = s[:res.start(0)] + s[res.end(0):]
res = cur_re.search(s)
return ret
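    # Illustrative sketch of what parseDownloadProperty accepts (hypothetical
    # project names and change/patchset numbers): both
    #   "repo download platform/build 123/4"
    # and
    #   "platform/build 123/4 tools/repo 567/8"
    # yield entries of the form "platform/build 123/4", i.e. a list of
    # "project change/patchset" strings handed to the buildslave.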
def startVC(self, branch, revision, patch):
self.args['manifest_url'] = self.manifest_url
# only master has access to properties, so we must implement this here.
downloads = []
# download patches based on GerritChangeSource events
for change in self.build.allChanges():
if (change.properties.has_key("event.type") and
change.properties["event.type"] == "patchset-created"):
downloads.append("%s %s/%s"% (change.properties["event.change.project"],
change.properties["event.change.number"],
change.properties["event.patchSet.number"]))
# download patches based on web site forced build properties:
# "repo_d", "repo_d0", .., "repo_d9"
# "repo_download", "repo_download0", .., "repo_download9"
for propName in ["repo_d"] + ["repo_d%d" % i for i in xrange(0,10)] + \
["repo_download"] + ["repo_download%d" % i for i in xrange(0,10)]:
try:
s = self.build.getProperty(propName)
downloads.extend(self.parseDownloadProperty(s))
except KeyError:
pass
if downloads:
self.args["repo_downloads"] = downloads
self.setProperty("repo_downloads", downloads)
slavever = self.slaveVersion("repo")
if not slavever:
raise BuildSlaveTooOldError("slave is too old, does not know "
"about repo")
cmd = LoggedRemoteCommand("repo", self.args)
self.startCommand(cmd)
def commandComplete(self, cmd):
if cmd.updates.has_key("repo_downloaded"):
repo_downloaded = cmd.updates["repo_downloaded"][-1]
if repo_downloaded:
self.setProperty("repo_downloaded", str(repo_downloaded), "Source")
class Bzr(Source):
"""Check out a source tree from a bzr (Bazaar) repository at 'repourl'.
"""
name = "bzr"
renderables = [ 'repourl', 'baseURL' ]
def __init__(self, repourl=None, baseURL=None, defaultBranch=None,
forceSharedRepo=None,
**kwargs):
"""
@type repourl: string
@param repourl: the URL which points at the bzr repository. This
is used as the default branch. Using C{repourl} does
not enable builds of alternate branches: use
C{baseURL} to enable this. Use either C{repourl} or
C{baseURL}, not both.
@param baseURL: if branches are enabled, this is the base URL to
which a branch name will be appended. It should
probably end in a slash. Use exactly one of
C{repourl} and C{baseURL}.
@param defaultBranch: if branches are enabled, this is the branch
to use if the Build does not specify one
explicitly. It will simply be appended to
C{baseURL} and the result handed to the
                              'bzr checkout' or 'bzr pull' command.
@param forceSharedRepo: Boolean, defaults to False. If set to True,
the working directory will be made into a
bzr shared repository if it is not already.
Shared repository greatly reduces the amount
of history data that needs to be downloaded
if not using update/copy mode, or if using
update/copy mode with multiple branches.
"""
self.repourl = _ComputeRepositoryURL(repourl)
self.baseURL = _ComputeRepositoryURL(baseURL)
self.branch = defaultBranch
Source.__init__(self, **kwargs)
self.addFactoryArguments(repourl=repourl,
baseURL=baseURL,
defaultBranch=defaultBranch,
forceSharedRepo=forceSharedRepo
)
self.args.update({'forceSharedRepo': forceSharedRepo})
if repourl and baseURL:
raise ValueError("you must provide exactly one of repourl and"
" baseURL")
def computeSourceRevision(self, changes):
if not changes:
return None
lastChange = max([int(c.revision) for c in changes])
return lastChange
def startVC(self, branch, revision, patch):
slavever = self.slaveVersion("bzr")
if not slavever:
m = "slave is too old, does not know about bzr"
raise BuildSlaveTooOldError(m)
if self.repourl:
assert not branch # we need baseURL= to use branches
self.args['repourl'] = self.repourl
else:
self.args['repourl'] = self.baseURL + branch
self.args['revision'] = revision
self.args['patch'] = patch
revstuff = []
if branch is not None and branch != self.branch:
revstuff.append("[" + branch + "]")
if revision is not None:
revstuff.append("r%s" % revision)
self.description.extend(revstuff)
self.descriptionDone.extend(revstuff)
cmd = LoggedRemoteCommand("bzr", self.args)
self.startCommand(cmd)
class Mercurial(Source):
"""Check out a source tree from a mercurial repository 'repourl'."""
name = "hg"
renderables = [ 'repourl', 'baseURL' ]
def __init__(self, repourl=None, baseURL=None, defaultBranch=None,
branchType='dirname', clobberOnBranchChange=True, **kwargs):
"""
@type repourl: string
@param repourl: the URL which points at the Mercurial repository.
This uses the 'default' branch unless defaultBranch is
specified below and the C{branchType} is set to
'inrepo'. It is an error to specify a branch without
setting the C{branchType} to 'inrepo'.
@param baseURL: if 'dirname' branches are enabled, this is the base URL
to which a branch name will be appended. It should
probably end in a slash. Use exactly one of C{repourl}
and C{baseURL}.
@param defaultBranch: if branches are enabled, this is the branch
to use if the Build does not specify one
explicitly.
                              For 'dirname' branches, it will simply be
appended to C{baseURL} and the result handed to
the 'hg update' command.
For 'inrepo' branches, this specifies the named
revision to which the tree will update after a
clone.
@param branchType: either 'dirname' or 'inrepo' depending on whether
the branch name should be appended to the C{baseURL}
or the branch is a mercurial named branch and can be
found within the C{repourl}
@param clobberOnBranchChange: boolean, defaults to True. If set and
using inrepos branches, clobber the tree
at each branch change. Otherwise, just
update to the branch.
"""
self.repourl = _ComputeRepositoryURL(repourl)
self.baseURL = _ComputeRepositoryURL(baseURL)
self.branch = defaultBranch
self.branchType = branchType
self.clobberOnBranchChange = clobberOnBranchChange
Source.__init__(self, **kwargs)
self.addFactoryArguments(repourl=repourl,
baseURL=baseURL,
defaultBranch=defaultBranch,
branchType=branchType,
clobberOnBranchChange=clobberOnBranchChange,
)
if repourl and baseURL:
raise ValueError("you must provide exactly one of repourl and"
" baseURL")
def startVC(self, branch, revision, patch):
slavever = self.slaveVersion("hg")
if not slavever:
raise BuildSlaveTooOldError("slave is too old, does not know "
"about hg")
if self.repourl:
# we need baseURL= to use dirname branches
assert self.branchType == 'inrepo' or not branch
self.args['repourl'] = self.repourl
if branch:
self.args['branch'] = branch
else:
self.args['repourl'] = self.baseURL + (branch or '')
self.args['revision'] = revision
self.args['patch'] = patch
self.args['clobberOnBranchChange'] = self.clobberOnBranchChange
self.args['branchType'] = self.branchType
revstuff = []
if branch is not None and branch != self.branch:
revstuff.append("[branch]")
self.description.extend(revstuff)
self.descriptionDone.extend(revstuff)
cmd = LoggedRemoteCommand("hg", self.args)
self.startCommand(cmd)
def computeSourceRevision(self, changes):
if not changes:
return None
# without knowing the revision ancestry graph, we can't sort the
# changes at all. So for now, assume they were given to us in sorted
# order, and just pay attention to the last one. See ticket #103 for
# more details.
if len(changes) > 1:
log.msg("Mercurial.computeSourceRevision: warning: "
"there are %d changes here, assuming the last one is "
"the most recent" % len(changes))
return changes[-1].revision
class P4(Source):
""" P4 is a class for accessing perforce revision control"""
name = "p4"
renderables = [ 'p4base' ]
def __init__(self, p4base=None, defaultBranch=None, p4port=None, p4user=None,
p4passwd=None, p4extra_views=[], p4line_end='local',
p4client='buildbot_%(slave)s_%(builder)s', **kwargs):
"""
@type p4base: string
@param p4base: A view into a perforce depot, typically
"//depot/proj/"
@type defaultBranch: string
@param defaultBranch: Identify a branch to build by default. Perforce
is a view based branching system. So, the branch
is normally the name after the base. For example,
branch=1.0 is view=//depot/proj/1.0/...
branch=1.1 is view=//depot/proj/1.1/...
@type p4port: string
        @param p4port: Specify the perforce server to connect to, in the format
<host>:<port>. Example "perforce.example.com:1666"
@type p4user: string
@param p4user: The perforce user to run the command as.
@type p4passwd: string
@param p4passwd: The password for the perforce user.
@type p4extra_views: list of tuples
@param p4extra_views: Extra views to be added to
the client that is being used.
@type p4line_end: string
@param p4line_end: value of the LineEnd client specification property
@type p4client: string
@param p4client: The perforce client to use for this buildslave.
"""
self.p4base = _ComputeRepositoryURL(p4base)
self.branch = defaultBranch
Source.__init__(self, **kwargs)
self.addFactoryArguments(p4base=p4base,
defaultBranch=defaultBranch,
p4port=p4port,
p4user=p4user,
p4passwd=p4passwd,
p4extra_views=p4extra_views,
p4line_end=p4line_end,
p4client=p4client,
)
self.args['p4port'] = p4port
self.args['p4user'] = p4user
self.args['p4passwd'] = p4passwd
self.args['p4extra_views'] = p4extra_views
self.args['p4line_end'] = p4line_end
self.p4client = p4client
def setBuild(self, build):
Source.setBuild(self, build)
self.args['p4client'] = self.p4client % {
'slave': build.slavename,
'builder': build.builder.name,
}
def computeSourceRevision(self, changes):
if not changes:
return None
lastChange = max([int(c.revision) for c in changes])
return lastChange
def startVC(self, branch, revision, patch):
slavever = self.slaveVersion("p4")
assert slavever, "slave is too old, does not know about p4"
args = dict(self.args)
args['p4base'] = self.p4base
args['branch'] = branch or self.branch
args['revision'] = revision
args['patch'] = patch
cmd = LoggedRemoteCommand("p4", args)
self.startCommand(cmd)
class P4Sync(Source):
"""This is a partial solution for using a P4 source repository. You are
required to manually set up each build slave with a useful P4
environment, which means setting various per-slave environment variables,
and creating a P4 client specification which maps the right files into
the slave's working directory. Once you have done that, this step merely
performs a 'p4 sync' to update that workspace with the newest files.
Each slave needs the following environment:
- PATH: the 'p4' binary must be on the slave's PATH
- P4USER: each slave needs a distinct user account
- P4CLIENT: each slave needs a distinct client specification
You should use 'p4 client' (?) to set up a client view spec which maps
the desired files into $SLAVEBASE/$BUILDERBASE/source .
"""
name = "p4sync"
def __init__(self, p4port, p4user, p4passwd, p4client, **kwargs):
assert kwargs['mode'] == "copy", "P4Sync can only be used in mode=copy"
self.branch = None
Source.__init__(self, **kwargs)
self.addFactoryArguments(p4port=p4port,
p4user=p4user,
p4passwd=p4passwd,
p4client=p4client,
)
self.args['p4port'] = p4port
self.args['p4user'] = p4user
self.args['p4passwd'] = p4passwd
self.args['p4client'] = p4client
def computeSourceRevision(self, changes):
if not changes:
return None
lastChange = max([int(c.revision) for c in changes])
return lastChange
def startVC(self, branch, revision, patch):
slavever = self.slaveVersion("p4sync")
assert slavever, "slave is too old, does not know about p4"
cmd = LoggedRemoteCommand("p4sync", self.args)
self.startCommand(cmd)
class Monotone(Source):
"""Check out a source tree from a monotone repository 'repourl'."""
name = "mtn"
renderables = [ 'repourl' ]
def __init__(self, repourl=None, branch=None, progress=False, **kwargs):
"""
@type repourl: string
@param repourl: the URI which points at the monotone repository.
@type branch: string
@param branch: The branch or tag to check out by default. If
a build specifies a different branch, it will
be used instead of this.
@type progress: boolean
@param progress: Pass the --ticker=dot option when pulling. This
can solve long fetches getting killed due to
lack of output.
"""
Source.__init__(self, **kwargs)
self.repourl = _ComputeRepositoryURL(repourl)
if (not repourl):
raise ValueError("you must provide a repository uri in 'repourl'")
if (not branch):
raise ValueError("you must provide a default branch in 'branch'")
self.addFactoryArguments(repourl=repourl,
branch=branch,
progress=progress,
)
self.args.update({'branch': branch,
'progress': progress,
})
def startVC(self, branch, revision, patch):
slavever = self.slaveVersion("mtn")
if not slavever:
raise BuildSlaveTooOldError("slave is too old, does not know "
"about mtn")
self.args['repourl'] = self.repourl
if branch:
self.args['branch'] = branch
self.args['revision'] = revision
self.args['patch'] = patch
cmd = LoggedRemoteCommand("mtn", self.args)
self.startCommand(cmd)
def computeSourceRevision(self, changes):
if not changes:
return None
# without knowing the revision ancestry graph, we can't sort the
# changes at all. So for now, assume they were given to us in sorted
# order, and just pay attention to the last one. See ticket #103 for
# more details.
if len(changes) > 1:
log.msg("Monotone.computeSourceRevision: warning: "
"there are %d changes here, assuming the last one is "
"the most recent" % len(changes))
return changes[-1].revision
| gpl-2.0 | -4,113,521,986,634,069,500 | 41.535376 | 116 | 0.558132 | false | 4.683264 | false | false | false |
UQ-UQx/edx-platform_lti | lms/djangoapps/courseware/tests/test_recommender.py | 2 | 32023 | """
This test file will run through some XBlock test scenarios regarding the
recommender system
"""
import json
import itertools
import StringIO
from ddt import ddt, data
from copy import deepcopy
from django.conf import settings
from django.core.urlresolvers import reverse
from django.test.utils import override_settings
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase, mixed_store_config
from courseware.tests.helpers import LoginEnrollmentTestCase
from courseware.tests.factories import GlobalStaffFactory
from lms.djangoapps.lms_xblock.runtime import quote_slashes
MODULESTORE_CONFIG = mixed_store_config(settings.COMMON_TEST_DATA_ROOT, {}, include_xml=False)
@override_settings(MODULESTORE=MODULESTORE_CONFIG)
class TestRecommender(ModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Check that Recommender state is saved properly
"""
STUDENTS = [
{'email': '[email protected]', 'password': 'foo'},
{'email': '[email protected]', 'password': 'foo'}
]
XBLOCK_NAMES = ['recommender', 'recommender_second']
def setUp(self):
self.course = CourseFactory.create(
display_name='Recommender_Test_Course'
)
self.chapter = ItemFactory.create(
parent=self.course, display_name='Overview'
)
self.section = ItemFactory.create(
parent=self.chapter, display_name='Welcome'
)
self.unit = ItemFactory.create(
parent=self.section, display_name='New Unit'
)
self.xblock = ItemFactory.create(
parent=self.unit,
category='recommender',
display_name='recommender'
)
self.xblock2 = ItemFactory.create(
parent=self.unit,
category='recommender',
display_name='recommender_second'
)
self.course_url = reverse(
'courseware_section',
kwargs={
'course_id': self.course.id.to_deprecated_string(),
'chapter': 'Overview',
'section': 'Welcome',
}
)
self.resource_urls = [
(
"https://courses.edx.org/courses/MITx/3.091X/"
"2013_Fall/courseware/SP13_Week_4/"
"SP13_Periodic_Trends_and_Bonding/"
),
(
"https://courses.edx.org/courses/MITx/3.091X/"
"2013_Fall/courseware/SP13_Week_4/SP13_Covalent_Bonding/"
)
]
self.test_recommendations = {
self.resource_urls[0]: {
"title": "Covalent bonding and periodic trends",
"url": self.resource_urls[0],
"description": (
"http://people.csail.mit.edu/swli/edx/"
"recommendation/img/videopage1.png"
),
"descriptionText": (
"short description for Covalent bonding "
"and periodic trends"
)
},
self.resource_urls[1]: {
"title": "Polar covalent bonds and electronegativity",
"url": self.resource_urls[1],
"description": (
"http://people.csail.mit.edu/swli/edx/"
"recommendation/img/videopage2.png"
),
"descriptionText": (
"short description for Polar covalent "
"bonds and electronegativity"
)
}
}
for idx, student in enumerate(self.STUDENTS):
username = "u{}".format(idx)
self.create_account(username, student['email'], student['password'])
self.activate_user(student['email'])
self.staff_user = GlobalStaffFactory()
def get_handler_url(self, handler, xblock_name=None):
"""
Get url for the specified xblock handler
"""
if xblock_name is None:
xblock_name = TestRecommender.XBLOCK_NAMES[0]
return reverse('xblock_handler', kwargs={
'course_id': self.course.id.to_deprecated_string(),
'usage_id': quote_slashes(self.course.id.make_usage_key('recommender', xblock_name).to_deprecated_string()),
'handler': handler,
'suffix': ''
})
def enroll_student(self, email, password):
"""
Student login and enroll for the course
"""
self.login(email, password)
self.enroll(self.course, verify=True)
def enroll_staff(self, staff):
"""
Staff login and enroll for the course
"""
email = staff.email
password = 'test'
self.login(email, password)
self.enroll(self.course, verify=True)
def initialize_database_by_id(self, handler, resource_id, times, xblock_name=None):
"""
        Call an ajax event (vote, delete, endorse) on a resource by its id
several times
"""
if xblock_name is None:
xblock_name = TestRecommender.XBLOCK_NAMES[0]
url = self.get_handler_url(handler, xblock_name)
for _ in range(0, times):
self.client.post(url, json.dumps({'id': resource_id}), '')
def call_event(self, handler, resource, xblock_name=None):
"""
        Call an ajax event (add, edit, flag, etc.) by specifying the resource
it takes
"""
if xblock_name is None:
xblock_name = TestRecommender.XBLOCK_NAMES[0]
url = self.get_handler_url(handler, xblock_name)
resp = self.client.post(url, json.dumps(resource), '')
return json.loads(resp.content)
def check_event_response_by_element(self, handler, resource, resp_key, resp_val, xblock_name=None):
"""
Call the event specified by the handler with the resource, and check
whether the element (resp_key) in response is as expected (resp_val)
"""
if xblock_name is None:
xblock_name = TestRecommender.XBLOCK_NAMES[0]
resp = self.call_event(handler, resource, xblock_name)
self.assertEqual(resp[resp_key], resp_val)
self.assert_request_status_code(200, self.course_url)
class TestRecommenderCreateFromEmpty(TestRecommender):
"""
Check whether we can add resources to an empty database correctly
"""
def test_add_resource(self):
"""
        Verify the addition of a new resource is handled correctly
"""
self.enroll_student(self.STUDENTS[0]['email'], self.STUDENTS[0]['password'])
# Check whether adding new resource is successful
for resource_id, resource in self.test_recommendations.iteritems():
for xblock_name in self.XBLOCK_NAMES:
result = self.call_event('add_resource', resource, xblock_name)
expected_result = {
'Success': True,
'upvotes': 0,
'downvotes': 0,
'id': resource_id
}
for field in resource:
expected_result[field] = resource[field]
self.assertDictEqual(result, expected_result)
self.assert_request_status_code(200, self.course_url)
def test_import_resources_by_student(self):
"""
Test the function for importing all resources into the Recommender
by a student.
"""
self.enroll_student(self.STUDENTS[0]['email'], self.STUDENTS[0]['password'])
# Preparing imported resources
initial_configuration = {
'flagged_accum_resources': {},
'endorsed_recommendation_reasons': [],
'endorsed_recommendation_ids': [],
'deendorsed_recommendations': {},
'recommendations': self.test_recommendations[self.resource_urls[0]]
}
# Importing resources
f_handler = StringIO.StringIO(json.dumps(initial_configuration, sort_keys=True))
f_handler.name = 'import_resources'
url = self.get_handler_url('import_resources')
resp = self.client.post(url, {'file': f_handler})
self.assertEqual(resp.content, 'NOT_A_STAFF')
self.assert_request_status_code(200, self.course_url)
def test_import_resources(self):
"""
Test the function for importing all resources into the Recommender.
"""
self.enroll_staff(self.staff_user)
# Preparing imported resources
initial_configuration = {
'flagged_accum_resources': {},
'endorsed_recommendation_reasons': [],
'endorsed_recommendation_ids': [],
'deendorsed_recommendations': {},
'recommendations': self.test_recommendations[self.resource_urls[0]]
}
# Importing resources
f_handler = StringIO.StringIO(json.dumps(initial_configuration, sort_keys=True))
f_handler.name = 'import_resources'
url = self.get_handler_url('import_resources')
resp = self.client.post(url, {'file': f_handler})
self.assertEqual(resp.content, json.dumps(initial_configuration, sort_keys=True))
self.assert_request_status_code(200, self.course_url)
class TestRecommenderWithResources(TestRecommender):
"""
Check whether we can add/edit/flag/export resources correctly
"""
def setUp(self):
# call the setUp function from the superclass
super(TestRecommenderWithResources, self).setUp()
self.resource_id = self.resource_urls[0]
self.resource_id_second = self.resource_urls[1]
self.non_existing_resource_id = 'An non-existing id'
self.set_up_resources()
def set_up_resources(self):
"""
Set up resources and enroll staff
"""
self.logout()
self.enroll_staff(self.staff_user)
# Add resources, assume correct here, tested in test_add_resource
for resource, xblock_name in itertools.product(self.test_recommendations.values(), self.XBLOCK_NAMES):
self.call_event('add_resource', resource, xblock_name)
def generate_edit_resource(self, resource_id):
"""
Based on the given resource (specified by resource_id), this function
generate a new one for testing 'edit_resource' event
"""
resource = {"id": resource_id}
edited_recommendations = {
key: value + " edited" for key, value in self.test_recommendations[self.resource_id].iteritems()
}
resource.update(edited_recommendations)
return resource
def test_add_redundant_resource(self):
"""
Verify the addition of a redundant resource (url) is rejected
"""
for suffix in ['', '#IAmSuffix', '%23IAmSuffix']:
resource = deepcopy(self.test_recommendations[self.resource_id])
resource['url'] += suffix
result = self.call_event('add_resource', resource)
expected_result = {
'Success': False,
'error': (
'The resource you are attempting to '
'provide has already existed'
),
'dup_id': self.resource_id
}
for field in resource:
expected_result[field] = resource[field]
expected_result['dup_' + field] = self.test_recommendations[self.resource_id][field]
self.assertDictEqual(result, expected_result)
self.assert_request_status_code(200, self.course_url)
def test_add_deendorsed_resource(self):
"""
Verify the addition of a deendorsed resource (url) is rejected
"""
self.call_event('deendorse_resource', {"id": self.resource_id, 'reason': ''})
err_msg = 'The resource you are attempting to provide has been de-endorsed by staff, because: .*'
for suffix in ['', '#IAmSuffix', '%23IAmSuffix']:
resource = deepcopy(self.test_recommendations[self.resource_id])
resource['url'] += suffix
resp = self.call_event('add_resource', resource)
self.assertRegexpMatches(resp['error'], err_msg)
self.assert_request_status_code(200, self.course_url)
def test_edit_resource_non_existing(self):
"""
Edit a non-existing resource
"""
resp = self.call_event(
'edit_resource', self.generate_edit_resource(self.non_existing_resource_id)
)
self.assertEqual(resp['error'], 'The selected resource is not existing')
self.assert_request_status_code(200, self.course_url)
def test_edit_redundant_resource(self):
"""
Check whether changing the url to the one of 'another' resource is
rejected
"""
for suffix in ['', '#IAmSuffix', '%23IAmSuffix']:
resource = self.generate_edit_resource(self.resource_id)
resource['url'] = self.resource_id_second + suffix
resp = self.call_event('edit_resource', resource)
self.assertEqual(resp['error'], 'The resource you are attempting to provide has already existed')
self.assertEqual(resp['dup_id'], self.resource_id_second)
self.assert_request_status_code(200, self.course_url)
def test_edit_deendorsed_resource(self):
"""
Check whether changing the url to the one of a deendorsed resource is
rejected
"""
self.call_event('deendorse_resource', {"id": self.resource_id_second, 'reason': ''})
err_msg = 'The resource you are attempting to provide has been de-endorsed by staff, because: .*'
for suffix in ['', '#IAmSuffix', '%23IAmSuffix']:
resource = self.generate_edit_resource(self.resource_id)
resource['url'] = self.resource_id_second + suffix
resp = self.call_event('edit_resource', resource)
self.assertRegexpMatches(resp['error'], err_msg)
self.assertEqual(resp['dup_id'], self.resource_id_second)
self.assert_request_status_code(200, self.course_url)
def test_edit_resource(self):
"""
Check whether changing the content of resource is successful
"""
resp = self.call_event(
'edit_resource', self.generate_edit_resource(self.resource_id)
)
self.assertEqual(resp['Success'], True)
self.assert_request_status_code(200, self.course_url)
def test_edit_resource_same_url(self):
"""
Check whether changing the content (except for url) of resource is successful
"""
resource = self.generate_edit_resource(self.resource_id)
for suffix in ['', '#IAmSuffix', '%23IAmSuffix']:
resource['url'] = self.resource_id + suffix
resp = self.call_event('edit_resource', resource)
self.assertEqual(resp['Success'], True)
self.assert_request_status_code(200, self.course_url)
def test_edit_then_add_resource(self):
"""
Check whether we can add back an edited resource
"""
self.call_event('edit_resource', self.generate_edit_resource(self.resource_id))
# Test
resp = self.call_event('add_resource', self.test_recommendations[self.resource_id])
self.assertEqual(resp['id'], self.resource_id)
self.assert_request_status_code(200, self.course_url)
def test_edit_resources_in_different_xblocks(self):
"""
Check whether changing the content of resource is successful in two
different xblocks
"""
resource = self.generate_edit_resource(self.resource_id)
for xblock_name in self.XBLOCK_NAMES:
resp = self.call_event('edit_resource', resource, xblock_name)
self.assertEqual(resp['Success'], True)
self.assert_request_status_code(200, self.course_url)
def test_flag_resource_wo_reason(self):
"""
Flag a resource as problematic, without providing the reason
"""
resource = {'id': self.resource_id, 'isProblematic': True, 'reason': ''}
# Test
self.check_event_response_by_element('flag_resource', resource, 'reason', '')
def test_flag_resource_w_reason(self):
"""
Flag a resource as problematic, with providing the reason
"""
resource = {'id': self.resource_id, 'isProblematic': True, 'reason': 'reason 0'}
# Test
self.check_event_response_by_element('flag_resource', resource, 'reason', 'reason 0')
def test_flag_resource_change_reason(self):
"""
Flag a resource as problematic twice, with different reasons
"""
resource = {'id': self.resource_id, 'isProblematic': True, 'reason': 'reason 0'}
self.call_event('flag_resource', resource)
# Test
resource['reason'] = 'reason 1'
resp = self.call_event('flag_resource', resource)
self.assertEqual(resp['oldReason'], 'reason 0')
self.assertEqual(resp['reason'], 'reason 1')
self.assert_request_status_code(200, self.course_url)
def test_flag_resources_in_different_xblocks(self):
"""
Flag resources as problematic in two different xblocks
"""
resource = {'id': self.resource_id, 'isProblematic': True, 'reason': 'reason 0'}
# Test
for xblock_name in self.XBLOCK_NAMES:
self.check_event_response_by_element('flag_resource', resource, 'reason', 'reason 0', xblock_name)
def test_flag_resources_by_different_users(self):
"""
        Different users can't see each other's flag results
"""
resource = {'id': self.resource_id, 'isProblematic': True, 'reason': 'reason 0'}
self.call_event('flag_resource', resource)
self.logout()
self.enroll_student(self.STUDENTS[0]['email'], self.STUDENTS[0]['password'])
# Test
resp = self.call_event('flag_resource', resource)
# The second user won't see the reason provided by the first user
self.assertNotIn('oldReason', resp)
self.assertEqual(resp['reason'], 'reason 0')
self.assert_request_status_code(200, self.course_url)
def test_export_resources(self):
"""
Test the function for exporting all resources from the Recommender.
"""
self.call_event('deendorse_resource', {"id": self.resource_id, 'reason': ''})
self.call_event('endorse_resource', {"id": self.resource_id_second, 'reason': ''})
# Test
resp = self.call_event('export_resources', {})
self.assertIn(self.resource_id_second, resp['export']['recommendations'])
self.assertNotIn(self.resource_id, resp['export']['recommendations'])
self.assertIn(self.resource_id_second, resp['export']['endorsed_recommendation_ids'])
self.assertIn(self.resource_id, resp['export']['deendorsed_recommendations'])
self.assert_request_status_code(200, self.course_url)
@ddt
class TestRecommenderVoteWithResources(TestRecommenderWithResources):
"""
Check whether we can vote resources correctly
"""
def setUp(self):
# call the setUp function from the superclass
super(TestRecommenderVoteWithResources, self).setUp()
@data(
{'event': 'recommender_upvote'},
{'event': 'recommender_downvote'}
)
def test_vote_resource_non_existing(self, test_case):
"""
Vote a non-existing resource
"""
resource = {"id": self.non_existing_resource_id, 'event': test_case['event']}
self.check_event_response_by_element('handle_vote', resource, 'error', 'The selected resource is not existing')
@data(
{'event': 'recommender_upvote', 'new_votes': 1},
{'event': 'recommender_downvote', 'new_votes': -1}
)
def test_vote_resource_once(self, test_case):
"""
Vote a resource
"""
resource = {"id": self.resource_id, 'event': test_case['event']}
self.check_event_response_by_element('handle_vote', resource, 'newVotes', test_case['new_votes'])
@data(
{'event': 'recommender_upvote', 'new_votes': 0},
{'event': 'recommender_downvote', 'new_votes': 0}
)
def test_vote_resource_twice(self, test_case):
"""
Vote a resource twice
"""
resource = {"id": self.resource_id, 'event': test_case['event']}
self.call_event('handle_vote', resource)
# Test
self.check_event_response_by_element('handle_vote', resource, 'newVotes', test_case['new_votes'])
@data(
{'event': 'recommender_upvote', 'new_votes': 1},
{'event': 'recommender_downvote', 'new_votes': -1}
)
def test_vote_resource_thrice(self, test_case):
"""
Vote a resource thrice
"""
resource = {"id": self.resource_id, 'event': test_case['event']}
for _ in range(0, 2):
self.call_event('handle_vote', resource)
# Test
self.check_event_response_by_element('handle_vote', resource, 'newVotes', test_case['new_votes'])
@data(
{'event': 'recommender_upvote', 'event_second': 'recommender_downvote', 'new_votes': -1},
{'event': 'recommender_downvote', 'event_second': 'recommender_upvote', 'new_votes': 1}
)
def test_switch_vote_resource(self, test_case):
"""
Switch the vote of a resource
"""
resource = {"id": self.resource_id, 'event': test_case['event']}
self.call_event('handle_vote', resource)
# Test
resource['event'] = test_case['event_second']
self.check_event_response_by_element('handle_vote', resource, 'newVotes', test_case['new_votes'])
@data(
{'event': 'recommender_upvote', 'new_votes': 1},
{'event': 'recommender_downvote', 'new_votes': -1}
)
def test_vote_different_resources(self, test_case):
"""
Vote two different resources
"""
resource = {"id": self.resource_id, 'event': test_case['event']}
self.call_event('handle_vote', resource)
# Test
resource['id'] = self.resource_id_second
self.check_event_response_by_element('handle_vote', resource, 'newVotes', test_case['new_votes'])
@data(
{'event': 'recommender_upvote', 'new_votes': 1},
{'event': 'recommender_downvote', 'new_votes': -1}
)
def test_vote_resources_in_different_xblocks(self, test_case):
"""
Vote two resources in two different xblocks
"""
resource = {"id": self.resource_id, 'event': test_case['event']}
self.call_event('handle_vote', resource)
# Test
self.check_event_response_by_element('handle_vote', resource, 'newVotes', test_case['new_votes'], self.XBLOCK_NAMES[1])
@data(
{'event': 'recommender_upvote', 'new_votes': 2},
{'event': 'recommender_downvote', 'new_votes': -2}
)
def test_vote_resource_by_different_users(self, test_case):
"""
Vote resource by two different users
"""
resource = {"id": self.resource_id, 'event': test_case['event']}
self.call_event('handle_vote', resource)
self.logout()
self.enroll_student(self.STUDENTS[0]['email'], self.STUDENTS[0]['password'])
# Test
self.check_event_response_by_element('handle_vote', resource, 'newVotes', test_case['new_votes'])
@ddt
class TestRecommenderStaffFeedbackWithResources(TestRecommenderWithResources):
"""
Check whether we can deendorse/endorse resources correctly
"""
def setUp(self):
# call the setUp function from the superclass
super(TestRecommenderStaffFeedbackWithResources, self).setUp()
@data('deendorse_resource', 'endorse_resource')
def test_deendorse_or_endorse_resource_non_existing(self, test_case):
"""
Deendorse/endorse a non-existing resource
"""
resource = {"id": self.non_existing_resource_id, 'reason': ''}
self.check_event_response_by_element(test_case, resource, 'error', 'The selected resource is not existing')
@data(
{'handler': 'deendorse_resource', 'key': 'Success', 'val': True},
{'handler': 'endorse_resource', 'key': 'status', 'val': 'endorsement'}
)
def test_deendorse_or_endorse_resource_once(self, test_case):
"""
Deendorse/endorse a resource
"""
resource = {"id": self.resource_id, 'reason': ''}
self.check_event_response_by_element(test_case['handler'], resource, test_case['key'], test_case['val'])
@data(
{'handler': 'deendorse_resource', 'key': 'error', 'val': 'The selected resource is not existing'},
{'handler': 'endorse_resource', 'key': 'status', 'val': 'undo endorsement'}
)
def test_deendorse_or_endorse_resource_twice(self, test_case):
"""
Deendorse/endorse a resource twice
"""
resource = {"id": self.resource_id, 'reason': ''}
self.call_event(test_case['handler'], resource)
# Test
self.check_event_response_by_element(test_case['handler'], resource, test_case['key'], test_case['val'])
@data(
{'handler': 'deendorse_resource', 'key': 'error', 'val': 'The selected resource is not existing'},
{'handler': 'endorse_resource', 'key': 'status', 'val': 'endorsement'}
)
def test_endorse_resource_thrice(self, test_case):
"""
Deendorse/endorse a resource thrice
"""
resource = {"id": self.resource_id, 'reason': ''}
for _ in range(0, 2):
self.call_event(test_case['handler'], resource)
# Test
self.check_event_response_by_element(test_case['handler'], resource, test_case['key'], test_case['val'])
@data(
{'handler': 'deendorse_resource', 'key': 'Success', 'val': True},
{'handler': 'endorse_resource', 'key': 'status', 'val': 'endorsement'}
)
def test_deendorse_or_endorse_different_resources(self, test_case):
"""
Deendorse/endorse two different resources
"""
self.call_event(test_case['handler'], {"id": self.resource_id, 'reason': ''})
# Test
resource = {"id": self.resource_id_second, 'reason': ''}
self.check_event_response_by_element(test_case['handler'], resource, test_case['key'], test_case['val'])
@data(
{'handler': 'deendorse_resource', 'key': 'Success', 'val': True},
{'handler': 'endorse_resource', 'key': 'status', 'val': 'endorsement'}
)
def test_deendorse_or_endorse_resources_in_different_xblocks(self, test_case):
"""
Deendorse/endorse two resources in two different xblocks
"""
self.call_event(test_case['handler'], {"id": self.resource_id, 'reason': ''})
# Test
resource = {"id": self.resource_id, 'reason': ''}
self.check_event_response_by_element(test_case['handler'], resource, test_case['key'], test_case['val'], self.XBLOCK_NAMES[1])
@data(
{'handler': 'deendorse_resource', 'key': 'error', 'val': 'Deendorse resource without permission'},
{'handler': 'endorse_resource', 'key': 'error', 'val': 'Endorse resource without permission'}
)
def test_deendorse_or_endorse_resource_by_student(self, test_case):
"""
Deendorse/endorse resource by a student
"""
self.logout()
self.enroll_student(self.STUDENTS[0]['email'], self.STUDENTS[0]['password'])
# Test
resource = {"id": self.resource_id, 'reason': ''}
self.check_event_response_by_element(test_case['handler'], resource, test_case['key'], test_case['val'])
@ddt
class TestRecommenderFileUploading(TestRecommender):
"""
Check whether we can handle file uploading correctly
"""
def setUp(self):
# call the setUp function from the superclass
super(TestRecommenderFileUploading, self).setUp()
def attempt_upload_file_and_verify_result(self, test_case, xblock_name=None):
"""
        Run one test case: create an in-memory file, upload it by calling
        the corresponding ajax event, and verify that the upload succeeds
        or is rejected as expected.
"""
if xblock_name is None:
xblock_name = TestRecommender.XBLOCK_NAMES[0]
f_handler = StringIO.StringIO(test_case['magic_number'].decode('hex'))
f_handler.content_type = test_case['mimetypes']
f_handler.name = 'file' + test_case['suffixes']
url = self.get_handler_url('upload_screenshot', xblock_name)
resp = self.client.post(url, {'file': f_handler})
self.assertRegexpMatches(resp.content, test_case['response_regexp'])
self.assert_request_status_code(200, self.course_url)
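    # Note on the test data below: the hex strings are file "magic numbers".
    # '89504e470d0a1a0a' is the PNG signature, '474946383761' and
    # '474946383961' are GIF87a/GIF89a, and 'ffd8'/'ffd9' are the JPEG start
    # and end markers, so each case pairs a file extension with a matching or
    # deliberately mismatched signature.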
@data(
{
'suffixes': '.csv',
'magic_number': 'ffff',
'mimetypes': 'text/plain',
'response_regexp': 'FILE_TYPE_ERROR'
}, # Upload file with wrong extension name
{
'suffixes': '.gif',
'magic_number': '89504e470d0a1a0a',
'mimetypes': 'image/gif',
'response_regexp': 'FILE_TYPE_ERROR'
}, # Upload file with wrong magic number
{
'suffixes': '.jpg',
'magic_number': '89504e470d0a1a0a',
'mimetypes': 'image/jpeg',
'response_regexp': 'FILE_TYPE_ERROR'
}, # Upload file with wrong magic number
{
'suffixes': '.png',
'magic_number': '474946383761',
'mimetypes': 'image/png',
'response_regexp': 'FILE_TYPE_ERROR'
}, # Upload file with wrong magic number
{
'suffixes': '.jpg',
'magic_number': '474946383761',
'mimetypes': 'image/jpeg',
'response_regexp': 'FILE_TYPE_ERROR'
}, # Upload file with wrong magic number
{
'suffixes': '.png',
'magic_number': 'ffd8ffd9',
'mimetypes': 'image/png',
'response_regexp': 'FILE_TYPE_ERROR'
}, # Upload file with wrong magic number
{
'suffixes': '.gif',
'magic_number': 'ffd8ffd9',
'mimetypes': 'image/gif',
'response_regexp': 'FILE_TYPE_ERROR'
}
)
def test_upload_screenshot_wrong_file_type(self, test_case):
"""
        Verify that file uploading fails correctly when a file with the
        wrong type (extension/magic number) is provided
"""
self.enroll_staff(self.staff_user)
# Upload file with wrong extension name or magic number
self.attempt_upload_file_and_verify_result(test_case)
self.assert_request_status_code(200, self.course_url)
@data(
{
'suffixes': '.png',
'magic_number': '89504e470d0a1a0a',
'mimetypes': 'image/png',
'response_regexp': 'fs://.*.png'
},
{
'suffixes': '.gif',
'magic_number': '474946383961',
'mimetypes': 'image/gif',
'response_regexp': 'fs://.*.gif'
},
{
'suffixes': '.gif',
'magic_number': '474946383761',
'mimetypes': 'image/gif',
'response_regexp': 'fs://.*.gif'
},
{
'suffixes': '.jpg',
'magic_number': 'ffd8ffd9',
'mimetypes': 'image/jpeg',
'response_regexp': 'fs://.*.jpeg'
}
)
def test_upload_screenshot_correct_file_type(self, test_case):
"""
Verify the file type checking in the file uploading method is
successful.
"""
self.enroll_staff(self.staff_user)
# Upload file with correct extension name and magic number
self.attempt_upload_file_and_verify_result(test_case)
self.assert_request_status_code(200, self.course_url)
| agpl-3.0 | 2,393,517,875,017,542,000 | 39.382093 | 134 | 0.593448 | false | 3.913835 | true | false | false |
Curbfeeler/PinbotFromES | multiball.py | 1 | 10422 | #################################################################################
##____ ___ _ _ ____ ___ _____ ____ ___
#| _ \_ _| \ | | __ ) / _ \_ _| |___ \ / _ \
#| |_) | || \| | _ \| | | || | __) || | | |
#| __/| || |\ | |_) | |_| || | / __/ | |_| |
#|_|__|___|_|_\_|____/_\___/_|_| __|_____(_)___/_____ ___ ___ _ _
#| _ \ | _ \ / _ \ / ___| | ____| _ \_ _|_ _|_ _/ _ \| \ | |
#| |_) |____| |_) | | | | | | _| | | | | | | | | | | | | \| |
#| __/_____| _ <| |_| | |___ | |___| |_| | | | | | | |_| | |\ |
#|_| |_| \_\\___/ \____| |_____|____/___| |_| |___\___/|_| \_|
##
## A P-ROC Project by Dan Myers, Copyright 2013-2014
## Built on the PyProcGame Framework from Adam Preble and Gerry Stellenberg
## Thanks to Scott Danesi for his Earthshaker Project, which is my starting point
#################################################################################
#################################################################################
## __ _____ ____ ______________ ___ __ __
## / |/ / / / / / /_ __/ _/ __ )/ | / / / /
## / /|_/ / / / / / / / / // __ / /| | / / / /
## / / / / /_/ / /___/ / _/ // /_/ / ___ |/ /___/ /___
## /_/ /_/\____/_____/_/ /___/_____/_/ |_/_____/_____/
##
#################################################################################
import procgame.game
from procgame import *
import pinproc
from random import choice
from random import seed
class Multiball(game.Mode):
def __init__(self, game, priority):
super(Multiball, self).__init__(game, priority)
self.ballsLocked = 0
self.ballLock1Lit = False
self.ballLock2Lit = False
#self.ballLock3Lit = False
self.multiballStarting = False
#self.multiballIntroLength = 11.287
def mode_started(self):
self.getUserStats()
self.update_lamps()
return super(Multiball, self).mode_started()
def mode_stopped(self):
self.stopMultiball()
pass
def update_lamps(self):
print "Update Lamps: Multiball"
#self.disableLockLamps()
#if (self.ballLock1Lit == True):
#self.game.lamps.dropHoleLock.schedule(schedule=0xFF00FF00, cycle_seconds=0, now=True)
#self.game.lamps.rightRampLock.schedule(schedule=0x00FF00FF, cycle_seconds=0, now=True)
#print "Lock 1 is Lit"
#elif (self.ballLock2Lit == True):
#self.game.lamps.dropHoleLock.schedule(schedule=0xFF00FF00, cycle_seconds=0, now=True)
#self.game.lamps.rightRampLock.schedule(schedule=0x00FF00FF, cycle_seconds=0, now=True)
#print "Lock 2 is Lit"
#elif (self.ballLock3Lit == True):
#self.game.lamps.dropHoleLock.schedule(schedule=0xFF00FF00, cycle_seconds=0, now=True)
#self.game.lamps.rightRampLock.schedule(schedule=0x00FF00FF, cycle_seconds=0, now=True)
#print "Lock 3 is Lit"
def open_visor(self):
self.game.coils.visorMotor.enable()
self.ballLock1Lit = True
self.game.utilities.set_player_stats('lock1_lit', self.ballLock1Lit)
self.ballLock2Lit = True
self.game.utilities.set_player_stats('lock2_lit',self.ballLock2Lit)
#def disableLockLamps(self):
#self.game.lamps.rightRampLock.disable()
#self.game.lamps.ejectLock.disable()
#self.game.lamps.dropHoleLock.disable()
def getUserStats(self):
self.ballLock1Lit = self.game.utilities.get_player_stats('lock1_lit')
self.ballLock2Lit = self.game.utilities.get_player_stats('lock2_lit')
#self.ballLock3Lit = self.game.utilities.get_player_stats('lock3_lit')
self.ballsLocked = self.game.utilities.get_player_stats('balls_locked')
print "Lock 1: " + str(self.game.utilities.get_player_stats('lock1_lit'))
print "Lock 2: " + str(self.game.utilities.get_player_stats('lock2_lit'))
#print "Lock 3: " + str(self.game.utilities.get_player_stats('lock3_lit'))
print "Balls Locked: " + str(self.game.utilities.get_player_stats('balls_locked'))
#def liteLock(self,callback):
#self.callback = callback
#if (self.ballsLocked == 0):
#self.game.utilities.set_player_stats('lock1_lit',True)
#print "Setting Ball 1 Lock to Lit"
#self.getUserStats()
#elif (self.ballsLocked == 1):
#self.game.utilities.set_player_stats('lock2_lit',True)
#self.getUserStats()
#elif (self.ballsLocked == 2):
#self.game.utilities.set_player_stats('lock3_lit',True)
#self.getUserStats()
#self.update_lamps()
def lockLeftEyeBall(self):
self.game.sound.play('ball_lock_1')
self.game.utilities.set_player_stats('ball1_locked',True)
self.game.utilities.set_player_stats('balls_locked',self.game.utilities.get_player_stats('balls_locked') + 1)
self.game.utilities.set_player_stats('lock1_lit',False)
self.getUserStats()
self.update_lamps()
self.game.utilities.displayText(100,'LEFT', 'EYE','IS','MADE',seconds=3,justify='center')
self.game.utilities.score(1000)
self.game.lampctrlflash.play_show('skillshot', repeat=False, callback=self.game.update_lamps)
self.game.trough.launch_balls(num=1)
self.ballLock1Lit = False
#self.callback()
if self.game.utilities.get_player_stats('balls_locked')==2:
self.startMultiball()
def lockRightEyeBall(self):
self.game.sound.play('ball_lock_2')
self.game.utilities.set_player_stats('ball2_locked',True)
self.game.utilities.set_player_stats('balls_locked',self.game.utilities.get_player_stats('balls_locked') + 1)
self.game.utilities.set_player_stats('lock2_lit',False)
self.getUserStats()
self.update_lamps()
self.game.utilities.displayText(100,'RIGHT', 'EYE','IS','MADE',seconds=3,justify='center')
self.game.utilities.score(1000)
self.game.lampctrlflash.play_show('skillshot', repeat=False, callback=self.game.update_lamps)
self.game.trough.launch_balls(num=1)
self.ballLock2Lit = False
#self.callback()
if self.game.utilities.get_player_stats('balls_locked')==2:
self.startMultiball()
def startMultiball(self):
self.multiballStarting = True
self.game.utilities.set_player_stats('multiball_running',True)
self.resetMultiballStats()
#self.game.collect_mode.incrementActiveZoneLimit()
self.getUserStats()
self.update_lamps()
self.multiballIntro()
def multiballIntro(self):
self.cancel_delayed('dropReset')
self.game.utilities.disableGI()
self.game.sound.stop_music()
#self.game.lampctrlflash.play_show('multiball_intro_1', repeat=False)
#self.game.utilities.randomLampPulse(100)
# Sound FX #
self.game.sound.play('multiball_1')
self.game.sound.play_music('multiball_loop'+ str(self.game.ball),loops=1,music_volume=.5)
#Short Out Noises
#self.delay(delay=2,handler=self.game.sound.play,param='short_out_2')
#self.delay(delay=3,handler=self.game.sound.play,param='short_out_1')
#self.delay(delay=4.5,handler=self.game.sound.play,param='short_out_1')
#self.delay(delay=6,handler=self.game.sound.play,param='short_out_2')
#self.delay(delay=8,handler=self.game.sound.play,param='short_out_1')
#self.delay(delay=9,handler=self.game.sound.play,param='short_out_2')
#self.delay(delay=10,handler=self.game.sound.play,param='short_out_1')
#self.game.coils.quakeMotor.schedule(schedule=0x08080808,cycle_seconds=-1,now=True)
self.resetMultiballStats()
self.delay(delay=8,handler=self.multiballRun)
def multiballRun(self):
self.game.utilities.enableGI()
#self.game.coils.quakeMotor.patter(on_time=15,off_time=100)
#self.game.utilities.enableMultiballQuake()
#self.game.sound.play('centerRampComplete')
self.game.sound.play_music('multiball_loop'+ str(self.game.ball),loops=-1,music_volume=.6)
#self.game.utilities.acCoilPulse(coilname='singleEjectHole_LeftInsertBDFlasher',pulsetime=50)
#self.game.utilities.acFlashPulse('singleEjectHole_LeftInsertBDFlasher')
if self.game.switches.rightEyeball.is_active()==True:
self.game.utilities.acCoilPulse(coilname='rightEyeballEject_SunFlasher',pulsetime=50)
if self.game.switches.leftEyeball.is_active()==True:
self.game.utilities.acCoilPulse(coilname='leftEyeballEject_LeftPlayfieldFlasher',pulsetime=50)
if self.game.switches.singleEject.is_active()==True:
self.game.utilities.acCoilPulse(coilname='singleEjectHole_LeftInsertBDFlasher',pulsetime=50)
#self.game.trough.launch_balls(num=2)
self.multiballStarting = False
self.game.update_lamps()
def stopMultiball(self):
self.game.utilities.set_player_stats('multiball_running',False)
#self.game.utilities.set_player_stats('jackpot_lit',False)
self.game.utilities.setBallInPlay(True)
#self.game.sound.stop_music()
#self.game.sound.play_music('main'+ str(self.game.ball),loops=1,music_volume=.5)
self.resetMultiballStats()
#self.game.bonusmultiplier_mode.incrementBonusMultiplier()
self.game.update_lamps()
#self.game.coils.quakeMotor.disable()
#self.callback()
def resetMultiballStats(self):
self.game.utilities.set_player_stats('lock1_lit',False)
self.game.utilities.set_player_stats('lock2_lit',False)
self.game.utilities.set_player_stats('lock3_lit',False)
self.game.utilities.set_player_stats('balls_locked',0)
self.getUserStats()
#def sw_underPlayfieldDrop1_active(self, sw):
#if (self.ballLock1Lit == True):
#self.lockBall1()
#elif (self.ballLock2Lit == True):
#self.lockBall2()
#elif (self.ballLock3Lit == True):
#self.startMultiball()
#else:
#pass
#def sw_ballPopperBottom_closed(self, sw):
#if(self.multiballStarting == True):
#return procgame.game.SwitchStop
#else:
#return procgame.game.SwitchContinue
#def sw_outhole_closed_for_500ms(self, sw):
##if (self.game.trough.num_balls_in_play == 2):
##Last ball - Need to stop multiball
##self.stopMultiball()
#return procgame.game.SwitchContinue
def sw_leftEyeball_closed_for_100ms(self, sw):
if (self.ballLock1Lit == True):
self.lockLeftEyeBall()
return procgame.game.SwitchContinue
def sw_rightEyeball_closed_for_100ms(self, sw):
if (self.ballLock2Lit == True):
self.lockRightEyeBall()
return procgame.game.SwitchContinue
#EJECTS/EYEBALLS
#rightEyeball:
#number: S42
#label: 'Right Eye Eject'
#leftEyeball:
#number: S41
#label: 'Left Eye Eject'
def sw_visorClosed_open_for_100ms(self, sw):
self.open_visor()
return procgame.game.SwitchContinue
def sw_visorOpen_closed_for_100ms(self, sw):
self.open_visor()
return procgame.game.SwitchContinue
#visorOpen:
#number: S67
#visorClosed:
#number: S66
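# Editor's note (illustrative, not part of the original mode): a procgame mode
# like this is normally created by the game controller and pushed onto its
# mode queue, roughly as below; the attribute name and priority value are
# assumptions.
#
# self.multiball_mode = Multiball(self, priority=25)
# self.modes.add(self.multiball_mode)
#
# The flow implemented above: opening the visor lights both eyeball locks,
# each locked ball launches a replacement into play, and locking the second
# eyeball starts multiball.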
| mit | -5,230,343,648,591,653,000 | 38.778626 | 111 | 0.644982 | false | 2.713356 | false | false | false |
Vagab0nd/SiCKRAGE | lib3/twilio/rest/bulkexports/v1/export_configuration.py | 1 | 9958 | # coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class ExportConfigurationList(ListResource):
""" PLEASE NOTE that this class contains beta products that are subject to
change. Use them with caution. """
def __init__(self, version):
"""
Initialize the ExportConfigurationList
:param Version version: Version that contains the resource
:returns: twilio.rest.bulkexports.v1.export_configuration.ExportConfigurationList
:rtype: twilio.rest.bulkexports.v1.export_configuration.ExportConfigurationList
"""
super(ExportConfigurationList, self).__init__(version)
# Path Solution
self._solution = {}
def get(self, resource_type):
"""
Constructs a ExportConfigurationContext
:param resource_type: The type of communication – Messages, Calls
:returns: twilio.rest.bulkexports.v1.export_configuration.ExportConfigurationContext
:rtype: twilio.rest.bulkexports.v1.export_configuration.ExportConfigurationContext
"""
return ExportConfigurationContext(self._version, resource_type=resource_type, )
def __call__(self, resource_type):
"""
Constructs a ExportConfigurationContext
:param resource_type: The type of communication – Messages, Calls
:returns: twilio.rest.bulkexports.v1.export_configuration.ExportConfigurationContext
:rtype: twilio.rest.bulkexports.v1.export_configuration.ExportConfigurationContext
"""
return ExportConfigurationContext(self._version, resource_type=resource_type, )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Bulkexports.V1.ExportConfigurationList>'
class ExportConfigurationPage(Page):
""" PLEASE NOTE that this class contains beta products that are subject to
change. Use them with caution. """
def __init__(self, version, response, solution):
"""
Initialize the ExportConfigurationPage
:param Version version: Version that contains the resource
:param Response response: Response from the API
:returns: twilio.rest.bulkexports.v1.export_configuration.ExportConfigurationPage
:rtype: twilio.rest.bulkexports.v1.export_configuration.ExportConfigurationPage
"""
super(ExportConfigurationPage, self).__init__(version, response)
# Path Solution
self._solution = solution
def get_instance(self, payload):
"""
Build an instance of ExportConfigurationInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.bulkexports.v1.export_configuration.ExportConfigurationInstance
:rtype: twilio.rest.bulkexports.v1.export_configuration.ExportConfigurationInstance
"""
return ExportConfigurationInstance(self._version, payload, )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Bulkexports.V1.ExportConfigurationPage>'
class ExportConfigurationContext(InstanceContext):
""" PLEASE NOTE that this class contains beta products that are subject to
change. Use them with caution. """
def __init__(self, version, resource_type):
"""
Initialize the ExportConfigurationContext
:param Version version: Version that contains the resource
:param resource_type: The type of communication – Messages, Calls
:returns: twilio.rest.bulkexports.v1.export_configuration.ExportConfigurationContext
:rtype: twilio.rest.bulkexports.v1.export_configuration.ExportConfigurationContext
"""
super(ExportConfigurationContext, self).__init__(version)
# Path Solution
self._solution = {'resource_type': resource_type, }
self._uri = '/Exports/{resource_type}/Configuration'.format(**self._solution)
def fetch(self):
"""
Fetch the ExportConfigurationInstance
:returns: The fetched ExportConfigurationInstance
:rtype: twilio.rest.bulkexports.v1.export_configuration.ExportConfigurationInstance
"""
payload = self._version.fetch(method='GET', uri=self._uri, )
return ExportConfigurationInstance(
self._version,
payload,
resource_type=self._solution['resource_type'],
)
def update(self, enabled=values.unset, webhook_url=values.unset,
webhook_method=values.unset):
"""
Update the ExportConfigurationInstance
:param bool enabled: Whether files are automatically generated
:param unicode webhook_url: URL targeted at export
:param unicode webhook_method: Whether to GET or POST to the webhook url
:returns: The updated ExportConfigurationInstance
:rtype: twilio.rest.bulkexports.v1.export_configuration.ExportConfigurationInstance
"""
data = values.of({'Enabled': enabled, 'WebhookUrl': webhook_url, 'WebhookMethod': webhook_method, })
payload = self._version.update(method='POST', uri=self._uri, data=data, )
return ExportConfigurationInstance(
self._version,
payload,
resource_type=self._solution['resource_type'],
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Bulkexports.V1.ExportConfigurationContext {}>'.format(context)
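# Editor's sketch (not part of the generated bindings): given a Bulkexports V1
# ``version`` object from an authenticated twilio Client, the Messages export
# configuration can be read and toggled with the classes in this file;
# obtaining ``version`` itself is assumed here.
#
# context = ExportConfigurationContext(version, resource_type='Messages')
# config = context.fetch()                                 # ExportConfigurationInstance
# config = context.update(enabled=True, webhook_method='POST')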
class ExportConfigurationInstance(InstanceResource):
""" PLEASE NOTE that this class contains beta products that are subject to
change. Use them with caution. """
def __init__(self, version, payload, resource_type=None):
"""
Initialize the ExportConfigurationInstance
:returns: twilio.rest.bulkexports.v1.export_configuration.ExportConfigurationInstance
:rtype: twilio.rest.bulkexports.v1.export_configuration.ExportConfigurationInstance
"""
super(ExportConfigurationInstance, self).__init__(version)
# Marshaled Properties
self._properties = {
'enabled': payload.get('enabled'),
'webhook_url': payload.get('webhook_url'),
'webhook_method': payload.get('webhook_method'),
'resource_type': payload.get('resource_type'),
'url': payload.get('url'),
}
# Context
self._context = None
self._solution = {'resource_type': resource_type or self._properties['resource_type'], }
@property
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: ExportConfigurationContext for this ExportConfigurationInstance
:rtype: twilio.rest.bulkexports.v1.export_configuration.ExportConfigurationContext
"""
if self._context is None:
self._context = ExportConfigurationContext(
self._version,
resource_type=self._solution['resource_type'],
)
return self._context
@property
def enabled(self):
"""
:returns: Whether files are automatically generated
:rtype: bool
"""
return self._properties['enabled']
@property
def webhook_url(self):
"""
:returns: URL targeted at export
:rtype: unicode
"""
return self._properties['webhook_url']
@property
def webhook_method(self):
"""
:returns: Whether to GET or POST to the webhook url
:rtype: unicode
"""
return self._properties['webhook_method']
@property
def resource_type(self):
"""
:returns: The type of communication – Messages, Calls
:rtype: unicode
"""
return self._properties['resource_type']
@property
def url(self):
"""
:returns: The URL of this resource.
:rtype: unicode
"""
return self._properties['url']
def fetch(self):
"""
Fetch the ExportConfigurationInstance
:returns: The fetched ExportConfigurationInstance
:rtype: twilio.rest.bulkexports.v1.export_configuration.ExportConfigurationInstance
"""
return self._proxy.fetch()
def update(self, enabled=values.unset, webhook_url=values.unset,
webhook_method=values.unset):
"""
Update the ExportConfigurationInstance
:param bool enabled: Whether files are automatically generated
:param unicode webhook_url: URL targeted at export
:param unicode webhook_method: Whether to GET or POST to the webhook url
:returns: The updated ExportConfigurationInstance
:rtype: twilio.rest.bulkexports.v1.export_configuration.ExportConfigurationInstance
"""
return self._proxy.update(enabled=enabled, webhook_url=webhook_url, webhook_method=webhook_method, )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Bulkexports.V1.ExportConfigurationInstance {}>'.format(context)
| gpl-3.0 | 7,873,266,434,988,749,000 | 33.66899 | 108 | 0.652362 | false | 4.574713 | true | false | false |
cherrypy/magicbus | magicbus/compat.py | 1 | 2070 | """Compatibility code for using magicbus with various versions of Python.
Magic Bus 3.3 is compatible with Python versions 2.7+. This module provides a
useful abstraction over the differences between Python versions, sometimes by
preferring a newer idiom, sometimes an older one, and sometimes a custom one.
In particular, Python 2 uses str and '' for byte strings, while Python 3
uses str and '' for unicode strings. We will call each of these the 'native
string' type for each version. Because of this major difference, this module
provides new 'bytestr', 'unicodestr', and 'nativestr' attributes, as well as
the function: 'ntob', which translates native strings (of type 'str') into
byte strings regardless of Python version.
"""
import sys
if sys.version_info >= (3, 0):
py3k = True
basestring = (bytes, str)
unicodestr = str
def ntob(n, encoding='ISO-8859-1'):
"""Return the given native string as a byte string in the given
encoding."""
# In Python 3, the native string type is unicode
return n.encode(encoding)
else:
# Python 2
py3k = False
basestring = basestring
unicodestr = unicode
def ntob(n, encoding='ISO-8859-1'):
"""Return the given native string as a byte string in the given
encoding."""
# In Python 2, the native string type is bytes. Assume it's already
# in the given encoding, which for ISO-8859-1 is almost always what
# was intended.
return n
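# Editor's illustration (not part of the original module): with either branch
# above, callers can hand native ``str`` literals to ntob() and get something
# usable on the wire regardless of interpreter version, e.g.
#
# ntob('HTTP/1.1 200 OK')   # -> b'HTTP/1.1 200 OK' on Python 3,
#                           #    'HTTP/1.1 200 OK' (already bytes) on Python 2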
try:
from http.server import HTTPServer, BaseHTTPRequestHandler as HTTPHandler
except ImportError:
from BaseHTTPServer import HTTPServer
from BaseHTTPServer import BaseHTTPRequestHandler as HTTPHandler
try:
from http.client import HTTPConnection
except ImportError:
from httplib import HTTPConnection
import threading
try:
from _thread import get_ident as get_thread_ident
except ImportError:
from thread import get_ident as get_thread_ident
if sys.version_info >= (3, 3):
TimerClass = threading.Timer
else:
TimerClass = threading._Timer
| bsd-3-clause | 3,203,971,497,558,720,500 | 31.34375 | 77 | 0.717874 | false | 4.215886 | false | false | false |
derekjchow/models | research/global_objectives/util_test.py | 4 | 13413 | # Copyright 2018 The TensorFlow Global Objectives Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for global objectives util functions."""
# Dependency imports
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from global_objectives import util
def weighted_sigmoid_cross_entropy(targets, logits, weight):
return (weight * targets * np.log(1.0 + np.exp(-logits)) + (
(1.0 - targets) * np.log(1.0 + 1.0 / np.exp(-logits))))
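# (Editor's note: this numpy reference computes the per-example weighted
# cross-entropy  w * t * log(1 + e^(-x)) + (1 - t) * log(1 + e^(x));
# the second term is written above as log(1 + 1/exp(-x)), which is the same
# quantity.)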
def hinge_loss(labels, logits):
# Mostly copied from tensorflow.python.ops.losses but with loss per datapoint.
labels = tf.to_float(labels)
all_ones = tf.ones_like(labels)
labels = tf.subtract(2 * labels, all_ones)
return tf.nn.relu(tf.subtract(all_ones, tf.multiply(labels, logits)))
class WeightedSigmoidCrossEntropyTest(parameterized.TestCase, tf.test.TestCase):
def testTrivialCompatibilityWithSigmoidCrossEntropy(self):
"""Tests compatibility with unweighted function with weight 1.0."""
x_shape = [300, 10]
targets = np.random.random_sample(x_shape).astype(np.float32)
logits = np.random.randn(*x_shape).astype(np.float32)
weighted_loss = util.weighted_sigmoid_cross_entropy_with_logits(
targets,
logits)
expected_loss = (
tf.contrib.nn.deprecated_flipped_sigmoid_cross_entropy_with_logits(
logits, targets))
with self.test_session():
self.assertAllClose(expected_loss.eval(),
weighted_loss.eval(),
atol=0.000001)
def testNonTrivialCompatibilityWithSigmoidCrossEntropy(self):
"""Tests use of an arbitrary weight (4.12)."""
x_shape = [300, 10]
targets = np.random.random_sample(x_shape).astype(np.float32)
logits = np.random.randn(*x_shape).astype(np.float32)
weight = 4.12
weighted_loss = util.weighted_sigmoid_cross_entropy_with_logits(
targets,
logits,
weight,
weight)
expected_loss = (
weight *
tf.contrib.nn.deprecated_flipped_sigmoid_cross_entropy_with_logits(
logits, targets))
with self.test_session():
self.assertAllClose(expected_loss.eval(),
weighted_loss.eval(),
atol=0.000001)
def testDifferentSizeWeightedSigmoidCrossEntropy(self):
"""Tests correctness on 3D tensors.
Tests that the function works as expected when logits is a 3D tensor and
targets is a 2D tensor.
"""
targets_shape = [30, 4]
logits_shape = [targets_shape[0], targets_shape[1], 3]
targets = np.random.random_sample(targets_shape).astype(np.float32)
logits = np.random.randn(*logits_shape).astype(np.float32)
weight_vector = [2.0, 3.0, 13.0]
loss = util.weighted_sigmoid_cross_entropy_with_logits(targets,
logits,
weight_vector)
with self.test_session():
loss = loss.eval()
for i in range(0, len(weight_vector)):
expected = weighted_sigmoid_cross_entropy(targets, logits[:, :, i],
weight_vector[i])
self.assertAllClose(loss[:, :, i], expected, atol=0.000001)
@parameterized.parameters((300, 10, 0.3), (20, 4, 2.0), (30, 4, 3.9))
def testWeightedSigmoidCrossEntropy(self, batch_size, num_labels, weight):
"""Tests thats the tf and numpy functions agree on many instances."""
x_shape = [batch_size, num_labels]
targets = np.random.random_sample(x_shape).astype(np.float32)
logits = np.random.randn(*x_shape).astype(np.float32)
with self.test_session():
loss = util.weighted_sigmoid_cross_entropy_with_logits(
targets,
logits,
weight,
1.0,
name='weighted-loss')
expected = weighted_sigmoid_cross_entropy(targets, logits, weight)
self.assertAllClose(expected, loss.eval(), atol=0.000001)
def testGradients(self):
"""Tests that weighted loss gradients behave as expected."""
dummy_tensor = tf.constant(1.0)
positives_shape = [10, 1]
positives_logits = dummy_tensor * tf.Variable(
tf.random_normal(positives_shape) + 1.0)
positives_targets = tf.ones(positives_shape)
positives_weight = 4.6
positives_loss = (
tf.contrib.nn.deprecated_flipped_sigmoid_cross_entropy_with_logits(
positives_logits, positives_targets) * positives_weight)
negatives_shape = [190, 1]
negatives_logits = dummy_tensor * tf.Variable(
tf.random_normal(negatives_shape))
negatives_targets = tf.zeros(negatives_shape)
negatives_weight = 0.9
negatives_loss = (
tf.contrib.nn.deprecated_flipped_sigmoid_cross_entropy_with_logits(
negatives_logits, negatives_targets) * negatives_weight)
all_logits = tf.concat([positives_logits, negatives_logits], 0)
all_targets = tf.concat([positives_targets, negatives_targets], 0)
weighted_loss = tf.reduce_sum(
util.weighted_sigmoid_cross_entropy_with_logits(
all_targets, all_logits, positives_weight, negatives_weight))
weighted_gradients = tf.gradients(weighted_loss, dummy_tensor)
expected_loss = tf.add(
tf.reduce_sum(positives_loss),
tf.reduce_sum(negatives_loss))
expected_gradients = tf.gradients(expected_loss, dummy_tensor)
with tf.Session() as session:
tf.global_variables_initializer().run()
grad, expected_grad = session.run(
[weighted_gradients, expected_gradients])
self.assertAllClose(grad, expected_grad)
def testDtypeFlexibility(self):
"""Tests the loss on inputs of varying data types."""
shape = [20, 3]
logits = np.random.randn(*shape)
targets = tf.truncated_normal(shape)
positive_weights = tf.constant(3, dtype=tf.int64)
negative_weights = 1
loss = util.weighted_sigmoid_cross_entropy_with_logits(
targets, logits, positive_weights, negative_weights)
with self.test_session():
self.assertEqual(loss.eval().dtype, np.float)
class WeightedHingeLossTest(tf.test.TestCase):
def testTrivialCompatibilityWithHinge(self):
# Tests compatibility with unweighted hinge loss.
x_shape = [55, 10]
logits = tf.constant(np.random.randn(*x_shape).astype(np.float32))
targets = tf.to_float(tf.constant(np.random.random_sample(x_shape) > 0.3))
weighted_loss = util.weighted_hinge_loss(targets, logits)
expected_loss = hinge_loss(targets, logits)
with self.test_session():
self.assertAllClose(expected_loss.eval(), weighted_loss.eval())
def testLessTrivialCompatibilityWithHinge(self):
# Tests compatibility with a constant weight for positives and negatives.
x_shape = [56, 11]
logits = tf.constant(np.random.randn(*x_shape).astype(np.float32))
targets = tf.to_float(tf.constant(np.random.random_sample(x_shape) > 0.7))
weight = 1.0 + 1.0/2 + 1.0/3 + 1.0/4 + 1.0/5 + 1.0/6 + 1.0/7
weighted_loss = util.weighted_hinge_loss(targets, logits, weight, weight)
expected_loss = hinge_loss(targets, logits) * weight
with self.test_session():
self.assertAllClose(expected_loss.eval(), weighted_loss.eval())
def testNontrivialCompatibilityWithHinge(self):
# Tests compatibility with different positive and negative weights.
x_shape = [23, 8]
logits_positives = tf.constant(np.random.randn(*x_shape).astype(np.float32))
logits_negatives = tf.constant(np.random.randn(*x_shape).astype(np.float32))
targets_positives = tf.ones(x_shape)
targets_negatives = tf.zeros(x_shape)
logits = tf.concat([logits_positives, logits_negatives], 0)
targets = tf.concat([targets_positives, targets_negatives], 0)
raw_loss = util.weighted_hinge_loss(targets,
logits,
positive_weights=3.4,
negative_weights=1.2)
loss = tf.reduce_sum(raw_loss, 0)
positives_hinge = hinge_loss(targets_positives, logits_positives)
negatives_hinge = hinge_loss(targets_negatives, logits_negatives)
expected = tf.add(tf.reduce_sum(3.4 * positives_hinge, 0),
tf.reduce_sum(1.2 * negatives_hinge, 0))
with self.test_session():
self.assertAllClose(loss.eval(), expected.eval())
def test3DLogitsAndTargets(self):
# Tests correctness when logits is 3D and targets is 2D.
targets_shape = [30, 4]
logits_shape = [targets_shape[0], targets_shape[1], 3]
targets = tf.to_float(
tf.constant(np.random.random_sample(targets_shape) > 0.7))
logits = tf.constant(np.random.randn(*logits_shape).astype(np.float32))
weight_vector = [1.0, 1.0, 1.0]
loss = util.weighted_hinge_loss(targets, logits, weight_vector)
with self.test_session():
loss_value = loss.eval()
for i in range(len(weight_vector)):
expected = hinge_loss(targets, logits[:, :, i]).eval()
self.assertAllClose(loss_value[:, :, i], expected)
class BuildLabelPriorsTest(tf.test.TestCase):
def testLabelPriorConsistency(self):
# Checks that, with zero pseudocounts, the returned label priors reproduce
# label frequencies in the batch.
batch_shape = [4, 10]
labels = tf.Variable(
tf.to_float(tf.greater(tf.random_uniform(batch_shape), 0.678)))
label_priors_update = util.build_label_priors(
labels=labels, positive_pseudocount=0, negative_pseudocount=0)
expected_priors = tf.reduce_mean(labels, 0)
with self.test_session():
tf.global_variables_initializer().run()
self.assertAllClose(label_priors_update.eval(), expected_priors.eval())
def testLabelPriorsUpdate(self):
# Checks that the update of label priors behaves as expected.
batch_shape = [1, 5]
labels = tf.Variable(
tf.to_float(tf.greater(tf.random_uniform(batch_shape), 0.4)))
label_priors_update = util.build_label_priors(labels)
label_sum = np.ones(shape=batch_shape)
weight_sum = 2.0 * np.ones(shape=batch_shape)
with self.test_session() as session:
tf.global_variables_initializer().run()
for _ in range(3):
label_sum += labels.eval()
weight_sum += np.ones(shape=batch_shape)
expected_posteriors = label_sum / weight_sum
label_priors = label_priors_update.eval().reshape(batch_shape)
self.assertAllClose(label_priors, expected_posteriors)
# Re-initialize labels to get a new random sample.
session.run(labels.initializer)
def testLabelPriorsUpdateWithWeights(self):
# Checks the update of label priors with per-example weights.
batch_size = 6
num_labels = 5
batch_shape = [batch_size, num_labels]
labels = tf.Variable(
tf.to_float(tf.greater(tf.random_uniform(batch_shape), 0.6)))
weights = tf.Variable(tf.random_uniform(batch_shape) * 6.2)
update_op = util.build_label_priors(labels, weights=weights)
expected_weighted_label_counts = 1.0 + tf.reduce_sum(weights * labels, 0)
expected_weight_sum = 2.0 + tf.reduce_sum(weights, 0)
expected_label_posteriors = tf.divide(expected_weighted_label_counts,
expected_weight_sum)
with self.test_session() as session:
tf.global_variables_initializer().run()
updated_priors, expected_posteriors = session.run(
[update_op, expected_label_posteriors])
self.assertAllClose(updated_priors, expected_posteriors)
class WeightedSurrogateLossTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
('hinge', util.weighted_hinge_loss),
('xent', util.weighted_sigmoid_cross_entropy_with_logits))
def testCompatibilityLoss(self, loss_name, loss_fn):
x_shape = [28, 4]
logits = tf.constant(np.random.randn(*x_shape).astype(np.float32))
targets = tf.to_float(tf.constant(np.random.random_sample(x_shape) > 0.5))
positive_weights = 0.66
negative_weights = 11.1
expected_loss = loss_fn(
targets,
logits,
positive_weights=positive_weights,
negative_weights=negative_weights)
computed_loss = util.weighted_surrogate_loss(
targets,
logits,
loss_name,
positive_weights=positive_weights,
negative_weights=negative_weights)
with self.test_session():
self.assertAllClose(expected_loss.eval(), computed_loss.eval())
  def testSurrogateError(self):
x_shape = [7, 3]
logits = tf.constant(np.random.randn(*x_shape).astype(np.float32))
targets = tf.to_float(tf.constant(np.random.random_sample(x_shape) > 0.5))
with self.assertRaises(ValueError):
util.weighted_surrogate_loss(logits, targets, 'bug')
if __name__ == '__main__':
tf.test.main()
| apache-2.0 | 1,391,438,213,405,433,900 | 39.279279 | 80 | 0.659584 | false | 3.635945 | true | false | false |
da-mkay/subsynco | src/subsynco/gst/player.py | 1 | 11288 | #!/usr/bin/env python
'''
SubSynco - a tool for synchronizing subtitle files
Copyright (C) 2015 da-mkay
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import gi
gi.require_version('Gst', '1.0')
from gi.repository import Gdk
from gi.repository import Gst
from gi.repository import Gtk
# Needed for window.get_xid(), xvimagesink.set_window_handle(),
# respectively:
from gi.repository import GdkX11, GstVideo
import ctypes
import re
import sys
# Import TimeClbFilter so that the plugin gets registered:
from subsynco.gst.filter import TimeClbFilter
from subsynco.media.text_formatter import TextFormatter
from subsynco.utils.logger import Logger
class MultimediaPlayer(object):
def __init__(self, drawing_area):
self._drawing_area = drawing_area;
self._subtitle = None
self._position_changed_callback = None
self._duration_changed_callback = None
self._subtitle_list = None
self._cur_subtitle = None
self._duration = None
self._position = 0
self._file_uri = None
self._drawing_area.connect('realize', self._on_video_realize)
self._drawing_area.connect('unrealize', self._on_video_unrealize)
self._drawing_area.connect('draw', self._on_video_draw)
# GStreamer setup
# ---------------
self._player = Gst.ElementFactory.make('playbin', 'MultimediaPlayer')
# PlayBin uses autovideosink by default but we need to wrap it
# in a Bin so that we can use timeclbfilter and textoverlay.
video_sink = Gst.ElementFactory.make('autovideosink')
# Create the following bin:
# timeclbfilter ! textoverlay ! autovideosink
# video_bin is then set as self._player's video-sink
self._textoverlay = Gst.ElementFactory.make('textoverlay',
'textoverlay')
timeclbfilter = Gst.ElementFactory.make('timeclbfilter',
'timeclbfilter')
video_bin = Gst.Bin.new('timer-text-video-bin')
video_bin.add(timeclbfilter)
video_bin.add(self._textoverlay)
video_bin.add(video_sink)
sink_pad = Gst.GhostPad.new('sink',
timeclbfilter.get_static_pad('sink'))
video_bin.add_pad(sink_pad)
timeclbfilter.link(self._textoverlay)
self._textoverlay.link(video_sink)
timeclbfilter.set_timer_callback(self._on_timer_tick)
self._textoverlay.set_property('font-desc', 'Sans 28')
self._textoverlay.set_property('color', 0xffffe400)
self._textoverlay.set_property('outline-color', 0xff333333)
self._player.set_property('video-sink', video_bin)
bus = self._player.get_bus()
bus.add_signal_watch()
bus.enable_sync_message_emission()
bus.connect('message', self._on_player_message)
bus.connect('sync-message::element', self._on_player_sync_message)
self._text_formatter = TextFormatter()
def _on_timer_tick(self, nanos):
self._position = nanos
# If a SubtitleList is set we show/hide the subtitles here
# based on the time.
if (self._subtitle_list is not None):
millis = nanos / 1000000
__, subtitle = self._subtitle_list.get_subtitle(millis)
if (subtitle is not self._cur_subtitle):
if (subtitle is None):
txt = ''
else:
txt = self._text_formatter.fix_format(subtitle.text,
pango_markup=True)
self._textoverlay.set_property('text', txt)
self._cur_subtitle = subtitle
# Invoke users position_changed callback if any.
if (self._position_changed_callback is not None):
self._position_changed_callback(nanos)
def _on_video_realize(self, widget):
# The window handle must be retrieved in GUI-thread and before
# playing pipeline.
video_window = self._drawing_area.get_property('window')
if sys.platform == 'win32':
# On Windows we need a "hack" to get the native window
# handle.
# See http://stackoverflow.com/questions/23021327/how-i-can-
# get-drawingarea-window-handle-in-gtk3/27236258#27236258
if not video_window.ensure_native():
Logger.error(
_('[Player] Video playback requires a native window'))
return
ctypes.pythonapi.PyCapsule_GetPointer.restype = ctypes.c_void_p
ctypes.pythonapi.PyCapsule_GetPointer.argtypes = [ctypes.py_object]
video_window_gpointer = ctypes.pythonapi.PyCapsule_GetPointer(
video_window.__gpointer__, None)
gdkdll = ctypes.CDLL ('libgdk-3-0.dll')
self._video_window_handle = gdkdll.gdk_win32_window_get_handle(
video_window_gpointer)
else:
self._video_window_handle = video_window.get_xid()
def _on_video_unrealize(self, widget):
# To prevent race conditions when closing the window while
# playing
self._player.set_state(Gst.State.NULL)
def _on_video_draw(self, drawing_area, cairo_context):
"""This method is called when the player's DrawingArea emits the
draw-signal.
Usually the playbin will render the currently opened video in
the DrawingArea. But if no video is opened we take care of
drawing.
"""
if self._file_uri is not None:
# A video is already opened. So playbin will take care of
# showing the video inside the DrawingArea.
return False
# No video is opened. So we draw a simple black background.
width = drawing_area.get_allocated_width()
height = drawing_area.get_allocated_height()
cairo_context.rectangle(0, 0, width, height)
cairo_context.set_source_rgb(0.15, 0.15, 0.15)
cairo_context.fill()
text = _('no video loaded')
cairo_context.set_font_size(14)
x_bearing, y_bearing, txt_width, txt_height, x_advance, y_advance = (
cairo_context.text_extents(text))
cairo_context.move_to(width/2 - txt_width/2 - x_bearing, height/2 -
y_bearing/2)
cairo_context.set_source_rgb(1.0, 1.0, 1.0)
cairo_context.show_text(text)
return True
def _on_player_message(self, bus, message):
if message.type == Gst.MessageType.EOS:
# We pause the video instead of stop, because we may still
# want to seek.
self.pause()
elif message.type == Gst.MessageType.ERROR:
self.stop()
(err, debug) = message.parse_error()
Logger.error(_('[Player] {}').format(err), debug)
elif message.type == Gst.MessageType.ASYNC_DONE:
# TODO Don't try to get duration at each ASYNC_DONE, only
# on new file and real state change.
self._query_duration()
# TODO Gst.MessageType.DURATION_CHANGED: query_duration would
# fail if ASYNC_DONE was not received
def _on_player_sync_message(self, bus, message):
# For more information see here:
# http://gstreamer.freedesktop.org/data/doc/gstreamer/head/gst-p
# lugins-base-libs/html/gst-plugins-base-libs-gstvideooverlay.ht
# ml
if message.get_structure() is None:
return
if not GstVideo.is_video_overlay_prepare_window_handle_message(message):
return
imagesink = message.src
imagesink.set_property('force-aspect-ratio', True)
imagesink.set_window_handle(self._video_window_handle)
def _query_duration(self):
if self._duration is not None:
return True
ok, dur = self._player.query_duration(Gst.Format.TIME)
self._duration = dur
if (self._duration_changed_callback is not None):
self._duration_changed_callback(self._duration)
return ok
def set_position_changed_callback(self, callback):
self._position_changed_callback = callback
def set_duration_changed_callback(self, callback):
self._duration_changed_callback = callback
def set_subtitle_list(self, subtitle_list):
"""Set the subsynco.media.subtitle.SubtitleList to be used for
showing subtitles.
"""
self._textoverlay.set_property('text', '')
self._cur_subtitle = None
self._subtitle_list = subtitle_list
def pause(self):
if self._file_uri is not None:
self._player.set_state(Gst.State.PAUSED)
def play(self):
if self._file_uri is not None:
self._player.set_state(Gst.State.PLAYING)
def stop(self):
self._player.set_state(Gst.State.NULL)
self._duration = None
self._position = 0
if (self._duration_changed_callback is not None):
self._duration_changed_callback(0)
if (self._position_changed_callback is not None):
self._position_changed_callback(0)
def set_file(self, file_uri):
self.stop()
self._file_uri = file_uri
if file_uri is None:
# The DrawingArea may still show the old video (if any was
# opened before). So we force a draw-signal which will lead
# to a call to _on_video_draw.
self._drawing_area.queue_draw()
else:
self._player.set_property('uri', file_uri)
def seek(self, nanos):
if self._file_uri is None:
return
# The duration should have been already queried when the file
# was loaded. However ensure that we have a duration!
ok = self._query_duration()
if not ok:
Logger.warn(
_('Warning - [Player] Failed to get duration. Seek aborted!'))
return
if (nanos < 0):
nanos = 0
elif (nanos > self._duration):
nanos = self._duration # TODO: duration is inaccurate!!!
if (nanos == self._position):
return
ok = self._player.seek_simple(Gst.Format.TIME,
Gst.SeekFlags.FLUSH | Gst.SeekFlags.ACCURATE,
nanos)
if not ok:
Logger.warn(_('Warning - [Player] Failed to seek.'))
def seek_relative(self, nanos):
self.seek(self._position + nanos)
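# Editor's sketch (hypothetical, not part of SubSynco): the player expects to
# be handed a Gtk.DrawingArea owned by the surrounding GUI, and GStreamer must
# already be initialised by the application. The file name is a placeholder.
#
# Gst.init(None)
# drawing_area = Gtk.DrawingArea()
# player = MultimediaPlayer(drawing_area)
# player.set_position_changed_callback(lambda nanos: None)
# player.set_file('file:///path/to/video.mkv')
# player.play()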
| gpl-3.0 | -3,769,176,288,573,613,600 | 40.19708 | 80 | 0.601967 | false | 3.987284 | false | false | false |
pla93/django-mantis-actionables | mantis_actionables/templatetags/actionables_tags.py | 2 | 1383 | __author__ = 'Philipp Lang'
from django import template
from dingos import DINGOS_TEMPLATE_FAMILY
from mantis_actionables.forms import TagForm
from mantis_actionables.models import Status
register = template.Library()
@register.filter
def lookup_status_processing(value):
return Status.PROCESSING_MAP.get(value,'ERROR')
@register.filter
def lookup_status_tlp(value):
return Status.TLP_MAP.get(value,'ERROR')
@register.filter
def lookup_status_confidence(value):
return Status.CONFIDENCE_MAP.get(value,'ERROR')
@register.simple_tag()
def show_addTagInput_actionables(obj_id,curr_context):
form = TagForm()
form.fields['tag'].widget.attrs.update({
'data-obj-id': obj_id,
'data-curr-context': curr_context,
})
return form.fields['tag'].widget.render('tag','')
@register.inclusion_tag('mantis_actionables/%s/includes/_ContextMetaDataWidget.html' % DINGOS_TEMPLATE_FAMILY)
def show_ContextMetaData(context_obj,widget_config=None):
if not widget_config:
widget_config = {'action_buttons' : ['edit','show_history']}
context = {'context_obj': context_obj,
'edit_button' : False,
'show_history_button': False,
'show_details_button' : False}
for button in widget_config.get('action_buttons',[]):
context["%s_button" % button] = True
return context
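# Editor's sketch (hypothetical, not in the original module): typical template
# usage of the filters and tags above; the context variables are placeholders.
#
# {% load actionables_tags %}
# {{ status.tlp|lookup_status_tlp }}
# {% show_addTagInput_actionables object.pk "actionables_context" %}
# {% show_ContextMetaData context_obj %}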
| gpl-2.0 | -4,653,241,711,023,776,000 | 24.611111 | 110 | 0.683297 | false | 3.440299 | false | false | false |
gvkalra/gladieter | gladieter/gladieter/lib/utils.py | 1 | 1426 | from defaults import ONS_PEER_ROOT
from error import GladieterLibError
from defaults import ONS_SUPPORTED_REGEX
from xml.dom.minidom import parse
import urllib
import ons
def gtin_to_fqdn(gtin, peer_root=ONS_PEER_ROOT):
aus = gtin[0:1] + "." + gtin[12:13] + "." + gtin[11:12] + "." \
+ gtin[10:11] + "." + gtin[9:10] + "." + gtin[8:9] + "." \
+ gtin[7:8] + "." + gtin[6:7] + "." + gtin[5:6] + "." \
+ gtin[4:5] + "." + gtin[3:4] + "." + gtin[2:3] + "." \
+ gtin[1:2] + "."
return aus + "gtin.gs1.id." + peer_root
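# Editor's note (worked example, not in the original file): the labels are the
# leading digit first, then the remaining digits in reverse order, so for the
# made-up GTIN-13 "0123456789012" this returns
# "0.2.1.0.9.8.7.6.5.4.3.2.1.gtin.gs1.id." + ONS_PEER_ROOT.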
def regexp_to_uri(regexp):
s = regexp.split('!')
# be picky!
if s[1] in ONS_SUPPORTED_REGEX:
return regexp.split('!')[2] # url
raise GladieterLibError("Regexp not supported")
def is_product_recalled(gtin):
fqdn = gtin_to_fqdn(str(gtin))
o = ons.ONSServer()
epcis = o.query_epcis(fqdn)
uri = regexp_to_uri(epcis['regexp'])
query_url = uri + "Service/Poll/SimpleEventQuery?" +\
"EQ_bizStep=urn:epcglobal:cbv:bizstep:holding&EQ_disposition=urn:epcglobal:cbv:disp:recalled" +\
"&MATCH_epc=urn:epc:id:gtin:" + str(gtin) + "&"
try:
xml = urllib.urlopen(query_url)
dom = parse(xml)
if len(dom.getElementsByTagName('action')) == 0:
return False
return True # recalled
except:
return False
def get_gs1source_query_url(gtin):
fqdn = gtin_to_fqdn(str(gtin))
o = ons.ONSServer()
gs1_source = o.query_gs1source(fqdn)
uri = regexp_to_uri(gs1_source['regexp'])
return uri | mit | 2,814,091,997,375,439,000 | 28.122449 | 98 | 0.638149 | false | 2.380634 | false | false | false |
charanpald/sandbox | sandbox/ranking/leafrank/SVMLeafRank.py | 1 | 2491 | import numpy
import logging
from sandbox.util.Sampling import Sampling
from sandbox.predictors.LibSVM import LibSVM
class SVMLeafRank(LibSVM):
"""
This is a subclass of LibSVM which will do model selection before learning.
"""
def __init__(self, paramDict, folds, sampleSize=None, numProcesses=1):
"""
sampleSize is the number of randomly chosen examples to use for model
selection
"""
super(SVMLeafRank, self).__init__()
self.paramDict = paramDict
self.folds = folds
self.chunkSize = 2
self.setMetricMethod("auc2")
self.sampleSize = sampleSize
self.processes = numProcesses
def generateLearner(self, X, y):
"""
Train using the given examples and labels, and use model selection to
find the best parameters.
"""
if numpy.unique(y).shape[0] != 2:
print(y)
raise ValueError("Can only operate on binary data")
#Do model selection first
if self.sampleSize == None:
idx = Sampling.crossValidation(self.folds, X.shape[0])
learner, meanErrors = self.parallelModelSelect(X, y, idx, self.paramDict)
else:
idx = Sampling.crossValidation(self.folds, self.sampleSize)
inds = numpy.random.permutation(X.shape[0])[0:self.sampleSize]
learner, meanErrors = self.parallelModelSelect(X[inds, :], y[inds], idx, self.paramDict)
learner = self.getBestLearner(meanErrors, self.paramDict, X, y)
return learner
def getBestLearner(self, meanErrors, paramDict, X, y, idx=None):
"""
As we are using AUC we will look for the max value.
"""
return super(SVMLeafRank, self).getBestLearner(meanErrors, paramDict, X, y, idx, best="max")
def copy(self):
"""
Return a new copied version of this object.
"""
svm = SVMLeafRank(self.paramDict, self.folds, self.sampleSize)
svm.setKernel(self.kernel,self.kernelParam)
svm.setC(self.C)
svm.setErrorCost(self.errorCost)
svm.setPenalty(self.penalty)
svm.setSvmType(self.type)
svm.processes=self.processes
svm.epsilon=self.epsilon
svm.metricMethod = self.metricMethod
svm.chunkSize = self.chunkSize
svm.timeout = self.timeout
        svm.normModelSelect = self.normModelSelect
return svm
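# Editor's sketch (hypothetical, not in the original module): a leaf-rank
# learner is built from a parameter grid and a fold count, then trained with
# model selection on a binary-labelled sample. The grid keys are whatever the
# parent LibSVM's parallelModelSelect expects; "setC" is an assumption here.
#
# paramDict = {"setC": 2.0 ** numpy.arange(-4, 4)}
# leafRank = SVMLeafRank(paramDict, folds=3, sampleSize=500)
# learner = leafRank.generateLearner(X, y)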
| gpl-3.0 | -7,128,618,342,529,609,000 | 36.179104 | 100 | 0.616218 | false | 3.998395 | false | false | false |
Inboxen/Inboxen | inboxen/account/tasks.py | 1 | 5836 | ##
# Copyright (C) 2013, 2014, 2015, 2016, 2017 Jessica Tallon & Matt Molyneaux
#
# This file is part of Inboxen.
#
# Inboxen is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Inboxen is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Inboxen. If not, see <http://www.gnu.org/licenses/>.
##
from datetime import datetime
import logging
from celery import chord
from django.conf import settings
from django.contrib.auth import get_user_model
from django.db import transaction
from django.utils import timezone
from pytz import utc
from inboxen.celery import app
from inboxen.models import Inbox
from inboxen.tasks import batch_delete_items, delete_inboxen_item
from inboxen.tickets.models import Question, Response
from inboxen.utils.tasks import chunk_queryset, create_queryset, task_group_skew
log = logging.getLogger(__name__)
INBOX_RESET_FIELDS = [
"description",
"disabled",
"exclude_from_unified",
"new",
"pinned",
"search_tsv",
"user",
]
QUESTION_RESET_FIELDS = [
"author",
"subject",
"body",
]
RESPONSE_RESET_FIELDS = [
"author",
"body",
]
def model_cleaner(instance, fields):
"""Resets model fields to their defaults"""
for field_name in fields:
field = instance._meta.get_field(field_name)
setattr(instance, field_name, field.get_default())
@app.task
@transaction.atomic()
def clean_questions(user_id):
for question in Question.objects.filter(author_id=user_id):
model_cleaner(question, QUESTION_RESET_FIELDS)
question.date = datetime.utcfromtimestamp(0).replace(tzinfo=utc)
question.save()
@app.task
@transaction.atomic()
def clean_responses(user_id):
for response in Response.objects.filter(author_id=user_id):
model_cleaner(response, RESPONSE_RESET_FIELDS)
response.save()
@app.task(rate_limit="10/m", default_retry_delay=5 * 60) # 5 minutes
@transaction.atomic()
def disown_inbox(inbox_id):
try:
inbox = Inbox.objects.get(id=inbox_id)
except Inbox.DoesNotExist:
return False
# delete emails in another task(s)
batch_delete_items.delay("email", kwargs={'inbox__id': inbox.pk})
# remove data from inbox
model_cleaner(inbox, INBOX_RESET_FIELDS)
inbox.deleted = True
inbox.created = datetime.utcfromtimestamp(0).replace(tzinfo=utc)
inbox.save()
return True
@app.task(ignore_result=True)
@transaction.atomic()
def finish_delete_user(result, user_id):
inbox = Inbox.objects.filter(user__id=user_id).only('id').exists()
user = get_user_model().objects.get(id=user_id)
if inbox:
raise Exception("User {0} still has inboxes!".format(user.username))
else:
log.info("Deleting user %s", user.username)
user.delete()
@app.task(ignore_result=True)
@transaction.atomic()
def delete_account(user_id):
# first we need to make sure the user can't login
user = get_user_model().objects.get(id=user_id)
user.set_unusable_password()
user.is_active = False
user.save()
# get ready to delete all inboxes
inboxes = user.inbox_set.only('id')
inbox_tasks = [disown_inbox.s(inbox.id) for inbox in inboxes]
question_tasks = [clean_questions.s(user_id), clean_responses.s(user_id)]
delete_chord = chord(inbox_tasks + question_tasks, finish_delete_user.s(user_id))
delete_chord.apply_async()
log.info("Deletion tasks for %s sent off", user.username)
@app.task
def user_suspended():
now = timezone.now()
for delta_start, delta_end, function in settings.USER_SUSPEND_TASKS:
kwargs = {}
if delta_start is None:
kwargs["last_login__gt"] = now - delta_end
elif delta_end is None:
kwargs["last_login__lt"] = now - delta_start
else:
kwargs["last_login__range"] = (now - delta_end, now - delta_start)
task = app.tasks[function]
task.apply_async(kwargs={"kwargs": kwargs})
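# Editor's sketch of the USER_SUSPEND_TASKS setting this task iterates over
# (hypothetical values, not shipped with Inboxen): each entry is
# (delta_start, delta_end, registered task name); a None bound leaves that
# side of the last_login range open. The dotted names assume Celery's default
# task naming for the functions below.
#
# from datetime import timedelta
# USER_SUSPEND_TASKS = [
#     (timedelta(days=180), timedelta(days=365),
#      "inboxen.account.tasks.user_suspended_disable_emails"),
#     (timedelta(days=365), None,
#      "inboxen.account.tasks.user_suspended_delete_emails"),
# ]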
@app.task
def user_suspended_disable_emails(kwargs):
kwargs = {"user__%s" % k: v for k, v in kwargs.items()}
items = create_queryset("userprofile", kwargs=kwargs)
items.update(receiving_emails=False)
@app.task
def user_suspended_delete_emails(kwargs, batch_number=500, chunk_size=10000, delay=20):
kwargs = {"inbox__user__%s" % k: v for k, v in kwargs.items()}
emails = create_queryset("email", kwargs=kwargs)
for idx, chunk in chunk_queryset(emails, chunk_size):
email_tasks = delete_inboxen_item.chunks([("email", i) for i in chunk], batch_number).group()
task_group_skew(email_tasks, start=(idx + 1) * delay, step=delay)
email_tasks.delay()
@app.task
def user_suspended_delete_user(kwargs, batch_number=500, chunk_size=10000, delay=20):
users = create_queryset(get_user_model(), kwargs=kwargs)
for idx, chunk in chunk_queryset(users, chunk_size):
user_tasks = delete_account.chunks([(i,) for i in chunk], batch_number).group()
task_group_skew(user_tasks, start=idx + 1, step=delay)
user_tasks.delay()
@app.task
def user_suspended_delete_user_never_logged_in(kwargs, batch_number=500, chunk_size=10000, delay=20):
kwargs = {k.replace("last_login", "date_joined"): v for k, v in kwargs.items()}
kwargs["last_login__isnull"] = True
user_suspended_delete_user(kwargs, batch_number, chunk_size, delay)
| agpl-3.0 | 6,240,896,996,204,487,000 | 31.065934 | 101 | 0.681289 | false | 3.387115 | false | false | false |
HKuz/Test_Code | CodeFights/floatRange.py | 1 | 1284 | #!/usr/local/bin/python
# Code Fights Float Range Problem
from itertools import count, takewhile
def floatRange(start, stop, step):
gen = takewhile(lambda x: x < stop, count(start, step))
return list(gen)
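# Editor's note (not part of the original solution): count(start, step)
# accumulates the step by repeated addition, so floating-point error can creep
# in; the itertools docs suggest the multiplicative form
# (start + step * i for i in count()) for better accuracy. The exact-equality
# checks in main() below are therefore sensitive to that drift.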
def main():
tests = [
[-0.9, 0.45, 0.2, [-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3]],
[1.5, 1.5, 10, []],
[1, 2, 1.5, [1]],
[-21.11, 21.11, 1.11,
[-21.11, -20, -18.89, -17.78, -16.67, -15.56, -14.45, -13.34,
-12.23, -11.12, -10.01, -8.9, -7.79, -6.68, -5.57, -4.46, -3.35,
-2.24, -1.13, -0.02, 1.09, 2.2, 3.31, 4.42, 5.53, 6.64, 7.75,
8.86, 9.97, 11.08, 12.19, 13.3, 14.41, 15.52, 16.63, 17.74, 18.85,
19.96, 21.07]],
[0, 1, 0.5,
[0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5,
0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95]]
]
for t in tests:
res = floatRange(t[0], t[1], t[2])
if t[3] == res:
print("PASSED: floatRange({}, {}, {}) returned {}"
.format(t[0], t[1], t[2], res))
else:
print(("FAILED: floatRange({}, {}, {}) returned {},"
"answer: {}").format(t[0], t[1], t[2], res, t[3]))
if __name__ == '__main__':
main()
| mit | -144,380,351,648,928,930 | 31.923077 | 79 | 0.425234 | false | 2.326087 | false | false | false |
frankvdp/django | django/contrib/gis/geos/libgeos.py | 38 | 5127 | """
This module houses the ctypes initialization procedures, as well
as the notice and error handler function callbacks (which get called
when an error occurs in GEOS).
This module also houses GEOS Pointer utilities, including
get_pointer_arr(), and GEOM_PTR.
"""
import logging
import os
from ctypes import CDLL, CFUNCTYPE, POINTER, Structure, c_char_p
from ctypes.util import find_library
from django.core.exceptions import ImproperlyConfigured
from django.utils.functional import SimpleLazyObject, cached_property
from django.utils.version import get_version_tuple
logger = logging.getLogger('django.contrib.gis')
def load_geos():
# Custom library path set?
try:
from django.conf import settings
lib_path = settings.GEOS_LIBRARY_PATH
except (AttributeError, EnvironmentError,
ImportError, ImproperlyConfigured):
lib_path = None
# Setting the appropriate names for the GEOS-C library.
if lib_path:
lib_names = None
elif os.name == 'nt':
# Windows NT libraries
lib_names = ['geos_c', 'libgeos_c-1']
elif os.name == 'posix':
# *NIX libraries
lib_names = ['geos_c', 'GEOS']
else:
raise ImportError('Unsupported OS "%s"' % os.name)
# Using the ctypes `find_library` utility to find the path to the GEOS
# shared library. This is better than manually specifying each library name
# and extension (e.g., libgeos_c.[so|so.1|dylib].).
if lib_names:
for lib_name in lib_names:
lib_path = find_library(lib_name)
if lib_path is not None:
break
# No GEOS library could be found.
if lib_path is None:
raise ImportError(
'Could not find the GEOS library (tried "%s"). '
'Try setting GEOS_LIBRARY_PATH in your settings.' %
'", "'.join(lib_names)
)
# Getting the GEOS C library. The C interface (CDLL) is used for
# both *NIX and Windows.
# See the GEOS C API source code for more details on the library function calls:
# http://geos.refractions.net/ro/doxygen_docs/html/geos__c_8h-source.html
_lgeos = CDLL(lib_path)
# Here we set up the prototypes for the initGEOS_r and finishGEOS_r
# routines. These functions aren't actually called until they are
# attached to a GEOS context handle -- this actually occurs in
# geos/prototypes/threadsafe.py.
_lgeos.initGEOS_r.restype = CONTEXT_PTR
_lgeos.finishGEOS_r.argtypes = [CONTEXT_PTR]
# Set restype for compatibility across 32 and 64-bit platforms.
_lgeos.GEOSversion.restype = c_char_p
return _lgeos
# The notice and error handler C function callback definitions.
# Supposed to mimic the GEOS message handler (C below):
# typedef void (*GEOSMessageHandler)(const char *fmt, ...);
NOTICEFUNC = CFUNCTYPE(None, c_char_p, c_char_p)
def notice_h(fmt, lst):
fmt, lst = fmt.decode(), lst.decode()
try:
warn_msg = fmt % lst
except TypeError:
warn_msg = fmt
logger.warning('GEOS_NOTICE: %s\n', warn_msg)
notice_h = NOTICEFUNC(notice_h)
ERRORFUNC = CFUNCTYPE(None, c_char_p, c_char_p)
def error_h(fmt, lst):
fmt, lst = fmt.decode(), lst.decode()
try:
err_msg = fmt % lst
except TypeError:
err_msg = fmt
logger.error('GEOS_ERROR: %s\n', err_msg)
error_h = ERRORFUNC(error_h)
# #### GEOS Geometry C data structures, and utility functions. ####
# Opaque GEOS geometry structures, used for GEOM_PTR and CS_PTR
class GEOSGeom_t(Structure):
pass
class GEOSPrepGeom_t(Structure):
pass
class GEOSCoordSeq_t(Structure):
pass
class GEOSContextHandle_t(Structure):
pass
# Pointers to opaque GEOS geometry structures.
GEOM_PTR = POINTER(GEOSGeom_t)
PREPGEOM_PTR = POINTER(GEOSPrepGeom_t)
CS_PTR = POINTER(GEOSCoordSeq_t)
CONTEXT_PTR = POINTER(GEOSContextHandle_t)
lgeos = SimpleLazyObject(load_geos)
class GEOSFuncFactory:
"""
Lazy loading of GEOS functions.
"""
argtypes = None
restype = None
errcheck = None
def __init__(self, func_name, *args, restype=None, errcheck=None, argtypes=None, **kwargs):
self.func_name = func_name
if restype is not None:
self.restype = restype
if errcheck is not None:
self.errcheck = errcheck
if argtypes is not None:
self.argtypes = argtypes
self.args = args
self.kwargs = kwargs
def __call__(self, *args, **kwargs):
return self.func(*args, **kwargs)
@cached_property
def func(self):
from django.contrib.gis.geos.prototypes.threadsafe import GEOSFunc
func = GEOSFunc(self.func_name)
func.argtypes = self.argtypes or []
func.restype = self.restype
if self.errcheck:
func.errcheck = self.errcheck
return func
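# Editor's illustration (hypothetical, not part of this module): the prototype
# wrappers in django.contrib.gis.geos.prototypes build their callables through
# this factory along the following lines; the specific call and its
# argtypes/restype here are an assumption for illustration only.
#
# from ctypes import c_double, c_int
# geos_area = GEOSFuncFactory(
#     'GEOSArea', argtypes=[GEOM_PTR, POINTER(c_double)], restype=c_int)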
def geos_version():
"""Return the string version of the GEOS library."""
return lgeos.GEOSversion()
def geos_version_tuple():
"""Return the GEOS version as a tuple (major, minor, subminor)."""
return get_version_tuple(geos_version().decode())
| bsd-3-clause | -6,973,117,126,197,560,000 | 28.297143 | 95 | 0.6575 | false | 3.726017 | false | false | false |
manashmndl/LearningPyQt | pyqt/chap11/ypipewidget.py | 1 | 6172 | #!/usr/bin/env python
# Copyright (c) 2008-14 Qtrac Ltd. All rights reserved.
# This program or module is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation, either version 2 of the License, or
# version 3 of the License, or (at your option) any later version. It is
# provided for educational purposes and is distributed in the hope that
# it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
# the GNU General Public License for more details.
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from future_builtins import *
from PyQt4.QtCore import (QPointF, QSize, Qt)
from PyQt4.QtCore import pyqtSignal as Signal
from PyQt4.QtGui import (QApplication, QBrush, QColor, QFontMetricsF,
QFrame, QLabel, QLinearGradient, QPainter, QPolygon,
QSizePolicy, QSpinBox, QWidget)
class YPipeWidget(QWidget):
value_changed = Signal(int, int)
def __init__(self, leftFlow=0, rightFlow=0, maxFlow=100,
parent=None):
super(YPipeWidget, self).__init__(parent)
self.leftSpinBox = QSpinBox(self)
self.leftSpinBox.setRange(0, maxFlow)
self.leftSpinBox.setValue(leftFlow)
self.leftSpinBox.setSuffix(" l/s")
self.leftSpinBox.setAlignment(Qt.AlignRight|Qt.AlignVCenter)
self.leftSpinBox.valueChanged.connect(self.valueChanged)
self.rightSpinBox = QSpinBox(self)
self.rightSpinBox.setRange(0, maxFlow)
self.rightSpinBox.setValue(rightFlow)
self.rightSpinBox.setSuffix(" l/s")
self.rightSpinBox.setAlignment(Qt.AlignRight|Qt.AlignVCenter)
self.rightSpinBox.valueChanged.connect(self.valueChanged)
self.label = QLabel(self)
self.label.setFrameStyle(QFrame.StyledPanel|QFrame.Sunken)
self.label.setAlignment(Qt.AlignCenter)
fm = QFontMetricsF(self.font())
self.label.setMinimumWidth(fm.width(" 999 l/s "))
self.setSizePolicy(QSizePolicy(QSizePolicy.Expanding,
QSizePolicy.Expanding))
self.setMinimumSize(self.minimumSizeHint())
self.valueChanged()
def valueChanged(self):
a = self.leftSpinBox.value()
b = self.rightSpinBox.value()
self.label.setText("{0} l/s".format(a + b))
self.value_changed.emit(a, b)
self.update()
def values(self):
return self.leftSpinBox.value(), self.rightSpinBox.value()
def minimumSizeHint(self):
return QSize(self.leftSpinBox.width() * 3,
self.leftSpinBox.height() * 5)
def resizeEvent(self, event=None):
fm = QFontMetricsF(self.font())
x = (self.width() - self.label.width()) / 2
y = self.height() - (fm.height() * 1.5)
self.label.move(x, y)
y = self.height() / 60.0
x = (self.width() / 4.0) - self.leftSpinBox.width()
self.leftSpinBox.move(x, y)
x = self.width() - (self.width() / 4.0)
self.rightSpinBox.move(x, y)
def paintEvent(self, event=None):
LogicalSize = 100.0
def logicalFromPhysical(length, side):
return (length / side) * LogicalSize
fm = QFontMetricsF(self.font())
ymargin = ((LogicalSize / 30.0) +
logicalFromPhysical(self.leftSpinBox.height(),
self.height()))
ymax = (LogicalSize -
logicalFromPhysical(fm.height() * 2, self.height()))
width = LogicalSize / 4.0
cx, cy = LogicalSize / 2.0, LogicalSize / 3.0
ax, ay = cx - (2 * width), ymargin
bx, by = cx - width, ay
dx, dy = cx + width, ay
ex, ey = cx + (2 * width), ymargin
fx, fy = cx + (width / 2), cx + (LogicalSize / 24.0)
gx, gy = fx, ymax
hx, hy = cx - (width / 2), ymax
ix, iy = hx, fy
painter = QPainter(self)
painter.setRenderHint(QPainter.Antialiasing)
side = min(self.width(), self.height())
painter.setViewport((self.width() - side) / 2,
(self.height() - side) / 2, side, side)
painter.setWindow(0, 0, LogicalSize, LogicalSize)
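        # From this point every drawing call uses a fixed 100x100 logical
        # coordinate system that Qt maps onto the largest centred square of
        # the widget (via the setViewport()/setWindow() pair above).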
painter.setPen(Qt.NoPen)
gradient = QLinearGradient(QPointF(0, 0),
QPointF(0, 100))
gradient.setColorAt(0, Qt.white)
a = self.leftSpinBox.value()
gradient.setColorAt(1, (Qt.red if a != 0 else Qt.white))
painter.setBrush(QBrush(gradient))
painter.drawPolygon(QPolygon([ax, ay, bx, by, cx, cy, ix, iy]))
gradient = QLinearGradient(QPointF(0, 0), QPointF(0, 100))
gradient.setColorAt(0, Qt.white)
b = self.rightSpinBox.value()
gradient.setColorAt(1, (Qt.blue if b != 0
else Qt.white))
painter.setBrush(QBrush(gradient))
painter.drawPolygon(QPolygon([cx, cy, dx, dy, ex, ey, fx, fy]))
if (a + b) == 0:
color = QColor(Qt.white)
else:
ashare = (a / (a + b)) * 255.0
bshare = 255.0 - ashare
color = QColor(ashare, 0, bshare)
gradient = QLinearGradient(QPointF(0, 0), QPointF(0, 100))
gradient.setColorAt(0, Qt.white)
gradient.setColorAt(1, color)
painter.setBrush(QBrush(gradient))
painter.drawPolygon(QPolygon(
[cx, cy, fx, fy, gx, gy, hx, hy, ix, iy]))
painter.setPen(Qt.black)
painter.drawPolyline(QPolygon([ax, ay, ix, iy, hx, hy]))
painter.drawPolyline(QPolygon([gx, gy, fx, fy, ex, ey]))
painter.drawPolyline(QPolygon([bx, by, cx, cy, dx, dy]))
if __name__ == "__main__":
import sys
def valueChanged(a, b):
print(a, b)
app = QApplication(sys.argv)
form = YPipeWidget()
form.value_changed.connect(valueChanged)
form.setWindowTitle("YPipe")
form.move(0, 0)
form.show()
form.resize(400, 400)
app.exec_()
| mit | -3,199,703,286,188,070,000 | 35.52071 | 74 | 0.603046 | false | 3.5553 | false | false | false |
PyJAX/foodie | foodie/settings.py | 1 | 2013 | """
Django settings for foodie project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'i-$0s)*3+m%d_kw4c&f6h+5a_k8f$bco=4gg2xm-=88l33!(d!'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'randomizer',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'foodie.urls'
WSGI_APPLICATION = 'foodie.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = 'static'
| isc | -2,277,805,838,004,729,600 | 22.964286 | 71 | 0.718331 | false | 3.200318 | false | false | false |
sbg2133/miscellaneous_projects | carina/velocityMaps/vdisp.py | 1 | 1943 | import numpy as np
from lpf import lowpass_cosine as filt
import matplotlib.pyplot as plt
plt.ion()
def fwhm(v_array, vmin, vmax):
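    """Estimate the full width at half maximum of each row of `v_array`.

    (Summary inferred from the code below, added for clarity.) Edge channels
    are trimmed, each row is low-pass filtered, the half-maximum crossings on
    either side of the peak are located, and twice that half-width -- in the
    velocity units implied by vmin/vmax -- is stored per row. Returns the
    filtered rows and the per-row FWHM estimates.
    """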
v_array = v_array[:,1000:-1000,]
steps = v_array.shape[1]
print steps, vmax, vmin
vres = (vmax - vmin)/steps
v = np.arange(vmin, vmax, vres)/1.0e3
tau = 1.0
fc = 0.05
fwhm = np.zeros(v_array.shape[0])
vlpf = np.zeros_like(v_array)
#near = np.zeros_like(fwhm).astype('int')
#second = np.zeros_like(fwhm).astype('int')
for i in range(len(v_array)):
# avoid edges
if not np.nanmean(v_array[i]):
fwhm[i] = np.nan
else:
try:
v_lpf = filt(v_array[i], tau, fc, fc/3.)
vlpf[i] = v_lpf
maximum = np.nanmax(v_lpf)
max_idx = np.nanargmax(v_lpf)
minimum = np.nanmin(v_lpf)
height = maximum - minimum
half_max = height/2.
nearest = np.nanargmin((np.abs(v_lpf - half_max)))
#near[i] = nearest
"""
print "half max:", half_max
print "nearest:", nearest
print "max idx:", max_idx
"""
if nearest > max_idx: # if half max point is to right of maximum
second_point = np.nanargmin((np.abs(v_lpf[:max_idx] - half_max)))
if nearest < max_idx: # if it's on the left
second_point = np.nanargmin((np.abs(v_lpf[max_idx:] - half_max)))
#second[i] = second_point
#print i, max_idx, nearest, second_point
half_width_idxs = np.abs(nearest - second_point)
if half_width_idxs > 1000:
fwhm[i] = np.nan
else:
fwhm[i] = 2.*half_width_idxs*vres
except ValueError:
fwhm[i] = np.nan
return vlpf, fwhm
| gpl-3.0 | 2,516,412,777,968,482,000 | 34.327273 | 85 | 0.487905 | false | 3.402802 | false | false | false |
burkesquires/pyeq2 | Models_2D/Power.py | 3 | 23359 | from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
# pyeq2 is a collection of equations expressed as Python classes
#
# Copyright (C) 2013 James R. Phillips
# 2548 Vera Cruz Drive
# Birmingham, AL 35235 USA
#
# email: [email protected]
#
# License: BSD-style (see LICENSE.txt in main source directory)
import sys, os
if os.path.join(sys.path[0][:sys.path[0].rfind(os.sep)], '..') not in sys.path:
sys.path.append(os.path.join(sys.path[0][:sys.path[0].rfind(os.sep)], '..'))
import pyeq2
import numpy
numpy.seterr(all= 'ignore')
import pyeq2.Model_2D_BaseClass
class Geometric_Modified(pyeq2.Model_2D_BaseClass.Model_2D_BaseClass):
_baseName = "Geometric Modified"
_HTML = 'y = a * x<sup>(b/x)</sup>'
_leftSideHTML = 'y'
_coefficientDesignators = ['a', 'b']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = True
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
independentData1CannotContainBothPositiveAndNegativeFlag = True
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
try:
temp = a * numpy.power(x_in, (b/x_in))
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = a * pow(x_in, (b/x_in));\n"
return s
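# Quick numeric sanity check of the Geometric Modified formula above
# (illustrative only; the values are made up and this is not part of the
# pyeq2 API):
#
#     import numpy
#     a, b, x = 2.0, 3.0, numpy.array([1.0, 2.0, 4.0])
#     y = a * numpy.power(x, b / x)   # -> [2.0, 5.657, 5.657]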
class PowerA_Modified(pyeq2.Model_2D_BaseClass.Model_2D_BaseClass):
_baseName = "Power A Modified"
_HTML = 'y = a * b<sup>x</sup>'
_leftSideHTML = 'y'
_coefficientDesignators = ['a', 'b']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def __init__(self, inFittingTarget = 'SSQABS', inExtendedVersionName = 'Default'):
pyeq2.Model_2D_BaseClass.Model_2D_BaseClass.__init__(self, inFittingTarget, inExtendedVersionName)
self.lowerCoefficientBounds = [None, 0.0]
self.extendedVersionHandler.AppendAdditionalCoefficientBounds(self)
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
try:
temp = a * numpy.power(b, x_in)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = a * pow(b, x_in);\n"
return s
class PowerA_Modified_Transform(pyeq2.Model_2D_BaseClass.Model_2D_BaseClass):
_baseName = "Power A Modified Transform"
_HTML = 'y = a * b<sup>cx + d</sup>'
_leftSideHTML = 'y'
_coefficientDesignators = ['a', 'b', 'c', 'd']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
c = inCoeffs[2]
d = inCoeffs[3]
try:
temp = a * numpy.power(b, c * x_in + d)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = a * pow(b, c * x_in + d);\n"
return s
class PowerB_Modified(pyeq2.Model_2D_BaseClass.Model_2D_BaseClass):
_baseName = "Power B Modified"
_HTML = 'y = a<sup>ln(x)</sup>'
_leftSideHTML = 'y'
_coefficientDesignators = ['a']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = False
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = True
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = True
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.LogX(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_LogX = inDataCacheDictionary['LogX'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
try:
temp = numpy.power(a, x_LogX)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = pow(a, log(x_in));\n"
return s
class PowerB_Modified_Transform(pyeq2.Model_2D_BaseClass.Model_2D_BaseClass):
_baseName = "Power B Modified Transform"
_HTML = 'y = a<sup>ln(bx + c)</sup>'
_leftSideHTML = 'y'
_coefficientDesignators = ['a', 'b', 'c']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = False
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
c = inCoeffs[2]
try:
temp = numpy.power(a, numpy.log(b * x_in + c))
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = pow(a, log(b * x_in + c));\n"
return s
class PowerC_Modified(pyeq2.Model_2D_BaseClass.Model_2D_BaseClass):
_baseName = "Power C Modified"
_HTML = 'y = (a + x)<sup>b</sup>'
_leftSideHTML = 'y'
_coefficientDesignators = ['a', 'b']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = False
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
try:
temp = numpy.power(a + x_in, b)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = pow(a + x_in, b);\n"
return s
class PowerC_Modified_Transform(pyeq2.Model_2D_BaseClass.Model_2D_BaseClass):
_baseName = "Power C Modified Transform"
_HTML = 'y = (a + bx)<sup>c</sup>'
_leftSideHTML = 'y'
_coefficientDesignators = ['a', 'b', 'c']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = False
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
c = inCoeffs[2]
try:
temp = numpy.power(a + b * x_in, c)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = pow(a + b * x_in, c);\n"
return s
class PowerLawExponentialCutoff(pyeq2.Model_2D_BaseClass.Model_2D_BaseClass):
_baseName = "Power Law With Exponential Cutoff"
_HTML = 'p(k) = C * k<sup>(-T)</sup> * exp(-k/K)'
_leftSideHTML = 'p(k)'
_coefficientDesignators = ['C', 'T', 'K']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = True
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
C = inCoeffs[0]
T = inCoeffs[1]
K = inCoeffs[2]
try:
temp = C * numpy.power(x_in, -1.0 * T) * numpy.exp(-1.0 * x_in / K)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = C * pow(x_in, -1.0 * T) * exp(-1.0 * x_in / K);\n"
return s
class PowerRoot(pyeq2.Model_2D_BaseClass.Model_2D_BaseClass):
_baseName = "Root"
_HTML = 'y = a<sup>(1.0/x)</sup>'
_leftSideHTML = 'y'
_coefficientDesignators = ['a']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = False
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = True
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.PowX(NameOrValueFlag=1, args=[-1.0]), [-1.0]])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
PowX_Neg1 = inDataCacheDictionary['PowX_-1.0'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
try:
temp = numpy.power(a, PowX_Neg1)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = pow(a, (1.0/x_in));\n"
return s
class SimplePower(pyeq2.Model_2D_BaseClass.Model_2D_BaseClass):
_baseName = "Simple Power"
_HTML = 'y = x<sup>a</sup>'
_leftSideHTML = 'y'
_coefficientDesignators = ['a']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = False
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = False
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = True
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
try:
temp = numpy.power(x_in, a)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = pow(x_in, a);\n"
return s
class StandardGeometric(pyeq2.Model_2D_BaseClass.Model_2D_BaseClass):
_baseName = "Standard Geometric"
_HTML = 'y = a * x<sup>bx</sup>'
_leftSideHTML = 'y'
_coefficientDesignators = ['a', 'b']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = True
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
try:
temp = a * numpy.power(x_in, (b*x_in))
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = a * pow(x_in, (b*x_in));\n"
return s
class StandardPower(pyeq2.Model_2D_BaseClass.Model_2D_BaseClass):
_baseName = "Standard Power"
_HTML = 'y = a * x<sup>b</sup>'
_leftSideHTML = 'y'
_coefficientDesignators = ['a', 'b']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = False
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = True
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
try:
temp = a * numpy.power(x_in, b)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = a * pow(x_in, b);\n"
return s
class XShiftedPower(pyeq2.Model_2D_BaseClass.Model_2D_BaseClass):
_baseName = "X Shifted Power"
_HTML = 'y = a * (x-b)<sup>c</sup>'
_leftSideHTML = 'y'
_coefficientDesignators = ['a', 'b', 'c']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
c = inCoeffs[2]
try:
temp = a * numpy.power((x_in-b), c)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = a * pow((x_in-b), c);\n"
return s
| bsd-2-clause | 2,006,532,823,001,543,400 | 33.605926 | 121 | 0.70521 | false | 3.792661 | false | false | false |
quantopian/serializable-traitlets | setup.py | 1 | 1476 | from setuptools import setup, find_packages
from sys import version_info
def install_requires():
requires = [
'traitlets>=4.1',
'six>=1.9.0',
'pyyaml>=3.11',
]
if (version_info.major, version_info.minor) < (3, 4):
requires.append('singledispatch>=3.4.0')
return requires
def extras_require():
return {
'test': [
'tox',
'pytest>=2.8.5',
'pytest-cov>=1.8.1',
'pytest-pep8>=1.0.6',
'click>=6.0',
],
}
def main():
setup(
name='straitlets',
# remember to update straitlets/__init__.py!
version='0.3.3',
description="Serializable IPython Traitlets",
author="Quantopian Team",
author_email="[email protected]",
packages=find_packages(include='straitlets.*'),
include_package_data=True,
zip_safe=True,
url="https://github.com/quantopian/serializable-traitlets",
classifiers=[
'Development Status :: 3 - Alpha',
'Framework :: IPython',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python',
],
install_requires=install_requires(),
extras_require=extras_require()
)
if __name__ == '__main__':
main()
| apache-2.0 | -1,732,214,769,954,039,300 | 25.836364 | 67 | 0.536585 | false | 3.874016 | false | false | false |
iarroyof/lxmls-toolkit | lxmls/deep_learning/sgd.py | 3 | 4091 | import sys
import numpy as np
import time
import theano
import theano.tensor as T
def class_acc(hat_y, y_ref):
'''
Computes percent accuracy and log probability given estimated and reference
class indices
'''
# Check probability of devel set
pred = hat_y[y_ref, np.arange(y_ref.shape[0])]
p_dev = np.sum(np.log(pred))
# Check percent correct classification on the devel set
cr = np.sum((np.argmax(hat_y, 0) == y_ref).astype(int))*1.0/y_ref.shape[0]
return (cr, p_dev)
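# Minimal usage sketch for class_acc (illustrative values; per the indexing
# above, hat_y is (n_classes, n_examples) and y_ref holds gold class indices):
#
#     hat_y = np.array([[0.9, 0.2],
#                       [0.1, 0.8]])
#     y_ref = np.array([0, 1])
#     acc, logp = class_acc(hat_y, y_ref)   # acc == 1.0, logp ~= -0.33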
def sanity_checks(batch_up, n_batch, bsize, lrate, train_set):
if batch_up:
if not n_batch:
raise ValueError, ("If you use compiled batch update you need to "
"specify n_batch")
if bsize or lrate or train_set:
raise ValueError, ("If you use compiled batch update you can not"
"specify bsize, lrate and train_set")
else:
if not bsize or not lrate or not train_set:
raise ValueError, ("If compiled batch not used you need to specity"
"bsize, lrate and train_set")
def SGD_train(model, n_iter, bsize=None, lrate=None, train_set=None,
batch_up=None, n_batch=None, devel_set=None, model_dbg=None):
# SANITY CHECKS:
sanity_checks(batch_up, n_batch, bsize, lrate, train_set)
if not batch_up:
train_x, train_y = train_set
# Number of mini batches
n_batch = train_x.shape[1]/bsize + 1
# Check for Theano vars
if getattr(model, "_forward", None):
shared_vars = True
else:
shared_vars = False
    # For each iteration, run backpropagation on mini-batches of examples. For
    # each batch, sum up all gradients and update each weight with the
    # SGD rule.
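    # (Concretely, the update applied below for every parameter and every
    # mini-batch is: param <- param - lrate * d(batch loss)/d(param).)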
prev_p_devel = None
prev_p_train = None
for i in np.arange(n_iter):
# This will hold the posterior of train data for each epoch
p_train = 0
init_time = time.clock()
for j in np.arange(n_batch):
if batch_up:
# Compiled batch update
p_train += -batch_up(j)
else:
# Manual batch update
# Mini batch
batch_x = train_x[:, j*bsize:(j+1)*bsize]
batch_y = train_y[j*bsize:(j+1)*bsize]
# Get gradients for each layer and this batch
nabla_params = model.grads(batch_x, batch_y)
# Update each parameter with SGD rule
for m in np.arange(len(model.params)):
if shared_vars:
# Parameters as theano shared variables
model.params[m].set_value(model.params[m].get_value()
- lrate*np.array(nabla_params[m]))
else:
# Parameters as numpy array
model.params[m] -= lrate*nabla_params[m]
# INFO
sys.stdout.write("\rBatch %d/%d (%d%%) " %
(j+1, n_batch, (j+1)*100.0/n_batch))
sys.stdout.flush()
batch_time = time.clock() - init_time
# Check probability of devel set
if devel_set:
corr, p_devel = class_acc(model.forward(devel_set[0]), devel_set[1])
if prev_p_devel:
delta_p_devel = p_devel - prev_p_devel
else:
delta_p_devel = 0
prev_p_devel = p_devel
if prev_p_train:
delta_p_train = p_train - prev_p_train
else:
delta_p_train = 0
prev_p_train = p_train
validation_time = time.clock() - init_time - batch_time
sys.stdout.write(" Epoch %2d/%2d in %2.2f seg\n" % (i+1, n_iter, batch_time))
if devel_set:
sys.stdout.write("Logpos devel: %10.1f (delta: %10.2f) Corr devel %2.2f\n\n" % (p_devel, delta_p_devel, corr))
print ""
| mit | -8,543,377,877,708,973,000 | 35.526786 | 122 | 0.523588 | false | 3.652679 | false | false | false |
fuzzycode/RoboPi | robot/utils/iterators.py | 1 | 1689 | # -*- coding: utf-8 -*-
# The MIT License (MIT)
#
# Copyright (c) 2015 Björn Larsson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
class BiCircular(object):
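    """Bidirectional circular iterator over a sequence: next()/previous()
    step forwards/backwards through the items and wrap around at either end."""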
def __init__(self, enumerator):
self._index = 0
self._enum = enumerator
def next(self):
self._nextIndex(True)
return self._enum[self._index]
def previous(self):
self._nextIndex(False)
return self._enum[self._index]
    def _nextIndex(self, forward):
        inc = 1 if forward else -1
        self._index += inc
if self._index < 0:
self._index = len(self._enum) - 1
elif self._index >= len(self._enum):
self._index = 0
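# Usage sketch (hypothetical data; behaviour follows the code above -- the
# first next() advances from index 0, so it yields the second item):
#
#     wrap = BiCircular(['a', 'b', 'c'])
#     wrap.next()      # 'b'
#     wrap.previous()  # 'a'
#     wrap.previous()  # 'c'  (wraps around to the end)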
| mit | 8,327,679,356,106,653,000 | 35.695652 | 80 | 0.692536 | false | 4.137255 | false | false | false |
ColdrickSotK/storyboard | storyboard/tests/db/migration/test_migrations.py | 1 | 4632 | # Copyright 2014 OpenStack Foundation
# Copyright 2014 Mirantis Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for database migrations. This test case reads the configuration
file test_migrations.conf for database connection settings
to use in the tests. For each connection found in the config file,
the test case runs a series of test cases to ensure that migrations work
properly.
There are also "opportunistic" tests for both mysql and postgresql in here,
which allow testing against mysql and pg in a properly configured unit
test environment.
For the opportunistic testing you need to set up a db named 'openstack_citest'
with user 'openstack_citest' and password 'openstack_citest' on localhost.
The test will then use that db and u/p combo to run the tests.
For postgres on Ubuntu this can be done with the following commands:
sudo -u postgres psql
postgres=# create user openstack_citest with createdb login password
'openstack_citest';
postgres=# create database openstack_citest with owner openstack_citest;
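
For MySQL an equivalent setup (not part of the original instructions; standard
MySQL syntax, adjust privileges to your environment) would be roughly:

mysql -u root
mysql> CREATE DATABASE openstack_citest;
mysql> CREATE USER 'openstack_citest'@'localhost' IDENTIFIED BY 'openstack_citest';
mysql> GRANT ALL PRIVILEGES ON openstack_citest.* TO 'openstack_citest'@'localhost';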
"""
from oslo_config import cfg
from oslo_db.sqlalchemy import utils as db_utils
from storyboard.tests.db.migration import test_migrations_base as base
CONF = cfg.CONF
class TestMigrations(base.BaseWalkMigrationTestCase, base.CommonTestsMixIn):
"""Test sqlalchemy-migrate migrations."""
USER = "openstack_citest"
PASSWD = "openstack_citest"
DATABASE = "openstack_citest"
def __init__(self, *args, **kwargs):
super(TestMigrations, self).__init__(*args, **kwargs)
def setUp(self):
super(TestMigrations, self).setUp()
def assertColumnExists(self, engine, table, column):
t = db_utils.get_table(engine, table)
self.assertIn(column, t.c)
def assertColumnNotExists(self, engine, table, column):
t = db_utils.get_table(engine, table)
self.assertNotIn(column, t.c)
def assertIndexExists(self, engine, table, index):
t = db_utils.get_table(engine, table)
index_names = [idx.name for idx in t.indexes]
self.assertIn(index, index_names)
def assertIndexMembers(self, engine, table, index, members):
self.assertIndexExists(engine, table, index)
t = db_utils.get_table(engine, table)
index_columns = None
for idx in t.indexes:
if idx.name == index:
index_columns = idx.columns.keys()
break
self.assertEqual(sorted(members), sorted(index_columns))
def _pre_upgrade_001(self, engine):
# Anything returned from this method will be
# passed to corresponding _check_xxx method as 'data'.
pass
def _check_001(self, engine, data):
self.assertColumnExists(engine, 'users', 'created_at')
self.assertColumnExists(engine, 'users', 'last_login')
self.assertColumnExists(engine, 'teams', 'updated_at')
self.assertColumnExists(engine, 'teams', 'name')
def _check_002(self, engine, data):
self.assertColumnExists(engine, 'users', 'openid')
self.assertColumnNotExists(engine, 'users', 'password')
def _check_003(self, engine, data):
self.assertColumnExists(engine, 'projects', 'is_active')
self.assertColumnExists(engine, 'stories', 'is_active')
self.assertColumnExists(engine, 'tasks', 'is_active')
def _check_004(self, engine, data):
self.assertColumnExists(engine, 'projects', 'description')
def _check_005(self, engine, data):
self.assertColumnExists(engine, 'projects', 'is_active')
self.assertColumnExists(engine, 'stories', 'is_active')
self.assertColumnExists(engine, 'tasks', 'is_active')
def _check_006(self, engine, data):
self.assertColumnNotExists(engine, 'users', 'first_name')
self.assertColumnNotExists(engine, 'users', 'last_name')
self.assertColumnExists(engine, 'users', 'full_name')
def _pre_upgrade_007(self, engine):
self.assertColumnNotExists(engine, 'comments', 'is_active')
def _check_007(self, engine, data):
self.assertColumnExists(engine, 'comments', 'is_active')
| apache-2.0 | 57,971,858,778,312,310 | 36.658537 | 78 | 0.697755 | false | 3.882649 | true | false | false |
flipjack/suventa | project/app/views.py | 1 | 5804 | # -*- encoding: utf-8 -*-
from django.shortcuts import render
from django.contrib.auth import login as auth_login, authenticate, logout
from django.http import HttpResponseRedirect, HttpResponse
from django.core.urlresolvers import reverse
from django.contrib import messages
from django.views.decorators.csrf import csrf_exempt
import json
from forms import *
from models import *
from allauth.socialaccount.models import SocialToken
def user_logout(request):
logout(request)
return HttpResponseRedirect(reverse('landing'))
def landing(request):
return render(request, 'app/landing.html',locals())
def login(request, empresa, token):
if not request.user.is_anonymous():
return HttpResponseRedirect(reverse('index'))
form = LoginForm()
if request.method == "POST":
form = LoginForm(request.POST)
if form.is_valid():
user = form.cleaned_data["user"]
password = form.cleaned_data["password"]
company = form.cleaned_data["company"]
username = company + '_' + user
if not company:
username = 'ZaresApp_Castellanos'
access = authenticate(username=username, password=password)
if access is not None:
if access.is_active:
                    auth_login(request, access)  # Django's auth login; a bare login() here would recurse into this view
return HttpResponseRedirect(reverse('index'))
else:
mensaje="Usuario esta desactivado"
else:
mensaje="Usuario o contraseña incorrecto"
else:
print form.errors
mensaje="Usuario o contraseña incorrecto"
return render(request, 'app/login.html',locals())
def index(request):
return render(request, 'app/sell_point.html',locals())
@csrf_exempt
def menus(request):
form = MenusForm()
if request.method == "POST":
if 'jstree' in request.POST:
menu = Menu.objects.filter(id = request.POST.get('menu_id'))[0]
#menu.nivel = int(request.POST.get('level')) + 1
if request.POST.get('depend_id'):
menu.parent = Menu.objects.filter(id = request.POST.get('depend_id'))[0]
else:
menu.parent = None
menu.save()
return HttpResponse(json.dumps({"data": "true"}),content_type="application/json")
if 'add_menu' in request.POST:
form = MenusForm(request.POST)
if form.is_valid():
obj = form.save(commit = False)
obj.company = request.user.company
obj.save()
messages.success(request, 'Guardado con éxito!')
form = MenusForm()
else:
messages.error(request, 'Algunos datos en tu formulario estan incorrectos')
menus = Menu.objects.filter(company = request.user.company)
return render(request, 'app/menus.html',locals())
def menus_edit(request,slug, ide):
menu = Menu.objects.filter(slug=slug, ide=ide, company = request.user.company)[0]
form = MenusForm(instance = menu)
if request.POST:
form = MenusForm(request.POST, instance=menu)
if form.is_valid():
obj = form.save(commit = False)
obj.company = request.user.company
obj.save()
messages.success(request, 'Modificado con éxito!')
return HttpResponseRedirect( reverse('mis_menus' ) )
else:
messages.error(request, 'Algunos datos en tu formulario estan incorrectos')
menu = Menu.objects.filter(slug=slug, ide=ide, company = request.user.company)[0]
return render(request, 'app/menus_edit.html',locals())
def menus_delete(request,slug, ide):
Menu.objects.filter(slug=slug, ide=ide, company = request.user.company)[0].delete()
messages.warning(request, 'El menú se eliminó con exito')
registry = Registry()
registry.name = "Se eliminó menú"
registry.code = "delete_menu"
registry.user = request.user.user
registry.company = request.user.company
registry.save()
return HttpResponseRedirect( reverse('menus') )
def sellpoints(request):
form = Sell_pointForm()
if request.method == "POST":
if 'add_sell_point' in request.POST:
form = Sell_pointForm(request.POST, request.FILES)
if form.is_valid():
points = len(Sell_point.objects.filter(company=request.user.company))
if points >= request.user.company.sell_point_limits:
messages.error(request, 'Llegaste al limite de tus puntos de venta')
else:
obj = form.save(commit=False)
obj.company = request.user.company
obj.create_by = request.user
obj.save()
messages.success(request, 'Nuevo punto de venta dado de alta con éxito')
else:
messages.error(request, 'Algunos datos en tu formulario estan incorrectos')
pvs = Sell_point.objects.filter(company = request.user.company)
return render(request, 'app/sellpoints.html',locals())
def sellpoints_edit(request, ide):
sell_point = Sell_point.objects.filter(ide=ide)[0]
form = Sell_pointForm(instance = sell_point)
if request.POST:
form = Sell_pointForm(request.POST, request.FILES, instance=sell_point)
if form.is_valid():
form.save()
messages.success(request, 'Punto de venta modificado con éxito')
return HttpResponseRedirect( reverse('mis_puntos_de_venta' ) )
else:
messages.error(request, 'Algunos datos en tu formulario estan incorrectos')
return render(request, 'app/mis_puntos_de_venta_edit.html',locals())
def products(request):
return render(request, 'app/products.html',locals())
def products_add(request):
form = ProductForm()
#if request.POST:
#form = Sell_pointForm(request.POST, request.FILES)
#if form.is_valid():
#points = len(Sell_point.objects.filter(company=request.user.company))
#if points >= request.user.company.sell_point_limits:
#messages.error(request, 'Llegaste al limite de tus puntos de venta')
#else:
#obj = form.save(commit=False)
#obj.company = request.user.company
#obj.create_by = request.user
#obj.save()
#messages.success(request, 'Nuevo punto de venta dado de alta con éxito')
#return HttpResponseRedirect( reverse('mis_puntos_de_venta' ) )
#else:
#messages.error(request, 'Algunos datos en tu formulario estan incorrectos')
return render(request, 'app/products_add.html',locals())
| bsd-3-clause | -6,158,858,356,048,572,000 | 35.670886 | 84 | 0.718626 | false | 3.104502 | false | false | false |
dslutz/qemu | scripts/qapi/gen.py | 11 | 8280 | # -*- coding: utf-8 -*-
#
# QAPI code generation
#
# Copyright (c) 2018-2019 Red Hat Inc.
#
# Authors:
# Markus Armbruster <[email protected]>
# Marc-André Lureau <[email protected]>
#
# This work is licensed under the terms of the GNU GPL, version 2.
# See the COPYING file in the top-level directory.
import errno
import os
import re
from contextlib import contextmanager
from qapi.common import *
from qapi.schema import QAPISchemaVisitor
class QAPIGen:
def __init__(self, fname):
self.fname = fname
self._preamble = ''
self._body = ''
def preamble_add(self, text):
self._preamble += text
def add(self, text):
self._body += text
def get_content(self):
return self._top() + self._preamble + self._body + self._bottom()
def _top(self):
return ''
def _bottom(self):
return ''
def write(self, output_dir):
# Include paths starting with ../ are used to reuse modules of the main
# schema in specialised schemas. Don't overwrite the files that are
# already generated for the main schema.
if self.fname.startswith('../'):
return
pathname = os.path.join(output_dir, self.fname)
odir = os.path.dirname(pathname)
if odir:
try:
os.makedirs(odir)
except os.error as e:
if e.errno != errno.EEXIST:
raise
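        # Re-read any existing output and rewrite it only when the generated
        # content actually differs, so unchanged files keep their timestamps.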
fd = os.open(pathname, os.O_RDWR | os.O_CREAT, 0o666)
f = open(fd, 'r+', encoding='utf-8')
text = self.get_content()
oldtext = f.read(len(text) + 1)
if text != oldtext:
f.seek(0)
f.truncate(0)
f.write(text)
f.close()
def _wrap_ifcond(ifcond, before, after):
if before == after:
return after # suppress empty #if ... #endif
assert after.startswith(before)
out = before
added = after[len(before):]
if added[0] == '\n':
out += '\n'
added = added[1:]
out += gen_if(ifcond)
out += added
out += gen_endif(ifcond)
return out
class QAPIGenCCode(QAPIGen):
def __init__(self, fname):
super().__init__(fname)
self._start_if = None
def start_if(self, ifcond):
assert self._start_if is None
self._start_if = (ifcond, self._body, self._preamble)
def end_if(self):
assert self._start_if
self._wrap_ifcond()
self._start_if = None
def _wrap_ifcond(self):
self._body = _wrap_ifcond(self._start_if[0],
self._start_if[1], self._body)
self._preamble = _wrap_ifcond(self._start_if[0],
self._start_if[2], self._preamble)
def get_content(self):
assert self._start_if is None
return super().get_content()
class QAPIGenC(QAPIGenCCode):
def __init__(self, fname, blurb, pydoc):
super().__init__(fname)
self._blurb = blurb
self._copyright = '\n * '.join(re.findall(r'^Copyright .*', pydoc,
re.MULTILINE))
def _top(self):
return mcgen('''
/* AUTOMATICALLY GENERATED, DO NOT MODIFY */
/*
%(blurb)s
*
* %(copyright)s
*
* This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
* See the COPYING.LIB file in the top-level directory.
*/
''',
blurb=self._blurb, copyright=self._copyright)
def _bottom(self):
return mcgen('''
/* Dummy declaration to prevent empty .o file */
char qapi_dummy_%(name)s;
''',
name=c_fname(self.fname))
class QAPIGenH(QAPIGenC):
def _top(self):
return super()._top() + guardstart(self.fname)
def _bottom(self):
return guardend(self.fname)
@contextmanager
def ifcontext(ifcond, *args):
"""A 'with' statement context manager to wrap with start_if()/end_if()
*args: any number of QAPIGenCCode
Example::
with ifcontext(ifcond, self._genh, self._genc):
modify self._genh and self._genc ...
Is equivalent to calling::
self._genh.start_if(ifcond)
self._genc.start_if(ifcond)
modify self._genh and self._genc ...
self._genh.end_if()
self._genc.end_if()
"""
for arg in args:
arg.start_if(ifcond)
yield
for arg in args:
arg.end_if()
class QAPIGenDoc(QAPIGen):
def _top(self):
return (super()._top()
+ '@c AUTOMATICALLY GENERATED, DO NOT MODIFY\n\n')
class QAPISchemaMonolithicCVisitor(QAPISchemaVisitor):
def __init__(self, prefix, what, blurb, pydoc):
self._prefix = prefix
self._what = what
self._genc = QAPIGenC(self._prefix + self._what + '.c',
blurb, pydoc)
self._genh = QAPIGenH(self._prefix + self._what + '.h',
blurb, pydoc)
def write(self, output_dir):
self._genc.write(output_dir)
self._genh.write(output_dir)
class QAPISchemaModularCVisitor(QAPISchemaVisitor):
def __init__(self, prefix, what, user_blurb, builtin_blurb, pydoc):
self._prefix = prefix
self._what = what
self._user_blurb = user_blurb
self._builtin_blurb = builtin_blurb
self._pydoc = pydoc
self._genc = None
self._genh = None
self._module = {}
self._main_module = None
@staticmethod
def _is_user_module(name):
return name and not name.startswith('./')
@staticmethod
def _is_builtin_module(name):
return not name
def _module_dirname(self, what, name):
if self._is_user_module(name):
return os.path.dirname(name)
return ''
def _module_basename(self, what, name):
ret = '' if self._is_builtin_module(name) else self._prefix
if self._is_user_module(name):
basename = os.path.basename(name)
ret += what
if name != self._main_module:
ret += '-' + os.path.splitext(basename)[0]
else:
name = name[2:] if name else 'builtin'
ret += re.sub(r'-', '-' + name + '-', what)
return ret
def _module_filename(self, what, name):
return os.path.join(self._module_dirname(what, name),
self._module_basename(what, name))
def _add_module(self, name, blurb):
basename = self._module_filename(self._what, name)
genc = QAPIGenC(basename + '.c', blurb, self._pydoc)
genh = QAPIGenH(basename + '.h', blurb, self._pydoc)
self._module[name] = (genc, genh)
self._genc, self._genh = self._module[name]
def _add_user_module(self, name, blurb):
assert self._is_user_module(name)
if self._main_module is None:
self._main_module = name
self._add_module(name, blurb)
def _add_system_module(self, name, blurb):
self._add_module(name and './' + name, blurb)
def write(self, output_dir, opt_builtins=False):
for name in self._module:
if self._is_builtin_module(name) and not opt_builtins:
continue
(genc, genh) = self._module[name]
genc.write(output_dir)
genh.write(output_dir)
def _begin_system_module(self, name):
pass
def _begin_user_module(self, name):
pass
def visit_module(self, name):
if name is None:
if self._builtin_blurb:
self._add_system_module(None, self._builtin_blurb)
self._begin_system_module(name)
else:
# The built-in module has not been created. No code may
# be generated.
self._genc = None
self._genh = None
else:
self._add_user_module(name, self._user_blurb)
self._begin_user_module(name)
def visit_include(self, name, info):
relname = os.path.relpath(self._module_filename(self._what, name),
os.path.dirname(self._genh.fname))
self._genh.preamble_add(mcgen('''
#include "%(relname)s.h"
''',
relname=relname))
| gpl-2.0 | 7,214,599,436,790,775,000 | 27.064407 | 79 | 0.553811 | false | 3.593316 | false | false | false |
CivicKnowledge/ambry | ambry/orm/code.py | 1 | 2703 | """Object-Rlational Mapping classess, based on Sqlalchemy, for representing the
dataset, partitions, configuration, tables and columns.
Copyright (c) 2015 Civic Knowledge. This file is licensed under the terms of the
Revised BSD License, included in this distribution as LICENSE.txt
"""
__docformat__ = 'restructuredtext en'
from six import iteritems
from sqlalchemy import event
from sqlalchemy import Column as SAColumn, Integer
from sqlalchemy import Text, String, ForeignKey
from ambry.identity import ObjectNumber
from . import Base, MutationDict, JSONEncodedObj
class Code(Base):
"""Code entries for variables."""
__tablename__ = 'codes'
c_vid = SAColumn('cd_c_vid', String(20), ForeignKey('columns.c_vid'), primary_key=True,
index=True, nullable=False)
d_vid = SAColumn('cd_d_vid', String(20), ForeignKey('datasets.d_vid'), primary_key=True,
nullable=False, index=True)
key = SAColumn('cd_skey', String(20), primary_key=True, nullable=False, index=True) # String version of the key, the value in the dataset
ikey = SAColumn('cd_ikey', Integer, index=True) # Set only if the key is actually an integer
value = SAColumn('cd_value', Text, nullable=False) # The value the key maps to
description = SAColumn('cd_description', Text)
source = SAColumn('cd_source', Text)
data = SAColumn('cd_data', MutationDict.as_mutable(JSONEncodedObj))
def __init__(self, **kwargs):
for p in self.__mapper__.attrs:
if p.key in kwargs:
setattr(self, p.key, kwargs[p.key])
del kwargs[p.key]
if self.data:
self.data.update(kwargs)
def __repr__(self):
return "<code: {}->{} >".format(self.key, self.value)
def update(self, f):
"""Copy another files properties into this one."""
for p in self.__mapper__.attrs:
if p.key == 'oid':
continue
try:
setattr(self, p.key, getattr(f, p.key))
except AttributeError:
# The dict() method copies data property values into the main dict,
# and these don't have associated class properties.
continue
@property
def insertable_dict(self):
d = {('cd_' + k).strip('_'): v for k, v in iteritems(self.dict)}
# the `key` property is not named after its db column
d['cd_skey'] = d['cd_key']
del d['cd_key']
return d
@staticmethod
def before_insert(mapper, conn, target):
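        """Derive the owning dataset vid (d_vid) from the column vid before INSERT."""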
target.d_vid = str(ObjectNumber.parse(target.c_vid).as_dataset)
event.listen(Code, 'before_insert', Code.before_insert)
| bsd-2-clause | -4,505,728,845,002,071,000 | 30.430233 | 142 | 0.625601 | false | 3.78042 | false | false | false |
eph214/webpymail | webpymail/mailapp/templatetags/webtags.py | 7 | 1891 | # -*- coding: utf-8 -*-
# WebPyMail - IMAP python/django web mail client
# Copyright (C) 2008 Helder Guerreiro
## This file is part of WebPyMail.
##
## WebPyMail is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## WebPyMail is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with WebPyMail. If not, see <http://www.gnu.org/licenses/>.
#
# Helder Guerreiro <[email protected]>
#
# $Id$
#
from django import template
from django.template import resolve_variable
from django.utils.translation import gettext_lazy as _
register = template.Library()
# Tag to retrieve a message part from the server:
@register.tag(name="spaces")
def do_spaces(parser, token):
try:
# split_contents() knows not to split quoted strings.
tag_name, num_spaces = token.split_contents()
except ValueError:
raise template.TemplateSyntaxError, \
"%r tag requires one arg: num_spaces" \
% token.contents.split()[0]
return PartTextNode(num_spaces)
class PartTextNode(template.Node):
def __init__(self, num_spaces):
self.num_spaces = num_spaces
def render(self, context):
num_spaces = resolve_variable(self.num_spaces, context)
try:
num_spaces = int(num_spaces)
except ValueError:
raise template.TemplateSyntaxError, \
"%r tag's num_spaces argument must be an int" % tag_name
return ' ' * num_spaces
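# Template usage (illustrative): {% spaces 4 %} renders four literal spaces;
# the argument may also be a context variable, e.g. {% spaces indent %}.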
| gpl-3.0 | 666,743,244,955,380,400 | 31.050847 | 72 | 0.68588 | false | 3.859184 | false | false | false |
BackupGGCode/annetgpgpu | examples/misc/runSOMNet.py | 2 | 1098 | from ANPyNetCPU import *
black = vectorf([0,0,0])
white = vectorf([1,1,1])
red = vectorf([1,0,0])
green = vectorf([0,1,0])
blue = vectorf([0,0,1])
trainSet = TrainingSet()
trainSet.AddInput(black)
trainSet.AddInput(white)
trainSet.AddInput(red)
trainSet.AddInput(green)
trainSet.AddInput(blue)
widthMap = 4
heightMap = 1
inpWidth = 3
inpHeight = 1
SOM = SOMNet(inpWidth,inpHeight,widthMap,heightMap)
SOM.SetTrainingSet(trainSet)
SOM.SetLearningRate(0.3)
SOM.Training(1000)
# gets to each input vector the corresponding centroid, eucl. distance and the ID of the BMU
inputv = SOM.GetCentrOInpList()
# gets an ordered list of different centroids with the ID of the corresponding BMU
centroids = SOM.GetCentroidList()
# output for fun
for i in centroids:
print (i)
# .. again
for i in inputv:
print (i)
# Save IDs of the BMUs into a list
IDList = []
for i in inputv:
IDList.append(i.m_iBMUID)
print (IDList)
# Searches the corresponding centroids from the other list based on the index :D
for i in IDList:
for j in centroids:
if i == j.m_iBMUID:
print (j.m_vCentroid) | lgpl-2.1 | 215,936,719,292,420,540 | 21.428571 | 92 | 0.724044 | false | 2.671533 | false | false | false |
churchlab/ulutil | ulutil/qPCR2quantitation.py | 1 | 3390 | # Copyright 2014 Uri Laserson
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import matplotlib as mpl
import matplotlib.pyplot as plt
def qPCR2quantitation(inputfile,output_formats):
outputbasename = os.path.splitext(os.path.basename(inputfile))[0]
# Learn some things about the data:
# How many curves are there?
ip = open(inputfile,'r')
for line in ip:
if line.startswith('Step'):
# Verify the fields in the line:
fields = line.split(',')
if fields[0] != 'Step' or fields[1] != 'Cycle' or fields[2] != 'Dye' or fields[3] != 'Temp.':
raise ValueError, 'Expected line like: "Step,Cycle,Dye,Temp.,..."'
curve_labels = fields[4:-1] # (skip the above four fields and last extra comma)
break
# What step is the quantitation at?
for line in ip: # advance to data set characterization
if line.strip() == 'Analysis Options':
break
for line in ip:
if line.startswith("Step") and "Quantitation" in line:
line_id = line.split()[1].strip(':')
break
ip.close()
# Create data structures
cycles = []
curves = [[] for curve in curve_labels]
# Load the data
ip = open(inputfile,'r')
for line in ip: # advance to data
if line.startswith('Step'):
break
for line in ip:
if line.strip() == '':
break
if line.split(',')[0] == line_id:
cycles.append(int(line.split(',')[1]))
data = map(float,line.split(',')[4:-1])
for (i,value) in enumerate(data):
curves[i].append(value)
# Make the plots
fig = plt.figure()
ax = fig.add_subplot(111)
for (label,curve) in zip(curve_labels,curves):
ax.plot(cycles,curve,label=label)
ax.legend(loc=2)
ax.set_xlabel('Cycles')
ax.set_ylabel('Fluorescence (a.u.)')
for format in output_formats:
fig.savefig(outputbasename+'.quantitation.'+format)
if __name__ == '__main__':
import sys
import optparse
output_formats = set()
def append_format(option,opt_str,value,parser):
output_formats.add(opt_str.strip('-'))
option_parser = optparse.OptionParser()
option_parser.add_option('--png',action='callback',callback=append_format)
option_parser.add_option('--pdf',action='callback',callback=append_format)
option_parser.add_option('--eps',action='callback',callback=append_format)
(options,args) = option_parser.parse_args()
if len(args) != 1:
raise ValueError, "Must give a single file as input."
output_formats = list(output_formats)
if output_formats == []:
output_formats.append('pdf')
output_formats.append('png')
inputfile = args[0]
qPCR2quantitation(inputfile,output_formats)
| apache-2.0 | -2,069,403,091,796,338,700 | 33.958763 | 105 | 0.620944 | false | 3.779264 | false | false | false |
Coder-Yu/SDLib | shillingmodels/bandwagonAttack.py | 1 | 2252 |
#coding:utf-8
#author:Yu Junliang
import random
import numpy as np
from attack import Attack
class BandWagonAttack(Attack):
def __init__(self,conf):
super(BandWagonAttack, self).__init__(conf)
self.hotItems = sorted(self.itemProfile.iteritems(), key=lambda d: len(d[1]), reverse=True)[
:int(self.selectedSize * len(self.itemProfile))]
def insertSpam(self,startID=0):
print 'Modeling bandwagon attack...'
itemList = self.itemProfile.keys()
if startID == 0:
self.startUserID = len(self.userProfile)
else:
self.startUserID = startID
for i in range(int(len(self.userProfile)*self.attackSize)):
            # fill: filler items rated with random scores
fillerItems = self.getFillerItems()
for item in fillerItems:
self.spamProfile[str(self.startUserID)][str(itemList[item])] = random.randint(self.minScore,self.maxScore)
            # selected: selected (popular) items rated with the target score
selectedItems = self.getSelectedItems()
for item in selectedItems:
self.spamProfile[str(self.startUserID)][item] = self.targetScore
            # target: target items rated with the target score
for j in range(self.targetCount):
target = np.random.randint(len(self.targetItems))
self.spamProfile[str(self.startUserID)][self.targetItems[target]] = self.targetScore
self.spamItem[str(self.startUserID)].append(self.targetItems[target])
self.startUserID += 1
def getFillerItems(self):
mu = int(self.fillerSize*len(self.itemProfile))
sigma = int(0.1*mu)
markedItemsCount = int(round(random.gauss(mu, sigma)))
if markedItemsCount < 0:
markedItemsCount = 0
markedItems = np.random.randint(len(self.itemProfile), size=markedItemsCount)
return markedItems
def getSelectedItems(self):
mu = int(self.selectedSize * len(self.itemProfile))
sigma = int(0.1 * mu)
markedItemsCount = abs(int(round(random.gauss(mu, sigma))))
markedIndexes = np.random.randint(len(self.hotItems), size=markedItemsCount)
markedItems = [self.hotItems[index][0] for index in markedIndexes]
        return markedItems
| gpl-3.0 | -3,365,530,411,611,354,600 | 38.105263 | 122 | 0.631059 | false | 3.664474 | false | false | false |
akmcinto/TodoApp | ToDoApp/todo/migrations/0009_auto_20160126_1426.py | 1 | 1146 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-01-26 21:26
from __future__ import unicode_literals
import datetime
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('todo', '0008_auto_20160126_0004'),
]
operations = [
migrations.AddField(
model_name='todolist',
name='list_user',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='item',
name='creation_date',
field=models.DateTimeField(default=datetime.datetime(2016, 1, 26, 14, 26, 31, 705576), verbose_name='date created'),
),
migrations.AlterField(
model_name='todolist',
name='creation_date',
field=models.DateTimeField(default=datetime.datetime(2016, 1, 26, 14, 26, 31, 704075), verbose_name='date created'),
),
]
| apache-2.0 | -2,449,670,075,222,136,000 | 32.705882 | 128 | 0.630017 | false | 3.965398 | false | false | false |
timsnyder/bokeh | bokeh/application/handlers/code_runner.py | 2 | 6272 |
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Provide a utility class ``CodeRunner`` for use by handlers that execute
Python source code.
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
import os
import sys
import traceback
from types import ModuleType
# External imports
# Bokeh imports
from ...util.serialization import make_id
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'CodeRunner',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
class CodeRunner(object):
''' Compile and run Python source code.
'''
def __init__(self, source, path, argv):
'''
Args:
source (str) : python source code
path (str) : a filename to use in any debugging or error output
argv (list[str]) : a list of string arguments to make available
as ``sys.argv`` when the code executes
'''
self._permanent_error = None
self._permanent_error_detail = None
self.reset_run_errors()
import ast
self._code = None
try:
nodes = ast.parse(source, path)
self._code = compile(nodes, filename=path, mode='exec', dont_inherit=True)
except SyntaxError as e:
import traceback
self._code = None
self._permanent_error = ("Invalid syntax in \"%s\" on line %d:\n%s" % (os.path.basename(e.filename), e.lineno, e.text))
self._permanent_error_detail = traceback.format_exc()
self._path = path
self._source = source
self._argv = argv
self.ran = False
# Properties --------------------------------------------------------------
@property
def error(self):
''' If code execution fails, may contain a related error message.
'''
return self._error if self._permanent_error is None else self._permanent_error
@property
def error_detail(self):
''' If code execution fails, may contain a traceback or other details.
'''
return self._error_detail if self._permanent_error_detail is None else self._permanent_error_detail
@property
def failed(self):
''' ``True`` if code execution failed
'''
return self._failed or self._code is None
@property
def path(self):
''' The path that new modules will be configured with.
'''
return self._path
@property
def source(self):
''' The configured source code that will be executed when ``run`` is
called.
'''
return self._source
# Public methods ----------------------------------------------------------
def new_module(self):
''' Make a fresh module to run in.
Returns:
Module
'''
self.reset_run_errors()
if self._code is None:
return None
module_name = 'bk_script_' + make_id().replace('-', '')
module = ModuleType(str(module_name)) # str needed for py2.7
module.__dict__['__file__'] = os.path.abspath(self._path)
return module
def reset_run_errors(self):
''' Clears any transient error conditions from a previous run.
Returns
None
'''
self._failed = False
self._error = None
self._error_detail = None
def run(self, module, post_check):
''' Execute the configured source code in a module and run any post
checks.
Args:
module (Module) : a module to execute the configured code in.
post_check(callable) : a function that can raise an exception
if expected post-conditions are not met after code execution.
'''
try:
            # Simulate the sys.path behaviour described here:
#
# https://docs.python.org/2/library/sys.html#sys.path
_cwd = os.getcwd()
_sys_path = list(sys.path)
_sys_argv = list(sys.argv)
sys.path.insert(0, os.path.dirname(self._path))
sys.argv = [os.path.basename(self._path)] + self._argv
exec(self._code, module.__dict__)
post_check()
except Exception as e:
self._failed = True
self._error_detail = traceback.format_exc()
_exc_type, _exc_value, exc_traceback = sys.exc_info()
filename, line_number, func, txt = traceback.extract_tb(exc_traceback)[-1]
self._error = "%s\nFile \"%s\", line %d, in %s:\n%s" % (str(e), os.path.basename(filename), line_number, func, txt)
finally:
# undo sys.path, CWD fixups
os.chdir(_cwd)
sys.path = _sys_path
sys.argv = _sys_argv
self.ran = True
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| bsd-3-clause | -7,543,560,056,578,469,000 | 29.745098 | 131 | 0.449139 | false | 5.192053 | false | false | false |
bc-python-tools/mistool | test/string/multisplit/iterator/test_multisplit_iterator.py | 1 | 1659 |
#!/usr/bin/env python3
# --------------------- #
# -- SEVERAL IMPORTS -- #
# --------------------- #
from pathlib import Path
from pytest import fixture, raises
from orpyste.data import ReadBlock as READ
# ------------------- #
# -- MODULE TESTED -- #
# ------------------- #
from mistool import string_use
from mistool.config.pattern import PATTERNS_WORDS
# ----------------------- #
# -- GENERAL CONSTANTS -- #
# ----------------------- #
THIS_DIR = Path(__file__).parent
CLASS_MULTI_SPLIT = string_use.MultiSplit
# ----------------------- #
# -- DATAS FOR TESTING -- #
# ----------------------- #
THE_DATAS_FOR_TESTING = READ(
content = THIS_DIR / 'multisplit_iterator.txt',
mode = {
'container' : ":default:",
'verbatim' : ["text", "seps", "listiter"]
}
)
@fixture(scope="module")
def or_datas(request):
THE_DATAS_FOR_TESTING.build()
def remove_extras():
THE_DATAS_FOR_TESTING.remove_extras()
request.addfinalizer(remove_extras)
# --------------- #
# -- REPLACING -- #
# --------------- #
def test_string_use_multisplit_iterator(or_datas):
tests = THE_DATAS_FOR_TESTING.mydict("tree std nosep nonb")
for testname, infos in tests.items():
text = infos['text'][0].strip()
seps = eval(infos['seps'][0])
listiter_wanted = [
eval("({0})".format(l))
for l in infos['listiter']
]
msplit = CLASS_MULTI_SPLIT(
seps = seps,
strip = True
)
listview = msplit(text)
listiter_found = [(x.type, x.val) for x in msplit.iter()]
assert listiter_wanted == listiter_found
| gpl-3.0 | -4,821,293,765,038,772,000 | 20.545455 | 65 | 0.512357 | false | 3.463466 | true | false | false |
Lazar-T/image_compressor | compress.py | 1 | 1153 |
#!/usr/bin/python
import os
from sys import argv
from PIL import Image
script, rootdir = argv
COLOR_WHITE = "\033[1;37m{0}\033[00m"
COLOR_BLUE = "\033[1;36m{0}\033[00m"
folder_size = 0
for (path, dirs, files) in os.walk(rootdir):
for file in files:
filename = os.path.join(path, file)
folder_size += os.path.getsize(filename)
pre_size = "%0.1f MB" % (folder_size/(1024*1024.0))
i = 0
for subdir, dirs, files in os.walk(rootdir):
for file in files:
image = os.path.join(subdir, file)
if image.endswith('.jpg'):
i = i + 1
print COLOR_WHITE.format('Compressing: \t %s' % image)
im = Image.open(image)
im.save(image, quality=30)
print '\n'
print COLOR_BLUE.format('Compresson completed.')
print COLOR_BLUE.format('Compressed %s images' % i)
folder_size = 0
for (path, dirs, files) in os.walk(rootdir):
for file in files:
filename = os.path.join(path, file)
folder_size += os.path.getsize(filename)
after_size = "%0.1f MB" % (folder_size/(1024*1024.0))
print COLOR_BLUE.format('Size of folder went from %s to %s' % (pre_size, after_size))
| mit | -801,296,573,217,265,900 | 25.813953 | 85 | 0.626193 | false | 2.926396 | false | false | false |
mission-peace/interview | python/graph/disjointset.py | 1 | 1697 |
# disjoint sets
# https://github.com/mission-peace/interview/blob/master/src/com/interview/graph/DisjointSet.java
class Node(object):
def __init__(self, data, parent = None, rank = 0):
self.data = data
self.parent = parent
self.rank = rank
def __str__(self):
return str(self.data)
def __repr__(self):
return self.__str__()
class DisjointSet(object):
def __init__(self):
self.map = {}
def make_set(self, data):
node = Node(data)
node.parent = node
self.map[data] = node
def union(self, data1, data2):
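        # union by rank: attach the root of the lower-rank tree under the higher-rank root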
node1 = self.map[data1]
node2 = self.map[data2]
parent1 = self.find_set_util(node1)
parent2 = self.find_set_util(node2)
if parent1.data == parent2.data:
return
if parent1.rank >= parent2.rank:
if parent1.rank == parent2.rank:
parent1.rank = parent1.rank + 1
parent2.parent = parent1
else:
parent1.parent = parent2
def find_set(self, data):
return self.find_set_util(self.map[data])
def find_set_util(self, node):
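        # find the representative of the set, compressing the path along the way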
parent = node.parent
if parent == node:
return parent
node.parent = self.find_set_util(node.parent)
return node.parent
if __name__ == '__main__':
ds = DisjointSet()
ds.make_set(1)
ds.make_set(2)
ds.make_set(3)
ds.make_set(4)
ds.make_set(5)
ds.make_set(6)
ds.make_set(7)
ds.union(1,2)
ds.union(2,3)
ds.union(4,5)
ds.union(6,7)
ds.union(5,6)
ds.union(3,7)
for i in range(1,8):
print(ds.find_set(i))
| apache-2.0 | -8,107,211,652,950,265,000 | 21.038961 | 97 | 0.542133 | false | 3.20794 | false | false | false |
anselmobd/fo2 | src/lotes/migrations/0010_lote.py | 1 | 1354 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2018-03-28 17:49
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('lotes', '0009_remove_modelotermica_receita'),
]
operations = [
migrations.CreateModel(
name='Lote',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('lote', models.CharField(max_length=20, verbose_name='lote')),
('op', models.IntegerField(blank=True, null=True, verbose_name='OP')),
('referencia', models.CharField(max_length=5, verbose_name='Referência')),
('tamanho', models.CharField(max_length=3, verbose_name='Tamanho')),
('cor', models.CharField(max_length=6, verbose_name='Cor')),
('qtd_produzir', models.IntegerField(verbose_name='quantidade')),
('create_at', models.DateTimeField(blank=True, null=True, verbose_name='criado em')),
('update_at', models.DateTimeField(blank=True, null=True, verbose_name='alterado em')),
],
options={
'db_table': 'fo2_cd_lote',
'verbose_name': 'lote',
},
),
]
| mit | 991,786,391,476,645,600 | 40 | 114 | 0.566888 | false | 3.800562 | false | false | false |
clubit/sale-workflow | sale_franco_check/wizard/confirm_sale_order_franco.py | 1 | 1610 |
from openerp import models, fields, api, _
from openerp import netsvc
from openerp import tools
from itertools import groupby
class confirm_quotation_franco(models.TransientModel):
_name = 'confirm.sale.order.franco'
_description = 'Confirm Sale Order'
@api.multi
def confirm_sale_order_franco(self):
wf_service = netsvc.LocalService('workflow')
sale_orders = self.env['sale.order'].browse(self._context.get('active_ids', []))
filtered_sale_orders = filter(lambda order: order.state =='draft', sale_orders) # only consider quotations
sorted_sale_orders = sorted(filtered_sale_orders, key=lambda order: order.partner_id) # necessary for group_by
for partner, ordrs in groupby(sorted_sale_orders, lambda order: order.partner_id):
orders = [order for order in ordrs] # iterator only allows one iteration
orders_franco_check = filter(lambda order: order.franco_check, orders)
orders_to_be_confirmed = filter(lambda order: not order.franco_check, orders)
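            # with no positive franco threshold configured, the check below always passes, so franco_check orders are confirmed as well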
if not partner.franco_amount or partner.franco_amount <= 0.0:
amount_total = float("inf")
else:
amount_total = sum(map(lambda order: order.amount_untaxed, orders_franco_check))
if amount_total >= partner.franco_amount:
orders_to_be_confirmed += orders_franco_check
for order in orders_to_be_confirmed:
wf_service.trg_validate(self.env.uid, 'sale.order', order.id, 'order_confirm', self.env.cr)
return {'type': 'ir.actions.act_window_close'}
| agpl-3.0 | -5,945,792,633,498,405,000 | 47.787879 | 118 | 0.662733 | false | 3.718245 | false | false | false |
spatialaudio/python-sounddevice | sounddevice.py | 1 | 107862 |
# Copyright (c) 2015-2020 Matthias Geier
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Play and Record Sound with Python.
API overview:
* Convenience functions to play and record NumPy arrays:
`play()`, `rec()`, `playrec()` and the related functions
`wait()`, `stop()`, `get_status()`, `get_stream()`
* Functions to get information about the available hardware:
`query_devices()`, `query_hostapis()`,
`check_input_settings()`, `check_output_settings()`
* Module-wide default settings: `default`
* Platform-specific settings:
`AsioSettings`, `CoreAudioSettings`, `WasapiSettings`
* PortAudio streams, using NumPy arrays:
`Stream`, `InputStream`, `OutputStream`
* PortAudio streams, using Python buffer objects (NumPy not needed):
`RawStream`, `RawInputStream`, `RawOutputStream`
* Miscellaneous functions and classes:
`sleep()`, `get_portaudio_version()`, `CallbackFlags`,
`CallbackStop`, `CallbackAbort`
Online documentation:
https://python-sounddevice.readthedocs.io/
"""
__version__ = '0.4.1'
import atexit as _atexit
import os as _os
import platform as _platform
import sys as _sys
from ctypes.util import find_library as _find_library
from _sounddevice import ffi as _ffi
try:
for _libname in (
'portaudio', # Default name on POSIX systems
'bin\\libportaudio-2.dll', # DLL from conda-forge
'lib/libportaudio.dylib', # dylib from anaconda
):
_libname = _find_library(_libname)
if _libname is not None:
break
else:
raise OSError('PortAudio library not found')
_lib = _ffi.dlopen(_libname)
except OSError:
if _platform.system() == 'Darwin':
_libname = 'libportaudio.dylib'
elif _platform.system() == 'Windows':
_libname = 'libportaudio' + _platform.architecture()[0] + '.dll'
else:
raise
import _sounddevice_data
_libname = _os.path.join(
next(iter(_sounddevice_data.__path__)), 'portaudio-binaries', _libname)
_lib = _ffi.dlopen(_libname)
_sampleformats = {
'float32': _lib.paFloat32,
'int32': _lib.paInt32,
'int24': _lib.paInt24,
'int16': _lib.paInt16,
'int8': _lib.paInt8,
'uint8': _lib.paUInt8,
}
_initialized = 0
_last_callback = None
def play(data, samplerate=None, mapping=None, blocking=False, loop=False,
**kwargs):
"""Play back a NumPy array containing audio data.
This is a convenience function for interactive use and for small
scripts. It cannot be used for multiple overlapping playbacks.
This function does the following steps internally:
* Call `stop()` to terminate any currently running invocation
of `play()`, `rec()` and `playrec()`.
* Create an `OutputStream` and a callback function for taking care
of the actual playback.
* Start the stream.
* If ``blocking=True`` was given, wait until playback is done.
If not, return immediately.
If you need more control (e.g. block-wise gapless playback, multiple
overlapping playbacks, ...), you should explicitly create an
`OutputStream` yourself.
If NumPy is not available, you can use a `RawOutputStream`.
Parameters
----------
data : array_like
Audio data to be played back. The columns of a two-dimensional
array are interpreted as channels, one-dimensional arrays are
treated as mono data.
The data types *float64*, *float32*, *int32*, *int16*, *int8*
and *uint8* can be used.
*float64* data is simply converted to *float32* before passing
it to PortAudio, because it's not supported natively.
mapping : array_like, optional
List of channel numbers (starting with 1) where the columns of
*data* shall be played back on. Must have the same length as
number of channels in *data* (except if *data* is mono, in which
case the signal is played back on all given output channels).
Each channel number may only appear once in *mapping*.
blocking : bool, optional
If ``False`` (the default), return immediately (but playback
continues in the background), if ``True``, wait until playback
is finished. A non-blocking invocation can be stopped with
`stop()` or turned into a blocking one with `wait()`.
loop : bool, optional
Play *data* in a loop.
Other Parameters
----------------
samplerate, **kwargs
All parameters of `OutputStream` -- except *channels*, *dtype*,
*callback* and *finished_callback* -- can be used.
Notes
-----
If you don't specify the correct sampling rate
(either with the *samplerate* argument or by assigning a value to
`default.samplerate`), the audio data will be played back,
but it might be too slow or too fast!
See Also
--------
rec, playrec
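    Examples
    --------
    A minimal, illustrative sketch (the tone parameters and the sampling
    rate are arbitrary choices, not defaults of this function):
    >>> import numpy as np
    >>> import sounddevice as sd
    >>> fs = 44100
    >>> t = np.arange(fs) / float(fs)  # one second worth of samples
    >>> tone = 0.2 * np.sin(2 * np.pi * 440 * t)  # 440 Hz sine tone
    >>> sd.play(tone, fs)
    >>> sd.wait()  # block until playback is finished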
"""
ctx = _CallbackContext(loop=loop)
ctx.frames = ctx.check_data(data, mapping, kwargs.get('device'))
def callback(outdata, frames, time, status):
assert len(outdata) == frames
ctx.callback_enter(status, outdata)
ctx.write_outdata(outdata)
ctx.callback_exit()
ctx.start_stream(OutputStream, samplerate, ctx.output_channels,
ctx.output_dtype, callback, blocking,
prime_output_buffers_using_stream_callback=False,
**kwargs)
def rec(frames=None, samplerate=None, channels=None, dtype=None,
out=None, mapping=None, blocking=False, **kwargs):
"""Record audio data into a NumPy array.
This is a convenience function for interactive use and for small
scripts.
This function does the following steps internally:
* Call `stop()` to terminate any currently running invocation
of `play()`, `rec()` and `playrec()`.
* Create an `InputStream` and a callback function for taking care
of the actual recording.
* Start the stream.
* If ``blocking=True`` was given, wait until recording is done.
If not, return immediately.
If you need more control (e.g. block-wise gapless recording,
overlapping recordings, ...), you should explicitly create an
`InputStream` yourself.
If NumPy is not available, you can use a `RawInputStream`.
Parameters
----------
frames : int, sometimes optional
Number of frames to record. Not needed if *out* is given.
channels : int, optional
Number of channels to record. Not needed if *mapping* or *out*
is given. The default value can be changed with
`default.channels`.
dtype : str or numpy.dtype, optional
Data type of the recording. Not needed if *out* is given.
The data types *float64*, *float32*, *int32*, *int16*, *int8*
and *uint8* can be used. For ``dtype='float64'``, audio data is
recorded in *float32* format and converted afterwards, because
it's not natively supported by PortAudio. The default value can
be changed with `default.dtype`.
mapping : array_like, optional
List of channel numbers (starting with 1) to record.
If *mapping* is given, *channels* is silently ignored.
blocking : bool, optional
If ``False`` (the default), return immediately (but recording
continues in the background), if ``True``, wait until recording
is finished.
A non-blocking invocation can be stopped with `stop()` or turned
into a blocking one with `wait()`.
Returns
-------
numpy.ndarray or type(out)
The recorded data.
.. note:: By default (``blocking=False``), an array of data is
returned which is still being written to while recording!
The returned data is only valid once recording has stopped.
Use `wait()` to make sure the recording is finished.
Other Parameters
----------------
out : numpy.ndarray or subclass, optional
If *out* is specified, the recorded data is written into the
given array instead of creating a new array.
In this case, the arguments *frames*, *channels* and *dtype* are
silently ignored!
If *mapping* is given, its length must match the number of
channels in *out*.
samplerate, **kwargs
All parameters of `InputStream` -- except *callback* and
*finished_callback* -- can be used.
Notes
-----
If you don't specify a sampling rate (either with the *samplerate*
argument or by assigning a value to `default.samplerate`),
the default sampling rate of the sound device will be used
(see `query_devices()`).
See Also
--------
play, playrec
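    Examples
    --------
    A minimal, illustrative sketch (duration and sampling rate are
    arbitrary choices):
    >>> import sounddevice as sd
    >>> fs = 44100
    >>> recording = sd.rec(int(2 * fs), samplerate=fs, channels=2)
    >>> sd.wait()  # wait until the recording is finished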
"""
ctx = _CallbackContext()
out, ctx.frames = ctx.check_out(out, frames, channels, dtype, mapping)
def callback(indata, frames, time, status):
assert len(indata) == frames
ctx.callback_enter(status, indata)
ctx.read_indata(indata)
ctx.callback_exit()
ctx.start_stream(InputStream, samplerate, ctx.input_channels,
ctx.input_dtype, callback, blocking, **kwargs)
return out
def playrec(data, samplerate=None, channels=None, dtype=None,
out=None, input_mapping=None, output_mapping=None, blocking=False,
**kwargs):
"""Simultaneous playback and recording of NumPy arrays.
This function does the following steps internally:
* Call `stop()` to terminate any currently running invocation
of `play()`, `rec()` and `playrec()`.
* Create a `Stream` and a callback function for taking care of the
actual playback and recording.
* Start the stream.
* If ``blocking=True`` was given, wait until playback/recording is
done. If not, return immediately.
If you need more control (e.g. block-wise gapless playback and
recording, realtime processing, ...),
you should explicitly create a `Stream` yourself.
If NumPy is not available, you can use a `RawStream`.
Parameters
----------
data : array_like
Audio data to be played back. See `play()`.
channels : int, sometimes optional
Number of input channels, see `rec()`.
The number of output channels is obtained from *data.shape*.
dtype : str or numpy.dtype, optional
Input data type, see `rec()`.
If *dtype* is not specified, it is taken from *data.dtype*
(i.e. `default.dtype` is ignored).
The output data type is obtained from *data.dtype* anyway.
input_mapping, output_mapping : array_like, optional
See the parameter *mapping* of `rec()` and `play()`,
respectively.
blocking : bool, optional
If ``False`` (the default), return immediately (but continue
playback/recording in the background), if ``True``, wait until
playback/recording is finished.
A non-blocking invocation can be stopped with `stop()` or turned
into a blocking one with `wait()`.
Returns
-------
numpy.ndarray or type(out)
The recorded data. See `rec()`.
Other Parameters
----------------
out : numpy.ndarray or subclass, optional
See `rec()`.
samplerate, **kwargs
All parameters of `Stream` -- except *channels*, *dtype*,
*callback* and *finished_callback* -- can be used.
Notes
-----
If you don't specify the correct sampling rate
(either with the *samplerate* argument or by assigning a value to
`default.samplerate`), the audio data will be played back,
but it might be too slow or too fast!
See Also
--------
play, rec
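    Examples
    --------
    A minimal, illustrative sketch that plays a generated tone while
    recording one input channel (all numeric values are arbitrary):
    >>> import numpy as np
    >>> import sounddevice as sd
    >>> fs = 44100
    >>> tone = 0.2 * np.sin(2 * np.pi * 440 * np.arange(fs) / float(fs))
    >>> recording = sd.playrec(tone, fs, channels=1)
    >>> sd.wait()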
"""
ctx = _CallbackContext()
output_frames = ctx.check_data(data, output_mapping, kwargs.get('device'))
if dtype is None:
dtype = ctx.data.dtype # ignore module defaults
out, input_frames = ctx.check_out(out, output_frames, channels, dtype,
input_mapping)
if input_frames != output_frames:
raise ValueError('len(data) != len(out)')
ctx.frames = input_frames
def callback(indata, outdata, frames, time, status):
assert len(indata) == len(outdata) == frames
ctx.callback_enter(status, indata)
ctx.read_indata(indata)
ctx.write_outdata(outdata)
ctx.callback_exit()
ctx.start_stream(Stream, samplerate,
(ctx.input_channels, ctx.output_channels),
(ctx.input_dtype, ctx.output_dtype),
callback, blocking,
prime_output_buffers_using_stream_callback=False,
**kwargs)
return out
def wait(ignore_errors=True):
"""Wait for `play()`/`rec()`/`playrec()` to be finished.
Playback/recording can be stopped with a `KeyboardInterrupt`.
Returns
-------
CallbackFlags or None
If at least one buffer over-/underrun happened during the last
playback/recording, a `CallbackFlags` object is returned.
See Also
--------
get_status
"""
if _last_callback:
return _last_callback.wait(ignore_errors)
def stop(ignore_errors=True):
"""Stop playback/recording.
This only stops `play()`, `rec()` and `playrec()`, but has no
influence on streams created with `Stream`, `InputStream`,
`OutputStream`, `RawStream`, `RawInputStream`, `RawOutputStream`.
"""
if _last_callback:
# Calling stop() before close() is necessary for older PortAudio
# versions, see issue #87:
_last_callback.stream.stop(ignore_errors)
_last_callback.stream.close(ignore_errors)
def get_status():
"""Get info about over-/underflows in `play()`/`rec()`/`playrec()`.
Returns
-------
CallbackFlags
A `CallbackFlags` object that holds information about the last
invocation of `play()`, `rec()` or `playrec()`.
See Also
--------
wait
"""
if _last_callback:
return _last_callback.status
else:
raise RuntimeError('play()/rec()/playrec() was not called yet')
def get_stream():
"""Get a reference to the current stream.
This applies only to streams created by calls to `play()`, `rec()`
or `playrec()`.
Returns
-------
Stream
An `OutputStream`, `InputStream` or `Stream` associated with
the last invocation of `play()`, `rec()` or `playrec()`,
respectively.
"""
if _last_callback:
return _last_callback.stream
else:
raise RuntimeError('play()/rec()/playrec() was not called yet')
def query_devices(device=None, kind=None):
"""Return information about available devices.
Information and capabilities of PortAudio devices.
Devices may support input, output or both input and output.
To find the default input/output device(s), use `default.device`.
Parameters
----------
device : int or str, optional
Numeric device ID or device name substring(s).
If specified, information about only the given *device* is
returned in a single dictionary.
kind : {'input', 'output'}, optional
If *device* is not specified and *kind* is ``'input'`` or
``'output'``, a single dictionary is returned with information
about the default input or output device, respectively.
Returns
-------
dict or DeviceList
A dictionary with information about the given *device* or -- if
no arguments were specified -- a `DeviceList` containing one
dictionary for each available device.
The dictionaries have the following keys:
``'name'``
The name of the device.
``'hostapi'``
The ID of the corresponding host API. Use
`query_hostapis()` to get information about a host API.
``'max_input_channels'``, ``'max_output_channels'``
The maximum number of input/output channels supported by the
device. See `default.channels`.
``'default_low_input_latency'``, ``'default_low_output_latency'``
Default latency values for interactive performance.
This is used if `default.latency` (or the *latency* argument
of `playrec()`, `Stream` etc.) is set to ``'low'``.
``'default_high_input_latency'``, ``'default_high_output_latency'``
Default latency values for robust non-interactive
applications (e.g. playing sound files).
This is used if `default.latency` (or the *latency* argument
of `playrec()`, `Stream` etc.) is set to ``'high'``.
``'default_samplerate'``
The default sampling frequency of the device.
This is used if `default.samplerate` is not set.
Notes
-----
The list of devices can also be displayed in a terminal:
.. code-block:: sh
python3 -m sounddevice
Examples
--------
The returned `DeviceList` can be indexed and iterated over like any
sequence type (yielding the abovementioned dictionaries), but it
also has a special string representation which is shown when used in
an interactive Python session.
Each available device is listed on one line together with the
corresponding device ID, which can be assigned to `default.device`
or used as *device* argument in `play()`, `Stream` etc.
The first character of a line is ``>`` for the default input device,
``<`` for the default output device and ``*`` for the default
input/output device. After the device ID and the device name, the
corresponding host API name is displayed. In the end of each line,
the maximum number of input and output channels is shown.
On a GNU/Linux computer it might look somewhat like this:
>>> import sounddevice as sd
>>> sd.query_devices()
0 HDA Intel: ALC662 rev1 Analog (hw:0,0), ALSA (2 in, 2 out)
1 HDA Intel: ALC662 rev1 Digital (hw:0,1), ALSA (0 in, 2 out)
2 HDA Intel: HDMI 0 (hw:0,3), ALSA (0 in, 8 out)
3 sysdefault, ALSA (128 in, 128 out)
4 front, ALSA (0 in, 2 out)
5 surround40, ALSA (0 in, 2 out)
6 surround51, ALSA (0 in, 2 out)
7 surround71, ALSA (0 in, 2 out)
8 iec958, ALSA (0 in, 2 out)
9 spdif, ALSA (0 in, 2 out)
10 hdmi, ALSA (0 in, 8 out)
* 11 default, ALSA (128 in, 128 out)
12 dmix, ALSA (0 in, 2 out)
13 /dev/dsp, OSS (16 in, 16 out)
Note that ALSA provides access to some "real" and some "virtual"
devices. The latter sometimes have a ridiculously high number of
(virtual) inputs and outputs.
On macOS, you might get something similar to this:
>>> sd.query_devices()
0 Built-in Line Input, Core Audio (2 in, 0 out)
> 1 Built-in Digital Input, Core Audio (2 in, 0 out)
< 2 Built-in Output, Core Audio (0 in, 2 out)
3 Built-in Line Output, Core Audio (0 in, 2 out)
4 Built-in Digital Output, Core Audio (0 in, 2 out)
"""
if kind not in ('input', 'output', None):
raise ValueError('Invalid kind: {!r}'.format(kind))
if device is None and kind is None:
return DeviceList(query_devices(i)
for i in range(_check(_lib.Pa_GetDeviceCount())))
device = _get_device_id(device, kind, raise_on_error=True)
info = _lib.Pa_GetDeviceInfo(device)
if not info:
raise PortAudioError('Error querying device {}'.format(device))
assert info.structVersion == 2
name_bytes = _ffi.string(info.name)
try:
# We don't know beforehand if DirectSound and MME device names use
# 'utf-8' or 'mbcs' encoding. Let's try 'utf-8' first, because it more
# likely raises an exception on 'mbcs' data than vice versa, see also
# https://github.com/spatialaudio/python-sounddevice/issues/72.
# All other host APIs use 'utf-8' anyway.
name = name_bytes.decode('utf-8')
except UnicodeDecodeError:
if info.hostApi in (
_lib.Pa_HostApiTypeIdToHostApiIndex(_lib.paDirectSound),
_lib.Pa_HostApiTypeIdToHostApiIndex(_lib.paMME)):
name = name_bytes.decode('mbcs')
else:
raise
device_dict = {
'name': name,
'hostapi': info.hostApi,
'max_input_channels': info.maxInputChannels,
'max_output_channels': info.maxOutputChannels,
'default_low_input_latency': info.defaultLowInputLatency,
'default_low_output_latency': info.defaultLowOutputLatency,
'default_high_input_latency': info.defaultHighInputLatency,
'default_high_output_latency': info.defaultHighOutputLatency,
'default_samplerate': info.defaultSampleRate,
}
if kind and device_dict['max_' + kind + '_channels'] < 1:
raise ValueError(
'Not an {} device: {!r}'.format(kind, device_dict['name']))
return device_dict
def query_hostapis(index=None):
"""Return information about available host APIs.
Parameters
----------
index : int, optional
If specified, information about only the given host API *index*
is returned in a single dictionary.
Returns
-------
dict or tuple of dict
A dictionary with information about the given host API *index*
or -- if no *index* was specified -- a tuple containing one
dictionary for each available host API.
The dictionaries have the following keys:
``'name'``
The name of the host API.
``'devices'``
A list of device IDs belonging to the host API.
Use `query_devices()` to get information about a device.
``'default_input_device'``, ``'default_output_device'``
The device ID of the default input/output device of the host
API. If no default input/output device exists for the given
host API, this is -1.
.. note:: The overall default device(s) -- which can be
overwritten by assigning to `default.device` -- take(s)
precedence over `default.hostapi` and the information in
the abovementioned dictionaries.
See Also
--------
query_devices
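    Examples
    --------
    An illustrative sketch that lists the available host API names
    (the output depends on the system):
    >>> import sounddevice as sd
    >>> for api in sd.query_hostapis():
    ...     print(api['name'])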
"""
if index is None:
return tuple(query_hostapis(i)
for i in range(_check(_lib.Pa_GetHostApiCount())))
info = _lib.Pa_GetHostApiInfo(index)
if not info:
raise PortAudioError('Error querying host API {}'.format(index))
assert info.structVersion == 1
return {
'name': _ffi.string(info.name).decode(),
'devices': [_lib.Pa_HostApiDeviceIndexToDeviceIndex(index, i)
for i in range(info.deviceCount)],
'default_input_device': info.defaultInputDevice,
'default_output_device': info.defaultOutputDevice,
}
def check_input_settings(device=None, channels=None, dtype=None,
extra_settings=None, samplerate=None):
"""Check if given input device settings are supported.
All parameters are optional, `default` settings are used for any
unspecified parameters. If the settings are supported, the function
does nothing; if not, an exception is raised.
Parameters
----------
device : int or str, optional
Device ID or device name substring(s), see `default.device`.
channels : int, optional
Number of input channels, see `default.channels`.
dtype : str or numpy.dtype, optional
Data type for input samples, see `default.dtype`.
extra_settings : settings object, optional
This can be used for host-API-specific input settings.
See `default.extra_settings`.
samplerate : float, optional
Sampling frequency, see `default.samplerate`.
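    Examples
    --------
    An illustrative sketch (the values are arbitrary); if the settings
    are not supported, an exception is raised:
    >>> import sounddevice as sd
    >>> sd.check_input_settings(channels=2, samplerate=44100)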
"""
parameters, dtype, samplesize, samplerate = _get_stream_parameters(
'input', device=device, channels=channels, dtype=dtype, latency=None,
extra_settings=extra_settings, samplerate=samplerate)
_check(_lib.Pa_IsFormatSupported(parameters, _ffi.NULL, samplerate))
def check_output_settings(device=None, channels=None, dtype=None,
extra_settings=None, samplerate=None):
"""Check if given output device settings are supported.
Same as `check_input_settings()`, just for output device
settings.
"""
parameters, dtype, samplesize, samplerate = _get_stream_parameters(
'output', device=device, channels=channels, dtype=dtype, latency=None,
extra_settings=extra_settings, samplerate=samplerate)
_check(_lib.Pa_IsFormatSupported(_ffi.NULL, parameters, samplerate))
def sleep(msec):
"""Put the caller to sleep for at least *msec* milliseconds.
The function may sleep longer than requested so don't rely on this
for accurate musical timing.
"""
_lib.Pa_Sleep(msec)
def get_portaudio_version():
"""Get version information for the PortAudio library.
Returns the release number and a textual description of the current
PortAudio build, e.g. ::
(1899, 'PortAudio V19-devel (built Feb 15 2014 23:28:00)')
"""
return _lib.Pa_GetVersion(), _ffi.string(_lib.Pa_GetVersionText()).decode()
class _StreamBase(object):
"""Direct or indirect base class for all stream classes."""
def __init__(self, kind, samplerate=None, blocksize=None, device=None,
channels=None, dtype=None, latency=None, extra_settings=None,
callback=None, finished_callback=None, clip_off=None,
dither_off=None, never_drop_input=None,
prime_output_buffers_using_stream_callback=None,
userdata=None, wrap_callback=None):
"""Base class for PortAudio streams.
This class should only be used by library authors who want to
create their own custom stream classes.
Most users should use the derived classes
`Stream`, `InputStream`, `OutputStream`,
`RawStream`, `RawInputStream` and `RawOutputStream` instead.
This class has the same properties and methods as `Stream`,
except for `read_available`/:meth:`~Stream.read` and
`write_available`/:meth:`~Stream.write`.
It can be created with the same parameters as `Stream`,
except that there are three additional parameters
and the *callback* parameter also accepts a C function pointer.
Parameters
----------
kind : {'input', 'output', 'duplex'}
The desired type of stream: for recording, playback or both.
callback : Python callable or CData function pointer, optional
If *wrap_callback* is ``None`` this can be a function pointer
provided by CFFI.
Otherwise, it has to be a Python callable.
wrap_callback : {'array', 'buffer'}, optional
If *callback* is a Python callable, this selects whether
the audio data is provided as NumPy array (like in `Stream`)
or as Python buffer object (like in `RawStream`).
userdata : CData void pointer
This is passed to the underlying C callback function
on each call and can only be accessed from a *callback*
provided as ``CData`` function pointer.
Examples
--------
A usage example of this class can be seen at
https://github.com/spatialaudio/python-rtmixer.
"""
assert kind in ('input', 'output', 'duplex')
assert wrap_callback in ('array', 'buffer', None)
if blocksize is None:
blocksize = default.blocksize
if clip_off is None:
clip_off = default.clip_off
if dither_off is None:
dither_off = default.dither_off
if never_drop_input is None:
never_drop_input = default.never_drop_input
if prime_output_buffers_using_stream_callback is None:
prime_output_buffers_using_stream_callback = \
default.prime_output_buffers_using_stream_callback
stream_flags = _lib.paNoFlag
if clip_off:
stream_flags |= _lib.paClipOff
if dither_off:
stream_flags |= _lib.paDitherOff
if never_drop_input:
stream_flags |= _lib.paNeverDropInput
if prime_output_buffers_using_stream_callback:
stream_flags |= _lib.paPrimeOutputBuffersUsingStreamCallback
if kind == 'duplex':
idevice, odevice = _split(device)
ichannels, ochannels = _split(channels)
idtype, odtype = _split(dtype)
ilatency, olatency = _split(latency)
iextra, oextra = _split(extra_settings)
iparameters, idtype, isize, isamplerate = _get_stream_parameters(
'input', idevice, ichannels, idtype, ilatency, iextra,
samplerate)
oparameters, odtype, osize, osamplerate = _get_stream_parameters(
'output', odevice, ochannels, odtype, olatency, oextra,
samplerate)
self._dtype = idtype, odtype
self._device = iparameters.device, oparameters.device
self._channels = iparameters.channelCount, oparameters.channelCount
self._samplesize = isize, osize
if isamplerate != osamplerate:
raise ValueError(
'Input and output device must have the same samplerate')
else:
samplerate = isamplerate
else:
parameters, self._dtype, self._samplesize, samplerate = \
_get_stream_parameters(kind, device, channels, dtype, latency,
extra_settings, samplerate)
self._device = parameters.device
self._channels = parameters.channelCount
if kind == 'input':
iparameters = parameters
oparameters = _ffi.NULL
elif kind == 'output':
iparameters = _ffi.NULL
oparameters = parameters
ffi_callback = _ffi.callback('PaStreamCallback', error=_lib.paAbort)
if callback is None:
callback_ptr = _ffi.NULL
elif kind == 'input' and wrap_callback == 'buffer':
@ffi_callback
def callback_ptr(iptr, optr, frames, time, status, _):
data = _buffer(iptr, frames, self._channels, self._samplesize)
return _wrap_callback(callback, data, frames, time, status)
elif kind == 'input' and wrap_callback == 'array':
@ffi_callback
def callback_ptr(iptr, optr, frames, time, status, _):
data = _array(
_buffer(iptr, frames, self._channels, self._samplesize),
self._channels, self._dtype)
return _wrap_callback(callback, data, frames, time, status)
elif kind == 'output' and wrap_callback == 'buffer':
@ffi_callback
def callback_ptr(iptr, optr, frames, time, status, _):
data = _buffer(optr, frames, self._channels, self._samplesize)
return _wrap_callback(callback, data, frames, time, status)
elif kind == 'output' and wrap_callback == 'array':
@ffi_callback
def callback_ptr(iptr, optr, frames, time, status, _):
data = _array(
_buffer(optr, frames, self._channels, self._samplesize),
self._channels, self._dtype)
return _wrap_callback(callback, data, frames, time, status)
elif kind == 'duplex' and wrap_callback == 'buffer':
@ffi_callback
def callback_ptr(iptr, optr, frames, time, status, _):
ichannels, ochannels = self._channels
isize, osize = self._samplesize
idata = _buffer(iptr, frames, ichannels, isize)
odata = _buffer(optr, frames, ochannels, osize)
return _wrap_callback(
callback, idata, odata, frames, time, status)
elif kind == 'duplex' and wrap_callback == 'array':
@ffi_callback
def callback_ptr(iptr, optr, frames, time, status, _):
ichannels, ochannels = self._channels
idtype, odtype = self._dtype
isize, osize = self._samplesize
idata = _array(_buffer(iptr, frames, ichannels, isize),
ichannels, idtype)
odata = _array(_buffer(optr, frames, ochannels, osize),
ochannels, odtype)
return _wrap_callback(
callback, idata, odata, frames, time, status)
else:
# Use cast() to allow CData from different FFI instance:
callback_ptr = _ffi.cast('PaStreamCallback*', callback)
# CFFI callback object must be kept alive during stream lifetime:
self._callback = callback_ptr
if userdata is None:
userdata = _ffi.NULL
self._ptr = _ffi.new('PaStream**')
_check(_lib.Pa_OpenStream(self._ptr, iparameters, oparameters,
samplerate, blocksize, stream_flags,
callback_ptr, userdata),
'Error opening {}'.format(self.__class__.__name__))
# dereference PaStream** --> PaStream*
self._ptr = self._ptr[0]
self._blocksize = blocksize
info = _lib.Pa_GetStreamInfo(self._ptr)
if not info:
raise PortAudioError('Could not obtain stream info')
# TODO: assert info.structVersion == 1
self._samplerate = info.sampleRate
if not oparameters:
self._latency = info.inputLatency
elif not iparameters:
self._latency = info.outputLatency
else:
self._latency = info.inputLatency, info.outputLatency
if finished_callback:
if isinstance(finished_callback, _ffi.CData):
self._finished_callback = finished_callback
else:
def finished_callback_wrapper(_):
return finished_callback()
# CFFI callback object is kept alive during stream lifetime:
self._finished_callback = _ffi.callback(
'PaStreamFinishedCallback', finished_callback_wrapper)
_check(_lib.Pa_SetStreamFinishedCallback(self._ptr,
self._finished_callback))
# Avoid confusion if something goes wrong before assigning self._ptr:
_ptr = _ffi.NULL
@property
def samplerate(self):
"""The sampling frequency in Hertz (= frames per second).
In cases where the hardware sampling frequency is inaccurate and
PortAudio is aware of it, the value of this field may be
different from the *samplerate* parameter passed to `Stream()`.
If information about the actual hardware sampling frequency is
not available, this field will have the same value as the
*samplerate* parameter passed to `Stream()`.
"""
return self._samplerate
@property
def blocksize(self):
"""Number of frames per block.
The special value 0 means that the blocksize can change between
blocks. See the *blocksize* argument of `Stream`.
"""
return self._blocksize
@property
def device(self):
"""IDs of the input/output device."""
return self._device
@property
def channels(self):
"""The number of input/output channels."""
return self._channels
@property
def dtype(self):
"""Data type of the audio samples.
See Also
--------
default.dtype, samplesize
"""
return self._dtype
@property
def samplesize(self):
"""The size in bytes of a single sample.
See Also
--------
dtype
"""
return self._samplesize
@property
def latency(self):
"""The input/output latency of the stream in seconds.
This value provides the most accurate estimate of input/output
latency available to the implementation.
It may differ significantly from the *latency* value(s) passed
to `Stream()`.
"""
return self._latency
@property
def active(self):
"""``True`` when the stream is active, ``False`` otherwise.
A stream is active after a successful call to `start()`, until
it becomes inactive either as a result of a call to `stop()` or
`abort()`, or as a result of an exception raised in the stream
callback. In the latter case, the stream is considered inactive
after the last buffer has finished playing.
See Also
--------
stopped
"""
if self.closed:
return False
return _check(_lib.Pa_IsStreamActive(self._ptr)) == 1
@property
def stopped(self):
"""``True`` when the stream is stopped, ``False`` otherwise.
A stream is considered to be stopped prior to a successful call
to `start()` and after a successful call to `stop()` or
`abort()`. If a stream callback is cancelled (by raising an
exception) the stream is *not* considered to be stopped.
See Also
--------
active
"""
if self.closed:
return True
return _check(_lib.Pa_IsStreamStopped(self._ptr)) == 1
@property
def closed(self):
"""``True`` after a call to `close()`, ``False`` otherwise."""
return self._ptr == _ffi.NULL
@property
def time(self):
"""The current stream time in seconds.
This is according to the same clock used to generate the
timestamps passed with the *time* argument to the stream
callback (see the *callback* argument of `Stream`).
The time values are monotonically increasing and have
unspecified origin.
This provides valid time values for the entire life of the
stream, from when the stream is opened until it is closed.
Starting and stopping the stream does not affect the passage of
time as provided here.
This time may be used for synchronizing other events to the
audio stream, for example synchronizing audio to MIDI.
"""
time = _lib.Pa_GetStreamTime(self._ptr)
if not time:
raise PortAudioError('Error getting stream time')
return time
@property
def cpu_load(self):
"""CPU usage information for the stream.
The "CPU Load" is a fraction of total CPU time consumed by a
callback stream's audio processing routines including, but not
limited to the client supplied stream callback. This function
does not work with blocking read/write streams.
This may be used in the stream callback function or in the
application.
It provides a floating point value, typically between 0.0 and
1.0, where 1.0 indicates that the stream callback is consuming
the maximum number of CPU cycles possible to maintain real-time
operation. A value of 0.5 would imply that PortAudio and the
stream callback was consuming roughly 50% of the available CPU
time. The value may exceed 1.0. A value of 0.0 will always be
returned for a blocking read/write stream, or if an error
occurs.
"""
return _lib.Pa_GetStreamCpuLoad(self._ptr)
def __enter__(self):
"""Start the stream in the beginning of a "with" statement."""
self.start()
return self
def __exit__(self, *args):
"""Stop and close the stream when exiting a "with" statement."""
self.stop()
self.close()
def start(self):
"""Commence audio processing.
See Also
--------
stop, abort
"""
err = _lib.Pa_StartStream(self._ptr)
if err != _lib.paStreamIsNotStopped:
_check(err, 'Error starting stream')
def stop(self, ignore_errors=True):
"""Terminate audio processing.
This waits until all pending audio buffers have been played
before it returns.
See Also
--------
start, abort
"""
err = _lib.Pa_StopStream(self._ptr)
if not ignore_errors:
_check(err, 'Error stopping stream')
def abort(self, ignore_errors=True):
"""Terminate audio processing immediately.
This does not wait for pending buffers to complete.
See Also
--------
start, stop
"""
err = _lib.Pa_AbortStream(self._ptr)
if not ignore_errors:
_check(err, 'Error aborting stream')
def close(self, ignore_errors=True):
"""Close the stream.
If the audio stream is active any pending buffers are discarded
as if `abort()` had been called.
"""
err = _lib.Pa_CloseStream(self._ptr)
self._ptr = _ffi.NULL
if not ignore_errors:
_check(err, 'Error closing stream')
class RawInputStream(_StreamBase):
"""Raw stream for recording only. See __init__() and RawStream."""
def __init__(self, samplerate=None, blocksize=None,
device=None, channels=None, dtype=None, latency=None,
extra_settings=None, callback=None, finished_callback=None,
clip_off=None, dither_off=None, never_drop_input=None,
prime_output_buffers_using_stream_callback=None):
"""PortAudio input stream (using buffer objects).
This is the same as `InputStream`, except that the *callback*
function and :meth:`~RawStream.read` work on plain Python buffer
objects instead of on NumPy arrays.
NumPy is not necessary for using this.
Parameters
----------
dtype : str
See `RawStream`.
callback : callable
User-supplied function to consume audio data in response to
requests from an active stream.
The callback must have this signature::
callback(indata: buffer, frames: int,
time: CData, status: CallbackFlags) -> None
The arguments are the same as in the *callback* parameter of
`RawStream`, except that *outdata* is missing.
See Also
--------
RawStream, Stream
"""
_StreamBase.__init__(self, kind='input', wrap_callback='buffer',
**_remove_self(locals()))
@property
def read_available(self):
"""The number of frames that can be read without waiting.
Returns a value representing the maximum number of frames that
can be read from the stream without blocking or busy waiting.
"""
return _check(_lib.Pa_GetStreamReadAvailable(self._ptr))
def read(self, frames):
"""Read samples from the stream into a buffer.
This is the same as `Stream.read()`, except that it returns
a plain Python buffer object instead of a NumPy array.
NumPy is not necessary for using this.
Parameters
----------
frames : int
The number of frames to be read. See `Stream.read()`.
Returns
-------
data : buffer
A buffer of interleaved samples. The buffer contains
samples in the format specified by the *dtype* parameter
used to open the stream, and the number of channels
specified by *channels*.
See also `samplesize`.
overflowed : bool
See `Stream.read()`.
"""
channels, _ = _split(self._channels)
samplesize, _ = _split(self._samplesize)
data = _ffi.new('signed char[]', channels * samplesize * frames)
err = _lib.Pa_ReadStream(self._ptr, data, frames)
if err == _lib.paInputOverflowed:
overflowed = True
else:
_check(err)
overflowed = False
return _ffi.buffer(data), overflowed
class RawOutputStream(_StreamBase):
"""Raw stream for playback only. See __init__() and RawStream."""
def __init__(self, samplerate=None, blocksize=None,
device=None, channels=None, dtype=None, latency=None,
extra_settings=None, callback=None, finished_callback=None,
clip_off=None, dither_off=None, never_drop_input=None,
prime_output_buffers_using_stream_callback=None):
"""PortAudio output stream (using buffer objects).
This is the same as `OutputStream`, except that the *callback*
function and :meth:`~RawStream.write` work on plain Python
buffer objects instead of on NumPy arrays.
NumPy is not necessary for using this.
Parameters
----------
dtype : str
See `RawStream`.
callback : callable
User-supplied function to generate audio data in response to
requests from an active stream.
The callback must have this signature::
callback(outdata: buffer, frames: int,
time: CData, status: CallbackFlags) -> None
The arguments are the same as in the *callback* parameter of
`RawStream`, except that *indata* is missing.
See Also
--------
RawStream, Stream
"""
_StreamBase.__init__(self, kind='output', wrap_callback='buffer',
**_remove_self(locals()))
@property
def write_available(self):
"""The number of frames that can be written without waiting.
Returns a value representing the maximum number of frames that
can be written to the stream without blocking or busy waiting.
"""
return _check(_lib.Pa_GetStreamWriteAvailable(self._ptr))
def write(self, data):
"""Write samples to the stream.
This is the same as `Stream.write()`, except that it expects
a plain Python buffer object instead of a NumPy array.
NumPy is not necessary for using this.
Parameters
----------
data : buffer or bytes or iterable of int
A buffer of interleaved samples. The buffer contains
samples in the format specified by the *dtype* argument used
to open the stream, and the number of channels specified by
*channels*. The length of the buffer is not constrained to
a specific range, however high performance applications will
want to match this parameter to the *blocksize* parameter
used when opening the stream. See also `samplesize`.
Returns
-------
underflowed : bool
See `Stream.write()`.
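        Examples
        --------
        A rough sketch writing one block of int16 silence; the block
        size and stream settings are arbitrary choices:
        >>> import array
        >>> import sounddevice as sd
        >>> stream = sd.RawOutputStream(samplerate=44100, channels=1,
        ...                             dtype='int16')
        >>> stream.start()
        >>> underflowed = stream.write(array.array('h', [0] * 1024))
        >>> stream.stop()
        >>> stream.close()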
"""
try:
data = _ffi.from_buffer(data)
except AttributeError:
pass # from_buffer() not supported
except TypeError:
pass # input is not a buffer
_, samplesize = _split(self._samplesize)
_, channels = _split(self._channels)
samples, remainder = divmod(len(data), samplesize)
if remainder:
raise ValueError('len(data) not divisible by samplesize')
frames, remainder = divmod(samples, channels)
if remainder:
raise ValueError('Number of samples not divisible by channels')
err = _lib.Pa_WriteStream(self._ptr, data, frames)
if err == _lib.paOutputUnderflowed:
underflowed = True
else:
_check(err)
underflowed = False
return underflowed
class RawStream(RawInputStream, RawOutputStream):
"""Raw stream for playback and recording. See __init__()."""
def __init__(self, samplerate=None, blocksize=None,
device=None, channels=None, dtype=None, latency=None,
extra_settings=None, callback=None, finished_callback=None,
clip_off=None, dither_off=None, never_drop_input=None,
prime_output_buffers_using_stream_callback=None):
"""PortAudio input/output stream (using buffer objects).
This is the same as `Stream`, except that the *callback*
function and `read()`/`write()` work on plain Python buffer
objects instead of on NumPy arrays.
NumPy is not necessary for using this.
To open a "raw" input-only or output-only stream use
`RawInputStream` or `RawOutputStream`, respectively.
If you want to handle audio data as NumPy arrays instead of
buffer objects, use `Stream`, `InputStream` or `OutputStream`.
Parameters
----------
dtype : str or pair of str
The sample format of the buffers provided to the stream
callback, `read()` or `write()`.
In addition to the formats supported by `Stream`
(``'float32'``, ``'int32'``, ``'int16'``, ``'int8'``,
``'uint8'``), this also supports ``'int24'``, i.e.
packed 24 bit format.
The default value can be changed with `default.dtype`.
See also `samplesize`.
callback : callable
User-supplied function to consume, process or generate audio
data in response to requests from an active stream.
The callback must have this signature::
callback(indata: buffer, outdata: buffer, frames: int,
time: CData, status: CallbackFlags) -> None
The arguments are the same as in the *callback* parameter of
`Stream`, except that *indata* and *outdata* are plain
Python buffer objects instead of NumPy arrays.
See Also
--------
RawInputStream, RawOutputStream, Stream
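        Examples
        --------
        A minimal pass-through sketch (illustrative only; it assumes
        the stream was opened with the same number of input and output
        channels, so both buffers have the same size)::
            def callback(indata, outdata, frames, time, status):
                outdata[:] = indata[:]  # copy the input bytes to the output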
"""
_StreamBase.__init__(self, kind='duplex', wrap_callback='buffer',
**_remove_self(locals()))
class InputStream(RawInputStream):
"""Stream for input only. See __init__() and Stream."""
def __init__(self, samplerate=None, blocksize=None,
device=None, channels=None, dtype=None, latency=None,
extra_settings=None, callback=None, finished_callback=None,
clip_off=None, dither_off=None, never_drop_input=None,
prime_output_buffers_using_stream_callback=None):
"""PortAudio input stream (using NumPy).
This has the same methods and attributes as `Stream`, except
:meth:`~Stream.write` and `write_available`.
Furthermore, the stream callback is expected to have a different
signature (see below).
Parameters
----------
callback : callable
User-supplied function to consume audio in response to
requests from an active stream.
The callback must have this signature::
callback(indata: numpy.ndarray, frames: int,
time: CData, status: CallbackFlags) -> None
The arguments are the same as in the *callback* parameter of
`Stream`, except that *outdata* is missing.
See Also
--------
Stream, RawInputStream
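        Examples
        --------
        A rough recording-callback sketch (the queue is just one
        possible way to hand data to another thread, nothing here is
        required by the API)::
            import queue
            q = queue.Queue()
            def callback(indata, frames, time, status):
                q.put(indata.copy())  # the array is reused, so copy it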
"""
_StreamBase.__init__(self, kind='input', wrap_callback='array',
**_remove_self(locals()))
def read(self, frames):
"""Read samples from the stream into a NumPy array.
The function doesn't return until all requested *frames* have
been read -- this may involve waiting for the operating system
to supply the data (except if no more than `read_available`
frames were requested).
This is the same as `RawStream.read()`, except that it
returns a NumPy array instead of a plain Python buffer object.
Parameters
----------
frames : int
            The number of frames to be read. This parameter is not
            constrained to a specific range; however, high-performance
            applications will want to match this parameter to the
            *blocksize* parameter used when opening the stream.
Returns
-------
data : numpy.ndarray
A two-dimensional `numpy.ndarray` with one column per
channel (i.e. with a shape of ``(frames, channels)``) and
with a data type specified by `dtype`.
overflowed : bool
``True`` if input data was discarded by PortAudio after the
previous call and before this call.
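        Examples
        --------
        A rough blocking-read sketch (illustrative; it assumes a
        working default input device):
        >>> import sounddevice as sd
        >>> stream = sd.InputStream(channels=1)
        >>> stream.start()
        >>> data, overflowed = stream.read(1024)
        >>> data.shape
        (1024, 1)
        >>> stream.stop()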
"""
dtype, _ = _split(self._dtype)
channels, _ = _split(self._channels)
data, overflowed = RawInputStream.read(self, frames)
data = _array(data, channels, dtype)
return data, overflowed
class OutputStream(RawOutputStream):
"""Stream for output only. See __init__() and Stream."""
def __init__(self, samplerate=None, blocksize=None,
device=None, channels=None, dtype=None, latency=None,
extra_settings=None, callback=None, finished_callback=None,
clip_off=None, dither_off=None, never_drop_input=None,
prime_output_buffers_using_stream_callback=None):
"""PortAudio output stream (using NumPy).
This has the same methods and attributes as `Stream`, except
:meth:`~Stream.read` and `read_available`.
Furthermore, the stream callback is expected to have a different
signature (see below).
Parameters
----------
callback : callable
User-supplied function to generate audio data in response to
requests from an active stream.
The callback must have this signature::
callback(outdata: numpy.ndarray, frames: int,
time: CData, status: CallbackFlags) -> None
The arguments are the same as in the *callback* parameter of
`Stream`, except that *indata* is missing.
See Also
--------
Stream, RawOutputStream
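        Examples
        --------
        A rough sine-generator sketch (the 440 Hz tone, the
        ``start_idx`` counter and the 44100 Hz sample rate are
        arbitrary choices for this example; it assumes the stream was
        opened with ``samplerate=44100`` and mono or stereo output)::
            import numpy as np
            start_idx = 0
            def callback(outdata, frames, time, status):
                global start_idx
                t = (start_idx + np.arange(frames)) / 44100
                outdata[:] = 0.2 * np.sin(2 * np.pi * 440 * t).reshape(-1, 1)
                start_idx += frames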
"""
_StreamBase.__init__(self, kind='output', wrap_callback='array',
**_remove_self(locals()))
def write(self, data):
"""Write samples to the stream.
This function doesn't return until the entire buffer has been
consumed -- this may involve waiting for the operating system to
consume the data (except if *data* contains no more than
`write_available` frames).
This is the same as `RawStream.write()`, except that it
expects a NumPy array instead of a plain Python buffer object.
Parameters
----------
data : array_like
A two-dimensional array-like object with one column per
channel (i.e. with a shape of ``(frames, channels)``) and
with a data type specified by `dtype`. A one-dimensional
array can be used for mono data. The array layout must be
C-contiguous (see :func:`numpy.ascontiguousarray`).
            The length of the buffer is not constrained to a specific
            range; however, high-performance applications will want to
            match this parameter to the *blocksize* parameter used when
            opening the stream.
Returns
-------
underflowed : bool
``True`` if additional output data was inserted after the
previous call and before this call.
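        Examples
        --------
        A rough sketch that writes one second of silence (illustrative;
        it assumes ``stream`` was opened with ``samplerate=44100`` and
        ``channels=2``):
        >>> import numpy as np
        >>> silence = np.zeros((44100, 2), dtype='float32')
        >>> underflowed = stream.write(silence)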
"""
import numpy as np
data = np.asarray(data)
_, dtype = _split(self._dtype)
_, channels = _split(self._channels)
if data.ndim > 1 and data.shape[1] != channels:
raise ValueError('Number of channels must match')
if data.dtype != dtype:
raise TypeError('dtype mismatch: {!r} vs {!r}'.format(
data.dtype.name, dtype))
if not data.flags.c_contiguous:
raise TypeError('data must be C-contiguous')
return RawOutputStream.write(self, data)
class Stream(InputStream, OutputStream):
"""Stream for input and output. See __init__()."""
def __init__(self, samplerate=None, blocksize=None,
device=None, channels=None, dtype=None, latency=None,
extra_settings=None, callback=None, finished_callback=None,
clip_off=None, dither_off=None, never_drop_input=None,
prime_output_buffers_using_stream_callback=None):
"""PortAudio stream for simultaneous input and output (using NumPy).
To open an input-only or output-only stream use `InputStream` or
`OutputStream`, respectively. If you want to handle audio data
as plain buffer objects instead of NumPy arrays, use
`RawStream`, `RawInputStream` or `RawOutputStream`.
A single stream can provide multiple channels of real-time
streaming audio input and output to a client application. A
stream provides access to audio hardware represented by one or
more devices. Depending on the underlying host API, it may be
possible to open multiple streams using the same device, however
this behavior is implementation defined. Portable applications
should assume that a device may be simultaneously used by at
most one stream.
The arguments *device*, *channels*, *dtype* and *latency* can be
either single values (which will be used for both input and
output parameters) or pairs of values (where the first one is
the value for the input and the second one for the output).
All arguments are optional, the values for unspecified
parameters are taken from the `default` object.
If one of the values of a parameter pair is ``None``, the
corresponding value from `default` will be used instead.
The created stream is inactive (see `active`, `stopped`).
It can be started with `start()`.
Every stream object is also a
:ref:`context manager <python:context-managers>`, i.e. it can be
used in a :ref:`with statement <python:with>` to automatically
call `start()` in the beginning of the statement and `stop()`
and `close()` on exit.
Parameters
----------
samplerate : float, optional
The desired sampling frequency (for both input and output).
The default value can be changed with `default.samplerate`.
blocksize : int, optional
The number of frames passed to the stream callback function,
or the preferred block granularity for a blocking read/write
stream.
The special value ``blocksize=0`` (which is the default) may
be used to request that the stream callback will receive an
optimal (and possibly varying) number of frames based on
host requirements and the requested latency settings.
The default value can be changed with `default.blocksize`.
.. note:: With some host APIs, the use of non-zero
*blocksize* for a callback stream may introduce an
additional layer of buffering which could introduce
additional latency. PortAudio guarantees that the
additional latency will be kept to the theoretical
                minimum; however, it is strongly recommended that a
non-zero *blocksize* value only be used when your
algorithm requires a fixed number of frames per stream
callback.
device : int or str or pair thereof, optional
Device index(es) or query string(s) specifying the device(s)
to be used. The default value(s) can be changed with
`default.device`.
channels : int or pair of int, optional
The number of channels of sound to be delivered to the
stream callback or accessed by `read()` or `write()`. It
can range from 1 to the value of ``'max_input_channels'`` or
``'max_output_channels'`` in the dict returned by
`query_devices()`. By default, the maximum possible number
of channels for the selected device is used (which may not
be what you want; see `query_devices()`). The default
value(s) can be changed with `default.channels`.
dtype : str or numpy.dtype or pair thereof, optional
The sample format of the `numpy.ndarray` provided to the
stream callback, `read()` or `write()`.
It may be any of *float32*, *int32*, *int16*, *int8*,
*uint8*. See `numpy.dtype`.
            The *float64* data type is not supported here; it is only
            supported for convenience in `play()`/`rec()`/`playrec()`.
The packed 24 bit format ``'int24'`` is only supported in
the "raw" stream classes, see `RawStream`. The default
value(s) can be changed with `default.dtype`.
latency : float or {'low', 'high'} or pair thereof, optional
The desired latency in seconds. The special values
``'low'`` and ``'high'`` (latter being the default) select
the default low and high latency, respectively (see
`query_devices()`). The default value(s) can be changed
with `default.latency`.
Where practical, implementations should configure their
            latency based on this parameter; otherwise they may choose
the closest viable latency instead. Unless the suggested
latency is greater than the absolute upper limit for the
device, implementations should round the *latency* up to the
next practical value -- i.e. to provide an equal or higher
latency wherever possible. Actual latency values for an
open stream may be retrieved using the `latency` attribute.
.. note:: Specifying the desired latency as 'high' does
not guarantee a stable audio stream. For reference, by
default Audacity specifies a desired latency of 100ms and
achieves robust performance.
extra_settings : settings object or pair thereof, optional
This can be used for host-API-specific input/output
settings. See `default.extra_settings`.
callback : callable, optional
User-supplied function to consume, process or generate audio
data in response to requests from an `active` stream.
When a stream is running, PortAudio calls the stream
callback periodically. The callback function is responsible
for processing and filling input and output buffers,
respectively.
If no *callback* is given, the stream will be opened in
"blocking read/write" mode. In blocking mode, the client
can receive sample data using `read()` and write sample
data using `write()`, the number of frames that may be
read or written without blocking is returned by
`read_available` and `write_available`, respectively.
The callback must have this signature::
callback(indata: ndarray, outdata: ndarray, frames: int,
time: CData, status: CallbackFlags) -> None
The first and second argument are the input and output
buffer, respectively, as two-dimensional `numpy.ndarray`
with one column per channel (i.e. with a shape of
``(frames, channels)``) and with a data type specified by
`dtype`.
The output buffer contains uninitialized data and the
*callback* is supposed to fill it with proper audio data.
If no data is available, the buffer should be filled with
zeros (e.g. by using ``outdata.fill(0)``).
.. note:: In Python, assigning to an identifier merely
re-binds the identifier to another object, so this *will
not work* as expected::
outdata = my_data # Don't do this!
To actually assign data to the buffer itself, you can use
indexing, e.g.::
outdata[:] = my_data
... which fills the whole buffer, or::
outdata[:, 1] = my_channel_data
... which only fills one channel.
The third argument holds the number of frames to be
processed by the stream callback. This is the same as the
length of the input and output buffers.
            The fourth argument provides a CFFI structure with
timestamps indicating the ADC capture time of the first
sample in the input buffer (``time.inputBufferAdcTime``),
the DAC output time of the first sample in the output buffer
(``time.outputBufferDacTime``) and the time the callback was
invoked (``time.currentTime``).
These time values are expressed in seconds and are
synchronised with the time base used by `time` for the
associated stream.
The fifth argument is a `CallbackFlags` instance indicating
whether input and/or output buffers have been inserted or
will be dropped to overcome underflow or overflow
conditions.
If an exception is raised in the *callback*, it will not be
called again. If `CallbackAbort` is raised, the stream will
finish as soon as possible. If `CallbackStop` is raised,
the stream will continue until all buffers generated by the
callback have been played. This may be useful in
applications such as soundfile players where a specific
duration of output is required. If another exception is
raised, its traceback is printed to `sys.stderr`.
Exceptions are *not* propagated to the main thread, i.e. the
main Python program keeps running as if nothing had
happened.
.. note:: The *callback* must always fill the entire output
buffer, no matter if or which exceptions are raised.
If no exception is raised in the *callback*, it
automatically continues to be called until `stop()`,
`abort()` or `close()` are used to stop the stream.
The PortAudio stream callback runs at very high or real-time
priority. It is required to consistently meet its time
deadlines. Do not allocate memory, access the file system,
call library functions or call other functions from the
stream callback that may block or take an unpredictable
amount of time to complete. With the exception of
`cpu_load` it is not permissible to call PortAudio API
functions from within the stream callback.
In order for a stream to maintain glitch-free operation the
callback must consume and return audio data faster than it
is recorded and/or played. PortAudio anticipates that each
callback invocation may execute for a duration approaching
the duration of *frames* audio frames at the stream's
sampling frequency. It is reasonable to expect to be able
to utilise 70% or more of the available CPU time in the
PortAudio callback. However, due to buffer size adaption
and other factors, not all host APIs are able to guarantee
audio stability under heavy CPU load with arbitrary fixed
callback buffer sizes. When high callback CPU utilisation
is required the most robust behavior can be achieved by
using ``blocksize=0``.
finished_callback : callable, optional
User-supplied function which will be called when the stream
becomes inactive (i.e. once a call to `stop()` will not
block).
A stream will become inactive after the stream callback
raises an exception or when `stop()` or `abort()` is called.
For a stream providing audio output, if the stream callback
raises `CallbackStop`, or `stop()` is called, the stream
finished callback will not be called until all generated
sample data has been played. The callback must have this
signature::
finished_callback() -> None
clip_off : bool, optional
See `default.clip_off`.
dither_off : bool, optional
See `default.dither_off`.
never_drop_input : bool, optional
See `default.never_drop_input`.
prime_output_buffers_using_stream_callback : bool, optional
See `default.prime_output_buffers_using_stream_callback`.
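        Examples
        --------
        A rough "wire" sketch that loops the input back to the output
        for a few seconds (illustrative; it assumes that the default
        input and output devices both support two channels)::
            import time
            import sounddevice as sd
            def callback(indata, outdata, frames, time_info, status):
                outdata[:] = indata
            with sd.Stream(channels=2, callback=callback):
                time.sleep(5)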
"""
_StreamBase.__init__(self, kind='duplex', wrap_callback='array',
**_remove_self(locals()))
class DeviceList(tuple):
"""A list with information about all available audio devices.
This class is not meant to be instantiated by the user.
Instead, it is returned by `query_devices()`.
It contains a dictionary for each available device, holding the keys
described in `query_devices()`.
This class has a special string representation that is shown as
return value of `query_devices()` if used in an interactive
Python session. It will also be shown when using the :func:`print`
function. Furthermore, it can be obtained with :func:`repr` and
:class:`str() <str>`.
"""
__slots__ = ()
def __repr__(self):
idev = _get_device_id(default.device['input'], 'input')
odev = _get_device_id(default.device['output'], 'output')
digits = len(str(_lib.Pa_GetDeviceCount() - 1))
hostapi_names = [hostapi['name'] for hostapi in query_hostapis()]
text = '\n'.join(
u'{mark} {idx:{dig}} {name}, {ha} ({ins} in, {outs} out)'.format(
mark=(' ', '>', '<', '*')[(idx == idev) + 2 * (idx == odev)],
idx=idx,
dig=digits,
name=info['name'],
ha=hostapi_names[info['hostapi']],
ins=info['max_input_channels'],
outs=info['max_output_channels'])
for idx, info in enumerate(self))
return text
class CallbackFlags(object):
"""Flag bits for the *status* argument to a stream *callback*.
If you experience under-/overflows, you can try to increase the
``latency`` and/or ``blocksize`` settings.
You should also avoid anything that could block the callback
function for a long time, e.g. extensive computations, waiting for
another thread, reading/writing files, network connections, etc.
See Also
--------
Stream
Examples
--------
This can be used to collect the errors of multiple *status* objects:
>>> import sounddevice as sd
>>> errors = sd.CallbackFlags()
>>> errors |= status1
>>> errors |= status2
>>> errors |= status3
>>> # and so on ...
>>> errors.input_overflow
True
The values may also be set and cleared by the user:
>>> import sounddevice as sd
>>> cf = sd.CallbackFlags()
>>> cf
<sounddevice.CallbackFlags: no flags set>
>>> cf.input_underflow = True
>>> cf
<sounddevice.CallbackFlags: input underflow>
>>> cf.input_underflow = False
>>> cf
<sounddevice.CallbackFlags: no flags set>
"""
__slots__ = '_flags'
def __init__(self, flags=0x0):
self._flags = flags
def __repr__(self):
flags = str(self)
if not flags:
flags = 'no flags set'
return '<sounddevice.CallbackFlags: {}>'.format(flags)
def __str__(self):
return ', '.join(name.replace('_', ' ') for name in dir(self)
if not name.startswith('_') and getattr(self, name))
def __bool__(self):
return bool(self._flags)
def __ior__(self, other):
if not isinstance(other, CallbackFlags):
return NotImplemented
self._flags |= other._flags
return self
@property
def input_underflow(self):
"""Input underflow.
In a stream opened with ``blocksize=0``, indicates that input
data is all silence (zeros) because no real data is available.
In a stream opened with a non-zero *blocksize*, it indicates
that one or more zero samples have been inserted into the input
buffer to compensate for an input underflow.
This can only happen in full-duplex streams (including
`playrec()`).
"""
return self._hasflag(_lib.paInputUnderflow)
@input_underflow.setter
def input_underflow(self, value):
self._updateflag(_lib.paInputUnderflow, value)
@property
def input_overflow(self):
"""Input overflow.
In a stream opened with ``blocksize=0``, indicates that data
prior to the first sample of the input buffer was discarded due
to an overflow, possibly because the stream callback is using
too much CPU time. In a stream opened with a non-zero
*blocksize*, it indicates that data prior to one or more samples
in the input buffer was discarded.
This can happen in full-duplex and input-only streams (including
`playrec()` and `rec()`).
"""
return self._hasflag(_lib.paInputOverflow)
@input_overflow.setter
def input_overflow(self, value):
self._updateflag(_lib.paInputOverflow, value)
@property
def output_underflow(self):
"""Output underflow.
Indicates that output data (or a gap) was inserted, possibly
because the stream callback is using too much CPU time.
This can happen in full-duplex and output-only streams
(including `playrec()` and `play()`).
"""
return self._hasflag(_lib.paOutputUnderflow)
@output_underflow.setter
def output_underflow(self, value):
self._updateflag(_lib.paOutputUnderflow, value)
@property
def output_overflow(self):
"""Output overflow.
Indicates that output data will be discarded because no room is
available.
This can only happen in full-duplex streams (including
`playrec()`), but only when ``never_drop_input=True`` was
specified. See `default.never_drop_input`.
"""
return self._hasflag(_lib.paOutputOverflow)
@output_overflow.setter
def output_overflow(self, value):
self._updateflag(_lib.paOutputOverflow, value)
@property
def priming_output(self):
"""Priming output.
        Some or all of the output data will be used to prime the stream;
        input data may be zero.
This will only take place with some of the host APIs, and only
if ``prime_output_buffers_using_stream_callback=True`` was
specified.
See `default.prime_output_buffers_using_stream_callback`.
"""
return self._hasflag(_lib.paPrimingOutput)
def _hasflag(self, flag):
"""Check a given flag."""
return bool(self._flags & flag)
def _updateflag(self, flag, value):
"""Set/clear a given flag."""
if value:
self._flags |= flag
else:
self._flags &= ~flag
class _InputOutputPair(object):
"""Parameter pairs for device, channels, dtype and latency."""
_indexmapping = {'input': 0, 'output': 1}
def __init__(self, parent, default_attr):
self._pair = [None, None]
self._parent = parent
self._default_attr = default_attr
def __getitem__(self, index):
index = self._indexmapping.get(index, index)
value = self._pair[index]
if value is None:
value = getattr(self._parent, self._default_attr)[index]
return value
def __setitem__(self, index, value):
index = self._indexmapping.get(index, index)
self._pair[index] = value
def __repr__(self):
return '[{0[0]!r}, {0[1]!r}]'.format(self)
class default(object):
"""Get/set defaults for the *sounddevice* module.
The attributes `device`, `channels`, `dtype`, `latency` and
`extra_settings` accept single values which specify the given
property for both input and output. However, if the property
differs between input and output, pairs of values can be used, where
the first value specifies the input and the second value specifies
the output. All other attributes are always single values.
Examples
--------
>>> import sounddevice as sd
>>> sd.default.samplerate = 48000
>>> sd.default.dtype
['float32', 'float32']
Different values for input and output:
>>> sd.default.channels = 1, 2
A single value sets both input and output at the same time:
>>> sd.default.device = 5
>>> sd.default.device
[5, 5]
An attribute can be set to the "factory default" by assigning
``None``:
>>> sd.default.samplerate = None
>>> sd.default.device = None, 4
Use `reset()` to reset all attributes:
>>> sd.default.reset()
"""
_pairs = 'device', 'channels', 'dtype', 'latency', 'extra_settings'
# The class attributes listed in _pairs are only provided here for static
# analysis tools and for the docs. They're overwritten in __init__().
device = None, None
"""Index or query string of default input/output device.
If not overwritten, this is queried from PortAudio.
    If a string is given, the device that contains all space-separated
    parts in the given order is selected. Each device string contains
    the name of the corresponding host API at the end. The string
    comparison is case-insensitive.
See Also
--------
:func:`query_devices`
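    Examples
    --------
    The device name used here is hypothetical:
    >>> import sounddevice as sd
    >>> sd.default.device = 'digital output'  # select by substring
    >>> sd.default.device = 5                 # or by index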
"""
channels = _default_channels = None, None
"""Number of input/output channels.
The maximum number of channels for a given device can be found out
with `query_devices()`.
"""
dtype = _default_dtype = 'float32', 'float32'
"""Data type used for input/output samples.
The types ``'float32'``, ``'int32'``, ``'int16'``, ``'int8'`` and
``'uint8'`` can be used for all streams and functions.
Additionally, `play()`, `rec()` and `playrec()` support
``'float64'`` (for convenience, data is merely converted from/to
``'float32'``) and `RawInputStream`, `RawOutputStream` and
`RawStream` support ``'int24'`` (packed 24 bit format, which is
*not* supported in NumPy!).
If NumPy is available, the corresponding `numpy.dtype` objects can
be used as well.
The floating point representations ``'float32'`` and ``'float64'``
use +1.0 and -1.0 as the maximum and minimum values, respectively.
``'uint8'`` is an unsigned 8 bit format where 128 is considered
"ground".
"""
latency = _default_latency = 'high', 'high'
"""Suggested input/output latency in seconds.
The special values ``'low'`` and ``'high'`` can be used to select
the default low/high latency of the chosen device.
``'high'`` is typically more robust (i.e. buffer under-/overflows
are less likely), but the latency may be too large for interactive
applications.
See Also
--------
:func:`query_devices`
"""
extra_settings = _default_extra_settings = None, None
"""Host-API-specific input/output settings.
See Also
--------
AsioSettings, CoreAudioSettings, WasapiSettings
"""
samplerate = None
"""Sampling frequency in Hertz (= frames per second).
See Also
--------
:func:`query_devices`
"""
blocksize = _lib.paFramesPerBufferUnspecified
"""See the *blocksize* argument of `Stream`."""
clip_off = False
"""Disable clipping.
Set to ``True`` to disable default clipping of out of range samples.
"""
dither_off = False
"""Disable dithering.
Set to ``True`` to disable default dithering.
"""
never_drop_input = False
"""Set behavior for input overflow of full-duplex streams.
Set to ``True`` to request that where possible a full duplex stream
will not discard overflowed input samples without calling the stream
callback. This flag is only valid for full-duplex callback streams
(i.e. only `Stream` and `RawStream` and only if *callback* was
specified; this includes `playrec()`) and only when used in
combination with ``blocksize=0`` (the default). Using this flag
incorrectly results in an error being raised. See also
http://www.portaudio.com/docs/proposals/001-UnderflowOverflowHandling.html.
"""
prime_output_buffers_using_stream_callback = False
"""How to fill initial output buffers.
Set to ``True`` to call the stream callback to fill initial output
buffers, rather than the default behavior of priming the buffers
with zeros (silence). This flag has no effect for input-only
(`InputStream` and `RawInputStream`) and blocking read/write streams
(i.e. if *callback* wasn't specified). See also
http://www.portaudio.com/docs/proposals/020-AllowCallbackToPrimeStream.html.
"""
def __init__(self):
for attr in self._pairs:
# __setattr__() must be avoided here
vars(self)[attr] = _InputOutputPair(self, '_default_' + attr)
def __setattr__(self, name, value):
"""Only allow setting existing attributes."""
if name in self._pairs:
getattr(self, name)._pair[:] = _split(value)
elif name in dir(self) and name != 'reset':
object.__setattr__(self, name, value)
else:
raise AttributeError(
"'default' object has no attribute " + repr(name))
@property
def _default_device(self):
return (_lib.Pa_GetDefaultInputDevice(),
_lib.Pa_GetDefaultOutputDevice())
@property
def hostapi(self):
"""Index of the default host API (read-only)."""
return _check(_lib.Pa_GetDefaultHostApi())
def reset(self):
"""Reset all attributes to their "factory default"."""
vars(self).clear()
self.__init__()
if not hasattr(_ffi, 'I_AM_FAKE'):
# This object shadows the 'default' class, except when building the docs.
default = default()
class PortAudioError(Exception):
"""This exception will be raised on PortAudio errors.
Attributes
----------
args
A variable length tuple containing the following elements when
available:
1) A string describing the error
2) The PortAudio ``PaErrorCode`` value
3) A 3-tuple containing the host API index, host error code, and the
host error message (which may be an empty string)
"""
def __str__(self):
errormsg = self.args[0] if self.args else ''
if len(self.args) > 1:
errormsg = '{} [PaErrorCode {}]'.format(errormsg, self.args[1])
if len(self.args) > 2:
host_api, hosterror_code, hosterror_text = self.args[2]
hostname = query_hostapis(host_api)['name']
errormsg = "{}: '{}' [{} error {}]".format(
errormsg, hosterror_text, hostname, hosterror_code)
return errormsg
class CallbackStop(Exception):
"""Exception to be raised by the user to stop callback processing.
If this is raised in the stream callback, the callback will not be
invoked anymore (but all pending audio buffers will be played).
See Also
--------
CallbackAbort, :meth:`Stream.stop`, Stream
"""
class CallbackAbort(Exception):
"""Exception to be raised by the user to abort callback processing.
If this is raised in the stream callback, all pending buffers are
discarded and the callback will not be invoked anymore.
See Also
--------
CallbackStop, :meth:`Stream.abort`, Stream
"""
class AsioSettings(object):
def __init__(self, channel_selectors):
"""ASIO-specific input/output settings.
Objects of this class can be used as *extra_settings* argument
to `Stream()` (and variants) or as `default.extra_settings`.
Parameters
----------
channel_selectors : list of int
Support for opening only specific channels of an ASIO
device. *channel_selectors* is a list of integers
specifying the (zero-based) channel numbers to use.
The length of *channel_selectors* must match the
corresponding *channels* parameter of `Stream()` (or
variants), otherwise a crash may result.
The values in the *channel_selectors* array must specify
channels within the range of supported channels.
Examples
--------
Setting output channels when calling `play()`:
>>> import sounddevice as sd
>>> asio_out = sd.AsioSettings(channel_selectors=[12, 13])
>>> sd.play(..., extra_settings=asio_out)
Setting default output channels:
>>> sd.default.extra_settings = asio_out
>>> sd.play(...)
Setting input channels as well:
>>> asio_in = sd.AsioSettings(channel_selectors=[8])
>>> sd.default.extra_settings = asio_in, asio_out
>>> sd.playrec(..., channels=1, ...)
"""
if isinstance(channel_selectors, int):
raise TypeError('channel_selectors must be a list or tuple')
# int array must be kept alive!
self._selectors = _ffi.new('int[]', channel_selectors)
self._streaminfo = _ffi.new('PaAsioStreamInfo*', dict(
size=_ffi.sizeof('PaAsioStreamInfo'),
hostApiType=_lib.paASIO,
version=1,
flags=_lib.paAsioUseChannelSelectors,
channelSelectors=self._selectors))
class CoreAudioSettings(object):
def __init__(self, channel_map=None, change_device_parameters=False,
fail_if_conversion_required=False, conversion_quality='max'):
"""Mac Core Audio-specific input/output settings.
Objects of this class can be used as *extra_settings* argument
to `Stream()` (and variants) or as `default.extra_settings`.
Parameters
----------
channel_map : sequence of int, optional
Support for opening only specific channels of a Core Audio
device. Note that *channel_map* is treated differently
between input and output channels.
For input devices, *channel_map* is a list of integers
specifying the (zero-based) channel numbers to use.
For output devices, *channel_map* must have the same length
as the number of output channels of the device. Specify
unused channels with -1, and a 0-based index for any desired
channels.
See the example below. For additional information, see the
`PortAudio documentation`__.
__ https://app.assembla.com/spaces/portaudio/git/source/
master/src/hostapi/coreaudio/notes.txt
change_device_parameters : bool, optional
If ``True``, allows PortAudio to change things like the
device's frame size, which allows for much lower latency,
but might disrupt the device if other programs are using it,
even when you are just querying the device. ``False`` is
the default.
fail_if_conversion_required : bool, optional
In combination with the above flag, ``True`` causes the
stream opening to fail, unless the exact sample rates are
supported by the device.
conversion_quality : {'min', 'low', 'medium', 'high', 'max'}, optional
This sets Core Audio's sample rate conversion quality.
``'max'`` is the default.
Example
-------
This example assumes a device having 6 input and 6 output
channels. Input is from the second and fourth channels, and
output is to the device's third and fifth channels:
>>> import sounddevice as sd
>>> ca_in = sd.CoreAudioSettings(channel_map=[1, 3])
>>> ca_out = sd.CoreAudioSettings(channel_map=[-1, -1, 0, -1, 1, -1])
>>> sd.playrec(..., channels=2, extra_settings=(ca_in, ca_out))
"""
conversion_dict = {
'min': _lib.paMacCoreConversionQualityMin,
'low': _lib.paMacCoreConversionQualityLow,
'medium': _lib.paMacCoreConversionQualityMedium,
'high': _lib.paMacCoreConversionQualityHigh,
'max': _lib.paMacCoreConversionQualityMax,
}
# Minimal checking on channel_map to catch errors that might
# otherwise go unnoticed:
if isinstance(channel_map, int):
raise TypeError('channel_map must be a list or tuple')
try:
self._flags = conversion_dict[conversion_quality.lower()]
except (KeyError, AttributeError) as e:
raise ValueError('conversion_quality must be one of ' +
repr(list(conversion_dict))) from e
if change_device_parameters:
self._flags |= _lib.paMacCoreChangeDeviceParameters
if fail_if_conversion_required:
self._flags |= _lib.paMacCoreFailIfConversionRequired
# this struct must be kept alive!
self._streaminfo = _ffi.new('PaMacCoreStreamInfo*')
_lib.PaMacCore_SetupStreamInfo(self._streaminfo, self._flags)
if channel_map is not None:
# this array must be kept alive!
self._channel_map = _ffi.new('SInt32[]', channel_map)
if len(self._channel_map) == 0:
raise TypeError('channel_map must not be empty')
_lib.PaMacCore_SetupChannelMap(self._streaminfo,
self._channel_map,
len(self._channel_map))
class WasapiSettings(object):
def __init__(self, exclusive=False):
"""WASAPI-specific input/output settings.
Objects of this class can be used as *extra_settings* argument
to `Stream()` (and variants) or as `default.extra_settings`.
They can also be used in `check_input_settings()` and
`check_output_settings()`.
Parameters
----------
exclusive : bool
            Exclusive mode allows delivering audio data directly to the
            hardware, bypassing software mixing.
Examples
--------
Setting exclusive mode when calling `play()`:
>>> import sounddevice as sd
>>> wasapi_exclusive = sd.WasapiSettings(exclusive=True)
>>> sd.play(..., extra_settings=wasapi_exclusive)
Setting exclusive mode as default:
>>> sd.default.extra_settings = wasapi_exclusive
>>> sd.play(...)
"""
flags = 0x0
if exclusive:
flags |= _lib.paWinWasapiExclusive
self._streaminfo = _ffi.new('PaWasapiStreamInfo*', dict(
size=_ffi.sizeof('PaWasapiStreamInfo'),
hostApiType=_lib.paWASAPI,
version=1,
flags=flags,
))
class _CallbackContext(object):
"""Helper class for re-use in play()/rec()/playrec() callbacks."""
blocksize = None
data = None
out = None
frame = 0
input_channels = output_channels = None
input_dtype = output_dtype = None
input_mapping = output_mapping = None
silent_channels = None
def __init__(self, loop=False):
import threading
try:
import numpy
assert numpy # avoid "imported but unused" message (W0611)
except ImportError as e:
raise ImportError(
'NumPy must be installed for play()/rec()/playrec()') from e
self.loop = loop
self.event = threading.Event()
self.status = CallbackFlags()
def check_data(self, data, mapping, device):
"""Check data and output mapping."""
import numpy as np
data = np.asarray(data)
if data.ndim < 2:
data = data.reshape(-1, 1)
frames, channels = data.shape
dtype = _check_dtype(data.dtype)
mapping_is_explicit = mapping is not None
mapping, channels = _check_mapping(mapping, channels)
if data.shape[1] == 1:
pass # No problem, mono data is duplicated into arbitrary channels
elif data.shape[1] != len(mapping):
raise ValueError(
'number of output channels != size of output mapping')
# Apparently, some PortAudio host APIs duplicate mono streams to the
# first two channels, which is unexpected when specifying mapping=[1].
# In this case, we play silence on the second channel, but only if the
# device actually supports a second channel:
if (mapping_is_explicit and np.array_equal(mapping, [0]) and
query_devices(device, 'output')['max_output_channels'] >= 2):
channels = 2
silent_channels = np.setdiff1d(np.arange(channels), mapping)
if len(mapping) + len(silent_channels) != channels:
raise ValueError('each channel may only appear once in mapping')
self.data = data
self.output_channels = channels
self.output_dtype = dtype
self.output_mapping = mapping
self.silent_channels = silent_channels
return frames
def check_out(self, out, frames, channels, dtype, mapping):
"""Check out, frames, channels, dtype and input mapping."""
import numpy as np
if out is None:
if frames is None:
raise TypeError('frames must be specified')
if channels is None:
channels = default.channels['input']
if channels is None:
if mapping is None:
raise TypeError(
'Unable to determine number of input channels')
else:
channels = len(np.atleast_1d(mapping))
if dtype is None:
dtype = default.dtype['input']
out = np.empty((frames, channels), dtype, order='C')
else:
frames, channels = out.shape
dtype = out.dtype
dtype = _check_dtype(dtype)
mapping, channels = _check_mapping(mapping, channels)
if out.shape[1] != len(mapping):
raise ValueError(
'number of input channels != size of input mapping')
self.out = out
self.input_channels = channels
self.input_dtype = dtype
self.input_mapping = mapping
return out, frames
def callback_enter(self, status, data):
"""Check status and blocksize."""
self.status |= status
self.blocksize = min(self.frames - self.frame, len(data))
def read_indata(self, indata):
# We manually iterate over each channel in mapping because
# numpy.take(..., out=...) has a bug:
# https://github.com/numpy/numpy/pull/4246.
# Note: using indata[:blocksize, mapping] (a.k.a. 'fancy' indexing)
# would create unwanted copies (and probably memory allocations).
for target, source in enumerate(self.input_mapping):
# If out.dtype is 'float64', 'float32' data is "upgraded" here:
self.out[self.frame:self.frame + self.blocksize, target] = \
indata[:self.blocksize, source]
def write_outdata(self, outdata):
# 'float64' data is cast to 'float32' here:
outdata[:self.blocksize, self.output_mapping] = \
self.data[self.frame:self.frame + self.blocksize]
outdata[:self.blocksize, self.silent_channels] = 0
if self.loop and self.blocksize < len(outdata):
self.frame = 0
outdata = outdata[self.blocksize:]
self.blocksize = min(self.frames, len(outdata))
self.write_outdata(outdata)
else:
outdata[self.blocksize:] = 0
def callback_exit(self):
if not self.blocksize:
raise CallbackAbort
self.frame += self.blocksize
def finished_callback(self):
self.event.set()
# Drop temporary audio buffers to free memory
self.data = None
self.out = None
# Drop CFFI objects to avoid reference cycles
self.stream._callback = None
self.stream._finished_callback = None
def start_stream(self, StreamClass, samplerate, channels, dtype, callback,
blocking, **kwargs):
stop() # Stop previous playback/recording
self.stream = StreamClass(samplerate=samplerate,
channels=channels,
dtype=dtype,
callback=callback,
finished_callback=self.finished_callback,
**kwargs)
self.stream.start()
global _last_callback
_last_callback = self
if blocking:
self.wait()
def wait(self, ignore_errors=True):
"""Wait for finished_callback.
Can be interrupted with a KeyboardInterrupt.
"""
try:
self.event.wait()
finally:
self.stream.close(ignore_errors)
return self.status if self.status else None
def _remove_self(d):
"""Return a copy of d without the 'self' entry."""
d = d.copy()
del d['self']
return d
def _check_mapping(mapping, channels):
"""Check mapping, obtain channels."""
import numpy as np
if mapping is None:
mapping = np.arange(channels)
else:
mapping = np.array(mapping, copy=True)
mapping = np.atleast_1d(mapping)
if mapping.min() < 1:
raise ValueError('channel numbers must not be < 1')
channels = mapping.max()
mapping -= 1 # channel numbers start with 1
return mapping, channels
def _check_dtype(dtype):
"""Check dtype."""
import numpy as np
dtype = np.dtype(dtype).name
if dtype in _sampleformats:
pass
elif dtype == 'float64':
dtype = 'float32'
else:
raise TypeError('Unsupported data type: ' + repr(dtype))
return dtype
def _get_stream_parameters(kind, device, channels, dtype, latency,
extra_settings, samplerate):
"""Get parameters for one direction (input or output) of a stream."""
assert kind in ('input', 'output')
if device is None:
device = default.device[kind]
if channels is None:
channels = default.channels[kind]
if dtype is None:
dtype = default.dtype[kind]
if latency is None:
latency = default.latency[kind]
if extra_settings is None:
extra_settings = default.extra_settings[kind]
if samplerate is None:
samplerate = default.samplerate
device = _get_device_id(device, kind, raise_on_error=True)
info = query_devices(device)
if channels is None:
channels = info['max_' + kind + '_channels']
try:
# If NumPy is available, get canonical dtype name
dtype = _sys.modules['numpy'].dtype(dtype).name
except Exception:
pass # NumPy not available or invalid dtype (e.g. 'int24') or ...
try:
sampleformat = _sampleformats[dtype]
except KeyError as e:
raise ValueError('Invalid ' + kind + ' sample format') from e
samplesize = _check(_lib.Pa_GetSampleSize(sampleformat))
if latency in ('low', 'high'):
latency = info['default_' + latency + '_' + kind + '_latency']
if samplerate is None:
samplerate = info['default_samplerate']
parameters = _ffi.new('PaStreamParameters*', (
device, channels, sampleformat, latency,
extra_settings._streaminfo if extra_settings else _ffi.NULL))
return parameters, dtype, samplesize, samplerate
def _wrap_callback(callback, *args):
"""Invoke callback function and check for custom exceptions."""
args = args[:-1] + (CallbackFlags(args[-1]),)
try:
callback(*args)
except CallbackStop:
return _lib.paComplete
except CallbackAbort:
return _lib.paAbort
return _lib.paContinue
def _buffer(ptr, frames, channels, samplesize):
"""Create a buffer object from a pointer to some memory."""
return _ffi.buffer(ptr, frames * channels * samplesize)
def _array(buffer, channels, dtype):
"""Create NumPy array from a buffer object."""
import numpy as np
data = np.frombuffer(buffer, dtype=dtype)
data.shape = -1, channels
return data
def _split(value):
"""Split input/output value into two values.
This can be useful for generic code that allows using the same value
for input and output but also a pair of two separate values.
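    For example, a single value is duplicated and a pair is passed
    through unchanged:
    >>> _split(44100)
    (44100, 44100)
    >>> _split(('low', 'high'))
    ('low', 'high')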
"""
if isinstance(value, (str, bytes)):
# iterable, but not meant for splitting
return value, value
try:
invalue, outvalue = value
except TypeError:
invalue = outvalue = value
except ValueError as e:
raise ValueError('Only single values and pairs are allowed') from e
return invalue, outvalue
def _check(err, msg=''):
"""Raise PortAudioError for below-zero error codes."""
if err >= 0:
return err
errormsg = _ffi.string(_lib.Pa_GetErrorText(err)).decode()
if msg:
errormsg = '{}: {}'.format(msg, errormsg)
if err == _lib.paUnanticipatedHostError:
# (gh82) We grab the host error info here rather than inside
# PortAudioError since _check should only ever be called after a
# failing API function call. This way we can avoid any potential issues
# in scenarios where multiple APIs are being used simultaneously.
info = _lib.Pa_GetLastHostErrorInfo()
host_api = _lib.Pa_HostApiTypeIdToHostApiIndex(info.hostApiType)
hosterror_text = _ffi.string(info.errorText).decode()
hosterror_info = host_api, info.errorCode, hosterror_text
raise PortAudioError(errormsg, err, hosterror_info)
raise PortAudioError(errormsg, err)
def _get_device_id(id_or_query_string, kind, raise_on_error=False):
"""Return device ID given space-separated substrings."""
assert kind in ('input', 'output', None)
if id_or_query_string is None:
id_or_query_string = default.device
idev, odev = _split(id_or_query_string)
if kind == 'input':
id_or_query_string = idev
elif kind == 'output':
id_or_query_string = odev
else:
if idev == odev:
id_or_query_string = idev
else:
raise ValueError('Input and output device are different: {!r}'
.format(id_or_query_string))
if isinstance(id_or_query_string, int):
return id_or_query_string
device_list = []
for id, info in enumerate(query_devices()):
if not kind or info['max_' + kind + '_channels'] > 0:
hostapi_info = query_hostapis(info['hostapi'])
device_list.append((id, info['name'], hostapi_info['name']))
query_string = id_or_query_string.lower()
substrings = query_string.split()
matches = []
exact_device_matches = []
for id, device_string, hostapi_string in device_list:
full_string = device_string + ', ' + hostapi_string
pos = 0
for substring in substrings:
pos = full_string.lower().find(substring, pos)
if pos < 0:
break
pos += len(substring)
else:
matches.append((id, full_string))
if device_string.lower() == query_string:
exact_device_matches.append(id)
if kind is None:
kind = 'input/output' # Just used for error messages
if not matches:
if raise_on_error:
raise ValueError(
'No ' + kind + ' device matching ' + repr(id_or_query_string))
else:
return -1
if len(matches) > 1:
if len(exact_device_matches) == 1:
return exact_device_matches[0]
if raise_on_error:
raise ValueError('Multiple ' + kind + ' devices found for ' +
repr(id_or_query_string) + ':\n' +
'\n'.join('[{}] {}'.format(id, name)
for id, name in matches))
else:
return -1
return matches[0][0]
def _initialize():
"""Initialize PortAudio.
This temporarily forwards messages from stderr to ``/dev/null``
(where supported).
In most cases, this doesn't have to be called explicitly, because it
is automatically called with the ``import sounddevice`` statement.
"""
old_stderr = None
try:
stdio = _ffi.dlopen(None)
except OSError:
pass
else:
for stderr_name in 'stderr', '__stderrp':
try:
old_stderr = getattr(stdio, stderr_name)
except _ffi.error:
continue
else:
devnull = stdio.fopen(_os.devnull.encode(), b'w')
setattr(stdio, stderr_name, devnull)
break
try:
_check(_lib.Pa_Initialize(), 'Error initializing PortAudio')
global _initialized
_initialized += 1
finally:
if old_stderr is not None:
setattr(stdio, stderr_name, old_stderr)
stdio.fclose(devnull)
def _terminate():
"""Terminate PortAudio.
In most cases, this doesn't have to be called explicitly.
"""
global _initialized
_check(_lib.Pa_Terminate(), 'Error terminating PortAudio')
_initialized -= 1
def _exit_handler():
assert _initialized >= 0
# We cleanup any open streams here since older versions of portaudio don't
# manage this (see github issue #1)
if _last_callback:
# NB: calling stop() first is required; without it portaudio hangs when
# calling close()
_last_callback.stream.stop()
_last_callback.stream.close()
while _initialized:
_terminate()
_atexit.register(_exit_handler)
_initialize()
if __name__ == '__main__':
print(query_devices())
| mit | 6,031,826,291,675,383,000 | 36.47811 | 80 | 0.610855 | false | 4.429651 | false | false | false |
openstack/ironic | ironic/api/controllers/v1/volume_target.py | 1 | 18441 | # Copyright (c) 2017 Hitachi, Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from http import client as http_client
from ironic_lib import metrics_utils
from oslo_utils import uuidutils
from pecan import rest
from ironic import api
from ironic.api.controllers import link
from ironic.api.controllers.v1 import collection
from ironic.api.controllers.v1 import notification_utils as notify
from ironic.api.controllers.v1 import utils as api_utils
from ironic.api import method
from ironic.common import args
from ironic.common import exception
from ironic.common.i18n import _
from ironic.common import policy
from ironic import objects
METRICS = metrics_utils.get_metrics_logger(__name__)
_DEFAULT_RETURN_FIELDS = ['uuid', 'node_uuid', 'volume_type',
'boot_index', 'volume_id']
TARGET_SCHEMA = {
'type': 'object',
'properties': {
'boot_index': {'type': 'integer'},
'extra': {'type': ['object', 'null']},
'node_uuid': {'type': 'string'},
'properties': {'type': ['object', 'null']},
'volume_id': {'type': 'string'},
'volume_type': {'type': 'string'},
'uuid': {'type': ['string', 'null']},
},
'required': ['boot_index', 'node_uuid', 'volume_id', 'volume_type'],
'additionalProperties': False,
}
TARGET_VALIDATOR_EXTRA = args.dict_valid(
node_uuid=args.uuid,
uuid=args.uuid,
)
TARGET_VALIDATOR = args.and_valid(
args.schema(TARGET_SCHEMA),
TARGET_VALIDATOR_EXTRA
)
PATCH_ALLOWED_FIELDS = [
'boot_index',
'extra',
'node_uuid',
'properties',
'volume_id',
'volume_type'
]
def convert_with_links(rpc_target, fields=None, sanitize=True):
target = api_utils.object_to_dict(
rpc_target,
link_resource='volume/targets',
fields=(
'boot_index',
'extra',
'properties',
'volume_id',
'volume_type'
)
)
api_utils.populate_node_uuid(rpc_target, target)
if fields is not None:
api_utils.check_for_invalid_fields(fields, target)
if not sanitize:
return target
api_utils.sanitize_dict(target, fields)
return target
def list_convert_with_links(rpc_targets, limit, url=None, fields=None,
detail=None, **kwargs):
if detail:
kwargs['detail'] = detail
return collection.list_convert_with_links(
items=[convert_with_links(p, fields=fields, sanitize=False)
for p in rpc_targets],
item_name='targets',
limit=limit,
url=url,
fields=fields,
sanitize_func=api_utils.sanitize_dict,
**kwargs
)
class VolumeTargetsController(rest.RestController):
"""REST controller for VolumeTargets."""
invalid_sort_key_list = ['extra', 'properties']
def __init__(self, node_ident=None):
super(VolumeTargetsController, self).__init__()
self.parent_node_ident = node_ident
def _redact_target_properties(self, target):
# Filters what could contain sensitive information. For iSCSI
# volumes this can include iscsi connection details which may
# be sensitive.
redacted = ('** Value redacted: Requires permission '
'baremetal:volume:view_target_properties '
'access. Permission denied. **')
redacted_message = {
'redacted_contents': redacted
}
target.properties = redacted_message
def _get_volume_targets_collection(self, node_ident, marker, limit,
sort_key, sort_dir, resource_url=None,
fields=None, detail=None,
project=None):
limit = api_utils.validate_limit(limit)
sort_dir = api_utils.validate_sort_dir(sort_dir)
marker_obj = None
if marker:
marker_obj = objects.VolumeTarget.get_by_uuid(
api.request.context, marker)
if sort_key in self.invalid_sort_key_list:
raise exception.InvalidParameterValue(
_("The sort_key value %(key)s is an invalid field for "
"sorting") % {'key': sort_key})
node_ident = self.parent_node_ident or node_ident
if node_ident:
# FIXME(comstud): Since all we need is the node ID, we can
# make this more efficient by only querying
# for that column. This will get cleaned up
# as we move to the object interface.
node = api_utils.get_rpc_node(node_ident)
targets = objects.VolumeTarget.list_by_node_id(
api.request.context, node.id, limit, marker_obj,
sort_key=sort_key, sort_dir=sort_dir, project=project)
else:
targets = objects.VolumeTarget.list(api.request.context,
limit, marker_obj,
sort_key=sort_key,
sort_dir=sort_dir,
project=project)
cdict = api.request.context.to_policy_values()
if not policy.check_policy('baremetal:volume:view_target_properties',
cdict, cdict):
for target in targets:
self._redact_target_properties(target)
return list_convert_with_links(targets, limit,
url=resource_url,
fields=fields,
sort_key=sort_key,
sort_dir=sort_dir,
detail=detail)
@METRICS.timer('VolumeTargetsController.get_all')
@method.expose()
@args.validate(node=args.uuid_or_name, marker=args.uuid,
limit=args.integer, sort_key=args.string,
sort_dir=args.string, fields=args.string_list,
detail=args.boolean)
def get_all(self, node=None, marker=None, limit=None, sort_key='id',
sort_dir='asc', fields=None, detail=None, project=None):
"""Retrieve a list of volume targets.
:param node: UUID or name of a node, to get only volume targets
for that node.
:param marker: pagination marker for large data sets.
:param limit: maximum number of resources to return in a single result.
This value cannot be larger than the value of max_limit
in the [api] section of the ironic configuration, or only
max_limit resources will be returned.
:param sort_key: column to sort results by. Default: id.
:param sort_dir: direction to sort. "asc" or "desc". Default: "asc".
:param fields: Optional, a list with a specified set of fields
of the resource to be returned.
:param detail: Optional, whether to retrieve with detail.
:param project: Optional, an associated node project (owner,
or lessee) to filter the query upon.
:returns: a list of volume targets, or an empty list if no volume
target is found.
:raises: InvalidParameterValue if sort_key does not exist
:raises: InvalidParameterValue if sort key is invalid for sorting.
:raises: InvalidParameterValue if both fields and detail are specified.
"""
project = api_utils.check_volume_list_policy(
parent_node=self.parent_node_ident)
if fields is None and not detail:
fields = _DEFAULT_RETURN_FIELDS
if fields and detail:
raise exception.InvalidParameterValue(
_("Can't fetch a subset of fields with 'detail' set"))
resource_url = 'volume/targets'
return self._get_volume_targets_collection(node, marker, limit,
sort_key, sort_dir,
resource_url=resource_url,
fields=fields,
detail=detail,
project=project)
@METRICS.timer('VolumeTargetsController.get_one')
@method.expose()
@args.validate(target_uuid=args.uuid, fields=args.string_list)
def get_one(self, target_uuid, fields=None):
"""Retrieve information about the given volume target.
:param target_uuid: UUID of a volume target.
:param fields: Optional, a list with a specified set of fields
of the resource to be returned.
:returns: API-serializable volume target object.
:raises: OperationNotPermitted if accessed with specifying a parent
node.
:raises: VolumeTargetNotFound if no volume target with this UUID exists
"""
rpc_target, _ = api_utils.check_volume_policy_and_retrieve(
'baremetal:volume:get',
target_uuid,
target=True)
if self.parent_node_ident:
raise exception.OperationNotPermitted()
cdict = api.request.context.to_policy_values()
if not policy.check_policy('baremetal:volume:view_target_properties',
cdict, cdict):
self._redact_target_properties(rpc_target)
return convert_with_links(rpc_target, fields=fields)
@METRICS.timer('VolumeTargetsController.post')
@method.expose(status_code=http_client.CREATED)
@method.body('target')
@args.validate(target=TARGET_VALIDATOR)
def post(self, target):
"""Create a new volume target.
:param target: a volume target within the request body.
:returns: API-serializable volume target object.
:raises: OperationNotPermitted if accessed with specifying a parent
node.
:raises: VolumeTargetBootIndexAlreadyExists if a volume target already
exists with the same node ID and boot index
:raises: VolumeTargetAlreadyExists if a volume target with the same
UUID exists
"""
context = api.request.context
raise_node_not_found = False
node = None
owner = None
lessee = None
node_uuid = target.get('node_uuid')
try:
node = api_utils.replace_node_uuid_with_id(target)
owner = node.owner
lessee = node.lessee
except exception.NotFound:
raise_node_not_found = True
api_utils.check_owner_policy('node', 'baremetal:volume:create',
owner, lessee=lessee,
conceal_node=False)
if raise_node_not_found:
raise exception.InvalidInput(fieldname='node_uuid',
value=node_uuid)
if self.parent_node_ident:
raise exception.OperationNotPermitted()
# NOTE(hshiina): UUID is mandatory for notification payload
if not target.get('uuid'):
target['uuid'] = uuidutils.generate_uuid()
new_target = objects.VolumeTarget(context, **target)
notify.emit_start_notification(context, new_target, 'create',
node_uuid=node.uuid)
with notify.handle_error_notification(context, new_target, 'create',
node_uuid=node.uuid):
new_target.create()
notify.emit_end_notification(context, new_target, 'create',
node_uuid=node.uuid)
# Set the HTTP Location Header
api.response.location = link.build_url('volume/targets',
new_target.uuid)
return convert_with_links(new_target)
@METRICS.timer('VolumeTargetsController.patch')
@method.expose()
@method.body('patch')
@args.validate(target_uuid=args.uuid, patch=args.patch)
def patch(self, target_uuid, patch):
"""Update an existing volume target.
:param target_uuid: UUID of a volume target.
:param patch: a json PATCH document to apply to this volume target.
:returns: API-serializable volume target object.
:raises: OperationNotPermitted if accessed with specifying a
parent node.
:raises: PatchError if a given patch can not be applied.
:raises: InvalidParameterValue if the volume target's UUID is being
changed
:raises: NodeLocked if the node is already locked
:raises: NodeNotFound if the node associated with the volume target
does not exist
:raises: VolumeTargetNotFound if the volume target cannot be found
:raises: VolumeTargetBootIndexAlreadyExists if a volume target already
exists with the same node ID and boot index values
:raises: InvalidUUID if invalid node UUID is passed in the patch.
:raises: InvalidStateRequested If a node associated with the
volume target is not powered off.
"""
context = api.request.context
api_utils.check_volume_policy_and_retrieve('baremetal:volume:update',
target_uuid,
target=True)
if self.parent_node_ident:
raise exception.OperationNotPermitted()
api_utils.patch_validate_allowed_fields(patch, PATCH_ALLOWED_FIELDS)
values = api_utils.get_patch_values(patch, '/node_uuid')
for value in values:
if not uuidutils.is_uuid_like(value):
message = _("Expected a UUID for node_uuid, but received "
"%(uuid)s.") % {'uuid': str(value)}
raise exception.InvalidUUID(message=message)
rpc_target = objects.VolumeTarget.get_by_uuid(context, target_uuid)
target_dict = rpc_target.as_dict()
# NOTE(smoriya):
# 1) Remove node_id because it's an internal value and
# not present in the API object
# 2) Add node_uuid
rpc_node = api_utils.replace_node_id_with_uuid(target_dict)
target_dict = api_utils.apply_jsonpatch(target_dict, patch)
try:
if target_dict['node_uuid'] != rpc_node.uuid:
# TODO(TheJulia): I guess the intention is to
# permit the mapping to be changed
# should we even allow this at all?
rpc_node = objects.Node.get(
api.request.context, target_dict['node_uuid'])
except exception.NodeNotFound as e:
# Change error code because 404 (NotFound) is inappropriate
# response for a PATCH request to change a volume target
e.code = http_client.BAD_REQUEST # BadRequest
raise
api_utils.patched_validate_with_schema(
target_dict, TARGET_SCHEMA, TARGET_VALIDATOR)
api_utils.patch_update_changed_fields(
target_dict, rpc_target, fields=objects.VolumeTarget.fields,
schema=TARGET_SCHEMA, id_map={'node_id': rpc_node.id}
)
notify.emit_start_notification(context, rpc_target, 'update',
node_uuid=rpc_node.uuid)
with notify.handle_error_notification(context, rpc_target, 'update',
node_uuid=rpc_node.uuid):
topic = api.request.rpcapi.get_topic_for(rpc_node)
new_target = api.request.rpcapi.update_volume_target(
context, rpc_target, topic)
api_target = convert_with_links(new_target)
notify.emit_end_notification(context, new_target, 'update',
node_uuid=rpc_node.uuid)
return api_target
@METRICS.timer('VolumeTargetsController.delete')
@method.expose(status_code=http_client.NO_CONTENT)
@args.validate(target_uuid=args.uuid)
def delete(self, target_uuid):
"""Delete a volume target.
:param target_uuid: UUID of a volume target.
:raises: OperationNotPermitted if accessed with specifying a
parent node.
:raises: NodeLocked if node is locked by another conductor
:raises: NodeNotFound if the node associated with the target does
not exist
:raises: VolumeTargetNotFound if the volume target cannot be found
:raises: InvalidStateRequested If a node associated with the
volume target is not powered off.
"""
context = api.request.context
api_utils.check_volume_policy_and_retrieve('baremetal:volume:delete',
target_uuid,
target=True)
if self.parent_node_ident:
raise exception.OperationNotPermitted()
rpc_target = objects.VolumeTarget.get_by_uuid(context, target_uuid)
rpc_node = objects.Node.get_by_id(context, rpc_target.node_id)
notify.emit_start_notification(context, rpc_target, 'delete',
node_uuid=rpc_node.uuid)
with notify.handle_error_notification(context, rpc_target, 'delete',
node_uuid=rpc_node.uuid):
topic = api.request.rpcapi.get_topic_for(rpc_node)
api.request.rpcapi.destroy_volume_target(context,
rpc_target, topic)
notify.emit_end_notification(context, rpc_target, 'delete',
node_uuid=rpc_node.uuid)
| apache-2.0 | 1,114,913,847,609,829,500 | 40.440449 | 79 | 0.579253 | false | 4.453272 | false | false | false |
wadester/wh_test_py | py_utc_timestamp.py | 1 | 1090 | #!/usr/bin/env python
# Module: py_utc_timestamp.py
# Purpose: Python UTC timestamp
# Date: N/A
# Notes:
# 1) ...
# Ref:
# http://stackoverflow.com/questions/8777753/converting-datetime-date-to-utc-timestamp-in-python
#
"""py_utc_timestamp.py: Python UTC timestamp test"""
from __future__ import division
from datetime import datetime, timedelta
import subprocess
def totimestamp(dt, epoch=datetime(1970,1,1)):
td = dt - epoch
# return td.total_seconds()
return (td.microseconds + (td.seconds + td.days * 86400) * 10**6) / 10**6
def py_utc_timestamp():
"""py_utc_timestamp.py: run the test"""
print __doc__
now = datetime.utcnow()
print now
print totimestamp(now)
ref_dt = datetime(2016, 7, 13, 1, 2, 3, 0)
subprocess.call("date '+%s' -d '2016/7/13 1:2:3 UTC'", shell=True)
ref_dt_t = 1468371723
ref_dt_res = totimestamp(ref_dt)
delta = ref_dt_t - ref_dt_res
print "ref = {0} t={1} result={2}".format(ref_dt, ref_dt_t, ref_dt_res)
print "delta={0}".format(delta)
if __name__ == "__main__":
py_utc_timestamp()
| gpl-2.0 | 6,463,227,694,067,315,000 | 27.684211 | 97 | 0.63211 | false | 2.906667 | false | false | false |
Micronaet/micronaet-lognaet | lognaet/lognaet.py | 1 | 3724 | # -*- coding: utf-8 -*-
###############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2001-2015 Micronaet S.r.l. (<http://www.micronaet.it>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
import os
import sys
import logging
import openerp
import openerp.netsvc as netsvc
import openerp.addons.decimal_precision as dp
from openerp.osv import fields, osv, expression, orm
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
from openerp import SUPERUSER_ID
from openerp import tools
from openerp.tools.translate import _
from openerp.tools.float_utils import float_round as round
from openerp.tools import (DEFAULT_SERVER_DATE_FORMAT,
DEFAULT_SERVER_DATETIME_FORMAT,
DATETIME_FORMATS_MAP,
float_compare)
_logger = logging.getLogger(__name__)
class LognaetMovement(orm.Model):
''' Default object for manage movement modifications
'''
_name = 'lognaet.movement'
_description = 'Log movement'
_columns = {
'name': fields.char('Reference', size=50),
'cause': fields.selection([
('1', 'Delete'),
('2', 'Variation (Q)'),
('3', 'Variation (Price)'),
('4', 'Variation (Discount)'),
('5', 'Add'),
], 'Cause'),
'hostname': fields.char('Hostname', size=30),
'username': fields.char('Username', size=30),
'document': fields.char('Type of doc.', size=15), # TODO selection!!
#'series': fields.char('Series', size=2),
#'number': fields.char('Number', size=15),
'article': fields.char('Article', size=20),
'lot': fields.char('Lot', size=18),
'code': fields.char('Partner code', size=10),
'date': fields.date('Date'),
'year': fields.char('Year', size=4),
'previous': fields.char('Previous', size=15),
'current': fields.char('Current', size=15),
'timestamp': fields.datetime('Timestamp'),
}
    _defaults = {
        # callable default so the timestamp is evaluated per record,
        # not once when the module is imported
        'timestamp': lambda *a: datetime.now().strftime(
            DEFAULT_SERVER_DATETIME_FORMAT),
        }
class LognaetOrder(orm.Model):
''' Default object for manage order statistics
'''
_name = 'lognaet.order'
_description = 'Log order'
_columns = {
'name': fields.char('Reference', size=100),
'hostname': fields.char('Hostname', size=30),
'username': fields.char('Username', size=30),
'user': fields.char('Mexal user', size=30),
'type': fields.selection([
('ModOC', 'Modify'),
('InsOC', 'Insert'),
], 'Type'),
'start': fields.char('Start time', size=10),
'end': fields.char('End time', size=10),
'date': fields.date('Date'),
'total': fields.integer('Total row'),
'timestamp': fields.datetime('Timestamp'),
}
    _defaults = {
        # callable default so the timestamp is evaluated per record,
        # not once when the module is imported
        'timestamp': lambda *a: datetime.now().strftime(
            DEFAULT_SERVER_DATETIME_FORMAT),
        }
| agpl-3.0 | -5,641,202,321,804,962,000 | 34.807692 | 79 | 0.597744 | false | 4.083333 | false | false | false |
lukasjuhrich/pycroft | hades_logs/app.py | 1 | 1259 | from celery import Celery
class HadesCelery(Celery):
"""Celery subclass complying with the Hades RPC API
This subclass sets a few options in :meth:`__init__` such as the
default exchange and hooks into :meth:`signature` to set a routing
key if given.
:param str routing_key: The routing key to enforce in the options
given to :meth:`signature`. For unicast messages it is
usually of the format ``<site>`` or ``<site>.<node>``. If not
set, behavior of :meth:`signature` is unchanged.
"""
def __init__(self, *a, routing_key=None, **kw):
super().__init__(*a, **kw)
self.routing_key = routing_key
self.conf['CELERY_DEFAULT_EXCHANGE'] = 'hades.agent.rpc'
self.conf['CELERY_DEFAULT_EXCHANGE_TYPE'] = 'topic'
self.conf['CELERY_CREATE_MISSING_QUEUES'] = True
self.conf['CELERY_TASK_SERIALIZER'] = 'json'
self.conf['CELERY_EVENT_SERIALIZER'] = 'json'
self.conf['CELERY_RESULT_SERIALIZER'] = 'json'
def signature(self, *a, **kw):
if self.routing_key is not None:
kw = kw.copy()
kw.setdefault('options', {})
kw['options']['routing_key'] = self.routing_key
return super().signature(*a, **kw)
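if __name__ == '__main__':
    # Minimal usage sketch (illustrative only): the main name, routing key and
    # task name below are assumptions, not part of the Hades API.
    app = HadesCelery('hades', routing_key='site-1')
    sig = app.signature('hades.agent.rpc.sample_task')
    # the enforced routing key ends up in the signature's delivery options
    assert sig.options.get('routing_key') == 'site-1'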
| apache-2.0 | -7,818,587,323,205,741,000 | 39.612903 | 70 | 0.610802 | false | 3.659884 | false | false | false |
creamidea/Mushroom | NodeSite/NodeSite/accounts/views.py | 1 | 2235 | # -*- coding: utf-8 -*-
# Create your views here.
from django.shortcuts import render
from django.http import (HttpResponse, HttpResponseRedirect)
from django.contrib.auth.views import (login, logout)
from django.contrib.auth.forms import UserCreationForm
from django.contrib import messages
from django.contrib.auth.decorators import (login_required,
permission_required)
from django.contrib.auth.models import (Permission, Group, User)
from django.contrib.contenttypes.models import ContentType
from django.core.urlresolvers import reverse  # reverse builds URLs in code the same way the url tag does in templates
def signin(request):
return login(request, template_name='signin.html')
def signout(request):
return logout(request, next_page=reverse('home'))
def signup(request):
if request.method == 'POST':
form = UserCreationForm(request.POST)
# import inspect, sys
# frame = inspect.stack()[1][0]
# caller__name__ = frame.f_locals['__name__']
# print(caller__name__)
if form.is_valid():
content_type = ContentType.objects.get(app_label='account', model='user')
            p, created = Permission.objects.get_or_create(codename=u"can_vote", name=u"can vote", content_type=content_type)
new_user = form.save()
new_user.user_permissions.add(p)
            return HttpResponseRedirect(reverse('signin'))
else:
form = UserCreationForm()
return render(request, "signup.html", {
'form': form,
})
@login_required
def profile(request):
print '/////////////////////////////////'
if request.user.has_perm('auth.can_vote'):
print 'you can vote'
form = UserCreationForm()
print dir(request.user.groups)
# print request.user.get_all_permissions()
return render(request, 'profile.html', {
'form': form,
})
@permission_required('auth.can_manage_users', raise_exception=True)
def manage(request):
form = UserCreationForm()
return render(request, 'manage.html', {
'form': form,
})
| mit | -2,341,865,873,242,403,300 | 33.296875 | 126 | 0.649658 | false | 3.771478 | false | false | false |
robjstan/modelling-course | notebooks/python/f04.py | 1 | 2908 | # setup ipython environment
from ipywidgets import interact, fixed
# setup python environment
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('ggplot')
col = plt.rcParams['axes.color_cycle']
def mc(p, q, tmax = 1, x0 = 0):
x = x0
r = np.random.rand(tmax+1)
for t in range(tmax+1):
yield x
if x == 0 and r[t] < 1 - p:
x = 1
elif x == 1 and r[t] < 1 - q:
x = 0
def plot_mc(p:(0,1,0.1)=0.9,q:(0,1,0.1)=0.9):
print('P=|%.1f %.1f|\n |%.1f %.1f|' %(p,1-p,1-q,q))
plt.figure(figsize=[9,4])
plt.plot(list(mc(p,q,100)))
plt.yticks([0,1],['$S_1$, off','$S_2$, on'])
plt.xlabel('time')
plt.ylim(-0.1,1.1)
plt.show()
def mc_sol(s1, p, q, t):
x0 = np.matrix([s1,1-s1])
pij = np.matrix([[p,1-p],[1-q,q]])
return np.array(x0*pij**t)[0]
def plot_mc_sol(s1:(0,1,0.1)=1,p:(0,1,0.1)=0.9,q:(0,1,0.1)=0.9, tmax=fixed(20)):
s1,s2 = list(zip(*[mc_sol(s1,p,q,t) for t in range(tmax+1)]))
plt.figure(figsize=[9,4])
plt.plot(s1, label='S_1, off')
plt.plot(s2, label='S_2, on')
plt.xlabel('time')
plt.ylabel('proportion of channels')
plt.ylim(-0.01,1.01)
plt.legend()
plt.show()
def plot_mc_sol2(p:(0,1,0.1)=0.9,q:(0,1,0.1)=0.9, \
tmax=fixed(20), log_n:(0,10,1)=0):
s1,s2 = list(zip(*[mc_sol(1,p,q,t) for t in range(tmax+1)]))
n = int(np.exp(log_n))
sim = [list(mc(p,q,tmax)) for _ in range(n)]
sim_av = [np.mean(s) for s in list(zip(*sim))]
print("n = %d" % n)
plt.figure(figsize=[9,4])
plt.plot(s2)
plt.plot(sim_av)
plt.xlabel('time')
plt.ylabel('proportion of channels on')
plt.ylim(-0.01,1.01)
plt.show()
def red_sim(i, n, tmax=1):
for t in range(tmax+1):
yield i
i = np.random.binomial(2*n,i/(2*n))
def plot_red_sim(log_n:(0,10,1)=7,prop_i:(0,1,0.1)=0.5,n_sim:(1,20,1)=1):
n = int(np.exp(log_n))
i = int(2*n*prop_i)
print("n = %d, i0 = %d" % (n,i))
plt.figure(figsize=[9,4])
for _ in range(n_sim):
plt.plot(list(red_sim(i,n,50)))
plt.xlabel('time')
plt.ylabel("number of copies of 'a'")
plt.ylim(-2*n*0.01, 2*n*1.01)
plt.show()
def mc4state(p1, p2, tmax=1):
x = np.matrix([1,0,0,0])
p = np.matrix([[1-p1/2-p2/2,p1/2,p2/2,0],[0,1-p2,0,p2],
[0,0,1-p1,p1],[1,0,0,0]])
for t in range(tmax+1):
yield x.tolist()[0]
x = x*p
def plot_mc4state(p1:(0,1,0.1)=1, p2:(0,1,0.1)=1, \
plot_all=False):
pts = list(zip(*mc4state(p1, p2, 30)))
plt.figure(figsize=[9,4])
plt.plot(pts[0], label='A00')
if plot_all:
plt.plot(pts[1], label='A10')
plt.plot(pts[2], label='A01')
plt.plot(pts[3], label='A11', color=col[5])
plt.xlabel('time')
plt.ylabel('proportion of enzymes in state')
plt.ylim(-0.05,1.05)
plt.legend()
plt.show()
| mit | 4,165,750,053,726,287,000 | 26.961538 | 80 | 0.525791 | false | 2.24556 | false | false | false |
tensorflow/models | research/seq_flow_lite/layers/dense_layers.py | 1 | 3708 | # Copyright 2020 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Lint as: python3
"""Basic dense layers."""
import tensorflow as tf
from layers import base_layers # import seq_flow_lite module
from layers import normalization_layers # import seq_flow_lite module
from layers import quantization_layers # import seq_flow_lite module
class BaseQDense(base_layers.BaseLayer):
"""Quantized encoder dense layers."""
def __init__(self,
units,
activation=tf.keras.layers.ReLU(),
bias=True,
rank=2,
normalize=True,
**kwargs):
self.units = units
self.rank = rank
assert rank >= 2 and rank <= 4
self.activation = activation
self.bias = bias
self.normalize = normalize
self.qoutput = quantization_layers.ActivationQuantization(**kwargs)
self._create_normalizer(**kwargs)
super(BaseQDense, self).__init__(**kwargs)
def build(self, input_shapes):
assert len(input_shapes) == self.rank
if self.rank == 4:
assert input_shapes[1] == 1 or input_shapes[2] == 1
self.in_units = input_shapes[-1]
shape = [self.in_units, self.units]
self.w = self.add_qweight(shape=shape)
if self.bias:
self.b = self.add_bias(shape=[self.units])
def _create_normalizer(self, **kwargs):
self.normalization = normalization_layers.BatchNormalization(**kwargs)
def _dense_r2(self, inputs, normalize_method):
outputs = tf.matmul(inputs, self.w)
if self.bias:
outputs = tf.nn.bias_add(outputs, self.b)
if self.normalize:
outputs = normalize_method(outputs)
if self.activation:
outputs = self.activation(outputs)
return self.qoutput(outputs)
def _dense_r34(self, inputs, normalize_method):
bsz = self.get_batch_dimension(inputs)
outputs = tf.reshape(inputs, [-1, self.in_units])
outputs = self._dense_r2(outputs, normalize_method)
if self.rank == 3:
return tf.reshape(outputs, [bsz, -1, self.units])
elif inputs.get_shape().as_list()[1] == 1:
return tf.reshape(outputs, [bsz, 1, -1, self.units])
else:
return tf.reshape(outputs, [bsz, -1, 1, self.units])
def call(self, inputs):
def normalize_method(tensor):
return self.normalization(tensor)
return self._do_call(inputs, normalize_method)
def _do_call(self, inputs, normalize_method):
if self.rank == 2:
return self._dense_r2(inputs, normalize_method)
return self._dense_r34(inputs, normalize_method)
def quantize_using_output_range(self, tensor):
return self.qoutput.quantize_using_range(tensor)
class BaseQDenseVarLen(BaseQDense):
"""Dense on variable length sequence."""
def _create_normalizer(self, **kwargs):
self.normalization = normalization_layers.VarLenBatchNormalization(
rank=2, **kwargs)
def call(self, inputs, mask, inverse_normalizer):
def normalize_method(tensor):
maskr2 = tf.reshape(mask, [-1, 1])
return self.normalization(tensor, maskr2, inverse_normalizer)
return self._do_call(inputs, normalize_method)
| apache-2.0 | 8,344,653,043,419,898,000 | 33.654206 | 80 | 0.666667 | false | 3.753036 | false | false | false |
stackdio/stackdio | stackdio/api/search/serializers.py | 2 | 1170 | # -*- coding: utf-8 -*-
# Copyright 2017, Digital Reasoning
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import unicode_literals
import logging
from rest_framework import serializers
logger = logging.getLogger(__name__)
class PassThroughSerializer(serializers.Serializer): # pylint: disable=abstract-method
def to_representation(self, instance):
return instance
class SearchSerializer(serializers.Serializer): # pylint: disable=abstract-method
type = serializers.SlugRelatedField(slug_field='model', read_only=True)
title = serializers.CharField()
url = serializers.URLField()
object = PassThroughSerializer()
| apache-2.0 | -2,793,053,900,935,845,000 | 30.621622 | 87 | 0.750427 | false | 4.193548 | false | false | false |
darrell24015/FutureLearn | Python/Week3/character.py | 1 | 1637 | class Character():
# Create a character
def __init__(self, char_name, char_description):
self.name = char_name
self.description = char_description
self.conversation = None
# Describe this character
def describe(self):
print( self.name + " is here!" )
print( self.description )
# Set what this character will say when talked to
def set_conversation(self, conversation):
self.conversation = conversation
# Talk to this character
def talk(self):
if self.conversation is not None:
print("[" + self.name + " says]: " + self.conversation)
else:
print(self.name + " doesn't want to talk to you")
# Fight with this character
def fight(self, combat_item):
print(self.name + " doesn't want to fight with you")
return True
class Enemy(Character):
def __init__(self,char_name,char_description):
super().__init__(char_name,char_description)
self.weakness = None
def fight(self, combat_item):
if combat_item == self.weakness:
print("You fend " + self.name + " off with the " + combat_item)
return True
else:
print(self.name + " crushes you, puny adventurer!")
return False
def set_weakness(self, item_weakness):
self.weakness = item_weakness
def get_weakness(self):
return self.weakness
class Friend(Character):
def __init__(self,char_name,char_description):
super().__init__(char_name,char_description)
self.feelings = None
def set_feelings(self, character_feelings):
self.feelings = character_feelings
def get_feelings(self):
return self.feelings
| gpl-3.0 | 1,497,780,064,817,535,700 | 24.578125 | 67 | 0.651191 | false | 3.403326 | false | false | false |
openprocurement/openprocurement.tender.openeu | openprocurement/tender/openeu/tests/question_blanks.py | 1 | 3648 | # -*- coding: utf-8 -*-
# TenderQuestionResourceTest
def patch_tender_question(self):
response = self.app.post_json('/tenders/{}/questions'.format(
self.tender_id), {'data': {'title': 'question title', 'description': 'question description',
'author': self.test_bids_data[0]['tenderers'][0]}})
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
question = response.json['data']
response = self.app.patch_json(
'/tenders/{}/questions/{}?acc_token={}'.format(self.tender_id, question['id'], self.tender_token),
{"data": {"answer": "answer"}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data']["answer"], "answer")
response = self.app.patch_json('/tenders/{}/questions/some_id'.format(self.tender_id),
{"data": {"answer": "answer"}}, status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location':
u'url', u'name': u'question_id'}
])
response = self.app.patch_json('/tenders/some_id/questions/some_id', {"data": {"answer": "answer"}}, status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location':
u'url', u'name': u'tender_id'}
])
response = self.app.get('/tenders/{}/questions/{}'.format(self.tender_id, question['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data']["answer"], "answer")
self.time_shift('active.pre-qualification')
self.check_chronograph()
response = self.app.patch_json(
'/tenders/{}/questions/{}?acc_token={}'.format(self.tender_id, question['id'], self.tender_token),
{"data": {"answer": "answer"}}, status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"],
"Can't update question in current (unsuccessful) tender status")
def answering_question(self):
response = self.app.post_json('/tenders/{}/questions'.format(
self.tender_id), {'data': {'title': 'question title', 'description': 'question description',
'author': self.test_bids_data[0]['tenderers'][0]}})
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
question = response.json['data']
response = self.app.patch_json(
'/tenders/{}/questions/{}?acc_token={}'.format(self.tender_id, question['id'], self.tender_token),
{"data": {"answer": "answer"}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data']["answer"], "answer")
self.assertIn('dateAnswered', response.json['data'])
question["answer"] = "answer"
question['dateAnswered'] = response.json['data']['dateAnswered']
self.time_shift('active.pre-qualification')
self.check_chronograph()
| apache-2.0 | -766,566,000,831,895,900 | 47.64 | 116 | 0.644737 | false | 3.703553 | false | false | false |
simar7/build-mozharness | mozharness/base/log.py | 7 | 14897 | #!/usr/bin/env python
# ***** BEGIN LICENSE BLOCK *****
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
# ***** END LICENSE BLOCK *****
"""Generic logging, the way I remember it from scripts gone by.
TODO:
- network logging support.
- log rotation config
"""
from datetime import datetime
import logging
import os
import sys
import traceback
# Define our own FATAL_LEVEL
FATAL_LEVEL = logging.CRITICAL + 10
logging.addLevelName(FATAL_LEVEL, 'FATAL')
# mozharness log levels.
DEBUG, INFO, WARNING, ERROR, CRITICAL, FATAL, IGNORE = (
'debug', 'info', 'warning', 'error', 'critical', 'fatal', 'ignore')
# LogMixin {{{1
class LogMixin(object):
"""This is a mixin for any object to access similar logging
functionality -- more so, of course, for those objects with
self.config and self.log_obj, of course.
"""
def _log_level_at_least(self, level):
log_level = INFO
levels = [DEBUG, INFO, WARNING, ERROR, CRITICAL, FATAL]
if hasattr(self, 'config'):
log_level = self.config.get('log_level', INFO)
return levels.index(level) >= levels.index(log_level)
def _print(self, message, stderr=False):
if not hasattr(self, 'config') or self.config.get('log_to_console', True):
if stderr:
print >> sys.stderr, message
else:
print message
def log(self, message, level=INFO, exit_code=-1):
if self.log_obj:
return self.log_obj.log_message(
message, level=level,
exit_code=exit_code,
post_fatal_callback=self._post_fatal,
)
if level == INFO:
if self._log_level_at_least(level):
self._print(message)
elif level == DEBUG:
if self._log_level_at_least(level):
self._print('DEBUG: %s' % message)
elif level in (WARNING, ERROR, CRITICAL):
if self._log_level_at_least(level):
self._print("%s: %s" % (level.upper(), message), stderr=True)
elif level == FATAL:
if self._log_level_at_least(level):
self._print("FATAL: %s" % message, stderr=True)
raise SystemExit(exit_code)
def worst_level(self, target_level, existing_level, levels=None):
"""returns either existing_level or target level.
This depends on which is closest to levels[0]
By default, levels is the list of log levels"""
if not levels:
levels = [FATAL, CRITICAL, ERROR, WARNING, INFO, DEBUG, IGNORE]
if target_level not in levels:
self.fatal("'%s' not in %s'." % (target_level, levels))
for l in levels:
if l in (target_level, existing_level):
return l
# Copying Bear's dumpException():
# https://hg.mozilla.org/build/tools/annotate/1485f23c38e0/sut_tools/sut_lib.py#l23
def exception(self, message=None, level=ERROR):
tb_type, tb_value, tb_traceback = sys.exc_info()
if message is None:
message = ""
else:
message = "%s\n" % message
for s in traceback.format_exception(tb_type, tb_value, tb_traceback):
message += "%s\n" % s
# Log at the end, as a fatal will attempt to exit after the 1st line.
self.log(message, level=level)
def debug(self, message):
self.log(message, level=DEBUG)
def info(self, message):
self.log(message, level=INFO)
def warning(self, message):
self.log(message, level=WARNING)
def error(self, message):
self.log(message, level=ERROR)
def critical(self, message):
self.log(message, level=CRITICAL)
def fatal(self, message, exit_code=-1):
self.log(message, level=FATAL, exit_code=exit_code)
def _post_fatal(self, message=None, exit_code=None):
""" Sometimes you want to create a report or cleanup
or notify on fatal(); override this method to do so.
Please don't use this for anything significantly long-running.
"""
pass
# OutputParser {{{1
class OutputParser(LogMixin):
""" Helper object to parse command output.
This will buffer output if needed, so we can go back and mark
[(linenum - 10):linenum+10] as errors if need be, without having to
get all the output first.
linenum+10 will be easy; we can set self.num_post_context_lines to 10,
and self.num_post_context_lines-- as we mark each line to at least error
level X.
linenum-10 will be trickier. We'll not only need to save the line
itself, but also the level that we've set for that line previously,
whether by matching on that line, or by a previous line's context.
We should only log that line if all output has ended (self.finish() ?);
otherwise store a list of dictionaries in self.context_buffer that is
buffered up to self.num_pre_context_lines (set to the largest
pre-context-line setting in error_list.)
"""
def __init__(self, config=None, log_obj=None, error_list=None, log_output=True):
self.config = config
self.log_obj = log_obj
self.error_list = error_list or []
self.log_output = log_output
self.num_errors = 0
self.num_warnings = 0
# TODO context_lines.
# Not in use yet, but will be based off error_list.
self.context_buffer = []
self.num_pre_context_lines = 0
self.num_post_context_lines = 0
self.worst_log_level = INFO
def parse_single_line(self, line):
for error_check in self.error_list:
# TODO buffer for context_lines.
match = False
if 'substr' in error_check:
if error_check['substr'] in line:
match = True
elif 'regex' in error_check:
if error_check['regex'].search(line):
match = True
else:
self.warning("error_list: 'substr' and 'regex' not in %s" %
error_check)
if match:
log_level = error_check.get('level', INFO)
if self.log_output:
message = ' %s' % line
if error_check.get('explanation'):
message += '\n %s' % error_check['explanation']
if error_check.get('summary'):
self.add_summary(message, level=log_level)
else:
self.log(message, level=log_level)
if log_level in (ERROR, CRITICAL, FATAL):
self.num_errors += 1
if log_level == WARNING:
self.num_warnings += 1
self.worst_log_level = self.worst_level(log_level,
self.worst_log_level)
break
else:
if self.log_output:
self.info(' %s' % line)
def add_lines(self, output):
if isinstance(output, basestring):
output = [output]
for line in output:
if not line or line.isspace():
continue
line = line.decode("utf-8", 'replace').rstrip()
self.parse_single_line(line)
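# Usage sketch for OutputParser (illustrative only; the error_list entries and
# names below are assumptions, not definitions shipped in this module):
#
#   parser = OutputParser(config={'log_level': INFO}, log_obj=my_logger,
#                         error_list=[
#                             {'substr': 'error:', 'level': ERROR},
#                             {'regex': re.compile('^WARNING'), 'level': WARNING},
#                         ])
#   parser.add_lines(command_output_lines)
#   # parser.num_errors, parser.num_warnings and parser.worst_log_level then
#   # summarize what was seen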
# BaseLogger {{{1
class BaseLogger(object):
"""Create a base logging class.
TODO: status? There may be a status object or status capability in
either logging or config that allows you to count the number of
error,critical,fatal messages for us to count up at the end (aiming
for 0).
"""
LEVELS = {
DEBUG: logging.DEBUG,
INFO: logging.INFO,
WARNING: logging.WARNING,
ERROR: logging.ERROR,
CRITICAL: logging.CRITICAL,
FATAL: FATAL_LEVEL
}
def __init__(
self, log_level=INFO,
log_format='%(message)s',
log_date_format='%H:%M:%S',
log_name='test',
log_to_console=True,
log_dir='.',
log_to_raw=False,
logger_name='',
append_to_log=False,
):
self.log_format = log_format
self.log_date_format = log_date_format
self.log_to_console = log_to_console
self.log_to_raw = log_to_raw
self.log_level = log_level
self.log_name = log_name
self.log_dir = log_dir
self.append_to_log = append_to_log
# Not sure what I'm going to use this for; useless unless we
# can have multiple logging objects that don't trample each other
self.logger_name = logger_name
self.all_handlers = []
self.log_files = {}
self.create_log_dir()
def create_log_dir(self):
if os.path.exists(self.log_dir):
if not os.path.isdir(self.log_dir):
os.remove(self.log_dir)
if not os.path.exists(self.log_dir):
os.makedirs(self.log_dir)
self.abs_log_dir = os.path.abspath(self.log_dir)
def init_message(self, name=None):
if not name:
name = self.__class__.__name__
self.log_message("%s online at %s in %s" %
(name, datetime.now().strftime("%Y%m%d %H:%M:%S"),
os.getcwd()))
def get_logger_level(self, level=None):
if not level:
level = self.log_level
return self.LEVELS.get(level, logging.NOTSET)
def get_log_formatter(self, log_format=None, date_format=None):
if not log_format:
log_format = self.log_format
if not date_format:
date_format = self.log_date_format
return logging.Formatter(log_format, date_format)
def new_logger(self, logger_name):
"""Create a new logger.
By default there are no handlers.
"""
self.logger = logging.getLogger(logger_name)
self.logger.setLevel(self.get_logger_level())
self._clear_handlers()
if self.log_to_console:
self.add_console_handler()
if self.log_to_raw:
self.log_files['raw'] = '%s_raw.log' % self.log_name
self.add_file_handler(os.path.join(self.abs_log_dir,
self.log_files['raw']),
log_format='%(message)s')
def _clear_handlers(self):
"""To prevent dups -- logging will preserve Handlers across
objects :(
"""
attrs = dir(self)
if 'all_handlers' in attrs and 'logger' in attrs:
for handler in self.all_handlers:
self.logger.removeHandler(handler)
self.all_handlers = []
def __del__(self):
logging.shutdown()
self._clear_handlers()
def add_console_handler(self, log_level=None, log_format=None,
date_format=None):
console_handler = logging.StreamHandler()
console_handler.setLevel(self.get_logger_level(log_level))
console_handler.setFormatter(self.get_log_formatter(log_format=log_format,
date_format=date_format))
self.logger.addHandler(console_handler)
self.all_handlers.append(console_handler)
def add_file_handler(self, log_path, log_level=None, log_format=None,
date_format=None):
if not self.append_to_log and os.path.exists(log_path):
os.remove(log_path)
file_handler = logging.FileHandler(log_path)
file_handler.setLevel(self.get_logger_level(log_level))
file_handler.setFormatter(self.get_log_formatter(log_format=log_format,
date_format=date_format))
self.logger.addHandler(file_handler)
self.all_handlers.append(file_handler)
def log_message(self, message, level=INFO, exit_code=-1, post_fatal_callback=None):
"""Generic log method.
There should be more options here -- do or don't split by line,
use os.linesep instead of assuming \n, be able to pass in log level
by name or number.
Adding the IGNORE special level for runCommand.
"""
if level == IGNORE:
return
for line in message.splitlines():
self.logger.log(self.get_logger_level(level), line)
if level == FATAL:
if callable(post_fatal_callback):
self.logger.log(FATAL_LEVEL, "Running post_fatal callback...")
post_fatal_callback(message=message, exit_code=exit_code)
self.logger.log(FATAL_LEVEL, 'Exiting %d' % exit_code)
raise SystemExit(exit_code)
# SimpleFileLogger {{{1
class SimpleFileLogger(BaseLogger):
"""Create one logFile. Possibly also output to
the terminal and a raw log (no prepending of level or date)
"""
def __init__(self,
log_format='%(asctime)s %(levelname)8s - %(message)s',
logger_name='Simple', log_dir='logs', **kwargs):
BaseLogger.__init__(self, logger_name=logger_name, log_format=log_format,
log_dir=log_dir, **kwargs)
self.new_logger(self.logger_name)
self.init_message()
def new_logger(self, logger_name):
BaseLogger.new_logger(self, logger_name)
self.log_path = os.path.join(self.abs_log_dir, '%s.log' % self.log_name)
self.log_files['default'] = self.log_path
self.add_file_handler(self.log_path)
# MultiFileLogger {{{1
class MultiFileLogger(BaseLogger):
"""Create a log per log level in log_dir. Possibly also output to
the terminal and a raw log (no prepending of level or date)
"""
def __init__(self, logger_name='Multi',
log_format='%(asctime)s %(levelname)8s - %(message)s',
log_dir='logs', log_to_raw=True, **kwargs):
BaseLogger.__init__(self, logger_name=logger_name,
log_format=log_format,
log_to_raw=log_to_raw, log_dir=log_dir,
**kwargs)
self.new_logger(self.logger_name)
self.init_message()
def new_logger(self, logger_name):
BaseLogger.new_logger(self, logger_name)
min_logger_level = self.get_logger_level(self.log_level)
for level in self.LEVELS.keys():
if self.get_logger_level(level) >= min_logger_level:
self.log_files[level] = '%s_%s.log' % (self.log_name,
level)
self.add_file_handler(os.path.join(self.abs_log_dir,
self.log_files[level]),
log_level=level)
# __main__ {{{1
if __name__ == '__main__':
pass
| mpl-2.0 | 2,744,077,304,454,648,300 | 36.905852 | 87 | 0.57139 | false | 3.86333 | true | false | false |
a10networks/a10sdk-python | a10sdk/core/techreport/techreport_priority_partition.py | 2 | 1363 | from a10sdk.common.A10BaseClass import A10BaseClass
class PriorityPartition(A10BaseClass):
"""Class Description::
Configure partition to always poll for techreport.
Class priority-partition supports CRUD Operations and inherits from `common/A10BaseClass`.
    This class is the `"PARENT"` class for this module.
:param part_name: {"description": "Name of partition always want to show in showtech (shared is always shown by default)", "format": "string", "minLength": 1, "optional": false, "maxLength": 14, "type": "string"}
:param uuid: {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "optional": true, "maxLength": 64, "type": "string"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/techreport/priority-partition/{part_name}`.
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.required = [ "part_name"]
self.b_key = "priority-partition"
self.a10_url="/axapi/v3/techreport/priority-partition/{part_name}"
self.DeviceProxy = ""
self.part_name = ""
self.uuid = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
| apache-2.0 | 4,059,860,966,536,163,300 | 34.868421 | 216 | 0.65077 | false | 3.775623 | false | false | false |
hubbardgary/AdventOfCode | day25.py | 1 | 4511 | # --- Day 25: Let It Snow ---
#
# Merry Christmas! Santa is booting up his weather machine; looks like you might get a white Christmas after all.
#
# The weather machine beeps! On the console of the machine is a copy protection message asking you to enter a code from
# the instruction manual. Apparently, it refuses to run unless you give it that code. No problem; you'll just look up
# the code in the--
#
# "Ho ho ho", Santa ponders aloud. "I can't seem to find the manual."
#
# You look up the support number for the manufacturer and give them a call. Good thing, too - that 49th star wasn't
# going to earn itself.
#
# "Oh, that machine is quite old!", they tell you. "That model went out of support six minutes ago, and we just
# finished shredding all of the manuals. I bet we can find you the code generation algorithm, though."
#
# After putting you on hold for twenty minutes (your call is very important to them, it reminded you repeatedly), they
# finally find an engineer that remembers how the code system works.
#
# The codes are printed on an infinite sheet of paper, starting in the top-left corner. The codes are filled in by
# diagonals: starting with the first row with an empty first box, the codes are filled in diagonally up and to the
# right. This process repeats until the infinite paper is covered. So, the first few codes are filled in in this order:
#
# | 1 2 3 4 5 6
# ---+---+---+---+---+---+---+
# 1 | 1 3 6 10 15 21
# 2 | 2 5 9 14 20
# 3 | 4 8 13 19
# 4 | 7 12 18
# 5 | 11 17
# 6 | 16
#
# For example, the 12th code would be written to row 4, column 2; the 15th code would be written to row 1, column 5.
#
# The voice on the other end of the phone continues with how the codes are actually generated. The first code is
# 20151125. After that, each code is generated by taking the previous one, multiplying it by 252533, and then keeping
# the remainder from dividing that value by 33554393.
#
# So, to find the second code (which ends up in row 2, column 1), start with the previous value, 20151125. Multiply it
# by 252533 to get 5088824049625. Then, divide that by 33554393, which leaves a remainder of 31916031. That remainder
# is the second code.
#
# "Oh!", says the voice. "It looks like we missed a scrap from one of the manuals. Let me read it to you." You write
# down his numbers:
#
# | 1 2 3 4 5 6
# ---+---------+---------+---------+---------+---------+---------+
# 1 | 20151125 18749137 17289845 30943339 10071777 33511524
# 2 | 31916031 21629792 16929656 7726640 15514188 4041754
# 3 | 16080970 8057251 1601130 7981243 11661866 16474243
# 4 | 24592653 32451966 21345942 9380097 10600672 31527494
# 5 | 77061 17552253 28094349 6899651 9250759 31663883
# 6 | 33071741 6796745 25397450 24659492 1534922 27995004
#
# "Now remember", the voice continues, "that's not even all of the first few numbers; for example, you're missing the
# one at 7,1 that would come before 6,2. But, it should be enough to let your-- oh, it's time for lunch! Bye!" The call
# disconnects.
#
# Santa looks nervous. Your puzzle input contains the message on the machine's console. What code do you give the
# machine?
#
# --- Part Two ---
#
# The machine springs to life, then falls silent again. It beeps. "Insufficient fuel", the console reads. "Fifty stars
# are required before proceeding. One star is available."
#
# ..."one star is available"? You check the fuel tank; sure enough, a lone star sits at the bottom, awaiting its
# friends. Looks like you need to provide 49 yourself.
#
# You have enough stars to [Start the machine].
#
# You fill the weather machine with fifty stars. It comes to life!
#
# Snow begins to fall.
#
# Congratulations! You've finished every puzzle! I hope you had as much fun solving them as I had making them for you.
# I'd love to hear about your adventure; you can get in touch with me via contact info on my website or through Twitter.
def next_code(current_code):
return (current_code * 252533) % 33554393
max_x = 1
max_y = 1
code = 20151125
target_x = 2981
target_y = 3075
target_found = False
while not target_found:
y = 1
x = max_x + 1
while x > 0:
max_x = max(max_x, x)
code = next_code(code)
if x == target_x and y == target_y:
print("{0}, {1} = {2}".format(x, y, code))
target_found = True
break
y += 1
x -= 1
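# Alternative (sketch): instead of walking diagonal by diagonal, the code at a
# given (row, column) can be computed directly. That cell holds the n-th code,
# with n = (row + column - 2) * (row + column - 1) // 2 + column, and each step
# multiplies by 252533 modulo 33554393, so modular exponentiation gives:
def code_at(row, column):
    n = (row + column - 2) * (row + column - 1) // 2 + column
    return (20151125 * pow(252533, n - 1, 33554393)) % 33554393
# e.g. code_at(4, 2) == 32451966, matching the table in the puzzle text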
| mit | -3,800,924,078,984,257,500 | 42.796117 | 120 | 0.674795 | false | 3.25 | false | false | false |
rreece/pyframe | test/read_tree.py | 1 | 3725 | #!/usr/bin/env python
"""
NAME
read_tree.py - a pyframe example script
SYNOPSIS
./read_tree.py myntuple.root
DESCRIPTION
Demonstrates how to read trees in pyframe and make some histograms.
OPTIONS
-h, --help
Prints this manual and exits.
-o, --output output.hists.root
Output file name.
-t, --tree myntuple
Input tree name.
AUTHOR
Ryan Reece <[email protected]>
COPYRIGHT
Copyright 2015 Ryan Reece
License: GPL <http://www.gnu.org/licenses/gpl.html>
SEE ALSO
- pyframe <https://github.com/rreece/pyframe/>
- ROOT <http://root.cern.ch>
TO DO
- One.
- Two.
2015-05-26
"""
#------------------------------------------------------------------------------
# imports
#------------------------------------------------------------------------------
## std
import argparse
## ROOT
import ROOT
ROOT.gROOT.SetBatch(True)
## my modules
import pyrootutils
import pyframe
#------------------------------------------------------------------------------
# globals
#------------------------------------------------------------------------------
GeV = 1000.
#------------------------------------------------------------------------------
# options
#------------------------------------------------------------------------------
def options():
parser = argparse.ArgumentParser()
parser.add_argument('infiles', default=None, nargs='+',
help='Input files as separate args.')
parser.add_argument('-i', '--input', default=None,
help='Input files as a comma-separated list.')
parser.add_argument('-o', '--output', default='output.hists.root',
help='Name of output file.')
parser.add_argument('-t', '--tree', default='myntuple',
help='Name of input tree.')
ops = parser.parse_args()
assert ops.infiles
return ops
#------------------------------------------------------------------------------
# main
#------------------------------------------------------------------------------
def main():
ops = options()
## get input files and output options
input_files = list(ops.infiles)
if ops.input:
s_input = str(ops.input)
input_files.extend( s_input.split(',') )
tree_name = ops.tree
plot_output = ops.output
## make an EventLoop
loop = pyframe.core.EventLoop('TestLoop', tree=tree_name)
loop.add_input_files(input_files)
## schedule algorithms
loop += PlotsAlg()
## run the event loop
loop.run()
print 'Done.'
#------------------------------------------------------------------------------
# PlotsAlg
#------------------------------------------------------------------------------
class PlotsAlg(pyframe.core.Algorithm):
#__________________________________________________________________________
def __init__(self, name='PlotsAlg'):
pyframe.core.Algorithm.__init__(self, name)
#__________________________________________________________________________
def execute(self):
weight = 1.0
## fill event-level histograms
self.hist('h_w', "ROOT.TH1F('$', ';w;Events', 20, -2.0, 3.0)").Fill(self.chain.w, weight)
self.hist('h_ph_n', "ROOT.TH1F('$', ';ph_n;Events', 20, -0.5, 19.5)").Fill(self.chain.ph_n, weight)
## build VarProxies for photons
photons = self.chain.build_var_proxies('ph_', self.chain.ph_n)
## fill histograms per photon
for ph in photons:
self.hist('h_ph_pt', "ROOT.TH1F('$', ';ph_pt;Events / (10 GeV)', 20, 0.0, 200)").Fill(ph.pt/GeV, weight)
#------------------------------------------------------------------------------
if __name__ == '__main__': main()
| gpl-2.0 | -6,439,672,346,281,726,000 | 27.219697 | 116 | 0.435168 | false | 4.247434 | false | false | false |
OriHoch/pysiogame | game_boards/game004.py | 1 | 6831 | # -*- coding: utf-8 -*-
import classes.level_controller as lc
import classes.game_driver as gd
import classes.extras as ex
import classes.board
import random
import pygame
class Board(gd.BoardGame):
def __init__(self, mainloop, speaker, config, screen_w, screen_h):
self.level = lc.Level(self,mainloop,36,6)
gd.BoardGame.__init__(self,mainloop,speaker,config,screen_w,screen_h,23,9)
def create_game_objects(self, level = 1):
self.board.decolorable = False
self.vis_buttons = [1,1,1,1,1,1,1,0,0]
self.mainloop.info.hide_buttonsa(self.vis_buttons)
if self.mainloop.scheme is None:
s = random.randrange(150, 225, 5)
v = random.randrange(190, 225, 5)
h = random.randrange(0, 255, 5)
color0 = ex.hsv_to_rgb(h,40,230) #highlight 1
color1 = ex.hsv_to_rgb(h,70,v) #highlight 2
color2 = ex.hsv_to_rgb(h,s,v) #normal color
color3 = ex.hsv_to_rgb(h,230,100)
task_bg_color = (255,255,255)
task_font_color = (0,0,0)
else:
s = 150
v = 225
h = 170
color0 = ex.hsv_to_rgb(h,40,230) #highlight 1
color1 = ex.hsv_to_rgb(h,70,v) #highlight 2
color2 = ex.hsv_to_rgb(h,s,v) #normal color
color3 = ex.hsv_to_rgb(h,230,100)
task_bg_color = self.mainloop.scheme.u_color
task_font_color = self.mainloop.scheme.u_font_color
white = (255,255,255)
#data = [x_count, y_count, range_from, range_to, max_sum_range, image]
self.points = 1
if self.level.lvl == 1:
data = [23,9]
elif self.level.lvl == 2:
data = [23,9]
color1 = color0
elif self.level.lvl == 3:
data = [23,9]
color1 = color2 = color0
elif self.level.lvl == 4:
data = [23,9]
color1 = color2 = color0
elif self.level.lvl == 5:
data = [23,9]
color0 = (0,0,0)
self.points = 2
elif self.level.lvl == 6:
data = [23,9]
color2 = color1 = color0 = (0,0,0)
color3 = (40,40,40)
self.points = 3
self.data = data
self.board.level_start(data[0],data[1],self.layout.scale)
num1 = random.randrange(1,10)
num2 = random.randrange(1,10)
self.solution = [num1,num2,num1 * num2]
self.digits = ["0","1","2","3","4","5","6","7","8","9"]
unique = set()
for i in range(1,10):
for j in range(1,10):
if i == num1 and j == num2: color=color0
elif i == num1 or j == num2: color=color1
elif self.level.lvl == 2 and (i == num2 or j == num1):color=color1
else: color = color2
mul = i*j
unique.add(mul)
caption = str(mul)
self.board.add_unit(i-1,j-1,1,1,classes.board.Label,caption,color,"",2)
self.board.add_unit(9,0,1,9,classes.board.Obstacle,"",color3)
unique = sorted(unique)
#draw outline with selectable numbers
self.multi = dict()
if self.mainloop.scheme is None:
s = 180
else:
s = 80
v = 240
h = 7
x = 11
y = 0
for i in range(36):
if i < 9: x += 1
elif i == 9: x = 22
elif i < 18: y += 1
elif i == 18: x = 20
elif i < 27: x -= 1
elif i == 27: x = 10
elif i <= 36: y -= 1
color = ex.hsv_to_rgb(h*i,s,v)
self.multi[str(unique[i])]=i
caption = str(unique[i])
self.board.add_unit(x,y,1,1,classes.board.Letter,caption,color,"",2)
self.board.ships[-1].audible = False
if self.lang.lang == "he":
sv = self.lang.n2spk(unique[i])
self.board.ships[-1].speaker_val = sv
self.board.ships[-1].speaker_val_update = False
x=14
y=4
captions = [str(num1),chr(215),str(num2),"="]
if self.level.lvl < 4:
color = self.board.ships[self.multi[str(self.solution[2])]].initcolor
else:
color = (255,255,255)#color4
for i in range(4):
self.board.add_unit(x+i,y,1,1,classes.board.Label,captions[i],color,"",2)
self.outline_all(0,1)
self.board.add_door(18,y,1,1,classes.board.Door,"",task_bg_color,"",font_size = 2)
self.home_square = self.board.units[86]
self.home_square.door_outline = True
self.home_square.font_color = task_font_color
self.board.all_sprites_list.move_to_front(self.home_square)
def handle(self,event):
gd.BoardGame.handle(self, event) #send event handling up
if self.show_msg == False:
if event.type == pygame.KEYDOWN and event.key != pygame.K_RETURN:
lhv = len(self.home_square.value)
self.changed_since_check = True
if event.key == pygame.K_BACKSPACE:
if lhv > 0:
self.home_square.value = self.home_square.value[0:lhv-1]
elif not self.board.grid[4][18]:
char = event.unicode
if len(char)>0 and lhv < 2 and char in self.digits:
self.home_square.value += char
self.home_square.update_me = True
self.mainloop.redraw_needed[0] = True
elif event.type == pygame.MOUSEMOTION and self.drag:
if self.board.grid[4][18]:
self.home_square.value = ""
self.home_square.update_me = True
def update(self,game):
game.fill((255,255,255))
gd.BoardGame.update(self, game) #rest of painting done by parent
def check_result(self):
if self.board.grid[4][18]:
sol = self.board.ships[self.multi[str(self.solution[2])]]
if sol.grid_x == 18 and sol.grid_y == 4:
self.update_score(self.points)
self.passed()
else:
self.failed()
else:
if self.home_square.value != "" and (int(self.home_square.value) == self.solution[2]):
self.update_score(self.points)
self.quick_passed()
else:
self.failed()
def passed(self):
tts = self.d["Perfect!"]#+" "+str(self.solution[0])+" "+self.d["multiplied by"]+" "+str(self.solution[1])+" "+self.d["equals"]+" "+str(self.solution[2])
self.level.next_board(tts)
def quick_passed(self):
tts = self.d["Perfect!"]
self.level.next_board(tts)
def failed(self):
self.level.try_again()
| gpl-3.0 | 6,998,048,290,850,005,000 | 36.95 | 160 | 0.51398 | false | 3.311197 | false | false | false |
codingkevin/suds | tools/run_all_tests.py | 1 | 4633 | # -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify it under
# the terms of the (LGPL) GNU Lesser General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Library Lesser General Public License
# for more details at ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jurko Gospodnetić ( [email protected] )
"""
"poor man's tox" development script used on Windows to run the full suds-jurko
test suite using multiple Python interpreter versions.
Intended to be used as a general 'all tests passed' check. To see more detailed
information on specific failures, run the failed test group manually,
configured for greater verbosity than done here.
"""
import os.path
import shutil
import sys
from suds_devel.configuration import BadConfiguration, Config, configparser
from suds_devel.environment import BadEnvironment
import suds_devel.utility as utility
class MyConfig(Config):
def __init__(self, script, project_folder, ini_file):
"""
Initialize new script configuration.
External configuration parameters may be specified relative to the
following folders:
* script - relative to the current working folder
* project_folder - relative to the script folder
* ini_file - relative to the project folder
"""
super(MyConfig, self).__init__(script, project_folder, ini_file)
try:
self._read_environment_configuration()
except configparser.Error:
raise BadConfiguration(sys.exc_info()[1].message)
def _prepare_configuration():
# We know we are a regular stand-alone script file and not an imported
# module (either frozen, imported from disk, zip-file, external database or
# any other source). That means we can safely assume we have the __file__
# attribute available.
global config
config = MyConfig(__file__, "..", "setup.cfg")
def _print_title(env, message_fmt):
separator = "-" * 63
print("")
print(separator)
print("--- " + message_fmt % (env.name(),))
print(separator)
def _report_startup_information():
print("Running in folder: '%s'" % (os.getcwd(),))
def _run_tests(env):
if env.sys_version_info >= (3,):
_print_title(env, "Building suds for Python %s")
build_folder = os.path.join(config.project_folder, "build")
if os.path.isdir(build_folder):
shutil.rmtree(build_folder)
# Install the project into the target Python environment in editable mode.
# This will actually build Python 3 sources in case we are using a Python 3
# environment.
setup_cmd = ["setup.py", "-q", "develop"]
_, _, return_code = env.execute(setup_cmd, cwd=config.project_folder)
if return_code != 0:
return False
test_folder = os.path.join(config.project_folder, "tests")
pytest_cmd = ["-m", "pytest", "-q", "-x", "--tb=short"]
_print_title(env, "Testing suds with Python %s")
_, _, return_code = env.execute(pytest_cmd, cwd=test_folder)
if return_code != 0:
return False
_print_title(env, "Testing suds with Python %s - no assertions")
pytest_cmd.insert(0, "-O")
_, _, return_code = env.execute(pytest_cmd, cwd=test_folder)
return return_code == 0
def _run_tests_in_all_environments():
if not config.python_environments:
raise BadConfiguration("No Python environments configured.")
for env in config.python_environments:
if not env.initial_scan_completed:
_print_title(env, "Scanning environment Python %s")
env.run_initial_scan()
if not _run_tests(env):
return False
return True
def main():
try:
_report_startup_information()
_prepare_configuration()
success = _run_tests_in_all_environments()
except (BadConfiguration, BadEnvironment):
utility.report_error(sys.exc_info()[1])
return -2
print("")
if not success:
print("Test failed.")
return -3
print("All tests passed.")
return 0
if __name__ == "__main__":
sys.exit(main())
| lgpl-3.0 | -1,672,474,241,241,148,400 | 33.066176 | 79 | 0.668034 | false | 3.976824 | true | false | false |
prezi/python-github3 | pygithub3/resources/issues.py | 8 | 1421 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
import re
from .base import Resource
from .users import User
from .pull_requests import PullRequest
class Label(Resource):
@staticmethod
def is_valid_color(color):
valid_color = re.compile(r'[0-9abcdefABCDEF]{6}')
match = valid_color.match(color)
if match is None:
return False
return match.start() == 0 and match.end() == len(color)
def __str__(self):
return '<Label (%s)>' % getattr(self, 'name', '')
class Milestone(Resource):
_dates = ('created_at', 'due_on')
_maps = {'creator': User}
def __str__(self):
return '<Milestone (%s)>' % getattr(self, 'title', '')
class Issue(Resource):
_dates = ('created_at', 'updated_at', 'closed_at')
_maps = {
'assignee': User,
'user': User,
'milestone': Milestone,
'pull_request': PullRequest
}
_collection_maps = {'labels': Label}
def __str__(self):
return '<Issue (%s)>' % getattr(self, 'number', '')
class Comment(Resource):
_dates = ('created_at', 'updated_at')
_maps = {'user': User}
def __str__(self):
return '<Comment (%s)>' % (getattr(self, 'user', ''))
class Event(Resource):
_dates = ('created_at', )
_maps = {'actor': User, 'issue': Issue}
def __str__(self):
return '<Event (%s)>' % (getattr(self, 'commit_id', ''))
| isc | 3,395,134,593,891,507,700 | 20.861538 | 64 | 0.549613 | false | 3.474328 | false | false | false |
OpenOil-UG/aleph | aleph/archive/archive.py | 1 | 1055 | import os
from aleph.util import checksum
class Archive(object):
def _get_file_path(self, meta):
ch = meta.content_hash
if ch is None:
raise ValueError("No content hash available.")
path = os.path.join(ch[:2], ch[2:4], ch[4:6], ch)
file_name = 'data'
if meta.file_name is not None:
file_name = meta.file_name
else:
if meta.extension is not None:
file_name = '%s.%s' % (file_name, meta.extension)
return os.path.join(path, file_name)
def _update_metadata(self, filename, meta):
meta.content_hash = checksum(filename)
return meta
def archive_file(self, filename, meta, move=False):
""" Import the given file into the archive, and return an
updated metadata object. If ``move`` is given, the original
file will not exist afterwards. """
pass
def load_file(self, meta):
pass
def cleanup_file(self, meta):
pass
def generate_url(self, meta):
return
| mit | 9,021,590,469,122,582,000 | 27.513514 | 67 | 0.583886 | false | 3.864469 | false | false | false |
wildlava/explore | python/modular/run_explore.py | 1 | 1552 | #
# Explore
# - The Adventure Interpreter
#
# Copyright (C) 2006 Joe Peterson
#
import sys
import Explore
filename = None
no_delay = False
trs_compat = False
for arg_num in range(len(sys.argv)):
if sys.argv[arg_num] == "-f":
if len(sys.argv) > (arg_num + 1) and (len(sys.argv[arg_num + 1]) == 0 or sys.argv[arg_num + 1][0] != '-'):
filename = sys.argv[arg_num + 1]
else:
print >> sys.stderr, "Error: Missing adventure filename"
sys.exit(1)
elif sys.argv[arg_num] == "-q":
quiet = True
elif sys.argv[arg_num] == "-c":
if len(sys.argv) > (arg_num + 1) and (len(sys.argv[arg_num + 1]) == 0 or sys.argv[arg_num + 1][0] != '-'):
command = sys.argv[arg_num + 1]
elif sys.argv[arg_num] == "-r":
if len(sys.argv) > (arg_num + 1) and (len(sys.argv[arg_num + 1]) == 0 or sys.argv[arg_num + 1][0] != '-'):
resume = sys.argv[arg_num + 1]
elif sys.argv[arg_num] == "-s":
if len(sys.argv) > (arg_num + 1) and (len(sys.argv[arg_num + 1]) == 0 or sys.argv[arg_num + 1][0] != '-'):
last_suspend = sys.argv[arg_num + 1]
elif sys.argv[arg_num] == "--one-shot":
one_shot = True
elif sys.argv[arg_num] == "--no-title":
show_title = False
elif sys.argv[arg_num] == "--title-only":
show_title_only = True
elif sys.argv[arg_num] == "--no-delay":
no_delay = True
elif sys.argv[arg_num] == "--trs-compat":
trs_compat = True
Explore.play(filename, no_delay, trs_compat)
| mit | 8,505,619,847,493,570,000 | 33.488889 | 114 | 0.537371 | false | 2.811594 | false | false | false |
michaelneuder/connect_four | app/bin/main_widget.py | 1 | 6141 | #!/usr/bin/env python3
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
import sqlite3
from main_widgets.home_main_widget import home_main_widget
from main_widgets.pvp_main_widget import pvp_main_widget
from main_widgets.pvai_main_widget import pvai_main_widget
from main_widgets.how_to_play import rules_main_widget
from main_widgets.game_history_main_widget import game_history_main_widget
from dialogs.save_dialog import save_dialog
from dialogs.color_select_dialog import color_select_dialog
class main_widget(QWidget):
def __init__(self):
QWidget.__init__(self)
self.main_layout = QVBoxLayout(self)
# # -------- layout declaration -------- # #
self.stack_layout = QStackedLayout()
self.footer_layout = QHBoxLayout()
self.footer_widget = QWidget()
# # -------- widget declaration -------- # #
# window widgets
self.home_widget = home_main_widget()
self.pvp_widget = pvp_main_widget()
self.pvai_widget = pvai_main_widget()
self.rules_widget = rules_main_widget()
self.game_history_widget = game_history_main_widget()
# footer widgets
self.main_menu_push_button = QPushButton("main menu")
self.rules_push_button = QPushButton("how to play")
self.pvp_push_button = QPushButton("player v. player")
self.pvai_push_button = QPushButton("player v. ai")
self.game_history_push_button = QPushButton("saved games")
self.quit_push_button = QPushButton("quit")
# # -------- add to layouts -------- # #
# stack layout
self.stack_layout.addWidget(self.home_widget)
self.stack_layout.addWidget(self.rules_widget)
self.stack_layout.addWidget(self.pvp_widget)
self.stack_layout.addWidget(self.pvai_widget)
self.stack_layout.addWidget(self.game_history_widget)
# footer layout
self.footer_layout.addStretch(0)
self.footer_layout.addWidget(self.main_menu_push_button)
self.footer_layout.addWidget(self.rules_push_button)
self.footer_layout.addWidget(self.pvp_push_button)
self.footer_layout.addWidget(self.pvai_push_button)
self.footer_layout.addWidget(self.game_history_push_button)
self.footer_layout.addWidget(self.quit_push_button)
self.footer_layout.addStretch(0)
# hiding upon opening bc menu
self.main_menu_push_button.hide()
self.pvp_push_button.hide()
self.pvai_push_button.hide()
self.rules_push_button.hide()
self.game_history_push_button.hide()
self.quit_push_button.hide()
# main layout
self.main_layout.addLayout(self.stack_layout)
self.main_layout.addLayout(self.footer_layout)
# # -------- actions -------- # #
self.main_menu_push_button.clicked.connect(self.main_menu_clicked)
self.pvp_push_button.clicked.connect(self.pvp_clicked)
self.pvai_push_button.clicked.connect(self.pvai_clicked)
self.rules_push_button.clicked.connect(self.rules_clicked)
self.game_history_push_button.clicked.connect(self.game_history_clicked)
self.home_widget.rules_push_button.clicked.connect(self.rules_clicked)
self.home_widget.pvp_push_button.clicked.connect(self.pvp_clicked)
self.home_widget.pvai_push_button.clicked.connect(self.pvai_clicked)
self.home_widget.game_history_push_button.clicked.connect(self.game_history_clicked)
self.game_history_widget.load_game_button.clicked.connect(self.load_game)
self.pvp_widget.save_clicked_signal.connect(self.game_history_widget.populate_list)
def main_menu_clicked(self):
self.stack_layout.setCurrentIndex(0)
self.hide_footer()
def pvp_clicked(self):
self.stack_layout.setCurrentIndex(2)
self.show_footer()
msg = QMessageBox()
msg.setIcon(QMessageBox.Information)
msg.setText('welcome to the player vs player game. red goes first, and '
'you can simply click on the column where you want to place your piece!')
msg.setWindowTitle("player vs player")
msg.setStandardButtons(QMessageBox.Ok)
# retval = msg.exec_()
def pvai_clicked(self):
self.stack_layout.setCurrentIndex(3)
self.show_footer()
self.dialog = color_select_dialog()
# self.dialog.exec_()
def rules_clicked(self):
self.stack_layout.setCurrentIndex(1)
self.show_footer()
def game_history_clicked(self):
self.stack_layout.setCurrentIndex(4)
self.show_footer()
def undo_clicked(self):
self.stack_layout.setCurrentIndex(2)
self.pvp_widget.undo_clicked()
def reset_clicked(self):
self.stack_layout.setCurrentIndex(2)
self.pvp_widget.reset_clicked()
def save_clicked(self):
self.stack_layout.setCurrentIndex(2)
self.pvp_widget.save_clicked()
def load_clicked(self):
self.stack_layout.setCurrentIndex(4)
msg = QMessageBox()
msg.setIcon(QMessageBox.Information)
msg.setText('select a game to load and press the load button at the bottom of page ')
msg.setWindowTitle("load")
msg.setStandardButtons(QMessageBox.Ok)
retval = msg.exec_()
def hide_footer(self):
# self.menu_bar.hide()
self.main_menu_push_button.hide()
self.pvp_push_button.hide()
self.pvai_push_button.hide()
self.rules_push_button.hide()
self.game_history_push_button.hide()
self.quit_push_button.hide()
def show_footer(self):
# self.menu_bar.show()
self.main_menu_push_button.show()
self.pvp_push_button.show()
self.pvai_push_button.show()
self.rules_push_button.show()
self.game_history_push_button.show()
self.quit_push_button.show()
def load_game(self):
self.stack_layout.setCurrentIndex(2)
moves = self.game_history_widget.moves
self.pvp_widget.reset_clicked()
for col in moves:
self.pvp_widget.column_clicked(0,int(col))
| mit | 4,348,574,129,228,213,000 | 38.365385 | 93 | 0.657385 | false | 3.623009 | false | false | false |
autostack/pytest-autostack | autostack/redisq.py | 1 | 2867 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import redis
#import zmq
import time
from uuid import uuid4
from autostack.utils import get_open_port
__author__ = 'Avi Tal <[email protected]>'
__date__ = 'Sep 6, 2015'
def gen_key(name):
return 'redisqueue:{}'.format(name)
class RedisQueue(object):
"""
Simple Queue with Redis Backend
https://redis-py.readthedocs.org/en/latest/
"""
def __init__(self, name=None, **kwargs):
"""
The default connection parameters are:
host='localhost', port=6379, db=0
"""
self.__db = redis.Redis(**kwargs)
self.__key = name or gen_key(str(uuid4()))
def __len__(self):
"""Return the approximate size of the queue."""
return self.__db.llen(self.key)
@property
def key(self):
return self.__key
def empty(self):
"""Return True if the queue is empty, False otherwise."""
        return len(self) == 0
def clear(self):
self.__db.delete(self.key)
def put(self, item):
"""Put item into the queue."""
self.__db.rpush(self.key, item)
def get(self, block=True, timeout=None):
"""Remove and return an item from the queue.
If optional args block is true and timeout is None (the default), block
if necessary until an item is available."""
if block:
if timeout is None:
timeout = 0
item = self.__db.blpop(self.key, timeout=timeout)
if item is not None:
item = item[1]
else:
item = self.__db.lpop(self.key)
if item is not None:
if isinstance(item, str) and item != 'goodbye':
item = eval(item)
return item
def join(self):
self.put('goodbye')
#class ZeroMQueue(object):
# def __init__(self, name=None, port='5556', host='127.0.0.1'):
# self.topic = name or str(uuid4())
# port = port or get_open_port(host)
#
# subcontext = zmq.Context()
# self._subscriber = subcontext.socket(zmq.PULL)
# self._subscriber.bind('tcp://{}:{}'.format(host, port))
#
# pubcontext = zmq.Context()
# self._publisher = pubcontext.socket(zmq.PUSH)
# self._publisher.connect('tcp://{}:{}'.format(host, port))
#
# def put(self, item):
# self._publisher.send_json(item)
# time.sleep(1)
#
# def get(self, block=True, timeout=None):
# if block:
# item = self._subscriber.recv_json()
# else:
# try:
# item = self._subscriber.recv_json(flags=zmq.NOBLOCK)
# except zmq.Again as e:
# pass
# return item
#
# def join(self):
# self.put('goodbye')
| mit | -8,005,998,425,748,914,000 | 26.304762 | 79 | 0.553889 | false | 3.574813 | false | false | false |
saschwarz/django-periodicals | periodicals/urls.py | 1 | 4810 | from django.contrib import admin
from django.conf import settings
from django.conf.urls import patterns, url
from django.views.generic import TemplateView
from haystack.views import SearchView
from haystack.query import SearchQuerySet
from .views import (AuthorList, AuthorDetail,
ArticleDetail, ArticleTags,
IssueYear, IssueDetail,
PeriodicalList, PeriodicalDetail,
SeriesList, SeriesDetail)
# query results with most recent publication date first
sqs = SearchQuerySet().order_by('-pub_date')
urlpatterns = \
patterns('',
url(r'^search/',
SearchView(load_all=False,
template="periodicals/search.html",
searchqueryset=sqs,
),
name='haystack_search',
),
# not in sitemap
url(r'^authors/$',
AuthorList.as_view(),
name='periodicals_authors_list',
),
url(r'^authors/(?P<author_slug>[-\w]+)/$',
AuthorDetail.as_view(),
name='periodicals_author_detail'
),
url(r'^tags/$',
TemplateView.as_view(template_name='periodicals/tags.html'),
name='periodicals_tags',
),
url(r'^tag/(?P<tag>[^/]+)/$',
ArticleTags.as_view(template_name='periodicals/article_tag_detail.html'),
name='periodicals_article_tag_detail'
),
)
if settings.PERIODICALS_LINKS_ENABLED:
urlpatterns += \
patterns('',
# success for adding a link - don't include in sitemap
url(r'^links/added/$',
TemplateView.as_view(template_name='periodicals/link_success.html'),
name='periodicals_add_link_success'
),
# add a link to an article - don't include in sitemap
url(r'^links/(?P<periodical_slug>[-\w]+)/(?P<issue_slug>[-\w]+)/(?P<article_slug>[-\w]+)/$',
'periodicals.views.add_article_link',
name='periodicals_add_article_link'
),
# add a link to an issue - don't include in sitemap
url(r'^links/(?P<periodical_slug>[-\w]+)/(?P<issue_slug>[-\w]+)/$',
'periodicals.views.add_issue_link',
name='periodicals_add_issue_link'
),
# Page showing all periodical Issues and Articles with external links
url(r'^links/(?P<periodical_slug>[-\w]+)/$',
'periodicals.views.links',
name='periodicals_links'
),
)
urlpatterns += \
patterns('',
# periodical detail including list of periodical's years
url(r'^(?P<periodical_slug>[-\w]+)/$',
PeriodicalDetail.as_view(),
name='periodicals_periodical_detail'
),
# list of periodical's issues and articles viewable online
url(r'^(?P<periodical_slug>[-\w]+)/online/$',
'periodicals.views.read_online',
name='periodicals_read_online'
),
# list of periodical's issues for a year - not in sitemap
url(r'^(?P<periodical_slug>[-\w]+)/(?P<year>\d{4})/$',
IssueYear.as_view(),
name='periodicals_issue_year'
),
# list of periodical's series - not in sitemap
url(r'^(?P<periodical_slug>[-\w]+)/series/$',
SeriesList.as_view(),
name='periodicals_series_list'
),
# list of articles in a series - not in sitemap
url(r'^(?P<periodical_slug>[-\w]+)/series/(?P<series>.+)/$',
SeriesDetail.as_view(),
name='periodicals_series_detail'
),
# one periodical issue
url(r'^(?P<periodical_slug>[-\w]+)/(?P<issue_slug>[-\w]+)/$',
IssueDetail.as_view(),
name='periodicals_issue_detail'
),
# one article
url(r'^(?P<periodical_slug>[-\w]+)/(?P<issue_slug>[-\w]+)/(?P<article_slug>[-\w]+)/$',
ArticleDetail.as_view(),
name='periodicals_article_detail'
),
# list of periodicals - not in sitemap
url(r'',
PeriodicalList.as_view(),
name='periodicals_list'
),
)
admin.autodiscover()
| bsd-3-clause | -8,323,299,732,640,862,000 | 36.874016 | 109 | 0.477755 | false | 4.069374 | false | false | false |
thcrock/map-matching-visualizer | core/matchers/osrm.py | 1 | 3294 | '''
Basic utilities for matching coordinates to the street grid
using the OSRM match endpoint
'''
import logging
import requests
from django.conf import settings
from core.matchers.base import BaseMatcher
logger = logging.getLogger(__name__)
class OsrmMatcher(BaseMatcher):
def _match_output(self):
coords = self.raw_coords
coord_string = ';'.join(
"%s,%s" % (lon, lat) for lon, lat in coords
)
radiuses = self.radiuses or [settings.OSRM.DEFAULT_RADIUS] * len(coords)
radius_string = ';'.join(map(str, radiuses))
options = {
'radiuses': radius_string,
'geometries': 'geojson',
'annotations': 'true',
'overview': 'full',
}
request_url = '{}/{}'.format(
settings.OSRM.MATCH_ENDPOINT,
coord_string
)
logger.debug('Request url: {}'.format(request_url))
response = requests.get(request_url, params=options)
output = response.json()
if 'tracepoints' not in output:
logger.error('No tracepoints found for {}'.format(output))
raise IOError(output)
logger.debug('Match response: {}'.format(output))
return output
def unsnapped_points(self):
unsnapped_points = []
for index, tracepoint in enumerate(self.output['tracepoints']):
if not tracepoint:
logger.warning('Tracepoint index {} not found'.format(index))
unsnapped_points.append(index)
return unsnapped_points
def snapped_points(self):
return [
tracepoint.get('location') if tracepoint else None
for tracepoint
in self.output['tracepoints']
]
def snapped_names(self):
return [
tracepoint.get('name') if tracepoint else None
for tracepoint
in self.output['tracepoints']
]
def tracepoint_nodes(self, tracepoint_index):
node_lookup = set()
nodes = []
tracepoint = self.output['tracepoints'][tracepoint_index]
if tracepoint:
legs = self.output['matchings'][tracepoint['matchings_index']]['legs']
if len(legs) == tracepoint['waypoint_index']:
return []
leg = legs[tracepoint['waypoint_index']]
for node in leg['annotation']['nodes']:
if node not in node_lookup:
node_lookup.add(node)
nodes.append(node)
return nodes
else:
return []
def _generate_nodes(self):
node_lookup = set()
nodes = []
for index, tracepoint in enumerate(self.output['tracepoints']):
if tracepoint:
route = self.output['matchings'][tracepoint['matchings_index']]
legs = route['legs']
if tracepoint['waypoint_index'] == len(legs):
continue
leg = legs[tracepoint['waypoint_index']]
leg_nodes = leg['annotation']['nodes']
for node in leg_nodes:
if node not in node_lookup:
node_lookup.add(node)
nodes.append((node, index))
return nodes
| gpl-3.0 | 7,802,764,669,465,882,000 | 34.042553 | 82 | 0.549787 | false | 4.41555 | false | false | false |
appleseedhq/cortex | test/IECoreScene/SmoothSmoothSkinningWeightsOpTest.py | 5 | 20115 | ##########################################################################
#
# Copyright (c) 2010, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import math
import unittest
import random
import imath
import IECore
import IECoreScene
class SmoothSmoothSkinningWeightsOpTest( unittest.TestCase ) :
def mesh( self ) :
vertsPerFace = IECore.IntVectorData( [ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4 ] )
vertexIds = IECore.IntVectorData( [
0, 1, 3, 2, 2, 3, 5, 4, 4, 5, 7, 6, 6, 7, 9, 8,
8, 9, 11, 10, 10, 11, 13, 12, 12, 13, 15, 14, 14, 15, 1, 0,
1, 15, 13, 3, 3, 13, 11, 5, 5, 11, 9, 7, 14, 0, 2, 12,
12, 2, 4, 10, 10, 4, 6, 8
] )
return IECoreScene.MeshPrimitive( vertsPerFace, vertexIds )
def createSSD( self, offsets, counts, indices, weights ) :
names = IECore.StringVectorData( [ "|joint1", "|joint1|joint2", "|joint1|joint2|joint3" ] )
poses = IECore.M44fVectorData( [
imath.M44f( 1, -0, 0, -0, -0, 1, -0, 0, 0, -0, 1, -0, -0, 2, -0, 1 ),
imath.M44f( 1, -0, 0, -0, -0, 1, -0, 0, 0, -0, 1, -0, -0, 0, -0, 1 ),
imath.M44f( 1, -0, 0, -0, -0, 1, -0, 0, 0, -0, 1, -0, -0, -2, -0, 1 )
] )
return IECoreScene.SmoothSkinningData( names, poses, offsets, counts, indices, weights )
def original( self ) :
offsets = IECore.IntVectorData( [ 0, 1, 2, 4, 6, 7, 8, 10, 12, 14, 16, 17, 18, 20, 22, 23 ] )
counts = IECore.IntVectorData( [ 1, 1, 2, 2, 1, 1, 2, 2, 2, 2, 1, 1, 2, 2, 1, 1 ] )
indices = IECore.IntVectorData( [ 0, 0, 0, 1, 0, 1, 1, 1, 1, 2, 1, 2, 1, 2, 1, 2, 1, 1, 0, 1, 0, 1, 0, 0 ] )
weights = IECore.FloatVectorData( [
1, 1, 0.8, 0.2, 0.8, 0.2, 1, 1, 0.5, 0.5, 0.5, 0.5,
0.5, 0.5, 0.5, 0.5, 1, 1, 0.8, 0.2, 0.8, 0.2, 1, 1
] )
return self.createSSD( offsets, counts, indices, weights )
def smooth1_50( self ) :
offsets = IECore.IntVectorData( [ 0, 2, 4, 6, 8, 11, 14, 16, 18, 20, 22, 25, 28, 30, 32, 34 ] )
counts = IECore.IntVectorData( [ 2, 2, 2, 2, 3, 3, 2, 2, 2, 2, 3, 3, 2, 2, 2, 2 ] )
indices = IECore.IntVectorData( [
0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 2, 0, 1, 2, 1, 2, 1, 2,
1, 2, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 0, 1, 0, 1, 0, 1
] )
weights = IECore.FloatVectorData( [
0.966667, 0.0333333, 0.966667, 0.0333333, 0.725, 0.275, 0.725, 0.275,
0.1, 0.8375, 0.0625, 0.1, 0.8375, 0.0625, 0.583333, 0.416667,
0.583333, 0.416667, 0.583333, 0.416667, 0.583333, 0.416667, 0.1, 0.8375,
0.0625, 0.1, 0.8375, 0.0625, 0.725, 0.275, 0.725, 0.275,
0.966667, 0.0333333, 0.966667, 0.0333333
] )
return self.createSSD( offsets, counts, indices, weights )
def smooth1_100( self ) :
offsets = IECore.IntVectorData( [ 0, 2, 4, 6, 8, 11, 14, 16, 18, 20, 22, 25, 28, 30, 32, 34 ] )
counts = IECore.IntVectorData( [ 2, 2, 2, 2, 3, 3, 2, 2, 2, 2, 3, 3, 2, 2, 2, 2 ] )
indices = IECore.IntVectorData( [
0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 2, 0, 1, 2, 1, 2, 1, 2,
1, 2, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 0, 1, 0, 1, 0, 1
] )
weights = IECore.FloatVectorData( [
0.933333, 0.0666667, 0.933333, 0.0666667, 0.65, 0.35, 0.65, 0.35,
0.2, 0.675, 0.125, 0.2, 0.675, 0.125, 0.666667, 0.333333,
0.666667, 0.333333, 0.666667, 0.333333, 0.666667, 0.333333, 0.2, 0.675,
0.125, 0.2, 0.675, 0.125, 0.65, 0.35, 0.65, 0.35,
0.933333, 0.0666667, 0.933333, 0.0666667
] )
return self.createSSD( offsets, counts, indices, weights )
def smooth3_30( self ) :
offsets = IECore.IntVectorData( [ 0, 3, 6, 9, 12, 15, 18, 21, 24, 27, 30, 33, 36, 39, 42, 45 ] )
counts = IECore.IntVectorData( [ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3 ] )
indices = IECore.IntVectorData( [
0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2,
0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2
] )
weights = IECore.FloatVectorData( [
0.933725, 0.0659938, 0.00028125, 0.933725, 0.0659938, 0.00028125, 0.691672, 0.301016,
0.0073125, 0.691672, 0.301016, 0.0073125, 0.145912, 0.767439, 0.0866484, 0.145912,
0.767439, 0.0866484, 0.0161625, 0.6094, 0.374438, 0.0161625, 0.6094, 0.374438,
0.0161625, 0.6094, 0.374438, 0.0161625, 0.6094, 0.374438, 0.145912, 0.767439,
0.0866484, 0.145912, 0.767439, 0.0866484, 0.691672, 0.301016, 0.0073125, 0.691672,
0.301016, 0.0073125, 0.933725, 0.0659938, 0.00028125, 0.933725, 0.0659938, 0.00028125
] )
return self.createSSD( offsets, counts, indices, weights )
def smoothSelectVerts( self ) :
offsets = IECore.IntVectorData( [ 0, 1, 2, 4, 6, 9, 10, 12, 14, 16, 18, 21, 24, 26, 28, 29 ] )
counts = IECore.IntVectorData( [ 1, 1, 2, 2, 3, 1, 2, 2, 2, 2, 3, 3, 2, 2, 1, 1 ] )
indices = IECore.IntVectorData( [
0, 0, 0, 1, 0, 1, 0, 1, 2, 1, 1, 2, 1, 2, 1, 2,
1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 0, 1, 0, 0
] )
weights = IECore.FloatVectorData( [
1, 1, 0.725, 0.275, 0.725, 0.275, 0.1, 0.8375, 0.0625,
1, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.1,
0.8375, 0.0625, 0.1, 0.8375, 0.0625, 0.725, 0.275, 0.8, 0.2, 1, 1
] )
return self.createSSD( offsets, counts, indices, weights )
def smoothWithLocks( self ) :
offsets = IECore.IntVectorData( [ 0, 1, 2, 5, 8, 10, 12, 14, 16, 18, 20, 22, 24, 27, 30, 31 ] )
counts = IECore.IntVectorData( [ 1, 1, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 1, 1 ] )
indices = IECore.IntVectorData( [
0, 0, 0, 1, 2, 0, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2,
1, 2, 1, 2, 1, 2, 1, 2, 0, 1, 2, 0, 1, 2, 0, 0
] )
weights = IECore.FloatVectorData( [
1, 1, 0.8, 0.193898, 0.00610161, 0.8, 0.193898, 0.00610161,
0.902086, 0.0979137, 0.902086, 0.0979137, 0.624712, 0.375288, 0.624712, 0.375288,
0.624712, 0.375288, 0.624712, 0.375288, 0.902086, 0.0979137, 0.902086, 0.0979137,
0.8, 0.193898, 0.00610161, 0.8, 0.193898, 0.00610161, 1, 1
] )
return self.createSSD( offsets, counts, indices, weights )
def testTypes( self ) :
""" Test SmoothSmoothSkinningWeightsOp types"""
ssd = self.original()
op = IECoreScene.SmoothSmoothSkinningWeightsOp()
self.assertEqual( type(op), IECoreScene.SmoothSmoothSkinningWeightsOp )
self.assertEqual( op.typeId(), IECoreScene.TypeId.SmoothSmoothSkinningWeightsOp )
op.parameters()['input'].setValue( IECore.IntData(1) )
self.assertRaises( RuntimeError, op.operate )
def testSmooth1_0( self ) :
""" Test SmoothSmoothSkinningWeightsOp with 1 iteration and 0.0 smooth-ratio"""
ssd = self.original()
op = IECoreScene.SmoothSmoothSkinningWeightsOp()
op.parameters()['input'].setValue( ssd )
op.parameters()['mesh'].setValue( self.mesh() )
op.parameters()['smoothingRatio'].setValue( 0.0 )
op.parameters()['iterations'].setValue( 1 )
op.parameters()['applyLocks'].setValue( False )
result = op.operate()
self.assertEqual( result.influenceNames(), ssd.influenceNames() )
self.assertEqual( result.influencePose(), ssd.influencePose() )
self.assertEqual( result.pointIndexOffsets(), ssd.pointIndexOffsets() )
self.assertEqual( result.pointInfluenceCounts(), ssd.pointInfluenceCounts() )
self.assertEqual( result.pointInfluenceIndices(), ssd.pointInfluenceIndices() )
self.assertEqual( result.pointInfluenceWeights(), ssd.pointInfluenceWeights() )
self.assertEqual( result, ssd )
def testSmooth1_100( self ) :
""" Test SmoothSmoothSkinningWeightsOp with 1 iteration and 1.0 smooth-ratio"""
ssd = self.original()
op = IECoreScene.SmoothSmoothSkinningWeightsOp()
op.parameters()['input'].setValue( ssd )
op.parameters()['mesh'].setValue( self.mesh() )
op.parameters()['smoothingRatio'].setValue( 1.0 )
op.parameters()['iterations'].setValue( 1 )
op.parameters()['applyLocks'].setValue( False )
result = op.operate()
self.assertEqual( result.influenceNames(), ssd.influenceNames() )
self.assertEqual( result.influencePose(), ssd.influencePose() )
self.assertNotEqual( result.pointIndexOffsets(), ssd.pointIndexOffsets() )
self.assertNotEqual( result.pointInfluenceCounts(), ssd.pointInfluenceCounts() )
self.assertNotEqual( result.pointInfluenceIndices(), ssd.pointInfluenceIndices() )
self.assertNotEqual( result.pointInfluenceWeights(), ssd.pointInfluenceWeights() )
self.assertNotEqual( result, ssd )
smooth = self.smooth1_100()
self.assertEqual( result.influenceNames(), smooth.influenceNames() )
self.assertEqual( result.influencePose(), smooth.influencePose() )
self.assertEqual( result.pointIndexOffsets(), smooth.pointIndexOffsets() )
self.assertEqual( result.pointInfluenceCounts(), smooth.pointInfluenceCounts() )
self.assertEqual( result.pointInfluenceIndices(), smooth.pointInfluenceIndices() )
resultWeights = result.pointInfluenceWeights()
smoothWeights = smooth.pointInfluenceWeights()
for i in range( 0, resultWeights.size() ) :
self.assertAlmostEqual( resultWeights[i], smoothWeights[i], 6 )
def testSmooth1_50( self ) :
""" Test SmoothSmoothSkinningWeightsOp with 1 iteration and 0.5 smooth-ratio"""
ssd = self.original()
op = IECoreScene.SmoothSmoothSkinningWeightsOp()
op.parameters()['input'].setValue( ssd )
op.parameters()['mesh'].setValue( self.mesh() )
op.parameters()['smoothingRatio'].setValue( 0.5 )
op.parameters()['iterations'].setValue( 1 )
op.parameters()['applyLocks'].setValue( False )
result = op.operate()
self.assertEqual( result.influenceNames(), ssd.influenceNames() )
self.assertEqual( result.influencePose(), ssd.influencePose() )
self.assertNotEqual( result.pointIndexOffsets(), ssd.pointIndexOffsets() )
self.assertNotEqual( result.pointInfluenceCounts(), ssd.pointInfluenceCounts() )
self.assertNotEqual( result.pointInfluenceIndices(), ssd.pointInfluenceIndices() )
self.assertNotEqual( result.pointInfluenceWeights(), ssd.pointInfluenceWeights() )
self.assertNotEqual( result, ssd )
smooth = self.smooth1_50()
self.assertEqual( result.influenceNames(), smooth.influenceNames() )
self.assertEqual( result.influencePose(), smooth.influencePose() )
self.assertEqual( result.pointIndexOffsets(), smooth.pointIndexOffsets() )
self.assertEqual( result.pointInfluenceCounts(), smooth.pointInfluenceCounts() )
self.assertEqual( result.pointInfluenceIndices(), smooth.pointInfluenceIndices() )
resultWeights = result.pointInfluenceWeights()
smoothWeights = smooth.pointInfluenceWeights()
for i in range( 0, resultWeights.size() ) :
self.assertAlmostEqual( resultWeights[i], smoothWeights[i], 6 )
def testSmooth3_30( self ) :
""" Test SmoothSmoothSkinningWeightsOp with 3 iterations and 0.3 smooth-ratio"""
ssd = self.original()
op = IECoreScene.SmoothSmoothSkinningWeightsOp()
op.parameters()['input'].setValue( ssd )
op.parameters()['mesh'].setValue( self.mesh() )
op.parameters()['smoothingRatio'].setValue( 0.3 )
op.parameters()['iterations'].setValue( 3 )
op.parameters()['applyLocks'].setValue( False )
result = op.operate()
self.assertEqual( result.influenceNames(), ssd.influenceNames() )
self.assertEqual( result.influencePose(), ssd.influencePose() )
self.assertNotEqual( result.pointIndexOffsets(), ssd.pointIndexOffsets() )
self.assertNotEqual( result.pointInfluenceCounts(), ssd.pointInfluenceCounts() )
self.assertNotEqual( result.pointInfluenceIndices(), ssd.pointInfluenceIndices() )
self.assertNotEqual( result.pointInfluenceWeights(), ssd.pointInfluenceWeights() )
self.assertNotEqual( result, ssd )
smooth = self.smooth3_30()
self.assertEqual( result.influenceNames(), smooth.influenceNames() )
self.assertEqual( result.influencePose(), smooth.influencePose() )
self.assertEqual( result.pointIndexOffsets(), smooth.pointIndexOffsets() )
self.assertEqual( result.pointInfluenceCounts(), smooth.pointInfluenceCounts() )
self.assertEqual( result.pointInfluenceIndices(), smooth.pointInfluenceIndices() )
resultWeights = result.pointInfluenceWeights()
smoothWeights = smooth.pointInfluenceWeights()
for i in range( 0, resultWeights.size() ) :
self.assertAlmostEqual( resultWeights[i], smoothWeights[i], 6 )
def testLocks( self ) :
""" Test SmoothSmoothSkinningWeightsOp locking mechanism"""
ssd = self.original()
op = IECoreScene.SmoothSmoothSkinningWeightsOp()
op.parameters()['input'].setValue( ssd )
op.parameters()['mesh'].setValue( self.mesh() )
op.parameters()['smoothingRatio'].setValue( 0.3 )
op.parameters()['iterations'].setValue( 3 )
op.parameters()['applyLocks'].setValue( True )
op.parameters()['influenceLocks'].setValue( IECore.BoolVectorData( [ True, False, False ] ) )
result = op.operate()
self.assertEqual( result.influenceNames(), ssd.influenceNames() )
self.assertEqual( result.influencePose(), ssd.influencePose() )
self.assertNotEqual( result.pointIndexOffsets(), ssd.pointIndexOffsets() )
self.assertNotEqual( result.pointInfluenceCounts(), ssd.pointInfluenceCounts() )
self.assertNotEqual( result.pointInfluenceIndices(), ssd.pointInfluenceIndices() )
self.assertNotEqual( result.pointInfluenceWeights(), ssd.pointInfluenceWeights() )
self.assertNotEqual( result, ssd )
smooth = self.smoothWithLocks()
self.assertEqual( result.influenceNames(), smooth.influenceNames() )
self.assertEqual( result.influencePose(), smooth.influencePose() )
self.assertEqual( result.pointIndexOffsets(), smooth.pointIndexOffsets() )
self.assertEqual( result.pointInfluenceCounts(), smooth.pointInfluenceCounts() )
self.assertEqual( result.pointInfluenceIndices(), smooth.pointInfluenceIndices() )
resultWeights = result.pointInfluenceWeights()
smoothWeights = smooth.pointInfluenceWeights()
for i in range( 0, resultWeights.size() ) :
self.assertAlmostEqual( resultWeights[i], smoothWeights[i], 6 )
# make sure locked weights did not change
dop = IECoreScene.DecompressSmoothSkinningDataOp()
dop.parameters()['input'].setValue( result )
decompressedResult = dop.operate()
dop.parameters()['input'].setValue( ssd )
decompressedOrig = dop.operate()
resultIndices = decompressedResult.pointInfluenceIndices()
resultWeights = decompressedResult.pointInfluenceWeights()
origWeights = decompressedOrig.pointInfluenceWeights()
for i in range( 0, resultWeights.size() ) :
if resultIndices[i] == 0 :
self.assertAlmostEqual( resultWeights[i], origWeights[i], 6 )
# make sure the result is normalized
nop = IECoreScene.NormalizeSmoothSkinningWeightsOp()
nop.parameters()['input'].setValue( result )
normalized = nop.operate()
self.assertEqual( result.influenceNames(), normalized.influenceNames() )
self.assertEqual( result.influencePose(), normalized.influencePose() )
self.assertEqual( result.pointIndexOffsets(), normalized.pointIndexOffsets() )
self.assertEqual( result.pointInfluenceCounts(), normalized.pointInfluenceCounts() )
self.assertEqual( result.pointInfluenceIndices(), normalized.pointInfluenceIndices() )
resultWeights = result.pointInfluenceWeights()
normalizedWeights = normalized.pointInfluenceWeights()
for i in range( 0, resultWeights.size() ) :
self.assertAlmostEqual( resultWeights[i], normalizedWeights[i], 6 )
def testVertexSelection( self ) :
""" Test SmoothSmoothSkinningWeightsOp using selected vertices"""
ssd = self.original()
op = IECoreScene.SmoothSmoothSkinningWeightsOp()
op.parameters()['input'].setValue( ssd )
op.parameters()['mesh'].setValue( self.mesh() )
op.parameters()['smoothingRatio'].setValue( 0.5 )
op.parameters()['iterations'].setValue( 1 )
op.parameters()['applyLocks'].setValue( False )
op.parameters()['vertexIndices'].setFrameListValue( IECore.FrameList.parse( "2-4,10-12" ) )
result = op.operate()
self.assertEqual( result.influenceNames(), ssd.influenceNames() )
self.assertEqual( result.influencePose(), ssd.influencePose() )
self.assertNotEqual( result.pointIndexOffsets(), ssd.pointIndexOffsets() )
self.assertNotEqual( result.pointInfluenceCounts(), ssd.pointInfluenceCounts() )
self.assertNotEqual( result.pointInfluenceIndices(), ssd.pointInfluenceIndices() )
self.assertNotEqual( result.pointInfluenceWeights(), ssd.pointInfluenceWeights() )
self.assertNotEqual( result, ssd )
smooth = self.smoothSelectVerts()
self.assertEqual( result.influenceNames(), smooth.influenceNames() )
self.assertEqual( result.influencePose(), smooth.influencePose() )
self.assertEqual( result.pointIndexOffsets(), smooth.pointIndexOffsets() )
self.assertEqual( result.pointInfluenceCounts(), smooth.pointInfluenceCounts() )
self.assertEqual( result.pointInfluenceIndices(), smooth.pointInfluenceIndices() )
resultWeights = result.pointInfluenceWeights()
smoothWeights = smooth.pointInfluenceWeights()
for i in range( 0, resultWeights.size() ) :
self.assertAlmostEqual( resultWeights[i], smoothWeights[i], 6 )
# make sure only selected vertices changed
dop = IECoreScene.DecompressSmoothSkinningDataOp()
dop.parameters()['input'].setValue( result )
decompressedResult = dop.operate()
dop.parameters()['input'].setValue( ssd )
decompressedOrig = dop.operate()
resultOffsets = decompressedResult.pointIndexOffsets()
resultCounts = decompressedResult.pointInfluenceCounts()
resultIndices = decompressedResult.pointInfluenceIndices()
resultWeights = decompressedResult.pointInfluenceWeights()
origWeights = decompressedOrig.pointInfluenceWeights()
nonSelectedVerts = [ x for x in range( 0, resultOffsets.size() ) if x not in op.parameters()['vertexIndices'].getFrameListValue().asList() ]
for i in nonSelectedVerts :
for j in range( 0, resultCounts[i] ) :
current = resultOffsets[i] + j
self.assertAlmostEqual( resultWeights[current], origWeights[current], 6 )
def testErrorStates( self ) :
""" Test SmoothSmoothSkinningWeightsOp with various error states"""
ssd = self.original()
op = IECoreScene.SmoothSmoothSkinningWeightsOp()
op.parameters()['input'].setValue( ssd )
# bad mesh
op.parameters()['mesh'].setValue( IECore.IntData(1) )
self.assertRaises( RuntimeError, op.operate )
# wrong number of verts
op.parameters()['mesh'].setValue( op.parameters()['mesh'].defaultValue )
self.assertRaises( RuntimeError, op.operate )
# wrong number of locks
op.parameters()['mesh'].setValue( self.mesh() )
op.parameters()['applyLocks'].setValue( True )
op.parameters()['influenceLocks'].setValue( IECore.BoolVectorData( [ True, False, True, False ] ) )
self.assertRaises( RuntimeError, op.operate )
# invalid vertex ids
op.parameters()['applyLocks'].setValue( False )
op.parameters()['vertexIndices'].setFrameListValue( IECore.FrameList.parse( "10-18" ) )
self.assertRaises( RuntimeError, op.operate )
if __name__ == "__main__":
unittest.main()
| bsd-3-clause | 6,765,676,124,956,696,000 | 45.454965 | 142 | 0.693711 | false | 2.972074 | true | false | false |
Saevon/webdnd | shared/management/__init__.py | 1 | 1120 | from django.db.models.signals import post_syncdb
from django.conf import settings
from django.core import management
import os
import re
FIXTURE_RE = re.compile(r'^[^.]*.json$')
def load_data(sender, **kwargs):
"""
Loads fixture data after loading the last installed app
"""
if kwargs['app'].__name__ == settings.INSTALLED_APPS[-1] + ".models":
fixture_files = []
for loc in settings.INITIAL_FIXTURE_DIRS:
loc = os.path.abspath(loc)
if os.path.exists(loc):
fixture_files += os.listdir(loc)
fixture_files = filter(lambda v: FIXTURE_RE.match(v), fixture_files)
fixture_files = [os.path.join(loc, f) for f in fixture_files]
if len(fixture_files) > 0:
print "Initializing Fixtures:"
for fixture in fixture_files:
print " >> %s" % (fixture)
management.call_command('loaddata', fixture, verbosity=0)
# Update the index
print 'Generating Index'
management.call_command('index', 'all', flush=True, verbosity=1)
post_syncdb.connect(load_data)
| mit | -2,741,098,657,701,872,600 | 30.111111 | 76 | 0.6125 | false | 3.708609 | false | false | false |
kmoocdev2/edx-platform | openedx/core/djangoapps/lang_pref/api.py | 13 | 3474 | # -*- coding: utf-8 -*-
""" Python API for language and translation management. """
from collections import namedtuple
from django.conf import settings
from django.utils.translation import ugettext as _
from openedx.core.djangoapps.dark_lang.models import DarkLangConfig
from openedx.core.djangoapps.site_configuration.helpers import get_value
# Named tuples can be referenced using object-like variable
# deferencing, making the use of tuples more readable by
# eliminating the need to see the context of the tuple packing.
Language = namedtuple('Language', 'code name')
def header_language_selector_is_enabled():
"""Return true if the header language selector has been enabled via settings or site-specific configuration."""
setting = get_value('SHOW_HEADER_LANGUAGE_SELECTOR', settings.FEATURES.get('SHOW_HEADER_LANGUAGE_SELECTOR', False))
# The SHOW_LANGUAGE_SELECTOR setting is deprecated, but might still be in use on some installations.
deprecated_setting = get_value('SHOW_LANGUAGE_SELECTOR', settings.FEATURES.get('SHOW_LANGUAGE_SELECTOR', False))
return setting or deprecated_setting
def footer_language_selector_is_enabled():
"""Return true if the footer language selector has been enabled via settings or site-specific configuration."""
return get_value('SHOW_FOOTER_LANGUAGE_SELECTOR', settings.FEATURES.get('SHOW_FOOTER_LANGUAGE_SELECTOR', False))
def released_languages():
"""Retrieve the list of released languages.
Constructs a list of Language tuples by intersecting the
list of valid language tuples with the list of released
language codes.
Returns:
list of Language: Languages in which full translations are available.
Example:
>>> print released_languages()
[Language(code='en', name=u'English'), Language(code='fr', name=u'Français')]
"""
released_language_codes = DarkLangConfig.current().released_languages_list
default_language_code = settings.LANGUAGE_CODE
if default_language_code not in released_language_codes:
released_language_codes.append(default_language_code)
released_language_codes.sort()
# Intersect the list of valid language tuples with the list
# of released language codes
return [
Language(language_info[0], language_info[1])
for language_info in settings.LANGUAGES
if language_info[0] in released_language_codes
]
def all_languages():
"""Retrieve the list of all languages, translated and sorted.
Returns:
list of (language code (str), language name (str)): the language names
are translated in the current activated language and the results sorted
alphabetically.
"""
languages = [(lang[0], _(lang[1])) for lang in settings.ALL_LANGUAGES] # pylint: disable=translation-of-non-string
return sorted(languages, key=lambda lang: lang[1])
def get_closest_released_language(target_language_code):
"""
Return the language code that most closely matches the target and is fully
supported by the LMS, or None if there are no fully supported languages that
match the target.
"""
match = None
languages = released_languages()
for language in languages:
if language.code == target_language_code:
match = language.code
break
elif (match is None) and (language.code[:2] == target_language_code[:2]):
match = language.code
return match
| agpl-3.0 | -8,022,781,380,228,279,000 | 35.946809 | 119 | 0.716384 | false | 4.368553 | false | false | false |
Quantify-world/apification | src/apification/utils/writeonce.py | 1 | 2624 | from itertools import chain
import warnings
class writeonce(object):
def __init__(self, *args, **kwargs):
self.name = None
self.__doc__ = kwargs.pop('writeonce_doc', None)
self.msg = kwargs.pop('writeonce_msg', None)
self.args = args # for class decorator case
self.kwargs = kwargs
if args: # for property case
self.default = args[0]
def __call__(self, klass):
for attr_name in chain(self.args, self.kwargs.iterkeys()):
if hasattr(klass, attr_name):
raise TypeError(u'%s already has "%s" attribute: unable to add writeonce property' % (klass, attr_name))
default_args = []
if attr_name in self.kwargs:
default_args.append(self.kwargs[attr_name])
setattr(klass, attr_name, type(self)(*default_args))
return klass
@staticmethod
def iter_bases_attrs(klass):
iterables = []
for base in reversed(type.mro(klass)):
iterables.append(base.__dict__.iteritems())
return chain(*iterables)
def get_name(self, obj):
if not self.name:
for attr_name, attr_value in self.iter_bases_attrs(obj.__class__):
if attr_value is self:
self.name = attr_name
return self.name
def __get__(self, instance, owner):
if instance is None:
return self
key = '__writeonce_%s_%s' % (id(instance), self.get_name(instance))
if hasattr(instance, key):
return getattr(instance, key)
elif hasattr(self, 'default'):
return self.default
else:
raise AttributeError(u"%s has no attribute '%s'" % (instance, self.get_name(instance)))
def __set__(self, instance, value):
key = '__writeonce_%s_%s' % (id(instance), self.get_name(instance))
if not hasattr(instance, key):
setattr(instance, key, value)
elif getattr(instance, key) is value:
warnings.warn(u"Same value overwritten in writeonce attribute '%s' of '%s'" % (self.get_name(instance), instance))
else:
raise TypeError(
(self.msg or u"immutable property '%(name)s' of %(instance)s can't be modified") % {
'name': self.get_name(instance),
'instance': instance,
'old_value': getattr(instance, key),
'value': value})
def __delete__(self, instance):
raise TypeError(u"immutable property %s of %s can't be deleted" % (self.get_name(instance), instance))
| mit | 7,823,749,600,766,490,000 | 40 | 126 | 0.564405 | false | 4.1 | false | false | false |
etkirsch/legends-of-erukar | erukar/system/server/DifferentialMessageEngine.py | 1 | 4310 | from erukar.system.engine.commands import Map, Inventory, Skills, Stats
class DifferentialMessageEngine:
''''
The Differential Message Engine takes messages that need to be sent
to real life players and minimizes them before they are sent out.
This should reduce bandwidth drastically
'''
MapStateParams = [
'minX',
'minY',
'height',
'width',
'pixel',
'coordinates'
]
MapTypesForDiff = ['rooms', 'actions', 'lighting']
def __init__(self):
self.state = {}
def messages_for(self, instance, node, log):
yield from self.game(instance, node, log)
yield from self.map(node)
yield from self.skills(node)
yield from self.vitals(node)
yield from self.inventory(node)
def game(self, inst, node, log):
char = node.lifeform()
game_state = {
'wealth': char.wealth,
'log': log,
'location': inst.dungeon.overland_location.alias(),
'movementPoints': char.movement_allowed,
'actionPoints': {
'current': char.current_action_points,
'reserved': char.reserved_action_points
},
'turnOrder': inst.turn_manager.frontend_readable_turn_order()[:4],
'interactions': inst.get_interaction_results(node)
}
yield 'update state', game_state
def map(self, node):
cmd = node.create_command(Map)
new = cmd.execute().result_for(node.uid)[0]
yield from self.map_state_diff(node, new)
for _type in self.MapTypesForDiff:
yield from self._type_diff(node, _type, new[_type])
actors = new['actors']
yield 'update actors', actors
def map_state_diff(self, node, new):
map_state = {kw: new[kw] for kw in self.MapStateParams}
state = self.get(node, 'map_state', {})
coord_diff = self.diff(node, 'map_state', map_state, state)
if coord_diff:
yield 'update map state', coord_diff
def _type_diff(self, node, _type, new):
state = self.get(node, _type, {})
diff = self.diff(node, _type, new, state)
if diff:
yield 'update {}'.format(_type), diff
def inventory(self, node):
state = self.get(node, 'inventory', {})
cmd = node.create_command(Inventory)
new = cmd.execute().result_for(node.uid)[0]
diff = self.diff(node, 'inventory', new, state)
if diff:
yield 'set inventory', diff
def skills(self, node):
cmd = node.create_command(Skills)
new = cmd.execute().result_for(node.uid)[0]
state = self.get(node, 'skills', {})
skills_diff = self.diff(node, 'skills', new, state)
if skills_diff:
yield 'update skills', skills_diff
def vitals(self, node):
cmd = node.create_command(Stats)
new = cmd.execute().result_for(node.uid)[0]
        state = self.get(node, 'vitals', {})
        vitals_diff = self.diff(node, 'vitals', new, state)
        if vitals_diff:
            yield 'update vitals', new
def get(self, node, state_type, default):
node_state = self.state.get(node, {})
if not node_state:
self.state[node] = {}
specific_state = node_state.get(state_type, default)
if not specific_state:
self.state[node][state_type] = default
return specific_state
def diff(self, node, _type, new, state):
msg, state = self._dict_diffgen(node, new, state)
self.state[node][_type] = state
return msg
def _dict_diffgen(self, node, msg, state):
state = state or {}
diff = {}
for key in msg:
if key not in state or not isinstance(msg[key], type(state[key])):
state[key] = msg[key]
diff[key] = msg[key]
continue
if isinstance(msg[key], dict):
_diff, _state = self._dict_diffgen(node, msg[key], state[key])
if _diff:
diff[key] = _diff
state[key] = _state
continue
if msg[key] != state[key]:
diff[key] = msg[key]
state[key] = msg[key]
return diff, state
| agpl-3.0 | -5,780,901,964,227,090,000 | 34.04065 | 78 | 0.5529 | false | 3.751088 | false | false | false |
shirtsgroup/physical-validation | physical_validation/data/lammps_parser.py | 1 | 20326 | ###########################################################################
# #
# physical_validation, #
# a python package to test the physical validity of MD results #
# #
# Written by Pascal T. Merz <[email protected]> #
# Michael R. Shirts <[email protected]> #
# #
# Copyright (c) 2017-2021 University of Colorado Boulder #
# (c) 2012 The University of Virginia #
# #
###########################################################################
r"""
lammps_parser.py
"""
import numpy as np
from ..util import error as pv_error
from . import (
ObservableData,
SimulationData,
SystemData,
TrajectoryData,
UnitData,
parser,
)
class LammpsParser(parser.Parser):
"""
LammpsParser
"""
def units(self):
if self.__unit == "real":
return UnitData(
kb=8.314462435405199e-3 / 4.18400,
energy_str="kcal/mol",
energy_conversion=4.18400,
length_str="A",
length_conversion=0.1,
volume_str="A^3",
volume_conversion=1e-3,
temperature_str="K",
temperature_conversion=1,
pressure_str="atm",
pressure_conversion=1.01325,
time_str="fs",
time_conversion=1e-3,
)
else:
raise NotImplementedError("Only LAMMPS 'units real' is implemented.")
def __init__(self):
self.__unit = "lj"
# lammps energy codes
self.__lammps_energy_names = {
"kinetic_energy": "KinEng",
"potential_energy": "PotEng",
"total_energy": "TotEng",
"volume": "Vol",
"pressure": "Press",
"temperature": "Temp",
"constant_of_motion": "TotEng",
}
# BETA warning
print(
"###########################################################################"
)
print(
"# WARNING: The LAMMPS parser is an experimental feature under current #"
)
print(
"# development. You can help us to improve it by reporting errors #"
)
print(
"# at https://github.com/shirtsgroup/physical_validation #"
)
print(
"# Thank you! #"
)
print(
"###########################################################################"
)
def get_simulation_data(
self, ensemble=None, in_file=None, log_file=None, data_file=None, dump_file=None
):
"""
Parameters
----------
ensemble: EnsembleData, optional
in_file: str, optional
log_file: str, optional
data_file: str, optional
dump_file: str, optional
Returns
-------
result: SimulationData
"""
# input file
input_dict = None
if in_file is not None:
input_dict = self.__read_input_file(in_file)
if input_dict is not None:
self.__unit = input_dict["units"][0]
# data file
data_dict = None
if data_file is not None:
data_dict = self.__read_data_file(data_file)
# log file
log_dict = None
if log_file is not None:
log_dict = self.__read_log_file(log_file)
# dump file
dump_dict = None
if dump_file is not None:
dump_dict = self.__read_dump_file(dump_file)
# Create SimulationData object
result = SimulationData()
result.units = self.units()
# Ensemble must be provided
if ensemble is not None:
result.ensemble = ensemble
# trajectory data from dump
if dump_dict is not None:
result.trajectory = TrajectoryData(
dump_dict["position"], dump_dict["velocity"]
)
# system data
if data_dict is not None:
system = SystemData()
system.natoms = data_dict["Header"]["atoms"]
masses = data_dict["Masses"]
mass = []
molecule_idx = []
molec = -1
for atom in data_dict["Atoms"]:
mass.append(float(masses[atom["type"]][0]))
if molec != atom["molec"]:
molec = atom["molec"]
molecule_idx.append(atom["n"])
system.mass = mass
system.molecule_idx = molecule_idx
system.nconstraints = 0
system.nconstraints_per_molecule = np.zeros(len(system.molecule_idx))
system.ndof_reduction_tra = 0
system.ndof_reduction_rot = 0
if input_dict is not None:
if "shake" in input_dict["fix"] or "rattle" in input_dict["rattle"]:
print(
"NOTE: Found `fix shake` or `fix rattle`. Reading of\n"
" constraints is currently not implemented.\n"
" Please set system.nconstraints manually."
)
# center of mass constraining
if "recenter" in input_dict["fix"]:
system.ndof_reduction_tra = 3
result.system = system
# observable data
if log_dict is not None:
result.observables = ObservableData()
for key, lammpskey in self.__lammps_energy_names.items():
if lammpskey in log_dict:
result.observables[key] = log_dict[lammpskey]
if self.__lammps_energy_names["volume"] not in log_dict:
if dump_dict is not None:
vol = []
for b in dump_dict["box"]:
vol.append(b[0] * b[1] * b[2])
if len(vol) == 1:
vol = vol * result.observables.nframes
if len(vol) != result.observables.nframes and np.allclose(
[vol[0]] * len(vol), vol
):
vol = [vol[0]] * result.observables.nframes
key = "volume"
result.observables[key] = vol
return result
@staticmethod
def __read_input_file(name):
# parse input file
input_dict = {}
with open(name) as f:
for line in f:
line = line.split("#")[0].strip()
if not line:
continue
option = line.split(maxsplit=1)[0].strip()
value = line.split(maxsplit=1)[1].strip()
if option == "fix":
if "fix" not in input_dict:
input_dict["fix"] = {}
line = line.split()
style = line[3]
if style not in input_dict["fix"]:
input_dict["fix"][style] = []
input_dict["fix"][style].append(
{
"ID": line[1],
"group-ID": line[2],
"style": style,
"args": line[4:],
}
)
elif option == "unfix":
del_id = line.split()[1]
for style in input_dict["fix"]:
input_dict["fix"][style] = [
fix
for fix in input_dict["fix"][style]
if fix["ID"] != del_id
]
elif option in input_dict:
input_dict[option].append(value)
else:
input_dict[option] = [value]
return input_dict
@staticmethod
def __read_data_file(name):
# > available blocks
blocks = [
"Header", # 0
"Masses", # 1
"Nonbond Coeffs",
"Bond Coeffs",
"Angle Coeffs",
"Dihedral Coeffs",
"Improper Coeffs",
"BondBond Coeffs",
"BondAngle Coeffs",
"MiddleBondTorsion Coeffs",
"EndBondTorsion Coeffs",
"AngleTorsion Coeffs",
"AngleAngleTorsion Coeffs",
"BondBond13 Coeffs",
"AngleAngle Coeffs",
"Atoms", # 15
"Velocities", # 16
"Bonds", # 17
"Angles",
"Dihedrals",
"Impropers",
]
file_blocks = {}
# > read file
with open(name) as f:
# header section must appear first in file
block = "Header"
file_blocks["Header"] = []
# 1st 2 lines are ignored
next(f)
next(f)
for line in f:
line = line.strip()
if not line:
continue
if line in blocks:
block = line
file_blocks[block] = []
continue
file_blocks[block].append(line)
data_dict = {}
# > handle header
block = "Header"
header_single = [
"atoms",
"bonds",
"angles",
"dihedrals",
"impropers",
"atom types",
"bond types",
"angle types",
"dihedral types",
"improper types",
]
header_double = ["xlo xhi", "ylo yhi", "zlo zhi"]
# default values
data_dict[block] = {hs: 0 for hs in header_single}
data_dict[block].update({hd: [0.0, 0.0] for hd in header_double})
# read out
for line in file_blocks[block]:
if line.split(maxsplit=1)[1] in header_single:
hs = line.split(maxsplit=1)[1]
data_dict[block][hs] = int(line.split(maxsplit=1)[0])
elif line.split(maxsplit=2)[2] in header_double:
hs = line.split(maxsplit=2)[2]
data_dict[block][hs] = [
float(line.split(maxsplit=2)[0]),
float(line.split(maxsplit=2)[1]),
]
else:
raise pv_error.FileFormatError(name, "Unknown header line")
# > handle coeffs
# N type coeff1 coeff2 ...
for block in blocks[1:15]:
if block not in file_blocks:
continue
data_dict[block] = {}
for line in file_blocks[block]:
line = line.split()
data_dict[block][int(line[0])] = [line[1]] + [
float(c) for c in line[2:]
]
# > handle atoms
# n molecule-tag atom-type q x y z nx ny nz
block = blocks[15]
data_dict[block] = []
for line in file_blocks[block]:
line = line.split()
if len(line) == 7:
data_dict[block].append(
{
"n": int(line[0]),
"molec": int(line[1]),
"type": float(line[2]),
"q": float(line[3]),
"x": float(line[4]),
"y": float(line[5]),
"z": float(line[6]),
}
)
else:
data_dict[block].append(
{
"n": int(line[0]),
"molec": int(line[1]),
"type": float(line[2]),
"q": float(line[3]),
"x": float(line[4]),
"y": float(line[5]),
"z": float(line[6]),
"nx": float(line[7]),
"ny": float(line[8]),
"nz": float(line[9]),
}
)
# > handle velocities
# N vx vy vz
block = blocks[16]
if block in file_blocks:
data_dict[block] = []
for line in file_blocks[block]:
line = line.split()
data_dict[block].append(
{
"n": int(line[0]),
"vx": float(line[1]),
"vy": float(line[2]),
"vz": float(line[3]),
}
)
# > handle bonds etc
# N bond-type atom-1 atom-2 ...
for block in blocks[17:]:
if block not in file_blocks:
continue
data_dict[block] = []
for line in file_blocks[block]:
line = line.split()
data_dict[block].append(
{"n": int(line[0]), "atoms": [int(c) for c in line[1:]]}
)
# return dictionary
return data_dict
@staticmethod
def __read_log_file(name):
# parse log file
def start_single(line1, line2):
if not line1.split():
return False
if len(line1.split()) != len(line2.split()):
return False
try:
[float(nn) for nn in line2.split()]
except ValueError:
return False
return True
def end_single(line, length):
if len(line.split()) != length:
return True
try:
[float(nn) for nn in line.split()]
except ValueError:
return True
return False
def start_multi(line):
if "---- Step" in line and "- CPU =" in line:
return True
return False
def end_multi(line):
line = line.split()
# right length (is it actually always 9??)
if len(line) == 0 or len(line) % 3 != 0:
return True
# 2nd, 5th, 8th, ... entry must be '='
for eq in line[1::3]:
if eq != "=":
return True
# 3rd, 6th, 9th, ... entry must be numeric
try:
[float(nn) for nn in line[2::3]]
except ValueError:
return True
return False
ene_traj = {}
nreads = 0
with open(name) as f:
read_single = False
read_multi = False
continued = False
old_line = ""
fields = []
for new_line in f:
if read_single:
if end_single(new_line, len(fields)):
read_single = False
continued = True
else:
for field, n in zip(fields, new_line.split()):
ene_traj[field].append(float(n))
if read_multi:
if end_multi(new_line):
read_multi = False
continued = True
else:
for field, n in zip(
new_line.split()[0::3], new_line.split()[2::3]
):
if field not in ene_traj:
ene_traj[field] = []
ene_traj[field].append(float(n))
if not (read_single or read_multi):
if start_multi(new_line):
if not continued:
ene_traj = {}
nreads += 1
read_multi = True
old_line = new_line
if start_single(old_line, new_line):
if not continued:
ene_traj = {}
nreads += 1
read_single = True
fields = new_line.split()
for field in fields:
if field not in ene_traj:
ene_traj[field] = []
old_line = new_line
continued = False
if nreads > 1:
print(
"NOTE: Multiple runs found in log file. Assumed prior runs\n"
" were equilibration runs and used only last run."
)
return ene_traj
@staticmethod
def __read_dump_file(name):
# parse dump file
# the dictionary to be filled
dump_dict = {"position": [], "velocity": [], "box": []}
# helper function checking line items
def check_item(line_str, item):
item = "ITEM: " + item
if not line_str.startswith(item):
raise pv_error.FileFormatError(name, "dump file: was expecting " + item)
return line_str.replace(item, "")
with open(name) as f:
line = f.readline()
while line:
check_item(line, "TIMESTEP")
f.readline()
line = f.readline()
check_item(line, "NUMBER OF ATOMS")
natoms = int(f.readline())
line = f.readline()
line = check_item(line, "BOX BOUNDS")
bx = 0
by = 0
bz = 0
if len(line.split()) == 3:
# rectangular
# xx yy zz, where each of them one of
# p = periodic, f = fixed, s = shrink wrap,
# or m = shrink wrapped with a minimum value
line = f.readline().split()
bx = float(line[1]) - float(line[0])
line = f.readline().split()
by = float(line[1]) - float(line[0])
line = f.readline().split()
bz = float(line[1]) - float(line[0])
elif len(line.split()) == 6:
                    # triclinic
                    # xy xz yz xx yy zz, where xy xz yz indicates
                    # 3 tilt factors will be included, and
                    # xx yy zz being each one of
                    # p = periodic, f = fixed, s = shrink wrap,
                    # or m = shrink wrapped with a minimum value
                    raise NotImplementedError("Triclinic box reading not implemented.")
line = f.readline()
line = check_item(line, "ATOMS").split()
if "x" not in line or "y" not in line or "z" not in line:
raise pv_error.FileFormatError(name, "No positions in dump file.")
irx = line.index("x")
iry = line.index("y")
irz = line.index("z")
has_velocities = False
ivx = None
ivy = None
ivz = None
if "vx" in line and "vy" in line and "vz" in line:
has_velocities = True
ivx = line.index("vx")
ivy = line.index("vy")
ivz = line.index("vz")
positions = []
velocities = []
for n in range(natoms):
line = f.readline().split()
positions.append([float(line[idx]) for idx in [irx, iry, irz]])
if has_velocities:
velocities.append([float(line[idx]) for idx in [ivx, ivy, ivz]])
dump_dict["position"].append(positions)
dump_dict["velocity"].append(velocities)
dump_dict["box"].append([bx, by, bz])
# end of dump loop
line = f.readline()
return dump_dict
| lgpl-2.1 | -3,521,935,147,181,849,000 | 34.659649 | 89 | 0.409033 | false | 4.583089 | false | false | false |