code (stringlengths 3-1.05M) | repo_name (stringlengths 5-104) | path (stringlengths 4-251) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int64 3-1.05M)
---|---|---|---|---|---|
# -*- coding: utf-8 -*-
from unittest import TestCase
from voluptuous import MultipleInvalid
from correios_lib.entities import Destinatario
class TestDestinatario(TestCase):
def test_invalid_destinatario(self):
self.assertRaises(MultipleInvalid, Destinatario)
def test_valid_destinatario(self):
self.entity = Destinatario(
nome="Myname",
logradouro="Rua Pretty cool",
numero="123",
bairro="Neighborhood",
cidade="MyCity",
uf="SP",
cep="01508020",
email="[email protected]"
)
| trocafone/correios-lib | tests/unit/entities/destinatario_test.py | Python | mit | 606 |
import os
import re
import glob
import commands
import nose.tools
here = os.path.dirname( os.path.realpath( __file__ ) )
home = os.path.join( here, ".." )
executable = os.path.join( home, "apps/appginger/cpp/ginger-script")
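# Each appginger/*/*.in.gnx example is piped through ginger-script and its output is
# compared against the matching *.out.txt file; *.fail.gnx inputs are expected to report a Mishap.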
def runtest( in_file_name ):
out_file_name = re.sub( r'\.in.gnx$', '.out.txt', in_file_name )
res = commands.getoutput( "cat {0} | {1} -i -g gnx".format( in_file_name, executable ) )
nose.tools.assert_equal( res, open( out_file_name ).read().strip() )
def test_examples():
for in_file_name in glob.glob( "appginger/*/*.in.gnx" ):
yield runtest, in_file_name
def runproblem( in_file_name ):
res = commands.getoutput( "cat {0} | {1} -i -g gnx".format( in_file_name, executable ) )
print "res = '" + res + "'"
nose.tools.assert_true( res.startswith( "\n### Mishap " ) )
def test_problems():
for in_file_name in glob.glob( "appginger/*/*.fail.gnx" ):
yield runproblem, in_file_name
| Spicery/ginger | functests/test_appginger.py | Python | gpl-3.0 | 957 |
#!/usr/bin/env python
import sys
import os
from string import Template
import templates
medRoot = sys.argv[1]
rt = 180
ns = 1000
nf = 10
nOrders = 2
nt = 9
t = []
for i in range(nt):
t.append(float(i)*10.0)
nThreads = 1
parTemplate = Template(templates.t_scatter)
subTemplate = Template(templates.t_submit)  # used by the (currently commented-out) submit-file generation below
sTypes = ['smooth','Gaussian','fBm']
HVals = [0.4, 0.6, 0.8]
lVals = [0.5, 1.0, 1.5]
SVals = [0.03, 0.04, 0.05, 0.06]
rVals = ['020','035','040','050','055']
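# Write one scatter-input (.in) parameter file per parameter combination (surface type,
# H/l shape parameter, std value, medium density); the matching submit-file (.sub)
# generation below is commented out.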
for sType in sTypes:
if sType == 'smooth':
for rho in rVals:
medFilename = medRoot+'/medium_rho_'+rho+'.nc'
rhoStr = 'R'+rho
subFilename = 'scatter_smooth_' + rhoStr + '.sub'
parFilename = 'scatter_smooth_' + rhoStr + '.in'
outFilename = 'LommelSeeliger_smooth_' + rhoStr + '.vsc'
f = open(parFilename, 'w')
f.write(parTemplate.substitute(mFilename=medFilename,
oFilename=outFilename,
rTheta=rt,
nOrders=nOrders,
nSamples=ns*nf,
nTheta=nt,
theta=str(t).strip('[]'),
nFields=1,
H=0.0,
std=0.0,
applyFields = '.false.',
sType = 'fBm',
nThreads = nThreads))
f.close()
#f = open(subFilename, 'w')
#f.write(subTemplate.substitute(jobName='vScat'+rhoStr, parFile=parFilename))
#f.close()
else:
appFields = '.true.'
if sType == 'Gaussian':
P1 = lVals
else:
P1 = HVals
for P in P1:
for S in SVals:
for rho in rVals:
medFilename = medRoot+'/medium_rho_'+rho+'.nc'
rhoStr = 'R'+rho
hStr = '__P' + ('%4.2f' %P)#.replace('.','_')#[-1]
sStr = '__S' + ('%4.2f' %S)#[-2:]
subFilename = 'scatter_' + sType + '__' + rhoStr + hStr + sStr + '.sub'
parFilename = 'scatter_' + sType + '__' + rhoStr + hStr + sStr + '.in'
outFilename = 'LommelSeeliger_' + sType + '__' + rhoStr + hStr + sStr + '.vsc'
f = open(parFilename, 'w')
f.write(parTemplate.substitute(mFilename=medFilename,
oFilename=outFilename,
rTheta=rt,
nOrders=nOrders,
nSamples=ns,
nTheta=nt,
theta=str(t).strip('[]'),
nFields=nf,
H=P,
std=S,
applyFields = '.true.',
sType = sType,
nThreads = nThreads))
f.close()
#f = open(subFilename, 'w')
#f.write(subTemplate.substitute(jobName='vScat'+rhoStr+hStr+sStr,
# parFile=parFilename))
#f.close()
| dronir/EM | python/genInput.py | Python | gpl-3.0 | 3,797 |
from .modulation import ModulationTypes
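# TuningParameterCollection keeps one TuningParameters entry per calibration height and
# linearly interpolates between the two nearest calibration points when an intermediate
# height is requested (see get_tuning_parameters_for_height).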
class TuningParameterCollection(object):
"""
Holds multiple tuning parameters at various heights. Each acts as a calibration point.
"""
tuning_parameters = []
build_x_min = -50.0
build_y_min = -50.0
build_x_max = 50.0
build_y_max = 50.0
dwell_x = 0.0
dwell_y = 0.0
velocity_x_max = 6000.0
velocity_y_max = 6000.0
drips_per_height = 100.0
sublayer_height = 0.01
modulation = ModulationTypes.AM
def __init__(self):
self.tuning_parameters = [ ]
self._cached_tuning_parameters = None
self._cached_height = None
def reset_cache(self):
self._cached_tuning_parameters = None
self._cached_height = None
def get_tuning_parameters_for_height(self, height):
# Caching
if self._cached_height == height:
return self._cached_tuning_parameters
existing = [param for param in self.tuning_parameters if param.height == height ]
if existing:
return existing[0]
new_tp = TuningParameters()
self._cached_tuning_parameters = new_tp
self._cached_height = height
# Edge cases
if not self.tuning_parameters:
return new_tp
# Calculate new TuningParameters for given height
tps = sorted(self.tuning_parameters, key=lambda tp: tp.height)
lower_tp = None
higher_tp = None
for tp in tps:
if tp.height == height:
new_tp.update(tp)
return new_tp
elif tp.height < height:
lower_tp = tp
elif tp.height > height:
higher_tp = tp
break
if lower_tp is None:
new_tp.update(higher_tp)
new_tp.height = height
return new_tp
if higher_tp is None:
new_tp.update(lower_tp)
new_tp.height = height
return new_tp
ratio = (height - lower_tp.height)/(higher_tp.height - lower_tp.height)
for attr in new_tp.__dict__.keys():
if attr.startswith('_'):
continue
setattr(new_tp, attr, getattr(lower_tp, attr)*(1.0-ratio) + getattr(higher_tp, attr)*ratio)
new_tp.height = height
return new_tp
def update(self, other):
self.build_x_min = other.build_x_min
self.build_x_max = other.build_x_max
self.build_y_min = other.build_y_min
self.build_y_max = other.build_y_max
self.dwell_x = other.dwell_x
self.dwell_y = other.dwell_y
self.velocity_x_max = other.velocity_x_max
self.velocity_y_max = other.velocity_y_max
self.drips_per_height = other.drips_per_height
self.sublayer_height = other.sublayer_height
self.modulation = other.modulation
self.tuning_parameters = []
for other_tp in other.tuning_parameters:
my_tp = TuningParameters()
my_tp.update(other_tp)
self.tuning_parameters.append(my_tp)
self._cached_height = None
self._cached_tuning_parameters = None
class TuningParameters(object):
"""
Coefficients that modify the transformation from position to audio output. All values are valid only for a single
height. To handle multiple heights, the TuningParameterCollection must be used.
"""
def __init__(self):
self.height = 0.0
self.x_offset = 0.0
self.y_offset = 0.0
self.rotation = 0.0
self.x_shear = 0.0
self.y_shear = 0.0
self.x_scale = 0.75
self.y_scale = 0.75
self.x_trapezoid = 0.0
self.y_trapezoid = 0.0
def update(self, other):
"""Copy the values from another instance."""
self.height = other.height
self.x_offset = other.x_offset
self.y_offset = other.y_offset
self.rotation = other.rotation
self.x_shear = other.x_shear
self.y_shear = other.y_shear
self.x_scale = other.x_scale
self.y_scale = other.y_scale
self.x_trapezoid = other.x_trapezoid
self.y_trapezoid = other.y_trapezoid
| PeachyPrinter/peachytoolchain | src/audio/tuning_parameters.py | Python | gpl-3.0 | 4,182 |
# -*- coding: utf-8 -*-
# Copyright (c) Pilot Systems and Libération, 2010
# This file is part of SeSQL.
# SeSQL is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# SeSQL is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with SeSQL. If not, see <http://www.gnu.org/licenses/>.
import logging
from sesql import config
from sesql.typemap import typemap
log = logging.getLogger('sesql')
class SeSQLResultSet(object):
"""
A lazy SeSQL result set
It mimics the Django QuerySet a bit, but doesn't work the same way
and doesn't provide exactly the same methods
"""
def __init__(self, objs, fields):
"""
Constructor
Objs must be a list of (class, id) with optionally extra fields
"""
self.objs = objs
self.fields = fields
def brains(self):
"""
Get the raw objects from the SeSQL index, aka the "brains", as dictionaries
"""
for obj in self.objs:
yield dict(zip(self.fields, obj))
def count(self):
"""
Count results
"""
return len(self.objs)
__len__ = count
def iterator(self):
"""
Iterate on self
"""
for obj in self.objs:
try:
yield self.load(obj)
except config.orm.not_found:
log.warning("Object %r does not exist ! Broken index ?" % (obj,))
__iter__ = iterator
def all(self):
"""
Get all the results as a list
"""
return list(self)
def get(self, index):
"""
Get the row at given index
"""
return self.load(self.objs[index])
__getitem__ = get
def __getslice__(self, i, j):
"""
Get a slice
"""
res = [ self.load(obj) for obj in self.objs[i:j] ]
return res
@staticmethod
def load(obj):
"""
Get a given object
"""
objclass, objid = obj[:2]
objclass = typemap.get_class_by_name(objclass)
if not objclass:
return config.orm.not_found
entry = "%s:%s" % (objclass.__name__, objid)
log.debug("Fetching %s" % entry)
return config.orm.load_object(objclass, objid)
def historize(self, query):
"""save in the database the query for future processing"""
nb_results = self.count()
query_text = query.get_fulltext_query()[2][0]
config.orm.historize(query=query_text, nb_results=nb_results)
| liberation/sesql | sesql/results.py | Python | gpl-2.0 | 2,929 |
""" CMAC Corrected Precipitation Radar Moments in Antenna Coordinates
Using fuzzy logic, scipy, and more to identify gates as rain, melting,
snow, no clutter, and second trip. Many fields such as reflectivity and
correlation coefficient are used, along with SNR and sounding data.
More information can be found at https://www.arm.gov/data/data-sources/cmac-69
"""
import subprocess
from setuptools import setup, find_packages
DOCLINES = __doc__.split("\n")
CLASSIFIERS = """\
Development Status :: 2 - Pre-Alpha
Intended Audience :: Science/Research
Intended Audience :: Developers
License :: OSI Approved :: BSD License
Programming Language :: Python
Programming Language :: Python :: 3.6
Topic :: Scientific/Engineering
Topic :: Scientific/Engineering :: Atmospheric Science
Operating System :: POSIX :: Linux
"""
NAME = 'cmac'
AUTHOR = 'Scott Collis, Zachary Sherman, Robert Jackson'
MAINTAINER = 'Data Informatics and Geophysical Retrievals (DIGR)'
DESCRIPTION = DOCLINES[0]
LONG_DESCRIPTION = "\n".join(DOCLINES[2:])
URL = 'https://github.com/EVS-ATMOS/cmac2.0'
LICENSE = 'BSD'
CLASSIFIERS = filter(None, CLASSIFIERS.split('\n'))
MAJOR = 0
MINOR = 1
MICRO = 0
ISRELEASED = False
VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO)
# Return the git revision as a string
def git_version():
def _minimal_ext_cmd(cmd):
# construct minimal environment
env = {}
for k in ['SYSTEMROOT', 'PATH']:
v = os.environ.get(k)
if v is not None:
env[k] = v
# LANGUAGE is used on win32
env['LANGUAGE'] = 'C'
env['LANG'] = 'C'
env['LC_ALL'] = 'C'
out = subprocess.Popen(
cmd, stdout=subprocess.PIPE, env=env).communicate()[0]
return out
try:
out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])
GIT_REVISION = out.strip().decode('ascii')
except OSError:
GIT_REVISION = "Unknown"
return GIT_REVISION
def write_version_py(filename='cmac/version.py'):
cnt = """
# THIS FILE IS GENERATED FROM PYART SETUP.PY
short_version = '%(version)s'
version = '%(version)s'
full_version = '%(full_version)s'
git_revision = '%(git_revision)s'
release = %(isrelease)s
if not release:
version = full_version
"""
# Adding the git rev number needs to be done inside write_version_py(),
# otherwise the import of cmac.version messes up the build under Python 3.
FULLVERSION = VERSION
if os.path.exists('.git'):
GIT_REVISION = git_version()
elif os.path.exists('cmac/version.py'):
# must be a source distribution, use existing version file
try:
from cmac.version import git_revision as GIT_REVISION
except ImportError:
raise ImportError("Unable to import git_revision. Try removing "
"cmac/version.py and the build directory "
"before building.")
else:
GIT_REVISION = "Unknown"
if not ISRELEASED:
FULLVERSION += '.dev+' + GIT_REVISION[:7]
a = open(filename, 'w')
try:
a.write(cnt % {'version': VERSION,
'full_version': FULLVERSION,
'git_revision': GIT_REVISION,
'isrelease': str(ISRELEASED)})
finally:
a.close()
setup(
name=NAME,
version=VERSION,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
url=URL,
author=AUTHOR,
maintainer=MAINTAINER,
license=LICENSE,
classifiers=CLASSIFIERS,
packages=find_packages(),
scripts=['scripts/cmac',
'scripts/cmac_animation',
'scripts/cmac_dask',
'scripts/xsapr_cmac_ipcluster',
'scripts/xsapr_cmac_pyspark'],
)
| zssherman/cmac2.0 | setup.py | Python | bsd-3-clause | 3,745 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('cms_shiny', '0005_shinyapppluginmodel'),
]
operations = [
migrations.AlterField(
model_name='shinyapp',
name='name',
field=models.CharField(max_length=64, unique=True, help_text='Enter a brief, yet descriptive name for the Shiny app.'),
preserve_default=True,
),
migrations.AlterField(
model_name='shinyapp',
name='slug',
field=models.SlugField(verbose_name='slug', max_length=64, unique=True, help_text='Please enter a unique slug for this Shiny app. This should get auto-generated.'),
preserve_default=True,
),
]
| mfcovington/djangocms-shiny-app | cms_shiny/migrations/0006_auto_20151207_0046.py | Python | bsd-3-clause | 835 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import sys
import os
import logging
import signal
import random
import time
import threading
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../'))
from shadowsocks import shell, daemon, eventloop, tcprelay, udprelay, asyncdns
stat_len = 0
def stat_handler(port, data):
global stat_len
stat_len += data
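# Background thread: every 3 seconds log the average speed (kb/s) of the bytes counted
# by stat_handler, then reset the counter.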
def monitor():
while True:
global stat_len
speed = stat_len / 1024 / 3
logging.info('Speed: %d kb/s' % speed)
stat_len = 0
time.sleep(3)
def main():
shell.check_python()
# fix py2exe
if hasattr(sys, "frozen") and sys.frozen in \
("windows_exe", "console_exe"):
p = os.path.dirname(os.path.abspath(sys.executable))
os.chdir(p)
config = shell.get_config(True)
daemon.daemon_exec(config)
try:
logging.info("starting local at %s:%d" %
(config['local_address'], config['local_port']))
dns_resolver = asyncdns.DNSResolver()
tcp_server = tcprelay.TCPRelay(config, dns_resolver, True,
stat_callback=stat_handler)
a_config = config.copy()
if a_config.get('port_password', None):
a_config['server_port'] = random.choice(
a_config['port_password'].keys())
a_config['password'] = a_config['port_password']\
[a_config['server_port']]
udp_server = udprelay.UDPRelay(a_config, dns_resolver, True,
stat_callback=stat_handler)
loop = eventloop.EventLoop()
dns_resolver.add_to_loop(loop)
tcp_server.add_to_loop(loop)
udp_server.add_to_loop(loop)
def handler(signum, _):
logging.warn('received SIGQUIT, doing graceful shutting down..')
tcp_server.close(next_tick=True)
udp_server.close(next_tick=True)
signal.signal(getattr(signal, 'SIGQUIT', signal.SIGTERM), handler)
def int_handler(signum, _):
sys.exit(1)
signal.signal(signal.SIGINT, int_handler)
daemon.set_user(config.get('user', None))
t = threading.Thread(target=monitor, args=(), name='monitor')
t.daemon = True
t.start()
loop.run()
except Exception as e:
shell.print_exception(e)
sys.exit(1)
if __name__ == '__main__':
main()
| ultimate010/shadowsocks | shadowsocks/local.py | Python | apache-2.0 | 3,096 |
from subprocess import Popen, PIPE
from .record import Record
from .visitor import Visitor
BLUE = '#aec7e8'
ORANGE = '#ffbb78'
GREEN = '#dbdb8d'
RED = '#ff9896'
PURPLE = '#f7b6d2'
SILVER = '#eeeeee'
GRAY = 'gray'
DARKGRAY = '#888888'
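# Render Graphviz DOT source to SVG by piping it through the external `dot` executable.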
def dot2svg(source):
process = Popen(
['dot', '-T', 'svg'],
stdin=PIPE, stdout=PIPE, stderr=PIPE
)
output, error = process.communicate(source.encode('utf8'))
if process.returncode != 0:
raise ValueError(error)
return output.decode('utf8')
class style(Record):
__attributes__ = ['attributes']
def __init__(self, **attributes):
self.attributes = attributes
def quote(self, value):
value = str(value)
replace = {
'"': r'\"',
'\n': r'\n',
'\r': r'\r'
}
for a, b in replace.items():
value = value.replace(a, b)
return '"' + value + '"'
def __str__(self):
return ', '.join(
'{key}={value}'.format(
key=key,
value=self.quote(value)
)
for key, value in self.attributes.items()
)
class Node(Record):
__attributes__ = ['item', 'style']
def __init__(self, item, style):
self.item = item
self.style = style
class Edge(Record):
__attributes__ = ['source', 'target', 'style']
def __init__(self, source, target, style):
self.source = source
self.target = target
self.style = style
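# Collects nodes and edges (with optional styles), emits Graphviz source via `source`,
# and renders inline SVG in IPython/Jupyter through _repr_svg_().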
class Graph(Record):
__attributes__ = ['nodes', 'edges']
graph_style = style(
margin=0,
nodesep=0,
ranksep=0,
splines='splines',
)
node_style = style(
shape='box',
height=0,
width=0,
fontname='sans',
fontsize=10,
color='none',
style='filled',
fillcolor=SILVER
)
edge_style = style(
fontname='sans',
fontsize=8,
fontcolor=GRAY,
arrowsize=0.3,
color=GRAY
)
def __init__(self):
self.nodes = []
self.edges = []
self.ids = {}
def add_node(self, item, style=None):
node = Node(item, style)
self.nodes.append(node)
def add_edge(self, source, target, style=None):
edge = Edge(source, target, style)
self.edges.append(edge)
def id(self, item):
item_id = id(item)
if item_id not in self.ids:
self.ids[item_id] = len(self.ids)
return self.ids[item_id]
@property
def source(self):
yield 'digraph G {'
yield 'graph [{graph_style}];'.format(graph_style=str(self.graph_style))
yield 'node [{node_style}];'.format(node_style=str(self.node_style))
yield 'edge [{edge_style}];'.format(edge_style=str(self.edge_style))
for node in self.nodes:
pattern = (
'{index} [{style}];'
if node.style
else '{index}'
)
yield pattern.format(
index=self.id(node.item),
style=str(node.style)
)
for edge in self.edges:
pattern = (
'{source} -> {target} [{style}];'
if edge.style
else '{source} -> {target};'
)
yield pattern.format(
source=self.id(edge.source),
target=self.id(edge.target),
style=str(edge.style)
)
yield '}'
def _repr_svg_(self):
return dot2svg('\n'.join(self.source))
def __repr__(self):
return '%s(nodes=[...], edges=[...])' % self.__class__.__name__
def _repr_pretty_(self, printer, cycle):
printer.text(repr(self))
class DotTransformator(Visitor):
def __init__(self):
self.graph = Graph()
def style(self, item, style):
self.graph.add_node(item, style)
for child in item.children:
self.graph.add_edge(item, child)
def __call__(self, root):
for item in root.walk():
self.visit(item)
return self.graph
| bureaucratic-labs/yargy | yargy/dot.py | Python | mit | 4,132 |
# Generated by Django 2.0.9 on 2018-12-04 11:31
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0013_feature_slug_unique'),
]
operations = [
migrations.AddField(
model_name='featureoption',
name='slug',
field=models.SlugField(blank=True),
),
]
| cmu-db/dbdb.io | dbdb/core/migrations/0014_featureoption_slug.py | Python | apache-2.0 | 384 |
from ..idasix import QtWidgets, QtCore
import ida_funcs
import ida_kernwin
import idc
from ..network import QueryWorker
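# Mixin that fills a Qt widget/layout with entries fetched from the 'collab/<item>/'
# endpoint via a QueryWorker; subclasses decide how each entry is created, added and
# marked as selected.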
class QItem(object):
def __init__(self, item, name_field, id_field, description_field,
exclude=None, columns=3, selected=None, empty_disabled=True,
**kwargs):
super(QItem, self).__init__(**kwargs)
self.item = item
self.name_field = name_field
self.id_field = id_field
self.description_field = description_field
self.exclude = exclude
self.columns = columns
self.selected = selected
self.empty_disabled = empty_disabled
self.found_selection = False
self.query = QueryWorker("GET", "collab/{}/".format(self.item), json=True)
self.query.start(self.load)
def load(self, response):
for i, obj in enumerate(response):
if not obj:
continue
item_name = obj[self.name_field]
item_description = obj[self.description_field]
item_id = obj[self.id_field]
if self.exclude and (item_name in self.exclude or
item_id in self.exclude):
continue
item = self.create_item(i=i, item_name=item_name, item_id=item_id,
item_description=item_description)
self.addWidget(item, i / self.columns, i % self.columns)
if self.selected:
selected = self.selected == "all"
selected = selected or self.selected in (item_name, item_id)
if not selected and hasattr(self.selected, '__contains__'):
selected = item_name in self.selected or item_id in self.selected
self.set_selected(i, selected)
if selected:
self.found_selection = True
if self.count() == 0 and self.empty_disabled:
self.setEnabled(False)
if self.selected is None and self.count() > 0:
self.set_selected(0, True)
class QItemSelect(QItem, QtWidgets.QComboBox):
def __init__(self, *args, **kwargs):
self.allow_none = kwargs.pop('allow_none', False)
if self.allow_none:
kwargs['empty_disabled'] = False
super(QItemSelect, self).__init__(*args, **kwargs)
def load(self, response):
super(QItemSelect, self).load(response)
if self.selected and not self.found_selection:
self.insertItem(0, "UNKNOWN ({})".format(self.selected), self.selected)
self.set_selected(0, True)
self.found_selection = True
if self.allow_none:
self.insertItem(0, "None", None)
if not self.found_selection:
self.set_selected(0, True)
@staticmethod
def create_item(i, item_name, item_id, item_description):
del i
# TODO: include the item description as tooltip
del item_description
text = "{} ({})".format(item_name, item_id)
return (text, item_id)
def addWidget(self, item, row, col):
del row, col
text, item_id = item
self.addItem(text, item_id)
def set_selected(self, i, selected):
if selected:
self.setCurrentIndex(i)
class QItemCheckBoxes(QItem, QtWidgets.QGridLayout):
def create_item(self, item_name, item_id, item_description, **kwargs):
del kwargs
widget = QtWidgets.QCheckBox(item_name)
widget.id = item_id
if item_description:
widget.setToolTip(item_description)
return widget
def set_selected(self, i, selected):
self.itemAt(i).widget().setChecked(selected)
def get_result(self):
return [self.itemAt(i).widget().id
for i in range(self.count())
if self.itemAt(i).widget().isChecked()]
class QRadioLayout(QtWidgets.QGridLayout):
def __init__(self, *radios, **kwargs):
super(QRadioLayout, self).__init__(**kwargs)
self.radiogroup = QtWidgets.QButtonGroup()
self.setColumnStretch(1, 1)
self.create_items(radios)
def create_items(self, radios):
for i, radio_details in enumerate(radios):
item_widget = self.create_item(i, *radio_details)
self.addWidget(item_widget, i, 0, QtCore.Qt.AlignTop)
def create_item(self, i, item_name, item_id, item_description, *args):
del args
item_widget = QtWidgets.QRadioButton(item_name)
item_widget.setObjectName(item_id)
item_widget.setToolTip(item_description)
self.radiogroup.addButton(item_widget, i)
return item_widget
def set_selected(self, i, selected):
self.radiogroup.button(i).setChecked(selected)
def get_result(self):
return self.radiogroup.checkedButton().objectName()
class QRadioExtraLayout(QRadioLayout):
def create_item(self, i, item_name, item_id, item_description, *args):
# slightly ugly to have overridden create_item with the same parameters
item_extra, selected = args
item = super(QRadioExtraLayout, self).create_item(i, item_name, item_id,
item_description)
self.set_selected(i, selected)
if item_extra is not None:
self.update_item_extra(item, item_extra)
self.addWidget(item_extra, i, 1, QtCore.Qt.AlignTop)
return item
@staticmethod
def update_item_extra(item_widget, item_extra):
# if extra controller comes disabled, make sure it stays that way
# and also make the radio box disabled
if item_extra.isEnabled():
item_widget.toggled.connect(item_extra.setEnabled)
item_extra.setEnabled(False)
else:
item_widget.setEnabled(False)
# if item_extra controller comes with a tooltip, copy that tooltip to
# radio button itself
if item_extra.toolTip():
item_widget.setToolTip(item_extra.toolTip())
class QItemRadioGroup(QItem, QRadioLayout):
# TODO: check the multiple inheritance
# TODO: check the extra param passed to create_item from load()
# TODO: make sure this is reasonable and working
pass
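# Pairs a flat button showing the currently chosen IDA function (clicking it jumps there)
# with a '...' button that opens IDA's function chooser.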
class QFunctionSelect(QtWidgets.QWidget):
changed = QtCore.Signal()
def __init__(self, text_max_length=30, **kwargs):
super(QFunctionSelect, self).__init__(**kwargs)
self.text_max = text_max_length
self.func = None
self.label = QtWidgets.QPushButton()
self.label.clicked.connect(self.label_clicked)
self.label.setFlat(True)
self.btn = QtWidgets.QPushButton("...")
self.btn.setMaximumWidth(20)
self.btn.clicked.connect(self.btn_clicked)
current_func = ida_funcs.get_func(idc.ScreenEA())
if current_func:
self.set_func(current_func)
layout = QtWidgets.QHBoxLayout()
layout.setContentsMargins(0, 0, 0, 0)
layout.addWidget(self.label)
layout.addWidget(self.btn)
layout.setStretch(0, 1)
self.setLayout(layout)
def set_func(self, func):
self.func = func
text = idc.GetFunctionName(self.func.startEA)
text = text[:self.text_max] + "..." if len(text) > self.text_max else text
self.label.setText(text)
def label_clicked(self, checked):
del checked
idc.Jump(self.func.startEA)
def btn_clicked(self, checked):
del checked
f = ida_kernwin.choose_func("Choose function to match with database",
self.func.startEA if self.func else 0)
if f:
self.set_func(f)
self.changed.emit()
def get_result(self):
return self.func.startEA if self.func else None
class QFunctionRangeSelect(QtWidgets.QWidget):
def __init__(self, text_max_length=30, **kwargs):
super(QFunctionRangeSelect, self).__init__(**kwargs)
self.start = QFunctionSelect(text_max_length=text_max_length)
self.start.changed.connect(self.selection_changed)
self.end = QFunctionSelect(text_max_length=text_max_length)
self.end.changed.connect(self.selection_changed)
layout = QtWidgets.QGridLayout()
layout.setContentsMargins(0, 0, 0, 0)
layout.addWidget(QtWidgets.QLabel("From"), 0, 0)
layout.addWidget(QtWidgets.QLabel("To"), 1, 0)
layout.addWidget(self.start, 0, 1)
layout.addWidget(self.end, 1, 1)
self.setLayout(layout)
def selection_changed(self):
if not self.start.func or not self.end.func:
return
if self.start.func.startEA < self.end.func.endEA:
return
start_func = self.start.func
self.start.set_func(self.end.func)
self.end.set_func(start_func)
def get_result(self):
return [self.start.func.startEA if self.start.func else None,
self.start.func.endEA if self.start.func else None]
class MatchTreeWidgetItem(QtWidgets.QTreeWidgetItem):
def __lt__(self, other):
column = self.treeWidget().sortColumn()
if self.childCount() == 0 and other.childCount() == 0:
try:
return float(self.text(column)) < float(other.text(column))
except ValueError:
return self.text(column) < other.text(column)
elif self.childCount() == 0 and other.childCount() > 0:
return True
elif self.childCount() > 0 and other.childCount() == 0:
return False
else:
my_biggest_child = self.biggest_child()
other_biggest_child = other.biggest_child()
return my_biggest_child < other_biggest_child
def biggest_child(self):
return max(self.child(i) for i in range(self.childCount()))
class SearchTreeWidget(QtWidgets.QTreeWidget):
def __init__(self, search_box, match_column, *args, **kwargs):
super(SearchTreeWidget, self).__init__(*args, **kwargs)
self.search_box = search_box
self.match_column = match_column
self.search_box.textEdited.connect(self.search)
self.search_box.returnPressed.connect(self.search)
def keyPressEvent(self, event): # noqa: N802
if event.text():
self.search_box.keyPressEvent(event)
else:
super(SearchTreeWidget, self).keyPressEvent(event)
def search(self, _=None):
del _
text = self.search_box.text().lower()
start = self.currentItem()
it = QtWidgets.QTreeWidgetItemIterator(self.currentItem())
it += 1
while it.value() != start:
if it.value() is None:
it = QtWidgets.QTreeWidgetItemIterator(self.topLevelItem(0))
if text in it.value().text(self.match_column).lower():
self.setCurrentItem(it.value())
self.scrollToItem(it.value())
return
it += 1
| nirizr/rematch | idaplugin/rematch/dialogs/widgets.py | Python | gpl-3.0 | 10,008 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import eventlet
eventlet.monkey_patch()
import argparse
import datetime
import logging
import sys
import threading
import time
from oslo_config import cfg
import oslo_messaging as messaging
from oslo_messaging import notify # noqa
from oslo_messaging import rpc # noqa
LOG = logging.getLogger()
USAGE = """ Usage: ./simulator.py [-h] [--url URL] [-d DEBUG]\
{notify-server,notify-client,rpc-server,rpc-client} ...
Usage example:
python tools/simulator.py\
--url rabbit://stackrabbit:secretrabbit@localhost/ rpc-server
python tools/simulator.py\
--url rabbit://stackrabbit:secretrabbit@localhost/ rpc-client\
--exit-wait 15000 -p 64 -m 64"""
class LoggingNoParsingFilter(logging.Filter):
def filter(self, record):
msg = record.getMessage()
for i in ['received {', 'MSG_ID is ']:
if i in msg:
return False
return True
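# Notification endpoint: a payload seen for the first time is requeued after a ~15 second
# delay; once redelivered it is acknowledged as handled.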
class NotifyEndpoint(object):
def __init__(self):
self.cache = []
def info(self, ctxt, publisher_id, event_type, payload, metadata):
LOG.info('msg rcv')
LOG.info("%s %s %s %s" % (ctxt, publisher_id, event_type, payload))
if payload not in self.cache:
LOG.info('requeue msg')
self.cache.append(payload)
for i in range(15):
eventlet.sleep(1)
return messaging.NotificationResult.REQUEUE
else:
LOG.info('ack msg')
return messaging.NotificationResult.HANDLED
def notify_server(transport):
endpoints = [NotifyEndpoint()]
target = messaging.Target(topic='n-t1')
server = notify.get_notification_listener(transport, [target],
endpoints, executor='eventlet')
server.start()
server.wait()
class RpcEndpoint(object):
def __init__(self, wait_before_answer):
self.count = None
self.wait_before_answer = wait_before_answer
def info(self, ctxt, message):
i = int(message.split(' ')[-1])
if self.count is None:
self.count = i
elif i == 0:
self.count = 0
else:
self.count += 1
LOG.info("######## RCV: %s/%s" % (self.count, message))
if self.wait_before_answer > 0:
time.sleep(self.wait_before_answer)
return "OK: %s" % message
class RpcEndpointMonitor(RpcEndpoint):
def __init__(self, *args, **kwargs):
super(RpcEndpointMonitor, self).__init__(*args, **kwargs)
self._count = self._prev_count = 0
self._monitor()
def _monitor(self):
threading.Timer(1.0, self._monitor).start()
print ("%d msg was received per second"
% (self._count - self._prev_count))
self._prev_count = self._count
def info(self, *args, **kwargs):
self._count += 1
super(RpcEndpointMonitor, self).info(*args, **kwargs)
def rpc_server(transport, target, wait_before_answer, executor, show_stats):
endpoint_cls = RpcEndpointMonitor if show_stats else RpcEndpoint
endpoints = [endpoint_cls(wait_before_answer)]
server = rpc.get_rpc_server(transport, target, endpoints,
executor=executor)
server.start()
server.wait()
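# Run `method` in a pool of green threads and block until all of them have finished.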
def threads_spawner(threads, method, *args, **kwargs):
p = eventlet.GreenPool(size=threads)
for i in range(0, threads):
p.spawn_n(method, i, *args, **kwargs)
p.waitall()
def send_msg(_id, transport, target, messages, wait_after_msg, timeout,
is_cast):
client = rpc.RPCClient(transport, target)
client = client.prepare(timeout=timeout)
rpc_method = _rpc_cast if is_cast else _rpc_call
for i in range(0, messages):
msg = "test message %d" % i
LOG.info("SEND: %s" % msg)
rpc_method(client, msg)
if wait_after_msg > 0:
time.sleep(wait_after_msg)
def _rpc_call(client, msg):
try:
res = client.call({}, 'info', message=msg)
except Exception as e:
LOG.exception('Error %s on CALL for message %s' % (str(e), msg))
else:
LOG.info("SENT: %s, RCV: %s" % (msg, res))
def _rpc_cast(client, msg):
try:
client.cast({}, 'info', message=msg)
except Exception as e:
LOG.exception('Error %s on CAST for message %s' % (str(e), msg))
else:
LOG.info("SENT: %s" % msg)
def notifier(_id, transport, messages, wait_after_msg, timeout):
n1 = notify.Notifier(transport, topic="n-t1").prepare(
publisher_id='publisher-%d' % _id)
msg = 0
for i in range(0, messages):
msg = 1 + msg
ctxt = {}
payload = dict(msg=msg, vm='test', otherdata='ahah')
LOG.info("send msg")
LOG.info(payload)
n1.info(ctxt, 'compute.start1', payload)
if wait_after_msg > 0:
time.sleep(wait_after_msg)
def _setup_logging(is_debug):
log_level = logging.DEBUG if is_debug else logging.WARN
logging.basicConfig(stream=sys.stdout, level=log_level)
logging.getLogger().handlers[0].addFilter(LoggingNoParsingFilter())
for i in ['kombu', 'amqp', 'stevedore', 'qpid.messaging',
'oslo.messaging._drivers.amqp', ]:
logging.getLogger(i).setLevel(logging.WARN)
def main():
parser = argparse.ArgumentParser(
description='Tools to play with oslo.messaging\'s RPC',
usage=USAGE,
)
parser.add_argument('--url', dest='url',
default='rabbit://guest:password@localhost/',
help="oslo.messaging transport url")
parser.add_argument('-d', '--debug', dest='debug', type=bool,
default=False,
help="Turn on DEBUG logging level instead of WARN")
subparsers = parser.add_subparsers(dest='mode',
help='notify/rpc server/client mode')
server = subparsers.add_parser('notify-server')
client = subparsers.add_parser('notify-client')
client.add_argument('-p', dest='threads', type=int, default=1,
help='number of client threads')
client.add_argument('-m', dest='messages', type=int, default=1,
help='number of call per threads')
client.add_argument('-w', dest='wait_after_msg', type=int, default=-1,
help='sleep time between two messages')
client.add_argument('-t', dest='timeout', type=int, default=3,
help='client timeout')
server = subparsers.add_parser('rpc-server')
server.add_argument('-w', dest='wait_before_answer', type=int, default=-1)
server.add_argument('--show-stats', dest='show_stats',
type=bool, default=True)
server.add_argument('-e', '--executor', dest='executor',
type=str, default='eventlet',
help='name of a message executor')
client = subparsers.add_parser('rpc-client')
client.add_argument('-p', dest='threads', type=int, default=1,
help='number of client threads')
client.add_argument('-m', dest='messages', type=int, default=1,
help='number of call per threads')
client.add_argument('-w', dest='wait_after_msg', type=int, default=-1,
help='sleep time between two messages')
client.add_argument('-t', dest='timeout', type=int, default=3,
help='client timeout')
client.add_argument('--exit-wait', dest='exit_wait', type=int, default=0,
help='Keep connections open N seconds after calls '
'have been done')
client.add_argument('--is-cast', dest='is_cast', type=bool, default=False,
help='Use `call` or `cast` RPC methods')
args = parser.parse_args()
_setup_logging(is_debug=args.debug)
# oslo.config defaults
cfg.CONF.heartbeat_interval = 5
cfg.CONF.notification_topics = "notif"
cfg.CONF.notification_driver = "messaging"
transport = messaging.get_transport(cfg.CONF, url=args.url)
target = messaging.Target(topic='profiler_topic', server='profiler_server')
if args.mode == 'rpc-server':
if args.url.startswith('zmq'):
cfg.CONF.rpc_zmq_matchmaker = "redis"
transport._driver.matchmaker._redis.flushdb()
rpc_server(transport, target, args.wait_before_answer, args.executor,
args.show_stats)
elif args.mode == 'notify-server':
notify_server(transport)
elif args.mode == 'notify-client':
threads_spawner(args.threads, notifier, transport, args.messages,
args.wait_after_msg, args.timeout)
elif args.mode == 'rpc-client':
start = datetime.datetime.now()
threads_spawner(args.threads, send_msg, transport, target,
args.messages, args.wait_after_msg, args.timeout,
args.is_cast)
time_ellapsed = (datetime.datetime.now() - start).total_seconds()
msg_count = args.messages * args.threads
print ('%d messages were sent in %s seconds. Bandwidth is %s msg/sec'
% (msg_count, time_ellapsed, (msg_count / time_ellapsed)))
LOG.info("calls finished, wait %d seconds" % args.exit_wait)
time.sleep(args.exit_wait)
if __name__ == '__main__':
main()
| stevei101/oslo.messaging | tools/simulator.py | Python | apache-2.0 | 9,935 |
from django.db import models
class TaskStoreStatistic(models.Model):
MEASURE_SIZE = "size"
MEASURE_CHOICES = (
(
MEASURE_SIZE,
"Repository Size",
),
)
store = models.ForeignKey(
"TaskStore",
related_name="statistics",
on_delete=models.CASCADE,
)
measure = models.CharField(choices=MEASURE_CHOICES, max_length=50)
value = models.FloatField()
run_id = models.CharField(
max_length=255,
help_text=(
"If generated by an automated process, indicates the "
"job name used for generating this value."
),
)
created = models.DateTimeField(auto_now_add=True)
def __str__(self):
return f"{self.value} {self.measure} for {self.store} at {self.created}"
| coddingtonbear/inthe.am | inthe_am/taskmanager/models/taskstorestatistic.py | Python | agpl-3.0 | 813 |
#!/usr/bin/env python
from ..ast_object import ASTObject
class SpecFrame(ASTObject):
'''
self.astObject is of type starlink.Ast.SpecFrame.
'''
def __init__(self):
pass | demitri/cornish | source/cornish/mapping/frame/spec_frame.py | Python | mit | 176 |
import msgpack
import requests
def authenticate(username, password):
pass
def create_public_key():
pass
def get_public_key():
pass
def is_internet_alive():
pass
def is_port_alive(port):
pass
def forward_port(auth_uid, public_key, port):
pass
| Cetsoft/macun | macun/client/client.py | Python | apache-2.0 | 278 |
#!/usr/bin/python
# This script is used to generate luabinding glue codes.
# Android ndk version must be ndk-r9b.
import sys
import os, os.path
import shutil
import ConfigParser
import subprocess
import re
from contextlib import contextmanager
def _check_ndk_root_env():
''' Checking the environment NDK_ROOT, which will be used for building
'''
try:
NDK_ROOT = os.environ['NDK_ROOT']
except Exception:
print "NDK_ROOT not defined. Please define NDK_ROOT in your environment."
sys.exit(1)
return NDK_ROOT
def _check_python_bin_env():
''' Checking the environment PYTHON_BIN, which will be used for building
'''
try:
PYTHON_BIN = os.environ['PYTHON_BIN']
except Exception:
print "PYTHON_BIN not defined, use current python."
PYTHON_BIN = sys.executable
return PYTHON_BIN
class CmdError(Exception):
pass
@contextmanager
def _pushd(newDir):
previousDir = os.getcwd()
os.chdir(newDir)
yield
os.chdir(previousDir)
def _run_cmd(command):
ret = subprocess.call(command, shell=True)
if ret != 0:
message = "Error running command"
raise CmdError(message)
def main():
cur_platform= '??'
llvm_path = '??'
ndk_root = _check_ndk_root_env()
# del the " in the path
ndk_root = re.sub(r"\"", "", ndk_root)
python_bin = _check_python_bin_env()
platform = sys.platform
if platform == 'win32':
cur_platform = 'windows'
elif platform == 'darwin':
cur_platform = platform
elif 'linux' in platform:
cur_platform = 'linux'
else:
print 'Your platform is not supported!'
sys.exit(1)
if platform == 'win32':
x86_llvm_path = os.path.abspath(os.path.join(ndk_root, 'toolchains/llvm-3.3/prebuilt', '%s' % cur_platform))
else:
x86_llvm_path = os.path.abspath(os.path.join(ndk_root, 'toolchains/llvm-3.3/prebuilt', '%s-%s' % (cur_platform, 'x86')))
x64_llvm_path = os.path.abspath(os.path.join(ndk_root, 'toolchains/llvm-3.3/prebuilt', '%s-%s' % (cur_platform, 'x86_64')))
if os.path.isdir(x86_llvm_path):
llvm_path = x86_llvm_path
elif os.path.isdir(x64_llvm_path):
llvm_path = x64_llvm_path
else:
print 'llvm toolchain not found!'
print 'path: %s or path: %s are not valid! ' % (x86_llvm_path, x64_llvm_path)
sys.exit(1)
project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))
cocos_root = os.path.abspath(os.path.join(project_root, ''))
cxx_generator_root = os.path.abspath(os.path.join(project_root, 'tools/bindings-generator'))
# save config to file
config = ConfigParser.ConfigParser()
config.set('DEFAULT', 'androidndkdir', ndk_root)
config.set('DEFAULT', 'clangllvmdir', llvm_path)
config.set('DEFAULT', 'cocosdir', cocos_root)
config.set('DEFAULT', 'cxxgeneratordir', cxx_generator_root)
config.set('DEFAULT', 'extra_flags', '')
# To fix a parse error on Windows, we must define __WCHAR_MAX__ and undefine __MINGW32__.
if platform == 'win32':
config.set('DEFAULT', 'extra_flags', '-D__WCHAR_MAX__=0x7fffffff -U__MINGW32__')
conf_ini_file = os.path.abspath(os.path.join(os.path.dirname(__file__), 'userconf.ini'))
print 'generating userconf.ini...'
with open(conf_ini_file, 'w') as configfile:
config.write(configfile)
# set proper environment variables
if 'linux' in platform or platform == 'darwin':
os.putenv('LD_LIBRARY_PATH', '%s/libclang' % cxx_generator_root)
if platform == 'win32':
path_env = os.environ['PATH']
os.putenv('PATH', r'%s;%s\libclang;%s\tools\win32;' % (path_env, cxx_generator_root, cxx_generator_root))
try:
tolua_root = '%s/tools/tolua' % project_root
output_dir = '%s/cocos/scripting/lua-bindings/auto' % project_root
cmd_args = {'cocos2dx.ini' : ('cocos2d-x', 'lua_cocos2dx_auto'), \
'cocos2dx_extension.ini' : ('cocos2dx_extension', 'lua_cocos2dx_extension_auto'), \
'cocos2dx_ui.ini' : ('cocos2dx_ui', 'lua_cocos2dx_ui_auto'), \
'cocos2dx_studio.ini' : ('cocos2dx_studio', 'lua_cocos2dx_studio_auto'), \
'cocos2dx_spine.ini' : ('cocos2dx_spine', 'lua_cocos2dx_spine_auto'), \
'cocos2dx_physics.ini' : ('cocos2dx_physics', 'lua_cocos2dx_physics_auto'), \
'cocos2dx_custom.ini' : ('cocos2dx_custom', 'lua_cocos2dx_custom'), \
}
target = 'lua'
generator_py = '%s/generator.py' % cxx_generator_root
for key in cmd_args.keys():
args = cmd_args[key]
cfg = '%s/%s' % (tolua_root, key)
print 'Generating bindings for %s...' % (key[:-4])
command = '%s %s %s -s %s -t %s -o %s -n %s' % (python_bin, generator_py, cfg, args[0], target, output_dir, args[1])
_run_cmd(command)
if platform == 'win32':
with _pushd(output_dir):
_run_cmd('dos2unix *')
print '---------------------------------'
print 'Generating lua bindings succeeds.'
print '---------------------------------'
except Exception as e:
if e.__class__.__name__ == 'CmdError':
print '---------------------------------'
print 'Generating lua bindings fails.'
print '---------------------------------'
sys.exit(1)
else:
raise
# -------------- main --------------
if __name__ == '__main__':
main()
| cedricporter/everlost | frameworks/cocos2d-x/tools/tolua/genbindings.py | Python | apache-2.0 | 5,614 |
# -*- coding: utf-8 -*-
#
# 2015-02-25 Cornelius Kölbel <[email protected]>
# Initial writeup
#
#
# License: AGPLv3
# contact: http://www.privacyidea.org
#
# This code is free software; you can redistribute it and/or
# modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE
# License as published by the Free Software Foundation; either
# version 3 of the License, or any later version.
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU AFFERO GENERAL PUBLIC LICENSE for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with this program. If not, see <http://www.gnu.org/licenses/>.
#
__doc__ = '''
Machine Resolvers are used to find machines in directories like LDAP, Active
Directory, puppet, salt, or the /etc/hosts file.
Machines can then be used to assign applications and tokens to those machines.
'''
from base import BaseMachineResolver
| wheldom01/privacyidea | privacyidea/lib/machines/__init__.py | Python | agpl-3.0 | 1,086 |
#SQL Server details
SQL_HOST = 'localhost'
SQL_USERNAME = 'root'
SQL_PASSWORD = ''
SQL_PORT = '3306'
#Youtube details
YOUTUBE_CLIENT_ID = ''
YOUTUBE_CLIENT_SECRET = ''
#Fab - configuration for deploying to a remote server
FAB_HOSTS = []
FAB_GITHUB_URL = 'https://github.com/UQ-UQx/uqx_api.git'
FAB_REMOTE_PATH = '/file/to/your/deployment/location'
#LDAP - Configuration
USE_LDAP = False
LDAP_SERVER = ""
LDAP_BIND_DN = ""
LDAP_PASSWORD = ""
LDAP_SEARCH_DN = ""
#The server where the course information is found
SERVER_URL = 'http://dashboard.ceit.uq.edu.au' | UQ-UQx/uqx_api | config.example.py | Python | mit | 557 |
## This Python file uses the following encoding: utf-8
import codecs
import tamil
import sys
from pprint import pprint
def safe_splitMeiUyir(arg):
try:
# when uyir letters are passed to splitMeiUyir function it will throw an IndexError
rval = tamil.utf8.splitMeiUyir(arg)
if len(rval) == 1:
return (rval,u'')
return rval
except IndexError as idxerr:
pass
except ValueError as valerr:
# non tamil letters cannot be split - e.g. '26வது'
pass
return (u'',u'')
class FilterDictionary:
def __init__(self,matchtype='ANY'):
# show all words containing only given letter series
self.fn=u"tamilvu_dictionary_words.txt"
self.db = []
self.type = matchtype.lower()
self.criteria_options = {}
with codecs.open(self.fn,"r","utf-8") as fp:
self.db = map(lambda l: l.strip(),fp.readlines())
@staticmethod
def getUyirMeiSeries(ref_letter):
# if we give 'கை' get full series 'க்','க','கா', ... 'கௌ'
mei,uyir = safe_splitMeiUyir(ref_letter)
if uyir in [u'',u' ']:
return [mei]
pulli = tamil.utf8.pulli_symbols[0]
mei = mei[0] #agaram
series = [mei+pulli]
for sym in tamil.utf8.accent_symbols[:-1]:
series.append(mei+sym)
return series
def is_in_sequence(self,letter):
for ol in self.criteria_options.keys():
if letter in self.criteria_options[ol]:
return True
return False
def rebuild_criteria(self,criteria):
Cl = list(set(tamil.utf8.get_letters(criteria)))
self.criteria_options = {} #reset
for cl in Cl:
self.criteria_options[cl] = FilterDictionary.getUyirMeiSeries(cl)
return
def select(self,criteria):
choices = [] #result
self.rebuild_criteria(criteria)
for w in self.db:
Ll = tamil.utf8.get_letters(w)
count = 0
for l in Ll:
if not self.is_in_sequence(l):
break
count = count + 1
if count == len(Ll):
if self.type == 'any':
choices.append( w )
elif self.type == 'all':
#scoreboard
rmatches = { x:False for x in self.criteria_options.keys() }
for letter in Ll:
for ol in self.criteria_options.keys():
if letter in self.criteria_options[ol]:
rmatches[ol] = rmatches[ol] or True
if all(rmatches.values()):
choices.append(w)
else:
raise Exception("Incorrect type")
return choices
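# Interactive loop: read a set of Tamil letters and print every dictionary word built only
# from the letter series derived from those letters; the first command-line argument
# selects the match type ('any' or 'all', defaulting to 'all').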
while True:
choices = raw_input(u">> ").decode("UTF-8") #u"அக"
matchtype = len(sys.argv)> 1 and sys.argv[1] or "all"
options = FilterDictionary(matchtype).select(choices)
for w in options:
print(u"%s"%w)
continue
| arcturusannamalai/open-tamil | examples/classifier/typewords.py | Python | mit | 3,106 |
# Copyright 2016-2021 IBM Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit tests for _activation_profile module.
"""
from __future__ import absolute_import, print_function
import copy
import re
import pytest
from zhmcclient import Client, ActivationProfile
from zhmcclient_mock import FakedSession
from tests.common.utils import assert_resources
class TestActivationProfile(object):
"""
All tests for the ActivationProfile and ActivationProfileManager classes.
"""
def setup_method(self):
"""
Setup that is called by pytest before each test method.
Set up a faked session, and add a faked CPC in classic mode,
and add two faked activation profiles of each type.
"""
# pylint: disable=attribute-defined-outside-init
self.session = FakedSession('fake-host', 'fake-hmc', '2.13.1', '1.8')
self.client = Client(self.session)
self.faked_cpc = self.session.hmc.cpcs.add({
'object-id': 'fake-cpc1-oid',
# object-uri is set up automatically
'parent': None,
'class': 'cpc',
'name': 'fake-cpc1-name',
'description': 'CPC #1 (classic mode)',
'status': 'active',
'dpm-enabled': False,
'is-ensemble-member': False,
'iml-mode': 'lpar',
})
self.cpc = self.client.cpcs.find(name='fake-cpc1-name')
self.faked_reset_ap_1 = self.faked_cpc.reset_activation_profiles.add({
# element-uri is set up automatically
'name': 'rap_1',
'parent': self.faked_cpc.uri,
'class': 'reset-activation-profile',
'description': 'RAP #1',
})
self.faked_reset_ap_2 = self.faked_cpc.reset_activation_profiles.add({
# element-uri is set up automatically
'name': 'rap_2',
'parent': self.faked_cpc.uri,
'class': 'reset-activation-profile',
'description': 'RAP #2',
})
self.faked_image_ap_1 = self.faked_cpc.image_activation_profiles.add({
# element-uri is set up automatically
'name': 'iap_1',
'parent': self.faked_cpc.uri,
'class': 'image-activation-profile',
'description': 'IAP #1',
})
self.faked_image_ap_2 = self.faked_cpc.image_activation_profiles.add({
# element-uri is set up automatically
'name': 'iap_2',
'parent': self.faked_cpc.uri,
'class': 'image-activation-profile',
'description': 'IAP #2',
})
self.faked_load_ap_1 = self.faked_cpc.load_activation_profiles.add({
# element-uri is set up automatically
'name': 'lap_1',
'parent': self.faked_cpc.uri,
'class': 'load-activation-profile',
'description': 'LAP #1',
})
self.faked_load_ap_2 = self.faked_cpc.load_activation_profiles.add({
# element-uri is set up automatically
'name': 'lap_2',
'parent': self.faked_cpc.uri,
'class': 'load-activation-profile',
'description': 'LAP #2',
})
@pytest.mark.parametrize(
"profile_type", ['reset', 'image', 'load']
)
def test_profilemanager_initial_attrs(self, profile_type):
"""Test initial attributes of ActivationProfileManager."""
mgr_attr = profile_type + '_activation_profiles'
profile_mgr = getattr(self.cpc, mgr_attr)
# Verify all public properties of the manager object
assert profile_mgr.resource_class == ActivationProfile
assert profile_mgr.session == self.session
assert profile_mgr.parent == self.cpc
assert profile_mgr.cpc == self.cpc
assert profile_mgr.profile_type == profile_type
# TODO: Test for ActivationProfileManager.__repr__()
@pytest.mark.parametrize(
"profile_type", ['reset', 'image', 'load']
)
@pytest.mark.parametrize(
"full_properties_kwargs, prop_names", [
(dict(),
['name', 'element-uri']),
(dict(full_properties=False),
['name', 'element-uri']),
(dict(full_properties=True),
None),
]
)
def test_profilemanager_list_full_properties(
self, full_properties_kwargs, prop_names, profile_type):
"""Test ActivationProfileManager.list() with full_properties."""
mgr_attr = profile_type + '_activation_profiles'
faked_profile_mgr = getattr(self.faked_cpc, mgr_attr)
exp_faked_profiles = faked_profile_mgr.list()
profile_mgr = getattr(self.cpc, mgr_attr)
# Execute the code to be tested
profiles = profile_mgr.list(**full_properties_kwargs)
assert_resources(profiles, exp_faked_profiles, prop_names)
@pytest.mark.parametrize(
"profile_type, filter_args, exp_names", [
('reset',
{'name': 'rap_2'},
['rap_2']),
('reset',
{'name': '.*rap_1'},
['rap_1']),
('reset',
{'name': 'rap_1.*'},
['rap_1']),
('reset',
{'name': 'rap_.'},
['rap_1', 'rap_2']),
('reset',
{'name': '.ap_1'},
['rap_1']),
('reset',
{'name': '.+'},
['rap_1', 'rap_2']),
('reset',
{'name': 'rap_1.+'},
[]),
('reset',
{'name': '.+rap_1'},
[]),
('image',
{'name': 'iap_1'},
['iap_1']),
('image',
{'name': '.*iap_1'},
['iap_1']),
('image',
{'name': 'iap_1.*'},
['iap_1']),
('image',
{'name': 'iap_.'},
['iap_1', 'iap_2']),
('image',
{'name': '.ap_1'},
['iap_1']),
('image',
{'name': '.+'},
['iap_1', 'iap_2']),
('image',
{'name': 'iap_1.+'},
[]),
('image',
{'name': '.+iap_1'},
[]),
('load',
{'name': 'lap_2'},
['lap_2']),
('load',
{'name': '.*lap_1'},
['lap_1']),
('load',
{'name': 'lap_1.*'},
['lap_1']),
('load',
{'name': 'lap_.'},
['lap_1', 'lap_2']),
('load',
{'name': '.ap_1'},
['lap_1']),
('load',
{'name': '.+'},
['lap_1', 'lap_2']),
('load',
{'name': 'lap_1.+'},
[]),
('load',
{'name': '.+lap_1'},
[]),
('reset',
{'class': 'reset-activation-profile'},
['rap_1', 'rap_2']),
('image',
{'class': 'image-activation-profile'},
['iap_1', 'iap_2']),
('load',
{'class': 'load-activation-profile'},
['lap_1', 'lap_2']),
('reset',
{'class': 'reset-activation-profile',
'description': 'RAP #2'},
['rap_2']),
('image',
{'class': 'image-activation-profile',
'description': 'IAP #1'},
['iap_1']),
('load',
{'class': 'load-activation-profile',
'description': 'LAP #2'},
['lap_2']),
('reset',
{'description': 'RAP #1'},
['rap_1']),
('image',
{'description': 'IAP #2'},
['iap_2']),
('load',
{'description': 'LAP #1'},
['lap_1']),
]
)
def test_profilemanager_list_filter_args(
self, profile_type, filter_args, exp_names):
"""Test ActivationProfileManager.list() with filter_args."""
mgr_attr = profile_type + '_activation_profiles'
profile_mgr = getattr(self.cpc, mgr_attr)
# Execute the code to be tested
profiles = profile_mgr.list(filter_args=filter_args)
assert len(profiles) == len(exp_names)
if exp_names:
names = [ap.properties['name'] for ap in profiles]
assert set(names) == set(exp_names)
# TODO: Test for initial ActivationProfile attributes
def test_profile_repr(self):
"""Test ActivationProfile.__repr__()."""
# We test __repr__() just for reset activation profiles, because the
# ActivationProfile class is the same for all profile types and we know
# that __repr__() does not depend on the profile type.
profile_mgr = self.cpc.reset_activation_profiles
reset_ap = profile_mgr.find(name='rap_1')
# Execute the code to be tested
repr_str = repr(reset_ap)
repr_str = repr_str.replace('\n', '\\n')
# We check just the begin of the string:
assert re.match(r'^{classname}\s+at\s+0x{id:08x}\s+\(\\n.*'.
format(classname=reset_ap.__class__.__name__,
id=id(reset_ap)),
repr_str)
@pytest.mark.parametrize(
"profile_type", ['reset', 'image', 'load']
)
@pytest.mark.parametrize(
"input_props", [
{},
{'description': 'New profile description'},
{'description': ''},
{'ssc-network-info': {
'chpid': '1a',
'port': 0,
'ipaddr-type': 'dhcp',
'vlan-id': None,
'static-ip-info': None,
}},
{'group-profile-uri': None},
{'zaware-gateway-info': None},
]
)
def test_profile_update_properties(self, input_props, profile_type):
"""Test ActivationProfile.update_properties()."""
mgr_attr = profile_type + '_activation_profiles'
profile_mgr = getattr(self.cpc, mgr_attr)
profile = profile_mgr.list()[0]
profile.pull_full_properties()
saved_properties = copy.deepcopy(profile.properties)
# Execute the code to be tested
profile.update_properties(properties=input_props)
# Verify that the resource object already reflects the property
# updates.
for prop_name in saved_properties:
if prop_name in input_props:
exp_prop_value = input_props[prop_name]
else:
exp_prop_value = saved_properties[prop_name]
assert prop_name in profile.properties
prop_value = profile.properties[prop_name]
assert prop_value == exp_prop_value
# Refresh the resource object and verify that the resource object
# still reflects the property updates.
profile.pull_full_properties()
for prop_name in saved_properties:
if prop_name in input_props:
exp_prop_value = input_props[prop_name]
else:
exp_prop_value = saved_properties[prop_name]
assert prop_name in profile.properties
prop_value = profile.properties[prop_name]
assert prop_value == exp_prop_value
| zhmcclient/python-zhmcclient | tests/unit/zhmcclient/test_activation_profile.py | Python | apache-2.0 | 11,882 |
from datetime import date
from django.forms import DateInput
from django.test import override_settings
from django.utils import translation
from .base import WidgetTest
class DateInputTest(WidgetTest):
widget = DateInput()
def test_render_none(self):
self.check_html(self.widget, 'date', None, html='<input type="text" name="date" />')
def test_render_value(self):
d = date(2007, 9, 17)
self.assertEqual(str(d), '2007-09-17')
self.check_html(self.widget, 'date', d, html='<input type="text" name="date" value="2007-09-17" />')
self.check_html(self.widget, 'date', date(2007, 9, 17), html=(
'<input type="text" name="date" value="2007-09-17" />'
))
def test_string(self):
"""
Should be able to initialize from a string value.
"""
self.check_html(self.widget, 'date', '2007-09-17', html=(
'<input type="text" name="date" value="2007-09-17" />'
))
def test_format(self):
"""
Use 'format' to change the way a value is displayed.
"""
d = date(2007, 9, 17)
widget = DateInput(format='%d/%m/%Y', attrs={'type': 'date'})
self.check_html(widget, 'date', d, html='<input type="date" name="date" value="17/09/2007" />')
@override_settings(USE_L10N=True)
@translation.override('de-at')
def test_l10n(self):
self.check_html(
self.widget, 'date', date(2007, 9, 17),
html='<input type="text" name="date" value="17.09.2007" />',
)
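    # Illustrative note (editorial addition, not part of the upstream suite):
    # 'format' only affects how a bound value is rendered; parsing of posted
    # data is handled by the form field. With a made-up format string, e.g.
    #
    #     DateInput(format='%Y/%m/%d').render('date', date(2007, 9, 17))
    #
    # the generated <input> carries value="2007/09/17".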
| Beauhurst/django | tests/forms_tests/widget_tests/test_dateinput.py | Python | bsd-3-clause | 1,557 |
# Copyright (c) 2010-2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Disk File Interface for the Swift Object Server
The `DiskFile`, `DiskFileWriter` and `DiskFileReader` classes combined define
the on-disk abstraction layer for supporting the object server REST API
interfaces (excluding `REPLICATE`). Other implementations wishing to provide
an alternative backend for the object server must implement the three
classes. An example alternative implementation can be found in the
`mem_server.py` and `mem_diskfile.py` modules alongside this one.
The `DiskFileManager` is a reference implementation specific class and is not
part of the backend API.
The remaining methods in this module are considered implementation specific and
are also not considered part of the backend API.
"""
import six.moves.cPickle as pickle
import errno
import fcntl
import os
import time
import uuid
import hashlib
import logging
import traceback
import xattr
from os.path import basename, dirname, exists, getmtime, join, splitext
from random import shuffle
from tempfile import mkstemp
from contextlib import contextmanager
from collections import defaultdict
from eventlet import Timeout
from eventlet.hubs import trampoline
from swift import gettext_ as _
from swift.common.constraints import check_mount, check_dir
from swift.common.request_helpers import is_sys_meta
from swift.common.utils import mkdirs, Timestamp, \
storage_directory, hash_path, renamer, fallocate, fsync, fdatasync, \
fsync_dir, drop_buffer_cache, ThreadPool, lock_path, write_pickle, \
config_true_value, listdir, split_path, ismount, remove_file, \
get_md5_socket, F_SETPIPE_SZ
from swift.common.splice import splice, tee
from swift.common.exceptions import DiskFileQuarantined, DiskFileNotExist, \
DiskFileCollision, DiskFileNoSpace, DiskFileDeviceUnavailable, \
DiskFileDeleted, DiskFileError, DiskFileNotOpen, PathNotDir, \
ReplicationLockTimeout, DiskFileExpired, DiskFileXattrNotSupported
from swift.common.swob import multi_range_iterator
from swift.common.storage_policy import (
get_policy_string, split_policy_string, PolicyError, POLICIES,
REPL_POLICY, EC_POLICY)
from functools import partial
PICKLE_PROTOCOL = 2
ONE_WEEK = 604800
HASH_FILE = 'hashes.pkl'
METADATA_KEY = 'user.swift.metadata'
DROP_CACHE_WINDOW = 1024 * 1024
# These are system-set metadata keys that cannot be changed with a POST.
# They should be lowercase.
DATAFILE_SYSTEM_META = set('content-length content-type deleted etag'.split())
DATADIR_BASE = 'objects'
ASYNCDIR_BASE = 'async_pending'
TMP_BASE = 'tmp'
get_data_dir = partial(get_policy_string, DATADIR_BASE)
get_async_dir = partial(get_policy_string, ASYNCDIR_BASE)
get_tmp_dir = partial(get_policy_string, TMP_BASE)
MD5_OF_EMPTY_STRING = 'd41d8cd98f00b204e9800998ecf8427e'
def _get_filename(fd):
"""
    Helper function to get the file name from a file descriptor or filename.
:param fd: file descriptor or filename.
:returns: the filename.
"""
if hasattr(fd, 'name'):
# fd object
return fd.name
# fd is a filename
return fd
def read_metadata(fd):
"""
Helper function to read the pickled metadata from an object file.
:param fd: file descriptor or filename to load the metadata from
:returns: dictionary of metadata
"""
metadata = ''
key = 0
try:
while True:
metadata += xattr.getxattr(fd, '%s%s' % (METADATA_KEY,
(key or '')))
key += 1
except (IOError, OSError) as e:
for err in 'ENOTSUP', 'EOPNOTSUPP':
if hasattr(errno, err) and e.errno == getattr(errno, err):
msg = "Filesystem at %s does not support xattr" % \
_get_filename(fd)
logging.exception(msg)
raise DiskFileXattrNotSupported(e)
if e.errno == errno.ENOENT:
raise DiskFileNotExist()
# TODO: we might want to re-raise errors that don't denote a missing
# xattr here. Seems to be ENODATA on linux and ENOATTR on BSD/OSX.
return pickle.loads(metadata)
def write_metadata(fd, metadata, xattr_size=65536):
"""
Helper function to write pickled metadata for an object file.
:param fd: file descriptor or filename to write the metadata
:param metadata: metadata to write
"""
metastr = pickle.dumps(metadata, PICKLE_PROTOCOL)
key = 0
while metastr:
try:
xattr.setxattr(fd, '%s%s' % (METADATA_KEY, key or ''),
metastr[:xattr_size])
metastr = metastr[xattr_size:]
key += 1
except IOError as e:
for err in 'ENOTSUP', 'EOPNOTSUPP':
if hasattr(errno, err) and e.errno == getattr(errno, err):
msg = "Filesystem at %s does not support xattr" % \
_get_filename(fd)
logging.exception(msg)
raise DiskFileXattrNotSupported(e)
if e.errno in (errno.ENOSPC, errno.EDQUOT):
msg = "No space left on device for %s" % _get_filename(fd)
logging.exception(msg)
raise DiskFileNoSpace()
raise
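# Illustrative sketch (editor's addition): the two helpers above round-trip
# object metadata through xattrs, so the target file must live on an
# xattr-capable filesystem. The path and metadata values below are made up.
#
#     path = '/srv/node/sda1/tmp/example'
#     write_metadata(path, {'name': '/AUTH_test/cont/obj',
#                           'Content-Length': '0'})
#     assert read_metadata(path)['name'] == '/AUTH_test/cont/obj'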
def extract_policy(obj_path):
"""
Extracts the policy for an object (based on the name of the objects
directory) given the device-relative path to the object. Returns None in
the event that the path is malformed in some way.
The device-relative path is everything after the mount point; for example:
/srv/node/d42/objects-5/179/
485dc017205a81df3af616d917c90179/1401811134.873649.data
would have device-relative path:
objects-5/179/485dc017205a81df3af616d917c90179/1401811134.873649.data
:param obj_path: device-relative path of an object, or the full path
:returns: a :class:`~swift.common.storage_policy.BaseStoragePolicy` or None
"""
try:
obj_portion = obj_path[obj_path.rindex(DATADIR_BASE):]
obj_dirname = obj_portion[:obj_portion.index('/')]
except Exception:
return None
try:
base, policy = split_policy_string(obj_dirname)
except PolicyError:
return None
return policy
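# Illustrative examples (editor's addition) of how the device-relative path
# maps to a policy; the hash and partition values reuse the ones from the
# docstring above and carry no special meaning.
#
#     extract_policy('objects-5/179/485dc017205a81df3af616d917c90179/'
#                    '1401811134.873649.data')   # -> policy with index 5,
#                                                #    or None if unknown
#     extract_policy('objects/179/485dc017205a81df3af616d917c90179/'
#                    '1401811134.873649.data')   # -> POLICIES[0] (legacy)
#     extract_policy('tmp/foo')                  # -> None (malformed path)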
def quarantine_renamer(device_path, corrupted_file_path):
"""
In the case that a file is corrupted, move it to a quarantined
area to allow replication to fix it.
:params device_path: The path to the device the corrupted file is on.
:params corrupted_file_path: The path to the file you want quarantined.
:returns: path (str) of directory the file was moved to
:raises OSError: re-raises non errno.EEXIST / errno.ENOTEMPTY
exceptions from rename
"""
policy = extract_policy(corrupted_file_path)
if policy is None:
# TODO: support a quarantine-unknown location
policy = POLICIES.legacy
from_dir = dirname(corrupted_file_path)
to_dir = join(device_path, 'quarantined',
get_data_dir(policy),
basename(from_dir))
invalidate_hash(dirname(from_dir))
try:
renamer(from_dir, to_dir, fsync=False)
except OSError as e:
if e.errno not in (errno.EEXIST, errno.ENOTEMPTY):
raise
to_dir = "%s-%s" % (to_dir, uuid.uuid4().hex)
renamer(from_dir, to_dir, fsync=False)
return to_dir
def invalidate_hash(suffix_dir):
"""
Invalidates the hash for a suffix_dir in the partition's hashes file.
:param suffix_dir: absolute path to suffix dir whose hash needs
invalidating
"""
suffix = basename(suffix_dir)
partition_dir = dirname(suffix_dir)
hashes_file = join(partition_dir, HASH_FILE)
if not os.path.exists(hashes_file):
return
with lock_path(partition_dir):
try:
with open(hashes_file, 'rb') as fp:
hashes = pickle.load(fp)
if suffix in hashes and not hashes[suffix]:
return
except Exception:
return
hashes[suffix] = None
write_pickle(hashes, hashes_file, partition_dir, PICKLE_PROTOCOL)
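# Illustrative note (editor's addition): hashes.pkl holds a dict keyed by the
# three-character suffix. For the replication manager registered further down,
# the cached value is an md5 hexdigest of the suffix directory's contents, and
# None marks a suffix whose hash must be recomputed -- which is all that
# invalidate_hash() does. A made-up example of the structure:
#
#     {'a83': 'd41d8cd98f00b204e9800998ecf8427e', '07c': None}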
class AuditLocation(object):
"""
Represents an object location to be audited.
Other than being a bucket of data, the only useful thing this does is
stringify to a filesystem path so the auditor's logs look okay.
"""
def __init__(self, path, device, partition, policy):
self.path, self.device, self.partition, self.policy = (
path, device, partition, policy)
def __str__(self):
return str(self.path)
def object_audit_location_generator(devices, mount_check=True, logger=None,
device_dirs=None):
"""
Given a devices path (e.g. "/srv/node"), yield an AuditLocation for all
objects stored under that directory if device_dirs isn't set. If
device_dirs is set, only yield AuditLocation for the objects under the
entries in device_dirs. The AuditLocation only knows the path to the hash
directory, not to the .data file therein (if any). This is to avoid a
double listdir(hash_dir); the DiskFile object will always do one, so
we don't.
:param devices: parent directory of the devices to be audited
:param mount_check: flag to check if a mount check should be performed
on devices
:param logger: a logger object
    :param device_dirs: a list of directories under devices to traverse
"""
if not device_dirs:
device_dirs = listdir(devices)
else:
# remove bogus devices and duplicates from device_dirs
device_dirs = list(
set(listdir(devices)).intersection(set(device_dirs)))
# randomize devices in case of process restart before sweep completed
shuffle(device_dirs)
for device in device_dirs:
if mount_check and not \
ismount(os.path.join(devices, device)):
if logger:
logger.debug(
_('Skipping %s as it is not mounted'), device)
continue
# loop through object dirs for all policies
for dir_ in os.listdir(os.path.join(devices, device)):
if not dir_.startswith(DATADIR_BASE):
continue
try:
base, policy = split_policy_string(dir_)
except PolicyError as e:
if logger:
logger.warn(_('Directory %r does not map '
'to a valid policy (%s)') % (dir_, e))
continue
datadir_path = os.path.join(devices, device, dir_)
partitions = listdir(datadir_path)
for partition in partitions:
part_path = os.path.join(datadir_path, partition)
try:
suffixes = listdir(part_path)
except OSError as e:
if e.errno != errno.ENOTDIR:
raise
continue
for asuffix in suffixes:
suff_path = os.path.join(part_path, asuffix)
try:
hashes = listdir(suff_path)
except OSError as e:
if e.errno != errno.ENOTDIR:
raise
continue
for hsh in hashes:
hsh_path = os.path.join(suff_path, hsh)
yield AuditLocation(hsh_path, device, partition,
policy)
def strip_self(f):
"""
Wrapper to attach module level functions to base class.
"""
def wrapper(self, *args, **kwargs):
return f(*args, **kwargs)
return wrapper
class DiskFileRouter(object):
policy_type_to_manager_cls = {}
@classmethod
def register(cls, policy_type):
"""
Decorator for Storage Policy implementations to register
their DiskFile implementation.
"""
def register_wrapper(diskfile_cls):
if policy_type in cls.policy_type_to_manager_cls:
raise PolicyError(
'%r is already registered for the policy_type %r' % (
cls.policy_type_to_manager_cls[policy_type],
policy_type))
cls.policy_type_to_manager_cls[policy_type] = diskfile_cls
return diskfile_cls
return register_wrapper
def __init__(self, *args, **kwargs):
self.policy_to_manager = {}
for policy in POLICIES:
manager_cls = self.policy_type_to_manager_cls[policy.policy_type]
self.policy_to_manager[policy] = manager_cls(*args, **kwargs)
def __getitem__(self, policy):
return self.policy_to_manager[policy]
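# Illustrative sketch (editor's addition): a policy type binds its manager
# class to the router with the decorator, and lookups are then done per
# policy. The registration shown mirrors the one for REPL_POLICY near the
# bottom of this module; ``conf`` and ``logger`` are stand-ins.
#
#     @DiskFileRouter.register(REPL_POLICY)
#     class DiskFileManager(BaseDiskFileManager):
#         diskfile_cls = DiskFile
#
#     router = DiskFileRouter(conf, logger)
#     mgr = router[POLICIES[0]]     # manager instance for that policy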
class BaseDiskFileManager(object):
"""
Management class for devices, providing common place for shared parameters
and methods not provided by the DiskFile class (which primarily services
the object server REST API layer).
The `get_diskfile()` method is how this implementation creates a `DiskFile`
object.
.. note::
This class is reference implementation specific and not part of the
pluggable on-disk backend API.
.. note::
TODO(portante): Not sure what the right name to recommend here, as
"manager" seemed generic enough, though suggestions are welcome.
:param conf: caller provided configuration object
:param logger: caller provided logger
"""
diskfile_cls = None # must be set by subclasses
invalidate_hash = strip_self(invalidate_hash)
quarantine_renamer = strip_self(quarantine_renamer)
def __init__(self, conf, logger):
self.logger = logger
self.devices = conf.get('devices', '/srv/node')
self.disk_chunk_size = int(conf.get('disk_chunk_size', 65536))
self.keep_cache_size = int(conf.get('keep_cache_size', 5242880))
self.bytes_per_sync = int(conf.get('mb_per_sync', 512)) * 1024 * 1024
self.mount_check = config_true_value(conf.get('mount_check', 'true'))
self.reclaim_age = int(conf.get('reclaim_age', ONE_WEEK))
self.replication_one_per_device = config_true_value(
conf.get('replication_one_per_device', 'true'))
self.replication_lock_timeout = int(conf.get(
'replication_lock_timeout', 15))
threads_per_disk = int(conf.get('threads_per_disk', '0'))
self.threadpools = defaultdict(
lambda: ThreadPool(nthreads=threads_per_disk))
self.use_splice = False
self.pipe_size = None
conf_wants_splice = config_true_value(conf.get('splice', 'no'))
# If the operator wants zero-copy with splice() but we don't have the
# requisite kernel support, complain so they can go fix it.
if conf_wants_splice and not splice.available:
self.logger.warn(
"Use of splice() requested (config says \"splice = %s\"), "
"but the system does not support it. "
"splice() will not be used." % conf.get('splice'))
elif conf_wants_splice and splice.available:
try:
sockfd = get_md5_socket()
os.close(sockfd)
except IOError as err:
# AF_ALG socket support was introduced in kernel 2.6.38; on
# systems with older kernels (or custom-built kernels lacking
# AF_ALG support), we can't use zero-copy.
if err.errno != errno.EAFNOSUPPORT:
raise
self.logger.warn("MD5 sockets not supported. "
"splice() will not be used.")
else:
self.use_splice = True
with open('/proc/sys/fs/pipe-max-size') as f:
max_pipe_size = int(f.read())
self.pipe_size = min(max_pipe_size, self.disk_chunk_size)
def parse_on_disk_filename(self, filename):
"""
Parse an on disk file name.
:param filename: the data file name including extension
:returns: a dict, with keys for timestamp, and ext::
* timestamp is a :class:`~swift.common.utils.Timestamp`
* ext is a string, the file extension including the leading dot or
the empty string if the filename has no extension.
        Subclasses may add further keys to the returned dict.
:raises DiskFileError: if any part of the filename is not able to be
validated.
"""
raise NotImplementedError
def _gather_on_disk_file(self, filename, ext, context, frag_index=None,
**kwargs):
"""
Called by gather_ondisk_files() for each file in an object
datadir in reverse sorted order. If a file is considered part of a
valid on-disk file set it will be added to the context dict, keyed by
its extension. If a file is considered to be obsolete it will be added
to a list stored under the key 'obsolete' in the context dict.
:param filename: name of file to be accepted or not
:param ext: extension part of filename
:param context: a context dict that may have been populated by previous
calls to this method
:returns: True if a valid file set has been found, False otherwise
"""
raise NotImplementedError
def _verify_on_disk_files(self, accepted_files, **kwargs):
"""
Verify that the final combination of on disk files complies with the
diskfile contract.
:param accepted_files: files that have been found and accepted
:returns: True if the file combination is compliant, False otherwise
"""
raise NotImplementedError
def gather_ondisk_files(self, files, include_obsolete=False,
verify=False, **kwargs):
"""
        Given a simple list of file names, iterate over them to determine the
files that constitute a valid object, and optionally determine the
files that are obsolete and could be deleted. Note that some files may
fall into neither category.
:param files: a list of file names.
:param include_obsolete: By default the iteration will stop when a
valid file set has been found. Setting this
argument to True will cause the iteration to
continue in order to find all obsolete files.
:param verify: if True verify that the ondisk file contract has not
been violated, otherwise do not verify.
:returns: a dict that may contain: valid on disk files keyed by their
filename extension; a list of obsolete files stored under the
key 'obsolete'.
"""
files.sort(reverse=True)
results = {}
for afile in files:
ts_file = results.get('.ts')
data_file = results.get('.data')
if not include_obsolete:
assert ts_file is None, "On-disk file search loop" \
" continuing after tombstone, %s, encountered" % ts_file
assert data_file is None, "On-disk file search loop" \
" continuing after data file, %s, encountered" % data_file
ext = splitext(afile)[1]
if self._gather_on_disk_file(
afile, ext, results, **kwargs):
if not include_obsolete:
break
if verify:
assert self._verify_on_disk_files(
results, **kwargs), \
"On-disk file search algorithm contract is broken: %s" \
% results.values()
return results
def get_ondisk_files(self, files, datadir, **kwargs):
"""
        Given a simple list of file names, determine the files to use.
:param files: simple set of files as a python list
:param datadir: directory name files are from for convenience
:returns: a tuple of data, meta, and tombstone
"""
# maintain compatibility with 'legacy' get_ondisk_files return value
accepted_files = self.gather_ondisk_files(files, verify=True, **kwargs)
result = [(join(datadir, accepted_files.get(ext))
if accepted_files.get(ext) else None)
for ext in ('.data', '.meta', '.ts')]
return tuple(result)
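    # Illustrative sketch (editor's addition): with the replication manager
    # registered below, a directory listing like the following (made-up
    # timestamps) collapses to the newest .data plus the newer .meta, with no
    # tombstone:
    #
    #     files = ['1402300001.00000.meta', '1402300000.00000.data',
    #              '1402200000.00000.data']
    #     mgr.get_ondisk_files(files, datadir)
    #     # -> (datadir + '/1402300000.00000.data',
    #     #     datadir + '/1402300001.00000.meta',
    #     #     None)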
def cleanup_ondisk_files(self, hsh_path, reclaim_age=ONE_WEEK, **kwargs):
"""
Clean up on-disk files that are obsolete and gather the set of valid
on-disk files for an object.
:param hsh_path: object hash path
:param reclaim_age: age in seconds at which to remove tombstones
:param frag_index: if set, search for a specific fragment index .data
file, otherwise accept the first valid .data file
:returns: a dict that may contain: valid on disk files keyed by their
filename extension; a list of obsolete files stored under the
key 'obsolete'; a list of files remaining in the directory,
reverse sorted, stored under the key 'files'.
"""
def is_reclaimable(filename):
timestamp = self.parse_on_disk_filename(filename)['timestamp']
return (time.time() - float(timestamp)) > reclaim_age
files = listdir(hsh_path)
files.sort(reverse=True)
results = self.gather_ondisk_files(files, include_obsolete=True,
**kwargs)
# TODO ref to durables here
if '.durable' in results and not results.get('fragments'):
# a .durable with no .data is deleted as soon as it is found
results.setdefault('obsolete', []).append(results.pop('.durable'))
if '.ts' in results and is_reclaimable(results['.ts']):
results.setdefault('obsolete', []).append(results.pop('.ts'))
for filename in results.get('fragments_without_durable', []):
# stray fragments are not deleted until reclaim-age
if is_reclaimable(filename):
results.setdefault('obsolete', []).append(filename)
for filename in results.get('obsolete', []):
remove_file(join(hsh_path, filename))
files.remove(filename)
results['files'] = files
return results
def hash_cleanup_listdir(self, hsh_path, reclaim_age=ONE_WEEK):
"""
List contents of a hash directory and clean up any old files.
For EC policy, delete files older than a .durable or .ts file.
:param hsh_path: object hash path
:param reclaim_age: age in seconds at which to remove tombstones
:returns: list of files remaining in the directory, reverse sorted
"""
# maintain compatibility with 'legacy' hash_cleanup_listdir
# return value
return self.cleanup_ondisk_files(
hsh_path, reclaim_age=reclaim_age)['files']
def _hash_suffix_dir(self, path, mapper, reclaim_age):
hashes = defaultdict(hashlib.md5)
try:
path_contents = sorted(os.listdir(path))
except OSError as err:
if err.errno in (errno.ENOTDIR, errno.ENOENT):
raise PathNotDir()
raise
for hsh in path_contents:
hsh_path = join(path, hsh)
try:
files = self.hash_cleanup_listdir(hsh_path, reclaim_age)
except OSError as err:
if err.errno == errno.ENOTDIR:
partition_path = dirname(path)
objects_path = dirname(partition_path)
device_path = dirname(objects_path)
quar_path = quarantine_renamer(device_path, hsh_path)
logging.exception(
_('Quarantined %(hsh_path)s to %(quar_path)s because '
'it is not a directory'), {'hsh_path': hsh_path,
'quar_path': quar_path})
continue
raise
if not files:
try:
os.rmdir(hsh_path)
except OSError:
pass
for filename in files:
key, value = mapper(filename)
hashes[key].update(value)
try:
os.rmdir(path)
except OSError as e:
if e.errno == errno.ENOENT:
raise PathNotDir()
else:
# if we remove it, pretend like it wasn't there to begin with so
# that the suffix key gets removed
raise PathNotDir()
return hashes
def _hash_suffix(self, path, reclaim_age):
"""
Performs reclamation and returns an md5 of all (remaining) files.
:param reclaim_age: age in seconds at which to remove tombstones
:raises PathNotDir: if given path is not a valid directory
:raises OSError: for non-ENOTDIR errors
"""
raise NotImplementedError
def _get_hashes(self, partition_path, recalculate=None, do_listdir=False,
reclaim_age=None):
"""
Get a list of hashes for the suffix dir. do_listdir causes it to
mistrust the hash cache for suffix existence at the (unexpectedly high)
cost of a listdir. reclaim_age is just passed on to hash_suffix.
:param partition_path: absolute path of partition to get hashes for
:param recalculate: list of suffixes which should be recalculated when
got
:param do_listdir: force existence check for all hashes in the
partition
:param reclaim_age: age at which to remove tombstones
:returns: tuple of (number of suffix dirs hashed, dictionary of hashes)
"""
reclaim_age = reclaim_age or self.reclaim_age
hashed = 0
hashes_file = join(partition_path, HASH_FILE)
modified = False
force_rewrite = False
hashes = {}
mtime = -1
if recalculate is None:
recalculate = []
try:
with open(hashes_file, 'rb') as fp:
hashes = pickle.load(fp)
mtime = getmtime(hashes_file)
except Exception:
do_listdir = True
force_rewrite = True
if do_listdir:
for suff in os.listdir(partition_path):
if len(suff) == 3:
hashes.setdefault(suff, None)
modified = True
hashes.update((suffix, None) for suffix in recalculate)
for suffix, hash_ in hashes.items():
if not hash_:
suffix_dir = join(partition_path, suffix)
try:
hashes[suffix] = self._hash_suffix(suffix_dir, reclaim_age)
hashed += 1
except PathNotDir:
del hashes[suffix]
except OSError:
logging.exception(_('Error hashing suffix'))
modified = True
if modified:
with lock_path(partition_path):
if force_rewrite or not exists(hashes_file) or \
getmtime(hashes_file) == mtime:
write_pickle(
hashes, hashes_file, partition_path, PICKLE_PROTOCOL)
return hashed, hashes
return self._get_hashes(partition_path, recalculate, do_listdir,
reclaim_age)
else:
return hashed, hashes
def construct_dev_path(self, device):
"""
Construct the path to a device without checking if it is mounted.
:param device: name of target device
:returns: full path to the device
"""
return os.path.join(self.devices, device)
def get_dev_path(self, device, mount_check=None):
"""
Return the path to a device, first checking to see if either it
is a proper mount point, or at least a directory depending on
the mount_check configuration option.
:param device: name of target device
:param mount_check: whether or not to check mountedness of device.
Defaults to bool(self.mount_check).
:returns: full path to the device, None if the path to the device is
not a proper mount point or directory.
"""
# we'll do some kind of check unless explicitly forbidden
if mount_check is not False:
if mount_check or self.mount_check:
check = check_mount
else:
check = check_dir
if not check(self.devices, device):
return None
return os.path.join(self.devices, device)
@contextmanager
def replication_lock(self, device):
"""
A context manager that will lock on the device given, if
configured to do so.
:raises ReplicationLockTimeout: If the lock on the device
cannot be granted within the configured timeout.
"""
if self.replication_one_per_device:
dev_path = self.get_dev_path(device)
with lock_path(
dev_path,
timeout=self.replication_lock_timeout,
timeout_class=ReplicationLockTimeout):
yield True
else:
yield True
def pickle_async_update(self, device, account, container, obj, data,
timestamp, policy):
device_path = self.construct_dev_path(device)
async_dir = os.path.join(device_path, get_async_dir(policy))
ohash = hash_path(account, container, obj)
self.threadpools[device].run_in_thread(
write_pickle,
data,
os.path.join(async_dir, ohash[-3:], ohash + '-' +
Timestamp(timestamp).internal),
os.path.join(device_path, get_tmp_dir(policy)))
self.logger.increment('async_pendings')
def get_diskfile(self, device, partition, account, container, obj,
policy, **kwargs):
dev_path = self.get_dev_path(device)
if not dev_path:
raise DiskFileDeviceUnavailable()
return self.diskfile_cls(self, dev_path, self.threadpools[device],
partition, account, container, obj,
policy=policy, use_splice=self.use_splice,
pipe_size=self.pipe_size, **kwargs)
def object_audit_location_generator(self, device_dirs=None):
return object_audit_location_generator(self.devices, self.mount_check,
self.logger, device_dirs)
def get_diskfile_from_audit_location(self, audit_location):
dev_path = self.get_dev_path(audit_location.device, mount_check=False)
return self.diskfile_cls.from_hash_dir(
self, audit_location.path, dev_path,
audit_location.partition, policy=audit_location.policy)
def get_diskfile_from_hash(self, device, partition, object_hash,
policy, **kwargs):
"""
Returns a DiskFile instance for an object at the given
object_hash. Just in case someone thinks of refactoring, be
sure DiskFileDeleted is *not* raised, but the DiskFile
instance representing the tombstoned object is returned
instead.
:raises DiskFileNotExist: if the object does not exist
"""
dev_path = self.get_dev_path(device)
if not dev_path:
raise DiskFileDeviceUnavailable()
object_path = os.path.join(
dev_path, get_data_dir(policy), str(partition), object_hash[-3:],
object_hash)
try:
filenames = self.hash_cleanup_listdir(object_path,
self.reclaim_age)
except OSError as err:
if err.errno == errno.ENOTDIR:
quar_path = self.quarantine_renamer(dev_path, object_path)
logging.exception(
_('Quarantined %(object_path)s to %(quar_path)s because '
'it is not a directory'), {'object_path': object_path,
'quar_path': quar_path})
raise DiskFileNotExist()
if err.errno != errno.ENOENT:
raise
raise DiskFileNotExist()
if not filenames:
raise DiskFileNotExist()
try:
metadata = read_metadata(os.path.join(object_path, filenames[-1]))
except EOFError:
raise DiskFileNotExist()
try:
account, container, obj = split_path(
metadata.get('name', ''), 3, 3, True)
except ValueError:
raise DiskFileNotExist()
return self.diskfile_cls(self, dev_path, self.threadpools[device],
partition, account, container, obj,
policy=policy, **kwargs)
def get_hashes(self, device, partition, suffixes, policy):
dev_path = self.get_dev_path(device)
if not dev_path:
raise DiskFileDeviceUnavailable()
partition_path = os.path.join(dev_path, get_data_dir(policy),
partition)
if not os.path.exists(partition_path):
mkdirs(partition_path)
_junk, hashes = self.threadpools[device].force_run_in_thread(
self._get_hashes, partition_path, recalculate=suffixes)
return hashes
def _listdir(self, path):
try:
return os.listdir(path)
except OSError as err:
if err.errno != errno.ENOENT:
self.logger.error(
'ERROR: Skipping %r due to error with listdir attempt: %s',
path, err)
return []
def yield_suffixes(self, device, partition, policy):
"""
Yields tuples of (full_path, suffix_only) for suffixes stored
on the given device and partition.
"""
dev_path = self.get_dev_path(device)
if not dev_path:
raise DiskFileDeviceUnavailable()
partition_path = os.path.join(dev_path, get_data_dir(policy),
partition)
for suffix in self._listdir(partition_path):
if len(suffix) != 3:
continue
try:
int(suffix, 16)
except ValueError:
continue
yield (os.path.join(partition_path, suffix), suffix)
def yield_hashes(self, device, partition, policy,
suffixes=None, **kwargs):
"""
Yields tuples of (full_path, hash_only, timestamp) for object
information stored for the given device, partition, and
(optionally) suffixes. If suffixes is None, all stored
suffixes will be searched for object hashes. Note that if
suffixes is not None but empty, such as [], then nothing will
be yielded.
"""
dev_path = self.get_dev_path(device)
if not dev_path:
raise DiskFileDeviceUnavailable()
if suffixes is None:
suffixes = self.yield_suffixes(device, partition, policy)
else:
partition_path = os.path.join(dev_path,
get_data_dir(policy),
str(partition))
suffixes = (
(os.path.join(partition_path, suffix), suffix)
for suffix in suffixes)
for suffix_path, suffix in suffixes:
for object_hash in self._listdir(suffix_path):
object_path = os.path.join(suffix_path, object_hash)
newest_valid_file = None
try:
results = self.cleanup_ondisk_files(
object_path, self.reclaim_age, **kwargs)
newest_valid_file = (results.get('.meta')
or results.get('.data')
or results.get('.ts'))
if newest_valid_file:
timestamp = self.parse_on_disk_filename(
newest_valid_file)['timestamp']
yield (object_path, object_hash, timestamp.internal)
except AssertionError as err:
self.logger.debug('Invalid file set in %s (%s)' % (
object_path, err))
except DiskFileError as err:
self.logger.debug(
'Invalid diskfile filename %r in %r (%s)' % (
newest_valid_file, object_path, err))
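    # Illustrative note (editor's addition): yield_hashes() emits tuples of
    # (hash_dir_path, object_hash, timestamp.internal); with made-up values a
    # single item looks like
    #
    #     ('/srv/node/sda1/objects/1023/179/'
    #      '485dc017205a81df3af616d917c90179',
    #      '485dc017205a81df3af616d917c90179',
    #      '1401811134.87364')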
class BaseDiskFileWriter(object):
"""
Encapsulation of the write context for servicing PUT REST API
requests. Serves as the context manager object for the
:class:`swift.obj.diskfile.DiskFile` class's
:func:`swift.obj.diskfile.DiskFile.create` method.
.. note::
It is the responsibility of the
:func:`swift.obj.diskfile.DiskFile.create` method context manager to
close the open file descriptor.
.. note::
The arguments to the constructor are considered implementation
specific. The API does not define the constructor arguments.
:param name: name of object from REST API
:param datadir: on-disk directory object will end up in on
:func:`swift.obj.diskfile.DiskFileWriter.put`
:param fd: open file descriptor of temporary file to receive data
:param tmppath: full path name of the opened file descriptor
    :param bytes_per_sync: number of bytes written between sync calls
:param threadpool: internal thread pool to use for disk operations
:param diskfile: the diskfile creating this DiskFileWriter instance
"""
def __init__(self, name, datadir, fd, tmppath, bytes_per_sync, threadpool,
diskfile):
# Parameter tracking
self._name = name
self._datadir = datadir
self._fd = fd
self._tmppath = tmppath
self._bytes_per_sync = bytes_per_sync
self._threadpool = threadpool
self._diskfile = diskfile
# Internal attributes
self._upload_size = 0
self._last_sync = 0
self._extension = '.data'
self._put_succeeded = False
@property
def manager(self):
return self._diskfile.manager
@property
def put_succeeded(self):
return self._put_succeeded
def write(self, chunk):
"""
Write a chunk of data to disk. All invocations of this method must
        come before invoking the
        :func:`swift.obj.diskfile.DiskFileWriter.put` method.
For this implementation, the data is written into a temporary file.
:param chunk: the chunk of data to write as a string object
:returns: the total number of bytes written to an object
"""
def _write_entire_chunk(chunk):
while chunk:
written = os.write(self._fd, chunk)
self._upload_size += written
chunk = chunk[written:]
self._threadpool.run_in_thread(_write_entire_chunk, chunk)
# For large files sync every 512MB (by default) written
diff = self._upload_size - self._last_sync
if diff >= self._bytes_per_sync:
self._threadpool.force_run_in_thread(fdatasync, self._fd)
drop_buffer_cache(self._fd, self._last_sync, diff)
self._last_sync = self._upload_size
return self._upload_size
def _finalize_put(self, metadata, target_path, cleanup):
# Write the metadata before calling fsync() so that both data and
# metadata are flushed to disk.
write_metadata(self._fd, metadata)
# We call fsync() before calling drop_cache() to lower the amount of
# redundant work the drop cache code will perform on the pages (now
# that after fsync the pages will be all clean).
fsync(self._fd)
# From the Department of the Redundancy Department, make sure we call
# drop_cache() after fsync() to avoid redundant work (pages all
# clean).
drop_buffer_cache(self._fd, 0, self._upload_size)
self.manager.invalidate_hash(dirname(self._datadir))
# After the rename completes, this object will be available for other
# requests to reference.
renamer(self._tmppath, target_path)
# If rename is successful, flag put as succeeded. This is done to avoid
# unnecessary os.unlink() of tempfile later. As renamer() has
# succeeded, the tempfile would no longer exist at its original path.
self._put_succeeded = True
if cleanup:
try:
self.manager.hash_cleanup_listdir(self._datadir)
except OSError:
logging.exception(_('Problem cleaning up %s'), self._datadir)
def put(self, metadata):
"""
Finalize writing the file on disk.
:param metadata: dictionary of metadata to be associated with the
object
"""
raise NotImplementedError
def commit(self, timestamp):
"""
Perform any operations necessary to mark the object as durable. For
replication policy type this is a no-op.
:param timestamp: object put timestamp, an instance of
:class:`~swift.common.utils.Timestamp`
"""
pass
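    # Illustrative sketch (editor's addition): the call sequence the object
    # server's PUT path is expected to follow, with made-up body and metadata
    # values. commit() is a no-op here, but erasure-coded subclasses rely on
    # it to mark the object durable, so callers should always invoke it.
    #
    #     with diskfile.create(size=len(body)) as writer:
    #         writer.write(body)
    #         writer.put({'X-Timestamp': timestamp.internal,
    #                     'Content-Length': str(len(body)),
    #                     'ETag': hashlib.md5(body).hexdigest()})
    #         writer.commit(timestamp)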
class BaseDiskFileReader(object):
"""
Encapsulation of the WSGI read context for servicing GET REST API
requests. Serves as the context manager object for the
:class:`swift.obj.diskfile.DiskFile` class's
:func:`swift.obj.diskfile.DiskFile.reader` method.
.. note::
The quarantining behavior of this method is considered implementation
specific, and is not required of the API.
.. note::
The arguments to the constructor are considered implementation
specific. The API does not define the constructor arguments.
:param fp: open file object pointer reference
:param data_file: on-disk data file name for the object
:param obj_size: verified on-disk size of the object
:param etag: expected metadata etag value for entire file
:param threadpool: thread pool to use for read operations
:param disk_chunk_size: size of reads from disk in bytes
:param keep_cache_size: maximum object size that will be kept in cache
:param device_path: on-disk device path, used when quarantining an obj
:param logger: logger caller wants this object to use
:param quarantine_hook: 1-arg callable called w/reason when quarantined
:param use_splice: if true, use zero-copy splice() to send data
:param pipe_size: size of pipe buffer used in zero-copy operations
:param diskfile: the diskfile creating this DiskFileReader instance
:param keep_cache: should resulting reads be kept in the buffer cache
"""
def __init__(self, fp, data_file, obj_size, etag, threadpool,
disk_chunk_size, keep_cache_size, device_path, logger,
quarantine_hook, use_splice, pipe_size, diskfile,
keep_cache=False):
# Parameter tracking
self._fp = fp
self._data_file = data_file
self._obj_size = obj_size
self._etag = etag
self._threadpool = threadpool
self._diskfile = diskfile
self._disk_chunk_size = disk_chunk_size
self._device_path = device_path
self._logger = logger
self._quarantine_hook = quarantine_hook
self._use_splice = use_splice
self._pipe_size = pipe_size
if keep_cache:
# Caller suggests we keep this in cache, only do it if the
# object's size is less than the maximum.
self._keep_cache = obj_size < keep_cache_size
else:
self._keep_cache = False
# Internal Attributes
self._iter_etag = None
self._bytes_read = 0
self._started_at_0 = False
self._read_to_eof = False
self._md5_of_sent_bytes = None
self._suppress_file_closing = False
self._quarantined_dir = None
@property
def manager(self):
return self._diskfile.manager
def __iter__(self):
"""Returns an iterator over the data file."""
try:
dropped_cache = 0
self._bytes_read = 0
self._started_at_0 = False
self._read_to_eof = False
if self._fp.tell() == 0:
self._started_at_0 = True
self._iter_etag = hashlib.md5()
while True:
chunk = self._threadpool.run_in_thread(
self._fp.read, self._disk_chunk_size)
if chunk:
if self._iter_etag:
self._iter_etag.update(chunk)
self._bytes_read += len(chunk)
if self._bytes_read - dropped_cache > DROP_CACHE_WINDOW:
self._drop_cache(self._fp.fileno(), dropped_cache,
self._bytes_read - dropped_cache)
dropped_cache = self._bytes_read
yield chunk
else:
self._read_to_eof = True
self._drop_cache(self._fp.fileno(), dropped_cache,
self._bytes_read - dropped_cache)
break
finally:
if not self._suppress_file_closing:
self.close()
def can_zero_copy_send(self):
return self._use_splice
def zero_copy_send(self, wsockfd):
"""
Does some magic with splice() and tee() to move stuff from disk to
network without ever touching userspace.
:param wsockfd: file descriptor (integer) of the socket out which to
send data
"""
# Note: if we ever add support for zero-copy ranged GET responses,
# we'll have to make this conditional.
self._started_at_0 = True
rfd = self._fp.fileno()
client_rpipe, client_wpipe = os.pipe()
hash_rpipe, hash_wpipe = os.pipe()
md5_sockfd = get_md5_socket()
# The actual amount allocated to the pipe may be rounded up to the
# nearest multiple of the page size. If we have the memory allocated,
# we may as well use it.
#
# Note: this will raise IOError on failure, so we don't bother
# checking the return value.
pipe_size = fcntl.fcntl(client_rpipe, F_SETPIPE_SZ, self._pipe_size)
fcntl.fcntl(hash_rpipe, F_SETPIPE_SZ, pipe_size)
dropped_cache = 0
self._bytes_read = 0
try:
while True:
# Read data from disk to pipe
(bytes_in_pipe, _1, _2) = self._threadpool.run_in_thread(
splice, rfd, None, client_wpipe, None, pipe_size, 0)
if bytes_in_pipe == 0:
self._read_to_eof = True
self._drop_cache(rfd, dropped_cache,
self._bytes_read - dropped_cache)
break
self._bytes_read += bytes_in_pipe
# "Copy" data from pipe A to pipe B (really just some pointer
# manipulation in the kernel, not actual copying).
bytes_copied = tee(client_rpipe, hash_wpipe, bytes_in_pipe, 0)
if bytes_copied != bytes_in_pipe:
# We teed data between two pipes of equal size, and the
# destination pipe was empty. If, somehow, the destination
# pipe was full before all the data was teed, we should
# fail here. If we don't raise an exception, then we will
# have the incorrect MD5 hash once the object has been
# sent out, causing a false-positive quarantine.
raise Exception("tee() failed: tried to move %d bytes, "
"but only moved %d" %
(bytes_in_pipe, bytes_copied))
# Take the data and feed it into an in-kernel MD5 socket. The
# MD5 socket hashes data that is written to it. Reading from
# it yields the MD5 checksum of the written data.
#
# Note that we don't have to worry about splice() returning
# None here (which happens on EWOULDBLOCK); we're splicing
# $bytes_in_pipe bytes from a pipe with exactly that many
# bytes in it, so read won't block, and we're splicing it into
# an MD5 socket, which synchronously hashes any data sent to
# it, so writing won't block either.
(hashed, _1, _2) = splice(hash_rpipe, None, md5_sockfd, None,
bytes_in_pipe, splice.SPLICE_F_MORE)
if hashed != bytes_in_pipe:
raise Exception("md5 socket didn't take all the data? "
"(tried to write %d, but wrote %d)" %
(bytes_in_pipe, hashed))
while bytes_in_pipe > 0:
try:
res = splice(client_rpipe, None, wsockfd, None,
bytes_in_pipe, 0)
bytes_in_pipe -= res[0]
except IOError as exc:
if exc.errno == errno.EWOULDBLOCK:
trampoline(wsockfd, write=True)
else:
raise
if self._bytes_read - dropped_cache > DROP_CACHE_WINDOW:
self._drop_cache(rfd, dropped_cache,
self._bytes_read - dropped_cache)
dropped_cache = self._bytes_read
finally:
# Linux MD5 sockets return '00000000000000000000000000000000' for
# the checksum if you didn't write any bytes to them, instead of
# returning the correct value.
if self._bytes_read > 0:
bin_checksum = os.read(md5_sockfd, 16)
hex_checksum = ''.join("%02x" % ord(c) for c in bin_checksum)
else:
hex_checksum = MD5_OF_EMPTY_STRING
self._md5_of_sent_bytes = hex_checksum
os.close(client_rpipe)
os.close(client_wpipe)
os.close(hash_rpipe)
os.close(hash_wpipe)
os.close(md5_sockfd)
self.close()
def app_iter_range(self, start, stop):
"""Returns an iterator over the data file for range (start, stop)"""
if start or start == 0:
self._fp.seek(start)
if stop is not None:
length = stop - start
else:
length = None
try:
for chunk in self:
if length is not None:
length -= len(chunk)
if length < 0:
# Chop off the extra:
yield chunk[:length]
break
yield chunk
finally:
if not self._suppress_file_closing:
self.close()
def app_iter_ranges(self, ranges, content_type, boundary, size):
"""Returns an iterator over the data file for a set of ranges"""
if not ranges:
yield ''
else:
try:
self._suppress_file_closing = True
for chunk in multi_range_iterator(
ranges, content_type, boundary, size,
self.app_iter_range):
yield chunk
finally:
self._suppress_file_closing = False
self.close()
def _drop_cache(self, fd, offset, length):
"""Method for no-oping buffer cache drop method."""
if not self._keep_cache:
drop_buffer_cache(fd, offset, length)
def _quarantine(self, msg):
self._quarantined_dir = self._threadpool.run_in_thread(
self.manager.quarantine_renamer, self._device_path,
self._data_file)
self._logger.warn("Quarantined object %s: %s" % (
self._data_file, msg))
self._logger.increment('quarantines')
self._quarantine_hook(msg)
def _handle_close_quarantine(self):
"""Check if file needs to be quarantined"""
if self._iter_etag and not self._md5_of_sent_bytes:
self._md5_of_sent_bytes = self._iter_etag.hexdigest()
if self._bytes_read != self._obj_size:
self._quarantine(
"Bytes read: %s, does not match metadata: %s" % (
self._bytes_read, self._obj_size))
elif self._md5_of_sent_bytes and \
self._etag != self._md5_of_sent_bytes:
self._quarantine(
"ETag %s and file's md5 %s do not match" % (
self._etag, self._md5_of_sent_bytes))
def close(self):
"""
Close the open file handle if present.
For this specific implementation, this method will handle quarantining
the file if necessary.
"""
if self._fp:
try:
if self._started_at_0 and self._read_to_eof:
self._handle_close_quarantine()
except DiskFileQuarantined:
raise
except (Exception, Timeout) as e:
self._logger.error(_(
'ERROR DiskFile %(data_file)s'
' close failure: %(exc)s : %(stack)s'),
{'exc': e, 'stack': ''.join(traceback.format_stack()),
'data_file': self._data_file})
finally:
fp, self._fp = self._fp, None
fp.close()
class BaseDiskFile(object):
"""
Manage object files.
This specific implementation manages object files on a disk formatted with
a POSIX-compliant file system that supports extended attributes as
metadata on a file or directory.
.. note::
The arguments to the constructor are considered implementation
specific. The API does not define the constructor arguments.
The following path format is used for data file locations:
    <devices_path>/<device_dir>/<datadir>/<partdir>/<suffixdir>/<hashdir>/
<datafile>.<ext>
:param mgr: associated DiskFileManager instance
:param device_path: path to the target device or drive
:param threadpool: thread pool to use for blocking operations
:param partition: partition on the device in which the object lives
:param account: account name for the object
:param container: container name for the object
:param obj: object name for the object
:param _datadir: override the full datadir otherwise constructed here
:param policy: the StoragePolicy instance
:param use_splice: if true, use zero-copy splice() to send data
:param pipe_size: size of pipe buffer used in zero-copy operations
"""
reader_cls = None # must be set by subclasses
writer_cls = None # must be set by subclasses
def __init__(self, mgr, device_path, threadpool, partition,
account=None, container=None, obj=None, _datadir=None,
policy=None, use_splice=False, pipe_size=None, **kwargs):
self._manager = mgr
self._device_path = device_path
self._threadpool = threadpool or ThreadPool(nthreads=0)
self._logger = mgr.logger
self._disk_chunk_size = mgr.disk_chunk_size
self._bytes_per_sync = mgr.bytes_per_sync
self._use_splice = use_splice
self._pipe_size = pipe_size
self.policy = policy
if account and container and obj:
self._name = '/' + '/'.join((account, container, obj))
self._account = account
self._container = container
self._obj = obj
name_hash = hash_path(account, container, obj)
self._datadir = join(
device_path, storage_directory(get_data_dir(policy),
partition, name_hash))
else:
# gets populated when we read the metadata
self._name = None
self._account = None
self._container = None
self._obj = None
self._datadir = None
self._tmpdir = join(device_path, get_tmp_dir(policy))
self._metadata = None
self._data_file = None
self._fp = None
self._quarantined_dir = None
self._content_length = None
if _datadir:
self._datadir = _datadir
else:
name_hash = hash_path(account, container, obj)
self._datadir = join(
device_path, storage_directory(get_data_dir(policy),
partition, name_hash))
@property
def manager(self):
return self._manager
@property
def account(self):
return self._account
@property
def container(self):
return self._container
@property
def obj(self):
return self._obj
@property
def content_length(self):
if self._metadata is None:
raise DiskFileNotOpen()
return self._content_length
@property
def timestamp(self):
if self._metadata is None:
raise DiskFileNotOpen()
return Timestamp(self._metadata.get('X-Timestamp'))
@classmethod
def from_hash_dir(cls, mgr, hash_dir_path, device_path, partition, policy):
return cls(mgr, device_path, None, partition, _datadir=hash_dir_path,
policy=policy)
def open(self):
"""
Open the object.
This implementation opens the data file representing the object, reads
the associated metadata in the extended attributes, additionally
combining metadata from fast-POST `.meta` files.
.. note::
An implementation is allowed to raise any of the following
exceptions, but is only required to raise `DiskFileNotExist` when
the object representation does not exist.
:raises DiskFileCollision: on name mis-match with metadata
:raises DiskFileNotExist: if the object does not exist
:raises DiskFileDeleted: if the object was previously deleted
:raises DiskFileQuarantined: if while reading metadata of the file
                                     some data did not pass cross checks
:returns: itself for use as a context manager
"""
data_file, meta_file, ts_file = self._get_ondisk_file()
if not data_file:
raise self._construct_exception_from_ts_file(ts_file)
self._fp = self._construct_from_data_file(
data_file, meta_file)
# This method must populate the internal _metadata attribute.
self._metadata = self._metadata or {}
self._data_file = data_file
return self
def __enter__(self):
"""
Context enter.
.. note::
            An implementation shall raise `DiskFileNotOpen` when the caller
            has not
previously invoked the :func:`swift.obj.diskfile.DiskFile.open`
method.
"""
if self._metadata is None:
raise DiskFileNotOpen()
return self
def __exit__(self, t, v, tb):
"""
Context exit.
.. note::
This method will be invoked by the object server while servicing
the REST API *before* the object has actually been read. It is the
responsibility of the implementation to properly handle that.
"""
if self._fp is not None:
fp, self._fp = self._fp, None
fp.close()
def _quarantine(self, data_file, msg):
"""
Quarantine a file; responsible for incrementing the associated logger's
count of quarantines.
:param data_file: full path of data file to quarantine
:param msg: reason for quarantining to be included in the exception
:returns: DiskFileQuarantined exception object
"""
self._quarantined_dir = self._threadpool.run_in_thread(
self.manager.quarantine_renamer, self._device_path, data_file)
self._logger.warn("Quarantined object %s: %s" % (
data_file, msg))
self._logger.increment('quarantines')
return DiskFileQuarantined(msg)
def _get_ondisk_file(self):
"""
Do the work to figure out if the data directory exists, and if so,
determine the on-disk files to use.
:returns: a tuple of data, meta and ts (tombstone) files, in one of
three states:
* all three are None
data directory does not exist, or there are no files in
that directory
* ts_file is not None, data_file is None, meta_file is None
object is considered deleted
* data_file is not None, ts_file is None
object exists, and optionally has fast-POST metadata
"""
raise NotImplementedError
def _construct_exception_from_ts_file(self, ts_file):
"""
If a tombstone is present it means the object is considered
deleted. We just need to pull the metadata from the tombstone file
which has the timestamp to construct the deleted exception. If there
was no tombstone, just report it does not exist.
:param ts_file: the tombstone file name found on disk
:returns: DiskFileDeleted if the ts_file was provided, else
DiskFileNotExist
"""
if not ts_file:
exc = DiskFileNotExist()
else:
try:
metadata = self._failsafe_read_metadata(ts_file, ts_file)
except DiskFileQuarantined:
# If the tombstone's corrupted, quarantine it and pretend it
# wasn't there
exc = DiskFileNotExist()
else:
# All well and good that we have found a tombstone file, but
# we don't have a data file so we are just going to raise an
# exception that we could not find the object, providing the
# tombstone's timestamp.
exc = DiskFileDeleted(metadata=metadata)
return exc
def _verify_name_matches_hash(self, data_file):
hash_from_fs = os.path.basename(self._datadir)
hash_from_name = hash_path(self._name.lstrip('/'))
if hash_from_fs != hash_from_name:
raise self._quarantine(
data_file,
"Hash of name in metadata does not match directory name")
def _verify_data_file(self, data_file, fp):
"""
Verify the metadata's name value matches what we think the object is
named.
:param data_file: data file name being consider, used when quarantines
occur
:param fp: open file pointer so that we can `fstat()` the file to
verify the on-disk size with Content-Length metadata value
:raises DiskFileCollision: if the metadata stored name does not match
the referenced name of the file
:raises DiskFileExpired: if the object has expired
:raises DiskFileQuarantined: if data inconsistencies were detected
between the metadata and the file-system
metadata
"""
try:
mname = self._metadata['name']
except KeyError:
raise self._quarantine(data_file, "missing name metadata")
else:
if mname != self._name:
self._logger.error(
_('Client path %(client)s does not match '
'path stored in object metadata %(meta)s'),
{'client': self._name, 'meta': mname})
raise DiskFileCollision('Client path does not match path '
'stored in object metadata')
try:
x_delete_at = int(self._metadata['X-Delete-At'])
except KeyError:
pass
except ValueError:
# Quarantine, the x-delete-at key is present but not an
# integer.
raise self._quarantine(
data_file, "bad metadata x-delete-at value %s" % (
self._metadata['X-Delete-At']))
else:
if x_delete_at <= time.time():
raise DiskFileExpired(metadata=self._metadata)
try:
metadata_size = int(self._metadata['Content-Length'])
except KeyError:
raise self._quarantine(
data_file, "missing content-length in metadata")
except ValueError:
# Quarantine, the content-length key is present but not an
# integer.
raise self._quarantine(
data_file, "bad metadata content-length value %s" % (
self._metadata['Content-Length']))
fd = fp.fileno()
try:
statbuf = os.fstat(fd)
except OSError as err:
# Quarantine, we can't successfully stat the file.
raise self._quarantine(data_file, "not stat-able: %s" % err)
else:
obj_size = statbuf.st_size
if obj_size != metadata_size:
raise self._quarantine(
data_file, "metadata content-length %s does"
" not match actual object size %s" % (
metadata_size, statbuf.st_size))
self._content_length = obj_size
return obj_size
def _failsafe_read_metadata(self, source, quarantine_filename=None):
# Takes source and filename separately so we can read from an open
# file if we have one
try:
return read_metadata(source)
except (DiskFileXattrNotSupported, DiskFileNotExist):
raise
except Exception as err:
raise self._quarantine(
quarantine_filename,
"Exception reading metadata: %s" % err)
def _construct_from_data_file(self, data_file, meta_file):
"""
Open the `.data` file to fetch its metadata, and fetch the metadata
from the fast-POST `.meta` file as well if it exists, merging them
properly.
:param data_file: on-disk `.data` file being considered
:param meta_file: on-disk fast-POST `.meta` file being considered
:returns: an opened data file pointer
:raises DiskFileError: various exceptions from
:func:`swift.obj.diskfile.DiskFile._verify_data_file`
"""
fp = open(data_file, 'rb')
datafile_metadata = self._failsafe_read_metadata(fp, data_file)
if meta_file:
self._metadata = self._failsafe_read_metadata(meta_file, meta_file)
sys_metadata = dict(
[(key, val) for key, val in datafile_metadata.items()
if key.lower() in DATAFILE_SYSTEM_META
or is_sys_meta('object', key)])
self._metadata.update(sys_metadata)
else:
self._metadata = datafile_metadata
if self._name is None:
# If we don't know our name, we were just given a hash dir at
# instantiation, so we'd better validate that the name hashes back
# to us
self._name = self._metadata['name']
self._verify_name_matches_hash(data_file)
self._verify_data_file(data_file, fp)
return fp
def get_metadata(self):
"""
Provide the metadata for a previously opened object as a dictionary.
:returns: object's metadata dictionary
:raises DiskFileNotOpen: if the
:func:`swift.obj.diskfile.DiskFile.open` method was not previously
invoked
"""
if self._metadata is None:
raise DiskFileNotOpen()
return self._metadata
def read_metadata(self):
"""
Return the metadata for an object without requiring the caller to open
the object first.
:returns: metadata dictionary for an object
:raises DiskFileError: this implementation will raise the same
errors as the `open()` method.
"""
with self.open():
return self.get_metadata()
def reader(self, keep_cache=False,
_quarantine_hook=lambda m: None):
"""
Return a :class:`swift.common.swob.Response` class compatible
"`app_iter`" object as defined by
:class:`swift.obj.diskfile.DiskFileReader`.
For this implementation, the responsibility of closing the open file
is passed to the :class:`swift.obj.diskfile.DiskFileReader` object.
:param keep_cache: caller's preference for keeping data read in the
OS buffer cache
:param _quarantine_hook: 1-arg callable called when obj quarantined;
the arg is the reason for quarantine.
Default is to ignore it.
Not needed by the REST layer.
:returns: a :class:`swift.obj.diskfile.DiskFileReader` object
"""
dr = self.reader_cls(
self._fp, self._data_file, int(self._metadata['Content-Length']),
self._metadata['ETag'], self._threadpool, self._disk_chunk_size,
self._manager.keep_cache_size, self._device_path, self._logger,
use_splice=self._use_splice, quarantine_hook=_quarantine_hook,
pipe_size=self._pipe_size, diskfile=self, keep_cache=keep_cache)
# At this point the reader object is now responsible for closing
# the file pointer.
self._fp = None
return dr
@contextmanager
def create(self, size=None):
"""
Context manager to create a file. We create a temporary file first, and
then return a DiskFileWriter object to encapsulate the state.
.. note::
An implementation is not required to perform on-disk
preallocations even if the parameter is specified. But if it does
and it fails, it must raise a `DiskFileNoSpace` exception.
:param size: optional initial size of file to explicitly allocate on
disk
:raises DiskFileNoSpace: if a size is specified and allocation fails
"""
if not exists(self._tmpdir):
mkdirs(self._tmpdir)
try:
fd, tmppath = mkstemp(dir=self._tmpdir)
except OSError as err:
if err.errno in (errno.ENOSPC, errno.EDQUOT):
# No more inodes in filesystem
raise DiskFileNoSpace()
raise
dfw = None
try:
if size is not None and size > 0:
try:
fallocate(fd, size)
except OSError as err:
if err.errno in (errno.ENOSPC, errno.EDQUOT):
raise DiskFileNoSpace()
raise
dfw = self.writer_cls(self._name, self._datadir, fd, tmppath,
bytes_per_sync=self._bytes_per_sync,
threadpool=self._threadpool,
diskfile=self)
yield dfw
finally:
try:
os.close(fd)
except OSError:
pass
if (dfw is None) or (not dfw.put_succeeded):
# Try removing the temp file only if put did NOT succeed.
#
# dfw.put_succeeded is set to True after renamer() succeeds in
# DiskFileWriter._finalize_put()
try:
os.unlink(tmppath)
except OSError:
self._logger.exception('Error removing tempfile: %s' %
tmppath)
def write_metadata(self, metadata):
"""
Write a block of metadata to an object without requiring the caller to
create the object first. Supports fast-POST behavior semantics.
:param metadata: dictionary of metadata to be associated with the
object
:raises DiskFileError: this implementation will raise the same
errors as the `create()` method.
"""
with self.create() as writer:
writer._extension = '.meta'
writer.put(metadata)
def delete(self, timestamp):
"""
Delete the object.
This implementation creates a tombstone file using the given
timestamp, and removes any older versions of the object file. Any
file that has an older timestamp than timestamp will be deleted.
.. note::
An implementation is free to use or ignore the timestamp
parameter.
:param timestamp: timestamp to compare with each file
:raises DiskFileError: this implementation will raise the same
errors as the `create()` method.
"""
# this is dumb, only tests send in strings
timestamp = Timestamp(timestamp)
with self.create() as deleter:
deleter._extension = '.ts'
deleter.put({'X-Timestamp': timestamp.internal})
class DiskFileReader(BaseDiskFileReader):
pass
class DiskFileWriter(BaseDiskFileWriter):
def put(self, metadata):
"""
Finalize writing the file on disk.
For this implementation, this method is responsible for renaming the
temporary file to the final name and directory location. This method
should be called after the final call to
:func:`swift.obj.diskfile.DiskFileWriter.write`.
:param metadata: dictionary of metadata to be associated with the
object
"""
timestamp = Timestamp(metadata['X-Timestamp']).internal
metadata['name'] = self._name
target_path = join(self._datadir, timestamp + self._extension)
cleanup = True
self._threadpool.force_run_in_thread(
self._finalize_put, metadata, target_path, cleanup)
class DiskFile(BaseDiskFile):
reader_cls = DiskFileReader
writer_cls = DiskFileWriter
def _get_ondisk_file(self):
"""
Do the work to figure out if the data directory exists, and if so,
determine the on-disk files to use.
:returns: a tuple of data, meta and ts (tombstone) files, in one of
three states:
* all three are None
data directory does not exist, or there are no files in
that directory
* ts_file is not None, data_file is None, meta_file is None
object is considered deleted
* data_file is not None, ts_file is None
object exists, and optionally has fast-POST metadata
"""
try:
files = os.listdir(self._datadir)
except OSError as err:
if err.errno == errno.ENOTDIR:
# If there's a file here instead of a directory, quarantine
# it; something's gone wrong somewhere.
raise self._quarantine(
# hack: quarantine_renamer actually renames the directory
# enclosing the filename you give it, but here we just
# want this one file and not its parent.
os.path.join(self._datadir, "made-up-filename"),
"Expected directory, found file at %s" % self._datadir)
elif err.errno != errno.ENOENT:
raise DiskFileError(
"Error listing directory %s: %s" % (self._datadir, err))
# The data directory does not exist, so the object cannot exist.
fileset = (None, None, None)
else:
fileset = self.manager.get_ondisk_files(files, self._datadir)
return fileset
@DiskFileRouter.register(REPL_POLICY)
class DiskFileManager(BaseDiskFileManager):
diskfile_cls = DiskFile
def parse_on_disk_filename(self, filename):
"""
        Returns the timestamp extracted from a .data file name.
:param filename: the data file name including extension
:returns: a dict, with keys for timestamp, and ext::
* timestamp is a :class:`~swift.common.utils.Timestamp`
* ext is a string, the file extension including the leading dot or
              the empty string if the filename has no extension.
:raises DiskFileError: if any part of the filename is not able to be
validated.
"""
filename, ext = splitext(filename)
return {
'timestamp': Timestamp(filename),
'ext': ext,
}
def _gather_on_disk_file(self, filename, ext, context, frag_index=None,
**kwargs):
"""
Called by gather_ondisk_files() for each file in an object
datadir in reverse sorted order. If a file is considered part of a
valid on-disk file set it will be added to the context dict, keyed by
its extension. If a file is considered to be obsolete it will be added
to a list stored under the key 'obsolete' in the context dict.
:param filename: name of file to be accepted or not
:param ext: extension part of filename
:param context: a context dict that may have been populated by previous
calls to this method
:returns: True if a valid file set has been found, False otherwise
"""
# if first file with given extension then add filename to context
# dict and return True
accept_first = lambda: context.setdefault(ext, filename) == filename
# add the filename to the list of obsolete files in context dict
discard = lambda: context.setdefault('obsolete', []).append(filename)
# set a flag in the context dict indicating that a valid fileset has
# been found
set_valid_fileset = lambda: context.setdefault('found_valid', True)
# return True if the valid fileset flag is set in the context dict
have_valid_fileset = lambda: context.get('found_valid')
if ext == '.data':
if have_valid_fileset():
# valid fileset means we must have a newer
# .data or .ts, so discard the older .data file
discard()
else:
accept_first()
set_valid_fileset()
elif ext == '.ts':
if have_valid_fileset() or not accept_first():
# newer .data or .ts already found so discard this
discard()
# if not have_valid_fileset():
# # remove any .meta that may have been previously found
# context['.meta'] = None
set_valid_fileset()
elif ext == '.meta':
if have_valid_fileset() or not accept_first():
                # newer .data or .ts already found so discard this
discard()
else:
# ignore unexpected files
pass
return have_valid_fileset()
def _verify_on_disk_files(self, accepted_files, **kwargs):
"""
Verify that the final combination of on disk files complies with the
diskfile contract.
:param accepted_files: files that have been found and accepted
:returns: True if the file combination is compliant, False otherwise
"""
# mimic legacy behavior - .meta is ignored when .ts is found
if accepted_files.get('.ts'):
accepted_files['.meta'] = None
data_file, meta_file, ts_file, durable_file = tuple(
[accepted_files.get(ext)
for ext in ('.data', '.meta', '.ts', '.durable')])
return ((data_file is None and meta_file is None and ts_file is None)
or (ts_file is not None and data_file is None
and meta_file is None)
or (data_file is not None and ts_file is None))
def _hash_suffix(self, path, reclaim_age):
"""
Performs reclamation and returns an md5 of all (remaining) files.
:param reclaim_age: age in seconds at which to remove tombstones
:raises PathNotDir: if given path is not a valid directory
:raises OSError: for non-ENOTDIR errors
"""
mapper = lambda filename: (None, filename)
hashes = self._hash_suffix_dir(path, mapper, reclaim_age)
return hashes[None].hexdigest()
class ECDiskFileReader(BaseDiskFileReader):
pass
class ECDiskFileWriter(BaseDiskFileWriter):
def _finalize_durable(self, durable_file_path):
exc = None
try:
try:
with open(durable_file_path, 'w') as _fp:
fsync(_fp.fileno())
fsync_dir(self._datadir)
except (OSError, IOError) as err:
if err.errno not in (errno.ENOSPC, errno.EDQUOT):
# re-raise to catch all handler
raise
msg = (_('No space left on device for %s (%s)') %
(durable_file_path, err))
self.manager.logger.error(msg)
exc = DiskFileNoSpace(str(err))
else:
try:
self.manager.hash_cleanup_listdir(self._datadir)
except OSError as os_err:
self.manager.logger.exception(
_('Problem cleaning up %s (%s)') %
(self._datadir, os_err))
except Exception as err:
msg = (_('Problem writing durable state file %s (%s)') %
(durable_file_path, err))
self.manager.logger.exception(msg)
exc = DiskFileError(msg)
if exc:
raise exc
def commit(self, timestamp):
"""
Finalize put by writing a timestamp.durable file for the object. We
do this for EC policy because it requires a 2-phase put commit
confirmation.
:param timestamp: object put timestamp, an instance of
:class:`~swift.common.utils.Timestamp`
"""
durable_file_path = os.path.join(
self._datadir, timestamp.internal + '.durable')
self._threadpool.force_run_in_thread(
self._finalize_durable, durable_file_path)
def put(self, metadata):
"""
The only difference between this method and the replication policy
DiskFileWriter method is the call into manager.make_on_disk_filename
to construct the data file name.
"""
timestamp = Timestamp(metadata['X-Timestamp'])
fi = None
cleanup = True
if self._extension == '.data':
# generally we treat the fragment index provided in metadata as
# canon, but if it's unavailable (e.g. tests) it's reasonable to
# use the frag_index provided at instantiation. Either way make
# sure that the fragment index is included in object sysmeta.
fi = metadata.setdefault('X-Object-Sysmeta-Ec-Frag-Index',
self._diskfile._frag_index)
# defer cleanup until commit() writes .durable
cleanup = False
filename = self.manager.make_on_disk_filename(
timestamp, self._extension, frag_index=fi)
metadata['name'] = self._name
target_path = join(self._datadir, filename)
self._threadpool.force_run_in_thread(
self._finalize_put, metadata, target_path, cleanup)
class ECDiskFile(BaseDiskFile):
reader_cls = ECDiskFileReader
writer_cls = ECDiskFileWriter
def __init__(self, *args, **kwargs):
super(ECDiskFile, self).__init__(*args, **kwargs)
frag_index = kwargs.get('frag_index')
self._frag_index = None
if frag_index is not None:
self._frag_index = self.manager.validate_fragment_index(frag_index)
def _get_ondisk_file(self):
"""
The only difference between this method and the replication policy
DiskFile method is passing in the frag_index kwarg to our manager's
get_ondisk_files method.
"""
try:
files = os.listdir(self._datadir)
except OSError as err:
if err.errno == errno.ENOTDIR:
# If there's a file here instead of a directory, quarantine
# it; something's gone wrong somewhere.
raise self._quarantine(
# hack: quarantine_renamer actually renames the directory
# enclosing the filename you give it, but here we just
# want this one file and not its parent.
os.path.join(self._datadir, "made-up-filename"),
"Expected directory, found file at %s" % self._datadir)
elif err.errno != errno.ENOENT:
raise DiskFileError(
"Error listing directory %s: %s" % (self._datadir, err))
# The data directory does not exist, so the object cannot exist.
fileset = (None, None, None)
else:
fileset = self.manager.get_ondisk_files(
files, self._datadir, frag_index=self._frag_index)
return fileset
def purge(self, timestamp, frag_index):
"""
Remove a tombstone file matching the specified timestamp or
datafile matching the specified timestamp and fragment index
from the object directory.
This provides the EC reconstructor/ssync process with a way to
remove a tombstone or fragment from a handoff node after
reverting it to its primary node.
The hash will be invalidated, and if empty or invalid the
hsh_path will be removed on next hash_cleanup_listdir.
:param timestamp: the object timestamp, an instance of
:class:`~swift.common.utils.Timestamp`
:param frag_index: a fragment archive index, must be a whole number.
"""
for ext in ('.data', '.ts'):
purge_file = self.manager.make_on_disk_filename(
timestamp, ext=ext, frag_index=frag_index)
remove_file(os.path.join(self._datadir, purge_file))
self.manager.invalidate_hash(dirname(self._datadir))
@DiskFileRouter.register(EC_POLICY)
class ECDiskFileManager(BaseDiskFileManager):
diskfile_cls = ECDiskFile
def validate_fragment_index(self, frag_index):
"""
Return int representation of frag_index, or raise a DiskFileError if
frag_index is not a whole number.
"""
try:
frag_index = int(str(frag_index))
except (ValueError, TypeError) as e:
raise DiskFileError(
'Bad fragment index: %s: %s' % (frag_index, e))
if frag_index < 0:
raise DiskFileError(
'Fragment index must not be negative: %s' % frag_index)
return frag_index
def make_on_disk_filename(self, timestamp, ext=None, frag_index=None,
*a, **kw):
"""
Returns the EC specific filename for given timestamp.
:param timestamp: the object timestamp, an instance of
:class:`~swift.common.utils.Timestamp`
:param ext: an optional string representing a file extension to be
appended to the returned file name
:param frag_index: a fragment archive index, used with .data extension
only, must be a whole number.
:returns: a file name
:raises DiskFileError: if ext=='.data' and the kwarg frag_index is not
a whole number
"""
rv = timestamp.internal
if ext == '.data':
# for datafiles only we encode the fragment index in the filename
# to allow archives of different indexes to temporarily be stored
# on the same node in certain situations
frag_index = self.validate_fragment_index(frag_index)
rv += '#' + str(frag_index)
if ext:
rv = '%s%s' % (rv, ext)
return rv
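    # For example, with an internal timestamp of '1400000000.00000',
    # frag_index=3 and ext='.data', the method above returns
    # '1400000000.00000#3.data'; for '.ts', '.meta' or '.durable' no
    # '#<frag_index>' part is appended.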
def parse_on_disk_filename(self, filename):
"""
Returns the timestamp extracted from a policy specific .data file name.
For EC policy the data file name includes a fragment index which must
be stripped off to retrieve the timestamp.
:param filename: the data file name including extension
:returns: a dict, with keys for timestamp, frag_index, and ext::
* timestamp is a :class:`~swift.common.utils.Timestamp`
* frag_index is an int or None
* ext is a string, the file extension including the leading dot or
              the empty string if the filename has no extension.
:raises DiskFileError: if any part of the filename is not able to be
validated.
"""
frag_index = None
filename, ext = splitext(filename)
parts = filename.split('#', 1)
timestamp = parts[0]
if ext == '.data':
# it is an error for an EC data file to not have a valid
# fragment index
try:
frag_index = parts[1]
except IndexError:
# expect validate_fragment_index raise DiskFileError
pass
frag_index = self.validate_fragment_index(frag_index)
return {
'timestamp': Timestamp(timestamp),
'frag_index': frag_index,
'ext': ext,
}
def is_obsolete(self, filename, other_filename):
"""
Test if a given file is considered to be obsolete with respect to
another file in an object storage dir.
Implements EC policy specific behavior when comparing files against a
.durable file.
A simple string comparison would consider t2#1.data to be older than
t2.durable (since t2#1.data < t2.durable). By stripping off the file
extensions we get the desired behavior: t2#1 > t2 without compromising
the detection of t1#1 < t2.
:param filename: a string representing an absolute filename
:param other_filename: a string representing an absolute filename
:returns: True if filename is considered obsolete, False otherwise.
"""
if other_filename.endswith('.durable'):
return splitext(filename)[0] < splitext(other_filename)[0]
return filename < other_filename
def _gather_on_disk_file(self, filename, ext, context, frag_index=None,
**kwargs):
"""
Called by gather_ondisk_files() for each file in an object
datadir in reverse sorted order. If a file is considered part of a
valid on-disk file set it will be added to the context dict, keyed by
its extension. If a file is considered to be obsolete it will be added
to a list stored under the key 'obsolete' in the context dict.
:param filename: name of file to be accepted or not
:param ext: extension part of filename
:param context: a context dict that may have been populated by previous
calls to this method
:param frag_index: if set, search for a specific fragment index .data
file, otherwise accept the first valid .data file.
:returns: True if a valid file set has been found, False otherwise
"""
# if first file with given extension then add filename to context
# dict and return True
accept_first = lambda: context.setdefault(ext, filename) == filename
# add the filename to the list of obsolete files in context dict
discard = lambda: context.setdefault('obsolete', []).append(filename)
# set a flag in the context dict indicating that a valid fileset has
# been found
set_valid_fileset = lambda: context.setdefault('found_valid', True)
# return True if the valid fileset flag is set in the context dict
have_valid_fileset = lambda: context.get('found_valid')
if context.get('.durable'):
# a .durable file has been found
if ext == '.data':
if self.is_obsolete(filename, context.get('.durable')):
# this and remaining data files are older than durable
discard()
set_valid_fileset()
else:
# accept the first .data file if it matches requested
# frag_index, or if no specific frag_index is requested
fi = self.parse_on_disk_filename(filename)['frag_index']
if frag_index is None or frag_index == int(fi):
accept_first()
set_valid_fileset()
# else: keep searching for a .data file to match frag_index
context.setdefault('fragments', []).append(filename)
else:
# there can no longer be a matching .data file so mark what has
# been found so far as the valid fileset
discard()
set_valid_fileset()
elif ext == '.data':
# not yet found a .durable
if have_valid_fileset():
# valid fileset means we must have a newer
# .ts, so discard the older .data file
discard()
else:
# .data newer than a .durable or .ts, don't discard yet
context.setdefault('fragments_without_durable', []).append(
filename)
elif ext == '.ts':
if have_valid_fileset() or not accept_first():
# newer .data, .durable or .ts already found so discard this
discard()
if not have_valid_fileset():
# remove any .meta that may have been previously found
context['.meta'] = None
set_valid_fileset()
elif ext in ('.meta', '.durable'):
if have_valid_fileset() or not accept_first():
# newer .data, .durable or .ts already found so discard this
discard()
else:
# ignore unexpected files
pass
return have_valid_fileset()
def _verify_on_disk_files(self, accepted_files, frag_index=None, **kwargs):
"""
Verify that the final combination of on disk files complies with the
diskfile contract.
:param accepted_files: files that have been found and accepted
:param frag_index: specifies a specific fragment index .data file
:returns: True if the file combination is compliant, False otherwise
"""
if not accepted_files.get('.data'):
# We may find only a .meta, which doesn't mean the on disk
# contract is broken. So we clear it to comply with
# superclass assertions.
accepted_files['.meta'] = None
data_file, meta_file, ts_file, durable_file = tuple(
[accepted_files.get(ext)
for ext in ('.data', '.meta', '.ts', '.durable')])
return ((data_file is None or durable_file is not None)
and (data_file is None and meta_file is None
and ts_file is None and durable_file is None)
or (ts_file is not None and data_file is None
and meta_file is None and durable_file is None)
or (data_file is not None and durable_file is not None
and ts_file is None)
or (durable_file is not None and meta_file is None
and ts_file is None))
def _hash_suffix(self, path, reclaim_age):
"""
The only difference between this method and the replication policy
function is the way that files are updated on the returned hash.
Instead of all filenames hashed into a single hasher, each file name
will fall into a bucket either by fragment index for datafiles, or
None (indicating a durable, metadata or tombstone).
"""
# hash_per_fi instead of single hash for whole suffix
# here we flatten out the hashers hexdigest into a dictionary instead
# of just returning the one hexdigest for the whole suffix
def mapper(filename):
info = self.parse_on_disk_filename(filename)
fi = info['frag_index']
if fi is None:
return None, filename
else:
return fi, info['timestamp'].internal
hash_per_fi = self._hash_suffix_dir(path, mapper, reclaim_age)
return dict((fi, md5.hexdigest()) for fi, md5 in hash_per_fi.items())
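    # Note: the dict returned above maps fragment index to an md5 hexdigest,
    # e.g. {2: <md5 over the frag-2 .data timestamps>, None: <md5 over the
    # names of .durable/.ts/.meta files>}, whereas the replication-policy
    # _hash_suffix() returns a single hexdigest for the whole suffix.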
| Akanoa/swift | swift/obj/diskfile.py | Python | apache-2.0 | 98,599 |
"""
Sandbox tests for the projection operator
"""
import numpy as np
import theano
from pylearn2.sandbox.nlp.linear.matrixmul import MatrixMul
from pylearn2.utils import sharedX
from theano import tensor
def test_matrixmul():
"""
Tests for projection
"""
rng = np.random.RandomState(222)
dtypes = [
'int16', 'int32', 'int64'
]
tensor_x = [
tensor.wmatrix(),
tensor.imatrix(),
tensor.lmatrix(),
tensor.wvector(),
tensor.ivector(),
tensor.lvector()
]
np_W, np_x = [], []
for dtype in dtypes:
np_W.append(rng.rand(10, np.random.randint(1, 10)))
np_x.append(rng.randint(
0, 10, (rng.random_integers(5),
rng.random_integers(5))
).astype(dtype))
for dtype in dtypes:
np_W.append(rng.rand(10, np.random.randint(1, 10)))
np_x.append(
rng.randint(0, 10, (rng.random_integers(5),)).astype(dtype)
)
tensor_W = [sharedX(W) for W in np_W]
matrixmul = [MatrixMul(W) for W in tensor_W]
assert all(mm.get_params()[0] == W for mm, W in zip(matrixmul, tensor_W))
fn = [theano.function([x], mm.project(x))
for x, mm in zip(tensor_x, matrixmul)]
for W, x, f in zip(np_W, np_x, fn):
W_x = W[x]
if x.ndim == 2:
W_x = W_x.reshape((W_x.shape[0], np.prod(W_x.shape[1:])))
else:
W_x = W_x.flatten()
np.testing.assert_allclose(f(x), W_x)
| fyffyt/pylearn2 | pylearn2/sandbox/nlp/linear/tests/test_matrixmul.py | Python | bsd-3-clause | 1,494 |
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import os
import sys
import time
child_pid = os.fork()
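# The parent sleeps without reaping the child, while the child exits
# immediately, so the child lingers as a zombie process for up to 60 seconds.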
if child_pid > 0:
time.sleep(60)
else:
sys.exit()
| sivel/ansible | test/integration/targets/wait_for/files/zombie.py | Python | gpl-3.0 | 202 |
# coding: utf-8
"""
DocuSign REST API
The DocuSign REST API provides you with a powerful, convenient, and simple Web services API for interacting with DocuSign. # noqa: E501
OpenAPI spec version: v2.1
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class PayPalLegacySettings(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'currency': 'str',
'partner': 'str',
'password': 'str',
'user_name': 'str',
'vendor': 'str'
}
attribute_map = {
'currency': 'currency',
'partner': 'partner',
'password': 'password',
'user_name': 'userName',
'vendor': 'vendor'
}
def __init__(self, currency=None, partner=None, password=None, user_name=None, vendor=None): # noqa: E501
"""PayPalLegacySettings - a model defined in Swagger""" # noqa: E501
self._currency = None
self._partner = None
self._password = None
self._user_name = None
self._vendor = None
self.discriminator = None
if currency is not None:
self.currency = currency
if partner is not None:
self.partner = partner
if password is not None:
self.password = password
if user_name is not None:
self.user_name = user_name
if vendor is not None:
self.vendor = vendor
@property
def currency(self):
"""Gets the currency of this PayPalLegacySettings. # noqa: E501
# noqa: E501
:return: The currency of this PayPalLegacySettings. # noqa: E501
:rtype: str
"""
return self._currency
@currency.setter
def currency(self, currency):
"""Sets the currency of this PayPalLegacySettings.
# noqa: E501
:param currency: The currency of this PayPalLegacySettings. # noqa: E501
:type: str
"""
self._currency = currency
@property
def partner(self):
"""Gets the partner of this PayPalLegacySettings. # noqa: E501
# noqa: E501
:return: The partner of this PayPalLegacySettings. # noqa: E501
:rtype: str
"""
return self._partner
@partner.setter
def partner(self, partner):
"""Sets the partner of this PayPalLegacySettings.
# noqa: E501
:param partner: The partner of this PayPalLegacySettings. # noqa: E501
:type: str
"""
self._partner = partner
@property
def password(self):
"""Gets the password of this PayPalLegacySettings. # noqa: E501
# noqa: E501
:return: The password of this PayPalLegacySettings. # noqa: E501
:rtype: str
"""
return self._password
@password.setter
def password(self, password):
"""Sets the password of this PayPalLegacySettings.
# noqa: E501
:param password: The password of this PayPalLegacySettings. # noqa: E501
:type: str
"""
self._password = password
@property
def user_name(self):
"""Gets the user_name of this PayPalLegacySettings. # noqa: E501
# noqa: E501
:return: The user_name of this PayPalLegacySettings. # noqa: E501
:rtype: str
"""
return self._user_name
@user_name.setter
def user_name(self, user_name):
"""Sets the user_name of this PayPalLegacySettings.
# noqa: E501
:param user_name: The user_name of this PayPalLegacySettings. # noqa: E501
:type: str
"""
self._user_name = user_name
@property
def vendor(self):
"""Gets the vendor of this PayPalLegacySettings. # noqa: E501
# noqa: E501
:return: The vendor of this PayPalLegacySettings. # noqa: E501
:rtype: str
"""
return self._vendor
@vendor.setter
def vendor(self, vendor):
"""Sets the vendor of this PayPalLegacySettings.
# noqa: E501
:param vendor: The vendor of this PayPalLegacySettings. # noqa: E501
:type: str
"""
self._vendor = vendor
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(PayPalLegacySettings, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, PayPalLegacySettings):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
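# Hypothetical usage sketch (illustration only; values are made up):
#
#     settings = PayPalLegacySettings(currency='USD', user_name='merchant')
#     settings.to_dict()
#     # -> {'currency': 'USD', 'user_name': 'merchant',
#     #     'partner': None, 'password': None, 'vendor': None}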
| docusign/docusign-python-client | docusign_esign/models/pay_pal_legacy_settings.py | Python | mit | 6,167 |
from setuptools import setup, find_packages
setup(
name = "django-test-utils",
version = "0.3",
packages = find_packages(),
author = "Eric Holscher",
author_email = "[email protected]",
description = "A package to help testing in Django",
url = "http://github.com/ericholscher/django-test-utils/tree/master",
download_url='http://www.github.com/ericholscher/django-test-utils/tarball/0.3.0',
test_suite = "test_project.run_tests.run_tests",
include_package_data = True,
install_requires=[
'BeautifulSoup',
'twill',
]
)
| ericholscher/django-test-utils | setup.py | Python | mit | 592 |
#!/usr/bin/env python
#coding: utf-8
#### FUNCTIONS ####
def header(string):
"""
Display header
"""
timeInfo = time.strftime("%Y-%m-%d %H:%M")
print '\n', timeInfo, "****", string, "****"
def subHeader(string):
"""
Display subheader
"""
timeInfo = time.strftime("%Y-%m-%d %H:%M")
print timeInfo, "**", string, "**"
def info(string):
"""
Display basic information
"""
timeInfo = time.strftime("%Y-%m-%d %H:%M")
print timeInfo, string
def variantFrequencies(MEIObj, donorIdAncestryDict):
"""
    For a variant absent in the reference genome, compute its allele count and frequency for the complete PCAWG cohort and for each ethnicity
"""
ancestryCodesList = set(donorIdAncestryDict.values())
# Initialize allele count dictionary
alleleCountDict = {}
alleleCountDict['PCAWG'] = 0
for ancestry in ancestryCodesList:
alleleCountDict[ancestry] = 0
## Total number of chromosome copies in the population
# Will be used for computing the allele frequency
# If no missing genotypes would be == Number of donors * 2 (diploid, two copies of a given chromosome)
nbChromDict = {}
nbChromDict['PCAWG'] = 0
for ancestry in ancestryCodesList:
nbChromDict[ancestry] = 0
# For each donor and genotype
for donorId, genotypeField in MEIObj.genotypesDict.iteritems():
# Select only whitelisted donors
# if True:
if (donorId not in blackDonorsList):
genotypeFieldList = genotypeField.split(":")
genotype = genotypeFieldList[0]
ancestry = donorIdAncestryDict[donorId]
# print "TIO: ", donorId, ancestry, genotype
# a) Heterozygous
if (genotype == "0/1"):
#print donorId, ancestry, genotype, genotypeField
nbChromDict['PCAWG'] += 2
nbChromDict[ancestry] += 2
alleleCountDict['PCAWG'] += 1
alleleCountDict[ancestry] += 1
# b) Homozygous alternative
elif (genotype == "1/1"):
#print donorId, ancestry, genotype, genotypeField
nbChromDict['PCAWG'] += 2
nbChromDict[ancestry] += 2
alleleCountDict['PCAWG'] += 2
alleleCountDict[ancestry] += 2
# c) Homozygous reference
elif (genotype == "0/0"):
nbChromDict['PCAWG'] += 2
nbChromDict[ancestry] += 2
# d) Haploid carrier (males X and Y outside PAR region)
elif (genotype == "1"):
nbChromDict['PCAWG'] += 1
nbChromDict[ancestry] += 1
alleleCountDict['PCAWG'] += 1
alleleCountDict[ancestry] += 1
# e) Haploid not carrier (males X and Y outside PAR region)
elif (genotype == "0"):
nbChromDict['PCAWG'] += 1
nbChromDict[ancestry] += 1
    ## Compute overall and per ethnicity variant allele frequencies
alleleFreqDict = {}
# a) Allele freq. estimation not available for those insertions with unknown genotype in all the donors
if (nbChromDict['PCAWG'] == 0):
alleleFreqDict['PCAWG'] = "UNK"
# b) Allele freq. estimation available
else:
alleleFreqDict['PCAWG'] = float(alleleCountDict['PCAWG'])/float(nbChromDict['PCAWG'])
for ancestry in ancestryCodesList:
# a) Allele freq. estimation not available for those insertions with unknown genotype in all the donors
if (nbChromDict[ancestry] == 0):
alleleFreqDict[ancestry] = "UNK"
# b) Allele freq. estimation available
else:
alleleFreqDict[ancestry] = float(alleleCountDict[ancestry])/float(nbChromDict[ancestry])
## A) Novel insertion
if ('GERMDB' not in MEIObj.infoDict):
alleleFreqDict["novelty"] = "novel"
## B) Not novel insertion
else:
alleleFreqDict["novelty"] = "known"
return alleleCountDict, alleleFreqDict
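# Worked example (illustration): with two whitelisted EUR donors, one '0/1'
# and one '1/1', variantFrequencies() counts 1 + 2 = 3 alternative alleles
# over 2 + 2 = 4 chromosome copies, giving an EUR (and PCAWG) allele
# frequency of 3/4 = 0.75.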
def variantFrequencies_ref(MEIObj, donorIdAncestryDict):
"""
    For a variant in the reference genome, compute its allele count and frequency for the complete PCAWG cohort and for each ethnicity
"""
ancestryCodesList = set(donorIdAncestryDict.values())
# Initialize allele count dictionary
alleleCountDict = {}
alleleCountDict['PCAWG'] = 0
for ancestry in ancestryCodesList:
alleleCountDict[ancestry] = 0
## Total number of chromosome copies in the population
# Will be used for computing the allele frequency
# If no missing genotypes would be == Number of donors * 2 (diploid, two copies of a given chromosome)
nbChromDict = {}
nbChromDict['PCAWG'] = 0
for ancestry in ancestryCodesList:
nbChromDict[ancestry] = 0
# For each donor and genotype
for donorId, genotypeField in MEIObj.genotypesDict.iteritems():
# Select only whitelisted donors
# if True:
if (donorId not in blackDonorsList):
genotypeFieldList = genotypeField.split(":")
genotype = genotypeFieldList[0]
ancestry = donorIdAncestryDict[donorId]
# a) Heterozygous
if (genotype == "0/1"):
#print donorId, ancestry, genotype, genotypeField
nbChromDict['PCAWG'] += 2
nbChromDict[ancestry] += 2
alleleCountDict['PCAWG'] += 1
alleleCountDict[ancestry] += 1
# b) Homozygous alternative (MEI absent)
elif (genotype == "1/1"):
nbChromDict['PCAWG'] += 2
nbChromDict[ancestry] += 2
# c) Homozygous reference (MEI present)
elif (genotype == "0/0"):
#print donorId, ancestry, genotype, genotypeField
nbChromDict['PCAWG'] += 2
nbChromDict[ancestry] += 2
alleleCountDict['PCAWG'] += 2
alleleCountDict[ancestry] += 2
# d) Haploid not carrier (males X and Y outside PAR region)
elif (genotype == "1"):
nbChromDict['PCAWG'] += 1
nbChromDict[ancestry] += 1
# e) Haploid carrier (males X and Y outside PAR region)
elif (genotype == "0"):
#print donorId, ancestry, genotype, genotypeField
nbChromDict['PCAWG'] += 1
nbChromDict[ancestry] += 1
alleleCountDict['PCAWG'] += 1
alleleCountDict[ancestry] += 1
    ## Compute overall and per ethnicity variant allele frequencies
alleleFreqDict = {}
# a) Allele freq. estimation not available for those insertions with unknown genotype in all the donors
if (nbChromDict['PCAWG'] == 0):
alleleFreqDict['PCAWG'] = "UNK"
# b) Allele freq. estimation available
else:
alleleFreqDict['PCAWG'] = float(alleleCountDict['PCAWG'])/float(nbChromDict['PCAWG'])
for ancestry in ancestryCodesList:
# a) Allele freq. estimation not available for those insertions with unknown genotype in all the donors
if (nbChromDict[ancestry] == 0):
alleleFreqDict[ancestry] = "UNK"
# b) Allele freq. estimation available
else:
alleleFreqDict[ancestry] = float(alleleCountDict[ancestry])/float(nbChromDict[ancestry])
return alleleCountDict, alleleFreqDict
#### MAIN ####
## Import modules ##
import argparse
import sys
import os.path
import time
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from scipy import stats
import seaborn as sns
import scipy
import formats
## Get user's input ##
parser = argparse.ArgumentParser(description= """""")
parser.add_argument('vcf', help='Multisample VCF containing genotyped MEI')
parser.add_argument('metadata', help='PCAWG donor metadata')
parser.add_argument('fileName', help='Output file name')
parser.add_argument('-o', '--outDir', default=os.getcwd(), dest='outDir', help='output directory. Default: current working directory.' )
args = parser.parse_args()
inputVCF = args.vcf
metadata = args.metadata
fileName = args.fileName
outDir = args.outDir
scriptName = os.path.basename(sys.argv[0])
## Display configuration to standard output ##
print
print "***** ", scriptName, " configuration *****"
print "vcf: ", inputVCF
print "metadata: ", metadata
print "fileName: ", fileName
print "outDir: ", outDir
print
print "***** Executing ", scriptName, ".... *****"
print
## Start ##
#### 0. Create dictionary with donor id ancestry equivalences
###############################################################
header("0. Create dictionary with donor id ancestry equivalences")
metadataFile = open(metadata, 'r')
donorIdAncestryDict = {}
for line in metadataFile:
# Skip header
if not line.startswith("#"):
line = line.rstrip('\n')
line = line.split('\t')
donorId = line[0]
ancestry = line[4]
donorIdAncestryDict[donorId] = ancestry
#print "donorIdAncestryDict: ", donorIdAncestryDict
### Generate list of excluded (blacklisted) PCAWG donors
metadataFile = open(metadata, 'r')
blackDonorsList = []
for line in metadataFile:
# Skip header
if not line.startswith("#"):
line = line.rstrip('\n')
line = line.split('\t')
donorId = line[0]
status = line[1]
if (status == "Excluded"):
blackDonorsList.append(donorId)
#### 1. Read input multi-sample VCF and generate a VCF object
###############################################################
header("1. Process multi-sample VCF as input")
VCFObj = formats.VCF()
donorIdList = VCFObj.read_VCF_multiSample(inputVCF)
#### 2. Compute for each germline MEI that passes the filters its allele count and frequency
#############################################################################################
# Allele count and frequency computed overall and across the different ethnicities.
header("2. Compute for each germline MEI that passes the filters its allele count and frequency")
alleleCountDict = {}
alleleFreqDict = {}
## For each MEI
for MEIObj in VCFObj.lineList:
## Select only those MEI that passes all the filters
if (MEIObj.filter == "PASS"):
## MEI identifier
## A) MEI correspond to a germline source element -> use source element identifier
if ('SRCID' in MEIObj.infoDict):
MEIid = MEIObj.infoDict['SRCID']
# B) MEI does not correspond a source element -> create coordinates based identifier
else:
MEIid = MEIObj.infoDict["CLASS"] + '_' + MEIObj.chrom + '_' + str(MEIObj.pos)
#print "MEIid: ", MEIid
## Compute MEI allele count and frequencies
alleleCountDict[MEIid] = {}
alleleFreqDict[MEIid] = {}
# A) MEI absent in reference genome
if (MEIObj.alt == "<MEI>"):
alleleCountDictTmp, alleleFreqDictTmp = variantFrequencies(MEIObj, donorIdAncestryDict)
# Add MEI allele counts and frequencies to the dict
alleleCountDict[MEIid] = alleleCountDictTmp
alleleFreqDict[MEIid] = alleleFreqDictTmp
# B) MEI in the reference genome
elif (MEIObj.ref == "<MEI>"):
alleleCountDictTmp, alleleFreqDictTmp = variantFrequencies_ref(MEIObj, donorIdAncestryDict)
# Add MEI allele counts and frequencies to the dict
alleleCountDict[MEIid] = alleleCountDictTmp
alleleFreqDict[MEIid] = alleleFreqDictTmp
# C) Raise error...
else:
msg="Incorrectly formated VCF line"
info(msg)
#### 3. Convert dictionaries into dataframes and generate output table
########################################################################
# For allele count and frequency generate a table with the following format:
# PCAWG EUR ASN ...
# source_element1 X1 Y1 Z1
# source_element2 X2 Y2 Z2
# ...
header("3. Convert dictionaries into dataframes and generate output table")
### 3.1 MEI allele count
# Create pandas dataframe from dictionary
alleleCountDf = pd.DataFrame(alleleCountDict)
# transpose to have MEI as rows
alleleCountDf = alleleCountDf.T
# Reorder columns and remove UNK columns:
colOrder = ['PCAWG', 'EUR', 'ASN', 'AFR', 'SAN', 'AMR']
#colOrder = ['PCAWG', 'EUR', 'EAS', 'AFR', 'SAS', 'AMR'] ** used for 1KGP MEI genotypes
alleleCountDf = alleleCountDf[colOrder]
# Save output into tsv
outFilePath = outDir + '/' + fileName + '.alleleCount.tsv'
alleleCountDf.to_csv(outFilePath, sep='\t')
### 3.1 MEI allele frequency
# Create pandas dataframe from dictionary
alleleFreqDf = pd.DataFrame(alleleFreqDict)
# transpose to have MEI as rows
alleleFreqDf = alleleFreqDf.T
# Reorder columns and remove UNK columns:
colOrder = ['PCAWG', 'EUR', 'ASN', 'AFR', 'SAN', 'AMR', 'novelty']
#colOrder = ['PCAWG', 'EUR', 'EAS', 'AFR', 'SAS', 'AMR' , 'novelty'] ** used for 1KGP MEI genotypes
alleleFreqDf = alleleFreqDf[colOrder]
# Save output into tsv
outFilePath = outDir + '/' + fileName + '.alleleFreq.tsv'
alleleFreqDf.to_csv(outFilePath, sep='\t')
#### End
header("FINISH!!")
| brguez/TEIBA | src/python/germlineMEI_frequency_adjustedCounts.py | Python | gpl-3.0 | 13,609 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('iom', '0019_auto_20151023_1141'),
]
operations = [
migrations.CreateModel(
name='Waarneming',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('datum', models.DateTimeField()),
('naam', models.CharField(max_length=20)),
('eenheid', models.CharField(max_length=20)),
('waarde', models.FloatField()),
('foto_url', models.CharField(max_length=200, null=True, blank=True)),
('opmerking', models.TextField(null=True, blank=True)),
],
),
migrations.AlterModelOptions(
name='akvoflow',
options={'verbose_name': 'Akvoflow API', 'verbose_name_plural': 'Akvoflow API'},
),
migrations.AlterField(
model_name='meetpunt',
name='photo_url',
field=models.CharField(max_length=200, null=True, blank=True),
),
migrations.AddField(
model_name='waarneming',
name='locatie',
field=models.ForeignKey(to='iom.Meetpunt'),
),
migrations.AddField(
model_name='waarneming',
name='waarnemer',
field=models.ForeignKey(to='iom.Waarnemer'),
),
]
| acaciawater/iom | iom/migrations/0020_auto_20151023_1445.py | Python | apache-2.0 | 1,528 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('common', '0003_auto_20150717_2226'),
]
operations = [
migrations.AlterField(
model_name='news',
name='content_markup_type',
field=models.CharField(default=b'restructuredtext', max_length=30, editable=False, choices=[(b'', b'--'), (b'html', b'html'), (b'plain', b'plain'), (b'restructuredtext', b'restructuredtext')]),
),
]
| Turupawn/website | common/migrations/0004_auto_20150717_2348.py | Python | agpl-3.0 | 567 |
import math
import time
import numpy as np
class Timer:
def __init__(self, text=None):
self.text = text
def __enter__(self):
self.cpu = time.clock()
self.time = time.time()
if self.text:
print("{}...".format(self.text))
return self
def __exit__(self, *args):
self.cpu = time.clock() - self.cpu
self.time = time.time() - self.time
if self.text:
print("%s: cpu %0.2f, time %0.2f\n" % (self.text, self.cpu, self.time))
def date_to_int(str_date):
Y, M, D = [int(a) for a in str_date.strip().split("-")] # "2016-05-28"
int_date = (int(Y) - 2015) * 12 + int(M)
assert 1 <= int_date <= 12 + 6
return int_date
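# Example: date_to_int("2016-05-28") == (2016 - 2015) * 12 + 5 == 17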
# "2016-05-28" or "" or nan
def date_to_float(str_date):
    if (str_date.__class__ is float and math.isnan(str_date)) or str_date == "":
return np.nan
Y, M, D = [int(a) for a in str_date.strip().split("-")]
float_date = float(Y) * 12 + float(M)
return float_date
products = (
"ind_ahor_fin_ult1",
"ind_aval_fin_ult1",
"ind_cco_fin_ult1" ,
"ind_cder_fin_ult1",
"ind_cno_fin_ult1" ,
"ind_ctju_fin_ult1",
"ind_ctma_fin_ult1",
"ind_ctop_fin_ult1",
"ind_ctpp_fin_ult1",
"ind_deco_fin_ult1",
"ind_deme_fin_ult1",
"ind_dela_fin_ult1",
"ind_ecue_fin_ult1",
"ind_fond_fin_ult1",
"ind_hip_fin_ult1" ,
"ind_plan_fin_ult1",
"ind_pres_fin_ult1",
"ind_reca_fin_ult1",
"ind_tjcr_fin_ult1",
"ind_valo_fin_ult1",
"ind_viv_fin_ult1" ,
"ind_nomina_ult1" ,
"ind_nom_pens_ult1",
"ind_recibo_ult1" ,
)
dtypes = {
"fecha_dato": str,
"ncodpers": int,
"conyuemp": str, # Spouse index. 1 if the customer is spouse of an employee
}
def apk(actual, predicted, k=10, default=1.0):
if len(predicted)>k:
predicted = predicted[:k]
score = 0.0
num_hits = 0.0
for i,p in enumerate(predicted):
if p in actual and p not in predicted[:i]:
num_hits += 1.0
score += num_hits / (i+1.0)
if not actual:
return default
return score / min(len(actual), k)
def mapk(actual, predicted, k=10, default=1.0):
return np.mean([apk(a,p,k,default) for a,p in zip(actual, predicted)])
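# Minimal usage sketch (added for illustration; not part of the original
# module). Timer is a context manager, e.g. `with Timer("step"): ...`; the
# block below exercises mapk() with toy data when the file is run directly.
if __name__ == "__main__":
    actual = [["A", "B"], ["C"]]
    predicted = [["A", "D", "B"], ["D", "C"]]
    # per-list AP@3: (1/1 + 2/3) / 2 = 0.8333... and (1/2) / 1 = 0.5
    print(mapk(actual, predicted, k=3))  # -> roughly 0.6667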
| yaxinus/santander-product-recommendation-8th-place | utils.py | Python | mit | 2,269 |
# -*- coding: utf-8 -*-
#
# Copyright 2017 Xiangxiang Telecom Corporation.
# @author: Liusha
"""This is the lte module.
"""
from flask import Blueprint
lte = Blueprint('lte', __name__, static_folder='static', template_folder='templates')
from app.lte.views import *
| flowsha/zhwh | web_root/app/lte/__init__.py | Python | apache-2.0 | 270 |
# Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from neutron.api import extensions
RESOURCE_NAME = "floatingip"
RESOURCE_COLLECTION = RESOURCE_NAME + "s"
EXTENDED_ATTRIBUTES_2_0 = {}
class Floatingip(extensions.ExtensionDescriptor):
"""Extends Networks for quark API purposes."""
@classmethod
def get_name(cls):
return "floatingip"
@classmethod
def get_alias(cls):
return "floatingip"
@classmethod
def get_description(cls):
return "Floating IPs"
@classmethod
def get_namespace(cls):
return ("http://docs.openstack.org/network/ext/"
"networks_quark/api/v2.0")
@classmethod
def get_updated(cls):
return "2013-03-25T19:00:00-00:00"
def get_extended_resources(self, version):
if version == "2.0":
return EXTENDED_ATTRIBUTES_2_0
else:
return {}
| lmaycotte/quark | quark/api/extensions/floatingip.py | Python | apache-2.0 | 1,435 |
from __future__ import unicode_literals
from django.core import mail
from django.utils import six
from reviewboard.reviews.models import Review
from reviewboard.webapi.resources import resources
from reviewboard.webapi.tests.base import BaseWebAPITestCase
from reviewboard.webapi.tests.mimetypes import (review_reply_item_mimetype,
review_reply_list_mimetype)
from reviewboard.webapi.tests.mixins import (BasicTestsMetaclass,
ReviewRequestChildItemMixin,
ReviewRequestChildListMixin)
from reviewboard.webapi.tests.mixins_review import (ReviewItemMixin,
ReviewListMixin)
from reviewboard.webapi.tests.urls import (get_review_reply_item_url,
get_review_reply_list_url)
class BaseResourceTestCase(BaseWebAPITestCase):
def _create_test_review(self, with_local_site=False):
review_request = self.create_review_request(
submitter=self.user,
with_local_site=with_local_site)
file_attachment = self.create_file_attachment(review_request)
review_request.publish(review_request.submitter)
review = self.create_review(review_request, publish=True)
self.create_file_attachment_comment(review, file_attachment)
return review
@six.add_metaclass(BasicTestsMetaclass)
class ResourceListTests(ReviewListMixin, ReviewRequestChildListMixin,
BaseResourceTestCase):
"""Testing the ReviewReplyResource list APIs."""
fixtures = ['test_users']
sample_api_url = 'review-requests/<id>/reviews/<id>/replies/'
resource = resources.review_reply
def setup_review_request_child_test(self, review_request):
review = self.create_review(review_request, publish=True)
return (get_review_reply_list_url(review),
review_reply_list_mimetype)
def compare_item(self, item_rsp, reply):
self.assertEqual(item_rsp['id'], reply.pk)
self.assertEqual(item_rsp['body_top'], reply.body_top)
self.assertEqual(item_rsp['body_bottom'], reply.body_bottom)
if reply.rich_text:
self.assertEqual(item_rsp['text_type'], 'markdown')
else:
self.assertEqual(item_rsp['text_type'], 'plain')
#
# HTTP GET tests
#
def setup_basic_get_test(self, user, with_local_site, local_site_name,
populate_items):
review_request = self.create_review_request(
with_local_site=with_local_site,
submitter=user,
publish=True)
review = self.create_review(review_request, publish=True)
if populate_items:
items = [self.create_reply(review, publish=True)]
else:
items = []
return (get_review_reply_list_url(review, local_site_name),
review_reply_list_mimetype,
items)
def test_get_with_counts_only(self):
"""Testing the
GET review-requests/<id>/reviews/<id>/replies/?counts-only=1 API
"""
review = self._create_test_review()
self.create_reply(review, user=self.user, publish=True)
rsp = self.api_get(
'%s?counts-only=1' % get_review_reply_list_url(review),
expected_mimetype=review_reply_list_mimetype)
self.assertEqual(rsp['stat'], 'ok')
self.assertEqual(rsp['count'], 1)
#
# HTTP POST tests
#
def setup_basic_post_test(self, user, with_local_site, local_site_name,
post_valid_data):
review_request = self.create_review_request(
with_local_site=with_local_site,
submitter=user,
publish=True)
review = self.create_review(review_request, publish=True)
return (get_review_reply_list_url(review, local_site_name),
review_reply_item_mimetype,
{},
[review])
def check_post_result(self, user, rsp, review):
reply = Review.objects.get(pk=rsp['reply']['id'])
self.assertFalse(reply.rich_text)
self.compare_item(rsp['reply'], reply)
def test_post_with_body_top(self):
"""Testing the POST review-requests/<id>/reviews/<id>/replies/ API
with body_top
"""
body_top = 'My Body Top'
review_request = self.create_review_request(publish=True)
review = self.create_review(review_request, publish=True)
rsp = self.api_post(
get_review_reply_list_url(review),
{'body_top': body_top},
expected_mimetype=review_reply_item_mimetype)
self.assertEqual(rsp['stat'], 'ok')
reply = Review.objects.get(pk=rsp['reply']['id'])
self.assertEqual(reply.body_top, body_top)
def test_post_with_body_bottom(self):
"""Testing the POST review-requests/<id>/reviews/<id>/replies/ API
with body_bottom
"""
body_bottom = 'My Body Bottom'
review_request = self.create_review_request(publish=True)
review = self.create_review(review_request, publish=True)
rsp = self.api_post(
get_review_reply_list_url(review),
{'body_bottom': body_bottom},
expected_mimetype=review_reply_item_mimetype)
self.assertEqual(rsp['stat'], 'ok')
reply = Review.objects.get(pk=rsp['reply']['id'])
self.assertEqual(reply.body_bottom, body_bottom)
@six.add_metaclass(BasicTestsMetaclass)
class ResourceItemTests(ReviewItemMixin, ReviewRequestChildItemMixin,
BaseResourceTestCase):
"""Testing the ReviewReplyResource item APIs."""
fixtures = ['test_users']
sample_api_url = 'review-requests/<id>/reviews/<id>/replies/<id>/'
resource = resources.review_reply
def setup_review_request_child_test(self, review_request):
review = self.create_review(review_request, publish=True)
reply = self.create_reply(review, publish=True)
return (get_review_reply_item_url(review, reply.pk),
review_reply_item_mimetype)
def compare_item(self, item_rsp, reply):
self.assertEqual(item_rsp['id'], reply.pk)
self.assertEqual(item_rsp['body_top'], reply.body_top)
self.assertEqual(item_rsp['body_bottom'], reply.body_bottom)
if reply.rich_text:
self.assertEqual(item_rsp['text_type'], 'markdown')
else:
self.assertEqual(item_rsp['text_type'], 'plain')
#
# HTTP DELETE tests
#
def setup_basic_delete_test(self, user, with_local_site, local_site_name):
review_request = self.create_review_request(
with_local_site=with_local_site,
submitter=user,
publish=True)
review = self.create_review(review_request, user=user, publish=True)
reply = self.create_reply(review, user=user)
return (get_review_reply_item_url(review, reply.pk, local_site_name),
[reply, review])
def check_delete_result(self, user, reply, review):
self.assertNotIn(reply, review.replies.all())
#
# HTTP GET tests
#
def setup_basic_get_test(self, user, with_local_site, local_site_name):
review_request = self.create_review_request(
with_local_site=with_local_site,
submitter=user,
publish=True)
review = self.create_review(review_request, user=user, publish=True)
reply = self.create_reply(review, user=user)
return (get_review_reply_item_url(review, reply.pk, local_site_name),
review_reply_item_mimetype,
reply)
def test_get_not_modified(self):
"""Testing the GET review-requests/<id>/reviews/<id>/
with Not Modified response
"""
review_request = self.create_review_request(publish=True)
review = self.create_review(review_request, publish=True)
reply = self.create_reply(review, publish=True)
self._testHttpCaching(
get_review_reply_item_url(reply.base_reply_to, reply.id),
check_last_modified=True)
#
# HTTP PUT tests
#
def setup_basic_put_test(self, user, with_local_site, local_site_name,
put_valid_data):
review_request = self.create_review_request(
with_local_site=with_local_site,
submitter=user,
publish=True)
review = self.create_review(review_request, user=user, publish=True)
reply = self.create_reply(review, user=user)
return (get_review_reply_item_url(review, reply.pk, local_site_name),
review_reply_item_mimetype,
{
'body_top': 'New body top',
},
reply,
[])
def check_put_result(self, user, item_rsp, reply, *args):
self.assertEqual(item_rsp['id'], reply.pk)
self.assertEqual(item_rsp['body_top'], 'New body top')
self.assertEqual(item_rsp['text_type'], 'plain')
reply = Review.objects.get(pk=reply.pk)
self.compare_item(item_rsp, reply)
def test_put_with_publish(self):
"""Testing the
PUT review-requests/<id>/reviews/<id>/replies/<id>/?public=1 API
"""
self.siteconfig.set('mail_send_review_mail', True)
self.siteconfig.save()
review_request = self.create_review_request(publish=True)
review = self.create_review(review_request, publish=True)
mail.outbox = []
rsp, response = self.api_post_with_response(
get_review_reply_list_url(review),
expected_mimetype=review_reply_item_mimetype)
self.assertIn('Location', response)
self.assertIn('stat', rsp)
self.assertEqual(rsp['stat'], 'ok')
rsp = self.api_put(
response['Location'],
{
'body_top': 'Test',
'public': True,
},
expected_mimetype=review_reply_item_mimetype)
self.assertEqual(rsp['stat'], 'ok')
reply = Review.objects.get(pk=rsp['reply']['id'])
self.assertEqual(reply.public, True)
self.assertEqual(len(mail.outbox), 1)
| 1tush/reviewboard | reviewboard/webapi/tests/test_review_reply.py | Python | mit | 10,358 |
from seeds.nodes import NodeSeeder
from app import flapp
import requests
from satoyama.tests.dbtestbase import DBTestBase
from app.apihelpers import UrlHelper, ApiResponseHelper
from satoyama.models import SensorType
# class SensorResourceTests(DBTestBase):
# def test_GET_existing_sensor_by_id(self):
# """
# Creates a ricefield node and tests that all three sensors can be accessed via the API
# """
# node = NodeSeeder.seed_ricefield_node(n_readings = 1)
# for sensor in node.sensors:
# # pprint(sensor.json())
# url = UrlHelper.get_url(flapp, 'sensor', sensor.id)
# response = requests.get(url)
# assert response.ok
# api_response = ApiResponseHelper.assert_api_response(response)
# assert api_response.first() == sensor.json(), 'First item in the api response was supposed to be sensor 1, but it was not'
# def test_GET_nonexisting_sensor_by_id(self):
# """
# Tries to GET a nonexising sensor
# """
# url = UrlHelper.get_url(flapp, 'sensor', 1111111111)
# response = requests.get(url)
# assert response.ok
# api_response = ApiResponseHelper.assert_api_response(response, expect_success = False)
# assert len(api_response.objects) == 0, 'The sensor does not exist, so the api should not return any objects'
# def test_POST_sensor_with_node_and_sensortype_success(self):
# st = SensorType.create(name = 'sonar', unit = 'cm')
# node = NodeSeeder.seed_node('empty')
# url = UrlHelper.get_url(flapp, 'sensor')
# data = {'alias' : 'myawesomesensor', 'sensortype' : st.name, 'node_id' : node.id}
# response = requests.post(url, data = data)
# assert response.ok
# api_response = ApiResponseHelper.assert_api_response(response, expect_success = True)
# assert api_response.first()['alias'] == 'myawesomesensor'
# assert api_response.first()['latest_reading'] == ''
# def test_POST_sensor_without_node_or_sensortype_failure(self):
# url = UrlHelper.get_url(flapp, 'sensor')
# data = {'alias' : 'myawesomesensor'}
# response = requests.post(url, data = data)
# assert response.ok
# ApiResponseHelper.assert_api_response(response, expect_success = False)
| DgFutureLab/satoyama-api | app/tests/test_sensor_resource.py | Python | mit | 2,146 |
from cert_wizard import cert_utils
import sys
def validation_error(message, abort=True):
print('[CW] Error: {}'.format(message))
if abort:
print('[CW] Aborting.')
sys.exit(1)
def combined_ca_and_server_separate_key(server_cert_path, private_key_path):
# server cert should have at least one cert in it
cert_count = cert_utils.count_certs_in_file(server_cert_path)
if cert_count == 0:
validation_error('no certs detected in server cert file')
# server cert should not have any private keys in it
key_count = cert_utils.count_keys_in_file(server_cert_path)
if key_count > 0:
validation_error(('server cert file should not contain '
'any private keys if a separate private '
'key file is provided'))
# the private key file should have a single key in it and
# no certs of any kind
key_count = cert_utils.count_keys_in_file(private_key_path)
if key_count == 0:
validation_error('no private keys found in private key file.')
if key_count > 1:
validation_error('multiple private keys found in private key file.')
if cert_utils.count_certs_in_file(private_key_path) > 0:
validation_error('certificates found in private key file.')
def combined_ca_and_server_integrated_key(server_cert_path):
# the server cert should have exactly one private key in it
key_count = cert_utils.count_keys_in_file(server_cert_path)
if key_count == 0:
validation_error(('no keys found in server cert file, '
'but no private key file specified'))
if key_count > 1:
validation_error('too many keys found in server cert file')
# the server cert should have at least one cert in it
cert_count = cert_utils.count_certs_in_file(server_cert_path)
if cert_count == 0:
validation_error('no certs detected in server cert file')
def separate_ca_and_server_integrated_key(server_cert_path, ca_cert_path):
# server cert should contain a single private key
key_count = cert_utils.count_keys_in_file(server_cert_path)
if key_count == 0:
validation_error(('no keys found in server cert file, '
'but no private key file specified'))
if key_count > 1:
validation_error('too many keys found in server cert file')
# the server cert file should have a single cert in it if a separate
# CA file was provided by the user
cert_count = cert_utils.count_certs_in_file(server_cert_path)
if cert_count > 1:
validation_error(('certificate chain detected in server cert file, '
'yet separate CA cert file was provided'))
if cert_count == 0:
validation_error('no certs detected in server cert file')
# the ca cert file should not have any private keys in it
if cert_utils.count_keys_in_file(ca_cert_path) > 0:
validation_error('private keys found in CA cert file.')
# the ca cert file should have at least one certificate in it
if cert_utils.count_certs_in_file(ca_cert_path) == 0:
validation_error('no certs found in CA cert file.')
def all_separate(server_cert_path, private_key_path, ca_cert_path):
print(server_cert_path)
print(private_key_path)
print(ca_cert_path)
# there should be no extra private keys lingering
# in the server cert file if a private key was provided by the user.
if cert_utils.count_keys_in_file(server_cert_path) > 0:
validation_error(('server cert file should not contain any private '
'keys if a separate private key file is provided'))
# the server cert file should have a single PEM in it if a separate
# CA file was provided by the user
cert_count = cert_utils.count_certs_in_file(server_cert_path)
if cert_count > 1:
validation_error(('certificate chain detected in server cert file, '
'yet separate CA cert file was provided'))
if cert_count == 0:
validation_error('no certs detected in server cert file')
# the private key file should have a single key in it and
# no certs of any kind
key_count = cert_utils.count_keys_in_file(private_key_path)
if key_count == 0:
validation_error('no private keys found in private key file.')
if key_count > 1:
validation_error('multiple private keys found in private key file.')
if cert_utils.count_certs_in_file(private_key_path) > 0:
validation_error('certificates found in private key file.')
# the ca cert file should not have any private keys in it
if cert_utils.count_keys_in_file(ca_cert_path) > 0:
validation_error('private keys found in CA cert file.')
# the ca cert file should have at least one certificate in it
if cert_utils.count_certs_in_file(ca_cert_path) == 0:
validation_error('no certs found in CA cert file.')
| s0lst1c3/eaphammer | cert_wizard/importer/validators.py | Python | gpl-3.0 | 4,975 |
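A brief usage sketch of the layout checks above; the PEM/key file names are placeholders (not files shipped with the project), the import path follows the module location, and note that each validator aborts via sys.exit(1) on failure.

# Hypothetical invocation of the layout checks defined above; paths are placeholders.
from cert_wizard.importer import validators

# Server chain (server + CA certs) in one file, private key kept separately.
validators.combined_ca_and_server_separate_key('server_chain.pem', 'server.key')

# Server cert, private key, and CA cert each in their own file.
validators.all_separate('server.pem', 'server.key', 'ca.pem')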
import socket, console, time, sys
console.set_color(1,0,0)
print """ _____ _____ _____ _____
| _ | _ |___| __ | |
| | __|___| -| | |
|__|__|__|SavSec|__|__|_____|
UPnP Exploitation"""
console.set_color()
time.sleep(1)
ssdpsrc = { "ip_address" : "239.255.255.250",
"port" : 1900,
"mx" : 10,
"st" : "ssdp:all" }
exptpack1 = """M-SEARCH * HTTP/1.1
HOST: {ip_address}:{port}
MAN: "ssdp:discover"
ST: uuid:`reboot`
MX: 2
""".replace("\n", "\r\n").format(**ssdpsrc) + "\r\n"
ssdpre = """M-SEARCH * HTTP/1.1
HOST: {ip_address}:{port}
MAN: "ssdp:discover"
MX: {mx}
ST: {st}
""".replace("\n", "\r\n").format(**ssdpsrc) + "\r\n"
def discover(match="", timeout=2):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
s.sendto(ssdpre, (ssdpsrc["ip_address"], ssdpsrc["port"]))
s.settimeout(timeout)
responses = []
print ""
try:
while True:
response = s.recv(1000)
if match in response:
print response
responses.append(response)
except:
pass
return responses
def reboot(timeout=2):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
s.sendto(exptpack1, (ssdpsrc["ip_address"], ssdpsrc["port"]))
	s.settimeout(timeout)
trg = raw_input("\nTarget: ")
tpg = int(input("Port: "))
for i in range(4):
sys.stdout.write("\rSending Reboot Payload" + "." * i)
time.sleep(0.05)
print ""
s.sendto(exptpack1, (trg, tpg))
try:
		s.connect((str(trg), int(tpg)))
time.sleep(0.1)
s.send(u"`REBOOT`")
s.close()
time.sleep(1)
		s.connect((str(trg), int(tpg)))
except:
print "UPnP Device Rebooted"
s.close()
while 1:
location = "upnp"
act = "\n~/" + str(location) + "$: "
console.set_color(1,1,1)
try:
data = raw_input(act)
except:
pass
console.set_color()
if data == "tool" or data == "tools" or data == "t":
while 1:
location = "tools"
act = "\n~/" + str(location) + "$: "
console.set_color(1,1,1)
try:
data = raw_input(act)
except:
sys.exit()
console.set_color()
if data == "discover" or data == "find":
discover()
if data == "quit" or data == "q" or data == "exit":
sys.exit()
if data == "clear" or data == "cls" or data == "clr":
console.clear()
if data == "back" or data == "cd":
break
if data == "?" or data == "help":
print ""
console.set_font("Arial-BoldMT",16)
print "Tool Commands: "
console.set_font()
time.sleep(0.3)
print "Discover - find: discover"
time.sleep(0.3)
print "Exit - q : exit"
time.sleep(0.3)
print "Back - cd : back"
time.sleep(0.3)
print "Clear - cls : clear"
time.sleep(0.3)
if data == "exploit" or data == "exploits" or data == "e":
while 1:
location = "exploits"
act = "\n~/" + str(location) + "$: "
console.set_color(1,1,1)
try:
data = raw_input(act)
except:
sys.exit()
console.set_color()
if data == "reboot" or data == "boot":
reboot()
if data == "quit" or data == "q" or data == "exit":
sys.exit()
if data == "clear" or data == "cls" or data == "clr":
console.clear()
if data == "?" or data == "help":
print ""
console.set_font("Arial-BoldMT",16)
print "Exploit Commands: "
console.set_font()
time.sleep(0.3)
print "Reboot - boot : reboot"
time.sleep(0.3)
print "Exit - q : exit"
time.sleep(0.3)
print "Back - cd : back"
time.sleep(0.3)
print "Clear - cls : clear"
time.sleep(0.3)
if data == "back" or data == "cd":
break
if data == "exit" or data == "quit" or data == "q":
sys.exit()
if data == "clear" or data == "cls" or data == "clr":
console.clear()
if data == "help" or data == "?":
print ""
console.set_font("Arial-BoldMT",16)
print "Menu Commands: "
console.set_font()
time.sleep(0.3)
print "Tools - t : tools"
time.sleep(0.3)
print "Exploits - e : exploits"
time.sleep(0.3)
print "Exit - q : exit"
time.sleep(0.3)
print "Back - cd : back"
time.sleep(0.3)
print "Clear - cls : clear"
time.sleep(0.3)
| RussianOtter/networking | ap-ro.py | Python | gpl-3.0 | 4,174 |
from __future__ import print_function, division
from warnings import warn
from ..node import Node
from ..utils import index_of_column_name
class Clip(Node):
"""Ensures that no value is below a lower limit or above an upper limit.
If self.lower and self.upper are None then will use clip settings from
'device': {'measurements': {'upper_limit' and 'lower_limit'}}.
"""
# Not very well specified. Really want to specify that
# we need 'lower_limit' and 'upper_limit' to be specified in
# each measurement...
requirements = {'device': {'measurements': 'ANY VALUE'}}
postconditions = {'preprocessing_applied': {'clip': {}}}
def reset(self):
self.lower = None
self.upper = None
def process(self):
self.check_requirements()
metadata = self.upstream.get_metadata()
measurements = metadata['device']['measurements']
for chunk in self.upstream.process():
for measurement in chunk:
lower, upper = _find_limits(measurement, measurements)
lower = lower if self.lower is None else self.lower
upper = upper if self.upper is None else self.upper
if lower is not None and upper is not None:
# We use `chunk.iloc[:,icol]` instead of iterating
                    # through each column so we can do the clipping in place
icol = index_of_column_name(chunk, measurement)
chunk.iloc[:,icol] = chunk.iloc[:,icol].clip(lower, upper)
yield chunk
def _find_limits(measurement, measurements):
"""
Returns
-------
lower, upper : numbers
"""
for m in measurements:
if ((m.get('physical_quantity'), m.get('type')) == measurement):
return m.get('lower_limit'), m.get('upper_limit')
warn('No measurement limits for {}.'.format(measurement), RuntimeWarning)
return None, None
| diegocavalca/Studies | phd-thesis/nilmtk/nilmtk/preprocessing/clip.py | Python | cc0-1.0 | 1,947 |
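The per-column clipping above reduces to pandas' own clip; a minimal stand-alone sketch of the same limit handling, with invented limits (real ones come from the device measurement metadata):

import pandas as pd

# Stand-alone illustration of the per-column clipping done in Clip.process();
# the limits below are made up for the example.
chunk = pd.DataFrame({'power_active': [-5.0, 230.0, 99999.0]})
lower, upper = 0.0, 4000.0
chunk['power_active'] = chunk['power_active'].clip(lower, upper)
# -5.0 -> 0.0 and 99999.0 -> 4000.0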
import jwt
from aiohttp.web import json_response
from closure_table.auth.db.queries import user_get
from closure_table.settings import JWT_ALGORITHM, JWT_SECRET
def setup_middlewares(app):
app.middlewares.append(auth_middleware)
async def auth_middleware(app, handler):
async def middleware(request):
request.user = None
jwt_token = request.headers.get('X-Auth-Token')
if jwt_token:
try:
payload = jwt.decode(
jwt_token, JWT_SECRET, algorithms=[JWT_ALGORITHM]
)
except jwt.DecodeError:
return json_response(status=400, data={
'error': 'Auth token is invalid'
})
except jwt.ExpiredSignatureError:
return json_response(status=400, data={
'error': 'Auth token is expired'
})
async with request.app['db'].acquire() as conn:
request.user = await user_get(conn, payload['email'])
return await handler(request)
return middleware
| vyacheslav-bezborodov/dvhb | src/closure_table/auth/middlewares.py | Python | mit | 1,093 |
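A client-side sketch of producing a token the middleware above will accept; it assumes the same JWT_SECRET/JWT_ALGORITHM settings and an 'email' claim, since user_get() is keyed on payload['email']. The address is a placeholder.

import datetime

import jwt  # PyJWT, the same library the middleware decodes with

from closure_table.settings import JWT_ALGORITHM, JWT_SECRET

# Encode a token carrying the 'email' claim the middleware looks up, plus an
# 'exp' claim so ExpiredSignatureError can trigger once it lapses.
payload = {
    'email': '[email protected]',  # placeholder address
    'exp': datetime.datetime.utcnow() + datetime.timedelta(hours=1),
}
token = jwt.encode(payload, JWT_SECRET, algorithm=JWT_ALGORITHM)
headers = {'X-Auth-Token': token}  # header name checked by the middleware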
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" MultiQC submodule to parse output from GATK varianteval """
import logging
from collections import OrderedDict
from multiqc import config
from multiqc.plots import bargraph, table
# Initialise the logger
log = logging.getLogger(__name__)
class VariantEvalMixin():
def parse_gatk_varianteval(self):
""" Find GATK varianteval logs and parse their data """
self.gatk_varianteval = dict()
for f in self.find_log_files('gatk/varianteval', filehandles=True):
parsed_data = parse_single_report(f['f'])
if len(parsed_data) > 0:
if f['s_name'] in self.gatk_varianteval:
log.debug("Duplicate sample name found! Overwriting: {}".format(f['s_name']))
self.add_data_source(f, section='varianteval')
self.gatk_varianteval[f['s_name']] = parsed_data
# Filter to strip out ignored sample names
self.gatk_varianteval = self.ignore_samples(self.gatk_varianteval)
if len(self.gatk_varianteval) > 0:
# Write parsed report data to a file (restructure first)
self.write_data_file(self.gatk_varianteval, 'multiqc_gatk_varianteval')
# Get consensus TiTv references
titv_ref = None
for s_name in self.gatk_varianteval:
if titv_ref is None:
titv_ref = self.gatk_varianteval[s_name]['titv_reference']
elif titv_ref != self.gatk_varianteval[s_name]['titv_reference']:
titv_ref = 'Multiple'
break
# General Stats Table
varianteval_headers = dict()
varianteval_headers['known_titv'] = {
'title': 'TiTV ratio (known)',
'description': "TiTV ratio from variants found in '{}'".format(titv_ref),
'min': 0,
'scale': 'Blues',
'shared_key': 'titv_ratio'
}
varianteval_headers['novel_titv'] = {
'title': 'TiTV ratio (novel)',
'description': "TiTV ratio from variants NOT found in '{}'".format(titv_ref),
'min': 0,
'scale': 'Blues',
'shared_key': 'titv_ratio'
}
self.general_stats_addcols(self.gatk_varianteval, varianteval_headers, 'GATK VariantEval')
# Variant Counts plot
self.add_section (
name = 'Variant Counts',
anchor = 'gatk-count-variants',
plot = count_variants_barplot(self.gatk_varianteval)
)
# Compare Overlap Table
self.add_section (
name = 'Compare Overlap',
anchor = 'gatk-compare-overlap',
plot = comp_overlap_table(self.gatk_varianteval)
)
# Return the number of logs that were found
return len(self.gatk_varianteval)
def parse_single_report(f):
""" Parse a gatk varianteval varianteval """
data = dict()
in_CompOverlap = False
in_CountVariants = False
in_TiTv = False
for l in f:
# Detect section headers
if '#:GATKTable:CompOverlap' in l:
in_CompOverlap = True
elif '#:GATKTable:CountVariants' in l:
in_CountVariants = True
elif '#:GATKTable:TiTvVariantEvaluator' in l:
in_TiTv = True
else:
# Parse contents using nested loops
if in_CompOverlap:
headers = l.split()
while in_CompOverlap:
l = f.readline().strip("\n")
d = dict()
try:
for i, s in enumerate(l.split()):
d[headers[i]] = s
if d['Novelty'] == 'all':
data['reference'] = d['CompRod']
data['comp_rate'] = float(d['compRate'])
data['concordant_rate'] = float(d['concordantRate'])
data['eval_variants'] = int(d['nEvalVariants'])
data['novel_sites'] = int(d['novelSites'])
elif d['Novelty'] == 'known':
data['known_sites'] = int(d['nEvalVariants'])
except KeyError:
in_CompOverlap = False
elif in_CountVariants:
headers = l.split()
while in_CountVariants:
l = f.readline().strip("\n")
d = dict()
try:
for i, s in enumerate(l.split()):
d[headers[i]] = s
if d['Novelty'] == 'all':
data['snps'] = int(d['nSNPs'])
data['mnps'] = int(d['nMNPs'])
data['insertions'] = int(d['nInsertions'])
data['deletions'] = int(d['nDeletions'])
data['complex'] = int(d['nComplex'])
data['symbolic'] = int(d['nSymbolic'])
data['mixed'] = int(d['nMixed'])
data['nocalls'] = int(d['nNoCalls'])
except KeyError:
in_CountVariants = False
elif in_TiTv:
headers = l.split()
data['titv_reference'] = 'unknown'
while in_TiTv:
l = f.readline().strip("\n")
d = dict()
try:
for i, s in enumerate(l.split()):
d[headers[i]] = s
if d['Novelty'] == 'known':
data['titv_reference'] = d['CompRod']
data['known_titv'] = float(d['tiTvRatio'])
elif d['Novelty'] == 'novel':
data['novel_titv'] = float(d['tiTvRatio'])
except KeyError:
in_TiTv = False
return data
def count_variants_barplot(data):
""" Return HTML for the Variant Counts barplot """
keys = OrderedDict()
keys['snps'] = {'name': 'SNPs'}
keys['mnps'] = {'name': 'MNPs'}
keys['insertions'] = {'name': 'Insertions'}
keys['deletions'] = {'name': 'Deletions'}
keys['complex'] = {'name': 'Complex'}
keys['symbolic'] = {'name': 'Symbolic'}
keys['mixed'] = {'name': 'Mixed'}
keys['nocalls'] = {'name': 'No-calls'}
plot_conf = {
'id': 'gatk_varianteval_variant_plot',
'title': 'GATK VariantEval Variant Counts',
'ylab': '# Variants',
'cpswitch_counts_label': 'Number of Variants'
}
return bargraph.plot(data, keys, plot_conf)
def comp_overlap_table(data):
"""Build a table from the comp overlaps output."""
headers = OrderedDict()
headers['comp_rate'] = {
'title': 'Compare rate',
'description': 'Ratio of known variants found in the reference set.',
'namespace': 'GATK',
'min': 0,
'max': 100,
'suffix': '%',
'format': '{:,.2f}',
'scale': 'Blues',
}
headers['concordant_rate'] = {
'title': 'Concordant rate',
'description': 'Ratio of variants matching alleles in the reference set.',
'namespace': 'GATK',
'min': 0,
'max': 100,
'suffix': '%',
'format': '{:,.2f}',
'scale': 'Blues',
}
headers['eval_variants'] = {
'title': 'M Evaluated variants',
'description': 'Number of called variants (millions)',
'namespace': 'GATK',
'min': 0,
'modify': lambda x: float(x) / 1000000.0
}
headers['known_sites'] = {
'title': 'M Known sites',
'description': 'Number of known variants (millions)',
'namespace': 'GATK',
'min': 0,
'modify': lambda x: float(x) / 1000000.0
}
headers['novel_sites'] = {
'title': 'M Novel sites',
'description': 'Number of novel variants (millions)',
'namespace': 'GATK',
'min': 0,
'modify': lambda x: float(x) / 1000000.0
}
table_html = table.plot(data, headers, {'id': 'gatk_compare_overlap', 'table_title': 'GATK - Compare Overlap'})
return table_html
| robinandeer/MultiQC | multiqc/modules/gatk/varianteval.py | Python | gpl-3.0 | 8,431 |
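For orientation, a sketch of the per-sample dict that parse_single_report() builds and how the two plotting helpers consume it; the sample name, reference name, and every number below are invented.

# Illustrative only: keys mirror those set by parse_single_report().
from multiqc.modules.gatk.varianteval import comp_overlap_table, count_variants_barplot

parsed = {
    'sample_1': {
        'reference': 'dbsnp', 'comp_rate': 99.2, 'concordant_rate': 98.9,
        'eval_variants': 4500000, 'known_sites': 4300000, 'novel_sites': 200000,
        'snps': 4200000, 'mnps': 900, 'insertions': 150000, 'deletions': 148000,
        'complex': 450, 'symbolic': 0, 'mixed': 12, 'nocalls': 3000,
        'titv_reference': 'dbsnp', 'known_titv': 2.1, 'novel_titv': 1.7,
    }
}

table_html = comp_overlap_table(parsed)          # CompOverlap metrics table
barplot_html = count_variants_barplot(parsed)    # stacked variant-count bars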
from __future__ import absolute_import
from cas.config import DEBUG
import logging
LOG = logging.getLogger()
logging.basicConfig()
LOG.setLevel(logging.CRITICAL)
def enable_debug():
LOG.setLevel(logging.DEBUG)
if DEBUG:
enable_debug()
| jcmcken/cas | cas/log.py | Python | bsd-3-clause | 247 |
from datetime import datetime
from django.utils import timezone
from django.utils import dateparse
from rest_framework import viewsets
from rest_framework.views import APIView
from rest_framework.response import Response
from ..models import Event, User, Tag
from ..serializers import EventSerializer
class EventViewSet(APIView):
def get(self, request, *args, **kwargs):
# Get all events that have not ended yet
queryset = Event.objects.filter(end_date_time__gt=timezone.now())
# Obtain the list of user scopes (interests, single tag, text filtering and date filtering)
scopes = request.GET.getlist('scopes[]')
# Filter on user's interests
if 'interests' in scopes:
user_id = request.user.id
user = User.objects.get(pk=user_id)
tags = user.interest_tags.all()
queryset = queryset.filter(tag__in=tags)
# Filter to just one tag
elif 'tag' in scopes:
selected_tag = request.GET['tag']
tag = Tag.objects.get(name=selected_tag)
queryset = queryset.filter(tag=tag)
# Filter by text contained in event name
if 'name' in scopes:
text = request.GET['text']
queryset = queryset.filter(name__icontains=text)
# Filter by start date
if 'date' in scopes:
date = request.GET['date']
datetime_object = timezone.make_aware(datetime.strptime(date, '%m/%d/%y'))
queryset = queryset.filter(start_date_time__gt=datetime_object)
serializer_class = EventSerializer(queryset, many=True, context={'request': request})
return Response(serializer_class.data) | LorenzSelv/pinned | core/views/event_viewset.py | Python | mit | 1,723 |
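A request-shape sketch for the filtering above; the endpoint URL is hypothetical and only the query parameters mirror the scopes handled in get() (the 'interests' scope additionally requires an authenticated user).

import requests  # illustrative client; the endpoint URL below is a placeholder

params = [
    ('scopes[]', 'tag'), ('scopes[]', 'name'), ('scopes[]', 'date'),
    ('tag', 'music'),        # single-tag scope
    ('text', 'festival'),    # name-contains scope ('name' in scopes)
    ('date', '12/31/25'),    # start-date scope, parsed with '%m/%d/%y' above
]
resp = requests.get('https://example.org/api/events/', params=params)
events = resp.json()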
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Stateless random ops which take seed as a tensor input."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.ops import gen_stateless_random_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
ops.NotDifferentiable("StatelessMultinomial")
ops.NotDifferentiable("StatelessRandomNormal")
ops.NotDifferentiable("StatelessRandomUniform")
ops.NotDifferentiable("StatelessRandomUniformInt")
ops.NotDifferentiable("StatelessTruncatedNormal")
@tf_export("random.stateless_uniform")
def stateless_random_uniform(shape,
seed,
minval=0,
maxval=None,
dtype=dtypes.float32,
name=None):
"""Outputs deterministic pseudorandom values from a uniform distribution.
This is a stateless version of `tf.random_uniform`: if run twice with the
same seeds, it will produce the same pseudorandom numbers. The output is
consistent across multiple runs on the same hardware (and between CPU
and GPU), but may change between versions of TensorFlow or on non-CPU/GPU
hardware.
The generated values follow a uniform distribution in the range
`[minval, maxval)`. The lower bound `minval` is included in the range, while
the upper bound `maxval` is excluded.
For floats, the default range is `[0, 1)`. For ints, at least `maxval` must
be specified explicitly.
In the integer case, the random integers are slightly biased unless
`maxval - minval` is an exact power of two. The bias is small for values of
`maxval - minval` significantly smaller than the range of the output (either
`2**32` or `2**64`).
Args:
shape: A 1-D integer Tensor or Python array. The shape of the output tensor.
seed: A shape [2] integer Tensor of seeds to the random number generator.
minval: A 0-D Tensor or Python value of type `dtype`. The lower bound on the
range of random values to generate. Defaults to 0.
maxval: A 0-D Tensor or Python value of type `dtype`. The upper bound on the
range of random values to generate. Defaults to 1 if `dtype` is floating
point.
dtype: The type of the output: `float16`, `float32`, `float64`, `int32`, or
`int64`.
name: A name for the operation (optional).
Returns:
A tensor of the specified shape filled with random uniform values.
Raises:
ValueError: If `dtype` is integral and `maxval` is not specified.
"""
dtype = dtypes.as_dtype(dtype)
if dtype not in (dtypes.float16, dtypes.bfloat16, dtypes.float32,
dtypes.float64, dtypes.int32, dtypes.int64):
raise ValueError("Invalid dtype %r" % dtype)
if maxval is None:
if dtype.is_integer:
raise ValueError("Must specify maxval for integer dtype %r" % dtype)
maxval = 1
with ops.name_scope(name, "stateless_random_uniform",
[shape, seed, minval, maxval]) as name:
shape = random_ops._ShapeTensor(shape) # pylint: disable=protected-access
minval = ops.convert_to_tensor(minval, dtype=dtype, name="min")
maxval = ops.convert_to_tensor(maxval, dtype=dtype, name="max")
if dtype.is_integer:
return gen_stateless_random_ops.stateless_random_uniform_int(
shape, seed=seed, minval=minval, maxval=maxval, name=name)
else:
rnd = gen_stateless_random_ops.stateless_random_uniform(
shape, seed=seed, dtype=dtype)
return math_ops.add(rnd * (maxval - minval), minval, name=name)
@tf_export("random.stateless_normal")
def stateless_random_normal(shape,
seed,
mean=0.0,
stddev=1.0,
dtype=dtypes.float32,
name=None):
"""Outputs deterministic pseudorandom values from a normal distribution.
This is a stateless version of `tf.random_normal`: if run twice with the
same seeds, it will produce the same pseudorandom numbers. The output is
consistent across multiple runs on the same hardware (and between CPU
and GPU), but may change between versions of TensorFlow or on non-CPU/GPU
hardware.
Args:
shape: A 1-D integer Tensor or Python array. The shape of the output tensor.
seed: A shape [2] integer Tensor of seeds to the random number generator.
mean: A 0-D Tensor or Python value of type `dtype`. The mean of the normal
distribution.
stddev: A 0-D Tensor or Python value of type `dtype`. The standard deviation
of the normal distribution.
dtype: The type of the output.
name: A name for the operation (optional).
Returns:
A tensor of the specified shape filled with random normal values.
"""
with ops.name_scope(name, "stateless_random_normal",
[shape, seed, mean, stddev]) as name:
shape = random_ops._ShapeTensor(shape) # pylint: disable=protected-access
mean = ops.convert_to_tensor(mean, dtype=dtype, name="mean")
stddev = ops.convert_to_tensor(stddev, dtype=dtype, name="stddev")
rnd = gen_stateless_random_ops.stateless_random_normal(shape, seed, dtype)
return math_ops.add(rnd * stddev, mean, name=name)
@tf_export("random.stateless_truncated_normal")
def stateless_truncated_normal(shape,
seed,
mean=0.0,
stddev=1.0,
dtype=dtypes.float32,
name=None):
"""Outputs deterministic pseudorandom values, truncated normally distributed.
This is a stateless version of `tf.truncated_normal`: if run twice with the
same seeds, it will produce the same pseudorandom numbers. The output is
consistent across multiple runs on the same hardware (and between CPU
and GPU), but may change between versions of TensorFlow or on non-CPU/GPU
hardware.
The generated values follow a normal distribution with specified mean and
standard deviation, except that values whose magnitude is more than 2 standard
deviations from the mean are dropped and re-picked.
Args:
shape: A 1-D integer Tensor or Python array. The shape of the output tensor.
seed: A shape [2] integer Tensor of seeds to the random number generator.
mean: A 0-D Tensor or Python value of type `dtype`. The mean of the
truncated normal distribution.
stddev: A 0-D Tensor or Python value of type `dtype`. The standard deviation
of the normal distribution, before truncation.
dtype: The type of the output.
name: A name for the operation (optional).
Returns:
A tensor of the specified shape filled with random truncated normal values.
"""
with ops.name_scope(name, "stateless_truncated_normal",
[shape, seed, mean, stddev]) as name:
shape = random_ops._ShapeTensor(shape) # pylint: disable=protected-access
mean = ops.convert_to_tensor(mean, dtype=dtype, name="mean")
stddev = ops.convert_to_tensor(stddev, dtype=dtype, name="stddev")
rnd = gen_stateless_random_ops.stateless_truncated_normal(
shape, seed, dtype)
return math_ops.add(rnd * stddev, mean, name=name)
@tf_export(v1=["random.stateless_multinomial"])
@deprecation.deprecated(
date=None, instructions="Use tf.random.stateless_categorical instead.")
def stateless_multinomial(logits,
num_samples,
seed,
output_dtype=dtypes.int64,
name=None):
"""Draws deterministic pseudorandom samples from a multinomial distribution.
This is a stateless version of `tf.multinomial`: if run twice with the
same seeds, it will produce the same pseudorandom numbers. The output is
consistent across multiple runs on the same hardware (and between CPU
and GPU), but may change between versions of TensorFlow or on non-CPU/GPU
hardware.
Example:
```python
# samples has shape [1, 5], where each value is either 0 or 1 with equal
# probability.
samples = tf.random.stateless_multinomial(
tf.log([[10., 10.]]), 5, seed=[7, 17])
```
Args:
logits: 2-D Tensor with shape `[batch_size, num_classes]`. Each slice
`[i, :]` represents the unnormalized log-probabilities for all classes.
num_samples: 0-D. Number of independent samples to draw for each row slice.
seed: A shape [2] integer Tensor of seeds to the random number generator.
output_dtype: integer type to use for the output. Defaults to int64.
name: Optional name for the operation.
Returns:
The drawn samples of shape `[batch_size, num_samples]`.
"""
with ops.name_scope(name, "stateless_multinomial", [logits, seed]):
return stateless_multinomial_categorical_impl(logits, num_samples,
output_dtype, seed)
@tf_export("random.stateless_categorical")
def stateless_categorical(logits,
num_samples,
seed,
dtype=dtypes.int64,
name=None):
"""Draws deterministic pseudorandom samples from a categorical distribution.
This is a stateless version of `tf.categorical`: if run twice with the
same seeds, it will produce the same pseudorandom numbers. The output is
consistent across multiple runs on the same hardware (and between CPU
and GPU), but may change between versions of TensorFlow or on non-CPU/GPU
hardware.
Example:
```python
# samples has shape [1, 5], where each value is either 0 or 1 with equal
# probability.
samples = tf.random.stateless_categorical(
tf.log([[10., 10.]]), 5, seed=[7, 17])
```
Args:
logits: 2-D Tensor with shape `[batch_size, num_classes]`. Each slice
`[i, :]` represents the unnormalized log-probabilities for all classes.
num_samples: 0-D. Number of independent samples to draw for each row slice.
seed: A shape [2] integer Tensor of seeds to the random number generator.
dtype: integer type to use for the output. Defaults to int64.
name: Optional name for the operation.
Returns:
The drawn samples of shape `[batch_size, num_samples]`.
"""
with ops.name_scope(name, "stateless_categorical", [logits, seed]):
return stateless_multinomial_categorical_impl(logits, num_samples, dtype,
seed)
def stateless_multinomial_categorical_impl(logits, num_samples, dtype, seed):
"""Implementation for stateless multinomial/categorical ops (v1/v2)."""
logits = ops.convert_to_tensor(logits, name="logits")
return gen_stateless_random_ops.stateless_multinomial(
logits, num_samples, seed, output_dtype=dtype)
| hfp/tensorflow-xsmm | tensorflow/python/ops/stateless_random_ops.py | Python | apache-2.0 | 11,692 |
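A determinism sketch for the ops above: the same shape-[2] integer seed yields the same draw on repeated calls, per the docstrings. Shown assuming eager execution for brevity; under graph mode the tensors would be evaluated in a session instead.

import tensorflow as tf

# Same seed -> identical pseudorandom draws; a different seed -> a different draw.
a = tf.random.stateless_uniform(shape=[3], seed=[7, 17])
b = tf.random.stateless_uniform(shape=[3], seed=[7, 17])   # equal to `a`
c = tf.random.stateless_uniform(shape=[3], seed=[7, 18])   # generally differs
n = tf.random.stateless_normal(shape=[2, 2], seed=[7, 17], mean=0.0, stddev=1.0)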
"""
Plotting (requires matplotlib)
"""
from colorsys import hsv_to_rgb, hls_to_rgb
from libmp import NoConvergence
class VisualizationMethods(object):
plot_ignore = (ValueError, ArithmeticError, ZeroDivisionError, NoConvergence)
def plot(ctx, f, xlim=[-5,5], ylim=None, points=200, file=None, dpi=None,
singularities=[], axes=None):
r"""
Shows a simple 2D plot of a function `f(x)` or list of functions
`[f_0(x), f_1(x), \ldots, f_n(x)]` over a given interval
specified by *xlim*. Some examples::
plot(lambda x: exp(x)*li(x), [1, 4])
plot([cos, sin], [-4, 4])
plot([fresnels, fresnelc], [-4, 4])
plot([sqrt, cbrt], [-4, 4])
plot(lambda t: zeta(0.5+t*j), [-20, 20])
plot([floor, ceil, abs, sign], [-5, 5])
Points where the function raises a numerical exception or
returns an infinite value are removed from the graph.
Singularities can also be excluded explicitly
as follows (useful for removing erroneous vertical lines)::
plot(cot, ylim=[-5, 5]) # bad
plot(cot, ylim=[-5, 5], singularities=[-pi, 0, pi]) # good
For parts where the function assumes complex values, the
real part is plotted with dashes and the imaginary part
is plotted with dots.
.. note :: This function requires matplotlib (pylab).
"""
if file:
axes = None
fig = None
if not axes:
import pylab
fig = pylab.figure()
axes = fig.add_subplot(111)
if not isinstance(f, (tuple, list)):
f = [f]
a, b = xlim
colors = ['b', 'r', 'g', 'm', 'k']
for n, func in enumerate(f):
x = ctx.arange(a, b, (b-a)/float(points))
segments = []
segment = []
in_complex = False
for i in xrange(len(x)):
try:
if i != 0:
for sing in singularities:
if x[i-1] <= sing and x[i] >= sing:
raise ValueError
v = func(x[i])
if ctx.isnan(v) or abs(v) > 1e300:
raise ValueError
if hasattr(v, "imag") and v.imag:
re = float(v.real)
im = float(v.imag)
if not in_complex:
in_complex = True
segments.append(segment)
segment = []
segment.append((float(x[i]), re, im))
else:
if in_complex:
in_complex = False
segments.append(segment)
segment = []
segment.append((float(x[i]), v))
except ctx.plot_ignore:
if segment:
segments.append(segment)
segment = []
if segment:
segments.append(segment)
for segment in segments:
x = [s[0] for s in segment]
y = [s[1] for s in segment]
if not x:
continue
c = colors[n % len(colors)]
if len(segment[0]) == 3:
z = [s[2] for s in segment]
axes.plot(x, y, '--'+c, linewidth=3)
axes.plot(x, z, ':'+c, linewidth=3)
else:
axes.plot(x, y, c, linewidth=3)
axes.set_xlim(map(float, xlim))
if ylim:
axes.set_ylim(map(float, ylim))
axes.set_xlabel('x')
axes.set_ylabel('f(x)')
axes.grid(True)
if fig:
if file:
pylab.savefig(file, dpi=dpi)
else:
pylab.show()
def default_color_function(ctx, z):
if ctx.isinf(z):
return (1.0, 1.0, 1.0)
if ctx.isnan(z):
return (0.5, 0.5, 0.5)
pi = 3.1415926535898
a = (float(ctx.arg(z)) + ctx.pi) / (2*ctx.pi)
a = (a + 0.5) % 1.0
b = 1.0 - float(1/(1.0+abs(z)**0.3))
return hls_to_rgb(a, b, 0.8)
def cplot(ctx, f, re=[-5,5], im=[-5,5], points=2000, color=None,
verbose=False, file=None, dpi=None, axes=None):
"""
Plots the given complex-valued function *f* over a rectangular part
of the complex plane specified by the pairs of intervals *re* and *im*.
For example::
cplot(lambda z: z, [-2, 2], [-10, 10])
cplot(exp)
cplot(zeta, [0, 1], [0, 50])
By default, the complex argument (phase) is shown as color (hue) and
the magnitude is show as brightness. You can also supply a
custom color function (*color*). This function should take a
complex number as input and return an RGB 3-tuple containing
floats in the range 0.0-1.0.
To obtain a sharp image, the number of points may need to be
increased to 100,000 or thereabout. Since evaluating the
function that many times is likely to be slow, the 'verbose'
option is useful to display progress.
.. note :: This function requires matplotlib (pylab).
"""
if color is None:
color = ctx.default_color_function
import pylab
if file:
axes = None
fig = None
if not axes:
fig = pylab.figure()
axes = fig.add_subplot(111)
rea, reb = re
ima, imb = im
dre = reb - rea
dim = imb - ima
M = int(ctx.sqrt(points*dre/dim)+1)
N = int(ctx.sqrt(points*dim/dre)+1)
x = pylab.linspace(rea, reb, M)
y = pylab.linspace(ima, imb, N)
# Note: we have to be careful to get the right rotation.
# Test with these plots:
# cplot(lambda z: z if z.real < 0 else 0)
# cplot(lambda z: z if z.imag < 0 else 0)
w = pylab.zeros((N, M, 3))
for n in xrange(N):
for m in xrange(M):
z = ctx.mpc(x[m], y[n])
try:
v = color(f(z))
except ctx.plot_ignore:
v = (0.5, 0.5, 0.5)
w[n,m] = v
if verbose:
print n, "of", N
axes.imshow(w, extent=(rea, reb, ima, imb), origin='lower')
axes.set_xlabel('Re(z)')
axes.set_ylabel('Im(z)')
if fig:
if file:
pylab.savefig(file, dpi=dpi)
else:
pylab.show()
def splot(ctx, f, u=[-5,5], v=[-5,5], points=100, keep_aspect=True, \
wireframe=False, file=None, dpi=None, axes=None):
"""
Plots the surface defined by `f`.
If `f` returns a single component, then this plots the surface
defined by `z = f(x,y)` over the rectangular domain with
`x = u` and `y = v`.
If `f` returns three components, then this plots the parametric
surface `x, y, z = f(u,v)` over the pairs of intervals `u` and `v`.
For example, to plot a simple function::
>>> from mpmath import *
>>> f = lambda x, y: sin(x+y)*cos(y)
>>> splot(f, [-pi,pi], [-pi,pi]) # doctest: +SKIP
Plotting a donut::
>>> r, R = 1, 2.5
>>> f = lambda u, v: [r*cos(u), (R+r*sin(u))*cos(v), (R+r*sin(u))*sin(v)]
>>> splot(f, [0, 2*pi], [0, 2*pi]) # doctest: +SKIP
.. note :: This function requires matplotlib (pylab) 0.98.5.3 or higher.
"""
import pylab
import mpl_toolkits.mplot3d as mplot3d
if file:
axes = None
fig = None
if not axes:
fig = pylab.figure()
axes = mplot3d.axes3d.Axes3D(fig)
ua, ub = u
va, vb = v
du = ub - ua
dv = vb - va
if not isinstance(points, (list, tuple)):
points = [points, points]
M, N = points
u = pylab.linspace(ua, ub, M)
v = pylab.linspace(va, vb, N)
x, y, z = [pylab.zeros((M, N)) for i in xrange(3)]
xab, yab, zab = [[0, 0] for i in xrange(3)]
for n in xrange(N):
for m in xrange(M):
fdata = f(ctx.convert(u[m]), ctx.convert(v[n]))
try:
x[m,n], y[m,n], z[m,n] = fdata
except TypeError:
x[m,n], y[m,n], z[m,n] = u[m], v[n], fdata
for c, cab in [(x[m,n], xab), (y[m,n], yab), (z[m,n], zab)]:
if c < cab[0]:
cab[0] = c
if c > cab[1]:
cab[1] = c
if wireframe:
axes.plot_wireframe(x, y, z, rstride=4, cstride=4)
else:
axes.plot_surface(x, y, z, rstride=4, cstride=4)
axes.set_xlabel('x')
axes.set_ylabel('y')
axes.set_zlabel('z')
if keep_aspect:
dx, dy, dz = [cab[1] - cab[0] for cab in [xab, yab, zab]]
maxd = max(dx, dy, dz)
if dx < maxd:
delta = maxd - dx
axes.set_xlim3d(xab[0] - delta / 2.0, xab[1] + delta / 2.0)
if dy < maxd:
delta = maxd - dy
axes.set_ylim3d(yab[0] - delta / 2.0, yab[1] + delta / 2.0)
if dz < maxd:
delta = maxd - dz
axes.set_zlim3d(zab[0] - delta / 2.0, zab[1] + delta / 2.0)
if fig:
if file:
pylab.savefig(file, dpi=dpi)
else:
pylab.show()
VisualizationMethods.plot = plot
VisualizationMethods.default_color_function = default_color_function
VisualizationMethods.cplot = cplot
VisualizationMethods.splot = splot
| mattpap/sympy-polys | sympy/mpmath/visualization.py | Python | bsd-3-clause | 9,017 |
# Define a custom User class to work with django-social-auth
from django.db import models
from django.contrib.auth.models import User
class Task(models.Model):
name = models.CharField(max_length=200)
owner = models.ForeignKey(User)
finished = models.BooleanField(default=False)
shared = models.BooleanField(default=False)
class Viewer(models.Model):
name = models.ForeignKey(User)
tasks = models.ForeignKey(Task)
class Friends(models.Model):
created = models.DateTimeField(auto_now_add=True, editable=False)
creator = models.ForeignKey(User, related_name="friendship_creator_set")
friend = models.ForeignKey(User, related_name="friend_set")
class CustomUserManager(models.Manager):
def create_user(self, username, email):
return self.model._default_manager.create(username=username)
class CustomUser(models.Model):
username = models.CharField(max_length=128)
last_login = models.DateTimeField(blank=True, null=True)
objects = CustomUserManager()
def is_authenticated(self):
return True
| kurdd/Oauth | app/models.py | Python | apache-2.0 | 1,109 |
#=========================================================================
# GcdUnit cycle-level model
#=========================================================================
from pymtl import *
from pclib.ifcs import InValRdyBundle, OutValRdyBundle
from pclib.fl import Queue
from pclib.cl import InValRdyQueue, OutValRdyQueue
import fractions
#=========================================================================
# GCD Unit Cycle-Level Model
#=========================================================================
class GcdProcCL( Model ):
#-----------------------------------------------------------------------
# Constructor: Define Interface
#-----------------------------------------------------------------------
def __init__( s, cpu_ifc_types ):
s.cpu_ifc_req = InValRdyBundle ( cpu_ifc_types.req )
s.cpu_ifc_resp = OutValRdyBundle ( cpu_ifc_types.resp )
s.cpu_req_q = InValRdyQueue ( cpu_ifc_types.req )
s.cpu_resp_q = OutValRdyQueue ( cpu_ifc_types.resp )
def elaborate_logic( s ):
s.connect( s.cpu_req_q.in_ , s.cpu_ifc_req )
s.connect( s.cpu_resp_q.out, s.cpu_ifc_resp)
s.go = False
s.src0 = 0
s.src1 = 0
s.result = 0
s.counter = 0
s.counter_done = False
@s.tick_cl
def logic():
s.cpu_req_q.xtick()
s.cpu_resp_q.xtick()
if s.go and not s.counter_done:
while( s.src1 != 0 ):
# Euclid's algorithm
if s.src0 < s.src1:
tmp = s.src1
s.src1 = s.src0
s.src0 = tmp
s.src0 = s.src0 - s.src1
s.counter += 1
s.counter_done = True
elif not s.cpu_req_q.is_empty() and not s.cpu_resp_q.is_full():
req = s.cpu_req_q.deq()
if req.ctrl_msg == 1:
s.src0 = req.data
elif req.ctrl_msg == 2:
s.src1 = req.data
elif req.ctrl_msg == 0:
s.go = True
if s.counter_done and not s.cpu_resp_q.is_full():
if s.counter != 0:
s.counter -= 1
else:
s.cpu_resp_q.enq( s.src0 )
s.go = False
#-----------------------------------------------------------------------
# Line Tracing: Debug Output
#-----------------------------------------------------------------------
def line_trace( s ):
return "(" + str(s.cpu_ifc_resp.val) + str(s.cpu_ifc_resp.rdy) + ")"
| Abhinav117/pymtl | examples/gcd_cp2/GcdProcCL.py | Python | bsd-3-clause | 2,395 |
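The tick logic above is the subtraction form of Euclid's algorithm; a plain-Python rendering of the same loop, independent of the PyMTL queues and counters:

def gcd_by_subtraction(src0, src1):
    # Mirrors the swap-then-subtract loop inside the @s.tick_cl block above.
    while src1 != 0:
        if src0 < src1:
            src0, src1 = src1, src0
        src0 = src0 - src1
    return src0

assert gcd_by_subtraction(35, 21) == 7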
from ScriptingLanguage.Interpreter import Interpreter
__author__ = 'chronium'
def visit_func_call(function_call):
try:
return Interpreter().get_function(function_call.value)()
except KeyError:
print('Function [{}] undefined'.format(function_call.value))
| chronium/ChronoScript | ScriptingLanguage/Visitors/FunctionVisitor.py | Python | gpl-2.0 | 280 |
# -*- coding: utf-8 -*-
##
##
## This file is part of Indico
## Copyright (C) 2002 - 2014 European Organization for Nuclear Research (CERN)
##
## Indico is free software: you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation, either version 3 of the
## License, or (at your option) any later version.
##
## Indico is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Indico. If not, see <http://www.gnu.org/licenses/>.
# Autoinstalls setuptools if the user doesn't have them already
import ez_setup
ez_setup.use_setuptools()
import commands
import getopt
import os
import getpass
import re
import shutil
import string
import sys
import itertools
from distutils.sysconfig import get_python_lib, get_python_version
from distutils.cmd import Command
from distutils.command import bdist
from indico.util import i18n
import pkg_resources
from setuptools.command import develop, install, sdist, bdist_egg, \
easy_install, test
from setuptools import setup, find_packages, findall
try:
from babel.messages import frontend as babel
BABEL_PRESENT = True
except ImportError:
BABEL_PRESENT = False
DEPENDENCY_URLS = ["http://indico-software.org/wiki/Admin/Installation/IndicoExtras"]
DEVELOP_REQUIRES = ['pojson>=0.4', 'termcolor', 'werkzeug', 'nodeenv', 'fabric', 'sphinx', 'repoze.sphinx.autointerface']
if sys.platform == 'linux2':
import pwd
import grp
class vars(object):
'''Variable holder.'''
packageDir = None
versionVal = 'None'
accessuser = None
accessgroup = None
dbInstalledBySetupPy = False
binDir = None
documentationDir = None
configurationDir = None
htdocsDir = None
### Methods required by setup() ##############################################
def _generateDataPaths(x):
dataFilesDict = {}
for (baseDstDir, srcDir) in x:
for f in findall(srcDir):
dst_dir = os.path.join(baseDstDir,
os.path.relpath(os.path.dirname(f), srcDir))
if dst_dir not in dataFilesDict:
dataFilesDict[dst_dir] = []
dataFilesDict[dst_dir].append(f)
dataFiles = []
for k, v in dataFilesDict.items():
dataFiles.append((k, v))
return dataFiles
def _getDataFiles(x):
"""
Returns a fully populated data_files ready to be fed to setup()
WARNING: when creating a bdist_egg we need to include files inside bin,
doc, config & htdocs into the egg therefore we cannot fetch indico.conf
values directly because they will not refer to the proper place. We
include those files in the egg's root folder.
"""
# setup expects a list like this (('foo/bar/baz', 'wiki.py'),
# ('a/b/c', 'd.jpg'))
#
# What we do below is transform a list like this:
# (('foo', 'bar/baz/wiki.py'),
# ('a', 'b/c/d.jpg'))
#
    # first into a dict and then into a palatable form for setuptools.
    # This re will be used to filter out etc/*.conf files and therefore not overwriting them
dataFiles = _generateDataPaths((('bin', 'bin'),
('doc', 'doc'),
('etc', 'etc')))
return dataFiles
def _getInstallRequires():
'''Returns external packages required by Indico
These are the ones needed for runtime.'''
base = ['ZODB3==3.10.5', 'zope.index==3.6.4', 'zope.interface==3.8.0',
'pytz', 'lxml', 'cds-indico-extras', 'zc.queue==1.3',
'python-dateutil<2.0', 'pypdf', 'mako==0.9.1', 'babel',
'icalendar>=3.2', 'pyatom', 'jsmin', 'cssmin', 'webassets', 'pojson>=0.4',
'requests>=1.2.0', 'simplejson>=2.1.0', 'reportlab', 'Pillow', 'oauth2', 'pyscss==1.1.5', 'Werkzeug==0.9',
'Flask==0.10', 'bcrypt==1.0.2', 'beautifulsoup4==4.2.1', 'pycountry==1.2', 'Pillow==2.1.0', 'qrcode==3.0',
'markdown', 'bleach']
#for Python older than 2.7
if sys.version_info[0] <= 2 and sys.version_info[1] < 7:
base += ['argparse', 'ordereddict']
return base
def _versionInit():
'''Retrieves the version number from indico/MaKaC/__init__.py and returns it'''
from indico.MaKaC import __version__
v = __version__
print('Indico %s' % v)
return v
### Commands ###########################################################
class sdist_indico(sdist.sdist):
user_options = (sdist.sdist.user_options +
[('version=', None, 'version to distribute')])
version = 'dev'
def run(self):
sdist.sdist.run(self)
def _bdist_indico(dataFiles):
class bdist_indico(bdist.bdist):
def run(self):
compileAllLanguages(self)
bdist.bdist.run(self)
bdist_indico.dataFiles = dataFiles
return bdist_indico
def _bdist_egg_indico(dataFiles):
class bdist_egg_indico(bdist_egg.bdist_egg):
def run(self):
compileAllLanguages(self)
bdist_egg.bdist_egg.run(self)
bdist_egg_indico.dataFiles = dataFiles
return bdist_egg_indico
class develop_indico(develop.develop):
def run(self):
develop.develop.run(self)
# create symlink to legacy MaKaC dir
# this is so that the ".egg-link" created by the "develop" command works
if sys.platform in ["linux2", "darwin"] and not os.path.exists('MaKaC'):
os.symlink('indico/MaKaC', 'MaKaC')
# install dev dependencies
env = pkg_resources.Environment()
easy_install.main(DEVELOP_REQUIRES)
env.scan()
class develop_config(develop_indico):
description = "prepares the current directory for Indico development"
user_options = (develop.develop.user_options +
[('www-uid=', None, "Set user for cache/log/db (typically apache user)"),
('www-gid=', None, "Set group for cache/log/db (typically apache group)"),
('http-port=', None, "Set port used by HTTP server"),
('https-port=', None, "Set port used by HTTP server in HTTPS mode"),
('zodb-port=', None, "Set port used by ZODB"),
('smtp-port=', None, "Set port used for SMTP (e-mail sending)"),
('use-apache', None, "Use apache (will chmod directories accordingly)")])
www_uid = None
www_gid = None
http_port = 8000
https_port = 8443
zodb_port = 9675
use_apache = False
smtp_port = 8025
def run(self):
# dependencies, links, etc...
develop_indico.run(self)
local = 'etc/indico.conf'
if os.path.exists(local):
print 'Upgrading existing etc/indico.conf...'
else:
print 'Creating new etc/indico.conf..'
shutil.copy('etc/indico.conf.sample', local)
upgrade_indico_conf(local, 'etc/indico.conf.sample', {
'BaseURL': 'http://localhost:{0}/indico'.format(self.http_port),
'BaseSecureURL': 'https://localhost:{0}/indico'.format(self.https_port),
'DBConnectionParams': ("localhost", int(self.zodb_port)),
'SmtpServer': ("localhost", int(self.smtp_port))
})
for f in [x for x in ('etc/zdctl.conf', 'etc/zodb.conf', 'etc/logging.conf') if not os.path.exists(x)]:
shutil.copy('%s.sample' % f, f)
print """\nIndico needs to store some information in the filesystem (database, cache, temporary files, logs...)
Please specify the directory where you'd like it to be placed.
(Note that putting it outside of your sourcecode tree is recommended)"""
prefixDirDefault = os.path.dirname(os.getcwd())
prefixDir = raw_input('Full path [%s]: ' % prefixDirDefault).strip()
if prefixDir == '':
prefixDir = prefixDirDefault
directories = dict((d, os.path.join(prefixDir, d)) for d in
['db', 'log', 'tmp', 'cache', 'archive'])
print 'Creating directories...',
for d in directories.values():
if not os.path.exists(d):
os.makedirs(d)
print 'Done!'
# add existing dirs
directories.update(dict((d, os.path.join(os.getcwd(), 'indico', d)) for d in ['htdocs', 'bin', 'etc', 'doc']))
self._update_conf_dir_paths(local, directories)
# avoid modifying the htdocs folder permissions (it brings problems with git)
directories.pop('htdocs')
from MaKaC.consoleScripts.installBase import _databaseText, _findApacheUserGroup, _checkDirPermissions, \
_updateDbConfigFiles, _updateMaKaCEggCache
user = getpass.getuser()
sourcePath = os.getcwd()
if self.use_apache:
# find the apache user/group
user, group = _findApacheUserGroup(self.www_uid, self.www_gid)
_checkDirPermissions(directories, dbInstalledBySetupPy=directories['db'], accessuser=user, accessgroup=group)
_updateDbConfigFiles(os.path.join(sourcePath, 'etc'),
db=directories['db'],
log=directories['log'],
tmp=directories['tmp'],
port=self.zodb_port,
uid=user)
_updateMaKaCEggCache(os.path.join(os.path.dirname(__file__), 'indico', 'MaKaC', '__init__.py'),
directories['tmp'])
compileAllLanguages(self)
print '''
%s
''' % _databaseText('etc')
def _update_conf_dir_paths(self, filePath, dirs):
fdata = open(filePath).read()
for dir in dirs.items():
d = dir[1].replace("\\", "/") # For Windows users
fdata = re.sub('\/opt\/indico\/%s' % dir[0], d, fdata)
open(filePath, 'w').write(fdata)
class test_indico(test.test):
"""
Test command for Indico
"""
description = "Test Suite Framework"
user_options = (test.test.user_options + [('specify=', None, "Use nosetests style (file.class:testcase)"),
('coverage', None, "Output coverage report in html"),
('unit', None, "Run only Unit tests"),
('functional', None, "Run only Functional tests"),
('pylint', None, "Run python source analysis"),
('jsunit', None, "Run js unit tests"),
('jslint', None, "Run js source analysis"),
('jscoverage', None, "Output coverage report in html for js"),
('jsspecify=', None, "Use js-test-driver style (TestCaseName.testName)"),
('log=', None, "Log to console, using specified level"),
('browser=', None, "Browser to use for functional tests"),
('mode=', None, "Mode to use for functional tests"),
('server-url=', None, "Server URL to use for functional tests"),
('xml', None, "XML output"),
('html', None, "Make an HTML report (when possible)"),
('record', None, "Record tests (for --functional)"),
('silent', None, "Don't output anything in the console, just generate the report"),
('killself', None,
"Kill this script right after the tests finished without waiting for db shutdown.")])
boolean_options = []
specify = None
coverage = False
unit = False
functional = False
browser = None
pylint = False
jsunit = False
jslint = False
jscoverage = False
jsspecify = None
silent = False
mode = None
server_url = None
killself = True
html = False
record = False
log = False
xml = False
def _wrap(self, func, *params):
def wrapped():
self.res = func(*params)
self.with_project_on_sys_path(wrapped)
return self.res
def finalize_options(self):
testsToRun = []
allTests = ['unit', 'functional']
for testType in allTests:
if getattr(self, testType):
testsToRun.append(testType)
if self.jsspecify and 'jsunit' not in testsToRun:
testsToRun.append('jsunit')
if testsToRun == []:
testsToRun = allTests
self.testsToRun = testsToRun
def run(self):
if self.distribution.install_requires:
self.distribution.fetch_build_eggs(self.distribution.install_requires)
if self.distribution.tests_require:
self.distribution.fetch_build_eggs(self.distribution.tests_require)
from indico.tests import TestManager
options = {'silent': self.silent,
'killself': self.killself,
'html': self.html,
'browser': self.browser,
'mode': self.mode,
'specify': self.specify,
'coverage': self.coverage,
'record': self.record,
'server_url': self.server_url,
'log': self.log,
'xml': self.xml}
# get only options that are active
options = dict((k, v) for (k, v) in options.iteritems() if v)
manager = TestManager()
result = self._wrap(manager.main, self.testsToRun, options)
sys.exit(result)
def download(self, url, path):
"""Copy the contents of a file from a given URL
to a local file.
"""
import urllib
webFile = urllib.urlopen(url)
localFile = open(os.path.join(path, url.split('/')[-1]), 'w')
localFile.write(webFile.read())
webFile.close()
localFile.close()
def unzip(self, zipPath, inZipPath, targetFile):
"""extract the needed file from zip and then delete the zip"""
import zipfile
try:
zfobj = zipfile.ZipFile(zipPath)
outfile = open(targetFile, 'wb')
outfile.write(zfobj.read(inZipPath))
outfile.flush()
outfile.close()
#delete zip file
os.unlink(zipPath)
except NameError, e:
print e
class egg_filename(Command):
description = "Get the file name of the generated egg"
user_options = []
boolean_options = []
def initialize_options(self):
pass
def finalize_options(self):
ei_cmd = self.ei_cmd = self.get_finalized_command("egg_info")
self.egg_info = ei_cmd.egg_info
basename = pkg_resources.Distribution(
None, None, ei_cmd.egg_name, ei_cmd.egg_version,
get_python_version(),
            self.distribution.has_ext_modules() and pkg_resources.get_build_platform()).egg_name()
print basename
def run(self):
pass
if __name__ == '__main__':
# Always load source from the current folder
sys.path = [os.path.abspath('indico')] + sys.path
#PWD_INDICO_CONF = 'etc/indico.conf'
#if not os.path.exists(PWD_INDICO_CONF):
# shutil.copy('etc/indico.conf.sample', PWD_INDICO_CONF)
from MaKaC.consoleScripts.installBase import *
#Dirty trick: For running tests, we need to load all the modules and get rid of unnecessary outputs
tempLoggingDir = None
if 'test' in sys.argv:
import logging
import tempfile
tempLoggingDir = tempfile.mkdtemp()
logging.basicConfig(filename=os.path.join(tempLoggingDir, 'logging'),
level=logging.DEBUG)
setIndicoInstallMode(False)
else:
setIndicoInstallMode(True)
x = vars()
x.packageDir = os.path.join(get_python_lib(), 'MaKaC')
x.binDir = 'bin'
x.documentationDir = 'doc'
x.configurationDir = 'etc'
x.htdocsDir = 'htdocs'
dataFiles = _getDataFiles(x)
foundPackages = list('MaKaC.%s' % pkg for pkg in
find_packages(where='indico/MaKaC'))
foundPackages.append('MaKaC')
foundPackages.append('htdocs')
# add our namespace package
foundPackages += list('indico.%s' % pkg for pkg in
find_packages(where='indico',
exclude=['htdocs*', 'MaKaC*']))
foundPackages.append('indico')
cmdclass = {'sdist': sdist_indico,
'bdist': _bdist_indico(dataFiles),
'bdist_egg': _bdist_egg_indico(dataFiles),
'develop_config': develop_config,
'develop': develop_indico,
'test': test_indico,
'egg_filename': egg_filename
}
if BABEL_PRESENT:
for cmdname in ['init_catalog', 'extract_messages', 'compile_catalog', 'update_catalog']:
cmdclass['%s_js' % cmdname] = getattr(babel, cmdname)
cmdclass['compile_catalog_js'] = i18n.generate_messages_js
setup(name="indico",
cmdclass=cmdclass,
version=_versionInit(),
description="Indico is a full-featured conference lifecycle management and meeting/lecture scheduling tool",
author="Indico Team",
author_email="[email protected]",
url="http://indico-software.org",
download_url="http://indico-software.org/wiki/Releases/Indico1.1",
platforms=["any"],
long_description="Indico allows you to schedule conferences, from single talks to complex meetings with "
"sessions and contributions. It also includes an advanced user delegation mechanism, "
"allows paper reviewing, archival of conference information and electronic proceedings",
license="http://www.gnu.org/licenses/gpl-3.0.txt",
entry_points="""
[console_scripts]
indico_scheduler = indico.modules.scheduler.daemon_script:main
indico_initial_setup = MaKaC.consoleScripts.indicoInitialSetup:main
indico_ctl = MaKaC.consoleScripts.indicoCtl:main
indico_livesync = indico.ext.livesync.console:main
indico_shell = indico.util.shell:main
indico_admin = indico.util.admin:main
[indico.ext_types]
statistics = indico.ext.statistics
Collaboration = MaKaC.plugins.Collaboration
InstantMessaging = MaKaC.plugins.InstantMessaging
RoomBooking = MaKaC.plugins.RoomBooking
EPayment = MaKaC.plugins.EPayment
livesync = indico.ext.livesync
importer = indico.ext.importer
calendaring = indico.ext.calendaring
search = indico.ext.search
[indico.ext]
statistics.piwik = indico.ext.statistics.piwik
Collaboration.EVO = MaKaC.plugins.Collaboration.EVO
Collaboration.WebEx = MaKaC.plugins.Collaboration.WebEx
Collaboration.Vidyo = MaKaC.plugins.Collaboration.Vidyo
Collaboration.CERNMCU = MaKaC.plugins.Collaboration.CERNMCU
Collaboration.RecordingManager = MaKaC.plugins.Collaboration.RecordingManager
Collaboration.RecordingRequest = MaKaC.plugins.Collaboration.RecordingRequest
Collaboration.WebcastRequest = MaKaC.plugins.Collaboration.WebcastRequest
RoomBooking.CERN = MaKaC.plugins.RoomBooking.CERN
RoomBooking.default = MaKaC.plugins.RoomBooking.default
EPayment.payPal = MaKaC.plugins.EPayment.payPal
EPayment.worldPay = MaKaC.plugins.EPayment.worldPay
EPayment.yellowPay = MaKaC.plugins.EPayment.yellowPay
EPayment.skipjack = MaKaC.plugins.EPayment.skipjack
importer.invenio = indico.ext.importer.invenio
importer.dummy = indico.ext.importer.dummy
InstantMessaging.XMPP = MaKaC.plugins.InstantMessaging.XMPP
livesync.invenio = indico.ext.livesync.invenio
livesync.cern_search = indico.ext.livesync.cern_search
calendaring.outlook = indico.ext.calendaring.outlook
search.invenio = indico.ext.search.invenio
""",
zip_safe=False,
packages=foundPackages,
package_dir={'indico': 'indico',
'htdocs': os.path.join('indico', 'htdocs'),
'MaKaC': os.path.join('indico', 'MaKaC')},
package_data={'indico': ['*.*']},
include_package_data=True,
namespace_packages=['indico', 'indico.ext'],
install_requires=_getInstallRequires(),
tests_require=['nose', 'rednose', 'twill', 'selenium', 'figleaf'],
data_files=dataFiles,
dependency_links=DEPENDENCY_URLS
)
#delete the temp folder used for logging
if 'test' in sys.argv:
shutil.rmtree(tempLoggingDir)
| Ictp/indico | setup.py | Python | gpl-3.0 | 21,182 |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
'''Pychemqt, Chemical Engineering Process simulator
Copyright (C) 2009-2017, Juan José Gómez Romera <[email protected]>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.'''
from unittest import TestCase
from lib import unidades
from lib.meos import MEoS
class NF3(MEoS):
"""Multiparameter equation of state for nitrogen trifluoride"""
name = "nitrogen trifluoride"
CASNumber = "7783-54-2"
formula = "NF3"
synonym = ""
_refPropName = "NF3"
_coolPropName = ""
rhoc = unidades.Density(562.47)
Tc = unidades.Temperature(234.0)
Pc = unidades.Pressure(4460.7, "kPa")
M = 71.019 # g/mol
Tt = unidades.Temperature(66.36)
Tb = unidades.Temperature(144.138)
f_acent = 0.126
momentoDipolar = unidades.DipoleMoment(0.235, "Debye")
# id = 951
CP1 = {"ao": -7.140693612211,
"an": [0.7427518245951e6, -0.4389825372134e5, 0.1012629224351e4,
0.5481339146452e-1, -0.7677196006769e-4, 0.4203630864340e-7],
"pow": [-3, -2, -1, 1, 2, 3],
"ao_exp": [-0.6328752997967], "exp": [3000]}
younglove = {
"__type__": "MBWR",
"__name__": "MBWR equation of state for nitrogen trifluoride of "
"Younglove (1982)",
"__doi__": {"autor": "Younglove, B.A.",
"title": "Thermophysical Properties of Fluids. I. Argon, "
"Ethylene, Parahydrogen, Nitrogen, Nitrogen "
"Trifluoride, and Oxygen",
"ref": "J. Phys. Chem. Ref. Data, 11(Suppl. 1) (1982)",
"doi": ""},
"R": 8.31441,
"rhoc": 7.92, "Tc": 234, "Pc": 4460.7, "M": 71.019,
"cp": CP1,
"ref": {"Tref": 300, "Pref": 101.325, "ho": 11900, "so": 260.9},
"Tmin": Tt, "Tmax": 500.0, "Pmax": 50000.0, "rhomax": 26.4,
"gamma": -0.0056,
"b": [None, 0.1774353868e-1, -0.5409379418, 0.3976634466e1,
-0.5209476694e3, -0.3286322888e5, -0.5990517411e-3, 0.9217525601,
-0.4848977075e3, -0.4235892691e7, -0.9824248063e-5, .05432235989,
-0.1462388500e2, -0.3366180440e-2, 0.2801374599, 0.8435288597e1,
-0.1324421452e-1, 0.1875604377e-3, 0.2959643991, -0.700997687e-2,
0.4365820912e7, -0.1111397536e8, 0.2411866612e5, 0.3179136276e7,
0.6166849090e2, 0.4260854720e2, 0.1090598789, -0.3340951059e2,
0.8597429644e-4, 0.1240544214e-2, 0.1286224248e-6,
-0.8941104276e-6, 0.3353054595e-4]}
eq = younglove,
_vapor_Pressure = {
"eq": 3,
"n": [-0.66672e1, 0.33864e1, -0.28222e1, -0.50602e1, 0.32481e1],
"t": [1.0, 1.5, 1.7, 5.5, 7.0]}
_liquid_Density = {
"eq": 1,
"n": [0.22080e1, 0.35709e2, -0.92868e2, 0.66666e2, -0.93589e1],
"t": [0.35, 2.4, 2.7, 3.0, 4.0]}
_vapor_Density = {
"eq": 2,
"n": [-3.061, -8.0541, -19.619, -13.432, -32.76, -67.907],
"t": [0.421, 1.48, 3.9, 7.0, 8.0, 15.0]}
class Test(TestCase):
def test_younglove(self):
# The saturation state use ancillary equation for saturation pressure
# and densities calculated values so differ of equation values
# Selected point from Appendix J, Pag 267, single phase region
st = NF3(T=120, P=2e4)
self.assertEqual(round(st.rho, 0), 1649)
self.assertEqual(round(st.rhoM, 2), 23.22)
self.assertEqual(round(st.uM.Jmol, 0), -8547)
self.assertEqual(round(st.hM.Jmol, 0), -8546)
self.assertEqual(round(st.sM.JmolK, 1), 134.6)
self.assertEqual(round(st.cvM.JmolK, 2), 41.82)
self.assertEqual(round(st.cpM.JmolK, 2), 70.90)
self.assertEqual(round(st.w, 1), 894.1)
st = NF3(T=200, P=4e4)
self.assertEqual(round(st.rho, 3), 1.717)
self.assertEqual(round(st.rhoM, 5), 0.02418)
self.assertEqual(round(st.uM.Jmol, 0), 5420)
self.assertEqual(round(st.hM.Jmol, 0), 7074)
self.assertEqual(round(st.sM.JmolK, 1), 249.2)
self.assertEqual(round(st.cvM.JmolK, 2), 34.57)
self.assertEqual(round(st.cpM.JmolK, 2), 43.07)
self.assertEqual(round(st.w, 1), 169.9)
st = NF3(T=500, P=6e4)
self.assertEqual(round(st.rho, 3), 1.025)
self.assertEqual(round(st.rhoM, 5), 0.01443)
self.assertEqual(round(st.uM.Jmol, 0), 20051)
self.assertEqual(round(st.hM.Jmol, 0), 24208)
self.assertEqual(round(st.sM.JmolK, 1), 296.4)
self.assertEqual(round(st.cvM.JmolK, 2), 59.25)
self.assertEqual(round(st.cpM.JmolK, 2), 67.59)
self.assertEqual(round(st.w, 1), 258.4)
st = NF3(T=68, P=1e5)
self.assertEqual(round(st.rho, 0), 1863)
self.assertEqual(round(st.rhoM, 2), 26.23)
self.assertEqual(round(st.uM.Jmol, 0), -12230)
self.assertEqual(round(st.hM.Jmol, 0), -12226)
self.assertEqual(round(st.sM.JmolK, 2), 94.41)
self.assertEqual(round(st.cvM.JmolK, 2), 46.54)
self.assertEqual(round(st.cpM.JmolK, 2), 73.05)
self.assertEqual(round(st.w, 0), 1429)
# Reference state
st = NF3(T=300, P=101325)
self.assertEqual(round(st.rho, 3), 2.895)
self.assertEqual(round(st.rhoM, 5), 0.04077)
self.assertEqual(round(st.uM.Jmol, 0), 9415)
self.assertEqual(round(st.hM.Jmol, 0), 11900)
self.assertEqual(round(st.sM.JmolK, 1), 260.9)
self.assertEqual(round(st.cvM.JmolK, 2), 45.28)
self.assertEqual(round(st.cpM.JmolK, 2), 53.74)
self.assertEqual(round(st.w, 1), 203.4)
st = NF3(T=155, P=2e5)
self.assertEqual(round(st.rho, 2), 11.73)
self.assertEqual(round(st.rhoM, 4), 0.1652)
self.assertEqual(round(st.uM.Jmol, 0), 3826)
self.assertEqual(round(st.hM.Jmol, 0), 5037)
self.assertEqual(round(st.sM.JmolK, 1), 224.5)
self.assertEqual(round(st.cvM.JmolK, 2), 31.20)
self.assertEqual(round(st.cpM.JmolK, 2), 42.08)
self.assertEqual(round(st.w, 1), 146.7)
st = NF3(T=140, P=3e5)
self.assertEqual(round(st.rho, 0), 1558)
self.assertEqual(round(st.rhoM, 2), 21.94)
self.assertEqual(round(st.uM.Jmol, 0), -7129)
self.assertEqual(round(st.hM.Jmol, 0), -7115)
self.assertEqual(round(st.sM.JmolK, 1), 145.6)
self.assertEqual(round(st.cvM.JmolK, 2), 40.40)
self.assertEqual(round(st.cpM.JmolK, 2), 71.73)
self.assertEqual(round(st.w, 1), 781.8)
st = NF3(T=420, P=4e5)
self.assertEqual(round(st.rho, 3), 8.164)
self.assertEqual(round(st.rhoM, 4), 0.1150)
self.assertEqual(round(st.uM.Jmol, 0), 15431)
self.assertEqual(round(st.hM.Jmol, 0), 18911)
self.assertEqual(round(st.sM.JmolK, 1), 269.1)
self.assertEqual(round(st.cvM.JmolK, 2), 54.89)
self.assertEqual(round(st.cpM.JmolK, 2), 63.46)
self.assertEqual(round(st.w, 1), 237.6)
st = NF3(T=170, P=5e5)
self.assertEqual(round(st.rho, 0), 1407)
self.assertEqual(round(st.rhoM, 2), 19.81)
self.assertEqual(round(st.uM.Jmol, 0), -4914)
self.assertEqual(round(st.hM.Jmol, 0), -4889)
self.assertEqual(round(st.sM.JmolK, 1), 159.9)
self.assertEqual(round(st.cvM.JmolK, 2), 40.40)
self.assertEqual(round(st.cpM.JmolK, 2), 77.55)
self.assertEqual(round(st.w, 1), 615.2)
st = NF3(T=220, P=6e5)
self.assertEqual(round(st.rho, 2), 24.77)
self.assertEqual(round(st.rhoM, 4), 0.3487)
self.assertEqual(round(st.uM.Jmol, 0), 5915)
self.assertEqual(round(st.hM.Jmol, 0), 7635)
self.assertEqual(round(st.sM.JmolK, 1), 229.9)
self.assertEqual(round(st.cvM.JmolK, 2), 37.81)
self.assertEqual(round(st.cpM.JmolK, 2), 48.56)
self.assertEqual(round(st.w, 1), 170.9)
st = NF3(T=185, P=8e5)
self.assertEqual(round(st.rho, 2), 43.39)
self.assertEqual(round(st.rhoM, 4), 0.6109)
self.assertEqual(round(st.uM.Jmol, 0), 4432)
self.assertEqual(round(st.hM.Jmol, 0), 5741)
self.assertEqual(round(st.sM.JmolK, 1), 218.3)
self.assertEqual(round(st.cvM.JmolK, 2), 36.33)
self.assertEqual(round(st.cpM.JmolK, 2), 52.63)
self.assertEqual(round(st.w, 1), 149.0)
st = NF3(T=68, P=1e6)
self.assertEqual(round(st.rho, 0), 1864)
self.assertEqual(round(st.rhoM, 2), 26.24)
self.assertEqual(round(st.uM.Jmol, 0), -12235)
self.assertEqual(round(st.hM.Jmol, 0), -12197)
self.assertEqual(round(st.sM.JmolK, 1), 94.3)
self.assertEqual(round(st.cvM.JmolK, 2), 47.25)
self.assertEqual(round(st.cpM.JmolK, 2), 73.01)
self.assertEqual(round(st.w, 0), 1412)
st = NF3(T=205, P=2e6)
self.assertEqual(round(st.rho, 0), 1180)
self.assertEqual(round(st.rhoM, 2), 16.62)
self.assertEqual(round(st.uM.Jmol, 0), -2005)
self.assertEqual(round(st.hM.Jmol, 0), -1884)
self.assertEqual(round(st.sM.JmolK, 1), 175.5)
self.assertEqual(round(st.cvM.JmolK, 2), 43.09)
self.assertEqual(round(st.cpM.JmolK, 2), 99.23)
self.assertEqual(round(st.w, 1), 387.4)
st = NF3(T=500, P=3e6)
self.assertEqual(round(st.rho, 2), 51.57)
self.assertEqual(round(st.rhoM, 4), 0.7261)
self.assertEqual(round(st.uM.Jmol, 0), 19732)
self.assertEqual(round(st.hM.Jmol, 0), 23864)
self.assertEqual(round(st.sM.JmolK, 1), 263.2)
self.assertEqual(round(st.cvM.JmolK, 2), 59.23)
self.assertEqual(round(st.cpM.JmolK, 2), 68.87)
self.assertEqual(round(st.w, 1), 259.5)
st = NF3(T=230, P=4e6)
self.assertEqual(round(st.rho, 1), 864.4)
self.assertEqual(round(st.rhoM, 2), 12.17)
self.assertEqual(round(st.uM.Jmol, 1), 848.9)
self.assertEqual(round(st.hM.Jmol, 0), 1178)
self.assertEqual(round(st.sM.JmolK, 1), 188.9)
self.assertEqual(round(st.cvM.JmolK, 2), 50.45)
self.assertEqual(round(st.cpM.JmolK, 1), 300.6)
self.assertEqual(round(st.w, 1), 171.9)
st = NF3(T=68, P=5e6)
self.assertEqual(round(st.rho, 0), 1867)
self.assertEqual(round(st.rhoM, 2), 26.29)
self.assertEqual(round(st.uM.Jmol, 0), -12255)
self.assertEqual(round(st.hM.Jmol, 0), -12065)
self.assertEqual(round(st.sM.JmolK, 2), 94.04)
self.assertEqual(round(st.cvM.JmolK, 2), 50.28)
self.assertEqual(round(st.cpM.JmolK, 2), 72.82)
self.assertEqual(round(st.w, 0), 1338)
st = NF3(T=272, P=6e6)
self.assertEqual(round(st.rho, 1), 280.4)
self.assertEqual(round(st.rhoM, 3), 3.948)
self.assertEqual(round(st.uM.Jmol, 0), 6213)
self.assertEqual(round(st.hM.Jmol, 0), 7732)
self.assertEqual(round(st.sM.JmolK, 1), 214.4)
self.assertEqual(round(st.cvM.JmolK, 2), 47.86)
self.assertEqual(round(st.cpM.JmolK, 2), 89.87)
self.assertEqual(round(st.w, 1), 160.7)
st = NF3(T=70, P=7e6)
self.assertEqual(round(st.rho, 0), 1861)
self.assertEqual(round(st.rhoM, 2), 26.21)
self.assertEqual(round(st.uM.Jmol, 0), -12122)
self.assertEqual(round(st.hM.Jmol, 0), -11855)
self.assertEqual(round(st.sM.JmolK, 2), 95.98)
self.assertEqual(round(st.cvM.JmolK, 2), 50.28)
self.assertEqual(round(st.cpM.JmolK, 2), 70.96)
self.assertEqual(round(st.w, 0), 1249)
st = NF3(T=284, P=8e6)
self.assertEqual(round(st.rho, 1), 373.9)
self.assertEqual(round(st.rhoM, 3), 5.265)
self.assertEqual(round(st.uM.Jmol, 0), 6210)
self.assertEqual(round(st.hM.Jmol, 0), 7729)
self.assertEqual(round(st.sM.JmolK, 1), 212.8)
self.assertEqual(round(st.cvM.JmolK, 2), 49.06)
self.assertEqual(round(st.cpM.JmolK, 2), 98.58)
self.assertEqual(round(st.w, 1), 169.4)
st = NF3(T=500, P=1e7)
self.assertEqual(round(st.rho, 1), 172.3)
self.assertEqual(round(st.rhoM, 3), 2.426)
self.assertEqual(round(st.uM.Jmol, 0), 19004)
self.assertEqual(round(st.hM.Jmol, 0), 23127)
self.assertEqual(round(st.sM.JmolK, 1), 251.8)
self.assertEqual(round(st.cvM.JmolK, 2), 59.17)
self.assertEqual(round(st.cpM.JmolK, 2), 71.81)
self.assertEqual(round(st.w, 1), 266.9)
st = NF3(T=72, P=2e7)
self.assertEqual(round(st.rho, 0), 1868)
self.assertEqual(round(st.rhoM, 2), 26.30)
self.assertEqual(round(st.uM.Jmol, 0), -12047)
self.assertEqual(round(st.hM.Jmol, 0), -11286)
self.assertEqual(round(st.sM.JmolK, 2), 97.02)
self.assertEqual(round(st.cvM.JmolK, 2), 53.87)
self.assertEqual(round(st.cpM.JmolK, 2), 68.70)
self.assertEqual(round(st.w, 0), 1047)
st = NF3(T=300, P=3e7)
self.assertEqual(round(st.rho, 1), 969.3)
self.assertEqual(round(st.rhoM, 2), 13.65)
self.assertEqual(round(st.uM.Jmol, 0), 3606)
self.assertEqual(round(st.hM.Jmol, 0), 5804)
self.assertEqual(round(st.sM.JmolK, 1), 198.8)
self.assertEqual(round(st.cvM.JmolK, 2), 49.48)
self.assertEqual(round(st.cpM.JmolK, 2), 82.92)
self.assertEqual(round(st.w, 1), 379.0)
st = NF3(T=80, P=4e7)
self.assertEqual(round(st.rho, 0), 1863)
self.assertEqual(round(st.rhoM, 2), 26.23)
self.assertEqual(round(st.uM.Jmol, 0), -11626)
self.assertEqual(round(st.hM.Jmol, 0), -10101)
self.assertEqual(round(st.sM.JmolK, 1), 102.6)
self.assertEqual(round(st.cvM.JmolK, 2), 45.38)
self.assertEqual(round(st.cpM.JmolK, 2), 69.45)
self.assertEqual(round(st.w, 0), 1068)
st = NF3(T=500, P=5e7)
self.assertEqual(round(st.rho, 1), 687.9)
self.assertEqual(round(st.rhoM, 3), 9.686)
self.assertEqual(round(st.uM.Jmol, 0), 16264)
self.assertEqual(round(st.hM.Jmol, 0), 21426)
self.assertEqual(round(st.sM.JmolK, 1), 234.2)
self.assertEqual(round(st.cvM.JmolK, 2), 60.63)
self.assertEqual(round(st.cpM.JmolK, 2), 78.20)
self.assertEqual(round(st.w, 1), 380.2)
| jjgomera/pychemqt | lib/mEoS/NF3.py | Python | gpl-3.0 | 14,872 |
"""Generate the test, save it, and then call all relevant checkers."""
# NOTE: Currently works only with S3DataStore
import os
from util.data_store.s3_data_store import S3DataStore
from util.analytics_platform_util import get_path_names
from evaluation_platform.uranus.src.generate_test_data import TestData
from evaluation_platform.uranus.src.alternate_testing import AlternateAccuracy
from evaluation_platform.uranus.src.companion_outlier_testing import CompanionOutlierAccuracy
from analytics_platform.kronos.src.config import (
AWS_S3_ACCESS_KEY_ID,
AWS_S3_SECRET_ACCESS_KEY)
from evaluation_platform.uranus.src.uranus_constants import (
URANUS_EVALUATION_RESULT_PATH)
def generate_evaluate_test_s3(training_url, result_id):
"""Generate the test, save it, and then call all relevant checkers."""
input_bucket_name, output_bucket_name, additional_path = get_path_names(
training_url)
input_data_store = S3DataStore(src_bucket_name=input_bucket_name,
access_key=AWS_S3_ACCESS_KEY_ID,
secret_key=AWS_S3_SECRET_ACCESS_KEY)
    output_data_store = S3DataStore(src_bucket_name=output_bucket_name,
access_key=AWS_S3_ACCESS_KEY_ID,
secret_key=AWS_S3_SECRET_ACCESS_KEY)
generate_test(input_data_store, output_data_store, additional_path)
perform_kronos_test(training_url, result_id,
input_data_store, output_data_store, additional_path)
def generate_test(input_data_store, output_data_store, additional_path):
"""Generate test from given input data store."""
td = TestData()
td.generate_attributes(input_data_store, additional_path)
td.save_attributes(output_data_store, additional_path)
def perform_kronos_test(training_url,
result_id,
input_data_store,
output_data_store,
additional_path):
"""Call the Alternate, Companion and Outlier Accuracy checker.
:param training_url: Location where test data is loaded from.
:return testing_result_dict: Accuracy/Evaluation metrices.
"""
testing_result_dict = {"input_url": training_url}
testing_result_dict["evaluation_id"] = result_id
alt_acc_obj = AlternateAccuracy()
alt_acc_obj.load_attributes(input_data_store, additional_path)
testing_result_dict["Alternate"] = alt_acc_obj.alternate_precision()
co_acc_obj = CompanionOutlierAccuracy()
co_acc_obj.load_attributes(input_data_store, additional_path)
testing_result_dict[
"Number of Input Manifests"] = co_acc_obj.search_set_length
result = co_acc_obj.companion_outlier_precision()
testing_result_dict["Companion"] = result[0]
testing_result_dict["Outlier"] = result[1]
result_filename = os.path.join(
additional_path,
URANUS_EVALUATION_RESULT_PATH,
result_id + ".json")
output_data_store.write_json_file(result_filename, testing_result_dict)
| sara-02/fabric8-analytics-stack-analysis | evaluation_platform/uranus/src/evaluate_data.py | Python | gpl-3.0 | 3,062 |
from django.conf.urls import url
from .views import MainView
urlpatterns = [
url(r'^$', MainView.as_view(), name="main"),
] | rudikovrf/django_blog | main/urls.py | Python | mit | 129 |
from datetime import datetime
from settings import STORAGE
from settings import DATE_FORMAT
class Item(object):
def __init__(self, oid, **kwargs):
self.id = oid
self.content = kwargs.get("content", "")
self.title = kwargs.get("title", "")
self.created = kwargs.get("created", "")
self.complete = kwargs.get("complete", False)
self.complete_date = kwargs.get("complete_date", "")
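        # Timestamps are persisted as formatted strings; parse them back into datetime objects when present.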
if self.created != "":
self.created = datetime.strptime(self.created, DATE_FORMAT)
if self.complete_date != "":
self.complete_date = datetime.strptime(self.complete_date, DATE_FORMAT)
def __eq__(self, other):
if isinstance(other, Item):
return self.id == other.id
return False
def update(self, **kwargs):
self.content = kwargs.get("content", self.content)
self.title = kwargs.get("title", self.title)
self.created = kwargs.get("created", self.created)
oldComp = self.complete
self.complete = kwargs.get("complete", self.complete)
        # Record the completion timestamp when the item transitions to complete.
        if self.complete and not oldComp:
            self.complete_date = datetime.utcnow().strftime(DATE_FORMAT) + "UTC"
if not isinstance(self.created, datetime) and self.created != "":
self.created = datetime.strptime(self.created, DATE_FORMAT)
if not isinstance(self.complete_date, datetime) and self.complete_date != "":
self.complete_date = datetime.strptime(self.complete_date, DATE_FORMAT)
def delete(self):
STORAGE.delete(self.id)
def save(self):
cd_str = self.complete_date.strftime(DATE_FORMAT) + "UTC" if self.complete_date != "" else ""
STORAGE.put(self.id, content=self.content,
created=self.created.strftime(DATE_FORMAT) + "UTC",
title=self.title,
complete=self.complete,
complete_date=cd_str)
| broganross/kivy_tests | todo/models/item.py | Python | mit | 1,964 |
# -*- coding: utf-8 -*-
###########################################################################
## Python code generated with wxFormBuilder (version Jun 6 2014)
## http://www.wxformbuilder.org/
##
## PLEASE DO "NOT" EDIT THIS FILE!
###########################################################################
###########################################################################
## Chris @ DCSLAB, NCTU
## gui_key.py
## GUI for user to give keys
###########################################################################
from threading import Thread
#import copy
import xml.etree.ElementTree as ET
import wx
import wx.xrc
import reconfig_single
import myheader as chris
import get_nodes
import get_keys
SCALE_POLICY = [ 'vcpu', 'ram', 'price', 'disk', 'bandwidth' ]
class ReconfigSingle( wx.Frame ):
def __init__(self, parent, options, system, allnode, alllb):
self.system = system
self.options = options
self.allnode = allnode
self.alllb = alllb
wx.Frame.__init__ ( self, parent, id = wx.ID_ANY, title = u"RC Reconfig Single Cluster", pos = wx.DefaultPosition, size = wx.Size( -1,-1 ), style = wx.DEFAULT_FRAME_STYLE|wx.TAB_TRAVERSAL )
#self.SetSizeHintsSz( wx.Size( -1,-1 ), wx.DefaultSize )
bSizer1 = wx.BoxSizer( wx.VERTICAL )
bSizer1.SetMinSize( wx.Size( 500,-1 ) )
self.m_scrolledWindow1 = wx.ScrolledWindow( self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.HSCROLL|wx.VSCROLL )
self.m_scrolledWindow1.SetScrollRate( 5, 5 )
self.m_scrolledWindow1.SetMinSize( wx.Size( 900,700 ) )
fgSizer1 = wx.FlexGridSizer( 0, 1, 0, 0 )
fgSizer1.SetFlexibleDirection( wx.BOTH )
fgSizer1.SetNonFlexibleGrowMode( wx.FLEX_GROWMODE_SPECIFIED )
fgSizer1.SetMinSize( wx.Size( 500,-1 ) )
self.m_staticText1 = wx.StaticText( self.m_scrolledWindow1, wx.ID_ANY, u"Reconfig Single Cluster", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText1.Wrap( -1 )
fgSizer1.Add( self.m_staticText1, 0, wx.ALL, 5 )
self.m_staticline1 = wx.StaticLine( self.m_scrolledWindow1, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.LI_HORIZONTAL )
fgSizer1.Add( self.m_staticline1, 0, wx.EXPAND |wx.ALL, 5 )
fgSizer4 = wx.FlexGridSizer( 0, 8, 0, 0 )
fgSizer4.SetFlexibleDirection( wx.BOTH )
fgSizer4.SetNonFlexibleGrowMode( wx.FLEX_GROWMODE_SPECIFIED )
self.m_staticText5 = wx.StaticText( self.m_scrolledWindow1, wx.ID_ANY, u"Cluster", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText5.Wrap( -1 )
fgSizer4.Add( self.m_staticText5, 0, wx.ALL, 5 )
self.m_choice1Choices = [ cluster.name for cluster in system.clusters ]
self.m_choice1 = wx.Choice( self.m_scrolledWindow1, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, self.m_choice1Choices, 0 )
self.m_choice1.SetSelection( 0 )
fgSizer4.Add( self.m_choice1, 0, wx.ALL, 5 )
self.m_staticText6 = wx.StaticText( self.m_scrolledWindow1, wx.ID_ANY, u"Scale Type", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText6.Wrap( -1 )
fgSizer4.Add( self.m_staticText6, 0, wx.ALL, 5 )
m_choice2Choices = [ u"UP (Vertical)", u"OUT (Horizontal)" ]
self.m_choice2 = wx.Choice( self.m_scrolledWindow1, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, m_choice2Choices, 0 )
self.m_choice2.SetSelection( 0 )
fgSizer4.Add( self.m_choice2, 0, wx.ALL, 5 )
self.m_staticText7 = wx.StaticText( self.m_scrolledWindow1, wx.ID_ANY, u"Scale Direct", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText7.Wrap( -1 )
fgSizer4.Add( self.m_staticText7, 0, wx.ALL, 5 )
m_choice3Choices = [ u"Add (Increase)", u"Reduce (Decrease)" ]
self.m_choice3 = wx.Choice( self.m_scrolledWindow1, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, m_choice3Choices, 0 )
self.m_choice3.SetSelection( 0 )
fgSizer4.Add( self.m_choice3, 0, wx.ALL, 5 )
self.m_staticText8 = wx.StaticText( self.m_scrolledWindow1, wx.ID_ANY, u"Policy", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText8.Wrap( -1 )
fgSizer4.Add( self.m_staticText8, 0, wx.ALL, 5 )
m_choice4Choices = SCALE_POLICY
self.m_choice4 = wx.Choice( self.m_scrolledWindow1, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, m_choice4Choices, 0 )
self.m_choice4.SetSelection( 0 )
fgSizer4.Add( self.m_choice4, 0, wx.ALL, 5 )
fgSizer1.Add( fgSizer4, 1, wx.EXPAND, 5 )
self.m_checkBox1 = wx.CheckBox( self.m_scrolledWindow1, wx.ID_ANY, u"Active Initialization Script", wx.DefaultPosition, wx.DefaultSize, 0 )
fgSizer1.Add( self.m_checkBox1, 0, wx.ALL, 5 )
self.m_checkBox1.SetValue(True)
self.m_button1 = wx.Button( self.m_scrolledWindow1, wx.ID_ANY, u"Send", wx.Point( -1,-1 ), wx.DefaultSize, 0 )
fgSizer1.Add( self.m_button1, 0, wx.ALL, 5 )
self.m_staticline2 = wx.StaticLine( self.m_scrolledWindow1, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.LI_HORIZONTAL )
fgSizer1.Add( self.m_staticline2, 0, wx.EXPAND |wx.ALL, 5 )
self.m_staticText9 = wx.StaticText( self.m_scrolledWindow1, wx.ID_ANY, u"System", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText9.Wrap( -1 )
fgSizer1.Add( self.m_staticText9, 0, wx.ALL, 5 )
self.m_button3 = wx.Button( self.m_scrolledWindow1, wx.ID_ANY, u"Preview", wx.DefaultPosition, wx.DefaultSize, 0 )
fgSizer1.Add( self.m_button3, 0, wx.ALL, 5 )
fgSizer2 = wx.FlexGridSizer( 0, 2, 0, 0 )
fgSizer2.SetFlexibleDirection( wx.BOTH )
fgSizer2.SetNonFlexibleGrowMode( wx.FLEX_GROWMODE_SPECIFIED )
self.m_staticText10 = wx.StaticText( self.m_scrolledWindow1, wx.ID_ANY, u"Now", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText10.Wrap( -1 )
fgSizer2.Add( self.m_staticText10, 0, wx.ALL, 5 )
self.m_staticText11 = wx.StaticText( self.m_scrolledWindow1, wx.ID_ANY, u"After Reconfig (Estimated)", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText11.Wrap( -1 )
fgSizer2.Add( self.m_staticText11, 0, wx.ALL, 5 )
self.m_scrolledWindow2 = wx.ScrolledWindow( self.m_scrolledWindow1, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.HSCROLL|wx.VSCROLL )
self.m_scrolledWindow2.SetScrollRate( 5, 5 )
self.m_scrolledWindow2.SetMinSize( wx.Size( 300,300 ) )
bSizer3 = wx.BoxSizer( wx.VERTICAL )
self.m_treeCtrl1 = wx.TreeCtrl( self.m_scrolledWindow2, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TR_DEFAULT_STYLE )
self.m_treeCtrl1.SetMinSize( wx.Size( 300,300 ) )
self.tree_generation(self.m_treeCtrl1)
self.m_treeCtrl1.ExpandAll()
bSizer3.Add( self.m_treeCtrl1, 0, wx.ALL, 5 )
self.m_scrolledWindow2.SetSizer( bSizer3 )
self.m_scrolledWindow2.Layout()
bSizer3.Fit( self.m_scrolledWindow2 )
fgSizer2.Add( self.m_scrolledWindow2, 1, wx.EXPAND |wx.ALL, 5 )
self.m_scrolledWindow3 = wx.ScrolledWindow( self.m_scrolledWindow1, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.HSCROLL|wx.VSCROLL )
self.m_scrolledWindow3.SetScrollRate( 5, 5 )
self.m_scrolledWindow3.SetMinSize( wx.Size( 300,300 ) )
bSizer4 = wx.BoxSizer( wx.VERTICAL )
self.m_treeCtrl2 = wx.TreeCtrl( self.m_scrolledWindow3, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TR_DEFAULT_STYLE )
self.m_treeCtrl2.SetMinSize( wx.Size( 300,300 ) )
self.m_treeCtrl2.ExpandAll()
bSizer4.Add( self.m_treeCtrl2, 0, wx.ALL, 5 )
self.m_scrolledWindow3.SetSizer( bSizer4 )
self.m_scrolledWindow3.Layout()
bSizer4.Fit( self.m_scrolledWindow3 )
fgSizer2.Add( self.m_scrolledWindow3, 1, wx.EXPAND |wx.ALL, 5 )
fgSizer1.Add( fgSizer2, 1, wx.EXPAND, 5 )
self.m_button2 = wx.Button( self.m_scrolledWindow1, wx.ID_ANY, u"Update", wx.DefaultPosition, wx.DefaultSize, 0 )
fgSizer1.Add( self.m_button2, 0, wx.ALL, 5 )
self.m_scrolledWindow1.SetSizer( fgSizer1 )
self.m_scrolledWindow1.Layout()
fgSizer1.Fit( self.m_scrolledWindow1 )
bSizer1.Add( self.m_scrolledWindow1, 1, wx.EXPAND |wx.ALL, 5 )
self.SetSizer( bSizer1 )
self.Layout()
bSizer1.Fit( self )
self.Centre( wx.BOTH )
# Connect Events
self.m_button1.Bind(wx.EVT_BUTTON, self.buttonevent)
self.m_button3.Bind(wx.EVT_BUTTON, self.buttonevent_preview)
self.m_button2.Bind(wx.EVT_BUTTON, self.buttonevent_update)
def buttonevent(self, event):
print('debug: Send...')
#print('debug: ori type: ' + self.system.clusters[1].mynodes[0].extra['instance_type'])
self.options_generation()
if not reconfig_single.reconfig_single(self.options, self.system, self.allnode, self.alllb):
print('Error: reconfig_single()')
return
self.Close()
#tree = ET.parse('configuration.tmp.xml')
#root = tree.getroot()
#print('debug: finish parsing')
#self.system = chris.System(root.attrib['name'], [])
#if not self.system.reconfig(root, True, self.allnode, self.options, self.alllb): # This would cause the real modification on the system
# print('Error: Reconfig Failed')
# print('Warning: This faile might already cause some changes to the system')
# exit()
#self.tree_generation(self.m_treeCtrl2)
#self.m_staticText11.SetLabel("After Reconfig")
#self.m_staticText10.SetLabel("Original")
def buttonevent_preview(self, event):
print('debug: Preview...')
self.options_generation()
#system_preview = copy.deepcopy(self.system)
tree = ET.parse(self.options.config_file)
root = tree.getroot()
system_preview = chris.System(root.attrib['name'], [])
allnode, alllb, allimage = get_nodes.get_nodes(self.options.key_file, get_keys.get_keys(True, self.options.key_file))
system_preview.reconfig(root, False, allnode, self.options, alllb)
#system_preview.name = 'temp'
if not reconfig_single.reconfig_single(self.options, system_preview, self.allnode, self.alllb, modifyxml = False):
            print('Error: reconfig_single()')
return
tree = ET.parse('configuration.preview.xml')
root = tree.getroot()
#print('debug: finish parsing')
system_preview = chris.System(root.attrib['name'], [])
if not system_preview.reconfig(root, False, allnode, self.options, alllb, preview = True):
print('Error: Preview Failed')
return
# #print('Warning: This faile might already cause some changes to the system')
# exit()
        print('debug: preview generated, refreshing tree view')
self.tree_generation(self.m_treeCtrl2, system_preview = system_preview)
self.m_treeCtrl2.ExpandAll()
self.m_staticText11.SetLabel("After Reconfig (Estimated)")
self.m_treeCtrl2.Update()
def buttonevent_update(self, event):
#pass
print('debug: Update...')
self.options.mode = 'u'
#print('debug: reconfig 1st stage')
tree = ET.parse(self.options.config_file)
root = tree.getroot()
self.system = chris.System(root.attrib['name'], [])
if not self.system.reconfig(root, False, self.allnode, self.options, self.alllb):
print("debug: reconfig 1st stage not completed")
exit()
#..... System data structure was established .....
self.system.generate_xml(self.options.config_file)
print("debug: The file " + self.options.config_file + ' has been updated')
self.options.mode = 'rs'
self.tree_generation(self.m_treeCtrl1)
self.m_treeCtrl1.ExpandAll()
self.m_treeCtrl2.CollapseAll()
self.m_staticText10.SetLabel("Now")
def options_generation(self):
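        # Map the current widget selections onto the options object consumed by reconfig_single().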
self.options.target_cluster = self.m_choice1Choices[self.m_choice1.GetCurrentSelection()]
if self.m_choice2.GetCurrentSelection() == 0:
self.options.scale_type = 'up'
else:
self.options.scale_type = 'out'
if self.m_choice3.GetCurrentSelection() == 0:
self.options.scale_direct = 'a'
else:
self.options.scale_direct = 'd'
        self.options.scale_policy = SCALE_POLICY[self.m_choice4.GetCurrentSelection()]
        if self.m_checkBox1.IsChecked():
            self.options.active_ini_script = 'y'
        else:
            self.options.active_ini_script = 'n'
def tree_generation(self, treeobj, system_preview = None):
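        # Rebuild the given wx.TreeCtrl from either the live system or a preview copy.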
if system_preview == None:
systemtree = self.system
else:
systemtree = system_preview
        treeobj.CollapseAll()
        treeobj.DeleteAllItems()  # a wx.TreeCtrl allows a single root, so clear any previous tree before rebuilding
        tree_root = treeobj.AddRoot(systemtree.name)
for cluster in systemtree.clusters:
tree_cluster = treeobj.AppendItem(tree_root, cluster.name + ' (' + cluster.scaletype + ')')
for node in cluster.mynodes:
tree_node = treeobj.AppendItem(tree_cluster, node.name + ' (' + node.id + ')')
treeobj.AppendItem(tree_node, 'TYPE: ' + node.extra['instance_type'])
treeobj.AppendItem(tree_node, 'STATUS: ' + node.extra['status'])
def __del__( self ):
pass
class GUI_ReconfigSingle(ReconfigSingle):
def __init__(self, parent, options, system, allnode, alllb):
ReconfigSingle.__init__(self, parent, options, system, allnode, alllb)
class GUI_Thread_ReconfigSingle(Thread):
def __init__(self, options, system, allnode, alllb):
Thread.__init__(self)
        self.app = wx.App(False)  # mandatory in wx: create an app; False means do not redirect stdin/stdout
self.options = options
self.system = system
self.allnode = allnode
self.alllb = alllb
def run(self):
frame = GUI_ReconfigSingle(None, self.options, self.system, self.allnode, self.alllb)
frame.Show(True)
self.app.MainLoop()
| chungchris/cloud-system-reconfiguration-tool | gui_reconfigsingle.py | Python | mit | 12,850 |
from keras.models import Sequential
from keras.layers import GRU
import numpy as np
model = Sequential()
ly = GRU(2, activation='tanh', recurrent_activation='relu', implementation=1, stateful=False, batch_input_shape=(5, 3, 3))
model.add(ly)
model.compile(optimizer='sgd', loss='mse')
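# Keras GRU weight layout: kernel (input_dim, 3*units), recurrent kernel (units, 3*units),
# bias (3*units,); the 3*units columns are grouped as the update (z), reset (r) and new (h) gates.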
kernel = np.ones((3, 6))
rec_kernel = np.ones((2, 6))
bias = np.array([1, 2, -1, 0, 3, 4])/10
k = 0
for h in range(0, 3):
for w in range(0, 6):
k += 1
kernel[h, w] = (k % 5 - 2)/10
k = 0
for h in range(0, 2):
for w in range(0, 6):
k += 1
rec_kernel[h, w] = (k % 5 - 2)/10
parameters = [kernel, rec_kernel, bias]
model.set_weights(parameters)
data = np.ndarray((5, 3, 3))
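# Deterministic input batch of shape (batch=5, timesteps=3, features=3) so the reference output is reproducible.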
l = 0
for b in range(0, 5):
for h in range(0, 3):
for c in range(0, 3):
l += 1
data[b, h, c] = (l % 5 + 1)/10
output = model.predict(data, batch_size=5) # the batch_size has no impact on the result here
print(output)
print(model.summary())
print(model.get_config())
print(model.get_weights())
| adamtiger/NNSharp | PythonUtils/GRU.py | Python | mit | 985 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('restaurant', '0030_kitchen_usertable'),
]
operations = [
migrations.AlterField(
model_name='kitchen',
name='menu_item',
field=models.CharField(max_length=200),
),
]
| gauravbose/digital-menu | digimenu2/restaurant/migrations/0031_auto_20150708_0754.py | Python | bsd-3-clause | 409 |
import gc
import sys
import unittest
import collections
import weakref
import operator
from test import support
# Used in ReferencesTestCase.test_ref_created_during_del() .
ref_from_del = None
class C:
def method(self):
pass
class Callable:
bar = None
def __call__(self, x):
self.bar = x
def create_function():
def f(): pass
return f
def create_bound_method():
return C().method
class TestBase(unittest.TestCase):
def setUp(self):
self.cbcalled = 0
def callback(self, ref):
self.cbcalled += 1
class ReferencesTestCase(TestBase):
def test_basic_ref(self):
self.check_basic_ref(C)
self.check_basic_ref(create_function)
self.check_basic_ref(create_bound_method)
# Just make sure the tp_repr handler doesn't raise an exception.
# Live reference:
o = C()
wr = weakref.ref(o)
repr(wr)
# Dead reference:
del o
repr(wr)
def test_basic_callback(self):
self.check_basic_callback(C)
self.check_basic_callback(create_function)
self.check_basic_callback(create_bound_method)
def test_multiple_callbacks(self):
o = C()
ref1 = weakref.ref(o, self.callback)
ref2 = weakref.ref(o, self.callback)
del o
self.assert_(ref1() is None,
"expected reference to be invalidated")
self.assert_(ref2() is None,
"expected reference to be invalidated")
self.assert_(self.cbcalled == 2,
"callback not called the right number of times")
def test_multiple_selfref_callbacks(self):
# Make sure all references are invalidated before callbacks are called
#
# What's important here is that we're using the first
# reference in the callback invoked on the second reference
# (the most recently created ref is cleaned up first). This
# tests that all references to the object are invalidated
# before any of the callbacks are invoked, so that we only
# have one invocation of _weakref.c:cleanup_helper() active
# for a particular object at a time.
#
def callback(object, self=self):
self.ref()
c = C()
self.ref = weakref.ref(c, callback)
ref1 = weakref.ref(c, callback)
del c
def test_proxy_ref(self):
o = C()
o.bar = 1
ref1 = weakref.proxy(o, self.callback)
ref2 = weakref.proxy(o, self.callback)
del o
def check(proxy):
proxy.bar
self.assertRaises(ReferenceError, check, ref1)
self.assertRaises(ReferenceError, check, ref2)
self.assertRaises(ReferenceError, bool, weakref.proxy(C()))
self.assertEqual(self.cbcalled, 2)
def check_basic_ref(self, factory):
o = factory()
ref = weakref.ref(o)
self.assert_(ref() is not None,
"weak reference to live object should be live")
o2 = ref()
self.assert_(o is o2,
"<ref>() should return original object if live")
def check_basic_callback(self, factory):
self.cbcalled = 0
o = factory()
ref = weakref.ref(o, self.callback)
del o
self.assert_(self.cbcalled == 1,
"callback did not properly set 'cbcalled'")
self.assert_(ref() is None,
"ref2 should be dead after deleting object reference")
def test_ref_reuse(self):
o = C()
ref1 = weakref.ref(o)
# create a proxy to make sure that there's an intervening creation
# between these two; it should make no difference
proxy = weakref.proxy(o)
ref2 = weakref.ref(o)
self.assert_(ref1 is ref2,
"reference object w/out callback should be re-used")
o = C()
proxy = weakref.proxy(o)
ref1 = weakref.ref(o)
ref2 = weakref.ref(o)
self.assert_(ref1 is ref2,
"reference object w/out callback should be re-used")
self.assert_(weakref.getweakrefcount(o) == 2,
"wrong weak ref count for object")
del proxy
self.assert_(weakref.getweakrefcount(o) == 1,
"wrong weak ref count for object after deleting proxy")
def test_proxy_reuse(self):
o = C()
proxy1 = weakref.proxy(o)
ref = weakref.ref(o)
proxy2 = weakref.proxy(o)
self.assert_(proxy1 is proxy2,
"proxy object w/out callback should have been re-used")
def test_basic_proxy(self):
o = C()
self.check_proxy(o, weakref.proxy(o))
L = collections.UserList()
p = weakref.proxy(L)
self.failIf(p, "proxy for empty UserList should be false")
p.append(12)
self.assertEqual(len(L), 1)
self.failUnless(p, "proxy for non-empty UserList should be true")
p[:] = [2, 3]
self.assertEqual(len(L), 2)
self.assertEqual(len(p), 2)
self.failUnless(3 in p,
"proxy didn't support __contains__() properly")
p[1] = 5
self.assertEqual(L[1], 5)
self.assertEqual(p[1], 5)
L2 = collections.UserList(L)
p2 = weakref.proxy(L2)
self.assertEqual(p, p2)
## self.assertEqual(repr(L2), repr(p2))
L3 = collections.UserList(range(10))
p3 = weakref.proxy(L3)
self.assertEqual(L3[:], p3[:])
self.assertEqual(L3[5:], p3[5:])
self.assertEqual(L3[:5], p3[:5])
self.assertEqual(L3[2:5], p3[2:5])
def test_proxy_index(self):
class C:
def __index__(self):
return 10
o = C()
p = weakref.proxy(o)
self.assertEqual(operator.index(p), 10)
def test_proxy_div(self):
class C:
def __floordiv__(self, other):
return 42
def __ifloordiv__(self, other):
return 21
o = C()
p = weakref.proxy(o)
self.assertEqual(p // 5, 42)
p //= 5
self.assertEqual(p, 21)
# The PyWeakref_* C API is documented as allowing either NULL or
# None as the value for the callback, where either means "no
# callback". The "no callback" ref and proxy objects are supposed
# to be shared so long as they exist by all callers so long as
# they are active. In Python 2.3.3 and earlier, this guarantee
# was not honored, and was broken in different ways for
# PyWeakref_NewRef() and PyWeakref_NewProxy(). (Two tests.)
def test_shared_ref_without_callback(self):
self.check_shared_without_callback(weakref.ref)
def test_shared_proxy_without_callback(self):
self.check_shared_without_callback(weakref.proxy)
def check_shared_without_callback(self, makeref):
o = Object(1)
p1 = makeref(o, None)
p2 = makeref(o, None)
self.assert_(p1 is p2, "both callbacks were None in the C API")
del p1, p2
p1 = makeref(o)
p2 = makeref(o, None)
self.assert_(p1 is p2, "callbacks were NULL, None in the C API")
del p1, p2
p1 = makeref(o)
p2 = makeref(o)
self.assert_(p1 is p2, "both callbacks were NULL in the C API")
del p1, p2
p1 = makeref(o, None)
p2 = makeref(o)
self.assert_(p1 is p2, "callbacks were None, NULL in the C API")
def test_callable_proxy(self):
o = Callable()
ref1 = weakref.proxy(o)
self.check_proxy(o, ref1)
self.assert_(type(ref1) is weakref.CallableProxyType,
"proxy is not of callable type")
ref1('twinkies!')
self.assert_(o.bar == 'twinkies!',
"call through proxy not passed through to original")
ref1(x='Splat.')
self.assert_(o.bar == 'Splat.',
"call through proxy not passed through to original")
# expect due to too few args
self.assertRaises(TypeError, ref1)
# expect due to too many args
self.assertRaises(TypeError, ref1, 1, 2, 3)
def check_proxy(self, o, proxy):
o.foo = 1
self.assert_(proxy.foo == 1,
"proxy does not reflect attribute addition")
o.foo = 2
self.assert_(proxy.foo == 2,
"proxy does not reflect attribute modification")
del o.foo
self.assert_(not hasattr(proxy, 'foo'),
"proxy does not reflect attribute removal")
proxy.foo = 1
self.assert_(o.foo == 1,
"object does not reflect attribute addition via proxy")
proxy.foo = 2
self.assert_(
o.foo == 2,
"object does not reflect attribute modification via proxy")
del proxy.foo
self.assert_(not hasattr(o, 'foo'),
"object does not reflect attribute removal via proxy")
def test_proxy_deletion(self):
# Test clearing of SF bug #762891
class Foo:
result = None
def __delitem__(self, accessor):
self.result = accessor
g = Foo()
f = weakref.proxy(g)
del f[0]
self.assertEqual(f.result, 0)
def test_proxy_bool(self):
# Test clearing of SF bug #1170766
class List(list): pass
lyst = List()
self.assertEqual(bool(weakref.proxy(lyst)), bool(lyst))
def test_getweakrefcount(self):
o = C()
ref1 = weakref.ref(o)
ref2 = weakref.ref(o, self.callback)
self.assert_(weakref.getweakrefcount(o) == 2,
"got wrong number of weak reference objects")
proxy1 = weakref.proxy(o)
proxy2 = weakref.proxy(o, self.callback)
self.assert_(weakref.getweakrefcount(o) == 4,
"got wrong number of weak reference objects")
del ref1, ref2, proxy1, proxy2
self.assert_(weakref.getweakrefcount(o) == 0,
"weak reference objects not unlinked from"
" referent when discarded.")
# assumes ints do not support weakrefs
self.assert_(weakref.getweakrefcount(1) == 0,
"got wrong number of weak reference objects for int")
def test_getweakrefs(self):
o = C()
ref1 = weakref.ref(o, self.callback)
ref2 = weakref.ref(o, self.callback)
del ref1
self.assert_(weakref.getweakrefs(o) == [ref2],
"list of refs does not match")
o = C()
ref1 = weakref.ref(o, self.callback)
ref2 = weakref.ref(o, self.callback)
del ref2
self.assert_(weakref.getweakrefs(o) == [ref1],
"list of refs does not match")
del ref1
self.assert_(weakref.getweakrefs(o) == [],
"list of refs not cleared")
# assumes ints do not support weakrefs
self.assert_(weakref.getweakrefs(1) == [],
"list of refs does not match for int")
def test_newstyle_number_ops(self):
class F(float):
pass
f = F(2.0)
p = weakref.proxy(f)
self.assert_(p + 1.0 == 3.0)
self.assert_(1.0 + p == 3.0) # this used to SEGV
def test_callbacks_protected(self):
# Callbacks protected from already-set exceptions?
# Regression test for SF bug #478534.
class BogusError(Exception):
pass
data = {}
def remove(k):
del data[k]
def encapsulate():
f = lambda : ()
data[weakref.ref(f, remove)] = None
raise BogusError
try:
encapsulate()
except BogusError:
pass
else:
self.fail("exception not properly restored")
try:
encapsulate()
except BogusError:
pass
else:
self.fail("exception not properly restored")
def test_sf_bug_840829(self):
# "weakref callbacks and gc corrupt memory"
# subtype_dealloc erroneously exposed a new-style instance
# already in the process of getting deallocated to gc,
# causing double-deallocation if the instance had a weakref
# callback that triggered gc.
# If the bug exists, there probably won't be an obvious symptom
# in a release build. In a debug build, a segfault will occur
# when the second attempt to remove the instance from the "list
# of all objects" occurs.
import gc
class C(object):
pass
c = C()
wr = weakref.ref(c, lambda ignore: gc.collect())
del c
# There endeth the first part. It gets worse.
del wr
c1 = C()
c1.i = C()
wr = weakref.ref(c1.i, lambda ignore: gc.collect())
c2 = C()
c2.c1 = c1
del c1 # still alive because c2 points to it
# Now when subtype_dealloc gets called on c2, it's not enough just
# that c2 is immune from gc while the weakref callbacks associated
# with c2 execute (there are none in this 2nd half of the test, btw).
# subtype_dealloc goes on to call the base classes' deallocs too,
# so any gc triggered by weakref callbacks associated with anything
# torn down by a base class dealloc can also trigger double
# deallocation of c2.
del c2
def test_callback_in_cycle_1(self):
import gc
class J(object):
pass
class II(object):
def acallback(self, ignore):
self.J
I = II()
I.J = J
I.wr = weakref.ref(J, I.acallback)
# Now J and II are each in a self-cycle (as all new-style class
# objects are, since their __mro__ points back to them). I holds
# both a weak reference (I.wr) and a strong reference (I.J) to class
# J. I is also in a cycle (I.wr points to a weakref that references
# I.acallback). When we del these three, they all become trash, but
# the cycles prevent any of them from getting cleaned up immediately.
# Instead they have to wait for cyclic gc to deduce that they're
# trash.
#
# gc used to call tp_clear on all of them, and the order in which
# it does that is pretty accidental. The exact order in which we
# built up these things manages to provoke gc into running tp_clear
# in just the right order (I last). Calling tp_clear on II leaves
# behind an insane class object (its __mro__ becomes NULL). Calling
# tp_clear on J breaks its self-cycle, but J doesn't get deleted
# just then because of the strong reference from I.J. Calling
# tp_clear on I starts to clear I's __dict__, and just happens to
# clear I.J first -- I.wr is still intact. That removes the last
# reference to J, which triggers the weakref callback. The callback
# tries to do "self.J", and instances of new-style classes look up
# attributes ("J") in the class dict first. The class (II) wants to
# search II.__mro__, but that's NULL. The result was a segfault in
# a release build, and an assert failure in a debug build.
del I, J, II
gc.collect()
def test_callback_in_cycle_2(self):
import gc
# This is just like test_callback_in_cycle_1, except that II is an
# old-style class. The symptom is different then: an instance of an
# old-style class looks in its own __dict__ first. 'J' happens to
# get cleared from I.__dict__ before 'wr', and 'J' was never in II's
# __dict__, so the attribute isn't found. The difference is that
# the old-style II doesn't have a NULL __mro__ (it doesn't have any
# __mro__), so no segfault occurs. Instead it got:
# test_callback_in_cycle_2 (__main__.ReferencesTestCase) ...
# Exception exceptions.AttributeError:
# "II instance has no attribute 'J'" in <bound method II.acallback
# of <?.II instance at 0x00B9B4B8>> ignored
class J(object):
pass
class II:
def acallback(self, ignore):
self.J
I = II()
I.J = J
I.wr = weakref.ref(J, I.acallback)
del I, J, II
gc.collect()
def test_callback_in_cycle_3(self):
import gc
# This one broke the first patch that fixed the last two. In this
# case, the objects reachable from the callback aren't also reachable
# from the object (c1) *triggering* the callback: you can get to
# c1 from c2, but not vice-versa. The result was that c2's __dict__
# got tp_clear'ed by the time the c2.cb callback got invoked.
class C:
def cb(self, ignore):
self.me
self.c1
self.wr
c1, c2 = C(), C()
c2.me = c2
c2.c1 = c1
c2.wr = weakref.ref(c1, c2.cb)
del c1, c2
gc.collect()
def test_callback_in_cycle_4(self):
import gc
# Like test_callback_in_cycle_3, except c2 and c1 have different
# classes. c2's class (C) isn't reachable from c1 then, so protecting
# objects reachable from the dying object (c1) isn't enough to stop
# c2's class (C) from getting tp_clear'ed before c2.cb is invoked.
# The result was a segfault (C.__mro__ was NULL when the callback
# tried to look up self.me).
class C(object):
def cb(self, ignore):
self.me
self.c1
self.wr
class D:
pass
c1, c2 = D(), C()
c2.me = c2
c2.c1 = c1
c2.wr = weakref.ref(c1, c2.cb)
del c1, c2, C, D
gc.collect()
def test_callback_in_cycle_resurrection(self):
import gc
# Do something nasty in a weakref callback: resurrect objects
# from dead cycles. For this to be attempted, the weakref and
# its callback must also be part of the cyclic trash (else the
# objects reachable via the callback couldn't be in cyclic trash
# to begin with -- the callback would act like an external root).
# But gc clears trash weakrefs with callbacks early now, which
# disables the callbacks, so the callbacks shouldn't get called
# at all (and so nothing actually gets resurrected).
alist = []
class C(object):
def __init__(self, value):
self.attribute = value
def acallback(self, ignore):
alist.append(self.c)
c1, c2 = C(1), C(2)
c1.c = c2
c2.c = c1
c1.wr = weakref.ref(c2, c1.acallback)
c2.wr = weakref.ref(c1, c2.acallback)
def C_went_away(ignore):
alist.append("C went away")
wr = weakref.ref(C, C_went_away)
del c1, c2, C # make them all trash
self.assertEqual(alist, []) # del isn't enough to reclaim anything
gc.collect()
# c1.wr and c2.wr were part of the cyclic trash, so should have
# been cleared without their callbacks executing. OTOH, the weakref
# to C is bound to a function local (wr), and wasn't trash, so that
# callback should have been invoked when C went away.
self.assertEqual(alist, ["C went away"])
# The remaining weakref should be dead now (its callback ran).
self.assertEqual(wr(), None)
del alist[:]
gc.collect()
self.assertEqual(alist, [])
def test_callbacks_on_callback(self):
import gc
# Set up weakref callbacks *on* weakref callbacks.
alist = []
def safe_callback(ignore):
alist.append("safe_callback called")
class C(object):
def cb(self, ignore):
alist.append("cb called")
c, d = C(), C()
c.other = d
d.other = c
callback = c.cb
c.wr = weakref.ref(d, callback) # this won't trigger
d.wr = weakref.ref(callback, d.cb) # ditto
external_wr = weakref.ref(callback, safe_callback) # but this will
self.assert_(external_wr() is callback)
# The weakrefs attached to c and d should get cleared, so that
# C.cb is never called. But external_wr isn't part of the cyclic
# trash, and no cyclic trash is reachable from it, so safe_callback
# should get invoked when the bound method object callback (c.cb)
# -- which is itself a callback, and also part of the cyclic trash --
# gets reclaimed at the end of gc.
del callback, c, d, C
self.assertEqual(alist, []) # del isn't enough to clean up cycles
gc.collect()
self.assertEqual(alist, ["safe_callback called"])
self.assertEqual(external_wr(), None)
del alist[:]
gc.collect()
self.assertEqual(alist, [])
def test_gc_during_ref_creation(self):
self.check_gc_during_creation(weakref.ref)
def test_gc_during_proxy_creation(self):
self.check_gc_during_creation(weakref.proxy)
def check_gc_during_creation(self, makeref):
thresholds = gc.get_threshold()
gc.set_threshold(1, 1, 1)
gc.collect()
class A:
pass
def callback(*args):
pass
referenced = A()
a = A()
a.a = a
a.wr = makeref(referenced)
try:
# now make sure the object and the ref get labeled as
# cyclic trash:
a = A()
weakref.ref(referenced, callback)
finally:
gc.set_threshold(*thresholds)
def test_ref_created_during_del(self):
# Bug #1377858
# A weakref created in an object's __del__() would crash the
# interpreter when the weakref was cleaned up since it would refer to
# non-existent memory. This test should not segfault the interpreter.
class Target(object):
def __del__(self):
global ref_from_del
ref_from_del = weakref.ref(self)
w = Target()
def test_init(self):
# Issue 3634
# <weakref to class>.__init__() doesn't check errors correctly
r = weakref.ref(Exception)
self.assertRaises(TypeError, r.__init__, 0, 0, 0, 0, 0)
# No exception should be raised here
gc.collect()
class SubclassableWeakrefTestCase(TestBase):
def test_subclass_refs(self):
class MyRef(weakref.ref):
def __init__(self, ob, callback=None, value=42):
self.value = value
super().__init__(ob, callback)
def __call__(self):
self.called = True
return super().__call__()
o = Object("foo")
mr = MyRef(o, value=24)
self.assert_(mr() is o)
self.assert_(mr.called)
self.assertEqual(mr.value, 24)
del o
self.assert_(mr() is None)
self.assert_(mr.called)
def test_subclass_refs_dont_replace_standard_refs(self):
class MyRef(weakref.ref):
pass
o = Object(42)
r1 = MyRef(o)
r2 = weakref.ref(o)
self.assert_(r1 is not r2)
self.assertEqual(weakref.getweakrefs(o), [r2, r1])
self.assertEqual(weakref.getweakrefcount(o), 2)
r3 = MyRef(o)
self.assertEqual(weakref.getweakrefcount(o), 3)
refs = weakref.getweakrefs(o)
self.assertEqual(len(refs), 3)
self.assert_(r2 is refs[0])
self.assert_(r1 in refs[1:])
self.assert_(r3 in refs[1:])
def test_subclass_refs_dont_conflate_callbacks(self):
class MyRef(weakref.ref):
pass
o = Object(42)
r1 = MyRef(o, id)
r2 = MyRef(o, str)
self.assert_(r1 is not r2)
refs = weakref.getweakrefs(o)
self.assert_(r1 in refs)
self.assert_(r2 in refs)
def test_subclass_refs_with_slots(self):
class MyRef(weakref.ref):
__slots__ = "slot1", "slot2"
def __new__(type, ob, callback, slot1, slot2):
return weakref.ref.__new__(type, ob, callback)
def __init__(self, ob, callback, slot1, slot2):
self.slot1 = slot1
self.slot2 = slot2
def meth(self):
return self.slot1 + self.slot2
o = Object(42)
r = MyRef(o, None, "abc", "def")
self.assertEqual(r.slot1, "abc")
self.assertEqual(r.slot2, "def")
self.assertEqual(r.meth(), "abcdef")
self.failIf(hasattr(r, "__dict__"))
def test_subclass_refs_with_cycle(self):
# Bug #3110
# An instance of a weakref subclass can have attributes.
# If such a weakref holds the only strong reference to the object,
# deleting the weakref will delete the object. In this case,
# the callback must not be called, because the ref object is
# being deleted.
class MyRef(weakref.ref):
pass
# Use a local callback, for "regrtest -R::"
# to detect refcounting problems
def callback(w):
self.cbcalled += 1
o = C()
r1 = MyRef(o, callback)
r1.o = o
del o
del r1 # Used to crash here
self.assertEqual(self.cbcalled, 0)
# Same test, with two weakrefs to the same object
# (since code paths are different)
o = C()
r1 = MyRef(o, callback)
r2 = MyRef(o, callback)
r1.r = r2
r2.o = o
del o
del r2
del r1 # Used to crash here
self.assertEqual(self.cbcalled, 0)
class Object:
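    """Simple hashable, orderable object used as a weak-referenceable test subject."""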
def __init__(self, arg):
self.arg = arg
def __repr__(self):
return "<Object %r>" % self.arg
def __lt__(self, other):
if isinstance(other, Object):
return self.arg < other.arg
return NotImplemented
def __hash__(self):
return hash(self.arg)
class MappingTestCase(TestBase):
COUNT = 10
def test_weak_values(self):
#
# This exercises d.copy(), d.items(), d[], del d[], len(d).
#
dict, objects = self.make_weak_valued_dict()
for o in objects:
self.assertEqual(weakref.getweakrefcount(o), 1)
self.assert_(o is dict[o.arg],
"wrong object returned by weak dict!")
items1 = list(dict.items())
items2 = list(dict.copy().items())
items1.sort()
items2.sort()
self.assertEqual(items1, items2,
"cloning of weak-valued dictionary did not work!")
del items1, items2
self.assertEqual(len(dict), self.COUNT)
del objects[0]
self.assertEqual(len(dict), self.COUNT - 1,
"deleting object did not cause dictionary update")
del objects, o
self.assertEqual(len(dict), 0,
"deleting the values did not clear the dictionary")
# regression on SF bug #447152:
dict = weakref.WeakValueDictionary()
self.assertRaises(KeyError, dict.__getitem__, 1)
dict[2] = C()
self.assertRaises(KeyError, dict.__getitem__, 2)
def test_weak_keys(self):
#
# This exercises d.copy(), d.items(), d[] = v, d[], del d[],
# len(d), k in d.
#
dict, objects = self.make_weak_keyed_dict()
for o in objects:
self.assert_(weakref.getweakrefcount(o) == 1,
"wrong number of weak references to %r!" % o)
self.assert_(o.arg is dict[o],
"wrong object returned by weak dict!")
items1 = dict.items()
items2 = dict.copy().items()
self.assertEqual(set(items1), set(items2),
"cloning of weak-keyed dictionary did not work!")
del items1, items2
self.assertEqual(len(dict), self.COUNT)
del objects[0]
self.assert_(len(dict) == (self.COUNT - 1),
"deleting object did not cause dictionary update")
del objects, o
self.assert_(len(dict) == 0,
"deleting the keys did not clear the dictionary")
o = Object(42)
dict[o] = "What is the meaning of the universe?"
self.assert_(o in dict)
self.assert_(34 not in dict)
def test_weak_keyed_iters(self):
dict, objects = self.make_weak_keyed_dict()
self.check_iters(dict)
# Test keyrefs()
refs = dict.keyrefs()
self.assertEqual(len(refs), len(objects))
objects2 = list(objects)
for wr in refs:
ob = wr()
self.assert_(ob in dict)
self.assert_(ob in dict)
self.assertEqual(ob.arg, dict[ob])
objects2.remove(ob)
self.assertEqual(len(objects2), 0)
# Test iterkeyrefs()
objects2 = list(objects)
self.assertEqual(len(list(dict.keyrefs())), len(objects))
for wr in dict.keyrefs():
ob = wr()
self.assert_(ob in dict)
self.assert_(ob in dict)
self.assertEqual(ob.arg, dict[ob])
objects2.remove(ob)
self.assertEqual(len(objects2), 0)
def test_weak_valued_iters(self):
dict, objects = self.make_weak_valued_dict()
self.check_iters(dict)
# Test valuerefs()
refs = dict.valuerefs()
self.assertEqual(len(refs), len(objects))
objects2 = list(objects)
for wr in refs:
ob = wr()
self.assertEqual(ob, dict[ob.arg])
self.assertEqual(ob.arg, dict[ob.arg].arg)
objects2.remove(ob)
self.assertEqual(len(objects2), 0)
# Test itervaluerefs()
objects2 = list(objects)
self.assertEqual(len(list(dict.itervaluerefs())), len(objects))
for wr in dict.itervaluerefs():
ob = wr()
self.assertEqual(ob, dict[ob.arg])
self.assertEqual(ob.arg, dict[ob.arg].arg)
objects2.remove(ob)
self.assertEqual(len(objects2), 0)
def check_iters(self, dict):
# item iterator:
items = list(dict.items())
for item in dict.items():
items.remove(item)
self.assertFalse(items, "items() did not touch all items")
# key iterator, via __iter__():
keys = list(dict.keys())
for k in dict:
keys.remove(k)
self.assertFalse(keys, "__iter__() did not touch all keys")
# key iterator, via iterkeys():
keys = list(dict.keys())
for k in dict.keys():
keys.remove(k)
self.assertFalse(keys, "iterkeys() did not touch all keys")
# value iterator:
values = list(dict.values())
for v in dict.values():
values.remove(v)
self.assertFalse(values,
"itervalues() did not touch all values")
def test_make_weak_keyed_dict_from_dict(self):
o = Object(3)
dict = weakref.WeakKeyDictionary({o:364})
self.assertEqual(dict[o], 364)
def test_make_weak_keyed_dict_from_weak_keyed_dict(self):
o = Object(3)
dict = weakref.WeakKeyDictionary({o:364})
dict2 = weakref.WeakKeyDictionary(dict)
self.assertEqual(dict[o], 364)
def make_weak_keyed_dict(self):
dict = weakref.WeakKeyDictionary()
objects = list(map(Object, range(self.COUNT)))
for o in objects:
dict[o] = o.arg
return dict, objects
def make_weak_valued_dict(self):
dict = weakref.WeakValueDictionary()
objects = list(map(Object, range(self.COUNT)))
for o in objects:
dict[o.arg] = o
return dict, objects
def check_popitem(self, klass, key1, value1, key2, value2):
weakdict = klass()
weakdict[key1] = value1
weakdict[key2] = value2
self.assertEqual(len(weakdict), 2)
k, v = weakdict.popitem()
self.assertEqual(len(weakdict), 1)
if k is key1:
self.assert_(v is value1)
else:
self.assert_(v is value2)
k, v = weakdict.popitem()
self.assertEqual(len(weakdict), 0)
if k is key1:
self.assert_(v is value1)
else:
self.assert_(v is value2)
def test_weak_valued_dict_popitem(self):
self.check_popitem(weakref.WeakValueDictionary,
"key1", C(), "key2", C())
def test_weak_keyed_dict_popitem(self):
self.check_popitem(weakref.WeakKeyDictionary,
C(), "value 1", C(), "value 2")
def check_setdefault(self, klass, key, value1, value2):
self.assert_(value1 is not value2,
"invalid test"
" -- value parameters must be distinct objects")
weakdict = klass()
o = weakdict.setdefault(key, value1)
self.assert_(o is value1)
self.assert_(key in weakdict)
self.assert_(weakdict.get(key) is value1)
self.assert_(weakdict[key] is value1)
o = weakdict.setdefault(key, value2)
self.assert_(o is value1)
self.assert_(key in weakdict)
self.assert_(weakdict.get(key) is value1)
self.assert_(weakdict[key] is value1)
def test_weak_valued_dict_setdefault(self):
self.check_setdefault(weakref.WeakValueDictionary,
"key", C(), C())
def test_weak_keyed_dict_setdefault(self):
self.check_setdefault(weakref.WeakKeyDictionary,
C(), "value 1", "value 2")
def check_update(self, klass, dict):
#
# This exercises d.update(), len(d), d.keys(), k in d,
# d.get(), d[].
#
weakdict = klass()
weakdict.update(dict)
self.assertEqual(len(weakdict), len(dict))
for k in weakdict.keys():
self.assert_(k in dict,
"mysterious new key appeared in weak dict")
v = dict.get(k)
self.assert_(v is weakdict[k])
self.assert_(v is weakdict.get(k))
for k in dict.keys():
self.assert_(k in weakdict,
"original key disappeared in weak dict")
v = dict[k]
self.assert_(v is weakdict[k])
self.assert_(v is weakdict.get(k))
def test_weak_valued_dict_update(self):
self.check_update(weakref.WeakValueDictionary,
{1: C(), 'a': C(), C(): C()})
def test_weak_keyed_dict_update(self):
self.check_update(weakref.WeakKeyDictionary,
{C(): 1, C(): 2, C(): 3})
def test_weak_keyed_delitem(self):
d = weakref.WeakKeyDictionary()
o1 = Object('1')
o2 = Object('2')
d[o1] = 'something'
d[o2] = 'something'
self.assertEqual(len(d), 2)
del d[o1]
self.assertEqual(len(d), 1)
self.assertEqual(list(d.keys()), [o2])
def test_weak_valued_delitem(self):
d = weakref.WeakValueDictionary()
o1 = Object('1')
o2 = Object('2')
d['something'] = o1
d['something else'] = o2
self.assertEqual(len(d), 2)
del d['something']
self.assertEqual(len(d), 1)
self.assert_(list(d.items()) == [('something else', o2)])
def test_weak_keyed_bad_delitem(self):
d = weakref.WeakKeyDictionary()
o = Object('1')
# An attempt to delete an object that isn't there should raise
# KeyError. It didn't before 2.3.
self.assertRaises(KeyError, d.__delitem__, o)
self.assertRaises(KeyError, d.__getitem__, o)
# If a key isn't of a weakly referencable type, __getitem__ and
# __setitem__ raise TypeError. __delitem__ should too.
self.assertRaises(TypeError, d.__delitem__, 13)
self.assertRaises(TypeError, d.__getitem__, 13)
self.assertRaises(TypeError, d.__setitem__, 13, 13)
def test_weak_keyed_cascading_deletes(self):
# SF bug 742860. For some reason, before 2.3 __delitem__ iterated
# over the keys via self.data.iterkeys(). If things vanished from
# the dict during this (or got added), that caused a RuntimeError.
d = weakref.WeakKeyDictionary()
mutate = False
class C(object):
def __init__(self, i):
self.value = i
def __hash__(self):
return hash(self.value)
def __eq__(self, other):
if mutate:
# Side effect that mutates the dict, by removing the
# last strong reference to a key.
del objs[-1]
return self.value == other.value
objs = [C(i) for i in range(4)]
for o in objs:
d[o] = o.value
del o # now the only strong references to keys are in objs
# Find the order in which iterkeys sees the keys.
objs = list(d.keys())
# Reverse it, so that the iteration implementation of __delitem__
# has to keep looping to find the first object we delete.
objs.reverse()
# Turn on mutation in C.__eq__. The first time thru the loop,
# under the iterkeys() business the first comparison will delete
# the last item iterkeys() would see, and that causes a
# RuntimeError: dictionary changed size during iteration
# when the iterkeys() loop goes around to try comparing the next
# key. After this was fixed, it just deletes the last object *our*
# "for o in obj" loop would have gotten to.
mutate = True
count = 0
for o in objs:
count += 1
del d[o]
self.assertEqual(len(d), 0)
self.assertEqual(count, 2)
from test import mapping_tests
class WeakValueDictionaryTestCase(mapping_tests.BasicTestMappingProtocol):
"""Check that WeakValueDictionary conforms to the mapping protocol"""
__ref = {"key1":Object(1), "key2":Object(2), "key3":Object(3)}
type2test = weakref.WeakValueDictionary
def _reference(self):
return self.__ref.copy()
class WeakKeyDictionaryTestCase(mapping_tests.BasicTestMappingProtocol):
"""Check that WeakKeyDictionary conforms to the mapping protocol"""
__ref = {Object("key1"):1, Object("key2"):2, Object("key3"):3}
type2test = weakref.WeakKeyDictionary
def _reference(self):
return self.__ref.copy()
libreftest = """ Doctest for examples in the library reference: weakref.rst
>>> import weakref
>>> class Dict(dict):
... pass
...
>>> obj = Dict(red=1, green=2, blue=3) # this object is weak referencable
>>> r = weakref.ref(obj)
>>> print(r() is obj)
True
>>> import weakref
>>> class Object:
... pass
...
>>> o = Object()
>>> r = weakref.ref(o)
>>> o2 = r()
>>> o is o2
True
>>> del o, o2
>>> print(r())
None
>>> import weakref
>>> class ExtendedRef(weakref.ref):
... def __init__(self, ob, callback=None, **annotations):
... super().__init__(ob, callback)
... self.__counter = 0
... for k, v in annotations.items():
... setattr(self, k, v)
... def __call__(self):
... '''Return a pair containing the referent and the number of
... times the reference has been called.
... '''
... ob = super().__call__()
... if ob is not None:
... self.__counter += 1
... ob = (ob, self.__counter)
... return ob
...
>>> class A: # not in docs from here, just testing the ExtendedRef
... pass
...
>>> a = A()
>>> r = ExtendedRef(a, foo=1, bar="baz")
>>> r.foo
1
>>> r.bar
'baz'
>>> r()[1]
1
>>> r()[1]
2
>>> r()[0] is a
True
>>> import weakref
>>> _id2obj_dict = weakref.WeakValueDictionary()
>>> def remember(obj):
... oid = id(obj)
... _id2obj_dict[oid] = obj
... return oid
...
>>> def id2obj(oid):
... return _id2obj_dict[oid]
...
>>> a = A() # from here, just testing
>>> a_id = remember(a)
>>> id2obj(a_id) is a
True
>>> del a
>>> try:
... id2obj(a_id)
... except KeyError:
... print('OK')
... else:
... print('WeakValueDictionary error')
OK
"""
__test__ = {'libreftest' : libreftest}
def test_main():
support.run_unittest(
ReferencesTestCase,
MappingTestCase,
WeakValueDictionaryTestCase,
WeakKeyDictionaryTestCase,
SubclassableWeakrefTestCase,
)
support.run_doctest(sys.modules[__name__])
if __name__ == "__main__":
test_main()
| MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-3.0/Lib/test/test_weakref.py | Python | mit | 40,936 |
from django.http import HttpResponse
from django.shortcuts import render_to_response
from models import Delivery
from django.core import serializers
import json
def helloworld(request):
return HttpResponse('Hello world', content_type='text/plain')
def list_all(request):
    out = ''
    for delivery in Delivery.objects.all():
        #out += '{user:>20}:{assignment}\n'.format(user=delivery.user, assignment=delivery.assignment)
        out += '{user:>20}:{assignment}\n'.format(**delivery.__dict__)
    return HttpResponse(out, content_type='text/plain')
    # JSON alternative:
    #out = []
    #for delivery in Delivery.objects.all():
        #out.append({'user': delivery.user})
    #return HttpResponse(json.dumps(out), content_type='text/plain')
def list_all_tpl(request):
    deliveries = Delivery.objects.all() # usually many lines!
return render_to_response('list-all.django.html',
{'deliveries': deliveries})
def restful_delivery(request, id=None):
if request.method == 'GET':
if id == None:
qry = Delivery.objects.all()
q = request.GET.get('q')
if q != None:
qry = qry.filter(user__icontains=q)
jsondata = serializers.serialize("json", qry)
else:
delivery = Delivery.objects.get(id=id)
deliverydict = dict(id=delivery.id,
user=delivery.user,
contents=delivery.contents,
feedback=delivery.feedback)
jsondata = json.dumps(deliverydict)
return HttpResponse(jsondata, content_type='text/plain')
elif request.method == 'POST':
pass # TODO
elif request.method == 'PUT':
pass # TODO
elif request.method == 'DELETE':
pass # TODO
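# Illustrative summary (not part of the original course file): assuming a urls.py
# mapping that routes delivery requests here (not shown in this module),
# restful_delivery answers GET requests in two ways:
#   - without an id: all deliveries serialized via django.core.serializers,
#     optionally narrowed with ?q=<substring of the user name>
#   - with an id: a single JSON object of the form
#     {"id": ..., "user": ..., "contents": ..., "feedback": ...}
# The POST/PUT/DELETE branches are deliberately left as TODOs above.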
| espenak/inf3331-djevelskap | djevelskap/base/views.py | Python | bsd-3-clause | 1,764 |
#problem 51
| jhuang314/euler | p51.py | Python | mit | 14 |
# -*- coding: utf-8 -*-
{
'name': 'test-field-converter',
'version': '0.1',
'category': 'Tests',
'description': """Tests of field conversions""",
'author': 'OpenERP SA',
'maintainer': 'OpenERP SA',
'website': 'http://www.openerp.com',
'depends': ['base'],
'data': ['ir.model.access.csv'],
'installable': True,
'auto_install': False,
}
| vileopratama/vitech | src/openerp/addons/test_converter/__openerp__.py | Python | mit | 379 |
from distutils.core import setup
from distutils.extension import Extension
import os,os.path
import sys
import numpy as np
if sys.platform == 'win32' or sys.platform == 'win64':
print 'Windows is not a supported platform.'
quit()
else:
COOLCVMFSROOT="/cvmfs/icecube.opensciencegrid.org/py2-v3_early_access/"
include_dirs = [os.environ['SNOTBUILDPATH']+"/include",COOLCVMFSROOT+"/RHEL_6_x86_64/include",
np.get_include(),
'../Likelihood/',
'../LikelihoodFit/',
'.']
libraries = ['python2.7','boost_python','boost_filesystem','boost_iostreams','boost_system','boost_regex',
'LeptonWeighter',
'SQuIDS','nuSQuIDS',
'gsl','gslcblas','m',
'hdf5','hdf5_hl','PhysTools']
if sys.platform.startswith('linux'):
libraries.append('supc++')#'cxxrt'
library_dirs = [os.environ['SNOTBUILDPATH']+"/lib",COOLCVMFSROOT+"/RHEL_6_x86_64/lib",COOLCVMFSROOT+"/RHEL_6_x86_64/lib64"]
files = ['SterileSearchPy.cpp']
setup(name = 'SterileSearchPy',
ext_modules = [
Extension('SterileSearchPy',files,
library_dirs=library_dirs,
libraries=libraries,
include_dirs=include_dirs,
extra_objects=["../LikelihoodFit/analysisWeighting.o","../LikelihoodFit/Event.o","../LikelihoodFit/linpack.o","../LikelihoodFit/SterileSearch.o",
"../LikelihoodFit/compactIO.o","../LikelihoodFit/lbfgsb.o","../LikelihoodFit/oversizeWeight.o"],
extra_compile_args=['-O3','-fPIC','-std=c++11','-Wno-unused-local-typedef',"-DSQUIDS_USE_VECTOR_EXTENSIONS=0"],
depends=[]),
]
)
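# Illustrative note (not part of the original file): with SNOTBUILDPATH pointing
# at a build tree that provides the headers and libraries listed above, the
# extension would typically be built in place with:
#
#     python setup.py build_ext --inplace
#
# after which `import SterileSearchPy` picks up the generated shared object.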
| arguelles/Sterilizer | python/setup.py | Python | lgpl-3.0 | 1,757 |
from django.contrib import admin
from openedx.features.data_extract.models import CourseDataExtraction
#admin.site.register(CourseDataExtraction)
| philanthropy-u/edx-platform | openedx/features/data_extract/admin.py | Python | agpl-3.0 | 147 |
#!/usr/bin/env python
import sys
import os
import re
try:
path = sys.argv[1]
length = int(sys.argv[2])
except:
print >>sys.stderr, "Usage: $0 <path> <length>"
sys.exit(1)
path = re.sub(os.getenv('HOME'), '~', path)
while len(path) > length:
dirs = path.split("/");
# Find the longest directory in the path.
max_index = -1
max_length = 3
for i in range(len(dirs) - 1):
if len(dirs[i]) > max_length:
max_index = i
max_length = len(dirs[i])
# Shorten it by one character.
if max_index >= 0:
dirs[max_index] = dirs[max_index][:max_length-3] + ".."
path = "/".join(dirs)
# Didn't find anything to shorten. This is as good as it gets.
else:
break
print(path)
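# Worked example (illustrative, assuming HOME=/home/user):
#
#     $ shorten_path.py /home/user/projects/verylongdirectoryname/src 25
#     ~/projects/verylong../src
#
# The longest directory component (excluding the last one) is abbreviated one
# character at a time until the whole path fits within the requested length.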
| werebus/dotfiles | bin/shorten_path.py | Python | mit | 772 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from Peach.analyzer import *
from Peach.Engine.dom import *
from Peach.Engine.common import *
from Peach.Engine.parser import PeachResolver
from lxml import etree
class XmlAnalyzer(Analyzer):
"""
Produces data models or PeachPits from XML documents.
"""
supportDataElement = True
supportCommandLine = True
supportTopLevel = True
def __init__(self):
pass
def asDataElement(self, parent, args, dataBuffer):
"""
Called when Analyzer is used in a data model.
Should return a DataElement such as Block, Number or String.
"""
if len(dataBuffer) == 0:
return
dom = _Xml2Dom().xml2Dom(dataBuffer)
# replace parent with new dom
dom.name = parent.name
parentOfParent = parent.parent
indx = parentOfParent.index(parent)
del parentOfParent[parent.name]
parentOfParent.insert(indx, dom)
def asCommandLine(self, args):
"""
Called when Analyzer is used from command line.
Analyzer should produce PeachPit XML as output.
"""
try:
inFile = args["xmlfile"]
outFile = args["out"]
except:
raise PeachException("XmlAnalyzer requires two parameters, xmlfile and out.")
xml = _Xml2Peach().xml2Peach("file:" + inFile)
with open(outFile, "wb+") as fo:
fo.write(xml)
def asTopLevel(self, peach, args):
"""
Called when Analyzer is used from top level.
From the top level producing zero or more data models and state models is possible.
"""
raise Exception("asTopLevel not supported")
class _Xml2Peach(object):
XmlContainer = """
<?xml version="1.0" encoding="utf-8"?>
<Peach>
<Include ns="default" src="file:defaults.xml" />
<DataModel name="TheDataModel">
%s
</DataModel>
<StateModel name="TheState" initialState="Initial">
<State name="Initial">
<Action type="output">
<DataModel ref="TheDataModel" />
</Action>
</State>
</StateModel>
<Agent name="LocalAgent" location="http://127.0.0.1:9000">
<Monitor class="test.TestStopOnFirst" />
</Agent>
-->
<Test name="TheTest">
<!-- <Agent ref="LocalAgent"/> -->
<StateModel ref="TheState"/>
<!-- TODO: Complete publisher -->
<Publisher class="stdout.Stdout" />
</Test>
<Run name="DefaultRun">
<Test ref="TheTest" />
</Run>
</Peach>
"""
def xml2Peach(self, url):
parser = etree.XMLParser()
parser.resolvers.add(PeachResolver())
doc = etree.parse(url, parser=parser)
peachDoc = etree.Element("DEADBEEF")
self.handleElement(doc, peachDoc)
# Get the string representation
# TODO: make it better
value = etree.tostring(peachDoc, pretty_print=True).strip()
deadbeef, value = value[:10], value[10:]
assert deadbeef == "<DEADBEEF>"
value, deadbeef = value[:-11], value[-11:]
assert deadbeef == "</DEADBEEF>"
return self.XmlContainer % value
def handleElement(self, node, parent):
"""
Handle an XML element, children and attributes. Returns an XmlElement object.
"""
if parent is None:
return None
# Element
element = etree.Element("XmlElement")
ns, tag = split_ns(node.tag)
element.set("elementName", tag)
if ns is not None:
element.set("ns", ns)
parent.append(element)
# Element attributes
for attrib in node.keys():
attribElement = self.handleAttribute(attrib, node.get(attrib), element)
element.append(attribElement)
# Element children
self._handleText(node.text, element)
for child in node.iterchildren():
if etree.iselement(child): # TODO: skip comments
self.handleElement(child, element)
self._handleText(child.tail, element)
return element
def _handleText(self, text, parent):
if text is not None and len(text.strip('\n\r\t\x10 ')) > 0:
string = etree.Element("String")
string.set("value", text)
string.set("type", "utf8")
parent.append(string)
def handleAttribute(self, attrib, attribObj, parent):
"""
Handle an XML attribute. Returns an XmlAttribute object.
"""
# Attribute
element = etree.Element("XmlAttribute")
ns, attrib = split_ns(attrib)
if ns is not None:
element.set("ns", ns)
element.set("attributeName", attrib)
# Attribute value
string = etree.Element("String")
string.set("value", attribObj)
string.set("type", "utf8")
element.append(string)
return element
class _Xml2Dom(object):
"""
Convert an XML Document into a Peach DOM.
"""
def xml2Dom(self, data):
child = etree.XML(data)
doc = child.getroottree()
root = self.handleElement(child, None)
return root
def handleElement(self, node, parent):
"""
Handle an XML element, children and attributes. Returns an XmlElement object.
"""
doc = node.getroottree()
# Element
element = XmlElement(None, parent)
ns, tag = split_ns(node.tag)
if ns is not None:
element.xmlNamespace = ns
element.elementName = tag
# Element attributes
for attrib in node.keys():
attribElement = self.handleAttribute(attrib, node.get(attrib), element)
element.append(attribElement)
# Element children
self._handleText(node.text, element)
for child in node.iterchildren():
if etree.iselement(child): # TODO: skip comments
childElement = self.handleElement(child, element)
element.append(childElement)
self._handleText(child.tail, element)
return element
def _handleText(self, text, parent):
if text is not None and len(text.strip('\n\r\t\x10 ')) > 0:
string = String(None, parent)
string.defaultValue = text
parent.append(string)
try:
_ = int(string.defaultValue)
hint = Hint("NumericalString", string)
hint.value = "true"
string.hints.append(hint)
except ValueError:
pass
def handleAttribute(self, attrib, attribObj, parent):
"""
Handle an XML attribute. Returns an XmlAttribute object.
"""
# Attribute
element = XmlAttribute(None, parent)
ns, attrib = split_ns(attrib)
if ns is not None:
element.xmlNamespace = ns
element.attributeName = attrib
# Attribute value
string = String(None, element)
string.defaultValue = attribObj
element.append(string)
return element
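# Illustrative usage sketch (not part of the original module): converting an XML
# document into a Peach Pit from Python code would look roughly like this,
# assuming Peach is importable and a local "sample.xml" exists:
#
#     analyzer = XmlAnalyzer()
#     analyzer.asCommandLine({"xmlfile": "sample.xml", "out": "sample_pit.xml"})
#
# asCommandLine wraps the generated <XmlElement>/<XmlAttribute> tree in the
# XmlContainer template defined above and writes the resulting Pit file.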
| sigma-random/peach | Peach/Analyzers/xml.py | Python | mpl-2.0 | 7,147 |
import os
import uuid
import yaml
import json
import shutil
import typing
import itertools
from copy import deepcopy
from datetime import datetime
from collections import defaultdict, namedtuple
from flask import current_app
from flask_security import current_user
from sqlalchemy.orm import aliased
from sqlalchemy.sql.expression import text
from sqlalchemy.orm.attributes import flag_modified
from sqlalchemy import or_ as sql_or, and_ as sql_and
from cloudify.constants import TERMINATED_STATES as TERMINATED_TASK_STATES
from cloudify.cryptography_utils import encrypt
from cloudify.workflows import tasks as cloudify_tasks
from cloudify.utils import parse_utc_datetime_relative
from cloudify.models_states import (SnapshotState,
ExecutionState,
VisibilityState,
BlueprintUploadState,
DeploymentModificationState,
PluginInstallationState,
DeploymentState)
from cloudify_rest_client.client import CloudifyClient
from dsl_parser import constants, tasks
from manager_rest import premium_enabled
from manager_rest.maintenance import get_maintenance_state
from manager_rest.constants import (DEFAULT_TENANT_NAME,
FILE_SERVER_BLUEPRINTS_FOLDER,
FILE_SERVER_UPLOADED_BLUEPRINTS_FOLDER,
FILE_SERVER_DEPLOYMENTS_FOLDER)
from manager_rest.utils import (send_event,
get_formatted_timestamp,
is_create_global_permitted,
validate_global_modification,
validate_deployment_and_site_visibility,
extract_host_agent_plugins_from_plan)
from manager_rest.rest.rest_utils import (
update_inter_deployment_dependencies,
verify_blueprint_uploaded_state,
compute_rule_from_scheduling_params,
)
from manager_rest.deployment_update.constants import STATES as UpdateStates
from manager_rest.plugins_update.constants import STATES as PluginsUpdateStates
from manager_rest.storage import (db,
get_storage_manager,
models,
get_node)
from . import utils
from . import config
from . import app_context
from . import workflow_executor
from . import manager_exceptions
# used for keeping track of how many executions are currently active, and how
# many more the group can still run
_ExecGroupStats = namedtuple('ExecGroupStats', ['active', 'concurrency'])
class ResourceManager(object):
def __init__(self, sm=None):
self.sm = sm or get_storage_manager()
self._cached_queued_execs_query = None
def list_executions(self, include=None, is_include_system_workflows=False,
filters=None, pagination=None, sort=None,
all_tenants=False, get_all_results=False,
load_relationships=False):
filters = filters or {}
is_system_workflow = filters.get('is_system_workflow')
if is_system_workflow:
filters['is_system_workflow'] = []
for value in is_system_workflow:
value = str(value).lower() == 'true'
filters['is_system_workflow'].append(value)
elif not is_include_system_workflows:
filters['is_system_workflow'] = [False]
return self.sm.list(
models.Execution,
include=include,
filters=filters,
pagination=pagination,
sort=sort,
all_tenants=all_tenants,
get_all_results=get_all_results,
load_relationships=load_relationships,
)
def update_deployment_statuses(self, latest_execution):
"""
Update deployment statuses based on latest execution
:param latest_execution: Latest execution object
"""
if latest_execution.workflow_id == 'delete_deployment_environment' or\
not latest_execution.deployment_id:
return
node_instances = self.sm.list(
models.NodeInstance,
filters={
'deployment_id': latest_execution.deployment_id,
'state': lambda col: col != 'started'
},
get_all_results=True
)
installation_status = DeploymentState.ACTIVE
if node_instances:
installation_status = DeploymentState.INACTIVE
dep = latest_execution.deployment
dep.installation_status = installation_status
dep.latest_execution = latest_execution
dep.deployment_status = dep.evaluate_deployment_status()
self.sm.update(dep)
def update_execution_status(self, execution_id, status, error):
if status in ExecutionState.END_STATES:
with self.sm.transaction():
execution = self.sm.get(
models.Execution, execution_id, locking=True)
override_status = self._update_finished_execution_dependencies(
execution)
if override_status is not None:
status = override_status
affected_parent_deployments = set()
with self.sm.transaction():
execution = self.sm.get(models.Execution, execution_id,
locking=True)
if execution._deployment_fk:
affected_parent_deployments.add(execution._deployment_fk)
deployment = execution.deployment
else:
deployment = None
workflow_id = execution.workflow_id
if not self._validate_execution_update(execution.status, status):
raise manager_exceptions.InvalidExecutionUpdateStatus(
f"Invalid relationship - can't change status from "
f'{execution.status} to {status} for "{execution.id}" '
f'execution while running "{workflow_id}" workflow.')
execution.status = status
execution.error = error
self._update_execution_group(execution)
if status == ExecutionState.STARTED:
execution.started_at = utils.get_formatted_timestamp()
if deployment:
execution.deployment.deployment_status = \
DeploymentState.IN_PROGRESS
execution.deployment.latest_execution = execution
self.sm.update(deployment)
if status in ExecutionState.END_STATES:
execution.ended_at = utils.get_formatted_timestamp()
if workflow_id == 'delete_deployment_environment':
deleted_dep_parents = \
self._on_deployment_environment_deleted(execution)
if deleted_dep_parents:
affected_parent_deployments |= deleted_dep_parents
execution = self.sm.update(execution)
self.update_deployment_statuses(execution)
self._send_hook(execution)
# render the execution here, because immediately afterwards
# we'll delete it, and then we won't be able to render it anymore
res = execution.to_response()
# do not use `execution` after this transaction ends, because it
# would possibly require refetching the related objects, and by
# then, the execution could've been deleted already
del execution
if status in ExecutionState.END_STATES \
and get_maintenance_state() is None:
self.start_queued_executions()
# If the execution is a deployment update, and the status we're
# updating to is one which should cause the update to fail - do it here
if workflow_id in ('update', 'csys_new_deployment_update') and \
status in [ExecutionState.FAILED, ExecutionState.CANCELLED]:
dep_update = self.sm.get(models.DeploymentUpdate, None,
filters={'execution_id': execution_id})
dep_update.state = UpdateStates.FAILED
self.sm.update(dep_update)
# Similarly for a plugin update
if workflow_id == 'update_plugin' and \
status in [ExecutionState.FAILED,
ExecutionState.CANCELLED]:
plugin_update = self.sm.get(models.PluginsUpdate, None,
filters={'execution_id': execution_id})
if plugin_update:
plugin_update.state = PluginsUpdateStates.FAILED
self.sm.update(plugin_update)
if plugin_update.blueprint:
for dep_id in plugin_update.deployments_to_update:
dep = self.sm.get(models.Deployment, dep_id)
dep.blueprint = plugin_update.blueprint # original bp
self.sm.update(dep)
if plugin_update.temp_blueprint:
# Delete the temporary blueprint
if not plugin_update.temp_blueprint.deployments:
self.sm.delete(plugin_update.temp_blueprint)
else:
plugin_update.temp_blueprint.is_hidden = False
self.sm.update(plugin_update.temp_blueprint)
if affected_parent_deployments:
self.recalc_ancestors(affected_parent_deployments)
return res
def _on_deployment_environment_deleted(self, execution):
if execution.status == ExecutionState.TERMINATED:
return self.delete_deployment(execution.deployment)
if execution.status == ExecutionState.FAILED:
if execution.parameters and execution.parameters.get('force'):
return self.delete_deployment(execution.deployment)
def start_queued_executions(self):
"""Dequeue and start executions.
Attempt to fetch and run as many executions as we can, and if
any of those fail to run, try running more.
"""
to_run = []
for retry in range(5):
with self.sm.transaction():
dequeued = list(self._get_queued_executions())
if not dequeued:
break
all_started = True
for execution in dequeued:
refreshed, messages = self._refresh_execution(execution)
to_run.extend(messages)
all_started &= refreshed
if all_started:
break
workflow_executor.execute_workflow(to_run)
def _refresh_execution(self, execution: models.Execution) -> (bool, list):
"""Prepare the execution to be started.
        Re-evaluate parameters, and return whether the execution can run.
"""
execution.status = ExecutionState.PENDING
if not execution.deployment:
return True, self._prepare_execution_or_log(execution)
self.sm.refresh(execution.deployment)
try:
if execution and execution.deployment and \
execution.deployment.create_execution:
create_execution = execution.deployment.create_execution
delete_dep_env = 'delete_deployment_environment'
if (create_execution.status == ExecutionState.FAILED and
execution.workflow_id != delete_dep_env):
raise RuntimeError('create_deployment_environment failed')
execution.merge_workflow_parameters(
execution.parameters,
execution.deployment,
execution.workflow_id
)
execution.get_workflow() # try this here to fail early
except Exception as e:
execution.status = ExecutionState.FAILED
execution.error = str(e)
return False, []
else:
flag_modified(execution, 'parameters')
finally:
self.sm.update(execution)
db.session.flush([execution])
return True, self._prepare_execution_or_log(execution)
def _prepare_execution_or_log(self, execution: models.Execution) -> list:
try:
return self.prepare_executions(
[execution], queue=True, commit=False)
except Exception as e:
current_app.logger.warning(
'Could not dequeue execution %s: %s',
execution, e)
return []
def _queued_executions_query(self):
if self._cached_queued_execs_query is None:
executions = aliased(models.Execution)
queued_non_system_filter = db.and_(
executions.status == ExecutionState.QUEUED,
executions.is_system_workflow.is_(False)
)
exgrs = models.executions_groups_executions_table
group_concurrency_filter = (
~db.Query(exgrs)
.filter(exgrs.c.execution_group_id.in_(
db.bindparam('excluded_groups'))
)
.filter(exgrs.c.execution_id == executions._storage_id)
.exists()
)
            # fetch only executions that:
# - are either create-dep-env (priority!)
# - belong to deployments that have none of:
# - active executions
# - queued create-dep-env executions
other_execs_in_deployment_filter = db.or_(
executions.workflow_id == 'create_deployment_environment',
~db.Query(models.Execution)
.filter(
models.Execution._deployment_fk ==
executions._deployment_fk,
)
.filter(
db.or_(
models.Execution.status.in_(
ExecutionState.ACTIVE_STATES),
db.and_(
models.Execution.status == ExecutionState.QUEUED,
models.Execution.workflow_id ==
'create_deployment_environment'
)
)
)
.exists()
)
queued_query = (
db.Query(executions)
.filter(queued_non_system_filter)
.filter(other_execs_in_deployment_filter)
.filter(group_concurrency_filter)
.outerjoin(executions.execution_groups)
.options(db.joinedload(executions.deployment))
.with_for_update(of=executions)
)
self._cached_queued_execs_query = (
queued_query
.order_by(executions._storage_id)
.limit(5)
)
return self._cached_queued_execs_query
def _report_running(self):
"""Report currently-running executions.
This returns the amount of currently-running executions total,
and a dict of {group_id: [active in the group, group concurrency]}
"""
exgrs = models.executions_groups_executions_table
active_execs = (
db.session.query(
models.Execution._storage_id,
exgrs.c.execution_group_id,
models.ExecutionGroup.concurrency,
)
.select_from(models.Execution)
.outerjoin(
exgrs,
models.Execution._storage_id == exgrs.c.execution_id
)
.outerjoin(
models.ExecutionGroup,
models.ExecutionGroup._storage_id == exgrs.c.execution_group_id
)
.filter(models.Execution.status.in_(ExecutionState.ACTIVE_STATES))
.order_by(models.Execution._storage_id)
.all()
)
total_running = 0
groups = {}
for exc_id, group_id, concurrency in active_execs:
total_running += 1
if group_id is None:
continue
if group_id not in groups:
groups[group_id] = _ExecGroupStats(
active=0, concurrency=concurrency)
groups[group_id] = groups[group_id]._replace(
active=groups[group_id].active + 1)
return total_running, groups
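    # Illustrative note (not in the original source): _report_running() feeds the
    # dequeue logic below. For example, if a group with concurrency 2 already has
    # two active executions, its id ends up in `excluded_groups`, so the query
    # built by _queued_executions_query() skips any queued execution belonging to
    # that group:
    #
    #     total, groups = self._report_running()
    #     # groups == {<group storage id>: ExecGroupStats(active=2, concurrency=2)}
    #
    # Executions are then yielded at most one per deployment, until the global
    # max_concurrent_workflows limit is reached.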
def _get_queued_executions(self):
sort_by = {'created_at': 'asc'}
system_executions = self.sm.list(
models.Execution, filters={
'status': ExecutionState.QUEUED_STATE,
'is_system_workflow': True,
},
sort=sort_by,
get_all_results=True,
all_tenants=True,
locking=True,
).items
if system_executions:
yield system_executions[0]
return
total, groups = self._report_running()
excluded_groups = [
group_id
for group_id, (active, concurrency) in groups.items()
if active >= concurrency
]
queued_executions = (
db.session.query(models.Execution)
.from_statement(self._queued_executions_query())
.params(
excluded_groups=excluded_groups,
)
.all()
)
# deployments we've already emitted an execution for - only emit 1
# execution per deployment
seen_deployments = set()
for execution in queued_executions:
if total >= config.instance.max_concurrent_workflows:
break
for group in execution.execution_groups:
if group._storage_id not in groups:
groups[group._storage_id] = _ExecGroupStats(
active=0, concurrency=group.concurrency)
if any(
groups[g._storage_id].active >=
groups[g._storage_id].concurrency
for g in execution.execution_groups
):
# this execution cannot run, because it would exceed one
                # of its groups' concurrency limits
continue
if execution._deployment_fk in seen_deployments:
continue
for g in execution.execution_groups:
groups[g._storage_id] = groups[g._storage_id]._replace(
active=groups[g._storage_id].active + 1)
seen_deployments.add(execution._deployment_fk)
total += 1
yield execution
def _update_finished_execution_dependencies(self, execution):
"""Update IDDs affected by the finished executions.
This might result in invalid IDDs, in which case we'll override
the execution status to failed, and log the error. Nothing much
else we can do, the user has already created an invalid state,
let's just inform them of that.
"""
if execution._deployment_fk:
deployment = execution.deployment
else:
return
workflow_id = execution.workflow_id
if workflow_id == 'delete_deployment_environment':
return
try:
update_inter_deployment_dependencies(self.sm, deployment)
except Exception as e:
now = datetime.utcnow()
new_log = models.Log(
reported_timestamp=now,
timestamp=now,
execution=execution,
message=f'Failed updating dependencies of deployment '
f'{deployment.id}: {e}'
)
self.sm.put(new_log)
return ExecutionState.FAILED
def _validate_execution_update(self, current_status, future_status):
if current_status in ExecutionState.END_STATES:
return False
invalid_cancel_statuses = ExecutionState.ACTIVE_STATES + [
ExecutionState.TERMINATED]
if all((current_status == ExecutionState.CANCELLING,
future_status in invalid_cancel_statuses)):
return False
invalid_force_cancel_statuses = invalid_cancel_statuses + [
ExecutionState.CANCELLING]
if all((current_status == ExecutionState.FORCE_CANCELLING,
future_status in invalid_force_cancel_statuses)):
return False
return True
def _update_execution_group(self, execution: models.Execution):
for execution_group in execution.execution_groups:
event = models.Event(
event_type="execution_state_change",
reported_timestamp=utils.get_formatted_timestamp(),
execution_group=execution_group,
message=f"execution '{execution.id}' changed state "
f"to '{execution.status}'",
)
if execution.error:
event.message += f" with error '{execution.error}'"
self.sm.put(event)
if execution.deployment:
if execution.status == ExecutionState.TERMINATED and \
execution_group.success_group:
execution_group.success_group.deployments.append(
execution.deployment)
if execution.status == ExecutionState.FAILED and \
execution_group.failed_group:
execution_group.failed_group.deployments.append(
execution.deployment)
@staticmethod
def _get_conf_for_snapshots_wf():
config_instance = config.instance
return {
'file_server_root': config_instance.file_server_root,
'created_status': SnapshotState.CREATED,
'failed_status': SnapshotState.FAILED,
'postgresql_bin_path': config_instance.postgresql_bin_path,
'postgresql_username': config_instance.postgresql_username,
'postgresql_password': config_instance.postgresql_password,
'postgresql_db_name': config_instance.postgresql_db_name,
'db_host': config.instance.db_host,
'default_tenant_name': DEFAULT_TENANT_NAME,
'snapshot_restore_threads':
config_instance.snapshot_restore_threads
}
def create_snapshot_model(self,
snapshot_id,
status=SnapshotState.CREATING):
now = utils.get_formatted_timestamp()
visibility = VisibilityState.PRIVATE
new_snapshot = models.Snapshot(id=snapshot_id,
created_at=now,
status=status,
visibility=visibility,
error='')
return self.sm.put(new_snapshot)
def create_snapshot(self,
snapshot_id,
include_credentials,
include_logs,
include_events,
bypass_maintenance,
queue):
self.create_snapshot_model(snapshot_id)
try:
execution = models.Execution(
workflow_id='create_snapshot',
parameters={
'snapshot_id': snapshot_id,
'include_credentials': include_credentials,
'include_logs': include_logs,
'include_events': include_events,
'config': self._get_conf_for_snapshots_wf()
},
is_system_workflow=True,
status=ExecutionState.PENDING,
)
self.sm.put(execution)
return execution, self.prepare_executions(
[execution],
queue=queue,
bypass_maintenance=bypass_maintenance)
except manager_exceptions.ExistingRunningExecutionError:
snapshot = self.sm.get(models.Snapshot, snapshot_id)
self.sm.delete(snapshot)
self.sm.delete(execution)
raise
def restore_snapshot(self,
snapshot_id,
force,
bypass_maintenance,
timeout,
restore_certificates,
no_reboot):
# Throws error if no snapshot found
snapshot = self.sm.get(models.Snapshot, snapshot_id)
if snapshot.status == SnapshotState.FAILED:
raise manager_exceptions.SnapshotActionError(
'Failed snapshot cannot be restored'
)
execution = models.Execution(
workflow_id='restore_snapshot',
parameters={
'snapshot_id': snapshot_id,
'config': self._get_conf_for_snapshots_wf(),
'force': force,
'timeout': timeout,
'restore_certificates': restore_certificates,
'no_reboot': no_reboot,
'premium_enabled': premium_enabled,
'user_is_bootstrap_admin': current_user.is_bootstrap_admin
},
is_system_workflow=True,
status=ExecutionState.PENDING,
)
self.sm.put(execution)
return execution, self.prepare_executions(
[execution],
bypass_maintenance=bypass_maintenance)
def _validate_plugin_yaml(self, plugin):
"""Is the plugin YAML file valid?"""
with open(plugin.yaml_file_path()) as f:
plugin_yaml = yaml.safe_load(f)
plugins = plugin_yaml.get(constants.PLUGINS, {})
if not plugins:
raise manager_exceptions.InvalidPluginError(
'Plugin YAML file must contain "plugins" key.'
)
for plugin_spec in plugins.values():
if not plugin_spec.get(constants.PLUGIN_PACKAGE_NAME) == \
plugin.package_name:
raise manager_exceptions.InvalidPluginError(
'Plugin package name in YAML file must '
'match plugin package name in Wagon archive. '
'YAML package name:{0},'
'Wagon package name:{1}'.format(plugin_spec.get(
constants.PLUGIN_PACKAGE_NAME), plugin.package_name)
)
return True
def update_plugins(self, plugins_update,
no_changes_required=False,
auto_correct_types=False,
reevaluate_active_statuses=False):
"""Executes the plugin update workflow.
:param plugins_update: a PluginUpdate object.
:param no_changes_required: True if a fake execution should be created.
        :return: the execution, and the workflow messages to dispatch.
"""
execution = models.Execution(
workflow_id='update_plugin',
parameters={
'update_id': plugins_update.id,
'deployments_to_update': plugins_update.deployments_to_update,
'temp_blueprint_id': plugins_update.temp_blueprint_id,
'force': plugins_update.forced,
'auto_correct_types': auto_correct_types,
'reevaluate_active_statuses': reevaluate_active_statuses,
},
status=ExecutionState.PENDING,
is_system_workflow=True
)
if no_changes_required:
execution.status = ExecutionState.TERMINATED
self.sm.put(execution)
return execution, []
else:
self.sm.put(execution)
return execution, self.prepare_executions(
[execution],
allow_overlapping_running_wf=True)
def remove_plugin(self, plugin_id, force):
# Verify plugin exists and can be removed
plugin = self.sm.get(models.Plugin, plugin_id)
validate_global_modification(plugin)
self._check_for_running_executions(
self._active_system_wide_execution_filter(), queue=False)
if not force:
affected_blueprint_ids = []
for b in self.sm.list(
models.Blueprint,
include=['id', 'plan'],
filters={'state': BlueprintUploadState.UPLOADED},
get_all_results=True,
):
if any(plugin.package_name == p.get('package_name')
and plugin.package_version == p.get('package_version')
for p in self._blueprint_plugins(b)):
affected_blueprint_ids.append(b.id)
if affected_blueprint_ids:
raise manager_exceptions.PluginInUseError(
'Plugin "{0}" is currently in use in blueprints: {1}. '
'You can "force" plugin removal if needed.'.format(
plugin.id, ', '.join(affected_blueprint_ids)))
workflow_executor.uninstall_plugin(plugin)
# Remove from storage
self.sm.delete(plugin)
# Remove from file system
archive_path = utils.get_plugin_archive_path(plugin_id,
plugin.archive_name)
shutil.rmtree(os.path.dirname(archive_path), ignore_errors=True)
@staticmethod
def _blueprint_plugins(blueprint):
return blueprint.plan[constants.WORKFLOW_PLUGINS_TO_INSTALL] + \
blueprint.plan[constants.DEPLOYMENT_PLUGINS_TO_INSTALL] + \
extract_host_agent_plugins_from_plan(blueprint.plan)
def upload_blueprint(self, blueprint_id, app_file_name, blueprint_url,
file_server_root, validate_only=False, labels=None):
execution = models.Execution(
workflow_id='upload_blueprint',
parameters={
'blueprint_id': blueprint_id,
'app_file_name': app_file_name,
'url': blueprint_url,
'file_server_root': file_server_root,
'validate_only': validate_only,
'labels': labels,
},
status=ExecutionState.PENDING,
)
self.sm.put(execution)
messages = self.prepare_executions([execution])
return execution, messages
def publish_blueprint(self,
application_dir,
application_file_name,
resources_base,
blueprint_id,
private_resource,
visibility):
plan = self.parse_plan(
application_dir, application_file_name, resources_base)
return self.publish_blueprint_from_plan(application_file_name,
blueprint_id,
plan,
private_resource,
visibility)
def publish_blueprint_from_plan(self,
application_file_name,
blueprint_id,
plan,
private_resource,
visibility,
state=None):
now = utils.get_formatted_timestamp()
visibility = self.get_resource_visibility(models.Blueprint,
blueprint_id,
visibility,
private_resource)
new_blueprint = models.Blueprint(
plan=plan,
id=blueprint_id,
description=plan.get('description'),
created_at=now,
updated_at=now,
main_file_name=application_file_name,
visibility=visibility,
state=state
)
return self.sm.put(new_blueprint)
def validate_blueprint(self,
application_dir,
application_file_name,
resources_base):
self.parse_plan(application_dir, application_file_name, resources_base)
@staticmethod
def parse_plan(application_dir, application_file_name, resources_base,
resolver_parameters=None):
dsl_location = os.path.join(
resources_base,
application_dir,
application_file_name
)
try:
return tasks.parse_dsl(
dsl_location,
resources_base,
**app_context.get_parser_context(
resolver_parameters=resolver_parameters)
)
except Exception as ex:
raise manager_exceptions.DslParseException(str(ex))
@staticmethod
def _remove_folder(folder_name, blueprints_location):
blueprint_folder = os.path.join(
config.instance.file_server_root,
blueprints_location,
utils.current_tenant.name,
folder_name.id)
# Don't cry if the blueprint folder never got created
if os.path.exists(blueprint_folder):
shutil.rmtree(blueprint_folder)
def delete_blueprint(self, blueprint_id, force, remove_files=True):
blueprint = self.sm.get(models.Blueprint, blueprint_id)
validate_global_modification(blueprint)
if blueprint.state in BlueprintUploadState.FAILED_STATES:
return self.sm.delete(blueprint)
if (blueprint.state and not force and
blueprint.state != BlueprintUploadState.UPLOADED):
# don't allow deleting blueprints while still uploading,
# so we don't leave a dirty file system
raise manager_exceptions.InvalidBlueprintError(
'Blueprint `{}` is still {}.'.format(blueprint.id,
blueprint.state))
if not force:
for b in self.sm.list(models.Blueprint,
include=['id', 'plan', 'state'],
get_all_results=True):
# we can't know whether the blueprint's plan will use the
# blueprint we try to delete, before we actually have a plan
if b.state not in BlueprintUploadState.FAILED_STATES \
and b.plan \
and blueprint_id in \
b.plan.get(constants.IMPORTED_BLUEPRINTS, []):
raise manager_exceptions.BlueprintInUseError(
'Blueprint {} is currently in use. You can "force" '
'blueprint removal.'.format(blueprint_id))
if len(blueprint.deployments) > 0:
raise manager_exceptions.DependentExistsError(
"Can't delete blueprint {0} - There exist "
"deployments for this blueprint; Deployments ids: {1}"
.format(blueprint_id,
','.join(dep.id for dep in blueprint.deployments)))
if remove_files:
# Delete blueprint resources from file server
self._remove_folder(
folder_name=blueprint,
blueprints_location=FILE_SERVER_BLUEPRINTS_FOLDER)
self._remove_folder(
folder_name=blueprint,
blueprints_location=FILE_SERVER_UPLOADED_BLUEPRINTS_FOLDER)
return self.sm.delete(blueprint)
def check_deployment_delete(self, deployment, force=False):
"""Check that deployment can be deleted"""
executions = self.sm.list(models.Execution, filters={
'deployment_id': deployment.id,
'status': (
ExecutionState.ACTIVE_STATES + ExecutionState.QUEUED_STATE
)
}, get_all_results=True)
if executions:
running_ids = ','.join(
execution.id for execution in executions
if execution.status not in ExecutionState.END_STATES
)
raise manager_exceptions.DependentExistsError(
f"Can't delete deployment {deployment.id} - There are "
f"running or queued executions for this deployment. "
f"Running executions ids: {running_ids}"
)
idds = self._get_blocking_dependencies(
deployment, skip_component_children=False)
if idds:
formatted_dependencies = '\n'.join(
f'[{i}] {idd.format()}' for i, idd in enumerate(idds, 1)
)
if force:
current_app.logger.warning(
"Force-deleting deployment %s despite having the "
"following existing dependent installations\n%s",
deployment.id, formatted_dependencies
)
else:
raise manager_exceptions.DependentExistsError(
f"Can't delete deployment {deployment.id} - the following "
f"existing installations depend on it:\n"
f"{formatted_dependencies}"
)
if not force:
# validate either all nodes for this deployment are still
# uninitialized or have been deleted
node_instances = self.sm.list(models.NodeInstance, filters={
'deployment_id': deployment.id,
'state': lambda col: ~col.in_(['uninitialized', 'deleted']),
}, include=['id'], get_all_results=True)
if node_instances:
raise manager_exceptions.DependentExistsError(
f"Can't delete deployment {deployment.id} - There are "
f"live nodes for this deployment. Live nodes ids: "
f"{ ','.join(ni.id for ni in node_instances) }"
)
def delete_deployment(self, deployment):
"""Delete the deployment.
This is run when delete-dep-env finishes.
"""
# check for external targets
deployment_dependencies = self.sm.list(
models.InterDeploymentDependencies,
filters={'source_deployment': deployment})
external_targets = set(
json.dumps(dependency.external_target) for dependency in
deployment_dependencies if dependency.external_target)
if external_targets:
self._clean_dependencies_from_external_targets(
deployment, external_targets)
parents = self.sm.list(
models.Deployment, filters={'id': deployment.deployment_parents})
parent_storage_ids = set()
if parents:
self.delete_deployment_from_labels_graph([deployment], parents)
parent_storage_ids = {p._storage_id for p in parents}
deployment_folder = os.path.join(
config.instance.file_server_root,
FILE_SERVER_DEPLOYMENTS_FOLDER,
utils.current_tenant.name,
deployment.id)
if os.path.exists(deployment_folder):
shutil.rmtree(deployment_folder)
self.sm.delete(deployment)
return parent_storage_ids
def _clean_dependencies_from_external_targets(self,
deployment,
external_targets):
manager_ips = [manager.private_ip for manager in self.sm.list(
models.Manager)]
external_source = {
'deployment': deployment.id,
'tenant': deployment.tenant_name,
'host': manager_ips
}
for target in external_targets:
target_client_config = json.loads(target)['client_config']
external_client = CloudifyClient(**target_client_config)
dep = external_client.inter_deployment_dependencies.list()
dep_for_removal = [d for d in dep if
d['external_source'] == external_source]
for d in dep_for_removal:
external_client.inter_deployment_dependencies.delete(
dependency_creator=d['dependency_creator'],
source_deployment=d['source_deployment_id'] or ' ',
target_deployment=d['target_deployment_id'] or ' ',
external_source=external_source
)
def reset_operations(self, execution, force=False):
"""Resume the execution: restart failed operations.
All operations that were failed are going to be retried,
the execution itself is going to be set to pending again.
Operations that were retried by another operation, will
not be reset.
"""
from_states = {
cloudify_tasks.TASK_RESCHEDULED,
cloudify_tasks.TASK_FAILED
}
if force:
# with force, we resend all tasks which haven't finished yet
from_states |= {
cloudify_tasks.TASK_STARTED,
cloudify_tasks.TASK_SENT,
cloudify_tasks.TASK_SENDING,
}
tasks_graphs = self.sm.list(models.TasksGraph,
filters={'execution': execution},
get_all_results=True)
for graph in tasks_graphs:
operations = self.sm.list(models.Operation,
filters={'tasks_graph': graph},
get_all_results=True)
retried_operations = set(
op.parameters['retried_task']
for op in operations
if op.parameters.get('retried_task'))
for operation in operations:
if operation.id in retried_operations:
continue
if operation.state in from_states:
operation.state = cloudify_tasks.TASK_PENDING
operation.parameters['current_retries'] = 0
self.sm.update(operation,
modified_attrs=('parameters', 'state'))
def resume_execution(self, execution_id, force=False):
execution = self.sm.get(models.Execution, execution_id)
if execution.status in {ExecutionState.CANCELLED,
ExecutionState.FAILED}:
self.reset_operations(execution, force=force)
elif force:
raise manager_exceptions.ConflictError(
'Cannot force-resume execution: `{0}` in state: `{1}`'
.format(execution.id, execution.status))
elif execution.status != ExecutionState.STARTED:
# not force and not cancelled/failed/started - invalid:
raise manager_exceptions.ConflictError(
'Cannot resume execution: `{0}` in state: `{1}`'
.format(execution.id, execution.status))
execution.status = ExecutionState.PENDING
execution.ended_at = None
execution.resume = True
message = execution.render_message(bypass_maintenance=False)
db.session.commit()
workflow_executor.execute_workflow([message])
# Dealing with the inner Components' deployments
components_executions = self._find_all_components_executions(
execution.deployment_id)
        for exec_id in components_executions:
            # use a separate variable so that the execution returned below is
            # the resumed execution, not the last component execution fetched
            component_execution = self.sm.get(models.Execution, exec_id)
            if component_execution.status in [ExecutionState.CANCELLED,
                                              ExecutionState.FAILED]:
                self.resume_execution(exec_id, force)
return execution
def get_component_executions(self, execution):
workflow = execution.get_workflow()
if not workflow.get('is_cascading', False):
return []
component_executions = []
components_dep_ids = self._find_all_components_deployment_id(
execution.deployment.id)
for component_dep_id in components_dep_ids:
dep = self.sm.get(models.Deployment, component_dep_id)
component_execution = models.Execution(
deployment=dep,
workflow_id=execution.workflow_id,
parameters=execution.parameters,
allow_custom_parameters=execution.allow_custom_parameters,
is_dry_run=execution.is_dry_run,
creator=execution.creator,
status=ExecutionState.PENDING,
)
self.sm.put(component_execution)
component_executions.append(component_execution)
return component_executions
def prepare_executions(self, executions, *, force=False, queue=False,
bypass_maintenance=None, wait_after_fail=600,
allow_overlapping_running_wf=False,
commit=True):
executions = list(executions)
messages = []
errors = []
while executions:
exc = executions.pop()
exc.ensure_defaults()
try:
if exc.is_system_workflow:
if self._system_workflow_modifies_db(exc.workflow_id):
self.assert_no_snapshot_creation_running_or_queued(exc)
elif exc.deployment:
self._check_allow_global_execution(exc.deployment)
self._verify_dependencies_not_affected(exc, force)
except Exception as e:
errors.append(e)
exc.status = ExecutionState.FAILED
exc.error = str(e)
self.sm.update(exc)
continue
should_queue = queue
if exc.is_system_workflow \
and exc.deployment is None \
and not allow_overlapping_running_wf:
should_queue = self._check_for_running_executions(
self._any_active_executions_filter(exc), queue)
elif not allow_overlapping_running_wf:
should_queue = self.check_for_executions(
exc, force, queue)
if should_queue:
self._workflow_queued(exc)
continue
if exc.deployment \
and exc.workflow_id != 'create_deployment_environment':
# refresh in case create-dep-env JUST finished, between the
# time we fetched the deployment, and checked that we don't
# need to queue. No need for create-dep-env, because the
# deployment is not persistent yet in that case
self.sm.refresh(exc.deployment)
message = exc.render_message(
wait_after_fail=wait_after_fail,
bypass_maintenance=bypass_maintenance
)
exc.status = ExecutionState.PENDING
messages.append(message)
workflow = exc.get_workflow()
if not workflow.get('is_cascading', False):
continue
component_executions = self.get_component_executions(exc)
executions.extend(component_executions)
if commit:
db.session.commit()
if errors:
raise errors[0]
return messages
@staticmethod
def _verify_workflow_in_deployment(wf_id, deployment, dep_id):
if wf_id not in deployment.workflows:
raise manager_exceptions.NonexistentWorkflowError(
'Workflow {0} does not exist in deployment {1}'.format(
wf_id, dep_id))
def check_for_executions(self, execution, force, queue):
"""Check if this execution should be queued.
:param execution: the execution object
:param force: allow running this execution in parallel with others
:param queue: if the execution can't be run in parallel with others,
and this is set, queue the execution. Otherwise, throw.
"""
system_exec_running = self._check_for_running_executions(
self._active_system_wide_execution_filter(), queue)
if system_exec_running:
return True
if force:
return system_exec_running
if not execution.deployment or not execution.deployment._storage_id:
return system_exec_running
return self._check_for_active_executions(execution, queue)
def _active_system_wide_execution_filter(self, *_):
return {
'is_system_workflow': [True],
'status': ExecutionState.ACTIVE_STATES + [ExecutionState.QUEUED],
}
def _any_active_executions_filter(self, execution):
return {
'status': ExecutionState.ACTIVE_STATES,
'id': lambda col: col != execution.id,
}
def _check_for_active_executions(self, execution, queue):
def status_filter(col):
if execution.created_at is not None:
return sql_or(
col.in_(ExecutionState.ACTIVE_STATES),
sql_and(
col == ExecutionState.QUEUED,
models.Execution.created_at < execution.created_at
)
)
else:
return col.in_(
ExecutionState.ACTIVE_STATES + [ExecutionState.QUEUED]
)
running = self.list_executions(
filters={
'deployment': execution.deployment,
'id': lambda col: col != execution.id,
'status': status_filter
},
is_include_system_workflows=True
).items
if not running:
return False
if queue or execution.scheduled_for:
return True
else:
raise manager_exceptions.ExistingRunningExecutionError(
f'The following executions are currently running for this '
f'deployment: {running}. To execute this workflow anyway, '
f'pass "force=true" as a query parameter to this request')
def _check_for_running_executions(self, filters, queue):
execution_ids = [
e.id
for e in self.list_executions(is_include_system_workflows=True,
filters=filters,
all_tenants=True,
get_all_results=True).items
]
if execution_ids and queue:
return True
elif execution_ids:
raise manager_exceptions.ExistingRunningExecutionError(
f'Cannot start execution because there are other executions '
f'running: { ", ".join(execution_ids) }'
)
else:
return False
@staticmethod
def _system_workflow_modifies_db(wf_id):
""" Returns `True` if the workflow modifies the DB and
needs to be blocked while a `create_snapshot` workflow
is running or queued.
"""
return wf_id == 'uninstall_plugin'
def _retrieve_components_from_deployment(self, deployment_id_filter):
return [node.id for node in
self.sm.list(models.Node,
include=['type_hierarchy', 'id'],
filters=deployment_id_filter,
get_all_results=True)
if 'cloudify.nodes.Component' in node.type_hierarchy]
def _retrieve_all_components_dep_ids(self, components_ids, deployment_id):
components_deployment_ids = []
for component in components_ids:
node_instance_filter = self.create_filters_dict(
deployment_id=deployment_id, node_id=component)
node_instance = self.sm.list(
models.NodeInstance,
filters=node_instance_filter,
get_all_results=True,
include=['runtime_properties',
'id']
).items[0]
component_deployment_props = node_instance.runtime_properties.get(
'deployment', {})
# This runtime property is set when a Component node is starting
# install workflow.
component_deployment_id = component_deployment_props.get(
'id', None)
if component_deployment_id:
components_deployment_ids.append(component_deployment_id)
return components_deployment_ids
def _retrieve_all_component_executions(self, components_deployment_ids):
executions = []
for deployment_id in components_deployment_ids:
deployment_id_filter = self.create_filters_dict(
deployment_id=deployment_id)
# Getting the last execution associated with the Component's
# deployment, which is the only one running now.
executions.append([execution.id for execution
in self.sm.list(models.Execution,
include=['id'],
sort={'created_at': 'desc'},
filters=deployment_id_filter,
get_all_results=True)][0])
return executions
def _find_all_components_deployment_id(self, deployment_id):
deployment_id_filter = self.create_filters_dict(
deployment_id=deployment_id)
components_node_ids = self._retrieve_components_from_deployment(
deployment_id_filter)
return self._retrieve_all_components_dep_ids(components_node_ids,
deployment_id)
def _find_all_components_executions(self, deployment_id):
components_deployment_ids = self._find_all_components_deployment_id(
deployment_id)
return self._retrieve_all_component_executions(
components_deployment_ids)
def cancel_execution(self, execution_ids, force=False, kill=False):
"""
        Cancel one or more executions by their ids
If force is False (default), this method will request the
executed workflow to gracefully terminate. It is up to the workflow
to follow up on that request.
If force is used, this method will request the abrupt and immediate
termination of the executed workflow. This is valid for all
workflows, regardless of whether they provide support for graceful
termination or not.
If kill is used, this method means that the process executing the
workflow is forcefully stopped, even if it is stuck or unresponsive.
Note that in either case, the execution is not yet cancelled upon
returning from the method. Instead, it'll be in a 'cancelling' or
'force_cancelling' status (as can be seen in models.Execution). Once
the execution is truly stopped, it'll be in 'cancelled' status (unless
force was not used and the executed workflow doesn't support
graceful termination, in which case it might simply continue
regardless and end up with a 'terminated' status)
        :param execution_ids: The execution id, or a list of execution ids
        :param force: A boolean describing whether to force cancellation
        :param kill: A boolean describing whether to kill-cancel, i.e. forcefully
                     stop the process executing the workflow
:return: The updated execution object
:rtype: models.Execution
:raises manager_exceptions.IllegalActionError
"""
def _validate_cancel_execution(_execution: models.Execution,
_kill_execution: bool,
_force_execution: bool):
if _kill_execution:
return
if _force_execution and \
_execution.status == ExecutionState.CANCELLING:
return
if _execution.status in (ExecutionState.PENDING,
ExecutionState.STARTED,
ExecutionState.SCHEDULED):
return
raise manager_exceptions.IllegalActionError(
"Can't {0}cancel execution {1} because it's in "
"status {2}".format('force-' if _force_execution else '',
_execution.id, _execution.status))
if kill:
new_status = ExecutionState.KILL_CANCELLING
force = True
elif force:
new_status = ExecutionState.FORCE_CANCELLING
else:
new_status = ExecutionState.CANCELLING
if not isinstance(execution_ids, list):
execution_ids = [execution_ids]
        # Prepare a dict mapping each execution's storage_id (int) to a
        # (kill_execution, execution_id) tuple for the executions to be cancelled.
execution_storage_id_kill = {}
with self.sm.transaction():
executions = self.sm.list(models.Execution,
filters={'id': lambda col:
col.in_(execution_ids)})
if len(executions) > 0:
executions = executions.items
else:
raise manager_exceptions.NotFoundError(
f"Requested `Execution` {execution_ids} was not found")
result = None
while executions:
execution = executions.pop()
kill_execution, force_execution = kill, force
# When a user cancels queued execution automatically
# use the kill flag
if execution.status in (ExecutionState.QUEUED,
ExecutionState.SCHEDULED):
kill_execution, force_execution = True, True
_validate_cancel_execution(execution,
kill_execution, force_execution)
execution_storage_id_kill[execution._storage_id] = \
kill_execution, execution.id
# Dealing with the inner Components' deployments
components_executions = self._find_all_components_executions(
execution.deployment_id)
for exec_id in components_executions:
component_execution = self.sm.get(models.Execution,
exec_id)
if component_execution.status not in \
ExecutionState.END_STATES:
executions.append(component_execution)
result = execution
# Do the cancelling for a list of DB-transaction-locked executions.
with self.sm.transaction():
for execution in self.sm.list(
models.Execution, locking=True,
filters={'_storage_id': lambda col:
col.in_(execution_storage_id_kill.keys())}):
execution.status = new_status
execution.error = ''
execution = self.sm.update(execution)
if execution.deployment_id:
dep = execution.deployment
dep.latest_execution = execution
dep.deployment_status = \
dep.evaluate_deployment_status()
self.sm.update(dep)
result = execution
# Kill workflow executors if kill-cancelling
workflow_executor.cancel_execution(
[execution_id for storage_id, (kill_execution, execution_id)
in execution_storage_id_kill.items() if kill_execution])
return result
@staticmethod
def prepare_deployment_for_storage(deployment_id, deployment_plan):
now = utils.get_formatted_timestamp()
return models.Deployment(
id=deployment_id,
created_at=now,
updated_at=now,
description=deployment_plan['description'],
workflows=deployment_plan['workflows'],
inputs=deployment_plan['inputs'],
policy_types=deployment_plan['policy_types'],
policy_triggers=deployment_plan['policy_triggers'],
groups=deployment_plan['groups'],
scaling_groups=deployment_plan['scaling_groups'],
outputs=deployment_plan['outputs'],
capabilities=deployment_plan.get('capabilities', {})
)
def prepare_deployment_nodes_for_storage(self,
deployment_plan,
node_ids=None):
"""
create deployment nodes in storage based on a provided blueprint
:param deployment_plan: deployment_plan
:param node_ids: optionally create only nodes with these ids
"""
node_ids = node_ids or []
if not isinstance(node_ids, list):
node_ids = [node_ids]
raw_nodes = deployment_plan['nodes']
if node_ids:
raw_nodes = \
[node for node in raw_nodes if node['id'] in node_ids]
nodes = []
for raw_node in raw_nodes:
scalable = raw_node['capabilities']['scalable']['properties']
nodes.append(models.Node(
id=raw_node['name'],
type=raw_node['type'],
type_hierarchy=raw_node['type_hierarchy'],
number_of_instances=scalable['current_instances'],
planned_number_of_instances=scalable['current_instances'],
deploy_number_of_instances=scalable['default_instances'],
min_number_of_instances=scalable['min_instances'],
max_number_of_instances=scalable['max_instances'],
host_id=raw_node['host_id'] if 'host_id' in raw_node else None,
properties=raw_node['properties'],
operations=raw_node['operations'],
plugins=raw_node['plugins'],
plugins_to_install=raw_node.get('plugins_to_install'),
relationships=self._prepare_node_relationships(raw_node)))
return nodes
def _prepare_deployment_node_instances_for_storage(self,
deployment_id,
dsl_node_instances):
# The index is the index of a node instance list for a
# node. It is used for serial operations on node instances of the
# same node. First we get the list of current node instances for the
# deployment.
deployment_id_filter = self.create_filters_dict(
deployment_id=deployment_id)
all_deployment_node_instances = self.sm.list(
models.NodeInstance,
filters=deployment_id_filter,
get_all_results=True
)
# We build a dictionary in order to track the current index.
current_node_index = defaultdict(int)
for ni in all_deployment_node_instances:
if ni.index > current_node_index[ni.node_id]:
current_node_index[ni.node_id] = ni.index
node_instances = []
for node_instance in dsl_node_instances:
node = get_node(deployment_id, node_instance['node_id'])
# Update current node index.
index = node_instance.get(
'index', current_node_index[node.id] + 1)
current_node_index[node.id] = index
instance_id = node_instance['id']
scaling_groups = node_instance.get('scaling_groups', [])
relationships = node_instance.get('relationships', [])
host_id = node_instance.get('host_id')
instance = models.NodeInstance(
id=instance_id,
host_id=host_id,
index=index,
relationships=relationships,
state='uninitialized',
runtime_properties={},
version=None,
scaling_groups=scaling_groups
)
instance.set_node(node)
node_instances.append(instance)
return node_instances
def _create_deployment_nodes(self,
deployment_id,
plan,
node_ids=None):
nodes = self.prepare_deployment_nodes_for_storage(plan, node_ids)
deployment = self.sm.get(models.Deployment, deployment_id)
for node in nodes:
node.set_deployment(deployment)
self.sm.put(node)
def _create_deployment_node_instances(self,
deployment_id,
dsl_node_instances):
node_instances = self._prepare_deployment_node_instances_for_storage(
deployment_id,
dsl_node_instances)
for node_instance in node_instances:
self.sm.put(node_instance)
def assert_no_snapshot_creation_running_or_queued(self, execution=None):
"""
Make sure no 'create_snapshot' workflow is currently running or queued.
We do this to avoid DB modifications during snapshot creation.
"""
status = ExecutionState.ACTIVE_STATES + ExecutionState.QUEUED_STATE
filters = {'status': status}
if execution is not None:
filters['id'] = lambda col: col != execution.id
for e in self.list_executions(is_include_system_workflows=True,
filters=filters,
get_all_results=True).items:
if e.workflow_id == 'create_snapshot':
raise manager_exceptions.ExistingRunningExecutionError(
'You cannot start an execution that modifies DB state'
' while a `create_snapshot` workflow is running or queued'
' (snapshot id: {0})'.format(e.id))
def cleanup_failed_deployment(self, deployment_id):
"""If create-dep-env failed, delete the deployment.
        This is so that it's possible to retry creating the deployment,
        e.g. if the user provided invalid inputs.
"""
dep = self.sm.get(models.Deployment, deployment_id, fail_silently=True)
if not dep:
return
if len(dep.executions) != 1:
return
if dep.executions[0].workflow_id != 'create_deployment_environment':
return
create_env_execution = dep.executions[0]
if create_env_execution.status == ExecutionState.FAILED:
self.delete_deployment(dep)
def create_deployment(self,
blueprint,
deployment_id,
private_resource,
visibility,
site=None,
runtime_only_evaluation=False,
display_name=None):
verify_blueprint_uploaded_state(blueprint)
visibility = self.get_resource_visibility(models.Deployment,
deployment_id,
visibility,
private_resource)
if (visibility == VisibilityState.GLOBAL and
blueprint.visibility != VisibilityState.GLOBAL):
raise manager_exceptions.ForbiddenError(
f"Can't create global deployment {deployment_id} because "
f"blueprint {blueprint.id} is not global"
)
now = datetime.utcnow()
display_name = display_name or deployment_id
new_deployment = models.Deployment(
id=deployment_id,
display_name=display_name,
created_at=now,
updated_at=now,
deployment_status=DeploymentState.REQUIRE_ATTENTION,
)
new_deployment.runtime_only_evaluation = runtime_only_evaluation
new_deployment.blueprint = blueprint
new_deployment.visibility = visibility
if site:
validate_deployment_and_site_visibility(new_deployment, site)
new_deployment.site = site
self.sm.put(new_deployment)
return new_deployment
@staticmethod
def get_deployment_parents_from_labels(labels):
parents = []
labels = labels or []
for label_key, label_value in labels:
if label_key == 'csys-obj-parent':
parents.append(label_value)
return parents
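    # Illustrative sketch (not part of the original module): only the values
    # of `csys-obj-parent` labels are collected, e.g.
    #   [('csys-obj-parent', 'env-1'), ('csys-obj-type', 'service')]
    #       -> ['env-1']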
def get_object_types_from_labels(self, labels):
obj_types = set()
for key, value in labels:
if key == 'csys-obj-type' and value:
obj_types.add(value)
return obj_types
def get_deployment_object_types_from_labels(self, resource, labels):
labels_to_add = self.get_labels_to_create(resource, labels)
labels_to_delete = self.get_labels_to_delete(resource, labels)
created_types = self.get_object_types_from_labels(labels_to_add)
delete_types = self.get_object_types_from_labels(
[(label.key, label.value) for label in labels_to_delete]
)
return created_types, delete_types
def get_missing_deployment_parents(self, parents):
if not parents:
return
result = self.sm.list(
models.Deployment,
include=['id'],
filters={'id': lambda col: col.in_(parents)}
).items
_existing_parents = [_parent.id for _parent in result]
missing_parents = set(parents) - set(_existing_parents)
return missing_parents
def verify_deployment_parents_existence(self, parents, resource_id,
resource_type):
missing_parents = self.get_missing_deployment_parents(parents)
if missing_parents:
            raise manager_exceptions.DeploymentParentNotFound(
                '{0} {1} references deployments via the label '
                '`csys-obj-parent` that do not exist. Make sure that '
                'deployment(s) {2} exist before creating {3}'.format(
                    resource_type.capitalize(), resource_id,
                    ','.join(missing_parents), resource_type)
            )
def verify_attaching_deployment_to_parents(self, dep, parents):
self.verify_deployment_parents_existence(parents, dep, 'deployment')
dependent_ids = [d.id for d in dep.get_all_dependents()]
for parent_id in parents:
if parent_id in dependent_ids:
raise manager_exceptions.ConflictError(
f'cyclic dependency between {dep.id} and {parent_id}'
)
def add_deployment_to_labels_graph(self, deployments, parent_ids):
if not deployments or not parent_ids:
return
parents = self.sm.list(
models.Deployment, filters={'id': list(parent_ids)})
missing_parents = set(parent_ids) - {d.id for d in parents}
if missing_parents:
raise manager_exceptions.DeploymentParentNotFound(
f'Deployment(s) referenced by `csys-obj-parent` not found: '
f'{ ",".join(missing_parents) }'
)
all_ancestors = models.DeploymentLabelsDependencies\
.get_dependencies(
[p._storage_id for p in parents], dependents=False)
cyclic_deps = set(all_ancestors) & set(deployments)
cyclic_deps |= (set(deployments) & set(parents))
if cyclic_deps:
cyclic_ids = {d.id for d in cyclic_deps}
raise manager_exceptions.ConflictError(
f'cyclic dependencies: { ",".join(cyclic_ids) }'
)
for parent in sorted(parents, key=lambda p: p._storage_id):
for dep in sorted(deployments, key=lambda d: d._storage_id):
dependency = models.DeploymentLabelsDependencies(
source_deployment=dep,
target_deployment=parent,
)
self.sm.put(dependency)
# Add deployment to parent's consumers
existing_consumer_label = self.sm.list(
models.DeploymentLabel,
filters={'key': 'csys-consumer-id',
'value': dep.id,
'deployment': parent}
)
if not existing_consumer_label:
new_label = {'key': 'csys-consumer-id',
'value': dep.id,
'created_at': datetime.utcnow(),
'creator': current_user,
'deployment': parent}
self.sm.put(models.DeploymentLabel(**new_label))
def delete_deployment_from_labels_graph(self, deployments, parents):
if not parents or not deployments:
return
dld = models.DeploymentLabelsDependencies.__table__
db.session.execute(
dld.delete()
.where(
db.and_(
dld.c._target_deployment.in_(
{d._storage_id for d in parents}),
dld.c._source_deployment.in_(
{d._storage_id for d in deployments})
)
)
)
# Delete deployment from parent's consumers
for parent in parents:
for dep in deployments:
for label in parent.labels:
if (label.key, label.value) == \
('csys-consumer-id', dep.id):
self.sm.delete(label)
break
def install_plugin(self, plugin, manager_names=None, agent_names=None):
"""Send the plugin install task to the given managers or agents."""
if manager_names:
managers = self.sm.list(
models.Manager, filters={'hostname': manager_names})
existing_manager_names = {m.hostname for m in managers}
if existing_manager_names != set(manager_names):
missing_managers = set(manager_names) - existing_manager_names
raise manager_exceptions.NotFoundError(
"Cannot install: requested managers do not exist: {0}"
.format(', '.join(missing_managers)))
for name in existing_manager_names:
manager = self.sm.get(
models.Manager, None, filters={'hostname': name})
self.set_plugin_state(plugin, manager=manager,
state=PluginInstallationState.PENDING)
if agent_names:
agents = self.sm.list(models.Agent, filters={'name': agent_names})
existing_agent_names = {a.name for a in agents}
if existing_agent_names != set(agent_names):
missing_agents = set(agent_names) - existing_agent_names
raise manager_exceptions.NotFoundError(
"Cannot install: requested agents do not exist: {0}"
.format(', '.join(missing_agents)))
for name in existing_agent_names:
agent = self.sm.get(models.Agent, None, filters={'name': name})
self.set_plugin_state(plugin, agent=agent,
state=PluginInstallationState.PENDING)
if agent_names or manager_names:
workflow_executor.install_plugin(plugin)
return plugin
def set_plugin_state(self, plugin, state,
manager=None, agent=None, error=None):
filters = {
'_plugin_fk': plugin._storage_id,
'_agent_fk': agent._storage_id if agent else None,
'_manager_fk': manager.id if manager else None
}
pstate = self.sm.get(models._PluginState, None, filters=filters,
fail_silently=True)
if pstate is None:
pstate = models._PluginState(state=state, error=error, **filters)
self.sm.put(pstate)
else:
pstate.state = state
pstate.error = error
self.sm.update(pstate)
def check_blueprint_plugins_installed(self, plan):
plugins_list = plan.get(constants.DEPLOYMENT_PLUGINS_TO_INSTALL, [])
# validate that all central-deployment plugins are installed
for plugin in plugins_list:
self.validate_plugin_is_installed(plugin)
# validate that all host_agent plugins are installed
host_agent_plugins = extract_host_agent_plugins_from_plan(plan)
for plugin in host_agent_plugins:
self.validate_plugin_is_installed(plugin)
def validate_plugin_is_installed(self, plugin):
"""
        Check whether a plugin is already installed on the manager;
        if not, raise an appropriate exception.
:param plugin: A plugin from the blueprint
"""
if plugin['package_name'] == 'cloudify-diamond-plugin':
# It is meaningless to validate whether the diamond plugin is
# installed on the manager because it is an agent-only plugin.
# The name is hardcoded because it is currently the only plugin
# of its type but this check should be improved if that changes.
return
if not plugin['install']:
return
query_parameters = {'package_name': plugin['package_name']}
if plugin['package_version']:
query_parameters['package_version'] = plugin['package_version']
if plugin['distribution']:
query_parameters['distribution'] = plugin['distribution']
if plugin['distribution_version']:
query_parameters['distribution_version'] =\
plugin['distribution_version']
if plugin['distribution_release']:
query_parameters['distribution_release'] =\
plugin['distribution_release']
if plugin['supported_platform']:
query_parameters['supported_platform'] =\
plugin['supported_platform']
result = self.sm.list(models.Plugin, filters=query_parameters)
if result.metadata['pagination']['total'] == 0:
raise manager_exceptions.\
DeploymentPluginNotFound(
'Required plugin {}, version {} is not installed '
'on the manager'.format(
plugin['package_name'],
plugin['package_version'] or '`any`'))
def start_deployment_modification(self,
deployment_id,
modified_nodes,
context):
deployment = self.sm.get(models.Deployment, deployment_id)
deployment_id_filter = self.create_filters_dict(
deployment_id=deployment_id)
existing_modifications = self.sm.list(
models.DeploymentModification,
include=['id', 'status'],
filters=deployment_id_filter
)
active_modifications = [
m.id for m in existing_modifications
if m.status == DeploymentModificationState.STARTED]
if active_modifications:
raise \
manager_exceptions.ExistingStartedDeploymentModificationError(
'Cannot start deployment modification while there are '
'existing started deployment modifications. Currently '
'started deployment modifications: {0}'
.format(active_modifications))
# We need to store the pre-modification state here so that it can be
# used to roll back correctly on error.
# We have to deepcopy it because it contains a lot of mutable children
# which will then (sometimes) be modified by the other methods and
# result in a rollback that breaks the deployment and snapshots.
pre_modification = [
deepcopy(instance.to_dict()) for instance in
self.sm.list(models.NodeInstance,
filters=deployment_id_filter,
get_all_results=True)]
nodes = [node.to_dict() for node
in self.sm.list(models.Node, filters=deployment_id_filter,
get_all_results=True)]
node_instances = [instance.to_dict() for instance
in self.sm.list(models.NodeInstance,
filters=deployment_id_filter,
get_all_results=True)]
node_instances_modification = tasks.modify_deployment(
nodes=nodes,
previous_nodes=nodes,
previous_node_instances=node_instances,
modified_nodes=modified_nodes,
scaling_groups=deployment.scaling_groups)
node_instances_modification['before_modification'] = pre_modification
now = utils.get_formatted_timestamp()
modification_id = str(uuid.uuid4())
modification = models.DeploymentModification(
id=modification_id,
created_at=now,
ended_at=None,
status=DeploymentModificationState.STARTED,
modified_nodes=modified_nodes,
node_instances=node_instances_modification,
context=context)
modification.set_deployment(deployment)
self.sm.put(modification)
scaling_groups = deepcopy(deployment.scaling_groups)
for node_id, modified_node in modified_nodes.items():
if node_id in deployment.scaling_groups:
scaling_groups[node_id]['properties'].update({
'planned_instances': modified_node['instances']
})
deployment.scaling_groups = scaling_groups
else:
node = get_node(modification.deployment_id, node_id)
node.planned_number_of_instances = modified_node['instances']
self.sm.update(node)
self.sm.update(deployment)
added_and_related = node_instances_modification['added_and_related']
added_node_instances = []
for node_instance in added_and_related:
if node_instance.get('modification') == 'added':
added_node_instances.append(node_instance)
else:
node = get_node(
deployment_id=deployment_id,
node_id=node_instance['node_id']
)
target_names = [r['target_id'] for r in node.relationships]
current = self.sm.get(models.NodeInstance, node_instance['id'])
current_relationship_groups = {
target_name: list(group)
for target_name, group in itertools.groupby(
current.relationships,
key=lambda r: r['target_name'])
}
new_relationship_groups = {
target_name: list(group)
for target_name, group in itertools.groupby(
node_instance['relationships'],
key=lambda r: r['target_name'])
}
new_relationships = []
for target_name in target_names:
new_relationships += current_relationship_groups.get(
target_name, [])
new_relationships += new_relationship_groups.get(
target_name, [])
instance = self.sm.get(
models.NodeInstance,
node_instance['id'],
locking=True
)
instance.relationships = deepcopy(new_relationships)
instance.version += 1
self.sm.update(instance)
self._create_deployment_node_instances(deployment_id,
added_node_instances)
return modification
def finish_deployment_modification(self, modification_id):
modification = self.sm.get(
models.DeploymentModification,
modification_id
)
if modification.status in DeploymentModificationState.END_STATES:
raise manager_exceptions.DeploymentModificationAlreadyEndedError(
'Cannot finish deployment modification: {0}. It is already in'
' {1} status.'.format(modification_id,
modification.status))
deployment = self.sm.get(models.Deployment, modification.deployment_id)
modified_nodes = modification.modified_nodes
scaling_groups = deepcopy(deployment.scaling_groups)
for node_id, modified_node in modified_nodes.items():
if node_id in deployment.scaling_groups:
scaling_groups[node_id]['properties'].update({
'current_instances': modified_node['instances']
})
deployment.scaling_groups = scaling_groups
else:
node = get_node(modification.deployment_id, node_id)
node.number_of_instances = modified_node['instances']
self.sm.update(node)
self.sm.update(deployment)
node_instances = modification.node_instances
for node_instance_dict in node_instances['removed_and_related']:
instance = self.sm.get(
models.NodeInstance,
node_instance_dict['id'],
locking=True
)
if node_instance_dict.get('modification') == 'removed':
self.sm.delete(instance)
else:
removed_relationship_target_ids = set(
[rel['target_id']
for rel in node_instance_dict['relationships']])
new_relationships = [rel for rel in instance.relationships
if rel['target_id']
not in removed_relationship_target_ids]
instance.relationships = deepcopy(new_relationships)
instance.version += 1
self.sm.update(instance)
modification.status = DeploymentModificationState.FINISHED
modification.ended_at = utils.get_formatted_timestamp()
self.sm.update(modification)
return modification
def rollback_deployment_modification(self, modification_id):
modification = self.sm.get(
models.DeploymentModification,
modification_id
)
if modification.status in DeploymentModificationState.END_STATES:
raise manager_exceptions.DeploymentModificationAlreadyEndedError(
'Cannot rollback deployment modification: {0}. It is already '
'in {1} status.'.format(modification_id,
modification.status))
deployment = self.sm.get(models.Deployment, modification.deployment_id)
deployment_id_filter = self.create_filters_dict(
deployment_id=modification.deployment_id)
node_instances = self.sm.list(
models.NodeInstance,
filters=deployment_id_filter,
get_all_results=True
)
modified_instances = deepcopy(modification.node_instances)
modified_instances['before_rollback'] = [
instance.to_dict() for instance in node_instances]
for instance in node_instances:
self.sm.delete(instance)
for instance_dict in modified_instances['before_modification']:
self.add_node_instance_from_dict(instance_dict)
nodes_num_instances = {
node.id: node for node in self.sm.list(
models.Node,
filters=deployment_id_filter,
include=['id', 'number_of_instances'],
get_all_results=True)
}
scaling_groups = deepcopy(deployment.scaling_groups)
for node_id, modified_node in modification.modified_nodes.items():
if node_id in deployment.scaling_groups:
props = scaling_groups[node_id]['properties']
props['planned_instances'] = props['current_instances']
deployment.scaling_groups = scaling_groups
else:
node = get_node(modification.deployment_id, node_id)
node.planned_number_of_instances = nodes_num_instances[
node_id].number_of_instances
self.sm.update(node)
self.sm.update(deployment)
modification.status = DeploymentModificationState.ROLLEDBACK
modification.ended_at = utils.get_formatted_timestamp()
modification.node_instances = modified_instances
self.sm.update(modification)
return modification
def add_node_instance_from_dict(self, instance_dict):
# Remove the IDs from the dict - they don't have comparable columns
deployment_id = instance_dict.pop('deployment_id')
node_id = instance_dict.pop('node_id')
tenant_name = instance_dict.pop('tenant_name')
created_by = instance_dict.pop('created_by')
resource_availability = instance_dict.pop('resource_availability')
private_resource = instance_dict.pop('private_resource')
        # Link the node instance object to the node, and add it to the DB
new_node_instance = models.NodeInstance(**instance_dict)
node = get_node(deployment_id, node_id)
new_node_instance.set_node(node)
self.sm.put(new_node_instance)
# Manually update version, because of how `version_id_col` works in
# SQLAlchemy (it is set to 1 automatically)
new_node_instance.version = instance_dict['version']
self.sm.update(new_node_instance)
# Return the IDs to the dict for later use
instance_dict['deployment_id'] = deployment_id
instance_dict['node_id'] = node_id
instance_dict['tenant_name'] = tenant_name
instance_dict['created_by'] = created_by
# resource_availability and private_resource are deprecated.
# For backwards compatibility - adding it to the response.
instance_dict['resource_availability'] = resource_availability
instance_dict['private_resource'] = private_resource
def create_operation(self, id, name, dependencies,
parameters, type, graph_id=None,
graph_storage_id=None, state='pending'):
# allow passing graph_storage_id directly as an optimization, so that
# we don't need to query for the graph based on display id
if (graph_id and graph_storage_id) or \
(not graph_id and not graph_storage_id):
raise ValueError(
'Pass exactly one of graph_id and graph_storage_id')
if graph_id:
graph = self.sm.list(models.TasksGraph,
filters={'id': graph_id},
get_all_results=True,
all_tenants=True)[0]
graph_storage_id = graph._storage_id
operation = models.Operation(
id=id,
name=name,
_tasks_graph_fk=graph_storage_id,
created_at=utils.get_formatted_timestamp(),
state=state,
dependencies=dependencies,
parameters=parameters,
type=type,
)
self.sm.put(operation)
        # use the relationship so this works when only graph_storage_id is given
        execution = operation.tasks_graph.execution
if execution.total_operations is None:
execution.total_operations = 0
execution.finished_operations = 0
execution.total_operations += 1
self.sm.update(
execution,
modified_attrs=('total_operations', 'finished_operations'))
return operation
def create_tasks_graph(self, name, execution_id, operations=None,
created_at=None, graph_id=None):
execution = self.sm.list(models.Execution,
filters={'id': execution_id},
get_all_results=True,
all_tenants=True)[0]
created_at = created_at or datetime.utcnow()
graph = models.TasksGraph(
name=name,
_execution_fk=execution._storage_id,
created_at=created_at,
_tenant_id=execution._tenant_id,
_creator_id=execution._creator_id
)
if graph_id:
graph.id = graph_id
db.session.add(graph)
if execution.total_operations is None:
execution.total_operations = 0
execution.finished_operations = 0
if operations:
created_ops = []
for operation in operations:
operation.setdefault('state', 'pending')
op = models.Operation(
tenant=utils.current_tenant,
_creator_id=execution._creator_id,
created_at=operation.pop('created_at', created_at),
tasks_graph=graph,
**operation)
created_ops.append(op)
db.session.add(op)
execution.total_operations += sum(
not op.is_nop
for op in created_ops
)
execution.finished_operations += sum(
not op.is_nop and op.state in TERMINATED_TASK_STATES
for op in created_ops
)
self.sm.update(
execution,
modified_attrs=('total_operations', 'finished_operations'))
return graph
@staticmethod
def _prepare_node_relationships(raw_node):
if 'relationships' not in raw_node:
return []
prepared_relationships = []
for raw_relationship in raw_node['relationships']:
relationship = {
'target_id': raw_relationship['target_id'],
'type': raw_relationship['type'],
'type_hierarchy': raw_relationship['type_hierarchy'],
'properties': raw_relationship['properties'],
'source_operations': raw_relationship['source_operations'],
'target_operations': raw_relationship['target_operations'],
}
prepared_relationships.append(relationship)
return prepared_relationships
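    # Illustrative sketch (not part of the original module):
    #   _prepare_node_relationships({'name': 'n1'}) -> []
    # For a node with relationships, each raw relationship is reduced to the
    # six keys listed above; any additional keys on it are dropped.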
def verify_deployment_environment_created_successfully(self, deployment):
if not deployment.create_execution:
# the user removed the execution, let's assume they knew
# what they were doing and allow this
return
status = deployment.create_execution.status
if status == ExecutionState.TERMINATED:
return
elif status == ExecutionState.PENDING:
raise manager_exceptions \
.DeploymentEnvironmentCreationPendingError(
'Deployment environment creation is still pending, '
'try again in a minute')
elif status == ExecutionState.STARTED:
raise manager_exceptions\
.DeploymentEnvironmentCreationInProgressError(
'Deployment environment creation is still in progress, '
'try again in a minute')
elif status == ExecutionState.FAILED:
error_line = deployment.create_execution.error\
.strip().split('\n')[-1]
raise manager_exceptions.DeploymentCreationError(
"Can't launch executions since environment creation for "
"deployment {0} has failed: {1}".format(
deployment.id, error_line))
elif status in (
ExecutionState.CANCELLED, ExecutionState.CANCELLING,
ExecutionState.FORCE_CANCELLING):
raise manager_exceptions.DeploymentCreationError(
"Can't launch executions since the environment creation for "
"deployment {0} has been cancelled [status={1}]".format(
deployment.id, status))
else:
raise manager_exceptions.DeploymentCreationError(
'Unexpected deployment status for deployment {0} '
'[status={1}]'.format(deployment.id, status))
@staticmethod
def create_filters_dict(**kwargs):
filters = {}
for key, val in kwargs.items():
if val:
filters[key] = val
return filters or None
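    # Illustrative sketch (not part of the original module): falsy values are
    # dropped and an all-falsy call collapses to None, e.g.
    #   create_filters_dict(deployment_id='d1', status=None)
    #       -> {'deployment_id': 'd1'}
    #   create_filters_dict(deployment_id=None) -> None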
@staticmethod
def _get_only_user_execution_parameters(execution_parameters):
return {k: v for k, v in execution_parameters.items()
if not k.startswith('__')}
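    # Illustrative sketch (not part of the original module); the '__internal'
    # key below is a made-up example of an internal parameter name:
    #   {'__internal': 1, 'node_ids': ['n1']} -> {'node_ids': ['n1']}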
def update_provider_context(self, update, context_dict):
if update:
context_instance = self.sm.get(
models.ProviderContext,
context_dict['id']
)
else:
context_instance = models.ProviderContext(id=context_dict['id'])
context_instance.name = context_dict['name']
context_instance.context = context_dict['context']
self.sm.update(context_instance)
app_context.update_parser_context(context_dict['context'])
def get_resource_visibility(self,
model_class,
resource_id,
visibility,
private_resource=None,
plugin_info=None):
"""
Determine the visibility of the resource.
:param model_class: SQL DB table class
:param resource_id: The id of the resource
:param visibility: The new parameter for the user to set the
visibility of the resource.
        :param private_resource: The old parameter the user used to set
                                 the visibility, kept for backwards
                                 compatibility; it will be deprecated soon
        :param plugin_info: In case the resource is a plugin,
                            its package_name and archive_name
:return: The visibility to set
"""
# Validate we're not using the old parameter with new parameter
if private_resource is not None and visibility:
raise manager_exceptions.BadParametersError(
"The `private_resource` and `visibility` "
"parameters cannot be used together"
)
# Handle the old parameter
if private_resource:
return VisibilityState.PRIVATE
# Validate that global visibility is permitted
if visibility == VisibilityState.GLOBAL:
self.validate_global_permitted(model_class,
resource_id,
create_resource=True,
plugin_info=plugin_info)
return visibility or VisibilityState.TENANT
def set_deployment_visibility(self, deployment_id, visibility):
deployment = self.sm.get(models.Deployment, deployment_id)
blueprint = deployment.blueprint
if utils.is_visibility_wider(visibility, blueprint.visibility):
raise manager_exceptions.IllegalActionError(
"The visibility of deployment `{0}` can't be wider than "
"the visibility of its blueprint `{1}`. Current "
"blueprint visibility is {2}".format(deployment.id,
blueprint.id,
blueprint.visibility)
)
return self.set_visibility(models.Deployment,
deployment_id,
visibility)
def set_visibility(self, model_class, resource_id, visibility):
resource = self.sm.get(model_class, resource_id)
self.validate_visibility_value(model_class, resource, visibility)
# Set the visibility
resource.visibility = visibility
resource.updated_at = utils.get_formatted_timestamp()
return self.sm.update(resource)
def validate_visibility_value(self, model_class, resource, new_visibility):
current_visibility = resource.visibility
if utils.is_visibility_wider(current_visibility, new_visibility):
raise manager_exceptions.IllegalActionError(
"Can't set the visibility of `{0}` to {1} because it "
"already has wider visibility".format(resource.id,
new_visibility)
)
if new_visibility == VisibilityState.GLOBAL:
plugin_info = None
if model_class == models.Plugin:
plugin_info = {'package_name': resource.package_name,
'archive_name': resource.archive_name}
self.validate_global_permitted(model_class,
resource.id,
plugin_info=plugin_info)
def validate_global_permitted(self,
model_class,
resource_id,
create_resource=False,
plugin_info=None):
# Only admin is allowed to set a resource to global
if not is_create_global_permitted(self.sm.current_tenant):
raise manager_exceptions.ForbiddenError(
'User `{0}` is not permitted to set or create a global '
'resource'.format(current_user.username))
if model_class == models.Plugin:
archive_name = plugin_info['archive_name']
unique_filter = {
model_class.package_name: plugin_info['package_name'],
model_class.archive_name: archive_name
}
else:
unique_filter = {model_class.id: resource_id}
# Check if the resource is unique
max_resource_number = 0 if create_resource else 1
if self.sm.count(
model_class, unique_filter, all_tenants=True
) > max_resource_number:
raise manager_exceptions.IllegalActionError(
"Can't set or create the resource `{0}`, it's visibility "
"can't be global because it also exists in other tenants"
.format(resource_id)
)
def _check_allow_global_execution(self, deployment):
if (deployment.visibility == VisibilityState.GLOBAL and
deployment.tenant != self.sm.current_tenant and
not utils.can_execute_global_workflow(utils.current_tenant)):
raise manager_exceptions.ForbiddenError(
f'User `{current_user.username}` is not allowed to execute '
f'workflows on a global deployment {deployment.id} from a '
f'different tenant'
)
def _get_blocking_dependencies(
self,
deployment: models.Deployment,
skip_component_children: bool,
limit=3) -> typing.List[models.BaseDeploymentDependencies]:
"""Get dependencies that would block destructive actions on deployment
        This returns the dependencies that prevent the deployment from being
        uninstalled, stopped, or deleted.
        Those dependencies are:
        - children of this deployment: cannot delete a parent that has
          existing children - that would orphan them
        - component creators: cannot delete a deployment if it is a
          component of another deployment, unless that other deployment
          is currently being uninstalled as well
:param skip_component_children: do not include children who are
components of the given deployment. Components are also children,
so this has to be used to allow uninstalling a deployment that
uses some components.
:param limit: only return up to this many DLDs and this many IDDs
:return: a list of dependencies blocking destructive actions on
the given deployment
"""
dld = aliased(models.DeploymentLabelsDependencies)
idd = aliased(models.InterDeploymentDependencies)
children = (
db.session.query(dld)
.filter_by(target_deployment=deployment)
)
if skip_component_children:
children = children.filter(
~db.session.query(idd)
.filter(dld._target_deployment == idd._source_deployment)
.filter(idd.dependency_creator.like('component.%'))
.exists()
)
children = children.limit(limit)
component_creators = (
db.session.query(idd)
.filter_by(target_deployment=deployment)
.filter(
~db.session.query(models.Execution)
.filter(
models.Execution._deployment_fk == idd._source_deployment,
models.Execution.status.in_([
ExecutionState.STARTED,
ExecutionState.PENDING,
ExecutionState.QUEUED
]),
models.Execution.workflow_id.in_([
'stop', 'uninstall', 'update',
'csys_new_deployment_update'
])
)
.exists()
)
.filter(~sql_and(idd.external_source != text("'null'"),
idd.dependency_creator.like('component.%')))
.limit(limit)
)
# TODO: the last filter is a temporary measure to allow external
# components to be uninstalled during their parent's uninstall
# (RD-4420). This should be solved properly.
return children.all() + component_creators.all()
def _verify_dependencies_not_affected(self, execution, force):
if execution.workflow_id not in [
'stop', 'uninstall', 'update', 'csys_new_deployment_update'
]:
return
# if we're in the middle of an execution initiated by the component
# creator, we'd like to drop the component dependency from the list
deployment = execution.deployment
idds = self._get_blocking_dependencies(
deployment, skip_component_children=True)
# allow uninstall of get-capability dependencies
# if the dependent deployment is inactive
if execution.workflow_id == 'uninstall':
idds = [idd for idd in idds if not (
idd.dependency_creator.endswith('.get_capability') and
idd.source_deployment.installation_status == 'inactive')]
if not idds:
return
formatted_dependencies = '\n'.join(
f'[{i}] {idd.format()}' for i, idd in enumerate(idds, 1)
)
if force:
current_app.logger.warning(
"Force-executing workflow `%s` on deployment %s despite "
"having existing dependent installations:\n%s",
execution.workflow_id, execution.deployment.id,
formatted_dependencies)
return
# If part of a deployment update - mark the update as failed
if execution.workflow_id in ('update', 'csys_new_deployment_update'):
dep_update = self.sm.get(
models.DeploymentUpdate,
None,
filters={'deployment_id': execution.deployment.id,
'state': UpdateStates.UPDATING}
)
if dep_update:
dep_update.state = UpdateStates.FAILED
self.sm.update(dep_update)
raise manager_exceptions.DependentExistsError(
f"Can't execute workflow `{execution.workflow_id}` on deployment "
f"{execution.deployment.id} - existing installations depend "
f"on it:\n{formatted_dependencies}")
def _workflow_queued(self, execution):
execution.status = ExecutionState.QUEUED
self.sm.update(execution)
self._send_hook(execution)
def _send_hook(self, execution):
try:
event_type = {
ExecutionState.QUEUED: 'workflow_queued',
ExecutionState.STARTED: 'workflow_started',
ExecutionState.TERMINATED: 'workflow_succeeded',
ExecutionState.FAILED: 'workflow_failed',
ExecutionState.CANCELLED: 'workflow_cancelled',
}[execution.status]
except KeyError:
return
if execution.status == ExecutionState.STARTED:
start_resume = 'Resuming' if execution.resume else 'Starting'
dry_run = ' (dry run)' if execution.is_dry_run else ''
message = (
f"{start_resume} '{execution.workflow_id}' "
f"workflow execution{dry_run}"
)
else:
message = (
f"'{execution.workflow_id}' workflow "
f"execution {execution.status}"
)
message_context = {
'message_type': 'hook',
'is_system_workflow': execution.is_system_workflow,
'blueprint_id': execution.blueprint_id,
'deployment_id': execution.deployment_id,
'execution_id': execution.id,
'workflow_id': execution.workflow_id,
'tenant_name': execution.tenant_name,
'rest_token': execution.creator.get_auth_token(),
'tenant': {
'name': execution.tenant_name,
}
}
if not execution.is_system_workflow:
message_context['execution_parameters'] = execution.parameters
event = {
'type': 'cloudify_event',
'event_type': event_type,
'context': message_context,
'message': {
'text': message,
'arguments': None
}
}
send_event(event, 'hook')
def update_resource_labels(self,
labels_resource_model,
resource,
new_labels,
creator=None,
created_at=None):
"""
        Update the resource labels.
This function replaces the existing resource labels with the new labels
that were passed in the request.
If a new label already exists, it won't be created again.
If an existing label is not in the new labels list, it will be deleted.
"""
labels_to_create = self.get_labels_to_create(resource, new_labels)
labels_to_delete = self.get_labels_to_delete(resource, new_labels)
for label in labels_to_delete:
self.sm.delete(label)
if label in resource.labels:
resource.labels.remove(label)
self.create_resource_labels(labels_resource_model,
resource,
labels_to_create,
creator=creator,
created_at=created_at)
@staticmethod
def get_labels_to_create(resource, new_labels):
new_labels_set = set(new_labels)
existing_labels = resource.labels
existing_labels_tup = set(
(label.key, label.value) for label in existing_labels)
return new_labels_set - existing_labels_tup
@staticmethod
def get_labels_to_delete(resource, new_labels):
labels_to_delete = set()
new_labels_set = set(new_labels)
for label in resource.labels:
if (label.key, label.value) not in new_labels_set:
labels_to_delete.add(label)
return labels_to_delete
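    # Illustrative sketch (not part of the original module), using a
    # hypothetical resource whose label objects carry the (key, value) pairs
    # ('a', '1') and ('b', '2'): with new_labels == [('a', '1'), ('c', '3')],
    # get_labels_to_create returns {('c', '3')} and get_labels_to_delete
    # returns the set holding the label object for ('b', '2').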
def create_resource_labels(self,
labels_resource_model,
resource,
labels_list,
creator=None,
created_at=None):
"""
Populate the resource_labels table.
:param labels_resource_model: A labels resource model
:param resource: A resource element
:param labels_list: A list of labels of the form:
[(key1, value1), (key2, value2)]
:param creator: Specify creator (e.g. for snapshots).
:param created_at: Specify creation time (e.g. for snapshots).
"""
if not labels_list:
return
lowercase_labels = {'csys-obj-type'}
current_time = datetime.utcnow()
for key, value in labels_list:
if key.lower() in lowercase_labels:
key = key.lower()
value = value.lower()
new_label = {'key': key,
'value': value,
'created_at': created_at or current_time,
'creator': creator or current_user}
if labels_resource_model == models.DeploymentLabel:
new_label['deployment'] = resource
elif labels_resource_model == models.BlueprintLabel:
new_label['blueprint'] = resource
elif labels_resource_model == models.DeploymentGroupLabel:
new_label['deployment_group'] = resource
self.sm.put(labels_resource_model(**new_label))
def create_deployment_schedules(self, deployment, plan):
plan_deployment_settings = plan.get('deployment_settings')
if not plan_deployment_settings:
return
plan_schedules_dict = plan_deployment_settings.get('default_schedules')
if not plan_schedules_dict:
return
self.create_deployment_schedules_from_dict(plan_schedules_dict,
deployment)
def create_deployment_schedules_from_dict(self, schedules_dict, deployment,
base_datetime=None):
"""
:param schedules_dict: a dict of deployment schedules to create
:param deployment: the deployment for which the schedules are created
:param base_datetime: a datetime object representing the absolute date
and time to which we apply relative time deltas.
By default: UTC now.
"""
for schedule_id, schedule in schedules_dict.items():
workflow_id = schedule['workflow']
execution_arguments = schedule.get('execution_arguments', {})
parameters = schedule.get('workflow_parameters')
self._verify_workflow_in_deployment(
workflow_id, deployment, deployment.id)
since = self._get_utc_datetime_from_sched_plan(schedule['since'],
base_datetime)
until = schedule.get('until')
if until:
until = self._get_utc_datetime_from_sched_plan(until,
base_datetime)
rule = compute_rule_from_scheduling_params({
'rrule': schedule.get('rrule'),
'recurrence': schedule.get('recurrence'),
'weekdays': schedule.get('weekdays'),
'count': schedule.get('count')
})
slip = schedule.get('slip', 0)
stop_on_fail = schedule.get('stop_on_fail', False)
enabled = schedule.get('default_enabled', True)
now = get_formatted_timestamp()
schedule = models.ExecutionSchedule(
id=schedule_id,
deployment=deployment,
created_at=now,
since=since,
until=until,
rule=rule,
slip=slip,
workflow_id=workflow_id,
parameters=parameters,
execution_arguments=execution_arguments,
stop_on_fail=stop_on_fail,
enabled=enabled,
)
schedule.next_occurrence = schedule.compute_next_occurrence()
self.sm.put(schedule)
@staticmethod
def _get_utc_datetime_from_sched_plan(time_expression, base_datetime=None):
"""
:param time_expression: Either a string representing an absolute
datetime, or a relative time delta, such as '+4 hours' or '+1y+1d'.
:param base_datetime: a datetime object representing the absolute date
and time to which we apply the time delta. By default: UTC now
(relevant only for relative time).
:return: A naive datetime object, in UTC time.
"""
time_fmt = '%Y-%m-%d %H:%M:%S'
if time_expression.startswith('+'):
base_datetime = base_datetime or datetime.utcnow()
return parse_utc_datetime_relative(time_expression, base_datetime)
return datetime.strptime(time_expression, time_fmt)
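    # Illustrative sketch (not part of the original module):
    #   _get_utc_datetime_from_sched_plan('2021-01-01 12:00:00')
    #       -> datetime(2021, 1, 1, 12, 0)
    #   _get_utc_datetime_from_sched_plan('+4 hours', base_datetime=base)
    #       -> base shifted by 4 hours (delegated to
    #          parse_utc_datetime_relative)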
def recalc_ancestors(self, deployment_ids):
"""Recalculate statuses & counts for all ancestors of deployment_ids"""
if not deployment_ids:
return
with self.sm.transaction():
deps = models.DeploymentLabelsDependencies.get_dependencies(
deployment_ids, dependents=False, locking=True)
if not deps:
                # no deps means there's no tree to speak of; we just need to
                # update the given deployment_ids themselves
deps = (
db.session.query(models.Deployment)
.filter(models.Deployment._storage_id.in_(deployment_ids))
.with_for_update()
.all()
)
for dep in deps:
summary = models.DeploymentLabelsDependencies\
.get_children_summary(dep)
envs = 0
services = 0
srv_statuses = []
env_statuses = []
for source in (summary.environments, summary.services):
if not source.count:
continue
envs += source.sub_environments_total
env_statuses += source.sub_environment_statuses
services += source.sub_services_total
srv_statuses += source.sub_service_statuses
envs += summary.environments.count
services += summary.services.count
if summary.environments.count:
env_statuses += summary.environments.deployment_statuses
if summary.services.count:
srv_statuses += summary.services.deployment_statuses
if srv_statuses:
srv_status = models.Deployment.compare_statuses(
*srv_statuses)
else:
srv_status = None
if env_statuses:
env_status = models.Deployment.compare_statuses(
*env_statuses)
else:
env_status = None
new_status = \
models.Deployment.decide_deployment_status(
latest_execution_status=dep.latest_execution_status,
installation_status=dep.installation_status,
sub_services_status=srv_status,
sub_environments_status=env_status,
)
db.session.execute(
models.Deployment.__table__.update()
.where(models.Deployment.__table__.c._storage_id ==
dep._storage_id)
.values(
deployment_status=new_status,
sub_services_count=services,
sub_environments_count=envs,
sub_services_status=srv_status,
sub_environments_status=env_status
)
)
# What we need to access this manager in Flask
def get_resource_manager(sm=None):
"""
Get the current app's resource manager, create if necessary
"""
if sm:
return ResourceManager(sm)
return current_app.config.setdefault('resource_manager',
ResourceManager())
def create_secret(key, secret, tenant, created_at=None,
updated_at=None, creator=None):
sm = get_storage_manager()
timestamp = utils.get_formatted_timestamp()
new_secret = models.Secret(
id=key,
value=encrypt(secret['value']),
created_at=created_at or timestamp,
updated_at=updated_at or timestamp,
visibility=secret['visibility'],
is_hidden_value=secret['is_hidden_value'],
tenant=tenant
)
if creator:
new_secret.creator = creator
created_secret = sm.put(new_secret)
return created_secret
def update_secret(existing_secret, secret):
existing_secret.value = encrypt(secret['value'])
existing_secret.updated_at = utils.get_formatted_timestamp()
return get_storage_manager().update(existing_secret, validate_global=True)
def update_imported_secret(existing_secret, imported_secret):
existing_secret.is_hidden_value = imported_secret['is_hidden_value']
existing_secret.visibility = imported_secret['visibility']
update_secret(existing_secret, imported_secret)
def add_to_dict_values(dictionary, key, value):
if key in dictionary:
dictionary[key].append(value)
return
dictionary[key] = [value]
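# Illustrative sketch (not part of the original module):
#   d = {}
#   add_to_dict_values(d, 'executions', 'e1')
#   add_to_dict_values(d, 'executions', 'e2')
#   # d == {'executions': ['e1', 'e2']}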
| cloudify-cosmo/cloudify-manager | rest-service/manager_rest/resource_manager.py | Python | apache-2.0 | 122,592 |
__author__ = 'nekmo'
def append_or_update(original_list, new_list, override=True):
for i, arg in enumerate(new_list):
if i < len(original_list) and override:
original_list[i] = arg
elif i >= len(original_list):
original_list.append(arg)
return original_list
| Nekmo/nekutils | iter.py | Python | mit | 308 |
#!/usr/bin/env python
import os
from distutils.core import setup
from tvrage import __version__, __author__, __license__
setup(name='python-tvrage',
description='python client for the tvrage.com XML API',
long_description = file(
os.path.join(os.path.dirname(__file__),'README.rst')).read(),
license=__license__,
version=__version__,
author=__author__,
author_email='[email protected]',
url='http://bitbucket.org/ckreutzer/python-tvrage/',
packages=['tvrage'],
classifiers = [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Topic :: Software Development :: Libraries :: Python Modules',
'Programming Language :: Python',
'Operating System :: OS Independent'
]
)
| GetSomeBlocks/Score_Soccer | resources/lib/tvrage/setup.py | Python | mit | 872 |
import unittest
import six
from pyrad import tools
try:
import ipaddress
except ImportError:
ipaddress = None
class EncodingTests(unittest.TestCase):
def testStringEncoding(self):
self.assertRaises(ValueError, tools.EncodeString, 'x' * 254)
self.assertEqual(
tools.EncodeString('1234567890'),
six.b('1234567890'))
def testInvalidStringEncodingRaisesTypeError(self):
self.assertRaises(TypeError, tools.EncodeString, 1)
def testAddressEncoding(self):
self.assertRaises(ValueError, tools.EncodeAddress, '123')
self.assertEqual(
tools.EncodeAddress('192.168.0.255'),
six.b('\xc0\xa8\x00\xff'))
def testInvalidAddressEncodingRaisesTypeError(self):
self.assertRaises(TypeError, tools.EncodeAddress, 1)
def testIntegerEncoding(self):
self.assertEqual(tools.EncodeInteger(0x01020304),
six.b('\x01\x02\x03\x04'))
def testUnsignedIntegerEncoding(self):
self.assertEqual(tools.EncodeInteger(0xFFFFFFFF),
six.b('\xff\xff\xff\xff'))
def testInvalidIntegerEncodingRaisesTypeError(self):
self.assertRaises(TypeError, tools.EncodeInteger, '1')
def testDateEncoding(self):
self.assertEqual(tools.EncodeDate(0x01020304),
six.b('\x01\x02\x03\x04'))
def testInvalidDataEncodingRaisesTypeError(self):
self.assertRaises(TypeError, tools.EncodeDate, '1')
def testStringDecoding(self):
self.assertEqual(
tools.DecodeString(six.b('1234567890')),
'1234567890')
def testAddressDecoding(self):
self.assertEqual(
tools.DecodeAddress(six.b('\xc0\xa8\x00\xff')),
'192.168.0.255')
def testIntegerDecoding(self):
self.assertEqual(
tools.DecodeInteger(six.b('\x01\x02\x03\x04')),
0x01020304)
@unittest.skipUnless(ipaddress, 'Requires ipaddress module.')
def testIPv6PrefixDecoding(self):
self.assertEqual(
tools.DecodeIPv6Prefix(
six.b('\x00\x40\x20\x01\x0d\xb8\x3c\x4d\x00\x15')),
ipaddress.IPv6Network(six.u('2001:db8:3c4d:15::/64')))
self.assertEqual(
tools.DecodeIPv6Prefix(
six.b('\x00\x38\x20\x01\x0d\xb8\x3c\x4d\x15')),
ipaddress.IPv6Network(six.u('2001:db8:3c4d:1500::/56')))
self.assertEqual(
tools.DecodeIPv6Prefix(
six.b('\x00\x80\x20\x01\x0d\xb8\x85\xa3\x08\xd3'
'\x13\x19\x8a\x2e\x03\x70\x73\x48')),
ipaddress.IPv6Network(
six.u('2001:db8:85a3:8d3:1319:8a2e:370:7348/128')))
@unittest.skipUnless(ipaddress, 'Requires ipaddress module.')
def testIPv6PrefixEncoding(self):
self.assertEqual(
tools.EncodeIPv6Prefix(
ipaddress.IPv6Network(six.u('2001:db8:3c4d:15::/64'))),
six.b('\x00\x40\x20\x01\x0d\xb8\x3c\x4d\x00\x15'))
self.assertEqual(
tools.EncodeIPv6Prefix(
ipaddress.IPv6Network(six.u('2001:db8:3c4d:1500::/56'))),
six.b('\x00\x38\x20\x01\x0d\xb8\x3c\x4d\x15'))
self.assertEqual(
tools.EncodeIPv6Prefix(
ipaddress.IPv6Network(
six.u('2001:db8:85a3:8d3:1319:8a2e:370:7348/128'))),
six.b('\x00\x80\x20\x01\x0d\xb8\x85\xa3\x08\xd3'
'\x13\x19\x8a\x2e\x03\x70\x73\x48'))
def testDateDecoding(self):
self.assertEqual(
tools.DecodeDate(six.b('\x01\x02\x03\x04')),
0x01020304)
def testUnknownTypeEncoding(self):
self.assertRaises(ValueError, tools.EncodeAttr, 'unknown', None)
def testUnknownTypeDecoding(self):
self.assertRaises(ValueError, tools.DecodeAttr, 'unknown', None)
def testDecodeTaggedAttr(self):
self.assertEqual(
tools.DecodeTaggedAttr('octets', six.b('\x00123')),
(0, six.b('123')))
self.assertEqual(
tools.DecodeTaggedAttr('octets', six.b('\x01\x02\x03')),
(1, six.b('\x02\x03')))
self.assertEqual(
tools.DecodeTaggedAttr('octets', six.b('\x1F\x02\x03')),
(31, six.b('\x02\x03')))
        # Invalid tunnel tag (valid tags are 0-31)
self.assertRaises(ValueError, tools.DecodeTaggedAttr,
'octets', six.b('\x20\x02\x03'))
def testDecodeTaggedAttrInt(self):
# Test for correct handling of tagged integers (tag + 3 octets)
self.assertEqual(
tools.DecodeTaggedAttr('integer', six.b('\x01\x02\x03\x04')),
(1, six.b('\x00\x02\x03\x04')))
def testEncodeTaggedAttr(self):
self.assertEqual(
tools.EncodeTaggedAttr('octets', 1, six.b('123')),
six.b('\x01123'))
self.assertEqual(
tools.EncodeTaggedAttr('octets', 31, six.b('\x07\x08')),
six.b('\x1F\x07\x08'))
self.assertEqual(
tools.EncodeTaggedAttr('octets', 0, six.b('\x02\x03\x05')),
six.b('\x00\x02\x03\x05'))
def testEncodeFunction(self):
self.assertEqual(
tools.EncodeAttr('string', six.u('string')),
six.b('string'))
self.assertEqual(
tools.EncodeAttr('octets', six.b('string')),
six.b('string'))
self.assertEqual(
tools.EncodeAttr('ipaddr', '192.168.0.255'),
six.b('\xc0\xa8\x00\xff'))
self.assertEqual(
tools.EncodeAttr('integer', 0x01020304),
six.b('\x01\x02\x03\x04'))
self.assertEqual(
tools.EncodeAttr('date', 0x01020304),
six.b('\x01\x02\x03\x04'))
self.assertEqual(
tools.EncodeAttr('integer64', 0x0102030405060708),
six.b('\x01\x02\x03\x04\x05\x06\x07\x08'))
@unittest.skipUnless(ipaddress, 'Requires ipaddress module.')
def testEncodeFunctionIP(self):
self.assertEqual(
tools.EncodeAttr(
'ipv6prefix',
ipaddress.IPv6Network(six.u('2001:db8:1234::/48'))),
six.b('\x00\x30\x20\x01\x0d\xb8\x12\x34'))
def testDecodeFunction(self):
self.assertEqual(
tools.DecodeAttr('string', six.b('string')),
six.u('string'))
        self.assertEqual(
            tools.DecodeAttr('octets', six.b('string')),
            six.b('string'))
self.assertEqual(
tools.DecodeAttr('ipaddr', six.b('\xc0\xa8\x00\xff')),
'192.168.0.255')
self.assertEqual(
tools.DecodeAttr('integer', six.b('\x01\x02\x03\x04')),
0x01020304)
self.assertEqual(
tools.DecodeAttr('date', six.b('\x01\x02\x03\x04')),
0x01020304)
self.assertEqual(
tools.DecodeAttr('integer64',
six.b('\x01\x02\x03\x04\x05\x06\x07\x08')),
0x0102030405060708)
@unittest.skipUnless(ipaddress, 'Requires ipaddress module.')
def testDecodeFunctionIP(self):
self.assertEqual(
tools.DecodeAttr(
'ipv6prefix', six.b('\x00\x30\x20\x01\x0d\xb8\x12\x34')),
ipaddress.IPv6Network(six.u('2001:db8:1234::/48')))
| tom-mi/pyrad | pyrad/tests/testTools.py | Python | bsd-3-clause | 7,407 |
from builtins import range
import numpy as np
def affine_forward(x, w, b):
"""
Computes the forward pass for an affine (fully-connected) layer.
The input x has shape (N, d_1, ..., d_k) and contains a minibatch of N
examples, where each example x[i] has shape (d_1, ..., d_k). We will
reshape each input into a vector of dimension D = d_1 * ... * d_k, and
then transform it to an output vector of dimension M.
Inputs:
- x: A numpy array containing input data, of shape (N, d_1, ..., d_k)
- w: A numpy array of weights, of shape (D, M)
- b: A numpy array of biases, of shape (M,)
Returns a tuple of:
- out: output, of shape (N, M)
- cache: (x, w, b)
"""
out = None
###########################################################################
# TODO: Implement the affine forward pass. Store the result in out. You #
# will need to reshape the input into rows. #
###########################################################################
N = x.shape[0]
D = w.shape[0]
out = np.dot(np.reshape(x, [N, D]), w) + b
###########################################################################
# END OF YOUR CODE #
###########################################################################
cache = (x, w, b)
return out, cache
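# Illustrative sketch (not part of the original assignment code): with
#   x of shape (2, 4, 4), w of shape (16, 3), b of shape (3,),
# affine_forward reshapes x to (2, 16) and returns out of shape (2, 3),
# i.e. out = x.reshape(2, 16).dot(w) + b.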
def affine_backward(dout, cache):
"""
Computes the backward pass for an affine layer.
Inputs:
- dout: Upstream derivative, of shape (N, M)
- cache: Tuple of:
- x: Input data, of shape (N, d_1, ... d_k)
- w: Weights, of shape (D, M)
Returns a tuple of:
- dx: Gradient with respect to x, of shape (N, d1, ..., d_k)
- dw: Gradient with respect to w, of shape (D, M)
- db: Gradient with respect to b, of shape (M,)
"""
x, w, b = cache
dx, dw, db = None, None, None
###########################################################################
# TODO: Implement the affine backward pass. #
###########################################################################
N = x.shape[0]
D = w.shape[0]
dx = np.dot(dout, w.T).reshape(x.shape)
dw = np.dot(np.reshape(x, [N, D]).T, dout)
db = np.sum(dout, 0)
###########################################################################
# END OF YOUR CODE #
###########################################################################
return dx, dw, db
def relu_forward(x):
"""
Computes the forward pass for a layer of rectified linear units (ReLUs).
Input:
- x: Inputs, of any shape
Returns a tuple of:
- out: Output, of the same shape as x
- cache: x
"""
out = None
###########################################################################
# TODO: Implement the ReLU forward pass. #
###########################################################################
out = x.copy()
out[x < 0] = 0
###########################################################################
# END OF YOUR CODE #
###########################################################################
cache = x
return out, cache
def relu_backward(dout, cache):
"""
Computes the backward pass for a layer of rectified linear units (ReLUs).
Input:
- dout: Upstream derivatives, of any shape
- cache: Input x, of same shape as dout
Returns:
- dx: Gradient with respect to x
"""
dx, x = None, cache
###########################################################################
# TODO: Implement the ReLU backward pass. #
###########################################################################
dx = dout.copy()
dx[x < 0] = 0
###########################################################################
# END OF YOUR CODE #
###########################################################################
return dx
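# Illustrative sketch (not part of the original assignment code):
#   relu_forward(np.array([-1., 2.]))[0] -> array([0., 2.])
#   relu_backward(np.array([3., 4.]), np.array([-1., 2.])) -> array([0., 4.])
# i.e. the upstream gradient is passed through only where the cached input
# was non-negative.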
def batchnorm_forward(x, gamma, beta, bn_param):
"""
Forward pass for batch normalization.
During training the sample mean and (uncorrected) sample variance are
computed from minibatch statistics and used to normalize the incoming data.
During training we also keep an exponentially decaying running mean of the
mean and variance of each feature, and these averages are used to normalize
data at test-time.
At each timestep we update the running averages for mean and variance using
an exponential decay based on the momentum parameter:
running_mean = momentum * running_mean + (1 - momentum) * sample_mean
running_var = momentum * running_var + (1 - momentum) * sample_var
Note that the batch normalization paper suggests a different test-time
behavior: they compute sample mean and variance for each feature using a
large number of training images rather than using a running average. For
this implementation we have chosen to use running averages instead since
they do not require an additional estimation step; the torch7
implementation of batch normalization also uses running averages.
Input:
- x: Data of shape (N, D)
- gamma: Scale parameter of shape (D,)
- beta: Shift paremeter of shape (D,)
- bn_param: Dictionary with the following keys:
- mode: 'train' or 'test'; required
- eps: Constant for numeric stability
- momentum: Constant for running mean / variance.
- running_mean: Array of shape (D,) giving running mean of features
- running_var Array of shape (D,) giving running variance of features
Returns a tuple of:
- out: of shape (N, D)
- cache: A tuple of values needed in the backward pass
"""
mode = bn_param['mode']
eps = bn_param.get('eps', 1e-5)
momentum = bn_param.get('momentum', 0.9)
N, D = x.shape
running_mean = bn_param.get('running_mean', np.zeros(D, dtype=x.dtype))
running_var = bn_param.get('running_var', np.zeros(D, dtype=x.dtype))
out, cache = None, None
if mode == 'train':
#######################################################################
# TODO: Implement the training-time forward pass for batch norm. #
# Use minibatch statistics to compute the mean and variance, use #
# these statistics to normalize the incoming data, and scale and #
# shift the normalized data using gamma and beta. #
# #
# You should store the output in the variable out. Any intermediates #
# that you need for the backward pass should be stored in the cache #
# variable. #
# #
# You should also use your computed sample mean and variance together #
# with the momentum variable to update the running mean and running #
# variance, storing your result in the running_mean and running_var #
# variables. #
#######################################################################
        # Per-feature minibatch statistics
        sample_mean = np.mean(x, 0)
        x_mean = x - sample_mean                    # centered data
        x_mean_sqr = np.square(x_mean)
        sample_var = np.mean(x_mean_sqr, 0)         # biased sample variance
        sample_var_sqrt = np.sqrt(sample_var + eps)
        inv_svs = 1 / sample_var_sqrt               # inverse standard deviation
        x_norm = x_mean * inv_svs                   # normalized data
        out = gamma * x_norm + beta                 # scale and shift
        # Exponentially decaying running averages, used at test time
        running_mean = momentum * running_mean + (1 - momentum) * sample_mean
        running_var = momentum * running_var + (1 - momentum) * sample_var
        cache = (x, x_norm, gamma, x_mean, inv_svs, sample_var_sqrt, x_mean_sqr)
#######################################################################
# END OF YOUR CODE #
#######################################################################
elif mode == 'test':
#######################################################################
# TODO: Implement the test-time forward pass for batch normalization. #
# Use the running mean and variance to normalize the incoming data, #
# then scale and shift the normalized data using gamma and beta. #
# Store the result in the out variable. #
#######################################################################
x_norm = (x - running_mean) / np.sqrt(running_var + eps)
out = gamma * x_norm + beta
#######################################################################
# END OF YOUR CODE #
#######################################################################
else:
raise ValueError('Invalid forward batchnorm mode "%s"' % mode)
# Store the updated running means back into bn_param
bn_param['running_mean'] = running_mean
bn_param['running_var'] = running_var
return out, cache
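# Informal usage sketch (shapes are made up): at train time each call updates
# the running statistics stored in bn_param; flipping the same dict to 'test'
# mode then normalizes with those accumulated statistics.
#
#   x = np.random.randn(32, 10)
#   gamma, beta = np.ones(10), np.zeros(10)
#   bn_param = {'mode': 'train'}
#   out, cache = batchnorm_forward(x, gamma, beta, bn_param)
#   bn_param['mode'] = 'test'
#   out_test, _ = batchnorm_forward(x, gamma, beta, bn_param)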
def batchnorm_backward(dout, cache):
"""
Backward pass for batch normalization.
For this implementation, you should write out a computation graph for
batch normalization on paper and propagate gradients backward through
intermediate nodes.
Inputs:
- dout: Upstream derivatives, of shape (N, D)
- cache: Variable of intermediates from batchnorm_forward.
Returns a tuple of:
- dx: Gradient with respect to inputs x, of shape (N, D)
- dgamma: Gradient with respect to scale parameter gamma, of shape (D,)
- dbeta: Gradient with respect to shift parameter beta, of shape (D,)
"""
dx, dgamma, dbeta = None, None, None
###########################################################################
# TODO: Implement the backward pass for batch normalization. Store the #
# results in the dx, dgamma, and dbeta variables. #
###########################################################################
N = dout.shape[0]
x, x_norm, gamma, x_mean, inv_svs, sample_var_sqrt, x_mean_sqr = cache
dbeta = np.sum(dout, 0)
dgamma = np.sum(dout * x_norm, 0)
dx_norm = dout * gamma
dinv_svs = np.sum(dx_norm * x_mean, 0)
dsample_var_sqrt = dinv_svs * -np.square(inv_svs)
dsample_var = dsample_var_sqrt * 1 / (2 * sample_var_sqrt)
dx_mean_sqr = dsample_var / N * np.ones_like(x_mean_sqr)
dx_mean = dx_norm * inv_svs
dx_mean += dx_mean_sqr * 2 * x_mean
dsample_mean = -np.sum(dx_mean, 0)
dx = dx_mean
dx += dsample_mean / N
###########################################################################
# END OF YOUR CODE #
###########################################################################
return dx, dgamma, dbeta
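# Sanity-check sketch (helper names are the assignment's, if available): the
# analytic gradients above are typically validated against numeric gradients.
#
#   out, cache = batchnorm_forward(x, gamma, beta, {'mode': 'train'})
#   dx, dgamma, dbeta = batchnorm_backward(dout, cache)
#   # rel_error against eval_numerical_gradient_array should be ~1e-8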
def batchnorm_backward_alt(dout, cache):
"""
Alternative backward pass for batch normalization.
For this implementation you should work out the derivatives for the batch
normalizaton backward pass on paper and simplify as much as possible. You
should be able to derive a simple expression for the backward pass.
Note: This implementation should expect to receive the same cache variable
as batchnorm_backward, but might not use all of the values in the cache.
Inputs / outputs: Same as batchnorm_backward
"""
dx, dgamma, dbeta = None, None, None
###########################################################################
# TODO: Implement the backward pass for batch normalization. Store the #
# results in the dx, dgamma, and dbeta variables. #
# #
# After computing the gradient with respect to the centered inputs, you #
# should be able to compute gradients with respect to the inputs in a #
# single statement; our implementation fits on a single 80-character line.#
###########################################################################
N = dout.shape[0]
x, x_norm, gamma, x_mean, inv_svs, sample_var_sqrt, x_mean_sqr = cache
dbeta = np.sum(dout, 0)
dgamma = np.sum(dout * x_norm, 0)
dx_norm = dout * gamma
  # Simplified expression worked out on paper:
  #   dx = (1/N) * inv_std * (N*dxhat - sum_j(dxhat_j) - xhat * sum_j(dxhat_j * xhat_j))
  dx = (1.0 / N) * inv_svs * (N * dx_norm - np.sum(dx_norm, 0) -
                              x_norm * np.sum(dx_norm * x_norm, 0))
###########################################################################
# END OF YOUR CODE #
###########################################################################
return dx, dgamma, dbeta
def dropout_forward(x, dropout_param):
"""
Performs the forward pass for (inverted) dropout.
Inputs:
- x: Input data, of any shape
- dropout_param: A dictionary with the following keys:
- p: Dropout parameter. We drop each neuron output with probability p.
- mode: 'test' or 'train'. If the mode is train, then perform dropout;
if the mode is test, then just return the input.
- seed: Seed for the random number generator. Passing seed makes this
function deterministic, which is needed for gradient checking but not
in real networks.
Outputs:
- out: Array of the same shape as x.
- cache: tuple (dropout_param, mask). In training mode, mask is the dropout
mask that was used to multiply the input; in test mode, mask is None.
"""
p, mode = dropout_param['p'], dropout_param['mode']
if 'seed' in dropout_param:
np.random.seed(dropout_param['seed'])
mask = None
out = None
if mode == 'train':
#######################################################################
# TODO: Implement training phase forward pass for inverted dropout. #
# Store the dropout mask in the mask variable. #
#######################################################################
    # Inverted dropout: with the convention in the docstring (drop with
    # probability p), keep each unit with probability (1 - p) and rescale the
    # survivors by 1 / (1 - p) so no scaling is needed at test time.
    mask = (np.random.rand(*x.shape) >= p) / (1 - p)
    out = x * mask
#######################################################################
# END OF YOUR CODE #
#######################################################################
elif mode == 'test':
#######################################################################
# TODO: Implement the test phase forward pass for inverted dropout. #
#######################################################################
    # With inverted dropout the test-time pass is the identity.
    out = x
#######################################################################
# END OF YOUR CODE #
#######################################################################
cache = (dropout_param, mask)
out = out.astype(x.dtype, copy=False)
return out, cache
def dropout_backward(dout, cache):
"""
Perform the backward pass for (inverted) dropout.
Inputs:
- dout: Upstream derivatives, of any shape
- cache: (dropout_param, mask) from dropout_forward.
"""
dropout_param, mask = cache
mode = dropout_param['mode']
dx = None
if mode == 'train':
#######################################################################
# TODO: Implement training phase backward pass for inverted dropout #
#######################################################################
    # Gradient only flows through the units that were kept; the mask already
    # includes the 1 / (1 - p) rescaling.
    dx = dout * mask
#######################################################################
# END OF YOUR CODE #
#######################################################################
elif mode == 'test':
dx = dout
return dx
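# Quick illustration of the inverted-dropout convention implemented above
# (drop probability p, survivors rescaled by 1/(1 - p)): the expected value of
# the activations is preserved, so the test-time pass is just the identity.
#
#   x = np.ones((1000, 1000)); p = 0.3
#   out, _ = dropout_forward(x, {'mode': 'train', 'p': p, 'seed': 0})
#   # out.mean() should be close to 1.0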
def conv_forward_naive(x, w, b, conv_param):
"""
A naive implementation of the forward pass for a convolutional layer.
The input consists of N data points, each with C channels, height H and
width W. We convolve each input with F different filters, where each filter
  spans all C channels and has height HH and width WW.
Input:
- x: Input data of shape (N, C, H, W)
- w: Filter weights of shape (F, C, HH, WW)
- b: Biases, of shape (F,)
- conv_param: A dictionary with the following keys:
- 'stride': The number of pixels between adjacent receptive fields in the
horizontal and vertical directions.
- 'pad': The number of pixels that will be used to zero-pad the input.
Returns a tuple of:
- out: Output data, of shape (N, F, H', W') where H' and W' are given by
H' = 1 + (H + 2 * pad - HH) / stride
W' = 1 + (W + 2 * pad - WW) / stride
- cache: (x, w, b, conv_param)
"""
out = None
###########################################################################
# TODO: Implement the convolutional forward pass. #
# Hint: you can use the function np.pad for padding. #
###########################################################################
  stride, pad = conv_param['stride'], conv_param['pad']
  N, C, H, W = x.shape
  F, _, HH, WW = w.shape
  H_out = 1 + (H + 2 * pad - HH) // stride
  W_out = 1 + (W + 2 * pad - WW) // stride
  x_pad = np.pad(x, ((0, 0), (0, 0), (pad, pad), (pad, pad)), mode='constant')
  out = np.zeros((N, F, H_out, W_out))
  for n in range(N):              # samples
    for f in range(F):            # filters
      for i in range(H_out):      # output rows
        for j in range(W_out):    # output columns
          window = x_pad[n, :, i * stride:i * stride + HH,
                         j * stride:j * stride + WW]
          out[n, f, i, j] = np.sum(window * w[f]) + b[f]
###########################################################################
# END OF YOUR CODE #
###########################################################################
cache = (x, w, b, conv_param)
return out, cache
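# Worked example of the output-shape formula in the docstring: a 32x32 input
# with pad=1, stride=1 and a 3x3 filter gives H' = 1 + (32 + 2*1 - 3) / 1 = 32,
# i.e. the spatial size is preserved ("same" padding).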
def conv_backward_naive(dout, cache):
"""
A naive implementation of the backward pass for a convolutional layer.
Inputs:
- dout: Upstream derivatives.
- cache: A tuple of (x, w, b, conv_param) as in conv_forward_naive
Returns a tuple of:
- dx: Gradient with respect to x
- dw: Gradient with respect to w
- db: Gradient with respect to b
"""
dx, dw, db = None, None, None
###########################################################################
# TODO: Implement the convolutional backward pass. #
###########################################################################
  x, w, b, conv_param = cache
  stride, pad = conv_param['stride'], conv_param['pad']
  N, C, H, W = x.shape
  F, _, HH, WW = w.shape
  _, _, H_out, W_out = dout.shape
  x_pad = np.pad(x, ((0, 0), (0, 0), (pad, pad), (pad, pad)), mode='constant')
  dx_pad = np.zeros_like(x_pad)
  dw = np.zeros_like(w)
  db = np.sum(dout, axis=(0, 2, 3))
  for n in range(N):
    for f in range(F):
      for i in range(H_out):
        for j in range(W_out):
          h0, w0 = i * stride, j * stride
          window = x_pad[n, :, h0:h0 + HH, w0:w0 + WW]
          dw[f] += window * dout[n, f, i, j]
          dx_pad[n, :, h0:h0 + HH, w0:w0 + WW] += w[f] * dout[n, f, i, j]
  dx = dx_pad[:, :, pad:pad + H, pad:pad + W]
###########################################################################
# END OF YOUR CODE #
###########################################################################
return dx, dw, db
def max_pool_forward_naive(x, pool_param):
"""
A naive implementation of the forward pass for a max pooling layer.
Inputs:
- x: Input data, of shape (N, C, H, W)
- pool_param: dictionary with the following keys:
- 'pool_height': The height of each pooling region
- 'pool_width': The width of each pooling region
- 'stride': The distance between adjacent pooling regions
Returns a tuple of:
- out: Output data
- cache: (x, pool_param)
"""
out = None
###########################################################################
# TODO: Implement the max pooling forward pass #
###########################################################################
  N, C, H, W = x.shape
  pool_h, pool_w = pool_param['pool_height'], pool_param['pool_width']
  stride = pool_param['stride']
  H_out = 1 + (H - pool_h) // stride
  W_out = 1 + (W - pool_w) // stride
  out = np.zeros((N, C, H_out, W_out))
  for i in range(H_out):
    for j in range(W_out):
      window = x[:, :, i * stride:i * stride + pool_h,
                 j * stride:j * stride + pool_w]
      out[:, :, i, j] = np.max(window, axis=(2, 3))
###########################################################################
# END OF YOUR CODE #
###########################################################################
cache = (x, pool_param)
return out, cache
def max_pool_backward_naive(dout, cache):
"""
A naive implementation of the backward pass for a max pooling layer.
Inputs:
- dout: Upstream derivatives
- cache: A tuple of (x, pool_param) as in the forward pass.
Returns:
- dx: Gradient with respect to x
"""
dx = None
###########################################################################
# TODO: Implement the max pooling backward pass #
###########################################################################
  x, pool_param = cache
  pool_h, pool_w = pool_param['pool_height'], pool_param['pool_width']
  stride = pool_param['stride']
  N, C, H_out, W_out = dout.shape
  dx = np.zeros_like(x)
  for n in range(N):
    for c in range(C):
      for i in range(H_out):
        for j in range(W_out):
          h0, w0 = i * stride, j * stride
          window = x[n, c, h0:h0 + pool_h, w0:w0 + pool_w]
          mask = (window == np.max(window))
          # Route the gradient to the max element(s) of each window.
          dx[n, c, h0:h0 + pool_h, w0:w0 + pool_w] += mask * dout[n, c, i, j]
###########################################################################
# END OF YOUR CODE #
###########################################################################
return dx
def spatial_batchnorm_forward(x, gamma, beta, bn_param):
"""
Computes the forward pass for spatial batch normalization.
Inputs:
- x: Input data of shape (N, C, H, W)
- gamma: Scale parameter, of shape (C,)
- beta: Shift parameter, of shape (C,)
- bn_param: Dictionary with the following keys:
- mode: 'train' or 'test'; required
- eps: Constant for numeric stability
- momentum: Constant for running mean / variance. momentum=0 means that
old information is discarded completely at every time step, while
momentum=1 means that new information is never incorporated. The
default of momentum=0.9 should work well in most situations.
- running_mean: Array of shape (D,) giving running mean of features
- running_var Array of shape (D,) giving running variance of features
Returns a tuple of:
- out: Output data, of shape (N, C, H, W)
- cache: Values needed for the backward pass
"""
out, cache = None, None
###########################################################################
# TODO: Implement the forward pass for spatial batch normalization. #
# #
# HINT: You can implement spatial batch normalization using the vanilla #
# version of batch normalization defined above. Your implementation should#
# be very short; ours is less than five lines. #
###########################################################################
  N, C, H, W = x.shape
  # Fold the spatial dimensions into the batch dimension and reuse the
  # vanilla batch normalization over the C channels.
  x_flat = x.transpose(0, 2, 3, 1).reshape(-1, C)
  out_flat, cache = batchnorm_forward(x_flat, gamma, beta, bn_param)
  out = out_flat.reshape(N, H, W, C).transpose(0, 3, 1, 2)
###########################################################################
# END OF YOUR CODE #
###########################################################################
return out, cache
def spatial_batchnorm_backward(dout, cache):
"""
Computes the backward pass for spatial batch normalization.
Inputs:
- dout: Upstream derivatives, of shape (N, C, H, W)
- cache: Values from the forward pass
Returns a tuple of:
- dx: Gradient with respect to inputs, of shape (N, C, H, W)
- dgamma: Gradient with respect to scale parameter, of shape (C,)
- dbeta: Gradient with respect to shift parameter, of shape (C,)
"""
dx, dgamma, dbeta = None, None, None
###########################################################################
# TODO: Implement the backward pass for spatial batch normalization. #
# #
# HINT: You can implement spatial batch normalization using the vanilla #
# version of batch normalization defined above. Your implementation should#
# be very short; ours is less than five lines. #
###########################################################################
  N, C, H, W = dout.shape
  dout_flat = dout.transpose(0, 2, 3, 1).reshape(-1, C)
  dx_flat, dgamma, dbeta = batchnorm_backward(dout_flat, cache)
  dx = dx_flat.reshape(N, H, W, C).transpose(0, 3, 1, 2)
###########################################################################
# END OF YOUR CODE #
###########################################################################
return dx, dgamma, dbeta
def svm_loss(x, y):
"""
Computes the loss and gradient using for multiclass SVM classification.
Inputs:
- x: Input data, of shape (N, C) where x[i, j] is the score for the jth
class for the ith input.
- y: Vector of labels, of shape (N,) where y[i] is the label for x[i] and
0 <= y[i] < C
Returns a tuple of:
- loss: Scalar giving the loss
- dx: Gradient of the loss with respect to x
"""
N = x.shape[0]
correct_class_scores = x[np.arange(N), y]
margins = np.maximum(0, x - correct_class_scores[:, np.newaxis] + 1.0)
margins[np.arange(N), y] = 0
loss = np.sum(margins) / N
num_pos = np.sum(margins > 0, axis=1)
dx = np.zeros_like(x)
dx[margins > 0] = 1
dx[np.arange(N), y] -= num_pos
dx /= N
return loss, dx
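# Worked example of the hinge margins: with scores x = [[3.2, 5.1, -1.7]] and
# correct class y = [0], the margins are max(0, 5.1 - 3.2 + 1) = 2.9 and
# max(0, -1.7 - 3.2 + 1) = 0, so the (unregularized) loss for this sample is 2.9.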
def softmax_loss(x, y):
"""
Computes the loss and gradient for softmax classification.
Inputs:
- x: Input data, of shape (N, C) where x[i, j] is the score for the jth
class for the ith input.
- y: Vector of labels, of shape (N,) where y[i] is the label for x[i] and
0 <= y[i] < C
Returns a tuple of:
- loss: Scalar giving the loss
- dx: Gradient of the loss with respect to x
"""
shifted_logits = x - np.max(x, axis=1, keepdims=True)
Z = np.sum(np.exp(shifted_logits), axis=1, keepdims=True)
log_probs = shifted_logits - np.log(Z)
probs = np.exp(log_probs)
N = x.shape[0]
loss = -np.sum(log_probs[np.arange(N), y]) / N
dx = probs.copy()
dx[np.arange(N), y] -= 1
dx /= N
return loss, dx
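if __name__ == '__main__':
  # Lightweight smoke test (not part of the assignment interface): the softmax
  # loss of all-zero scores over C classes should be close to log(C).
  np.random.seed(0)
  num_samples, num_classes = 64, 10
  scores = np.zeros((num_samples, num_classes))
  labels = np.random.randint(num_classes, size=num_samples)
  loss, _ = softmax_loss(scores, labels)
  print('softmax loss on uniform scores: %.4f (expected ~%.4f)'
        % (loss, np.log(num_classes)))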
| halimacc/CS231n-assignments | assignment2/cs231n/layers.py | Python | unlicense | 25,347 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read()
requirements = [
"scandir==1.10.0",
]
test_requirements = [
"mock==3.0.5",
"pytest==4.4.1",
]
setup(
name='se_mailbox',
version='1.0.0',
description="Additional mailbox functionality.",
long_description=readme + '\n\n' + history,
author="SolarWinds Mail WG",
author_email='[email protected]',
url='https://github.com/spamexperts/se-mailbox',
packages=[
'se_mailbox',
],
    package_dir={'se_mailbox': 'se_mailbox'},
include_package_data=True,
install_requires=requirements,
zip_safe=False,
keywords='se_mailbox',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'Natural Language :: English',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.7',
],
test_suite='tests',
tests_require=test_requirements
)
| SpamExperts/se-mailbox | setup.py | Python | gpl-2.0 | 1,161 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides ways to store benchmark output."""
def store_benchmark(data, storage_type=None):
"""Store benchmark data.
Args:
data: Dictionary mapping from string benchmark name to
numeric benchmark value.
storage_type: (string) Specifies where to store benchmark
result. If storage_type is
'cbuild_benchmark_datastore': store outputs in our continuous
build datastore. gcloud must be setup in current environment
pointing to the project where data will be added.
"""
if storage_type == 'cbuild_benchmark_datastore':
try:
# pylint: disable=g-import-not-at-top
import cbuild_benchmark_storage
# pylint: enable=g-import-not-at-top
except ImportError:
raise ImportError(
'Missing cbuild_benchmark_storage.py required for '
'benchmark_cloud_datastore option')
cbuild_benchmark_storage.upload_to_benchmark_datastore(data)
else:
    raise ValueError('unknown storage_type: ' + str(storage_type))
| e-bug/distributed-tensorflow-benchmarks | google-benchmarks/tf_cnn_benchmarks/benchmark_storage.py | Python | gpl-3.0 | 1,679 |
import numpy as np
from tools.walk_trees import walk_trees
from tools.game_tree.builder import GameTreeBuilder
from tools.game_tree.nodes import ActionNode
from tools.game_tree.node_provider import NodeProvider
class StrategiesWeightedMixtureActionNode(ActionNode):
def __init__(self, parent, player):
super().__init__(parent, player)
self.strategies = None
self.weights = None
    def __getattr__(self, attr):
        if attr == 'strategy':
            return np.average(self.strategies, axis=0, weights=self.weights)
        # Raise for any other missing attribute instead of silently returning
        # None, which would mask real errors.
        raise AttributeError(attr)
class StrategiesWeightedMixtureTreeNodeProvider(NodeProvider):
def create_action_node(self, parent, player):
return StrategiesWeightedMixtureActionNode(parent, player)
class StrategiesWeightedMixture():
def __init__(self, game, strategies):
self.strategy = GameTreeBuilder(game, StrategiesWeightedMixtureTreeNodeProvider()).build_tree()
self.weights = np.ones(len(strategies)) / len(strategies)
def on_nodes(*nodes):
mixture_node = nodes[0]
if isinstance(mixture_node, ActionNode):
mixture_node.weights = self.weights
mixture_node.strategies = np.zeros([len(strategies), 3])
for i, node in enumerate(nodes[1:]):
mixture_node.strategies[i, :] = node.strategy
walk_trees(on_nodes, self.strategy, *strategies)
def update_weights(self, weights):
np.copyto(self.weights, weights)
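# Informal usage sketch (s1..s3 stand in for previously-built strategy trees):
# build the mixture once and re-weight it later without rebuilding the tree.
#
#   mixture = StrategiesWeightedMixture(game, [s1, s2, s3])
#   mixture.update_weights(np.array([0.5, 0.25, 0.25]))
#   root = mixture.strategy  # action nodes expose the weighted .strategy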
| JakubPetriska/poker-cfr | implicit_modelling/strategies_weighted_mixeture.py | Python | mit | 1,484 |
from SpecImports import *
from toontown.toonbase import ToontownGlobals
CogParent = 10000
BattleCellId = 0
BattleCells = {BattleCellId: {'parentEntId': CogParent,
'pos': Point3(0, 0, 0)}}
CogData = [{'parentEntId': CogParent,
'boss': 1,
'level': 16,
'battleCell': BattleCellId,
'pos': Point3(-6, 0, 0),
'h': 180,
'behavior': 'stand',
'path': None,
'skeleton': 0},
{'parentEntId': CogParent,
'boss': 0,
'level': 12,
'battleCell': BattleCellId,
'pos': Point3(-2, 0, 0),
'h': 180,
'behavior': 'stand',
'path': None,
'skeleton': 0},
{'parentEntId': CogParent,
'boss': 0,
'level': 12,
'battleCell': BattleCellId,
'pos': Point3(2, 0, 0),
'h': 180,
'behavior': 'stand',
'path': None,
'skeleton': 0},
{'parentEntId': CogParent,
'boss': 0,
'level': 13,
'battleCell': BattleCellId,
'pos': Point3(6, 0, 0),
'h': 180,
'behavior': 'stand',
'path': None,
'skeleton': 0}]
ReserveCogData = []
| silly-wacky-3-town-toon/SOURCE-COD | toontown/coghq/LawbotOfficeOilRoom_Battle01_Cogs.py | Python | apache-2.0 | 960 |
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import matplotlib.pyplot as plt
from torchvision import transforms
from PIL import Image
# torch config
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Configurations
CHANNELS = 16
WIDTH = HEIGHT = 16 #64
BATCH_LEN = 8
plt.rcParams['toolbar'] = 'None'
###
class Grid:
def __init__(self):
super().__init__()
# Initialization
self.image = None # dim: (CHANNELS, WIDTH, HEIGHT)
def default(self):
im = Image.open("owl_mini.png") # it should be RGBA
#im = im.convert('RGBA')
im = transforms.ToTensor()(im)
self.load(im)
return self
def load(self, img):
self.image = img[:4,:,:].cpu()
self.image = torch.clamp(self.image, 0.0, 1.0 )
return self
def show(self, title=""):
img = transforms.ToPILImage()(self.image)
plt.imshow(img)
plt.title(title)
plt.show()
###
# Model is explained here -> https://distill.pub/2020/growing-ca/
class CAModel(nn.Module):
def __init__(self):
super().__init__()
filterY = torch.tensor([[1., 2., 1.], [0., 0., 0.], [-1., -2., -1.]]).to(device)
filterX = filterY.t()
filterId = torch.tensor([[0., 0., 0.], [0., 1., 0.], [0., 0., 0.]]).to(device)
self.sobelX_kernel = filterX.expand(CHANNELS, 1, 3, 3) / 8.0
self.sobelY_kernel = filterY.expand(CHANNELS, 1, 3, 3) / 8.0
self.identity_kernel = filterId.expand(CHANNELS, 1, 3, 3)
self.fc1 = nn.Conv2d(CHANNELS * 3, 128, 1)
self.fc2 = nn.Conv2d(128, CHANNELS, 1)
torch.nn.init.zeros_(self.fc2.weight) # paper recommendation
torch.nn.init.zeros_(self.fc2.bias) # paper recommendation
def step(self, input, debug=False, showLayers=False):
# Filters
sx = F.conv2d(input, self.sobelX_kernel, padding=1, groups=CHANNELS)
sy = F.conv2d(input, self.sobelY_kernel, padding=1, groups=CHANNELS)
id = F.conv2d(input, self.identity_kernel, padding=1, groups=CHANNELS)
# every pixel will have 3*CHANNELS channels now in perception tensor
perception = torch.cat([id, sx, sy], 1)
#print(perception.shape)
x = self.fc1(perception)
#x = self.norm1(x)
x = F.relu(x)
diff = self.fc2(x) # No relu, we want also negative values, they are the differential image
# stochastic update for differential image
stochastic = torch.rand((BATCH_LEN, 1, WIDTH, HEIGHT)) < 0.5
stochastic = stochastic.type(torch.float).repeat(1,CHANNELS,1,1).to(device)
#print("stoch:{}".format(stochastic.shape))
output = input + diff * stochastic # same tensor will be applied over all the channels
# alive masking
alive = F.max_pool2d(output[:, 3:4, :, :], 3, stride=1, padding=1) > 0.1
alive = alive.type(torch.float).repeat(1,CHANNELS,1,1).to(device)
#print("alive:{}".format(alive.shape))
output *= alive
if showLayers:
Grid().load(sx[0]).show("Sobel X")
Grid().load(sy[0]).show("Sobel Y")
Grid().load(id[0]).show("Identity")
Grid().load(diff[0]).show("Differential Image")
if debug or showLayers:
Grid().load(output[0]).show("Updated Image")
return output
def forward(self, input, debug=False, showLayers=False):
# Chose random steps in between range
#n_steps = torch.randint(64, 96, (1,))
# Range of steps to grow up should be within grid dimension
min_steps = int(WIDTH - WIDTH * 0.2)
max_steps = int(WIDTH + WIDTH * 0.2)
        n_steps = torch.randint(min_steps, max_steps, (1,)).item()  # plain int for range()
output = input.detach().clone().to(device)
for _ in range(n_steps):
output = self.step(output, debug, showLayers)
return output
def train(m, origin, target, debug=False):
target = target.to(device)
output = origin.to(device)
loss_f = nn.MSELoss()
optimizer = optim.Adam(m.parameters(), lr=0.002)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=1000, gamma=0.1)
for epoch in range(10000):
optimizer.zero_grad()
output = m.forward(origin)
loss = loss_f(output[:,:4,...], target[:,:4,...]) # Loss is only calculated with RGBA
loss.backward()
        torch.nn.utils.clip_grad_norm_(m.parameters(), 1)  # prevent exploding gradients
optimizer.step()
scheduler.step()
if epoch % 100 == 0:
Grid().load(output[0]).show("Epoch {}".format(epoch))
print("Epoch:{} MSE_Loss:{}".format(epoch, loss))
###
model = CAModel().to(device)
print("model: {} over device={}".format(model, device))
img_target = Grid().default().image
Grid().load(img_target).show("Target")
# Initialize origin image
img_orig = torch.zeros(CHANNELS, WIDTH, HEIGHT)
img_orig[3:, WIDTH//2, HEIGHT//2] = 1.0
# Adding batch dimension
img_orig = img_orig.unsqueeze(0).repeat(BATCH_LEN, 1 , 1, 1)
img_target = img_target.unsqueeze(0).repeat(BATCH_LEN, 1 , 1, 1)
train(model, img_orig, img_target)
| samuxiii/prototypes | cellauto/CellAutomata.py | Python | mit | 5,230 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.14 on 2018-07-19 07:52
from __future__ import absolute_import, unicode_literals
from django.db import migrations, models
import django.utils.timezone
import model_utils.fields
import opaque_keys.edx.django.models
class Migration(migrations.Migration):
dependencies = [
('video_config', '0004_transcriptmigrationsetting_command_run'),
]
operations = [
migrations.CreateModel(
name='MigrationEnqueuedCourse',
fields=[
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
('course_id', opaque_keys.edx.django.models.CourseKeyField(db_index=True, max_length=255, primary_key=True, serialize=False)),
('command_run', models.PositiveIntegerField(default=0)),
],
options={
'abstract': False,
},
),
migrations.AddField(
model_name='transcriptmigrationsetting',
name='batch_size',
field=models.PositiveIntegerField(default=0),
),
]
| ESOedX/edx-platform | openedx/core/djangoapps/video_config/migrations/0005_auto_20180719_0752.py | Python | agpl-3.0 | 1,315 |
# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------
# drawElements Quality Program utilities
# --------------------------------------
#
# Copyright 2016 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#-------------------------------------------------------------------------
import re
import math
import random
PREAMBLE = """
# WARNING: This file is auto-generated. Do NOT modify it manually, but rather
# modify the generating script file. Otherwise changes will be lost!
"""[1:]
class CaseGroup:
def __init__(self, name, description, children):
self.name = name
self.description = description
self.children = children
class ShaderCase:
def __init__(self):
pass
g_processedCases = {}
def indentTextBlock(text, indent):
indentStr = indent * "\t"
lines = text.split("\n")
lines = [indentStr + line for line in lines]
lines = [ ["", line][line.strip() != ""] for line in lines]
return "\n".join(lines)
def writeCase(f, case, indent, prefix):
print "\t%s" % (prefix + case.name)
if isinstance(case, CaseGroup):
f.write(indentTextBlock('group %s "%s"\n\n' % (case.name, case.description), indent))
for child in case.children:
writeCase(f, child, indent + 1, prefix + case.name + ".")
f.write(indentTextBlock("\nend # %s\n" % case.name, indent))
else:
# \todo [petri] Fix hack.
fullPath = prefix + case.name
assert (fullPath not in g_processedCases)
g_processedCases[fullPath] = None
f.write(indentTextBlock(str(case) + "\n", indent))
def writeAllCases(fileName, caseList):
# Write all cases to file.
print " %s.." % fileName
f = file(fileName, "wb")
f.write(PREAMBLE + "\n")
for case in caseList:
writeCase(f, case, 0, "")
f.close()
print "done! (%d cases written)" % len(g_processedCases)
# Template operations.
def genValues(inputs, outputs):
res = []
for (name, values) in inputs:
res.append("input %s = [ %s ];" % (name, " | ".join([str(v) for v in values]).lower()))
for (name, values) in outputs:
res.append("output %s = [ %s ];" % (name, " | ".join([str(v) for v in values]).lower()))
return ("\n".join(res))
def fillTemplate(template, params):
s = template
for (key, value) in params.items():
m = re.search(r"^(\s*)\$\{\{%s\}\}$" % key, s, re.M)
if m is not None:
start = m.start(0)
end = m.end(0)
ws = m.group(1)
if value is not None:
repl = "\n".join(["%s%s" % (ws, line) for line in value.split("\n")])
s = s[:start] + repl + s[end:]
else:
s = s[:start] + s[end+1:] # drop the whole line
else:
s = s.replace("${{%s}}" % key, value)
return s
# Return shuffled version of list
def shuffled(lst):
tmp = lst[:]
random.shuffle(tmp)
return tmp
def repeatToLength(lst, toLength):
return (toLength / len(lst)) * lst + lst[: toLength % len(lst)]
# Helpers to convert a list of Scalar/Vec values into another type.
def toFloat(lst): return [Scalar(float(v.x)) for v in lst]
def toInt(lst): return [Scalar(int(v.x)) for v in lst]
def toBool(lst): return [Scalar(bool(v.x)) for v in lst]
def toVec4(lst): return [v.toFloat().toVec4() for v in lst]
def toVec3(lst): return [v.toFloat().toVec3() for v in lst]
def toVec2(lst): return [v.toFloat().toVec2() for v in lst]
def toIVec4(lst): return [v.toInt().toVec4() for v in lst]
def toIVec3(lst): return [v.toInt().toVec3() for v in lst]
def toIVec2(lst): return [v.toInt().toVec2() for v in lst]
def toBVec4(lst): return [v.toBool().toVec4() for v in lst]
def toBVec3(lst): return [v.toBool().toVec3() for v in lst]
def toBVec2(lst): return [v.toBool().toVec2() for v in lst]
def toMat2(lst): return [v.toMat2() for v in lst]
def toMat3(lst): return [v.toMat3() for v in lst]
def toMat4(lst): return [v.toMat4() for v in lst]
# Random value generation.
class GenRandom:
def __init__(self):
pass
def uniformVec4(self, count, mn, mx):
ret = [Vec4(random.uniform(mn, mx), random.uniform(mn, mx), random.uniform(mn, mx), random.uniform(mn, mx)) for x in xrange(count)]
ret[0].x = mn
ret[1].x = mx
ret[2].x = (mn + mx) * 0.5
return ret
def uniformBVec4(self, count):
ret = [Vec4(random.random() >= 0.5, random.random() >= 0.5, random.random() >= 0.5, random.random() >= 0.5) for x in xrange(count)]
ret[0].x = True
ret[1].x = False
return ret
# def uniform(self,
# Math operating on Scalar/Vector types.
def glslSign(a): return 0.0 if (a == 0) else +1.0 if (a > 0.0) else -1.0
def glslMod(x, y): return x - y*math.floor(x/y)
def glslClamp(x, mn, mx): return mn if (x < mn) else mx if (x > mx) else x
class GenMath:
@staticmethod
def unary(func): return lambda val: val.applyUnary(func)
@staticmethod
def binary(func): return lambda a, b: (b.expandVec(a)).applyBinary(func, a.expandVec(b))
@staticmethod
def frac(val): return val.applyUnary(lambda x: x - math.floor(x))
@staticmethod
def exp2(val): return val.applyUnary(lambda x: math.pow(2.0, x))
@staticmethod
def log2(val): return val.applyUnary(lambda x: math.log(x, 2.0))
@staticmethod
def rsq(val): return val.applyUnary(lambda x: 1.0 / math.sqrt(x))
@staticmethod
def sign(val): return val.applyUnary(glslSign)
@staticmethod
def isEqual(a, b): return Scalar(a.isEqual(b))
@staticmethod
def isNotEqual(a, b): return Scalar(not a.isEqual(b))
@staticmethod
def step(a, b): return (b.expandVec(a)).applyBinary(lambda edge, x: [1.0, 0.0][x < edge], a.expandVec(b))
@staticmethod
def length(a): return a.length()
@staticmethod
def distance(a, b): return a.distance(b)
@staticmethod
def dot(a, b): return a.dot(b)
@staticmethod
def cross(a, b): return a.cross(b)
@staticmethod
def normalize(a): return a.normalize()
@staticmethod
def boolAny(a): return a.boolAny()
@staticmethod
def boolAll(a): return a.boolAll()
@staticmethod
def boolNot(a): return a.boolNot()
# ..
class Scalar:
def __init__(self, x):
self.x = x
def applyUnary(self, func): return Scalar(func(self.x))
def applyBinary(self, func, other): return Scalar(func(self.x, other.x))
def isEqual(self, other): assert isinstance(other, Scalar); return (self.x == other.x)
def expandVec(self, val): return val
def toScalar(self): return Scalar(self.x)
def toVec2(self): return Vec2(self.x, self.x)
def toVec3(self): return Vec3(self.x, self.x, self.x)
def toVec4(self): return Vec4(self.x, self.x, self.x, self.x)
def toMat2(self): return self.toVec2().toMat2()
def toMat3(self): return self.toVec3().toMat3()
def toMat4(self): return self.toVec4().toMat4()
def toFloat(self): return Scalar(float(self.x))
def toInt(self): return Scalar(int(self.x))
def toBool(self): return Scalar(bool(self.x))
def getNumScalars(self): return 1
def getScalars(self): return [self.x]
def typeString(self):
if isinstance(self.x, bool):
return "bool"
elif isinstance(self.x, int):
return "int"
elif isinstance(self.x, float):
return "float"
else:
assert False
def vec4Swizzle(self):
return ""
def __str__(self):
return "%s" % self.x
def length(self):
return Scalar(abs(self.x))
def distance(self, v):
assert isinstance(v, Scalar)
return Scalar(abs(self.x - v.x))
def dot(self, v):
assert isinstance(v, Scalar)
return Scalar(self.x * v.x)
def normalize(self):
return Scalar(glslSign(self.x))
def __neg__(self):
return Scalar(-self.x)
def __add__(self, val):
assert isinstance(val, Scalar)
return Scalar(self.x + val.x)
def __sub__(self, val):
return self + (-val)
def __mul__(self, val):
if isinstance(val, Scalar):
return Scalar(self.x * val.x)
elif isinstance(val, Vec2):
return Vec2(self.x * val.x, self.x * val.y)
elif isinstance(val, Vec3):
return Vec3(self.x * val.x, self.x * val.y, self.x * val.z)
elif isinstance(val, Vec4):
return Vec4(self.x * val.x, self.x * val.y, self.x * val.z, self.x * val.w)
else:
assert False
def __div__(self, val):
if isinstance(val, Scalar):
return Scalar(self.x / val.x)
elif isinstance(val, Vec2):
return Vec2(self.x / val.x, self.x / val.y)
elif isinstance(val, Vec3):
return Vec3(self.x / val.x, self.x / val.y, self.x / val.z)
elif isinstance(val, Vec4):
return Vec4(self.x / val.x, self.x / val.y, self.x / val.z, self.x / val.w)
else:
assert False
class Vec:
@staticmethod
def fromScalarList(lst):
assert (len(lst) >= 1 and len(lst) <= 4)
if (len(lst) == 1): return Scalar(lst[0])
elif (len(lst) == 2): return Vec2(lst[0], lst[1])
elif (len(lst) == 3): return Vec3(lst[0], lst[1], lst[2])
else: return Vec4(lst[0], lst[1], lst[2], lst[3])
def isEqual(self, other):
assert isinstance(other, Vec);
return (self.getScalars() == other.getScalars())
def length(self):
return Scalar(math.sqrt(self.dot(self).x))
def normalize(self):
return self * Scalar(1.0 / self.length().x)
def swizzle(self, indexList):
inScalars = self.getScalars()
outScalars = map(lambda ndx: inScalars[ndx], indexList)
return Vec.fromScalarList(outScalars)
def __init__(self):
pass
class Vec2(Vec):
def __init__(self, x, y):
assert(x.__class__ == y.__class__)
self.x = x
self.y = y
def applyUnary(self, func): return Vec2(func(self.x), func(self.y))
def applyBinary(self, func, other): return Vec2(func(self.x, other.x), func(self.y, other.y))
def expandVec(self, val): return val.toVec2()
def toScalar(self): return Scalar(self.x)
def toVec2(self): return Vec2(self.x, self.y)
def toVec3(self): return Vec3(self.x, self.y, 0.0)
def toVec4(self): return Vec4(self.x, self.y, 0.0, 0.0)
def toMat2(self): return Mat2(float(self.x), 0.0, 0.0, float(self.y));
def toFloat(self): return Vec2(float(self.x), float(self.y))
def toInt(self): return Vec2(int(self.x), int(self.y))
def toBool(self): return Vec2(bool(self.x), bool(self.y))
def getNumScalars(self): return 2
def getScalars(self): return [self.x, self.y]
def typeString(self):
if isinstance(self.x, bool):
return "bvec2"
elif isinstance(self.x, int):
return "ivec2"
elif isinstance(self.x, float):
return "vec2"
else:
assert False
def vec4Swizzle(self):
return ".xyxy"
def __str__(self):
if isinstance(self.x, bool):
return "bvec2(%s, %s)" % (str(self.x).lower(), str(self.y).lower())
elif isinstance(self.x, int):
return "ivec2(%i, %i)" % (self.x, self.y)
elif isinstance(self.x, float):
return "vec2(%s, %s)" % (self.x, self.y)
else:
assert False
def distance(self, v):
assert isinstance(v, Vec2)
return (self - v).length()
def dot(self, v):
assert isinstance(v, Vec2)
return Scalar(self.x*v.x + self.y*v.y)
def __neg__(self):
return Vec2(-self.x, -self.y)
def __add__(self, val):
if isinstance(val, Scalar):
			return Vec2(self.x + val.x, self.y + val.x)
elif isinstance(val, Vec2):
return Vec2(self.x + val.x, self.y + val.y)
else:
assert False
def __sub__(self, val):
return self + (-val)
def __mul__(self, val):
if isinstance(val, Scalar):
val = val.toVec2()
assert isinstance(val, Vec2)
return Vec2(self.x * val.x, self.y * val.y)
def __div__(self, val):
if isinstance(val, Scalar):
return Vec2(self.x / val.x, self.y / val.x)
else:
assert isinstance(val, Vec2)
return Vec2(self.x / val.x, self.y / val.y)
def boolAny(self): return Scalar(self.x or self.y)
def boolAll(self): return Scalar(self.x and self.y)
def boolNot(self): return Vec2(not self.x, not self.y)
class Vec3(Vec):
def __init__(self, x, y, z):
assert((x.__class__ == y.__class__) and (x.__class__ == z.__class__))
self.x = x
self.y = y
self.z = z
def applyUnary(self, func): return Vec3(func(self.x), func(self.y), func(self.z))
def applyBinary(self, func, other): return Vec3(func(self.x, other.x), func(self.y, other.y), func(self.z, other.z))
def expandVec(self, val): return val.toVec3()
def toScalar(self): return Scalar(self.x)
def toVec2(self): return Vec2(self.x, self.y)
def toVec3(self): return Vec3(self.x, self.y, self.z)
def toVec4(self): return Vec4(self.x, self.y, self.z, 0.0)
def toMat3(self): return Mat3(float(self.x), 0.0, 0.0, 0.0, float(self.y), 0.0, 0.0, 0.0, float(self.z));
def toFloat(self): return Vec3(float(self.x), float(self.y), float(self.z))
def toInt(self): return Vec3(int(self.x), int(self.y), int(self.z))
def toBool(self): return Vec3(bool(self.x), bool(self.y), bool(self.z))
def getNumScalars(self): return 3
def getScalars(self): return [self.x, self.y, self.z]
def typeString(self):
if isinstance(self.x, bool):
return "bvec3"
elif isinstance(self.x, int):
return "ivec3"
elif isinstance(self.x, float):
return "vec3"
else:
assert False
def vec4Swizzle(self):
return ".xyzx"
def __str__(self):
if isinstance(self.x, bool):
return "bvec3(%s, %s, %s)" % (str(self.x).lower(), str(self.y).lower(), str(self.z).lower())
elif isinstance(self.x, int):
return "ivec3(%i, %i, %i)" % (self.x, self.y, self.z)
elif isinstance(self.x, float):
return "vec3(%s, %s, %s)" % (self.x, self.y, self.z)
else:
assert False
def distance(self, v):
assert isinstance(v, Vec3)
return (self - v).length()
def dot(self, v):
assert isinstance(v, Vec3)
return Scalar(self.x*v.x + self.y*v.y + self.z*v.z)
def cross(self, v):
assert isinstance(v, Vec3)
return Vec3(self.y*v.z - v.y*self.z,
self.z*v.x - v.z*self.x,
self.x*v.y - v.x*self.y)
def __neg__(self):
return Vec3(-self.x, -self.y, -self.z)
def __add__(self, val):
if isinstance(val, Scalar):
			return Vec3(self.x + val.x, self.y + val.x, self.z + val.x)
elif isinstance(val, Vec3):
return Vec3(self.x + val.x, self.y + val.y, self.z + val.z)
else:
assert False
def __sub__(self, val):
return self + (-val)
def __mul__(self, val):
if isinstance(val, Scalar):
val = val.toVec3()
assert isinstance(val, Vec3)
return Vec3(self.x * val.x, self.y * val.y, self.z * val.z)
def __div__(self, val):
if isinstance(val, Scalar):
return Vec3(self.x / val.x, self.y / val.x, self.z / val.x)
else:
assert False
def boolAny(self): return Scalar(self.x or self.y or self.z)
def boolAll(self): return Scalar(self.x and self.y and self.z)
def boolNot(self): return Vec3(not self.x, not self.y, not self.z)
class Vec4(Vec):
def __init__(self, x, y, z, w):
assert((x.__class__ == y.__class__) and (x.__class__ == z.__class__) and (x.__class__ == w.__class__))
self.x = x
self.y = y
self.z = z
self.w = w
def applyUnary(self, func): return Vec4(func(self.x), func(self.y), func(self.z), func(self.w))
def applyBinary(self, func, other): return Vec4(func(self.x, other.x), func(self.y, other.y), func(self.z, other.z), func(self.w, other.w))
def expandVec(self, val): return val.toVec4()
def toScalar(self): return Scalar(self.x)
def toVec2(self): return Vec2(self.x, self.y)
def toVec3(self): return Vec3(self.x, self.y, self.z)
def toVec4(self): return Vec4(self.x, self.y, self.z, self.w)
def toMat2(self): return Mat2(float(self.x), float(self.y), float(self.z), float(self.w))
def toMat4(self): return Mat4(float(self.x), 0.0, 0.0, 0.0, 0.0, float(self.y), 0.0, 0.0, 0.0, 0.0, float(self.z), 0.0, 0.0, 0.0, 0.0, float(self.w));
def toFloat(self): return Vec4(float(self.x), float(self.y), float(self.z), float(self.w))
def toInt(self): return Vec4(int(self.x), int(self.y), int(self.z), int(self.w))
def toBool(self): return Vec4(bool(self.x), bool(self.y), bool(self.z), bool(self.w))
def getNumScalars(self): return 4
def getScalars(self): return [self.x, self.y, self.z, self.w]
def typeString(self):
if isinstance(self.x, bool):
return "bvec4"
elif isinstance(self.x, int):
return "ivec4"
elif isinstance(self.x, float):
return "vec4"
else:
assert False
def vec4Swizzle(self):
return ""
def __str__(self):
if isinstance(self.x, bool):
return "bvec4(%s, %s, %s, %s)" % (str(self.x).lower(), str(self.y).lower(), str(self.z).lower(), str(self.w).lower())
elif isinstance(self.x, int):
return "ivec4(%i, %i, %i, %i)" % (self.x, self.y, self.z, self.w)
elif isinstance(self.x, float):
return "vec4(%s, %s, %s, %s)" % (self.x, self.y, self.z, self.w)
else:
assert False
def distance(self, v):
assert isinstance(v, Vec4)
return (self - v).length()
def dot(self, v):
assert isinstance(v, Vec4)
return Scalar(self.x*v.x + self.y*v.y + self.z*v.z + self.w*v.w)
def __neg__(self):
return Vec4(-self.x, -self.y, -self.z, -self.w)
def __add__(self, val):
if isinstance(val, Scalar):
			return Vec4(self.x + val.x, self.y + val.x, self.z + val.x, self.w + val.x)
elif isinstance(val, Vec4):
return Vec4(self.x + val.x, self.y + val.y, self.z + val.z, self.w + val.w)
else:
assert False
def __sub__(self, val):
return self + (-val)
def __mul__(self, val):
if isinstance(val, Scalar):
val = val.toVec4()
assert isinstance(val, Vec4)
return Vec4(self.x * val.x, self.y * val.y, self.z * val.z, self.w * val.w)
def __div__(self, val):
if isinstance(val, Scalar):
return Vec4(self.x / val.x, self.y / val.x, self.z / val.x, self.w / val.x)
else:
assert False
def boolAny(self): return Scalar(self.x or self.y or self.z or self.w)
def boolAll(self): return Scalar(self.x and self.y and self.z and self.w)
def boolNot(self): return Vec4(not self.x, not self.y, not self.z, not self.w)
# \note Column-major storage.
class Mat:
def __init__ (self, numCols, numRows, scalars):
assert len(scalars) == numRows*numCols
self.numCols = numCols
self.numRows = numRows
self.scalars = scalars
@staticmethod
def identity (numCols, numRows):
scalars = []
for col in range(0, numCols):
for row in range(0, numRows):
scalars.append(1.0 if col == row else 0.0)
return Mat(numCols, numRows, scalars)
def get (self, colNdx, rowNdx):
assert 0 <= colNdx and colNdx < self.numCols
assert 0 <= rowNdx and rowNdx < self.numRows
return self.scalars[colNdx*self.numRows + rowNdx]
def set (self, colNdx, rowNdx, scalar):
assert 0 <= colNdx and colNdx < self.numCols
assert 0 <= rowNdx and rowNdx < self.numRows
self.scalars[colNdx*self.numRows + rowNdx] = scalar
def toMatrix (self, numCols, numRows):
res = Mat.identity(numCols, numRows)
for col in range(0, min(self.numCols, numCols)):
for row in range(0, min(self.numRows, numRows)):
res.set(col, row, self.get(col, row))
return res
def toMat2 (self): return self.toMatrix(2, 2)
def toMat2x3 (self): return self.toMatrix(2, 3)
def toMat2x4 (self): return self.toMatrix(2, 4)
def toMat3x2 (self): return self.toMatrix(3, 2)
def toMat3 (self): return self.toMatrix(3, 3)
def toMat3x4 (self): return self.toMatrix(3, 4)
def toMat4x2 (self): return self.toMatrix(4, 2)
def toMat4x3 (self): return self.toMatrix(4, 3)
def toMat4 (self): return self.toMatrix(4, 4)
def typeString(self):
if self.numRows == self.numCols:
return "mat%d" % self.numRows
else:
return "mat%dx%d" % (self.numCols, self.numRows)
def __str__(self):
return "%s(%s)" % (self.typeString(), ", ".join([str(s) for s in self.scalars]))
def isTypeEqual (self, other):
return isinstance(other, Mat) and self.numRows == other.numRows and self.numCols == other.numCols
def isEqual(self, other):
assert self.isTypeEqual(other)
return (self.scalars == other.scalars)
def compMul(self, val):
assert self.isTypeEqual(val)
		return Mat(self.numCols, self.numRows, [self.scalars[i] * val.scalars[i] for i in range(self.numRows*self.numCols)])
class Mat2(Mat):
def __init__(self, m00, m01, m10, m11):
Mat.__init__(self, 2, 2, [m00, m10, m01, m11])
class Mat3(Mat):
def __init__(self, m00, m01, m02, m10, m11, m12, m20, m21, m22):
Mat.__init__(self, 3, 3, [m00, m10, m20,
m01, m11, m21,
m02, m12, m22])
class Mat4(Mat):
def __init__(self, m00, m01, m02, m03, m10, m11, m12, m13, m20, m21, m22, m23, m30, m31, m32, m33):
Mat.__init__(self, 4, 4, [m00, m10, m20, m30,
m01, m11, m21, m31,
m02, m12, m22, m32,
m03, m13, m23, m33])
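# Small illustrative example (not used by the generators themselves); the value
# classes compose as plain Python objects:
#
#   v = Vec3(1.0, 2.0, 3.0)
#   print v.typeString()        # "vec3"
#   print GenMath.dot(v, v).x   # 14.0
#   print Mat.identity(2, 2)    # "mat2(1.0, 0.0, 0.0, 1.0)"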
| chadversary/deqp | modules/gles2/scripts/genutil.py | Python | apache-2.0 | 20,521 |
""" The Watchdog Factory instantiates a given Watchdog based on a quick
determination of the local operating system.
"""
__RCSID__ = "$Id$"
import re
import platform
from DIRAC import S_OK, S_ERROR, gLogger
class WatchdogFactory( object ):
#############################################################################
def __init__(self):
""" Standard constructor
"""
self.version = platform.uname()
self.log = gLogger.getSubLogger( 'WatchdogFactory' )
self.watchDogsLocation = 'DIRAC.WorkloadManagementSystem.JobWrapper'
#############################################################################
def getWatchdog( self, pid, thread, spObject, jobcputime, memoryLimit, processors = 1 ):
""" This method returns the CE instance corresponding to the local OS. The Linux watchdog is returned by default.
"""
if re.search( 'Darwin', self.version[0] ):
localOS = 'Mac'
self.log.info( 'WatchdogFactory will create Watchdog%s instance' % ( localOS ) )
# elif re.search( 'Windows', self.version[0] ):
# localOS = 'Windows'
# self.log.info( 'WatchdogFactory will create Watchdog%s instance' % ( localOS ) )
else:
localOS = 'Linux'
self.log.info( 'WatchdogFactory will create Watchdog%s instance' % ( localOS ) )
subClassName = "Watchdog%s" % ( localOS )
try:
wdModule = __import__( self.watchDogsLocation + '.%s' % subClassName, globals(), locals(), [subClassName] )
except ImportError as e:
self.log.exception( "Failed to import module" + self.watchDogsLocation + '.%s' % subClassName + '.%s' % subClassName + ': ' + str(e) )
return S_ERROR( "Failed to import module" )
try:
wd_o = getattr( wdModule, subClassName )( pid, thread, spObject, jobcputime, memoryLimit, processors )
return S_OK( wd_o )
except AttributeError as e:
self.log.exception( "Failed to create %s(): %s." % ( subClassName, e ) )
return S_ERROR( "Failed to create object" )
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#
| Andrew-McNab-UK/DIRAC | WorkloadManagementSystem/JobWrapper/WatchdogFactory.py | Python | gpl-3.0 | 2,076 |
"""
Read data from Mi Flora plant sensor.
Reading from the sensor is handled by the command line tool "gatttool" that
is part of bluez on Linux.
No other operating systems are supported at the moment
inspired by # https://github.com/open-homeautomation/miflora
usage:
cd [path plugin ou copy de ce script]/jeedom_MiFlora/resources
/usr/bin/python3 ./getMiFloraDataPy3.py C4:7C:8D:60:E8:21 2.7.0 0 hci0 high
"""
# from struct import *
# from datetime import datetime, timedelta
from threading import Lock
import sys
import re
import subprocess
import logging
import time
logger = logging.getLogger(__name__)
lock = Lock()
def parse_data(data):
"""
@param: data - result of gatttool
When debug mode is on, display gatttool result in human readable format
"""
print("MI_TEMPERATURE=", float(data[1] * 256 + data[0]) / 10)
print("MI_MOISTURE=", data[7])
print("MI_LIGHT=", data[4] * 256 + data[3])
print("MI_CONDUCTIVITY=", data[9] * 256 + data[8])
return data
# pylint: disable=too-many-arguments
def write_ble(mac, handle, value, write_adpater="hci0", \
write_security="high", retries=3, timeout=8):
"""
    Write a value to a BLE characteristic
@param: mac - MAC address in format XX:XX:XX:XX:XX:XX
@param: handle - BLE characteristics handle in format 0xXX
@param: value - value to write to the handle
@param: timeout - timeout in seconds
"""
global lock # pylint: disable=global-statement
attempt = 0
delay = 2
# print("write_ble")
while attempt <= retries:
return_value = None
try:
cmd = "gatttool --adapter={} --device={} --char-write-req -a {} -n {} \
--sec-level={} ".format(write_adpater, mac, handle, value, write_security)
#cmd = "gatttool --device={} --char-read -a {} 2>/dev/null".format(mac, handle)
with lock:
result = subprocess.check_output(cmd, shell=True, timeout=timeout)
result = result.decode("utf-8").strip(' \n\t')
# print("write_ble - Got ",result," from gatttool")
return return_value
except subprocess.CalledProcessError as err:
print("Error ", err.returncode, " from gatttool (", err.output, ")")
return_value = -1
except subprocess.TimeoutExpired:
print("Error - Timeout while waiting for gatttool output")
return_value = -1
attempt += 1
# print("Waiting for ",delay," seconds before retrying")
if attempt < retries:
time.sleep(delay)
delay *= 2
return return_value
def read_ble(mac, handle, read_adpater="hci0", read_security="high", \
read_flora_debug=0, retries=3, timeout=8):
"""
Read from a BLE address
@param: mac - MAC address in format XX:XX:XX:XX:XX:XX
@param: handle - BLE characteristics handle in format 0xXX
@param: timeout - timeout in seconds
"""
global lock # pylint: disable=global-statement
attempt = 0
delay = 2
# print("read_ble")
while attempt <= retries:
return_value = None
try:
cmd = "gatttool --adapter={} --device={} --char-read -a {} \
--sec-level={} 2>/dev/null".format(read_adpater, mac, handle, read_security)
with lock:
result = subprocess.check_output(cmd,
shell=True,
timeout=timeout)
result = result.decode("utf-8").strip(' \n\t')
# print("read_ble - Got ",result, " from gatttool")
# Parse the output
res = re.search("( [0-9a-fA-F][0-9a-fA-F])+", result)
if res:
if read_flora_debug == "1":
return [int(x, 16) for x in res.group(0).split()]
return result
except subprocess.CalledProcessError as err:
print("Error ", err.returncode, " from gatttool (", err.output, ")")
return_value = -1
except subprocess.TimeoutExpired:
print("Error - Timeout while waiting for gatttool output")
return_value = -1
attempt += 1
# print("Waiting for ",delay," seconds before retrying")
if attempt < retries:
time.sleep(delay)
delay *= 2
return return_value
#address = "C4:7C:8D:60:E8:21"
#Read battery and firmware version attribute
#macAdd="C4:7C:8D:60:E8:21"
mac_add = sys.argv[1]
handlerd = "0x0035"
handlewr = "0x0033"
firmware = sys.argv[2]
flora_debug = sys.argv[3]
adpater = sys.argv[4]
security = sys.argv[5]
print("start ")
res_write = 0
if firmware != "2.6.2":
res_write = write_ble(mac_add, handlewr, "A01F", adpater, security, 2)
if res_write != -1:
result_flora = read_ble(mac_add, handlerd, adpater, security, flora_debug)
if flora_debug == "1":
print("read_ble:", parse_data(result_flora))
else:
result_flora = -1
if flora_debug == "0":
print(result_flora) # pylint: disable=superfluous-parens
| rjullien/jeedom_MiFlora | resources/GetMiFloraDataPy3.py | Python | gpl-2.0 | 5,040 |
# encoding: utf8
from sympy import Add
from uncertainties import __version_info__ as uncert_version
from uncertainties import ufloat, ufloat_fromstr
from uncertainties.core import Variable, AffineScalarFunc
from functools import reduce  # Series.__pow__ uses reduce (required on Python 3)
if uncert_version < (3, 0):
raise Warning("Your version of uncertanties is not supported. Try\n"
"$ sudo pip install uncertainties --upgrade")
class Series:
"""
The class that provides the expansion in powers of g up to the n-th order,
taking the error into account.
"""
def __init__(self, n, d={0: 0}, name='g', analytic=None):
"""
Example:
`z2 = Series(3, {0: ufloat(-1, 0.4), 1: ufloat(-2, .004), 2: ufloat(999, .1)})`
will give:
Z₂(g) = -1.0(4) - 2.000(4) g + 999.00(10) g²
:param n: number of the "known" orders, `int`
:param d: dictionary with k=powers, v=`ufloat`s
:param name: name of the series variable, arbitrary character, default is `'g'`
:param analytic: boolean
"""
self.n = n
self.gSeries = d
self.name = name
for k, v in d.items():
if isinstance(v, AffineScalarFunc):
self.gSeries[k] = v
elif isinstance(v, (list, tuple)):
self.gSeries[k] = ufloat(v[0], v[1])
elif isinstance(v, str):
self.gSeries[k] = ufloat_fromstr(v)
elif isinstance(v, int):
self.gSeries[k] = v
self.analytic = True
else:
raise TypeError("Series constructor warning: Type(v)={}".format(type(v)))
if analytic is not None:
# XXX: if defined explicitly:
self.analytic = bool(analytic)
else:
# XXX: if all values are ints assume analytic
self.analytic = all(map(lambda x: type(x) == int, d.values()))
for i in range(0, n):
if i not in d.keys():
if self.analytic:
self.gSeries[i] = 0
else:
self.gSeries[i] = ufloat(0, 0)
def __lt__(self, other):
return len(self.gSeries) < len(other.gSeries)
def __add__(self, other):
tmp = dict(self.gSeries)
# print "From __add__:",self.analytic," + ",other.pprint() ## FIXME
if isinstance(other, Series):
stop = min(self.n, other.n)
if stop == 0:
stop = max(self.n, other.n)
for g in other.gSeries.keys():
if g <= stop:
try:
tmp[g] += other.gSeries[g]
except KeyError:
tmp[g] = other.gSeries[g]
elif isinstance(other, (int, float)):
tmp[0] += other
else:
print("{} {}".format(type(self), type(other)))
raise NotImplementedError
return Series(len(tmp), tmp, name=self.name, analytic=self.analytic)
def __radd__(self, other):
return self + other
def __sub__(self, other):
return self + (-1) * other
def __mul__(self, other):
tmp = {}
if isinstance(other, Series):
stop = min(self.n, other.n)
for i in self.gSeries.keys():
for j in other.gSeries.keys():
if (i + j) <= stop:
try:
tmp[i + j] += self.gSeries[i] * other.gSeries[j]
except KeyError:
tmp[i + j] = self.gSeries[i] * other.gSeries[j]
res = Series(max(self.n, other.n), tmp, name=self.name, analytic=self.analytic)
elif isinstance(other, (int, float, Variable, AffineScalarFunc, Add)):
for i in self.gSeries.keys():
tmp[i] = self.gSeries[i] * other
res = Series(self.n, tmp, name=self.name, analytic=self.analytic)
elif other == 0 or sum(map(lambda v: v == 0, self.gSeries.values())) == len(self.gSeries):
return 0
# elif isinstance(other, sympy.core.add.Add):
# print "\n\nself=",self
# print "other=",other
# return 0
else:
print("\nself = {}, type(self) = {}".format(self.gSeries, type(self)))
print("\nother = {}, type(other) = {}".format(other, type(other)))
raise NotImplementedError
return res
def __rmul__(self, other):
return self * other
def __neg__(self):
return self * (-1)
def __invert__(self):
""" Z.__invert__() = 1/Z
1/(1+x)=Sum_i (-1)^i x^i
"""
res = Series(self.n, {}, self.name, analytic=self.analytic)
if self.gSeries[0] == 1:
c = 1.
normed_series = self + Series(self.n, {0: -1}, self.name, analytic=self.analytic) # <-- it's -1!
elif self.gSeries[0] != 0:
c = 1. / self.gSeries[0]
normed_series = self / self.gSeries[0] + Series(self.n, {0: -1}, self.name,
analytic=self.analytic) # <-- it's -1!
else:
raise NotImplementedError("no constant term in series: %s" % self.gSeries)
# if self.gSeries[0] == 1:
# tmp = Series(self.gSeries[1:], n = self.n-1, name=self.name)
# for i in range(tmp.n):
for i in range(len(self.gSeries)):
res += (-1) ** i * normed_series ** i
return res * c
def __div__(self, other):
""" For now we assume all the powers of g as non-negative
"""
if isinstance(other, Series):
return self * other.__invert__()
elif isinstance(other, (int, float, Variable, AffineScalarFunc)):
return self * (1. / other)
else:
raise NotImplementedError("type: {}; {}".format(type(other), other.__repr__()))
    def __rdiv__(self, other):
        return other * self.__invert__()
    # Python 3 dispatches the / operator to __truediv__/__rtruediv__.
    __truediv__ = __div__
    __rtruediv__ = __rdiv__
def __pow__(self, power, modulo=None):
if isinstance(power, int) and power > 1:
return reduce(lambda x, y: x * y, [self] * power)
elif isinstance(power, int) and power == 1:
return self
elif isinstance(power, int) and power == 0:
if self.analytic:
return Series(self.n, {0: 1}, self.name, analytic=self.analytic)
else:
return Series(self.n, {0: ufloat(1, 0)}, self.name, analytic=self.analytic)
else:
print("power = {}, type(power) = {}".format(power, type(power)))
raise NotImplementedError
def diff(self):
"""
Differentiation of the polynomial in g
"""
res = {}
for i in range(len(self.gSeries) - 1):
res[i] = (i + 1) * self.gSeries[i + 1]
return Series(self.n, res, analytic=self.analytic)
def __repr__(self):
return self.pprint()
## FIXME
def _approx(self, other):
for k, v in self.gSeries.items():
if v != other.gSeries[k]:
return False
return True
def __str__(self):
"""
The result is truncated according to the error, indicating the accuracy of the least significant digit
"""
res = ''
for g, c in self.gSeries.items():
if c != 0 and g == 0 and isinstance(c, int):
res += " %d + " % (c)
elif c != 0 and g == 0:
res += " %s + " % (c.format('S'))
elif c != 0 and g <= self.n and isinstance(c, (Variable, AffineScalarFunc)):
if c.s < 1e-14:
res += "%s * %s**%s + " % (str(c.n), self.name, str(g))
else:
res += " %s * %s**%s + " % (c.format('S'), self.name, str(g))
elif c != 0 and g <= self.n and isinstance(c, (int, float)):
res += "%s * %s**%s + " % (str(c), self.name, str(g))
return res[:-3] or '0'
    def coeffs(self):
        """
        Return the numeric coefficients of the series up to order n.
        """
        return list(map(lambda x: float(x.format('S').split("(")[0]), self.gSeries.values()))[:self.n + 1]
def pprint(self):
res = ""
for g, c in self.gSeries.items():
if c != 0 and g <= self.n and not self.analytic:
res += "(%s ± %s) * %s**%s + " % (str(c.n), str(c.s), self.name, str(g))
elif c != 0 and g <= self.n and self.analytic:
try:
this_term = c.format('S')
except AttributeError:
this_term = str(c)
res += "(%s) * %s**%s + " % (this_term, self.name, str(g))
return res[:-3] or '0'
def __len__(self):
return len(self.gSeries)
def subs(self, point):
res = Series(n=self.n, d={0: ufloat(0, 0)}, name=point.name, analytic=self.analytic)
for i, c in self.gSeries.items():
res += c * (point ** i)
return res
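    # Rough sketch of what subs() computes (hypothetical values): if self is
    # 1 + 2*g and point is the series 3*h + h**2, then subs(point) expands
    # 1 + 2*(3*h + h**2) = 1 + 6*h + 2*h**2, truncated at order n.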
def save(self):
"""Save value to file"""
slov = ''
for k, v in self.gSeries.items():
slov += "%d: '%s', " % (k, v)
print("Series({}, {}, '{}')".format(self.n, slov, self.name))
if __name__ == "__main__":
Z1 = Series(1)
Z2 = Series(2, {0: ufloat(-4, 0.3), 1: ufloat(2, .002)})
print("Z1 = {}".format(Z1))
print("Z2 = {}".format(Z2))
print("Z2.diff() = {}".format(Z2.diff()))
print("Z2 - Z2 = {}".format(Z2-Z2))
print("1/Z2 = {}".format(1 / Z2))
print("Z1*Z2 = {}".format(Z1 * Z2))
print("Z2**2 = {}".format(Z2 ** 2))
| kirienko/unseries | unseries.py | Python | gpl-3.0 | 9,529 |
#!/usr/bin/python
#
# Given a game lib, generate HTML map for all components
#
import olypy.oio as oio
from olypy.oid import to_oid
import olypy.dbck as dbck
import pathlib
from jinja2 import Environment, PackageLoader, select_autoescape
from olymap.loc import build_complete_loc_dict
from olymap.ship import build_complete_ship_dict
from olymap.char import build_complete_char_dict
from olymap.item import build_complete_item_dict
from olymap.skill import build_complete_skill_dict
from olymap.storm import build_complete_storm_dict
from olymap.player import build_complete_player_dict
import olymap.utilities as u
import olymap.reports as reports
from olymap.maps import write_index, write_map_leaves, write_top_map, write_bitmap
from olymap.legacy import create_map_matrix, write_legacy_bitmap, write_legacy_top_map, write_legacy_map_leaves
env = None
def make_map(inlib, outdir, instance):
global env
env = Environment(
loader=PackageLoader('olymap', 'templates'),
autoescape=select_autoescape(['html', 'xml'])
)
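    # Layout of inst_dict below (as read from the code, so treat the field
    # names as assumptions): for each game instance, each world maps to
    # [first province id, x dimension, y dimension, optional 'Y' flag], where
    # 'Y' marks worlds that need a custom (legacy) map matrix.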
inst_dict = {'g2': {'main': [10000, 100, 100], 'hades': [24251, 76, 76, 'Y'], 'faery': [20013, 7, 938, 'Y'],
'cloudlands': [23184, 5, 5, 'Y'], 'subworld': [39167, 11, 11, 'Y']},
'g4': {'main': [10000, 80, 80], 'hades': [24000, 50, 50], 'faery': [18000, 46, 46],
'cloudlands': [30000, 5, 5], 'subworld': [32005, 6, 6]},
'qa': {'main': [10000, 10, 10], 'hades': [14000, 7, 7], 'faery': [12000, 10, 10]}}
data = oio.read_lib(inlib)
dbck.check_db(data, fix=True, checknames=True)
print('Creating custom maps (if any)')
dimensions = inst_dict[instance]
map_matrices = {}
for world in dimensions:
world_rec = dimensions[world]
if len(world_rec) > 3:
if world_rec[3] == 'Y':
map_matrices[world] = create_map_matrix(data, inst_dict[instance][world][0])
chains = resolve_chains(data)
write_box_pages(data, chains, outdir, instance, inst_dict, map_matrices)
write_reports(data, chains, outdir)
write_maps(data, chains, outdir, instance, inst_dict, map_matrices)
def resolve_chains(data):
print('Making chains')
chains = {}
chains['pledges'] = u.resolve_all_pledges(data)
chains['prisoners'] = u.resolve_all_prisoners(data)
chains['hidden'] = u.resolve_hidden_locs(data)
chains['storms'] = u.resolve_bound_storms(data)
chains['teaches'] = u.resolve_teaches(data)
chains['child_skills'] = u.resolve_child_skills(data)
chains['skills_knowns'] = u.resolve_skills_known(data)
chains['garrisons'] = u.resolve_garrisons(data)
chains['trades'] = u.resolve_trades(data)
chains['castles'] = u.resolve_castles(data)
return chains
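# The chains dict acts as a set of precomputed cross-reference tables (pledges,
# prisoners, hidden locations, bound storms, and so on) so the page and report
# writers below do not have to rescan the whole game database for every box.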
def write_box_pages(data, chains, outdir, instance, inst_dict, map_matrices):
print('Writing box pages')
for k, v in data.items():
if u.is_loc(v):
write_loc_html(v, k, data, chains['hidden'], chains['garrisons'],
chains['trades'], outdir, instance, inst_dict, map_matrices)
elif u.is_char(v):
write_char_html(v, k, data, chains['pledges'],
chains['prisoners'], outdir, instance)
        elif u.is_player(v):
write_player_html(v, k, data, outdir)
elif u.is_item(v):
write_item_html(v, k, data, chains['trades'], outdir)
elif u.is_ship(v):
write_ship_html(v, k, data, outdir, instance, chains['pledges'], chains['prisoners'])
elif u.is_skill(v):
write_skill_html(v, k, data, outdir, chains['teaches'],
chains['child_skills'], chains['skills_knowns'])
elif u.return_kind(v) == 'storm':
write_storm_html(v, k, data, chains['storms'], outdir)
def write_reports(data, chains, outdir):
print('Writing reports')
reports.ship_report(data, outdir)
reports.player_report(data, outdir)
reports.item_report(data, chains['trades'], outdir)
reports.healing_potion_report(data, outdir)
reports.orb_report(data, outdir)
reports.projected_cast_potion_report(data, outdir)
reports.location_report(data, outdir)
reports.skill_xref_report(data, chains['teaches'], outdir)
reports.trade_report(data, chains['trades'], outdir)
reports.road_report(data, outdir)
reports.gate_report(data, outdir)
reports.character_report(data, outdir)
reports.graveyard_report(data, outdir)
reports.faeryhill_report(data, outdir)
reports.castle_report(data, outdir, chains['garrisons'])
reports.city_report(data, outdir)
reports.region_report(data, outdir)
reports.mage_report(data, outdir)
reports.priest_report(data, outdir)
reports.gold_report(data, outdir)
def write_maps(data, chains, outdir, instance, inst_dict, map_matrices):
print('Writing Maps')
# inst_dict = {'g2': {'main': [10000, 100, 100]},
# 'g4': {'main': [10000, 80, 80], 'hades': [24000, 50, 50], 'faery': [18000, 46, 46], 'cloudlands': [30000, 5, 5]},
# 'qa': {'main': [10000, 10, 10], 'hades': [14000, 7, 7], 'faery': [12000, 10, 10]}}
dimensions = inst_dict[instance]
write_index(outdir, instance, inst_dict)
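    # Worlds flagged with 'Y' appear to carry a precomputed map matrix and are
    # rendered through the legacy writers; all other worlds are treated as
    # plain rectangular grids.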
for world in dimensions:
world_rec = dimensions[world]
if len(world_rec) > 3 and world_rec[3] == 'Y':
write_legacy_bitmap(outdir,
data,
world_rec[0],
world_rec[1],
world_rec[2],
world,
map_matrices[world])
write_legacy_top_map(outdir,
world_rec[0],
world_rec[1],
world_rec[2],
world,
map_matrices[world])
write_legacy_map_leaves(data,
chains['castles'],
outdir,
world_rec[0],
world_rec[1],
world_rec[2],
world,
instance,
map_matrices[world])
else:
write_bitmap(outdir,
data,
world_rec[0],
world_rec[1],
world_rec[2],
world)
write_top_map(outdir,
world_rec[0],
world_rec[1],
world_rec[2],
world)
write_map_leaves(data,
chains['castles'],
outdir,
world_rec[0],
world_rec[1],
world_rec[2],
world,
instance)
def write_loc_html(v, k, data, hidden_chain, garrisons_chain, trade_chain, outdir, instance, inst_dict, map_matrices):
# generate loc page
outf = open(pathlib.Path(outdir).joinpath(to_oid(k) + '.html'), 'w')
template = env.get_template('loc.html')
loc = build_complete_loc_dict(k, v, data, garrisons_chain, hidden_chain, trade_chain, instance, inst_dict, map_matrices)
outf.write(template.render(loc=loc))
def write_ship_html(v, k, data, outdir, instance, pledge_chain, prisoner_chain):
# generate ship page
outf = open(pathlib.Path(outdir).joinpath(to_oid(k) + '.html'), 'w')
template = env.get_template('ship.html')
ship = build_complete_ship_dict(k, v, data, instance, pledge_chain, prisoner_chain)
outf.write(template.render(ship=ship))
def write_char_html(v, k, data, pledge_chain, prisoner_chain, outdir, instance):
# generate char page
outf = open(pathlib.Path(outdir).joinpath(to_oid(k) + '.html'), 'w')
template = env.get_template('char.html')
char = build_complete_char_dict(k, v, data, instance, pledge_chain, prisoner_chain, False)
outf.write(template.render(char=char))
def write_item_html(v, k, data, trade_chain, outdir):
# generate item page
outf = open(pathlib.Path(outdir).joinpath(to_oid(k) + '.html'), 'w')
template = env.get_template('item.html')
item = build_complete_item_dict(k, v, data, trade_chain)
outf.write(template.render(item=item))
def write_skill_html(v, k, data, outdir, teaches_chain, child_skills_chain, skills_known_chain):
# generate skill page
outf = open(pathlib.Path(outdir).joinpath(to_oid(k) + '.html'), 'w')
template = env.get_template('skill.html')
skill = build_complete_skill_dict(k, v, data, teaches_chain, child_skills_chain, skills_known_chain)
outf.write(template.render(skill=skill))
def write_storm_html(v, k, data, storm_chain, outdir):
# generate storm page
outf = open(pathlib.Path(outdir).joinpath(to_oid(k) + '.html'), 'w')
template = env.get_template('storm.html')
storm = build_complete_storm_dict(k, v, data, storm_chain)
outf.write(template.render(storm=storm))
def write_player_html(v, k, data, outdir):
# generate player page
outf = open(pathlib.Path(outdir).joinpath(to_oid(k) + '.html'), 'w')
template = env.get_template('player.html')
player = build_complete_player_dict(k, v, data)
outf.write(template.render(player=player))
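# Usage sketch (an assumed invocation, not part of this module): something like
#   make_map('path/to/game/lib', 'html-output-dir', 'g4')
# reads the game library, resolves the cross-reference chains, and writes the
# box pages, reports and maps into the output directory.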
| olympiag3/olypy | olymap/__init__.py | Python | apache-2.0 | 9,708 |
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for large portions of L{twisted.mail}.
"""
import os
import errno
import shutil
import pickle
import StringIO
import rfc822
import tempfile
import signal
import time
from hashlib import md5
from zope.interface.verify import verifyClass
from zope.interface import Interface, implements
from twisted.trial import unittest
from twisted.mail import smtp
from twisted.mail import pop3
from twisted.names import dns
from twisted.internet import protocol
from twisted.internet import defer
from twisted.internet.defer import Deferred
from twisted.internet import reactor
from twisted.internet import interfaces
from twisted.internet import task
from twisted.internet.error import DNSLookupError, CannotListenError
from twisted.internet.error import ProcessDone, ProcessTerminated
from twisted.internet import address
from twisted.python import failure
from twisted.python.filepath import FilePath
from twisted.python import log
from twisted.mail.relaymanager import _AttemptManager
from twisted.test.proto_helpers import MemoryReactorClock
from twisted import mail
import twisted.mail.mail
import twisted.mail.maildir
import twisted.mail.relay
import twisted.mail.relaymanager
import twisted.mail.protocols
import twisted.mail.alias
from twisted.names.error import DNSNameError
from twisted.names.dns import RRHeader, Record_CNAME, Record_MX
from twisted import cred
import twisted.cred.credentials
import twisted.cred.checkers
import twisted.cred.portal
from twisted.test.proto_helpers import LineSendingProtocol
class DomainWithDefaultsTests(unittest.TestCase):
def testMethods(self):
d = dict([(x, x + 10) for x in range(10)])
d = mail.mail.DomainWithDefaultDict(d, 'Default')
self.assertEqual(len(d), 10)
self.assertEqual(list(iter(d)), range(10))
self.assertEqual(list(d.iterkeys()), list(iter(d)))
items = list(d.iteritems())
items.sort()
self.assertEqual(items, [(x, x + 10) for x in range(10)])
values = list(d.itervalues())
values.sort()
self.assertEqual(values, range(10, 20))
items = d.items()
items.sort()
self.assertEqual(items, [(x, x + 10) for x in range(10)])
values = d.values()
values.sort()
self.assertEqual(values, range(10, 20))
for x in range(10):
self.assertEqual(d[x], x + 10)
self.assertEqual(d.get(x), x + 10)
self.assertTrue(x in d)
self.assertTrue(d.has_key(x))
del d[2], d[4], d[6]
self.assertEqual(len(d), 7)
self.assertEqual(d[2], 'Default')
self.assertEqual(d[4], 'Default')
self.assertEqual(d[6], 'Default')
d.update({'a': None, 'b': (), 'c': '*'})
self.assertEqual(len(d), 10)
self.assertEqual(d['a'], None)
self.assertEqual(d['b'], ())
self.assertEqual(d['c'], '*')
d.clear()
self.assertEqual(len(d), 0)
self.assertEqual(d.setdefault('key', 'value'), 'value')
self.assertEqual(d['key'], 'value')
self.assertEqual(d.popitem(), ('key', 'value'))
self.assertEqual(len(d), 0)
dcopy = d.copy()
self.assertEqual(d.domains, dcopy.domains)
self.assertEqual(d.default, dcopy.default)
def _stringificationTest(self, stringifier):
"""
Assert that the class name of a L{mail.mail.DomainWithDefaultDict}
instance and the string-formatted underlying domain dictionary both
appear in the string produced by the given string-returning function.
@type stringifier: one-argument callable
@param stringifier: either C{str} or C{repr}, to be used to get a
string to make assertions against.
"""
domain = mail.mail.DomainWithDefaultDict({}, 'Default')
self.assertIn(domain.__class__.__name__, stringifier(domain))
domain['key'] = 'value'
self.assertIn(str({'key': 'value'}), stringifier(domain))
def test_str(self):
"""
L{DomainWithDefaultDict.__str__} should return a string including
the class name and the domain mapping held by the instance.
"""
self._stringificationTest(str)
def test_repr(self):
"""
L{DomainWithDefaultDict.__repr__} should return a string including
the class name and the domain mapping held by the instance.
"""
self._stringificationTest(repr)
class BounceTests(unittest.TestCase):
def setUp(self):
self.domain = mail.mail.BounceDomain()
def testExists(self):
self.assertRaises(smtp.AddressError, self.domain.exists, "any user")
def testRelay(self):
self.assertEqual(
self.domain.willRelay("random q emailer", "protocol"),
False
)
def testAddUser(self):
self.domain.addUser("bob", "password")
self.assertRaises(smtp.SMTPBadRcpt, self.domain.exists, "bob")
class FileMessageTests(unittest.TestCase):
def setUp(self):
self.name = "fileMessage.testFile"
self.final = "final.fileMessage.testFile"
self.f = file(self.name, 'w')
self.fp = mail.mail.FileMessage(self.f, self.name, self.final)
def tearDown(self):
try:
self.f.close()
except:
pass
try:
os.remove(self.name)
except:
pass
try:
os.remove(self.final)
except:
pass
def testFinalName(self):
return self.fp.eomReceived().addCallback(self._cbFinalName)
def _cbFinalName(self, result):
self.assertEqual(result, self.final)
self.assertTrue(self.f.closed)
self.assertFalse(os.path.exists(self.name))
def testContents(self):
contents = "first line\nsecond line\nthird line\n"
for line in contents.splitlines():
self.fp.lineReceived(line)
self.fp.eomReceived()
self.assertEqual(file(self.final).read(), contents)
def testInterrupted(self):
contents = "first line\nsecond line\n"
for line in contents.splitlines():
self.fp.lineReceived(line)
self.fp.connectionLost()
self.assertFalse(os.path.exists(self.name))
self.assertFalse(os.path.exists(self.final))
class MailServiceTests(unittest.TestCase):
def setUp(self):
self.service = mail.mail.MailService()
def testFactories(self):
f = self.service.getPOP3Factory()
self.assertTrue(isinstance(f, protocol.ServerFactory))
self.assertTrue(f.buildProtocol(('127.0.0.1', 12345)), pop3.POP3)
f = self.service.getSMTPFactory()
self.assertTrue(isinstance(f, protocol.ServerFactory))
self.assertTrue(f.buildProtocol(('127.0.0.1', 12345)), smtp.SMTP)
f = self.service.getESMTPFactory()
self.assertTrue(isinstance(f, protocol.ServerFactory))
self.assertTrue(f.buildProtocol(('127.0.0.1', 12345)), smtp.ESMTP)
def testPortals(self):
o1 = object()
o2 = object()
self.service.portals['domain'] = o1
self.service.portals[''] = o2
self.assertTrue(self.service.lookupPortal('domain') is o1)
self.assertTrue(self.service.defaultPortal() is o2)
class StringListMailboxTests(unittest.TestCase):
"""
Tests for L{StringListMailbox}, an in-memory only implementation of
L{pop3.IMailbox}.
"""
def test_listOneMessage(self):
"""
L{StringListMailbox.listMessages} returns the length of the message at
the offset into the mailbox passed to it.
"""
mailbox = mail.maildir.StringListMailbox(["abc", "ab", "a"])
self.assertEqual(mailbox.listMessages(0), 3)
self.assertEqual(mailbox.listMessages(1), 2)
self.assertEqual(mailbox.listMessages(2), 1)
def test_listAllMessages(self):
"""
L{StringListMailbox.listMessages} returns a list of the lengths of all
messages if not passed an index.
"""
mailbox = mail.maildir.StringListMailbox(["a", "abc", "ab"])
self.assertEqual(mailbox.listMessages(), [1, 3, 2])
def test_getMessage(self):
"""
L{StringListMailbox.getMessage} returns a file-like object from which
the contents of the message at the given offset into the mailbox can be
read.
"""
mailbox = mail.maildir.StringListMailbox(["foo", "real contents"])
self.assertEqual(mailbox.getMessage(1).read(), "real contents")
def test_getUidl(self):
"""
L{StringListMailbox.getUidl} returns a unique identifier for the
message at the given offset into the mailbox.
"""
mailbox = mail.maildir.StringListMailbox(["foo", "bar"])
self.assertNotEqual(mailbox.getUidl(0), mailbox.getUidl(1))
def test_deleteMessage(self):
"""
L{StringListMailbox.deleteMessage} marks a message for deletion causing
further requests for its length to return 0.
"""
mailbox = mail.maildir.StringListMailbox(["foo"])
mailbox.deleteMessage(0)
self.assertEqual(mailbox.listMessages(0), 0)
self.assertEqual(mailbox.listMessages(), [0])
def test_undeleteMessages(self):
"""
L{StringListMailbox.undeleteMessages} causes any messages marked for
deletion to be returned to their original state.
"""
mailbox = mail.maildir.StringListMailbox(["foo"])
mailbox.deleteMessage(0)
mailbox.undeleteMessages()
self.assertEqual(mailbox.listMessages(0), 3)
self.assertEqual(mailbox.listMessages(), [3])
def test_sync(self):
"""
        L{StringListMailbox.sync} causes any messages marked for deletion to
        be permanently deleted.
"""
mailbox = mail.maildir.StringListMailbox(["foo"])
mailbox.deleteMessage(0)
mailbox.sync()
mailbox.undeleteMessages()
self.assertEqual(mailbox.listMessages(0), 0)
self.assertEqual(mailbox.listMessages(), [0])
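# Fault-injection subclass used by the append tests below: setting any of the
# _openstate/_writestate/_renamestate flags to False makes the corresponding
# os wrapper raise an OSError, so the error paths of appendMessage can be
# exercised deterministically.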
class FailingMaildirMailboxAppendMessageTask(mail.maildir._MaildirMailboxAppendMessageTask):
_openstate = True
_writestate = True
_renamestate = True
def osopen(self, fn, attr, mode):
if self._openstate:
return os.open(fn, attr, mode)
else:
raise OSError(errno.EPERM, "Faked Permission Problem")
def oswrite(self, fh, data):
if self._writestate:
return os.write(fh, data)
else:
raise OSError(errno.ENOSPC, "Faked Space problem")
def osrename(self, oldname, newname):
if self._renamestate:
return os.rename(oldname, newname)
else:
raise OSError(errno.EPERM, "Faked Permission Problem")
class _AppendTestMixin(object):
"""
Mixin for L{MaildirMailbox.appendMessage} test cases which defines a helper
for serially appending multiple messages to a mailbox.
"""
def _appendMessages(self, mbox, messages):
"""
Deliver the given messages one at a time. Delivery is serialized to
        guarantee a predictable order in the mailbox (overlapped message delivery
        makes no guarantees about which message will appear first).
"""
results = []
def append():
for m in messages:
d = mbox.appendMessage(m)
d.addCallback(results.append)
yield d
d = task.cooperate(append()).whenDone()
d.addCallback(lambda ignored: results)
return d
class MaildirAppendStringTests(unittest.TestCase, _AppendTestMixin):
"""
Tests for L{MaildirMailbox.appendMessage} when invoked with a C{str}.
"""
def setUp(self):
self.d = self.mktemp()
mail.maildir.initializeMaildir(self.d)
def _append(self, ignored, mbox):
d = mbox.appendMessage('TEST')
return self.assertFailure(d, Exception)
def _setState(self, ignored, mbox, rename=None, write=None, open=None):
"""
Change the behavior of future C{rename}, C{write}, or C{open} calls made
by the mailbox C{mbox}.
@param rename: If not C{None}, a new value for the C{_renamestate}
attribute of the mailbox's append factory. The original value will
be restored at the end of the test.
@param write: Like C{rename}, but for the C{_writestate} attribute.
@param open: Like C{rename}, but for the C{_openstate} attribute.
"""
if rename is not None:
self.addCleanup(
setattr, mbox.AppendFactory, '_renamestate',
mbox.AppendFactory._renamestate)
mbox.AppendFactory._renamestate = rename
if write is not None:
self.addCleanup(
setattr, mbox.AppendFactory, '_writestate',
mbox.AppendFactory._writestate)
mbox.AppendFactory._writestate = write
if open is not None:
self.addCleanup(
setattr, mbox.AppendFactory, '_openstate',
mbox.AppendFactory._openstate)
mbox.AppendFactory._openstate = open
def test_append(self):
"""
L{MaildirMailbox.appendMessage} returns a L{Deferred} which fires when
the message has been added to the end of the mailbox.
"""
mbox = mail.maildir.MaildirMailbox(self.d)
mbox.AppendFactory = FailingMaildirMailboxAppendMessageTask
d = self._appendMessages(mbox, ["X" * i for i in range(1, 11)])
d.addCallback(self.assertEqual, [None] * 10)
d.addCallback(self._cbTestAppend, mbox)
return d
def _cbTestAppend(self, ignored, mbox):
"""
Check that the mailbox has the expected number (ten) of messages in it,
and that each has the expected contents, and that they are in the same
order as that in which they were appended.
"""
self.assertEqual(len(mbox.listMessages()), 10)
self.assertEqual(
[len(mbox.getMessage(i).read()) for i in range(10)],
range(1, 11))
# test in the right order: last to first error location.
self._setState(None, mbox, rename=False)
d = self._append(None, mbox)
d.addCallback(self._setState, mbox, rename=True, write=False)
d.addCallback(self._append, mbox)
d.addCallback(self._setState, mbox, write=True, open=False)
d.addCallback(self._append, mbox)
d.addCallback(self._setState, mbox, open=True)
return d
class MaildirAppendFileTests(unittest.TestCase, _AppendTestMixin):
"""
    Tests for L{MaildirMailbox.appendMessage} when invoked with a file-like object.
"""
def setUp(self):
self.d = self.mktemp()
mail.maildir.initializeMaildir(self.d)
def test_append(self):
"""
L{MaildirMailbox.appendMessage} returns a L{Deferred} which fires when
the message has been added to the end of the mailbox.
"""
mbox = mail.maildir.MaildirMailbox(self.d)
messages = []
for i in xrange(1, 11):
temp = tempfile.TemporaryFile()
temp.write("X" * i)
temp.seek(0, 0)
messages.append(temp)
self.addCleanup(temp.close)
d = self._appendMessages(mbox, messages)
d.addCallback(self._cbTestAppend, mbox)
return d
def _cbTestAppend(self, result, mbox):
"""
Check that the mailbox has the expected number (ten) of messages in it,
and that each has the expected contents, and that they are in the same
order as that in which they were appended.
"""
self.assertEqual(len(mbox.listMessages()), 10)
self.assertEqual(
[len(mbox.getMessage(i).read()) for i in range(10)],
range(1, 11))
class MaildirTests(unittest.TestCase):
def setUp(self):
self.d = self.mktemp()
mail.maildir.initializeMaildir(self.d)
def tearDown(self):
shutil.rmtree(self.d)
def testInitializer(self):
d = self.d
trash = os.path.join(d, '.Trash')
self.assertTrue(os.path.exists(d) and os.path.isdir(d))
self.assertTrue(os.path.exists(os.path.join(d, 'new')))
self.assertTrue(os.path.exists(os.path.join(d, 'cur')))
self.assertTrue(os.path.exists(os.path.join(d, 'tmp')))
self.assertTrue(os.path.isdir(os.path.join(d, 'new')))
self.assertTrue(os.path.isdir(os.path.join(d, 'cur')))
self.assertTrue(os.path.isdir(os.path.join(d, 'tmp')))
self.assertTrue(os.path.exists(os.path.join(trash, 'new')))
self.assertTrue(os.path.exists(os.path.join(trash, 'cur')))
self.assertTrue(os.path.exists(os.path.join(trash, 'tmp')))
self.assertTrue(os.path.isdir(os.path.join(trash, 'new')))
self.assertTrue(os.path.isdir(os.path.join(trash, 'cur')))
self.assertTrue(os.path.isdir(os.path.join(trash, 'tmp')))
def test_nameGenerator(self):
"""
Each call to L{_MaildirNameGenerator.generate} returns a unique
string suitable for use as the basename of a new message file. The
names are ordered such that those generated earlier sort less than
those generated later.
"""
clock = task.Clock()
clock.advance(0.05)
generator = mail.maildir._MaildirNameGenerator(clock)
firstName = generator.generate()
clock.advance(0.05)
secondName = generator.generate()
self.assertTrue(firstName < secondName)
def test_mailbox(self):
"""
Exercise the methods of L{IMailbox} as implemented by
L{MaildirMailbox}.
"""
j = os.path.join
n = mail.maildir._generateMaildirName
msgs = [j(b, n()) for b in ('cur', 'new') for x in range(5)]
# Toss a few files into the mailbox
i = 1
for f in msgs:
fObj = file(j(self.d, f), 'w')
fObj.write('x' * i)
fObj.close()
i = i + 1
mb = mail.maildir.MaildirMailbox(self.d)
self.assertEqual(mb.listMessages(), range(1, 11))
self.assertEqual(mb.listMessages(1), 2)
self.assertEqual(mb.listMessages(5), 6)
self.assertEqual(mb.getMessage(6).read(), 'x' * 7)
self.assertEqual(mb.getMessage(1).read(), 'x' * 2)
d = {}
for i in range(10):
u = mb.getUidl(i)
self.assertFalse(u in d)
d[u] = None
p, f = os.path.split(msgs[5])
mb.deleteMessage(5)
self.assertEqual(mb.listMessages(5), 0)
self.assertTrue(os.path.exists(j(self.d, '.Trash', 'cur', f)))
self.assertFalse(os.path.exists(j(self.d, msgs[5])))
mb.undeleteMessages()
self.assertEqual(mb.listMessages(5), 6)
self.assertFalse(os.path.exists(j(self.d, '.Trash', 'cur', f)))
self.assertTrue(os.path.exists(j(self.d, msgs[5])))
class AbstractMaildirDomainTests(unittest.TestCase):
"""
Tests for L{twisted.mail.maildir.AbstractMaildirDomain}.
"""
def test_interface(self):
"""
L{maildir.AbstractMaildirDomain} implements L{mail.IAliasableDomain}.
"""
verifyClass(mail.mail.IAliasableDomain,
mail.maildir.AbstractMaildirDomain)
class MaildirDirdbmDomainTests(unittest.TestCase):
"""
Tests for L{MaildirDirdbmDomain}.
"""
def setUp(self):
"""
Create a temporary L{MaildirDirdbmDomain} and parent
L{MailService} before running each test.
"""
self.P = self.mktemp()
self.S = mail.mail.MailService()
self.D = mail.maildir.MaildirDirdbmDomain(self.S, self.P)
def tearDown(self):
"""
Remove the temporary C{maildir} directory when the test has
finished.
"""
shutil.rmtree(self.P)
def test_addUser(self):
"""
L{MaildirDirdbmDomain.addUser} accepts a user and password
argument. It stores those in a C{dbm} dictionary
attribute and creates a directory for each user.
"""
toAdd = (('user1', 'pwd1'), ('user2', 'pwd2'), ('user3', 'pwd3'))
for (u, p) in toAdd:
self.D.addUser(u, p)
for (u, p) in toAdd:
self.assertTrue(u in self.D.dbm)
self.assertEqual(self.D.dbm[u], p)
self.assertTrue(os.path.exists(os.path.join(self.P, u)))
def test_credentials(self):
"""
L{MaildirDirdbmDomain.getCredentialsCheckers} initializes and
returns one L{ICredentialsChecker} checker by default.
"""
creds = self.D.getCredentialsCheckers()
self.assertEqual(len(creds), 1)
self.assertTrue(cred.checkers.ICredentialsChecker.providedBy(creds[0]))
self.assertTrue(cred.credentials.IUsernamePassword in creds[0].credentialInterfaces)
def test_requestAvatar(self):
"""
L{MaildirDirdbmDomain.requestAvatar} raises L{NotImplementedError}
unless it is supplied with an L{pop3.IMailbox} interface.
When called with an L{pop3.IMailbox}, it returns a 3-tuple
containing L{pop3.IMailbox}, an implementation of that interface
and a NOOP callable.
"""
class ISomething(Interface):
pass
self.D.addUser('user', 'password')
self.assertRaises(
NotImplementedError,
self.D.requestAvatar, 'user', None, ISomething
)
t = self.D.requestAvatar('user', None, pop3.IMailbox)
self.assertEqual(len(t), 3)
self.assertTrue(t[0] is pop3.IMailbox)
self.assertTrue(pop3.IMailbox.providedBy(t[1]))
t[2]()
def test_requestAvatarId(self):
"""
L{DirdbmDatabase.requestAvatarId} raises L{UnauthorizedLogin} if
supplied with invalid user credentials.
When called with valid credentials, L{requestAvatarId} returns
the username associated with the supplied credentials.
"""
self.D.addUser('user', 'password')
database = self.D.getCredentialsCheckers()[0]
creds = cred.credentials.UsernamePassword('user', 'wrong password')
self.assertRaises(
cred.error.UnauthorizedLogin,
database.requestAvatarId, creds
)
creds = cred.credentials.UsernamePassword('user', 'password')
self.assertEqual(database.requestAvatarId(creds), 'user')
def test_userDirectory(self):
"""
L{MaildirDirdbmDomain.userDirectory} is supplied with a user name
and returns the path to that user's maildir subdirectory.
Calling L{MaildirDirdbmDomain.userDirectory} with a
non-existent user returns the 'postmaster' directory if there
is a postmaster or returns L{None} if there is no postmaster.
"""
self.D.addUser('user', 'password')
self.assertEqual(self.D.userDirectory('user'),
os.path.join(self.D.root, 'user'))
self.D.postmaster = False
self.assertIdentical(self.D.userDirectory('nouser'), None)
self.D.postmaster = True
self.assertEqual(self.D.userDirectory('nouser'),
os.path.join(self.D.root, 'postmaster'))
class StubAliasableDomain(object):
"""
Minimal testable implementation of IAliasableDomain.
"""
implements(mail.mail.IAliasableDomain)
def exists(self, user):
"""
No test coverage for invocations of this method on domain objects,
so we just won't implement it.
"""
raise NotImplementedError()
def addUser(self, user, password):
"""
No test coverage for invocations of this method on domain objects,
so we just won't implement it.
"""
raise NotImplementedError()
def getCredentialsCheckers(self):
"""
This needs to succeed in order for other tests to complete
successfully, but we don't actually assert anything about its
behavior. Return an empty list. Sometime later we should return
something else and assert that a portal got set up properly.
"""
return []
def setAliasGroup(self, aliases):
"""
Just record the value so the test can check it later.
"""
self.aliasGroup = aliases
class ServiceDomainTests(unittest.TestCase):
def setUp(self):
self.S = mail.mail.MailService()
self.D = mail.protocols.DomainDeliveryBase(self.S, None)
self.D.service = self.S
self.D.protocolName = 'TEST'
self.D.host = 'hostname'
self.tmpdir = self.mktemp()
domain = mail.maildir.MaildirDirdbmDomain(self.S, self.tmpdir)
domain.addUser('user', 'password')
self.S.addDomain('test.domain', domain)
def tearDown(self):
shutil.rmtree(self.tmpdir)
def testAddAliasableDomain(self):
"""
Test that adding an IAliasableDomain to a mail service properly sets
up alias group references and such.
"""
aliases = object()
domain = StubAliasableDomain()
self.S.aliases = aliases
self.S.addDomain('example.com', domain)
self.assertIdentical(domain.aliasGroup, aliases)
def testReceivedHeader(self):
hdr = self.D.receivedHeader(
('remotehost', '123.232.101.234'),
smtp.Address('<someguy@someplace>'),
['[email protected]']
)
fp = StringIO.StringIO(hdr)
m = rfc822.Message(fp)
self.assertEqual(len(m.items()), 1)
self.assertIn('Received', m)
def testValidateTo(self):
user = smtp.User('[email protected]', 'helo', None, 'wherever@whatever')
return defer.maybeDeferred(self.D.validateTo, user
).addCallback(self._cbValidateTo
)
def _cbValidateTo(self, result):
self.assertTrue(callable(result))
def testValidateToBadUsername(self):
user = smtp.User('[email protected]', 'helo', None, 'wherever@whatever')
return self.assertFailure(
defer.maybeDeferred(self.D.validateTo, user),
smtp.SMTPBadRcpt)
def testValidateToBadDomain(self):
user = smtp.User('[email protected]', 'helo', None, 'wherever@whatever')
return self.assertFailure(
defer.maybeDeferred(self.D.validateTo, user),
smtp.SMTPBadRcpt)
def testValidateFrom(self):
helo = ('hostname', '127.0.0.1')
origin = smtp.Address('<user@hostname>')
self.assertTrue(self.D.validateFrom(helo, origin) is origin)
helo = ('hostname', '1.2.3.4')
origin = smtp.Address('<user@hostname>')
self.assertTrue(self.D.validateFrom(helo, origin) is origin)
helo = ('hostname', '1.2.3.4')
origin = smtp.Address('<>')
self.assertTrue(self.D.validateFrom(helo, origin) is origin)
self.assertRaises(
smtp.SMTPBadSender,
self.D.validateFrom, None, origin
)
class VirtualPOP3Tests(unittest.TestCase):
def setUp(self):
self.tmpdir = self.mktemp()
self.S = mail.mail.MailService()
self.D = mail.maildir.MaildirDirdbmDomain(self.S, self.tmpdir)
self.D.addUser('user', 'password')
self.S.addDomain('test.domain', self.D)
portal = cred.portal.Portal(self.D)
map(portal.registerChecker, self.D.getCredentialsCheckers())
self.S.portals[''] = self.S.portals['test.domain'] = portal
self.P = mail.protocols.VirtualPOP3()
self.P.service = self.S
self.P.magic = '<unit test magic>'
def tearDown(self):
shutil.rmtree(self.tmpdir)
def testAuthenticateAPOP(self):
resp = md5(self.P.magic + 'password').hexdigest()
return self.P.authenticateUserAPOP('user', resp
).addCallback(self._cbAuthenticateAPOP
)
def _cbAuthenticateAPOP(self, result):
self.assertEqual(len(result), 3)
self.assertEqual(result[0], pop3.IMailbox)
self.assertTrue(pop3.IMailbox.providedBy(result[1]))
result[2]()
def testAuthenticateIncorrectUserAPOP(self):
resp = md5(self.P.magic + 'password').hexdigest()
return self.assertFailure(
self.P.authenticateUserAPOP('resu', resp),
cred.error.UnauthorizedLogin)
def testAuthenticateIncorrectResponseAPOP(self):
resp = md5('wrong digest').hexdigest()
return self.assertFailure(
self.P.authenticateUserAPOP('user', resp),
cred.error.UnauthorizedLogin)
def testAuthenticatePASS(self):
return self.P.authenticateUserPASS('user', 'password'
).addCallback(self._cbAuthenticatePASS
)
def _cbAuthenticatePASS(self, result):
self.assertEqual(len(result), 3)
self.assertEqual(result[0], pop3.IMailbox)
self.assertTrue(pop3.IMailbox.providedBy(result[1]))
result[2]()
def testAuthenticateBadUserPASS(self):
return self.assertFailure(
self.P.authenticateUserPASS('resu', 'password'),
cred.error.UnauthorizedLogin)
def testAuthenticateBadPasswordPASS(self):
return self.assertFailure(
self.P.authenticateUserPASS('user', 'wrong password'),
cred.error.UnauthorizedLogin)
class empty(smtp.User):
def __init__(self):
pass
class RelayTests(unittest.TestCase):
def testExists(self):
service = mail.mail.MailService()
domain = mail.relay.DomainQueuer(service)
doRelay = [
address.UNIXAddress('/var/run/mail-relay'),
address.IPv4Address('TCP', '127.0.0.1', 12345),
]
dontRelay = [
address.IPv4Address('TCP', '192.168.2.1', 62),
address.IPv4Address('TCP', '1.2.3.4', 1943),
]
for peer in doRelay:
user = empty()
user.orig = 'user@host'
user.dest = 'tsoh@resu'
user.protocol = empty()
user.protocol.transport = empty()
user.protocol.transport.getPeer = lambda: peer
self.assertTrue(callable(domain.exists(user)))
for peer in dontRelay:
user = empty()
user.orig = 'some@place'
user.protocol = empty()
user.protocol.transport = empty()
user.protocol.transport.getPeer = lambda: peer
user.dest = 'who@cares'
self.assertRaises(smtp.SMTPBadRcpt, domain.exists, user)
class RelayerTests(unittest.TestCase):
def setUp(self):
self.tmpdir = self.mktemp()
os.mkdir(self.tmpdir)
self.messageFiles = []
for i in range(10):
name = os.path.join(self.tmpdir, 'body-%d' % (i,))
f = file(name + '-H', 'w')
pickle.dump(['from-%d' % (i,), 'to-%d' % (i,)], f)
f.close()
f = file(name + '-D', 'w')
f.write(name)
f.seek(0, 0)
self.messageFiles.append(name)
self.R = mail.relay.RelayerMixin()
self.R.loadMessages(self.messageFiles)
def tearDown(self):
shutil.rmtree(self.tmpdir)
def testMailFrom(self):
for i in range(10):
self.assertEqual(self.R.getMailFrom(), 'from-%d' % (i,))
self.R.sentMail(250, None, None, None, None)
self.assertEqual(self.R.getMailFrom(), None)
def testMailTo(self):
for i in range(10):
self.assertEqual(self.R.getMailTo(), ['to-%d' % (i,)])
self.R.sentMail(250, None, None, None, None)
self.assertEqual(self.R.getMailTo(), None)
def testMailData(self):
for i in range(10):
name = os.path.join(self.tmpdir, 'body-%d' % (i,))
self.assertEqual(self.R.getMailData().read(), name)
self.R.sentMail(250, None, None, None, None)
self.assertEqual(self.R.getMailData(), None)
class Manager:
def __init__(self):
self.success = []
self.failure = []
self.done = []
def notifySuccess(self, factory, message):
self.success.append((factory, message))
def notifyFailure(self, factory, message):
self.failure.append((factory, message))
def notifyDone(self, factory):
self.done.append(factory)
class ManagedRelayerTests(unittest.TestCase):
def setUp(self):
self.manager = Manager()
self.messages = range(0, 20, 2)
self.factory = object()
self.relay = mail.relaymanager.ManagedRelayerMixin(self.manager)
self.relay.messages = self.messages[:]
self.relay.names = self.messages[:]
self.relay.factory = self.factory
def testSuccessfulSentMail(self):
for i in self.messages:
self.relay.sentMail(250, None, None, None, None)
self.assertEqual(
self.manager.success,
[(self.factory, m) for m in self.messages]
)
def testFailedSentMail(self):
for i in self.messages:
self.relay.sentMail(550, None, None, None, None)
self.assertEqual(
self.manager.failure,
[(self.factory, m) for m in self.messages]
)
def testConnectionLost(self):
self.relay.connectionLost(failure.Failure(Exception()))
self.assertEqual(self.manager.done, [self.factory])
class DirectoryQueueTests(unittest.TestCase):
def setUp(self):
# This is almost a test case itself.
self.tmpdir = self.mktemp()
os.mkdir(self.tmpdir)
self.queue = mail.relaymanager.Queue(self.tmpdir)
self.queue.noisy = False
for m in range(25):
hdrF, msgF = self.queue.createNewMessage()
pickle.dump(['header', m], hdrF)
hdrF.close()
msgF.lineReceived('body: %d' % (m,))
msgF.eomReceived()
self.queue.readDirectory()
def tearDown(self):
shutil.rmtree(self.tmpdir)
def testWaiting(self):
self.assertTrue(self.queue.hasWaiting())
self.assertEqual(len(self.queue.getWaiting()), 25)
waiting = self.queue.getWaiting()
self.queue.setRelaying(waiting[0])
self.assertEqual(len(self.queue.getWaiting()), 24)
self.queue.setWaiting(waiting[0])
self.assertEqual(len(self.queue.getWaiting()), 25)
def testRelaying(self):
for m in self.queue.getWaiting():
self.queue.setRelaying(m)
self.assertEqual(
len(self.queue.getRelayed()),
25 - len(self.queue.getWaiting())
)
self.assertFalse(self.queue.hasWaiting())
relayed = self.queue.getRelayed()
self.queue.setWaiting(relayed[0])
self.assertEqual(len(self.queue.getWaiting()), 1)
self.assertEqual(len(self.queue.getRelayed()), 24)
def testDone(self):
msg = self.queue.getWaiting()[0]
self.queue.setRelaying(msg)
self.queue.done(msg)
self.assertEqual(len(self.queue.getWaiting()), 24)
self.assertEqual(len(self.queue.getRelayed()), 0)
self.assertFalse(msg in self.queue.getWaiting())
self.assertFalse(msg in self.queue.getRelayed())
def testEnvelope(self):
envelopes = []
for msg in self.queue.getWaiting():
envelopes.append(self.queue.getEnvelope(msg))
envelopes.sort()
for i in range(25):
self.assertEqual(
envelopes.pop(0),
['header', i]
)
from twisted.names import server
from twisted.names import client
from twisted.names import common
class TestAuthority(common.ResolverBase):
def __init__(self):
common.ResolverBase.__init__(self)
self.addresses = {}
def _lookup(self, name, cls, type, timeout = None):
if name in self.addresses and type == dns.MX:
results = []
for a in self.addresses[name]:
hdr = dns.RRHeader(
name, dns.MX, dns.IN, 60, dns.Record_MX(0, a)
)
results.append(hdr)
return defer.succeed((results, [], []))
return defer.fail(failure.Failure(dns.DomainError(name)))
def setUpDNS(self):
self.auth = TestAuthority()
factory = server.DNSServerFactory([self.auth])
protocol = dns.DNSDatagramProtocol(factory)
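    # Bind the TCP DNS server on an OS-assigned port first, then try to claim
    # the same port number for UDP; if that UDP port happens to be taken,
    # release the TCP port and retry until a matching TCP/UDP pair is found.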
while 1:
self.port = reactor.listenTCP(0, factory, interface='127.0.0.1')
portNumber = self.port.getHost().port
try:
self.udpPort = reactor.listenUDP(portNumber, protocol, interface='127.0.0.1')
except CannotListenError:
self.port.stopListening()
else:
break
self.resolver = client.Resolver(servers=[('127.0.0.1', portNumber)])
def tearDownDNS(self):
dl = []
dl.append(defer.maybeDeferred(self.port.stopListening))
dl.append(defer.maybeDeferred(self.udpPort.stopListening))
try:
self.resolver._parseCall.cancel()
except:
pass
return defer.DeferredList(dl)
class MXTests(unittest.TestCase):
"""
Tests for L{mail.relaymanager.MXCalculator}.
"""
def setUp(self):
setUpDNS(self)
self.clock = task.Clock()
self.mx = mail.relaymanager.MXCalculator(self.resolver, self.clock)
def tearDown(self):
return tearDownDNS(self)
def test_defaultClock(self):
"""
L{MXCalculator}'s default clock is C{twisted.internet.reactor}.
"""
self.assertIdentical(
mail.relaymanager.MXCalculator(self.resolver).clock,
reactor)
def testSimpleSuccess(self):
self.auth.addresses['test.domain'] = ['the.email.test.domain']
return self.mx.getMX('test.domain').addCallback(self._cbSimpleSuccess)
def _cbSimpleSuccess(self, mx):
self.assertEqual(mx.preference, 0)
self.assertEqual(str(mx.name), 'the.email.test.domain')
def testSimpleFailure(self):
self.mx.fallbackToDomain = False
return self.assertFailure(self.mx.getMX('test.domain'), IOError)
def testSimpleFailureWithFallback(self):
return self.assertFailure(self.mx.getMX('test.domain'), DNSLookupError)
def _exchangeTest(self, domain, records, correctMailExchange):
"""
Issue an MX request for the given domain and arrange for it to be
responded to with the given records. Verify that the resulting mail
exchange is the indicated host.
@type domain: C{str}
@type records: C{list} of L{RRHeader}
@type correctMailExchange: C{str}
@rtype: L{Deferred}
"""
class DummyResolver(object):
def lookupMailExchange(self, name):
if name == domain:
return defer.succeed((
records,
[],
[]))
return defer.fail(DNSNameError(domain))
self.mx.resolver = DummyResolver()
d = self.mx.getMX(domain)
def gotMailExchange(record):
self.assertEqual(str(record.name), correctMailExchange)
d.addCallback(gotMailExchange)
return d
def test_mailExchangePreference(self):
"""
The MX record with the lowest preference is returned by
L{MXCalculator.getMX}.
"""
domain = "example.com"
good = "good.example.com"
bad = "bad.example.com"
records = [
RRHeader(name=domain,
type=Record_MX.TYPE,
payload=Record_MX(1, bad)),
RRHeader(name=domain,
type=Record_MX.TYPE,
payload=Record_MX(0, good)),
RRHeader(name=domain,
type=Record_MX.TYPE,
payload=Record_MX(2, bad))]
return self._exchangeTest(domain, records, good)
def test_badExchangeExcluded(self):
"""
L{MXCalculator.getMX} returns the MX record with the lowest preference
which is not also marked as bad.
"""
domain = "example.com"
good = "good.example.com"
bad = "bad.example.com"
records = [
RRHeader(name=domain,
type=Record_MX.TYPE,
payload=Record_MX(0, bad)),
RRHeader(name=domain,
type=Record_MX.TYPE,
payload=Record_MX(1, good))]
self.mx.markBad(bad)
return self._exchangeTest(domain, records, good)
def test_fallbackForAllBadExchanges(self):
"""
L{MXCalculator.getMX} returns the MX record with the lowest preference
if all the MX records in the response have been marked bad.
"""
domain = "example.com"
bad = "bad.example.com"
worse = "worse.example.com"
records = [
RRHeader(name=domain,
type=Record_MX.TYPE,
payload=Record_MX(0, bad)),
RRHeader(name=domain,
type=Record_MX.TYPE,
payload=Record_MX(1, worse))]
self.mx.markBad(bad)
self.mx.markBad(worse)
return self._exchangeTest(domain, records, bad)
def test_badExchangeExpires(self):
"""
L{MXCalculator.getMX} returns the MX record with the lowest preference
if it was last marked bad longer than L{MXCalculator.timeOutBadMX}
seconds ago.
"""
domain = "example.com"
good = "good.example.com"
previouslyBad = "bad.example.com"
records = [
RRHeader(name=domain,
type=Record_MX.TYPE,
payload=Record_MX(0, previouslyBad)),
RRHeader(name=domain,
type=Record_MX.TYPE,
payload=Record_MX(1, good))]
self.mx.markBad(previouslyBad)
self.clock.advance(self.mx.timeOutBadMX)
return self._exchangeTest(domain, records, previouslyBad)
def test_goodExchangeUsed(self):
"""
L{MXCalculator.getMX} returns the MX record with the lowest preference
if it was marked good after it was marked bad.
"""
domain = "example.com"
good = "good.example.com"
previouslyBad = "bad.example.com"
records = [
RRHeader(name=domain,
type=Record_MX.TYPE,
payload=Record_MX(0, previouslyBad)),
RRHeader(name=domain,
type=Record_MX.TYPE,
payload=Record_MX(1, good))]
self.mx.markBad(previouslyBad)
self.mx.markGood(previouslyBad)
self.clock.advance(self.mx.timeOutBadMX)
return self._exchangeTest(domain, records, previouslyBad)
def test_successWithoutResults(self):
"""
If an MX lookup succeeds but the result set is empty,
L{MXCalculator.getMX} should try to look up an I{A} record for the
requested name and call back its returned Deferred with that
address.
"""
ip = '1.2.3.4'
domain = 'example.org'
class DummyResolver(object):
"""
Fake resolver which will respond to an MX lookup with an empty
result set.
@ivar mx: A dictionary mapping hostnames to three-tuples of
results to be returned from I{MX} lookups.
@ivar a: A dictionary mapping hostnames to addresses to be
returned from I{A} lookups.
"""
mx = {domain: ([], [], [])}
a = {domain: ip}
def lookupMailExchange(self, domain):
return defer.succeed(self.mx[domain])
def getHostByName(self, domain):
return defer.succeed(self.a[domain])
self.mx.resolver = DummyResolver()
d = self.mx.getMX(domain)
d.addCallback(self.assertEqual, Record_MX(name=ip))
return d
def test_failureWithSuccessfulFallback(self):
"""
Test that if the MX record lookup fails, fallback is enabled, and an A
record is available for the name, then the Deferred returned by
L{MXCalculator.getMX} ultimately fires with a Record_MX instance which
gives the address in the A record for the name.
"""
class DummyResolver(object):
"""
Fake resolver which will fail an MX lookup but then succeed a
getHostByName call.
"""
def lookupMailExchange(self, domain):
return defer.fail(DNSNameError())
def getHostByName(self, domain):
return defer.succeed("1.2.3.4")
self.mx.resolver = DummyResolver()
d = self.mx.getMX("domain")
d.addCallback(self.assertEqual, Record_MX(name="1.2.3.4"))
return d
def test_cnameWithoutGlueRecords(self):
"""
If an MX lookup returns a single CNAME record as a result, MXCalculator
will perform an MX lookup for the canonical name indicated and return
the MX record which results.
"""
alias = "alias.example.com"
canonical = "canonical.example.com"
exchange = "mail.example.com"
class DummyResolver(object):
"""
Fake resolver which will return a CNAME for an MX lookup of a name
which is an alias and an MX for an MX lookup of the canonical name.
"""
def lookupMailExchange(self, domain):
if domain == alias:
return defer.succeed((
[RRHeader(name=domain,
type=Record_CNAME.TYPE,
payload=Record_CNAME(canonical))],
[], []))
elif domain == canonical:
return defer.succeed((
[RRHeader(name=domain,
type=Record_MX.TYPE,
payload=Record_MX(0, exchange))],
[], []))
else:
return defer.fail(DNSNameError(domain))
self.mx.resolver = DummyResolver()
d = self.mx.getMX(alias)
d.addCallback(self.assertEqual, Record_MX(name=exchange))
return d
def test_cnameChain(self):
"""
If L{MXCalculator.getMX} encounters a CNAME chain which is longer than
the length specified, the returned L{Deferred} should errback with
L{CanonicalNameChainTooLong}.
"""
class DummyResolver(object):
"""
Fake resolver which generates a CNAME chain of infinite length in
response to MX lookups.
"""
chainCounter = 0
def lookupMailExchange(self, domain):
self.chainCounter += 1
name = 'x-%d.example.com' % (self.chainCounter,)
return defer.succeed((
[RRHeader(name=domain,
type=Record_CNAME.TYPE,
payload=Record_CNAME(name))],
[], []))
cnameLimit = 3
self.mx.resolver = DummyResolver()
d = self.mx.getMX("mail.example.com", cnameLimit)
self.assertFailure(
d, twisted.mail.relaymanager.CanonicalNameChainTooLong)
def cbChainTooLong(error):
self.assertEqual(error.args[0], Record_CNAME("x-%d.example.com" % (cnameLimit + 1,)))
self.assertEqual(self.mx.resolver.chainCounter, cnameLimit + 1)
d.addCallback(cbChainTooLong)
return d
def test_cnameWithGlueRecords(self):
"""
If an MX lookup returns a CNAME and the MX record for the CNAME, the
L{Deferred} returned by L{MXCalculator.getMX} should be called back
with the name from the MX record without further lookups being
attempted.
"""
lookedUp = []
alias = "alias.example.com"
canonical = "canonical.example.com"
exchange = "mail.example.com"
class DummyResolver(object):
def lookupMailExchange(self, domain):
if domain != alias or lookedUp:
# Don't give back any results for anything except the alias
# or on any request after the first.
return ([], [], [])
return defer.succeed((
[RRHeader(name=alias,
type=Record_CNAME.TYPE,
payload=Record_CNAME(canonical)),
RRHeader(name=canonical,
type=Record_MX.TYPE,
payload=Record_MX(name=exchange))],
[], []))
self.mx.resolver = DummyResolver()
d = self.mx.getMX(alias)
d.addCallback(self.assertEqual, Record_MX(name=exchange))
return d
def test_cnameLoopWithGlueRecords(self):
"""
If an MX lookup returns two CNAME records which point to each other,
the loop should be detected and the L{Deferred} returned by
L{MXCalculator.getMX} should be errbacked with L{CanonicalNameLoop}.
"""
firstAlias = "cname1.example.com"
secondAlias = "cname2.example.com"
class DummyResolver(object):
def lookupMailExchange(self, domain):
return defer.succeed((
[RRHeader(name=firstAlias,
type=Record_CNAME.TYPE,
payload=Record_CNAME(secondAlias)),
RRHeader(name=secondAlias,
type=Record_CNAME.TYPE,
payload=Record_CNAME(firstAlias))],
[], []))
self.mx.resolver = DummyResolver()
d = self.mx.getMX(firstAlias)
self.assertFailure(d, twisted.mail.relaymanager.CanonicalNameLoop)
return d
def testManyRecords(self):
self.auth.addresses['test.domain'] = [
'mx1.test.domain', 'mx2.test.domain', 'mx3.test.domain'
]
return self.mx.getMX('test.domain'
).addCallback(self._cbManyRecordsSuccessfulLookup
)
def _cbManyRecordsSuccessfulLookup(self, mx):
self.assertTrue(str(mx.name).split('.', 1)[0] in ('mx1', 'mx2', 'mx3'))
self.mx.markBad(str(mx.name))
return self.mx.getMX('test.domain'
).addCallback(self._cbManyRecordsDifferentResult, mx
)
def _cbManyRecordsDifferentResult(self, nextMX, mx):
self.assertNotEqual(str(mx.name), str(nextMX.name))
self.mx.markBad(str(nextMX.name))
return self.mx.getMX('test.domain'
).addCallback(self._cbManyRecordsLastResult, mx, nextMX
)
def _cbManyRecordsLastResult(self, lastMX, mx, nextMX):
self.assertNotEqual(str(mx.name), str(lastMX.name))
self.assertNotEqual(str(nextMX.name), str(lastMX.name))
self.mx.markBad(str(lastMX.name))
self.mx.markGood(str(nextMX.name))
return self.mx.getMX('test.domain'
).addCallback(self._cbManyRecordsRepeatSpecificResult, nextMX
)
def _cbManyRecordsRepeatSpecificResult(self, againMX, nextMX):
self.assertEqual(str(againMX.name), str(nextMX.name))
class LiveFireExerciseTests(unittest.TestCase):
if interfaces.IReactorUDP(reactor, None) is None:
skip = "UDP support is required to determining MX records"
def setUp(self):
setUpDNS(self)
self.tmpdirs = [
'domainDir', 'insertionDomain', 'insertionQueue',
'destinationDomain', 'destinationQueue'
]
def tearDown(self):
for d in self.tmpdirs:
if os.path.exists(d):
shutil.rmtree(d)
return tearDownDNS(self)
def testLocalDelivery(self):
service = mail.mail.MailService()
service.smtpPortal.registerChecker(cred.checkers.AllowAnonymousAccess())
domain = mail.maildir.MaildirDirdbmDomain(service, 'domainDir')
domain.addUser('user', 'password')
service.addDomain('test.domain', domain)
service.portals[''] = service.portals['test.domain']
map(service.portals[''].registerChecker, domain.getCredentialsCheckers())
service.setQueue(mail.relay.DomainQueuer(service))
f = service.getSMTPFactory()
self.smtpServer = reactor.listenTCP(0, f, interface='127.0.0.1')
client = LineSendingProtocol([
'HELO meson',
'MAIL FROM: <user@hostname>',
'RCPT TO: <[email protected]>',
'DATA',
'This is the message',
'.',
'QUIT'
])
done = Deferred()
f = protocol.ClientFactory()
f.protocol = lambda: client
f.clientConnectionLost = lambda *args: done.callback(None)
reactor.connectTCP('127.0.0.1', self.smtpServer.getHost().port, f)
def finished(ign):
mbox = domain.requestAvatar('user', None, pop3.IMailbox)[1]
msg = mbox.getMessage(0).read()
self.failIfEqual(msg.find('This is the message'), -1)
return self.smtpServer.stopListening()
done.addCallback(finished)
return done
def testRelayDelivery(self):
# Here is the service we will connect to and send mail from
insServ = mail.mail.MailService()
insServ.smtpPortal.registerChecker(cred.checkers.AllowAnonymousAccess())
domain = mail.maildir.MaildirDirdbmDomain(insServ, 'insertionDomain')
insServ.addDomain('insertion.domain', domain)
os.mkdir('insertionQueue')
insServ.setQueue(mail.relaymanager.Queue('insertionQueue'))
insServ.domains.setDefaultDomain(mail.relay.DomainQueuer(insServ))
manager = mail.relaymanager.SmartHostSMTPRelayingManager(insServ.queue)
manager.fArgs += ('test.identity.hostname',)
helper = mail.relaymanager.RelayStateHelper(manager, 1)
# Yoink! Now the internet obeys OUR every whim!
manager.mxcalc = mail.relaymanager.MXCalculator(self.resolver)
# And this is our whim.
self.auth.addresses['destination.domain'] = ['127.0.0.1']
f = insServ.getSMTPFactory()
self.insServer = reactor.listenTCP(0, f, interface='127.0.0.1')
# Here is the service the previous one will connect to for final
# delivery
destServ = mail.mail.MailService()
destServ.smtpPortal.registerChecker(cred.checkers.AllowAnonymousAccess())
domain = mail.maildir.MaildirDirdbmDomain(destServ, 'destinationDomain')
domain.addUser('user', 'password')
destServ.addDomain('destination.domain', domain)
os.mkdir('destinationQueue')
destServ.setQueue(mail.relaymanager.Queue('destinationQueue'))
helper = mail.relaymanager.RelayStateHelper(manager, 1)
helper.startService()
f = destServ.getSMTPFactory()
self.destServer = reactor.listenTCP(0, f, interface='127.0.0.1')
# Update the port number the *first* relay will connect to, because we can't use
# port 25
manager.PORT = self.destServer.getHost().port
client = LineSendingProtocol([
'HELO meson',
'MAIL FROM: <user@wherever>',
'RCPT TO: <[email protected]>',
'DATA',
'This is the message',
'.',
'QUIT'
])
done = Deferred()
f = protocol.ClientFactory()
f.protocol = lambda: client
f.clientConnectionLost = lambda *args: done.callback(None)
reactor.connectTCP('127.0.0.1', self.insServer.getHost().port, f)
def finished(ign):
# First part of the delivery is done. Poke the queue manually now
# so we don't have to wait for the queue to be flushed.
delivery = manager.checkState()
def delivered(ign):
mbox = domain.requestAvatar('user', None, pop3.IMailbox)[1]
msg = mbox.getMessage(0).read()
self.failIfEqual(msg.find('This is the message'), -1)
self.insServer.stopListening()
self.destServer.stopListening()
helper.stopService()
delivery.addCallback(delivered)
return delivery
done.addCallback(finished)
return done
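# Sample /etc/aliases-style input for the alias tests below: each entry maps a
# local user to a comma-separated list of targets, where a leading '|' pipes
# the message to a program, a leading ':' appears to mark an include directive,
# a bare path delivers to a file, and an indented line continues the previous
# entry.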
aliasFile = StringIO.StringIO("""\
# Here's a comment
# woop another one
testuser: address1,address2, address3,
continuation@address, |/bin/process/this
usertwo:thisaddress,thataddress, lastaddress
lastuser: :/includable, /filename, |/program, address
""")
class LineBufferMessage:
def __init__(self):
self.lines = []
self.eom = False
self.lost = False
def lineReceived(self, line):
self.lines.append(line)
def eomReceived(self):
self.eom = True
return defer.succeed('<Whatever>')
def connectionLost(self):
self.lost = True
class AliasTests(unittest.TestCase):
lines = [
'First line',
'Next line',
'',
'After a blank line',
'Last line'
]
def setUp(self):
aliasFile.seek(0)
def testHandle(self):
result = {}
lines = [
'user: another@host\n',
'nextuser: |/bin/program\n',
'user: me@again\n',
'moreusers: :/etc/include/filename\n',
'multiuser: first@host, second@host,last@anotherhost',
]
for l in lines:
mail.alias.handle(result, l, 'TestCase', None)
self.assertEqual(result['user'], ['another@host', 'me@again'])
self.assertEqual(result['nextuser'], ['|/bin/program'])
self.assertEqual(result['moreusers'], [':/etc/include/filename'])
self.assertEqual(result['multiuser'], ['first@host', 'second@host', 'last@anotherhost'])
def testFileLoader(self):
domains = {'': object()}
result = mail.alias.loadAliasFile(domains, fp=aliasFile)
self.assertEqual(len(result), 3)
group = result['testuser']
s = str(group)
for a in ('address1', 'address2', 'address3', 'continuation@address', '/bin/process/this'):
self.failIfEqual(s.find(a), -1)
self.assertEqual(len(group), 5)
group = result['usertwo']
s = str(group)
for a in ('thisaddress', 'thataddress', 'lastaddress'):
self.failIfEqual(s.find(a), -1)
self.assertEqual(len(group), 3)
group = result['lastuser']
s = str(group)
self.assertEqual(s.find('/includable'), -1)
for a in ('/filename', 'program', 'address'):
self.failIfEqual(s.find(a), -1, '%s not found' % a)
self.assertEqual(len(group), 3)
def testMultiWrapper(self):
msgs = LineBufferMessage(), LineBufferMessage(), LineBufferMessage()
msg = mail.alias.MultiWrapper(msgs)
for L in self.lines:
msg.lineReceived(L)
return msg.eomReceived().addCallback(self._cbMultiWrapper, msgs)
def _cbMultiWrapper(self, ignored, msgs):
for m in msgs:
self.assertTrue(m.eom)
self.assertFalse(m.lost)
self.assertEqual(self.lines, m.lines)
def testFileAlias(self):
tmpfile = self.mktemp()
a = mail.alias.FileAlias(tmpfile, None, None)
m = a.createMessageReceiver()
for l in self.lines:
m.lineReceived(l)
return m.eomReceived().addCallback(self._cbTestFileAlias, tmpfile)
def _cbTestFileAlias(self, ignored, tmpfile):
lines = file(tmpfile).readlines()
self.assertEqual([L[:-1] for L in lines], self.lines)
class DummyDomain(object):
"""
Test domain for L{AddressAliasTests}.
"""
def __init__(self, address):
self.address = address
def exists(self, user, memo=None):
"""
        @returns: When a C{memo} is passed in this will raise a
            L{smtp.SMTPBadRcpt} exception, otherwise a no-argument callable
            which evaluates to C{True} if C{user} equals the string version
            of L{self.address}.
        @rtype: no-argument callable returning C{bool}
"""
if memo:
raise mail.smtp.SMTPBadRcpt('ham')
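        # Like the real IDomain.exists, return a callable rather than a plain
        # boolean; AddressAlias.createMessageReceiver is expected to invoke it,
        # and AddressAliasTests only relies on the returned value being truthy.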
return lambda: user == str(self.address)
class AddressAliasTests(unittest.TestCase):
"""
Tests for L{twisted.mail.alias.AddressAlias}.
"""
def setUp(self):
"""
Setup an L{AddressAlias}.
"""
self.address = mail.smtp.Address('foo@bar')
domains = {self.address.domain: DummyDomain(self.address)}
self.alias = mail.alias.AddressAlias(self.address, domains,
self.address)
def test_createMessageReceiver(self):
"""
        L{createMessageReceiver} calls C{exists()} on the domain object
        whose key matches the C{alias} passed to L{AddressAlias}.
"""
self.assertTrue(self.alias.createMessageReceiver())
def test_str(self):
"""
The string presentation of L{AddressAlias} includes the alias.
"""
self.assertEqual(str(self.alias), '<Address foo@bar>')
def test_resolve(self):
"""
L{resolve} will look for additional aliases when an C{aliasmap}
dictionary is passed, and returns C{None} if none were found.
"""
self.assertEqual(self.alias.resolve({self.address: 'bar'}), None)
def test_resolveWithoutAliasmap(self):
"""
L{resolve} returns C{None} when the alias could not be found in the
C{aliasmap} and no L{mail.smtp.User} with this alias exists either.
"""
self.assertEqual(self.alias.resolve({}), None)
class DummyProcess(object):
__slots__ = ['onEnd']
class MockProcessAlias(mail.alias.ProcessAlias):
"""
    An alias processor that doesn't actually launch processes.
"""
def spawnProcess(self, proto, program, path):
"""
Don't spawn a process.
"""
class MockAliasGroup(mail.alias.AliasGroup):
"""
An alias group using C{MockProcessAlias}.
"""
processAliasFactory = MockProcessAlias
class StubProcess(object):
"""
Fake implementation of L{IProcessTransport}.
@ivar signals: A list of all the signals which have been sent to this fake
process.
"""
def __init__(self):
self.signals = []
def loseConnection(self):
"""
No-op implementation of disconnection.
"""
def signalProcess(self, signal):
"""
Record a signal sent to this process for later inspection.
"""
self.signals.append(signal)
class ProcessAliasTests(unittest.TestCase):
"""
Tests for alias resolution.
"""
if interfaces.IReactorProcess(reactor, None) is None:
skip = "IReactorProcess not supported"
lines = [
'First line',
'Next line',
'',
'After a blank line',
'Last line'
]
def exitStatus(self, code):
"""
Construct a status from the given exit code.
@type code: L{int} between 0 and 255 inclusive.
@param code: The exit status which the code will represent.
@rtype: L{int}
@return: A status integer for the given exit code.
"""
# /* Macros for constructing status values. */
# #define __W_EXITCODE(ret, sig) ((ret) << 8 | (sig))
status = (code << 8) | 0
# Sanity check
self.assertTrue(os.WIFEXITED(status))
self.assertEqual(os.WEXITSTATUS(status), code)
self.assertFalse(os.WIFSIGNALED(status))
return status
def signalStatus(self, signal):
"""
Construct a status from the given signal.
@type signal: L{int} between 0 and 255 inclusive.
@param signal: The signal number which the status will represent.
@rtype: L{int}
@return: A status integer for the given signal.
"""
# /* If WIFSIGNALED(STATUS), the terminating signal. */
# #define __WTERMSIG(status) ((status) & 0x7f)
# /* Nonzero if STATUS indicates termination by a signal. */
# #define __WIFSIGNALED(status) \
# (((signed char) (((status) & 0x7f) + 1) >> 1) > 0)
status = signal
# Sanity check
self.assertTrue(os.WIFSIGNALED(status))
self.assertEqual(os.WTERMSIG(status), signal)
self.assertFalse(os.WIFEXITED(status))
return status
def setUp(self):
"""
Replace L{smtp.DNSNAME} with a well-known value.
"""
self.DNSNAME = smtp.DNSNAME
smtp.DNSNAME = ''
def tearDown(self):
"""
Restore the original value of L{smtp.DNSNAME}.
"""
smtp.DNSNAME = self.DNSNAME
def test_processAlias(self):
"""
Standard call to C{mail.alias.ProcessAlias}: check that the specified
script is called, and that the input is correctly transferred to it.
"""
sh = FilePath(self.mktemp())
sh.setContent("""\
#!/bin/sh
rm -f process.alias.out
while read i; do
echo $i >> process.alias.out
done""")
os.chmod(sh.path, 0700)
a = mail.alias.ProcessAlias(sh.path, None, None)
m = a.createMessageReceiver()
for l in self.lines:
m.lineReceived(l)
def _cbProcessAlias(ignored):
lines = file('process.alias.out').readlines()
self.assertEqual([L[:-1] for L in lines], self.lines)
return m.eomReceived().addCallback(_cbProcessAlias)
def test_processAliasTimeout(self):
"""
If the alias child process does not exit within a particular period of
time, the L{Deferred} returned by L{MessageWrapper.eomReceived} should
fail with L{ProcessAliasTimeout} and send the I{KILL} signal to the
        child process.
"""
reactor = task.Clock()
transport = StubProcess()
proto = mail.alias.ProcessAliasProtocol()
proto.makeConnection(transport)
receiver = mail.alias.MessageWrapper(proto, None, reactor)
d = receiver.eomReceived()
reactor.advance(receiver.completionTimeout)
def timedOut(ignored):
self.assertEqual(transport.signals, ['KILL'])
# Now that it has been killed, disconnect the protocol associated
# with it.
proto.processEnded(
ProcessTerminated(self.signalStatus(signal.SIGKILL)))
self.assertFailure(d, mail.alias.ProcessAliasTimeout)
d.addCallback(timedOut)
return d
def test_earlyProcessTermination(self):
"""
If the process associated with an L{mail.alias.MessageWrapper} exits
before I{eomReceived} is called, the L{Deferred} returned by
I{eomReceived} should fail.
"""
transport = StubProcess()
protocol = mail.alias.ProcessAliasProtocol()
protocol.makeConnection(transport)
receiver = mail.alias.MessageWrapper(protocol, None, None)
protocol.processEnded(failure.Failure(ProcessDone(0)))
return self.assertFailure(receiver.eomReceived(), ProcessDone)
def _terminationTest(self, status):
"""
Verify that if the process associated with an
L{mail.alias.MessageWrapper} exits with the given status, the
L{Deferred} returned by I{eomReceived} fails with L{ProcessTerminated}.
"""
transport = StubProcess()
protocol = mail.alias.ProcessAliasProtocol()
protocol.makeConnection(transport)
receiver = mail.alias.MessageWrapper(protocol, None, None)
protocol.processEnded(
failure.Failure(ProcessTerminated(status)))
return self.assertFailure(receiver.eomReceived(), ProcessTerminated)
def test_errorProcessTermination(self):
"""
If the process associated with an L{mail.alias.MessageWrapper} exits
with a non-zero exit code, the L{Deferred} returned by I{eomReceived}
should fail.
"""
return self._terminationTest(self.exitStatus(1))
def test_signalProcessTermination(self):
"""
If the process associated with an L{mail.alias.MessageWrapper} exits
because it received a signal, the L{Deferred} returned by
I{eomReceived} should fail.
"""
return self._terminationTest(self.signalStatus(signal.SIGHUP))
def test_aliasResolution(self):
"""
        Check that the C{resolve} method of alias processors produces the correct
set of objects:
- direct alias with L{mail.alias.AddressAlias} if a simple input is passed
- aliases in a file with L{mail.alias.FileWrapper} if an input in the format
'/file' is given
            - aliases resulting from a process call wrapped by L{mail.alias.MessageWrapper}
if the format is '|process'
"""
aliases = {}
domain = {'': TestDomain(aliases, ['user1', 'user2', 'user3'])}
A1 = MockAliasGroup(['user1', '|echo', '/file'], domain, 'alias1')
A2 = MockAliasGroup(['user2', 'user3'], domain, 'alias2')
A3 = mail.alias.AddressAlias('alias1', domain, 'alias3')
aliases.update({
'alias1': A1,
'alias2': A2,
'alias3': A3,
})
res1 = A1.resolve(aliases)
r1 = map(str, res1.objs)
r1.sort()
expected = map(str, [
mail.alias.AddressAlias('user1', None, None),
mail.alias.MessageWrapper(DummyProcess(), 'echo'),
mail.alias.FileWrapper('/file'),
])
expected.sort()
self.assertEqual(r1, expected)
res2 = A2.resolve(aliases)
r2 = map(str, res2.objs)
r2.sort()
expected = map(str, [
mail.alias.AddressAlias('user2', None, None),
mail.alias.AddressAlias('user3', None, None)
])
expected.sort()
self.assertEqual(r2, expected)
res3 = A3.resolve(aliases)
r3 = map(str, res3.objs)
r3.sort()
expected = map(str, [
mail.alias.AddressAlias('user1', None, None),
mail.alias.MessageWrapper(DummyProcess(), 'echo'),
mail.alias.FileWrapper('/file'),
])
expected.sort()
self.assertEqual(r3, expected)
def test_cyclicAlias(self):
"""
Check that a cycle in alias resolution is correctly handled.
"""
aliases = {}
domain = {'': TestDomain(aliases, [])}
A1 = mail.alias.AddressAlias('alias2', domain, 'alias1')
A2 = mail.alias.AddressAlias('alias3', domain, 'alias2')
A3 = mail.alias.AddressAlias('alias1', domain, 'alias3')
aliases.update({
'alias1': A1,
'alias2': A2,
'alias3': A3
})
self.assertEqual(aliases['alias1'].resolve(aliases), None)
self.assertEqual(aliases['alias2'].resolve(aliases), None)
self.assertEqual(aliases['alias3'].resolve(aliases), None)
A4 = MockAliasGroup(['|echo', 'alias1'], domain, 'alias4')
aliases['alias4'] = A4
res = A4.resolve(aliases)
r = map(str, res.objs)
r.sort()
expected = map(str, [
mail.alias.MessageWrapper(DummyProcess(), 'echo')
])
expected.sort()
self.assertEqual(r, expected)
class TestDomain:
def __init__(self, aliases, users):
self.aliases = aliases
self.users = users
def exists(self, user, memo=None):
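        # The memo argument is threaded through to resolve() so that cyclic
        # alias definitions (exercised by test_cyclicAlias above) terminate
        # instead of recursing forever.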
user = user.dest.local
if user in self.users:
return lambda: mail.alias.AddressAlias(user, None, None)
try:
a = self.aliases[user]
except:
raise smtp.SMTPBadRcpt(user)
else:
aliases = a.resolve(self.aliases, memo)
if aliases:
return lambda: aliases
raise smtp.SMTPBadRcpt(user)
class SSLContextFactoryTests(unittest.TestCase):
"""
Tests for twisted.mail.protocols.SSLContextFactory.
"""
def test_deprecation(self):
"""
Accessing L{twisted.mail.protocols.SSLContextFactory} emits a
deprecation warning recommending the use of the more general SSL context
factory from L{twisted.internet.ssl}.
"""
mail.protocols.SSLContextFactory
warningsShown = self.flushWarnings([self.test_deprecation])
self.assertEqual(len(warningsShown), 1)
self.assertIdentical(warningsShown[0]['category'], DeprecationWarning)
self.assertEqual(
warningsShown[0]['message'],
'twisted.mail.protocols.SSLContextFactory was deprecated in '
'Twisted 12.2.0: Use twisted.internet.ssl.'
'DefaultOpenSSLContextFactory instead.')
class DummyQueue(object):
"""
A fake relay queue to use for testing.
This queue doesn't keep track of which messages are waiting to be relayed
or are in the process of being relayed.
@ivar directory: See L{__init__}.
"""
def __init__(self, directory):
"""
@type directory: L{bytes}
@param directory: The pathname of the directory holding messages in the
queue.
"""
self.directory = directory
def done(self, message):
"""
Remove a message from the queue.
@type message: L{bytes}
@param message: The base filename of a message.
"""
message = os.path.basename(message)
os.remove(self.getPath(message) + '-D')
os.remove(self.getPath(message) + '-H')
def getEnvelopeFile(self, message):
"""
Get the envelope file for a message in the queue.
@type message: L{bytes}
@param message: The base filename of a message.
@rtype: L{file}
@return: The envelope file for the message.
"""
return open(os.path.join(self.directory, message+'-H'), 'rb')
def getPath(self, message):
"""
Return the full base pathname of a message in the queue.
@type message: L{bytes}
@param message: The base filename of a message.
@rtype: L{bytes}
@return: The full base pathname of the message.
"""
return os.path.join(self.directory, message)
def createNewMessage(self):
"""
Create a new message in the queue.
@rtype: 2-L{tuple} of (E{1}) L{file}, (E{2}) L{FileMessage}
@return: The envelope file and a message receiver for a new message in
the queue.
"""
fname = "%s_%s" % (time.time(), id(self))
headerFile = open(os.path.join(self.directory, fname+'-H'), 'wb')
tempFilename = os.path.join(self.directory, fname+'-C')
finalFilename = os.path.join(self.directory, fname+'-D')
messageFile = open(tempFilename, 'wb')
return headerFile, mail.mail.FileMessage(messageFile, tempFilename,
finalFilename)
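        # File layout assumed by these tests: "<base>-H" holds the pickled
        # envelope (headers), "<base>-C" is the temporary body file, and
        # "<base>-D" is the final data file that FileMessage renames the
        # temporary file to once the body has been fully received.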
def setWaiting(self, message):
"""
Ignore the request to mark a message as waiting to be relayed.
@type message: L{bytes}
@param message: The base filename of a message.
"""
pass
class DummySmartHostSMTPRelayingManager(object):
"""
A fake smart host to use for testing.
@type managed: L{dict} of L{bytes} -> L{list} of
L{list} of L{bytes}
@ivar managed: A mapping of a string identifying a managed relayer to
filenames of messages the managed relayer is responsible for.
@ivar queue: See L{__init__}.
"""
def __init__(self, queue):
"""
Initialize the minimum necessary members of a smart host.
@type queue: L{DummyQueue}
@param queue: A queue that can be used for testing purposes.
"""
self.managed = {}
self.queue = queue
class _AttemptManagerTests(unittest.TestCase):
"""
Test the behavior of L{_AttemptManager}.
@type tmpdir: L{bytes}
@ivar tmpdir: The path to a temporary directory holding the message files.
@type reactor: L{MemoryReactorClock}
@ivar reactor: The reactor used for test purposes.
@type eventLog: L{types.NoneType} or L{dict} of L{bytes} -> L{object}
@ivar eventLog: Information about the last informational log message
generated or none if no log message has been generated.
@type noisyAttemptMgr: L{_AttemptManager}
@ivar noisyAttemptMgr: An attempt manager which generates informational
log messages.
@type quietAttemptMgr: L{_AttemptManager}
@ivar quietAttemptMgr: An attempt manager which does not generate
informational log messages.
@type noisyMessage: L{bytes}
@ivar noisyMessage: The full base pathname of the message to be used with
the noisy attempt manager.
@type quietMessage: L{bytes}
@ivar quietMessage: The full base pathname of the message to be used with
        the quiet attempt manager.
"""
def setUp(self):
"""
Set up a temporary directory for the queue, attempt managers with the
noisy flag on and off, message files for use with each attempt manager,
and a reactor. Also, register to be notified when log messages are
generated.
"""
self.tmpdir = self.mktemp()
os.mkdir(self.tmpdir)
self.reactor = MemoryReactorClock()
self.eventLog = None
log.addObserver(self._logObserver)
self.noisyAttemptMgr = _AttemptManager(
DummySmartHostSMTPRelayingManager(DummyQueue(self.tmpdir)),
True, self.reactor)
self.quietAttemptMgr = _AttemptManager(
DummySmartHostSMTPRelayingManager(DummyQueue(self.tmpdir)),
False, self.reactor)
noisyBaseName = "noisyMessage"
quietBaseName = "quietMessage"
self.noisyMessage = os.path.join(self.tmpdir, noisyBaseName)
self.quietMessage = os.path.join(self.tmpdir, quietBaseName)
message = file(self.noisyMessage+'-D', "w")
message.close()
message = file(self.quietMessage+'-D', "w")
message.close()
self.noisyAttemptMgr.manager.managed['noisyRelayer'] = [
noisyBaseName]
self.quietAttemptMgr.manager.managed['quietRelayer'] = [
quietBaseName]
envelope = file(self.noisyMessage+'-H', 'w')
pickle.dump(['from-noisy@domain', 'to-noisy@domain'], envelope)
envelope.close()
envelope = file(self.quietMessage+'-H', 'w')
pickle.dump(['from-quiet@domain', 'to-quiet@domain'], envelope)
envelope.close()
def tearDown(self):
"""
Unregister for log events and remove the temporary directory.
"""
log.removeObserver(self._logObserver)
shutil.rmtree(self.tmpdir)
def _logObserver(self, eventDict):
"""
A log observer.
@type eventDict: L{dict} of L{bytes} -> L{object}
@param eventDict: Information about the last informational log message
generated.
"""
self.eventLog = eventDict
def test_initNoisyDefault(self):
"""
When an attempt manager is created without the noisy parameter, the
noisy instance variable should default to true.
"""
am = _AttemptManager(DummySmartHostSMTPRelayingManager(
DummyQueue(self.tmpdir)))
self.assertTrue(am.noisy)
def test_initNoisy(self):
"""
When an attempt manager is created with the noisy parameter set to
true, the noisy instance variable should be set to true.
"""
self.assertTrue(self.noisyAttemptMgr.noisy)
def test_initQuiet(self):
"""
When an attempt manager is created with the noisy parameter set to
false, the noisy instance variable should be set to false.
"""
self.assertFalse(self.quietAttemptMgr.noisy)
def test_initReactorDefault(self):
"""
When an attempt manager is created without the reactor parameter, the
reactor instance variable should default to the global reactor.
"""
am = _AttemptManager(DummySmartHostSMTPRelayingManager(
DummyQueue(self.tmpdir)))
self.assertEqual(am.reactor, reactor)
def test_initReactor(self):
"""
When an attempt manager is created with a reactor provided, the
reactor instance variable should default to that reactor.
"""
self.assertEqual(self.noisyAttemptMgr.reactor, self.reactor)
def test_notifySuccessNoisy(self):
"""
For an attempt manager with the noisy flag set, notifySuccess should
result in a log message.
"""
self.noisyAttemptMgr.notifySuccess('noisyRelayer', self.noisyMessage)
self.assertTrue(self.eventLog)
def test_notifySuccessQuiet(self):
"""
For an attempt manager with the noisy flag not set, notifySuccess
should result in no log message.
"""
self.quietAttemptMgr.notifySuccess('quietRelayer', self.quietMessage)
self.assertFalse(self.eventLog)
def test_notifyFailureNoisy(self):
"""
For an attempt manager with the noisy flag set, notifyFailure should
result in a log message.
"""
self.noisyAttemptMgr.notifyFailure('noisyRelayer', self.noisyMessage)
self.assertTrue(self.eventLog)
def test_notifyFailureQuiet(self):
"""
For an attempt manager with the noisy flag not set, notifyFailure
should result in no log message.
"""
self.quietAttemptMgr.notifyFailure('quietRelayer', self.quietMessage)
self.assertFalse(self.eventLog)
def test_notifyDoneNoisy(self):
"""
For an attempt manager with the noisy flag set, notifyDone should
result in a log message.
"""
self.noisyAttemptMgr.notifyDone('noisyRelayer')
self.assertTrue(self.eventLog)
def test_notifyDoneQuiet(self):
"""
For an attempt manager with the noisy flag not set, notifyDone
should result in no log message.
"""
self.quietAttemptMgr.notifyDone('quietRelayer')
self.assertFalse(self.eventLog)
def test_notifyNoConnectionNoisy(self):
"""
For an attempt manager with the noisy flag set, notifyNoConnection
should result in a log message.
"""
self.noisyAttemptMgr.notifyNoConnection('noisyRelayer')
self.assertTrue(self.eventLog)
self.reactor.advance(60)
def test_notifyNoConnectionQuiet(self):
"""
For an attempt manager with the noisy flag not set, notifyNoConnection
should result in no log message.
"""
self.quietAttemptMgr.notifyNoConnection('quietRelayer')
self.assertFalse(self.eventLog)
self.reactor.advance(60)
from twisted.python.runtime import platformType
import types
if platformType != "posix":
for o in locals().values():
if isinstance(o, (types.ClassType, type)) and issubclass(o, unittest.TestCase):
o.skip = "twisted.mail only works on posix"
| Architektor/PySnip | venv/lib/python2.7/site-packages/twisted/mail/test/test_mail.py | Python | gpl-3.0 | 83,905 |
# This file is part of ZS
# Copyright (C) 2013-2014 Nathaniel Smith <[email protected]>
# See file LICENSE.txt for license information.
import sys
from zs import ZS
def open_zs(opts, **kwargs):
zs_path_or_url = opts["<zs_file>"]
if zs_path_or_url.startswith("http"):
kwargs["url"] = zs_path_or_url
else:
kwargs["path"] = zs_path_or_url
if "__j__" in opts:
kwargs["parallelism"] = opts["__j__"]
return ZS(**kwargs)
def optfail(msg):
sys.stderr.write(msg)
sys.stderr.write("\n")
sys.exit(2)
| njsmith/zs | zs/cmdline/util.py | Python | bsd-2-clause | 545 |
#-------Main Package Settings-----------#
name = "Cheetah"
from src.Version import Version as version
maintainer = "Tavis Rudd"
author = "Tavis Rudd"
author_email = "[email protected]"
url = "http://www.CheetahTemplate.org/"
packages = ['Cheetah',
'Cheetah.Macros',
'Cheetah.Templates',
'Cheetah.Tests',
'Cheetah.Tools',
'Cheetah.Utils',
'Cheetah.Utils.optik',
]
classifiers = [line.strip() for line in '''\
#Development Status :: 4 - Beta
Development Status :: 5 - Production/Stable
Intended Audience :: Developers
Intended Audience :: System Administrators
License :: OSI Approved :: MIT License
Operating System :: OS Independent
Programming Language :: Python
Topic :: Internet :: WWW/HTTP
Topic :: Internet :: WWW/HTTP :: Dynamic Content
Topic :: Internet :: WWW/HTTP :: Site Management
Topic :: Software Development :: Code Generators
Topic :: Software Development :: Libraries :: Python Modules
Topic :: Software Development :: User Interfaces
Topic :: Text Processing'''.splitlines() if not line.strip().startswith('#')]
del line
package_dir = {'Cheetah':'src'}
import os
import os.path
from distutils.core import Extension
## we only assume the presence of a c compiler on Posix systems, NT people will
# have to enable this manually.
if os.name == 'posix':
ext_modules=[Extension("Cheetah._namemapper", [os.path.join("src" ,"_namemapper.c")]
)
]
else:
ext_modules=[]
## Data Files and Scripts
scripts = ['bin/cheetah-compile',
'bin/cheetah',
]
data_files = ['recursive: src *.tmpl *.txt LICENSE README TODO CHANGES',
]
description = "Cheetah is a template engine and code generation tool."
long_description = '''Cheetah is an open source template engine and code generation tool.
It can be used standalone or combined with other tools and frameworks. Web
development is its principal use, but Cheetah is very flexible and is also being
used to generate C++ game code, Java, sql, form emails and even Python code.
Documentation
================================================================================
For a high-level introduction to Cheetah please refer to the User\'s Guide
at http://cheetahtemplate.org/learn.html
Mailing list
================================================================================
[email protected]
Subscribe at http://lists.sourceforge.net/lists/listinfo/cheetahtemplate-discuss
Credits
================================================================================
http://cheetahtemplate.org/credits.html
Praise
================================================================================
"I\'m enamored with Cheetah" - Sam Ruby, senior member of IBM Emerging
Technologies Group & director of Apache Software Foundation
"Give Cheetah a try. You won\'t regret it. ... Cheetah is a truly powerful
system. ... Cheetah is a serious contender for the 'best of breed' Python
templating." - Alex Martelli
"People with a strong PHP background absolutely love Cheetah for being Smarty,
but much, much better." - Marek Baczynski
"I am using Smarty and I know it very well, but compiled Cheetah Templates with
its inheritance approach is much powerful and easier to use than Smarty." -
Jaroslaw Zabiello
"There is no better solution than Cheetah" - Wilk
"A cheetah template can inherit from a python class, or a cheetah template, and
a Python class can inherit from a cheetah template. This brings the full power
of OO programming facilities to the templating system, and simply blows away
other templating systems" - Mike Meyer
"Cheetah has successfully been introduced as a replacement for the overweight
XSL Templates for code generation. Despite the power of XSL (and notably XPath
expressions), code generation is better suited to Cheetah as templates are much
easier to implement and manage." - The FEAR development team
(http://fear.sourceforge.net/docs/latest/guide/Build.html#id2550573)
"I\'ve used Cheetah quite a bit and it\'s a very good package" - Kevin Dangoor,
lead developer of TurboGears.
Recent Changes
================================================================================
See http://cheetahtemplate.org/docs/CHANGES for full details.
'''
try:
recentChanges = open('CHANGES').read().split('\n1.0')[0]
long_description += recentChanges
del recentChanges
except:
pass
| carvalhomb/tsmells | lib/Cheetah/SetupConfig.py | Python | gpl-2.0 | 4,534 |
# Library for full image cnn operations
import numpy as np
import scipy.ndimage
from scipy.signal import convolve2d
from scipy.signal import fftconvolve
from numpy.fft import rfftn
from numpy.fft import irfftn
import mahotas
import time
import h5py
import pycuda.autoinit
import pycuda.driver as cu
import pycuda.compiler as nvcc
import pycuda.gpuarray as gpuarray
BLOCK_BATCHES = 1024
BLOCK_PIXELS = 1
def _centered(arr, newsize):
# Return the center newsize portion of the array.
newsize = np.asarray(newsize)
currsize = np.array(arr.shape)
startind = (currsize - newsize) // 2
endind = startind + newsize
myslice = [slice(startind[k], endind[k]) for k in range(len(endind))]
return arr[tuple(myslice)]
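    # Crops a full convolution result back to the requested shape; this mirrors
    # the private _centered() helper scipy.signal uses after an FFT-based
    # convolution.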
gpu_maxout_layer_source = """
__global__ void maxout_layer( float* input, float* filters, float* bias, float* output,
int batches, int channels, int width, int height,
int nfilters, int filter_width, int filter_height,
int output_width, int output_height,
int maxout_size, int maxpool_size)
{
//int batch_index = blockIdx.x * blockDim.x + threadIdx.x;
int ochannel_index = blockIdx.x * blockDim.x + threadIdx.x;
int oi = blockIdx.y * blockDim.y + threadIdx.y;
int oj = blockIdx.z * blockDim.z + threadIdx.z;
int conv_size = (width - filter_width + 1);
int conv_size2 = conv_size * conv_size;
int wh = width * height;
int input_batchsize = wh * channels;
int filter_wh = filter_width * filter_height;
int output_wh = output_width * output_height;
int output_batchsize = output_wh * (nfilters / maxout_size);
int start_filter = ochannel_index / maxout_size;
int end_filter = start_filter + maxout_size - 1;
if (ochannel_index < nfilters / maxout_size && oi < output_width && oj < output_height)
{
for (int batch_index = 0; batch_index < batches; ++batch_index)
{
float current_max;
// Calculate convolution result for output pixel oi, oj with all filters
for(int filter_index = start_filter; filter_index <= end_filter; ++filter_index )
{
// Maxpool region
for (int i = oi * maxpool_size; i < (oi + 1) * maxpool_size; ++i)
{
for (int j = oj * maxpool_size; j < (oj + 1) * maxpool_size; ++j)
{
float conv_sum = 0;
// Convolve for all channels
for(int c = 0; c < channels; ++c)
{
for (int fi = 0; fi < filter_width; ++fi)
{
for (int fj = 0; fj < filter_height; ++fj)
{
if (i + fi < width && j + fj < height)
{
float in_pix = input[(i + fi) + (j + fj) * width + c * wh + batch_index * input_batchsize];
float filt_pix = filters[fi + fj * filter_width + (filter_index * channels + c) * filter_wh];
conv_sum += in_pix * filt_pix;
}
}
}
}
// Add pixel-wise bias
conv_sum += bias[i + j * conv_size + filter_index * conv_size2];
// Maxout across channels and maxpool across pixels
if (((filter_index % maxout_size == 0) && (i % maxpool_size == 0) && (j % maxpool_size == 0)) ||
(conv_sum > current_max))
{
current_max = conv_sum;
}
}
}
if (filter_index % maxout_size == maxout_size - 1)
{
output[oi + oj * output_width + (filter_index / maxout_size) * output_wh + batch_index * output_batchsize] = current_max;
}
}
}
}
}
"""
gpu_softmax_layer_source = """
__global__ void softmax_layer( float* input, float* filters, float* bias, float* output,
int batches, int channels, int width, int height,
int nfilters, int filter_size,
int output_width, int output_height)
{
int batch_index = blockIdx.x * blockDim.x + threadIdx.x;
int oi = blockIdx.y * blockDim.y + threadIdx.y;
int oj = blockIdx.z * blockDim.z + threadIdx.z;
int wh = width * height;
int input_batchsize = wh * channels;
int output_wh = output_width * output_height;
int output_batchsize = output_wh * nfilters;
if (batch_index < batches && oi < output_width && oj < output_height)
{
float current_max;
for(int filter_index = 0; filter_index < nfilters; ++filter_index )
{
float dot_product = 0;
// Calculate dot product for output pixel oi, oj
for (int fi = 0; fi < filter_size; ++fi)
{
for (int fj = 0; fj < filter_size; ++fj)
{
for(int c = 0; c < channels; ++c)
{
float in_pix = input[(oi + fi) + (oj + fj) * width + c * wh + batch_index * input_batchsize];
float filt_pix = filters[filter_index + c * nfilters + fi * channels * nfilters + fj * filter_size * channels * nfilters];
dot_product += in_pix * filt_pix;
}
}
}
dot_product += bias[filter_index];
if ((filter_index == 0) || (dot_product > current_max))
{
current_max = dot_product;
}
output[oi + oj * output_width + filter_index * output_wh + batch_index * output_batchsize] = dot_product;
}
// Softmax
float esum = 0;
for(int filter_index = 0; filter_index < nfilters; ++filter_index )
{
float softout = output[oi + oj * output_width + filter_index * output_wh + batch_index * output_batchsize];
softout = __expf(softout - current_max);
//softout = expf(softout - current_max);
esum += softout;
output[oi + oj * output_width + filter_index * output_wh + batch_index * output_batchsize] = softout;
}
for(int filter_index = 0; filter_index < nfilters; ++filter_index )
{
output[oi + oj * output_width + filter_index * output_wh + batch_index * output_batchsize] /= esum;
}
}
}
"""
gpu_maxout_layer = nvcc.SourceModule(gpu_maxout_layer_source).get_function('maxout_layer')
gpu_softmax_layer = nvcc.SourceModule(gpu_softmax_layer_source).get_function('softmax_layer')
class MaxoutMaxpoolLayer(object):
def __init__(self, nkernels, ninputs, kernel_size, stride_in, maxpool_size, maxout_size, W, b):
self.ninputs = ninputs
self.nkernels = nkernels
self.kernel_size = kernel_size
self.maxpool_size = maxpool_size
self.maxout_size = maxout_size
self.stride_in = stride_in
self.stride_out = stride_in
self.noutputs = nkernels / maxpool_size
# Size of previous convolution operation (for fft result cache)
self.prev_conv_size = 0
# Input / output footprint - set once full network has been constructed
self.input_footprint = 0
self.output_footprint = 0
self.W = gpuarray.to_gpu(W.copy())
self.b = gpuarray.to_gpu(b)
def apply_layer(self, input_image, nbatches):
# start with convoludion output size (before maxout and maxpool operations)
output_size = (nbatches, self.noutputs, self.output_footprint, self.output_footprint)
print output_size
block = (int(self.noutputs), 4, 4)
grid = (int((self.noutputs - 1) / block[0] + 1), int((self.input_footprint - 1) / block[1] + 1), int((self.input_footprint - 1) / block[2] + 1))
if not isinstance(input_image, gpuarray.GPUArray):
input_image = gpuarray.to_gpu(input_image)
d_maxout_result = gpuarray.zeros(long(np.prod(output_size)), np.float32).reshape(output_size)
gpu_maxout_layer(input_image, self.W, self.b, d_maxout_result,
np.int32(nbatches), np.int32(self.ninputs), np.int32(self.input_footprint), np.int32(self.input_footprint),
np.int32(self.W.shape[0]), np.int32(self.W.shape[2]), np.int32(self.W.shape[3]),
np.int32(output_size[2]), np.int32(output_size[3]),
np.int32(self.maxout_size), np.int32(self.maxpool_size),
block=block, grid=grid)
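        # Kernel launch geometry (see gpu_maxout_layer_source above): one CUDA
        # thread per (output channel group, output row, output column); each
        # thread loops over the batch dimension internally and fuses the
        # convolution, per-pixel bias, maxout across filter groups and
        # max-pooling into a single pass.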
print "MO Layer: Complete."
return d_maxout_result
class SoftmaxLayer(object):
def __init__(self, ninputs, noutputs, kernel_size, stride, W, b):
self.ninputs = ninputs
self.noutputs = noutputs
self.kernel_size = kernel_size
self.stride_in = stride
self.stride_out = stride
# Input / output footprint - set once full network has been constructed
self.input_footprint = 0
self.output_footprint = 0
self.W = gpuarray.to_gpu(W)
self.b = gpuarray.to_gpu(b)
def apply_layer(self, input_image, nbatches):
# Calculate feed-forward result
output_size = (nbatches, self.noutputs, self.output_footprint, self.output_footprint)
print output_size
block = (BLOCK_BATCHES, BLOCK_PIXELS, BLOCK_PIXELS)
grid = (int((nbatches - 1) / block[0] + 1), int((self.input_footprint - 1) / block[1] + 1), int((self.input_footprint - 1) / block[2] + 1))
if not isinstance(input_image, gpuarray.GPUArray):
input_image = gpuarray.to_gpu(input_image)
d_softmax_result = gpuarray.zeros(long(np.prod(output_size)), np.float32).reshape(output_size)
gpu_softmax_layer(input_image, self.W, self.b, d_softmax_result,
np.int32(nbatches), np.int32(self.ninputs), np.int32(self.input_footprint), np.int32(self.input_footprint),
np.int32(self.W.shape[1]), np.int32(self.input_footprint),
np.int32(output_size[2]), np.int32(output_size[3]),
block=block, grid=grid)
print "SM Layer: Complete."
return d_softmax_result
class DeepNetwork(object):
def __init__(self, filename):
network_h5 = h5py.File(filename, 'r')
self.nlayers = network_h5['/layers'][...]
print 'Network has {0} layers.'.format(self.nlayers)
if '/downsample_factor' in network_h5:
self.downsample = network_h5['/downsample_factor'][...]
else:
self.downsample = 1
self.best_sigma = 0
self.best_offset = (0,0)
all_layers = []
stride_in = 1
for layer_i in range(self.nlayers):
layer_string = '/layer{0}/'.format(layer_i)
layer_type = network_h5[layer_string + 'type'][...]
if layer_type == 'MaxoutConvC01B':
layer_weights = network_h5[layer_string + 'weights'][...]
layer_bias = network_h5[layer_string + 'bias'][...]
layer_maxpoolsize = network_h5[layer_string + 'pool_shape'][...][0]
layer_maxoutsize = network_h5[layer_string + 'num_pieces'][...]
# Arrange weights as [kernels, inputs, ksize, ksize]
layer_weights = np.rollaxis(layer_weights, 3, 0)
new_layer = MaxoutMaxpoolLayer(
layer_weights.shape[0], layer_weights.shape[1], layer_weights.shape[2],
stride_in, layer_maxpoolsize, layer_maxoutsize, W=layer_weights, b=layer_bias)
elif layer_type == 'Softmax':
layer_weights = network_h5[layer_string + 'weights'][...]
layer_bias = network_h5[layer_string + 'bias'][...]
layer_ksize = network_h5[layer_string + 'ksize'][...][0]
new_layer = SoftmaxLayer(
layer_weights.shape[0] / (layer_ksize ** 2), layer_weights.shape[1], layer_ksize,
stride_in, W=layer_weights, b=layer_bias)
else:
raise Exception("Unknown layer type: {0}".format(layer_type))
all_layers.append(new_layer)
stride_in = new_layer.stride_out
# Calculate network footprint and therefore pad size
footprint = 1
for layer in range(self.nlayers-1, -1, -1):
all_layers[layer].output_footprint = footprint
if layer == self.nlayers - 1:
footprint = all_layers[layer].kernel_size
else:
footprint = footprint * all_layers[layer].maxpool_size - 1 + all_layers[layer].kernel_size
all_layers[layer].input_footprint = footprint
self.all_layers = all_layers
self.pad_by = int(self.downsample * (footprint // 2))
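        # The loop above walks the layers backwards to compute the receptive
        # field of one output pixel: each maxout layer multiplies the running
        # footprint by its pool size and adds (kernel_size - 1). Hypothetical
        # example: a softmax kernel of 4 preceded by two maxout layers with
        # kernel 5 and pool 2 gives 4 -> 4*2-1+5 = 12 -> 12*2-1+5 = 28, so
        # pad_by would be downsample * 14.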
def apply_net(self, input_image, perform_downsample=False, perform_pad=False, perform_upsample=False, perform_blur=False, perform_offset=False):
if perform_pad:
input_image = np.pad(input_image, ((self.pad_by, self.pad_by), (self.pad_by, self.pad_by)), 'symmetric')
if perform_downsample and self.downsample != 1:
input_image = np.float32(mahotas.imresize(input_image, 1.0/self.downsample))
nx = input_image.shape[0] - self.all_layers[0].input_footprint + 1
ny = input_image.shape[1] - self.all_layers[0].input_footprint + 1
nbatches = nx * ny
layer_temp = np.zeros((nbatches, 1, self.all_layers[0].input_footprint, self.all_layers[0].input_footprint), dtype=np.float32)
print layer_temp.shape
batchi = 0
for x in range(nx):
for y in range(ny):
#print (x,y)
layer_temp[batchi, :, :, :] = input_image[x:(x + self.all_layers[0].input_footprint), y:(y + self.all_layers[0].input_footprint)]
batchi += 1
assert batchi == nbatches
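        # layer_temp now holds one input patch per output pixel (a sliding
        # window of side input_footprint), so whole-image classification is
        # expressed as a large batch of per-pixel patches fed through the
        # network in BLOCK_BATCHES-sized chunks below.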
output = np.zeros(nbatches, dtype=np.float32)
for block in range(nbatches / BLOCK_BATCHES + 1):
block_from = block * BLOCK_BATCHES
block_to = min((block+1) * BLOCK_BATCHES, layer_temp.shape[0])
nbatches = block_to - block_from
block_temp = layer_temp[block_from:block_to,:,:,:]
for layeri in range(len(self.all_layers)):
print layeri
start_time = time.clock()
block_temp = self.all_layers[layeri].apply_layer(block_temp, nbatches)
end_time = time.clock()
print('Layer time = %.2fm' % ((end_time - start_time) / 60.))
if isinstance(block_temp, gpuarray.GPUArray):
block_temp = block_temp.get()
output[block_from:block_to] = block_temp[:,0,0,0]
output = output.reshape(nx, ny)
if perform_upsample:
output = np.float32(mahotas.imresize(output, self.downsample))
if perform_blur and self.best_sigma != 0:
output = scipy.ndimage.filters.gaussian_filter(output, self.best_sigma)
if perform_offset:
#Translate
output = np.roll(output, self.best_offset[0], axis=0)
output = np.roll(output, self.best_offset[1], axis=1)
# Crop to valid size
#output = output[self.pad_by:-self.pad_by,self.pad_by:-self.pad_by]
return output
| Rhoana/membrane_cnn | maxout/custom_classify/lib_maxout_gpu.py | Python | bsd-3-clause | 15,712 |
#!/usr/bin/env python3
"""Prepares a simple TVM library for testing."""
from os import path as osp
import sys
import tvm
def main():
n = tvm.var('n')
A = tvm.placeholder((n,), name='A')
B = tvm.placeholder((n,), name='B')
C = tvm.compute(A.shape, lambda *i: A(*i) + B(*i), name='C')
s = tvm.create_schedule(C.op)
s[C].parallel(s[C].op.axis[0])
print(tvm.lower(s, [A, B, C], simple_mode=True))
tvm.build(s, [A, B, C], 'llvm --system-lib').save(osp.join(sys.argv[1], 'test.o'))
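    # 'llvm --system-lib' bundles the compiled vector-add kernel into a
    # "system library" object file, so the Rust test that drives this script
    # can link it statically and look the function up by name at runtime.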
if __name__ == '__main__':
main()
| mlperf/training_results_v0.6 | Fujitsu/benchmarks/resnet/implementations/mxnet/3rdparty/tvm/rust/tests/test_tvm_basic/src/build_test_lib.py | Python | apache-2.0 | 551 |
from openzwave.network import ZWaveNode
from openzwave.value import ZWaveValue
from Firefly import logging
from Firefly.components.zwave.device_types.contact_sensor import ZwaveContactSensor
TITLE = 'Ecolink Contact Sensor'
def Setup(firefly, package, **kwargs):
logging.message('Entering %s setup' % TITLE)
sensor = ContactSensor(firefly, package, **kwargs)
firefly.install_component(sensor)
return sensor.id
class ContactSensor(ZwaveContactSensor):
def __init__(self, firefly, package, **kwargs):
super().__init__(firefly, package, TITLE, **kwargs)
def update_from_zwave(self, node: ZWaveNode = None, ignore_update=False, values: ZWaveValue = None, values_only=False, **kwargs):
if node is None:
return
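    # Some sensors only report reliably when polled, so make sure the primary
    # Sensor value is polled; the is_polled check keeps enable_poll() from
    # being called again on every update.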
if not node.values[self.value_map['Sensor']].is_polled:
node.values[self.value_map['Sensor']].enable_poll()
super().update_from_zwave(node, ignore_update, values, values_only, **kwargs)
| Firefly-Automation/Firefly | Firefly/components/zwave/ecolink/contact_sensor.py | Python | apache-2.0 | 940 |
from test_health_ntd_dengue import suite
| kret0s/gnuhealth-live | tryton/server/trytond-3.8.3/trytond/modules/health_ntd_dengue/tests/__init__.py | Python | gpl-3.0 | 41 |
"""Convenience methods for extracting metadata from an audio file."""
import asyncio
import io
from typing import NamedTuple, Optional, Union
from mediafile import MediaFile
class AudioMetadata(NamedTuple):
"""Audio metadata."""
title: Optional[str]
artist: Optional[str]
album: Optional[str]
duration: Optional[float]
EMPTY_METADATA = AudioMetadata(None, None, None, None)
def _open_file(file: io.BufferedReader) -> MediaFile:
start_position = file.tell()
in_file = MediaFile(file)
file.seek(start_position)
return in_file
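    # MediaFile advances the reader while parsing tags; restoring the original
    # position lets the caller keep streaming from the same buffer after the
    # metadata has been read.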
async def get_metadata(file: Union[str, io.BufferedReader]) -> AudioMetadata:
"""Extract metadata from a file and return it."""
loop = asyncio.get_event_loop()
if isinstance(file, io.BufferedReader):
in_file = await loop.run_in_executor(None, _open_file, file)
else:
in_file = await loop.run_in_executor(None, MediaFile, file)
return AudioMetadata(
in_file.title,
in_file.artist,
in_file.album,
in_file.length,
)
| postlund/pyatv | pyatv/support/metadata.py | Python | mit | 1,057 |
from ._abstract import AbstractScraper
from ._utils import get_minutes
class MarthaStewart(AbstractScraper):
@classmethod
def host(cls):
return "marthastewart.com"
def title(self):
return self.schema.title()
def total_time(self):
s = (
self.soup.findAll("div", {"class": "two-subcol-content-wrapper"})[0]
.find("div", {"class": "recipe-meta-item-body"})
.text.strip()
)
return get_minutes(s)
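        # Total time and yields are read straight from the page's recipe-meta
        # blocks rather than via the schema.org data, presumably because the
        # structured data on this site is missing or unreliable for those
        # fields.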
def yields(self):
return (
self.soup.findAll("div", {"class": "two-subcol-content-wrapper"})[1]
.find("div", {"class": "recipe-meta-item-body"})
.text.strip()
)
def ingredients(self):
return self.schema.ingredients()
def instructions(self):
return self.schema.instructions()
def ratings(self):
return self.schema.ratings()
| hhursev/recipe-scraper | recipe_scrapers/marthastewart.py | Python | mit | 907 |
#!/usr/bin/env python2.7
# coding=utf-8
# Author: Dustyn Gibson <[email protected]>
# URL: http://github.com/SickRage/SickRage
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
"""
Test sickbeard.helpers
Methods:
fixGlob
indentXML
remove_non_release_groups
isMediaFile
isRarFile
isBeingWritten
remove_file_failed
makeDir
searchIndexerForShowID
listMediaFiles
copyFile
moveFile
link
hardlinkFile
symlink
moveAndSymlinkFile
make_dirs
rename_ep_file
delete_empty_folders
fileBitFilter
chmodAsParent
fixSetGroupID
is_anime_in_show_list
update_anime_support
get_absolute_number_from_season_and_episode
get_all_episodes_from_absolute_number
sanitizeSceneName
arithmeticEval
create_https_certificates
backupVersionedFile
restoreVersionedFile
md5_for_file
get_lan_ip
check_url
anon_url
encrypt
decrypt
full_sanitizeSceneName
_check_against_names
get_show
is_hidden_folder
real_path
validateShow
set_up_anidb_connection
makeZip
extractZip
backupConfigZip
restoreConfigZip
mapIndexersToShow
touchFile
_getTempDir
_setUpSession
getURL
download_file
get_size
generateApiKey
remove_article
generateCookieSecret
verify_freespace
pretty_time_delta
isFileLocked
getDiskSpaceUsage
"""
import os.path
import sys
import unittest
sys.path.insert(1, os.path.abspath(os.path.join(os.path.dirname(__file__), '../lib')))
sys.path.insert(1, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from sickbeard.helpers import remove_non_release_groups
TEST_RESULT = 'Show.Name.S01E01.HDTV.x264-RLSGROUP'
TEST_CASES = {
'removewords': [
TEST_RESULT,
'Show.Name.S01E01.HDTV.x264-RLSGROUP[cttv]',
'Show.Name.S01E01.HDTV.x264-RLSGROUP.RiPSaLoT',
'Show.Name.S01E01.HDTV.x264-RLSGROUP[GloDLS]',
'Show.Name.S01E01.HDTV.x264-RLSGROUP[EtHD]',
'Show.Name.S01E01.HDTV.x264-RLSGROUP-20-40',
'Show.Name.S01E01.HDTV.x264-RLSGROUP[NO-RAR] - [ www.torrentday.com ]',
'Show.Name.S01E01.HDTV.x264-RLSGROUP[rarbg]',
'Show.Name.S01E01.HDTV.x264-RLSGROUP[Seedbox]',
'{ www.SceneTime.com } - Show.Name.S01E01.HDTV.x264-RLSGROUP',
'].[www.tensiontorrent.com] - Show.Name.S01E01.HDTV.x264-RLSGROUP',
'[ www.TorrentDay.com ] - Show.Name.S01E01.HDTV.x264-RLSGROUP',
'Show.Name.S01E01.HDTV.x264-RLSGROUP[silv4]',
'Show.Name.S01E01.HDTV.x264-RLSGROUP[AndroidTwoU]',
'[www.newpct1.com]Show.Name.S01E01.HDTV.x264-RLSGROUP',
'Show.Name.S01E01.HDTV.x264-RLSGROUP-NZBGEEK',
'.www.Cpasbien.pwShow.Name.S01E01.HDTV.x264-RLSGROUP',
'Show.Name.S01E01.HDTV.x264-RLSGROUP [1044]',
'[ www.Cpasbien.pw ] Show.Name.S01E01.HDTV.x264-RLSGROUP',
'Show.Name.S01E01.HDTV.x264-RLSGROUP.[BT]',
'Show.Name.S01E01.HDTV.x264-RLSGROUP[vtv]',
'Show.Name.S01E01.HDTV.x264-RLSGROUP.[www.usabit.com]',
'[www.Cpasbien.com] Show.Name.S01E01.HDTV.x264-RLSGROUP',
'Show.Name.S01E01.HDTV.x264-RLSGROUP[ettv]',
'Show.Name.S01E01.HDTV.x264-RLSGROUP[rartv]',
'Show.Name.S01E01.HDTV.x264-RLSGROUP-Siklopentan',
'Show.Name.S01E01.HDTV.x264-RLSGROUP-RP',
'Show.Name.S01E01.HDTV.x264-RLSGROUP[PublicHD]',
'[www.Cpasbien.pe] Show.Name.S01E01.HDTV.x264-RLSGROUP',
'Show.Name.S01E01.HDTV.x264-RLSGROUP[eztv]',
'Show.Name.S01E01.HDTV.x264-RLSGROUP-[SpastikusTV]',
'].[ www.tensiontorrent.com ] - Show.Name.S01E01.HDTV.x264-RLSGROUP',
'[ www.Cpasbien.com ] Show.Name.S01E01.HDTV.x264-RLSGROUP',
'Show.Name.S01E01.HDTV.x264-RLSGROUP- { www.SceneTime.com }',
'Show.Name.S01E01.HDTV.x264-RLSGROUP- [ www.torrentday.com ]',
'Show.Name.S01E01.HDTV.x264-RLSGROUP.Renc'
]
}
class HelpersTests(unittest.TestCase):
"""
Test using test generator
"""
def __init__(self, *args, **kwargs):
"""
Initialize test
"""
super(HelpersTests, self).__init__(*args, **kwargs)
def test_generator(test_strings):
"""
Generate tests from test strings
:param test_strings: to generate tests from
:return: test
"""
def _test(self):
"""
Generate tests
:param self:
:return: test to run
"""
for test_string in test_strings:
self.assertEqual(remove_non_release_groups(test_string), TEST_RESULT)
return _test
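    # The generated _test functions are attached to HelpersTests via setattr()
    # in the __main__ block at the bottom of this file, yielding one test
    # method per entry in TEST_CASES.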
class HelpersZipTests(unittest.TestCase):
"""
Test zip methods
"""
@unittest.skip('Not yet implemented')
def test_make_zip(self):
"""
Test makeZip
"""
pass
@unittest.skip('Not yet implemented')
def test_extract_zip(self):
"""
Test extractZip
"""
pass
@unittest.skip('Not yet implemented')
def test_backup_config_zip(self):
"""
Test backupConfigZip
"""
pass
@unittest.skip('Not yet implemented')
def test_restore_config_zip(self):
"""
Test restoreConfigZip
"""
pass
@unittest.skip('Not yet implemented')
def test_is_rar_file(self):
"""
Test isRarFile
"""
pass
class HelpersDirectoryTests(unittest.TestCase):
"""
Test directory methods
"""
@unittest.skip('Not yet implemented')
def test_make_dirs(self):
"""
Test make_dirs
"""
pass
@unittest.skip('Not yet implemented')
def test_delete_empty_folders(self):
"""
Test delete_empty_folders
"""
pass
@unittest.skip('Not yet implemented')
def test_make_dir(self):
"""
Test makeDir
"""
pass
@unittest.skip('Not yet implemented')
def test_get_temp_dir(self):
"""
Test _getTempDir
"""
pass
@unittest.skip('Not yet implemented')
def test_is_hidden_folder(self):
"""
Test is_hidden_folder
"""
pass
@unittest.skip('Not yet implemented')
def test_real_path(self):
"""
Test real_path
"""
pass
class HelpersFileTests(unittest.TestCase):
"""
Test file helpers
"""
@unittest.skip('Not yet implemented')
def test_is_media_file(self):
"""
Test isMediaFile
"""
pass
@unittest.skip('Not yet implemented')
def test_is_file_locked(self):
"""
Test isFileLocked
"""
pass
@unittest.skip('Not yet implemented')
def test_is_being_written(self):
"""
Test isBeingWritten
"""
pass
@unittest.skip('Not yet implemented')
def test_remove_file_failed(self):
"""
Test remove_file_failed
"""
pass
@unittest.skip('Not yet implemented')
def test_list_media_files(self):
"""
Test listMediaFiles
"""
pass
@unittest.skip('Not yet implemented')
def test_copy_file(self):
"""
Test copyFile
"""
pass
@unittest.skip('Not yet implemented')
def test_move_file(self):
"""
Test moveFile
"""
pass
@unittest.skip('Not yet implemented')
def test_rename_ep_file(self):
"""
Test rename_ep_file
"""
pass
@unittest.skip('Not yet implemented')
def test_file_bit_filter(self):
"""
Test fileBitFilter
"""
pass
@unittest.skip('Not yet implemented')
def test_chmod_as_parent(self):
"""
Test chmodAsParent
"""
pass
@unittest.skip('Not yet implemented')
def test_backup_versioned_file(self):
"""
Test backupVersionedFile
"""
pass
@unittest.skip('Not yet implemented')
def test_restore_versioned_file(self):
"""
Test restoreVersionedFile
"""
pass
@unittest.skip('Not yet implemented')
def test_verify_free_space(self):
"""
Test verify_freespace
"""
pass
@unittest.skip('Not yet implemented')
def test_get_disk_space_usage(self):
"""
Test getDiskSpaceUsage
"""
pass
@unittest.skip('Not yet implemented')
def test_download_file(self):
"""
Test download_file
"""
pass
@unittest.skip('Not yet implemented')
def test_get_size(self):
"""
Test get_size
"""
pass
@unittest.skip('Not yet implemented')
def test_md5_for_file(self):
"""
Test md5_for_file
"""
pass
@unittest.skip('Not yet implemented')
def test_touch_file(self):
"""
Test touchFile
"""
pass
class HelpersFileLinksTests(unittest.TestCase):
"""
Test sym and hard links
"""
@unittest.skip('Not yet implemented')
def test_link(self):
"""
Test link
"""
pass
@unittest.skip('Not yet implemented')
def test_hardlink_file(self):
"""
Test hardlinkFile
"""
pass
@unittest.skip('Not yet implemented')
def test_symlink(self):
"""
Test symlink
"""
pass
@unittest.skip('Not yet implemented')
def test_move_and_symlink_file(self):
"""
Test moveAndSymlinkFile
"""
pass
class HelpersEncryptionTests(unittest.TestCase):
"""
Test encryption and decryption
"""
@unittest.skip('Not yet implemented')
def test_create_https_certificates(self):
"""
Test create_https_certificates
"""
pass
@unittest.skip('Not yet implemented')
def test_encrypt(self):
"""
Test encrypt
"""
pass
@unittest.skip('Not yet implemented')
def test_decrypt(self):
"""
Test decrypt
"""
pass
@unittest.skip('Not yet implemented')
def test_generate_cookie_secret(self):
"""
Test generateCookieSecret
"""
pass
class HelpersShowTests(unittest.TestCase):
"""
Test show methods
"""
@unittest.skip('Not yet implemented')
def test_search_indexer_for_show_id(self):
"""
Test searchIndexerForShowID
"""
pass
@unittest.skip('Not yet implemented')
def test_is_anime_in_show_list(self):
"""
Test is_anime_in_show_list
"""
pass
@unittest.skip('Not yet implemented')
def test_check_against_names(self):
"""
Test _check_against_names
"""
pass
@unittest.skip('Not yet implemented')
def test_get_show(self):
"""
Test get_show
"""
pass
@unittest.skip('Not yet implemented')
def test_validate_show(self):
"""
Test validateShow
"""
pass
@unittest.skip('Not yet implemented')
def test_map_indexers_to_show(self):
"""
Test mapIndexersToShow
"""
pass
@unittest.skip('Not yet implemented')
def test_get_abs_no_from_s_and_e(self):
"""
Test get_absolute_number_from_season_and_episode
"""
pass
@unittest.skip('Not yet implemented')
def test_get_all_eps_from_abs_no(self):
"""
Test get_all_episodes_from_absolute_number
"""
pass
class HelpersConnectionTests(unittest.TestCase):
"""
Test connections
"""
@unittest.skip('Not yet implemented')
def test_get_lan_ip(self):
"""
Test get_lan_ip
"""
pass
@unittest.skip('Not yet implemented')
def test_check_url(self):
"""
Test check_url
"""
pass
@unittest.skip('Not yet implemented')
def test_anon_url(self):
"""
Test anon_url
"""
pass
@unittest.skip('Not yet implemented')
def test_set_up_anidb_connection(self):
"""
Test set_up_anidb_connection
"""
pass
@unittest.skip('Not yet implemented')
def test_set_up_session(self):
"""
Test _setUpSession
"""
pass
@unittest.skip('Not yet implemented')
def test_get_url(self):
"""
Test getURL
"""
pass
@unittest.skip('Not yet implemented')
def test_generate_api_key(self):
"""
Test generateApiKey
"""
pass
class HelpersMiscTests(unittest.TestCase):
"""
Test misc helper methods
"""
@unittest.skip('Not yet implemented')
def test_fix_glob(self):
"""
Test fixGlob
"""
pass
@unittest.skip('Not yet implemented')
def test_indent_xml(self):
"""
Test indentXML
"""
pass
@unittest.skip('Not yet implemented')
def test_remove_non_release_groups(self):
"""
Test remove_non_release_groups
"""
pass
@unittest.skip('Not yet implemented')
def test_fix_set_group_id(self):
"""
Test fixSetGroupID
"""
pass
@unittest.skip('Not yet implemented')
def test_update_anime_support(self):
"""
Test update_anime_support
"""
pass
@unittest.skip('Not yet implemented')
def test_sanitize_scene_name(self):
"""
Test sanitizeSceneName
"""
pass
@unittest.skip('Not yet implemented')
def test_arithmetic_eval(self):
"""
Test arithmeticEval
"""
pass
@unittest.skip('Not yet implemented')
def test_full_sanitize_scene_name(self):
"""
Test full_sanitizeSceneName
"""
pass
@unittest.skip('Not yet implemented')
def test_remove_article(self):
"""
Test remove_article
"""
pass
@unittest.skip('Not yet implemented')
def test_pretty_time_delta(self):
"""
Test pretty_time_delta
"""
pass
if __name__ == '__main__':
print "=================="
print "STARTING - Helpers TESTS"
print "=================="
print "######################################################################"
for name, test_data in TEST_CASES.items():
test_name = 'test_%s' % name
test = test_generator(test_data)
setattr(HelpersTests, test_name, test)
SUITE = unittest.TestLoader().loadTestsFromTestCase(HelpersTests)
unittest.TextTestRunner(verbosity=2).run(SUITE)
SUITE = unittest.TestLoader().loadTestsFromTestCase(HelpersConnectionTests)
unittest.TextTestRunner(verbosity=2).run(SUITE)
SUITE = unittest.TestLoader().loadTestsFromTestCase(HelpersDirectoryTests)
unittest.TextTestRunner(verbosity=2).run(SUITE)
SUITE = unittest.TestLoader().loadTestsFromTestCase(HelpersEncryptionTests)
unittest.TextTestRunner(verbosity=2).run(SUITE)
SUITE = unittest.TestLoader().loadTestsFromTestCase(HelpersFileLinksTests)
unittest.TextTestRunner(verbosity=2).run(SUITE)
SUITE = unittest.TestLoader().loadTestsFromTestCase(HelpersFileTests)
unittest.TextTestRunner(verbosity=2).run(SUITE)
SUITE = unittest.TestLoader().loadTestsFromTestCase(HelpersMiscTests)
unittest.TextTestRunner(verbosity=2).run(SUITE)
SUITE = unittest.TestLoader().loadTestsFromTestCase(HelpersShowTests)
unittest.TextTestRunner(verbosity=2).run(SUITE)
SUITE = unittest.TestLoader().loadTestsFromTestCase(HelpersZipTests)
unittest.TextTestRunner(verbosity=2).run(SUITE)
| hernandito/SickRage | tests/helpers_tests.py | Python | gpl-3.0 | 16,369 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2021-03-05 16:33
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
def element_to_frozen_element(apps, schema_editor):
Answer = apps.get_model("review", "ReviewAssignmentAnswer")
Frozen = apps.get_model("review", "FrozenReviewFormElement")
for answer in Answer.objects.all():
f = Frozen(
name=answer.element.name,
kind=answer.element.kind,
choices=answer.element.choices,
required=answer.element.required,
order=answer.element.order,
width=answer.element.width,
help_text=answer.element.help_text,
default_visibility=answer.element.default_visibility,
)
f.answer = answer
f.form_element = answer.element
f.save()
answer.original_element = answer.element
        answer.save()
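    # Copying each answer's element into a FrozenReviewFormElement (and keeping
    # a pointer to the original) preserves the question wording an answer was
    # written against, even if the live ReviewFormElement is later edited or
    # deleted (the new foreign keys below use SET_NULL).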
def frozen_element_to_element(apps, schema_editor):
Frozen = apps.get_model("review", "FrozenReviewFormElement")
for frozen in Frozen.objects.all():
if frozen.form_element:
answer = frozen.answer
answer.element = frozen.form_element
answer.save()
frozen.delete()
class Migration(migrations.Migration):
dependencies = [
('review', '0010_answer_not_required'),
]
operations = [
migrations.AlterField(
model_name='reviewassignmentanswer',
name='element',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='review.ReviewFormElement'),
),
migrations.CreateModel(
name='FrozenReviewFormElement',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('kind', models.CharField(choices=[('text', 'Text Field'), ('textarea', 'Text Area'), ('check', 'Check Box'), ('select', 'Select'), ('email', 'Email'), ('upload', 'Upload'), ('date', 'Date')], max_length=50)),
                ('choices', models.CharField(blank=True, help_text='Separate choices with the bar | character.', max_length=1000, null=True)),
('required', models.BooleanField(default=True)),
('order', models.IntegerField()),
('width', models.CharField(choices=[('large-4 columns', 'third'), ('large-6 columns', 'half'), ('large-12 columns', 'full')], max_length=20)),
('help_text', models.TextField(blank=True, null=True)),
                ('default_visibility', models.BooleanField(default=True, help_text='If true, this setting will be available to the author automatically, if false it will be hidden to the author by default.')),
],
options={
'ordering': ('order', 'name'),
'abstract': False,
},
),
migrations.AddField(
model_name='frozenreviewformelement',
name='answer',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='frozen_element', to='review.ReviewAssignmentAnswer'),
),
migrations.AddField(
model_name='reviewassignmentanswer',
name='original_element',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='review.ReviewFormElement'),
),
migrations.AddField(
model_name='frozenreviewformelement',
name='form_element',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='review.ReviewFormElement'),
),
migrations.RunPython(element_to_frozen_element, reverse_code=frozen_element_to_element),
migrations.AlterField(
model_name='reviewassignment',
name='form',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='review.ReviewForm'),
),
migrations.RemoveField(
model_name='reviewassignmentanswer',
name='element',
),
] | BirkbeckCTP/janeway | src/review/migrations/0011_keep_answers_on_form_delete.py | Python | agpl-3.0 | 4,224 |
import setuptools
if __name__ == "__main__":
setuptools.setup(
name='friendly_computing_machine',
version="0.1.1",
description='A starting template for Python programs',
author='Sangeeta Sur',
author_email='[email protected]',
url="https://github.com/ssx9f/friendly-computing-machine",
license='BSD-3C',
packages=setuptools.find_packages(),
install_requires=[
'numpy>=1.7',
],
extras_require={
'docs': [
'sphinx==1.2.3', # autodoc was broken in 1.3.1
'sphinxcontrib-napoleon',
'sphinx_rtd_theme',
'numpydoc',
],
'tests': [
'pytest',
'pytest-cov',
'pytest-pep8',
'tox',
],
},
tests_require=[
'pytest',
'pytest-cov',
'pytest-pep8',
'tox',
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
],
zip_safe=True,
)
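# Install sketch (illustrative): from the repository root, the package and the
# optional extras declared above can be installed with, e.g.:
#   pip install -e .            # core package (pulls in numpy>=1.7)
#   pip install -e ".[tests]"   # adds pytest, pytest-cov, pytest-pep8, tox
#   pip install -e ".[docs]"    # adds the sphinx documentation toolchain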
| ssx9f/friendly-computing-machine | setup.py | Python | bsd-3-clause | 1,261 |
# encoding: utf-8
# Copyright 2013 maker
# License
#-*- coding: utf-8 -*-
import handlers
from django.conf.urls.defaults import *
from maker.core.api.auth import auth_engine
from maker.core.api.doc import documentation_view
from maker.core.api.resource import CsrfExemptResource
ad = { 'authentication': auth_engine }
#sales resources
saleStatusResource = CsrfExemptResource(handler = handlers.SaleStatusHandler, **ad)
productResource = CsrfExemptResource(handler = handlers.ProductHandler, **ad)
sourceResource = CsrfExemptResource(handler = handlers.SaleSourceHandler, **ad)
leadResource = CsrfExemptResource(handler = handlers.LeadHandler, **ad)
opportunityResource = CsrfExemptResource(handler = handlers.OpportunityHandler, **ad)
orderResource = CsrfExemptResource(handler = handlers.SaleOrderHandler, **ad)
subscriptionResource = CsrfExemptResource(handler = handlers.SubscriptionHandler, **ad)
orderedProductResource = CsrfExemptResource(handler = handlers.OrderedProductHandler, **ad)
urlpatterns = patterns('',
#Sales
url(r'^doc$', documentation_view, kwargs={'module': handlers}, name="api_sales_doc"),
url(r'^statuses$', saleStatusResource, name="api_sales_status"),
url(r'^status/(?P<object_ptr>\d+)', saleStatusResource, name="api_sales_status"),
url(r'^products$', productResource, name="api_sales_products"),
url(r'^product/(?P<object_ptr>\d+)', productResource, name="api_sales_products"),
url(r'^sources$', sourceResource, name="api_sales_sources"),
url(r'^source/(?P<object_ptr>\d+)', sourceResource, name="api_sales_sources"),
url(r'^leads$', leadResource, name="api_sales_leads"),
url(r'^lead/(?P<object_ptr>\d+)', leadResource, name="api_sales_leads"),
url(r'^opportunities$', opportunityResource, name="api_sales_opportunities"),
url(r'^opportunity/(?P<object_ptr>\d+)', opportunityResource, name="api_sales_opportunities"),
url(r'^orders$', orderResource, name="api_sales_orders"),
url(r'^order/(?P<object_ptr>\d+)', orderResource, name="api_sales_orders"),
url(r'^subscriptions$', subscriptionResource, name="api_sales_subscriptions"),
url(r'^subscription/(?P<object_ptr>\d+)', subscriptionResource, name="api_sales_subscriptions"),
url(r'^ordered_product/(?P<object_ptr>\d+)', orderedProductResource, name="api_sales_ordered_products"),
)
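# Endpoint sketch (illustrative; the /api/sales/ prefix is an assumption taken
# from the url names -- the actual mount point lives in the project urls.py):
#   GET /api/sales/products       -> productResource (collection)
#   GET /api/sales/product/42     -> productResource (single object_ptr)
#   GET /api/sales/orders         -> orderResource (collection)
# with every resource authenticated through the auth_engine passed in via ``ad``.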
| alejo8591/maker | sales/api/urls.py | Python | mit | 2,334 |
# Copyright 2018 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
from .common import BaseTest
class TestBatchComputeEnvironment(BaseTest):
def test_batch_compute_update(self):
session_factory = self.replay_flight_data("test_batch_compute_update")
p = self.load_policy(
{
"name": "batch-compute",
"resource": "batch-compute",
"filters": [{"computeResources.desiredvCpus": 0}, {"state": "ENABLED"}],
"actions": [{"type": "update-environment", "state": "DISABLED"}],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
client = session_factory().client("batch")
envs = client.describe_compute_environments(
computeEnvironments=[resources[0]["computeEnvironmentName"]]
)[
"computeEnvironments"
]
self.assertEqual(envs[0]["state"], "DISABLED")
def test_batch_compute_delete(self):
session_factory = self.replay_flight_data("test_batch_compute_delete")
p = self.load_policy(
{
"name": "batch-compute",
"resource": "batch-compute",
"filters": [{"computeResources.desiredvCpus": 0}],
"actions": [{"type": "delete"}],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
client = session_factory().client("batch")
envs = client.describe_compute_environments(
computeEnvironments=[resources[0]['computeEnvironmentName']]
)['computeEnvironments']
self.assertEqual(envs[0]['status'], 'DELETING')
class TestBatchDefinition(BaseTest):
def test_definition_deregister(self):
def_name = 'c7n_batch'
session_factory = self.replay_flight_data(
'test_batch_definition_deregister')
p = self.load_policy({
'name': 'batch-definition',
'resource': 'batch-definition',
'filters': [
{'containerProperties.image': 'amazonlinux'}],
'actions': [{'type': 'deregister'}]
}, session_factory=session_factory)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]['jobDefinitionName'], 'c7n_batch')
client = session_factory(region='us-east-1').client('batch')
defs = client.describe_job_definitions(
jobDefinitionName=def_name)['jobDefinitions']
self.assertEqual(defs[0]['status'], 'INACTIVE')
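# Equivalent YAML sketch of the first policy exercised above (illustrative
# only; the tests build the policy from the dict form):
#
#   policies:
#     - name: batch-compute
#       resource: batch-compute
#       filters:
#         - computeResources.desiredvCpus: 0
#         - state: ENABLED
#       actions:
#         - type: update-environment
#           state: DISABLED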
| taohungyang/cloud-custodian | tests/test_batch.py | Python | apache-2.0 | 3,254 |
# -*- coding: utf-8 -*-
"""
werkzeug.testsuite
~~~~~~~~~~~~~~~~~~
Contains all test Werkzeug tests.
:copyright: (c) 2014 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from __future__ import with_statement
import re
import sys
import unittest
import shutil
import tempfile
import atexit
from werkzeug.utils import find_modules
from werkzeug._compat import text_type, integer_types, reraise
def get_temporary_directory():
directory = tempfile.mkdtemp()
@atexit.register
def remove_directory():
try:
shutil.rmtree(directory)
except EnvironmentError:
pass
return directory
def iter_suites(package):
"""Yields all testsuites."""
for module in find_modules(package, include_packages=True):
mod = __import__(module, fromlist=['*'])
if hasattr(mod, 'suite'):
yield mod.suite()
def find_all_tests(suite):
"""Yields all the tests and their names from a given suite."""
suites = [suite]
while suites:
s = suites.pop()
try:
suites.extend(s)
except TypeError:
yield s, '%s.%s.%s' % (
s.__class__.__module__,
s.__class__.__name__,
s._testMethodName
)
class WerkzeugTestCase(unittest.TestCase):
"""Baseclass for all the tests that Werkzeug uses. Use these
methods for testing instead of the camelcased ones in the
baseclass for consistency.
"""
def setup(self):
pass
def teardown(self):
pass
def setUp(self):
self.setup()
def tearDown(self):
unittest.TestCase.tearDown(self)
self.teardown()
def assert_line_equal(self, x, y):
assert x == y, "lines not equal\n a = %r\n b = %r" % (x, y)
def assert_equal(self, x, y, msg=None):
return self.assertEqual(x, y, msg)
def assert_not_equal(self, x, y):
return self.assertNotEqual(x, y)
def assert_raises(self, exc_type, callable=None, *args, **kwargs):
catcher = _ExceptionCatcher(self, exc_type)
if callable is None:
return catcher
with catcher:
callable(*args, **kwargs)
if sys.version_info[:2] == (2, 6):
def assertIsNone(self, x):
assert x is None, "%r is not None" % (x,)
def assertIsNotNone(self, x):
assert x is not None, "%r is None" % (x, )
def assertIn(self, x, y):
assert x in y, "%r not in %r" % (x, y)
def assertNotIn(self, x, y):
assert x not in y, "%r in %r" % (x, y)
def assertIsInstance(self, x, y):
assert isinstance(x, y), "not isinstance(%r, %r)" % (x, y)
def assertIs(self, x, y):
assert x is y, "%r is not %r" % (x, y)
def assertIsNot(self, x, y):
assert x is not y, "%r is %r" % (x, y)
def assertSequenceEqual(self, x, y):
self.assertEqual(x, y)
def assertRaisesRegex(self, exc_type, regex, *args, **kwargs):
catcher = _ExceptionCatcher(self, exc_type)
if not args:
return catcher
elif callable(args[0]):
with catcher:
args[0](*args[1:], **kwargs)
                if regex is not None:
                    assert re.search(regex, catcher.exc_value[0])
else:
raise NotImplementedError()
elif sys.version_info[0] == 2:
def assertRaisesRegex(self, *args, **kwargs):
return self.assertRaisesRegexp(*args, **kwargs)
def assert_is_none(self, x):
self.assertIsNone(x)
def assert_is_not_none(self, x):
self.assertIsNotNone(x)
def assert_in(self, x, y):
self.assertIn(x, y)
def assert_is_instance(self, x, y):
self.assertIsInstance(x, y)
def assert_not_in(self, x, y):
self.assertNotIn(x, y)
def assert_is(self, x, y):
self.assertIs(x, y)
def assert_is_not(self, x, y):
self.assertIsNot(x, y)
def assert_true(self, x):
self.assertTrue(x)
def assert_false(self, x):
self.assertFalse(x)
def assert_raises_regex(self, *args, **kwargs):
return self.assertRaisesRegex(*args, **kwargs)
def assert_sequence_equal(self, x, y):
self.assertSequenceEqual(x, y)
def assert_strict_equal(self, x, y):
'''Stricter version of assert_equal that doesn't do implicit conversion
between unicode and strings'''
self.assert_equal(x, y)
assert issubclass(type(x), type(y)) or issubclass(type(y), type(x)), \
'%s != %s' % (type(x), type(y))
if isinstance(x, (bytes, text_type, integer_types)) or x is None:
return
elif isinstance(x, dict) or isinstance(y, dict):
x = sorted(x.items())
y = sorted(y.items())
elif isinstance(x, set) or isinstance(y, set):
x = sorted(x)
y = sorted(y)
rx, ry = repr(x), repr(y)
if rx != ry:
rx = rx[:200] + (rx[200:] and '...')
ry = ry[:200] + (ry[200:] and '...')
raise AssertionError(rx, ry)
assert repr(x) == repr(y), repr((x, y))[:200]
class _ExceptionCatcher(object):
def __init__(self, test_case, exc_type):
self.test_case = test_case
self.exc_type = exc_type
self.exc_value = None
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tb):
exception_name = self.exc_type.__name__
if exc_type is None:
self.test_case.fail('Expected exception of type %r' %
exception_name)
elif not issubclass(exc_type, self.exc_type):
reraise(exc_type, exc_value, tb)
self.exc_value = exc_value
return True
class BetterLoader(unittest.TestLoader):
"""A nicer loader that solves two problems. First of all we are setting
up tests from different sources and we're doing this programmatically
which breaks the default loading logic so this is required anyways.
Secondly this loader has a nicer interpolation for test names than the
default one so you can just do ``run-tests.py ViewTestCase`` and it
will work.
"""
def getRootSuite(self):
return suite()
def loadTestsFromName(self, name, module=None):
root = self.getRootSuite()
if name == 'suite':
return root
all_tests = []
for testcase, testname in find_all_tests(root):
if testname == name or \
testname.endswith('.' + name) or \
('.' + name + '.') in testname or \
testname.startswith(name + '.'):
all_tests.append(testcase)
if not all_tests:
raise LookupError('could not find test case for "%s"' % name)
if len(all_tests) == 1:
return all_tests[0]
rv = unittest.TestSuite()
for test in all_tests:
rv.addTest(test)
return rv
def suite():
"""A testsuite that has all the Flask tests. You can use this
function to integrate the Flask tests into your own testsuite
in case you want to test that monkeypatches to Flask do not
break it.
"""
suite = unittest.TestSuite()
for other_suite in iter_suites(__name__):
suite.addTest(other_suite)
return suite
def main():
"""Runs the testsuite as command line application."""
try:
unittest.main(testLoader=BetterLoader(), defaultTest='suite')
except Exception:
import sys
import traceback
traceback.print_exc()
sys.exit(1)
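# Usage sketch (illustrative; ExampleTestCase is a made-up name): test cases
# subclass WerkzeugTestCase to get the snake_case helpers, e.g.
#
#     class ExampleTestCase(WerkzeugTestCase):
#         def test_addition(self):
#             self.assert_strict_equal(1 + 1, 2)
#
# and, as described in BetterLoader's docstring, a single case can be selected
# by (partial) name: ``python run-tests.py ExampleTestCase`` or
# ``python run-tests.py ExampleTestCase.test_addition``.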
| skycucumber/Messaging-Gateway | webapp/venv/lib/python2.7/site-packages/werkzeug/testsuite/__init__.py | Python | gpl-2.0 | 7,756 |
from main import app
app.run(host='127.0.0.1', port=8090, debug=True)
| andimiller/pizza-auth | pizza_auth/run.py | Python | mit | 70 |
#!/usr/bin/env python
# Copyright 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This is generated, do not edit. Update BuildConfigGenerator.groovy and
# 3ppFetch.template instead.
from __future__ import print_function
import argparse
import json
import os
_FILE_URL = 'https://repo.maven.apache.org/maven2/javax/annotation/javax.annotation-api/1.3.2/javax.annotation-api-1.3.2.jar'
_FILE_NAME = 'javax.annotation-api-1.3.2.jar'
_FILE_VERSION = '1.3.2'
def do_latest():
print(_FILE_VERSION)
def get_download_url(version):
if _FILE_URL.endswith('.jar'):
ext = '.jar'
elif _FILE_URL.endswith('.aar'):
ext = '.aar'
else:
raise Exception('Unsupported extension for %s' % _FILE_URL)
partial_manifest = {
'url': [_FILE_URL],
'name': [_FILE_NAME],
'ext': ext,
}
print(json.dumps(partial_manifest))
def main():
ap = argparse.ArgumentParser()
sub = ap.add_subparsers()
latest = sub.add_parser("latest")
latest.set_defaults(func=lambda _opts: do_latest())
download = sub.add_parser("get_url")
download.set_defaults(
func=lambda _opts: get_download_url(os.environ['_3PP_VERSION']))
opts = ap.parse_args()
opts.func(opts)
if __name__ == '__main__':
main()
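# Invocation sketch (illustrative; in practice the 3pp recipe machinery drives
# this script):
#   python fetch.py latest
#       -> prints "1.3.2"
#   _3PP_VERSION=1.3.2 python fetch.py get_url
#       -> prints the partial manifest JSON: {"url": [...], "name": [...], "ext": ".jar"}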
| nwjs/chromium.src | third_party/android_deps/libs/javax_annotation_javax_annotation_api/3pp/fetch.py | Python | bsd-3-clause | 1,385 |
"""django_test URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import url
from books.views import BookList, BookDetail
urlpatterns = [
url(
r'^books/$',
BookList.as_view(),
name='book_list'),
url(
r'^books/(?P<book_id>[0-9]+)/$',
BookDetail.as_view(),
name='book_detail'),
]
| oliverroick/django-tests | django_test/urls.py | Python | mit | 914 |
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 5 11:47:27 2015
"""
#####################################################################################################
# Groupe d'Étude pour la Traduction/le Traitement Automatique des Langues et de la Parole (GETALP)
# Homepage: http://getalp.imag.fr
#
# Author: Tien LE ([email protected])
# Advisors: Laurent Besacier & Benjamin Lecouteux
# URL: tienhuong.weebly.com
#####################################################################################################
#**************************************************************************#
import os
import sys
#import re
#import linecache
#import stat
#import datetime
#for call shell script
#import shlex, subprocess
#**************************************************************************#
#when import module/class in other directory
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))#in order to test with line by line on the server
from config.configuration import config
from config.config_end_user import config_end_user
#**************************************************************************#
def get_absolute_path_current_module():
"""
Get the current directory which contains this module.
:rtype: string of absolute path of file_name
"""
#get path to current module
path = os.path.dirname(os.path.abspath(sys.argv[0]))
return path
#**************************************************************************#
def load_configuration(filename_configuration = "configuration.yml"):
"""
Load the configuration of this project with file in format YAML
:rtype: Object config
"""
#get absolute path to current module
#path = get_absolute_path_current_module()
#print (path)
#path_to_config_file = path + "/../config/" + filename_configuration
path_to_config_file = os.getenv("WCE_ROOT")+ "/wce_system/config/" + filename_configuration
#print(path_to_config_file)
#str_message_if_not_existed = "Not Existed file configuration"
#is_existed_file(path_to_config_file, str_message_if_not_existed)
return config(path_to_config_file)
#**************************************************************************#
def load_configuration_demo_solution(filename_configuration = "configuration.yml"):
"""
Load the configuration of this project with file in format YAML
:rtype: Object config
"""
#get absolute path to current module
#path = get_absolute_path_current_module()
#print (path)
#path_to_config_file = path + "/config/" + filename_configuration
path_to_config_file = os.getenv("WCE_ROOT")+ "/wce_system/config/" + filename_configuration
#print(path_to_config_file)
return config(path_to_config_file)
#**************************************************************************#
def load_config_end_user(filename_configuration = "config_end_user.yml"):
"""
Load the configuration of this project with file in format YAML
:rtype: Object config
"""
#get absolute path to current module
path = get_absolute_path_current_module()
#print (path)
#path_to_config_file = path + "/../config/" + filename_configuration
#path_to_config_file = path + "/../../input_data/" + filename_configuration
path_to_config_file = os.getenv("WCE_ROOT")+ "/input_data/" + filename_configuration
#print(path_to_config_file)
    # return the constructor of the appropriate config module
return config_end_user(path_to_config_file)
#**************************************************************************#
#**************************************************************************#
if __name__ == "__main__":
#Test case:
current_config = load_configuration()
config_end_user = load_config_end_user()
print("OK") | besacier/WCE-LIG | wce_system/common_module/cm_config.py | Python | gpl-3.0 | 3,815 |
#local urls.py file
from django.conf.urls import url, include
from . import views
urlpatterns = [
#url(r'^', views.appView.postLocation, name = 'postLocation'),
url(r'^volunteer/', views.appView.member, name = 'member'),
#url(r'^(?P<member_id>[0-9]+)/$', views.appView.detail, name = 'detail'),
#url(r'^(?P<>))
]
| Fazer56/Assignment3 | charitysite/volunteer/urls.py | Python | mit | 352 |
# -*- encoding: utf-8 -*-
import traceback
import pprint
import uuid
import time
import sys
from django.db.models.fields.related import ManyToManyField, ForeignKey
from django.core.mail import EmailMessage, EmailMultiAlternatives
try:
from django.core.urlresolvers import reverse, NoReverseMatch
except ImportError:
from django.urls import reverse, NoReverseMatch
from django.contrib.sites.models import Site
from django.template import Template, Context
from django.core.mail import get_connection
from django.utils import translation
from django.conf import settings
from django.core import signing
from dbmail.models import MailTemplate, MailLog, MailGroup, MailLogException
from dbmail.defaults import SHOW_CONTEXT, ENABLE_LOGGING, ADD_HEADER
from dbmail.exceptions import StopSendingException
from dbmail.utils import clean_html
from dbmail import import_module
from dbmail import get_version
from dbmail import defaults
class Sender(object):
provider = defaults.MAIL_PROVIDER
def __init__(self, slug, recipient, *args, **kwargs):
self._slug = slug
self._recipient_list = self._get_recipient_list(recipient)
self._cc = self._email_to_list(kwargs.pop('cc', None))
self._bcc = self._email_to_list(kwargs.pop('bcc', None))
self._user = kwargs.pop('user', None)
self._language = kwargs.pop('language', None)
self._backend = kwargs.pop('backend')
self._provider = kwargs.pop('provider', self.provider)
self._signals_kw = kwargs.pop('signals_kwargs', {})
self._template = self._get_template()
self._context = self._get_context(args)
self._subject = self._get_subject()
self._message = self._get_message()
self._files = kwargs.pop('files', [])
self._kwargs = kwargs
self._num = 1
self._err_msg = None
self._err_exc = None
self._log_id = self._get_log_id()
self._kwargs.pop('retry', None)
self._kwargs.pop('max_retries', None)
self._kwargs.pop('retry_delay', None)
self._from_email = self._get_from_email()
self._update_bcc_from_template_settings()
self._insert_mailer_identification_head()
@staticmethod
def _get_log_id():
return '%f-%s' % (time.time(), uuid.uuid4())
def _insert_mailer_identification_head(self):
if not ADD_HEADER:
return
headers = self._kwargs.pop('headers', {})
headers.update(
{'X-Mailer-Wrapper': 'django-db-mailer ver %s' % get_version()})
self._kwargs['headers'] = headers
def _get_connection(self):
if self._template.auth_credentials:
return self._kwargs.pop('connection', None) or get_connection(
**self._template.auth_credentials)
return self._kwargs.pop('connection', None)
def _get_template(self):
return MailTemplate.get_template(slug=self._slug)
def _get_context(self, context_list):
try:
data = self._model_to_dict(Site.objects.get_current())
except Site.DoesNotExist:
data = {}
for context in context_list:
if isinstance(context, dict):
data.update(context)
elif hasattr(context, '_meta'):
data.update(self._model_to_dict(context))
data.update({self._get_context_module_name(context): context})
if settings.DEBUG and SHOW_CONTEXT:
pprint.pprint(data)
return data
@staticmethod
def _get_context_module_name(context):
from distutils.version import StrictVersion
import django
current_version = django.get_version()
if StrictVersion(current_version) < StrictVersion('1.8'):
return context._meta.module_name
return context._meta.model_name
def _get_str_by_language(self, field, template=None):
obj = template if template else self._template
template = getattr(obj, field)
if self._language is not None:
field = '%s_%s' % (field, self._language)
if hasattr(obj, field):
if getattr(obj, field):
template = getattr(obj, field)
return template
def _get_subject(self):
return self._render_template(
self._get_str_by_language('subject'), self._context)
def _get_message_with_base(self):
self._context['content'] = self._render_template(
self._get_str_by_language('message'), self._context)
return self._render_template(
self._get_str_by_language('message', self._template.base),
self._context
)
def _get_standard_message(self):
return self._render_template(
self._get_str_by_language('message'), self._context)
def _get_message(self):
if self._template.base:
return self._get_message_with_base()
return self._get_standard_message()
def _get_msg_with_track(self):
message = self._message
if defaults.TRACK_ENABLE is False:
return message
if ENABLE_LOGGING and self._template.enable_log:
try:
domain = Site.objects.get_current().domain
encrypted = signing.dumps(self._log_id, compress=True)
path = reverse('db-mail-tracker', args=[encrypted])
message += defaults.TRACK_HTML % {
'url': 'http://%s%s' % (domain, path)}
except (Site.DoesNotExist, NoReverseMatch):
pass
return message
def _attach_files(self, mail):
for file_object in self._template.files_list:
mail.attach_file(file_object.filename.path)
for filename in self._files:
mail.attach_file(filename)
def _send_html_message(self):
msg = EmailMultiAlternatives(
self._subject, clean_html(self._message), cc=self._cc,
from_email=self._from_email, to=self._recipient_list,
bcc=self._bcc, connection=self._get_connection(), **self._kwargs
)
msg.attach_alternative(self._get_msg_with_track(), "text/html")
self._attach_files(msg)
msg.send()
def _send_plain_message(self):
msg = EmailMessage(
self._subject, self._message, from_email=self._from_email,
to=self._recipient_list, cc=self._cc, bcc=self._bcc,
connection=self._get_connection(), **self._kwargs
)
self._attach_files(msg)
msg.send()
def _get_recipient_list(self, recipient):
if not isinstance(recipient, list) and '@' not in recipient:
return self._group_emails(recipient)
return self._email_to_list(recipient)
def _update_bcc_from_template_settings(self):
if self._template.bcc_list:
if self._bcc:
self._bcc.extend(self._template.bcc_list)
else:
self._bcc = self._template.bcc_list
def _get_from_email(self):
if self._kwargs.get('from_email'):
return self._kwargs.pop('from_email', None)
elif not self._template.from_email:
return settings.DEFAULT_FROM_EMAIL
return self._template.from_email.get_mail_from
@staticmethod
def _group_emails(recipient):
email_list = []
for slug in recipient.split(','):
email_list.extend(MailGroup.get_emails(slug))
return list(set(email_list))
@staticmethod
def _email_to_list(recipient):
if recipient is None:
return None
elif not isinstance(recipient, list):
recipient = [d.strip() for d in recipient.split(',') if d.strip()]
return recipient
def _render_template(self, template, context):
translation.activate(self._language or settings.LANGUAGE_CODE)
return Template(template).render(Context(context))
@staticmethod
def _model_to_dict(instance):
opts, data = getattr(instance, '_meta'), dict()
for f in opts.fields + opts.many_to_many:
if isinstance(f, ManyToManyField):
if instance.pk is None:
data[f.name] = []
else:
data[f.name] = [
item.pk for item in f.value_from_object(instance)]
elif isinstance(f, ForeignKey):
if getattr(instance, f.name):
data[f.name] = getattr(instance, f.name).__str__()
else:
data[f.name] = f.value_from_object(instance)
return data
def _send_by_custom_provider(self):
module = import_module(self._provider)
module.send(self)
def _send_by_native_provider(self):
if self._template.is_html:
return self._send_html_message()
return self._send_plain_message()
def _send(self):
if self._provider is not None:
return self._send_by_custom_provider()
return self._send_by_native_provider()
def _store_log(self, is_sent):
if ENABLE_LOGGING is True:
if self._template.enable_log or not is_sent:
MailLog.store(
self._recipient_list, self._cc, self._bcc,
is_sent, self._template, self._user,
self._num, self._err_msg, self._err_exc,
self._log_id, self._backend, self._provider
)
def _try_to_send(self):
self._kwargs.pop('queue', None)
for self._num in range(1, self._template.num_of_retries + 1):
try:
self._send()
break
except Exception as exc:
print('[dbmail] %s' % exc)
if self._template.num_of_retries == self._num:
raise
time.sleep(defaults.SEND_RETRY_DELAY_DIRECT)
def _ignore_exception(self):
return self._err_exc in MailLogException.get_ignored_exceptions()
def send(self, is_celery=True):
from dbmail.signals import pre_send, post_send, post_exception
if self._template.is_active:
try:
pre_send.send(
self.__class__, instance=self, **self._signals_kw)
if is_celery is True:
self._send()
else:
self._try_to_send()
self._store_log(True)
post_send.send(
self.__class__, instance=self, **self._signals_kw)
return 'OK'
except StopSendingException:
return
except Exception as exc:
post_exception.send(
self.__class__,
instance=self,
exc_instance=exc,
**self._signals_kw
)
self._err_msg = traceback.format_exc()
self._err_exc = exc.__class__.__name__
self._store_log(False)
if self._ignore_exception():
return
raise
@staticmethod
def debug(key, value):
from django.utils.termcolors import colorize
if value:
sys.stdout.write(colorize(key, fg='green'))
sys.stdout.write(": ")
sys.stdout.write(colorize(repr(value), fg='white'))
sys.stdout.write("\n")
class SenderDebug(Sender):
def _send(self):
self.debug('Provider', self._provider or 'default')
self.debug('Message', self._message)
self.debug('From', self._from_email)
self.debug('Recipients', self._recipient_list)
self.debug('CC', self._cc)
self.debug('BCC', self._bcc)
self.debug('Additional kwargs', self._kwargs)
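# Usage sketch (illustrative; the slug, address and backend string are
# assumptions, not values shipped with the module): given a MailTemplate with
# slug 'welcome', a direct synchronous send could look like
#
#     Sender('welcome', 'user@example.com', {'username': 'alice'},
#            backend='dbmail.backends.mail').send(is_celery=False)
#
# SenderDebug takes the same arguments but its _send only prints the resolved
# provider, message, sender, recipients and extra kwargs instead of sending.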
| LPgenerator/django-db-mailer | dbmail/backends/mail.py | Python | gpl-2.0 | 11,834 |
#!/usr/bin/python
#The MIT License (MIT)
#
#Copyright (c) 2015 Stephen P. Smith
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
import time, math
import RPi.GPIO as GPIO
class max31856(object):
"""Read Temperature on the Raspberry PI from the MAX31856 chip using GPIO
Any pins can be used for CS (chip select), MISO, MOSI and CLK
"""
def __init__(self, csPin = 8, misoPin = 9, mosiPin = 10, clkPin = 11):
self.csPin = csPin
self.misoPin = misoPin
self.mosiPin = mosiPin
self.clkPin = clkPin
self.setupGPIO()
#
# Config Register 2
# ------------------
# bit 7: Reserved -> 0
# bit 6: Averaging Mode 1 Sample -> 0 (default)
# bit 5: Averaging Mode 1 Sample -> 0 (default)
# bit 4: Averaging Mode 1 Sample -> 0 (default)
# bit 3: Thermocouple Type -> K Type (default) -> 0 (default)
# bit 2: Thermocouple Type -> K Type (default) -> 0 (default)
# bit 1: Thermocouple Type -> K Type (default) -> 1 (default)
# bit 0: Thermocouple Type -> K Type (default) -> 1 (default)
#
#Uncomment one of the following to select thermocouple type
#self.writeRegister(1, 0x00) #for B Type
#self.writeRegister(1, 0x01) #for E Type
#self.writeRegister(1, 0x02) #for J Type
self.writeRegister(1, 0x03) #for K Type
#self.writeRegister(1, 0x04) #for N Type
#self.writeRegister(1, 0x05) #for R Type
#self.writeRegister(1, 0x06) #for S Type
#self.writeRegister(1, 0x07) #for T Type
def setupGPIO(self):
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
GPIO.setup(self.csPin, GPIO.OUT)
GPIO.setup(self.misoPin, GPIO.IN)
GPIO.setup(self.mosiPin, GPIO.OUT)
GPIO.setup(self.clkPin, GPIO.OUT)
GPIO.output(self.csPin, GPIO.HIGH)
GPIO.output(self.clkPin, GPIO.LOW)
GPIO.output(self.mosiPin, GPIO.LOW)
def readThermocoupleTemp(self):
self.requestTempConv()
# read 4 registers starting with register 12
out = self.readRegisters(0x0c, 4)
[tc_highByte, tc_middleByte, tc_lowByte] = [out[0], out[1], out[2]]
temp = ((tc_highByte << 16) | (tc_middleByte << 8) | tc_lowByte) >> 5
if (tc_highByte & 0x80):
temp -= 0x80000
temp_C = temp * 0.0078125
fault = out[3]
if ((fault & 0x80) != 0):
raise FaultError("Cold Junction Out-of-Range")
if ((fault & 0x40) != 0):
raise FaultError("Thermocouple Out-of-Range")
if ((fault & 0x20) != 0):
raise FaultError("Cold-Junction High Fault")
if ((fault & 0x10) != 0):
raise FaultError("Cold-Junction Low Fault")
if ((fault & 0x08) != 0):
raise FaultError("Thermocouple Temperature High Fault")
if ((fault & 0x04) != 0):
raise FaultError("Thermocouple Temperature Low Fault")
if ((fault & 0x02) != 0):
raise FaultError("Overvoltage or Undervoltage Input Fault")
if ((fault & 0x01) != 0):
raise FaultError("Thermocouple Open-Circuit Fault")
return temp_C
def readJunctionTemp(self):
self.requestTempConv()
# read 3 registers starting with register 9
out = self.readRegisters(0x09, 3)
offset = out[0]
[junc_msb, junc_lsb] = [out[1], out[2]]
temp = ((junc_msb << 8) | junc_lsb) >> 2
temp = offset + temp
if (junc_msb & 0x80):
temp -= 0x4000
temp_C = temp * 0.015625
return temp_C
def requestTempConv(self):
#
# Config Register 1
# ------------------
# bit 7: Conversion Mode -> 0 (Normally Off Mode)
# bit 6: 1-shot -> 1 (ON)
# bit 5: open-circuit fault detection -> 0 (off)
# bit 4: open-circuit fault detection -> 0 (off)
# bit 3: Cold-junction temerature sensor enabled -> 0 (default)
# bit 2: Fault Mode -> 0 (default)
# bit 1: fault status clear -> 1 (clear any fault)
# bit 0: 50/60 Hz filter select -> 0 (60Hz)
#
# write config register 0
self.writeRegister(0, 0x42)
# conversion time is less than 150ms
time.sleep(.2) #give it 200ms for conversion
def writeRegister(self, regNum, dataByte):
GPIO.output(self.csPin, GPIO.LOW)
# 0x8x to specify 'write register value'
        addressByte = 0x80 | regNum
# first byte is address byte
self.sendByte(addressByte)
# the rest are data bytes
self.sendByte(dataByte)
GPIO.output(self.csPin, GPIO.HIGH)
def readRegisters(self, regNumStart, numRegisters):
out = []
GPIO.output(self.csPin, GPIO.LOW)
# 0x to specify 'read register value'
self.sendByte(regNumStart)
for byte in range(numRegisters):
data = self.recvByte()
out.append(data)
GPIO.output(self.csPin, GPIO.HIGH)
return out
def sendByte(self,byte):
for bit in range(8):
GPIO.output(self.clkPin, GPIO.HIGH)
if (byte & 0x80):
GPIO.output(self.mosiPin, GPIO.HIGH)
else:
GPIO.output(self.mosiPin, GPIO.LOW)
byte <<= 1
GPIO.output(self.clkPin, GPIO.LOW)
def recvByte(self):
byte = 0x00
for bit in range(8):
GPIO.output(self.clkPin, GPIO.HIGH)
byte <<= 1
if GPIO.input(self.misoPin):
byte |= 0x1
GPIO.output(self.clkPin, GPIO.LOW)
return byte
class FaultError(Exception):
pass
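# Fault-handling sketch (illustrative; reuses the BCM pin numbers from the
# self-test below): readThermocoupleTemp() raises FaultError whenever a fault
# bit is set in the fault status register, so callers can separate sensor or
# wiring faults from other errors:
#
#     try:
#         temp_c = max31856(8, 9, 10, 11).readThermocoupleTemp()
#     except FaultError as err:
#         print "MAX31856 fault: %s" % err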
if __name__ == "__main__":
import max31856
csPin = 8
misoPin = 9
mosiPin = 10
clkPin = 11
max = max31856.max31856(csPin,misoPin,mosiPin,clkPin)
thermoTempC = max.readThermocoupleTemp()
thermoTempF = (thermoTempC * 9.0/5.0) + 32
print "Thermocouple Temp: %f degF" % thermoTempF
juncTempC = max.readJunctionTemp()
juncTempF = (juncTempC * 9.0/5.0) + 32
print "Cold Junction Temp: %f degF" % juncTempF
GPIO.cleanup()
| steve71/MAX31856 | max31856.py | Python | mit | 6,588 |