repo_name | ref | path | copies | content
---|---|---|---|---
capitalk/treelearn | refs/heads/master | distribute_setup.py | 46 | #!python
"""Bootstrap distribute installation
If you want to use setuptools in your package's setup.py, just include this
file in the same directory with it, and add this to the top of your setup.py::
from distribute_setup import use_setuptools
use_setuptools()
If you want to require a specific version of setuptools, set a download
mirror, or use an alternate download directory, you can do so by supplying
the appropriate options to ``use_setuptools()``.
This file can also be run as a script to install or upgrade setuptools.
"""
import os
import sys
import time
import fnmatch
import tempfile
import tarfile
from distutils import log
try:
from site import USER_SITE
except ImportError:
USER_SITE = None
try:
import subprocess
def _python_cmd(*args):
args = (sys.executable,) + args
return subprocess.call(args) == 0
except ImportError:
# will be used for python 2.3
def _python_cmd(*args):
args = (sys.executable,) + args
        # quoting arguments on windows
if sys.platform == 'win32':
def quote(arg):
if ' ' in arg:
return '"%s"' % arg
return arg
args = [quote(arg) for arg in args]
return os.spawnl(os.P_WAIT, sys.executable, *args) == 0
DEFAULT_VERSION = "0.6.19"
DEFAULT_URL = "http://pypi.python.org/packages/source/d/distribute/"
SETUPTOOLS_FAKED_VERSION = "0.6c11"
SETUPTOOLS_PKG_INFO = """\
Metadata-Version: 1.0
Name: setuptools
Version: %s
Summary: xxxx
Home-page: xxx
Author: xxx
Author-email: xxx
License: xxx
Description: xxx
""" % SETUPTOOLS_FAKED_VERSION
def _install(tarball):
# extracting the tarball
tmpdir = tempfile.mkdtemp()
log.warn('Extracting in %s', tmpdir)
old_wd = os.getcwd()
try:
os.chdir(tmpdir)
tar = tarfile.open(tarball)
_extractall(tar)
tar.close()
        # going into the directory
subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
os.chdir(subdir)
log.warn('Now working in %s', subdir)
# installing
log.warn('Installing Distribute')
if not _python_cmd('setup.py', 'install'):
log.warn('Something went wrong during the installation.')
log.warn('See the error message above.')
finally:
os.chdir(old_wd)
def _build_egg(egg, tarball, to_dir):
# extracting the tarball
tmpdir = tempfile.mkdtemp()
log.warn('Extracting in %s', tmpdir)
old_wd = os.getcwd()
try:
os.chdir(tmpdir)
tar = tarfile.open(tarball)
_extractall(tar)
tar.close()
        # going into the directory
subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
os.chdir(subdir)
log.warn('Now working in %s', subdir)
# building an egg
log.warn('Building a Distribute egg in %s', to_dir)
_python_cmd('setup.py', '-q', 'bdist_egg', '--dist-dir', to_dir)
finally:
os.chdir(old_wd)
# returning the result
log.warn(egg)
if not os.path.exists(egg):
raise IOError('Could not build the egg.')
def _do_download(version, download_base, to_dir, download_delay):
egg = os.path.join(to_dir, 'distribute-%s-py%d.%d.egg'
% (version, sys.version_info[0], sys.version_info[1]))
if not os.path.exists(egg):
tarball = download_setuptools(version, download_base,
to_dir, download_delay)
_build_egg(egg, tarball, to_dir)
sys.path.insert(0, egg)
import setuptools
setuptools.bootstrap_install_from = egg
def use_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
to_dir=os.curdir, download_delay=15, no_fake=True):
# making sure we use the absolute path
to_dir = os.path.abspath(to_dir)
was_imported = 'pkg_resources' in sys.modules or \
'setuptools' in sys.modules
try:
try:
import pkg_resources
if not hasattr(pkg_resources, '_distribute'):
if not no_fake:
_fake_setuptools()
raise ImportError
except ImportError:
return _do_download(version, download_base, to_dir, download_delay)
try:
pkg_resources.require("distribute>="+version)
return
except pkg_resources.VersionConflict:
e = sys.exc_info()[1]
if was_imported:
sys.stderr.write(
"The required version of distribute (>=%s) is not available,\n"
"and can't be installed while this script is running. Please\n"
"install a more recent version first, using\n"
"'easy_install -U distribute'."
"\n\n(Currently using %r)\n" % (version, e.args[0]))
sys.exit(2)
else:
del pkg_resources, sys.modules['pkg_resources'] # reload ok
return _do_download(version, download_base, to_dir,
download_delay)
except pkg_resources.DistributionNotFound:
return _do_download(version, download_base, to_dir,
download_delay)
finally:
if not no_fake:
_create_fake_setuptools_pkg_info(to_dir)
def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
to_dir=os.curdir, delay=15):
"""Download distribute from a specified location and return its filename
`version` should be a valid distribute version number that is available
as an egg for download under the `download_base` URL (which should end
with a '/'). `to_dir` is the directory where the egg will be downloaded.
`delay` is the number of seconds to pause before an actual download
attempt.
"""
# making sure we use the absolute path
to_dir = os.path.abspath(to_dir)
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
tgz_name = "distribute-%s.tar.gz" % version
url = download_base + tgz_name
saveto = os.path.join(to_dir, tgz_name)
src = dst = None
if not os.path.exists(saveto): # Avoid repeated downloads
try:
log.warn("Downloading %s", url)
src = urlopen(url)
# Read/write all in one block, so we don't create a corrupt file
# if the download is interrupted.
data = src.read()
dst = open(saveto, "wb")
dst.write(data)
finally:
if src:
src.close()
if dst:
dst.close()
return os.path.realpath(saveto)
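# A hedged sketch (illustrative, not part of the original script): the
# two-step flow the helpers above support, downloading a pinned tarball into
# a scratch directory and installing from it.
def _example_download_and_install():
    tarball = download_setuptools(version=DEFAULT_VERSION,
                                  download_base=DEFAULT_URL,
                                  to_dir=tempfile.mkdtemp(),
                                  delay=0)
    _install(tarball)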
def _no_sandbox(function):
def __no_sandbox(*args, **kw):
try:
from setuptools.sandbox import DirectorySandbox
if not hasattr(DirectorySandbox, '_old'):
def violation(*args):
pass
DirectorySandbox._old = DirectorySandbox._violation
DirectorySandbox._violation = violation
patched = True
else:
patched = False
except ImportError:
patched = False
try:
return function(*args, **kw)
finally:
if patched:
DirectorySandbox._violation = DirectorySandbox._old
del DirectorySandbox._old
return __no_sandbox
def _patch_file(path, content):
"""Will backup the file then patch it"""
existing_content = open(path).read()
if existing_content == content:
# already patched
log.warn('Already patched.')
return False
log.warn('Patching...')
_rename_path(path)
f = open(path, 'w')
try:
f.write(content)
finally:
f.close()
return True
_patch_file = _no_sandbox(_patch_file)
def _same_content(path, content):
return open(path).read() == content
def _rename_path(path):
new_name = path + '.OLD.%s' % time.time()
log.warn('Renaming %s into %s', path, new_name)
os.rename(path, new_name)
return new_name
def _remove_flat_installation(placeholder):
if not os.path.isdir(placeholder):
        log.warn('Unknown installation at %s', placeholder)
return False
found = False
for file in os.listdir(placeholder):
if fnmatch.fnmatch(file, 'setuptools*.egg-info'):
found = True
break
if not found:
log.warn('Could not locate setuptools*.egg-info')
return
log.warn('Removing elements out of the way...')
pkg_info = os.path.join(placeholder, file)
if os.path.isdir(pkg_info):
patched = _patch_egg_dir(pkg_info)
else:
patched = _patch_file(pkg_info, SETUPTOOLS_PKG_INFO)
if not patched:
log.warn('%s already patched.', pkg_info)
return False
# now let's move the files out of the way
for element in ('setuptools', 'pkg_resources.py', 'site.py'):
element = os.path.join(placeholder, element)
if os.path.exists(element):
_rename_path(element)
else:
log.warn('Could not find the %s element of the '
'Setuptools distribution', element)
return True
_remove_flat_installation = _no_sandbox(_remove_flat_installation)
def _after_install(dist):
log.warn('After install bootstrap.')
placeholder = dist.get_command_obj('install').install_purelib
_create_fake_setuptools_pkg_info(placeholder)
def _create_fake_setuptools_pkg_info(placeholder):
if not placeholder or not os.path.exists(placeholder):
log.warn('Could not find the install location')
return
pyver = '%s.%s' % (sys.version_info[0], sys.version_info[1])
setuptools_file = 'setuptools-%s-py%s.egg-info' % \
(SETUPTOOLS_FAKED_VERSION, pyver)
pkg_info = os.path.join(placeholder, setuptools_file)
if os.path.exists(pkg_info):
log.warn('%s already exists', pkg_info)
return
log.warn('Creating %s', pkg_info)
f = open(pkg_info, 'w')
try:
f.write(SETUPTOOLS_PKG_INFO)
finally:
f.close()
pth_file = os.path.join(placeholder, 'setuptools.pth')
log.warn('Creating %s', pth_file)
f = open(pth_file, 'w')
try:
f.write(os.path.join(os.curdir, setuptools_file))
finally:
f.close()
_create_fake_setuptools_pkg_info = _no_sandbox(_create_fake_setuptools_pkg_info)
def _patch_egg_dir(path):
# let's check if it's already patched
pkg_info = os.path.join(path, 'EGG-INFO', 'PKG-INFO')
if os.path.exists(pkg_info):
if _same_content(pkg_info, SETUPTOOLS_PKG_INFO):
log.warn('%s already patched.', pkg_info)
return False
_rename_path(path)
os.mkdir(path)
os.mkdir(os.path.join(path, 'EGG-INFO'))
pkg_info = os.path.join(path, 'EGG-INFO', 'PKG-INFO')
f = open(pkg_info, 'w')
try:
f.write(SETUPTOOLS_PKG_INFO)
finally:
f.close()
return True
_patch_egg_dir = _no_sandbox(_patch_egg_dir)
def _before_install():
log.warn('Before install bootstrap.')
_fake_setuptools()
def _under_prefix(location):
if 'install' not in sys.argv:
return True
args = sys.argv[sys.argv.index('install')+1:]
for index, arg in enumerate(args):
for option in ('--root', '--prefix'):
if arg.startswith('%s=' % option):
                top_dir = arg.split('=', 1)[-1]  # handle both --root= and --prefix=
return location.startswith(top_dir)
elif arg == option:
                if len(args) > index + 1:
top_dir = args[index+1]
return location.startswith(top_dir)
if arg == '--user' and USER_SITE is not None:
return location.startswith(USER_SITE)
return True
def _fake_setuptools():
log.warn('Scanning installed packages')
try:
import pkg_resources
except ImportError:
# we're cool
log.warn('Setuptools or Distribute does not seem to be installed.')
return
ws = pkg_resources.working_set
try:
setuptools_dist = ws.find(pkg_resources.Requirement.parse('setuptools',
replacement=False))
except TypeError:
# old distribute API
setuptools_dist = ws.find(pkg_resources.Requirement.parse('setuptools'))
if setuptools_dist is None:
log.warn('No setuptools distribution found')
return
# detecting if it was already faked
setuptools_location = setuptools_dist.location
log.warn('Setuptools installation detected at %s', setuptools_location)
    # if --root or --prefix was provided, and if
    # setuptools is not located in them, we don't patch it
if not _under_prefix(setuptools_location):
log.warn('Not patching, --root or --prefix is installing Distribute'
' in another location')
return
    # let's see if it's an egg
if not setuptools_location.endswith('.egg'):
log.warn('Non-egg installation')
res = _remove_flat_installation(setuptools_location)
if not res:
return
else:
log.warn('Egg installation')
pkg_info = os.path.join(setuptools_location, 'EGG-INFO', 'PKG-INFO')
if (os.path.exists(pkg_info) and
_same_content(pkg_info, SETUPTOOLS_PKG_INFO)):
log.warn('Already patched.')
return
log.warn('Patching...')
        # let's create a fake egg replacing the setuptools one
res = _patch_egg_dir(setuptools_location)
if not res:
return
    log.warn('Patching done.')
_relaunch()
def _relaunch():
log.warn('Relaunching...')
# we have to relaunch the process
# pip marker to avoid a relaunch bug
if sys.argv[:3] == ['-c', 'install', '--single-version-externally-managed']:
sys.argv[0] = 'setup.py'
args = [sys.executable] + sys.argv
sys.exit(subprocess.call(args))
def _extractall(self, path=".", members=None):
"""Extract all members from the archive to the current working
directory and set owner, modification time and permissions on
directories afterwards. `path' specifies a different directory
to extract to. `members' is optional and must be a subset of the
list returned by getmembers().
"""
import copy
import operator
from tarfile import ExtractError
directories = []
if members is None:
members = self
for tarinfo in members:
if tarinfo.isdir():
# Extract directories with a safe mode.
directories.append(tarinfo)
tarinfo = copy.copy(tarinfo)
tarinfo.mode = 448 # decimal for oct 0700
self.extract(tarinfo, path)
# Reverse sort directories.
if sys.version_info < (2, 4):
def sorter(dir1, dir2):
return cmp(dir1.name, dir2.name)
directories.sort(sorter)
directories.reverse()
else:
directories.sort(key=operator.attrgetter('name'), reverse=True)
# Set correct owner, mtime and filemode on directories.
for tarinfo in directories:
dirpath = os.path.join(path, tarinfo.name)
try:
self.chown(tarinfo, dirpath)
self.utime(tarinfo, dirpath)
self.chmod(tarinfo, dirpath)
except ExtractError:
e = sys.exc_info()[1]
if self.errorlevel > 1:
raise
else:
self._dbg(1, "tarfile: %s" % e)
def main(argv, version=DEFAULT_VERSION):
"""Install or upgrade setuptools and EasyInstall"""
tarball = download_setuptools()
_install(tarball)
if __name__ == '__main__':
main(sys.argv[1:])
|
lucidfrontier45/scikit-learn | refs/heads/master | sklearn/datasets/tests/test_lfw.py | 2 | """These tests for LFW require medium-size data downloading and processing.
If the data has not already been downloaded by running the examples,
the tests are skipped.
If the tests are run, the first execution will be long (typically a bit
more than a couple of minutes) but as the dataset loader leverages
joblib, successive runs will be fast (less than 200ms).
"""
import random
import os
import shutil
import tempfile
import numpy as np
try:
try:
from scipy.misc import imsave
except ImportError:
from scipy.misc.pilutil import imsave
except ImportError:
imsave = None
from sklearn.datasets import load_lfw_pairs
from sklearn.datasets import load_lfw_people
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import raises
SCIKIT_LEARN_DATA = tempfile.mkdtemp(prefix="scikit_learn_lfw_test_")
SCIKIT_LEARN_EMPTY_DATA = tempfile.mkdtemp(prefix="scikit_learn_empty_test_")
LFW_HOME = os.path.join(SCIKIT_LEARN_DATA, 'lfw_home')
FAKE_NAMES = [
'Abdelatif_Smith',
'Abhati_Kepler',
'Camara_Alvaro',
'Chen_Dupont',
'John_Lee',
'Lin_Bauman',
'Onur_Lopez',
]
def setup_module():
"""Test fixture run once and common to all tests of this module"""
if imsave is None:
raise SkipTest
if not os.path.exists(LFW_HOME):
os.makedirs(LFW_HOME)
random_state = random.Random(42)
np_rng = np.random.RandomState(42)
# generate some random jpeg files for each person
counts = {}
for name in FAKE_NAMES:
folder_name = os.path.join(LFW_HOME, 'lfw_funneled', name)
if not os.path.exists(folder_name):
os.makedirs(folder_name)
n_faces = np_rng.randint(1, 5)
counts[name] = n_faces
for i in range(n_faces):
file_path = os.path.join(folder_name, name + '_%04d.jpg' % i)
uniface = np_rng.randint(0, 255, size=(250, 250, 3))
try:
imsave(file_path, uniface)
except ImportError:
# PIL is not properly installed, skip those tests
raise SkipTest
# add some random file pollution to test robustness
with open(os.path.join(LFW_HOME, 'lfw_funneled', '.test.swp'), 'wb') as f:
f.write('Text file to be ignored by the dataset loader.')
# generate some pairing metadata files using the same format as LFW
with open(os.path.join(LFW_HOME, 'pairsDevTrain.txt'), 'wb') as f:
f.write("10\n")
more_than_two = [name for name, count in counts.iteritems()
if count >= 2]
for i in range(5):
name = random_state.choice(more_than_two)
first, second = random_state.sample(range(counts[name]), 2)
f.write('%s\t%d\t%d\n' % (name, first, second))
for i in range(5):
first_name, second_name = random_state.sample(FAKE_NAMES, 2)
first_index = random_state.choice(range(counts[first_name]))
second_index = random_state.choice(range(counts[second_name]))
f.write('%s\t%d\t%s\t%d\n' % (first_name, first_index,
second_name, second_index))
with open(os.path.join(LFW_HOME, 'pairsDevTest.txt'), 'wb') as f:
f.write("Fake place holder that won't be tested")
with open(os.path.join(LFW_HOME, 'pairs.txt'), 'wb') as f:
f.write("Fake place holder that won't be tested")
def teardown_module():
"""Test fixture (clean up) run once after all tests of this module"""
if os.path.isdir(SCIKIT_LEARN_DATA):
shutil.rmtree(SCIKIT_LEARN_DATA)
if os.path.isdir(SCIKIT_LEARN_EMPTY_DATA):
shutil.rmtree(SCIKIT_LEARN_EMPTY_DATA)
@raises(IOError)
def test_load_empty_lfw_people():
load_lfw_people(data_home=SCIKIT_LEARN_EMPTY_DATA)
def test_load_fake_lfw_people():
lfw_people = load_lfw_people(data_home=SCIKIT_LEARN_DATA,
min_faces_per_person=3)
    # The data is cropped around the center as a rectangular bounding box
    # around the face. Colors are converted to gray levels:
assert_equal(lfw_people.images.shape, (10, 62, 47))
assert_equal(lfw_people.data.shape, (10, 2914))
    # the target is an array of person integer ids
assert_array_equal(lfw_people.target, [2, 0, 1, 0, 2, 0, 2, 1, 1, 2])
# names of the persons can be found using the target_names array
expected_classes = ['Abdelatif Smith', 'Abhati Kepler', 'Onur Lopez']
assert_array_equal(lfw_people.target_names, expected_classes)
    # It is possible to ask for the original data without any cropping or color
    # conversion, and with no limit on the number of pictures per person
lfw_people = load_lfw_people(data_home=SCIKIT_LEARN_DATA,
resize=None, slice_=None, color=True)
assert_equal(lfw_people.images.shape, (17, 250, 250, 3))
# the ids and class names are the same as previously
assert_array_equal(lfw_people.target,
[0, 0, 1, 6, 5, 6, 3, 6, 0, 3, 6, 1, 2, 4, 5, 1, 2])
assert_array_equal(lfw_people.target_names,
['Abdelatif Smith', 'Abhati Kepler', 'Camara Alvaro',
'Chen Dupont', 'John Lee', 'Lin Bauman', 'Onur Lopez'])
@raises(ValueError)
def test_load_fake_lfw_people_too_restrictive():
load_lfw_people(data_home=SCIKIT_LEARN_DATA, min_faces_per_person=100)
@raises(IOError)
def test_load_empty_lfw_pairs():
load_lfw_pairs(data_home=SCIKIT_LEARN_EMPTY_DATA)
def test_load_fake_lfw_pairs():
lfw_pairs_train = load_lfw_pairs(data_home=SCIKIT_LEARN_DATA)
    # The data is cropped around the center as a rectangular bounding box
    # around the face. Colors are converted to gray levels:
assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 62, 47))
# the target is whether the person is the same or not
assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
# names of the persons can be found using the target_names array
expected_classes = ['Different persons', 'Same person']
assert_array_equal(lfw_pairs_train.target_names, expected_classes)
    # It is possible to ask for the original data without any cropping or color
# conversion
lfw_pairs_train = load_lfw_pairs(data_home=SCIKIT_LEARN_DATA,
resize=None, slice_=None, color=True)
assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 250, 250, 3))
# the ids and class names are the same as previously
assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
assert_array_equal(lfw_pairs_train.target_names, expected_classes)
|
MicroPyramid/django-simple-pagination | refs/heads/master | sandbox/sample/tests.py | 873 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.test import TestCase
# Create your tests here.
|
seba-1511/randopt | refs/heads/master | bin/roviz.py | 1 | #!/usr/bin/env python3
import webbrowser
import os
import sys
import json
if __name__ == '__main__':
roviz_path = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
    if len(sys.argv) != 2:
print('The new usage of roviz is: roviz.py path/to/experiment')
sys.exit(-1)
exp_path = sys.argv[1]
path = os.path.abspath('.')
path = os.path.join(path, exp_path)
results = []
for fname in os.listdir(path):
if fname.endswith('.json'):
fpath = os.path.join(path, fname)
with open(fpath, 'r') as f:
results.append(json.load(f))
results = sorted(results, key=lambda x: float(x['result']))
content = 'var DATA = ' + json.dumps(results) + ';'
header_path = os.path.join(roviz_path, 'header.html')
with open(header_path, 'r') as f:
header = f.read()
footer_path = os.path.join(roviz_path, 'footer.html')
with open(footer_path, 'r') as f:
footer = f.read()
content = header + content + footer
out_path = os.path.join(exp_path, 'viz.html')
with open(out_path, 'w') as f:
f.write(content)
webbrowser.open("file:///" + os.path.abspath(out_path))
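# Illustrative invocation (the experiment path below is an assumption):
#
#     python roviz.py results/my_experiment
#
# which writes results/my_experiment/viz.html and opens it in a browser.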
|
asajeffrey/servo | refs/heads/master | python/servo/gstreamer.py | 3 | # Copyright 2013 The Servo Project Developers. See the COPYRIGHT
# file at the top-level directory of this distribution.
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.
import os
import sys
GSTREAMER_DYLIBS = [
("gstapp", "gst-plugins-base"),
("gstaudio", "gst-plugins-base"),
("gstbase", "gstreamer"),
("gstcodecparsers", "gst-plugins-bad"),
("gstcontroller", "gstreamer"),
("gstfft", "gst-plugins-base"),
("gstgl", "gst-plugins-base"),
("gstnet", "gstreamer"),
("gstpbutils", "gst-plugins-base"),
("gstplayer", "gst-plugins-bad"),
("gstreamer", "gstreamer"),
("gstriff", "gst-plugins-base"),
("gstrtp", "gst-plugins-base"),
("gstrtsp", "gst-plugins-base"),
("gstsctp", "gst-plugins-bad"),
("gstsdp", "gst-plugins-base"),
("gsttag", "gst-plugins-base"),
("gstvideo", "gst-plugins-base"),
("gstwebrtc", "gst-plugins-bad"),
]
NON_UWP_DYLIBS = [
"gstnet",
"gstsctp",
]
GSTREAMER_PLUGINS = [
("gstapp", "gst-plugins-base"),
("gstaudiobuffersplit", "gst-plugins-bad"),
("gstaudioconvert", "gst-plugins-base"),
("gstaudiofx", "gst-plugins-good"),
("gstaudioparsers", "gst-plugins-good"),
("gstaudioresample", "gst-plugins-base"),
("gstautodetect", "gst-plugins-good"),
("gstcoreelements", "gstreamer"),
("gstdeinterlace", "gst-plugins-good"),
("gstdtls", "gst-plugins-bad"),
("gstgio", "gst-plugins-base"),
("gstid3tag", "gst-plugins-bad"),
("gstid3demux", "gst-plugins-good"),
("gstinterleave", "gst-plugins-good"),
("gstisomp4", "gst-plugins-good"),
("gstlibav", "gst-libav"),
("gstmatroska", "gst-plugins-good"),
("gstogg", "gst-plugins-base"),
("gstopengl", "gst-plugins-base"),
("gstopus", "gst-plugins-base"),
("gstplayback", "gst-plugins-base"),
("gstproxy", "gst-plugins-bad"),
("gstrtp", "gst-plugins-good"),
("gstrtpmanager", "gst-plugins-good"),
("gsttheora", "gst-plugins-base"),
("gsttypefindfunctions", "gst-plugins-base"),
("gstvideoconvert", "gst-plugins-base"),
("gstvideofilter", "gst-plugins-good"),
("gstvideoparsersbad", "gst-plugins-bad"),
("gstvideoscale", "gst-plugins-base"),
("gstvorbis", "gst-plugins-base"),
("gstvolume", "gst-plugins-base"),
("gstvpx", "gst-plugins-good"),
("gstwebrtc", "gst-plugins-bad"),
]
WINDOWS_PLUGINS = [
"gstnice",
"gstwasapi",
]
MACOS_PLUGINS = [
# Temporarily disabled until CI is using Mojave.
# https://github.com/servo/saltfs/issues/1011
# ("gstapplemedia", "gst-plugins-bad"),
("gstosxaudio", "gst-plugins-good"),
("gstosxvideo", "gst-plugins-good"),
]
NON_UWP_PLUGINS = [
"gstdtls",
"gstmatroska",
"gstnice",
"gstogg",
"gstopengl",
"gstopus",
"gstrtp",
"gstrtpmanager",
"gsttheora",
"gstvorbis",
"gstvpx",
"gstwebrtc",
]
def windows_dlls(uwp):
dlls = [x for x, _ in GSTREAMER_DYLIBS]
if uwp:
dlls = filter(lambda x: x not in NON_UWP_DYLIBS, dlls)
return [x + "-1.0-0.dll" for x in dlls]
def windows_plugins(uwp):
dlls = [x for x, _ in GSTREAMER_PLUGINS] + WINDOWS_PLUGINS
if uwp:
dlls = filter(lambda x: x not in NON_UWP_PLUGINS, dlls)
return [x + ".dll" for x in dlls]
def macos_libnice():
return os.path.join('/', 'usr', 'local', 'opt', 'libnice', 'lib')
def macos_dylibs():
return [
os.path.join(
"/usr/local/opt",
path,
"lib",
"lib" + name + "-1.0.0.dylib"
) for name, path in GSTREAMER_DYLIBS
] + [
os.path.join(macos_libnice(), "libnice.dylib"),
os.path.join(macos_libnice(), "libnice.10.dylib"),
]
def macos_plugins():
return [
os.path.join(
"/usr/local/opt",
path,
"lib",
"gstreamer-1.0",
"lib" + name + ".dylib"
) for name, path in GSTREAMER_PLUGINS + MACOS_PLUGINS
] + [
os.path.join(macos_libnice(), "gstreamer-1.0", "libgstnice.dylib"),
]
def write_plugin_list(target):
plugins = []
if "apple-" in target:
plugins = [os.path.basename(x) for x in macos_plugins()]
elif '-windows-' in target:
plugins = windows_plugins('-uwp-' in target)
print('''/* This is a generated file. Do not modify. */
pub(crate) static GSTREAMER_PLUGINS: &[&'static str] = &[
%s
];
''' % ',\n'.join(map(lambda x: '"' + x + '"', plugins)))
if __name__ == "__main__":
write_plugin_list(sys.argv[1])
|
se4u/pylearn2 | refs/heads/master | pylearn2/utils/track_version.py | 33 | #!/usr/bin/env python
"""
Script to obtain version of Python modules and basic information on the
experiment setup (e.g. cpu, os), e.g.
* numpy: 1.6.1 | pylearn: a6e634b83d | pylearn2: 57a156beb0
* CPU: x86_64
* OS: Linux-2.6.35.14-106.fc14.x86_64-x86_64-with-fedora-14-Laughlin
You can also define the modules to be tracked with the environment
variable `PYLEARN2_TRACK_MODULES`. Use ":" to separate module names
between them, e.g. `PYLEARN2_TRACK_MODULES = module1:module2:module3`
By default, the following modules are tracked: pylearn2, theano, numpy, scipy
"""
__authors__ = "Olivier Dellaleau and Raul Chandias Ferrari"
__copyright__ = "Copyright 2013, Universite de Montreal"
__credits__ = ["Olivier Dellaleau", "Raul Chandias Ferrari"]
__license__ = "3-clause BSD"
__maintainer__ = "Raul Chandias Ferrari"
__email__ = "chandiar@iro"
import copy
import logging
import os
import platform
import socket
import subprocess
import sys
import warnings
from theano.compat import six
logger = logging.getLogger(__name__)
class MetaLibVersion(type):
"""
    Metaclass constructor that will be called every time another class's
    constructor is called (if the "__metaclass__ = MetaLibVersion"
    line is present in the other class definition).
Parameters
----------
cls : WRITEME
name : WRITEME
bases : WRITEME
dict : WRITEME
"""
def __init__(cls, name, bases, dict):
type.__init__(cls, name, bases, dict)
cls.libv = LibVersion()
class LibVersion(object):
"""
Initialize a LibVersion object that will store the version of python
packages in a dictionary (versions). The python packages that are
supported are: pylearn, pylearn2, theano, jobman, numpy and scipy.
The key for the versions dict is the name of the package and the
associated value is the version number.
"""
def __init__(self):
self.versions = {}
self.str_versions = ''
self.exp_env_info = {}
self._get_lib_versions()
self._get_exp_env_info()
def _get_exp_env_info(self):
"""
Get information about the experimental environment such as the
cpu, os and the hostname of the machine on which the experiment
is running.
"""
self.exp_env_info['host'] = socket.gethostname()
self.exp_env_info['cpu'] = platform.processor()
self.exp_env_info['os'] = platform.platform()
if 'theano' in sys.modules:
self.exp_env_info['theano_config'] = sys.modules['theano'].config
else:
self.exp_env_info['theano_config'] = None
def _get_lib_versions(self):
"""Get version of Python packages."""
repos = os.getenv('PYLEARN2_TRACK_MODULES', '')
default_repos = 'pylearn2:theano:numpy:scipy'
repos = default_repos + ":" + repos
repos = set(repos.split(':'))
for repo in repos:
try:
if repo == '':
continue
__import__(repo)
if hasattr(sys.modules[repo], '__version__'):
v = sys.modules[repo].__version__
if v != 'unknown':
self.versions[repo] = v
continue
self.versions[repo] = self._get_git_version(
self._get_module_parent_path(sys.modules[repo]))
except ImportError:
self.versions[repo] = None
known = copy.copy(self.versions)
# Put together all modules with unknown versions.
unknown = [k for k, w in known.items() if not w]
known = dict((k, w) for k, w in known.items() if w)
# Print versions.
self.str_versions = ' | '.join(
['%s:%s' % (k, w) for k, w in sorted(six.iteritems(known))] +
['%s:?' % ','.join(sorted(unknown))])
def __str__(self):
"""
Return version of the Python packages as a string.
e.g. numpy:1.6.1 | pylearn:a6e634b83d | pylearn2:57a156beb0
"""
return self.str_versions
def _get_git_version(self, root):
"""
Return the git revision of a repository with the letter 'M'
appended to the revision if the repo was modified.
e.g. 10d3046e85 M
Parameters
----------
root : str
Root folder of the repository
Returns
-------
rval : str or None
A string with the revision hash, or None if it could not be
retrieved (e.g. if it is not actually a git repository)
"""
if not os.path.isdir(os.path.join(root, '.git')):
return None
cwd_backup = os.getcwd()
try:
os.chdir(root)
sub_p = subprocess.Popen(['git', 'rev-parse', 'HEAD'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
version = sub_p.communicate()[0][0:10].strip()
sub_p = subprocess.Popen(['git', 'diff', '--name-only'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
modified = sub_p.communicate()[0]
if len(modified):
version += ' M'
return version
except Exception:
pass
finally:
try:
os.chdir(cwd_backup)
except Exception:
warnings.warn("Could not chdir back to " + cwd_backup)
def _get_hg_version(self, root):
"""Same as `get_git_version` but for a Mercurial repository."""
if not os.path.isdir(os.path.join(root, '.hg')):
return None
cwd_backup = os.getcwd()
try:
os.chdir(root)
sub_p = subprocess.Popen(['hg', 'parents'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
sub_p_output = sub_p.communicate()[0]
finally:
os.chdir(cwd_backup)
first_line = sub_p_output.split('\n')[0]
# The first line looks like:
# changeset: 1517:a6e634b83d88
return first_line.split(':')[2][0:10]
def _get_module_path(self, module):
"""Return path to a given module."""
return os.path.realpath(module.__path__[0])
def _get_module_parent_path(self, module):
"""Return path to the parent directory of a given module."""
return os.path.dirname(self._get_module_path(module))
def print_versions(self):
"""
Print version of the Python packages as a string.
e.g. numpy:1.6.1 | pylearn:a6e634b83d | pylearn2:57a156beb0
"""
logger.info(self.__str__())
def print_exp_env_info(self, print_theano_config=False):
"""
        Print basic information about the experiment setup, such as
        the hostname of the machine the experiment was run on and the
        operating system installed on the machine.
Parameters
----------
print_theano_config : bool, optional
If True, information about the theano configuration will be
displayed.
"""
logger.info('HOST: {0}'.format(self.exp_env_info['host']))
logger.info('CPU: {0}'.format(self.exp_env_info['cpu']))
logger.info('OS: {0}'.format(self.exp_env_info['os']))
if print_theano_config:
logger.info(self.exp_env_info['theano_config'])
|
mandeepdhami/neutron | refs/heads/master | neutron/tests/api/test_routers_negative.py | 47 | # Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
import testtools
from tempest_lib.common.utils import data_utils
from tempest_lib import exceptions as lib_exc
from neutron.tests.api import base_routers as base
from neutron.tests.tempest import config
from neutron.tests.tempest import test
CONF = config.CONF
class RoutersNegativeTest(base.BaseRouterTest):
@classmethod
def resource_setup(cls):
super(RoutersNegativeTest, cls).resource_setup()
if not test.is_extension_enabled('router', 'network'):
msg = "router extension not enabled."
raise cls.skipException(msg)
cls.router = cls.create_router(data_utils.rand_name('router-'))
cls.network = cls.create_network()
cls.subnet = cls.create_subnet(cls.network)
cls.tenant_cidr = (CONF.network.tenant_network_cidr
if cls._ip_version == 4 else
CONF.network.tenant_network_v6_cidr)
@test.attr(type=['negative', 'smoke'])
@test.idempotent_id('37a94fc0-a834-45b9-bd23-9a81d2fd1e22')
def test_router_add_gateway_invalid_network_returns_404(self):
self.assertRaises(lib_exc.NotFound,
self.client.update_router,
self.router['id'],
external_gateway_info={
'network_id': self.router['id']})
@test.attr(type=['negative', 'smoke'])
@test.idempotent_id('11836a18-0b15-4327-a50b-f0d9dc66bddd')
def test_router_add_gateway_net_not_external_returns_400(self):
alt_network = self.create_network(
network_name=data_utils.rand_name('router-negative-'))
sub_cidr = netaddr.IPNetwork(self.tenant_cidr).next()
self.create_subnet(alt_network, cidr=sub_cidr)
self.assertRaises(lib_exc.BadRequest,
self.client.update_router,
self.router['id'],
external_gateway_info={
'network_id': alt_network['id']})
@test.attr(type=['negative', 'smoke'])
@test.idempotent_id('957751a3-3c68-4fa2-93b6-eb52ea10db6e')
def test_add_router_interfaces_on_overlapping_subnets_returns_400(self):
network01 = self.create_network(
network_name=data_utils.rand_name('router-network01-'))
network02 = self.create_network(
network_name=data_utils.rand_name('router-network02-'))
subnet01 = self.create_subnet(network01)
subnet02 = self.create_subnet(network02)
self._add_router_interface_with_subnet_id(self.router['id'],
subnet01['id'])
self.assertRaises(lib_exc.BadRequest,
self._add_router_interface_with_subnet_id,
self.router['id'],
subnet02['id'])
@test.attr(type=['negative', 'smoke'])
@test.idempotent_id('04df80f9-224d-47f5-837a-bf23e33d1c20')
def test_router_remove_interface_in_use_returns_409(self):
self.client.add_router_interface_with_subnet_id(
self.router['id'], self.subnet['id'])
self.assertRaises(lib_exc.Conflict,
self.client.delete_router,
self.router['id'])
@test.attr(type=['negative', 'smoke'])
@test.idempotent_id('c2a70d72-8826-43a7-8208-0209e6360c47')
def test_show_non_existent_router_returns_404(self):
router = data_utils.rand_name('non_exist_router')
self.assertRaises(lib_exc.NotFound, self.client.show_router,
router)
@test.attr(type=['negative', 'smoke'])
@test.idempotent_id('b23d1569-8b0c-4169-8d4b-6abd34fad5c7')
def test_update_non_existent_router_returns_404(self):
router = data_utils.rand_name('non_exist_router')
self.assertRaises(lib_exc.NotFound, self.client.update_router,
router, name="new_name")
@test.attr(type=['negative', 'smoke'])
@test.idempotent_id('c7edc5ad-d09d-41e6-a344-5c0c31e2e3e4')
def test_delete_non_existent_router_returns_404(self):
router = data_utils.rand_name('non_exist_router')
self.assertRaises(lib_exc.NotFound, self.client.delete_router,
router)
class RoutersNegativeIpV6Test(RoutersNegativeTest):
_ip_version = 6
class DvrRoutersNegativeTest(base.BaseRouterTest):
@classmethod
def skip_checks(cls):
super(DvrRoutersNegativeTest, cls).skip_checks()
if not test.is_extension_enabled('dvr', 'network'):
msg = "DVR extension not enabled."
raise cls.skipException(msg)
@classmethod
def resource_setup(cls):
super(DvrRoutersNegativeTest, cls).resource_setup()
cls.router = cls.create_router(data_utils.rand_name('router'))
cls.network = cls.create_network()
cls.subnet = cls.create_subnet(cls.network)
@test.attr(type=['negative', 'smoke'])
@test.idempotent_id('4990b055-8fc7-48ab-bba7-aa28beaad0b9')
def test_router_create_tenant_distributed_returns_forbidden(self):
with testtools.ExpectedException(lib_exc.Forbidden):
self.create_router(
data_utils.rand_name('router'), distributed=True)
|
laikuaut/nlp100 | refs/heads/master | nlp100/chapter2/Q010.py | 1 | # coding: utf-8
from util import util
def Q_010_1():
""" 10. 行数のカウント
行数をカウントせよ.
"""
num_lines = 0
with open('data/hightemp.txt', 'r') as f:
num_lines = sum(1 for line in f)
return num_lines
def Q_010_2():
""" 10. 行数のカウント
確認にはwcコマンドを用いよ.
"""
ite = util.exe_cmd('cat data/hightemp.txt | wc -l')
return list(ite)
|
agincel/AdamTestBot | refs/heads/master | future/moves/http/server.py | 2 | from __future__ import absolute_import
from future.utils import PY3
if PY3:
from http.server import *
else:
__future_module__ = True
from BaseHTTPServer import *
from CGIHTTPServer import *
from SimpleHTTPServer import *
try:
from CGIHTTPServer import _url_collapse_path # needed for a test
except ImportError:
try:
# Python 2.7.0 to 2.7.3
from CGIHTTPServer import (
_url_collapse_path_split as _url_collapse_path)
except ImportError:
# Doesn't exist on Python 2.6.x. Ignore it.
pass
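# A hedged usage sketch (illustrative): code written against the Python 3
# names runs on either interpreter through this shim, e.g.
#
#     from future.moves.http.server import SimpleHTTPRequestHandler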
|
urda/nistbeacon | refs/heads/master | tests/integration/__init__.py | 7 | """
Copyright 2015-2016 Peter Urda
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
|
OpenNetworkingFoundation/Snowmass-ONFOpenTransport | refs/heads/develop | RI/flask_server/tapi_server/models/tapi_oam_updateoamserviceendpoint_output.py | 4 | # coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from tapi_server.models.base_model_ import Model
from tapi_server.models.tapi_oam_oam_service_end_point import TapiOamOamServiceEndPoint # noqa: F401,E501
from tapi_server import util
class TapiOamUpdateoamserviceendpointOutput(Model):
"""NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
Do not edit the class manually.
"""
def __init__(self, end_point=None): # noqa: E501
"""TapiOamUpdateoamserviceendpointOutput - a model defined in OpenAPI
:param end_point: The end_point of this TapiOamUpdateoamserviceendpointOutput. # noqa: E501
:type end_point: TapiOamOamServiceEndPoint
"""
self.openapi_types = {
'end_point': TapiOamOamServiceEndPoint
}
self.attribute_map = {
'end_point': 'end-point'
}
self._end_point = end_point
@classmethod
def from_dict(cls, dikt) -> 'TapiOamUpdateoamserviceendpointOutput':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The tapi.oam.updateoamserviceendpoint.Output of this TapiOamUpdateoamserviceendpointOutput. # noqa: E501
:rtype: TapiOamUpdateoamserviceendpointOutput
"""
return util.deserialize_model(dikt, cls)
@property
def end_point(self):
"""Gets the end_point of this TapiOamUpdateoamserviceendpointOutput.
:return: The end_point of this TapiOamUpdateoamserviceendpointOutput.
:rtype: TapiOamOamServiceEndPoint
"""
return self._end_point
@end_point.setter
def end_point(self, end_point):
"""Sets the end_point of this TapiOamUpdateoamserviceendpointOutput.
:param end_point: The end_point of this TapiOamUpdateoamserviceendpointOutput.
:type end_point: TapiOamOamServiceEndPoint
"""
self._end_point = end_point
|
zdary/intellij-community | refs/heads/master | python/testData/refactoring/move/moveNamespacePackage3/after/src/b.py | 79 | import nspkg.a
print(nspkg.a.VAR) |
gustavoam/django-subscribe | refs/heads/master | subscribe/admin.py | 1 | """Admin classes for the ``subscribe`` app."""
from django.contrib import admin
from .models import Subscription
admin.site.register(Subscription)
|
Wafflespeanut/servo | refs/heads/master | tests/wpt/css-tests/tools/wptserve/wptserve/request.py | 87 | import base64
import cgi
import Cookie
import StringIO
import tempfile
import urlparse
from . import stash
from .utils import HTTPException
missing = object()
class Server(object):
"""Data about the server environment
.. attribute:: config
Environment configuration information with information about the
various servers running, their hostnames and ports.
.. attribute:: stash
Stash object holding state stored on the server between requests.
"""
config = None
def __init__(self, request):
self._stash = None
self._request = request
@property
def stash(self):
if self._stash is None:
address, authkey = stash.load_env_config()
self._stash = stash.Stash(self._request.url_parts.path, address, authkey)
return self._stash
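# A hedged sketch (illustrative, not from wptserve's docs): handlers reach the
# shared state through the request object; key and value are placeholders.
#
#     def handler(request, response):
#         request.server.stash.put(key, value)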
class InputFile(object):
max_buffer_size = 1024*1024
def __init__(self, rfile, length):
"""File-like object used to provide a seekable view of request body data"""
self._file = rfile
self.length = length
self._file_position = 0
if length > self.max_buffer_size:
self._buf = tempfile.TemporaryFile(mode="rw+b")
else:
self._buf = StringIO.StringIO()
@property
def _buf_position(self):
rv = self._buf.tell()
assert rv <= self._file_position
return rv
def read(self, bytes=-1):
assert self._buf_position <= self._file_position
if bytes < 0:
bytes = self.length - self._buf_position
bytes_remaining = min(bytes, self.length - self._buf_position)
if bytes_remaining == 0:
return ""
if self._buf_position != self._file_position:
buf_bytes = min(bytes_remaining, self._file_position - self._buf_position)
old_data = self._buf.read(buf_bytes)
bytes_remaining -= buf_bytes
else:
old_data = ""
assert self._buf_position == self._file_position, (
"Before reading buffer position (%i) didn't match file position (%i)" %
(self._buf_position, self._file_position))
new_data = self._file.read(bytes_remaining)
self._buf.write(new_data)
self._file_position += bytes_remaining
assert self._buf_position == self._file_position, (
"After reading buffer position (%i) didn't match file position (%i)" %
(self._buf_position, self._file_position))
return old_data + new_data
def tell(self):
return self._buf_position
def seek(self, offset):
if offset > self.length or offset < 0:
raise ValueError
if offset <= self._file_position:
self._buf.seek(offset)
else:
self.read(offset - self._file_position)
def readline(self, max_bytes=None):
if max_bytes is None:
max_bytes = self.length - self._buf_position
if self._buf_position < self._file_position:
data = self._buf.readline(max_bytes)
if data.endswith("\n") or len(data) == max_bytes:
return data
else:
data = ""
assert self._buf_position == self._file_position
initial_position = self._file_position
found = False
buf = []
max_bytes -= len(data)
while not found:
readahead = self.read(min(2, max_bytes))
max_bytes -= len(readahead)
for i, c in enumerate(readahead):
if c == "\n":
buf.append(readahead[:i+1])
found = True
break
if not found:
buf.append(readahead)
if not readahead or not max_bytes:
break
new_data = "".join(buf)
data += new_data
self.seek(initial_position + len(new_data))
return data
def readlines(self):
rv = []
while True:
data = self.readline()
if data:
rv.append(data)
else:
break
return rv
def next(self):
data = self.readline()
if data:
return data
else:
raise StopIteration
def __iter__(self):
return self
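# A hedged sketch (illustrative, not part of wptserve) of InputFile's
# buffered, seekable view; the payload and its length are made up.
def _example_input_file():
    body = StringIO.StringIO("first line\nsecond line\n")
    f = InputFile(body, length=23)
    first = f.readline()    # "first line\n"
    f.seek(0)               # rewind through the internal buffer
    return first, f.read()  # a full read mixes buffered and fresh data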
class Request(object):
"""Object representing a HTTP request.
.. attribute:: doc_root
The local directory to use as a base when resolving paths
.. attribute:: route_match
Regexp match object from matching the request path to the route
selected for the request.
.. attribute:: protocol_version
HTTP version specified in the request.
.. attribute:: method
HTTP method in the request.
.. attribute:: request_path
Request path as it appears in the HTTP request.
.. attribute:: url_base
The prefix part of the path; typically / unless the handler has a url_base set
.. attribute:: url
Absolute URL for the request.
.. attribute:: headers
List of request headers.
.. attribute:: raw_input
File-like object representing the body of the request.
.. attribute:: url_parts
Parts of the requested URL as obtained by urlparse.urlsplit(path)
.. attribute:: request_line
Raw request line
.. attribute:: headers
RequestHeaders object providing a dictionary-like representation of
the request headers.
.. attribute:: body
Request body as a string
.. attribute:: GET
MultiDict representing the parameters supplied with the request.
Note that these may be present on non-GET requests; the name is
chosen to be familiar to users of other systems such as PHP.
.. attribute:: POST
MultiDict representing the request body parameters. Most parameters
are present as string values, but file uploads have file-like
values.
.. attribute:: cookies
Cookies object representing cookies sent with the request with a
dictionary-like interface.
.. attribute:: auth
Object with username and password properties representing any
credentials supplied using HTTP authentication.
.. attribute:: server
Server object containing information about the server environment.
"""
def __init__(self, request_handler):
self.doc_root = request_handler.server.router.doc_root
self.route_match = None # Set by the router
self.protocol_version = request_handler.protocol_version
self.method = request_handler.command
scheme = request_handler.server.scheme
host = request_handler.headers.get("Host")
port = request_handler.server.server_address[1]
if host is None:
host = request_handler.server.server_address[0]
else:
if ":" in host:
host, port = host.split(":", 1)
self.request_path = request_handler.path
self.url_base = "/"
if self.request_path.startswith(scheme + "://"):
self.url = request_handler.path
else:
self.url = "%s://%s:%s%s" % (scheme,
host,
port,
self.request_path)
self.url_parts = urlparse.urlsplit(self.url)
self._raw_headers = request_handler.headers
self.request_line = request_handler.raw_requestline
self._headers = None
self.raw_input = InputFile(request_handler.rfile,
int(self.headers.get("Content-Length", 0)))
self._body = None
self._GET = None
self._POST = None
self._cookies = None
self._auth = None
self.server = Server(self)
def __repr__(self):
return "<Request %s %s>" % (self.method, self.url)
@property
def GET(self):
if self._GET is None:
params = urlparse.parse_qsl(self.url_parts.query, keep_blank_values=True)
self._GET = MultiDict()
for key, value in params:
self._GET.add(key, value)
return self._GET
@property
def POST(self):
if self._POST is None:
#Work out the post parameters
pos = self.raw_input.tell()
self.raw_input.seek(0)
fs = cgi.FieldStorage(fp=self.raw_input,
environ={"REQUEST_METHOD": self.method},
headers=self.headers,
keep_blank_values=True)
self._POST = MultiDict.from_field_storage(fs)
self.raw_input.seek(pos)
return self._POST
@property
def cookies(self):
if self._cookies is None:
parser = Cookie.BaseCookie()
cookie_headers = self.headers.get("cookie", "")
parser.load(cookie_headers)
cookies = Cookies()
for key, value in parser.iteritems():
cookies[key] = CookieValue(value)
self._cookies = cookies
return self._cookies
@property
def headers(self):
if self._headers is None:
self._headers = RequestHeaders(self._raw_headers)
return self._headers
@property
def body(self):
if self._body is None:
pos = self.raw_input.tell()
self.raw_input.seek(0)
self._body = self.raw_input.read()
self.raw_input.seek(pos)
return self._body
@property
def auth(self):
if self._auth is None:
self._auth = Authentication(self.headers)
return self._auth
class RequestHeaders(dict):
"""Dictionary-like API for accessing request headers."""
def __init__(self, items):
for key, value in zip(items.keys(), items.values()):
key = key.lower()
if key in self:
self[key].append(value)
else:
dict.__setitem__(self, key, [value])
def __getitem__(self, key):
"""Get all headers of a certain (case-insensitive) name. If there is
more than one, the values are returned comma separated"""
values = dict.__getitem__(self, key.lower())
if len(values) == 1:
return values[0]
else:
return ", ".join(values)
def __setitem__(self, name, value):
        raise Exception("RequestHeaders is read-only")
def get(self, key, default=None):
"""Get a string representing all headers with a particular value,
with multiple headers separated by a comma. If no header is found
return a default value
:param key: The header name to look up (case-insensitive)
:param default: The value to return in the case of no match
"""
try:
return self[key]
except KeyError:
return default
def get_list(self, key, default=missing):
"""Get all the header values for a particular field name as
a list"""
try:
return dict.__getitem__(self, key.lower())
except KeyError:
if default is not missing:
return default
else:
raise
def __contains__(self, key):
return dict.__contains__(self, key.lower())
def iteritems(self):
for item in self:
yield item, self[item]
def itervalues(self):
for item in self:
yield self[item]
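# A hedged sketch (illustrative) of RequestHeaders' case-insensitive lookups:
def _example_request_headers():
    h = RequestHeaders({"Accept-Language": "en"})
    return h.get("ACCEPT-LANGUAGE"), h.get_list("accept-language")
    # -> "en", ["en"]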
class CookieValue(object):
"""Representation of cookies.
Note that cookies are considered read-only and the string value
of the cookie will not change if you update the field values.
However this is not enforced.
.. attribute:: key
The name of the cookie.
.. attribute:: value
The value of the cookie
.. attribute:: expires
The expiry date of the cookie
.. attribute:: path
The path of the cookie
.. attribute:: comment
The comment of the cookie.
.. attribute:: domain
The domain with which the cookie is associated
.. attribute:: max_age
The max-age value of the cookie.
.. attribute:: secure
Whether the cookie is marked as secure
.. attribute:: httponly
Whether the cookie is marked as httponly
"""
def __init__(self, morsel):
self.key = morsel.key
self.value = morsel.value
for attr in ["expires", "path",
"comment", "domain", "max-age",
"secure", "version", "httponly"]:
setattr(self, attr.replace("-", "_"), morsel[attr])
self._str = morsel.OutputString()
def __str__(self):
return self._str
def __repr__(self):
return self._str
def __eq__(self, other):
"""Equality comparison for cookies. Compares to other cookies
based on value alone and on non-cookies based on the equality
of self.value with the other object so that a cookie with value
"ham" compares equal to the string "ham"
"""
if hasattr(other, "value"):
return self.value == other.value
return self.value == other
class MultiDict(dict):
"""Dictionary type that holds multiple values for each
key"""
#TODO: this should perhaps also order the keys
def __init__(self):
pass
def __setitem__(self, name, value):
dict.__setitem__(self, name, [value])
def add(self, name, value):
if name in self:
dict.__getitem__(self, name).append(value)
else:
dict.__setitem__(self, name, [value])
def __getitem__(self, key):
"""Get the first value with a given key"""
#TODO: should this instead be the last value?
return self.first(key)
def first(self, key, default=missing):
"""Get the first value with a given key
:param key: The key to lookup
:param default: The default to return if key is
not found (throws if nothing is
specified)
"""
if key in self and dict.__getitem__(self, key):
return dict.__getitem__(self, key)[0]
elif default is not missing:
return default
raise KeyError
def last(self, key, default=missing):
"""Get the last value with a given key
:param key: The key to lookup
:param default: The default to return if key is
not found (throws if nothing is
specified)
"""
if key in self and dict.__getitem__(self, key):
return dict.__getitem__(self, key)[-1]
elif default is not missing:
return default
raise KeyError
def get_list(self, key):
"""Get all values with a given key as a list
:param key: The key to lookup
"""
return dict.__getitem__(self, key)
@classmethod
def from_field_storage(cls, fs):
self = cls()
if fs.list is None:
return self
for key in fs:
values = fs[key]
if not isinstance(values, list):
values = [values]
for value in values:
if value.filename:
value = value
else:
value = value.value
self.add(key, value)
return self
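# A hedged sketch (illustrative) of MultiDict's first/last semantics:
def _example_multidict():
    d = MultiDict()
    d.add("tag", "a")
    d.add("tag", "b")
    return d["tag"], d.last("tag"), d.get_list("tag")  # "a", "b", ["a", "b"]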
class Cookies(MultiDict):
"""MultiDict specialised for Cookie values"""
def __init__(self):
pass
def __getitem__(self, key):
return self.last(key)
class Authentication(object):
"""Object for dealing with HTTP Authentication
.. attribute:: username
The username supplied in the HTTP Authorization
header, or None
.. attribute:: password
The password supplied in the HTTP Authorization
header, or None
"""
def __init__(self, headers):
self.username = None
self.password = None
auth_schemes = {"Basic": self.decode_basic}
if "authorization" in headers:
header = headers.get("authorization")
auth_type, data = header.split(" ", 1)
if auth_type in auth_schemes:
self.username, self.password = auth_schemes[auth_type](data)
else:
raise HTTPException(400, "Unsupported authentication scheme %s" % auth_type)
def decode_basic(self, data):
decoded_data = base64.decodestring(data)
return decoded_data.split(":", 1)
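# A hedged sketch (illustrative) of decoding a Basic Authorization header:
def _example_authentication():
    token = base64.encodestring("user:secret").strip()
    headers = RequestHeaders({"Authorization": "Basic " + token})
    auth = Authentication(headers)
    return auth.username, auth.password  # "user", "secret"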
|
rajegannathan/grasp-lift-eeg-cat-dog-solution-updated | refs/heads/master | python-packages/mne-python-0.10/mne/io/fiff/tests/test_raw.py | 1 | from __future__ import print_function
# Author: Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
#
# License: BSD (3-clause)
import os
import os.path as op
import glob
from copy import deepcopy
import warnings
import itertools as itt
import numpy as np
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_allclose, assert_equal)
from nose.tools import assert_true, assert_raises, assert_not_equal
from mne.datasets import testing
from mne.io.constants import FIFF
from mne.io import Raw, RawArray, concatenate_raws, read_raw_fif
from mne.io.tests.test_raw import _test_concat
from mne import (concatenate_events, find_events, equalize_channels,
compute_proj_raw, pick_types, pick_channels, create_info)
from mne.utils import (_TempDir, requires_pandas, slow_test,
requires_mne, run_subprocess, run_tests_if_main)
from mne.externals.six.moves import zip, cPickle as pickle
from mne.io.proc_history import _get_sss_rank
from mne.io.pick import _picks_by_type
warnings.simplefilter('always') # enable b/c these tests throw warnings
data_dir = op.join(testing.data_path(download=False), 'MEG', 'sample')
fif_fname = op.join(data_dir, 'sample_audvis_trunc_raw.fif')
base_dir = op.join(op.dirname(__file__), '..', '..', 'tests', 'data')
test_fif_fname = op.join(base_dir, 'test_raw.fif')
test_fif_gz_fname = op.join(base_dir, 'test_raw.fif.gz')
ctf_fname = op.join(base_dir, 'test_ctf_raw.fif')
ctf_comp_fname = op.join(base_dir, 'test_ctf_comp_raw.fif')
fif_bad_marked_fname = op.join(base_dir, 'test_withbads_raw.fif')
bad_file_works = op.join(base_dir, 'test_bads.txt')
bad_file_wrong = op.join(base_dir, 'test_wrong_bads.txt')
hp_fname = op.join(base_dir, 'test_chpi_raw_hp.txt')
hp_fif_fname = op.join(base_dir, 'test_chpi_raw_sss.fif')
def test_fix_types():
"""Test fixing of channel types
"""
for fname, change in ((hp_fif_fname, True), (test_fif_fname, False),
(ctf_fname, False)):
raw = Raw(fname)
mag_picks = pick_types(raw.info, meg='mag')
other_picks = np.setdiff1d(np.arange(len(raw.ch_names)), mag_picks)
# we don't actually have any files suffering from this problem, so
# fake it
if change:
for ii in mag_picks:
raw.info['chs'][ii]['coil_type'] = FIFF.FIFFV_COIL_VV_MAG_T2
orig_types = np.array([ch['coil_type'] for ch in raw.info['chs']])
raw.fix_mag_coil_types()
new_types = np.array([ch['coil_type'] for ch in raw.info['chs']])
if not change:
assert_array_equal(orig_types, new_types)
else:
assert_array_equal(orig_types[other_picks], new_types[other_picks])
assert_true((orig_types[mag_picks] != new_types[mag_picks]).all())
assert_true((new_types[mag_picks] ==
FIFF.FIFFV_COIL_VV_MAG_T3).all())
def test_concat():
"""Test RawFIF concatenation
"""
# we trim the file to save lots of memory and some time
tempdir = _TempDir()
raw = read_raw_fif(test_fif_fname)
raw.crop(0, 2., copy=False)
test_name = op.join(tempdir, 'test_raw.fif')
raw.save(test_name)
# now run the standard test
_test_concat(read_raw_fif, test_name)
@testing.requires_testing_data
def test_hash_raw():
"""Test hashing raw objects
"""
raw = read_raw_fif(fif_fname)
assert_raises(RuntimeError, raw.__hash__)
raw = Raw(fif_fname).crop(0, 0.5, False)
raw.load_data()
raw_2 = Raw(fif_fname).crop(0, 0.5, False)
raw_2.load_data()
assert_equal(hash(raw), hash(raw_2))
# do NOT use assert_equal here, failing output is terrible
assert_equal(pickle.dumps(raw), pickle.dumps(raw_2))
raw_2._data[0, 0] -= 1
assert_not_equal(hash(raw), hash(raw_2))
@testing.requires_testing_data
def test_subject_info():
"""Test reading subject information
"""
tempdir = _TempDir()
raw = Raw(fif_fname).crop(0, 1, False)
assert_true(raw.info['subject_info'] is None)
# fake some subject data
keys = ['id', 'his_id', 'last_name', 'first_name', 'birthday', 'sex',
'hand']
vals = [1, 'foobar', 'bar', 'foo', (1901, 2, 3), 0, 1]
subject_info = dict()
for key, val in zip(keys, vals):
subject_info[key] = val
raw.info['subject_info'] = subject_info
out_fname = op.join(tempdir, 'test_subj_info_raw.fif')
raw.save(out_fname, overwrite=True)
raw_read = Raw(out_fname)
for key in keys:
assert_equal(subject_info[key], raw_read.info['subject_info'][key])
raw_read.anonymize()
assert_true(raw_read.info.get('subject_info') is None)
out_fname_anon = op.join(tempdir, 'test_subj_info_anon_raw.fif')
raw_read.save(out_fname_anon, overwrite=True)
raw_read = Raw(out_fname_anon)
assert_true(raw_read.info.get('subject_info') is None)
@testing.requires_testing_data
def test_copy_append():
"""Test raw copying and appending combinations
"""
raw = Raw(fif_fname, preload=True).copy()
raw_full = Raw(fif_fname)
raw_full.append(raw)
data = raw_full[:, :][0]
assert_equal(data.shape[1], 2 * raw._data.shape[1])
@slow_test
@testing.requires_testing_data
def test_rank_estimation():
"""Test raw rank estimation
"""
iter_tests = itt.product(
[fif_fname, hp_fif_fname], # sss
['norm', dict(mag=1e11, grad=1e9, eeg=1e5)]
)
for fname, scalings in iter_tests:
raw = Raw(fname)
(_, picks_meg), (_, picks_eeg) = _picks_by_type(raw.info,
meg_combined=True)
n_meg = len(picks_meg)
n_eeg = len(picks_eeg)
raw = Raw(fname, preload=True)
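        # Maxwell-filtered (SSS) data has reduced rank, which is recoverable
        # from the processing history; unprocessed data should be full rank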
if 'proc_history' not in raw.info:
expected_rank = n_meg + n_eeg
else:
mf = raw.info['proc_history'][0]['max_info']
expected_rank = _get_sss_rank(mf) + n_eeg
assert_array_equal(raw.estimate_rank(scalings=scalings), expected_rank)
assert_array_equal(raw.estimate_rank(picks=picks_eeg,
scalings=scalings),
n_eeg)
raw = Raw(fname, preload=False)
if 'sss' in fname:
tstart, tstop = 0., 30.
raw.add_proj(compute_proj_raw(raw))
raw.apply_proj()
else:
tstart, tstop = 10., 20.
raw.apply_proj()
n_proj = len(raw.info['projs'])
assert_array_equal(raw.estimate_rank(tstart=tstart, tstop=tstop,
scalings=scalings),
expected_rank - (1 if 'sss' in fname else n_proj))
@testing.requires_testing_data
def test_output_formats():
"""Test saving and loading raw data using multiple formats
"""
tempdir = _TempDir()
formats = ['short', 'int', 'single', 'double']
tols = [1e-4, 1e-7, 1e-7, 1e-15]
# let's fake a raw file with different formats
raw = Raw(test_fif_fname).crop(0, 1, copy=False)
temp_file = op.join(tempdir, 'raw.fif')
for ii, (fmt, tol) in enumerate(zip(formats, tols)):
        # Let's test the overwrite error handling while we're at it
if ii > 0:
assert_raises(IOError, raw.save, temp_file, fmt=fmt)
raw.save(temp_file, fmt=fmt, overwrite=True)
raw2 = Raw(temp_file)
raw2_data = raw2[:, :][0]
assert_allclose(raw2_data, raw[:, :][0], rtol=tol, atol=1e-25)
assert_equal(raw2.orig_format, fmt)
def _compare_combo(raw, new, times, n_times):
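    # `new` holds two copies of `raw` back to back, so index ti in `new`
    # must match index ti % n_times in the original `raw`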
for ti in times: # let's do a subset of points for speed
orig = raw[:, ti % n_times][0]
# these are almost_equals because of possible dtype differences
assert_allclose(orig, new[:, ti][0])
@slow_test
@testing.requires_testing_data
def test_multiple_files():
"""Test loading multiple files simultaneously
"""
# split file
tempdir = _TempDir()
raw = Raw(fif_fname).crop(0, 10, False)
raw.load_data()
raw.load_data() # test no operation
split_size = 3. # in seconds
sfreq = raw.info['sfreq']
nsamp = (raw.last_samp - raw.first_samp)
tmins = np.round(np.arange(0., nsamp, split_size * sfreq))
tmaxs = np.concatenate((tmins[1:] - 1, [nsamp]))
tmaxs /= sfreq
tmins /= sfreq
assert_equal(raw.n_times, len(raw.times))
    # going in reverse order so the last fname is the first file (needed later)
raws = [None] * len(tmins)
for ri in range(len(tmins) - 1, -1, -1):
fname = op.join(tempdir, 'test_raw_split-%d_raw.fif' % ri)
raw.save(fname, tmin=tmins[ri], tmax=tmaxs[ri])
raws[ri] = Raw(fname)
events = [find_events(r, stim_channel='STI 014') for r in raws]
last_samps = [r.last_samp for r in raws]
first_samps = [r.first_samp for r in raws]
# test concatenation of split file
assert_raises(ValueError, concatenate_raws, raws, True, events[1:])
all_raw_1, events1 = concatenate_raws(raws, preload=False,
events_list=events)
assert_equal(raw.first_samp, all_raw_1.first_samp)
assert_equal(raw.last_samp, all_raw_1.last_samp)
assert_allclose(raw[:, :][0], all_raw_1[:, :][0])
raws[0] = Raw(fname)
all_raw_2 = concatenate_raws(raws, preload=True)
assert_allclose(raw[:, :][0], all_raw_2[:, :][0])
# test proper event treatment for split files
events2 = concatenate_events(events, first_samps, last_samps)
events3 = find_events(all_raw_2, stim_channel='STI 014')
assert_array_equal(events1, events2)
assert_array_equal(events1, events3)
# test various methods of combining files
raw = Raw(fif_fname, preload=True)
n_times = raw.n_times
# make sure that all our data match
times = list(range(0, 2 * n_times, 999))
# add potentially problematic points
times.extend([n_times - 1, n_times, 2 * n_times - 1])
raw_combo0 = Raw([fif_fname, fif_fname], preload=True)
_compare_combo(raw, raw_combo0, times, n_times)
raw_combo = Raw([fif_fname, fif_fname], preload=False)
_compare_combo(raw, raw_combo, times, n_times)
raw_combo = Raw([fif_fname, fif_fname], preload='memmap8.dat')
_compare_combo(raw, raw_combo, times, n_times)
assert_raises(ValueError, Raw, [fif_fname, ctf_fname])
assert_raises(ValueError, Raw, [fif_fname, fif_bad_marked_fname])
assert_equal(raw[:, :][0].shape[1] * 2, raw_combo0[:, :][0].shape[1])
assert_equal(raw_combo0[:, :][0].shape[1], raw_combo0.n_times)
# with all data preloaded, result should be preloaded
raw_combo = Raw(fif_fname, preload=True)
raw_combo.append(Raw(fif_fname, preload=True))
assert_true(raw_combo.preload is True)
assert_equal(raw_combo.n_times, raw_combo._data.shape[1])
_compare_combo(raw, raw_combo, times, n_times)
# with any data not preloaded, don't set result as preloaded
raw_combo = concatenate_raws([Raw(fif_fname, preload=True),
Raw(fif_fname, preload=False)])
assert_true(raw_combo.preload is False)
assert_array_equal(find_events(raw_combo, stim_channel='STI 014'),
find_events(raw_combo0, stim_channel='STI 014'))
_compare_combo(raw, raw_combo, times, n_times)
# user should be able to force data to be preloaded upon concat
raw_combo = concatenate_raws([Raw(fif_fname, preload=False),
Raw(fif_fname, preload=True)],
preload=True)
assert_true(raw_combo.preload is True)
_compare_combo(raw, raw_combo, times, n_times)
raw_combo = concatenate_raws([Raw(fif_fname, preload=False),
Raw(fif_fname, preload=True)],
preload='memmap3.dat')
_compare_combo(raw, raw_combo, times, n_times)
raw_combo = concatenate_raws([Raw(fif_fname, preload=True),
Raw(fif_fname, preload=True)],
preload='memmap4.dat')
_compare_combo(raw, raw_combo, times, n_times)
raw_combo = concatenate_raws([Raw(fif_fname, preload=False),
Raw(fif_fname, preload=False)],
preload='memmap5.dat')
_compare_combo(raw, raw_combo, times, n_times)
# verify that combining raws with different projectors throws an exception
raw.add_proj([], remove_existing=True)
assert_raises(ValueError, raw.append, Raw(fif_fname, preload=True))
# now test event treatment for concatenated raw files
events = [find_events(raw, stim_channel='STI 014'),
find_events(raw, stim_channel='STI 014')]
last_samps = [raw.last_samp, raw.last_samp]
first_samps = [raw.first_samp, raw.first_samp]
events = concatenate_events(events, first_samps, last_samps)
events2 = find_events(raw_combo0, stim_channel='STI 014')
assert_array_equal(events, events2)
# check out the len method
assert_equal(len(raw), raw.n_times)
assert_equal(len(raw), raw.last_samp - raw.first_samp + 1)
@testing.requires_testing_data
def test_split_files():
"""Test writing and reading of split raw files
"""
tempdir = _TempDir()
raw_1 = Raw(fif_fname, preload=True)
    assert_allclose(raw_1.info['buffer_size_sec'], 10., atol=1e-2)  # samp rate rounding
split_fname = op.join(tempdir, 'split_raw.fif')
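    # a 10MB size limit should force the writer to roll over into
    # 'split_raw-*.fif' part files, which are globbed for below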
raw_1.save(split_fname, buffer_size_sec=1.0, split_size='10MB')
raw_2 = Raw(split_fname)
    assert_allclose(raw_2.info['buffer_size_sec'], 1., atol=1e-2)  # samp rate rounding
data_1, times_1 = raw_1[:, :]
data_2, times_2 = raw_2[:, :]
assert_array_equal(data_1, data_2)
assert_array_equal(times_1, times_2)
# test the case where the silly user specifies the split files
fnames = [split_fname]
fnames.extend(sorted(glob.glob(op.join(tempdir, 'split_raw-*.fif'))))
with warnings.catch_warnings(record=True):
warnings.simplefilter('always')
raw_2 = Raw(fnames)
data_2, times_2 = raw_2[:, :]
assert_array_equal(data_1, data_2)
assert_array_equal(times_1, times_2)
def test_load_bad_channels():
"""Test reading/writing of bad channels
"""
tempdir = _TempDir()
# Load correctly marked file (manually done in mne_process_raw)
raw_marked = Raw(fif_bad_marked_fname)
correct_bads = raw_marked.info['bads']
raw = Raw(test_fif_fname)
# Make sure it starts clean
assert_array_equal(raw.info['bads'], [])
# Test normal case
raw.load_bad_channels(bad_file_works)
# Write it out, read it in, and check
raw.save(op.join(tempdir, 'foo_raw.fif'))
raw_new = Raw(op.join(tempdir, 'foo_raw.fif'))
assert_equal(correct_bads, raw_new.info['bads'])
# Reset it
raw.info['bads'] = []
# Test bad case
assert_raises(ValueError, raw.load_bad_channels, bad_file_wrong)
# Test forcing the bad case
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
raw.load_bad_channels(bad_file_wrong, force=True)
n_found = sum(['1 bad channel' in str(ww.message) for ww in w])
assert_equal(n_found, 1) # there could be other irrelevant errors
# write it out, read it in, and check
raw.save(op.join(tempdir, 'foo_raw.fif'), overwrite=True)
raw_new = Raw(op.join(tempdir, 'foo_raw.fif'))
assert_equal(correct_bads, raw_new.info['bads'])
# Check that bad channels are cleared
raw.load_bad_channels(None)
raw.save(op.join(tempdir, 'foo_raw.fif'), overwrite=True)
raw_new = Raw(op.join(tempdir, 'foo_raw.fif'))
assert_equal([], raw_new.info['bads'])
@slow_test
@testing.requires_testing_data
def test_io_raw():
"""Test IO for raw data (Neuromag + CTF + gz)
"""
tempdir = _TempDir()
# test unicode io
for chars in [b'\xc3\xa4\xc3\xb6\xc3\xa9', b'a']:
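        # the first byte string is UTF-8 encoded 'äöé'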
with Raw(fif_fname) as r:
assert_true('Raw' in repr(r))
desc1 = r.info['description'] = chars.decode('utf-8')
temp_file = op.join(tempdir, 'raw.fif')
r.save(temp_file, overwrite=True)
with Raw(temp_file) as r2:
desc2 = r2.info['description']
assert_equal(desc1, desc2)
# Let's construct a simple test for IO first
raw = Raw(fif_fname).crop(0, 3.5, False)
raw.load_data()
# put in some data that we know the values of
data = np.random.randn(raw._data.shape[0], raw._data.shape[1])
raw._data[:, :] = data
# save it somewhere
fname = op.join(tempdir, 'test_copy_raw.fif')
raw.save(fname, buffer_size_sec=1.0)
# read it in, make sure the whole thing matches
raw = Raw(fname)
assert_allclose(data, raw[:, :][0], rtol=1e-6, atol=1e-20)
# let's read portions across the 1-sec tag boundary, too
inds = raw.time_as_index([1.75, 2.25])
sl = slice(inds[0], inds[1])
assert_allclose(data[:, sl], raw[:, sl][0], rtol=1e-6, atol=1e-20)
# now let's do some real I/O
fnames_in = [fif_fname, test_fif_gz_fname, ctf_fname]
fnames_out = ['raw.fif', 'raw.fif.gz', 'raw.fif']
for fname_in, fname_out in zip(fnames_in, fnames_out):
fname_out = op.join(tempdir, fname_out)
raw = Raw(fname_in)
nchan = raw.info['nchan']
ch_names = raw.info['ch_names']
meg_channels_idx = [k for k in range(nchan)
if ch_names[k][0] == 'M']
n_channels = 100
meg_channels_idx = meg_channels_idx[:n_channels]
start, stop = raw.time_as_index([0, 5])
data, times = raw[meg_channels_idx, start:(stop + 1)]
meg_ch_names = [ch_names[k] for k in meg_channels_idx]
# Set up pick list: MEG + STI 014 - bad channels
include = ['STI 014']
include += meg_ch_names
picks = pick_types(raw.info, meg=True, eeg=False, stim=True,
misc=True, ref_meg=True, include=include,
exclude='bads')
# Writing with drop_small_buffer True
raw.save(fname_out, picks, tmin=0, tmax=4, buffer_size_sec=3,
drop_small_buffer=True, overwrite=True)
raw2 = Raw(fname_out)
sel = pick_channels(raw2.ch_names, meg_ch_names)
data2, times2 = raw2[sel, :]
assert_true(times2.max() <= 3)
# Writing
raw.save(fname_out, picks, tmin=0, tmax=5, overwrite=True)
if fname_in == fif_fname or fname_in == fif_fname + '.gz':
assert_equal(len(raw.info['dig']), 146)
raw2 = Raw(fname_out)
sel = pick_channels(raw2.ch_names, meg_ch_names)
data2, times2 = raw2[sel, :]
assert_allclose(data, data2, rtol=1e-6, atol=1e-20)
assert_allclose(times, times2)
assert_allclose(raw.info['sfreq'], raw2.info['sfreq'], rtol=1e-5)
# check transformations
for trans in ['dev_head_t', 'dev_ctf_t', 'ctf_head_t']:
if raw.info[trans] is None:
assert_true(raw2.info[trans] is None)
else:
assert_array_equal(raw.info[trans]['trans'],
raw2.info[trans]['trans'])
# check transformation 'from' and 'to'
if trans.startswith('dev'):
from_id = FIFF.FIFFV_COORD_DEVICE
else:
from_id = FIFF.FIFFV_MNE_COORD_CTF_HEAD
if trans[4:8] == 'head':
to_id = FIFF.FIFFV_COORD_HEAD
else:
to_id = FIFF.FIFFV_MNE_COORD_CTF_HEAD
for raw_ in [raw, raw2]:
assert_equal(raw_.info[trans]['from'], from_id)
assert_equal(raw_.info[trans]['to'], to_id)
if fname_in == fif_fname or fname_in == fif_fname + '.gz':
assert_allclose(raw.info['dig'][0]['r'], raw2.info['dig'][0]['r'])
# test warnings on bad filenames
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
raw_badname = op.join(tempdir, 'test-bad-name.fif.gz')
raw.save(raw_badname)
Raw(raw_badname)
assert_true(len(w) > 0) # len(w) should be 2 but Travis sometimes has more
@testing.requires_testing_data
def test_io_complex():
"""Test IO with complex data types
"""
tempdir = _TempDir()
dtypes = [np.complex64, np.complex128]
raw = Raw(fif_fname, preload=True)
picks = np.arange(5)
start, stop = raw.time_as_index([0, 5])
data_orig, _ = raw[picks, start:stop]
for di, dtype in enumerate(dtypes):
imag_rand = np.array(1j * np.random.randn(data_orig.shape[0],
data_orig.shape[1]), dtype)
raw_cp = raw.copy()
raw_cp._data = np.array(raw_cp._data, dtype)
raw_cp._data[picks, start:stop] += imag_rand
# this should throw an error because it's complex
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
raw_cp.save(op.join(tempdir, 'raw.fif'), picks, tmin=0, tmax=5,
overwrite=True)
        # warning gets thrown on every instance b/c simplefilter('always')
assert_equal(len(w), 1)
raw2 = Raw(op.join(tempdir, 'raw.fif'))
raw2_data, _ = raw2[picks, :]
n_samp = raw2_data.shape[1]
assert_allclose(raw2_data[:, :n_samp], raw_cp._data[picks, :n_samp])
# with preloading
raw2 = Raw(op.join(tempdir, 'raw.fif'), preload=True)
raw2_data, _ = raw2[picks, :]
n_samp = raw2_data.shape[1]
assert_allclose(raw2_data[:, :n_samp], raw_cp._data[picks, :n_samp])
@testing.requires_testing_data
def test_getitem():
"""Test getitem/indexing of Raw
"""
for preload in [False, True, 'memmap.dat']:
raw = Raw(fif_fname, preload=preload)
data, times = raw[0, :]
data1, times1 = raw[0]
assert_array_equal(data, data1)
assert_array_equal(times, times1)
data, times = raw[0:2, :]
data1, times1 = raw[0:2]
assert_array_equal(data, data1)
assert_array_equal(times, times1)
data1, times1 = raw[[0, 1]]
assert_array_equal(data, data1)
assert_array_equal(times, times1)
@testing.requires_testing_data
def test_proj():
"""Test SSP proj operations
"""
tempdir = _TempDir()
for proj in [True, False]:
raw = Raw(fif_fname, preload=False, proj=proj)
assert_true(all(p['active'] == proj for p in raw.info['projs']))
data, times = raw[0:2, :]
data1, times1 = raw[0:2]
assert_array_equal(data, data1)
assert_array_equal(times, times1)
# test adding / deleting proj
if proj:
assert_raises(ValueError, raw.add_proj, [],
{'remove_existing': True})
assert_raises(ValueError, raw.del_proj, 0)
else:
projs = deepcopy(raw.info['projs'])
n_proj = len(raw.info['projs'])
raw.del_proj(0)
assert_equal(len(raw.info['projs']), n_proj - 1)
raw.add_proj(projs, remove_existing=False)
assert_equal(len(raw.info['projs']), 2 * n_proj - 1)
raw.add_proj(projs, remove_existing=True)
assert_equal(len(raw.info['projs']), n_proj)
# test apply_proj() with and without preload
for preload in [True, False]:
raw = Raw(fif_fname, preload=preload, proj=False)
data, times = raw[:, 0:2]
raw.apply_proj()
data_proj_1 = np.dot(raw._projector, data)
# load the file again without proj
raw = Raw(fif_fname, preload=preload, proj=False)
# write the file with proj. activated, make sure proj has been applied
raw.save(op.join(tempdir, 'raw.fif'), proj=True, overwrite=True)
raw2 = Raw(op.join(tempdir, 'raw.fif'), proj=False)
data_proj_2, _ = raw2[:, 0:2]
assert_allclose(data_proj_1, data_proj_2)
assert_true(all(p['active'] for p in raw2.info['projs']))
# read orig file with proj. active
raw2 = Raw(fif_fname, preload=preload, proj=True)
data_proj_2, _ = raw2[:, 0:2]
assert_allclose(data_proj_1, data_proj_2)
assert_true(all(p['active'] for p in raw2.info['projs']))
# test that apply_proj works
raw.apply_proj()
data_proj_2, _ = raw[:, 0:2]
assert_allclose(data_proj_1, data_proj_2)
assert_allclose(data_proj_2, np.dot(raw._projector, data_proj_2))
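    # regression check: reading with proj=True must give the same result for
    # full-array and single-channel indexing on a minimal EEG-only file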
tempdir = _TempDir()
out_fname = op.join(tempdir, 'test_raw.fif')
raw = read_raw_fif(test_fif_fname, preload=True).crop(0, 0.002, copy=False)
raw.pick_types(meg=False, eeg=True)
raw.info['projs'] = [raw.info['projs'][-1]]
raw._data.fill(0)
raw._data[-1] = 1.
raw.save(out_fname)
raw = read_raw_fif(out_fname, proj=True, preload=False)
assert_allclose(raw[:, :][0][:1], raw[0, :][0])
@testing.requires_testing_data
def test_preload_modify():
"""Test preloading and modifying data
"""
tempdir = _TempDir()
for preload in [False, True, 'memmap.dat']:
raw = Raw(fif_fname, preload=preload)
nsamp = raw.last_samp - raw.first_samp + 1
picks = pick_types(raw.info, meg='grad', exclude='bads')
data = np.random.randn(len(picks), nsamp // 2)
try:
raw[picks, :nsamp // 2] = data
except RuntimeError as err:
if not preload:
continue
else:
raise err
tmp_fname = op.join(tempdir, 'raw.fif')
raw.save(tmp_fname, overwrite=True)
raw_new = Raw(tmp_fname)
        data_new, _ = raw_new[picks, :nsamp // 2]
assert_allclose(data, data_new)
@slow_test
@testing.requires_testing_data
def test_filter():
"""Test filtering (FIR and IIR) and Raw.apply_function interface
"""
raw = Raw(fif_fname).crop(0, 7, False)
raw.load_data()
sig_dec = 11
sig_dec_notch = 12
sig_dec_notch_fit = 12
picks_meg = pick_types(raw.info, meg=True, exclude='bads')
picks = picks_meg[:4]
raw_lp = raw.copy()
raw_lp.filter(0., 4.0 - 0.25, picks=picks, n_jobs=2)
raw_hp = raw.copy()
raw_hp.filter(8.0 + 0.25, None, picks=picks, n_jobs=2)
raw_bp = raw.copy()
raw_bp.filter(4.0 + 0.25, 8.0 - 0.25, picks=picks)
raw_bs = raw.copy()
raw_bs.filter(8.0 + 0.25, 4.0 - 0.25, picks=picks, n_jobs=2)
data, _ = raw[picks, :]
lp_data, _ = raw_lp[picks, :]
hp_data, _ = raw_hp[picks, :]
bp_data, _ = raw_bp[picks, :]
bs_data, _ = raw_bs[picks, :]
assert_array_almost_equal(data, lp_data + bp_data + hp_data, sig_dec)
assert_array_almost_equal(data, bp_data + bs_data, sig_dec)
raw_lp_iir = raw.copy()
raw_lp_iir.filter(0., 4.0, picks=picks, n_jobs=2, method='iir')
raw_hp_iir = raw.copy()
raw_hp_iir.filter(8.0, None, picks=picks, n_jobs=2, method='iir')
raw_bp_iir = raw.copy()
raw_bp_iir.filter(4.0, 8.0, picks=picks, method='iir')
lp_data_iir, _ = raw_lp_iir[picks, :]
hp_data_iir, _ = raw_hp_iir[picks, :]
bp_data_iir, _ = raw_bp_iir[picks, :]
summation = lp_data_iir + hp_data_iir + bp_data_iir
assert_array_almost_equal(data[:, 100:-100], summation[:, 100:-100],
sig_dec)
# make sure we didn't touch other channels
data, _ = raw[picks_meg[4:], :]
bp_data, _ = raw_bp[picks_meg[4:], :]
assert_array_equal(data, bp_data)
bp_data_iir, _ = raw_bp_iir[picks_meg[4:], :]
assert_array_equal(data, bp_data_iir)
# do a very simple check on line filtering
raw_bs = raw.copy()
with warnings.catch_warnings(record=True):
warnings.simplefilter('always')
raw_bs.filter(60.0 + 0.5, 60.0 - 0.5, picks=picks, n_jobs=2)
data_bs, _ = raw_bs[picks, :]
raw_notch = raw.copy()
raw_notch.notch_filter(60.0, picks=picks, n_jobs=2, method='fft')
data_notch, _ = raw_notch[picks, :]
assert_array_almost_equal(data_bs, data_notch, sig_dec_notch)
# now use the sinusoidal fitting
raw_notch = raw.copy()
raw_notch.notch_filter(None, picks=picks, n_jobs=2, method='spectrum_fit')
data_notch, _ = raw_notch[picks, :]
data, _ = raw[picks, :]
assert_array_almost_equal(data, data_notch, sig_dec_notch_fit)
@testing.requires_testing_data
def test_crop():
"""Test cropping raw files
"""
# split a concatenated file to test a difficult case
raw = Raw([fif_fname, fif_fname], preload=False)
split_size = 10. # in seconds
sfreq = raw.info['sfreq']
nsamp = (raw.last_samp - raw.first_samp + 1)
# do an annoying case (off-by-one splitting)
tmins = np.r_[1., np.round(np.arange(0., nsamp - 1, split_size * sfreq))]
tmins = np.sort(tmins)
tmaxs = np.concatenate((tmins[1:] - 1, [nsamp - 1]))
tmaxs /= sfreq
tmins /= sfreq
raws = [None] * len(tmins)
for ri, (tmin, tmax) in enumerate(zip(tmins, tmaxs)):
raws[ri] = raw.crop(tmin, tmax, True)
all_raw_2 = concatenate_raws(raws, preload=False)
assert_equal(raw.first_samp, all_raw_2.first_samp)
assert_equal(raw.last_samp, all_raw_2.last_samp)
assert_array_equal(raw[:, :][0], all_raw_2[:, :][0])
tmins = np.round(np.arange(0., nsamp - 1, split_size * sfreq))
tmaxs = np.concatenate((tmins[1:] - 1, [nsamp - 1]))
tmaxs /= sfreq
tmins /= sfreq
    # going in reverse order so the last fname is the first file (needed later)
raws = [None] * len(tmins)
for ri, (tmin, tmax) in enumerate(zip(tmins, tmaxs)):
raws[ri] = raw.copy()
raws[ri].crop(tmin, tmax, False)
# test concatenation of split file
all_raw_1 = concatenate_raws(raws, preload=False)
all_raw_2 = raw.crop(0, None, True)
for ar in [all_raw_1, all_raw_2]:
assert_equal(raw.first_samp, ar.first_samp)
assert_equal(raw.last_samp, ar.last_samp)
assert_array_equal(raw[:, :][0], ar[:, :][0])
@testing.requires_testing_data
def test_resample():
"""Test resample (with I/O and multiple files)
"""
tempdir = _TempDir()
raw = Raw(fif_fname).crop(0, 3, False)
raw.load_data()
raw_resamp = raw.copy()
sfreq = raw.info['sfreq']
# test parallel on upsample
raw_resamp.resample(sfreq * 2, n_jobs=2)
assert_equal(raw_resamp.n_times, len(raw_resamp.times))
raw_resamp.save(op.join(tempdir, 'raw_resamp-raw.fif'))
raw_resamp = Raw(op.join(tempdir, 'raw_resamp-raw.fif'), preload=True)
assert_equal(sfreq, raw_resamp.info['sfreq'] / 2)
assert_equal(raw.n_times, raw_resamp.n_times / 2)
assert_equal(raw_resamp._data.shape[1], raw_resamp.n_times)
assert_equal(raw._data.shape[0], raw_resamp._data.shape[0])
# test non-parallel on downsample
raw_resamp.resample(sfreq, n_jobs=1)
assert_equal(raw_resamp.info['sfreq'], sfreq)
assert_equal(raw._data.shape, raw_resamp._data.shape)
assert_equal(raw.first_samp, raw_resamp.first_samp)
    assert_equal(raw.last_samp, raw_resamp.last_samp)
# upsampling then downsampling doubles resampling error, but this still
# works (hooray). Note that the stim channels had to be sub-sampled
# without filtering to be accurately preserved
# note we have to treat MEG and EEG+STIM channels differently (tols)
assert_allclose(raw._data[:306, 200:-200],
raw_resamp._data[:306, 200:-200],
rtol=1e-2, atol=1e-12)
assert_allclose(raw._data[306:, 200:-200],
raw_resamp._data[306:, 200:-200],
rtol=1e-2, atol=1e-7)
# now check multiple file support w/resampling, as order of operations
# (concat, resample) should not affect our data
raw1 = raw.copy()
raw2 = raw.copy()
raw3 = raw.copy()
raw4 = raw.copy()
raw1 = concatenate_raws([raw1, raw2])
raw1.resample(10.)
raw3.resample(10.)
raw4.resample(10.)
raw3 = concatenate_raws([raw3, raw4])
assert_array_equal(raw1._data, raw3._data)
assert_array_equal(raw1._first_samps, raw3._first_samps)
assert_array_equal(raw1._last_samps, raw3._last_samps)
assert_array_equal(raw1._raw_lengths, raw3._raw_lengths)
assert_equal(raw1.first_samp, raw3.first_samp)
assert_equal(raw1.last_samp, raw3.last_samp)
assert_equal(raw1.info['sfreq'], raw3.info['sfreq'])
# test resampling of stim channel
# basic decimation
stim = [1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0]
raw = RawArray([stim], create_info(1, len(stim), ['stim']))
assert_allclose(raw.resample(8.)._data,
[[1, 1, 0, 0, 1, 1, 0, 0]])
# decimation of multiple stim channels
raw = RawArray(2 * [stim], create_info(2, len(stim), 2 * ['stim']))
assert_allclose(raw.resample(8.)._data,
[[1, 1, 0, 0, 1, 1, 0, 0],
[1, 1, 0, 0, 1, 1, 0, 0]])
# decimation that could potentially drop events if the decimation is
# done naively
stim = [0, 0, 0, 1, 1, 0, 0, 0]
raw = RawArray([stim], create_info(1, len(stim), ['stim']))
assert_allclose(raw.resample(4.)._data,
[[0, 1, 1, 0]])
# two events are merged in this case (warning)
stim = [0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0]
raw = RawArray([stim], create_info(1, len(stim), ['stim']))
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
raw.resample(8.)
assert_true(len(w) == 1)
# events are dropped in this case (warning)
stim = [0, 1, 1, 0, 0, 1, 1, 0]
raw = RawArray([stim], create_info(1, len(stim), ['stim']))
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
raw.resample(4.)
assert_true(len(w) == 1)
# test resampling events: this should no longer give a warning
stim = [0, 1, 1, 0, 0, 1, 1, 0]
raw = RawArray([stim], create_info(1, len(stim), ['stim']))
events = find_events(raw)
raw, events = raw.resample(4., events=events)
    assert_array_equal(events, np.array([[0, 0, 1], [2, 0, 1]]))
# test copy flag
stim = [1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0]
raw = RawArray([stim], create_info(1, len(stim), ['stim']))
raw_resampled = raw.resample(4., copy=True)
assert_true(raw_resampled is not raw)
raw_resampled = raw.resample(4., copy=False)
assert_true(raw_resampled is raw)
# resample should still work even when no stim channel is present
raw = RawArray(np.random.randn(1, 100), create_info(1, 100, ['eeg']))
raw.resample(10)
assert_true(len(raw) == 10)
@testing.requires_testing_data
def test_hilbert():
"""Test computation of analytic signal using hilbert
"""
raw = Raw(fif_fname, preload=True)
picks_meg = pick_types(raw.info, meg=True, exclude='bads')
picks = picks_meg[:4]
raw_filt = raw.copy()
raw_filt.filter(10, 20)
raw_filt_2 = raw_filt.copy()
raw2 = raw.copy()
raw3 = raw.copy()
raw.apply_hilbert(picks)
raw2.apply_hilbert(picks, envelope=True, n_jobs=2)
# Test custom n_fft
raw_filt.apply_hilbert(picks)
raw_filt_2.apply_hilbert(picks, n_fft=raw_filt_2.n_times + 1000)
assert_equal(raw_filt._data.shape, raw_filt_2._data.shape)
assert_allclose(raw_filt._data[:, 50:-50], raw_filt_2._data[:, 50:-50],
atol=1e-13, rtol=1e-2)
assert_raises(ValueError, raw3.apply_hilbert, picks,
n_fft=raw3.n_times - 100)
env = np.abs(raw._data[picks, :])
assert_allclose(env, raw2._data[picks, :], rtol=1e-2, atol=1e-13)
@testing.requires_testing_data
def test_raw_copy():
"""Test Raw copy
"""
raw = Raw(fif_fname, preload=True)
data, _ = raw[:, :]
copied = raw.copy()
copied_data, _ = copied[:, :]
assert_array_equal(data, copied_data)
assert_equal(sorted(raw.__dict__.keys()),
sorted(copied.__dict__.keys()))
raw = Raw(fif_fname, preload=False)
data, _ = raw[:, :]
copied = raw.copy()
copied_data, _ = copied[:, :]
assert_array_equal(data, copied_data)
assert_equal(sorted(raw.__dict__.keys()),
sorted(copied.__dict__.keys()))
@requires_pandas
def test_to_data_frame():
"""Test raw Pandas exporter"""
raw = Raw(test_fif_fname, preload=True)
_, times = raw[0, :10]
df = raw.to_data_frame()
assert_true((df.columns == raw.ch_names).all())
assert_array_equal(np.round(times * 1e3), df.index.values[:10])
df = raw.to_data_frame(index=None)
assert_true('time' in df.index.names)
assert_array_equal(df.values[:, 0], raw._data[0] * 1e13)
assert_array_equal(df.values[:, 2], raw._data[2] * 1e15)
@testing.requires_testing_data
def test_raw_index_as_time():
""" Test index as time conversion"""
raw = Raw(fif_fname, preload=True)
t0 = raw.index_as_time([0], True)[0]
t1 = raw.index_as_time([100], False)[0]
t2 = raw.index_as_time([100], True)[0]
assert_equal(t2 - t1, t0)
# ensure we can go back and forth
t3 = raw.index_as_time(raw.time_as_index([0], True), True)
assert_array_almost_equal(t3, [0.0], 2)
t3 = raw.index_as_time(raw.time_as_index(raw.info['sfreq'], True), True)
assert_array_almost_equal(t3, [raw.info['sfreq']], 2)
t3 = raw.index_as_time(raw.time_as_index(raw.info['sfreq'], False), False)
assert_array_almost_equal(t3, [raw.info['sfreq']], 2)
i0 = raw.time_as_index(raw.index_as_time([0], True), True)
assert_equal(i0[0], 0)
i1 = raw.time_as_index(raw.index_as_time([100], True), True)
assert_equal(i1[0], 100)
# Have to add small amount of time because we truncate via int casting
i1 = raw.time_as_index(raw.index_as_time([100.0001], False), False)
assert_equal(i1[0], 100)
def test_add_channels():
"""Test raw splitting / re-appending channel types
"""
raw = Raw(test_fif_fname).crop(0, 1).load_data()
raw_nopre = Raw(test_fif_fname, preload=False)
raw_eeg_meg = raw.pick_types(meg=True, eeg=True, copy=True)
raw_eeg = raw.pick_types(meg=False, eeg=True, copy=True)
raw_meg = raw.pick_types(meg=True, eeg=False, copy=True)
raw_stim = raw.pick_types(meg=False, eeg=False, stim=True, copy=True)
raw_new = raw_meg.add_channels([raw_eeg, raw_stim], copy=True)
assert_true(all(ch in raw_new.ch_names
for ch in raw_stim.ch_names + raw_meg.ch_names))
raw_new = raw_meg.add_channels([raw_eeg], copy=True)
    assert_true(all(ch in raw_new.ch_names for ch in raw.ch_names))
assert_array_equal(raw_new[:, :][0], raw_eeg_meg[:, :][0])
assert_array_equal(raw_new[:, :][1], raw[:, :][1])
assert_true(all(ch not in raw_new.ch_names for ch in raw_stim.ch_names))
# Now test errors
raw_badsf = raw_eeg.copy()
raw_badsf.info['sfreq'] = 3.1415927
raw_eeg = raw_eeg.crop(.5)
assert_raises(AssertionError, raw_meg.add_channels, [raw_nopre])
assert_raises(RuntimeError, raw_meg.add_channels, [raw_badsf])
assert_raises(AssertionError, raw_meg.add_channels, [raw_eeg])
assert_raises(ValueError, raw_meg.add_channels, [raw_meg])
assert_raises(AssertionError, raw_meg.add_channels, raw_badsf)
@testing.requires_testing_data
def test_raw_time_as_index():
""" Test time as index conversion"""
raw = Raw(fif_fname, preload=True)
first_samp = raw.time_as_index([0], True)[0]
assert_equal(raw.first_samp, -first_samp)
@testing.requires_testing_data
def test_save():
""" Test saving raw"""
tempdir = _TempDir()
raw = Raw(fif_fname, preload=False)
# can't write over file being read
assert_raises(ValueError, raw.save, fif_fname)
raw = Raw(fif_fname, preload=True)
# can't overwrite file without overwrite=True
assert_raises(IOError, raw.save, fif_fname)
# test abspath support
new_fname = op.join(op.abspath(op.curdir), 'break-raw.fif')
raw.save(op.join(tempdir, new_fname), overwrite=True)
new_raw = Raw(op.join(tempdir, new_fname), preload=False)
assert_raises(ValueError, new_raw.save, new_fname)
# make sure we can overwrite the file we loaded when preload=True
new_raw = Raw(op.join(tempdir, new_fname), preload=True)
new_raw.save(op.join(tempdir, new_fname), overwrite=True)
os.remove(new_fname)
@testing.requires_testing_data
def test_with_statement():
""" Test with statement """
for preload in [True, False]:
with Raw(fif_fname, preload=preload) as raw_:
print(raw_)
def test_compensation_raw():
"""Test Raw compensation
"""
tempdir = _TempDir()
raw1 = Raw(ctf_comp_fname, compensation=None)
assert_true(raw1.comp is None)
data1, times1 = raw1[:, :]
raw2 = Raw(ctf_comp_fname, compensation=3)
data2, times2 = raw2[:, :]
assert_true(raw2.comp is None) # unchanged (data come with grade 3)
assert_array_equal(times1, times2)
assert_array_equal(data1, data2)
raw3 = Raw(ctf_comp_fname, compensation=1)
data3, times3 = raw3[:, :]
assert_true(raw3.comp is not None)
assert_array_equal(times1, times3)
# make sure it's different with a different compensation:
assert_true(np.mean(np.abs(data1 - data3)) > 1e-12)
assert_raises(ValueError, Raw, ctf_comp_fname, compensation=33)
# Try IO with compensation
temp_file = op.join(tempdir, 'raw.fif')
raw1.save(temp_file, overwrite=True)
raw4 = Raw(temp_file)
data4, times4 = raw4[:, :]
assert_array_equal(times1, times4)
assert_array_equal(data1, data4)
# Now save the file that has modified compensation
    # and make sure we get the same data as input, i.e. the compensation
    # is undone
raw3.save(temp_file, overwrite=True)
raw5 = Raw(temp_file)
data5, times5 = raw5[:, :]
assert_array_equal(times1, times5)
assert_allclose(data1, data5, rtol=1e-12, atol=1e-22)
@requires_mne
def test_compensation_raw_mne():
"""Test Raw compensation by comparing with MNE
"""
tempdir = _TempDir()
def compensate_mne(fname, grad):
tmp_fname = op.join(tempdir, 'mne_ctf_test_raw.fif')
cmd = ['mne_process_raw', '--raw', fname, '--save', tmp_fname,
'--grad', str(grad), '--projoff', '--filteroff']
run_subprocess(cmd)
return Raw(tmp_fname, preload=True)
for grad in [0, 2, 3]:
raw_py = Raw(ctf_comp_fname, preload=True, compensation=grad)
raw_c = compensate_mne(ctf_comp_fname, grad)
assert_allclose(raw_py._data, raw_c._data, rtol=1e-6, atol=1e-17)
@testing.requires_testing_data
def test_drop_channels_mixin():
"""Test channels-dropping functionality
"""
raw = Raw(fif_fname, preload=True)
drop_ch = raw.ch_names[:3]
ch_names = raw.ch_names[3:]
ch_names_orig = raw.ch_names
dummy = raw.drop_channels(drop_ch, copy=True)
assert_equal(ch_names, dummy.ch_names)
assert_equal(ch_names_orig, raw.ch_names)
assert_equal(len(ch_names_orig), raw._data.shape[0])
raw.drop_channels(drop_ch)
assert_equal(ch_names, raw.ch_names)
assert_equal(len(ch_names), len(raw._cals))
assert_equal(len(ch_names), raw._data.shape[0])
@testing.requires_testing_data
def test_pick_channels_mixin():
"""Test channel-picking functionality
"""
# preload is True
raw = Raw(fif_fname, preload=True)
ch_names = raw.ch_names[:3]
ch_names_orig = raw.ch_names
dummy = raw.pick_channels(ch_names, copy=True) # copy is True
assert_equal(ch_names, dummy.ch_names)
assert_equal(ch_names_orig, raw.ch_names)
assert_equal(len(ch_names_orig), raw._data.shape[0])
raw.pick_channels(ch_names, copy=False) # copy is False
assert_equal(ch_names, raw.ch_names)
assert_equal(len(ch_names), len(raw._cals))
assert_equal(len(ch_names), raw._data.shape[0])
assert_raises(ValueError, raw.pick_channels, ch_names[0])
raw = Raw(fif_fname, preload=False)
assert_raises(RuntimeError, raw.pick_channels, ch_names)
assert_raises(RuntimeError, raw.drop_channels, ch_names)
@testing.requires_testing_data
def test_equalize_channels():
"""Test equalization of channels
"""
raw1 = Raw(fif_fname, preload=True)
raw2 = raw1.copy()
ch_names = raw1.ch_names[2:]
raw1.drop_channels(raw1.ch_names[:1])
raw2.drop_channels(raw2.ch_names[1:2])
my_comparison = [raw1, raw2]
equalize_channels(my_comparison)
for e in my_comparison:
assert_equal(ch_names, e.ch_names)
run_tests_if_main()
|
hvy/chainer | refs/heads/master | tests/chainer_tests/training_tests/extensions_tests/test_print_report.py | 11 | import tempfile
import unittest
import mock
from chainer import testing
from chainer.training import extensions
class TestPrintReport(unittest.TestCase):
def _setup(self, stream=None, delete_flush=False):
self.logreport = mock.MagicMock(spec=extensions.LogReport(
['epoch'], trigger=(1, 'iteration'), log_name=None))
if stream is None:
self.stream = mock.MagicMock()
if delete_flush:
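                # simulate a stream object that has no flush() method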
del self.stream.flush
else:
self.stream = stream
self.report = extensions.PrintReport(
['epoch'], log_report=self.logreport, out=self.stream)
self.trainer = testing.get_trainer_with_mock_updater(
stop_trigger=(1, 'iteration'))
self.trainer.extend(self.logreport)
self.trainer.extend(self.report)
self.logreport.log = [{'epoch': 0}]
def test_stream_with_flush_is_flushed(self):
self._setup(delete_flush=False)
self.assertTrue(hasattr(self.stream, 'flush'))
self.stream.flush.assert_not_called()
self.report(self.trainer)
self.stream.flush.assert_called_with()
def test_stream_without_flush_raises_no_exception(self):
self._setup(delete_flush=True)
self.assertFalse(hasattr(self.stream, 'flush'))
self.report(self.trainer)
def test_real_stream_raises_no_exception(self):
with tempfile.TemporaryFile(mode='w') as stream:
self._setup(stream=stream)
self.report(self.trainer)
testing.run_module(__name__, __file__)
|
kntem/webdeposit | refs/heads/webdeposit-final | modules/miscutil/lib/hashutils.py | 3 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
Invenio hash functions.
Simplifies imports of hash functions depending on Python version.
Usage example:
>>> from invenio.hashutils import md5
>>> print md5('MyPa$$')
"""
try:
from hashlib import sha256, sha1, md5
HASHLIB_IMPORTED = True
except ImportError:
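    # Python < 2.5 has no hashlib; fall back to the legacy md5/sha modules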
from md5 import md5
from sha import sha as sha1
HASHLIB_IMPORTED = False
|
jobscore/sync-engine | refs/heads/master | inbox/test/auth/test_gmail_auth_credentials.py | 5 | # -*- coding: UTF-8 -*-
import pytest
from sqlalchemy.orm import joinedload, object_session
from inbox.auth.gmail import GmailAuthHandler
from inbox.models.session import session_scope
from inbox.models.account import Account
from inbox.models.backends.gmail import (GOOGLE_CALENDAR_SCOPE,
GOOGLE_CONTACTS_SCOPE,
GOOGLE_EMAIL_SCOPE,
GmailAccount)
from inbox.auth.gmail import g_token_manager
from inbox.basicauth import OAuthError, ConnectionError
SHARD_ID = 0
ACCESS_TOKEN = 'this_is_an_access_token'
@pytest.fixture
def account_with_multiple_auth_creds(db):
email = '[email protected]'
resp = {'access_token': '',
'expires_in': 3600,
'email': email,
'family_name': '',
'given_name': '',
'name': '',
'gender': '',
'id': 0,
'user_id': '',
'id_token': '',
'link': 'http://example.com',
'locale': '',
'picture': '',
'hd': ''}
all_scopes = ' '.join(
[GOOGLE_CALENDAR_SCOPE, GOOGLE_CONTACTS_SCOPE, GOOGLE_EMAIL_SCOPE])
first_auth_args = {
'refresh_token': 'refresh_token_1',
'client_id': 'client_id_1',
'client_secret': 'client_secret_1',
'scope': all_scopes,
'sync_contacts': True,
'sync_events': True
}
second_auth_args = {
'refresh_token': 'refresh_token_2',
'client_id': 'client_id_2',
'client_secret': 'client_secret_2',
'scope': GOOGLE_EMAIL_SCOPE,
'sync_contacts': False,
'sync_events': False
}
g = GmailAuthHandler('gmail')
g.verify_config = lambda x: True
resp.update(first_auth_args)
account = g.get_account(SHARD_ID, email, resp)
db.session.add(account)
db.session.commit()
resp.update(second_auth_args)
account = g.get_account(SHARD_ID, email, resp)
db.session.add(account)
db.session.commit()
return account
@pytest.fixture
def account_with_single_auth_creds(db):
email = '[email protected]'
resp = {'access_token': '',
'expires_in': 3600,
'email': email,
'family_name': '',
'given_name': '',
'name': '',
'gender': '',
'id': 0,
'user_id': '',
'id_token': '',
'link': 'http://example.com',
'locale': '',
'picture': '',
'hd': '',
'refresh_token': 'refresh_token_3',
'client_id': 'client_id_1',
'client_secret': 'client_secret_1',
'scope': ' '.join([GOOGLE_CALENDAR_SCOPE, GOOGLE_EMAIL_SCOPE]),
'sync_contacts': False,
'sync_events': True
}
g = GmailAuthHandler('gmail')
g.verify_config = lambda x: True
account = g.get_account(SHARD_ID, email, resp)
db.session.add(account)
db.session.commit()
return account
@pytest.fixture
def patch_access_token_getter(monkeypatch):
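    # Stub out the OAuth token exchange so tests can simulate revoked
    # refresh tokens and transient connection failures per token.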
class TokenGenerator:
def __init__(self):
self.revoked_refresh_tokens = []
self.connection_error_tokens = []
def new_token(self, refresh_token, client_id=None, client_secret=None):
if refresh_token in self.connection_error_tokens:
raise ConnectionError("Invalid connection!")
if refresh_token in self.revoked_refresh_tokens:
raise OAuthError("Invalid token")
expires_in = 10000
return ACCESS_TOKEN, expires_in
def revoke_refresh_token(self, refresh_token):
self.revoked_refresh_tokens.append(refresh_token)
def force_connection_errors(self, refresh_token):
self.connection_error_tokens.append(refresh_token)
token_generator = TokenGenerator()
monkeypatch.setattr('inbox.auth.oauth.OAuthAuthHandler.new_token',
token_generator.new_token)
return token_generator
def test_auth_revoke(
db, account_with_multiple_auth_creds, patch_access_token_getter):
account = account_with_multiple_auth_creds
refresh_token1 = account.auth_credentials[0].refresh_token
refresh_token2 = account.auth_credentials[1].refresh_token
assert len(account.auth_credentials) == 2
assert len(account.valid_auth_credentials) == 2
assert account.sync_contacts is True
assert account.sync_events is True
assert account.sync_state != 'invalid'
assert account.sync_should_run is True
patch_access_token_getter.revoke_refresh_token(refresh_token1)
with pytest.raises(OAuthError):
account.new_token(GOOGLE_CONTACTS_SCOPE)
assert account.new_token(GOOGLE_EMAIL_SCOPE).value == ACCESS_TOKEN
with pytest.raises(OAuthError):
account.new_token(GOOGLE_CALENDAR_SCOPE)
account.verify_all_credentials()
assert len(account.auth_credentials) == 2
assert len(account.valid_auth_credentials) == 1
assert account.sync_contacts is False
assert account.sync_events is False
assert account.sync_state != 'invalid'
assert account.sync_should_run is True
patch_access_token_getter.revoke_refresh_token(refresh_token2)
with pytest.raises(OAuthError):
account.new_token(GOOGLE_CONTACTS_SCOPE)
with pytest.raises(OAuthError):
account.new_token(GOOGLE_EMAIL_SCOPE)
with pytest.raises(OAuthError):
account.new_token(GOOGLE_CALENDAR_SCOPE)
account.verify_all_credentials()
assert len(account.auth_credentials) == 2
assert len(account.valid_auth_credentials) == 0
assert account.sync_state == 'invalid'
assert account.sync_should_run is False
def test_auth_revoke_different_order(
db, account_with_multiple_auth_creds, patch_access_token_getter):
account = account_with_multiple_auth_creds
refresh_token1 = account.auth_credentials[0].refresh_token
refresh_token2 = account.auth_credentials[1].refresh_token
assert len(account.auth_credentials) == 2
assert len(account.valid_auth_credentials) == 2
assert account.sync_contacts is True
assert account.sync_events is True
assert account.sync_state != 'invalid'
assert account.sync_should_run is True
patch_access_token_getter.revoke_refresh_token(refresh_token2)
assert account.new_token(GOOGLE_EMAIL_SCOPE).value == ACCESS_TOKEN
assert account.new_token(GOOGLE_CONTACTS_SCOPE).value == ACCESS_TOKEN
assert account.new_token(GOOGLE_CALENDAR_SCOPE).value == ACCESS_TOKEN
account.verify_all_credentials()
assert len(account.auth_credentials) == 2
assert account.sync_contacts is True
assert account.sync_events is True
assert account.sync_state != 'invalid'
assert account.sync_should_run is True
assert len(account.valid_auth_credentials) == 1
patch_access_token_getter.revoke_refresh_token(refresh_token1)
with pytest.raises(OAuthError):
account.new_token(GOOGLE_CONTACTS_SCOPE)
with pytest.raises(OAuthError):
account.new_token(GOOGLE_EMAIL_SCOPE)
with pytest.raises(OAuthError):
account.new_token(GOOGLE_CALENDAR_SCOPE)
account.verify_all_credentials()
assert len(account.auth_credentials) == 2
assert len(account.valid_auth_credentials) == 0
assert account.sync_contacts is False
assert account.sync_events is False
assert account.sync_state == 'invalid'
assert account.sync_should_run is False
def test_create_account(db):
email = '[email protected]'
resp = {'access_token': '',
'expires_in': 3600,
'email': email,
'family_name': '',
'given_name': '',
'name': '',
'gender': '',
'id': 0,
'user_id': '',
'id_token': '',
'link': 'http://example.com',
'locale': '',
'picture': '',
'hd': ''}
g = GmailAuthHandler('gmail')
g.verify_config = lambda x: True
# Auth me once...
token_1 = 'the_first_token'
client_id_1 = 'first client id'
client_secret_1 = 'first client secret'
scopes_1 = 'scope scop sco sc s'
scopes_1_list = scopes_1.split(' ')
first_auth_args = {
'refresh_token': token_1,
'scope': scopes_1,
'client_id': client_id_1,
'client_secret': client_secret_1
}
resp.update(first_auth_args)
account = g.create_account(email, resp)
db.session.add(account)
db.session.commit()
account_id = account.id
with session_scope(account_id) as db_session:
account = db_session.query(Account).filter(
Account.email_address == email).one()
assert account.id == account_id
assert isinstance(account, GmailAccount)
assert len(account.auth_credentials) == 1
auth_creds = account.auth_credentials[0]
assert auth_creds.client_id == client_id_1
assert auth_creds.client_secret == client_secret_1
assert auth_creds.scopes == scopes_1_list
assert auth_creds.refresh_token == token_1
def test_get_account(db):
email = '[email protected]'
resp = {'access_token': '',
'expires_in': 3600,
'email': email,
'family_name': '',
'given_name': '',
'name': '',
'gender': '',
'id': 0,
'user_id': '',
'id_token': '',
'link': 'http://example.com',
'locale': '',
'picture': '',
'hd': ''}
g = GmailAuthHandler('gmail')
g.verify_config = lambda x: True
# Auth me once...
token_1 = 'the_first_token'
client_id_1 = 'first client id'
client_secret_1 = 'first client secret'
scopes_1 = 'scope scop sco sc s'
scopes_1_list = scopes_1.split(' ')
first_auth_args = {
'refresh_token': token_1,
'scope': scopes_1,
'client_id': client_id_1,
'client_secret': client_secret_1
}
resp.update(first_auth_args)
account = g.get_account(SHARD_ID, email, resp)
db.session.add(account)
db.session.commit()
db.session.refresh(account)
assert len(account.auth_credentials) == 1
auth_creds = account.auth_credentials[0]
assert auth_creds.client_id == client_id_1
assert auth_creds.client_secret == client_secret_1
assert auth_creds.scopes == scopes_1_list
assert auth_creds.refresh_token == token_1
# Auth me twice...
token_2 = 'second_token_!'
client_id_2 = 'second client id'
client_secret_2 = 'second client secret'
scopes_2 = 'scope scop sco sc s'
scopes_2_list = scopes_2.split(' ')
second_auth_args = {
'refresh_token': token_2,
'scope': scopes_2,
'client_id': client_id_2,
'client_secret': client_secret_2
}
resp.update(second_auth_args)
account = g.get_account(SHARD_ID, email, resp)
db.session.merge(account)
db.session.commit()
assert len(account.auth_credentials) == 2
auth_creds = next((creds for creds in account.auth_credentials
if creds.refresh_token == token_2), False)
assert auth_creds
assert auth_creds.client_id == client_id_2
assert auth_creds.client_secret == client_secret_2
assert auth_creds.scopes == scopes_2_list
# Don't add duplicate row in GmailAuthCredentials for the same
# client_id/client_secret pair.
resp.update(first_auth_args)
resp['refresh_token'] = 'a new refresh token'
account = g.get_account(SHARD_ID, email, resp)
db.session.merge(account)
db.session.commit()
assert len(account.auth_credentials) == 2
# Should still work okay if we don't get a refresh token back
del resp['refresh_token']
account = g.get_account(SHARD_ID, email, resp)
db.session.merge(account)
db.session.commit()
assert len(account.auth_credentials) == 2
def test_g_token_manager(
db, patch_access_token_getter,
account_with_multiple_auth_creds,
account_with_single_auth_creds):
account = account_with_multiple_auth_creds
refresh_token1 = account.auth_credentials[0].refresh_token
refresh_token2 = account.auth_credentials[1].refresh_token
g_token_manager.clear_cache(account)
# existing account w/ multiple credentials, all valid
assert (g_token_manager.get_token(account, GOOGLE_EMAIL_SCOPE) ==
ACCESS_TOKEN)
assert (g_token_manager.get_token(account, GOOGLE_CONTACTS_SCOPE) ==
ACCESS_TOKEN)
assert (g_token_manager.get_token(account, GOOGLE_CALENDAR_SCOPE) ==
ACCESS_TOKEN)
for auth_creds in account.auth_credentials:
assert auth_creds.is_valid
# existing account w/ multiple credentials: some valid
patch_access_token_getter.revoke_refresh_token(refresh_token1)
g_token_manager.clear_cache(account)
with pytest.raises(OAuthError):
g_token_manager.get_token(account, GOOGLE_CONTACTS_SCOPE)
assert (g_token_manager.get_token(account, GOOGLE_EMAIL_SCOPE) ==
ACCESS_TOKEN)
with pytest.raises(OAuthError):
g_token_manager.get_token(account, GOOGLE_CALENDAR_SCOPE)
# existing account w/ multiple credentials: all invalid
patch_access_token_getter.revoke_refresh_token(refresh_token2)
g_token_manager.clear_cache(account)
with pytest.raises(OAuthError):
g_token_manager.get_token(account, GOOGLE_EMAIL_SCOPE)
with pytest.raises(OAuthError):
g_token_manager.get_token(account, GOOGLE_CALENDAR_SCOPE)
with pytest.raises(OAuthError):
g_token_manager.get_token(account, GOOGLE_CONTACTS_SCOPE)
db.session.refresh(account)
for auth_creds in account.auth_credentials:
assert not auth_creds.is_valid
# existing account w/ one credential
account = account_with_single_auth_creds
g_token_manager.clear_cache(account)
assert (g_token_manager.get_token(account, GOOGLE_EMAIL_SCOPE) ==
ACCESS_TOKEN)
assert (g_token_manager.get_token(account, GOOGLE_CALENDAR_SCOPE) ==
ACCESS_TOKEN)
with pytest.raises(OAuthError):
g_token_manager.get_token(account, GOOGLE_CONTACTS_SCOPE)
def test_new_token_with_non_oauth_error(
db, patch_access_token_getter, account_with_multiple_auth_creds):
account = account_with_multiple_auth_creds
refresh_token1 = account.auth_credentials[0].refresh_token
refresh_token2 = account.auth_credentials[1].refresh_token
g_token_manager.clear_cache(account)
assert account.new_token(GOOGLE_EMAIL_SCOPE).value == ACCESS_TOKEN
patch_access_token_getter.revoke_refresh_token(refresh_token1)
patch_access_token_getter.force_connection_errors(refresh_token2)
with pytest.raises(ConnectionError):
g_token_manager.get_token(account, GOOGLE_EMAIL_SCOPE)
db.session.refresh(account)
assert len(account.valid_auth_credentials) == 1
def test_invalid_token_during_connect(db, patch_access_token_getter,
account_with_single_auth_creds):
account_id = account_with_single_auth_creds.id
patch_access_token_getter.revoke_refresh_token(
account_with_single_auth_creds.auth_credentials[0].refresh_token)
account_with_single_auth_creds.verify_all_credentials()
assert len(account_with_single_auth_creds.valid_auth_credentials) == 0
g_token_manager.clear_cache(account_with_single_auth_creds)
# connect_account() takes an /expunged/ account object
# that has the necessary relationships eager-loaded
object_session(account_with_single_auth_creds).expunge(
account_with_single_auth_creds)
assert not object_session(account_with_single_auth_creds)
account = db.session.query(GmailAccount).options(
joinedload(GmailAccount.auth_credentials)).get(
account_id)
db.session.expunge(account)
assert not object_session(account)
g = GmailAuthHandler('gmail')
with pytest.raises(OAuthError):
g.connect_account(account)
invalid_account = db.session.query(GmailAccount).get(account_id)
for auth_creds in invalid_account.auth_credentials:
assert not auth_creds.is_valid
|
ChinaMassClouds/copenstack-server | refs/heads/master | openstack/src/ceilometer-2014.2.2/ceilometer/tests/network/statistics/opencontrail/test_driver.py | 6 | # Copyright (C) 2014 eNovance SAS <[email protected]>
#
# Author: Sylvain Afchain <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslotest import base
from six.moves.urllib import parse as urlparse
from ceilometer.network.statistics.opencontrail import driver
class TestOpencontrailDriver(base.BaseTestCase):
def setUp(self):
super(TestOpencontrailDriver, self).setUp()
self.nc_ports = mock.patch('ceilometer.neutron_client'
'.Client.port_get_all',
return_value=self.fake_ports())
self.nc_ports.start()
self.nc_networks = mock.patch('ceilometer.neutron_client'
'.Client.network_get_all',
return_value=self.fake_networks())
self.nc_networks.start()
self.driver = driver.OpencontrailDriver()
self.parse_url = urlparse.ParseResult('opencontrail',
'127.0.0.1:8143',
'/', None, None, None)
self.params = {'password': ['admin'],
'scheme': ['http'],
'username': ['admin'],
'verify_ssl': ['false']}
@staticmethod
def fake_ports():
return [{'admin_state_up': True,
'device_owner': 'compute:None',
'device_id': '674e553b-8df9-4321-87d9-93ba05b93558',
'extra_dhcp_opts': [],
'id': '96d49cc3-4e01-40ce-9cac-c0e32642a442',
'mac_address': 'fa:16:3e:c5:35:93',
'name': '',
'network_id': '298a3088-a446-4d5a-bad8-f92ecacd786b',
'status': 'ACTIVE',
'tenant_id': '89271fa581ab4380bf172f868c3615f9'}]
@staticmethod
def fake_networks():
return [{'admin_state_up': True,
'id': '298a3088-a446-4d5a-bad8-f92ecacd786b',
'name': 'public',
'provider:network_type': 'gre',
'provider:physical_network': None,
'provider:segmentation_id': 2,
'router:external': True,
'shared': False,
'status': 'ACTIVE',
'subnets': [u'c4b6f5b8-3508-4896-b238-a441f25fb492'],
'tenant_id': '62d6f08bbd3a44f6ad6f00ca15cce4e5'}]
@staticmethod
def fake_port_stats():
return {"value": [{
"name": "c588ebb7-ae52-485a-9f0c-b2791c5da196",
"value": {
"UveVirtualMachineAgent": {
"if_stats_list": [{
"out_bytes": 22,
"in_bandwidth_usage": 0,
"in_bytes": 23,
"out_bandwidth_usage": 0,
"out_pkts": 5,
"in_pkts": 6,
"name": ("674e553b-8df9-4321-87d9-93ba05b93558:"
"96d49cc3-4e01-40ce-9cac-c0e32642a442")
}]}}}]}
def _test_meter(self, meter_name, expected):
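        # stub the Opencontrail analytics API and check that the driver maps
        # the per-interface counters onto the expected samples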
with mock.patch('ceilometer.network.'
'statistics.opencontrail.'
'client.NetworksAPIClient.'
'get_port_statistics',
return_value=self.fake_port_stats()) as port_stats:
samples = self.driver.get_sample_data(meter_name, self.parse_url,
self.params, {})
self.assertEqual(expected, [s for s in samples])
net_id = '298a3088-a446-4d5a-bad8-f92ecacd786b'
port_stats.assert_called_with(net_id)
def test_switch_port_receive_packets(self):
expected = [
(6,
'96d49cc3-4e01-40ce-9cac-c0e32642a442',
{'device_owner_id': '674e553b-8df9-4321-87d9-93ba05b93558',
'network_id': '298a3088-a446-4d5a-bad8-f92ecacd786b',
'tenant_id': '89271fa581ab4380bf172f868c3615f9'},
mock.ANY)]
self._test_meter('switch.port.receive.packets', expected)
def test_switch_port_transmit_packets(self):
expected = [
(5,
'96d49cc3-4e01-40ce-9cac-c0e32642a442',
{'device_owner_id': '674e553b-8df9-4321-87d9-93ba05b93558',
'network_id': '298a3088-a446-4d5a-bad8-f92ecacd786b',
'tenant_id': '89271fa581ab4380bf172f868c3615f9'},
mock.ANY)]
self._test_meter('switch.port.transmit.packets', expected)
def test_switch_port_receive_bytes(self):
expected = [
(23,
'96d49cc3-4e01-40ce-9cac-c0e32642a442',
{'device_owner_id': '674e553b-8df9-4321-87d9-93ba05b93558',
'network_id': '298a3088-a446-4d5a-bad8-f92ecacd786b',
'tenant_id': '89271fa581ab4380bf172f868c3615f9'},
mock.ANY)]
self._test_meter('switch.port.receive.bytes', expected)
def test_switch_port_transmit_bytes(self):
expected = [
(22,
'96d49cc3-4e01-40ce-9cac-c0e32642a442',
{'device_owner_id': '674e553b-8df9-4321-87d9-93ba05b93558',
'network_id': '298a3088-a446-4d5a-bad8-f92ecacd786b',
'tenant_id': '89271fa581ab4380bf172f868c3615f9'},
mock.ANY)]
self._test_meter('switch.port.transmit.bytes', expected)
|
pranto157/django-pipeline | refs/heads/master | pipeline/compressors/yui.py | 47 | from __future__ import unicode_literals
from pipeline.conf import settings
from pipeline.compressors import SubProcessCompressor
class YUICompressor(SubProcessCompressor):
def compress_common(self, content, compress_type, arguments):
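        # build the YUI Compressor command line; the content itself is fed
        # to the subprocess by execute_command()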
command = '%s --type=%s %s' % (settings.PIPELINE_YUI_BINARY, compress_type, arguments)
return self.execute_command(command, content)
def compress_js(self, js):
return self.compress_common(js, 'js', settings.PIPELINE_YUI_JS_ARGUMENTS)
def compress_css(self, css):
return self.compress_common(css, 'css', settings.PIPELINE_YUI_CSS_ARGUMENTS)
|
ujjwalwahi/odoo | refs/heads/8.0 | addons/auth_signup/__openerp__.py | 313 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2009-today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
{
'name': 'Signup',
'description': """
Allow users to sign up and reset their password
===============================================
""",
'author': 'OpenERP SA',
'version': '1.0',
'category': 'Authentication',
'website': 'https://www.odoo.com',
'installable': True,
'auto_install': True,
'depends': [
'base_setup',
'email_template',
'web',
],
'data': [
'auth_signup_data.xml',
'res_config.xml',
'res_users_view.xml',
'views/auth_signup_login.xml',
],
'bootstrap': True,
}
|
allyjweir/lackawanna | refs/heads/master | lackawanna/transcript/tests.py | 24123 | from django.test import TestCase
# Create your tests here.
|
lihan/SAE-ChannelChat | refs/heads/master | channelchat/1/channelchat/chat/tests.py | 24123 | from django.test import TestCase
# Create your tests here.
|
wisechengyi/pants | refs/heads/master | tests/python/pants_test/process/test_lock.py | 2 | # Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import os
import shutil
import tempfile
import unittest
from multiprocessing import Manager, Process
from threading import Thread
from pants.process.lock import OwnerPrintingInterProcessFileLock
def hold_lock_until_terminate(path, lock_held, terminate):
lock = OwnerPrintingInterProcessFileLock(path)
lock.acquire()
lock_held.set()
# NOTE: We shouldn't ever wait this long, this is just to ensure
# we don't somehow leak child processes.
terminate.wait(60)
lock.release()
lock_held.clear()
class TestOwnerPrintingInterProcessFileLock(unittest.TestCase):
def setUp(self):
self.lock_dir = tempfile.mkdtemp()
self.lock_path = os.path.join(self.lock_dir, "lock")
self.lock = OwnerPrintingInterProcessFileLock(self.lock_path)
self.manager = Manager()
self.lock_held = self.manager.Event()
self.terminate = self.manager.Event()
self.lock_process = Process(
target=hold_lock_until_terminate, args=(self.lock_path, self.lock_held, self.terminate),
)
def tearDown(self):
self.terminate.set()
try:
shutil.rmtree(self.lock_dir)
except OSError:
pass
def test_non_blocking_attempt(self):
self.lock_process.start()
self.lock_held.wait()
self.assertFalse(self.lock.acquire(blocking=False))
def test_message(self):
self.lock_process.start()
self.lock_held.wait()
self.assertTrue(os.path.exists(self.lock.message_path))
with open(self.lock.message_path, "r") as f:
message_content = f.read()
self.assertIn(str(self.lock_process.pid), message_content)
os.unlink(self.lock.message_path)
def message_fn(message):
self.assertIn(self.lock.missing_message_output, message)
self.lock.acquire(blocking=False, message_fn=message_fn)
def test_blocking(self):
self.lock_process.start()
self.lock_held.wait()
self.assertFalse(self.lock.acquire(timeout=0.1))
acquire_is_blocking = self.manager.Event()
def terminate_subproc(terminate, acquire_is_blocking):
acquire_is_blocking.wait()
terminate.set()
Thread(target=terminate_subproc, args=(self.terminate, acquire_is_blocking)).start()
def message_fn(message):
self.assertIn(str(self.lock_process.pid), message)
acquire_is_blocking.set()
# NOTE: We shouldn't ever wait this long (locally this runs in ~milliseconds)
# but sometimes CI containers are extremely slow, so we choose a very large
# value just in case.
self.assertTrue(self.lock.acquire(timeout=30, message_fn=message_fn))
def test_reentrant(self):
self.assertTrue(self.lock.acquire())
self.assertTrue(self.lock.acquire())
def test_release(self):
self.assertTrue(self.lock.acquire())
self.assertTrue(self.lock.acquired)
self.lock.release()
self.assertFalse(self.lock.acquired)
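# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the test suite).
# The lock under test is a plain acquire/release file lock, so typical client
# code looks like the following (the path is illustrative):
#
#     lock = OwnerPrintingInterProcessFileLock('/tmp/my.lock')
#     if lock.acquire(blocking=False):
#         try:
#             pass  # critical section
#         finally:
#             lock.release()
# ---------------------------------------------------------------------------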
|
earthreader/web | refs/heads/master | setup.py | 3 | import os.path
import sys
try:
from setuptools import find_packages, setup
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import find_packages, setup
from setuptools.command.test import test
def readme():
try:
with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as f:
return f.read()
except (IOError, OSError):
return ''
class pytest(test):
def finalize_options(self):
test.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
from pytest import main
errno = main(self.test_args)
raise SystemExit(errno)
setup_requires = [
'libsass >= 0.3.0'
]
install_requires = [
'Flask >= 0.10',
'libearth >= 0.3.1',
'six',
'waitress'
]
if sys.version_info < (2, 7):
install_requires.append('argparse >= 1.1')
install_requires.extend(setup_requires)
setup(
name='EarthReader-Web',
version='0.3.1',
description='Earth Reader for Web',
long_description=readme(),
url='http://earthreader.org/',
author='Earth Reader team',
author_email='earthreader' '@' 'librelist.com',
entry_points={
'console_scripts': [
'earthreader = earthreader.web.command:main'
]
},
app=['earthreader/web/osx.py'],
license='AGPLv3 or later',
packages=find_packages(exclude=['tests']),
package_data={
'earthreader.web': ['templates/*.*', 'templates/*/*.*',
'static/*.*', 'static/*/*.*']
},
sass_manifests={
'earthreader.web': ('static/scss/', 'static/css/')
},
setup_requires=setup_requires,
install_requires=install_requires,
dependency_links=[
'https://github.com/earthreader/libearth/releases'
],
download_url='https://github.com/earthreader/web/releases',
tests_require=['pytest >= 2.5.0'],
cmdclass={'test': pytest},
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Framework :: Flask',
'Intended Audience :: Developers',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved ::'
' GNU Affero General Public License v3 or later (AGPLv3+)',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Programming Language :: Python :: Implementation :: Stackless',
'Topic :: Communications',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content :: News/Diary',
'Topic :: Internet :: WWW/HTTP :: Indexing/Search',
'Topic :: Internet :: WWW/HTTP :: WSGI :: Application',
'Topic :: Text Processing :: Markup :: XML'
]
)
|
Kingdread/qutebrowser | refs/heads/master | qutebrowser/browser/network/networkreply.py | 4 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2015 Florian Bruhin (The Compiler) <[email protected]>
#
# Based on the Eric5 helpviewer,
# Copyright (c) 2009 - 2014 Detlev Offenbach <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Special network replies.."""
from PyQt5.QtNetwork import QNetworkReply, QNetworkRequest
from PyQt5.QtCore import pyqtSlot, QIODevice, QByteArray, QTimer
class FixedDataNetworkReply(QNetworkReply):
"""QNetworkReply subclass for fixed data."""
def __init__(self, request, fileData, mimeType, parent=None):
"""Constructor.
Args:
request: reference to the request object (QNetworkRequest)
fileData: reference to the data buffer (QByteArray)
mimeType: for the reply (string)
parent: reference to the parent object (QObject)
"""
super().__init__(parent)
self._data = fileData
self.setRequest(request)
self.setUrl(request.url())
self.setOpenMode(QIODevice.ReadOnly)
self.setHeader(QNetworkRequest.ContentTypeHeader, mimeType)
self.setHeader(QNetworkRequest.ContentLengthHeader,
QByteArray.number(len(fileData)))
self.setAttribute(QNetworkRequest.HttpStatusCodeAttribute, 200)
self.setAttribute(QNetworkRequest.HttpReasonPhraseAttribute, 'OK')
# For some reason, a segfault will be triggered if these lambdas aren't
# there.
# pylint: disable=unnecessary-lambda
QTimer.singleShot(0, lambda: self.metaDataChanged.emit())
QTimer.singleShot(0, lambda: self.readyRead.emit())
QTimer.singleShot(0, lambda: self.finished.emit())
@pyqtSlot()
def abort(self):
"""Abort the operation."""
pass
def bytesAvailable(self):
"""Determine the bytes available for being read.
Return:
bytes available (int)
"""
return len(self._data) + super().bytesAvailable()
def readData(self, maxlen):
"""Retrieve data from the reply object.
Args:
maxlen maximum number of bytes to read (int)
Return:
bytestring containing the data
"""
len_ = min(maxlen, len(self._data))
buf = bytes(self._data[:len_])
self._data = self._data[len_:]
return buf
def isFinished(self):
return True
def isRunning(self):
return False
class ErrorNetworkReply(QNetworkReply):
"""QNetworkReply which always returns an error."""
def __init__(self, req, errorstring, error, parent=None):
"""Constructor.
Args:
req: The QNetworkRequest associated with this reply.
errorstring: The error string to print.
error: The numerical error value.
parent: The parent to pass to QNetworkReply.
"""
super().__init__(parent)
self.setRequest(req)
self.setUrl(req.url())
# We don't actually want to read anything, but we still need to open
# the device to avoid getting a warning.
self.setOpenMode(QIODevice.ReadOnly)
self.setError(error, errorstring)
# For some reason, a segfault will be triggered if these lambdas aren't
# there.
# pylint: disable=unnecessary-lambda
QTimer.singleShot(0, lambda: self.error.emit(error))
QTimer.singleShot(0, lambda: self.finished.emit())
def abort(self):
"""Do nothing since it's a fake reply."""
pass
def bytesAvailable(self):
"""We always have 0 bytes available."""
return 0
    def readData(self, maxlen):  # maxlen is required by the QIODevice interface
"""No data available."""
return bytes()
def isFinished(self):
return True
def isRunning(self):
return False
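# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; requires a running Qt event
# loop). A QNetworkAccessManager subclass -- assumed here, not part of this
# module -- would typically hand these replies out from createRequest():
#
#     class FixedDataNetworkAccessManager(QNetworkAccessManager):
#         def createRequest(self, op, request, outgoing_data=None):
#             data = QByteArray(b'hello world')
#             return FixedDataNetworkReply(request, data, 'text/plain', self)
# ---------------------------------------------------------------------------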
|
hand-iemura/lightpng | refs/heads/master | boost_1_53_0/libs/python/test/return_arg.py | 46 | # Copyright David Abrahams 2004. Distributed under the Boost
# Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
'''
>>> from return_arg_ext import *
>>> l1=Label()
>>> assert l1 is l1.label("bar")
>>> assert l1 is l1.label("bar").sensitive(0)
>>> assert l1.label("foo").sensitive(0) is l1.sensitive(1).label("bar")
>>> assert return_arg is return_arg(return_arg)
'''
def run(args = None):
import sys
import doctest
if args is not None:
sys.argv = args
return doctest.testmod(sys.modules.get(__name__))
if __name__ == '__main__':
print "running..."
import sys
status = run()[0]
if (status == 0): print "Done."
sys.exit(status)
|
bukalov/phantomjs | refs/heads/master | src/qt/qtwebkit/Tools/Scripts/webkitpy/style/main_unittest.py | 124 | # Copyright (C) 2010 Chris Jerdonek ([email protected])
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest2 as unittest
from main import change_directory
from webkitpy.common.system.filesystem_mock import MockFileSystem
from webkitpy.common.system.logtesting import LogTesting
class ChangeDirectoryTest(unittest.TestCase):
_original_directory = "/original"
_checkout_root = "/WebKit"
def setUp(self):
self._log = LogTesting.setUp(self)
self.filesystem = MockFileSystem(dirs=[self._original_directory, self._checkout_root], cwd=self._original_directory)
def tearDown(self):
self._log.tearDown()
def _change_directory(self, paths, checkout_root):
return change_directory(self.filesystem, paths=paths, checkout_root=checkout_root)
def _assert_result(self, actual_return_value, expected_return_value,
expected_log_messages, expected_current_directory):
self.assertEqual(actual_return_value, expected_return_value)
self._log.assertMessages(expected_log_messages)
self.assertEqual(self.filesystem.getcwd(), expected_current_directory)
def test_paths_none(self):
paths = self._change_directory(checkout_root=self._checkout_root, paths=None)
self._assert_result(paths, None, [], self._checkout_root)
def test_paths_convertible(self):
paths = ["/WebKit/foo1.txt", "/WebKit/foo2.txt"]
paths = self._change_directory(checkout_root=self._checkout_root, paths=paths)
self._assert_result(paths, ["foo1.txt", "foo2.txt"], [], self._checkout_root)
def test_with_scm_paths_unconvertible(self):
paths = ["/WebKit/foo1.txt", "/outside/foo2.txt"]
paths = self._change_directory(checkout_root=self._checkout_root, paths=paths)
log_messages = [
"""WARNING: Path-dependent style checks may not work correctly:
One of the given paths is outside the WebKit checkout of the current
working directory:
Path: /outside/foo2.txt
Checkout root: /WebKit
Pass only files below the checkout root to ensure correct results.
See the help documentation for more info.
"""]
self._assert_result(paths, paths, log_messages, self._original_directory)
|
playingaround2017/test123 | refs/heads/master | gamera/paths.py | 2 | # -*- mode: python; indent-tabs-mode: nil; tab-width: 3 -*-
# vim: set tabstop=3 shiftwidth=3 expandtab:
#
# Copyright (C) 2001-2005 Ichiro Fujinaga, Michael Droettboom,
# and Karl MacMillan
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
from __future__ import generators
import os, sys, dircache, glob, imp # Python standard library
if 1:
def dummy():
pass
lib = os.path.dirname(os.path.realpath(dummy.func_code.co_filename))
lib_gui = os.path.realpath(os.path.join(lib, "gui"))
# Figure out if we are in the source directory or installed
plugins = os.path.realpath(os.path.join(lib, "plugins"))
doc = os.path.realpath(os.path.join(lib, "doc"))
sys.path.append(plugins)
plugins_src = ""
toolkits = os.path.realpath(os.path.join(lib, "toolkits"))
test = os.path.realpath(os.path.join(lib, "test"))
test_results = os.path.realpath(os.path.join(lib, "test/results"))
def get_toolkit_names(dir):
toolkits = []
listing = dircache.listdir(dir)
dircache.annotate(dir, listing)
for toolkit in listing:
if toolkit.endswith(".py") and toolkit != "__init__.py":
toolkits.append(toolkit[:-3])
elif toolkit.endswith("module.so"):
toolkits.append(toolkit[:-9])
elif (toolkit.endswith("/") and
"__init__.py" in dircache.listdir(os.path.join(dir, toolkit))):
toolkits.append(toolkit[:-1])
return toolkits
def get_directory_of_modules(dir, base=''):
modules = glob.glob(os.path.join(dir, "*.py"))
names = [os.path.basename(x).split('.')[0] for x in modules]
mods = []
suffixes = imp.get_suffixes()
for i in suffixes:
if i[0] == '.py':
suffix = i
break
for m, name in zip(modules, names):
try:
module = imp.load_module(base + name, file(m, 'r'), m, suffix)
mods.append(module)
except Exception, e:
print e
return mods
def import_directory(dir, gl, lo, verbose=0):
modules = glob.glob(os.path.join(dir, "*.py"))
modules = [os.path.basename(x).split('.')[0] for x in modules]
if verbose:
sys.stdout.write("Loading plugins: " + "-" * 40 + "\n")
column = 0
first = 1
result = []
for m in modules:
if m == '__init__':
continue
try:
module = __import__(m, gl, lo, [])
failed = 0
except Exception, e:
failed = e
        if failed:
            display = '[%s %s]' % (m, str(failed))
        else:
            display = m
            result.append(module)
if m != modules[-1]:
display += ", "
column += len(display)
if verbose:
if column > 70:
sys.stdout.write("\n")
column = len(display)
sys.stdout.write(display)
sys.stdout.flush()
if verbose:
sys.stdout.write("\n")
return result
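# ---------------------------------------------------------------------------
# Hedged usage sketch (Python 2, matching this module; added for
# illustration, not part of Gamera). Assuming the package is importable as
# gamera.paths, the plugin modules can be loaded with:
#
#     from gamera import paths
#     mods = paths.import_directory(paths.plugins, globals(), locals(),
#                                   verbose=1)
# ---------------------------------------------------------------------------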
|
Intel-Corporation/tensorflow | refs/heads/master | tensorflow/contrib/distribute/python/collective_all_reduce_strategy.py | 4 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Class CollectiveAllReduceStrategy implementing DistributionStrategy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.distribute import collective_all_reduce_strategy
from tensorflow.python.distribute import cross_device_ops as cross_device_ops_lib
from tensorflow.python.distribute import distribute_lib
from tensorflow.python.distribute.cluster_resolver import SimpleClusterResolver
from tensorflow.python.distribute.cluster_resolver import TFConfigClusterResolver
# TODO(yuefengz): support in-graph replication.
class CollectiveAllReduceStrategy(distribute_lib.StrategyV1):
"""Distribution strategy that uses collective ops for all-reduce.
*** contrib version ***
It is similar to the MirroredStrategy but it uses collective ops for
reduction.
When `cluster_spec` is given by the `configure` method, it turns into the
  multi-worker version that works on multiple workers with between-graph
replication.
Note: `configure` will be called by higher-level APIs if running in
  a distributed environment.
"""
def __init__(self,
num_gpus_per_worker=0,
communication=cross_device_ops_lib.CollectiveCommunication.AUTO):
"""Initializes the object.
Args:
num_gpus_per_worker: number of local GPUs or GPUs per worker, the default
is 0 meaning CPU only.
communication: optional Enum of type
`distribute.experimental.CollectiveCommunication`. This provides a way
for the user to override the choice of collective op communication.
Possible values include `AUTO`, `RING`, and `NCCL`.
"""
super(CollectiveAllReduceStrategy, self).__init__(
CollectiveAllReduceExtended(
self,
num_gpus_per_worker=num_gpus_per_worker,
communication=communication))
class CollectiveAllReduceExtended(
collective_all_reduce_strategy.CollectiveAllReduceExtended):
"""Implementation of CollectiveAllReduceStrategy."""
def __init__(self,
container_strategy,
num_gpus_per_worker,
communication):
# Use TFConfigClusterResolver to parse TF_CONFIG. We don't want to change
# the constructor's interface to allow customized cluster resolver. Use
# SimpleClusterResolver to override num_accelerators.
tfconfig = TFConfigClusterResolver()
cluster_resolver = SimpleClusterResolver(
cluster_spec=tfconfig.cluster_spec(),
task_type=tfconfig.task_type,
task_id=tfconfig.task_id,
num_accelerators={"GPU": num_gpus_per_worker},
rpc_layer=tfconfig.rpc_layer)
super(CollectiveAllReduceExtended, self).__init__(
container_strategy,
communication=communication,
cluster_resolver=cluster_resolver)
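# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of this module).
# It assumes a valid TF_CONFIG environment variable describing the cluster;
# `build_model` is a hypothetical user-supplied helper.
#
#     strategy = CollectiveAllReduceStrategy(num_gpus_per_worker=1)
#     with strategy.scope():
#         model = build_model()
# ---------------------------------------------------------------------------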
|
iamutkarshtiwari/sympy | refs/heads/master | sympy/concrete/__init__.py | 123 | from .products import product, Product
from .summations import summation, Sum
|
westinedu/similarinterest | refs/heads/master | django/db/models/sql/where.py | 10 | """
Code to manage the creation and SQL rendering of 'where' constraints.
"""
from __future__ import absolute_import
import datetime
from itertools import repeat
from django.utils import tree
from django.db.models.fields import Field
from django.db.models.sql.datastructures import EmptyResultSet, FullResultSet
from django.db.models.sql.aggregates import Aggregate
# Connection types
AND = 'AND'
OR = 'OR'
class EmptyShortCircuit(Exception):
"""
Internal exception used to indicate that a "matches nothing" node should be
added to the where-clause.
"""
pass
class WhereNode(tree.Node):
"""
Used to represent the SQL where-clause.
The class is tied to the Query class that created it (in order to create
the correct SQL).
The children in this tree are usually either Q-like objects or lists of
[table_alias, field_name, db_type, lookup_type, value_annotation, params].
However, a child could also be any class with as_sql() and relabel_aliases() methods.
"""
default = AND
def add(self, data, connector):
"""
Add a node to the where-tree. If the data is a list or tuple, it is
expected to be of the form (obj, lookup_type, value), where obj is
a Constraint object, and is then slightly munged before being stored
(to avoid storing any reference to field objects). Otherwise, the 'data'
is stored unchanged and can be any class with an 'as_sql()' method.
"""
if not isinstance(data, (list, tuple)):
super(WhereNode, self).add(data, connector)
return
obj, lookup_type, value = data
if hasattr(value, '__iter__') and hasattr(value, 'next'):
# Consume any generators immediately, so that we can determine
# emptiness and transform any non-empty values correctly.
value = list(value)
# The "value_annotation" parameter is used to pass auxilliary information
# about the value(s) to the query construction. Specifically, datetime
# and empty values need special handling. Other types could be used
# here in the future (using Python types is suggested for consistency).
if isinstance(value, datetime.datetime):
value_annotation = datetime.datetime
elif hasattr(value, 'value_annotation'):
value_annotation = value.value_annotation
else:
value_annotation = bool(value)
if hasattr(obj, "prepare"):
value = obj.prepare(lookup_type, value)
super(WhereNode, self).add(
(obj, lookup_type, value_annotation, value), connector)
def as_sql(self, qn, connection):
"""
Returns the SQL version of the where clause and the value to be
        substituted in. Returns (None, []) if this node is empty.
If 'node' is provided, that is the root of the SQL generation
(generally not needed except by the internal implementation for
recursion).
"""
if not self.children:
return None, []
result = []
result_params = []
empty = True
for child in self.children:
try:
if hasattr(child, 'as_sql'):
sql, params = child.as_sql(qn=qn, connection=connection)
else:
# A leaf node in the tree.
sql, params = self.make_atom(child, qn, connection)
except EmptyResultSet:
if self.connector == AND and not self.negated:
# We can bail out early in this particular case (only).
raise
elif self.negated:
empty = False
continue
except FullResultSet:
if self.connector == OR:
if self.negated:
empty = True
break
# We match everything. No need for any constraints.
return '', []
if self.negated:
empty = True
continue
empty = False
if sql:
result.append(sql)
result_params.extend(params)
if empty:
raise EmptyResultSet
conn = ' %s ' % self.connector
sql_string = conn.join(result)
if sql_string:
if self.negated:
sql_string = 'NOT (%s)' % sql_string
elif len(self.children) != 1:
sql_string = '(%s)' % sql_string
return sql_string, result_params
def make_atom(self, child, qn, connection):
"""
Turn a tuple (Constraint(table_alias, column_name, db_type),
lookup_type, value_annotation, params) into valid SQL.
The first item of the tuple may also be an Aggregate.
Returns the string for the SQL fragment and the parameters to use for
it.
"""
lvalue, lookup_type, value_annotation, params_or_value = child
if isinstance(lvalue, Constraint):
try:
lvalue, params = lvalue.process(lookup_type, params_or_value, connection)
except EmptyShortCircuit:
raise EmptyResultSet
elif isinstance(lvalue, Aggregate):
params = lvalue.field.get_db_prep_lookup(lookup_type, params_or_value, connection)
else:
raise TypeError("'make_atom' expects a Constraint or an Aggregate "
"as the first item of its 'child' argument.")
if isinstance(lvalue, tuple):
# A direct database column lookup.
field_sql = self.sql_for_columns(lvalue, qn, connection)
else:
# A smart object with an as_sql() method.
field_sql = lvalue.as_sql(qn, connection)
if value_annotation is datetime.datetime:
cast_sql = connection.ops.datetime_cast_sql()
else:
cast_sql = '%s'
if hasattr(params, 'as_sql'):
extra, params = params.as_sql(qn, connection)
cast_sql = ''
else:
extra = ''
if (len(params) == 1 and params[0] == '' and lookup_type == 'exact'
and connection.features.interprets_empty_strings_as_nulls):
lookup_type = 'isnull'
value_annotation = True
if lookup_type in connection.operators:
format = "%s %%s %%s" % (connection.ops.lookup_cast(lookup_type),)
return (format % (field_sql,
connection.operators[lookup_type] % cast_sql,
extra), params)
if lookup_type == 'in':
if not value_annotation:
raise EmptyResultSet
if extra:
return ('%s IN %s' % (field_sql, extra), params)
max_in_list_size = connection.ops.max_in_list_size()
if max_in_list_size and len(params) > max_in_list_size:
# Break up the params list into an OR of manageable chunks.
in_clause_elements = ['(']
for offset in xrange(0, len(params), max_in_list_size):
if offset > 0:
in_clause_elements.append(' OR ')
in_clause_elements.append('%s IN (' % field_sql)
group_size = min(len(params) - offset, max_in_list_size)
param_group = ', '.join(repeat('%s', group_size))
in_clause_elements.append(param_group)
in_clause_elements.append(')')
in_clause_elements.append(')')
return ''.join(in_clause_elements), params
else:
return ('%s IN (%s)' % (field_sql,
', '.join(repeat('%s', len(params)))),
params)
elif lookup_type in ('range', 'year'):
return ('%s BETWEEN %%s and %%s' % field_sql, params)
elif lookup_type in ('month', 'day', 'week_day'):
return ('%s = %%s' % connection.ops.date_extract_sql(lookup_type, field_sql),
params)
elif lookup_type == 'isnull':
return ('%s IS %sNULL' % (field_sql,
(not value_annotation and 'NOT ' or '')), ())
elif lookup_type == 'search':
return (connection.ops.fulltext_search_sql(field_sql), params)
elif lookup_type in ('regex', 'iregex'):
return connection.ops.regex_lookup(lookup_type) % (field_sql, cast_sql), params
raise TypeError('Invalid lookup_type: %r' % lookup_type)
def sql_for_columns(self, data, qn, connection):
"""
Returns the SQL fragment used for the left-hand side of a column
constraint (for example, the "T1.foo" portion in the clause
"WHERE ... T1.foo = 6").
"""
table_alias, name, db_type = data
if table_alias:
lhs = '%s.%s' % (qn(table_alias), qn(name))
else:
lhs = qn(name)
return connection.ops.field_cast_sql(db_type) % lhs
def relabel_aliases(self, change_map, node=None):
"""
Relabels the alias values of any children. 'change_map' is a dictionary
mapping old (current) alias values to the new values.
"""
if not node:
node = self
for pos, child in enumerate(node.children):
if hasattr(child, 'relabel_aliases'):
child.relabel_aliases(change_map)
elif isinstance(child, tree.Node):
self.relabel_aliases(change_map, child)
elif isinstance(child, (list, tuple)):
if isinstance(child[0], (list, tuple)):
elt = list(child[0])
if elt[0] in change_map:
elt[0] = change_map[elt[0]]
node.children[pos] = (tuple(elt),) + child[1:]
else:
child[0].relabel_aliases(change_map)
# Check if the query value also requires relabelling
if hasattr(child[3], 'relabel_aliases'):
child[3].relabel_aliases(change_map)
class EverythingNode(object):
"""
A node that matches everything.
"""
def as_sql(self, qn=None, connection=None):
raise FullResultSet
def relabel_aliases(self, change_map, node=None):
return
class NothingNode(object):
"""
A node that matches nothing.
"""
def as_sql(self, qn=None, connection=None):
raise EmptyResultSet
def relabel_aliases(self, change_map, node=None):
return
class ExtraWhere(object):
def __init__(self, sqls, params):
self.sqls = sqls
self.params = params
def as_sql(self, qn=None, connection=None):
sqls = ["(%s)" % sql for sql in self.sqls]
return " AND ".join(sqls), tuple(self.params or ())
class Constraint(object):
"""
An object that can be passed to WhereNode.add() and knows how to
pre-process itself prior to including in the WhereNode.
"""
def __init__(self, alias, col, field):
self.alias, self.col, self.field = alias, col, field
def __getstate__(self):
"""Save the state of the Constraint for pickling.
Fields aren't necessarily pickleable, because they can have
callable default values. So, instead of pickling the field
store a reference so we can restore it manually
"""
obj_dict = self.__dict__.copy()
if self.field:
obj_dict['model'] = self.field.model
obj_dict['field_name'] = self.field.name
del obj_dict['field']
return obj_dict
def __setstate__(self, data):
"""Restore the constraint """
model = data.pop('model', None)
field_name = data.pop('field_name', None)
self.__dict__.update(data)
if model is not None:
self.field = model._meta.get_field(field_name)
else:
self.field = None
def prepare(self, lookup_type, value):
if self.field:
return self.field.get_prep_lookup(lookup_type, value)
return value
def process(self, lookup_type, value, connection):
"""
Returns a tuple of data suitable for inclusion in a WhereNode
instance.
"""
# Because of circular imports, we need to import this here.
from django.db.models.base import ObjectDoesNotExist
try:
if self.field:
params = self.field.get_db_prep_lookup(lookup_type, value,
connection=connection, prepared=True)
db_type = self.field.db_type(connection=connection)
else:
# This branch is used at times when we add a comparison to NULL
# (we don't really want to waste time looking up the associated
# field object at the calling location).
params = Field().get_db_prep_lookup(lookup_type, value,
connection=connection, prepared=True)
db_type = None
except ObjectDoesNotExist:
raise EmptyShortCircuit
return (self.alias, self.col, db_type), params
def relabel_aliases(self, change_map):
if self.alias in change_map:
self.alias = change_map[self.alias]
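# ---------------------------------------------------------------------------
# Hedged illustration (added; not part of Django). ExtraWhere is the simplest
# node above -- it parenthesizes raw SQL fragments and joins them with AND,
# so, with no database connection required:
#
#     ExtraWhere(["price > %s", "qty < %s"], [10, 5]).as_sql()
#     # -> ('(price > %s) AND (qty < %s)', (10, 5))
# ---------------------------------------------------------------------------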
|
conejoninja/plugin.video.pelisalacarta | refs/heads/master | lib/gdata/tlslite/utils/compat.py | 361 | """Miscellaneous functions to mask Python version differences."""
import sys
import os
if sys.version_info < (2,2):
raise AssertionError("Python 2.2 or later required")
if sys.version_info < (2,3):
def enumerate(collection):
return zip(range(len(collection)), collection)
class Set:
def __init__(self, seq=None):
self.values = {}
if seq:
for e in seq:
self.values[e] = None
def add(self, e):
self.values[e] = None
def discard(self, e):
if e in self.values.keys():
del(self.values[e])
def union(self, s):
ret = Set()
for e in self.values.keys():
ret.values[e] = None
for e in s.values.keys():
ret.values[e] = None
return ret
def issubset(self, other):
for e in self.values.keys():
if e not in other.values.keys():
return False
return True
        def __nonzero__(self):
return len(self.values.keys())
def __contains__(self, e):
return e in self.values.keys()
def __iter__(self):
            return iter(self.values.keys())
if os.name != "java":
import array
def createByteArraySequence(seq):
return array.array('B', seq)
def createByteArrayZeros(howMany):
return array.array('B', [0] * howMany)
def concatArrays(a1, a2):
return a1+a2
def bytesToString(bytes):
return bytes.tostring()
def stringToBytes(s):
bytes = createByteArrayZeros(0)
bytes.fromstring(s)
return bytes
import math
def numBits(n):
if n==0:
return 0
s = "%x" % n
return ((len(s)-1)*4) + \
{'0':0, '1':1, '2':2, '3':2,
'4':3, '5':3, '6':3, '7':3,
'8':4, '9':4, 'a':4, 'b':4,
'c':4, 'd':4, 'e':4, 'f':4,
}[s[0]]
        # Unreachable legacy fallback: int(math.floor(math.log(n, 2)) + 1)
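    # Worked example (added for illustration): numBits(0x12) builds s == "12",
    # so the result is ((len(s) - 1) * 4) + 1 == 5 (the dict maps '1' to 1),
    # and 18 does indeed need 5 bits.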
BaseException = Exception
import sys
import traceback
def formatExceptionTrace(e):
newStr = "".join(traceback.format_exception(sys.exc_type, sys.exc_value, sys.exc_traceback))
return newStr
else:
#Jython 2.1 is missing lots of python 2.3 stuff,
#which we have to emulate here:
#NOTE: JYTHON SUPPORT NO LONGER WORKS, DUE TO USE OF GENERATORS.
    #THIS CODE IS LEFT IN SO THAT ONCE JYTHON UPDATES TO 2.2, IT HAS A
#CHANCE OF WORKING AGAIN.
import java
import jarray
def createByteArraySequence(seq):
if isinstance(seq, type("")): #If it's a string, convert
seq = [ord(c) for c in seq]
return jarray.array(seq, 'h') #use short instead of bytes, cause bytes are signed
def createByteArrayZeros(howMany):
return jarray.zeros(howMany, 'h') #use short instead of bytes, cause bytes are signed
def concatArrays(a1, a2):
l = list(a1)+list(a2)
return createByteArraySequence(l)
#WAY TOO SLOW - MUST BE REPLACED------------
def bytesToString(bytes):
return "".join([chr(b) for b in bytes])
def stringToBytes(s):
bytes = createByteArrayZeros(len(s))
for count, c in enumerate(s):
bytes[count] = ord(c)
return bytes
#WAY TOO SLOW - MUST BE REPLACED------------
def numBits(n):
if n==0:
return 0
        n = 1L * n  # convert to long, if it isn't already
return n.__tojava__(java.math.BigInteger).bitLength()
#Adjust the string to an array of bytes
def stringToJavaByteArray(s):
bytes = jarray.zeros(len(s), 'b')
for count, c in enumerate(s):
x = ord(c)
if x >= 128: x -= 256
bytes[count] = x
return bytes
BaseException = java.lang.Exception
import sys
import traceback
def formatExceptionTrace(e):
newStr = "".join(traceback.format_exception(sys.exc_type, sys.exc_value, sys.exc_traceback))
return newStr |
mprego/NBA | refs/heads/master | Regression/tests/Reg_Model_tests.py | 1 | # -*- coding: utf-8 -*-
"""
Created on Sun May 29 14:15:09 2016
@author: Matt
"""
import pandas as pd
from unittest import TestCase
from Regression.Reg_Model import Reg_Model
class TestModel(TestCase):
def test_ridge(self):
model=Reg_Model()
x = pd.DataFrame({'x1': [1,2,3,4]})
y= [1, 2, 3, 4]
ridge=model.ridge_reg(x,y)
pred=int(ridge.predict(x)[0])
expected_pred = 1
self.assertEqual(pred, expected_pred)
def test_gbm(self):
model=Reg_Model()
x = pd.DataFrame({'x1': [1,2,3,4]})
y= [1, 2, 3, 4]
gbm=model.gbm_reg(x,y)
pred=int(gbm.predict(x)[0])
expected_pred = 1
self.assertEqual(pred, expected_pred)
def test_pred(self):
model=Reg_Model()
x = pd.DataFrame({'x1': [1,2,3,4]})
y= [1, 2, 3, 4]
model.set_training(x,y)
model.calc_model()
pred = int(model.get_pred(x)[0])
expected_pred = 1
self.assertEqual(pred, expected_pred)
def test_floor_x(self):
model = Reg_Model()
x = pd.DataFrame({'x1':[1,2,3,4,3,2,1,200], 'x2':[20,21,20, 21,20,20,20,24]})
y = [1, 2, 3, 4, 4, 5, 4, 3]
model.set_training(x,y)
max_x1 = max(model.get_x()['x1'])
exp_max_x1 = 200
self.assertNotEqual(max_x1, exp_max_x1)
def test_floor_y(self):
model = Reg_Model()
x = pd.DataFrame({'x1':[1,2,3,4,3,2,1,200], 'x2':[20,21,20, 21,20,20,20,24]})
y = [1, 2, 3, 4, 4, 5, 4, 3]
model.set_training(x,y)
max_y = max(model.get_y())
exp_max_y = 0
self.assertNotEqual(max_y, exp_max_y) |
hedaoyuan/Paddle | refs/heads/master | python/paddle/trainer_config_helpers/tests/layers_test_config.py | 16 | # Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle.trainer_config_helpers import *
num_classes = 5
x = data_layer(name="input1", size=3)
y = data_layer(name="input2", size=5)
z = out_prod_layer(input1=x, input2=y)
x1 = fc_layer(input=x, size=5)
y1 = fc_layer(input=y, size=5)
z1 = mixed_layer(
act=LinearActivation(),
input=[
conv_operator(
img=x1,
filter=y1,
filter_size=1,
num_filters=5,
num_channels=5,
stride=1)
])
assert z1.size > 0
y2 = fc_layer(input=y, size=15)
z2 = rotate_layer(input=y2, height=5, width=3)
cos1 = cos_sim(a=x1, b=y1)
cos3 = cos_sim(a=x1, b=y2, size=3)
linear_comb = linear_comb_layer(weights=x1, vectors=y2, size=3)
out = fc_layer(
input=[cos1, cos3, linear_comb, z, z1, z2],
size=num_classes,
act=SoftmaxActivation())
print_layer(input=[out])
outputs(classification_cost(out, data_layer(name="label", size=num_classes)))
dotmul = mixed_layer(
input=[dotmul_operator(
a=x1, b=x1), dotmul_projection(input=y1)])
proj_with_attr_init = mixed_layer(
input=full_matrix_projection(
input=y1,
param_attr=ParamAttr(
learning_rate=0, initial_mean=0, initial_std=0)),
bias_attr=ParamAttr(
initial_mean=0, initial_std=0, learning_rate=0),
act=LinearActivation(),
size=5,
name='proj_with_attr_init')
# for ctc
tmp = fc_layer(
input=[x1, dotmul, proj_with_attr_init],
size=num_classes + 1,
act=SoftmaxActivation())
ctc = ctc_layer(input=tmp, label=y, size=num_classes + 1)
ctc_eval = ctc_error_evaluator(input=tmp, label=y)
settings(
batch_size=10,
learning_rate=2e-3,
learning_method=AdamOptimizer(),
regularization=L2Regularization(8e-4),
gradient_clipping_threshold=25)
|
all3fox/algos-py | refs/heads/main | src/graph/concomp.py | 1 | from collections import deque
def concomp0(graph):
"""
Performs depth-first search to find connected components of a given graph
Args:
graph: an undirected graph (a forest of connected components).
Returns:
a dictionary {node: integer} where the integer is the same for
those nodes which belong to the same connected component.
"""
def dfs(node):
cc[node] = i
for n in graph[node]:
if cc[n] is not None:
continue
dfs(n)
cc = {node: None for node in graph}
for i, n in enumerate(graph):
if cc[n] is not None:
continue
dfs(n)
return cc
def concomp1(graph):
"""
Performs breadth-first search to find connected components of a given graph
Args:
graph: an undirected graph (a forest of connected components).
Returns:
a dictionary {node: integer} where the integer is the same for
those nodes which belong to the same connected component.
"""
cc, i = {node: None for node in graph}, 0
nodes = deque(maxlen=len(graph))
for node in graph:
if cc[node] is not None:
continue
nodes.append(node)
while nodes:
src = nodes.popleft()
cc[src] = i
nodes.extend([dst for dst in graph[src] if cc[dst] is None])
i += 1
return cc
def concomp2(graph):
"""
    Find connected components in undirected graphs with an iterative,
    stack-based (depth-first) search.
Args:
graph: an undirected graph (a forest of connected components).
Returns:
a dictionary {node: integer} where the integer is the same for
those nodes whose connected component is the same.
"""
cc = {node: None for node in graph}
for i, node in enumerate(graph):
if cc[node] is not None:
continue
nodes = [node]
while nodes:
src = nodes.pop()
cc[src] = i
nodes.extend([dst for dst in graph[src] if cc[dst] is None])
return cc
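if __name__ == "__main__":
    # Hedged demo (added for illustration; not part of the original module):
    # nodes 0-2 form one component and nodes 3-4 another, so every
    # implementation should assign them two distinct component ids.
    demo = {0: [1], 1: [0, 2], 2: [1], 3: [4], 4: [3]}
    for fn in (concomp0, concomp1, concomp2):
        print(fn.__name__, fn(demo))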
|
ArnoldJunior/simuladorA | refs/heads/master | graphics.py | 1 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from PySide import QtCore, QtGui
import math
import sys
# Import the modules needed to implement the graphical diagram
from elementos import Religador, BusBarSection, Substation, Condutor
from elementos import EnergyConsumer
from DialogRecloser import RecloserDialog
from DialogBarra import BarraDialog
from DialogConductor import ConductorDialog
from DialogSubstation import SubstationDialog
from DialogEnergyConsumer import EnergyConsumerDialog
from aviso_conexao import AvisoConexaoDialog
from avisoReligador import AvisoReligador
class Edge(QtGui.QGraphicsLineItem):
'''
    Class implementing the Edge object, which connects two Node objects
    to each other.
'''
def __init__(self, w1, w2, edge_menu):
'''
        Initialization method of the Edge class.
        Receives the start and end Node objects as parameters and
        defines the QtCore.QLineF object describing the line that
        represents this QtGui.QGraphicsLineItem.
'''
        # The Edge class graphically represents the conductors in the
        # diagram. It holds their definition as well as the functions
        # needed for alignment and connection.
        # IMPORTANT NOTE: an Edge is the graphical representation of a
        # line. It keeps a virtual line on which the class relies to draw
        # the actual line. Edge is an object of type
        # QtGui.QGraphicsLineItem and its line is defined by an object of
        # type QtCore.QLineF. (See these two classes in the PySide library.)
super(Edge, self).__init__()
self.id = id(self)
self.w1 = w1
self.w2 = w2
        # Add this edge object to the edge lists of w1 and w2, respectively.
        self.w1.add_edge(self)
        self.w2.add_edge(self)
        # Associate the edge menu used to open the edge dialog.
        self.myEdgeMenu = edge_menu
        # Create and configure the line connecting items w1 and w2.
line = QtCore.QLineF(self.w1.pos(), self.w2.pos())
self.setLine(line)
self.setZValue(-1)
self.setFlag(QtGui.QGraphicsItem.ItemIsSelectable, True)
        # Flag that indicates whether or not the edge is fixed to a bus bar.
        self.isFixed = False
        # Flag used to fix the edge to a bus bar.
        self.fixFlag = False
        # Flag that indicates whether or not the edge is permanent.
        self.isPermanent = False
        # Create a "linha" attribute, an object of the conductor type. This
        # object is used to hold the electrical data of the conductor. The
        # data starts out null and is later set through the menus. See the
        # "Condutor" class in elementos.py.
        self.linha = Condutor(0, 0, 0, 0, 0, 0)
        # Analysis: if an item (w1 or w2) connected by the line is a bus
        # bar, that bar's attribute named "bar_busy" is set to True,
        # indicating that the bar is "busy".
if w1.myItemType == Node.Barra or w2.myItemType == Node.Barra:
self.isPermanent = True
if w1.myItemType == Node.Barra:
w1.bar_busy = True
if w2.myItemType == Node.Barra:
w2.bar_busy = True
def get_fraction(self, pos):
'''
        This function obtains a fraction of the line and is used by what I
        call the "Sticky to Line" behaviour of a load node. It may be used
        for other purposes in future expansions.
        '''
        # Compute the (horizontal and vertical) extents of the line, i.e.
        # the differences between points x2 and x1 and points y2 and y1
        # (assuming a line connecting (x1, y1) to (x2, y2)).
        delta_x = math.fabs(self.line().p2().x() - self.line().p1().x())
        delta_y = math.fabs(self.line().p2().y() - self.line().p1().y())
        # "dist" is the distance between the point at position "pos",
        # passed in the function call, and the starting point of the line.
        # This distance is given by the usual relation describing the
        # distance between two points:
        # L = ((x1 - x2)² + (y1 - y2)²)^(1/2)
        dist = math.sqrt(pow(pos.x() - self.line().p1().x(), 2)
                         + pow(pos.y() - self.line().p1().y(), 2))
        # This is an approximation method for a fixed set of fractions:
        # "dist" is compared with the total length of the line, and the
        # resulting fraction is rounded to the predefined values 0.25,
        # 0.5 and 0.75.
fraction = dist / self.line().length()
if 0.75 < fraction < 1:
fraction = 0.75
if 0.5 < fraction < 0.75:
fraction = 0.5
if 0.25 < fraction < 0.5:
fraction = 0.25
if 0 < fraction < 0.25:
fraction = 0.25
        # One possible inconsistency remains to be handled: whether point
        # p1 analysed above is below or above, to the left or to the right,
        # of point p2.
        # If it is to the right:
        if self.line().p1().x() > self.line().p2().x():
            # The final x position is x1 - fraction * delta_x, i.e. x1 is
            # the reference point and the final x position lies to the
            # left of it.
            posf_x = self.line().p1().x() - fraction * delta_x
        # If it is to the left:
        else:
            # The final x position is x1 + fraction * delta_x, i.e. x1 is
            # the reference point and the final x position lies to the
            # right of it.
            posf_x = self.line().p1().x() + fraction * delta_x
        # The same is done for y, keeping in mind that in the PySide
        # modules the y axis grows DOWNWARDS. Thus:
        # If it is BELOW:
        if self.line().p1().y() > self.line().p2().y():
            # The final y position is y1 - fraction * delta_y, i.e. y1 is
            # the reference point and the final y position lies ABOVE it.
            posf_y = self.line().p1().y() - fraction * delta_y
        # If it is ABOVE:
        else:
            # The final y position is y1 + fraction * delta_y, i.e. y1 is
            # the reference point and the final y position lies BELOW it.
            posf_y = self.line().p1().y() + fraction * delta_y
        # Finally, build and return the final position. In other words: if
        # we pass a position lying between the start and the middle of the
        # line, the function returns the position at exactly 0.25 of the
        # line; for a position in the third quarter it returns the position
        # at exactly half of the line; and for a position in the last
        # quarter it returns the position at exactly 0.75 of the line.
posf = QtCore.QPointF(posf_x, posf_y)
return posf
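    # Worked example (added for illustration): for a horizontal line from
    # (0, 0) to (100, 0), any pos whose x lies between 50 and 75 yields
    # fraction == 0.5, so get_fraction() returns QtCore.QPointF(50, 0).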
def update_position(self):
'''
        Position-update method of the edge object implemented by the Edge
        class. Whenever one of the Node objects w1 or w2 changes its
        position, this method is called so that the edge object can follow
        the movement of the Node objects.
        '''
        # Simply create a new line connecting items w1 and w2.
        line = QtCore.QLineF(self.w1.pos(), self.w2.pos())
        length = line.length()
        # If the obtained length is zero, return without updating the
        # line.
        if length == 0.0:
            return
        # This virtual function is required to perform real-time geometry
        # changes on objects of the PySide library.
        self.prepareGeometryChange()
        # Set the obtained line as the line of this Edge.
self.setLine(line)
def set_color(self, color):
'''
        Simply sets the colour of the Edge.
        '''
        # Set the desired colour using the one passed in the call.
self.setPen(QtGui.QPen(color))
def paint(self, painter, option, widget):
'''
        Drawing method of the edge object implemented by the Edge class.
        This function is executed constantly.
        '''
        # If the items collide graphically, the line is not drawn.
        if (self.w1.collidesWithItem(self.w2)):
            return
        # Below is the line-distribution logic used when lines are
        # connected to a bus bar.
        # If item self.w1 is of the bar type, item self.w2 must be
        # aligned. Note that this alignment does not apply to the
        # Substation element:
if (self.w1.myItemType == Node.Barra
and self.w2.myItemType != Node.Subestacao):
            # Set the flag indicating that the line is fixed to the Bar.
            self.fixFlag = True
            # Set w2's flag, indicating that this item is fixed to the bar.
            self.w2.Fixed = True
            # If the number of lines connected to the bar is greater than
            # 1, the distribution and alignment logic must be applied.
            if len(self.w1.edges) > 1:
                # Insert the line at its distribution slot computed by the
                # bar graphics item. This slot is determined by the
                # edge_position function (see the Node class).
                line = QtCore.QLineF(self.mapFromItem(
                    self.w1, self.w1.rect().center().x(),
                    self.w1.edge_position(
                        self)), self.mapFromItem(
                    self.w2, self.w2.rect().center()))
                # Adjust item w2 on the invisible grid of the diagram.
                # (See the Node class, "adjust_in_grid" function.)
                pos = self.w2.adjust_in_grid(
                    QtCore.QPointF(self.w2.scenePos().x(), line.y1()))
                self.w2.setPos(pos)
                # Adjust the final line according to the distribution slot,
                # corrected by the grid adjustment.
                line.setLine(line.x1(), self.w2.y() + 10, line.x2(), line.y2())
                # Fix item w2.
                self.w2.fix_item()
            # If this is the line's first connection, a normal connection
            # is made.
else:
line = QtCore.QLineF(self.mapFromItem(
self.w1, self.w1.rect().center()), self.mapFromItem(
self.w2, self.w2.rect().center()))
        # If item self.w2 is of the bar type, item self.w1 must be aligned.
        # The procedure is analogous to the one above.
elif (self.w2.myItemType == Node.Barra
and self.w1.myItemType != Node.Subestacao):
self.fixFlag = True
self.w1.Fixed = True
if len(self.w2.edges) > 1:
line = QtCore.QLineF(self.mapFromItem(
self.w1, self.w1.rect().center()), self.mapFromItem(
self.w2, self.w2.rect().center().x(),
self.w2.edge_position(
self)))
self.w1.setY(self.mapFromItem(
self.w2, self.w2.rect().center().x(),
self.w2.edge_position(
self)).y() - 12.5)
self.w1.fix_item()
else:
line = QtCore.QLineF(self.mapFromItem(
self.w1, self.w1.rect().center()), self.mapFromItem(
self.w2, self.w2.rect().center()))
else:
line = QtCore.QLineF(self.mapFromItem(
self.w1, self.w1.rect().center()), self.mapFromItem(
self.w2, self.w2.rect().center()))
self.setLine(line)
if self.fixFlag:
self.isFixed = True
        # Define the pen and the fill of the line.
painter.setPen(QtGui.QPen(QtCore.Qt.black, # QPen Brush
2, # QPen width
QtCore.Qt.SolidLine,
# QPen style
QtCore.Qt.SquareCap,
# QPen cap style
QtCore.Qt.RoundJoin)
# QPen join style
)
painter.setBrush(QtCore.Qt.black)
painter.drawLine(self.line())
        # If the line is selected, draw a dashed outline around the
        # selected line.
if self.isSelected():
painter.setPen(QtGui.QPen(QtCore.Qt.red, 2, QtCore.Qt.DashLine))
my_line = QtCore.QLineF(line)
my_line.translate(0, 4.0)
painter.drawLine(my_line)
my_line.translate(0, -8.0)
painter.drawLine(my_line)
def mousePressEvent(self, mouse_event):
'''
        Method reimplementing the mouse press event (see the PySide
        library, mouse events).
        '''
        # If the line is pressed, mark it as selected so that the dashed
        # outline is drawn.
self.setSelected(True)
super(Edge, self).mousePressEvent(mouse_event)
return
def contextMenuEvent(self, event):
'''
        Reimplementation of the virtual function contextMenuEvent, which
        defines the menu opened with a right click (see the PySide
        library, contextMenuEvent).
'''
self.scene().clearSelection()
self.setSelected(True)
self.myEdgeMenu.exec_(event.screenPos() + QtCore.QPointF(20, 20))
class Text(QtGui.QGraphicsTextItem):
'''
    Class implementing the generic Text object.
'''
    # Create two signals: one related to changes of the item's
    # position/geometry and another for when the item loses focus or
    # stops being selected. (See PySide, QtCore.Signal.)
selectedChange = QtCore.Signal(QtGui.QGraphicsItem)
lostFocus = QtCore.Signal(QtGui.QGraphicsTextItem)
def __init__(self, text, parent=None, scene=None):
'''
        Text settings (see PySide, QtGui.QGraphicsTextItem).
'''
super(Text, self).__init__(parent, scene)
self.setPlainText(text)
self.setZValue(100)
self.setFlag(QtGui.QGraphicsItem.ItemIsMovable, False)
self.setFlag(QtGui.QGraphicsItem.ItemIsSelectable, False)
def itemChange(self, change, value):
'''
        Virtual function reimplemented to emit a change signal (see
        PySide, QGraphicsTextItem).
'''
if change == QtGui.QGraphicsItem.ItemSelectedChange:
self.selectedChange.emit(self)
return value
def focusOutEvent(self, event):
'''
        Virtual function reimplemented to emit a focus-loss signal (see
        PySide, QGraphicsTextItem).
'''
self.lostFocus.emit(self)
super(Text, self).focusOutEvent(event)
class Node(QtGui.QGraphicsRectItem):
'''
    Class implementing the generic Node object. This graphics element
    represents reclosers, bus bars, substations and load nodes.
    '''
    # possible item types
Subestacao, Religador, Barra, Agent, NoDeCarga, NoConectivo = range(6)
def __init__(self, item_type, node_menu, parent=None, scene=None):
'''
        Initialization method of the Node class.
        Receives as parameters myItemType (which defines the desired kind
        of Node) and the desired menu (the menu opened on right click).
        Analogously to the Edge, this item is just the representation of
        a rectangle of type QtCore.QRectF.
'''
super(Node, self).__init__()
        # Definition of flag-type attributes:
        self.bar_busy = False  # flag - bar busy.
        self.Fixed = False  # flag - item fixed to a bus bar.
        # Definition of several attributes used later on.
        self.id = id(self)  # Attribute holding the item's unique id.
        self.edges = {}  # Dictionary containing the item's edges.
        self.l0 = None  # Auxiliary position variable.
        self.edges_no_sub = {}  # TODO: still need to ask Lucas about this.
        self.myItemType = item_type  # Defines the item type.
        self.edge_counter = 0  # Counter tracking the number of edges.
        self.mean_pos = None  # Mean position attribute.
        self.text_config = 'Custom'  # Relay configuration attribute.
        self.pos_ref = 0  # Reference position attribute.
        # If the item being inserted is of the substation type:
        if self.myItemType == self.Subestacao:
            # Define the rectangle.
            rect = QtCore.QRectF(0, 0, 50.0, 50.0)
            # Define and adjust the position of the item's label. It
            # starts with an empty text.
self.text = Text('', self, self.scene())
self.substation = Substation(
self.text.toPlainText(), 0.0, 0.0, 0.0, complex(0, 0))
self.text.setPos(self.mapFromItem(self.text, 0, rect.height()))
        # If the item being inserted is of the recloser type:
        elif self.myItemType == self.Religador:
            rect = QtCore.QRectF(0, 0, 20, 20)
            # Define and adjust the position of the item's label. It
            # starts with an empty text.
            self.text = Text('', self, self.scene())
            self.text.setPos(self.mapFromItem(self.text, 10, rect.height()))
            # Create the switch object holding the electrical data of the
            # recloser element.
self.chave = Religador(self.text.toPlainText(), 0, 0, 0, 0, 1)
        # If the item being inserted is of the bus bar type:
        elif self.myItemType == self.Barra:
            rect = QtCore.QRectF(0, 0, 10.0, 100.0)
            # Define and adjust the position of the item's label. It
            # starts with the text 'Barra'.
            self.text = Text('Barra', self, self.scene())
            self.text.setPos(self.mapFromItem(self.text, 0, rect.height()))
            # Create the bar object holding the electrical data of the
            # bar element.
            self.barra = BusBarSection("Identificador")
            # Define an empty list for the terminals that the bar may
            # have.
self.terminals = []
        # If the item being inserted is of the agent type:
        # NOTE: ask Lucas whether the block below is still needed.
        elif self.myItemType == self.Agent:
            rect = QtCore.QRectF(0, 0, 50.0, 50.0)
            # Define and adjust the position of the item's label. It
            # starts with the text 'Agente'.
self.text = Text('Agente', self, self.scene())
self.text.setPos(self.mapFromItem(self.text, 0, rect.height()))
        # NOTE: check whether the connective node is still needed.
        # If the item being inserted is of the connective node type:
        elif self.myItemType == self.NoConectivo:
            rect = QtCore.QRectF(0, 0, 7, 7)
        # If the item being inserted is of the load node type:
        elif self.myItemType == self.NoDeCarga:
            rect = QtCore.QRectF(0, 0, 8, 8)
            # Define and adjust the position of the item's label. It
            # starts with an empty text.
            self.text = Text('', self, self.scene())
            self.text.setPos(self.mapFromItem(self.text, 0, rect.height()))
            # Define an empty list for the terminals that the load node
            # may have.
self.terminals = []
            # Create the load-node object holding the electrical data of
            # the load node element.
            self.no_de_carga = EnergyConsumer('', 0, 0)
        # Set the item's rectangle to the rect obtained above, depending
        # on the item type.
        self.setRect(rect)
        # Set the menu (opened via right click) depending on the item
        # type.
        self.myNodeMenu = node_menu
        # Set the QGraphicsItem flags (see QtGui.QGraphicsItem.setFlag).
self.setFlag(QtGui.QGraphicsItem.ItemIsMovable, True)
self.setFlag(QtGui.QGraphicsItem.ItemIsSelectable, True)
self.setFlag(QtGui.QGraphicsItem.ItemIsFocusable, True)
self.setFlag(QtGui.QGraphicsItem.ItemSendsGeometryChanges, True)
self.setZValue(0)
def fix_item(self):
'''
        Sets the item's fixation flag.
'''
self.Fixed = True
def update_count(self):
'''
        Updates the counter tracking the item's number of Edges.
'''
self.edge_counter = len(self.edges)
def remove_edges(self):
'''
        Method removing every Edge object associated with this node.
        '''
        # Create an empty list that will receive the removed Edges.
        deleted_list = []
        # Iterate over the Node's edges.
        for edge in self.edges:
            # Since all edges will be removed, append each one to
            # "deleted_list".
            deleted_list.append(edge)
            # Remove the Edge from the scene containing this Node.
            self.scene().removeItem(edge)
for edge in deleted_list:
            # Each removed edge has another item connected to it besides
            # this Node, so it must also be removed from that other item's
            # edge list.
if edge.w1 is not None:
edge.w1.remove_edge(edge)
if edge.w2 is not None:
edge.w2.remove_edge(edge)
        # Clear this Node's edge list, as well as the list of edges not
        # connected to substations.
        self.edges.clear()
        self.edges_no_sub.clear()
        # Update the counter tracking the number of edges associated with
        # the item.
self.update_count()
def remove_edge(self, edge):
'''
        This function removes the edge passed in the call from this item.
'''
self.edges.pop(edge)
self.update_count()
def add_edge(self, edge):
'''
Adds an edge object to this node.
'''
# A recloser Node may have at most two connections, so if the
# counter tracking the Node's edges is already greater than 2 the
# function returns.
if self.myItemType == self.Religador:
if self.edge_counter > 2:
return
# Increment the counter.
self.edge_counter += 1
# Add the Edge passed to the function to the Node's dictionary of
# Edges.
self.edges[edge] = len(self.edges)
# If neither endpoint is a substation, also add the Edge to the
# dictionary of edges that do not connect to substations.
if (edge.w1.myItemType != Node.Subestacao
and edge.w2.myItemType != Node.Subestacao):
self.edges_no_sub[edge] = len(self.edges_no_sub)
self.update_count()
def edge_position(self, edge):
'''
This method is used to distribute the Edges attached to a bus,
following an alignment rule.
'''
# NOTE: ask Lucas to explain this layout rule.
height = self.rect().height()
height = height - 2.0 * height / 8.0
num_edges = len(self.edges_no_sub)
num_edges -= 1
if num_edges <= 0:
num_edges = 1
dw = height / float(num_edges)
pos = height / 8.0 + self.edges_no_sub[edge] * dw
return pos
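# A hedged worked example of the rule above (values assumed, not taken
# from the original code): for a bus of height 80 the usable span
# becomes 80 - 2*80/8 = 60; with three edges in edges_no_sub
# (num_edges = 2 after the decrement) dw = 30, so the returned offsets
# are 7.5, 37.5 and 67.5.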
def center(self):
'''
Returns the center point of this item.
'''
point = QtCore.QPointF(self.rect().width(), self.rect().height())
return (self.pos() + point / 2)
def set_center(self, pos):
w = self.rect().width()
h = self.rect().height()
point = QtCore.QPointF(w / 2, h / 2)
self.setPos(pos - point)
def boundingRect(self):
'''
Reimplementation of the virtual function that specifies the
bounding box of the node object (see PySide,
QtGui.QGraphicsRectItem.boundingRect).
'''
extra = 5.0
return self.rect().adjusted(-extra, -extra, extra, extra)
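# Hedged numeric illustration (values assumed): a 10x10 rect at the
# origin yields a bounding rect from (-5, -5) to (15, 15), i.e. 20x20.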
def paint(self, painter, option, widget):
'''
Drawing method of the node object, implemented by the Node class.
Items are differentiated here by their shape. Each one was defined
by a QtCore.QRectF rectangle, and this method draws its shape
based on that rectangle.
See the paint method in PySide.
'''
# If the item being drawn is of type substation:
if self.myItemType == self.Subestacao:
painter.setPen(QtGui.QPen(QtCore.Qt.black, 2))
painter.setBrush(QtCore.Qt.white)
painter.drawEllipse(self.rect())
# If the item being drawn is of type recloser:
elif self.myItemType == self.Religador:
painter.setPen(QtGui.QPen(QtCore.Qt.black, 2))
# An important note: if the switch associated with the recloser
# graphic is closed, the recloser is drawn filled in black;
# otherwise it is drawn hollow (white).
if self.chave.normalOpen == 1:
painter.setBrush(QtCore.Qt.white)
else:
painter.setBrush(QtCore.Qt.black)
painter.drawRoundedRect(self.rect(), 5, 5)
# If the item being drawn is of type bus:
elif self.myItemType == self.Barra:
painter.setPen(QtGui.QPen(QtCore.Qt.black, 2))
painter.setBrush(QtCore.Qt.black)
painter.drawRoundedRect(self.rect(), 2, 2)
# If the item being drawn is of type agent:
elif self.myItemType == self.Agent:
painter.setPen(QtGui.QPen(QtCore.Qt.black, 2))
painter.setBrush(QtCore.Qt.white)
painter.drawRect(self.rect())
# If the item being drawn is of type connective node:
elif self.myItemType == self.NoConectivo:
painter.setPen(QtGui.QPen(QtCore.Qt.black, 2))
painter.setBrush(QtCore.Qt.black)
painter.drawEllipse(self.rect())
# If the item being drawn is of type load node:
elif self.myItemType == self.NoDeCarga:
painter.setPen(QtGui.QPen(QtCore.Qt.black, 2))
painter.setBrush(QtCore.Qt.black)
painter.drawRect(self.rect())
# If the item is selected, draw a dashed selection box around it.
if self.isSelected():
painter.setPen(QtGui.QPen(QtCore.Qt.red, 2, QtCore.Qt.DashLine))
painter.setBrush(QtCore.Qt.NoBrush)
adjust = 2
rect = self.rect().adjusted(-adjust, -adjust, adjust, adjust)
painter.drawRect(rect)
def itemChange(self, change, value):
'''
Detects changes in the position of the Node object.
'''
# If the change is a position change (see
# QtGui.QGraphicsItem.ItemPositionChange), this Node's edges must
# be updated one by one:
if change == QtGui.QGraphicsItem.ItemPositionChange:
for edge in self.edges:
edge.update_position()
# Required pass-through to the base-class implementation.
return QtGui.QGraphicsItem.itemChange(self, change, value)
def mousePressEvent(self, mouse_event):
'''
Reimplementation of the virtual function that handles mouse
button presses.
'''
# Store the item's scene.
self.cena = self.scene()
# "Deseleciona" os outros itens que por ventura estejam selecionados.
self.scene().clearSelection()
# Set the item's internal flag indicating it is selected.
self.setSelected(True)
super(Node, self).mousePressEvent(mouse_event)
return
def mouseMoveEvent(self, mouse_event):
'''
Reimplementation of the virtual function that handles mouse
movement while a button is held down.
'''
super(Node, self).mouseMoveEvent(mouse_event)
# Call "adjust_in_grid", which constrains item movement to an
# invisible grid on the diagram (see adjust_in_grid in the Node
# class).
self.setPos(self.adjust_in_grid(self.scenePos()))
def mouseReleaseEvent(self, mouse_event):
'''
Reimplementation of the virtual function that handles the mouse
button being released after a press.
'''
super(Node, self).mouseReleaseEvent(mouse_event)
# Create a None edge as an execution helper.
new_edge = None
scene = self.scene()
# Create an ellipse graphics item whose size is set by the
# QtCore.QRectF below. The ellipse is added at the position where
# the button was released and must be removed at the end of the
# function, otherwise it stays visible in the scene.
ell = QtGui.QGraphicsEllipseItem()
ell.setRect(
QtCore.QRectF(
mouse_event.scenePos() - QtCore.QPointF(10, 10),
QtCore.QSizeF(30, 30)))
scene.addItem(ell)
# The block below implements the "sticky" behaviour of the
# connective node: if the node has only one occupied end and
# collides with a node that is not another connective node, the
# line "snaps" onto the collided item, establishing a connection.
if self.myItemType == Node.NoConectivo and len(self.edges) == 1:
# Check every item that collides with the created ellipse, which
# gives the collision a margin around the node.
for item in scene.items():
if ell.collidesWithItem(item):
if isinstance(item, Node):
if item.myItemType != Node.NoConectivo:
# If the item is a bus, the algorithm still needs
# work! PENDING.
if item.myItemType == Node.Barra:
scene.removeItem(ell)
return
# Since the item is not a bus, remove the connective
# node's line and create a new line that connects
# directly to the collided item.
for edge in self.edges:
edge.scene().removeItem(edge)
if edge.w1.myItemType != Node.NoConectivo:
w1 = edge.w1
else:
w1 = edge.w2
new_edge = Edge(w1, item, scene.myLineMenu)
scene.addItem(new_edge)
new_edge.update_position()
scene.removeItem(self)
# If the item is a load node that is not connected yet, this part
# of the method implements the "sticky" behaviour of the load node
# when it collides with a line.
if self.myItemType == Node.NoDeCarga:
# If the load node is already connected, the function returns.
if len(self.edges) != 0:
scene.removeItem(ell)
return
scene.removeItem(ell)
if self.scene().myMode == 1:
return
# If an item in the scene collides with the ellipse and that item
# is not the load node itself, break the line and insert the load
# node. This is skipped when the line is very short.
for item in scene.items():
if ell.collidesWithItem(item):
if isinstance(item, Edge) and not item.isUnderMouse():
if item.line().length() < 20:
return
break_mode = 3
pos = item.get_fraction(mouse_event.scenePos())
self.setPos(pos.x() - 5, pos.y() - 5)
scene.break_edge(item, break_mode, None, self)
# Guard against a double removal: ell may already have been removed
# in the load-node branch above.
if ell.scene() is not None:
scene.removeItem(ell)
return
def mouseDoubleClickEvent(self, event):
'''
Reimplementation of the mouse double-click handler.
'''
# Clear the selection of any object in the scene.
self.scene().clearSelection()
# Mark the item as selected.
self.setSelected(True)
super(Node, self).mouseDoubleClickEvent(event)
# Run the configuration dialog for the Node's elements.
self.scene().launch_dialog()
def adjust_in_grid(self, pos):
'''
This method implements an invisible grid on the scene,
constraining Node movement to well-defined positions.
'''
# Empirical position adjustment.
item_x = pos.x() - 5
item_y = pos.y() - 5
if item_x == 0 or item_y == 0:
return
# Isolate the hundreds of the item's x and y positions, e.g. if
# the x position is 384, centena_x = int(384/100) * 100 = 3 * 100.
# The y position is handled the same way.
centena_x = int(item_x / 100) * 100
centena_y = int(item_y / 100) * 100
# Compute the residuals, i.e. tens + units. In our example,
# residual_x = 384 - 300 = 84.
residual_x = item_x - centena_x
residual_y = item_y - centena_y
# The grid's reference position is (0, 0) and each grid square is
# 20x20 pixels, so the residual tells us which square the item
# must snap to. In our example x = 384, so the x position is
# adjusted to 380, which lies in the square spanning 380 -> 400.
# The rule is:
# 0 < residual < 10 -> final position = hundreds
# 10 < residual < 20 -> final position = hundreds + 20
# 20 < residual < 30 -> final position = hundreds + 20
# 30 < residual < 40 -> final position = hundreds + 40
# 40 < residual < 50 -> final position = hundreds + 40
# 50 < residual < 60 -> final position = hundreds + 60
# 60 < residual < 70 -> final position = hundreds + 60
# 70 < residual < 80 -> final position = hundreds + 80
# 80 < residual < 90 -> final position = hundreds + 80
# residual > 90 -> final position = hundreds + 100
if residual_x > 10:
if residual_x > 20:
if residual_x > 30:
new_pos_x = centena_x + 40
else:
new_pos_x = centena_x + 20
else:
new_pos_x = centena_x + 20
else:
new_pos_x = centena_x
if residual_x > 40:
if residual_x > 50:
new_pos_x = centena_x + 60
else:
new_pos_x = centena_x + 40
if residual_x > 60:
if residual_x > 70:
new_pos_x = centena_x + 80
else:
new_pos_x = centena_x + 60
if residual_x > 80:
if residual_x > 90:
new_pos_x = centena_x + 100
else:
new_pos_x = centena_x + 80
if residual_y > 10:
if residual_y > 20:
if residual_y > 30:
new_pos_y = centena_y + 40
else:
new_pos_y = centena_y + 20
else:
new_pos_y = centena_y + 20
else:
new_pos_y = centena_y
if residual_y > 40:
if residual_y > 50:
new_pos_y = centena_y + 60
else:
new_pos_y = centena_y + 40
if residual_y > 60:
if residual_y > 70:
new_pos_y = centena_y + 80
else:
new_pos_y = centena_y + 60
if residual_y > 80:
if residual_y > 90:
new_pos_y = centena_y + 100
else:
new_pos_y = centena_y + 80
# Position adjustment to account for the geometry difference.
if self.myItemType == Node.NoDeCarga:
new_pos_x += 6
new_pos_y += 6
return QtCore.QPointF(new_pos_x, new_pos_y)
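def snap_to_grid_sketch(self, coord, square=20):
'''
A minimal sketch (hypothetical helper, not part of the original
class) of the snapping rule implemented by adjust_in_grid above:
snap one coordinate to the nearest multiple of `square`, assuming
the 20 px grid described in the comments.
e.g. snap_to_grid_sketch(384) == 380 and
snap_to_grid_sketch(391) == 400; boundary cases may differ from
adjust_in_grid by one grid square.
'''
return int(round(coord / float(square))) * square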
def contextMenuEvent(self, event):
'''
Reimplementation of the virtual handler for the menu opened by a
right click.
'''
# Clear the scene's item selection.
self.scene().clearSelection()
# Set the item's selected flag.
self.setSelected(True)
# Run the menu corresponding to the item type.
self.myNodeMenu.exec_(event.screenPos())
class SceneWidget(QtGui.QGraphicsScene):
'''
Class implementing the graphical container, called the scene,
where the widgets reside.
'''
# Interaction modes for the graphical diagram
InsertItem, InsertLine, InsertText, MoveItem, SelectItems = range(5)
# Background styles for the graphical diagram
GridStyle, NoStyle = range(2)
# Signal defined for the SceneWidget class, emitted when an item
# is inserted into the graphical diagram
itemInserted = QtCore.Signal(int)
def __init__(self):
super(SceneWidget, self).__init__()
# Flag definitions
self.start_item_is_ghost = False
self.end_item_is_ghost = False
self.keyControlIsPressed = False
# Auxiliary attribute definitions
self.line = None
self.no = None
self.selectRect = None
self.text_item = None
self.dict_prop = {}
self.lista_no_conectivo = []
# Initial scene geometry
self.setSceneRect(0, 0, 800, 800)
self.myMode = self.MoveItem
self.myItemType = Node.Subestacao
self.my_background_style = self.NoStyle
# Initial method calls
# Create the actions that can be performed on the scene (see
# create_actions in SceneWidget)
self.create_actions()
# Create the menus used in the scene (see create_menus in
# SceneWidget)
self.create_menus()
# Create the UNDO command stack, used to implement the undo and
# redo commands (CTRL+Z and CTRL+Y). PENDING.
self.undoStack = QtGui.QUndoStack()
# Create the dictionaries of recloser presets (see create_dict in
# SceneWidget)
self.custom_dict = {'Corrente Nominal': 0,
'Capacidade de Interrupcao': 0, 'Sequencia': 0}
self.create_dict(100, 4, 4, 'ABB')
self.create_dict(150, 5, 3, 'SEL')
self.create_dict(200, 6, 3, 'BOSCH')
def create_dict(self, corrente, capacidade, num_rel, padrao):
'''
This method creates a dictionary describing a commercial recloser
preset, built from the given parameters.
'''
prop = {'Corrente Nominal': corrente,
'Capacidade de Interrupcao': capacidade, 'Sequencia': num_rel}
self.dict_prop[padrao] = prop
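# Hedged sketch of the resulting structure, given the three calls
# in __init__ above:
# self.dict_prop == {
# 'ABB': {'Corrente Nominal': 100,
# 'Capacidade de Interrupcao': 4, 'Sequencia': 4},
# 'SEL': {'Corrente Nominal': 150,
# 'Capacidade de Interrupcao': 5, 'Sequencia': 3},
# 'BOSCH': {'Corrente Nominal': 200,
# 'Capacidade de Interrupcao': 6, 'Sequencia': 3}}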
def mousePressEvent(self, mouse_event):
'''
Defines the actions performed when a mousePress event is
detected on the graphical diagram.
'''
super(SceneWidget, self).mousePressEvent(mouse_event)
# Store the position where the mouse was pressed.
self.pressPos = mouse_event.scenePos()
# Set break_mode, used by the line-breaking method (see break_edge
# in SceneWidget).
self.break_mode = 2
# Create a variable to receive an edge that has been broken.
self.edge_broken = None
# Actions for when the pressed mouse button is NOT the left
# one.
if (mouse_event.button() != QtCore.Qt.LeftButton):
# Auxiliary variable indicating whether a node has priority.
node_priority = False
# Clear the item selection in the scene.
self.clearSelection()
# Create an ellipse that is added to, and removed from, the scene
# by the end of the function, so it is never visible to the user.
# The ellipse is inserted at the mouse press position.
# It gives the user's click some precision: every action is
# interpreted with a selection margin around it, represented by
# the ellipse.
ell = QtGui.QGraphicsEllipseItem()
ell.setRect(
QtCore.QRectF(
mouse_event.scenePos()
- QtCore.QPointF(10, 10), QtCore.QSizeF(30, 30)))
self.addItem(ell)
# Test every item in the scene.
for item in self.items():
# If the precision ellipse collides with a Node item, give the
# node priority.
if ell.collidesWithItem(item):
if isinstance(item, Node):
node_priority = True
# Test every item in the scene again.
for item in self.items():
# If the ellipse collides with an edge and no node has priority,
# open the Edge's context menu.
if (ell.collidesWithItem(item) and isinstance(item, Edge)
and not node_priority):
self.removeItem(ell)
item.setSelected(True)
item.myEdgeMenu.exec_(mouse_event.screenPos())
item.setSelected(False)
return
# If no line collides, remove the ellipse and return.
self.removeItem(ell)
return
# Find a hidden item, if any, and remove it.
item_oculto = None
for item in self.items():
if not item.isVisible():
item_oculto = item
if item_oculto is not None:
self.removeItem(item_oculto)
# When the pressed button is the left one, act according to the
# mode set on the scene.
# If the mode is item insertion:
if self.myMode == self.InsertItem:
# Insert an item of the given type (see Node).
if self.myItemType == Node.Religador:
item = Node(self.myItemType, self.myRecloserMenu)
elif self.myItemType == Node.Barra:
item = Node(self.myItemType, self.myBusMenu)
elif self.myItemType == Node.Subestacao:
item = Node(self.myItemType, self.mySubstationMenu)
elif self.myItemType == Node.NoDeCarga:
item = Node(self.myItemType, self.mySubstationMenu)
# Adjust the item position to the mouse press position.
item.setPos(item.adjust_in_grid(mouse_event.scenePos()))
self.addItem(item)
# When an item is added, the configuration dialog opens so the
# user can promptly enter its data (see launch_dialog). If the
# user cancels the dialog, the item is removed from the scene.
if self.myItemType in (Node.Religador, Node.Barra,
Node.Subestacao, Node.NoDeCarga):
item.setSelected(True)
result = self.launch_dialog()
item.setSelected(False)
if result == 0:
self.removeItem(item)
# Create a command so that the undo/redo actions are possible.
# PENDING
comando = AddRemoveCommand("Add", self, item)
self.undoStack.push(comando)
# Emit a signal carrying the item type.
self.itemInserted.emit(self.myItemType)
# If the mode set on the scene is line insertion:
elif self.myMode == self.InsertLine:
# Create the ellipse for the purpose explained earlier: giving
# the mouse presses a margin of action.
ell = QtGui.QGraphicsEllipseItem()
ell.setRect(
QtCore.QRectF(
mouse_event.scenePos()
- QtCore.QPointF(10, 10), QtCore.QSizeF(30, 30)))
self.addItem(ell)
# Set the Node priority to false.
node_priority = False
# Create a legend for the possible collision types.
edge_collision, node_collision, ellipse_collision = range(3)
# Create a variable to receive the collision type.
collision = None
# Scan the items at the mouse press position; if an item is a
# Node, give it priority.
for item in self.items(mouse_event.scenePos()):
if isinstance(item, Node):
node_priority = True
# Scan every scene item that collides with the precision
# ellipse:
for item in self.items():
if ell.collidesWithItem(item):
# 1) If this item is an Edge and Node priority is off, mark
# the collision as an edge collision. That is, the user is
# inserting a line on top of another line, which breaks the
# original line (to create branches).
# IMPORTANT: the break_mode associated with this operation is
# 0. The Edge to be broken is also stored.
if isinstance(item, Edge) and not node_priority:
self.c_pos = (
item.line().p1() + item.line().p2()) / 2
collision = edge_collision
self.break_mode = 0
self.edge_broken = item
# 2) If this item is a Node, make it the origin Node of a new
# line and mark the collision as a node collision.
elif isinstance(item, Node):
collision = node_collision
self.start_item = item
# 3) If this item is another ellipse, mark the collision as
# an ellipse collision.
elif isinstance(item, QtGui.QGraphicsEllipseItem):
collision = ellipse_collision
# Set the initial position to the position of the mouse
# press.
self.l0 = mouse_event.scenePos()
# Check the collision type.
# 1) Edge collision: the line must be broken and a connective
# node inserted to create the branch. c_pos, obtained earlier, is
# the midpoint of the line to be broken. start_item becomes the
# inserted connective node.
if collision == edge_collision:
self.no = Node(Node.NoConectivo, self.myLineMenu)
self.addItem(self.no)
self.no.setPos(self.c_pos - QtCore.QPointF(3.5, 3.5))
self.start_item = self.no
self.l0 = self.c_pos
# If the collision is with another ellipse, the user inserted the
# line in an empty part of the scene. For now the program adds a
# connective node, which is removed later if the connection does
# not materialize (see SceneWidget.mouseReleaseEvent).
elif collision == ellipse_collision:
self.no = Node(Node.NoConectivo, self.myLineMenu)
self.addItem(self.no)
self.no.setPos(mouse_event.scenePos())
self.start_item = self.no
# With the checks done, create a line ready to be drawn and
# updated as the user moves the mouse after the press.
self.line = QtGui.QGraphicsLineItem(
QtCore.QLineF(
self.l0,
self.l0))
self.line.setPen(
QtGui.QPen(QtCore.Qt.black, 2))
self.addItem(self.line)
self.removeItem(ell)
# If the insertion mode is text, insert the text at the mouse
# position.
elif self.myMode == self.InsertText:
text_item = Text()
text_item.setFont(self.myFont)
text_item.setTextInteractionFlags(QtCore.Qt.TextEditorInteraction)
text_item.setZValue(1000.0)
text_item.lostFocus.connect(self.editorLostFocus)
text_item.selectedChange.connect(self.itemSelected)
self.addItem(text_item)
text_item.setDefaultTextColor(self.myTextColor)
text_item.setPos(mouse_event.scenePos())
self.textInserted.emit(text_item)
# If the mode is multiple item selection:
elif self.myMode == self.SelectItems:
selection = True
if selection:
init_point = mouse_event.scenePos()
self.selectRect = QtGui.QGraphicsRectItem(
QtCore.QRectF(init_point, init_point))
self.selectRect.setPen(
QtGui.QPen(QtCore.Qt.red, 2, QtCore.Qt.DashLine))
self.addItem(self.selectRect)
# If none of the modes above applies, we are in the plain
# selection mode.
else:
# Turn the priorities off.
super(SceneWidget, self).mousePressEvent(mouse_event)
priority_on = False
priority_node = False
# Create the precision ellipse.
ell = QtGui.QGraphicsEllipseItem()
ell.setRect(
QtCore.QRectF(
mouse_event.scenePos()
- QtCore.QPointF(10, 10), QtCore.QSizeF(30, 30)))
self.addItem(ell)
# Scan the items colliding with the ellipse.
for item in self.items():
# If the item is a Node or an Edge, set priority for these
# two; if the item is a Node, also set the Node priority.
if ell.collidesWithItem(item):
if isinstance(item, Node) or isinstance(item, Edge):
if isinstance(item, Node):
priority_node = True
priority_on = True
# Scan the items colliding with the ellipse.
for item in self.items():
if ell.collidesWithItem(item):
# If the item is another ellipse and neither a Node nor an
# Edge has priority, simply clear the scene's selection.
if (isinstance(item, QtGui.QGraphicsEllipseItem)
and not priority_on):
self.clearSelection()
# If the item is a Node, select it.
elif isinstance(item, Node):
self.removeItem(ell)
self.clearSelection()
item.setSelected(True)
# If the item is an Edge and no Node has priority, select
# the edge.
elif isinstance(item, Edge) and not priority_node:
self.removeItem(ell)
self.clearSelection()
item.setSelected(True)
return
if ell.scene() == self:
self.removeItem(ell)
return
def mouseMoveEvent(self, mouse_event):
'''
Defines the actions performed when a mouseMove event is
detected on the graphical diagram.
'''
# In line-insertion mode, draw the new line following the mouse
# movement.
if self.myMode == self.InsertLine and self.line:
self.clearSelection()
new_line = QtCore.QLineF(
self.line.line().p1(), mouse_event.scenePos())
self.line.setLine(new_line)
# In the plain item-move mode, defer to the native PySide
# implementation.
elif self.myMode == self.MoveItem:
super(SceneWidget, self).mouseMoveEvent(mouse_event)
return
# In multiple item-selection mode, draw the selection
# rectangle.
elif self.myMode == self.SelectItems and self.selectRect:
new_rect = QtCore.QRectF(
self.selectRect.rect().topLeft(), mouse_event.scenePos())
self.selectRect.setRect(new_rect)
def mouseReleaseEvent(self, mouse_event):
'''
Defines the actions performed when a mouseRelease event is
detected on the graphical diagram. Here the two elements linked
by the line created in the mousePress event are connected.
'''
# In line-insertion mode, turn off the node and edge priorities
# and create a block_on flag.
if self.myMode == self.InsertLine and self.line:
node_priority = False
edge_priority = False
block_on = False
# Remove the item self.no, which was created provisionally to be
# shown while the mouse was pressed.
if self.no is not None:
self.removeItem(self.no)
# Create the precision ellipse at the mouse release position.
ell = QtGui.QGraphicsEllipseItem()
ell.setRect(QtCore.QRectF(mouse_event.scenePos() -
QtCore.QPointF(10, 10), QtCore.QSizeF(30, 30)))
self.addItem(ell)
# Preliminary checks on start_item
# If start_item is a bus, set the block_on flag.
if self.start_item.myItemType == Node.Barra:
block_on = True
# If it is a recloser, check whether it already has the two
# connections allowed at most. If so, remove the precision
# ellipse and return.
if self.start_item.myItemType == Node.Religador:
if len(self.start_item.edges) >= 2:
self.removeItem(self.line)
self.line = None
self.removeItem(ell)
return
# Establishing the end_item
# If there is an item "under" the mouse and it is a Node, set the
# Node priority flag.
for item in self.items():
if item.isUnderMouse():
if isinstance(item, Node):
node_priority = True
# If the precision ellipse encloses a line, set the edge (line)
# priority flag.
for item in self.items():
if ell.collidesWithItem(item):
if isinstance(item, Edge):
edge_priority = True
# Check whether the precision ellipse encloses a Node or an Edge.
for item in self.items():
if ell.collidesWithItem(item):
# For a Node, check the maximum number of allowed
# connections (2).
if isinstance(item, Node):
if item.myItemType == Node.Religador:
if len(item.edges) >= 2:
self.removeItem(self.line)
self.line = None
self.removeItem(ell)
return
# This condition prevents an item already attached to a
# bus from having its alignment constraint changed by a
# connection to another bus. In other words, connecting
# a bus to an item that is already attached to another
# bus is not allowed.
if block_on is True:
for edge in item.edges:
if (edge.w1.myItemType == Node.Barra
or edge.w2.myItemType == Node.Barra):
self.removeItem(self.line)
self.line = None
self.removeItem(ell)
return
# With the checks done, the end_item is set to the Node
# enclosed by the precision ellipse.
self.end_item = item
# If the ellipse encloses an edge and there is no node
# priority, i.e. no node "under" the mouse:
elif isinstance(item, Edge) and not node_priority:
# If block_on is set (i.e. the start_item is a bus),
# remove the line and the precision ellipse and return.
# In other words, it is not possible to connect a bus
# onto another line (line breaking).
if block_on is True:
self.removeItem(self.line)
self.line = None
self.removeItem(ell)
return
# Otherwise the final item is a line, i.e. the line is
# broken in the middle to insert a passage node.
# Compute the center of the line.
c_pos = (item.line().p1() + item.line().p2()) / 2
# Set the end item to a connective node positioned at
# the center of the line to be broken.
self.end_item = Node(Node.NoConectivo, self.myLineMenu)
self.end_item.setPos(c_pos +
QtCore.QPointF(-3.5, -3.5))
# Set break_mode to 1 (regular line break).
self.break_mode = 1
# Store the line to be broken for later use.
self.edge_broken = item
# If the item enclosed by the precision ellipse is the
# ellipse itself, the mouse button was not released over any
# element of the scene, so a passage node is simply created
# at the clicked position.
elif (isinstance(item, QtGui.QGraphicsEllipseItem)
and not node_priority and not edge_priority):
self.end_item = Node(Node.NoConectivo, self.myLineMenu)
self.end_item.setPos(mouse_event.scenePos())
# Remove the provisional line and the precision ellipse.
self.removeItem(self.line)
self.line = None
self.removeItem(ell)
# Follow-up checks on start_item and end_item
# If the start item is a bus and the end_item a plain connective
# node, replace the latter with a recloser.
if self.start_item.myItemType == Node.Barra:
if self.end_item.myItemType == Node.NoConectivo:
self.removeItem(self.end_item)
self.end_item = Node(Node.Religador, self.myRecloserMenu)
self.addItem(self.end_item)
self.end_item.setPos(mouse_event.scenePos())
# If the end item is a bus and the start item a plain connective
# node, replace the latter with a recloser.
if self.end_item.myItemType == Node.Barra:
if self.start_item.myItemType == Node.NoConectivo:
self.removeItem(self.start_item)
self.start_item = Node(Node.Religador, self.myRecloserMenu)
self.addItem(self.start_item)
self.start_item.setPos(self.pressPos)
# Line-length check: if the created line is too short, the
# function returns without creating it.
dist = math.sqrt(
math.pow(
self.start_item.pos().x() -
self.end_item.pos().x(), 2) + math.pow(
self.start_item.pos().y() - self.end_item.pos().y(), 2))
if dist < 15:
print "Erro: Comprimento da ligação muito pequeno!"
return
# If there is a line to be broken but it is fixed (attached to a
# bus), the break is not performed and the function returns.
if self.edge_broken is not None and self.edge_broken.isPermanent:
print "Não se pode quebrar esta linha!"
return
# Fix possible scene-association discrepancies for the items
# mentioned above.
if self.start_item.scene() is None:
self.addItem(self.start_item)
if self.end_item.scene() is None:
self.addItem(self.end_item)
# Finally, link the start and end items and add the line to the
# scene.
edge = Edge(self.start_item, self.end_item, self.myLineMenu)
self.addItem(edge)
edge.set_color(QtCore.Qt.black)
edge.update_position()
# Call break_edge with the appropriate break mode.
self.break_edge(self.edge_broken, self.break_mode, edge)
# Swap w1 and w2 for convenience when w1 is a connective
# node.
if edge.w1.myItemType == Node.NoConectivo:
edge.w1, edge.w2 = edge.w2, edge.w1
# Deselect the selected items.
for item in self.selectedItems():
item.setSelected(False)
self.no = None
# In item-selection mode, select the items enclosed by the
# selection rectangle.
elif self.myMode == self.SelectItems and self.selectRect:
path = QtGui.QPainterPath()
path.addRect(self.selectRect.rect())
self.setSelectionArea(path)
self.removeItem(self.selectRect)
self.selectRect = None
self.line = None
self.itemInserted.emit(3)
super(SceneWidget, self).mouseReleaseEvent(mouse_event)
def mouseDoubleClickEvent(self, mouse_event):
'''
Defines the actions performed when a mouseDoubleClick event is
detected on the graphical diagram: the configuration dialog of
the item under the mouse is opened.
'''
# If an item is double-clicked, open its parameter configuration
# dialog.
for item in self.selectedItems():
if isinstance(item, Node):
item.setSelected(True)
self.launch_dialog()
item.setSelected(False)
return
# Create the precision ellipse item.
ell = QtGui.QGraphicsEllipseItem()
ell.setRect(QtCore.QRectF(mouse_event.scenePos() -
QtCore.QPointF(10, 10), QtCore.QSizeF(30, 30)))
self.addItem(ell)
# If the precision ellipse encloses a line, open the line
# configuration dialog.
for item in self.items():
if item.collidesWithItem(ell) and isinstance(item, Edge):
if ell.scene() is not None:
self.removeItem(ell)
item.setSelected(True)
self.launch_dialog()
item.setSelected(False)
return
else:
if ell.scene() is not None:
self.removeItem(ell)
# Handles presses of the various keys. The block below is
# self-explanatory.
def keyPressEvent(self, event):
key = event.key()
if self.keyControlIsPressed is True:
if key == QtCore.Qt.Key_Z:
self.undoStack.undo()
if key == QtCore.Qt.Key_Y:
self.undoStack.redo()
if key == QtCore.Qt.Key_Space:
self.change_state()
if key == QtCore.Qt.Key_Up:
for item in self.selectedItems():
item.moveBy(0, -5)
elif key == QtCore.Qt.Key_Down:
for item in self.selectedItems():
item.moveBy(0, 5)
elif key == QtCore.Qt.Key_Left:
for item in self.selectedItems():
item.moveBy(-5, 0)
elif key == QtCore.Qt.Key_Right:
for item in self.selectedItems():
item.moveBy(5, 0)
elif key == QtCore.Qt.Key_Space or key == QtCore.Qt.Key_Enter:
pass
elif key == QtCore.Qt.Key_Control:
self.keyControlIsPressed = True
elif key == QtCore.Qt.Key_Delete:
self.delete_item()
elif key == QtCore.Qt.Key_Escape:
self.clearSelection()
else:
pass
super(SceneWidget, self).keyPressEvent(event)
return
def keyReleaseEvent(self, event):
key = event.key()
if key == QtCore.Qt.Key_Control:
self.keyControlIsPressed = False
# break_edge is used to break a line when an insertion starts from,
# or lands on top of, an existing line.
def break_edge(self, edge, mode, original_edge, insert=None):
if mode == 3:
break_point = insert
if mode == 2:
command = AddRemoveCommand("Add", self, original_edge)
self.undoStack.push(command)
return
if mode == 0:
break_point = self.start_item
if mode == 1:
break_point = self.end_item
edge.w1.remove_edge(edge)
edge.w2.remove_edge(edge)
self.removeItem(edge)
new_edge_1 = Edge(edge.w1, break_point, self.myLineMenu)
new_edge_2 = Edge(break_point, edge.w2, self.myLineMenu)
self.addItem(new_edge_1)
self.addItem(new_edge_2)
new_edge_1.update_position()
new_edge_2.update_position()
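# Hedged summary of the break modes, inferred from the callers above:
# 0 -> break at self.start_item (a new line started on top of a line)
# 1 -> break at self.end_item (a new line released on top of a line)
# 2 -> no break: only the undo command for original_edge is registered
# 3 -> break at the node passed via `insert` (load-node drop)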
# Definition of the function that recovers a line that was previously
# broken.
def recover_edge(self, item):
w = []
for edge in item.edges:
if edge.w1 == item:
w.append(edge.w2)
elif edge.w2 == item:
w.append(edge.w1)
item.remove_edges()
new_edge = Edge(w[0], w[1], self.myLineMenu)
self.addItem(new_edge)
new_edge.update_position()
def set_item_type(self, type):
'''
Defines which item type will be inserted into the graphical
diagram when a mousePress event is detected. It can be:
Node.Subestacao
Node.Religador
Node.Barra
Node.Agent
'''
self.myItemType = type
def set_mode(self, mode):
'''
Sets the scene's current interaction mode.
'''
self.myMode = mode
def change_state(self):
'''
Toggles the recloser state. This function is called when the
user has a recloser selected and presses the space bar.
'''
print "entrou"
for item in self.selectedItems():
if item.myItemType == Node.Religador:
aviso = AvisoReligador(item.chave.normalOpen, item.chave.nome)
if aviso.dialog.result() == 1:
print item.chave.normalOpen
if item.chave.normalOpen == 1:
item.chave.normalOpen = 0
elif item.chave.normalOpen == 0:
item.chave.normalOpen = 1
item.setSelected(False)
item.setSelected(True)
print item.chave.normalOpen
else:
continue
def create_actions(self):
'''
This method creates the actions used in the graphical items'
menus. Self-explanatory: see QtGui.QAction in the PySide library.
'''
self.propertysAction = QtGui.QAction(
'Abrir/Fechar', self, shortcut='Enter',
triggered=self.change_state)
self.deleteAction = QtGui.QAction(
'Excluir Item', self, shortcut='Delete',
triggered=self.delete_item)
self.increaseBusAction = QtGui.QAction(
'Aumentar Barra', self, shortcut='Ctrl + a',
triggered=self.increase_bus)
self.decreaseBusAction = QtGui.QAction(
'Diminuir Barra', self, shortcut='Ctrl + d',
triggered=self.decrease_bus)
self.alignHLineAction = QtGui.QAction(
'Alinha Linha H', self, shortcut='Ctrl + h',
triggered=self.align_line_h)
self.alignVLineAction = QtGui.QAction(
'Alinhar Linha V', self, shortcut='Ctrl + v',
triggered=self.align_line_v)
def create_menus(self):
'''
This method creates the menus of each graphical item: recloser,
substation, bus and line. Self-explanatory: see QtGui.QMenu in
the PySide library.
'''
self.myBusMenu = QtGui.QMenu('Menu Bus')
self.myBusMenu.addAction(self.increaseBusAction)
self.myBusMenu.addAction(self.decreaseBusAction)
self.myBusMenu.addAction(self.deleteAction)
self.myBusMenu.addAction(self.propertysAction)
self.myRecloserMenu = QtGui.QMenu('Menu Recloser')
self.myRecloserMenu.addAction(self.propertysAction)
self.myRecloserMenu.addAction(self.deleteAction)
self.mySubstationMenu = QtGui.QMenu('Menu Subestacao')
self.mySubstationMenu.addAction(self.propertysAction)
self.mySubstationMenu.addAction(self.deleteAction)
self.myLineMenu = QtGui.QMenu('Menu Linha')
self.myLineMenu.addAction(self.alignHLineAction)
self.myLineMenu.addAction(self.alignVLineAction)
self.myLineMenu.addAction(self.propertysAction)
self.myLineMenu.addAction(self.deleteAction)
def delete_item(self):
'''
This method implements the removal of a graphical item from the
diagram.
'''
for item in self.selectedItems():
item.Noc = None
if isinstance(item, Node):
# If the selected item is not a connective node and has at least
# one edge attached, the item is removed and a connective node
# appears in its place.
if item.myItemType != Node.NoConectivo:
lista = item.edges
if len(item.edges) >= 1:
item.Noc = Node(Node.NoConectivo, self.myLineMenu)
self.addItem(item.Noc)
item.Noc.setPos(item.scenePos() +
QtCore.QPointF(20, 20))
for edge in lista:
if edge.w1 == item:
new_edge = Edge(
item.Noc, edge.w2, self.myLineMenu)
else:
new_edge = Edge(
item.Noc, edge.w1, self.myLineMenu)
self.addItem(new_edge)
item.remove_edges()
# If the item has more than two attached lines the behaviour
# becomes unpredictable, so a message restricting this action is
# shown to the user.
if len(item.edges) > 2:
dialog = AvisoConexaoDialog()
return
# If the deleted item is a connective node joining two lines, it
# is removed and a single line is created connecting the
# previously linked objects.
elif (len(item.edges) == 2
and item.myItemType == Node.NoConectivo):
self.recover_edge(item)
if isinstance(item, Edge):
# If the selected item is an edge and the endpoint under analysis
# is a lone connective node, the node is deleted together with
# the line.
if (item.w1.myItemType == Node.NoConectivo
and len(item.w1.edges) <= 1):
self.removeItem(item.w1)
if (item.w2.myItemType == Node.NoConectivo
and len(item.w2.edges) <= 1):
self.removeItem(item.w2)
item.w1.remove_edge(item)
item.w2.remove_edge(item)
# Remove the item.
self.removeItem(item)
command = AddRemoveCommand("Remove", self, item)
self.undoStack.push(command)
def launch_dialog(self):
'''
This method starts the configuration dialogs of each graphical
item in the diagram.
'''
for item in self.selectedItems():
# The explanatory comments below apply to each of the various
# Node types.
if isinstance(item, Node):
# Recloser case
if item.myItemType == Node.Religador:
# Call the function that opens the dialog associated with the
# recloser.
dialog = RecloserDialog(item)
# If the user presses "OK":
if dialog.dialog.result() == 1:
item.text_config = unicode(
dialog.testeLineEdit.currentText())
# The same applies to every input box: if the user's entry
# is blank, the field keeps the value previously assigned
# to it. Otherwise, the value entered by the user is
# assigned to the corresponding parameter.
if dialog.identificaOLineEdit.text() != "":
item.chave.nome = dialog.identificaOLineEdit.text()
item.text.setPlainText(
dialog.identificaOLineEdit.text())
if dialog.correnteNominalLineEdit.text() != "":
item.chave.ratedCurrent = \
dialog.correnteNominalLineEdit.text()
if dialog.capacidadeDeInterrupOLineEdit.text() != "":
item.chave.breakingCapacity = \
dialog.capacidadeDeInterrupOLineEdit.text()
if dialog.nDeSequNciasDeReligamentoLineEdit.text() != "":
item.chave.recloseSequences = \
dialog.nDeSequNciasDeReligamentoLineEdit.text()
else:
return dialog.dialog.result()
# Bus case
if item.myItemType == Node.Barra:
dialog = BarraDialog(item)
if dialog.dialog.result() == 1:
if dialog.nomeLineEdit.text() != "":
item.text.setPlainText(dialog.nomeLineEdit.text())
item.barra.nome = dialog.nomeLineEdit.text()
if dialog.fasesLineEdit.text() != "":
item.barra.phases = dialog.fasesLineEdit.text()
else:
return dialog.dialog.result()
# Substation case
if item.myItemType == Node.Subestacao:
dialog = SubstationDialog(item)
if dialog.dialog.result() == 1:
if dialog.nomeLineEdit.text() != "":
item.text.setPlainText(dialog.nomeLineEdit.text())
item.substation.nome = dialog.nomeLineEdit.text()
if dialog.tpLineEdit.text() != "":
item.substation.tensao_primario = \
dialog.tpLineEdit.text()
else:
return dialog.dialog.result()
# Load node case
if item.myItemType == Node.NoDeCarga:
dialog = EnergyConsumerDialog(item)
if dialog.dialog.result() == 1:
if dialog.identificaOLineEdit.text() != "":
item.text.setPlainText(
dialog.identificaOLineEdit.text())
item.no_de_carga.nome = \
dialog.identificaOLineEdit.text()
if dialog.potNciaAtivaLineEdit.text() != "":
item.no_de_carga.potencia_ativa = \
dialog.potNciaAtivaLineEdit.text()
if dialog.potNciaReativaLineEdit.text() != "":
item.no_de_carga.potencia_reativa = \
dialog.potNciaReativaLineEdit.text()
else:
return dialog.dialog.result()
# If the item is a line, open the line configuration dialog. The
# procedure is analogous to the Node case.
if isinstance(item, Edge):
print str(item.linha.id)
dialog = ConductorDialog(item)
if dialog.dialog.result() == 1:
if dialog.comprimentoLineEdit.text() != "":
item.linha.comprimento = \
dialog.comprimentoLineEdit.text()
if dialog.resistenciaLineEdit.text() != "":
item.linha.resistencia = \
dialog.resistenciaLineEdit.text()
if dialog.resistenciaZeroLineEdit.text() != "":
item.linha.resistencia_zero = \
dialog.resistenciaZeroLineEdit.text()
if dialog.reatanciaLineEdit.text() != "":
item.linha.reatancia = \
dialog.reatanciaLineEdit.text()
if dialog.reatanciaZeroLineEdit.text() != "":
item.linha.reatancia_zero = \
dialog.reatanciaZeroLineEdit.text()
if dialog.ampacidadeLineEdit.text() != "":
item.linha.ampacidade = \
dialog.ampacidadeLineEdit.text()
else:
return dialog.dialog.result()
def increase_bus(self):
'''
This method implements the action of increasing the size of the
bus graphical item.
'''
for item in self.selectedItems():
if isinstance(item, Node):
item.prepareGeometryChange()
item.setRect(
item.rect().x(), item.rect().y(), item.rect().width(),
item.rect().height() * 1.25)
def decrease_bus(self):
'''
This method implements the action of decreasing the size of the
bus graphical item.
'''
for item in self.selectedItems():
if isinstance(item, Node):
item.prepareGeometryChange()
item.setRect(
item.rect().x(), item.rect().y(), item.rect().width(),
item.rect().height() / 1.25)
def align_line_h(self):
w1_is_locked = False
w2_is_locked = False
for item in self.selectedItems():
if isinstance(item, Edge):
for edge in item.w1.edges:
if (edge.w1.myItemType == Node.Barra
or edge.w2.myItemType == Node.Barra):
w1_is_locked = True
for edge in item.w2.edges:
if (edge.w1.myItemType == Node.Barra
or edge.w2.myItemType == Node.Barra):
w2_is_locked = True
if w1_is_locked and not w2_is_locked:
pos = QtCore.QPointF(
item.w2.center().x(), item.w1.center().y())
item.w2.set_center(pos)
item.update_position()
if w2_is_locked and not w1_is_locked:
pos = QtCore.QPointF(
item.w1.center().x(), item.w2.center().y())
item.w1.set_center(pos)
item.update_position()
else:
pos = QtCore.QPointF(
item.w2.center().x(), item.w1.center().y())
item.w2.set_center(pos)
item.update_position()
for item in self.items():
if isinstance(item, Edge):
item.update_position()
def align_line_v(self):
for item in self.selectedItems():
if isinstance(item, Edge):
if item.w1.x() < item.w2.x():
pos = QtCore.QPointF(
item.w1.center().x(), item.w2.center().y())
item.w2.set_center(pos)
else:
pos = QtCore.QPointF(
item.w2.center().x(), item.w1.center().y())
item.w1.set_center(pos)
item.update_position()
item.update_ret()
def h_align(self):
has_pos_priority = False
has_bar_priority = False
y_pos_list = []
for item in self.selectedItems():
if isinstance(item, Node):
if item.myItemType == Node.Religador:
has_pos_priority = True
pos_item = item.pos().y()
if item.myItemType == Node.Barra and item.bar_busy is True:
has_bar_priority = True
pos_barra = item.pos().y()
for item in self.selectedItems():
if isinstance(item, Node):
if item.myItemType != Node.Barra:
if has_bar_priority:
continue
else:
y_pos_list.append(item.pos().y())
continue
if item.myItemType == Node.NoConectivo:
if has_pos_priority:
continue
else:
y_pos_list.append(item.pos().y())
else:
y_pos_list.append(item.pos().y())
max_pos = max(y_pos_list)
min_pos = min(y_pos_list)
mean_pos = max_pos - abs(max_pos - min_pos) / 2.0
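# Note (hedged): mean_pos is just the midpoint (max_pos + min_pos) / 2
# written via the span, e.g. max 300, min 100 -> 300 - 200/2.0 = 200.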
for item in self.selectedItems():
if isinstance(item, Node):
if item.Fixed is True:
mean_pos = item.pos().y()
item.mean_pos = mean_pos
for item in self.selectedItems():
if isinstance(item, Node):
pos = mean_pos
if item.Fixed is True:
continue
if (has_bar_priority is True
and item.myItemType == Node.Subestacao):
pos = pos_barra + 25
elif has_pos_priority:
pos = pos_item
if item.myItemType == Node.NoConectivo:
pos = pos + 17
if item.myItemType == Node.NoDeCarga:
pos = pos + 15
if item.myItemType == Node.Barra:
pos = pos_barra
item.setY(pos)
for item in self.selectedItems():
if isinstance(item, Edge):
item.update_position()
def v_align(self):
x_pos_list = []
for item in self.selectedItems():
if isinstance(item, Node):
x_pos_list.append(item.pos().x())
max_pos = max(x_pos_list)
min_pos = min(x_pos_list)
mean_pos = max_pos - abs(max_pos - min_pos) / 2.0
for item in self.selectedItems():
if isinstance(item, Node):
item.setX(mean_pos)
for item in self.selectedItems():
if isinstance(item, Edge):
item.update_position()
def set_grid(self):
if self.my_background_style == self.GridStyle:
self.setBackgroundBrush(QtGui.QBrush(
QtCore.Qt.white, QtCore.Qt.NoBrush))
self.my_background_style = self.NoStyle
elif self.my_background_style == self.NoStyle:
self.setBackgroundBrush(QtGui.QBrush(
QtCore.Qt.lightGray, QtCore.Qt.CrossPattern))
self.my_background_style = self.GridStyle
class ViewWidget(QtGui.QGraphicsView):
'''
This class implements the QGraphicsView container where the
QGraphicsScene object resides.
'''
def __init__(self, scene):
super(ViewWidget, self).__init__(scene)
self.setCacheMode(QtGui.QGraphicsView.CacheBackground)
self.setRenderHint(QtGui.QPainter.Antialiasing)
self.setTransformationAnchor(QtGui.QGraphicsView.AnchorUnderMouse)
self.setResizeAnchor(QtGui.QGraphicsView.AnchorViewCenter)
def wheelEvent(self, event):
self.scale_view(math.pow(2.0, -event.delta() / 240.0))
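# Hedged numeric note: one standard wheel notch reports
# event.delta() == +/-120, so the view is scaled by
# 2 ** (-120/240.0) ~= 0.71 or 2 ** (120/240.0) ~= 1.41 per notch,
# clamped to the [0.5, 3] range by scale_view below.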
def scale_view(self, scale_factor):
factor = self.matrix().scale(scale_factor, scale_factor).mapRect(
QtCore.QRectF(0, 0, 1, 1)).width()
if factor < 0.5 or factor > 3:
return
self.scale(scale_factor, scale_factor)
class AddRemoveCommand(QtGui.QUndoCommand):
def __init__(self, mode, scene, item):
super(AddRemoveCommand, self).__init__(mode)
self.mode = mode
self.item = item
self.scene = scene
self.count = 0
def redo(self):
self.count += 1
if self.count <= 1:
return
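# Hedged reading of the count guard above: QUndoStack.push() calls
# redo() once immediately, and that first call is skipped here
# because the caller has already applied the action to the scene.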
if self.mode == "Add":
self.scene.addItem(self.item)
self.scene.addItem(self.item.text)
if self.mode == "Remove":
self.scene.removeItem(self.item)
def undo(self):
if self.mode == "Add":
self.scene.removeItem(self.item)
if self.mode == "Remove":
self.scene.addItem(self.item)
if self.item.Noc is not None:
lista = self.item.Noc.edges
for edge in lista:
if edge.w1 == self.item.Noc:
new_edge = Edge(
self.item, edge.w2, self.scene.myLineMenu)
else:
new_edge = Edge(
self.item, edge.w1, self.scene.myLineMenu)
self.scene.addItem(new_edge)
self.item.Noc.remove_edges()
self.scene.removeItem(self.item.Noc)
if __name__ == '__main__':
app = QtGui.QApplication(sys.argv)
scene = SceneWidget()
widget = ViewWidget(scene)
widget.show()
sys.exit(app.exec_())
|
tensorflow/tensorflow | refs/heads/master | tensorflow/python/keras/feature_column/sequence_feature_column_integration_test.py | 5 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Integration test for sequence feature columns with SequenceExamples."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from google.protobuf import text_format
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.feature_column import feature_column_v2 as fc
from tensorflow.python.feature_column import sequence_feature_column as sfc
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.keras import backend
from tensorflow.python.keras.feature_column import dense_features
from tensorflow.python.keras.feature_column import sequence_feature_column as ksfc
from tensorflow.python.keras.layers import merge
from tensorflow.python.keras.layers import recurrent
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops_v2
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.util import compat
class SequenceFeatureColumnIntegrationTest(test.TestCase):
def _make_sequence_example(self):
example = example_pb2.SequenceExample()
example.context.feature['int_ctx'].int64_list.value.extend([5])
example.context.feature['float_ctx'].float_list.value.extend([123.6])
for val in range(0, 10, 2):
feat = feature_pb2.Feature()
feat.int64_list.value.extend([val] * val)
example.feature_lists.feature_list['int_list'].feature.extend([feat])
for val in range(1, 11, 2):
feat = feature_pb2.Feature()
feat.bytes_list.value.extend([compat.as_bytes(str(val))] * val)
example.feature_lists.feature_list['str_list'].feature.extend([feat])
return example
def _build_feature_columns(self):
col = fc.categorical_column_with_identity('int_ctx', num_buckets=100)
ctx_cols = [
fc.embedding_column(col, dimension=10),
fc.numeric_column('float_ctx')
]
identity_col = sfc.sequence_categorical_column_with_identity(
'int_list', num_buckets=10)
bucket_col = sfc.sequence_categorical_column_with_hash_bucket(
'bytes_list', hash_bucket_size=100)
seq_cols = [
fc.embedding_column(identity_col, dimension=10),
fc.embedding_column(bucket_col, dimension=20)
]
return ctx_cols, seq_cols
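# Hedged note on the parse specs used in the test below: for these
# columns, fc.make_parse_example_spec_v2 is expected to yield
# VarLenFeature entries for the categorical keys ('int_ctx',
# 'int_list', 'bytes_list') and a fixed-length float feature for
# 'float_ctx' -- shapes assumed here, not verified.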
def test_sequence_example_into_input_layer(self):
examples = [_make_sequence_example().SerializeToString()] * 100
ctx_cols, seq_cols = self._build_feature_columns()
def _parse_example(example):
ctx, seq = parsing_ops.parse_single_sequence_example(
example,
context_features=fc.make_parse_example_spec_v2(ctx_cols),
sequence_features=fc.make_parse_example_spec_v2(seq_cols))
ctx.update(seq)
return ctx
ds = dataset_ops.Dataset.from_tensor_slices(examples)
ds = ds.map(_parse_example)
ds = ds.batch(20)
# Test on a single batch
features = dataset_ops.make_one_shot_iterator(ds).get_next()
# Tile the context features across the sequence features
sequence_input_layer = ksfc.SequenceFeatures(seq_cols)
seq_input, _ = sequence_input_layer(features)
dense_input_layer = dense_features.DenseFeatures(ctx_cols)
ctx_input = dense_input_layer(features)
ctx_input = backend.repeat(ctx_input, array_ops.shape(seq_input)[1])
concatenated_input = merge.concatenate([seq_input, ctx_input])
rnn_layer = recurrent.RNN(recurrent.SimpleRNNCell(10))
output = rnn_layer(concatenated_input)
with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
features_r = sess.run(features)
self.assertAllEqual(features_r['int_list'].dense_shape, [20, 3, 6])
output_r = sess.run(output)
self.assertAllEqual(output_r.shape, [20, 10])
@test_util.run_deprecated_v1
def test_shared_sequence_non_sequence_into_input_layer(self):
non_seq = fc.categorical_column_with_identity('non_seq',
num_buckets=10)
seq = sfc.sequence_categorical_column_with_identity('seq',
num_buckets=10)
shared_non_seq, shared_seq = fc.shared_embedding_columns_v2(
[non_seq, seq],
dimension=4,
combiner='sum',
initializer=init_ops_v2.Ones(),
shared_embedding_collection_name='shared')
seq = sparse_tensor.SparseTensor(
indices=[[0, 0], [0, 1], [1, 0]],
values=[0, 1, 2],
dense_shape=[2, 2])
non_seq = sparse_tensor.SparseTensor(
indices=[[0, 0], [0, 1], [1, 0]],
values=[0, 1, 2],
dense_shape=[2, 2])
features = {'seq': seq, 'non_seq': non_seq}
# Tile the context features across the sequence features
seq_input, seq_length = ksfc.SequenceFeatures([shared_seq])(features)
non_seq_input = dense_features.DenseFeatures([shared_non_seq])(features)
with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
output_seq, output_seq_length, output_non_seq = sess.run(
[seq_input, seq_length, non_seq_input])
self.assertAllEqual(output_seq, [[[1, 1, 1, 1], [1, 1, 1, 1]],
[[1, 1, 1, 1], [0, 0, 0, 0]]])
self.assertAllEqual(output_seq_length, [2, 1])
self.assertAllEqual(output_non_seq, [[2, 2, 2, 2], [1, 1, 1, 1]])
_SEQ_EX_PROTO = """
context {
feature {
key: "float_ctx"
value {
float_list {
value: 123.6
}
}
}
feature {
key: "int_ctx"
value {
int64_list {
value: 5
}
}
}
}
feature_lists {
feature_list {
key: "bytes_list"
value {
feature {
bytes_list {
value: "a"
}
}
feature {
bytes_list {
value: "b"
value: "c"
}
}
feature {
bytes_list {
value: "d"
value: "e"
value: "f"
value: "g"
}
}
}
}
feature_list {
key: "float_list"
value {
feature {
float_list {
value: 1.0
}
}
feature {
float_list {
value: 3.0
value: 3.0
value: 3.0
}
}
feature {
float_list {
value: 5.0
value: 5.0
value: 5.0
value: 5.0
value: 5.0
}
}
}
}
feature_list {
key: "int_list"
value {
feature {
int64_list {
value: 2
value: 2
}
}
feature {
int64_list {
value: 4
value: 4
value: 4
value: 4
}
}
feature {
int64_list {
value: 6
value: 6
value: 6
value: 6
value: 6
value: 6
}
}
}
}
}
"""
def _make_sequence_example():
example = example_pb2.SequenceExample()
return text_format.Parse(_SEQ_EX_PROTO, example)
if __name__ == '__main__':
test.main()
|
gangadharkadam/shfr | refs/heads/master | frappe/core/doctype/notification_count/notification_count.py | 23 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
import MySQLdb
from frappe.model.document import Document
logger = frappe.get_logger()
class NotificationCount(Document):
pass
@frappe.whitelist()
def get_notifications():
if frappe.flags.in_install_app:
return
config = get_notification_config()
can_read = frappe.user.get_can_read()
open_count_doctype = {}
open_count_module = {}
notification_count = dict(frappe.db.sql("""select for_doctype, open_count
from `tabNotification Count` where owner=%s""", (frappe.session.user,)))
for d in config.for_doctype:
if d in can_read:
condition = config.for_doctype[d]
key = condition.keys()[0]
if d in notification_count:
open_count_doctype[d] = notification_count[d]
else:
result = frappe.get_list(d, fields=["count(*)"],
filters=[[d, key, "=", condition[key]]], as_list=True)[0][0]
open_count_doctype[d] = result
try:
frappe.get_doc({"doctype":"Notification Count", "for_doctype":d,
"open_count":result}).insert(ignore_permissions=True)
except MySQLdb.OperationalError, e:
if e.args[0] != 1213:
raise
logger.error("Deadlock")
for m in config.for_module:
if m in notification_count:
open_count_module[m] = notification_count[m]
else:
open_count_module[m] = frappe.get_attr(config.for_module[m])()
try:
frappe.get_doc({"doctype":"Notification Count", "for_doctype":m,
"open_count":open_count_module[m]}).insert(ignore_permissions=True)
except MySQLdb.OperationalError, e:
if e.args[0] != 1213:
raise
logger.error("Deadlock")
return {
"open_count_doctype": open_count_doctype,
"open_count_module": open_count_module
}
def clear_notifications(user=None):
if frappe.flags.in_install_app=="frappe":
return
try:
if user:
frappe.db.sql("""delete from `tabNotification Count` where owner=%s""", (user,))
else:
frappe.db.sql("""delete from `tabNotification Count`""")
except MySQLdb.OperationalError, e:
if e.args[0] != 1213:
raise
logger.error("Deadlock")
def delete_notification_count_for(doctype):
if frappe.flags.in_import: return
try:
frappe.db.sql("""delete from `tabNotification Count` where for_doctype = %s""", (doctype,))
except MySQLdb.OperationalError, e:
if e.args[0] != 1213:
raise
logger.error("Deadlock")
def clear_doctype_notifications(doc, method=None, *args, **kwargs):
if frappe.flags.in_import:
return
config = get_notification_config()
doctype = doc.doctype
if doctype in config.for_doctype:
delete_notification_count_for(doctype)
return
if doctype in config.for_module_doctypes:
delete_notification_count_for(config.for_module_doctypes[doctype])
def get_notification_info_for_boot():
out = get_notifications()
config = get_notification_config()
can_read = frappe.user.get_can_read()
conditions = {}
module_doctypes = {}
doctype_info = dict(frappe.db.sql("""select name, module from tabDocType"""))
for d in list(set(can_read + config.for_doctype.keys())):
if d in config.for_doctype:
conditions[d] = config.for_doctype[d]
if d in doctype_info:
module_doctypes.setdefault(doctype_info[d], []).append(d)
out.update({
"conditions": conditions,
"module_doctypes": module_doctypes,
})
return out
def get_notification_config():
config = frappe._dict()
for notification_config in frappe.get_hooks().notification_config:
nc = frappe.get_attr(notification_config)()
for key in ("for_doctype", "for_module", "for_module_doctypes"):
config.setdefault(key, {})
config[key].update(nc.get(key, {}))
return config
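# Illustrative shape of what each notification_config hook is expected to
# return (the doctype names and conditions below are hypothetical, not taken
# from any real app):
#
#   def notification_config():
#       return {
#           "for_doctype": {"ToDo": {"status": "Open"}},
#           "for_module": {"Core": "myapp.utils.get_core_open_count"},
#           "for_module_doctypes": {"Some DocType": "Core"},
#       }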
def on_doctype_update():
if not frappe.db.sql("""show index from `tabNotification Count`
where Key_name="notification_count_owner_index" """):
frappe.db.commit()
frappe.db.sql("""alter table `tabNotification Count`
add index notification_count_owner_index(owner)""")
|
adamrp/qiime | refs/heads/master | qiime/parallel/multiple_rarefactions.py | 15 | #!/usr/bin/env python
# File created on 14 Jul 2012
from __future__ import division
__author__ = "Greg Caporaso"
__copyright__ = "Copyright 2011, The QIIME project"
__credits__ = ["Greg Caporaso"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "Greg Caporaso"
__email__ = "[email protected]"
from qiime.parallel.util import ParallelWrapper
class ParallelMultipleRarefactions(ParallelWrapper):
_script_name = "single_rarefaction.py"
_job_prefix = 'RARIF'
_input_splitter = ParallelWrapper._input_existing_filepaths
def _identify_files_to_remove(self, job_result_filepaths, params):
""" The output of the individual jobs are the files we want to keep
"""
return []
def _get_job_commands(self,
input_fp,
output_dir,
params,
job_prefix,
working_dir,
command_prefix='/bin/bash; ',
command_suffix='; exit'):
"""Generate rarefaction diversity commands to be submitted to cluster
"""
# Create data for each run (depth, output_fn)
min_seqs = params['min']
max_seqs = params['max']
step = params['step']
num_reps = params['num_reps']
run_parameters = []
for num_seqs in range(min_seqs, max_seqs + 1, step):
for rep_num in range(num_reps):
run_parameters.append((
num_seqs, 'rarefaction_%d_%d.biom' % (num_seqs, rep_num)))
commands = []
result_filepaths = []
if params['suppress_lineages_included']:
lineages_included_str = '--suppress_lineages_included'
else:
lineages_included_str = ''
if params['subsample_multinomial']:
subsample_multinomial_str = '--subsample_multinomial'
else:
subsample_multinomial_str = ''
for depth, output_fn in run_parameters:
# Each run ends with moving the output file from the tmp dir to
# the output_dir. Build the command to perform the move here.
rename_command, current_result_filepaths =\
self._get_rename_command([output_fn], working_dir, output_dir)
result_filepaths += current_result_filepaths
command = '%s %s -i %s -o %s %s %s -d %s %s %s' %\
(command_prefix,
self._script_name,
input_fp,
working_dir + '/' + output_fn,
lineages_included_str,
subsample_multinomial_str,
depth,
rename_command,
command_suffix)
commands.append(command)
commands = self._merge_to_n_commands(commands,
params['jobs_to_start'],
command_prefix=command_prefix,
command_suffix=command_suffix)
return commands, result_filepaths
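    # A single generated job command is roughly of the following form (paths
    # and depth are illustrative; the exact move step is produced by
    # self._get_rename_command):
    #
    #   /bin/bash; single_rarefaction.py -i otus.biom \
    #       -o /tmp/RARIF/rarefaction_100_0.biom   -d 100 \
    #       ; mv /tmp/RARIF/rarefaction_100_0.biom /out ; exit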
|
projectatomic/anaconda | refs/heads/rhel7-atomic | pyanaconda/ui/gui/spokes/datetime_spoke.py | 1 | # Datetime configuration spoke class
#
# Copyright (C) 2012-2013 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): Vratislav Podzimek <[email protected]>
#
import logging
log = logging.getLogger("anaconda")
# pylint: disable-msg=E0611
from gi.repository import AnacondaWidgets, GLib, Gtk, Gdk
from pyanaconda.ui.communication import hubQ
from pyanaconda.ui.common import FirstbootSpokeMixIn
from pyanaconda.ui.gui import GUIObject
from pyanaconda.ui.gui.spokes import NormalSpoke
from pyanaconda.ui.gui.categories.localization import LocalizationCategory
from pyanaconda.ui.gui.utils import enlightbox, gtk_action_nowait, gtk_action_wait, gtk_call_once
from pyanaconda.i18n import _, N_
from pyanaconda import timezone
from pyanaconda.timezone import NTP_SERVICE
from pyanaconda.localization import get_xlated_timezone
from pyanaconda import iutil
from pyanaconda import network
from pyanaconda import nm
from pyanaconda import ntp
from pyanaconda import flags
from pyanaconda import constants
from pyanaconda.threads import threadMgr, AnacondaThread
import datetime
import os
import re
import threading
import locale as locale_mod
__all__ = ["DatetimeSpoke"]
SERVER_OK = 0
SERVER_NOK = 1
SERVER_QUERY = 2
DEFAULT_TZ = "America/New_York"
POOL_SERVERS_NOTE = N_("Note: pool servers may not be available all the time")
SPLIT_NUMBER_SUFFIX_RE = re.compile(r'([^0-9]*)([-+])([0-9]+)')
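# For example (illustrative), "GMT+10" splits into the groups
# ("GMT", "+", "10"), while a plain city name like "London" does not match;
# _compare_cities below relies on exactly this distinction.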
def _compare_regions(reg_xlated1, reg_xlated2):
"""Compare two pairs of regions and their translations."""
reg1, xlated1 = reg_xlated1
reg2, xlated2 = reg_xlated2
# sort the Etc timezones to the end
if reg1 == "Etc" and reg2 == "Etc":
return 0
elif reg1 == "Etc":
return 1
elif reg2 == "Etc":
return -1
else:
# otherwise compare the translated names
return locale_mod.strcoll(xlated1, xlated2)
def _compare_cities(city_xlated1, city_xlated2):
"""Compare two paris of cities and their translations."""
# if there are "cities" ending with numbers (like GMT+-X), we need to sort
# them based on their numbers
val1 = city_xlated1[1]
val2 = city_xlated2[1]
match1 = SPLIT_NUMBER_SUFFIX_RE.match(val1)
match2 = SPLIT_NUMBER_SUFFIX_RE.match(val2)
if match1 is None and match2 is None:
# no +-X suffix, just compare the strings
return locale_mod.strcoll(val1, val2)
if match1 is None or match2 is None:
# one with the +-X suffix, compare the prefixes
if match1:
prefix, _sign, _suffix = match1.groups()
return locale_mod.strcoll(prefix, val2)
else:
prefix, _sign, _suffix = match2.groups()
return locale_mod.strcoll(val1, prefix)
# both have the +-X suffix
prefix1, sign1, suffix1 = match1.groups()
prefix2, sign2, suffix2 = match2.groups()
if prefix1 == prefix2:
# same prefixes, let signs determine
return cmp(int(sign1 + suffix1), int(sign2 + suffix2))
else:
# compare prefixes
return locale_mod.strcoll(prefix1, prefix2)
class NTPconfigDialog(GUIObject):
builderObjects = ["ntpConfigDialog", "addImage", "serversStore"]
mainWidgetName = "ntpConfigDialog"
uiFile = "spokes/datetime_spoke.glade"
def __init__(self, *args):
GUIObject.__init__(self, *args)
#used to ensure uniqueness of the threads' names
self._threads_counter = 0
#epoch is increased when serversStore is repopulated
self._epoch = 0
self._epoch_lock = threading.Lock()
@property
def working_server(self):
for row in self._serversStore:
if row[1] == SERVER_OK and row[2]:
#server is checked and working
return row[0]
return None
@property
def servers(self):
ret = list()
for row in self._serversStore:
if row[2]:
#server checked
ret.append(row[0])
return ret
def _render_working(self, column, renderer, model, itr, user_data=None):
#get the value in the second column
value = model[itr][1]
if value == SERVER_QUERY:
renderer.set_property("stock-id", "gtk-dialog-question")
elif value == SERVER_OK:
renderer.set_property("stock-id", "gtk-yes")
else:
renderer.set_property("stock-id", "gtk-no")
def initialize(self):
self.window.set_size_request(500, 400)
workingColumn = self.builder.get_object("workingColumn")
workingRenderer = self.builder.get_object("workingRenderer")
workingColumn.set_cell_data_func(workingRenderer, self._render_working)
self._serverEntry = self.builder.get_object("serverEntry")
self._serversStore = self.builder.get_object("serversStore")
self._poolsNote = self.builder.get_object("poolsNote")
self._initialize_store_from_config()
def _initialize_store_from_config(self):
self._serversStore.clear()
self._poolsNote.set_text("")
if self.data.timezone.ntpservers:
for server in self.data.timezone.ntpservers:
self._add_server(server)
else:
try:
for server in ntp.get_servers_from_config():
self._add_server(server)
except ntp.NTPconfigError as ntperr:
log.warning("Failed to load NTP servers configuration")
def refresh(self):
self._serverEntry.grab_focus()
def refresh_servers_state(self):
itr = self._serversStore.get_iter_first()
while itr:
self._refresh_server_working(itr)
itr = self._serversStore.iter_next(itr)
def run(self):
self.window.show()
rc = self.window.run()
self.window.hide()
#OK clicked
if rc == 1:
new_servers = list()
for row in self._serversStore:
#if server checked
if row[2]:
new_servers.append(row[0])
if flags.can_touch_runtime_system("save NTP servers configuration"):
ntp.save_servers_to_config(new_servers)
iutil.restart_service(NTP_SERVICE)
#Cancel clicked, window destroyed...
else:
self._epoch_lock.acquire()
self._epoch += 1
self._epoch_lock.release()
self._initialize_store_from_config()
return rc
def _set_server_ok_nok(self, itr, epoch_started):
"""
If the server is working, set its data to SERVER_OK, otherwise set its
data to SERVER_NOK.
:param itr: iterator of the $server's row in the self._serversStore
"""
@gtk_action_nowait
def set_store_value(arg_tuple):
"""
We need a function for this, because this way it can be added to
the MainLoop with thread-safe GLib.idle_add (but only with one
argument).
:param arg_tuple: (store, itr, column, value)
"""
(store, itr, column, value) = arg_tuple
store.set_value(itr, column, value)
orig_hostname = self._serversStore[itr][0]
server_working = ntp.ntp_server_working(self._serversStore[itr][0])
#do not let dialog change epoch while we are modifying data
self._epoch_lock.acquire()
#check if we are in the same epoch as the dialog (and the serversStore)
#and if the server wasn't changed meanwhile
if epoch_started == self._epoch:
actual_hostname = self._serversStore[itr][0]
if orig_hostname == actual_hostname:
if server_working:
set_store_value((self._serversStore,
itr, 1, SERVER_OK))
else:
set_store_value((self._serversStore,
itr, 1, SERVER_NOK))
self._epoch_lock.release()
@gtk_action_nowait
def _refresh_server_working(self, itr):
""" Runs a new thread with _set_server_ok_nok(itr) as a taget. """
self._serversStore.set_value(itr, 1, SERVER_QUERY)
new_thread_name = "AnaNTPserver%d" % self._threads_counter
threadMgr.add(AnacondaThread(name=new_thread_name,
target=self._set_server_ok_nok,
args=(itr, self._epoch)))
self._threads_counter += 1
def _add_server(self, server):
"""
Checks if a given server is a valid hostname and if yes, adds it
to the list of servers.
:param server: string containing hostname
"""
(valid, error) = network.sanityCheckHostname(server)
if not valid:
log.error("'%s' is not a valid hostname: %s" % (server, error))
return
for row in self._serversStore:
if row[0] == server:
#do not add duplicate items
return
itr = self._serversStore.append([server, SERVER_QUERY, True])
if "pool" in server:
self._poolsNote.set_text(_(POOL_SERVERS_NOTE))
#do not block UI while starting thread (may take some time)
self._refresh_server_working(itr)
def on_entry_activated(self, entry, *args):
self._add_server(entry.get_text())
entry.set_text("")
def on_add_clicked(self, *args):
self._add_server(self._serverEntry.get_text())
self._serverEntry.set_text("")
def on_use_server_toggled(self, renderer, path, *args):
itr = self._serversStore.get_iter(path)
old_value = self._serversStore[itr][2]
self._serversStore.set_value(itr, 2, not old_value)
def on_server_edited(self, renderer, path, new_text, *args):
if not path:
return
(valid, error) = network.sanityCheckHostname(new_text)
if not valid:
log.error("'%s' is not a valid hostname: %s" % (new_text, error))
return
itr = self._serversStore.get_iter(path)
if self._serversStore[itr][0] == new_text:
return
self._serversStore.set_value(itr, 0, new_text)
self._serversStore.set_value(itr, 1, SERVER_QUERY)
self._refresh_server_working(itr)
class DatetimeSpoke(FirstbootSpokeMixIn, NormalSpoke):
builderObjects = ["datetimeWindow",
"days", "months", "years", "regions", "cities",
"upImage", "upImage1", "upImage2", "downImage",
"downImage1", "downImage2", "downImage3", "configImage",
"citiesFilter", "daysFilter", "regionCompletion",
]
mainWidgetName = "datetimeWindow"
uiFile = "spokes/datetime_spoke.glade"
category = LocalizationCategory
icon = "preferences-system-time-symbolic"
title = N_("DATE & _TIME")
def __init__(self, *args):
NormalSpoke.__init__(self, *args)
# taking values from the kickstart file?
self._kickstarted = flags.flags.automatedInstall
def initialize(self):
NormalSpoke.initialize(self)
self._daysStore = self.builder.get_object("days")
self._monthsStore = self.builder.get_object("months")
self._yearsStore = self.builder.get_object("years")
self._regionsStore = self.builder.get_object("regions")
self._citiesStore = self.builder.get_object("cities")
self._tzmap = self.builder.get_object("tzmap")
        # we need to know if the new value is the same as the previous one or not
self._old_region = None
self._old_city = None
self._regionCombo = self.builder.get_object("regionCombobox")
self._cityCombo = self.builder.get_object("cityCombobox")
self._monthCombo = self.builder.get_object("monthCombobox")
self._dayCombo = self.builder.get_object("dayCombobox")
self._yearCombo = self.builder.get_object("yearCombobox")
self._daysFilter = self.builder.get_object("daysFilter")
self._daysFilter.set_visible_func(self.existing_date, None)
self._citiesFilter = self.builder.get_object("citiesFilter")
self._citiesFilter.set_visible_func(self.city_in_region, None)
self._hoursLabel = self.builder.get_object("hoursLabel")
self._minutesLabel = self.builder.get_object("minutesLabel")
self._amPmUp = self.builder.get_object("amPmUpButton")
self._amPmDown = self.builder.get_object("amPmDownButton")
self._amPmLabel = self.builder.get_object("amPmLabel")
self._radioButton24h = self.builder.get_object("timeFormatRB")
self._ntpSwitch = self.builder.get_object("networkTimeSwitch")
self._regions_zones = timezone.get_all_regions_and_timezones()
# Set the initial sensitivity of the AM/PM toggle based on the time-type selected
self._radioButton24h.emit("toggled")
if not flags.can_touch_runtime_system("modify system time and date"):
self._set_date_time_setting_sensitive(False)
self._config_dialog = NTPconfigDialog(self.data)
self._config_dialog.initialize()
threadMgr.add(AnacondaThread(name=constants.THREAD_DATE_TIME,
target=self._initialize))
def _initialize(self):
for day in xrange(1, 32):
self.add_to_store(self._daysStore, day)
self._months_nums = dict()
for i in xrange(1, 13):
#a bit hacky way, but should return the translated string
#TODO: how to handle language change? Clear and populate again?
month = datetime.date(2000, i, 1).strftime('%B')
self.add_to_store(self._monthsStore, month)
self._months_nums[month] = i
for year in xrange(1990, 2051):
self.add_to_store(self._yearsStore, year)
cities = set()
xlated_regions = ((region, get_xlated_timezone(region))
for region in self._regions_zones.iterkeys())
for region, xlated in sorted(xlated_regions, cmp=_compare_regions):
self.add_to_store_xlated(self._regionsStore, region, xlated)
for city in self._regions_zones[region]:
cities.add((city, get_xlated_timezone(city)))
for city, xlated in sorted(cities, cmp=_compare_cities):
self.add_to_store_xlated(self._citiesStore, city, xlated)
self._update_datetime_timer_id = None
if timezone.is_valid_timezone(self.data.timezone.timezone):
self._set_timezone(self.data.timezone.timezone)
elif not flags.flags.automatedInstall:
log.warning("%s is not a valid timezone, falling back to default "\
"(%s)" % (self.data.timezone.timezone, DEFAULT_TZ))
self._set_timezone(DEFAULT_TZ)
self.data.timezone.timezone = DEFAULT_TZ
time_init_thread = threadMgr.get(constants.THREAD_TIME_INIT)
if time_init_thread is not None:
hubQ.send_message(self.__class__.__name__,
_("Restoring hardware time..."))
threadMgr.wait(constants.THREAD_TIME_INIT)
hubQ.send_ready(self.__class__.__name__, False)
@property
def status(self):
if self.data.timezone.timezone:
if timezone.is_valid_timezone(self.data.timezone.timezone):
return _("%s timezone") % get_xlated_timezone(self.data.timezone.timezone)
else:
return _("Invalid timezone")
elif self._tzmap.get_timezone():
return _("%s timezone") % get_xlated_timezone(self._tzmap.get_timezone())
else:
return _("Nothing selected")
def apply(self):
# we could use self._tzmap.get_timezone() here, but it returns "" if
# Etc/XXXXXX timezone is selected
region = self._get_active_region()
city = self._get_active_city()
# nothing selected, just leave the spoke and
# return to hub without changing anything
if not region or not city:
return
old_tz = self.data.timezone.timezone
new_tz = region + "/" + city
self.data.timezone.timezone = new_tz
if old_tz != new_tz:
# new values, not from kickstart
self.data.timezone.seen = False
self._kickstarted = False
self.data.timezone.nontp = not self._ntpSwitch.get_active()
def execute(self):
if self._update_datetime_timer_id is not None:
GLib.source_remove(self._update_datetime_timer_id)
self._update_datetime_timer_id = None
self.data.timezone.setup(self.data)
@property
def ready(self):
return not threadMgr.get("AnaDateTimeThread")
@property
def completed(self):
if self._kickstarted and not self.data.timezone.seen:
# taking values from kickstart, but not specified
return False
else:
return timezone.is_valid_timezone(self.data.timezone.timezone)
@property
def mandatory(self):
return True
def refresh(self):
#update the displayed time
self._update_datetime_timer_id = GLib.timeout_add_seconds(1,
self._update_datetime)
self._start_updating_timer_id = None
if timezone.is_valid_timezone(self.data.timezone.timezone):
self._set_timezone(self.data.timezone.timezone)
self._update_datetime()
has_active_network = nm.nm_is_connected()
if not has_active_network:
self._show_no_network_warning()
else:
self.clear_info()
gtk_call_once(self._config_dialog.refresh_servers_state)
if flags.can_touch_runtime_system("get NTP service state"):
ntp_working = has_active_network and iutil.service_running(NTP_SERVICE)
else:
ntp_working = not self.data.timezone.nontp
self._ntpSwitch.set_active(ntp_working)
@gtk_action_nowait
def _set_timezone(self, timezone):
"""
Sets timezone to the city/region comboboxes and the timezone map.
:param timezone: timezone to set
:type timezone: str
:return: if successfully set or not
:rtype: bool
"""
parts = timezone.split("/", 1)
if len(parts) != 2:
# invalid timezone cannot be set
return False
region, city = parts
self._set_combo_selection(self._regionCombo, region)
self._set_combo_selection(self._cityCombo, city)
return True
@gtk_action_nowait
def add_to_store_xlated(self, store, item, xlated):
store.append([item, xlated])
@gtk_action_nowait
def add_to_store(self, store, item):
store.append([item])
def existing_date(self, model, itr, user_data=None):
if not itr:
return False
day = model[itr][0]
#days 1-28 are in every month every year
if day < 29:
return True
months_model = self._monthCombo.get_model()
months_iter = self._monthCombo.get_active_iter()
if not months_iter:
return True
month = months_model[months_iter][0]
years_model = self._yearCombo.get_model()
years_iter = self._yearCombo.get_active_iter()
if not years_iter:
return True
year = years_model[years_iter][0]
try:
datetime.date(year, self._months_nums[month], day)
return True
        except ValueError:
return False
def _get_active_city(self):
cities_model = self._cityCombo.get_model()
cities_iter = self._cityCombo.get_active_iter()
if not cities_iter:
return None
return cities_model[cities_iter][0]
def _get_active_region(self):
regions_model = self._regionCombo.get_model()
regions_iter = self._regionCombo.get_active_iter()
if not regions_iter:
return None
return regions_model[regions_iter][0]
def city_in_region(self, model, itr, user_data=None):
if not itr:
return False
city = model[itr][0]
region = self._get_active_region()
if not region:
return False
return city in self._regions_zones[region]
def _set_amPm_part_sensitive(self, sensitive):
for widget in (self._amPmUp, self._amPmDown, self._amPmLabel):
widget.set_sensitive(sensitive)
def _to_amPm(self, hours):
if hours >= 12:
day_phase = "PM"
else:
day_phase = "AM"
new_hours = ((hours - 1) % 12) + 1
return (new_hours, day_phase)
def _to_24h(self, hours, day_phase):
correction = 0
if day_phase == "AM" and hours == 12:
correction = -12
elif day_phase == "PM" and hours != 12:
correction = 12
return (hours + correction) % 24
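    # Worked examples of the two conversions above:
    #   _to_amPm(0)       -> (12, "AM")    _to_amPm(13)      -> (1, "PM")
    #   _to_24h(12, "AM") -> 0             _to_24h(12, "PM") -> 12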
def _update_datetime(self):
now = datetime.datetime.now()
if self._radioButton24h.get_active():
self._hoursLabel.set_text("%0.2d" % now.hour)
else:
hours, amPm = self._to_amPm(now.hour)
self._hoursLabel.set_text("%0.2d" % hours)
self._amPmLabel.set_text(amPm)
self._minutesLabel.set_text("%0.2d" % now.minute)
self._set_combo_selection(self._dayCombo, now.day)
self._set_combo_selection(self._monthCombo,
datetime.date(2000, now.month, 1).strftime('%B'))
self._set_combo_selection(self._yearCombo, now.year)
#GLib's timer is driven by the return value of the function.
        #It runs the function periodically while the returned value
#is True.
return True
def _save_system_time(self):
"""
Returning False from this method removes the timer that would
otherwise call it again and again.
"""
if not flags.can_touch_runtime_system("save system time"):
return False
month = self._get_combo_selection(self._monthCombo)
if not month:
return False
month = self._months_nums[month]
year_str = self._get_combo_selection(self._yearCombo)
if not year_str:
return False
year = int(year_str)
hours = int(self._hoursLabel.get_text())
if not self._radioButton24h.get_active():
hours = self._to_24h(hours, self._amPmLabel.get_text())
minutes = int(self._minutesLabel.get_text())
day = self._get_combo_selection(self._dayCombo)
#day may be None if there is no such in the selected year and month
if day:
day = int(day)
seconds = datetime.datetime.now().second
os.system("date -s '%0.2d/%0.2d/%0.4d %0.2d:%0.2d:%0.2d'" %
(month, day, year, hours, minutes, seconds))
#start the timer only when the spoke is shown
if self._update_datetime_timer_id is not None:
self._update_datetime_timer_id = GLib.timeout_add_seconds(1,
self._update_datetime)
#run only once (after first 2 seconds of inactivity)
return False
def _stop_and_maybe_start_time_updating(self, interval=2):
"""
This method is called in every date/time-setting button's callback.
It removes the timer for updating displayed date/time (do not want to
change it while user does it manually) and allows us to set new system
date/time only after $interval seconds long idle on time-setting buttons.
This is done by the _start_updating_timer that is reset in this method.
So when there is $interval seconds long idle on date/time-setting
buttons, self._save_system_time method is invoked. Since it returns
False, this timer is then removed and only reactivated in this method
(thus in some date/time-setting button's callback).
"""
#do not start timers if the spoke is not shown
if self._update_datetime_timer_id is None:
self._update_datetime()
self._save_system_time()
return
#stop time updating
GLib.source_remove(self._update_datetime_timer_id)
#stop previous $interval seconds timer (see below)
if self._start_updating_timer_id:
GLib.source_remove(self._start_updating_timer_id)
#let the user change date/time and after $interval seconds of inactivity
#save it as the system time and start updating the displayed date/time
self._start_updating_timer_id = GLib.timeout_add_seconds(interval,
self._save_system_time)
def _set_combo_selection(self, combo, item):
model = combo.get_model()
if not model:
return False
itr = model.get_iter_first()
while itr:
if model[itr][0] == item:
combo.set_active_iter(itr)
return True
itr = model.iter_next(itr)
return False
def _get_combo_selection(self, combo):
"""
Get the selected item of the combobox.
:return: selected item or None
"""
model = combo.get_model()
itr = combo.get_active_iter()
if not itr or not model:
return None
return model[itr][0]
def _restore_old_city_region(self):
"""Restore stored "old" (or last valid) values."""
# check if there are old values to go back to
if self._old_region and self._old_city:
self._set_timezone(self._old_region + "/" + self._old_city)
def on_up_hours_clicked(self, *args):
self._stop_and_maybe_start_time_updating()
hours = int(self._hoursLabel.get_text())
if self._radioButton24h.get_active():
new_hours = (hours + 1) % 24
else:
amPm = self._amPmLabel.get_text()
            #let's not deal with magical AM/PM arithmetic
new_hours = self._to_24h(hours, amPm)
new_hours, new_amPm = self._to_amPm((new_hours + 1) % 24)
self._amPmLabel.set_text(new_amPm)
new_hours_str = "%0.2d" % new_hours
self._hoursLabel.set_text(new_hours_str)
def on_down_hours_clicked(self, *args):
self._stop_and_maybe_start_time_updating()
hours = int(self._hoursLabel.get_text())
if self._radioButton24h.get_active():
new_hours = (hours - 1) % 24
else:
amPm = self._amPmLabel.get_text()
            #let's not deal with magical AM/PM arithmetic
new_hours = self._to_24h(hours, amPm)
new_hours, new_amPm = self._to_amPm((new_hours - 1) % 24)
self._amPmLabel.set_text(new_amPm)
new_hours_str = "%0.2d" % new_hours
self._hoursLabel.set_text(new_hours_str)
def on_up_minutes_clicked(self, *args):
self._stop_and_maybe_start_time_updating()
minutes = int(self._minutesLabel.get_text())
minutes_str = "%0.2d" % ((minutes + 1) % 60)
self._minutesLabel.set_text(minutes_str)
def on_down_minutes_clicked(self, *args):
self._stop_and_maybe_start_time_updating()
minutes = int(self._minutesLabel.get_text())
minutes_str = "%0.2d" % ((minutes - 1) % 60)
self._minutesLabel.set_text(minutes_str)
def on_up_ampm_clicked(self, *args):
self._stop_and_maybe_start_time_updating()
if self._amPmLabel.get_text() == "AM":
self._amPmLabel.set_text("PM")
else:
self._amPmLabel.set_text("AM")
def on_down_ampm_clicked(self, *args):
self._stop_and_maybe_start_time_updating()
if self._amPmLabel.get_text() == "AM":
self._amPmLabel.set_text("PM")
else:
self._amPmLabel.set_text("AM")
def on_region_changed(self, combo, *args):
"""
:see: on_city_changed
"""
region = self._get_active_region()
if not region or region == self._old_region:
# region entry being edited or old_value chosen, no action needed
# @see: on_city_changed
return
self._citiesFilter.refilter()
# Set the city to the first one available in this newly selected region.
zone = self._regions_zones[region]
firstCity = sorted(list(zone))[0]
self._set_combo_selection(self._cityCombo, firstCity)
self._old_region = region
self._old_city = firstCity
def on_city_changed(self, combo, *args):
"""
ComboBox emits ::changed signal not only when something is selected, but
also when its entry's text is changed. We need to distinguish between
those two cases ('London' typed in the entry => no action until ENTER is
hit etc.; 'London' chosen in the expanded combobox => update timezone
map and do all necessary actions). Fortunately when entry is being
edited, self._get_active_city returns None.
"""
timezone = None
region = self._get_active_region()
city = self._get_active_city()
if not region or not city or (region == self._old_region and
city == self._old_city):
# entry being edited or no change, no actions needed
return
if city and region:
timezone = region + "/" + city
else:
# both city and region are needed to form a valid timezone
return
if region == "Etc":
# Etc timezones cannot be displayed on the map, so let's set the map
# to "" which sets it to "Europe/London" (UTC) without a city pin
self._tzmap.set_timezone("", no_signal=True)
else:
# we don't want the timezone-changed signal to be emitted
self._tzmap.set_timezone(timezone, no_signal=True)
# update "old" values
self._old_city = city
def on_entry_left(self, entry, *args):
# user clicked somewhere else or hit TAB => finished editing
entry.emit("activate")
def on_city_region_key_released(self, entry, event, *args):
if event.type == Gdk.EventType.KEY_RELEASE and \
event.keyval == Gdk.KEY_Escape:
# editing canceled
self._restore_old_city_region()
def on_completion_match_selected(self, combo, model, itr):
item = None
if model and itr:
item = model[itr][0]
if item:
self._set_combo_selection(combo, item)
def on_city_region_text_entry_activated(self, entry):
combo = entry.get_parent()
model = combo.get_model()
entry_text = entry.get_text().lower()
for row in model:
if entry_text == row[0].lower():
self._set_combo_selection(combo, row[0])
return
# non-matching value entered, reset to old values
self._restore_old_city_region()
def on_month_changed(self, *args):
self._stop_and_maybe_start_time_updating(interval=5)
self._daysFilter.refilter()
def on_day_changed(self, *args):
self._stop_and_maybe_start_time_updating(interval=5)
def on_year_changed(self, *args):
self._stop_and_maybe_start_time_updating(interval=5)
self._daysFilter.refilter()
def on_timezone_changed(self, tz_map, timezone):
if self._set_timezone(timezone):
# timezone successfully set
os.environ["TZ"] = timezone
self._update_datetime()
def on_timeformat_changed(self, button24h, *args):
hours = int(self._hoursLabel.get_text())
amPm = self._amPmLabel.get_text()
#connected to 24-hour radio button
if button24h.get_active():
self._set_amPm_part_sensitive(False)
new_hours = self._to_24h(hours, amPm)
else:
self._set_amPm_part_sensitive(True)
new_hours, new_amPm = self._to_amPm(hours)
self._amPmLabel.set_text(new_amPm)
self._hoursLabel.set_text("%0.2d" % new_hours)
def _set_date_time_setting_sensitive(self, sensitive):
#contains all date/time setting widgets
footer_alignment = self.builder.get_object("footerAlignment")
footer_alignment.set_sensitive(sensitive)
def _show_no_network_warning(self):
self.set_warning(_("You need to set up networking first if you "\
"want to use NTP"))
self.window.show_all()
def _show_no_ntp_server_warning(self):
self.set_warning(_("You have no working NTP server configured"))
self.window.show_all()
def on_ntp_switched(self, switch, *args):
if switch.get_active():
#turned ON
if not flags.can_touch_runtime_system("start NTP service"):
#cannot touch runtime system, not much to do here
return
if not nm.nm_is_connected():
self._show_no_network_warning()
switch.set_active(False)
return
else:
self.clear_info()
working_server = self._config_dialog.working_server
if working_server is None:
self._show_no_ntp_server_warning()
else:
#we need a one-time sync here, because chronyd would not change
#the time as drastically as we need
ntp.one_time_sync_async(working_server)
ret = iutil.start_service(NTP_SERVICE)
self._set_date_time_setting_sensitive(False)
#if starting chronyd failed and chronyd is not running,
#set switch back to OFF
if (ret != 0) and not iutil.service_running(NTP_SERVICE):
switch.set_active(False)
else:
#turned OFF
if not flags.can_touch_runtime_system("stop NTP service"):
#cannot touch runtime system, nothing to do here
return
self._set_date_time_setting_sensitive(True)
ret = iutil.stop_service(NTP_SERVICE)
#if stopping chronyd failed and chronyd is running,
#set switch back to ON
if (ret != 0) and iutil.service_running(NTP_SERVICE):
switch.set_active(True)
self.clear_info()
def on_ntp_config_clicked(self, *args):
self._config_dialog.refresh()
with enlightbox(self.window, self._config_dialog.window):
response = self._config_dialog.run()
if response == 1:
self.data.timezone.ntpservers = self._config_dialog.servers
if self._config_dialog.working_server is None:
self._show_no_ntp_server_warning()
else:
self.clear_info()
|
liorvh/pythonpentest | refs/heads/master | multi_process.py | 2 | #!/usr/bin/env python
'''
Author: Christopher Duffy
Date: July 2015
Name: multi_process.py
Purpose: To identify live web applications with a list of IP addresses, using parallel processes
Copyright (c) 2015, Christopher Duffy All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met: * Redistributions
of source code must retain the above copyright notice, this list of conditions and
the following disclaimer. * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution. * Neither the
name of the nor the names of its contributors may be used to endorse or promote
products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL CHRISTOPHER DUFFY BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
import multiprocessing, urllib2, argparse, sys, logging, datetime, time
def host_request(host):
print("[*] Testing %s") % (str(host))
target = "http://" + host
target_secure = "https://" + host
timenow = time.time()
record = datetime.datetime.fromtimestamp(timenow).strftime('%Y-%m-%d %H:%M:%S')
logger = logging.getLogger(record)
try:
request = urllib2.Request(target)
request.get_method = lambda : 'HEAD'
response = urllib2.urlopen(request)
response_data = str(response.info())
logger.debug("[*] %s" % response_data)
response.close()
except:
response = None
response_data = None
    try:
        # HEAD request against the HTTPS endpoint, mirroring the HTTP check above
        request_secure = urllib2.Request(target_secure)
        request_secure.get_method = lambda : 'HEAD'
        response_secure = urllib2.urlopen(request_secure)
        response_secure_data = str(response_secure.info())
        logger.debug("[*] %s" % response_secure_data)
        response_secure.close()
    except:
        response_secure = None
        response_secure_data = None
if response_data != None and response_secure_data != None:
r = "[+] Insecure webserver detected at %s reported by %s" % (target, str(multiprocessing.Process().name))
rs = "[+] Secure webserver detected at %s reported by %s" % (target_secure, str(multiprocessing.Process().name))
logger.debug("[+] Insecure web server detected at %s and reported by process %s" % (str(target), str(multiprocessing.Process().name)))
logger.debug("[+] Secure web server detected at %s and reported by process %s" % (str(target_secure), str(multiprocessing.Process().name)))
return(r, rs)
elif response_data == None and response_secure_data == None:
r = "[-] No insecure webserver at %s reported by %s" % (target, str(multiprocessing.Process().name))
rs = "[-] No secure webserver at %s reported by %s" % (target_secure, str(multiprocessing.Process().name))
logger.debug("[-] Insecure web server was not detected at %s and reported by process %s" % (str(target), str(multiprocessing.Process().name)))
logger.debug("[-] Secure web server was not detected at %s and reported by process %s" % (str(target_secure), str(multiprocessing.Process().name)))
return(r, rs)
elif response_data != None and response_secure_data == None:
r = "[+] Insecure webserver detected at %s reported by %s" % (target, str(multiprocessing.Process().name))
rs = "[-] No secure webserver at %s reported by %s" % (target_secure, str(multiprocessing.Process().name))
logger.debug("[+] Insecure web server detected at %s and reported by process %s" % (str(target), str(multiprocessing.Process().name)))
logger.debug("[-] Secure web server was not detected at %s and reported by process %s" % (str(target_secure), str(multiprocessing.Process().name)))
return(r, rs)
elif response_secure_data != None and response_data == None:
r = "[-] No insecure webserver at %s reported by %s" % (target, str(multiprocessing.Process().name))
rs = "[+] Secure webserver detected at %s reported by %s" % (target_secure, str(multiprocessing.Process().name))
logger.debug("[-] Insecure web server was not detected at %s and reported by process %s" % (str(target), str(multiprocessing.Process().name)))
logger.debug("[+] Secure web server detected at %s and reported by process %s" % (str(target_secure), str(multiprocessing.Process().name)))
return(r, rs)
else:
logger.debug("[-] No results were recorded for %s or %s" % (str(target), str(target_secure)))
def log_init(log):
level = logging.DEBUG # Logging level
format = logging.Formatter("%(asctime)s [%(threadName)-12.12s] [%(levelname)-5.5s] %(message)s") # Log format
logger_obj = logging.getLogger() # Getter for logging agent
file_handler = logging.FileHandler(log) # File Handler
#stderr_handler = logging.StreamHandler() # STDERR Handler
targets_list = []
# Configure logger formats for STDERR and output file
file_handler.setFormatter(format)
#stderr_handler.setFormatter(format)
# Configure logger object
logger_obj.addHandler(file_handler)
#logger_obj.addHandler(stderr_handler)
logger_obj.setLevel(level)
def main():
# If script is executed at the CLI
    usage = '''usage: %(prog)s [-t hostfile] [-l logfile.log] [-m 2] -q -v -vv -vvv'''
parser = argparse.ArgumentParser(usage=usage)
parser.add_argument("-t", action="store", dest="targets", default=None, help="Filename for hosts to test")
parser.add_argument("-m", "--multi", action="store", dest="multiprocess", default=1, type=int, help="Number of proceses, defaults to 1")
parser.add_argument("-l", "--logfile", action="store", dest="log", default="results.log", type=str, help="The log file to output the results")
parser.add_argument("-v", action="count", dest="verbose", default=1, help="Verbosity level, defaults to one, this outputs each command and result")
parser.add_argument("-q", action="store_const", dest="verbose", const=0, help="Sets the results to be quiet")
parser.add_argument('--version', action='version', version='%(prog)s 0.42b')
args = parser.parse_args()
# Argument Validator
if len(sys.argv)==1:
parser.print_help()
sys.exit(1)
if (args.targets == None):
parser.print_help()
sys.exit(1)
# Set Constructors
targets = args.targets # Targets to be parsed
verbose = args.verbose # Verbosity level
    processes = args.multiprocess # Number of worker processes
log = args.log # Configure the log output file
if ".log" not in log:
log = log + ".log"
# Load the targets into a list and remove trailing "\n"
with open(targets) as f:
targets_list = [line.rstrip() for line in f.readlines()]
    # Establish the worker process pool; log_init runs once in each worker
    pool = multiprocessing.Pool(processes=processes, initializer=log_init, initargs=(log,))
# Queue up the targets to assess
results = pool.map(host_request, targets_list)
for result in results:
for value in result:
print(value)
if __name__ == '__main__':
main()
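# Example invocation (file names are illustrative):
#   python multi_process.py -t hosts.txt -m 4 -l results.log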
|
jesparza/peepdf | refs/heads/master | jsbeautifier/unpackers/packer.py | 76 | #
# Unpacker for Dean Edward's p.a.c.k.e.r, a part of javascript beautifier
# by Einar Lielmanis <[email protected]>
#
# written by Stefano Sanfilippo <[email protected]>
#
# usage:
#
# if detect(some_string):
# unpacked = unpack(some_string)
#
"""Unpacker for Dean Edward's p.a.c.k.e.r"""
import re
import string
from jsbeautifier.unpackers import UnpackingError
PRIORITY = 1
def detect(source):
"""Detects whether `source` is P.A.C.K.E.R. coded."""
return source.replace(' ', '').startswith('eval(function(p,a,c,k,e,r')
def unpack(source):
"""Unpacks P.A.C.K.E.R. packed js code."""
payload, symtab, radix, count = _filterargs(source)
if count != len(symtab):
raise UnpackingError('Malformed p.a.c.k.e.r. symtab.')
try:
unbase = Unbaser(radix)
except TypeError:
raise UnpackingError('Unknown p.a.c.k.e.r. encoding.')
def lookup(match):
"""Look up symbols in the synthetic symtab."""
word = match.group(0)
return symtab[unbase(word)] or word
source = re.sub(r'\b\w+\b', lookup, payload)
return _replacestrings(source)
def _filterargs(source):
"""Juice from a source file the four args needed by decoder."""
argsregex = (r"}\('(.*)', *(\d+), *(\d+), *'(.*)'\."
r"split\('\|'\), *(\d+), *(.*)\)\)")
args = re.search(argsregex, source, re.DOTALL).groups()
try:
return args[0], args[3].split('|'), int(args[1]), int(args[2])
except ValueError:
raise UnpackingError('Corrupted p.a.c.k.e.r. data.')
def _replacestrings(source):
"""Strip string lookup table (list) and replace values in source."""
match = re.search(r'var *(_\w+)\=\["(.*?)"\];', source, re.DOTALL)
if match:
varname, strings = match.groups()
startpoint = len(match.group(0))
lookup = strings.split('","')
variable = '%s[%%d]' % varname
for index, value in enumerate(lookup):
source = source.replace(variable % index, '"%s"' % value)
return source[startpoint:]
return source
class Unbaser(object):
"""Functor for a given base. Will efficiently convert
strings to natural numbers."""
ALPHABET = {
62 : '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ',
95 : (' !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ'
'[\]^_`abcdefghijklmnopqrstuvwxyz{|}~')
}
def __init__(self, base):
self.base = base
# If base can be handled by int() builtin, let it do it for us
if 2 <= base <= 36:
self.unbase = lambda string: int(string, base)
else:
# Build conversion dictionary cache
try:
self.dictionary = dict((cipher, index) for
index, cipher in enumerate(self.ALPHABET[base]))
except KeyError:
raise TypeError('Unsupported base encoding.')
self.unbase = self._dictunbaser
def __call__(self, string):
return self.unbase(string)
def _dictunbaser(self, string):
"""Decodes a value to an integer."""
ret = 0
for index, cipher in enumerate(string[::-1]):
ret += (self.base ** index) * self.dictionary[cipher]
return ret
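# Minimal usage sketch (the packed string below is a made-up example, not real
# packed output):
#   packed = "eval(function(p,a,c,k,e,r){...}('0 1', 62, 2, 'foo|bar'.split('|'), 0, {}))"
#   if detect(packed):
#       print unpack(packed)   # -> "foo bar"
# The Unbaser can also be exercised directly, e.g. Unbaser(62)('10') == 62.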
|
sgammon/libcloud | refs/heads/trunk | docs/examples/compute/openstack_floating_ips.py | 63 | from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
import libcloud.security
# This assumes you don't have SSL set up.
# Note: Code like this poses a security risk (MITM attack) and
# that's the reason why you should never use it for anything else
# besides testing. You have been warned.
libcloud.security.VERIFY_SSL_CERT = False
OpenStack = get_driver(Provider.OPENSTACK)
driver = OpenStack('your_auth_username', 'your_auth_password',
ex_force_auth_url='http://10.0.4.1:5000',
ex_force_auth_version='2.0_password',
ex_tenant_name='your_tenant')
# get the first pool - public by default
pool = driver.ex_list_floating_ip_pools()[0]
# create an ip in the pool
floating_ip = pool.create_floating_ip()
# get the node, note: change the node id to the some id you have
node = driver.ex_get_node_details('922a4381-a18c-487f-b816-cc31c9060853')
# attach the ip to the node
driver.ex_attach_floating_ip_to_node(node, floating_ip)
# remove it from the node
driver.ex_detach_floating_ip_from_node(node, floating_ip)
# delete the ip
floating_ip.delete()
|
hanw/p4-hlir | refs/heads/master | p4_hlir/graphs/__init__.py | 6 | # Copyright 2013-present Barefoot Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
whs/django | refs/heads/master | django/contrib/postgres/aggregates/statistics.py | 17 | from django.db.models import FloatField, IntegerField
from django.db.models.aggregates import Aggregate
__all__ = [
'CovarPop', 'Corr', 'RegrAvgX', 'RegrAvgY', 'RegrCount', 'RegrIntercept',
'RegrR2', 'RegrSlope', 'RegrSXX', 'RegrSXY', 'RegrSYY', 'StatAggregate',
]
class StatAggregate(Aggregate):
def __init__(self, y, x, output_field=FloatField()):
if not x or not y:
raise ValueError('Both y and x must be provided.')
super().__init__(y, x, output_field=output_field)
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
return super().resolve_expression(query, allow_joins, reuse, summarize)
class Corr(StatAggregate):
function = 'CORR'
class CovarPop(StatAggregate):
def __init__(self, y, x, sample=False):
self.function = 'COVAR_SAMP' if sample else 'COVAR_POP'
super().__init__(y, x)
class RegrAvgX(StatAggregate):
function = 'REGR_AVGX'
class RegrAvgY(StatAggregate):
function = 'REGR_AVGY'
class RegrCount(StatAggregate):
function = 'REGR_COUNT'
def __init__(self, y, x):
super().__init__(y=y, x=x, output_field=IntegerField())
def convert_value(self, value, expression, connection, context):
if value is None:
return 0
return int(value)
class RegrIntercept(StatAggregate):
function = 'REGR_INTERCEPT'
class RegrR2(StatAggregate):
function = 'REGR_R2'
class RegrSlope(StatAggregate):
function = 'REGR_SLOPE'
class RegrSXX(StatAggregate):
function = 'REGR_SXX'
class RegrSXY(StatAggregate):
function = 'REGR_SXY'
class RegrSYY(StatAggregate):
function = 'REGR_SYY'
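# Usage sketch (the model and field names are hypothetical):
#   from django.contrib.postgres.aggregates import Corr, RegrCount
#   Book.objects.aggregate(correlation=Corr(y='price', x='rating'))
#   Book.objects.aggregate(pairs=RegrCount(y='price', x='rating'))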
|
luzfcb/django-autocomplete-light | refs/heads/master | test_project/tests/models.py | 5 | from django.contrib.auth.models import User
from django.db.models.signals import post_migrate
def test_user(sender, *args, **kwargs):
if sender.name != 'django.contrib.auth':
return
user, c = User.objects.get_or_create(username='test')
user.is_staff = True
user.is_superuser = True
user.set_password('test')
user.save()
post_migrate.connect(test_user)
|
marcoantoniooliveira/labweb | refs/heads/master | oscar/lib/python2.7/site-packages/sphinx/writers/websupport.py | 3 | # -*- coding: utf-8 -*-
"""
sphinx.writers.websupport
~~~~~~~~~~~~~~~~~~~~~~~~~
sphinx.websupport writer that adds comment-related annotations.
:copyright: Copyright 2007-2014 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from sphinx.writers.html import HTMLTranslator
from sphinx.util.websupport import is_commentable
class WebSupportTranslator(HTMLTranslator):
"""
Our custom HTML translator.
"""
def __init__(self, builder, *args, **kwargs):
HTMLTranslator.__init__(self, builder, *args, **kwargs)
self.comment_class = 'sphinx-has-comment'
def dispatch_visit(self, node):
if is_commentable(node):
self.handle_visit_commentable(node)
HTMLTranslator.dispatch_visit(self, node)
def handle_visit_commentable(self, node):
# We will place the node in the HTML id attribute. If the node
# already has an id (for indexing purposes) put an empty
# span with the existing id directly before this node's HTML.
self.add_db_node(node)
if node.attributes['ids']:
self.body.append('<span id="%s"></span>'
% node.attributes['ids'][0])
node.attributes['ids'] = ['s%s' % node.uid]
node.attributes['classes'].append(self.comment_class)
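        # For a node that already carried id "intro" and uid 42, the rendered
        # HTML ends up roughly as (illustrative):
        #   <span id="intro"></span><p id="s42" class="sphinx-has-comment">...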
def add_db_node(self, node):
storage = self.builder.storage
if not storage.has_node(node.uid):
storage.add_node(id=node.uid,
document=self.builder.current_docname,
source=node.rawsource or node.astext())
|
kdrone/crazyflie-python-client | refs/heads/master | build/lib.linux-i686-2.7/cflib/bootloader/__init__.py | 4 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# || ____ _ __
# +------+ / __ )(_) /_______________ _____ ___
# | 0xBC | / __ / / __/ ___/ ___/ __ `/_ / / _ \
# +------+ / /_/ / / /_/ /__/ / / /_/ / / /_/ __/
# || || /_____/_/\__/\___/_/ \__,_/ /___/\___/
#
# Copyright (C) 2011-2013 Bitcraze AB
#
# Crazyflie Nano Quadcopter Client
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
"""
Bootloading utilities for the Crazyflie.
"""
from cflib.utils.callbacks import Caller
from .cloader import Cloader
from .boottypes import BootVersion, TargetTypes, Target
import zipfile
import json
import sys
import time
__author__ = 'Bitcraze AB'
__all__ = ['Bootloader']
class Bootloader:
"""Bootloader utility for the Crazyflie"""
def __init__(self, clink=None):
"""Init the communication class by starting to comunicate with the
link given. clink is the link address used after reseting to the
bootloader.
The device is actually considered in firmware mode.
"""
self.clink = clink
self.in_loader = False
self.page_size = 0
self.buffer_pages = 0
self.flash_pages = 0
self.start_page = 0
self.cpuid = "N/A"
self.error_code = 0
self.protocol_version = 0
# Outgoing callbacks for progress
# int
self.progress_cb = None
# msg
self.error_cb = None
# bool
self.in_bootloader_cb = None
# Target
self.dev_info_cb = None
#self.dev_info_cb.add_callback(self._dev_info)
#self.in_bootloader_cb.add_callback(self._bootloader_info)
self._boot_plat = None
self._cload = Cloader(clink,
info_cb=self.dev_info_cb,
in_boot_cb=self.in_bootloader_cb)
def start_bootloader(self, warm_boot=False):
if warm_boot:
self._cload.open_bootloader_uri(self.clink)
started = self._cload.reset_to_bootloader(TargetTypes.NRF51)
if started:
started = self._cload.check_link_and_get_info()
else:
uri = self._cload.scan_for_bootloader()
# Workaround for libusb on Windows (open/close too fast)
time.sleep(1)
if uri:
self._cload.open_bootloader_uri(uri)
started = self._cload.check_link_and_get_info()
else:
started = False
if started:
self.protocol_version = self._cload.protocol_version
if self.protocol_version == BootVersion.CF1_PROTO_VER_0 or\
self.protocol_version == BootVersion.CF1_PROTO_VER_1:
# Nothing more to do
pass
elif self.protocol_version == BootVersion.CF2_PROTO_VER:
self._cload.request_info_update(TargetTypes.NRF51)
else:
print "Bootloader protocol 0x{:X} not supported!".self.protocol_version
return started
def get_target(self, target_id):
return self._cload.request_info_update(target_id)
def flash(self, filename, targets):
for target in targets:
if TargetTypes.from_string(target) not in self._cload.targets:
print "Target {} not found by bootloader".format(target)
return False
files_to_flash = ()
if zipfile.is_zipfile(filename):
# Read the manifest (don't forget to check so there is one!)
try:
zf = zipfile.ZipFile(filename)
j = json.loads(zf.read("manifest.json"))
files = j["files"]
if len(targets) == 0:
# No targets specified, just flash everything
for file in files:
if files[file]["target"] in targets:
targets[files[file]["target"]] += (files[file]["type"], )
else:
targets[files[file]["target"]] = (files[file]["type"], )
zip_targets = {}
for file in files:
file_name = file
file_info = files[file]
if file_info["target"] in zip_targets:
zip_targets[file_info["target"]][file_info["type"]] = {"filename": file_name}
else:
zip_targets[file_info["target"]] = {}
zip_targets[file_info["target"]][file_info["type"]] = {"filename": file_name}
except KeyError as e:
print e
print "No manifest.json in {}".format(filename)
return
try:
# Match and create targets
for target in targets:
t = targets[target]
for type in t:
file_to_flash = {}
current_target = "{}-{}".format(target, type)
file_to_flash["type"] = type
# Read the data, if this fails we bail
file_to_flash["target"] = self._cload.targets[TargetTypes.from_string(target)]
file_to_flash["data"] = zf.read(zip_targets[target][type]["filename"])
files_to_flash += (file_to_flash, )
except KeyError as e:
print "Could not find a file for {} in {}".format(current_target, filename)
return False
else:
if len(targets) != 1:
print "Not an archive, must supply one target to flash"
else:
file_to_flash = {}
file_to_flash["type"] = "binary"
f = open(filename, 'rb')
for t in targets:
file_to_flash["target"] = self._cload.targets[TargetTypes.from_string(t)]
file_to_flash["type"] = targets[t][0]
file_to_flash["data"] = f.read()
f.close()
files_to_flash += (file_to_flash, )
if not self.progress_cb:
print ""
file_counter = 0
for target in files_to_flash:
file_counter += 1
self._internal_flash(target, file_counter, len(files_to_flash))
def reset_to_firmware(self):
if self._cload.protocol_version == BootVersion.CF2_PROTO_VER:
self._cload.reset_to_firmware(TargetTypes.NRF51)
else:
self._cload.reset_to_firmware(TargetTypes.STM32)
def close(self):
if self._cload:
self._cload.close()
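    # Minimal usage sketch (the link URI, file name and target dict below are
    # illustrative):
    #   bl = Bootloader("radio://0/80/250K")
    #   if bl.start_bootloader(warm_boot=True):
    #       bl.flash("cf2.bin", {"stm32": ("fw",)})
    #       bl.reset_to_firmware()
    #   bl.close()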
def _internal_flash(self, target, current_file_number, total_files):
image = target["data"]
t_data = target["target"]
# If used from a UI we need some extra things for reporting progress
factor = (100.0 * t_data.page_size) / len(image)
progress = 0
if self.progress_cb:
self.progress_cb("({}/{}) Starting...".format(current_file_number, total_files), int(progress))
else:
sys.stdout.write("Flashing {} of {} to {} ({}): ".format(current_file_number,
total_files,
TargetTypes.to_string(t_data.id),
target["type"]))
sys.stdout.flush()
if len(image) > ((t_data.flash_pages - t_data.start_page) *
t_data.page_size):
if self.progress_cb:
self.progress_cb("Error: Not enough space to flash the image file.", int(progress))
else:
print "Error: Not enough space to flash the image file."
raise Exception()
if not self.progress_cb:
sys.stdout.write(("%d bytes (%d pages) " % ((len(image) - 1),
int(len(image) / t_data.page_size) + 1)))
sys.stdout.flush()
#For each page
ctr = 0 # Buffer counter
for i in range(0, int((len(image) - 1) / t_data.page_size) + 1):
#Load the buffer
if ((i + 1) * t_data.page_size) > len(image):
self._cload.upload_buffer(t_data.addr, ctr, 0, image[i * t_data.page_size:])
else:
self._cload.upload_buffer(t_data.addr, ctr, 0, image[i * t_data.page_size:
(i + 1) * t_data.page_size])
ctr += 1
if self.progress_cb:
progress += factor
self.progress_cb("({}/{}) Uploading buffer to {}...".format(current_file_number,
total_files,
TargetTypes.to_string(t_data.id)),
int(progress))
else:
sys.stdout.write(".")
sys.stdout.flush()
#Flash when the complete buffers are full
if ctr >= t_data.buffer_pages:
if self.progress_cb:
self.progress_cb("({}/{}) Writing buffer to {}...".format(current_file_number,
total_files,
TargetTypes.to_string(t_data.id)),
int(progress))
else:
sys.stdout.write("%d" % ctr)
sys.stdout.flush()
if not self._cload.write_flash(t_data.addr, 0,
t_data.start_page + i - (ctr - 1),
ctr):
if self.progress_cb:
self.progress_cb("Error during flash operation (code %d)".format(self._cload.error_code),
int(progress))
else:
print "\nError during flash operation (code %d). Maybe"\
" wrong radio link?" % self._cload.error_code
raise Exception()
ctr = 0
if ctr > 0:
if self.progress_cb:
self.progress_cb("({}/{}) Writing buffer to {}...".format(current_file_number,
total_files,
TargetTypes.to_string(t_data.id)),
int(progress))
else:
sys.stdout.write("%d" % ctr)
sys.stdout.flush()
if not self._cload.write_flash(t_data.addr,
0,
(t_data.start_page +
(int((len(image) - 1) / t_data.page_size)) -
(ctr - 1)),
ctr):
if self.progress_cb:
self.progress_cb("Error during flash operation (code %d)".format(self._cload.error_code),
int(progress))
else:
print "\nError during flash operation (code %d). Maybe"\
" wrong radio link?" % self._cload.error_code
raise Exception()
if self.progress_cb:
self.progress_cb("({}/{}) Flashing done!".format(current_file_number, total_files),
int(progress))
else:
print "" |
1ns/project-r2 | refs/heads/master | web/themes/contrib/bootstrap_sass_starterkit/node_modules/node-gyp/gyp/tools/graphviz.py | 2679 | #!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Using the JSON dumped by the dump-dependency-json generator,
generate input suitable for graphviz to render a dependency graph of
targets."""
import collections
import json
import sys
def ParseTarget(target):
target, _, suffix = target.partition('#')
filename, _, target = target.partition(':')
return filename, target, suffix
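# For example (a sketch, names are hypothetical):
#   ParseTarget('foo/bar.gyp:mylib#host') -> ('foo/bar.gyp', 'mylib', 'host')
#   ParseTarget('foo/bar.gyp:mylib')      -> ('foo/bar.gyp', 'mylib', '')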
def LoadEdges(filename, targets):
"""Load the edges map from the dump file, and filter it to only
  show targets in |targets| and their dependents."""
  f = open(filename)
  edges = json.load(f)
  f.close()
# Copy out only the edges we're interested in from the full edge list.
target_edges = {}
to_visit = targets[:]
while to_visit:
src = to_visit.pop()
if src in target_edges:
continue
target_edges[src] = edges[src]
to_visit.extend(edges[src])
return target_edges
def WriteGraph(edges):
"""Print a graphviz graph to stdout.
|edges| is a map of target to a list of other targets it depends on."""
# Bucket targets by file.
files = collections.defaultdict(list)
for src, dst in edges.items():
build_file, target_name, toolset = ParseTarget(src)
files[build_file].append(src)
print 'digraph D {'
print ' fontsize=8' # Used by subgraphs.
print ' node [fontsize=8]'
# Output nodes by file. We must first write out each node within
# its file grouping before writing out any edges that may refer
# to those nodes.
for filename, targets in files.items():
if len(targets) == 1:
# If there's only one node for this file, simplify
# the display by making it a box without an internal node.
target = targets[0]
build_file, target_name, toolset = ParseTarget(target)
print ' "%s" [shape=box, label="%s\\n%s"]' % (target, filename,
target_name)
else:
# Group multiple nodes together in a subgraph.
print ' subgraph "cluster_%s" {' % filename
print ' label = "%s"' % filename
for target in targets:
build_file, target_name, toolset = ParseTarget(target)
print ' "%s" [label="%s"]' % (target, target_name)
print ' }'
# Now that we've placed all the nodes within subgraphs, output all
# the edges between nodes.
for src, dsts in edges.items():
for dst in dsts:
print ' "%s" -> "%s"' % (src, dst)
print '}'
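# Shape of the emitted output for one build file with two targets (a sketch
# with hypothetical names, not captured output):
#
#   digraph D {
#     fontsize=8
#     node [fontsize=8]
#     subgraph "cluster_foo.gyp" {
#       label = "foo.gyp"
#       "foo.gyp:a#host" [label="a"]
#       "foo.gyp:b#host" [label="b"]
#     }
#     "foo.gyp:a#host" -> "foo.gyp:b#host"
#   }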
def main():
if len(sys.argv) < 2:
print >>sys.stderr, __doc__
print >>sys.stderr
print >>sys.stderr, 'usage: %s target1 target2...' % (sys.argv[0])
return 1
edges = LoadEdges('dump.json', sys.argv[1:])
WriteGraph(edges)
return 0
if __name__ == '__main__':
sys.exit(main())
|
chenruixuan/PopClip-Extensions | refs/heads/master | source/Trello/requests/packages/urllib3/util/response.py | 928 | def is_fp_closed(obj):
"""
Checks whether a given file-like object is closed.
:param obj:
The file-like object to check.
"""
try:
# Check via the official file-like-object way.
return obj.closed
except AttributeError:
pass
try:
# Check if the object is a container for another file-like object that
# gets released on exhaustion (e.g. HTTPResponse).
return obj.fp is None
except AttributeError:
pass
raise ValueError("Unable to determine whether fp is closed.")
|
elkingtoncode/Pysense | refs/heads/master | setup.py | 19 | import setuptools
setuptools.setup(
setup_requires=['pbr'],
pbr=True)
|
cs-au-dk/Artemis | refs/heads/master | contrib/Z3/python/mk_z3tactics.py | 1 | ############################################
# Copyright (c) 2012 Microsoft Corporation
#
# Extract tactics and probes from install_tactics.cpp
#
# Author: Leonardo de Moura (leonardo)
############################################
import re
import os
tactic_pat = re.compile("^[ \t]*ADD_TACTIC_CMD")
probe_pat = re.compile("^[ \t]*ADD_PROBE")
cppfile = open('..%slib%sinstall_tactics.cpp' % (os.sep, os.sep), 'r')
z3tactics = open('z3tactics.py', 'w')
z3tactics.write('# Automatically generated file, generator: mk_z3tactics.py\n')
z3tactics.write('import z3core\n')
z3tactics.write('import z3\n\n')
for line in cppfile:
m1 = tactic_pat.match(line)
m2 = probe_pat.match(line)
if m1:
words = re.split('[^\-a-zA-Z0-9_]+', line)
tactic = words[2]
py_tactic = tactic.replace('-', '_')
z3tactics.write('def %s_tactic(ctx=None):\n' % py_tactic)
z3tactics.write(' ctx = z3._get_ctx(ctx)\n')
z3tactics.write(' return z3.Tactic(z3core.Z3_mk_tactic(ctx.ref(), \'%s\'), ctx)\n\n' % tactic)
elif m2:
words = re.split('[^\-a-zA-Z0-9_]+', line)
probe = words[2]
py_probe = probe.replace('-', '_')
z3tactics.write('def %s_probe(ctx=None):\n' % py_probe)
z3tactics.write(' ctx = z3._get_ctx(ctx)\n')
z3tactics.write(' return z3.Probe(z3core.Z3_mk_probe(ctx.ref(), \'%s\'), ctx)\n\n' % probe)
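# For an input line such as ADD_TACTIC_CMD("simplify", ...) (a hypothetical
# example), the loop above would emit:
#
#   def simplify_tactic(ctx=None):
#       ctx = z3._get_ctx(ctx)
#       return z3.Tactic(z3core.Z3_mk_tactic(ctx.ref(), 'simplify'), ctx)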
|
nett55/scrapy | refs/heads/0.20 | scrapy/tests/test_link.py | 27 | import unittest
import warnings
from scrapy.link import Link
class LinkTest(unittest.TestCase):
def _assert_same_links(self, link1, link2):
self.assertEqual(link1, link2)
self.assertEqual(hash(link1), hash(link2))
def _assert_different_links(self, link1, link2):
self.assertNotEqual(link1, link2)
self.assertNotEqual(hash(link1), hash(link2))
def test_eq_and_hash(self):
l1 = Link("http://www.example.com")
l2 = Link("http://www.example.com/other")
l3 = Link("http://www.example.com")
self._assert_same_links(l1, l1)
self._assert_different_links(l1, l2)
self._assert_same_links(l1, l3)
l4 = Link("http://www.example.com", text="test")
l5 = Link("http://www.example.com", text="test2")
l6 = Link("http://www.example.com", text="test")
self._assert_same_links(l4, l4)
self._assert_different_links(l4, l5)
self._assert_same_links(l4, l6)
l7 = Link("http://www.example.com", text="test", fragment='something', nofollow=False)
l8 = Link("http://www.example.com", text="test", fragment='something', nofollow=False)
l9 = Link("http://www.example.com", text="test", fragment='something', nofollow=True)
l10 = Link("http://www.example.com", text="test", fragment='other', nofollow=False)
self._assert_same_links(l7, l8)
self._assert_different_links(l7, l9)
self._assert_different_links(l7, l10)
def test_repr(self):
l1 = Link("http://www.example.com", text="test", fragment='something', nofollow=True)
l2 = eval(repr(l1))
self._assert_same_links(l1, l2)
def test_unicode_url(self):
with warnings.catch_warnings(record=True) as w:
l = Link(u"http://www.example.com/\xa3")
assert isinstance(l.url, str)
assert l.url == 'http://www.example.com/\xc2\xa3'
assert len(w) == 1, "warning not issued"
|
JohnGriffiths/nipype | refs/heads/master | nipype/workflows/graph/setup.py | 14 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('graph', parent_package, top_path)
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
|
mfwarren/FreeCoding | refs/heads/master | 2015/02/fc_2015_02_27.py | 1 | #!/usr/bin/env python3
# imports go here
import inspect
#
# Free Coding session for 2015-02-27
# Written by Matt Warren
#
def f(arg):
print(arg)
class Foo:
"""
sample comment
"""
def method(self, arg):
print(arg)
if __name__ == '__main__':
print("f is method? %s" % inspect.ismethod(f))
print("f is function? %s" % inspect.isfunction(f))
print("f signature %s" % inspect.signature(f))
print("Foo is class? %s" % inspect.isclass(Foo))
foo = Foo()
print("foo is class? %s" % inspect.isclass(foo))
print("foo is method? %s" % inspect.ismethod(foo.method))
print("members of foo %s" % inspect.getmembers(foo))
print(inspect.getdoc(Foo))
frame = inspect.currentframe()
print(inspect.getframeinfo(frame))
|
maxvogel/NetworKit-mirror2 | refs/heads/master | networkit/auxiliary.py | 1 | """
General Python helpers.
""" |
A-t48/-tg-station | refs/heads/master | tools/travis/template_dm_generator.py | 132 | #!/usr/bin/env python
import os
import os.path
import sys
folders = ["_maps/RandomRuins", "_maps/RandomZLevels", "_maps/shuttles",
"_maps/templates"]
generated = "_maps/templates.dm"
template_filenames = []
def find_dm(path):
L = []
for dirpath, dirnames, filenames in os.walk(path):
for name in filenames:
if name.endswith(".dmm"):
s = os.path.join(dirpath, name)
s = s.replace("_maps/","")
L.append(s)
return L
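# Example (hypothetical filename): find_dm("_maps/shuttles") would return
# entries like "shuttles/emergency.dmm" -- the leading "_maps/" is stripped so
# the generated #include paths are relative to the _maps directory.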
for folder in folders:
template_filenames.extend(find_dm(folder))
with open(generated, 'w') as f:
for template in template_filenames:
f.write('''#include "{}"\n'''.format(template))
|
jean/sentry | refs/heads/master | tests/sentry/similarity/test_encoder.py | 4 | from __future__ import absolute_import
import pytest
import six
from sentry.similarity.encoder import Encoder
def test_builtin_types():
encoder = Encoder()
values = [
1,
1.1,
b'\x00\x01\x02',
u'\N{SNOWMAN}',
('a', 'b', 'c'),
['a', 'b', 'c'],
{
'a': 1,
'b': 2,
'c': 3
},
set(['a', 'b', 'c']),
frozenset(['a', 'b', 'c']),
[{
'a': 1
}, set('b'), ['c'], u'text'],
]
try:
values.append(long(1)) # noqa
except NameError:
pass
for value in values:
encoded = encoder.dumps(value)
assert isinstance(encoded, six.binary_type)
with pytest.raises(TypeError):
encoder.dumps(object())
def test_custom_types():
class Widget(object):
def __init__(self, color):
self.color = color
encoder = Encoder({
Widget: lambda i: {
'color': i.color, },
})
assert encoder.dumps(
Widget('red'),
) == encoder.dumps({
'color': 'red',
})
|
jkstrick/samba | refs/heads/master | buildtools/wafsamba/samba_perl.py | 27 | import Build
from samba_utils import *
from Configure import conf
done = {}
@conf
def SAMBA_CHECK_PERL(conf, mandatory=True, version=(5,0,0)):
#
# TODO: use the @runonce mechanism for this.
# The problem is that @runonce currently does
# not seem to work together with @conf...
# So @runonce (and/or) @conf needs fixing.
#
if "done" in done:
return
done["done"] = True
conf.find_program('perl', var='PERL', mandatory=mandatory)
conf.check_tool('perl')
path_perl = conf.find_program('perl')
conf.env.PERL_SPECIFIED = (conf.env.PERL != path_perl)
conf.check_perl_version(version)
def read_perl_config_var(cmd):
return Utils.to_list(Utils.cmd_output([conf.env.PERL, '-MConfig', '-e', cmd]))
def check_perl_config_var(var):
conf.start_msg("Checking for perl $Config{%s}:" % var)
try:
v = read_perl_config_var('print $Config{%s}' % var)[0]
conf.end_msg("'%s'" % (v), 'GREEN')
return v
except IndexError:
conf.end_msg(False, 'YELLOW')
pass
return None
vendor_prefix = check_perl_config_var('vendorprefix')
perl_arch_install_dir = None
    if vendor_prefix == conf.env.PREFIX:
        perl_arch_install_dir = check_perl_config_var('vendorarch')
    if perl_arch_install_dir is None:
        perl_arch_install_dir = "${LIBDIR}/perl5"
conf.start_msg("PERL_ARCH_INSTALL_DIR: ")
conf.end_msg("'%s'" % (perl_arch_install_dir), 'GREEN')
conf.env.PERL_ARCH_INSTALL_DIR = perl_arch_install_dir
perl_lib_install_dir = None
    if vendor_prefix == conf.env.PREFIX:
        perl_lib_install_dir = check_perl_config_var('vendorlib')
    if perl_lib_install_dir is None:
        perl_lib_install_dir = "${DATADIR}/perl5"
conf.start_msg("PERL_LIB_INSTALL_DIR: ")
conf.end_msg("'%s'" % (perl_lib_install_dir), 'GREEN')
conf.env.PERL_LIB_INSTALL_DIR = perl_lib_install_dir
perl_inc = read_perl_config_var('print "@INC"')
perl_inc.remove('.')
conf.start_msg("PERL_INC: ")
conf.end_msg("%s" % (perl_inc), 'GREEN')
conf.env.PERL_INC = perl_inc
|
breznak/nupic | refs/heads/master | examples/opf/experiments/missing_record/base/description.py | 31 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by the OPF Experiment Generator to generate the actual
description.py file by replacing $XXXXXXXX tokens with desired values.
This description.py file was generated by:
'~/nupic/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/ExpGenerator.py'
"""
from nupic.frameworks.opf.expdescriptionapi import ExperimentDescriptionAPI
from nupic.frameworks.opf.expdescriptionhelpers import (
updateConfigFromSubConfig,
applyValueGettersToContainer,
DeferredDictLookup)
from nupic.frameworks.opf.clamodelcallbacks import *
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.opfutils import (InferenceType,
InferenceElement)
from nupic.support import aggregationDivide
from nupic.frameworks.opf.opftaskdriver import (
IterationPhaseSpecLearnOnly,
IterationPhaseSpecInferOnly,
IterationPhaseSpecLearnAndInfer)
# Model Configuration Dictionary:
#
# Define the model parameters and adjust for any modifications if imported
# from a sub-experiment.
#
# These fields might be modified by a sub-experiment; this dict is passed
# between the sub-experiment and base experiment
#
#
# NOTE: Use of DEFERRED VALUE-GETTERs: dictionary fields and list elements
# within the config dictionary may be assigned futures derived from the
# ValueGetterBase class, such as DeferredDictLookup.
# This facility is particularly handy for enabling substitution of values in
# the config dictionary from other values in the config dictionary, which is
# needed by permutation.py-based experiments. These values will be resolved
# during the call to applyValueGettersToContainer(),
# which we call after the base experiment's config dictionary is updated from
# the sub-experiment. See ValueGetterBase and
# DeferredDictLookup for more details about value-getters.
#
# For each custom encoder parameter to be exposed to the sub-experiment/
# permutation overrides, define a variable in this section, using key names
# beginning with a single underscore character to avoid collisions with
# pre-defined keys (e.g., _dsEncoderFieldName2_N).
#
# Example:
# config = dict(
# _dsEncoderFieldName2_N = 70,
# _dsEncoderFieldName2_W = 5,
# dsEncoderSchema = [
# base=dict(
# fieldname='Name2', type='ScalarEncoder',
# name='Name2', minval=0, maxval=270, clipInput=True,
# n=DeferredDictLookup('_dsEncoderFieldName2_N'),
# w=DeferredDictLookup('_dsEncoderFieldName2_W')),
# ],
# )
# updateConfigFromSubConfig(config)
# applyValueGettersToContainer(config)
config = {
# Type of model that the rest of these parameters apply to.
'model': "CLA",
# Version that specifies the format of the config.
'version': 1,
# Intermediate variables used to compute fields in modelParams and also
# referenced from the control section.
'aggregationInfo': { 'days': 0,
'fields': [('timestamp', 'first'),
('field1', 'mode'),
('field2', 'mean')],
'hours': 1,
'microseconds': 0,
'milliseconds': 0,
'minutes': 0,
'months': 0,
'seconds': 0,
'weeks': 0,
'years': 0
},
'predictAheadTime': None,
# Model parameter dictionary.
'modelParams': {
# The type of inference that this model will perform
'inferenceType': 'NontemporalMultiStep',
'sensorParams': {
# Sensor diagnostic output verbosity control;
# if > 0: sensor region will print out on screen what it's sensing
      # at each step. 0: silent; >=1: some info; >=2: more info;
# >=3: even more info (see compute() in py/regions/RecordSensor.py)
'verbosity' : 0,
# Example:
# dsEncoderSchema = [
# DeferredDictLookup('__field_name_encoder'),
# ],
#
# (value generated from DS_ENCODER_SCHEMA)
'encoders': {
'timestamp_timeOfDay': {
'fieldname': u'timestamp',
'name': u'timestamp_timeOfDay',
'timeOfDay': (21, 1),
'type': 'DateEncoder'},
'timestamp_dayOfWeek': {
'dayOfWeek': (21, 1),
'fieldname': u'timestamp',
'name': u'timestamp_dayOfWeek',
'type': 'DateEncoder'},
'field1': { 'fieldname': u'field1',
'n': 100,
'name': u'field1',
'type': 'SDRCategoryEncoder',
'w': 21},
'field2': { 'clipInput': True,
'fieldname': u'field2',
'maxval': 50,
'minval': 0,
'n': 500,
'name': u'field2',
'type': 'ScalarEncoder',
'w': 21}},
# A dictionary specifying the period for automatically-generated
# resets from a RecordSensor;
#
# None = disable automatically-generated resets (also disabled if
# all of the specified values evaluate to 0).
      # Valid keys are any combination of the following:
# days, hours, minutes, seconds, milliseconds, microseconds, weeks
#
# Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),
#
# (value generated from SENSOR_AUTO_RESET)
'sensorAutoReset' : None,
},
'spEnable': True,
'spParams': {
# SP diagnostic output verbosity control;
# 0: silent; >=1: some info; >=2: more info;
'spVerbosity' : 0,
'spatialImp' : 'cpp',
'globalInhibition': 1,
# Number of cell columns in the cortical region (same number for
# SP and TP)
# (see also tpNCellsPerCol)
'columnCount': 2048,
'inputWidth': 0,
# SP inhibition control (absolute value);
# Maximum number of active columns in the SP region's output (when
# there are more, the weaker ones are suppressed)
'numActiveColumnsPerInhArea': 40,
'seed': 1956,
# potentialPct
# What percent of the columns's receptive field is available
# for potential synapses. At initialization time, we will
# choose potentialPct * (2*potentialRadius+1)^2
'potentialPct': 0.5,
# The default connected threshold. Any synapse whose
# permanence value is above the connected threshold is
# a "connected synapse", meaning it can contribute to the
# cell's firing. Typical value is 0.10. Cells whose activity
# level before inhibition falls below minDutyCycleBeforeInh
# will have their own internal synPermConnectedCell
# threshold set below this default value.
# (This concept applies to both SP and TP and so 'cells'
# is correct here as opposed to 'columns')
'synPermConnected': 0.1,
'synPermActiveInc': 0.1,
'synPermInactiveDec': 0.01,
},
# Controls whether TP is enabled or disabled;
# TP is necessary for making temporal predictions, such as predicting
# the next inputs. Without TP, the model is only capable of
# reconstructing missing sensor inputs (via SP).
'tpEnable' : False,
'tpParams': {
# TP diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
# (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py)
'verbosity': 0,
# Number of cell columns in the cortical region (same number for
# SP and TP)
# (see also tpNCellsPerCol)
'columnCount': 2048,
# The number of cells (i.e., states), allocated per column.
'cellsPerColumn': 32,
'inputWidth': 2048,
'seed': 1960,
# Temporal Pooler implementation selector (see _getTPClass in
# CLARegion.py).
'temporalImp': 'cpp',
# New Synapse formation count
# NOTE: If None, use spNumActivePerInhArea
#
# TODO: need better explanation
'newSynapseCount': 16,
# Maximum number of synapses per segment
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TP
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSynapsesPerSegment': 32,
# Maximum number of segments per cell
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TP
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSegmentsPerCell': 128,
# Initial Permanence
# TODO: need better explanation
'initialPerm': 0.21,
# Permanence Increment
'permanenceInc': 0.1,
# Permanence Decrement
# If set to None, will automatically default to tpPermanenceInc
# value.
'permanenceDec' : 0.1,
'globalDecay': 0.0,
'maxAge': 0,
# Minimum number of active synapses for a segment to be considered
# during search for the best-matching segments.
# None=use default
# Replaces: tpMinThreshold
'minThreshold': 12,
# Segment activation threshold.
# A segment is active if it has >= tpSegmentActivationThreshold
# connected synapses that are active due to infActiveState
# None=use default
# Replaces: tpActivationThreshold
'activationThreshold': 16,
'outputType': 'normal',
# "Pay Attention Mode" length. This tells the TP how many new
# elements to append to the end of a learned sequence at a time.
# Smaller values are better for datasets with short sequences,
# higher values are better for datasets with long sequences.
'pamLength': 1,
},
'clParams': {
# Classifier implementation selection.
'implementation': 'cpp',
'regionName' : 'CLAClassifierRegion',
# Classifier diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
'clVerbosity' : 0,
# This controls how fast the classifier learns/forgets. Higher values
# make it adapt faster and forget older patterns faster.
'alpha': 0.001,
# This is set after the call to updateConfigFromSubConfig and is
# computed from the aggregationInfo and predictAheadTime.
'steps': '1',
},
'trainSPNetOnlyIfRequested': False,
},
'predictionSteps': [1],
'predictedField': 'field1',
'dataSource': 'fillInBySubExperiment',
'windowSize': 200,
}
# end of config dictionary
# Adjust base config dictionary for any modifications if imported from a
# sub-experiment
updateConfigFromSubConfig(config)
# Compute predictionSteps based on the predictAheadTime and the aggregation
# period, which may be permuted over.
if config['predictAheadTime'] is not None:
predictionSteps = int(round(aggregationDivide(
config['predictAheadTime'], config['aggregationInfo'])))
assert (predictionSteps >= 1)
config['modelParams']['clParams']['steps'] = str(predictionSteps)
# Fill in classifier steps
config['modelParams']['clParams']['steps'] = '%s' % \
(','.join([str(x) for x in config['predictionSteps']]))
# If the predicted field is field1 (category), use avg_err as the metric;
# otherwise, for field2 (scalar), use aae.
if config['predictedField'] == 'field1':
metricName = 'avg_err'
loggedMetrics = ['.*avg_err.*']
else:
metricName = 'aae'
loggedMetrics = ['.*aae.*']
# Adjust config by applying ValueGetterBase-derived
# futures. NOTE: this MUST be called after updateConfigFromSubConfig() in order
# to support value-getter-based substitutions from the sub-experiment (if any)
applyValueGettersToContainer(config)
control = {
# The environment that the current model is being run in
"environment": 'nupic',
# Input stream specification per py/nupic/cluster/database/StreamDef.json.
#
'dataset' : {
'info': 'missingRecord',
'streams': [ {
'columns': ['*'],
'info': 'missingRecord',
'source': config['dataSource'],
}],
'aggregation': config['aggregationInfo'],
'version': 1
},
# Iteration count: maximum number of iterations. Each iteration corresponds
# to one record from the (possibly aggregated) dataset. The task is
  # terminated when either the number of iterations reaches iterationCount or
# all records in the (possibly aggregated) database have been processed,
# whichever occurs first.
#
# iterationCount of -1 = iterate over the entire dataset
'iterationCount' : -1,
# A dictionary containing all the supplementary parameters for inference
"inferenceArgs":{'predictedField': config['predictedField'],
'predictionSteps': config['predictionSteps']},
# Metrics: A list of MetricSpecs that instantiate the metrics that are
# computed for this experiment
'metrics':[
MetricSpec(field=config['predictedField'], metric=metricName,
inferenceElement='prediction', params={
'window': config['windowSize']}),
MetricSpec(field=config['predictedField'], metric='trivial',
inferenceElement='prediction', params={'errorMetric': metricName,
'window': config['windowSize']}),
],
# Logged Metrics: A sequence of regular expressions that specify which of
# the metrics from the Inference Specifications section MUST be logged for
# every prediction. The regex's correspond to the automatically generated
# metric labels. This is similar to the way the optimization metric is
# specified in permutations.py.
'loggedMetrics': loggedMetrics,
}
# Add multi-step prediction metrics
for steps in config['predictionSteps']:
control['metrics'].append(
MetricSpec(field=config['predictedField'], metric='multiStep',
inferenceElement='multiStepBestPredictions',
params={'errorMetric': metricName,
'window': config['windowSize'],
'steps': steps}))
descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
control=control)
|
wikimedia/labs-tools-ptable | refs/heads/master | data.py | 1 | # -*- coding: utf-8 -*-
"""
Copyright © 2012-2015 Ricordisamoa
This file is part of the Wikidata periodic table.
The Wikidata periodic table is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
The Wikidata periodic table is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with the Wikidata periodic table. If not, see <http://www.gnu.org/licenses/>.
"""
# mappings of period and group numbers to Wikidata item ids
# TODO: json?
periods = [
191936,
207712,
211331,
239825,
244982,
239813,
244979,
428818,
986218
]
groups = [
10801007,
19563,
108307,
189302,
193276,
193280,
202602,
202224,
208107,
205253,
185870,
191875,
189294,
106693,
106675,
104567,
19605,
19609
]
special_start = 6
special_series = [
19569, # lanthanide
19577, # actinide
428874 # superactinide
]
special_subclasses = {
19557: 'alkali-metal',
19591: 'post-transition-metal',
19596: 'metalloid',
19753344: 'diatomic-nonmetal',
19753345: 'polyatomic-nonmetal'
}
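# Lookup sketch (not in the original): the lists above are ordered by period
# and group number, so the Wikidata item id for the n-th group (1-based) is
#
#   def group_item(n):
#       return 'Q{}'.format(groups[n - 1])
#
#   group_item(3)  # -> 'Q108307'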
|
jonathanmz34/ztransfert | refs/heads/master | node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/MSVSToolFile.py | 2736 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Visual Studio project reader/writer."""
import gyp.common
import gyp.easy_xml as easy_xml
class Writer(object):
"""Visual Studio XML tool file writer."""
def __init__(self, tool_file_path, name):
"""Initializes the tool file.
Args:
tool_file_path: Path to the tool file.
name: Name of the tool file.
"""
self.tool_file_path = tool_file_path
self.name = name
self.rules_section = ['Rules']
def AddCustomBuildRule(self, name, cmd, description,
additional_dependencies,
outputs, extensions):
"""Adds a rule to the tool file.
Args:
      name: Name of the rule.
      cmd: Command line of the rule.
      description: Description of the rule.
additional_dependencies: other files which may trigger the rule.
outputs: outputs of the rule.
extensions: extensions handled by the rule.
"""
rule = ['CustomBuildRule',
{'Name': name,
'ExecutionDescription': description,
'CommandLine': cmd,
'Outputs': ';'.join(outputs),
'FileExtensions': ';'.join(extensions),
'AdditionalDependencies':
';'.join(additional_dependencies)
}]
self.rules_section.append(rule)
def WriteIfChanged(self):
"""Writes the tool file."""
content = ['VisualStudioToolFile',
{'Version': '8.00',
'Name': self.name
},
self.rules_section
]
easy_xml.WriteXmlIfChanged(content, self.tool_file_path,
encoding="Windows-1252")
|
ashang/calibre | refs/heads/master | src/calibre/db/__init__.py | 14 | #!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2011, Kovid Goyal <[email protected]>'
__docformat__ = 'restructuredtext en'
SPOOL_SIZE = 30*1024*1024
def _get_next_series_num_for_list(series_indices, unwrap=True):
from calibre.utils.config_base import tweaks
from math import ceil, floor
if not series_indices:
if isinstance(tweaks['series_index_auto_increment'], (int, float)):
return float(tweaks['series_index_auto_increment'])
return 1.0
if unwrap:
series_indices = [x[0] for x in series_indices]
if tweaks['series_index_auto_increment'] == 'next':
return floor(series_indices[-1]) + 1
if tweaks['series_index_auto_increment'] == 'first_free':
for i in xrange(1, 10000):
if i not in series_indices:
return i
# really shouldn't get here.
if tweaks['series_index_auto_increment'] == 'next_free':
for i in xrange(int(ceil(series_indices[0])), 10000):
if i not in series_indices:
return i
# really shouldn't get here.
if tweaks['series_index_auto_increment'] == 'last_free':
for i in xrange(int(ceil(series_indices[-1])), 0, -1):
if i not in series_indices:
return i
return series_indices[-1] + 1
if isinstance(tweaks['series_index_auto_increment'], (int, float)):
return float(tweaks['series_index_auto_increment'])
return 1.0
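# Behaviour sketch for series_indices = [1.0, 2.0] (already unwrapped), by
# series_index_auto_increment tweak: 'next' -> 3.0, 'first_free' -> 3,
# 'next_free' -> 3, 'last_free' -> 3.0, and a numeric tweak such as 5 -> 5.0.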
def _get_series_values(val):
import re
series_index_pat = re.compile(r'(.*)\s+\[([.0-9]+)\]$')
if not val:
return (val, None)
match = series_index_pat.match(val.strip())
if match is not None:
idx = match.group(2)
try:
idx = float(idx)
return (match.group(1).strip(), idx)
except:
pass
return (val, None)
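# Parsing sketch: _get_series_values('Foo [3.5]') -> ('Foo', 3.5), while
# _get_series_values('Foo') and _get_series_values('Foo [x]') fall through
# to (val, None) because the bracketed part must be numeric.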
def get_data_as_dict(self, prefix=None, authors_as_string=False, ids=None, convert_to_local_tz=True):
'''
Return all metadata stored in the database as a dict. Includes paths to
the cover and each format.
:param prefix: The prefix for all paths. By default, the prefix is the absolute path
to the library folder.
:param ids: Set of ids to return the data for. If None return data for
all entries in database.
'''
import os
from calibre.ebooks.metadata import authors_to_string
from calibre.utils.date import as_local_time
backend = getattr(self, 'backend', self) # Works with both old and legacy interfaces
if prefix is None:
prefix = backend.library_path
fdata = backend.custom_column_num_map
FIELDS = set(['title', 'sort', 'authors', 'author_sort', 'publisher',
'rating', 'timestamp', 'size', 'tags', 'comments', 'series',
'series_index', 'uuid', 'pubdate', 'last_modified', 'identifiers',
'languages']).union(set(fdata))
for x, data in fdata.iteritems():
if data['datatype'] == 'series':
FIELDS.add('%d_index'%x)
data = []
for record in self.data:
if record is None:
continue
db_id = record[self.FIELD_MAP['id']]
if ids is not None and db_id not in ids:
continue
x = {}
for field in FIELDS:
x[field] = record[self.FIELD_MAP[field]]
if convert_to_local_tz:
for tf in ('timestamp', 'pubdate', 'last_modified'):
x[tf] = as_local_time(x[tf])
data.append(x)
x['id'] = db_id
x['formats'] = []
isbn = self.isbn(db_id, index_is_id=True)
x['isbn'] = isbn if isbn else ''
if not x['authors']:
x['authors'] = _('Unknown')
x['authors'] = [i.replace('|', ',') for i in x['authors'].split(',')]
if authors_as_string:
x['authors'] = authors_to_string(x['authors'])
x['tags'] = [i.replace('|', ',').strip() for i in x['tags'].split(',')] if x['tags'] else []
path = os.path.join(prefix, self.path(record[self.FIELD_MAP['id']], index_is_id=True))
x['cover'] = os.path.join(path, 'cover.jpg')
if not record[self.FIELD_MAP['cover']]:
x['cover'] = None
formats = self.formats(record[self.FIELD_MAP['id']], index_is_id=True)
if formats:
for fmt in formats.split(','):
path = self.format_abspath(x['id'], fmt, index_is_id=True)
if path is None:
continue
if prefix != self.library_path:
path = os.path.relpath(path, self.library_path)
path = os.path.join(prefix, path)
x['formats'].append(path)
x['fmt_'+fmt.lower()] = path
x['available_formats'] = [i.upper() for i in formats.split(',')]
return data
|
espenhgn/nest-simulator | refs/heads/master | pynest/examples/arbor_cosim_example/arbor_proxy.py | 17 | #!/usr/bin/env python3
# arbor_proxy.py simulates an Arbor run with MPI spike exchange to external
# NEST. Reimplementation of the C++ version of Peyser implemented in pure
# Python to allow easy testing by external developers without building Arbor.
from mpi4py import MPI
import sys
import numpy as np
import math
############################################################################
# Some helper functions
# for debug printing in MPI environment
print_debug = True
print_prefix = "ARB_PROXY_PY: "
def print_d(to_print):
if (not print_debug): # print_debug is 'global variable'
return
    # we are debugging MPI code, force a flush after each print statement
print(print_prefix + str(to_print))
sys.stdout.flush()
def print_spike_array_d(to_print, force=False):
if (not (print_debug or force)): # print_debug is 'global variable'
return
# Assume that we received a spike array, no error checking
print(print_prefix + "SPIKES: [", end='')
for spike in to_print:
print("S[{}: {}, t={}]".format(spike[0], spike[1], spike[2], end=''))
print("]")
    # we are debugging MPI code, force a flush after each print statement
    sys.stdout.flush()
# Gather function
def gather_spikes(spikes, comm):
# We need to know how much data we will receive in this gather action
    size = comm.size
receive_count_array = np.zeros(size, dtype='uint32')
send_count_array = np.array([spikes.size], dtype='uint32')
comm.Allgather(send_count_array, receive_count_array)
    # Compute the running total of spike counts across ranks
    cumulative_sum_spikes = np.cumsum(receive_count_array)
offsets = np.zeros(size)
# start with a zero and skip the last entry in cumsum
    offsets[1:] = cumulative_sum_spikes[:-1]
# Create buffers for sending and receiving
# Total nr spikes received is the last entry in cumsum
# Allgatherv only available as raw byte buffers
    receive_spikes = np.ones(cumulative_sum_spikes[-1], dtype='byte')
send_buffer = spikes.view(dtype=np.byte) # send as a byte view in spikes
receive_buffer = [receive_spikes, receive_count_array, offsets, MPI.BYTE]
comm.Allgatherv(send_buffer, receive_buffer)
print_spike_array_d(receive_spikes.view('uint32,uint32, float32'))
return receive_spikes.view('uint32,uint32, float32')
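# Usage sketch (hypothetical values): each spike is two uint32 ids plus a
# float32 time, matching the dtype views used above, e.g.
#
#   spikes = np.array([(7, 0, 1.25)], dtype='uint32, uint32, float32')
#   received = gather_spikes(spikes, MPI.COMM_WORLD)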
class comm_information():
# Helper class for MPI configuration
    # TODO: with N>2 simulators this whole function needs to be cleaned up
def __init__(self, is_arbor):
self.is_arbor = is_arbor
self.is_nest = not is_arbor
self.global_rank = MPI.COMM_WORLD.rank
self.global_size = MPI.COMM_WORLD.size
# all arbor go into split 1
color = 1 if is_arbor else 0
self.world = MPI.COMM_WORLD
self.comm = self.world.Split(color)
local_size = self.comm.size
self.local_rank = self.comm.rank
self.arbor_size = local_size if self.is_arbor else self.global_size - local_size # noqa
self.nest_size = self.global_size - self.arbor_size
input = np.array([self.global_rank], dtype=np.int32)
local_ranks = np.zeros(local_size, dtype=np.int32)
        # Grab all local ranks, sort them, and find the first non-consecutive
        # entry. This will be the other simulator's root.
self.comm.Allgather(input, local_ranks)
local_ranks.sort()
        # Small helper function to look for the first non-consecutive entry.
        # Ranks can be interleaved; the first non-consecutive one would be nest's.
def first_missing(np_array):
for idx in range(np_array.size-1):
if not (np_array[idx+1] - np_array[idx] == 1):
return np_array[idx] + 1
# Default the last rank plus one
return np_array[-1]+1
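        # e.g. sorted local ranks [0, 1, 2, 5, 6] -> first_missing returns 3,
        # the lowest rank missing from this split, i.e. the other root.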
if (self.is_arbor):
self.arbor_root = local_ranks[0]
self.nest_root = first_missing(local_ranks) if self.arbor_root == 0 else 0 # noqa
else:
self.nest_root = local_ranks[0]
self.arbor_root = first_missing(local_ranks) if self.nest_root == 0 else 0 # noqa
def __str__(self):
return str("global ( rank: " + str(self.global_rank) + ", size: " + str(self.global_size) + "\n" + # noqa
"local rank " + str(self.local_rank) + "\n" +
"self is arbor\n" if self.is_arbor else "self is nest\n" +
"arbor (root: " + str(self.arbor_root) + ", size: " + str(self.arbor_size) + ")\n" + # noqa
"nest (root: " + str(self.nest_root) + ", size: " + str(self.nest_size) + ")\n") # noqa
#####################################################################
# MPI configuration
comm_info = comm_information(True)
# Only print one the root arbor rank
if comm_info.local_rank != comm_info.arbor_root:
print_debug = False
# Sim Config
num_arbor_cells = 100
min_delay = 10
duration = 100
########################################################################
# handshake #1: communicate the number of cells between arbor and nest
# send nr of arbor cells
output = np.array([num_arbor_cells], dtype=np.int32)
comm_info.world.Bcast(output, comm_info.arbor_root)
# Receive nest cell_nr
output = np.array([0], dtype=np.int32)
comm_info.world.Bcast(output, root=comm_info.nest_root)
num_nest_cells = output[0]
num_total_cells = num_nest_cells + num_arbor_cells
print_d("num_arbor_cells: " + str(num_arbor_cells) + " " +
"num_nest_cells: " + str(num_nest_cells) + " " +
"num_total_cells: " + str(num_total_cells))
########################################################################
# hand shake #2: min delay
# first send the arbor delays
arb_com_time = min_delay / 2.0
output = np.array([arb_com_time], dtype=np.float32)
comm_info.world.Bcast(output, comm_info.arbor_root)
# receive the nest delays
output = np.array([0], dtype=np.float32)
comm_info.world.Bcast(output, comm_info.nest_root)
nest_com_time = output[0]
print_d("nest_com_time: " + str(nest_com_time))
###############################################################
# Process the delay and calculate new simulator settings
# TODO: This doubling smells kludgy
double_min_delay = 2 * min(arb_com_time, nest_com_time)
print_d("min_delay: " + str(double_min_delay))
delta = double_min_delay / 2.0
steps = int(math.floor(duration / delta))
# Extra step at end if not a whole multiple
if (steps * delta < duration):
steps += 1
###############################################################
# Handshake #3: steps
output = np.array([steps], dtype=np.int32)
comm_info.world.Bcast(output, comm_info.arbor_root)
print_d("delta: " + str(delta) + ", " +
"sim_duration: " + str(duration) + ", " +
"steps: " + str(steps) + ", ")
#######################################################
# main simulated simulation loop inclusive nr of steps.
for step in range(steps+1):
print_d("step: " + str(step) + ": " + str(step * delta))
    # We are sending no spikes from arbor to nest.
    # Create an array of size zero with the correct dtype
output = np.zeros(0, dtype='uint32, uint32, float32')
gather_spikes(output, comm_info.world)
print_d("Reached arbor_proxy.py end")
|
signed/intellij-community | refs/heads/master | python/testData/resolve/ImplicitDunderSizeOfWithInheritedClassAttr.py | 35 | class A(object):
__sizeof__ = 4
class B(A):
def foo(self):
return __sizeof__
# <ref> |
firerszd/kbengine | refs/heads/master | kbe/src/lib/python/Lib/test/test_asyncio/test_locks.py | 80 | """Tests for lock.py"""
import unittest
from unittest import mock
import re
import asyncio
from asyncio import test_utils
STR_RGX_REPR = (
r'^<(?P<class>.*?) object at (?P<address>.*?)'
r'\[(?P<extras>'
r'(set|unset|locked|unlocked)(,value:\d)?(,waiters:\d+)?'
r')\]>\Z'
)
RGX_REPR = re.compile(STR_RGX_REPR)
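# The pattern accepts reprs such as (a sketch, not captured output):
#   "<asyncio.locks.Lock object at 0xdeadbeef [locked,waiters:1]>"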
class LockTests(test_utils.TestCase):
def setUp(self):
self.loop = self.new_test_loop()
def test_ctor_loop(self):
loop = mock.Mock()
lock = asyncio.Lock(loop=loop)
self.assertIs(lock._loop, loop)
lock = asyncio.Lock(loop=self.loop)
self.assertIs(lock._loop, self.loop)
def test_ctor_noloop(self):
asyncio.set_event_loop(self.loop)
lock = asyncio.Lock()
self.assertIs(lock._loop, self.loop)
def test_repr(self):
lock = asyncio.Lock(loop=self.loop)
self.assertTrue(repr(lock).endswith('[unlocked]>'))
self.assertTrue(RGX_REPR.match(repr(lock)))
@asyncio.coroutine
def acquire_lock():
yield from lock
self.loop.run_until_complete(acquire_lock())
self.assertTrue(repr(lock).endswith('[locked]>'))
self.assertTrue(RGX_REPR.match(repr(lock)))
def test_lock(self):
lock = asyncio.Lock(loop=self.loop)
@asyncio.coroutine
def acquire_lock():
return (yield from lock)
res = self.loop.run_until_complete(acquire_lock())
self.assertTrue(res)
self.assertTrue(lock.locked())
lock.release()
self.assertFalse(lock.locked())
def test_acquire(self):
lock = asyncio.Lock(loop=self.loop)
result = []
self.assertTrue(self.loop.run_until_complete(lock.acquire()))
@asyncio.coroutine
def c1(result):
if (yield from lock.acquire()):
result.append(1)
return True
@asyncio.coroutine
def c2(result):
if (yield from lock.acquire()):
result.append(2)
return True
@asyncio.coroutine
def c3(result):
if (yield from lock.acquire()):
result.append(3)
return True
t1 = asyncio.Task(c1(result), loop=self.loop)
t2 = asyncio.Task(c2(result), loop=self.loop)
test_utils.run_briefly(self.loop)
self.assertEqual([], result)
lock.release()
test_utils.run_briefly(self.loop)
self.assertEqual([1], result)
test_utils.run_briefly(self.loop)
self.assertEqual([1], result)
t3 = asyncio.Task(c3(result), loop=self.loop)
lock.release()
test_utils.run_briefly(self.loop)
self.assertEqual([1, 2], result)
lock.release()
test_utils.run_briefly(self.loop)
self.assertEqual([1, 2, 3], result)
self.assertTrue(t1.done())
self.assertTrue(t1.result())
self.assertTrue(t2.done())
self.assertTrue(t2.result())
self.assertTrue(t3.done())
self.assertTrue(t3.result())
def test_acquire_cancel(self):
lock = asyncio.Lock(loop=self.loop)
self.assertTrue(self.loop.run_until_complete(lock.acquire()))
task = asyncio.Task(lock.acquire(), loop=self.loop)
self.loop.call_soon(task.cancel)
self.assertRaises(
asyncio.CancelledError,
self.loop.run_until_complete, task)
self.assertFalse(lock._waiters)
def test_cancel_race(self):
# Several tasks:
# - A acquires the lock
        # - B is blocked in acquire()
        # - C is blocked in acquire()
#
# Now, concurrently:
# - B is cancelled
# - A releases the lock
#
# If B's waiter is marked cancelled but not yet removed from
# _waiters, A's release() call will crash when trying to set
# B's waiter; instead, it should move on to C's waiter.
# Setup: A has the lock, b and c are waiting.
lock = asyncio.Lock(loop=self.loop)
@asyncio.coroutine
def lockit(name, blocker):
yield from lock.acquire()
try:
if blocker is not None:
yield from blocker
finally:
lock.release()
fa = asyncio.Future(loop=self.loop)
ta = asyncio.Task(lockit('A', fa), loop=self.loop)
test_utils.run_briefly(self.loop)
self.assertTrue(lock.locked())
tb = asyncio.Task(lockit('B', None), loop=self.loop)
test_utils.run_briefly(self.loop)
self.assertEqual(len(lock._waiters), 1)
tc = asyncio.Task(lockit('C', None), loop=self.loop)
test_utils.run_briefly(self.loop)
self.assertEqual(len(lock._waiters), 2)
# Create the race and check.
# Without the fix this failed at the last assert.
fa.set_result(None)
tb.cancel()
self.assertTrue(lock._waiters[0].cancelled())
test_utils.run_briefly(self.loop)
self.assertFalse(lock.locked())
self.assertTrue(ta.done())
self.assertTrue(tb.cancelled())
self.assertTrue(tc.done())
def test_release_not_acquired(self):
lock = asyncio.Lock(loop=self.loop)
self.assertRaises(RuntimeError, lock.release)
def test_release_no_waiters(self):
lock = asyncio.Lock(loop=self.loop)
self.loop.run_until_complete(lock.acquire())
self.assertTrue(lock.locked())
lock.release()
self.assertFalse(lock.locked())
def test_context_manager(self):
lock = asyncio.Lock(loop=self.loop)
@asyncio.coroutine
def acquire_lock():
return (yield from lock)
with self.loop.run_until_complete(acquire_lock()):
self.assertTrue(lock.locked())
self.assertFalse(lock.locked())
def test_context_manager_cant_reuse(self):
lock = asyncio.Lock(loop=self.loop)
@asyncio.coroutine
def acquire_lock():
return (yield from lock)
# This spells "yield from lock" outside a generator.
cm = self.loop.run_until_complete(acquire_lock())
with cm:
self.assertTrue(lock.locked())
self.assertFalse(lock.locked())
with self.assertRaises(AttributeError):
with cm:
pass
def test_context_manager_no_yield(self):
lock = asyncio.Lock(loop=self.loop)
try:
with lock:
self.fail('RuntimeError is not raised in with expression')
except RuntimeError as err:
self.assertEqual(
str(err),
'"yield from" should be used as context manager expression')
self.assertFalse(lock.locked())
class EventTests(test_utils.TestCase):
def setUp(self):
self.loop = self.new_test_loop()
def test_ctor_loop(self):
loop = mock.Mock()
ev = asyncio.Event(loop=loop)
self.assertIs(ev._loop, loop)
ev = asyncio.Event(loop=self.loop)
self.assertIs(ev._loop, self.loop)
def test_ctor_noloop(self):
asyncio.set_event_loop(self.loop)
ev = asyncio.Event()
self.assertIs(ev._loop, self.loop)
def test_repr(self):
ev = asyncio.Event(loop=self.loop)
self.assertTrue(repr(ev).endswith('[unset]>'))
match = RGX_REPR.match(repr(ev))
self.assertEqual(match.group('extras'), 'unset')
ev.set()
self.assertTrue(repr(ev).endswith('[set]>'))
self.assertTrue(RGX_REPR.match(repr(ev)))
ev._waiters.append(mock.Mock())
self.assertTrue('waiters:1' in repr(ev))
self.assertTrue(RGX_REPR.match(repr(ev)))
def test_wait(self):
ev = asyncio.Event(loop=self.loop)
self.assertFalse(ev.is_set())
result = []
@asyncio.coroutine
def c1(result):
if (yield from ev.wait()):
result.append(1)
@asyncio.coroutine
def c2(result):
if (yield from ev.wait()):
result.append(2)
@asyncio.coroutine
def c3(result):
if (yield from ev.wait()):
result.append(3)
t1 = asyncio.Task(c1(result), loop=self.loop)
t2 = asyncio.Task(c2(result), loop=self.loop)
test_utils.run_briefly(self.loop)
self.assertEqual([], result)
t3 = asyncio.Task(c3(result), loop=self.loop)
ev.set()
test_utils.run_briefly(self.loop)
self.assertEqual([3, 1, 2], result)
self.assertTrue(t1.done())
self.assertIsNone(t1.result())
self.assertTrue(t2.done())
self.assertIsNone(t2.result())
self.assertTrue(t3.done())
self.assertIsNone(t3.result())
def test_wait_on_set(self):
ev = asyncio.Event(loop=self.loop)
ev.set()
res = self.loop.run_until_complete(ev.wait())
self.assertTrue(res)
def test_wait_cancel(self):
ev = asyncio.Event(loop=self.loop)
wait = asyncio.Task(ev.wait(), loop=self.loop)
self.loop.call_soon(wait.cancel)
self.assertRaises(
asyncio.CancelledError,
self.loop.run_until_complete, wait)
self.assertFalse(ev._waiters)
def test_clear(self):
ev = asyncio.Event(loop=self.loop)
self.assertFalse(ev.is_set())
ev.set()
self.assertTrue(ev.is_set())
ev.clear()
self.assertFalse(ev.is_set())
def test_clear_with_waiters(self):
ev = asyncio.Event(loop=self.loop)
result = []
@asyncio.coroutine
def c1(result):
if (yield from ev.wait()):
result.append(1)
return True
t = asyncio.Task(c1(result), loop=self.loop)
test_utils.run_briefly(self.loop)
self.assertEqual([], result)
ev.set()
ev.clear()
self.assertFalse(ev.is_set())
ev.set()
ev.set()
self.assertEqual(1, len(ev._waiters))
test_utils.run_briefly(self.loop)
self.assertEqual([1], result)
self.assertEqual(0, len(ev._waiters))
self.assertTrue(t.done())
self.assertTrue(t.result())
class ConditionTests(test_utils.TestCase):
def setUp(self):
self.loop = self.new_test_loop()
def test_ctor_loop(self):
loop = mock.Mock()
cond = asyncio.Condition(loop=loop)
self.assertIs(cond._loop, loop)
cond = asyncio.Condition(loop=self.loop)
self.assertIs(cond._loop, self.loop)
def test_ctor_noloop(self):
asyncio.set_event_loop(self.loop)
cond = asyncio.Condition()
self.assertIs(cond._loop, self.loop)
def test_wait(self):
cond = asyncio.Condition(loop=self.loop)
result = []
@asyncio.coroutine
def c1(result):
yield from cond.acquire()
if (yield from cond.wait()):
result.append(1)
return True
@asyncio.coroutine
def c2(result):
yield from cond.acquire()
if (yield from cond.wait()):
result.append(2)
return True
@asyncio.coroutine
def c3(result):
yield from cond.acquire()
if (yield from cond.wait()):
result.append(3)
return True
t1 = asyncio.Task(c1(result), loop=self.loop)
t2 = asyncio.Task(c2(result), loop=self.loop)
t3 = asyncio.Task(c3(result), loop=self.loop)
test_utils.run_briefly(self.loop)
self.assertEqual([], result)
self.assertFalse(cond.locked())
self.assertTrue(self.loop.run_until_complete(cond.acquire()))
cond.notify()
test_utils.run_briefly(self.loop)
self.assertEqual([], result)
self.assertTrue(cond.locked())
cond.release()
test_utils.run_briefly(self.loop)
self.assertEqual([1], result)
self.assertTrue(cond.locked())
cond.notify(2)
test_utils.run_briefly(self.loop)
self.assertEqual([1], result)
self.assertTrue(cond.locked())
cond.release()
test_utils.run_briefly(self.loop)
self.assertEqual([1, 2], result)
self.assertTrue(cond.locked())
cond.release()
test_utils.run_briefly(self.loop)
self.assertEqual([1, 2, 3], result)
self.assertTrue(cond.locked())
self.assertTrue(t1.done())
self.assertTrue(t1.result())
self.assertTrue(t2.done())
self.assertTrue(t2.result())
self.assertTrue(t3.done())
self.assertTrue(t3.result())
def test_wait_cancel(self):
cond = asyncio.Condition(loop=self.loop)
self.loop.run_until_complete(cond.acquire())
wait = asyncio.Task(cond.wait(), loop=self.loop)
self.loop.call_soon(wait.cancel)
self.assertRaises(
asyncio.CancelledError,
self.loop.run_until_complete, wait)
self.assertFalse(cond._waiters)
self.assertTrue(cond.locked())
def test_wait_unacquired(self):
cond = asyncio.Condition(loop=self.loop)
self.assertRaises(
RuntimeError,
self.loop.run_until_complete, cond.wait())
def test_wait_for(self):
cond = asyncio.Condition(loop=self.loop)
presult = False
def predicate():
return presult
result = []
@asyncio.coroutine
def c1(result):
yield from cond.acquire()
if (yield from cond.wait_for(predicate)):
result.append(1)
cond.release()
return True
t = asyncio.Task(c1(result), loop=self.loop)
test_utils.run_briefly(self.loop)
self.assertEqual([], result)
self.loop.run_until_complete(cond.acquire())
cond.notify()
cond.release()
test_utils.run_briefly(self.loop)
self.assertEqual([], result)
presult = True
self.loop.run_until_complete(cond.acquire())
cond.notify()
cond.release()
test_utils.run_briefly(self.loop)
self.assertEqual([1], result)
self.assertTrue(t.done())
self.assertTrue(t.result())
def test_wait_for_unacquired(self):
cond = asyncio.Condition(loop=self.loop)
# predicate can return true immediately
res = self.loop.run_until_complete(cond.wait_for(lambda: [1, 2, 3]))
self.assertEqual([1, 2, 3], res)
self.assertRaises(
RuntimeError,
self.loop.run_until_complete,
cond.wait_for(lambda: False))
def test_notify(self):
cond = asyncio.Condition(loop=self.loop)
result = []
@asyncio.coroutine
def c1(result):
yield from cond.acquire()
if (yield from cond.wait()):
result.append(1)
cond.release()
return True
@asyncio.coroutine
def c2(result):
yield from cond.acquire()
if (yield from cond.wait()):
result.append(2)
cond.release()
return True
@asyncio.coroutine
def c3(result):
yield from cond.acquire()
if (yield from cond.wait()):
result.append(3)
cond.release()
return True
t1 = asyncio.Task(c1(result), loop=self.loop)
t2 = asyncio.Task(c2(result), loop=self.loop)
t3 = asyncio.Task(c3(result), loop=self.loop)
test_utils.run_briefly(self.loop)
self.assertEqual([], result)
self.loop.run_until_complete(cond.acquire())
cond.notify(1)
cond.release()
test_utils.run_briefly(self.loop)
self.assertEqual([1], result)
self.loop.run_until_complete(cond.acquire())
cond.notify(1)
cond.notify(2048)
cond.release()
test_utils.run_briefly(self.loop)
self.assertEqual([1, 2, 3], result)
self.assertTrue(t1.done())
self.assertTrue(t1.result())
self.assertTrue(t2.done())
self.assertTrue(t2.result())
self.assertTrue(t3.done())
self.assertTrue(t3.result())
def test_notify_all(self):
cond = asyncio.Condition(loop=self.loop)
result = []
@asyncio.coroutine
def c1(result):
yield from cond.acquire()
if (yield from cond.wait()):
result.append(1)
cond.release()
return True
@asyncio.coroutine
def c2(result):
yield from cond.acquire()
if (yield from cond.wait()):
result.append(2)
cond.release()
return True
t1 = asyncio.Task(c1(result), loop=self.loop)
t2 = asyncio.Task(c2(result), loop=self.loop)
test_utils.run_briefly(self.loop)
self.assertEqual([], result)
self.loop.run_until_complete(cond.acquire())
cond.notify_all()
cond.release()
test_utils.run_briefly(self.loop)
self.assertEqual([1, 2], result)
self.assertTrue(t1.done())
self.assertTrue(t1.result())
self.assertTrue(t2.done())
self.assertTrue(t2.result())
def test_notify_unacquired(self):
cond = asyncio.Condition(loop=self.loop)
self.assertRaises(RuntimeError, cond.notify)
def test_notify_all_unacquired(self):
cond = asyncio.Condition(loop=self.loop)
self.assertRaises(RuntimeError, cond.notify_all)
def test_repr(self):
cond = asyncio.Condition(loop=self.loop)
self.assertTrue('unlocked' in repr(cond))
self.assertTrue(RGX_REPR.match(repr(cond)))
self.loop.run_until_complete(cond.acquire())
self.assertTrue('locked' in repr(cond))
cond._waiters.append(mock.Mock())
self.assertTrue('waiters:1' in repr(cond))
self.assertTrue(RGX_REPR.match(repr(cond)))
cond._waiters.append(mock.Mock())
self.assertTrue('waiters:2' in repr(cond))
self.assertTrue(RGX_REPR.match(repr(cond)))
def test_context_manager(self):
cond = asyncio.Condition(loop=self.loop)
@asyncio.coroutine
def acquire_cond():
return (yield from cond)
with self.loop.run_until_complete(acquire_cond()):
self.assertTrue(cond.locked())
self.assertFalse(cond.locked())
def test_context_manager_no_yield(self):
cond = asyncio.Condition(loop=self.loop)
try:
with cond:
self.fail('RuntimeError is not raised in with expression')
except RuntimeError as err:
self.assertEqual(
str(err),
'"yield from" should be used as context manager expression')
self.assertFalse(cond.locked())
def test_explicit_lock(self):
lock = asyncio.Lock(loop=self.loop)
cond = asyncio.Condition(lock, loop=self.loop)
self.assertIs(cond._lock, lock)
self.assertIs(cond._loop, lock._loop)
def test_ambiguous_loops(self):
loop = self.new_test_loop()
self.addCleanup(loop.close)
lock = asyncio.Lock(loop=self.loop)
with self.assertRaises(ValueError):
asyncio.Condition(lock, loop=loop)
class SemaphoreTests(test_utils.TestCase):
def setUp(self):
self.loop = self.new_test_loop()
def test_ctor_loop(self):
loop = mock.Mock()
sem = asyncio.Semaphore(loop=loop)
self.assertIs(sem._loop, loop)
sem = asyncio.Semaphore(loop=self.loop)
self.assertIs(sem._loop, self.loop)
def test_ctor_noloop(self):
asyncio.set_event_loop(self.loop)
sem = asyncio.Semaphore()
self.assertIs(sem._loop, self.loop)
def test_initial_value_zero(self):
sem = asyncio.Semaphore(0, loop=self.loop)
self.assertTrue(sem.locked())
def test_repr(self):
sem = asyncio.Semaphore(loop=self.loop)
self.assertTrue(repr(sem).endswith('[unlocked,value:1]>'))
self.assertTrue(RGX_REPR.match(repr(sem)))
self.loop.run_until_complete(sem.acquire())
self.assertTrue(repr(sem).endswith('[locked]>'))
self.assertTrue('waiters' not in repr(sem))
self.assertTrue(RGX_REPR.match(repr(sem)))
sem._waiters.append(mock.Mock())
self.assertTrue('waiters:1' in repr(sem))
self.assertTrue(RGX_REPR.match(repr(sem)))
sem._waiters.append(mock.Mock())
self.assertTrue('waiters:2' in repr(sem))
self.assertTrue(RGX_REPR.match(repr(sem)))
def test_semaphore(self):
sem = asyncio.Semaphore(loop=self.loop)
self.assertEqual(1, sem._value)
@asyncio.coroutine
def acquire_lock():
return (yield from sem)
res = self.loop.run_until_complete(acquire_lock())
self.assertTrue(res)
self.assertTrue(sem.locked())
self.assertEqual(0, sem._value)
sem.release()
self.assertFalse(sem.locked())
self.assertEqual(1, sem._value)
def test_semaphore_value(self):
self.assertRaises(ValueError, asyncio.Semaphore, -1)
def test_acquire(self):
sem = asyncio.Semaphore(3, loop=self.loop)
result = []
self.assertTrue(self.loop.run_until_complete(sem.acquire()))
self.assertTrue(self.loop.run_until_complete(sem.acquire()))
self.assertFalse(sem.locked())
@asyncio.coroutine
def c1(result):
yield from sem.acquire()
result.append(1)
return True
@asyncio.coroutine
def c2(result):
yield from sem.acquire()
result.append(2)
return True
@asyncio.coroutine
def c3(result):
yield from sem.acquire()
result.append(3)
return True
@asyncio.coroutine
def c4(result):
yield from sem.acquire()
result.append(4)
return True
t1 = asyncio.Task(c1(result), loop=self.loop)
t2 = asyncio.Task(c2(result), loop=self.loop)
t3 = asyncio.Task(c3(result), loop=self.loop)
test_utils.run_briefly(self.loop)
self.assertEqual([1], result)
self.assertTrue(sem.locked())
self.assertEqual(2, len(sem._waiters))
self.assertEqual(0, sem._value)
t4 = asyncio.Task(c4(result), loop=self.loop)
sem.release()
sem.release()
self.assertEqual(2, sem._value)
test_utils.run_briefly(self.loop)
self.assertEqual(0, sem._value)
self.assertEqual([1, 2, 3], result)
self.assertTrue(sem.locked())
self.assertEqual(1, len(sem._waiters))
self.assertEqual(0, sem._value)
self.assertTrue(t1.done())
self.assertTrue(t1.result())
self.assertTrue(t2.done())
self.assertTrue(t2.result())
self.assertTrue(t3.done())
self.assertTrue(t3.result())
self.assertFalse(t4.done())
# cleanup locked semaphore
sem.release()
self.loop.run_until_complete(t4)
def test_acquire_cancel(self):
sem = asyncio.Semaphore(loop=self.loop)
self.loop.run_until_complete(sem.acquire())
acquire = asyncio.Task(sem.acquire(), loop=self.loop)
self.loop.call_soon(acquire.cancel)
self.assertRaises(
asyncio.CancelledError,
self.loop.run_until_complete, acquire)
self.assertFalse(sem._waiters)
def test_release_not_acquired(self):
sem = asyncio.BoundedSemaphore(loop=self.loop)
self.assertRaises(ValueError, sem.release)
def test_release_no_waiters(self):
sem = asyncio.Semaphore(loop=self.loop)
self.loop.run_until_complete(sem.acquire())
self.assertTrue(sem.locked())
sem.release()
self.assertFalse(sem.locked())
def test_context_manager(self):
sem = asyncio.Semaphore(2, loop=self.loop)
@asyncio.coroutine
def acquire_lock():
return (yield from sem)
with self.loop.run_until_complete(acquire_lock()):
self.assertFalse(sem.locked())
self.assertEqual(1, sem._value)
with self.loop.run_until_complete(acquire_lock()):
self.assertTrue(sem.locked())
self.assertEqual(2, sem._value)
def test_context_manager_no_yield(self):
sem = asyncio.Semaphore(2, loop=self.loop)
try:
with sem:
self.fail('RuntimeError is not raised in with expression')
except RuntimeError as err:
self.assertEqual(
str(err),
'"yield from" should be used as context manager expression')
self.assertEqual(2, sem._value)
if __name__ == '__main__':
unittest.main()
|
alexmandujano/django | refs/heads/master | django/conf/locale/bs/formats.py | 118 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j. N Y.'
TIME_FORMAT = 'G:i'
DATETIME_FORMAT = 'j. N. Y. G:i T'
YEAR_MONTH_FORMAT = 'F Y.'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'Y M j'
# SHORT_DATETIME_FORMAT =
# FIRST_DAY_OF_WEEK =
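# Illustrative example (editorial note, not part of the original file): with
# DATE_FORMAT = 'j. N Y.', a date such as 15 March 2014 renders roughly as
# "15. March 2014." (month names are localized at runtime).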
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# DATE_INPUT_FORMATS =
# TIME_INPUT_FORMATS =
# DATETIME_INPUT_FORMATS =
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
# NUMBER_GROUPING =
|
raycarnes/hr | refs/heads/8.0 | __unported__/hr_payroll_register/wizard/__init__.py | 28 | # -*- coding:utf-8 -*-
#
#
# Copyright (C) 2011,2013 Michael Telahun Makonnen <[email protected]>.
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
from . import hr_payroll_register_run
|
THURachel/GenSegNet | refs/heads/master | matconvnet-1.0-beta16/doc/matdoc.py | 7 | # file: matdoc.py
# author: Andrea Vedaldi
# brief: Extract comments from a MATLAB mfile and generate a Markdown file
import sys, os, re, shutil
import subprocess, signal
import string, fnmatch
from matdocparser import *
from optparse import OptionParser
usage = """usage: %prog [options] <mfile>
Extracts the comments from the specified <mfile> and prints a Markdown
version of them."""
optparser = OptionParser(usage=usage)
optparser.add_option(
"-v", "--verbose",
dest = "verb",
default = False,
action = "store_true",
help = "print debug information")
findFunction = re.compile(r"^\s*(function|classdef).*$", re.MULTILINE)
getFunction = re.compile(r"\s*%\s*(\w+)\s*(.*)\n"
"((\s*%.*\n)+)")
cleanComments = re.compile("^\s*%", re.MULTILINE)
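# Illustrative note (editorial assumption, inferred from the regexes above):
# getFunction is meant to match a leading MATLAB comment block such as
#   % VL_EXAMPLE  One-line brief description
#   %   Further documentation lines, all prefixed with "%".
# capturing the name ("VL_EXAMPLE"), the brief, and the raw comment body;
# cleanComments then strips the leading "%" from every line of that body.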
# --------------------------------------------------------------------
def readText(path):
# --------------------------------------------------------------------
with open (path, "r") as myfile:
text=myfile.read()
return text
# --------------------------------------------------------------------
class MatlabFunction:
# --------------------------------------------------------------------
def __init__(self, name, nature, brief, body):
self.name = name
self.nature = nature
self.brief = brief
self.body = body
def __str__(self):
return "%s (%s)" % (self.name, self.nature)
# --------------------------------------------------------------------
def findNextFunction(test, pos):
# --------------------------------------------------------------------
if pos == 0 and test[0] == '%':
# This is an M-file with a MEX implementation
return (pos, 'function')
m = findFunction.search(test, pos)
if m:
return (m.end()+1, m.group(1))
else:
return (None, None)
# --------------------------------------------------------------------
def getFunctionDoc(text, nature, pos):
# --------------------------------------------------------------------
m = getFunction.match(text, pos)
if m:
name = m.group(1)
brief = m.group(2).strip()
body = clean(m.group(3))
return (MatlabFunction(name, nature, brief, body), m.end()+1)
else:
return (None, pos)
# --------------------------------------------------------------------
def clean(text):
# --------------------------------------------------------------------
return cleanComments.sub("", text)
# --------------------------------------------------------------------
def extract(text):
# --------------------------------------------------------------------
funcs = []
pos = 0
while True:
(pos, nature) = findNextFunction(text, pos)
if nature is None: break
(f, pos) = getFunctionDoc(text, nature, pos)
if f:
funcs.append(f)
return funcs
# --------------------------------------------------------------------
class Frame(object):
# --------------------------------------------------------------------
prefix = ""
before = None
def __init__(self, prefix, before = None, hlevel = 0):
self.prefix = prefix
self.before = before
self.hlevel = hlevel
# --------------------------------------------------------------------
class Context(object):
# --------------------------------------------------------------------
frames = []
def __init__(self, hlevel = 0):
self.hlevel = hlevel
def __str__(self):
text = ""
for f in self.frames:
if not f.before:
text = text + f.prefix
else:
text = text + f.prefix[:-len(f.before)] + f.before
f.before = None
return text
def pop(self):
f = self.frames[-1]
del self.frames[-1]
return f
def push(self, frame):
self.frames.append(frame)
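# Editorial note (assumption from the rendering code below): each Frame
# contributes an indentation prefix to every rendered line; "before", when
# set, temporarily replaces the tail of the prefix on the first line of a
# list item (e.g. "* " or "+ ") and is cleared after one use by
# Context.__str__.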
def render_L(tree, context):
print "%s%s" % (context,tree.text)
def render_SL(tree, context):
print "%s%s %s" % (context,
"#"*(context.hlevel+tree.section_level),
tree.inner_text)
def render_S(tree, context):
for n in tree.children: render_SL(n, context)
def render_DH(tree, context):
if len(tree.inner_text.strip()) > 1:
print "%s**%s** [*%s*]" % (context, tree.description.strip(), tree.inner_text.strip())
else:
print "%s**%s**" % (context, tree.description.strip())
def render_DI(tree, context):
context.push(Frame(" ", "* "))
render_DH(tree.children[0], context)
print context
if len(tree.children) > 1:
render_DIVL(tree.children[1], context)
context.pop()
def render_DL(tree, context):
for n in tree.children: render_DI(n, context)
def render_P(tree, context):
for n in tree.children: render_L(n, context)
print context
def render_B(tree, context):
print context
def render_V(tree, context):
context.push(Frame(" "))
for n in tree.children:
if n.isa(L): render_L(n, context)
elif n.isa(B): render_B(n, context)
context.pop()
def render_BL(tree, context):
for n in tree.children:
context.push(Frame(" ", "+ "))
render_DIVL(n, context)
context.pop()
def render_DIVL(tree, context):
for n in tree.children:
if n.isa(P): render_P(n, context)
elif n.isa(BL): render_BL(n, context)
elif n.isa(DL): render_DL(n, context)
elif n.isa(V): render_V(n, context)
elif n.isa(S): render_S(n, context)
context.before = ""
def render(func, brief, tree, hlevel):
print "%s `%s` - %s" % ('#' * hlevel, func.upper(), brief)
render_DIVL(tree, Context(hlevel))
if __name__ == '__main__':
(opts, args) = optparser.parse_args()
if len(args) != 1:
optparser.print_help()
sys.exit(2)
mfilePath = args[0]
# Get the function
text = readText(mfilePath)
funcs = extract(text)
if len(funcs) == 0:
print >> sys.stderr, "Could not find a MATLAB function"
sys.exit(-1)
parser = Parser()
if funcs[0].nature == 'classdef':
# For MATLAB classes, look for other methods outside
# the classdef file
components = mfilePath.split(os.sep)
if len(components)>1 and components[-2][0] == '@':
classDir = string.join(components[:-1],os.sep)
for x in os.listdir(classDir):
if fnmatch.fnmatch(x, '*.m') and not x == components[-1]:
text = readText(classDir + os.sep + x)
funcs_ = extract(text)
if len(funcs_) > 0:
funcs.append(funcs_[0])
else:
        # For MATLAB functions, do not print subfunctions
funcs = [funcs[0]]
hlevel = 1
for f in funcs:
lexer = Lexer(f.body.splitlines())
tree = parser.parse(lexer)
if opts.verb:
print >> sys.stderr, tree
render(f.name, f.brief, tree, hlevel)
hlevel = 2
|
padraic-padraic/StabilizerSearch | refs/heads/master | stabilizer_search/__init__.py | 1 | from .core import *
from .stabilizers import *
# from .search import * |
fitermay/intellij-community | refs/heads/master | python/lib/Lib/site-packages/django/contrib/localflavor/sk/sk_districts.py | 543 | """
Slovak districts according to http://sk.wikipedia.org/wiki/Administrat%C3%ADvne_%C4%8Dlenenie_Slovenska
"""
from django.utils.translation import ugettext_lazy as _
DISTRICT_CHOICES = (
('BB', _('Banska Bystrica')),
('BS', _('Banska Stiavnica')),
('BJ', _('Bardejov')),
('BN', _('Banovce nad Bebravou')),
('BR', _('Brezno')),
('BA1', _('Bratislava I')),
('BA2', _('Bratislava II')),
('BA3', _('Bratislava III')),
('BA4', _('Bratislava IV')),
('BA5', _('Bratislava V')),
('BY', _('Bytca')),
('CA', _('Cadca')),
('DT', _('Detva')),
('DK', _('Dolny Kubin')),
('DS', _('Dunajska Streda')),
('GA', _('Galanta')),
('GL', _('Gelnica')),
('HC', _('Hlohovec')),
('HE', _('Humenne')),
('IL', _('Ilava')),
('KK', _('Kezmarok')),
('KN', _('Komarno')),
('KE1', _('Kosice I')),
('KE2', _('Kosice II')),
('KE3', _('Kosice III')),
('KE4', _('Kosice IV')),
('KEO', _('Kosice - okolie')),
('KA', _('Krupina')),
('KM', _('Kysucke Nove Mesto')),
('LV', _('Levice')),
('LE', _('Levoca')),
('LM', _('Liptovsky Mikulas')),
('LC', _('Lucenec')),
('MA', _('Malacky')),
('MT', _('Martin')),
('ML', _('Medzilaborce')),
('MI', _('Michalovce')),
('MY', _('Myjava')),
('NO', _('Namestovo')),
('NR', _('Nitra')),
('NM', _('Nove Mesto nad Vahom')),
('NZ', _('Nove Zamky')),
('PE', _('Partizanske')),
('PK', _('Pezinok')),
('PN', _('Piestany')),
('PT', _('Poltar')),
('PP', _('Poprad')),
('PB', _('Povazska Bystrica')),
('PO', _('Presov')),
('PD', _('Prievidza')),
('PU', _('Puchov')),
('RA', _('Revuca')),
('RS', _('Rimavska Sobota')),
('RV', _('Roznava')),
('RK', _('Ruzomberok')),
('SB', _('Sabinov')),
('SC', _('Senec')),
('SE', _('Senica')),
('SI', _('Skalica')),
('SV', _('Snina')),
('SO', _('Sobrance')),
('SN', _('Spisska Nova Ves')),
('SL', _('Stara Lubovna')),
('SP', _('Stropkov')),
('SK', _('Svidnik')),
('SA', _('Sala')),
('TO', _('Topolcany')),
('TV', _('Trebisov')),
('TN', _('Trencin')),
('TT', _('Trnava')),
('TR', _('Turcianske Teplice')),
('TS', _('Tvrdosin')),
('VK', _('Velky Krtis')),
('VT', _('Vranov nad Toplou')),
('ZM', _('Zlate Moravce')),
('ZV', _('Zvolen')),
('ZC', _('Zarnovica')),
('ZH', _('Ziar nad Hronom')),
('ZA', _('Zilina')),
)
|
uclouvain/osis_louvain | refs/heads/master | base/models/mandate.py | 1 | ##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2018 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
from base.models.enums import mandate_type as mandate_types
from django.db import models
from osis_common.models.osis_model_admin import OsisModelAdmin
class MandateAdmin(OsisModelAdmin):
list_display = ('education_group', 'function')
raw_id_fields = ('education_group',)
search_fields = ['education_group', 'function', 'external_id']
class Mandate(models.Model):
external_id = models.CharField(max_length=100, blank=True, null=True, db_index=True)
changed = models.DateTimeField(null=True, auto_now=True)
education_group = models.ForeignKey('EducationGroup', blank=True, null=True)
function = models.CharField(max_length=20, choices=mandate_types.MANDATE_TYPES)
qualification = models.CharField(max_length=50, blank=True, null=True)
def __str__(self):
return "{} {}".format(self.education_group, self.function)
|
Git-Host/jaikuengine | refs/heads/master | common/patterns.py | 33 | # Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Regular expressions for various things..."""
import re
# TODO(tyler): Separate out nick sub-pattern.
AVATAR_PARTIAL_PATH_RE = r'(default|(?P<nick>#?\w+@[\w\.]+))/(?P<path>.*)'
AVATAR_PARTIAL_PATH_COMPILED = re.compile(AVATAR_PARTIAL_PATH_RE)
AVATAR_PATH_RE = r'^image/' + AVATAR_PARTIAL_PATH_RE + '\.jpg'
AVATAR_PATH_COMPILED = re.compile(AVATAR_PATH_RE)
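# Illustrative matches (editorial note, not from the original module): a path
# like "image/default/avatar.jpg" matches AVATAR_PATH_COMPILED with
# path="avatar", and "image/bob@example.com/photos/me.jpg" matches with
# nick="bob@example.com" and path="photos/me".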
# TODO(tyler): Make these match reality / tighter:
EMAIL_RE = r'[^@]+@[a-zA-Z.]+'
EMAIL_COMPILED = re.compile(EMAIL_RE)
class PatternHandler(object):
pattern = None
service = None
def __init__(self, service):
self.service = service
def match(self, from_jid, message):
if self.pattern:
return self.pattern.match(message)
return None
def handle(self, from_jid, match, message):
raise NotImplementedError()
class ChannelPostHandler(PatternHandler):
pattern = re.compile(r'^\s*(#[\w@.]+):?\s+(.*)$')
def handle(self, sender, match, message):
self.service.channel_post(sender, match.group(1), match.group(2))
class CommentHandler(PatternHandler):
"""
Pattern handler for placing a comment.
Comments are placed by prefixing the message with C{'@'} and a nick name.
The comment will be added to the last entry posted or commented on by the
user associated with the given nick name, as received by the commenter.
"""
pattern = re.compile(r"""^\s*@(\w+):?\s*(.*)$""", re.I | re.S)
def handle(self, sender, match, message):
nick = match.group(1)
msg = match.group(2)
self.service.add_comment(sender, nick, msg)
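# Illustrative match (editorial note, not from the original module): a message
# such as "@alice thanks for the tip!" is matched by CommentHandler.pattern
# with nick="alice" and msg="thanks for the tip!".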
class ConfirmHandler(PatternHandler):
pattern = re.compile(r"""^\s*(?:yes)(?:\s+)?$""", re.I)
def handle(self, sender, match, message):
self.service.confirm(sender)
class FollowHandler(PatternHandler):
pattern = re.compile(
r"""^\s*(?:join|follow|add|f)\s+((?P<channel>#\w+)|(?P<nick>\w+))""",
re.I)
def handle(self, sender, match, message):
channel = match.group('channel')
nick = match.group('nick')
if channel:
self.service.channel_join(sender, channel)
elif nick:
self.service.actor_add_contact(sender, nick)
else:
# hmm, perhaps we should return true or false, depending on whether
# this was handled.
pass
class HelpHandler(PatternHandler):
pattern = re.compile(r"""^\s*(help)\s*$""", re.I)
def handle(self, sender, match, message):
self.service.help(sender)
class LeaveHandler(PatternHandler):
pattern = re.compile(
r"""^\s*(?:leave|part|remove|l)\s+((?P<channel>#\w+)|(?P<nick>\w+))""",
re.I)
def handle(self, sender, match, message):
channel = match.group('channel')
nick = match.group('nick')
if channel:
self.service.channel_part(sender, channel)
elif nick:
self.service.actor_remove_contact(sender, nick)
else:
# hmm, perhaps we should return true or false, depending on whether
# this was handled.
pass
class OffHandler(PatternHandler):
pattern = re.compile(r"""^\s*(?:off|stop|end|quit|cancel|unsubscribe|pause)(?:\s+)?$""", re.I)
def handle(self, sender, match, message):
self.service.stop_notifications(sender)
class OnHandler(PatternHandler):
pattern = re.compile(r"""^\s*(?:on|start|wake)(?:\s+)?$""", re.I)
def handle(self, sender, match, message):
self.service.start_notifications(sender)
class PostHandler(PatternHandler):
def match(self, sender, message):
return True
def handle(self, sender, match, message):
self.service.post(sender, message)
class PromotionHandler(PatternHandler):
"""
Create a new account
"""
pattern = re.compile(r"""^\s*(sign\s+up)\s+(\w+)""", re.I)
def handle(self, sender, match, message):
self.service.promote_user(sender, match.group(2))
class SignInHandler(PatternHandler):
"""
Pattern handler to claim an existing account from a follow-only account.
"""
pattern = re.compile(r"""^\s*(claim|sign\s+in)\s+(\w+)\s+(\S+)""", re.I)
def handle(self, sender, match, message):
nick = match.group(2)
password = match.group(3)
self.service.sign_in(sender, nick, password)
class SignOutHandler(PatternHandler):
pattern = re.compile(r"""^\s*(sign\s+out)\s*$""", re.I)
def handle(self, sender, match, message):
self.service.sign_out(sender)
|
jsirois/pex | refs/heads/master | pex/vendor/_vendored/pip/pip/_vendor/msgpack/_version.py | 21 | version = (1, 0, 0)
|
FePhyFoFum/PyPHLAWD | refs/heads/master | src/get_internal_seqs_unrepresented_in_tips.py | 1 | import sys
import os
import seq
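# Editorial summary (inferred from the logic below): read the current
# directory's fasta file, drop every sequence id that also appears in a child
# cluster's fasta file, and write whatever remains to notinchildren.fas.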
if __name__ == "__main__":
if len(sys.argv) != 3:
print("python "+sys.argv[0]+" curdir logfile")
sys.exit(0)
curd = sys.argv[1]
LOGFILE = sys.argv[2]
ff = None
dirs = []
for i in os.listdir(curd):
if ".fas" == i[-4:]:
ff = i
elif os.path.isdir(curd+"/"+i) and i != "clusters":
dirs.append(curd+"/"+i+"/"+i+".fas")
seqids = []
seqs = {}
for i in seq.read_fasta_file_iter(curd+"/"+ff):
seqids.append(i.name)
seqs[i.name] = i
for i in dirs:
for j in seq.read_fasta_file_iter(i):
if len(j.name) > 0 and j.name in seqids:
del seqs[j.name]
if len(seqs) > 0:
outfile = open(curd+"/notinchildren.fas","w")
for i in seqs:
outfile.write(seqs[i].get_fasta())
outfile.close()
|
MartinEnder/erpnext-de | refs/heads/develop | erpnext/selling/report/customer_credit_balance/customer_credit_balance.py | 96 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import flt
from erpnext.selling.doctype.customer.customer import get_customer_outstanding, get_credit_limit
def execute(filters=None):
if not filters: filters = {}
	# Check whether customer IDs follow a naming series or use the customer name
customer_naming_type = frappe.db.get_value("Selling Settings", None, "cust_master_name")
columns = get_columns(customer_naming_type)
data = []
customer_list = get_details(filters)
for d in customer_list:
row = []
outstanding_amt = get_customer_outstanding(d.name, filters.get("company"))
credit_limit = get_credit_limit(d.name, filters.get("company"))
bal = flt(credit_limit) - flt(outstanding_amt)
if customer_naming_type == "Naming Series":
row = [d.name, d.customer_name, credit_limit, outstanding_amt, bal]
else:
row = [d.name, credit_limit, outstanding_amt, bal]
if credit_limit:
data.append(row)
return columns, data
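# Worked example (editorial note, not part of the original module): with a
# credit limit of 10,000 and an outstanding amount of 2,500, the row shows a
# credit balance of 10,000 - 2,500 = 7,500; customers without a credit limit
# are skipped by the `if credit_limit:` guard above.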
def get_columns(customer_naming_type):
columns = [
_("Customer") + ":Link/Customer:120", _("Credit Limit") + ":Currency:120",
_("Outstanding Amt") + ":Currency:100", _("Credit Balance") + ":Currency:120"
]
if customer_naming_type == "Naming Series":
columns.insert(1, _("Customer Name") + ":Data:120")
return columns
def get_details(filters):
conditions = ""
if filters.get("customer"):
conditions += " where name = %(customer)s"
return frappe.db.sql("""select name, customer_name from `tabCustomer` %s"""
% conditions, filters, as_dict=1)
|
edx-solutions/edx-platform | refs/heads/master | cms/djangoapps/contentstore/views/tests/test_tabs.py | 4 | """ Tests for tab functions (just primitive). """
import json
from contentstore.tests.utils import CourseTestCase
from contentstore.utils import reverse_course_url
from contentstore.views import tabs
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.tabs import CourseTabList
from xmodule.x_module import STUDENT_VIEW
class TabsPageTests(CourseTestCase):
"""Test cases for Tabs (a.k.a Pages) page"""
def setUp(self):
"""Common setup for tests"""
# call super class to setup course, etc.
super(TabsPageTests, self).setUp()
# Set the URL for tests
self.url = reverse_course_url('tabs_handler', self.course.id)
# add a static tab to the course, for code coverage
self.test_tab = ItemFactory.create(
parent_location=self.course.location,
category="static_tab",
display_name="Static_1"
)
self.reload_course()
def check_invalid_tab_id_response(self, resp):
"""Verify response is an error listing the invalid_tab_id"""
self.assertEqual(resp.status_code, 400)
resp_content = json.loads(resp.content.decode('utf-8'))
self.assertIn("error", resp_content)
self.assertIn("invalid_tab_id", resp_content['error'])
def test_not_implemented(self):
"""Verify not implemented errors"""
# JSON GET request not supported
with self.assertRaises(NotImplementedError):
self.client.get(self.url)
# JSON POST request not supported
with self.assertRaises(NotImplementedError):
self.client.ajax_post(
self.url,
data=json.dumps({
'tab_id_locator': {'tab_id': 'courseware'},
'unsupported_request': None,
}),
)
# invalid JSON POST request
with self.assertRaises(NotImplementedError):
self.client.ajax_post(
self.url,
data={'invalid_request': None},
)
def test_view_index(self):
"""Basic check that the Pages page responds correctly"""
resp = self.client.get_html(self.url)
self.assertContains(resp, 'course-nav-list')
def test_reorder_tabs(self):
"""Test re-ordering of tabs"""
# get the original tab ids
orig_tab_ids = [tab.tab_id for tab in self.course.tabs]
tab_ids = list(orig_tab_ids)
num_orig_tabs = len(orig_tab_ids)
# make sure we have enough tabs to play around with
self.assertGreaterEqual(num_orig_tabs, 5)
# reorder the last two tabs
tab_ids[num_orig_tabs - 1], tab_ids[num_orig_tabs - 2] = tab_ids[num_orig_tabs - 2], tab_ids[num_orig_tabs - 1]
# remove the middle tab
        # (the code needs to handle the case where the tabs requested for re-ordering are a subset of the tabs in the course)
removed_tab = tab_ids.pop(num_orig_tabs // 2)
self.assertEqual(len(tab_ids), num_orig_tabs - 1)
# post the request
resp = self.client.ajax_post(
self.url,
data={'tabs': [{'tab_id': tab_id} for tab_id in tab_ids]},
)
self.assertEqual(resp.status_code, 204)
# reload the course and verify the new tab order
self.reload_course()
new_tab_ids = [tab.tab_id for tab in self.course.tabs]
self.assertEqual(new_tab_ids, tab_ids + [removed_tab])
self.assertNotEqual(new_tab_ids, orig_tab_ids)
def test_reorder_tabs_invalid_list(self):
"""Test re-ordering of tabs with invalid tab list"""
orig_tab_ids = [tab.tab_id for tab in self.course.tabs]
tab_ids = list(orig_tab_ids)
# reorder the first two tabs
tab_ids[0], tab_ids[1] = tab_ids[1], tab_ids[0]
# post the request
resp = self.client.ajax_post(
self.url,
data={'tabs': [{'tab_id': tab_id} for tab_id in tab_ids]},
)
self.assertEqual(resp.status_code, 400)
resp_content = json.loads(resp.content.decode('utf-8'))
self.assertIn("error", resp_content)
def test_reorder_tabs_invalid_tab(self):
"""Test re-ordering of tabs with invalid tab"""
invalid_tab_ids = ['courseware', 'info', 'invalid_tab_id']
# post the request
resp = self.client.ajax_post(
self.url,
data={'tabs': [{'tab_id': tab_id} for tab_id in invalid_tab_ids]},
)
self.check_invalid_tab_id_response(resp)
def check_toggle_tab_visiblity(self, tab_type, new_is_hidden_setting):
"""Helper method to check changes in tab visibility"""
# find the tab
old_tab = CourseTabList.get_tab_by_type(self.course.tabs, tab_type)
# visibility should be different from new setting
self.assertNotEqual(old_tab.is_hidden, new_is_hidden_setting)
# post the request
resp = self.client.ajax_post(
self.url,
data=json.dumps({
'tab_id_locator': {'tab_id': old_tab.tab_id},
'is_hidden': new_is_hidden_setting,
}),
)
self.assertEqual(resp.status_code, 204)
# reload the course and verify the new visibility setting
self.reload_course()
new_tab = CourseTabList.get_tab_by_type(self.course.tabs, tab_type)
self.assertEqual(new_tab.is_hidden, new_is_hidden_setting)
def test_toggle_tab_visibility(self):
"""Test toggling of tab visibility"""
self.check_toggle_tab_visiblity('wiki', True)
self.check_toggle_tab_visiblity('wiki', False)
def test_toggle_invalid_tab_visibility(self):
"""Test toggling visibility of an invalid tab"""
# post the request
resp = self.client.ajax_post(
self.url,
data=json.dumps({
'tab_id_locator': {'tab_id': 'invalid_tab_id'}
}),
)
self.check_invalid_tab_id_response(resp)
def test_tab_preview_html(self):
"""
Verify that the static tab renders itself with the correct HTML
"""
preview_url = '/xblock/{}/{}'.format(self.test_tab.location, STUDENT_VIEW)
resp = self.client.get(preview_url, HTTP_ACCEPT='application/json')
self.assertEqual(resp.status_code, 200)
resp_content = json.loads(resp.content.decode('utf-8'))
html = resp_content['html']
# Verify that the HTML contains the expected elements
self.assertIn('<span class="action-button-text">Edit</span>', html)
self.assertIn('<span class="sr">Duplicate this component</span>', html)
self.assertIn('<span class="sr">Delete this component</span>', html)
self.assertIn('<span data-tooltip="Drag to reorder" class="drag-handle action"></span>', html)
class PrimitiveTabEdit(ModuleStoreTestCase):
"""Tests for the primitive tab edit data manipulations"""
def test_delete(self):
"""Test primitive tab deletion."""
course = CourseFactory.create()
with self.assertRaises(ValueError):
tabs.primitive_delete(course, 0)
with self.assertRaises(ValueError):
tabs.primitive_delete(course, 1)
with self.assertRaises(IndexError):
tabs.primitive_delete(course, 7)
tabs.primitive_delete(course, 2)
self.assertNotIn({u'type': u'textbooks'}, course.tabs)
# Check that discussion has shifted up
self.assertEqual(course.tabs[2], {'type': 'discussion', 'name': 'Discussion'})
def test_insert(self):
"""Test primitive tab insertion."""
course = CourseFactory.create()
tabs.primitive_insert(course, 2, 'pdf_textbooks', 'aname')
self.assertEqual(course.tabs[2], {'type': 'pdf_textbooks', 'name': 'aname'})
with self.assertRaises(ValueError):
tabs.primitive_insert(course, 0, 'pdf_textbooks', 'aname')
with self.assertRaises(ValueError):
tabs.primitive_insert(course, 3, 'static_tab', 'aname')
def test_save(self):
"""Test course saving."""
course = CourseFactory.create()
tabs.primitive_insert(course, 3, 'pdf_textbooks', 'aname')
course2 = modulestore().get_course(course.id)
self.assertEqual(course2.tabs[3], {'type': 'pdf_textbooks', 'name': 'aname'})
|
rahenry/nonHexact | refs/heads/master | test3.py | 1 | import numpy, scipy, math, cmath, random, scipy.sparse, scipy.sparse.linalg, scipy.special, sys, struct, os, operator
import matplotlib.pyplot as plt
import solver, data_processing, writer, measure
N = 3
L = 200
R = range(0,L)
g = 1.
def omega(j):
return numpy.exp(math.pi*1.J*j/L)
def eps_per(k, L, N):
    # z is mathematically real (see the derivation note after exact_per
    # below); take the real part so the sign test is well defined, and reuse
    # z instead of recomputing the sum.
    z = (omega(k) + numpy.power(omega(k), 2.*L-1)).real
    if z < 0.: return 0
    return numpy.power(z, 2./N)
def exact_per(L, N):
res = 0.
for j in range(2*L):
res -= eps_per(j, L, N)
return res/L
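# Derivation note (editorial): omega(k) + omega(k)**(2L-1)
#   = exp(i*pi*k/L) + exp(i*pi*k*(2L-1)/L)
#   = exp(i*pi*k/L) + exp(2*pi*i*k) * exp(-i*pi*k/L)
#   = 2*cos(pi*k/L)            (since exp(2*pi*i*k) = 1 for integer k)
# so eps_per(k, L, N) = (2*cos(pi*k/L))**(2/N) whenever the cosine is
# non-negative, and 0 otherwise.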
print '____'
print solver.exact_eigenvalue(L, N, 1.)
print exact_per(L, N)
|
rkashapov/buildbot | refs/heads/master | pkg/test_buildbot_pkg.py | 10 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import absolute_import
from __future__ import print_function
import os
import shutil
from subprocess import call
from subprocess import check_call
from textwrap import dedent
from twisted.trial import unittest
class BuildbotWWWPkg(unittest.TestCase):
pkgName = "buildbot_www"
pkgPaths = ["www", "base"]
epName = "base"
loadTestScript = dedent("""
import pkg_resources
apps = {}
for ep in pkg_resources.iter_entry_points('buildbot.www'):
apps[ep.name] = ep.load()
assert("scripts.js" in apps["%(epName)s"].resource.listNames())
assert(apps["%(epName)s"].version.startswith("0."))
assert(apps["%(epName)s"].description is not None)
print(apps["%(epName)s"])
""")
@property
def path(self):
return os.path.abspath(os.path.join(os.path.dirname(__file__), "..", *self.pkgPaths))
def rmtree(self, d):
if os.path.isdir(d):
shutil.rmtree(d)
def setUp(self):
call("pip uninstall -y " + self.pkgName, shell=True)
self.rmtree(os.path.join(self.path, "build"))
self.rmtree(os.path.join(self.path, "dist"))
self.rmtree(os.path.join(self.path, "static"))
def run_setup(self, cmd):
check_call("python setup.py " + cmd, shell=True, cwd=self.path)
def check_correct_installation(self):
# assert we can import buildbot_www
        # and that it has an endpoint with resource containing the file "scripts.js"
check_call([
'python', '-c', self.loadTestScript % dict(epName=self.epName)])
def test_install(self):
self.run_setup("install")
self.check_correct_installation()
def test_wheel(self):
self.run_setup("bdist_wheel")
check_call("pip install dist/*.whl", shell=True, cwd=self.path)
self.check_correct_installation()
def test_egg(self):
self.run_setup("bdist_egg")
# egg installation is not supported by pip, so we use easy_install
check_call("easy_install dist/*.egg", shell=True, cwd=self.path)
self.check_correct_installation()
def test_develop(self):
self.run_setup("develop")
self.check_correct_installation()
def test_develop_via_pip(self):
check_call("pip install -e .", shell=True, cwd=self.path)
self.check_correct_installation()
def test_sdist(self):
self.run_setup("sdist")
check_call("pip install dist/*.tar.gz", shell=True, cwd=self.path)
self.check_correct_installation()
class BuildbotMDWWWPkg(BuildbotWWWPkg):
pkgPaths = ["www", "md_base"]
class BuildbotConsolePkg(BuildbotWWWPkg):
pkgName = "buildbot-console-view"
pkgPaths = ["www", "console_view"]
epName = "console_view"
class BuildbotWaterfallPkg(BuildbotWWWPkg):
pkgName = "buildbot-waterfall-view"
pkgPaths = ["www", "waterfall_view"]
epName = "waterfall_view"
class BuildbotCodeparameterPkg(BuildbotWWWPkg):
pkgName = "buildbot-codeparameter"
pkgPaths = ["www", "codeparameter"]
epName = "codeparameter"
|
IMCG/iMapReduce | refs/heads/master | src/contrib/hive/metastore/src/gen-py/hive_metastore/ThriftMetaStore.py | 15 | #
# Autogenerated by Thrift
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
from thrift.Thrift import *
import fb303.FacebookService
from ttypes import *
from thrift.Thrift import TProcessor
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
try:
from thrift.protocol import fastbinary
except:
fastbinary = None
class Iface(fb303.FacebookService.Iface):
def get_fields(self, db_name, table_name):
pass
def get_tables(self, db_name, pattern):
pass
def get_schema(self, table_name):
pass
def alter_table(self, db_name, table_name, schema):
pass
def create_table(self, db_name, table_name, schema):
pass
def drop_table(self, db_name, table_name):
pass
def truncate_table(self, db_name, table_name, partition):
pass
def table_exists(self, db_name, table_name):
pass
def get_partitions(self, db_name, table_name):
pass
def get_dbs(self, ):
pass
def cat(self, db_name, table_name, partition, high):
pass
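# Illustrative usage sketch (editorial note, not generated output; host/port
# are placeholders): a caller would typically wrap a socket transport in a
# binary protocol and use the Client below, e.g.
#   from thrift.transport import TSocket, TTransport
#   from thrift.protocol import TBinaryProtocol
#   transport = TTransport.TBufferedTransport(TSocket.TSocket('localhost', 9083))
#   client = Client(TBinaryProtocol.TBinaryProtocol(transport))
#   transport.open()
#   tables = client.get_tables('default', '*')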
class Client(fb303.FacebookService.Client, Iface):
def __init__(self, iprot, oprot=None):
fb303.FacebookService.Client.__init__(self, iprot, oprot)
def get_fields(self, db_name, table_name):
self.send_get_fields(db_name, table_name)
return self.recv_get_fields()
def send_get_fields(self, db_name, table_name):
self._oprot.writeMessageBegin('get_fields', TMessageType.CALL, self._seqid)
args = get_fields_args()
args.db_name = db_name
args.table_name = table_name
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_get_fields(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = get_fields_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success != None:
return result.success
if result.ouch1 != None:
raise result.ouch1
if result.ouch2 != None:
raise result.ouch2
if result.ouch3 != None:
raise result.ouch3
raise TApplicationException(TApplicationException.MISSING_RESULT, "get_fields failed: unknown result");
def get_tables(self, db_name, pattern):
self.send_get_tables(db_name, pattern)
return self.recv_get_tables()
def send_get_tables(self, db_name, pattern):
self._oprot.writeMessageBegin('get_tables', TMessageType.CALL, self._seqid)
args = get_tables_args()
args.db_name = db_name
args.pattern = pattern
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_get_tables(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = get_tables_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success != None:
return result.success
if result.ouch1 != None:
raise result.ouch1
if result.ouch2 != None:
raise result.ouch2
if result.ouch3 != None:
raise result.ouch3
raise TApplicationException(TApplicationException.MISSING_RESULT, "get_tables failed: unknown result");
def get_schema(self, table_name):
self.send_get_schema(table_name)
return self.recv_get_schema()
def send_get_schema(self, table_name):
self._oprot.writeMessageBegin('get_schema', TMessageType.CALL, self._seqid)
args = get_schema_args()
args.table_name = table_name
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_get_schema(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = get_schema_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success != None:
return result.success
if result.ouch1 != None:
raise result.ouch1
if result.ouch2 != None:
raise result.ouch2
if result.ouch3 != None:
raise result.ouch3
raise TApplicationException(TApplicationException.MISSING_RESULT, "get_schema failed: unknown result");
def alter_table(self, db_name, table_name, schema):
self.send_alter_table(db_name, table_name, schema)
self.recv_alter_table()
def send_alter_table(self, db_name, table_name, schema):
self._oprot.writeMessageBegin('alter_table', TMessageType.CALL, self._seqid)
args = alter_table_args()
args.db_name = db_name
args.table_name = table_name
args.schema = schema
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_alter_table(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = alter_table_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.ouch1 != None:
raise result.ouch1
if result.ouch2 != None:
raise result.ouch2
if result.ouch3 != None:
raise result.ouch3
return
def create_table(self, db_name, table_name, schema):
self.send_create_table(db_name, table_name, schema)
self.recv_create_table()
def send_create_table(self, db_name, table_name, schema):
self._oprot.writeMessageBegin('create_table', TMessageType.CALL, self._seqid)
args = create_table_args()
args.db_name = db_name
args.table_name = table_name
args.schema = schema
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_create_table(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = create_table_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.ouch1 != None:
raise result.ouch1
if result.ouch2 != None:
raise result.ouch2
return
def drop_table(self, db_name, table_name):
self.send_drop_table(db_name, table_name)
self.recv_drop_table()
def send_drop_table(self, db_name, table_name):
self._oprot.writeMessageBegin('drop_table', TMessageType.CALL, self._seqid)
args = drop_table_args()
args.db_name = db_name
args.table_name = table_name
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_drop_table(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = drop_table_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.ouch1 != None:
raise result.ouch1
if result.ouch2 != None:
raise result.ouch2
if result.ouch3 != None:
raise result.ouch3
return
def truncate_table(self, db_name, table_name, partition):
self.send_truncate_table(db_name, table_name, partition)
self.recv_truncate_table()
def send_truncate_table(self, db_name, table_name, partition):
self._oprot.writeMessageBegin('truncate_table', TMessageType.CALL, self._seqid)
args = truncate_table_args()
args.db_name = db_name
args.table_name = table_name
args.partition = partition
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_truncate_table(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = truncate_table_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.ouch1 != None:
raise result.ouch1
if result.ouch2 != None:
raise result.ouch2
if result.ouch3 != None:
raise result.ouch3
return
def table_exists(self, db_name, table_name):
self.send_table_exists(db_name, table_name)
return self.recv_table_exists()
def send_table_exists(self, db_name, table_name):
self._oprot.writeMessageBegin('table_exists', TMessageType.CALL, self._seqid)
args = table_exists_args()
args.db_name = db_name
args.table_name = table_name
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_table_exists(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = table_exists_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success != None:
return result.success
if result.ouch1 != None:
raise result.ouch1
if result.ouch2 != None:
raise result.ouch2
raise TApplicationException(TApplicationException.MISSING_RESULT, "table_exists failed: unknown result");
def get_partitions(self, db_name, table_name):
self.send_get_partitions(db_name, table_name)
return self.recv_get_partitions()
def send_get_partitions(self, db_name, table_name):
self._oprot.writeMessageBegin('get_partitions', TMessageType.CALL, self._seqid)
args = get_partitions_args()
args.db_name = db_name
args.table_name = table_name
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_get_partitions(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = get_partitions_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success != None:
return result.success
if result.ouch1 != None:
raise result.ouch1
if result.ouch2 != None:
raise result.ouch2
if result.ouch3 != None:
raise result.ouch3
raise TApplicationException(TApplicationException.MISSING_RESULT, "get_partitions failed: unknown result");
def get_dbs(self, ):
self.send_get_dbs()
return self.recv_get_dbs()
def send_get_dbs(self, ):
self._oprot.writeMessageBegin('get_dbs', TMessageType.CALL, self._seqid)
args = get_dbs_args()
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_get_dbs(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = get_dbs_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success != None:
return result.success
if result.ouch != None:
raise result.ouch
raise TApplicationException(TApplicationException.MISSING_RESULT, "get_dbs failed: unknown result");
def cat(self, db_name, table_name, partition, high):
self.send_cat(db_name, table_name, partition, high)
return self.recv_cat()
def send_cat(self, db_name, table_name, partition, high):
self._oprot.writeMessageBegin('cat', TMessageType.CALL, self._seqid)
args = cat_args()
args.db_name = db_name
args.table_name = table_name
args.partition = partition
args.high = high
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_cat(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = cat_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success != None:
return result.success
if result.ouch1 != None:
raise result.ouch1
if result.ouch2 != None:
raise result.ouch2
if result.ouch3 != None:
raise result.ouch3
raise TApplicationException(TApplicationException.MISSING_RESULT, "cat failed: unknown result");
class Processor(fb303.FacebookService.Processor, Iface, TProcessor):
def __init__(self, handler):
fb303.FacebookService.Processor.__init__(self, handler)
self._processMap["get_fields"] = Processor.process_get_fields
self._processMap["get_tables"] = Processor.process_get_tables
self._processMap["get_schema"] = Processor.process_get_schema
self._processMap["alter_table"] = Processor.process_alter_table
self._processMap["create_table"] = Processor.process_create_table
self._processMap["drop_table"] = Processor.process_drop_table
self._processMap["truncate_table"] = Processor.process_truncate_table
self._processMap["table_exists"] = Processor.process_table_exists
self._processMap["get_partitions"] = Processor.process_get_partitions
self._processMap["get_dbs"] = Processor.process_get_dbs
self._processMap["cat"] = Processor.process_cat
def process(self, iprot, oprot):
(name, type, seqid) = iprot.readMessageBegin()
if name not in self._processMap:
iprot.skip(TType.STRUCT)
iprot.readMessageEnd()
x = TApplicationException(TApplicationException.UNKNOWN_METHOD, 'Unknown function %s' % (name))
oprot.writeMessageBegin(name, TMessageType.EXCEPTION, seqid)
x.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
return
else:
self._processMap[name](self, seqid, iprot, oprot)
return True
def process_get_fields(self, seqid, iprot, oprot):
args = get_fields_args()
args.read(iprot)
iprot.readMessageEnd()
result = get_fields_result()
try:
result.success = self._handler.get_fields(args.db_name, args.table_name)
except MetaException, ouch1:
result.ouch1 = ouch1
except UnknownTableException, ouch2:
result.ouch2 = ouch2
except UnknownDBException, ouch3:
result.ouch3 = ouch3
oprot.writeMessageBegin("get_fields", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_get_tables(self, seqid, iprot, oprot):
args = get_tables_args()
args.read(iprot)
iprot.readMessageEnd()
result = get_tables_result()
try:
result.success = self._handler.get_tables(args.db_name, args.pattern)
except MetaException, ouch1:
result.ouch1 = ouch1
except UnknownTableException, ouch2:
result.ouch2 = ouch2
except UnknownDBException, ouch3:
result.ouch3 = ouch3
oprot.writeMessageBegin("get_tables", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_get_schema(self, seqid, iprot, oprot):
args = get_schema_args()
args.read(iprot)
iprot.readMessageEnd()
result = get_schema_result()
try:
result.success = self._handler.get_schema(args.table_name)
except MetaException, ouch1:
result.ouch1 = ouch1
except UnknownTableException, ouch2:
result.ouch2 = ouch2
except UnknownDBException, ouch3:
result.ouch3 = ouch3
oprot.writeMessageBegin("get_schema", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_alter_table(self, seqid, iprot, oprot):
args = alter_table_args()
args.read(iprot)
iprot.readMessageEnd()
result = alter_table_result()
try:
self._handler.alter_table(args.db_name, args.table_name, args.schema)
except MetaException, ouch1:
result.ouch1 = ouch1
except UnknownTableException, ouch2:
result.ouch2 = ouch2
except UnknownDBException, ouch3:
result.ouch3 = ouch3
oprot.writeMessageBegin("alter_table", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_create_table(self, seqid, iprot, oprot):
args = create_table_args()
args.read(iprot)
iprot.readMessageEnd()
result = create_table_result()
try:
self._handler.create_table(args.db_name, args.table_name, args.schema)
except MetaException, ouch1:
result.ouch1 = ouch1
except UnknownDBException, ouch2:
result.ouch2 = ouch2
oprot.writeMessageBegin("create_table", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_drop_table(self, seqid, iprot, oprot):
args = drop_table_args()
args.read(iprot)
iprot.readMessageEnd()
result = drop_table_result()
try:
self._handler.drop_table(args.db_name, args.table_name)
except MetaException, ouch1:
result.ouch1 = ouch1
except UnknownTableException, ouch2:
result.ouch2 = ouch2
except UnknownDBException, ouch3:
result.ouch3 = ouch3
oprot.writeMessageBegin("drop_table", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_truncate_table(self, seqid, iprot, oprot):
args = truncate_table_args()
args.read(iprot)
iprot.readMessageEnd()
result = truncate_table_result()
try:
self._handler.truncate_table(args.db_name, args.table_name, args.partition)
except MetaException, ouch1:
result.ouch1 = ouch1
except UnknownTableException, ouch2:
result.ouch2 = ouch2
except UnknownDBException, ouch3:
result.ouch3 = ouch3
oprot.writeMessageBegin("truncate_table", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_table_exists(self, seqid, iprot, oprot):
args = table_exists_args()
args.read(iprot)
iprot.readMessageEnd()
result = table_exists_result()
try:
result.success = self._handler.table_exists(args.db_name, args.table_name)
except MetaException, ouch1:
result.ouch1 = ouch1
except UnknownDBException, ouch2:
result.ouch2 = ouch2
oprot.writeMessageBegin("table_exists", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_get_partitions(self, seqid, iprot, oprot):
args = get_partitions_args()
args.read(iprot)
iprot.readMessageEnd()
result = get_partitions_result()
try:
result.success = self._handler.get_partitions(args.db_name, args.table_name)
except MetaException, ouch1:
result.ouch1 = ouch1
except UnknownTableException, ouch2:
result.ouch2 = ouch2
except UnknownDBException, ouch3:
result.ouch3 = ouch3
oprot.writeMessageBegin("get_partitions", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_get_dbs(self, seqid, iprot, oprot):
args = get_dbs_args()
args.read(iprot)
iprot.readMessageEnd()
result = get_dbs_result()
try:
result.success = self._handler.get_dbs()
except MetaException, ouch:
result.ouch = ouch
oprot.writeMessageBegin("get_dbs", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_cat(self, seqid, iprot, oprot):
args = cat_args()
args.read(iprot)
iprot.readMessageEnd()
result = cat_result()
try:
result.success = self._handler.cat(args.db_name, args.table_name, args.partition, args.high)
except MetaException, ouch1:
result.ouch1 = ouch1
except UnknownDBException, ouch2:
result.ouch2 = ouch2
except UnknownTableException, ouch3:
result.ouch3 = ouch3
oprot.writeMessageBegin("cat", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
# HELPER FUNCTIONS AND STRUCTURES
class get_fields_args:
thrift_spec = None
def __init__(self, d=None):
self.db_name = None
self.table_name = None
if isinstance(d, dict):
if 'db_name' in d:
self.db_name = d['db_name']
if 'table_name' in d:
self.table_name = d['table_name']
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == -1:
if ftype == TType.STRING:
self.db_name = iprot.readString();
else:
iprot.skip(ftype)
elif fid == -2:
if ftype == TType.STRING:
self.table_name = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('get_fields_args')
if self.db_name != None:
oprot.writeFieldBegin('db_name', TType.STRING, -1)
oprot.writeString(self.db_name)
oprot.writeFieldEnd()
if self.table_name != None:
oprot.writeFieldBegin('table_name', TType.STRING, -2)
oprot.writeString(self.table_name)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __str__(self):
return str(self.__dict__)
def __repr__(self):
return repr(self.__dict__)
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class get_fields_result:
thrift_spec = None
def __init__(self, d=None):
self.success = None
self.ouch1 = None
self.ouch2 = None
self.ouch3 = None
if isinstance(d, dict):
if 'success' in d:
self.success = d['success']
if 'ouch1' in d:
self.ouch1 = d['ouch1']
if 'ouch2' in d:
self.ouch2 = d['ouch2']
if 'ouch3' in d:
self.ouch3 = d['ouch3']
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.LIST:
self.success = []
(_etype160, _size157) = iprot.readListBegin()
for _i161 in xrange(_size157):
_elem162 = FieldSchema()
_elem162.read(iprot)
self.success.append(_elem162)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == -3:
if ftype == TType.STRUCT:
self.ouch1 = MetaException()
self.ouch1.read(iprot)
else:
iprot.skip(ftype)
elif fid == -4:
if ftype == TType.STRUCT:
self.ouch2 = UnknownTableException()
self.ouch2.read(iprot)
else:
iprot.skip(ftype)
elif fid == -5:
if ftype == TType.STRUCT:
self.ouch3 = UnknownDBException()
self.ouch3.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('get_fields_result')
if self.success != None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
for iter163 in self.success:
iter163.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.ouch1 != None:
oprot.writeFieldBegin('ouch1', TType.STRUCT, -3)
self.ouch1.write(oprot)
oprot.writeFieldEnd()
if self.ouch2 != None:
oprot.writeFieldBegin('ouch2', TType.STRUCT, -4)
self.ouch2.write(oprot)
oprot.writeFieldEnd()
if self.ouch3 != None:
oprot.writeFieldBegin('ouch3', TType.STRUCT, -5)
self.ouch3.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __str__(self):
return str(self.__dict__)
def __repr__(self):
return repr(self.__dict__)
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class get_tables_args:
thrift_spec = None
def __init__(self, d=None):
self.db_name = None
self.pattern = None
if isinstance(d, dict):
if 'db_name' in d:
self.db_name = d['db_name']
if 'pattern' in d:
self.pattern = d['pattern']
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == -1:
if ftype == TType.STRING:
self.db_name = iprot.readString();
else:
iprot.skip(ftype)
elif fid == -2:
if ftype == TType.STRING:
self.pattern = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('get_tables_args')
if self.db_name != None:
oprot.writeFieldBegin('db_name', TType.STRING, -1)
oprot.writeString(self.db_name)
oprot.writeFieldEnd()
if self.pattern != None:
oprot.writeFieldBegin('pattern', TType.STRING, -2)
oprot.writeString(self.pattern)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __str__(self):
return str(self.__dict__)
def __repr__(self):
return repr(self.__dict__)
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class get_tables_result:
thrift_spec = None
def __init__(self, d=None):
self.success = None
self.ouch1 = None
self.ouch2 = None
self.ouch3 = None
if isinstance(d, dict):
if 'success' in d:
self.success = d['success']
if 'ouch1' in d:
self.ouch1 = d['ouch1']
if 'ouch2' in d:
self.ouch2 = d['ouch2']
if 'ouch3' in d:
self.ouch3 = d['ouch3']
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.LIST:
self.success = []
(_etype167, _size164) = iprot.readListBegin()
for _i168 in xrange(_size164):
_elem169 = iprot.readString();
self.success.append(_elem169)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == -3:
if ftype == TType.STRUCT:
self.ouch1 = MetaException()
self.ouch1.read(iprot)
else:
iprot.skip(ftype)
elif fid == -4:
if ftype == TType.STRUCT:
self.ouch2 = UnknownTableException()
self.ouch2.read(iprot)
else:
iprot.skip(ftype)
elif fid == -5:
if ftype == TType.STRUCT:
self.ouch3 = UnknownDBException()
self.ouch3.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('get_tables_result')
if self.success != None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRING, len(self.success))
for iter170 in self.success:
oprot.writeString(iter170)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.ouch1 != None:
oprot.writeFieldBegin('ouch1', TType.STRUCT, -3)
self.ouch1.write(oprot)
oprot.writeFieldEnd()
if self.ouch2 != None:
oprot.writeFieldBegin('ouch2', TType.STRUCT, -4)
self.ouch2.write(oprot)
oprot.writeFieldEnd()
if self.ouch3 != None:
oprot.writeFieldBegin('ouch3', TType.STRUCT, -5)
self.ouch3.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __str__(self):
return str(self.__dict__)
def __repr__(self):
return repr(self.__dict__)
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class get_schema_args:
thrift_spec = None
def __init__(self, d=None):
self.table_name = None
if isinstance(d, dict):
if 'table_name' in d:
self.table_name = d['table_name']
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == -1:
if ftype == TType.STRING:
self.table_name = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('get_schema_args')
if self.table_name != None:
oprot.writeFieldBegin('table_name', TType.STRING, -1)
oprot.writeString(self.table_name)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __str__(self):
return str(self.__dict__)
def __repr__(self):
return repr(self.__dict__)
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class get_schema_result:
thrift_spec = None
def __init__(self, d=None):
self.success = None
self.ouch1 = None
self.ouch2 = None
self.ouch3 = None
if isinstance(d, dict):
if 'success' in d:
self.success = d['success']
if 'ouch1' in d:
self.ouch1 = d['ouch1']
if 'ouch2' in d:
self.ouch2 = d['ouch2']
if 'ouch3' in d:
self.ouch3 = d['ouch3']
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.MAP:
self.success = {}
(_ktype172, _vtype173, _size171 ) = iprot.readMapBegin()
for _i175 in xrange(_size171):
_key176 = iprot.readString();
_val177 = iprot.readString();
self.success[_key176] = _val177
iprot.readMapEnd()
else:
iprot.skip(ftype)
elif fid == -2:
if ftype == TType.STRUCT:
self.ouch1 = MetaException()
self.ouch1.read(iprot)
else:
iprot.skip(ftype)
elif fid == -3:
if ftype == TType.STRUCT:
self.ouch2 = UnknownTableException()
self.ouch2.read(iprot)
else:
iprot.skip(ftype)
elif fid == -4:
if ftype == TType.STRUCT:
self.ouch3 = UnknownDBException()
self.ouch3.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('get_schema_result')
if self.success != None:
oprot.writeFieldBegin('success', TType.MAP, 0)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.success))
for kiter178,viter179 in self.success.items():
oprot.writeString(kiter178)
oprot.writeString(viter179)
oprot.writeMapEnd()
oprot.writeFieldEnd()
if self.ouch1 != None:
oprot.writeFieldBegin('ouch1', TType.STRUCT, -2)
self.ouch1.write(oprot)
oprot.writeFieldEnd()
if self.ouch2 != None:
oprot.writeFieldBegin('ouch2', TType.STRUCT, -3)
self.ouch2.write(oprot)
oprot.writeFieldEnd()
if self.ouch3 != None:
oprot.writeFieldBegin('ouch3', TType.STRUCT, -4)
self.ouch3.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __str__(self):
return str(self.__dict__)
def __repr__(self):
return repr(self.__dict__)
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class alter_table_args:
thrift_spec = None
def __init__(self, d=None):
self.db_name = None
self.table_name = None
self.schema = None
if isinstance(d, dict):
if 'db_name' in d:
self.db_name = d['db_name']
if 'table_name' in d:
self.table_name = d['table_name']
if 'schema' in d:
self.schema = d['schema']
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == -1:
if ftype == TType.STRING:
self.db_name = iprot.readString();
else:
iprot.skip(ftype)
elif fid == -2:
if ftype == TType.STRING:
self.table_name = iprot.readString();
else:
iprot.skip(ftype)
elif fid == -3:
if ftype == TType.MAP:
self.schema = {}
(_ktype181, _vtype182, _size180 ) = iprot.readMapBegin()
for _i184 in xrange(_size180):
_key185 = iprot.readString();
_val186 = iprot.readString();
self.schema[_key185] = _val186
iprot.readMapEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('alter_table_args')
if self.db_name != None:
oprot.writeFieldBegin('db_name', TType.STRING, -1)
oprot.writeString(self.db_name)
oprot.writeFieldEnd()
if self.table_name != None:
oprot.writeFieldBegin('table_name', TType.STRING, -2)
oprot.writeString(self.table_name)
oprot.writeFieldEnd()
if self.schema != None:
oprot.writeFieldBegin('schema', TType.MAP, -3)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.schema))
for kiter187,viter188 in self.schema.items():
oprot.writeString(kiter187)
oprot.writeString(viter188)
oprot.writeMapEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __str__(self):
return str(self.__dict__)
def __repr__(self):
return repr(self.__dict__)
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class alter_table_result:
thrift_spec = None
def __init__(self, d=None):
self.ouch1 = None
self.ouch2 = None
self.ouch3 = None
if isinstance(d, dict):
if 'ouch1' in d:
self.ouch1 = d['ouch1']
if 'ouch2' in d:
self.ouch2 = d['ouch2']
if 'ouch3' in d:
self.ouch3 = d['ouch3']
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == -4:
if ftype == TType.STRUCT:
self.ouch1 = MetaException()
self.ouch1.read(iprot)
else:
iprot.skip(ftype)
elif fid == -5:
if ftype == TType.STRUCT:
self.ouch2 = UnknownTableException()
self.ouch2.read(iprot)
else:
iprot.skip(ftype)
elif fid == -6:
if ftype == TType.STRUCT:
self.ouch3 = UnknownDBException()
self.ouch3.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('alter_table_result')
if self.ouch1 != None:
oprot.writeFieldBegin('ouch1', TType.STRUCT, -4)
self.ouch1.write(oprot)
oprot.writeFieldEnd()
if self.ouch2 != None:
oprot.writeFieldBegin('ouch2', TType.STRUCT, -5)
self.ouch2.write(oprot)
oprot.writeFieldEnd()
if self.ouch3 != None:
oprot.writeFieldBegin('ouch3', TType.STRUCT, -6)
self.ouch3.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __str__(self):
return str(self.__dict__)
def __repr__(self):
return repr(self.__dict__)
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class create_table_args:
thrift_spec = None
def __init__(self, d=None):
self.db_name = None
self.table_name = None
self.schema = None
if isinstance(d, dict):
if 'db_name' in d:
self.db_name = d['db_name']
if 'table_name' in d:
self.table_name = d['table_name']
if 'schema' in d:
self.schema = d['schema']
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == -1:
if ftype == TType.STRING:
self.db_name = iprot.readString();
else:
iprot.skip(ftype)
elif fid == -2:
if ftype == TType.STRING:
self.table_name = iprot.readString();
else:
iprot.skip(ftype)
elif fid == -3:
if ftype == TType.MAP:
self.schema = {}
(_ktype190, _vtype191, _size189 ) = iprot.readMapBegin()
for _i193 in xrange(_size189):
_key194 = iprot.readString();
_val195 = iprot.readString();
self.schema[_key194] = _val195
iprot.readMapEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('create_table_args')
if self.db_name != None:
oprot.writeFieldBegin('db_name', TType.STRING, -1)
oprot.writeString(self.db_name)
oprot.writeFieldEnd()
if self.table_name != None:
oprot.writeFieldBegin('table_name', TType.STRING, -2)
oprot.writeString(self.table_name)
oprot.writeFieldEnd()
if self.schema != None:
oprot.writeFieldBegin('schema', TType.MAP, -3)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.schema))
for kiter196,viter197 in self.schema.items():
oprot.writeString(kiter196)
oprot.writeString(viter197)
oprot.writeMapEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __str__(self):
return str(self.__dict__)
def __repr__(self):
return repr(self.__dict__)
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class create_table_result:
thrift_spec = None
def __init__(self, d=None):
self.ouch1 = None
self.ouch2 = None
if isinstance(d, dict):
if 'ouch1' in d:
self.ouch1 = d['ouch1']
if 'ouch2' in d:
self.ouch2 = d['ouch2']
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == -4:
if ftype == TType.STRUCT:
self.ouch1 = MetaException()
self.ouch1.read(iprot)
else:
iprot.skip(ftype)
elif fid == -5:
if ftype == TType.STRUCT:
self.ouch2 = UnknownDBException()
self.ouch2.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('create_table_result')
if self.ouch1 != None:
oprot.writeFieldBegin('ouch1', TType.STRUCT, -4)
self.ouch1.write(oprot)
oprot.writeFieldEnd()
if self.ouch2 != None:
oprot.writeFieldBegin('ouch2', TType.STRUCT, -5)
self.ouch2.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __str__(self):
return str(self.__dict__)
def __repr__(self):
return repr(self.__dict__)
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class drop_table_args:
thrift_spec = None
def __init__(self, d=None):
self.db_name = None
self.table_name = None
if isinstance(d, dict):
if 'db_name' in d:
self.db_name = d['db_name']
if 'table_name' in d:
self.table_name = d['table_name']
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == -1:
if ftype == TType.STRING:
self.db_name = iprot.readString();
else:
iprot.skip(ftype)
elif fid == -2:
if ftype == TType.STRING:
self.table_name = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('drop_table_args')
if self.db_name != None:
oprot.writeFieldBegin('db_name', TType.STRING, -1)
oprot.writeString(self.db_name)
oprot.writeFieldEnd()
if self.table_name != None:
oprot.writeFieldBegin('table_name', TType.STRING, -2)
oprot.writeString(self.table_name)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __str__(self):
return str(self.__dict__)
def __repr__(self):
return repr(self.__dict__)
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class drop_table_result:
thrift_spec = None
def __init__(self, d=None):
self.ouch1 = None
self.ouch2 = None
self.ouch3 = None
if isinstance(d, dict):
if 'ouch1' in d:
self.ouch1 = d['ouch1']
if 'ouch2' in d:
self.ouch2 = d['ouch2']
if 'ouch3' in d:
self.ouch3 = d['ouch3']
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == -3:
if ftype == TType.STRUCT:
self.ouch1 = MetaException()
self.ouch1.read(iprot)
else:
iprot.skip(ftype)
elif fid == -4:
if ftype == TType.STRUCT:
self.ouch2 = UnknownTableException()
self.ouch2.read(iprot)
else:
iprot.skip(ftype)
elif fid == -5:
if ftype == TType.STRUCT:
self.ouch3 = UnknownDBException()
self.ouch3.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('drop_table_result')
if self.ouch1 != None:
oprot.writeFieldBegin('ouch1', TType.STRUCT, -3)
self.ouch1.write(oprot)
oprot.writeFieldEnd()
if self.ouch2 != None:
oprot.writeFieldBegin('ouch2', TType.STRUCT, -4)
self.ouch2.write(oprot)
oprot.writeFieldEnd()
if self.ouch3 != None:
oprot.writeFieldBegin('ouch3', TType.STRUCT, -5)
self.ouch3.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __str__(self):
return str(self.__dict__)
def __repr__(self):
return repr(self.__dict__)
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class truncate_table_args:
thrift_spec = None
def __init__(self, d=None):
self.db_name = None
self.table_name = None
self.partition = None
if isinstance(d, dict):
if 'db_name' in d:
self.db_name = d['db_name']
if 'table_name' in d:
self.table_name = d['table_name']
if 'partition' in d:
self.partition = d['partition']
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == -1:
if ftype == TType.STRING:
self.db_name = iprot.readString();
else:
iprot.skip(ftype)
elif fid == -2:
if ftype == TType.STRING:
self.table_name = iprot.readString();
else:
iprot.skip(ftype)
elif fid == -3:
if ftype == TType.STRING:
self.partition = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('truncate_table_args')
if self.db_name != None:
oprot.writeFieldBegin('db_name', TType.STRING, -1)
oprot.writeString(self.db_name)
oprot.writeFieldEnd()
if self.table_name != None:
oprot.writeFieldBegin('table_name', TType.STRING, -2)
oprot.writeString(self.table_name)
oprot.writeFieldEnd()
if self.partition != None:
oprot.writeFieldBegin('partition', TType.STRING, -3)
oprot.writeString(self.partition)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __str__(self):
return str(self.__dict__)
def __repr__(self):
return repr(self.__dict__)
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class truncate_table_result:
thrift_spec = None
def __init__(self, d=None):
self.ouch1 = None
self.ouch2 = None
self.ouch3 = None
if isinstance(d, dict):
if 'ouch1' in d:
self.ouch1 = d['ouch1']
if 'ouch2' in d:
self.ouch2 = d['ouch2']
if 'ouch3' in d:
self.ouch3 = d['ouch3']
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == -4:
if ftype == TType.STRUCT:
self.ouch1 = MetaException()
self.ouch1.read(iprot)
else:
iprot.skip(ftype)
elif fid == -5:
if ftype == TType.STRUCT:
self.ouch2 = UnknownTableException()
self.ouch2.read(iprot)
else:
iprot.skip(ftype)
elif fid == -6:
if ftype == TType.STRUCT:
self.ouch3 = UnknownDBException()
self.ouch3.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('truncate_table_result')
if self.ouch1 != None:
oprot.writeFieldBegin('ouch1', TType.STRUCT, -4)
self.ouch1.write(oprot)
oprot.writeFieldEnd()
if self.ouch2 != None:
oprot.writeFieldBegin('ouch2', TType.STRUCT, -5)
self.ouch2.write(oprot)
oprot.writeFieldEnd()
if self.ouch3 != None:
oprot.writeFieldBegin('ouch3', TType.STRUCT, -6)
self.ouch3.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __str__(self):
return str(self.__dict__)
def __repr__(self):
return repr(self.__dict__)
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class table_exists_args:
thrift_spec = None
def __init__(self, d=None):
self.db_name = None
self.table_name = None
if isinstance(d, dict):
if 'db_name' in d:
self.db_name = d['db_name']
if 'table_name' in d:
self.table_name = d['table_name']
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == -1:
if ftype == TType.STRING:
self.db_name = iprot.readString();
else:
iprot.skip(ftype)
elif fid == -2:
if ftype == TType.STRING:
self.table_name = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('table_exists_args')
if self.db_name != None:
oprot.writeFieldBegin('db_name', TType.STRING, -1)
oprot.writeString(self.db_name)
oprot.writeFieldEnd()
if self.table_name != None:
oprot.writeFieldBegin('table_name', TType.STRING, -2)
oprot.writeString(self.table_name)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __str__(self):
return str(self.__dict__)
def __repr__(self):
return repr(self.__dict__)
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class table_exists_result:
thrift_spec = None
def __init__(self, d=None):
self.success = None
self.ouch1 = None
self.ouch2 = None
if isinstance(d, dict):
if 'success' in d:
self.success = d['success']
if 'ouch1' in d:
self.ouch1 = d['ouch1']
if 'ouch2' in d:
self.ouch2 = d['ouch2']
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.BOOL:
self.success = iprot.readBool();
else:
iprot.skip(ftype)
elif fid == -3:
if ftype == TType.STRUCT:
self.ouch1 = MetaException()
self.ouch1.read(iprot)
else:
iprot.skip(ftype)
elif fid == -4:
if ftype == TType.STRUCT:
self.ouch2 = UnknownDBException()
self.ouch2.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('table_exists_result')
if self.success != None:
oprot.writeFieldBegin('success', TType.BOOL, 0)
oprot.writeBool(self.success)
oprot.writeFieldEnd()
if self.ouch1 != None:
oprot.writeFieldBegin('ouch1', TType.STRUCT, -3)
self.ouch1.write(oprot)
oprot.writeFieldEnd()
if self.ouch2 != None:
oprot.writeFieldBegin('ouch2', TType.STRUCT, -4)
self.ouch2.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __str__(self):
return str(self.__dict__)
def __repr__(self):
return repr(self.__dict__)
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
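# Hedged sketch, not generated output: how a caller typically unpacks this
# result struct, mirroring the usual Thrift success-or-raise convention (the
# real recv_table_exists() lives in this module's Client class, not shown).
def _example_unpack_table_exists(result):
  if result.success is not None:
    return result.success
  if result.ouch1 is not None:
    raise result.ouch1
  if result.ouch2 is not None:
    raise result.ouch2
  raise Exception('table_exists failed: unknown result')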
class get_partitions_args:
thrift_spec = None
def __init__(self, d=None):
self.db_name = None
self.table_name = None
if isinstance(d, dict):
if 'db_name' in d:
self.db_name = d['db_name']
if 'table_name' in d:
self.table_name = d['table_name']
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == -1:
if ftype == TType.STRING:
self.db_name = iprot.readString();
else:
iprot.skip(ftype)
elif fid == -2:
if ftype == TType.STRING:
self.table_name = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('get_partitions_args')
if self.db_name != None:
oprot.writeFieldBegin('db_name', TType.STRING, -1)
oprot.writeString(self.db_name)
oprot.writeFieldEnd()
if self.table_name != None:
oprot.writeFieldBegin('table_name', TType.STRING, -2)
oprot.writeString(self.table_name)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __str__(self):
return str(self.__dict__)
def __repr__(self):
return repr(self.__dict__)
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class get_partitions_result:
thrift_spec = None
def __init__(self, d=None):
self.success = None
self.ouch1 = None
self.ouch2 = None
self.ouch3 = None
if isinstance(d, dict):
if 'success' in d:
self.success = d['success']
if 'ouch1' in d:
self.ouch1 = d['ouch1']
if 'ouch2' in d:
self.ouch2 = d['ouch2']
if 'ouch3' in d:
self.ouch3 = d['ouch3']
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.LIST:
self.success = []
(_etype201, _size198) = iprot.readListBegin()
for _i202 in xrange(_size198):
_elem203 = iprot.readString();
self.success.append(_elem203)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == -3:
if ftype == TType.STRUCT:
self.ouch1 = MetaException()
self.ouch1.read(iprot)
else:
iprot.skip(ftype)
elif fid == -4:
if ftype == TType.STRUCT:
self.ouch2 = UnknownTableException()
self.ouch2.read(iprot)
else:
iprot.skip(ftype)
elif fid == -5:
if ftype == TType.STRUCT:
self.ouch3 = UnknownDBException()
self.ouch3.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('get_partitions_result')
if self.success != None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRING, len(self.success))
for iter204 in self.success:
oprot.writeString(iter204)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.ouch1 != None:
oprot.writeFieldBegin('ouch1', TType.STRUCT, -3)
self.ouch1.write(oprot)
oprot.writeFieldEnd()
if self.ouch2 != None:
oprot.writeFieldBegin('ouch2', TType.STRUCT, -4)
self.ouch2.write(oprot)
oprot.writeFieldEnd()
if self.ouch3 != None:
oprot.writeFieldBegin('ouch3', TType.STRUCT, -5)
self.ouch3.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __str__(self):
return str(self.__dict__)
def __repr__(self):
return repr(self.__dict__)
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class get_dbs_args:
thrift_spec = (
)
def __init__(self, d=None):
pass
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('get_dbs_args')
oprot.writeFieldStop()
oprot.writeStructEnd()
def __str__(self):
return str(self.__dict__)
def __repr__(self):
return repr(self.__dict__)
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
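# Hedged observation, not generated output: get_dbs_args is the only struct
# in this section whose thrift_spec is not None (it is the empty tuple), so
# it alone can satisfy the fastbinary fast-path guard in read()/write();
# every other struct here always falls through to the pure-Python field loop.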
class get_dbs_result:
thrift_spec = None
def __init__(self, d=None):
self.success = None
self.ouch = None
if isinstance(d, dict):
if 'success' in d:
self.success = d['success']
if 'ouch' in d:
self.ouch = d['ouch']
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.LIST:
self.success = []
(_etype208, _size205) = iprot.readListBegin()
for _i209 in xrange(_size205):
_elem210 = iprot.readString();
self.success.append(_elem210)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == -1:
if ftype == TType.STRUCT:
self.ouch = MetaException()
self.ouch.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('get_dbs_result')
if self.success != None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRING, len(self.success))
for iter211 in self.success:
oprot.writeString(iter211)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.ouch != None:
oprot.writeFieldBegin('ouch', TType.STRUCT, -1)
self.ouch.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __str__(self):
return str(self.__dict__)
def __repr__(self):
return repr(self.__dict__)
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class cat_args:
thrift_spec = None
def __init__(self, d=None):
self.db_name = None
self.table_name = None
self.partition = None
self.high = None
if isinstance(d, dict):
if 'db_name' in d:
self.db_name = d['db_name']
if 'table_name' in d:
self.table_name = d['table_name']
if 'partition' in d:
self.partition = d['partition']
if 'high' in d:
self.high = d['high']
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == -1:
if ftype == TType.STRING:
self.db_name = iprot.readString();
else:
iprot.skip(ftype)
elif fid == -2:
if ftype == TType.STRING:
self.table_name = iprot.readString();
else:
iprot.skip(ftype)
elif fid == -3:
if ftype == TType.STRING:
self.partition = iprot.readString();
else:
iprot.skip(ftype)
elif fid == -4:
if ftype == TType.I32:
self.high = iprot.readI32();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('cat_args')
if self.db_name != None:
oprot.writeFieldBegin('db_name', TType.STRING, -1)
oprot.writeString(self.db_name)
oprot.writeFieldEnd()
if self.table_name != None:
oprot.writeFieldBegin('table_name', TType.STRING, -2)
oprot.writeString(self.table_name)
oprot.writeFieldEnd()
if self.partition != None:
oprot.writeFieldBegin('partition', TType.STRING, -3)
oprot.writeString(self.partition)
oprot.writeFieldEnd()
if self.high != None:
oprot.writeFieldBegin('high', TType.I32, -4)
oprot.writeI32(self.high)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __str__(self):
return str(self.__dict__)
def __repr__(self):
return repr(self.__dict__)
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class cat_result:
thrift_spec = None
def __init__(self, d=None):
self.success = None
self.ouch1 = None
self.ouch2 = None
self.ouch3 = None
if isinstance(d, dict):
if 'success' in d:
self.success = d['success']
if 'ouch1' in d:
self.ouch1 = d['ouch1']
if 'ouch2' in d:
self.ouch2 = d['ouch2']
if 'ouch3' in d:
self.ouch3 = d['ouch3']
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.LIST:
self.success = []
(_etype215, _size212) = iprot.readListBegin()
for _i216 in xrange(_size212):
_elem217 = iprot.readString();
self.success.append(_elem217)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == -5:
if ftype == TType.STRUCT:
self.ouch1 = MetaException()
self.ouch1.read(iprot)
else:
iprot.skip(ftype)
elif fid == -6:
if ftype == TType.STRUCT:
self.ouch2 = UnknownDBException()
self.ouch2.read(iprot)
else:
iprot.skip(ftype)
elif fid == -7:
if ftype == TType.STRUCT:
self.ouch3 = UnknownTableException()
self.ouch3.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('cat_result')
if self.success != None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRING, len(self.success))
for iter218 in self.success:
oprot.writeString(iter218)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.ouch1 != None:
oprot.writeFieldBegin('ouch1', TType.STRUCT, -5)
self.ouch1.write(oprot)
oprot.writeFieldEnd()
if self.ouch2 != None:
oprot.writeFieldBegin('ouch2', TType.STRUCT, -6)
self.ouch2.write(oprot)
oprot.writeFieldEnd()
if self.ouch3 != None:
oprot.writeFieldBegin('ouch3', TType.STRUCT, -7)
self.ouch3.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __str__(self):
return str(self.__dict__)
def __repr__(self):
return repr(self.__dict__)
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
|
ric03uec/boto | refs/heads/develop | boto/ec2/autoscale/limits.py | 18 | # Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
class AccountLimits(object):
    def __init__(self, connection=None):
        self.connection = connection
        # Initialized up front so the attribute exists even before a
        # response has been parsed.
        self.request_id = None
        self.max_autoscaling_groups = None
        self.max_launch_configurations = None
def __repr__(self):
return 'AccountLimits: [%s, %s]' % (self.max_autoscaling_groups,
self.max_launch_configurations)
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'RequestId':
self.request_id = value
elif name == 'MaxNumberOfAutoScalingGroups':
self.max_autoscaling_groups = int(value)
elif name == 'MaxNumberOfLaunchConfigurations':
self.max_launch_configurations = int(value)
else:
setattr(self, name, value)
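# Hedged sketch, not part of boto: AccountLimits instances are populated by
# boto's SAX-style XML handler through endElement() callbacks; parsing a
# DescribeAccountLimits response drives calls equivalent to these.
def _example_populate_account_limits():
    limits = AccountLimits()
    limits.endElement('MaxNumberOfAutoScalingGroups', '20', None)
    limits.endElement('MaxNumberOfLaunchConfigurations', '100', None)
    return limits  # repr(limits) == 'AccountLimits: [20, 100]'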
|
contactr2m/remote_repo | refs/heads/master | src/song/tests.py | 24123 | from django.test import TestCase
# Create your tests here.
|
jeffzheng1/tensorflow | refs/heads/master | tensorflow/python/training/monitored_session.py | 3 | # pylint: disable=g-bad-file-header
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A wrapper of Session API which runs hooks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python import summary
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import resources
from tensorflow.python.ops import variables
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner
from tensorflow.python.training import saver as training_saver
from tensorflow.python.training import session_manager as sm
from tensorflow.python.training import session_run_hook
# TODO(touts): Share that with the Supervisor.
class Scaffold(object):
"""Structure to create or gather pieces commonly needed to train a model.
When you build a model for training you usually need ops to initialize
variables, a `Saver` to checkpoint them, an op to collect summaries for
the visualizer, and so on.
Various libraries built on top of the core TensorFlow library take care of
creating some or all of these pieces and storing them in well known
collections in the graph. The `Scaffold` class helps pick these pieces from
the graph collections, creating and adding them to the collections if needed.
If you call the scaffold constructor without any arguments, it will pick
pieces from the collections, creating default ones if needed when
`scaffold.finalize()` is called. You can pass arguments to the constructor to
provide your own pieces. Pieces that you pass to the constructor are not
added to the graph collections.
The following pieces are directly accessible as attributes of the `Scaffold`
object:
* `saver`: A `tf.Saver` object taking care of saving the variables. Picked
from and stored into the `SAVERS` collection in the graph.
* `init_op`: An op to run to initialize the variables. Picked from and
stored into the `INIT_OP` collection in the graph.
* `ready_op`: An op to verify that the variables are initialized. Picked
from and stored into the `READY_OP` collection in the graph.
* `local_init_op`: An op to initialize the local variables. Picked
from and stored into the `LOCAL_INIT_OP` collection in the graph.
* `summary_op`: An op to run and merge the summaries in the graph. Picked
from and stored into the `SUMMARY_OP` collection in the graph.
* `global_step`: A tensor containing the global step counter. Picked
from and stored into the `GLOBAL_STEP` collection in the graph.
You can also pass the following additional pieces to the constructor:
  * `init_feed_dict`: A session feed dictionary that should be used when
    running the init op.
  * `init_fn`: A callable to run after the init op to perform additional
    initializations. The callable will be called as
    `init_fn(scaffold, session)`.
"""
def __init__(self,
init_op=None,
init_feed_dict=None,
init_fn=None,
ready_op=None,
local_init_op=None,
summary_op=None,
saver=None):
"""Create a scaffold.
Args:
init_op: Optional op for initializing variables.
init_feed_dict: Optional session feed dictionary to use when running the
init_op.
init_fn: Optional function to use to initialize the model after running
the init_op. Will be called as `init_fn(scaffold, session)`.
ready_op: Optional op to verify that the variables are initialized. Must
return an empty scalar string tensor when the variables are
initialized, or a non-empty one listing the names of the
non-initialized variables.
local_init_op: Optional op to initialize local variables.
summary_op: Optional op to gather all summaries. Must return a scalar
string tensor containing a serialized `Summary` proto.
saver: Optional `tf.Saver` object to use to save and restore variables.
"""
# NOTE(touts): modifying the init function to be passed the scaffold is a
# hack to make it easy to find the saver. Is there a better way?
if init_fn:
self._init_fn = lambda sess: init_fn(self, sess)
else:
self._init_fn = None
self._init_op = init_op
self._ready_op = ready_op
self._local_init_op = local_init_op
self._summary_op = summary_op
self._saver = saver
self._init_feed_dict = init_feed_dict
def finalize(self):
"""Creates operations if needed and finalizes the graph."""
if self._init_op is None:
def default_init_op():
return control_flow_ops.group(
variables.global_variables_initializer(),
resources.initialize_resources(resources.shared_resources()))
self._init_op = Scaffold.get_or_default(
'init_op',
ops.GraphKeys.INIT_OP,
default_init_op)
if self._ready_op is None:
def default_ready_op():
return array_ops.concat(
0,
[variables.report_uninitialized_variables(),
resources.report_uninitialized_resources()])
self._ready_op = Scaffold.get_or_default(
'ready_op', ops.GraphKeys.READY_OP,
default_ready_op)
if self._local_init_op is None:
self._local_init_op = Scaffold.get_or_default(
'local_init_op', ops.GraphKeys.LOCAL_INIT_OP,
Scaffold._default_local_init_op)
if self._summary_op is None:
self._summary_op = Scaffold.get_or_default('summary_op',
ops.GraphKeys.SUMMARY_OP,
summary.merge_all)
# pylint: disable=g-long-lambda
if self._saver is None:
self._saver = Scaffold.get_or_default(
'saver',
ops.GraphKeys.SAVERS,
lambda: training_saver.Saver(sharded=True, allow_empty=True,
write_version=saver_pb2.SaverDef.V2))
# pylint: enable=g-long-lambda
self._saver.build()
ops.get_default_graph().finalize()
return self
@property
def init_fn(self):
return self._init_fn
@property
def init_op(self):
return self._init_op
@property
def ready_op(self):
return self._ready_op
@property
def local_init_op(self):
return self._local_init_op
@property
def summary_op(self):
return self._summary_op
@property
def saver(self):
return self._saver
@property
def init_feed_dict(self):
return self._init_feed_dict
@staticmethod
def get_or_default(arg_name, collection_key, default_constructor):
"""Get from cache or create a default operation."""
elements = ops.get_collection(collection_key)
if elements:
if len(elements) > 1:
        raise RuntimeError('More than one item in the collection "%s". '
                           'Please indicate which one to use by passing it to '
                           'the tf.Scaffold constructor as: '
                           'tf.Scaffold(%s=item to use)' %
                           (collection_key, arg_name))
return elements[0]
op = default_constructor()
if op is not None:
ops.add_to_collection(collection_key, op)
return op
@staticmethod
def _default_local_init_op():
return control_flow_ops.group(variables.local_variables_initializer(),
data_flow_ops.initialize_all_tables())
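# Hedged usage sketch, not part of the API surface: a Scaffold whose init_fn
# restores from an explicit checkpoint after the regular init op has run.
# `checkpoint_path` is assumed to point at an existing checkpoint.
def _example_scaffold_with_init_fn(checkpoint_path):
  def _restore(scaffold, sess):
    scaffold.saver.restore(sess, checkpoint_path)
  return Scaffold(init_fn=_restore)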
def MonitoredTrainingSession(master='', # pylint: disable=invalid-name
is_chief=True,
checkpoint_dir=None,
hooks=None,
scaffold=None,
config=None):
"""Creates a `MonitoredSession` for training.
  For a chief, this utility sets a proper session initializer/restorer. It
  also creates hooks related to checkpoint and summary saving. For workers,
  this utility sets a proper session creator which waits for the chief to
  initialize or restore the session.
Args:
master: `String` the TensorFlow master to use.
    is_chief: If `True`, it will take care of initialization and recovery of
      the underlying TensorFlow session. If `False`, it will wait on a chief
      to initialize or recover the TensorFlow session.
checkpoint_dir: A string. Optional path to a directory where to restore
variables.
hooks: Optional list of `SessionRunHook` objects.
scaffold: A `Scaffold` used for gathering or building supportive ops. If
not specified, a default one is created. It's used to finalize the graph.
config: `ConfigProto` proto used to configure the session.
Returns:
A `MonitoredSession` object.
"""
hooks = hooks or []
scaffold = scaffold or Scaffold()
if not is_chief:
session_creator = WorkerSessionCreator(
scaffold=scaffold, master=master, config=config)
else:
session_creator = ChiefSessionCreator(
scaffold=scaffold,
checkpoint_dir=checkpoint_dir,
master=master,
config=config)
hooks.extend([
basic_session_run_hooks.StepCounterHook(output_dir=checkpoint_dir),
basic_session_run_hooks.SummarySaverHook(
scaffold=scaffold, save_steps=100, output_dir=checkpoint_dir),
basic_session_run_hooks.CheckpointSaverHook(
checkpoint_dir, save_secs=600, scaffold=scaffold),
])
return MonitoredSession(session_creator=session_creator, hooks=hooks)
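# Hedged usage sketch (not part of the API): `train_op` is assumed to be a
# training op already built in the default graph, and `checkpoint_dir` to be
# writable. Mirrors the call pattern described in the docstring above.
def _example_monitored_training(train_op, checkpoint_dir):
  with MonitoredTrainingSession(is_chief=True,
                                checkpoint_dir=checkpoint_dir) as sess:
    while not sess.should_stop():
      sess.run(train_op)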
class SessionCreator(object):
"""A factory for tf.Session."""
@abc.abstractmethod
def create_session(self):
raise NotImplementedError(
'create_session is not implemented for {}.'.format(self))
class ChiefSessionCreator(SessionCreator):
"""Creates a tf.Session for a chief."""
def __init__(self, scaffold=None, master='', config=None,
checkpoint_dir=None):
"""Initializes a chief session creator.
Args:
scaffold: A `Scaffold` used for gathering or building supportive ops. If
not specified a default one is created. It's used to finalize the graph.
master: `String` representation of the TensorFlow master to use.
config: `ConfigProto` proto used to configure the session.
checkpoint_dir: A string. Optional path to a directory where to restore
variables.
"""
self._checkpoint_dir = checkpoint_dir
self._scaffold = scaffold or Scaffold()
self._session_manager = None
self._master = master
self._config = config
def _get_session_manager(self):
if self._session_manager:
return self._session_manager
self._session_manager = sm.SessionManager(
local_init_op=self._scaffold.local_init_op,
ready_op=self._scaffold.ready_op,
graph=ops.get_default_graph())
return self._session_manager
def create_session(self):
self._scaffold.finalize()
return self._get_session_manager().prepare_session(
self._master,
saver=self._scaffold.saver,
checkpoint_dir=self._checkpoint_dir,
config=self._config,
init_op=self._scaffold.init_op,
init_feed_dict=self._scaffold.init_feed_dict,
init_fn=self._scaffold.init_fn)
class WorkerSessionCreator(SessionCreator):
"""Creates a tf.Session for a worker."""
def __init__(self, scaffold=None, master='', config=None):
"""Initializes a worker session creator.
Args:
scaffold: A `Scaffold` used for gathering or building supportive ops. If
not specified a default one is created. It's used to finalize the graph.
master: `String` representation of the TensorFlow master to use.
config: `ConfigProto` proto used to configure the session.
"""
self._scaffold = scaffold or Scaffold()
self._session_manager = None
self._master = master
self._config = config
def _get_session_manager(self):
if self._session_manager:
return self._session_manager
self._session_manager = sm.SessionManager(
local_init_op=self._scaffold.local_init_op,
ready_op=self._scaffold.ready_op,
graph=ops.get_default_graph())
return self._session_manager
def create_session(self):
self._scaffold.finalize()
return self._get_session_manager().wait_for_session(
self._master, config=self._config)
class MonitoredSession(object):
"""Session-like object that handles initialization, recovery and hooks.
Example usage:
```python
saver_hook = CheckpointSaverHook(...)
summary_hook = SummaryHook(...)
with MonitoredSession(session_creator=ChiefSessionCreator(...),
hooks=[saver_hook, summary_hook]) as sess:
while not sess.should_stop():
sess.run(train_op)
```
  Initialization: At creation time the monitored session does the following
  things in the given order:
* calls `hook.begin()`
* finalizes the graph via `scaffold.finalize()`
* create session
* initializes the model via initialization ops provided by `Scaffold`
* restores variables if a checkpoint exists
* launches queue runners
  Run: When `run()` is called, the monitored session does the following things:
* calls `hook.before_run()`
* calls TensorFlow `session.run()` with merged fetches and feed_dict
* calls `hook.after_run()`
* returns result of `session.run()` asked by user
* if `AbortedError` occurs, it recovers or reinitializes the session before
executing the run() call again
  Exit: On `close()`, the monitored session does the following things in order:
* calls `hook.end()`
* closes the queue runners and the session
  * suppresses the `OutOfRange` error, which indicates that all inputs have
    been processed, if the monitored_session is used as a context manager.
How to set `tf.Session` arguments:
* In most cases you can set session arguments as follows:
```python
MonitoredSession(
session_creator=ChiefSessionCreator(master=..., config=...))
```
  * In a distributed setting for a non-chief worker, you can use the following:
```python
MonitoredSession(
session_creator=WorkerSessionCreator(master=..., config=...))
```
See `MonitoredTrainingSession` for an example usage based on chief or worker.
"""
def __init__(self, session_creator=None, hooks=None):
"""Creates a MonitoredSession.
Args:
session_creator: A factory object to create session. Typically a
`ChiefSessionCreator` which is the default one.
      hooks: An iterable of `SessionRunHook` objects.
"""
self._hooks = hooks or []
for h in self._hooks:
h.begin()
# Create the session.
self._coordinated_creator = self._CoordinatedSessionCreator(
session_creator=session_creator or ChiefSessionCreator(),
hooks=self._hooks)
self._sess = _RecoverableSession(self._coordinated_creator)
@property
def graph(self):
"""The graph that was launched in this session."""
if self._coordinated_creator.tf_sess is None:
return None
return self._coordinated_creator.tf_sess.graph
def run(self, fetches, feed_dict=None, options=None, run_metadata=None):
"""Run ops in the monitored session.
This method is completely compatible with the `tf.Session.run()` method.
Args:
fetches: Same as `tf.Session.run()`.
feed_dict: Same as `tf.Session.run()`.
options: Same as `tf.Session.run()`.
run_metadata: Same as `tf.Session.run()`.
Returns:
Same as `tf.Session.run()`.
"""
return self._sess.run(fetches,
feed_dict=feed_dict,
options=options,
run_metadata=run_metadata)
def should_stop(self):
if self._sess:
return self._sess.should_stop()
return True
def close(self):
self._close_internal()
def __enter__(self):
return self
def __exit__(self, exception_type, exception_value, traceback):
if exception_type in [errors.OutOfRangeError, StopIteration]:
exception_type = None
self._close_internal(exception_type)
# __exit__ should return True to suppress an exception.
return exception_type is None
class _CoordinatedSessionCreator(object):
"""Factory for the _RecoverableSession."""
def __init__(self, session_creator, hooks):
self._session_creator = session_creator
self._hooks = hooks
self.coord = None
self.tf_sess = None
def create_session(self):
"""Creates a coordinated session."""
# Keep the tf_sess for unit testing.
self.tf_sess = self._session_creator.create_session()
# We don't want coordinator to suppress any exception.
self.coord = coordinator.Coordinator(clean_stop_exception_types=[])
queue_runner.start_queue_runners(sess=self.tf_sess, coord=self.coord)
return _CoordinatedSession(
_HookedSession(self.tf_sess, self._hooks), self.coord)
def _close_internal(self, exception_type=None):
try:
if not exception_type:
for h in self._hooks:
h.end(self._coordinated_creator.tf_sess)
finally:
try:
self._sess.close()
finally:
self._sess = None
self._coordinated_creator.tf_sess = None
self._coordinated_creator.coord = None
def _is_closed(self):
"""Return True if the supervised session is closed. For tests only.
Returns:
A boolean.
"""
return self._coordinated_creator.tf_sess is None
class _WrappedSession(object):
"""Wrapper around a `tf.Session`.
This wrapper is used as a base class for various session wrappers
that provide additional functionality such as monitoring, coordination,
and recovery.
In addition to the methods exported by `SessionInterface` the wrapper
provides a method to check for stop and never raises exceptions from
calls to `close()`.
"""
def __init__(self, sess):
"""Creates a `_WrappedSession`.
Args:
sess: A `tf.Session` or `_WrappedSession` object. The wrapped session.
"""
self._sess = sess
self._wrapped_is_stoppable = isinstance(self._sess, _WrappedSession)
@property
def graph(self):
return self._sess.graph
@property
def sess_str(self):
return self._sess.sess_str
def should_stop(self):
"""Return true if this session should not be used anymore.
Always return True if the session was closed.
Returns:
True if the session should stop, False otherwise.
"""
if self._check_stop():
return True
if self._sess:
return self._wrapped_is_stoppable and self._sess.should_stop()
return True
def _check_stop(self):
"""Hook for subclasses to provide their own stop condition.
Returns:
True if the session should stop, False otherwise.
"""
return False
def close(self):
if self._sess:
try:
self._sess.close()
finally:
self._sess = None
def run(self, *args, **kwargs):
return self._sess.run(*args, **kwargs)
class _RecoverableSession(_WrappedSession):
"""A wrapped session that recreates a session on `tf.errors.AbortedError`.
The constructor is passed a SessionCreator object, not a session.
Calls to `run()` are delegated to the wrapped session. If a call raises the
exception `tf.errors.AbortedError`, the wrapped session is closed, and a new
one is created by calling the factory again.
"""
def __init__(self, sess_creator):
"""Create a new `_RecoverableSession`.
The value returned by calling `sess_creator.create_session()` will be the
session wrapped by this recoverable session.
Args:
      sess_creator: A `SessionCreator` to be wrapped by the recoverable
        session.
"""
self._sess_creator = sess_creator
_WrappedSession.__init__(self, self._sess_creator.create_session())
def run(self, fetches, feed_dict=None, options=None, run_metadata=None):
while True:
try:
if not self._sess:
self._sess = self._sess_creator.create_session()
return self._sess.run(fetches,
feed_dict=feed_dict,
options=options,
run_metadata=run_metadata)
except errors.AbortedError:
self.close()
self._sess = None
class _CoordinatedSession(_WrappedSession):
"""A wrapped session that works with a `tf.Coordinator`.
Calls to `run()` are delegated to the wrapped session. If a call
raises an exception, the exception is reported to the coordinator.
  In addition, after each call to `run()` this session asks the coordinator
  whether the session should stop. If so, it joins all the threads
  registered with the coordinator before returning.
If the coordinator was requested to stop with an exception, that exception
will be re-raised from the call to `run()`.
"""
def __init__(self, sess, coord):
"""Create a new `_CoordinatedSession`.
Args:
sess: A `tf.Session` object. The wrapped session.
coord: A `tf.train.Coordinator` object.
"""
_WrappedSession.__init__(self, sess)
self._coord = coord
def _check_stop(self):
# Check with the coordinator if we should stop.
return self._coord.should_stop()
def close(self):
self._coord.request_stop()
try:
self._coord.join()
finally:
try:
_WrappedSession.close(self)
except Exception: # pylint: disable=broad-except
# We intentionally suppress exceptions from the close() here since
# useful exceptions are already reported by join().
pass
class _HookedSession(_WrappedSession):
"""A _WrappedSession that calls hooks during calls to run().
The list of hooks to call is passed in the constructor. Before each call
to `run()` the session calls the `before_run()` method of the hooks, which
can return additional ops or tensors to run. These are added to the arguments
of the call to `run()`.
When the `run()` call finishes, the session calls the `after_run()` methods of
the hooks, passing the values returned by the `run()` call corresponding to
the ops and tensors that each hook requested.
  If any of the hooks requests a stop via the `run_context`, the session will
  be marked as needing to stop, and its `should_stop()` method will then
  return `True`.
"""
def __init__(self, sess, hooks):
"""Initializes a _HookedSession object.
Args:
sess: A `tf.Session` or a `_WrappedSession` object.
      hooks: An iterable of `SessionRunHook` objects.
"""
_WrappedSession.__init__(self, sess)
self._hooks = hooks
self._should_stop = False
def _check_stop(self):
"""See base class."""
return self._should_stop
def run(self, fetches, feed_dict=None, options=None, run_metadata=None):
"""See base class."""
if self.should_stop():
raise RuntimeError('Run called even after should_stop requested.')
actual_fetches = {'caller': fetches}
run_context = session_run_hook.SessionRunContext(
original_args=session_run_hook.SessionRunArgs(fetches, feed_dict),
session=self._sess)
feed_dict = self._call_hook_before_run(
run_context, actual_fetches, feed_dict)
# Do session run.
outputs = _WrappedSession.run(self,
fetches=actual_fetches,
feed_dict=feed_dict,
options=options,
run_metadata=run_metadata)
for hook in self._hooks:
hook.after_run(
run_context,
session_run_hook.SessionRunValues(results=outputs[hook] if
hook in outputs else None))
self._should_stop = self._should_stop or run_context.stop_requested
return outputs['caller']
def _call_hook_before_run(self, run_context, fetch_dict, user_feed_dict):
"""Calls hooks.before_run and handles requests from hooks."""
hook_feeds = {}
for hook in self._hooks:
request = hook.before_run(run_context)
if request is not None:
if request.fetches is not None:
fetch_dict[hook] = request.fetches
if request.feed_dict:
self._raise_if_feeds_intersects(
hook_feeds, request.feed_dict,
'Same tensor is fed by two hooks.')
hook_feeds.update(request.feed_dict)
if not hook_feeds:
return user_feed_dict
if not user_feed_dict:
return hook_feeds
self._raise_if_feeds_intersects(
user_feed_dict, hook_feeds,
'Same tensor is fed by a SessionRunHook and user.')
hook_feeds.update(user_feed_dict)
return hook_feeds
def _raise_if_feeds_intersects(self, feeds1, feeds2, message):
intersection = set(feeds1.keys()) & set(feeds2.keys())
if intersection:
raise RuntimeError(message + ' Conflict(s): ' + str(list(intersection)))
|
OSSESAC/odoopubarquiluz | refs/heads/7.0 | addons/account_analytic_plans/wizard/account_crossovered_analytic.py | 52 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
from openerp.tools.translate import _
class account_crossovered_analytic(osv.osv_memory):
_name = "account.crossovered.analytic"
_description = "Print Crossovered Analytic"
_columns = {
'date1': fields.date('Start Date', required=True),
'date2': fields.date('End Date', required=True),
'journal_ids': fields.many2many('account.analytic.journal', 'crossovered_journal_rel', 'crossover_id', 'journal_id', 'Analytic Journal'),
'ref': fields.many2one('account.analytic.account', 'Analytic Account Reference', required=True),
        'empty_line': fields.boolean("Don't show empty lines"),
}
_defaults = {
'date1': lambda *a: time.strftime('%Y-01-01'),
'date2': lambda *a: time.strftime('%Y-%m-%d'),
}
def print_report(self, cr, uid, ids, context=None):
cr.execute('SELECT account_id FROM account_analytic_line')
res = cr.fetchall()
acc_ids = [x[0] for x in res]
data = self.read(cr, uid, ids, [], context=context)[0]
data['ref'] = data['ref'][0]
obj_acc = self.pool.get('account.analytic.account').browse(cr, uid, data['ref'], context=context)
name = obj_acc.name
account_ids = self.pool.get('account.analytic.account').search(cr, uid, [('parent_id', 'child_of', [data['ref']])], context=context)
flag = True
for acc in account_ids:
if acc in acc_ids:
flag = False
break
if flag:
            raise osv.except_osv(_('User Error!'), _('There are no analytic lines related to account %s.') % name)
datas = {
'ids': [],
'model': 'account.analytic.account',
'form': data
}
return {
'type': 'ir.actions.report.xml',
'report_name': 'account.analytic.account.crossovered.analytic',
'datas': datas,
}
account_crossovered_analytic()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
PaulAYoung/f2014_iolab | refs/heads/master | pymongoProject/venv/lib/python2.7/site-packages/passlib/handlers/scram.py | 19 | """passlib.handlers.scram - hash for SCRAM credential storage"""
#=============================================================================
# imports
#=============================================================================
# core
from binascii import hexlify, unhexlify
from base64 import b64encode, b64decode
import hashlib
import re
import logging; log = logging.getLogger(__name__)
from warnings import warn
# site
# pkg
from passlib.exc import PasslibHashWarning
from passlib.utils import ab64_decode, ab64_encode, consteq, saslprep, \
to_native_str, xor_bytes, splitcomma
from passlib.utils.compat import b, bytes, bascii_to_str, iteritems, \
PY3, u, unicode
from passlib.utils.pbkdf2 import pbkdf2, get_prf, norm_hash_name
import passlib.utils.handlers as uh
# local
__all__ = [
"scram",
]
#=============================================================================
# scram credentials hash
#=============================================================================
class scram(uh.HasRounds, uh.HasRawSalt, uh.HasRawChecksum, uh.GenericHandler):
"""This class provides a format for storing SCRAM passwords, and follows
the :ref:`password-hash-api`.
It supports a variable-length salt, and a variable number of rounds.
The :meth:`~passlib.ifc.PasswordHash.encrypt` and :meth:`~passlib.ifc.PasswordHash.genconfig` methods accept the following optional keywords:
:type salt: bytes
:param salt:
Optional salt bytes.
        If specified, the length must be between 0 and 1024 bytes.
If not specified, a 12 byte salt will be autogenerated
(this is recommended).
:type salt_size: int
:param salt_size:
Optional number of bytes to use when autogenerating new salts.
Defaults to 12 bytes, but can be any value between 0 and 1024.
:type rounds: int
:param rounds:
Optional number of rounds to use.
Defaults to 20000, but must be within ``range(1,1<<32)``.
:type algs: list of strings
:param algs:
Specify list of digest algorithms to use.
By default each scram hash will contain digests for SHA-1,
        SHA-256, and SHA-512. This can be overridden by specifying either a
list such as ``["sha-1", "sha-256"]``, or a comma-separated string
such as ``"sha-1, sha-256"``. Names are case insensitive, and may
use :mod:`!hashlib` or `IANA <http://www.iana.org/assignments/hash-function-text-names>`_
hash names.
:type relaxed: bool
:param relaxed:
By default, providing an invalid value for one of the other
keywords will result in a :exc:`ValueError`. If ``relaxed=True``,
and the error can be corrected, a :exc:`~passlib.exc.PasslibHashWarning`
will be issued instead. Correctable errors include ``rounds``
that are too small or too large, and ``salt`` strings that are too long.
.. versionadded:: 1.6
In addition to the standard :ref:`password-hash-api` methods,
this class also provides the following methods for manipulating Passlib
    scram hashes in ways useful for plugging into a SCRAM protocol stack:
.. automethod:: extract_digest_info
.. automethod:: extract_digest_algs
.. automethod:: derive_digest
"""
#===================================================================
# class attrs
#===================================================================
# NOTE: unlike most GenericHandler classes, the 'checksum' attr of
# ScramHandler is actually a map from digest_name -> digest, so
# many of the standard methods have been overridden.
# NOTE: max_salt_size and max_rounds are arbitrarily chosen to provide
# a sanity check; the underlying pbkdf2 specifies no bounds for either.
#--GenericHandler--
name = "scram"
setting_kwds = ("salt", "salt_size", "rounds", "algs")
ident = u("$scram$")
#--HasSalt--
default_salt_size = 12
min_salt_size = 0
max_salt_size = 1024
#--HasRounds--
default_rounds = 20000
min_rounds = 1
max_rounds = 2**32-1
rounds_cost = "linear"
#--custom--
# default algorithms when creating new hashes.
default_algs = ["sha-1", "sha-256", "sha-512"]
# list of algs verify prefers to use, in order.
_verify_algs = ["sha-256", "sha-512", "sha-224", "sha-384", "sha-1"]
#===================================================================
# instance attrs
#===================================================================
# 'checksum' is different from most GenericHandler subclasses,
# in that it contains a dict mapping from alg -> digest,
# or None if no checksum present.
# list of algorithms to create/compare digests for.
algs = None
#===================================================================
# scram frontend helpers
#===================================================================
@classmethod
def extract_digest_info(cls, hash, alg):
"""return (salt, rounds, digest) for specific hash algorithm.
:type hash: str
:arg hash:
:class:`!scram` hash stored for desired user
:type alg: str
:arg alg:
Name of digest algorithm (e.g. ``"sha-1"``) requested by client.
This value is run through :func:`~passlib.utils.pbkdf2.norm_hash_name`,
so it is case-insensitive, and can be the raw SCRAM
mechanism name (e.g. ``"SCRAM-SHA-1"``), the IANA name,
or the hashlib name.
:raises KeyError:
If the hash does not contain an entry for the requested digest
algorithm.
:returns:
A tuple containing ``(salt, rounds, digest)``,
where *digest* matches the raw bytes returned by
SCRAM's :func:`Hi` function for the stored password,
the provided *salt*, and the iteration count (*rounds*).
*salt* and *digest* are both raw (unencoded) bytes.
"""
# XXX: this could be sped up by writing custom parsing routine
# that just picks out relevant digest, and doesn't bother
# with full structure validation each time it's called.
alg = norm_hash_name(alg, 'iana')
self = cls.from_string(hash)
chkmap = self.checksum
if not chkmap:
raise ValueError("scram hash contains no digests")
return self.salt, self.rounds, chkmap[alg]
@classmethod
def extract_digest_algs(cls, hash, format="iana"):
"""Return names of all algorithms stored in a given hash.
:type hash: str
:arg hash:
The :class:`!scram` hash to parse
:type format: str
:param format:
This changes the naming convention used by the
returned algorithm names. By default the names
are IANA-compatible; see :func:`~passlib.utils.pbkdf2.norm_hash_name`
for possible values.
:returns:
Returns a list of digest algorithms; e.g. ``["sha-1"]``
"""
# XXX: this could be sped up by writing custom parsing routine
# that just picks out relevant names, and doesn't bother
# with full structure validation each time it's called.
algs = cls.from_string(hash).algs
if format == "iana":
return algs
else:
return [norm_hash_name(alg, format) for alg in algs]
@classmethod
def derive_digest(cls, password, salt, rounds, alg):
"""helper to create SaltedPassword digest for SCRAM.
This performs the step in the SCRAM protocol described as::
SaltedPassword := Hi(Normalize(password), salt, i)
:type password: unicode or utf-8 bytes
:arg password: password to run through digest
:type salt: bytes
:arg salt: raw salt data
:type rounds: int
:arg rounds: number of iterations.
:type alg: str
:arg alg: name of digest to use (e.g. ``"sha-1"``).
:returns:
raw bytes of ``SaltedPassword``
"""
if isinstance(password, bytes):
password = password.decode("utf-8")
password = saslprep(password).encode("utf-8")
if not isinstance(salt, bytes):
raise TypeError("salt must be bytes")
if rounds < 1:
raise ValueError("rounds must be >= 1")
alg = norm_hash_name(alg, "hashlib")
return pbkdf2(password, salt, rounds, None, "hmac-" + alg)
#===================================================================
# serialization
#===================================================================
@classmethod
def from_string(cls, hash):
hash = to_native_str(hash, "ascii", "hash")
if not hash.startswith("$scram$"):
raise uh.exc.InvalidHashError(cls)
parts = hash[7:].split("$")
if len(parts) != 3:
raise uh.exc.MalformedHashError(cls)
rounds_str, salt_str, chk_str = parts
# decode rounds
rounds = int(rounds_str)
if rounds_str != str(rounds): # forbid zero padding, etc.
raise uh.exc.MalformedHashError(cls)
# decode salt
try:
salt = ab64_decode(salt_str.encode("ascii"))
except TypeError:
raise uh.exc.MalformedHashError(cls)
# decode algs/digest list
if not chk_str:
# scram hashes MUST have something here.
raise uh.exc.MalformedHashError(cls)
elif "=" in chk_str:
# comma-separated list of 'alg=digest' pairs
algs = None
chkmap = {}
for pair in chk_str.split(","):
alg, digest = pair.split("=")
try:
chkmap[alg] = ab64_decode(digest.encode("ascii"))
except TypeError:
raise uh.exc.MalformedHashError(cls)
else:
# comma-separated list of alg names, no digests
algs = chk_str
chkmap = None
# return new object
return cls(
rounds=rounds,
salt=salt,
checksum=chkmap,
algs=algs,
)
def to_string(self, withchk=True):
salt = bascii_to_str(ab64_encode(self.salt))
chkmap = self.checksum
if withchk and chkmap:
chk_str = ",".join(
"%s=%s" % (alg, bascii_to_str(ab64_encode(chkmap[alg])))
for alg in self.algs
)
else:
chk_str = ",".join(self.algs)
return '$scram$%d$%s$%s' % (self.rounds, salt, chk_str)
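    # For reference, a hash serialized by to_string() has the layout
    #   $scram$<rounds>$<salt>$<alg>=<digest>[,<alg>=<digest>...]
    # e.g. (digest values abbreviated for illustration):
    #   $scram$6400$.Z/znnNOKWUsBaCU$sha-1=cRseQyJpnuPGn3e6d6u6JdJWk.0,...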
#===================================================================
# init
#===================================================================
def __init__(self, algs=None, **kwds):
super(scram, self).__init__(**kwds)
self.algs = self._norm_algs(algs)
def _norm_checksum(self, checksum):
if checksum is None:
return None
for alg, digest in iteritems(checksum):
if alg != norm_hash_name(alg, 'iana'):
raise ValueError("malformed algorithm name in scram hash: %r" %
(alg,))
if len(alg) > 9:
raise ValueError("SCRAM limits algorithm names to "
"9 characters: %r" % (alg,))
if not isinstance(digest, bytes):
raise uh.exc.ExpectedTypeError(digest, "raw bytes", "digests")
# TODO: verify digest size (if digest is known)
if 'sha-1' not in checksum:
# NOTE: required because of SCRAM spec.
raise ValueError("sha-1 must be in algorithm list of scram hash")
return checksum
def _norm_algs(self, algs):
"normalize algs parameter"
# determine default algs value
if algs is None:
# derive algs list from checksum (if present).
chk = self.checksum
if chk is not None:
return sorted(chk)
elif self.use_defaults:
return list(self.default_algs)
else:
raise TypeError("no algs list specified")
elif self.checksum is not None:
raise RuntimeError("checksum & algs kwds are mutually exclusive")
        # parse algs value
if isinstance(algs, str):
algs = splitcomma(algs)
algs = sorted(norm_hash_name(alg, 'iana') for alg in algs)
if any(len(alg)>9 for alg in algs):
raise ValueError("SCRAM limits alg names to max of 9 characters")
if 'sha-1' not in algs:
# NOTE: required because of SCRAM spec (rfc 5802)
raise ValueError("sha-1 must be in algorithm list of scram hash")
return algs
#===================================================================
# digest methods
#===================================================================
@classmethod
def _bind_needs_update(cls, **settings):
"generate a deprecation detector for CryptContext to use"
# generate deprecation hook which marks hashes as deprecated
# if they don't support a superset of current algs.
algs = frozenset(cls(use_defaults=True, **settings).algs)
def detector(hash, secret):
return not algs.issubset(cls.from_string(hash).algs)
return detector
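    # Illustrative use via a CryptContext (a sketch; the context invokes the
    # detector internally when hashes are checked for staleness):
    #   ctx = CryptContext(schemes=["scram"], scram__algs="sha-1,sha-256")
    #   ctx.needs_update(hash)  # True if the hash lacks a configured alg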
def _calc_checksum(self, secret, alg=None):
rounds = self.rounds
salt = self.salt
hash = self.derive_digest
if alg:
# if requested, generate digest for specific alg
return hash(secret, salt, rounds, alg)
else:
# by default, return dict containing digests for all algs
return dict(
(alg, hash(secret, salt, rounds, alg))
for alg in self.algs
)
@classmethod
def verify(cls, secret, hash, full=False):
uh.validate_secret(secret)
self = cls.from_string(hash)
chkmap = self.checksum
if not chkmap:
raise ValueError("expected %s hash, got %s config string instead" %
(cls.name, cls.name))
# NOTE: to make the verify method efficient, we just calculate hash
# of shortest digest by default. apps can pass in "full=True" to
# check entire hash for consistency.
if full:
correct = failed = False
for alg, digest in iteritems(chkmap):
other = self._calc_checksum(secret, alg)
# NOTE: could do this length check in norm_algs(),
# but don't need to be that strict, and want to be able
# to parse hashes containing algs not supported by platform.
# it's fine if we fail here though.
if len(digest) != len(other):
raise ValueError("mis-sized %s digest in scram hash: %r != %r"
% (alg, len(digest), len(other)))
if consteq(other, digest):
correct = True
else:
failed = True
if correct and failed:
raise ValueError("scram hash verified inconsistently, "
"may be corrupted")
else:
return correct
else:
# XXX: should this just always use sha1 hash? would be faster.
# otherwise only verify against one hash, pick one w/ best security.
for alg in self._verify_algs:
if alg in chkmap:
other = self._calc_checksum(secret, alg)
return consteq(other, chkmap[alg])
# there should always be sha-1 at the very least,
# or something went wrong inside _norm_algs()
raise AssertionError("sha-1 digest not found!")
#===================================================================
#
#===================================================================
#=============================================================================
# code used for testing scram against protocol examples during development.
#=============================================================================
##def _test_reference_scram():
## "quick hack testing scram reference vectors"
## # NOTE: "n,," is GS2 header - see https://tools.ietf.org/html/rfc5801
## from passlib.utils.compat import print_
##
## engine = _scram_engine(
## alg="sha-1",
## salt='QSXCR+Q6sek8bf92'.decode("base64"),
## rounds=4096,
## password=u("pencil"),
## )
## print_(engine.digest.encode("base64").rstrip())
##
## msg = engine.format_auth_msg(
## username="user",
## client_nonce = "fyko+d2lbbFgONRv9qkxdawL",
## server_nonce = "3rfcNHYJY1ZVvWVs7j",
## header='c=biws',
## )
##
## cp = engine.get_encoded_client_proof(msg)
## assert cp == "v0X8v3Bz2T0CJGbJQyF0X+HI4Ts=", cp
##
## ss = engine.get_encoded_server_sig(msg)
## assert ss == "rmF9pqV8S7suAoZWja4dJRkFsKQ=", ss
##
##class _scram_engine(object):
## """helper class for verifying scram hash behavior
## against SCRAM protocol examples. not officially part of Passlib.
##
## takes in alg, salt, rounds, and a digest or password.
##
## can calculate the various keys & messages of the scram protocol.
##
## """
## #=========================================================
## # init
## #=========================================================
##
## @classmethod
## def from_string(cls, hash, alg):
## "create record from scram hash, for given alg"
## return cls(alg, *scram.extract_digest_info(hash, alg))
##
## def __init__(self, alg, salt, rounds, digest=None, password=None):
## self.alg = norm_hash_name(alg)
## self.salt = salt
## self.rounds = rounds
## self.password = password
## if password:
## data = scram.derive_digest(password, salt, rounds, alg)
## if digest and data != digest:
## raise ValueError("password doesn't match digest")
## else:
## digest = data
## elif not digest:
## raise TypeError("must provide password or digest")
## self.digest = digest
##
## #=========================================================
## # frontend methods
## #=========================================================
## def get_hash(self, data):
## "return hash of raw data"
## return hashlib.new(iana_to_hashlib(self.alg), data).digest()
##
## def get_client_proof(self, msg):
## "return client proof of specified auth msg text"
## return xor_bytes(self.client_key, self.get_client_sig(msg))
##
## def get_encoded_client_proof(self, msg):
## return self.get_client_proof(msg).encode("base64").rstrip()
##
## def get_client_sig(self, msg):
## "return client signature of specified auth msg text"
## return self.get_hmac(self.stored_key, msg)
##
## def get_server_sig(self, msg):
## "return server signature of specified auth msg text"
## return self.get_hmac(self.server_key, msg)
##
## def get_encoded_server_sig(self, msg):
## return self.get_server_sig(msg).encode("base64").rstrip()
##
## def format_server_response(self, client_nonce, server_nonce):
## return 'r={client_nonce}{server_nonce},s={salt},i={rounds}'.format(
## client_nonce=client_nonce,
## server_nonce=server_nonce,
## rounds=self.rounds,
## salt=self.encoded_salt,
## )
##
## def format_auth_msg(self, username, client_nonce, server_nonce,
## header='c=biws'):
## return (
## 'n={username},r={client_nonce}'
## ','
## 'r={client_nonce}{server_nonce},s={salt},i={rounds}'
## ','
## '{header},r={client_nonce}{server_nonce}'
## ).format(
## username=username,
## client_nonce=client_nonce,
## server_nonce=server_nonce,
## salt=self.encoded_salt,
## rounds=self.rounds,
## header=header,
## )
##
## #=========================================================
## # helpers to calculate & cache constant data
## #=========================================================
## def _calc_get_hmac(self):
## return get_prf("hmac-" + iana_to_hashlib(self.alg))[0]
##
## def _calc_client_key(self):
## return self.get_hmac(self.digest, b("Client Key"))
##
## def _calc_stored_key(self):
## return self.get_hash(self.client_key)
##
## def _calc_server_key(self):
## return self.get_hmac(self.digest, b("Server Key"))
##
## def _calc_encoded_salt(self):
## return self.salt.encode("base64").rstrip()
##
## #=========================================================
## # hacks for calculated attributes
## #=========================================================
##
## def __getattr__(self, attr):
## if not attr.startswith("_"):
## f = getattr(self, "_calc_" + attr, None)
## if f:
## value = f()
## setattr(self, attr, value)
## return value
## raise AttributeError("attribute not found")
##
## def __dir__(self):
## cdir = dir(self.__class__)
## attrs = set(cdir)
## attrs.update(self.__dict__)
## attrs.update(attr[6:] for attr in cdir
## if attr.startswith("_calc_"))
## return sorted(attrs)
## #=========================================================
## # eoc
## #=========================================================
#=============================================================================
# eof
#=============================================================================
|
gdm/aws-cfn-resource-bridge | refs/heads/master | aws/cfn/__init__.py | 8 | #==============================================================================
# Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#==============================================================================
__author__ = 'aws'
|
abhilashnta/edx-platform | refs/heads/master | cms/envs/test.py | 31 | # -*- coding: utf-8 -*-
"""
This config file runs the simplest dev environment using sqlite, and db-based
sessions. Assumes structure:
/envroot/
/db # This is where it'll write the database file
/edx-platform # The location of this repo
/log # Where we're going to write log files
"""
# We intentionally define lots of variables that aren't used, and
# want to import all variables from base settings files
# pylint: disable=wildcard-import, unused-wildcard-import
# Pylint gets confused by path.py instances, which report themselves as class
# objects. As a result, pylint applies the wrong regex in validating names,
# and throws spurious errors. Therefore, we disable invalid-name checking.
# pylint: disable=invalid-name
from .common import *
import os
from path import path
from warnings import filterwarnings, simplefilter
from uuid import uuid4
# import settings from LMS for consistent behavior with CMS
# pylint: disable=unused-import
from lms.envs.test import (
WIKI_ENABLED,
PLATFORM_NAME,
SITE_NAME,
DEFAULT_FILE_STORAGE,
MEDIA_ROOT,
MEDIA_URL,
# This is practically unused but needed by the oauth2_provider package, which
# some tests in common/ rely on.
OAUTH_OIDC_ISSUER,
)
# mongo connection settings
MONGO_PORT_NUM = int(os.environ.get('EDXAPP_TEST_MONGO_PORT', '27017'))
MONGO_HOST = os.environ.get('EDXAPP_TEST_MONGO_HOST', 'localhost')
THIS_UUID = uuid4().hex[:5]
# Nose Test Runner
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
_SYSTEM = 'cms'
_REPORT_DIR = REPO_ROOT / 'reports' / _SYSTEM
_REPORT_DIR.makedirs_p()
_NOSEID_DIR = REPO_ROOT / '.testids' / _SYSTEM
_NOSEID_DIR.makedirs_p()
NOSE_ARGS = [
'--id-file', _NOSEID_DIR / 'noseids',
'--xunit-file', _REPORT_DIR / 'nosetests.xml',
]
TEST_ROOT = path('test_root')
# Want static files in the same dir for running on jenkins.
STATIC_ROOT = TEST_ROOT / "staticfiles"
GITHUB_REPO_ROOT = TEST_ROOT / "data"
DATA_DIR = TEST_ROOT / "data"
COMMON_TEST_DATA_ROOT = COMMON_ROOT / "test" / "data"
# For testing "push to lms"
FEATURES['ENABLE_EXPORT_GIT'] = True
GIT_REPO_EXPORT_DIR = TEST_ROOT / "export_course_repos"
# Makes the tests run much faster...
SOUTH_TESTS_MIGRATE = False # To disable migrations and use syncdb instead
# TODO (cpennington): We need to figure out how envs/test.py can inject things into common.py so that we don't have to repeat this sort of thing
STATICFILES_DIRS = [
COMMON_ROOT / "static",
PROJECT_ROOT / "static",
]
STATICFILES_DIRS += [
(course_dir, COMMON_TEST_DATA_ROOT / course_dir)
for course_dir in os.listdir(COMMON_TEST_DATA_ROOT)
if os.path.isdir(COMMON_TEST_DATA_ROOT / course_dir)
]
# Avoid having to run collectstatic before the unit test suite
# If we don't add these settings, then Django templates that can't
# find pipelined assets will raise a ValueError.
# http://stackoverflow.com/questions/12816941/unit-testing-with-django-pipeline
STATICFILES_STORAGE = 'pipeline.storage.NonPackagingPipelineStorage'
STATIC_URL = "/static/"
PIPELINE_ENABLED = False
TENDER_DOMAIN = "help.edge.edx.org"
TENDER_SUBDOMAIN = "edxedge"
# Update module store settings per defaults for tests
update_module_store_settings(
MODULESTORE,
module_store_options={
'default_class': 'xmodule.raw_module.RawDescriptor',
'fs_root': TEST_ROOT / "data",
},
doc_store_settings={
'db': 'test_xmodule',
'host': MONGO_HOST,
'port': MONGO_PORT_NUM,
'collection': 'test_modulestore{0}'.format(THIS_UUID),
},
)
CONTENTSTORE = {
'ENGINE': 'xmodule.contentstore.mongo.MongoContentStore',
'DOC_STORE_CONFIG': {
'host': MONGO_HOST,
'db': 'test_xcontent',
'port': MONGO_PORT_NUM,
'collection': 'dont_trip',
},
# allow for additional options that can be keyed on a name, e.g. 'trashcan'
'ADDITIONAL_OPTIONS': {
'trashcan': {
'bucket': 'trash_fs'
}
}
}
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': TEST_ROOT / "db" / "cms.db",
},
}
LMS_BASE = "localhost:8000"
FEATURES['PREVIEW_LMS_BASE'] = "preview"
CACHES = {
# This is the cache used for most things. Askbot will not work without a
# functioning cache -- it relies on caching to load its settings in places.
# In staging/prod envs, the sessions also live here.
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'edx_loc_mem_cache',
'KEY_FUNCTION': 'util.memcache.safe_key',
},
# The general cache is what you get if you use our util.cache. It's used for
# things like caching the course.xml file for different A/B test groups.
# We set it to be a DummyCache to force reloading of course.xml in dev.
# In staging environments, we would grab VERSION from data uploaded by the
# push process.
'general': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
'KEY_PREFIX': 'general',
'VERSION': 4,
'KEY_FUNCTION': 'util.memcache.safe_key',
},
'mongo_metadata_inheritance': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': os.path.join(tempfile.gettempdir(), 'mongo_metadata_inheritance'),
'TIMEOUT': 300,
'KEY_FUNCTION': 'util.memcache.safe_key',
},
'loc_cache': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'edx_location_mem_cache',
},
'course_structure_cache': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
},
}
# Add external_auth to Installed apps for testing
INSTALLED_APPS += ('external_auth', )
# Add milestones to Installed apps for testing
INSTALLED_APPS += ('milestones', 'openedx.core.djangoapps.call_stack_manager')
# hide ratelimit warnings while running tests
filterwarnings('ignore', message='No request passed to the backend, unable to rate-limit')
# Ignore deprecation warnings (so we don't clutter Jenkins builds/production)
# https://docs.python.org/2/library/warnings.html#the-warnings-filter
# Change to "default" to see the first instance of each hit
# or "error" to convert all into errors
simplefilter('ignore')
################################# CELERY ######################################
CELERY_ALWAYS_EAGER = True
CELERY_RESULT_BACKEND = 'djcelery.backends.cache:CacheBackend'
########################### Server Ports ###################################
# These ports are carefully chosen so that if the browser needs to
# access them, they will be available through the SauceLabs SSH tunnel
LETTUCE_SERVER_PORT = 8003
XQUEUE_PORT = 8040
YOUTUBE_PORT = 8031
LTI_PORT = 8765
VIDEO_SOURCE_PORT = 8777
################### Make tests faster
# http://slacy.com/blog/2012/04/make-your-tests-faster-in-django-1-4/
PASSWORD_HASHERS = (
'django.contrib.auth.hashers.SHA1PasswordHasher',
'django.contrib.auth.hashers.MD5PasswordHasher',
)
# dummy segment-io key
SEGMENT_IO_KEY = '***REMOVED***'
FEATURES['ENABLE_SERVICE_STATUS'] = True
# Toggles embargo on for testing
FEATURES['EMBARGO'] = True
# set up some testing for microsites
MICROSITE_CONFIGURATION = {
"test_microsite": {
"domain_prefix": "testmicrosite",
"university": "test_microsite",
"platform_name": "Test Microsite",
"logo_image_url": "test_microsite/images/header-logo.png",
"email_from_address": "[email protected]",
"payment_support_email": "[email protected]",
"ENABLE_MKTG_SITE": False,
"SITE_NAME": "test_microsite.localhost",
"course_org_filter": "TestMicrositeX",
"course_about_show_social_links": False,
"css_overrides_file": "test_microsite/css/test_microsite.css",
"show_partners": False,
"show_homepage_promo_video": False,
"course_index_overlay_text": "This is a Test Microsite Overlay Text.",
"course_index_overlay_logo_file": "test_microsite/images/header-logo.png",
"homepage_overlay_html": "<h1>This is a Test Microsite Overlay HTML</h1>"
},
"default": {
"university": "default_university",
"domain_prefix": "www",
}
}
MICROSITE_ROOT_DIR = COMMON_ROOT / 'test' / 'test_microsites'
FEATURES['USE_MICROSITES'] = True
# For consistency in user-experience, keep the value of this setting in sync with
# the one in lms/envs/test.py
FEATURES['ENABLE_DISCUSSION_SERVICE'] = False
# Enable a parental consent age limit for testing
PARENTAL_CONSENT_AGE_LIMIT = 13
# Enable content libraries code for the tests
FEATURES['ENABLE_CONTENT_LIBRARIES'] = True
FEATURES['ENABLE_EDXNOTES'] = True
# MILESTONES
FEATURES['MILESTONES_APP'] = True
# ENTRANCE EXAMS
FEATURES['ENTRANCE_EXAMS'] = True
ENTRANCE_EXAM_MIN_SCORE_PCT = 50
VIDEO_CDN_URL = {
'CN': 'http://api.xuetangx.com/edx/video?s3_url='
}
# Courseware Search Index
FEATURES['ENABLE_COURSEWARE_INDEX'] = True
FEATURES['ENABLE_LIBRARY_INDEX'] = True
SEARCH_ENGINE = "search.tests.mock_search_engine.MockSearchEngine"
# teams feature
FEATURES['ENABLE_TEAMS'] = True
# Dummy secret key for dev/test
SECRET_KEY = '85920908f28904ed733fe576320db18cabd7b6cd'
|
MostlyOpen/odoo_addons_jcafb | refs/heads/master | myo_employee_cst/models/hr_employee_seq.py | 1 | # -*- coding: utf-8 -*-
###############################################################################
#
# Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from openerp import models, fields
class Employee(models.Model):
_inherit = 'hr.employee'
# code = fields.Char('Code', index=True, required=False, readonly=False, default=False,
# help='Use "/" to get an automatic new Employee Code.')
code = fields.Char('Code', index=True, required=False, readonly=False, default='/',
help='Use "/" to get an automatic new Employee Code.')
|
moijes12/oh-mainline | refs/heads/master | vendor/packages/celery/docs/slidesource/slide-example1.py | 23 | from celery.task import Task
class MyTask(Task):
def run(self, x, y):
return x * y
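# Usage sketch (assumes a configured broker; .delay() enqueues the task and
# returns an AsyncResult):
#
#   result = MyTask.delay(2, 4)
#   result.get()  # 8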
|
d-led/influxdb-cpp-rest | refs/heads/master | deps/fmt/support/appveyor-build.py | 10 | #!/usr/bin/env python
# Build the project on AppVeyor.
import os
from subprocess import check_call
build = os.environ['BUILD']
config = os.environ['CONFIGURATION']
platform = os.environ['PLATFORM']
path = os.environ['PATH']
image = os.environ['APPVEYOR_BUILD_WORKER_IMAGE']
jobid = os.environ['APPVEYOR_JOB_ID']
cmake_command = ['cmake', '-DFMT_PEDANTIC=ON', '-DCMAKE_BUILD_TYPE=' + config, '..']
if build == 'mingw':
cmake_command.append('-GMinGW Makefiles')
build_command = ['mingw32-make', '-j4']
test_command = ['mingw32-make', 'test']
# Remove the path to Git bin directory from $PATH because it breaks
# MinGW config.
path = path.replace(r'C:\Program Files (x86)\Git\bin', '')
os.environ['PATH'] = r'C:\MinGW\bin;' + path
else:
    # Add MSBuild 15.0 to PATH as described in
# http://help.appveyor.com/discussions/problems/2229-v140-not-found-on-vs2105rc.
os.environ['PATH'] = r'C:\Program Files (x86)\MSBuild\15.0\Bin;' + path
if image == 'Visual Studio 2013':
generator = 'Visual Studio 12 2013'
elif image == 'Visual Studio 2015':
generator = 'Visual Studio 14 2015'
elif image == 'Visual Studio 2017':
generator = 'Visual Studio 15 2017'
if platform == 'x64':
generator += ' Win64'
cmake_command.append('-G' + generator)
build_command = ['cmake', '--build', '.', '--config', config, '--', '/m:4']
test_command = ['ctest', '-C', config]
check_call(cmake_command)
check_call(build_command)
check_call(test_command)
|
tonypujals/POSTMan-Chrome-Extension | refs/heads/master | tests/selenium/pmtests/postman_tests_requests.py | 104 | from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support.select import Select
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys
import selenium.webdriver.chrome.service as service
import traceback
import inspect
import time
from postman_tests import PostmanTests
class PostmanTestsRequests(PostmanTests):
def test_1_get_basic(self):
self.reset_request()
self.set_url_field(self.browser, "http://localhost:5000/get")
send_button = self.browser.find_element_by_id("submit-request")
send_button.click()
code_data_value = self.get_codemirror_value(self.browser)
if code_data_value.find("get") > 0:
return True
else:
return False
def test_2_get_only_key(self):
self.reset_request()
self.set_url_field(self.browser, "http://localhost:5000/get?start")
send_button = self.browser.find_element_by_id("submit-request")
send_button.click()
code_data_value = self.get_codemirror_value(self.browser)
if code_data_value.find("/get?start") > 0:
return True
else:
return False
def test_3_delete_basic(self):
self.reset_request()
self.set_url_field(self.browser, "http://localhost:5000/delete")
method_select = self.browser.find_element_by_id("request-method-selector")
Select(method_select).select_by_value("DELETE")
send_button = self.browser.find_element_by_id("submit-request")
send_button.click()
code_data_value = self.get_codemirror_value(self.browser)
if code_data_value.find("delete") > 0:
return True
else:
return False
def test_4_head_basic(self):
self.reset_request()
self.set_url_field(self.browser, "http://localhost:5000/html")
method_select = self.browser.find_element_by_id("request-method-selector")
Select(method_select).select_by_value("HEAD")
send_button = self.browser.find_element_by_id("submit-request")
send_button.click()
code_data_value = self.get_codemirror_value(self.browser)
if code_data_value.find("div") > 0:
return True
else:
return False
def test_5_options_basic(self):
self.reset_request()
self.set_url_field(self.browser, "http://localhost:5000/html")
method_select = self.browser.find_element_by_id("request-method-selector")
Select(method_select).select_by_value("OPTIONS")
send_button = self.browser.find_element_by_id("submit-request")
send_button.click()
code_data_value = self.get_codemirror_value(self.browser)
if code_data_value.find("div") > 0:
return True
else:
return False
def test_6_post_basic(self):
self.reset_request()
self.set_url_field(self.browser, "http://localhost:5000/post")
method_select = self.browser.find_element_by_id("request-method-selector")
Select(method_select).select_by_value("POST")
send_button = self.browser.find_element_by_id("submit-request")
send_button.click()
code_data_value = self.get_codemirror_value(self.browser)
if code_data_value.find("post") > 0:
return True
else:
return False
def test_7_put_basic(self):
self.reset_request()
self.set_url_field(self.browser, "http://localhost:5000/put")
method_select = self.browser.find_element_by_id("request-method-selector")
Select(method_select).select_by_value("PUT")
send_button = self.browser.find_element_by_id("submit-request")
send_button.click()
code_data_value = self.get_codemirror_value(self.browser)
if code_data_value.find("put") > 0:
return True
else:
return False
def test_8_init_environment(self):
environment_selector = self.browser.find_element_by_id("environment-selector")
environment_selector.click()
time.sleep(0.1)
manage_env_link = self.browser.find_element_by_css_selector("#environment-selector .dropdown-menu li:last-child a")
manage_env_link.click()
time.sleep(1)
add_env_button = self.browser.find_element_by_css_selector("#environments-list-wrapper .toolbar .environments-actions-add")
add_env_button.click()
time.sleep(0.3)
environment_name = self.browser.find_element_by_id("environment-editor-name")
environment_name.clear()
environment_name.send_keys("Requests environment")
first_key = self.browser.find_element_by_css_selector("#environment-keyvaleditor .keyvalueeditor-row:first-child .keyvalueeditor-key")
first_key.clear()
first_key.send_keys("path_get")
first_val = self.browser.find_element_by_css_selector("#environment-keyvaleditor .keyvalueeditor-row:first-child .keyvalueeditor-value")
first_val.clear()
first_val.send_keys("get?start=something")
second_key = self.browser.find_element_by_css_selector("#environment-keyvaleditor .keyvalueeditor-row:nth-of-type(2) .keyvalueeditor-key")
second_key.clear()
second_key.send_keys("path_post")
second_val = self.browser.find_element_by_css_selector("#environment-keyvaleditor .keyvalueeditor-row:nth-of-type(2) .keyvalueeditor-value")
second_val.clear()
second_val.send_keys("post")
third_key = self.browser.find_element_by_css_selector("#environment-keyvaleditor .keyvalueeditor-row:nth-of-type(3) .keyvalueeditor-key")
third_key.clear()
third_key.send_keys("Foo")
third_val = self.browser.find_element_by_css_selector("#environment-keyvaleditor .keyvalueeditor-row:nth-of-type(3) .keyvalueeditor-value")
third_val.clear()
third_val.send_keys("Bar")
fourth_key = self.browser.find_element_by_css_selector("#environment-keyvaleditor .keyvalueeditor-row:nth-of-type(4) .keyvalueeditor-key")
fourth_key.clear()
fourth_key.send_keys("Name")
fourth_val = self.browser.find_element_by_css_selector("#environment-keyvaleditor .keyvalueeditor-row:nth-of-type(4) .keyvalueeditor-value")
fourth_val.clear()
fourth_val.send_keys("John Appleseed")
fifth_key = self.browser.find_element_by_css_selector("#environment-keyvaleditor .keyvalueeditor-row:nth-of-type(5) .keyvalueeditor-key")
fifth_key.clear()
fifth_key.send_keys("nonce")
fifth_val = self.browser.find_element_by_css_selector("#environment-keyvaleditor .keyvalueeditor-row:nth-of-type(5) .keyvalueeditor-value")
fifth_val.clear()
fifth_val.send_keys("kllo9940pd9333jh")
sixth_key = self.browser.find_element_by_css_selector("#environment-keyvaleditor .keyvalueeditor-row:nth-of-type(6) .keyvalueeditor-key")
sixth_key.clear()
sixth_key.send_keys("timestamp")
sixth_val = self.browser.find_element_by_css_selector("#environment-keyvaleditor .keyvalueeditor-row:nth-of-type(6) .keyvalueeditor-value")
sixth_val.clear()
sixth_val.send_keys("1191242096")
seventh_key = self.browser.find_element_by_css_selector("#environment-keyvaleditor .keyvalueeditor-row:nth-of-type(7) .keyvalueeditor-key")
seventh_key.clear()
seventh_key.send_keys("url")
seventh_val = self.browser.find_element_by_css_selector("#environment-keyvaleditor .keyvalueeditor-row:nth-of-type(7) .keyvalueeditor-value")
seventh_val.clear()
seventh_val.send_keys("http://photos.example.net/photos")
eigth_key = self.browser.find_element_by_css_selector("#environment-keyvaleditor .keyvalueeditor-row:nth-of-type(8) .keyvalueeditor-key")
eigth_key.clear()
eigth_key.send_keys("file")
eigth_val = self.browser.find_element_by_css_selector("#environment-keyvaleditor .keyvalueeditor-row:nth-of-type(8) .keyvalueeditor-value")
eigth_val.clear()
eigth_val.send_keys("vacation.jpg")
ninth_key = self.browser.find_element_by_css_selector("#environment-keyvaleditor .keyvalueeditor-row:nth-of-type(9) .keyvalueeditor-key")
ninth_key.clear()
ninth_key.send_keys("size")
ninth_val = self.browser.find_element_by_css_selector("#environment-keyvaleditor .keyvalueeditor-row:nth-of-type(9) .keyvalueeditor-value")
ninth_val.clear()
ninth_val.send_keys("original")
submit_button = self.browser.find_element_by_css_selector("#modal-environments .environments-actions-add-submit")
submit_button.click()
time.sleep(0.3)
close_button = self.browser.find_element_by_css_selector("#modal-environments .modal-header .close")
close_button.click()
time.sleep(1)
environment_selector = self.browser.find_element_by_id("environment-selector")
environment_selector.click()
# Select the environment
manage_env_link = self.browser.find_element_by_css_selector("#environment-selector .dropdown-menu li:nth-of-type(1) a")
manage_env_link.click()
return True
def test_9_get_environment(self):
self.reset_request()
self.set_url_field(self.browser, "http://localhost:5000/{{path_get}}")
send_button = self.browser.find_element_by_id("submit-request")
send_button.click()
code_data_value = self.get_codemirror_value(self.browser)
if code_data_value.find("get?start=something") > 0:
return True
else:
return False
def test_10_post_formdata_environment(self):
self.reset_request()
self.set_url_field(self.browser, "http://localhost:5000/{{path_post}}")
method_select = self.browser.find_element_by_id("request-method-selector")
Select(method_select).select_by_value("POST")
first_formdata_key = self.browser.find_element_by_css_selector("#formdata-keyvaleditor .keyvalueeditor-row:nth-of-type(1) .keyvalueeditor-key")
first_formdata_key.clear()
first_formdata_key.send_keys("size")
first_formdata_value = self.browser.find_element_by_css_selector("#formdata-keyvaleditor .keyvalueeditor-row:nth-of-type(1) .keyvalueeditor-value")
first_formdata_value.clear()
first_formdata_value.send_keys("{{size}}")
second_formdata_key = self.browser.find_element_by_css_selector("#formdata-keyvaleditor .keyvalueeditor-row:nth-of-type(2) .keyvalueeditor-key")
second_formdata_key.clear()
second_formdata_key.send_keys("file")
second_formdata_value = self.browser.find_element_by_css_selector("#formdata-keyvaleditor .keyvalueeditor-row:nth-of-type(2) .keyvalueeditor-value")
second_formdata_value.clear()
second_formdata_value.send_keys("{{file}}")
send_button = self.browser.find_element_by_id("submit-request")
send_button.click()
code_data_value = self.get_codemirror_value(self.browser)
if code_data_value.find("original") > 0:
return True
else:
return False
def test_11_post_urlencoded_environment(self):
self.reset_request()
self.set_url_field(self.browser, "http://localhost:5000/{{path_post}}")
method_select = self.browser.find_element_by_id("request-method-selector")
Select(method_select).select_by_value("POST")
# Select urlencoded
self.browser.find_element_by_css_selector("#data-mode-selector a:nth-of-type(2)").click()
first_formdata_key = self.browser.find_element_by_css_selector("#urlencoded-keyvaleditor .keyvalueeditor-row:nth-of-type(1) .keyvalueeditor-key")
first_formdata_key.clear()
first_formdata_key.send_keys("size")
first_formdata_value = self.browser.find_element_by_css_selector("#urlencoded-keyvaleditor .keyvalueeditor-row:nth-of-type(1) .keyvalueeditor-value")
first_formdata_value.clear()
first_formdata_value.send_keys("{{size}}")
second_formdata_key = self.browser.find_element_by_css_selector("#urlencoded-keyvaleditor .keyvalueeditor-row:nth-of-type(2) .keyvalueeditor-key")
second_formdata_key.clear()
second_formdata_key.send_keys("file")
second_formdata_value = self.browser.find_element_by_css_selector("#urlencoded-keyvaleditor .keyvalueeditor-row:nth-of-type(2) .keyvalueeditor-value")
second_formdata_value.clear()
second_formdata_value.send_keys("{{file}}")
send_button = self.browser.find_element_by_id("submit-request")
send_button.click()
code_data_value = self.get_codemirror_value(self.browser)
if code_data_value.find("original") > 0:
return True
else:
return False
def test_12_post_raw_environment(self):
self.reset_request()
self.set_url_field(self.browser, "http://localhost:5000/{{path_post}}")
method_select = self.browser.find_element_by_id("request-method-selector")
Select(method_select).select_by_value("POST")
# Select urlencoded
self.browser.find_element_by_css_selector("#data-mode-selector a:nth-of-type(3)").click()
self.set_code_mirror_raw_value("{{Foo}}={{Name}}")
send_button = self.browser.find_element_by_id("submit-request")
send_button.click()
code_data_value = self.get_codemirror_value(self.browser)
if code_data_value.find("John Appleseed") > 0:
return True
else:
return False
def test_13_post_raw_json_environment(self):
self.reset_request()
self.set_url_field(self.browser, "http://localhost:5000/{{path_post}}")
method_select = self.browser.find_element_by_id("request-method-selector")
Select(method_select).select_by_value("POST")
self.browser.find_element_by_css_selector("#data-mode-selector a:nth-of-type(3)").click()
self.set_code_mirror_raw_value("{\"{{Foo}}\":\"{{Name}}\"")
send_button = self.browser.find_element_by_id("submit-request")
send_button.click()
code_data_value = self.get_codemirror_value(self.browser)
if code_data_value.find("John Appleseed") > 0:
return True
else:
return False
# https://github.com/a85/POSTMan-Chrome-Extension/issues/174
def test_14_url_with_semicolon(self):
self.reset_request()
self.set_url_field(self.browser, "http://localhost:5000/get?some=start;val")
send_button = self.browser.find_element_by_id("submit-request")
send_button.click()
code_data_value = self.get_codemirror_value(self.browser)
if code_data_value.find("/get?some=start;val") > 0:
return True
else:
return False
# https://github.com/a85/POSTMan-Chrome-Extension/issues/165
def test_15_odata_url(self):
self.reset_request()
self.set_url_field(self.browser, "http://localhost:5000/Resource(code1='1',code2='1')")
send_button = self.browser.find_element_by_id("submit-request")
send_button.click()
code_data_value = self.get_codemirror_value(self.browser)
if code_data_value.find("Not Found") > 0:
first_history_item = self.browser.find_element_by_css_selector("#history-items li:nth-of-type(1) .request .request-name")
value = self.browser.execute_script("return arguments[0].innerHTML", first_history_item)
if value.find("http://localhost:5000/Resource(code1='1'<br>,code2='1')") > 0:
return True
else:
return False
else:
return False
def test_16_with_no_cache(self):
self.reset_request()
self.set_url_field(self.browser, "http://localhost:5000/get")
settings_button = self.browser.find_element_by_css_selector(".preferences a:nth-of-type(2)")
settings_button.click()
time.sleep(1)
no_cache_select = self.browser.find_element_by_id("send-no-cache-header")
Select(no_cache_select).select_by_value("true")
close_button = self.browser.find_element_by_css_selector("#modal-settings .modal-header .close")
close_button.click()
time.sleep(1)
self.set_url_field(self.browser, "http://localhost:5000/get")
send_button = self.browser.find_element_by_id("submit-request")
send_button.click()
code_data_value = self.get_codemirror_value(self.browser)
if code_data_value.find("no-cache") > 0:
return True
else:
return False
def test_17_without_no_cache(self):
self.reset_request()
self.set_url_field(self.browser, "http://localhost:5000/get")
settings_button = self.browser.find_element_by_css_selector(".preferences a:nth-of-type(2)")
settings_button.click()
time.sleep(1)
no_cache_select = self.browser.find_element_by_id("send-no-cache-header")
Select(no_cache_select).select_by_value("false")
close_button = self.browser.find_element_by_css_selector("#modal-settings .modal-header .close")
close_button.click()
time.sleep(1)
self.set_url_field(self.browser, "http://localhost:5000/get")
send_button = self.browser.find_element_by_id("submit-request")
send_button.click()
code_data_value = self.get_codemirror_value(self.browser)
if code_data_value.find("no-cache") < 0:
return True
else:
return False
def test_18_raw_json_type(self):
self.reset_request()
self.set_url_field(self.browser, "http://localhost:5000/post")
self.browser.find_element_by_id("headers-keyvaleditor-actions-open").click()
time.sleep(0.1)
first_key = self.browser.find_element_by_css_selector("#headers-keyvaleditor .keyvalueeditor-row:first-child .keyvalueeditor-key")
first_key.clear()
first_key.send_keys("Content-Type")
first_val = self.browser.find_element_by_css_selector("#headers-keyvaleditor .keyvalueeditor-row:first-child .keyvalueeditor-value")
first_val.clear()
first_val.send_keys("text/json")
method_select = self.browser.find_element_by_id("request-method-selector")
Select(method_select).select_by_value("POST")
self.browser.find_element_by_css_selector("#data-mode-selector a:nth-of-type(3)").click()
self.set_code_mirror_raw_value("{\"{{Foo}}\":\"{{Name}}\"")
send_button = self.browser.find_element_by_id("submit-request")
send_button.click()
code_data_value = self.get_codemirror_value(self.browser)
if code_data_value.find("text/json") > 0:
self.reset_request();
first_history_item = self.browser.find_element_by_css_selector("#history-items li:nth-of-type(1) .request")
first_history_item.click()
try:
w = WebDriverWait(self.browser, 10)
w.until(lambda browser: self.browser.find_element_by_id("url").get_attribute("value") == "http://localhost:5000/post")
selected_mode_element = self.browser.find_element_by_id("body-editor-mode-item-selected")
selected_mode_element_value = self.browser.execute_script("return arguments[0].innerHTML", selected_mode_element)
if selected_mode_element_value.find("JSON") == 0:
return True
else:
return False
except:
return False
else:
return False
def test_19_raw_xml_type(self):
self.reset_request()
self.set_url_field(self.browser, "http://localhost:5000/post")
self.browser.find_element_by_id("headers-keyvaleditor-actions-open").click()
time.sleep(0.1)
first_key = self.browser.find_element_by_css_selector("#headers-keyvaleditor .keyvalueeditor-row:first-child .keyvalueeditor-key")
first_key.clear()
first_key.send_keys("Content-Type")
first_val = self.browser.find_element_by_css_selector("#headers-keyvaleditor .keyvalueeditor-row:first-child .keyvalueeditor-value")
first_val.clear()
first_val.send_keys("text/xml")
method_select = self.browser.find_element_by_id("request-method-selector")
Select(method_select).select_by_value("POST")
self.browser.find_element_by_css_selector("#data-mode-selector a:nth-of-type(3)").click()
self.set_code_mirror_raw_value("{\"{{Foo}}\":\"{{Name}}\"")
send_button = self.browser.find_element_by_id("submit-request")
send_button.click()
code_data_value = self.get_codemirror_value(self.browser)
if code_data_value.find("text/xml") > 0:
            self.reset_request()
first_history_item = self.browser.find_element_by_css_selector("#history-items li:nth-of-type(1) .request")
first_history_item.click()
try:
w = WebDriverWait(self.browser, 10)
w.until(lambda browser: self.browser.find_element_by_id("url").get_attribute("value") == "http://localhost:5000/post")
selected_mode_element = self.browser.find_element_by_id("body-editor-mode-item-selected")
selected_mode_element_value = self.browser.execute_script("return arguments[0].innerHTML", selected_mode_element)
if selected_mode_element_value.find("XML") == 0:
return True
else:
return False
            except Exception:
return False
else:
return False
def na_test_20_raw_large_request(self):
self.reset_request()
self.set_url_field(self.browser, "http://localhost:5000/post")
method_select = self.browser.find_element_by_id("request-method-selector")
Select(method_select).select_by_value("POST")
self.browser.find_element_by_css_selector("#data-mode-selector a:nth-of-type(3)").click()
try:
raw_json = open("large_json.json").read()
self.set_code_mirror_raw_value(raw_json)
send_button = self.browser.find_element_by_id("submit-request")
send_button.click()
code_data_value = self.get_codemirror_value(self.browser)
if code_data_value.find("images/user_1.png") > 0:
return True
else:
return False
        except Exception:
print traceback.format_exc()
return False
PostmanTestsRequests().run()
|
TimYi/django | refs/heads/master | tests/schema/fields.py | 203 | from django.db import models
from django.db.models.fields.related import (
RECURSIVE_RELATIONSHIP_CONSTANT, ManyRelatedObjectsDescriptor,
ManyToManyField, ManyToManyRel, RelatedField,
create_many_to_many_intermediary_model,
)
from django.utils.functional import curry
class CustomManyToManyField(RelatedField):
"""
Ticket #24104 - Need to have a custom ManyToManyField,
which is not an inheritor of ManyToManyField.
"""
many_to_many = True
def __init__(self, to, db_constraint=True, swappable=True, **kwargs):
try:
to._meta
except AttributeError:
to = str(to)
kwargs['rel'] = ManyToManyRel(
self, to,
related_name=kwargs.pop('related_name', None),
related_query_name=kwargs.pop('related_query_name', None),
limit_choices_to=kwargs.pop('limit_choices_to', None),
symmetrical=kwargs.pop('symmetrical', to == RECURSIVE_RELATIONSHIP_CONSTANT),
through=kwargs.pop('through', None),
through_fields=kwargs.pop('through_fields', None),
db_constraint=db_constraint,
)
self.swappable = swappable
self.db_table = kwargs.pop('db_table', None)
if kwargs['rel'].through is not None:
assert self.db_table is None, "Cannot specify a db_table if an intermediary model is used."
super(CustomManyToManyField, self).__init__(**kwargs)
def contribute_to_class(self, cls, name, **kwargs):
if self.remote_field.symmetrical and (
self.remote_field.model == "self" or self.remote_field.model == cls._meta.object_name):
self.remote_field.related_name = "%s_rel_+" % name
super(CustomManyToManyField, self).contribute_to_class(cls, name, **kwargs)
if not self.remote_field.through and not cls._meta.abstract and not cls._meta.swapped:
self.remote_field.through = create_many_to_many_intermediary_model(self, cls)
setattr(cls, self.name, ManyRelatedObjectsDescriptor(self.remote_field))
self.m2m_db_table = curry(self._get_m2m_db_table, cls._meta)
def get_internal_type(self):
return 'ManyToManyField'
# Copy those methods from ManyToManyField because they don't call super() internally
contribute_to_related_class = ManyToManyField.__dict__['contribute_to_related_class']
_get_m2m_attr = ManyToManyField.__dict__['_get_m2m_attr']
_get_m2m_reverse_attr = ManyToManyField.__dict__['_get_m2m_reverse_attr']
_get_m2m_db_table = ManyToManyField.__dict__['_get_m2m_db_table']
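# A minimal sketch (hypothetical model, not part of this module) of how
# the custom field above would be declared; the schema tests exercise it
# the same way as a stock ManyToManyField:
#     class TagM2MTest(models.Model):
#         tags = CustomManyToManyField("Tag")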
class InheritedManyToManyField(ManyToManyField):
pass
class MediumBlobField(models.BinaryField):
"""
A MySQL BinaryField that uses a different blob size.
"""
def db_type(self, connection):
return 'MEDIUMBLOB'
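# A minimal sketch (hypothetical model, not part of this module); on
# MySQL the column below is created as MEDIUMBLOB rather than the
# default BLOB:
#     class Document(models.Model):
#         payload = MediumBlobField(null=True)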
|
lucienfostier/gaffer | refs/heads/master | doc/source/WorkingWithThePythonScriptingAPI/TutorialStartupConfig2/screengrab.py | 3 | # BuildTarget: images/tutorialBookmarks.png
# BuildTarget: images/tutorialDefaultBookmark.png
# BuildTarget: images/tutorialDefaultImageNodeBookmark.png
# BuildTarget: images/tutorialDefaultImageNodePath.png
import os
import subprocess32 as subprocess
import tempfile
import time
import Gaffer
import GafferUI
import GafferSceneUI
scriptWindow = GafferUI.ScriptWindow.acquire( script )
viewer = scriptWindow.getLayout().editors( GafferUI.Viewer )[0]
graphEditor = scriptWindow.getLayout().editors( GafferUI.GraphEditor )[0]
hierarchyView = scriptWindow.getLayout().editors( GafferSceneUI.HierarchyView )[0]
# Delay for `delay` seconds, pumping the UI event loop so pending events are processed
def __delay( delay ) :
endtime = time.time() + delay
while time.time() < endtime :
GafferUI.EventLoop.waitForIdle( 1 )
# Create a random directory in `/tmp` for the dispatcher's `jobsDirectory`, so we don't clutter the user's `~gaffer` directory
__temporaryDirectory = tempfile.mkdtemp( prefix = "gafferDocs" )
def __getTempFilePath( fileName, directory = __temporaryDirectory ) :
filePath = "/".join( ( directory, fileName ) )
return filePath
def __dispatchScript( script, tasks, settings ) :
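	# Each settings value arrives pre-wrapped as '"value"' so the shell
	# passes a literal quoted string through to `gaffer dispatch -settings`.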
command = "gaffer dispatch -script {} -tasks {} -dispatcher Local -settings {} -dispatcher.jobsDirectory '\"{}/dispatcher/local\"'".format(
script,
" ".join( tasks ),
" ".join( settings ),
__temporaryDirectory
)
process = subprocess.Popen( command, shell=True, stderr = subprocess.PIPE )
process.wait()
return process
# Tutorial: bookmarks in a file browser
__imageName = "tutorialBookmarks"
__tempImagePath = __getTempFilePath( "{}.png".format( __imageName ) )
__rootPath = Gaffer.FileSystemPath( os.path.expandvars( "$GAFFER_ROOT" ) )
__bookmarks = GafferUI.Bookmarks.acquire( application, Gaffer.FileSystemPath )
__fileBrowser = GafferUI.PathChooserDialogue( __rootPath, bookmarks = __bookmarks )
__fileBrowser.setVisible( True )
__delay( 0.1 )
__pathChooser = __fileBrowser.pathChooserWidget()
__button = __pathChooser._PathChooserWidget__bookmarksButton
__button._qtWidget().click()
GafferUI.WidgetAlgo.grab( widget = __fileBrowser, imagePath = __tempImagePath )
__dispatchScript(
script = "scripts/{}_edit.gfr".format( __imageName ),
tasks = [ "ImageWriter" ],
settings = [
"-ImageReader.fileName '\"{}\"'".format( __tempImagePath ),
"-ImageWriter.fileName '\"{}\"'".format( os.path.abspath( "images/{}.png".format( __imageName ) ) )
]
)
__fileBrowser.setVisible( False )
# Tutorial: default bookmark in file browser
__imageName = "tutorialDefaultBookmark"
__tempImagePath = __getTempFilePath( "{}.png".format( __imageName ) )
__bookmarks.add( "Resources", "/" )
__fileBrowser = GafferUI.PathChooserDialogue( __rootPath, bookmarks = __bookmarks )
__fileBrowser.setVisible( True )
__delay( 0.1 )
__pathChooser = __fileBrowser.pathChooserWidget()
__button = __pathChooser._PathChooserWidget__bookmarksButton
__button._qtWidget().click()
GafferUI.WidgetAlgo.grab( widget = __fileBrowser, imagePath = __tempImagePath )
__dispatchScript(
script = "scripts/{}_edit.gfr".format( __imageName ),
tasks = [ "ImageWriter" ],
settings = [
"-ImageReader.fileName '\"{}\"'".format( __tempImagePath ),
"-ImageWriter.fileName '\"{}\"'".format( os.path.abspath( "images/{}.png".format( __imageName ) ) )
]
)
__fileBrowser.setVisible( False )
# Tutorial: default bookmark in image node file browser
__imageName = "tutorialDefaultImageNodeBookmark"
__tempImagePath = __getTempFilePath( "{}.png".format( __imageName ) )
__bookmarks = GafferUI.Bookmarks.acquire( application, Gaffer.FileSystemPath, "image" )
__bookmarks.add( "Pictures", "/" )
__fileBrowser = GafferUI.PathChooserDialogue( __rootPath, bookmarks = __bookmarks )
__fileBrowser.setVisible( True )
__delay( 0.1 )
__pathChooser = __fileBrowser.pathChooserWidget()
__button = __pathChooser._PathChooserWidget__bookmarksButton
__button._qtWidget().click()
GafferUI.WidgetAlgo.grab( widget = __fileBrowser, imagePath = __tempImagePath )
__dispatchScript(
script = "scripts/{}_edit.gfr".format( __imageName ),
tasks = [ "ImageWriter" ],
settings = [
"-ImageReader.fileName '\"{}\"'".format( __tempImagePath ),
"-ImageWriter.fileName '\"{}\"'".format( os.path.abspath( "images/{}.png".format( __imageName ) ) )
]
)
__fileBrowser.setVisible( False )
# Tutorial: default path in image node file browser
__rootPath = Gaffer.FileSystemPath( os.path.expandvars( "$HOME/Pictures" ) )
__bookmarks = GafferUI.Bookmarks.acquire( application, Gaffer.FileSystemPath, "image" )
__fileBrowser = GafferUI.PathChooserDialogue( __rootPath, bookmarks = __bookmarks )
__fileBrowser.setVisible( True )
__delay( 0.1 )
GafferUI.WidgetAlgo.grab( __fileBrowser, "images/tutorialDefaultImageNodePath.png" )
|
dagnello/ansible-modules-core | refs/heads/devel | packaging/os/apt_rpm.py | 192 | #!/usr/bin/python -tt
# -*- coding: utf-8 -*-
# (c) 2013, Evgenii Terechkov
# Written by Evgenii Terechkov <[email protected]>
# Based on urpmi module written by Philippe Makowski <[email protected]>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: apt_rpm
short_description: apt_rpm package manager
description:
- Manages packages with I(apt-rpm). Both low-level (I(rpm)) and high-level (I(apt-get)) package manager binaries required.
version_added: "1.5"
options:
pkg:
description:
- name of package to install, upgrade or remove.
required: true
default: null
state:
description:
- Indicates the desired package state
required: false
    default: installed
    choices: [ "absent", "present", "installed", "removed" ]
update_cache:
description:
      - update the package database first with C(apt-get update).
required: false
default: no
choices: [ "yes", "no" ]
author: "Evgenii Terechkov (@evgkrsk)"
notes: []
'''
EXAMPLES = '''
# install package foo
- apt_rpm: pkg=foo state=present
# remove package foo
- apt_rpm: pkg=foo state=absent
# remove packages foo and bar
- apt_rpm: pkg=foo,bar state=absent
# update the package database and install bar (bar will be upgraded if a newer version exists)
- apt_rpm: name=bar state=present update_cache=yes
'''
try:
import json
except ImportError:
import simplejson as json
import shlex
import os
import sys
APT_PATH="/usr/bin/apt-get"
RPM_PATH="/usr/bin/rpm"
def query_package(module, name):
# rpm -q returns 0 if the package is installed,
# 1 if it is not installed
rc = os.system("%s -q %s" % (RPM_PATH,name))
if rc == 0:
return True
else:
return False
def query_package_provides(module, name):
    # rpm -q --provides returns 0 if the package is installed,
    # 1 if it is not installed
rc = os.system("%s -q --provides %s >/dev/null" % (RPM_PATH,name))
return rc == 0
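# A hedged sketch (illustrative, not used by this module) of the same
# exit-code check done with subprocess instead of os.system, which avoids
# shell interpolation of the package name:
import subprocess
def _query_package_sketch(name):
    with open(os.devnull, "w") as devnull:
        return subprocess.call([RPM_PATH, "-q", name], stdout=devnull) == 0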
def update_package_db(module):
rc = os.system("%s update" % APT_PATH)
if rc != 0:
module.fail_json(msg="could not update package db")
def remove_packages(module, packages):
remove_c = 0
    # Using a for loop so that, in case of error, we can report the package that failed
for package in packages:
# Query the package first, to see if we even need to remove
if not query_package(module, package):
continue
rc = os.system("%s -y remove %s > /dev/null" % (APT_PATH,package))
if rc != 0:
module.fail_json(msg="failed to remove %s" % (package))
remove_c += 1
if remove_c > 0:
module.exit_json(changed=True, msg="removed %s package(s)" % remove_c)
module.exit_json(changed=False, msg="package(s) already absent")
def install_packages(module, pkgspec):
packages = ""
for package in pkgspec:
if not query_package_provides(module, package):
packages += "'%s' " % package
if len(packages) != 0:
cmd = ("%s -y install %s > /dev/null" % (APT_PATH, packages))
rc, out, err = module.run_command(cmd)
installed = True
        for package in pkgspec:
if not query_package_provides(module, package):
installed = False
# apt-rpm always have 0 for exit code if --force is used
if rc or not installed:
module.fail_json(msg="'apt-get -y install %s' failed: %s" % (packages, err))
else:
module.exit_json(changed=True, msg="%s present(s)" % packages)
else:
module.exit_json(changed=False)
def main():
module = AnsibleModule(
argument_spec = dict(
state = dict(default='installed', choices=['installed', 'removed', 'absent', 'present']),
update_cache = dict(default=False, aliases=['update-cache'], type='bool'),
package = dict(aliases=['pkg', 'name'], required=True)))
if not os.path.exists(APT_PATH) or not os.path.exists(RPM_PATH):
module.fail_json(msg="cannot find /usr/bin/apt-get and/or /usr/bin/rpm")
p = module.params
if p['update_cache']:
update_package_db(module)
packages = p['package'].split(',')
if p['state'] in [ 'installed', 'present' ]:
install_packages(module, packages)
elif p['state'] in [ 'removed', 'absent' ]:
remove_packages(module, packages)
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
main()
|
javachengwc/hue | refs/heads/master | desktop/core/ext-py/South-1.0.2/south/migration/utils.py | 129 | import sys
from collections import deque
from django.utils.datastructures import SortedDict
from django.db import models
from south import exceptions
class SortedSet(SortedDict):
def __init__(self, data=tuple()):
self.extend(data)
def __str__(self):
return "SortedSet(%s)" % list(self)
def add(self, value):
self[value] = True
def remove(self, value):
del self[value]
    def extend(self, iterable):
        for k in iterable:
            self.add(k)
def get_app_label(app):
"""
Returns the _internal_ app label for the given app module.
i.e. for <module django.contrib.auth.models> will return 'auth'
"""
return app.__name__.split('.')[-2]
def app_label_to_app_module(app_label):
"""
Given the app label, returns the module of the app itself (unlike models.get_app,
which returns the models module)
"""
# Get the models module
app = models.get_app(app_label)
module_name = ".".join(app.__name__.split(".")[:-1])
try:
module = sys.modules[module_name]
except KeyError:
__import__(module_name, {}, {}, [''])
module = sys.modules[module_name]
return module
def flatten(*stack):
stack = deque(stack)
while stack:
try:
x = next(stack[0])
except TypeError:
stack[0] = iter(stack[0])
x = next(stack[0])
except StopIteration:
stack.popleft()
continue
if hasattr(x, '__iter__') and not isinstance(x, str):
stack.appendleft(x)
else:
yield x
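# A small usage sketch (not part of South): nested iterables collapse
# depth-first into a single stream, while strings are left intact.
def _flatten_example():
    assert list(flatten([1, [2, [3, "ab"]], 5])) == [1, 2, 3, "ab", 5]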
dependency_cache = {}
def _dfs(start, get_children, path):
if (start, get_children) in dependency_cache:
return dependency_cache[(start, get_children)]
results = []
if start in path:
raise exceptions.CircularDependency(path[path.index(start):] + [start])
path.append(start)
results.append(start)
children = sorted(get_children(start), key=lambda x: str(x))
# We need to apply all the migrations this one depends on
for n in children:
results = _dfs(n, get_children, path) + results
path.pop()
results = list(SortedSet(results))
dependency_cache[(start, get_children)] = results
return results
def dfs(start, get_children):
return _dfs(start, get_children, [])
def depends(start, get_children):
return dfs(start, get_children)
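# A small usage sketch (not part of South) of dependency ordering with
# `dfs`: children are prerequisites, so they come before their dependents.
def _depends_example():
    graph = {'a': ['b', 'c'], 'b': ['c'], 'c': []}
    assert dfs('a', lambda node: graph[node]) == ['c', 'b', 'a']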
|
tqchen/tvm | refs/heads/master | tests/python/unittest/test_te_schedule_postproc_rewrite_for_tensor_core.py | 4 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# 'License'); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
from tvm import topi
import numpy as np
import tvm.testing
def tensor_core_matmul(warp_tile_m=16, m=64, n=32, l=96):
A = te.placeholder((n, l), name="A", dtype="float16")
B = te.placeholder((l, m), name="B", dtype="float16")
k = te.reduce_axis((0, l), name="k")
C = te.compute(
(n, m), lambda i, j: te.sum(A[i, k].astype("float32") * B[k, j].astype("float32"), axis=k)
)
s = te.create_schedule(C.op)
y, x = s[C].op.axis
k = s[C].op.reduce_axis[0]
AA = s.cache_read(A, "shared", [C])
AL = s.cache_read(AA, "local", [C])
BB = s.cache_read(B, "shared", [C])
BL = s.cache_read(BB, "local", [C])
CL = s.cache_write(C, "local")
bx = 4
by = 32
step_k = 8
v = 4
TX = 8
TY = 1
tile_x = bx * TX
tile_y = by * TY
WX = min(warp_tile_m, tile_x)
tile_k = 16
vthread = 1
yo, ty = s[C].split(y, tile_y * vthread)
vy, ty = s[C].split(ty, tile_y)
ty, yi = s[C].split(ty, TY)
xo, xi = s[C].split(x, tile_x)
tz, xi = s[C].split(xi, WX)
tx, xi = s[C].split(xi, TX)
ko, ki = s[CL].split(k, step_k * tile_k)
kl, ki = s[CL].split(ki, tile_k)
s[C].reorder(yo, xo, tz, ty, tx, yi, xi)
s[C].bind(yo, te.thread_axis("blockIdx.y"))
s[C].bind(xo, te.thread_axis("blockIdx.x"))
s[C].bind(ty, te.thread_axis("threadIdx.y"))
s[C].bind(tz, te.thread_axis("threadIdx.z"))
s[C].bind(tx, te.thread_axis("threadIdx.x"))
s[C].bind(vy, te.thread_axis((0, vthread), "vthread", name="vy"))
s[CL].compute_at(s[C], tx)
yo, xo = CL.op.axis
s[CL].reorder(ko, kl, ki, yo, xo)
s[AA].compute_at(s[CL], ko)
xo, xi = s[AA].split(s[AA].op.axis[1], factor=bx * v)
tz, tx = s[AA].split(xi, factor=(WX // TX) * v)
tx, vec = s[AA].split(tx, factor=v)
fused = s[AA].fuse(s[AA].op.axis[0], xo)
_, ty = s[AA].split(fused, factor=by)
s[AA].bind(ty, te.thread_axis("threadIdx.y"))
s[AA].bind(tz, te.thread_axis("threadIdx.z"))
s[AA].bind(tx, te.thread_axis("threadIdx.x"))
s[AA].vectorize(vec)
s[BB].compute_at(s[CL], ko)
xo, xi = s[BB].split(s[BB].op.axis[1], factor=bx * v)
tz, tx = s[BB].split(xi, factor=(WX // TX) * v)
tx, vec = s[BB].split(tx, factor=v)
fused = s[BB].fuse(s[BB].op.axis[0], xo)
_, ty = s[BB].split(fused, factor=by)
s[BB].bind(ty, te.thread_axis("threadIdx.y"))
s[BB].bind(tz, te.thread_axis("threadIdx.z"))
s[BB].bind(tx, te.thread_axis("threadIdx.x"))
s[BB].vectorize(vec)
s[AL].compute_at(s[CL], kl)
s[BL].compute_at(s[CL], kl)
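    # Mark the reduction scope for the TensorCore post-processing pass under
    # test; it rewrites the fragment loads/stores and the MMA step into wmma
    # intrinsics.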
s[CL].pragma(ko, "tensor_core")
func = tvm.build(s, [A, B, C], "cuda")
ctx = tvm.gpu(0)
a_np = np.random.uniform(size=(n, l)).astype(A.dtype)
b_np = np.random.uniform(size=(l, m)).astype(B.dtype)
a = tvm.nd.array(a_np, ctx)
b = tvm.nd.array(b_np, ctx)
c = tvm.nd.array(np.zeros((n, m), dtype=C.dtype), ctx)
func(a, b, c)
evaluator = func.time_evaluator(func.entry_name, ctx, number=3)
print("gemm m=%d n=%d k=%d: %f ms" % (m, n, l, evaluator(a, b, c).mean * 1e3))
c_np = np.dot(a_np, b_np)
np.testing.assert_allclose(c_np, c.asnumpy(), rtol=1e-3)
def tensor_core_batch_matmul(warp_tile_m=16, m=64, n=32, l=96, batch=2):
A = te.placeholder((batch, n, l), name="A", dtype="float16")
B = te.placeholder((batch, l, m), name="B", dtype="float16")
k = te.reduce_axis((0, l), name="k")
C = te.compute(
(batch, n, m), lambda b, i, j: te.sum((A[b, i, k] * B[b, k, j]).astype("float32"), axis=k)
)
s = te.create_schedule(C.op)
z, y, x = s[C].op.axis
k = s[C].op.reduce_axis[0]
AA = s.cache_read(A, "shared", [C])
AL = s.cache_read(AA, "local", [C])
BB = s.cache_read(B, "shared", [C])
BL = s.cache_read(BB, "local", [C])
CL = s.cache_write(C, "local")
bx = 2
by = 32
step_k = 8
v = 4
TX = 8
TY = 1
tile_x = bx * TX
tile_y = by * TY
WX = min(warp_tile_m, tile_x)
tile_k = 16
vthread = 1
yo, ty = s[C].split(y, tile_y * vthread)
vy, ty = s[C].split(ty, tile_y)
ty, yi = s[C].split(ty, TY)
xo, xi = s[C].split(x, tile_x)
tz, xi = s[C].split(xi, WX)
tx, xi = s[C].split(xi, TX)
ko, ki = s[CL].split(k, step_k * tile_k)
kl, ki = s[CL].split(ki, tile_k)
s[C].reorder(z, yo, xo, tz, ty, tx, yi, xi)
s[C].bind(z, te.thread_axis("blockIdx.z"))
s[C].bind(yo, te.thread_axis("blockIdx.y"))
s[C].bind(xo, te.thread_axis("blockIdx.x"))
s[C].bind(ty, te.thread_axis("threadIdx.y"))
s[C].bind(tz, te.thread_axis("threadIdx.z"))
s[C].bind(tx, te.thread_axis("threadIdx.x"))
s[C].bind(vy, te.thread_axis((0, vthread), "vthread", name="vy"))
s[CL].compute_at(s[C], tx)
zo, yo, xo = CL.op.axis
s[CL].reorder(ko, kl, ki, zo, yo, xo)
s[AA].compute_at(s[CL], ko)
xo, xi = s[AA].split(s[AA].op.axis[2], factor=bx * v)
tz, tx = s[AA].split(xi, factor=(WX // TX) * v)
tx, vec = s[AA].split(tx, factor=v)
fused = s[AA].fuse(s[AA].op.axis[1], xo)
_, ty = s[AA].split(fused, factor=by)
s[AA].bind(ty, te.thread_axis("threadIdx.y"))
s[AA].bind(tz, te.thread_axis("threadIdx.z"))
s[AA].bind(tx, te.thread_axis("threadIdx.x"))
s[AA].vectorize(vec)
s[BB].compute_at(s[CL], ko)
xo, xi = s[BB].split(s[BB].op.axis[2], factor=bx * v)
tz, tx = s[BB].split(xi, factor=(WX // TX) * v)
tx, vec = s[BB].split(tx, factor=v)
fused = s[BB].fuse(s[BB].op.axis[1], xo)
_, ty = s[BB].split(fused, factor=by)
s[BB].bind(ty, te.thread_axis("threadIdx.y"))
s[BB].bind(tz, te.thread_axis("threadIdx.z"))
s[BB].bind(tx, te.thread_axis("threadIdx.x"))
s[BB].vectorize(vec)
s[AL].compute_at(s[CL], kl)
s[BL].compute_at(s[CL], kl)
s[CL].pragma(ko, "tensor_core")
func = tvm.build(s, [A, B, C], "cuda")
ctx = tvm.gpu(0)
a_np = np.random.uniform(size=(batch, n, l)).astype(A.dtype)
b_np = np.random.uniform(size=(batch, l, m)).astype(B.dtype)
c_np = np.zeros((batch, n, m), dtype=np.float32)
a = tvm.nd.array(a_np, ctx)
b = tvm.nd.array(b_np, ctx)
c = tvm.nd.array(np.zeros((batch, n, m), dtype=C.dtype), ctx)
func(a, b, c)
evaluator = func.time_evaluator(func.entry_name, ctx, number=3)
print(
"batch gemm m=%d n=%d k=%d batch=%d: %f ms"
% (m, n, l, batch, evaluator(a, b, c).mean * 1e3)
)
for bs in range(batch):
c_np[bs, :, :] = np.dot(a_np[bs, :, :], b_np[bs, :, :])
np.testing.assert_allclose(c_np, c.asnumpy(), rtol=1e-3)
@tvm.testing.requires_tensorcore
def test_tensor_core_matmul():
tensor_core_matmul(16) # test with warp_tile 16x16x16
tensor_core_matmul(8) # test with warp_tile 8x32x16
tensor_core_matmul(32) # test with warp_tile 32x8x16
@tvm.testing.requires_tensorcore
def test_tensor_core_batch_matmul():
tensor_core_batch_matmul()
if __name__ == "__main__":
test_tensor_core_matmul()
test_tensor_core_batch_matmul()
|
ministryofjustice/cla_public | refs/heads/master | cla_public/apps/base/forms.py | 1 | # coding: utf-8
"Base forms"
from flask import render_template, current_app, request
from flask_wtf import Form
from flask.ext.babel import lazy_gettext as _, get_translations
from wtforms import TextAreaField, RadioField, SelectMultipleField, StringField, widgets
from wtforms.validators import InputRequired, Length
from cla_public.apps.base.constants import HELP_FILLING_IN_FORM, REASONS_FOR_CONTACTING_CHOICES, REASONS_FOR_CONTACTING
from cla_public.libs.honeypot import Honeypot
class BabelTranslations(object):
def gettext(self, string):
t = get_translations()
if t is None:
return string
return t.ugettext(string)
def ngettext(self, singular, plural, num):
variables = {"num": num}
t = get_translations()
if t is None:
return (singular if num == 1 else plural) % variables
return t.ungettext(singular, plural, num) % variables
class BabelTranslationsFormMixin(object):
def _get_translations(self):
return BabelTranslations()
_textarea_length_validator = Length(max=1000, message=u"Field cannot contain more than %(max)d characters")
class FeedbackForm(Honeypot, BabelTranslationsFormMixin, Form):
referrer = StringField(widget=widgets.HiddenInput())
difficulty = TextAreaField(
label=_(u"Did you have difficulty using this service? Tell us about the problem."),
validators=[_textarea_length_validator],
)
ideas = TextAreaField(
label=_(u"Do you have any ideas for how it could be improved?"), validators=[_textarea_length_validator]
)
help_filling_in_form = RadioField(
_(u"Did you have any help filling in this form?"), choices=HELP_FILLING_IN_FORM, validators=[InputRequired()]
)
def api_payload(self):
user_agent = request.headers.get("User-Agent")
comment_body = render_template("emails/zendesk-feedback.txt", form=self, user_agent=user_agent)
environment = current_app.config["CLA_ENV"]
subject = "CLA Public Feedback"
if environment != "production":
subject = "[TEST] - " + subject
ticket = {
"requester_id": current_app.config["ZENDESK_DEFAULT_REQUESTER"],
"subject": subject,
"comment": {"body": comment_body},
"group_id": 23832817, # CLA Public
"tags": ["feedback", "civil_legal_advice_public"],
"custom_fields": [
{"id": 23791776, "value": user_agent}, # Browser field
{"id": 26047167, "value": self.referrer.data}, # Referrer URL field
],
}
return {"ticket": ticket}
class ReasonsForContactingForm(Honeypot, BabelTranslationsFormMixin, Form):
"""
Interstitial form to ascertain why users are dropping out of
the checker service
"""
referrer = StringField(widget=widgets.HiddenInput())
reasons = SelectMultipleField(
label=_(u"You can select more than one option"),
choices=REASONS_FOR_CONTACTING_CHOICES,
widget=widgets.ListWidget(prefix_label=False),
option_widget=widgets.CheckboxInput(),
)
other_reasons = TextAreaField(label=_(u"Please specify"), validators=[_textarea_length_validator])
REASONS_FOR_CONTACTING_OTHER = REASONS_FOR_CONTACTING.OTHER
def api_payload(self):
return {
"reasons": [{"category": category} for category in self.reasons.data],
"other_reasons": self.other_reasons.data or "",
"user_agent": request.headers.get("User-Agent") or "Unknown",
"referrer": self.referrer.data or "Unknown",
}
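# A hedged sketch (route and template names are illustrative, not taken
# from this app) of driving ReasonsForContactingForm from a Flask view:
#     @base_app.route('/reasons-for-contacting', methods=['GET', 'POST'])
#     def reasons_for_contacting():
#         form = ReasonsForContactingForm()
#         if form.validate_on_submit():
#             payload = form.api_payload()  # hand off to the backend API
#         return render_template('reasons-for-contacting.html', form=form)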
|
jonparrott/billing-export-python | refs/heads/master | test/test.py | 2 | from datetime import date
import json
import logging
import os
import unittest
import cloudstorage as gcs
import main
import webapp2
import webtest
from google.appengine.ext import testbed
class TestParseData(unittest.TestCase):
"""Tests parsing billing export data into a datatable."""
def LoadTestData(self):
data_dir = 'test/data/exports'
for file_name in os.listdir(data_dir):
local_data_file = open(os.sep.join([data_dir, file_name]))
gcs_data_file = gcs.open(main.BUCKET + '/' + file_name, 'w')
gcs_data_file.write(local_data_file.read())
gcs_data_file.close()
local_data_file.close()
def setUp(self):
#logging.basicConfig(level=logging.DEBUG)
self.testbed = testbed.Testbed()
self.testbed.setup_env(app_id='_')
self.testbed.activate()
self.testbed.init_all_stubs()
main.UseLocalGCS()
self.LoadTestData()
app = webapp2.WSGIApplication([('/objectChangeNotification',
main.ObjectChangeNotification)])
self.testapp = webtest.TestApp(app)
def testTotalRelativeDifferenceAlert(self):
compute_engine_alert = main.Alert()
compute_engine_alert.range = main.AlertRange.ONE_DAY
compute_engine_alert.trigger = main.AlertTrigger.RELATIVE_CHANGE
compute_engine_alert.target_value = 'Total'
compute_engine_alert.trigger_value = 1.1
v = compute_engine_alert.isAlertTriggered('google-platform-demo',
date(2014, 02, 01))
assert v
def testSKURelativeDifferenceAlert(self):
compute_engine_alert = main.Alert()
compute_engine_alert.range = main.AlertRange.ONE_WEEK
compute_engine_alert.trigger = main.AlertTrigger.RELATIVE_CHANGE
compute_engine_alert.target_value = 'Cloud/compute-engine'
compute_engine_alert.trigger_value = 300
v = compute_engine_alert.isAlertTriggered('google-platform-demo',
date(2014, 02, 01))
assert v
def testNotSKURelativeDifferenceAlert(self):
compute_engine_alert = main.Alert()
compute_engine_alert.range = main.AlertRange.ONE_WEEK
compute_engine_alert.trigger = main.AlertTrigger.RELATIVE_CHANGE
compute_engine_alert.target_value = 'Cloud/compute-engine'
compute_engine_alert.trigger_value = 400
v = compute_engine_alert.isAlertTriggered('google-platform-demo',
date(2014, 02, 01))
assert not v
  def testNegativeSKURelativeDifferenceAlert(self):
compute_engine_alert = main.Alert()
compute_engine_alert.range = main.AlertRange.ONE_WEEK
compute_engine_alert.trigger = main.AlertTrigger.RELATIVE_CHANGE
compute_engine_alert.target_value = 'Cloud/compute-engine'
compute_engine_alert.trigger_value = -300
v = compute_engine_alert.isAlertTriggered('google-platform-demo',
date(2014, 02, 01))
assert not v
def testTotalDifferenceAlert(self):
# data_table = main.GetAllBillingDataTable('google-platform-demo')
compute_engine_alert = main.Alert()
compute_engine_alert.range = main.AlertRange.ONE_DAY
compute_engine_alert.target_value = 'Total'
compute_engine_alert.trigger = main.AlertTrigger.TOTAL_CHANGE
compute_engine_alert.trigger_value = 10.00
v = compute_engine_alert.isAlertTriggered('google-platform-demo',
date(2014, 02, 04))
self.assertTrue(v)
    # Daily totals in the test data: 167.330166 (2014-02-03) vs
    # 184.935689 (2014-02-04); the ~17.61 change exceeds the 10.00 trigger.
def testSimpleObjectChangeNotification(self):
data_dir = 'test/data/notifications'
for file_name in os.listdir(data_dir):
local_notification = open(os.sep.join([data_dir, file_name])).read()
notification_dict = json.loads(local_notification)
response = self.testapp.post_json('/objectChangeNotification',
notification_dict)
logging.debug(repr(response))
self.assertEqual(response.status_int, 200)
def testEmptyObjectChangeNotification(self):
data_dir = 'test/data/notifications'
for file_name in os.listdir(data_dir):
response = self.testapp.post('/objectChangeNotification')
logging.debug(repr(response))
self.assertEqual(response.status_int, 200)
def testDailySummaryObjectChangeNotification(self):
data_dir = 'test/data/notifications'
for file_name in os.listdir(data_dir):
local_notification = open(os.sep.join([data_dir, file_name])).read()
notification_dict = json.loads(local_notification)
project_date = main.MatchProjectDate(file_name)
subscription = main.Subscription.getInstance(project_date[0])
subscription.daily_summary = True
response = self.testapp.post_json('/objectChangeNotification',
notification_dict)
logging.debug(repr(response))
self.assertEqual(response.status_int, 200)
def testAlertSummaryObjectChangeNotification(self):
data_dir = 'test/data/notifications'
file_name = 'google-platform-demo-2014-02-04.json'
project_date = main.MatchProjectDate(file_name)
compute_engine_alert = main.Alert(parent=main.Alert.entity_group)
compute_engine_alert.name = 'Test Compute Engine Alert Alert'
compute_engine_alert.range = main.AlertRange.ONE_DAY
compute_engine_alert.target_value = 'Total'
compute_engine_alert.trigger = main.AlertTrigger.TOTAL_CHANGE
compute_engine_alert.trigger_value = 10.00
compute_engine_alert.project = project_date[0]
compute_engine_alert.put()
subscription = main.Subscription.getInstance(project_date[0])
subscription.daily_summary = False
local_notification = open(os.sep.join([data_dir, file_name])).read()
notification_dict = json.loads(local_notification)
project_date = main.MatchProjectDate(file_name)
subscription = main.Subscription.getInstance(project_date[0])
subscription.daily_summary = True
response = self.testapp.post_json('/objectChangeNotification',
notification_dict)
logging.debug(repr(response))
self.assertEqual(response.status_int, 200)
def tearDown(self):
# for gcs_object in gcs.listbucket(main.BUCKET):
# gcs.delete(gcs_object.filename)
self.testbed.deactivate()
|
carnotweat/cpupimp | refs/heads/master | libs/bs4/__init__.py | 417 | """Beautiful Soup
Elixir and Tonic
"The Screen-Scraper's Friend"
http://www.crummy.com/software/BeautifulSoup/
Beautiful Soup uses a pluggable XML or HTML parser to parse a
(possibly invalid) document into a tree representation. Beautiful Soup
provides provides methods and Pythonic idioms that make it easy to
navigate, search, and modify the parse tree.
Beautiful Soup works with Python 2.6 and up. It works better if lxml
and/or html5lib is installed.
For more than you ever wanted to know about Beautiful Soup, see the
documentation:
http://www.crummy.com/software/BeautifulSoup/bs4/doc/
"""
__author__ = "Leonard Richardson ([email protected])"
__version__ = "4.3.2"
__copyright__ = "Copyright (c) 2004-2013 Leonard Richardson"
__license__ = "MIT"
__all__ = ['BeautifulSoup']
import os
import re
import warnings
from .builder import builder_registry, ParserRejectedMarkup
from .dammit import UnicodeDammit
from .element import (
CData,
Comment,
DEFAULT_OUTPUT_ENCODING,
Declaration,
Doctype,
NavigableString,
PageElement,
ProcessingInstruction,
ResultSet,
SoupStrainer,
Tag,
)
# The very first thing we do is give a useful error if someone is
# running this code under Python 3 without converting it.
syntax_error = u'You are trying to run the Python 2 version of Beautiful Soup under Python 3. This will not work. You need to convert the code, either by installing it (`python setup.py install`) or by running 2to3 (`2to3 -w bs4`).'
class BeautifulSoup(Tag):
"""
This class defines the basic interface called by the tree builders.
These methods will be called by the parser:
reset()
feed(markup)
The tree builder may call these methods from its feed() implementation:
handle_starttag(name, attrs) # See note about return value
handle_endtag(name)
handle_data(data) # Appends to the current data node
endData(containerClass=NavigableString) # Ends the current data node
No matter how complicated the underlying parser is, you should be
able to build a tree using 'start tag' events, 'end tag' events,
'data' events, and "done with data" events.
If you encounter an empty-element tag (aka a self-closing tag,
like HTML's <br> tag), call handle_starttag and then
handle_endtag.
"""
ROOT_TAG_NAME = u'[document]'
# If the end-user gives no indication which tree builder they
# want, look for one with these features.
DEFAULT_BUILDER_FEATURES = ['html', 'fast']
ASCII_SPACES = '\x20\x0a\x09\x0c\x0d'
def __init__(self, markup="", features=None, builder=None,
parse_only=None, from_encoding=None, **kwargs):
"""The Soup object is initialized as the 'root tag', and the
provided markup (which can be a string or a file-like object)
is fed into the underlying parser."""
if 'convertEntities' in kwargs:
warnings.warn(
"BS4 does not respect the convertEntities argument to the "
"BeautifulSoup constructor. Entities are always converted "
"to Unicode characters.")
if 'markupMassage' in kwargs:
del kwargs['markupMassage']
warnings.warn(
"BS4 does not respect the markupMassage argument to the "
"BeautifulSoup constructor. The tree builder is responsible "
"for any necessary markup massage.")
if 'smartQuotesTo' in kwargs:
del kwargs['smartQuotesTo']
warnings.warn(
"BS4 does not respect the smartQuotesTo argument to the "
"BeautifulSoup constructor. Smart quotes are always converted "
"to Unicode characters.")
if 'selfClosingTags' in kwargs:
del kwargs['selfClosingTags']
warnings.warn(
"BS4 does not respect the selfClosingTags argument to the "
"BeautifulSoup constructor. The tree builder is responsible "
"for understanding self-closing tags.")
if 'isHTML' in kwargs:
del kwargs['isHTML']
warnings.warn(
"BS4 does not respect the isHTML argument to the "
"BeautifulSoup constructor. You can pass in features='html' "
"or features='xml' to get a builder capable of handling "
"one or the other.")
def deprecated_argument(old_name, new_name):
if old_name in kwargs:
warnings.warn(
'The "%s" argument to the BeautifulSoup constructor '
'has been renamed to "%s."' % (old_name, new_name))
value = kwargs[old_name]
del kwargs[old_name]
return value
return None
parse_only = parse_only or deprecated_argument(
"parseOnlyThese", "parse_only")
from_encoding = from_encoding or deprecated_argument(
"fromEncoding", "from_encoding")
if len(kwargs) > 0:
arg = kwargs.keys().pop()
raise TypeError(
"__init__() got an unexpected keyword argument '%s'" % arg)
if builder is None:
if isinstance(features, basestring):
features = [features]
if features is None or len(features) == 0:
features = self.DEFAULT_BUILDER_FEATURES
builder_class = builder_registry.lookup(*features)
if builder_class is None:
raise FeatureNotFound(
"Couldn't find a tree builder with the features you "
"requested: %s. Do you need to install a parser library?"
% ",".join(features))
builder = builder_class()
self.builder = builder
self.is_xml = builder.is_xml
self.builder.soup = self
self.parse_only = parse_only
if hasattr(markup, 'read'): # It's a file-type object.
markup = markup.read()
elif len(markup) <= 256:
# Print out warnings for a couple beginner problems
# involving passing non-markup to Beautiful Soup.
# Beautiful Soup will still parse the input as markup,
# just in case that's what the user really wants.
if (isinstance(markup, unicode)
and not os.path.supports_unicode_filenames):
possible_filename = markup.encode("utf8")
else:
possible_filename = markup
is_file = False
try:
is_file = os.path.exists(possible_filename)
except Exception, e:
# This is almost certainly a problem involving
# characters not valid in filenames on this
# system. Just let it go.
pass
if is_file:
warnings.warn(
'"%s" looks like a filename, not markup. You should probably open this file and pass the filehandle into Beautiful Soup.' % markup)
if markup[:5] == "http:" or markup[:6] == "https:":
# TODO: This is ugly but I couldn't get it to work in
# Python 3 otherwise.
if ((isinstance(markup, bytes) and not b' ' in markup)
or (isinstance(markup, unicode) and not u' ' in markup)):
warnings.warn(
'"%s" looks like a URL. Beautiful Soup is not an HTTP client. You should probably use an HTTP client to get the document behind the URL, and feed that document to Beautiful Soup.' % markup)
for (self.markup, self.original_encoding, self.declared_html_encoding,
self.contains_replacement_characters) in (
self.builder.prepare_markup(markup, from_encoding)):
self.reset()
try:
self._feed()
break
except ParserRejectedMarkup:
pass
# Clear out the markup and remove the builder's circular
# reference to this object.
self.markup = None
self.builder.soup = None
def _feed(self):
# Convert the document to Unicode.
self.builder.reset()
self.builder.feed(self.markup)
# Close out any unfinished strings and close all the open tags.
self.endData()
while self.currentTag.name != self.ROOT_TAG_NAME:
self.popTag()
def reset(self):
Tag.__init__(self, self, self.builder, self.ROOT_TAG_NAME)
self.hidden = 1
self.builder.reset()
self.current_data = []
self.currentTag = None
self.tagStack = []
self.preserve_whitespace_tag_stack = []
self.pushTag(self)
def new_tag(self, name, namespace=None, nsprefix=None, **attrs):
"""Create a new tag associated with this soup."""
return Tag(None, self.builder, name, namespace, nsprefix, attrs)
def new_string(self, s, subclass=NavigableString):
"""Create a new NavigableString associated with this soup."""
navigable = subclass(s)
navigable.setup()
return navigable
def insert_before(self, successor):
raise NotImplementedError("BeautifulSoup objects don't support insert_before().")
def insert_after(self, successor):
raise NotImplementedError("BeautifulSoup objects don't support insert_after().")
def popTag(self):
tag = self.tagStack.pop()
if self.preserve_whitespace_tag_stack and tag == self.preserve_whitespace_tag_stack[-1]:
self.preserve_whitespace_tag_stack.pop()
#print "Pop", tag.name
if self.tagStack:
self.currentTag = self.tagStack[-1]
return self.currentTag
def pushTag(self, tag):
#print "Push", tag.name
if self.currentTag:
self.currentTag.contents.append(tag)
self.tagStack.append(tag)
self.currentTag = self.tagStack[-1]
if tag.name in self.builder.preserve_whitespace_tags:
self.preserve_whitespace_tag_stack.append(tag)
def endData(self, containerClass=NavigableString):
if self.current_data:
current_data = u''.join(self.current_data)
# If whitespace is not preserved, and this string contains
# nothing but ASCII spaces, replace it with a single space
# or newline.
if not self.preserve_whitespace_tag_stack:
strippable = True
for i in current_data:
if i not in self.ASCII_SPACES:
strippable = False
break
if strippable:
if '\n' in current_data:
current_data = '\n'
else:
current_data = ' '
# Reset the data collector.
self.current_data = []
# Should we add this string to the tree at all?
if self.parse_only and len(self.tagStack) <= 1 and \
(not self.parse_only.text or \
not self.parse_only.search(current_data)):
return
o = containerClass(current_data)
self.object_was_parsed(o)
def object_was_parsed(self, o, parent=None, most_recent_element=None):
"""Add an object to the parse tree."""
parent = parent or self.currentTag
most_recent_element = most_recent_element or self._most_recent_element
o.setup(parent, most_recent_element)
if most_recent_element is not None:
most_recent_element.next_element = o
self._most_recent_element = o
parent.contents.append(o)
def _popToTag(self, name, nsprefix=None, inclusivePop=True):
"""Pops the tag stack up to and including the most recent
instance of the given tag. If inclusivePop is false, pops the tag
        stack up to but *not* including the most recent instance of
the given tag."""
#print "Popping to %s" % name
if name == self.ROOT_TAG_NAME:
# The BeautifulSoup object itself can never be popped.
return
most_recently_popped = None
stack_size = len(self.tagStack)
for i in range(stack_size - 1, 0, -1):
t = self.tagStack[i]
if (name == t.name and nsprefix == t.prefix):
if inclusivePop:
most_recently_popped = self.popTag()
break
most_recently_popped = self.popTag()
return most_recently_popped
def handle_starttag(self, name, namespace, nsprefix, attrs):
"""Push a start tag on to the stack.
If this method returns None, the tag was rejected by the
        SoupStrainer. You should proceed as if the tag had not occurred
in the document. For instance, if this was a self-closing tag,
don't call handle_endtag.
"""
# print "Start tag %s: %s" % (name, attrs)
self.endData()
if (self.parse_only and len(self.tagStack) <= 1
and (self.parse_only.text
or not self.parse_only.search_tag(name, attrs))):
return None
tag = Tag(self, self.builder, name, namespace, nsprefix, attrs,
self.currentTag, self._most_recent_element)
if tag is None:
return tag
if self._most_recent_element:
self._most_recent_element.next_element = tag
self._most_recent_element = tag
self.pushTag(tag)
return tag
def handle_endtag(self, name, nsprefix=None):
#print "End tag: " + name
self.endData()
self._popToTag(name, nsprefix)
def handle_data(self, data):
self.current_data.append(data)
def decode(self, pretty_print=False,
eventual_encoding=DEFAULT_OUTPUT_ENCODING,
formatter="minimal"):
"""Returns a string or Unicode representation of this document.
To get Unicode, pass None for encoding."""
if self.is_xml:
# Print the XML declaration
encoding_part = ''
if eventual_encoding != None:
encoding_part = ' encoding="%s"' % eventual_encoding
prefix = u'<?xml version="1.0"%s?>\n' % encoding_part
else:
prefix = u''
if not pretty_print:
indent_level = None
else:
indent_level = 0
return prefix + super(BeautifulSoup, self).decode(
indent_level, eventual_encoding, formatter)
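# A minimal usage sketch (illustrative, not part of the original module):
# feeding markup through the constructor drives the handle_starttag /
# handle_data / handle_endtag pipeline documented on the class above.
def _usage_sketch():
    soup = BeautifulSoup("<p>Hello, <b>world</b></p>", "html.parser")
    assert soup.b.string == u"world"
    return soup.prettify()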
# Alias to make it easier to type import: 'from bs4 import _soup'
_s = BeautifulSoup
_soup = BeautifulSoup
class BeautifulStoneSoup(BeautifulSoup):
"""Deprecated interface to an XML parser."""
def __init__(self, *args, **kwargs):
kwargs['features'] = 'xml'
warnings.warn(
'The BeautifulStoneSoup class is deprecated. Instead of using '
'it, pass features="xml" into the BeautifulSoup constructor.')
super(BeautifulStoneSoup, self).__init__(*args, **kwargs)
class StopParsing(Exception):
pass
class FeatureNotFound(ValueError):
pass
#By default, act as an HTML pretty-printer.
if __name__ == '__main__':
import sys
soup = BeautifulSoup(sys.stdin)
print soup.prettify()
|