repo_name (string) | path (string) | copies (string) | size (string) | content (string) | license (string) | hash (int64) | line_mean (float64) | line_max (int64) | alpha_frac (float64) | autogenerated (bool)
---|---|---|---|---|---|---|---|---|---|---|
ip-tools/ip-navigator | patzilla/util/image/convert.py | 1 | 11407 | # -*- coding: utf-8 -*-
# (c) 2011-2018 Andreas Motl <[email protected]>
import os
import where
import logging
import datetime
import StringIO
import subprocess
from six import BytesIO
from tempfile import NamedTemporaryFile
from cornice.util import to_list
from patzilla.util.python.decorators import memoize
from patzilla.util.python.system import run_command
logger = logging.getLogger(__name__)
def to_png(tiff, width=None, height=None):
"""
Convert image to PNG format with optional resizing.
:param tiff: A stream buffer object like BytesIO
:param width: The width of the image in pixels (optional)
:param height: The height of the image in pixels (optional)
:return: A BytesIO object instance containing image data
"""
"""
The PIL module didn't properly support TIFF images with G4 compression::
Failure: exceptions.IOError: decoder group4 not available
Maybe patch: http://mail.python.org/pipermail/image-sig/2003-July/002354.html
Nowadays, this should be supported by Pillow on recent platforms:
https://pillow.readthedocs.io/en/latest/releasenotes/5.0.0.html#compressed-tiff-images
"""
try:
from PIL import Image
# Read image
image = Image.open(tiff)
if width and height:
# Convert image to grayscale
image = image.convert('L')
# Resize image
image.thumbnail((width, height), Image.LANCZOS)
# Save image into a stream buffer
png = BytesIO()
image.save(png, 'PNG')
# Readers should start reading at the beginning of the stream
png.seek(0)
return png
except Exception as ex:
logger.warning('Image conversion using "Pillow" failed: {}'.format(ex))
"""
However, if the conversion using "Pillow" fails for some reason,
let's try to use the "convert" utility from ImageMagick.
Instructions for installing ImageMagick on Debian::
apt install imagemagick
Instructions for installing ImageMagick on Windows::
https://www.imagemagick.org/script/download.php#windows
Instructions for building ImageMagick on Debian::
# https://packages.debian.org/source/wheezy/imagemagick
aptitude install build-essential checkinstall ghostscript libbz2-dev libexif-dev fftw-dev libfreetype6-dev libjasper-dev libjpeg-dev liblcms2-dev liblqr-1-0-dev libltdl-dev libpng-dev librsvg2-dev libtiff-dev libx11-dev libxext-dev libxml2-dev zlib1g-dev liblzma-dev libpango1.0-dev
wget http://www.imagemagick.org/download/ImageMagick.tar.gz
# untar and cd
./configure --prefix=/opt/imagemagick-7.0.2
make -j6 && make install
"""
more_args = []
# Compute value for "resize" parameter
size = ''
if width or height:
if width:
size += str(width)
# Use "x" for separating "width" and "height" when resizing
size += 'x'
if height:
size += str(height)
more_args += ['-resize', size]
convert = find_convert()
if not convert:
message = 'Could not find ImageMagick program "convert", please install from e.g. https://imagemagick.org/'
logger.error(message)
raise AssertionError(message)
command = [
convert,
'+set', 'date:create', '+set', 'date:modify',
'-colorspace', 'rgb', '-flatten', '-depth', '8',
'-antialias', '-quality', '100', '-density', '300',
# '-level', '30%,100%',
# Debugging
# (see "convert -list debug")
#'-verbose',
#'-debug', 'All',
] \
+ more_args + \
[
# Convert from specific format
#'{0}:-'.format(format),
# Convert from any format
'-',
# Convert to PNG format
'png:-',
]
command_string = ' '.join(command)
try:
logger.debug('Converting image using "{}"'.format(command_string))
return run_imagemagick(command, tiff.read())
except Exception as ex:
logger.error('Image conversion using the ImageMagick "convert" program failed: {}'.format(ex))
raise
def run_imagemagick(command, input=None):
output = run_command(command, input)
if 'ImageMagick' in output.read()[:200]:
command_string = ' '.join(command)
message = 'Image conversion failed, found "ImageMagick" in STDOUT. Command was "{}"'.format(command_string)
logger.error(message)
raise RuntimeError(message)
output.seek(0)
return output
def png_resize(png_payload, width):
from PIL import Image  # png_resize can be called on its own, so it needs its own import
image = Image.open(StringIO.StringIO(png_payload)).convert('RGB')
image_width = image.size[0]
image_height = image.size[1]
#aspect = float(image_width) / float(image_height)
#print "aspect:", aspect
scale_factor = float(image_width) / float(width)
#print "scale_factor:", scale_factor
#size = (int(width), int(image_height * aspect))
size = (int(width), int(image_height / scale_factor))
#print "size:", size
print "Resizing image from %s to %s" % (image.size, size)
image.thumbnail(size, Image.ANTIALIAS)
#image.resize(size, Image.ANTIALIAS)
#print "thumbnail done"
png = StringIO.StringIO()
image.save(png, 'PNG')
#print "image saved to memory"
png_payload_resized = png.getvalue()
#print "got payload"
return png_payload_resized
def pdf_join(pages):
# pdftk in1.pdf in2.pdf cat output out1.pdf
# pdftk in.pdf dump_data output report.txt
# pdftk in.pdf update_info in.info output out.pdf
# pdftk in.pdf update_info_utf8 in.info output out.pdf
# pdftk in.pdf attach_files table1.html table2.html to_page 6 output out.pdf
pdftk = find_pdftk()
if not pdftk:
message = 'Could not find program "pdftk", please install it'
logger.error(message)
raise AssertionError(message)
# Build shellout command
command = [pdftk]
tmpfiles = []
for page in pages:
tmpfile = NamedTemporaryFile()
tmpfile.write(page)
tmpfile.flush()
tmpfiles.append(tmpfile)
command.append(tmpfile.name)
command += ['cat', 'output', '-']
#logger.info('command={0}'.format(' '.join(command)))
cmddebug = ' '.join(command)
stdout = stderr = ''
try:
proc = subprocess.Popen(
command,
shell = (os.name == 'nt'),
#shell = True,
stdin = subprocess.PIPE,
stdout = subprocess.PIPE,
stderr = subprocess.PIPE,
)
stdout, stderr = proc.communicate()
if proc.returncode is not None and proc.returncode != 0:
logger.error('pdftk joining failed, command={0}, stderr={1}, returncode={2}'.format(cmddebug, stderr, proc.returncode))
except Exception as ex:
logger.error('pdftk joining failed, command={0}, exception={1}, stderr={2}'.format(cmddebug, ex, stderr))
finally:
for tmpfile in tmpfiles:
try:
tmpfile.close()
except Exception as ex:
logger.warn('Unable to delete temporary file "%s": %s', tmpfile.name, ex)
return stdout
def pdf_set_metadata(pdf_payload, metadata):
# scdsc
# PDF Producer: BNS/PXI/BPS systems of the EPO
# Content creator: -
# Mod-date: -
# Author: -
# Subject: -
# Title: EP 0666666A2 I
pass
tmpfile = NamedTemporaryFile(delete=False)
tmpfile.write(metadata)
tmpfile.flush()
"""
command = [find_pdftk(), '-', 'dump_data', 'output', '-']
proc = subprocess.Popen(
command,
shell = (os.name == 'nt'),
#shell = True,
stdin = subprocess.PIPE,
stdout = subprocess.PIPE,
stderr = subprocess.PIPE,
)
stdout, stderr = proc.communicate(pdf_payload)
print stdout
#sys.exit()
"""
command = [find_pdftk(), '-', 'update_info', tmpfile.name, 'output', '-']
#logger.info('command={0}'.format(' '.join(command)))
cmddebug = ' '.join(command)
stdout = stderr = ''
try:
proc = subprocess.Popen(
command,
shell = (os.name == 'nt'),
#shell = True,
stdin = subprocess.PIPE,
stdout = subprocess.PIPE,
stderr = subprocess.PIPE,
)
stdout, stderr = proc.communicate(pdf_payload)
if proc.returncode is not None and proc.returncode != 0:
logger.error('pdftk metadata store failed, command={0}, stderr={1}'.format(cmddebug, stderr))
raise Exception()
except Exception as ex:
logger.error('pdftk metadata store failed, command={0}, exception={1}, stderr={2}'.format(cmddebug, ex, stderr))
raise
return stdout
def pdf_make_metadata(title, producer, pagecount, page_sections=None):
page_sections = page_sections and to_list(page_sections) or []
date = pdf_now()
tpl = """
InfoBegin
InfoKey: Title
InfoValue: {title}
InfoBegin
InfoKey: Producer
InfoValue: {producer}
InfoBegin
InfoKey: Creator
InfoValue:
InfoBegin
InfoKey: ModDate
InfoValue:
InfoBegin
InfoKey: CreationDate
InfoValue: {date}
NumberOfPages: {pagecount}
"""
metadata = tpl.format(**locals())
# https://stackoverflow.com/questions/2969479/merge-pdfs-with-pdftk-with-bookmarks/20333267#20333267
bookmark_tpl = """
BookmarkBegin
BookmarkTitle: {title}
BookmarkLevel: {level}
BookmarkPageNumber: {start_page}
"""
for page_section in page_sections:
name = page_section['@name']
start_page = page_section['@start-page']
if name == 'SEARCH_REPORT':
title = 'Search-report'
else:
title = name.title()
level = 1
metadata += bookmark_tpl.format(**locals())
return metadata
def pdf_now():
# D:20150220033046+01'00'
now = datetime.datetime.now().strftime("D:%Y%m%d%H%M%S+01'00'")
return now
@memoize
def find_convert():
"""
Debian: aptitude install imagemagick
/usr/bin/convert
Mac OS X with Homebrew
/usr/local/bin/convert
Mac OS X with Macports
/opt/local/bin/convert
Self-compiled
/opt/imagemagick/bin/convert
/opt/imagemagick-7.0.2/bin/convert
"""
# Some nailed location candidates
candidates = [
'/opt/imagemagick-7.0.2/bin/convert',
'/opt/imagemagick/bin/convert',
'/usr/local/bin/convert',
'/opt/local/bin/convert',
'/usr/bin/convert',
]
# More location candidates from the system
candidates += where.where('convert')
# Find location of "convert" program
convert_path = find_program_candidate(candidates)
logger.info('Found "convert" program at {}'.format(convert_path))
return convert_path
@memoize
def find_pdftk():
"""
Debian: aptitude install pdftk
/usr/bin/pdftk
Mac OS X
/opt/pdflabs/pdftk/bin/pdftk
Self-compiled
/usr/local/bin/pdftk
"""
candidates = [
'/opt/pdflabs/pdftk/bin/pdftk',
'/usr/local/bin/pdftk',
'/usr/bin/pdftk',
]
# More location candidates from the system
candidates += where.where('pdftk')
return find_program_candidate(candidates)
def find_program_candidate(candidates):
for candidate in candidates:
if os.path.isfile(candidate):
return candidate
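# Added usage sketch (not part of the original module); file names and the
# target size are placeholder assumptions:
#
#   with open('drawing.tif', 'rb') as f:
#       png = to_png(BytesIO(f.read()), width=400, height=600)
#   with open('drawing.png', 'wb') as f:
#       f.write(png.read())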
| agpl-3.0 | 3,111,559,247,840,244,700 | 25.589744 | 290 | 0.621899 | false |
cxxgtxy/tensorflow | tensorflow/python/kernel_tests/basic_gpu_test.py | 1 | 10148 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for basic component wise operations using a GPU device."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import threading
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.gen_array_ops import _broadcast_gradient_args
from tensorflow.python.platform import test
class GPUBinaryOpsTest(test.TestCase):
def _compareGPU(self, x, y, np_func, tf_func):
with self.test_session(use_gpu=True) as sess:
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
out = tf_func(inx, iny)
tf_gpu = sess.run(out)
with self.test_session(use_gpu=False) as sess:
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
out = tf_func(inx, iny)
tf_cpu = sess.run(out)
self.assertAllClose(tf_cpu, tf_gpu)
def testFloatBasic(self):
x = np.linspace(-5, 20, 15).reshape(1, 3, 5).astype(np.float32)
y = np.linspace(20, -5, 15).reshape(1, 3, 5).astype(np.float32)
self._compareGPU(x, y, np.add, math_ops.add)
self._compareGPU(x, y, np.subtract, math_ops.subtract)
self._compareGPU(x, y, np.multiply, math_ops.multiply)
self._compareGPU(x, y + 0.1, np.true_divide, math_ops.truediv)
self._compareGPU(x, y + 0.1, np.floor_divide, math_ops.floordiv)
self._compareGPU(x, y, np.power, math_ops.pow)
def testFloatWithBCast(self):
x = np.linspace(-5, 20, 15).reshape(3, 5).astype(np.float32)
y = np.linspace(20, -5, 30).reshape(2, 3, 5).astype(np.float32)
self._compareGPU(x, y, np.add, math_ops.add)
self._compareGPU(x, y, np.subtract, math_ops.subtract)
self._compareGPU(x, y, np.multiply, math_ops.multiply)
self._compareGPU(x, y + 0.1, np.true_divide, math_ops.truediv)
def testDoubleBasic(self):
x = np.linspace(-5, 20, 15).reshape(1, 3, 5).astype(np.float64)
y = np.linspace(20, -5, 15).reshape(1, 3, 5).astype(np.float64)
self._compareGPU(x, y, np.add, math_ops.add)
self._compareGPU(x, y, np.subtract, math_ops.subtract)
self._compareGPU(x, y, np.multiply, math_ops.multiply)
self._compareGPU(x, y + 0.1, np.true_divide, math_ops.truediv)
def testDoubleWithBCast(self):
x = np.linspace(-5, 20, 15).reshape(3, 5).astype(np.float64)
y = np.linspace(20, -5, 30).reshape(2, 3, 5).astype(np.float64)
self._compareGPU(x, y, np.add, math_ops.add)
self._compareGPU(x, y, np.subtract, math_ops.subtract)
self._compareGPU(x, y, np.multiply, math_ops.multiply)
self._compareGPU(x, y + 0.1, np.true_divide, math_ops.truediv)
class MathBuiltinUnaryTest(test.TestCase):
def _compare(self, x, np_func, tf_func, use_gpu):
np_out = np_func(x)
with self.test_session(use_gpu=use_gpu) as sess:
inx = ops.convert_to_tensor(x)
ofunc = tf_func(inx)
tf_out = sess.run(ofunc)
self.assertAllClose(np_out, tf_out)
def _inv(self, x):
return 1.0 / x
def _rsqrt(self, x):
return self._inv(np.sqrt(x))
def _testDtype(self, dtype, use_gpu):
data = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(dtype)
self._compare(data, np.abs, math_ops.abs, use_gpu)
self._compare(data, np.arccos, math_ops.acos, use_gpu)
self._compare(data, np.arcsin, math_ops.asin, use_gpu)
self._compare(data, np.arctan, math_ops.atan, use_gpu)
self._compare(data, np.ceil, math_ops.ceil, use_gpu)
self._compare(data, np.cos, math_ops.cos, use_gpu)
self._compare(data, np.exp, math_ops.exp, use_gpu)
self._compare(data, np.floor, math_ops.floor, use_gpu)
self._compare(data, np.log, math_ops.log, use_gpu)
self._compare(data, np.log1p, math_ops.log1p, use_gpu)
self._compare(data, np.negative, math_ops.negative, use_gpu)
self._compare(data, self._rsqrt, math_ops.rsqrt, use_gpu)
self._compare(data, np.sin, math_ops.sin, use_gpu)
self._compare(data, np.sqrt, math_ops.sqrt, use_gpu)
self._compare(data, np.square, math_ops.square, use_gpu)
self._compare(data, np.tan, math_ops.tan, use_gpu)
self._compare(data, np.tanh, math_ops.tanh, use_gpu)
def testTypes(self):
for dtype in [np.float32]:
self._testDtype(dtype, use_gpu=True)
def testFloorDivide(self):
x = (1 + np.linspace(0, 5, np.prod([1, 3, 2]))).astype(np.float32).reshape(
[1, 3, 2])
y = (1 + np.linspace(0, 5, np.prod([1, 3, 2]))).astype(np.float32).reshape(
[1, 3, 2])
np_out = np.floor_divide(x, y + 0.1)
with self.test_session(use_gpu=True) as sess:
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y + 0.1)
ofunc = inx / iny
out_func2 = math_ops.floor(ofunc)
tf_out = sess.run(out_func2)
self.assertAllClose(np_out, tf_out)
class BroadcastSimpleTest(test.TestCase):
def _GetGradientArgs(self, xs, ys):
with self.test_session(use_gpu=True) as sess:
return sess.run(_broadcast_gradient_args(xs, ys))
def testBroadcast(self):
r0, r1 = self._GetGradientArgs([2, 3, 5], [1])
self.assertAllEqual(r0, [])
self.assertAllEqual(r1, [0, 1, 2])
_GRAD_TOL = {dtypes.float32: 1e-3}
def _compareGradientX(self,
x,
y,
np_func,
tf_func,
numeric_gradient_type=None):
z = np_func(x, y)
zs = list(z.shape)
with self.test_session():
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
if x.dtype in (np.float32, np.float64):
out = 1.1 * tf_func(inx, iny)
else:
out = tf_func(inx, iny)
xs = list(x.shape)
jacob_t, jacob_n = gradient_checker.compute_gradient(
inx, xs, out, zs, x_init_value=x)
tol = self._GRAD_TOL[dtypes.as_dtype(x.dtype)]
self.assertAllClose(jacob_t, jacob_n, rtol=tol, atol=tol)
def _compareGradientY(self,
x,
y,
np_func,
tf_func,
numeric_gradient_type=None):
z = np_func(x, y)
zs = list(z.shape)
with self.test_session():
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
if x.dtype in (np.float32, np.float64):
out = 1.1 * tf_func(inx, iny)
else:
out = tf_func(inx, iny)
ys = list(np.shape(y))
jacob_t, jacob_n = gradient_checker.compute_gradient(
iny, ys, out, zs, x_init_value=y)
tol = self._GRAD_TOL[dtypes.as_dtype(x.dtype)]
self.assertAllClose(jacob_t, jacob_n, rtol=tol, atol=tol)
def _compareGpu(self, x, y, np_func, tf_func):
np_ans = np_func(x, y)
with self.test_session(use_gpu=True):
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
out = tf_func(inx, iny)
tf_gpu = out.eval()
self.assertAllClose(np_ans, tf_gpu)
self.assertShapeEqual(np_ans, out)
# TODO(zhifengc/ke): make gradient checker work on GPU.
def testGradient(self):
x = (1 + np.linspace(0, 5, np.prod([1, 3, 2]))).astype(np.float32).reshape(
[1, 3, 2])
y = (1 + np.linspace(0, 5, np.prod([1, 3, 2]))).astype(np.float32).reshape(
[1, 3, 2])
self._compareGradientX(x, y, np.true_divide, math_ops.truediv)
self._compareGradientY(x, y, np.true_divide, math_ops.truediv)
self._compareGpu(x, y, np.true_divide, math_ops.truediv)
self._compareGpu(x, y + 0.1, np.floor_divide, math_ops.floordiv)
class GpuMultiSessionMemoryTest(test_util.TensorFlowTestCase):
"""Tests concurrent sessions executing on the same GPU."""
def _run_session(self, results):
n_iterations = 500
with self.test_session(use_gpu=True) as s:
data = variables.Variable(1.0)
with ops.device('/gpu:0'):
random_seed.set_random_seed(1)
matrix1 = variables.Variable(
random_ops.truncated_normal([1024, 1]), name='matrix1')
matrix2 = variables.Variable(
random_ops.truncated_normal([1, 1024]), name='matrix2')
x1 = math_ops.multiply(data, matrix1, name='x1')
x3 = math_ops.matmul(x1, math_ops.matmul(matrix2, matrix1))
x4 = math_ops.matmul(array_ops.transpose(x3), x3, name='x4')
s.run(variables.global_variables_initializer())
for _ in xrange(n_iterations):
value = s.run(x4)
results.append(value)
if value != results[0]:
break
def testConcurrentSessions(self):
if not test.is_gpu_available():
return
n_threads = 4
results = [[] for _ in xrange(n_threads)]  # independent result lists, one per thread
threads = [
threading.Thread(target=self._run_session, args=(results[i],))
for i in xrange(n_threads)
]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
flat_results = [x for x in itertools.chain(*results)]
self.assertNotEqual(0, len(flat_results))
for result in flat_results:
self.assertEqual(result, flat_results[0])
if __name__ == '__main__':
test.main()
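# Added illustration (not part of the test file): the CPU-vs-GPU comparison
# pattern used by _compareGPU above, shown in isolation with plain TF 1.x
# sessions. Assumes at least one GPU is available.
#
#   import tensorflow as tf
#   x = np.linspace(-5, 20, 15).reshape(1, 3, 5).astype(np.float32)
#   with tf.device('/gpu:0'):
#       gpu_out = tf.add(x, x)
#   with tf.device('/cpu:0'):
#       cpu_out = tf.add(x, x)
#   with tf.Session() as sess:
#       gpu_val, cpu_val = sess.run([gpu_out, cpu_out])
#   np.testing.assert_allclose(cpu_val, gpu_val)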
| apache-2.0 | 8,767,254,586,798,337,000 | 36.036496 | 80 | 0.631652 | false |
pipermerriam/flex | tests/loading/definition/schema/test_format.py | 1 | 1324 | import pytest
from flex.error_messages import MESSAGES
from flex.exceptions import ValidationError
from flex.loading.definitions.schema import schema_validator
from tests.utils import (
assert_path_not_in_errors,
assert_message_in_errors,
)
def test_format_is_not_required():
try:
schema_validator({})
except ValidationError as err:
errors = err.detail
else:
errors = {}
assert_path_not_in_errors('format', errors)
@pytest.mark.parametrize(
'value',
([1, 2], None, {'a': 1}, True, 1, 1.1),
)
def test_format_with_invalid_types(value):
with pytest.raises(ValidationError) as err:
schema_validator({'format': value})
assert_message_in_errors(
MESSAGES['type']['invalid'],
err.value.detail,
'format.type',
)
def test_format_for_valid_registered_format():
try:
schema_validator({'format': 'uuid'})
except ValidationError as err:
errors = err.detail
else:
errors = {}
assert_path_not_in_errors('format', errors)
def test_format_for_valid_unregistered_format():
try:
schema_validator({'format': 'not-a-registered-format'})
except ValidationError as err:
errors = err.detail
else:
errors = {}
assert_path_not_in_errors('format', errors)
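# Added illustration (not part of the test module): the kind of schema fragment
# these tests exercise. For example, schema_validator({'type': 'string',
# 'format': 'uuid'}) is expected to pass, while schema_validator({'format': 1})
# should raise ValidationError with a type error reported under 'format.type'.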
| mit | -7,971,527,867,819,252,000 | 21.827586 | 63 | 0.638973 | false |
ej2/declarationshare | config/settings/production.py | 1 | 5700 | # -*- coding: utf-8 -*-
'''
Production Configurations
- Use djangosecure
- Use Amazon's S3 for storing static files and uploaded media
- Use mailgun to send emails
- Use Redis on Heroku
'''
from __future__ import absolute_import, unicode_literals
import os
from boto.s3.connection import OrdinaryCallingFormat
from django.utils import six
from .common import * # noqa
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Raises ImproperlyConfigured exception if DJANGO_SECRET_KEY not in os.environ
SECRET_KEY = env("DJANGO_SECRET_KEY")
# This ensures that Django will be able to detect a secure connection
# properly on Heroku.
#SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# django-secure
# ------------------------------------------------------------------------------
INSTALLED_APPS += ("djangosecure", )
SECURITY_MIDDLEWARE = (
#'djangosecure.middleware.SecurityMiddleware',
)
# Make sure djangosecure.middleware.SecurityMiddleware is listed first
MIDDLEWARE_CLASSES = SECURITY_MIDDLEWARE + MIDDLEWARE_CLASSES
# set this to 60 seconds and then to 518400 when you can prove it works
SECURE_HSTS_SECONDS = 60
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool(
"DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS", default=True)
SECURE_FRAME_DENY = env.bool("DJANGO_SECURE_FRAME_DENY", default=True)
SECURE_CONTENT_TYPE_NOSNIFF = env.bool(
"DJANGO_SECURE_CONTENT_TYPE_NOSNIFF", default=True)
SECURE_BROWSER_XSS_FILTER = True
SESSION_COOKIE_SECURE = False
SESSION_COOKIE_HTTPONLY = True
#SECURE_SSL_REDIRECT = env.bool("DJANGO_SECURE_SSL_REDIRECT", default=True)
# SITE CONFIGURATION
# ------------------------------------------------------------------------------
# Hosts/domain names that are valid for this site
# See https://docs.djangoproject.com/en/1.6/ref/settings/#allowed-hosts
ALLOWED_HOSTS = env.list('DJANGO_ALLOWED_HOSTS', default=['.herokuapp.com', '.declarenshare.com'])
# END SITE CONFIGURATION
INSTALLED_APPS += ("gunicorn", )
# BASE_DIR = os.path.dirname(os.path.abspath(__file__))
#
# STATICFILES_DIRS = (
# os.path.join(BASE_DIR, 'static'),
# )
# EMAIL
# ------------------------------------------------------------------------------
DEFAULT_FROM_EMAIL = env('DJANGO_DEFAULT_FROM_EMAIL',
default='declarationshare <[email protected]>')
EMAIL_BACKEND = 'django_mailgun.MailgunBackend'
MAILGUN_ACCESS_KEY = env('DJANGO_MAILGUN_API_KEY')
MAILGUN_SERVER_NAME = env('DJANGO_MAILGUN_SERVER_NAME')
EMAIL_SUBJECT_PREFIX = env("DJANGO_EMAIL_SUBJECT_PREFIX", default='[declarationshare] ')
SERVER_EMAIL = env('DJANGO_SERVER_EMAIL', default=DEFAULT_FROM_EMAIL)
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See:
# https://docs.djangoproject.com/en/dev/ref/templates/api/#django.template.loaders.cached.Loader
TEMPLATES[0]['OPTIONS']['loaders'] = [
('django.template.loaders.cached.Loader', [
'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', ]),
]
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ
DATABASES['default'] = env.db("DATABASE_URL")
# CACHING
# ------------------------------------------------------------------------------
# Heroku URL does not pass the DB number, so we parse it in
CACHES = {
"default": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": "{0}/{1}".format(env.cache_url('REDIS_URL', default="redis://127.0.0.1:6379"), 0),
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
"IGNORE_EXCEPTIONS": True, # mimics memcache behavior.
# http://niwinz.github.io/django-redis/latest/#_memcached_exceptions_behavior
}
}
}
# LOGGING CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s '
'%(process)d %(thread)d %(message)s'
},
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose',
},
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True
},
'django.security.DisallowedHost': {
'level': 'ERROR',
'handlers': ['console', 'mail_admins'],
'propagate': True
}
}
}
# Custom Admin URL, use {% url 'admin:index' %}
ADMIN_URL = env('DJANGO_ADMIN_URL')
# Your production stuff: Below this line define 3rd party library settings
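# Added illustration (not part of the original settings): the environment
# variables read above could be supplied e.g. as Heroku config vars. Names are
# the ones used in this module; values below are placeholders only.
#
#   DJANGO_SECRET_KEY=<random-secret>
#   DJANGO_ALLOWED_HOSTS=.herokuapp.com,.declarenshare.com
#   DJANGO_MAILGUN_API_KEY=<mailgun-key>
#   DJANGO_MAILGUN_SERVER_NAME=<mailgun-domain>
#   DATABASE_URL=postgres://user:pass@host:5432/dbname
#   REDIS_URL=redis://host:6379
#   DJANGO_ADMIN_URL=^somewhere-hidden/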
| bsd-3-clause | 1,140,230,236,291,163,500 | 33.969325 | 117 | 0.58807 | false |
rchui/pyql | Data/tablelib.py | 1 | 2538 | """ tablelib.py
This file defines functions for getting table information.
"""
import csv
import collections
def load_indexes(froms, tables, indexes):
""" Load indexes into memory.
Args:
froms: tables to query
tables: tables in the database
indexes: indexes in the database
Return:
indexes: indexes for the current query
"""
indexes = {}
for from_ in froms:
if tables[from_[0]] == 'idx':
index = collections.OrderedDict()
with open(from_[0] + '.idx', 'r') as index_reader:
attribute = index_reader.readline().strip()
table = index_reader.readline().strip()
for line in csv.reader(index_reader, quotechar='"', delimiter=','):
index[line[0]] = [int(x) for x in line[1:] if x.isdigit()]
indexes[from_[0]] = [attribute, index, table + '.csv']
return indexes
def get_where_indexes(wheres, attributes):
""" Gets the indexes for all where statements.
Args:
wheres: where values
attributes: attributes in the tables
Returns:
indexes: look up indexes for where values.
"""
indexes = []
for where in wheres:
if len(where) == 3:
subresult = [element.split('.') for element in where]
for i in range(len(subresult)):
if len(subresult[i]) == 2:
subresult[i][1] = attributes[subresult[i][0]].index(subresult[i][1])
indexes.append(subresult)
else:
indexes.append(where)
return indexes
def get_select_indexes(selects, attributes):
""" Gets the indexes for all select statements.
Args:
selects: select values
attributes: attributes in the tables
Returns:
indexes: look up indexes for select values
"""
if selects[0] != '*':
indexes = []
split_select = [select.split('.') for select in selects]
for select in split_select:
indexes.append([select[0], attributes[select[0]].index(select[1])])
return indexes
else:
return [selects]
def get_table_size(tables):
""" Gets the tables size of all tables in the database.
Args:
tables: tables in the database
Returns:
None
"""
line_counts={}
for table, ext in tables.items():
i=0
with open(table+'.'+ext) as fh:
for line in fh:
i+=1
line_counts[table]=i
return line_counts
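# Added usage sketch (hypothetical table layout, not part of the module):
#
#   tables = {'people': 'csv', 'people_name': 'idx'}
#   sizes = get_table_size(tables)                     # e.g. {'people': 120, 'people_name': 5}
#   idx = load_indexes([['people_name']], tables, {})  # loads people_name.idx into memory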
| mit | -1,861,673,768,422,208,800 | 27.2 | 88 | 0.568558 | false |
MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-3.0/Lib/multiprocessing/sharedctypes.py | 1 | 5791 | #
# Module which supports allocation of ctypes objects from shared memory
#
# multiprocessing/sharedctypes.py
#
# Copyright (c) 2007-2008, R Oudkerk --- see COPYING.txt
#
import sys
import ctypes
import weakref
from multiprocessing import heap, RLock
from multiprocessing.forking import assert_spawning, ForkingPickler
__all__ = ['RawValue', 'RawArray', 'Value', 'Array', 'copy', 'synchronized']
#
#
#
typecode_to_type = {
'c': ctypes.c_char, 'u': ctypes.c_wchar,
'b': ctypes.c_byte, 'B': ctypes.c_ubyte,
'h': ctypes.c_short, 'H': ctypes.c_ushort,
'i': ctypes.c_int, 'I': ctypes.c_uint,
'l': ctypes.c_long, 'L': ctypes.c_ulong,
'f': ctypes.c_float, 'd': ctypes.c_double
}
#
#
#
def _new_value(type_):
size = ctypes.sizeof(type_)
wrapper = heap.BufferWrapper(size)
return rebuild_ctype(type_, wrapper, None)
def RawValue(typecode_or_type, *args):
'''
Returns a ctypes object allocated from shared memory
'''
type_ = typecode_to_type.get(typecode_or_type, typecode_or_type)
obj = _new_value(type_)
ctypes.memset(ctypes.addressof(obj), 0, ctypes.sizeof(obj))
obj.__init__(*args)
return obj
def RawArray(typecode_or_type, size_or_initializer):
'''
Returns a ctypes array allocated from shared memory
'''
type_ = typecode_to_type.get(typecode_or_type, typecode_or_type)
if isinstance(size_or_initializer, int):
type_ = type_ * size_or_initializer
return _new_value(type_)
else:
type_ = type_ * len(size_or_initializer)
result = _new_value(type_)
result.__init__(*size_or_initializer)
return result
def Value(typecode_or_type, *args, lock=None):
'''
Return a synchronization wrapper for a Value
'''
obj = RawValue(typecode_or_type, *args)
if lock is None:
lock = RLock()
assert hasattr(lock, 'acquire')
return synchronized(obj, lock)
def Array(typecode_or_type, size_or_initializer, **kwds):
'''
Return a synchronization wrapper for a RawArray
'''
lock = kwds.pop('lock', None)
if kwds:
raise ValueError('unrecognized keyword argument(s): %s' % list(kwds.keys()))
obj = RawArray(typecode_or_type, size_or_initializer)
if lock is None:
lock = RLock()
assert hasattr(lock, 'acquire')
return synchronized(obj, lock)
def copy(obj):
new_obj = _new_value(type(obj))
ctypes.pointer(new_obj)[0] = obj
return new_obj
def synchronized(obj, lock=None):
assert not isinstance(obj, SynchronizedBase), 'object already synchronized'
if isinstance(obj, ctypes._SimpleCData):
return Synchronized(obj, lock)
elif isinstance(obj, ctypes.Array):
if obj._type_ is ctypes.c_char:
return SynchronizedString(obj, lock)
return SynchronizedArray(obj, lock)
else:
cls = type(obj)
try:
scls = class_cache[cls]
except KeyError:
names = [field[0] for field in cls._fields_]
d = dict((name, make_property(name)) for name in names)
classname = 'Synchronized' + cls.__name__
scls = class_cache[cls] = type(classname, (SynchronizedBase,), d)
return scls(obj, lock)
#
# Functions for pickling/unpickling
#
def reduce_ctype(obj):
assert_spawning(obj)
if isinstance(obj, ctypes.Array):
return rebuild_ctype, (obj._type_, obj._wrapper, obj._length_)
else:
return rebuild_ctype, (type(obj), obj._wrapper, None)
def rebuild_ctype(type_, wrapper, length):
if length is not None:
type_ = type_ * length
ForkingPickler.register(type_, reduce_ctype)
obj = type_.from_address(wrapper.get_address())
obj._wrapper = wrapper
return obj
#
# Function to create properties
#
def make_property(name):
try:
return prop_cache[name]
except KeyError:
d = {}
exec(template % ((name,)*7), d)
prop_cache[name] = d[name]
return d[name]
template = '''
def get%s(self):
self.acquire()
try:
return self._obj.%s
finally:
self.release()
def set%s(self, value):
self.acquire()
try:
self._obj.%s = value
finally:
self.release()
%s = property(get%s, set%s)
'''
prop_cache = {}
class_cache = weakref.WeakKeyDictionary()
#
# Synchronized wrappers
#
class SynchronizedBase(object):
def __init__(self, obj, lock=None):
self._obj = obj
self._lock = lock or RLock()
self.acquire = self._lock.acquire
self.release = self._lock.release
def __reduce__(self):
assert_spawning(self)
return synchronized, (self._obj, self._lock)
def get_obj(self):
return self._obj
def get_lock(self):
return self._lock
def __repr__(self):
return '<%s wrapper for %s>' % (type(self).__name__, self._obj)
class Synchronized(SynchronizedBase):
value = make_property('value')
class SynchronizedArray(SynchronizedBase):
def __len__(self):
return len(self._obj)
def __getitem__(self, i):
self.acquire()
try:
return self._obj[i]
finally:
self.release()
def __setitem__(self, i, value):
self.acquire()
try:
self._obj[i] = value
finally:
self.release()
def __getslice__(self, start, stop):
self.acquire()
try:
return self._obj[start:stop]
finally:
self.release()
def __setslice__(self, start, stop, values):
self.acquire()
try:
self._obj[start:stop] = values
finally:
self.release()
class SynchronizedString(SynchronizedArray):
value = make_property('value')
raw = make_property('raw')
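# Added usage sketch (not part of the module): sharing a counter and an array
# of doubles between processes.
#
#   from multiprocessing import Process
#   counter = Value('i', 0)           # synchronized wrapper around a c_int
#   samples = Array('d', [0.0] * 8)   # synchronized wrapper around a double array
#   def work(c, a):
#       with c.get_lock():
#           c.value += 1
#       a[0] = 3.14
#   p = Process(target=work, args=(counter, samples))
#   p.start(); p.join()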
| mit | 8,941,750,916,456,261,000 | 24.28821 | 84 | 0.604731 | false |
igemsoftware2017/USTC-Software-2017 | biohub/abacus/handlers.py | 1 | 2160 | from django.urls import reverse
from rest_framework.exceptions import ValidationError
from biohub.abacus.result import AbacusAsyncResult
from . import consts, remote
class BaseHandler(object):
"""
Abstract task handler to adjust different environments.
"""
def __init__(self, request):
self._request = request
def start_task(self, user):
if 'file' not in self._request.FILES:
raise ValidationError('Should upload a file.')
task_id = self._perform_start_task()
async_result = AbacusAsyncResult(task_id)
async_result._set_input_file_name(self._request.FILES['file'].name)
async_result._set_ident(self.ident)
async_result._set_user(user.pk)
return dict(
id=task_id,
query_url=reverse(
'api:abacus:abacus-query',
kwargs=dict(task_id=task_id)
)
)
class LocalHandler(BaseHandler):
ident = consts.LOCAL
def _run_task(self, input_file_name):
from biohub.abacus.tasks import AbacusTask
return AbacusTask.apply_async(input_file_name)
def _store_file(self):
from biohub.core.files.utils import store_file
return store_file(self._request.FILES['file'])[0]
def _perform_start_task(self):
return self._run_task(self._store_file()).task_id
class RemoteHandler(BaseHandler):
ident = consts.REMOTE
def _perform_start_task(self):
task_id, server, signature = remote.start(self._request)
result = AbacusAsyncResult(task_id)
result._set_server(server)
result._set_status('RUNNING')
result._set_signature(signature)
return task_id
def get_handler_class():
"""
To choose and return the right handler.
"""
from .conf import settings
return {
consts.LOCAL: LocalHandler,
consts.REMOTE: RemoteHandler
}[settings.ident]
def get_handler(request):
return get_handler_class()(request)
def query(task_id):
"""
Queries and returns the status (and output if succeeded).
"""
return AbacusAsyncResult(task_id).response()
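# Added illustration (not part of the module): a view dispatching and polling a
# task might look roughly like
#
#   handler = get_handler(request)
#   payload = handler.start_task(request.user)   # {'id': ..., 'query_url': ...}
#   status = query(payload['id'])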
| gpl-3.0 | 4,481,847,299,378,128,000 | 23.269663 | 75 | 0.636111 | false |
AutorestCI/azure-sdk-for-python | azure-mgmt-datafactory/azure/mgmt/datafactory/models/azure_sql_dw_table_dataset.py | 1 | 2622 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .dataset import Dataset
class AzureSqlDWTableDataset(Dataset):
"""The Azure SQL Data Warehouse dataset.
:param additional_properties: Unmatched properties from the message are
deserialized this collection
:type additional_properties: dict[str, object]
:param description: Dataset description.
:type description: str
:param structure: Columns that define the structure of the dataset. Type:
array (or Expression with resultType array), itemType: DatasetDataElement.
:type structure: object
:param linked_service_name: Linked service reference.
:type linked_service_name:
~azure.mgmt.datafactory.models.LinkedServiceReference
:param parameters: Parameters for dataset.
:type parameters: dict[str,
~azure.mgmt.datafactory.models.ParameterSpecification]
:param type: Constant filled by server.
:type type: str
:param table_name: The table name of the Azure SQL Data Warehouse. Type:
string (or Expression with resultType string).
:type table_name: object
"""
_validation = {
'linked_service_name': {'required': True},
'type': {'required': True},
'table_name': {'required': True},
}
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'description': {'key': 'description', 'type': 'str'},
'structure': {'key': 'structure', 'type': 'object'},
'linked_service_name': {'key': 'linkedServiceName', 'type': 'LinkedServiceReference'},
'parameters': {'key': 'parameters', 'type': '{ParameterSpecification}'},
'type': {'key': 'type', 'type': 'str'},
'table_name': {'key': 'typeProperties.tableName', 'type': 'object'},
}
def __init__(self, linked_service_name, table_name, additional_properties=None, description=None, structure=None, parameters=None):
super(AzureSqlDWTableDataset, self).__init__(additional_properties=additional_properties, description=description, structure=structure, linked_service_name=linked_service_name, parameters=parameters)
self.table_name = table_name
self.type = 'AzureSqlDWTable'
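# Added illustration (placeholder values; the LinkedServiceReference usage is an
# assumption about the companion model class, not taken from the SDK docs):
#
#   dataset = AzureSqlDWTableDataset(
#       linked_service_name=LinkedServiceReference(reference_name='MyDWLinkedService'),
#       table_name='dbo.StagingTable')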
| mit | -80,414,696,839,832,960 | 44.206897 | 207 | 0.647979 | false |
cloudify-cosmo/cloudify-ansible-plugin | cloudify_ansible_sdk/tests/test_from_file.py | 1 | 3732 | # Copyright (c) 2019 Cloudify Platform Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from logging import getLogger
from mock import Mock
from os import environ, path
from unittest import skipUnless
from . import AnsibleTestBase
from .. import AnsiblePlaybookFromFile
class AnsibleSDKTest(AnsibleTestBase):
def test_that_tests_can_run_correctly(self):
"""Check that these tests can actually run."""
self.assertTrue(path.isfile(self.playbook_path))
self.assertTrue(path.isfile(self.hosts_path))
self.assertIn(
self.hosts_path,
AnsiblePlaybookFromFile(
self.playbook_path,
self.hosts_path, logger=getLogger('testLogger')).sources)
self.assertIn(
self.playbook_path,
AnsiblePlaybookFromFile(
self.playbook_path,
self.hosts_path, logger=getLogger('testLogger')).playbook)
@skipUnless(
environ.get('TEST_ZPLAYS', False),
reason='This test requires you to run "vagrant up". '
'And export TEST_ZPLAYS=true')
def test_zplays(self):
"""Run an actual Ansible playbook from a file."""
AnsiblePlaybookFromFile(
self.playbook_path,
self.hosts_path,
logger=getLogger('testLogger')
).execute()
def test_env(self):
test_env = environ.copy()
new = {'foo': 'bar'}
p = AnsiblePlaybookFromFile(
self.playbook_path,
self.hosts_path,
environment_variables=new,
logger=getLogger('testLogger')
)
test_env.update(new)
self.assertEqual(p.env, test_env)
def test_verbosity(self):
p = AnsiblePlaybookFromFile(
self.playbook_path,
self.hosts_path,
verbosity=5,
logger=getLogger('testLogger')
)
self.assertEqual(p.verbosity, '-vvvvv')
def test_options(self):
p = AnsiblePlaybookFromFile(
self.playbook_path,
self.hosts_path,
run_data={'taco': 'foo'},
options_config={'foo': 'bar'},
logger=getLogger('testLogger')
)
self.assertIn('--foo=\'bar\'', p.options)
if 'extra-vars' in p.options:
self.assertIn('@', p.options)
def test_command(self):
p = AnsiblePlaybookFromFile(
self.playbook_path,
self.hosts_path,
logger=getLogger('testLogger')
)
self.assertEqual(p.process_args[0], '-vv')
self.assertEqual(p.process_args[1], '-i {0}'.format(self.hosts_path))
self.assertIn('--extra-vars', p.process_args[2])
self.assertEqual('', p.process_args[-2])
self.assertIn('ansible-examples/lamp_simple/site.yml',
p.process_args[3])
def test_execute(self):
p = AnsiblePlaybookFromFile(
self.playbook_path,
self.hosts_path,
logger=getLogger('testLogger')
)
dummy_mock = Mock('Dummy process execution func')
dummy_mock.return_value = None
result = p.execute(dummy_mock)
self.assertIsNone(result)
| apache-2.0 | 6,136,226,149,651,220,000 | 33.238532 | 77 | 0.607717 | false |
sadad111/leetcodebox | Add One Row to Tree.py | 1 | 1410 | # /**
# * Definition for a binary tree node.
# * public class TreeNode {
# * int val;
# * TreeNode left;
# * TreeNode right;
# * TreeNode(int x) { val = x; }
# * }
# */
# public class Solution {
# public TreeNode addOneRow(TreeNode root, int v, int d) {
# if (d < 2) {
# TreeNode newroot = new TreeNode(v);
# if (d == 0) newroot.right = root;
# else newroot.left = root;
# return newroot;
# }
# if (root == null) return null;
# root.left = addOneRow(root.left, v, d == 2 ? 1 : d-1);
# root.right = addOneRow(root.right, v, d == 2 ? 0 : d-1);
# return root;
# }
# }
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def addOneRow(self, root, v, d):
"""
:type root: TreeNode
:type v: int
:type d: int
:rtype: TreeNode
"""
dummy, dummy.left = TreeNode(None), root
row = [dummy]
for _ in range(d - 1):
row = [kid for node in row for kid in (node.left, node.right) if kid]
for node in row:
node.left, node.left.left = TreeNode(v), node.left
node.right, node.right.right = TreeNode(v), node.right
return dummy.left
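# Added example (standard LeetCode 623 case): for root = [4,2,6,3,1,5], v = 1,
# d = 2 the result is [4,1,1,2,null,null,6,3,1,5]; each node at depth d-1 gets
# new value-v children that adopt its original left/right subtrees.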
| gpl-3.0 | -473,034,887,660,258,100 | 29.652174 | 81 | 0.506383 | false |
raultron/ivs_sim | python/old_experiments/test_conics.py | 1 | 1354 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 11 16:36:30 2017
@author: lracuna
"""
from vision.conics import Circle, Ellipse
from pose_sim import *
from vision.camera import *
from vision.plane import Plane
from vision.screen import Screen
from ippe import homo2d
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from Rt_matrix_from_euler_t import R_matrix_from_euler_t
from uniform_sphere import uniform_sphere
#############################################
## INITIALIZATIONS
#############################################
## CREATE A SIMULATED CAMERA
cam = Camera()
fx = fy = 800
cx = 640
cy = 480
cam.set_K(fx,fy,cx,cy)
cam.img_width = 1280
cam.img_height = 960
## DEFINE CAMERA POSE LOOKING STRAIGTH DOWN INTO THE PLANE MODEL
cam.set_R_axisAngle(1.0, 0.0, 0.0, np.deg2rad(130.0))
cam_world = np.array([0.0,0.0,1,1]).T
cam_t = np.dot(cam.R,-cam_world)
cam.set_t(cam_t[0], cam_t[1], cam_t[2])
cam.set_P()
H_cam = cam.homography_from_Rt()
c1 = Circle((0,0.8),r=0.05)
print c1.calculate_center()
#c1.contour()
c2 = Ellipse((0,0))
print c2.calculate_center()
#c2.contour()
c3 = c1.project(H_cam)
c3.contour(grid_size=100)
print c3.calculate_center()
print c3.major_axis_length()
point = np.array([0,0,0,1]).reshape(4,1)
center_circle = cam.project(point)
print center_circle | mit | -4,666,481,257,851,473,000 | 21.583333 | 64 | 0.662482 | false |
IEEERobotics/high-level | qwe/vision/preprocessing.py | 1 | 10109 | """Image preprocessing tools."""
from math import sqrt, hypot
import numpy as np
import cv2
from time import sleep
from util import Enum
from base import FrameProcessor
from main import main
import commands
from colorfilter import HSVFilter
blueFilter = HSVFilter(np.array([108, 70, 75], np.uint8), np.array([122, 255, 255], np.uint8))
brownFilter = HSVFilter(np.array([178, 128, 32], np.uint8), np.array([11, 255, 100], np.uint8))
whiteFilter = HSVFilter(np.array([0, 0, 100], np.uint8), np.array([179, 64, 255], np.uint8))
yellowFilter = HSVFilter(np.array([15, 100, 75], np.uint8), np.array([50, 255, 255], np.uint8))
greenFilter = HSVFilter(np.array([35, 70, 32], np.uint8), np.array([50, 255, 150], np.uint8))
redFilter = HSVFilter(np.array([175, 100, 75], np.uint8), np.array([15, 255, 255], np.uint8))
class Blob:
colorBlue = (255, 0, 0)
colorDarkBlue = (128, 64, 64)
def __init__(self, tag, area, bbox, rect):
self.tag = tag
self.area = area
self.bbox = bbox
self.rect = rect
self.center = (int(self.rect[0][0]), int(self.rect[0][1])) # int precision is all we need
self.size = self.rect[1]
self.angle = self.rect[2]
def draw(self, imageOut):
cv2.rectangle(imageOut, (self.bbox[0], self.bbox[1]), (self.bbox[0] + self.bbox[2], self.bbox[1] + self.bbox[3]), self.colorBlue, 2)
def __str__(self):
return "<Blob {tag} at ({center[0]:.2f}, {center[1]:.2f}), size: ({size[0]:.2f}, {size[1]:.2f}, area: {area:0.2f})>".format(tag=self.tag, center=self.center, size=self.size, area=self.area)
class ColorPaletteDetector(FrameProcessor):
"""Tries to find a known color palette in camera view."""
minBlobArea = 1000
maxBlobArea = 6000
paletteBBox = (0, 400, 640, 80) # x, y, w, h
markerTag0 = "blue"
markerTag1 = "red"
def __init__(self, options):
FrameProcessor.__init__(self, options)
def initialize(self, imageIn, timeNow):
self.image = imageIn
self.imageSize = (self.image.shape[1], self.image.shape[0]) # (width, height)
self.imageCenter = (self.imageSize[0] / 2, self.imageSize[1] / 2) # (x, y)
self.imageOut = None
self.active = True
self.filterBank = dict(blue=blueFilter, brown=brownFilter, white=whiteFilter, yellow=yellowFilter, green=greenFilter, red=redFilter)
self.masks = { }
self.morphOpenKernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
self.blobs = list()
self.paletteCenter = (320, 456)
self.midPoint = (320, 456)
self.cameraOffset = None
def process(self, imageIn, timeNow):
self.image = imageIn
if self.gui: self.imageOut = self.image.copy()
# * Initialize blobs
self.blobs = list()
# * Cut out expected palette area
pbx, pby, pbw, pbh = self.paletteBBox
self.imagePalette = self.image[pby:pby + pbh, pbx:pbx + pbw]
if self.gui:
cv2.imshow("Color palette", self.imagePalette)
#self.imagePaletteOut = self.imageOut[pby:pby + pbh, pbx:pbx + pbw]
cv2.rectangle(self.imageOut, (pbx, pby), (pbx + pbw, pby + pbh), (255, 0, 0))
# * Get HSV
self.imagePaletteHSV = cv2.cvtColor(self.imagePalette, cv2.COLOR_BGR2HSV)
# * Apply filters
for filterName, colorFilter in self.filterBank.iteritems():
mask = colorFilter.apply(self.imagePaletteHSV)
# ** Smooth out mask and remove noise
mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, self.morphOpenKernel, iterations=2)
self.masks[filterName] = mask
if self.gui: cv2.imshow(filterName, self.masks[filterName])
# ** Detect contours in mask
contours, _ = cv2.findContours(mask, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE, offset=(pbx, pby))
if len(contours) > 0:
#self.logd("process", "[%.2f] %d %s contour(s)" % (timeNow, len(contours), maskName)) # report contours found
#if self.gui and self.debug: cv2.drawContours(self.imageOut, contours, -1, (0, 255, 255)) # draw all contours found
# *** Walk through list of contours
for contour in contours:
contour = contour.astype(np.int32) # convert contours to 32-bit int for each individual contour [Pandaboard OpenCV bug workaround]
# **** Filter out ones that are too small or too big
area = cv2.contourArea(contour)
if area < self.minBlobArea or area > self.maxBlobArea: continue
# **** Create blob
bbox = cv2.boundingRect(contour)
rect = cv2.minAreaRect(contour)
blob = Blob(filterName, area, bbox, rect)
self.blobs.append(blob)
blob.draw(self.imageOut)
# * Report blobs found
#if self.blobs:
# self.logd("process", "{0} blobs found:\n{1}".format(len(self.blobs), "\n".join((str(blob) for blob in self.blobs))))
# * Get a handle on marker blobs (and make sure their relative positions are as expected)
marker0 = self.getNearestBlob(self.markerTag0)
marker1 = self.getNearestBlob(self.markerTag1)
#self.logd("process", "Marker 0: {0}".format(marker0))
#self.logd("process", "Marker 1: {0}".format(marker1))
# * Compute midpoint and report X, Y offset
if marker0 is not None and marker1 is not None:
self.midPoint = (int((marker0.center[0] + marker1.center[0]) / 2), int((marker0.center[1] + marker1.center[1]) / 2))
self.cameraOffset = (self.midPoint[0] - self.paletteCenter[0], self.midPoint[1] - self.paletteCenter[1])
#self.logd("process", "Mid-point: {0}, camera offset: {1}".format(self.midPoint, self.cameraOffset))
if self.gui:
cv2.line(self.imageOut, marker0.center, marker1.center, (255, 0, 255), 2)
cv2.circle(self.imageOut, self.midPoint, 5, (0, 255, 0), -1)
else:
self.cameraOffset = None
#self.loge("process", "Couldn't determine mid-point and camera offset!")
# * TODO Compute average color of brown and green patches to calibrate
return True, self.imageOut
def getBlobs(self, tag=None):
"""Return a generator/list for blobs that match given tag (or all, if not given)."""
if tag is not None:
return (blob for blob in self.blobs if blob.tag == tag)
else:
self.blobs
def getNearestBlob(self, tag=None, point=None, maxDist=np.inf, minArea=minBlobArea):
if point is None: point = self.imageCenter
minDist = maxDist
nearestBlob = None
for blob in self.getBlobs(tag):
dist = hypot(blob.center[0] - point[0], blob.center[1] - point[1])
if dist < minDist:
minDist = dist
nearestBlob = blob
return nearestBlob
class ExposureNormalizer(FrameProcessor):
"""Obtains a normalized image by averaging two images taken at different exposures."""
State = Enum(['NONE', 'START', 'SAMPLE_LOW', 'SAMPLE_HIGH', 'DONE'])
sample_time_low = 2.0 # secs; when to sample low-exposure image (rel. to start)
sample_time_high = 4.0 # secs; when to sample high-exposure image (rel. to start)
exposure_low = 1
exposure_high = 5
exposure_normal = 3
loop_delay = None # duration to sleep for every iteration (not required for camera); set to None to prevent sleeping
def __init__(self, options):
FrameProcessor.__init__(self, options)
if self.debug:
self.loop_delay = 0.025 # set some delay when debugging, in case we are running a video
self.state = ExposureNormalizer.State.NONE # set to NONE here, call start() to run through once
def initialize(self, imageIn, timeNow):
self.image = imageIn
self.timeStart = self.timeDone = timeNow
self.imageLow = self.imageHigh = self.imageOut = self.image # use first given frame as default
self.active = True
def process(self, imageIn, timeNow):
self.image = imageIn
if self.state is ExposureNormalizer.State.START:
self.timeStart = timeNow
self.imageOut = self.image # default output, till we get a better image
self.setExposure(self.exposure_low) # set exposure to low
self.state = ExposureNormalizer.State.SAMPLE_LOW # [transition]
elif self.state is ExposureNormalizer.State.SAMPLE_LOW:
if (timeNow - self.timeStart) >= self.sample_time_low:
self.imageLow = self.image # save low-exposure image
self.imageOut = self.image # update output with current image (still not the average)
self.setExposure(self.exposure_high) # set exposure to high
self.state = ExposureNormalizer.State.SAMPLE_HIGH # [transition]
elif self.state is ExposureNormalizer.State.SAMPLE_HIGH:
if (timeNow - self.timeStart) >= self.sample_time_high:
self.imageHigh = self.image # save high-exposure image
self.imageOut = (self.imageLow / 2) + (self.imageHigh / 2) # compute average image
self.timeDone = timeNow # so that we can tell whether the avg. image is stale or not
self.setExposure(self.exposure_normal) # set exposure back to normal
self.state = ExposureNormalizer.State.DONE # [transition]
self.logd("process", "[DONE]")
if self.loop_delay is not None:
sleep(self.loop_delay)
return True, self.imageOut # always return imageOut, initially the same as input image at start()
def onKeyPress(self, key, keyChar):
if keyChar == 's': # press 's' to start
self.start()
return True
def start(self):
self.logi("start", "Starting exposure-based normalization...")
self.state = ExposureNormalizer.State.START
def setExposure(self, value=3):
status, output = commands.getstatusoutput("uvcdynctrl -s \"Exposure (Absolute)\" {value}".format(value=value))
self.logd("setExposure", "[{state}] value: {value}, status: {status}, output:\n'''\n{output}\n'''".format(state=ExposureNormalizer.State.toString(self.state), value=value, status=status, output=output))
return (status == 0) # return whether successful or not
if __name__ == "__main__":
options = { 'gui': True, 'debug': True }
#main(ExposureNormalizer(options=options)) # run an ExposureNormalizer instance using main.main()
main(ColorPaletteDetector(options=options))
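# Added illustration (not part of the module): probing one of the HSV filters on
# a single frame, assuming HSVFilter.apply(hsv_image) returns a binary mask as
# used in ColorPaletteDetector.process above.
#
#   frame = cv2.imread('frame.png')
#   hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
#   mask = blueFilter.apply(hsv)
#   cv2.imshow('blue mask', mask)
#   cv2.waitKey(0)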
| bsd-2-clause | -4,807,171,542,224,210,000 | 43.144105 | 206 | 0.659017 | false |
OrhanOdabasi/PixPack | pixpack/grouping.py | 1 | 1317 | #!/usr/bin/env python3
# grouping algorithms for images and videos
# PixPack Photo Organiser
import re
import os
def group_by_dates(date_meta, destination, pattern='ym'):
# generate folder name by using basic date informations
# available patterns: yr=2017, ym=2017-03, ss=summer
# exif date format -> 2006:03:25 21:34:24
# return dest_dir
if date_meta == "NOT_FOUND":
return os.path.join(destination, "NOT_FOUND")
ymd_format = re.match(r"(\d{4}):(\d{2}):(\d{2}) (\d{2}):(\d{2}):(\d{2})", date_meta)
year = ymd_format.group(1)
month = ymd_format.group(2)
day = ymd_format.group(3)
hour = ymd_format.group(4)
minute = ymd_format.group(5)
second = ymd_format.group(6)
# group by year
if pattern.lower() == 'yr':
dest_folder_name = year
elif pattern.lower() == 'ym':
dest_folder_name = "{year}-{month}".format(year=year, month=month)
elif pattern.lower() == 'ss':
if int(month) in (12, 1, 2):
dest_folder_name = "Winter"
elif int(month) in (3, 4, 5):
dest_folder_name = "Spring"
elif int(month) in (6, 7, 8):
dest_folder_name = "Summer"
elif int(month) in (9, 10, 11):
dest_folder_name = "Fall"
return os.path.join(destination, dest_folder_name)
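# Added example (hypothetical values): an EXIF date of '2017:07:14 10:03:22'
# maps to <destination>/Summer with pattern 'ss', to <destination>/2017-07 with
# 'ym', and a date_meta of 'NOT_FOUND' always maps to <destination>/NOT_FOUND.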
| mit | 725,687,555,921,782,800 | 35.583333 | 88 | 0.593774 | false |
nischalsheth/contrail-controller | src/analytics/contrail-topology/contrail_topology/controller.py | 1 | 21899 | #
# Copyright (c) 2015 Juniper Networks, Inc. All rights reserved.
#
from analytic_client import AnalyticApiClient
import time, socket, os
from topology_uve import LinkUve
import gevent
from gevent.lock import Semaphore
from opserver.consistent_schdlr import ConsistentScheduler
from topology_config_handler import TopologyConfigHandler
import traceback
import ConfigParser
import signal
import random
import hashlib
from sandesh.topology_info.ttypes import TopologyInfo, TopologyUVE
from sandesh.link.ttypes import RemoteType, RemoteIfInfo, VRouterL2IfInfo,\
VRouterL2IfUVE
class PRouter(object):
def __init__(self, name, data):
self.name = name
self.data = data
class Controller(object):
def __init__(self, config):
self._config = config
self._hostname = socket.gethostname()
self.analytic_api = AnalyticApiClient(self._config)
self._config.random_collectors = self._config.collectors()
self._chksum = ""
if self._config.collectors():
self._chksum = hashlib.md5("".join(self._config.collectors())).hexdigest()
self._config.random_collectors = random.sample(self._config.collectors(), \
len(self._config.collectors()))
self.uve = LinkUve(self._config)
self._sandesh = self.uve.sandesh_instance()
self._logger = self.uve.logger()
self.sleep_time()
self._sem = Semaphore()
self._members = None
self._partitions = None
self._prouters = {}
self._vrouter_l2ifs = {}
self._old_vrouter_l2ifs = {}
self._config_handler = TopologyConfigHandler(self._sandesh,
self._config.rabbitmq_params(), self._config.cassandra_params())
self.constnt_schdlr = ConsistentScheduler(self.uve._moduleid,
zookeeper=self._config.zookeeper_server(),
delete_hndlr=self._del_uves, logger=self._logger,
cluster_id=self._config.cluster_id())
def sleep_time(self, newtime=None):
if newtime:
self._sleep_time = newtime
else:
self._sleep_time = self._config.frequency()
return self._sleep_time
def get_vrouters(self):
self.analytic_api.get_vrouters(True)
self.vrouters = {}
self.vrouter_ips = {}
self.vrouter_macs = {}
for vr in self.analytic_api.list_vrouters():
cfilt = ['VrouterAgent:phy_if', 'VrouterAgent:self_ip_list',
'VRouterL2IfInfo']
try:
d = self.analytic_api.get_vrouter(vr, ','.join(cfilt))
except Exception as e:
traceback.print_exc()
print str(e)
d = {}
if 'VrouterAgent' not in d or\
'self_ip_list' not in d['VrouterAgent'] or\
'phy_if' not in d['VrouterAgent']:
continue
self.vrouters[vr] = {'ips': d['VrouterAgent']['self_ip_list'],
'if': d['VrouterAgent']['phy_if']
}
try:
self.vrouters[vr]['l2_if'] = d['VRouterL2IfInfo']['if_info']
except KeyError:
pass
for ip in d['VrouterAgent']['self_ip_list']:
self.vrouter_ips[ip] = vr # index
for intf in d['VrouterAgent']['phy_if']:
try:
self.vrouter_macs[intf['mac_address']] = {}
self.vrouter_macs[intf['mac_address']]['vrname'] = vr
self.vrouter_macs[intf['mac_address']]['ifname'] = intf['name']
except:
continue
def get_prouters(self):
self.analytic_api.get_prouters(True)
self.prouters = []
for pr in self.analytic_api.list_prouters():
try:
data = self.analytic_api.get_prouter(pr, 'PRouterEntry')
if data:
self.prouters.append(PRouter(pr, data))
except Exception as e:
traceback.print_exc()
print str(e)
def _is_linkup(self, prouter, ifindex):
if 'PRouterEntry' in prouter.data and \
'ifIndexOperStatusTable' in prouter.data['PRouterEntry']:
status = filter(lambda x: x['ifIndex'] == ifindex,
prouter.data['PRouterEntry']['ifIndexOperStatusTable'])
if status and status[0]['ifOperStatus'] == 1:
return True
return False
def _add_link(self, prouter, remote_system_name, local_interface_name,
remote_interface_name, local_interface_index,
remote_interface_index, link_type):
# If the remote_system_name or remote_interface_name is None, do not
# add this link in the link_table.
if not all([remote_system_name, remote_interface_name]):
return False
d = dict(remote_system_name=remote_system_name,
local_interface_name=local_interface_name,
remote_interface_name=remote_interface_name,
local_interface_index=local_interface_index,
remote_interface_index=remote_interface_index,
type=link_type)
if link_type == RemoteType.VRouter:
l2_if = self.vrouters[remote_system_name].get('l2_if')
if l2_if and remote_interface_name in l2_if:
if l2_if[remote_interface_name]['remote_system_name'] != \
prouter.name:
return False
if self._is_linkup(prouter, local_interface_index):
if prouter.name in self.link:
self.link[prouter.name].append(d)
else:
self.link[prouter.name] = [d]
return True
return False
def _chk_lnk(self, pre, index):
if 'ifIndexOperStatusTable' in pre:
for d in pre['ifIndexOperStatusTable']:
if d['ifIndex'] == index:
return d['ifOperStatus'] == 1
return False
def _send_topology_uve(self, members, partitions, prouters):
topology_info = TopologyInfo()
if self._members != members:
self._members = members
topology_info.members = members
if self._partitions != partitions:
self._partitions = partitions
topology_info.partitions = partitions
new_prouters = {p.name: p for p in prouters}
if self._prouters.keys() != new_prouters.keys():
deleted_prouters = [v for p, v in self._prouters.iteritems() \
if p not in new_prouters]
self._del_uves(deleted_prouters)
self._prouters = new_prouters
topology_info.prouters = self._prouters.keys()
if topology_info != TopologyInfo():
topology_info.name = self._hostname
TopologyUVE(data=topology_info).send()
# end _send_topology_uve
def bms_links(self, prouter, ifm):
try:
for lif_fqname, lif in self._config_handler.get_logical_interfaces():
if prouter.name in lif_fqname:
for vmif in lif.obj.get_virtual_machine_interface_refs():
vmi = self._config_handler.\
get_virtual_machine_interface(uuid=vmif.uuid)
if not vmi:
continue
vmi = vmi.obj
for mc in vmi.virtual_machine_interface_mac_addresses.\
get_mac_address():
ifi = [k for k in ifm if ifm[k] in lif_fqname][0]
rsys = '-'.join(['bms', 'host'] + mc.split(':'))
self._add_link(prouter=prouter,
remote_system_name=rsys,
local_interface_name=lif.obj.fq_name[-1],
                                   remote_interface_name='em0',  # remote interface name unknown for BMS hosts; 'em0' is a placeholder
local_interface_index=ifi,
                                   remote_interface_index=1,  # remote ifindex unknown; TODO: fix
link_type=RemoteType.BMS)
except:
traceback.print_exc()
def compute(self):
self.link = {}
self._old_vrouter_l2ifs = self._vrouter_l2ifs
self._vrouter_l2ifs = {}
for prouter in self.constnt_schdlr.work_items():
pr, d = prouter.name, prouter.data
if 'PRouterEntry' not in d or 'ifTable' not in d['PRouterEntry']:
continue
self.link[pr] = []
lldp_ints = []
ifm = dict(map(lambda x: (x['ifIndex'], x['ifDescr']),
d['PRouterEntry']['ifTable']))
self.bms_links(prouter, ifm)
for pl in d['PRouterEntry']['lldpTable']['lldpRemoteSystemsData']:
if d['PRouterEntry']['lldpTable']['lldpLocalSystemData'][
'lldpLocSysDesc'].startswith('Cisco'):
loc_pname = [x for x in d['PRouterEntry']['lldpTable'][
'lldpLocalSystemData']['lldpLocPortTable'] if x[
'lldpLocPortNum'] == pl['lldpRemLocalPortNum']][
0]['lldpLocPortDesc']
pl['lldpRemLocalPortNum'] = [k for k in ifm if ifm[
k] == loc_pname][0]
elif d['PRouterEntry']['lldpTable']['lldpLocalSystemData'][
'lldpLocSysDesc'].startswith('Arista'):
loc_pname = [x for x in d['PRouterEntry']['lldpTable'][
'lldpLocalSystemData']['lldpLocPortTable'] if x[
'lldpLocPortNum'] == pl['lldpRemLocalPortNum']][
0]['lldpLocPortId']
pl['lldpRemLocalPortNum'] = [k for k in ifm if ifm[
k] == loc_pname][0]
if pl['lldpRemLocalPortNum'] in ifm and self._chk_lnk(
d['PRouterEntry'], pl['lldpRemLocalPortNum']):
if pl['lldpRemPortId'].isdigit():
rii = int(pl['lldpRemPortId'])
else:
try:
if d['PRouterEntry']['lldpTable']['lldpLocalSystemData'][
'lldpLocSysDesc'].startswith('Arista'):
rpn = filter(lambda y: y['lldpLocPortId'] == pl[
'lldpRemPortId'], [
x for x in self.prouters if x.name == pl[
'lldpRemSysName']][0].data['PRouterEntry'][
'lldpTable']['lldpLocalSystemData'][
'lldpLocPortTable'])[0]['lldpLocPortId']
else:
rpn = filter(lambda y: y['lldpLocPortId'] == pl[
'lldpRemPortId'], [
x for x in self.prouters if x.name == pl[
'lldpRemSysName']][0].data['PRouterEntry'][
'lldpTable']['lldpLocalSystemData'][
'lldpLocPortTable'])[0]['lldpLocPortDesc']
rii = filter(lambda y: y['ifDescr'] == rpn,
[ x for x in self.prouters \
if x.name == pl['lldpRemSysName']][0].data[
'PRouterEntry']['ifTable'])[0]['ifIndex']
except:
rii = 0
if d['PRouterEntry']['lldpTable']['lldpLocalSystemData'][
'lldpLocSysDesc'].startswith('Arista'):
if self._add_link(
prouter=prouter,
remote_system_name=pl['lldpRemSysName'],
local_interface_name=ifm[pl['lldpRemLocalPortNum']],
remote_interface_name=pl['lldpRemPortId'],
local_interface_index=pl['lldpRemLocalPortNum'],
remote_interface_index=rii,
link_type=RemoteType.PRouter):
lldp_ints.append(ifm[pl['lldpRemLocalPortNum']])
else:
if self._add_link(
prouter=prouter,
remote_system_name=pl['lldpRemSysName'],
local_interface_name=ifm[pl['lldpRemLocalPortNum']],
remote_interface_name=pl['lldpRemPortDesc'],
local_interface_index=pl['lldpRemLocalPortNum'],
remote_interface_index=rii,
link_type=RemoteType.PRouter):
lldp_ints.append(ifm[pl['lldpRemLocalPortNum']])
vrouter_l2ifs = {}
if 'fdbPortIfIndexTable' in d['PRouterEntry']:
dot1d2snmp = map (lambda x: (
x['dot1dBasePortIfIndex'],
x['snmpIfIndex']),
d['PRouterEntry']['fdbPortIfIndexTable'])
dot1d2snmp_dict = dict(dot1d2snmp)
if 'fdbPortTable' in d['PRouterEntry']:
for mac_entry in d['PRouterEntry']['fdbPortTable']:
if mac_entry['mac'] in self.vrouter_macs:
vrouter_mac_entry = self.vrouter_macs[mac_entry['mac']]
vr_name = vrouter_mac_entry['vrname']
vr_ifname = vrouter_mac_entry['ifname']
fdbport = mac_entry['dot1dBasePortIfIndex']
try:
snmpport = dot1d2snmp_dict[fdbport]
ifname = ifm[snmpport]
except:
continue
is_lldp_int = any(ifname == lldp_int for lldp_int in lldp_ints)
if is_lldp_int:
continue
if self._add_link(
prouter=prouter,
remote_system_name=vr_name,
local_interface_name=ifname,
remote_interface_name=vr_ifname,
local_interface_index=snmpport,
                                remote_interface_index=1,  # remote ifindex unknown; TODO: fix
link_type=RemoteType.VRouter):
if vr_name not in vrouter_l2ifs:
vrouter_l2ifs[vr_name] = {}
vrouter_l2ifs[vr_name][vr_ifname] = {
'remote_system_name': prouter.name,
'remote_if_name': ifname,
}
for arp in d['PRouterEntry']['arpTable']:
if arp['ip'] in self.vrouter_ips:
if arp['mac'] in map(lambda x: x['mac_address'],
self.vrouters[self.vrouter_ips[arp['ip']]]['if']):
vr_name = self.vrouter_macs[arp['mac']]['vrname']
vr_ifname = self.vrouter_macs[arp['mac']]['ifname']
try:
if vrouter_l2ifs[vr_name][vr_ifname]\
['remote_system_name'] == prouter.name:
del vrouter_l2ifs[vr_name][vr_ifname]
if not vrouter_l2ifs[vr_name]:
del vrouter_l2ifs[vr_name]
continue
except KeyError:
pass
if ifm[arp['localIfIndex']].startswith('vlan'):
continue
if ifm[arp['localIfIndex']].startswith('irb'):
continue
is_lldp_int = any(ifm[arp['localIfIndex']] == lldp_int for lldp_int in lldp_ints)
if is_lldp_int:
continue
if self._add_link(
prouter=prouter,
remote_system_name=vr_name,
local_interface_name=ifm[arp['localIfIndex']],
remote_interface_name=vr_ifname,
local_interface_index=arp['localIfIndex'],
                                remote_interface_index=1,  # remote ifindex unknown; TODO: fix
link_type=RemoteType.VRouter):
pass
for vr, intf in vrouter_l2ifs.iteritems():
if vr in self._vrouter_l2ifs:
self._vrouter_l2ifs[vr].update(vrouter_l2ifs[vr])
else:
self._vrouter_l2ifs[vr] = intf
def send_uve(self):
old_vrs = set(self._old_vrouter_l2ifs.keys())
new_vrs = set(self._vrouter_l2ifs.keys())
del_vrs = old_vrs - new_vrs
add_vrs = new_vrs - old_vrs
same_vrs = old_vrs.intersection(new_vrs)
for vr in del_vrs:
vr_l2info = VRouterL2IfInfo(name=vr, deleted=True)
VRouterL2IfUVE(data=vr_l2info).send()
for vr in add_vrs:
if_info = {}
for vrif, remif_info in self._vrouter_l2ifs[vr].iteritems():
if_info[vrif] = RemoteIfInfo(remif_info['remote_system_name'],
remif_info['remote_if_name'])
vr_l2info = VRouterL2IfInfo(name=vr, if_info=if_info)
VRouterL2IfUVE(data=vr_l2info).send()
for vr in same_vrs:
if self._vrouter_l2ifs[vr] != self._old_vrouter_l2ifs[vr]:
if_info = {}
for vrif, remif_info in self._vrouter_l2ifs[vr].iteritems():
if_info[vrif] = RemoteIfInfo(
remif_info['remote_system_name'],
remif_info['remote_if_name'])
vr_l2info = VRouterL2IfInfo(name=vr, if_info=if_info)
VRouterL2IfUVE(data=vr_l2info).send()
self.uve.send(self.link)
def switcher(self):
gevent.sleep(0)
def scan_data(self):
t = []
t.append(gevent.spawn(self.get_vrouters))
t.append(gevent.spawn(self.get_prouters))
gevent.joinall(t)
def _del_uves(self, prouters):
with self._sem:
for prouter in prouters:
self.uve.delete(prouter.name)
def sighup_handler(self):
if self._config._args.conf_file:
config = ConfigParser.SafeConfigParser()
config.read(self._config._args.conf_file)
if 'DEFAULTS' in config.sections():
try:
collectors = config.get('DEFAULTS', 'collectors')
if type(collectors) is str:
collectors = collectors.split()
new_chksum = hashlib.md5("".join(collectors)).hexdigest()
if new_chksum != self._chksum:
self._chksum = new_chksum
self._config.random_collectors = \
random.sample(collectors, len(collectors))
                    # Reconnect to achieve load-balancing irrespective of the list order
self.uve.sandesh_reconfig_collectors(
self._config.random_collectors)
except ConfigParser.NoOptionError as e:
pass
# end sighup_handler
def _uve_scanner(self):
while True:
self.scan_data()
if self.constnt_schdlr.schedule(self.prouters):
members = self.constnt_schdlr.members()
partitions = self.constnt_schdlr.partitions()
self._send_topology_uve(members, partitions,
self.constnt_schdlr.work_items())
try:
with self._sem:
self.compute()
self.send_uve()
except Exception as e:
traceback.print_exc()
print str(e)
gevent.sleep(self._sleep_time)
else:
gevent.sleep(1)
# end _uve_scanner
def run(self):
""" @sighup
SIGHUP handler to indicate configuration changes
"""
gevent.signal(signal.SIGHUP, self.sighup_handler)
self.gevs = [
gevent.spawn(self._config_handler.start),
gevent.spawn(self._uve_scanner)
]
try:
gevent.joinall(self.gevs)
except KeyboardInterrupt:
self._logger.error('Exiting on ^C')
except gevent.GreenletExit:
self._logger.error('Exiting on gevent-kill')
finally:
self._logger.error('stopping everything!')
self.stop()
# end run
def stop(self):
self.uve.stop()
l = len(self.gevs)
for i in range(0, l):
self._logger.error('killing %d of %d' % (i+1, l))
self.gevs[0].kill()
self._logger.error('joining %d of %d' % (i+1, l))
self.gevs[0].join()
self._logger.error('stopped %d of %d' % (i+1, l))
self.gevs.pop(0)
self.constnt_schdlr.finish()
# end stop
| apache-2.0 | 2,516,565,120,198,290,400 | 46.196121 | 105 | 0.481848 | false |
Kozea/Dyko | kalamar/tests/access_point/test_alchemy.py | 1 | 3628 | # -*- coding: utf-8 -*-
# This file is part of Dyko
# Copyright © 2008-2010 Kozea
#
# This library is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Kalamar. If not, see <http://www.gnu.org/licenses/>.
"""
Alchemy test.
Test the Alchemy backend against an SQLite database.
"""
import unittest
from nose.tools import eq_
from kalamar.access_point.alchemy import AlchemyProperty, Alchemy
from kalamar.site import Site
from ..common import make_site, run_common, require
def make_ap():
"""Create a simple Alchemy access point."""
id_property = AlchemyProperty(int, column_name="id")
name = AlchemyProperty(unicode)
auto = AlchemyProperty(unicode, auto=True)
access_point = Alchemy(
"sqlite:///", "test", {"id": id_property, "name": name, "auto": auto},
["id"], True)
return access_point
@require("sqlalchemy")
class TestAlchemy(unittest.TestCase):
"""Class defining some simple tests on an Alchemy access point."""
def test_search(self):
"""Test a simple search on the access point."""
items = list(self.site.search("test"))
eq_(len(items), 2)
items = list(self.site.search("test", {"id": 1}))
eq_(len(items), 1)
item = items[0]
eq_(item["id"], 1)
eq_(item["name"], "Test")
def test_view(self):
"""Test a simple view on the access point."""
items = list(
self.site.view("test", {"truc": "id", "name": "name"}, {}))
eq_(len(items), 2)
for item in items:
assert "truc" in item.keys() and "name" in item.keys()
items = list(
self.site.view("test", {"truc": "id", "name": "name"}, {"id": 1}))
eq_(len(items), 1)
def test_update(self):
"""Assert that an item can be updated in the DB."""
item = self.site.open("test", {"id": 1})
item["name"] = "updated"
item.save()
item = self.site.open("test", {"id": 1})
eq_(item["name"], "updated")
# camelCase function names come from unittest
# pylint: disable=C0103
def setUp(self):
self.site = Site()
self.site.register("test", make_ap())
self.items = []
item = self.site.create("test", {"id": 1, "name": "Test"})
self.items.append(item)
item.save()
item = self.site.create("test", {"id": 2, "name": "Test2"})
self.items.append(item)
item.save()
def tearDown(self):
for item in self.items:
item.delete()
for access_point in self.site.access_points.values():
access_point._table.drop()
Alchemy.__metadatas = {}
# pylint: enable=C0103
# Common tests
def runner(test):
"""Test runner for ``test``."""
access_point = make_ap()
try:
site = make_site(access_point,
fill=not hasattr(test, "nofill"))
test(site)
finally:
access_point._table.drop()
Alchemy.__metadatas = {}
@require("sqlalchemy")
@run_common
def test_alchemy_common():
"""Define a custom test runner for the common tests."""
return None, runner, "Alchemy"
| gpl-3.0 | -3,399,465,191,261,160,400 | 30.267241 | 78 | 0.605735 | false |
sander76/home-assistant | homeassistant/components/tod/binary_sensor.py | 1 | 8877 | """Support for representing current time of the day as binary sensors."""
from datetime import datetime, timedelta
import logging
import pytz
import voluptuous as vol
from homeassistant.components.binary_sensor import PLATFORM_SCHEMA, BinarySensorEntity
from homeassistant.const import (
CONF_AFTER,
CONF_BEFORE,
CONF_NAME,
SUN_EVENT_SUNRISE,
SUN_EVENT_SUNSET,
)
from homeassistant.core import callback
from homeassistant.helpers import config_validation as cv, event
from homeassistant.helpers.sun import get_astral_event_date, get_astral_event_next
from homeassistant.util import dt as dt_util
_LOGGER = logging.getLogger(__name__)
ATTR_AFTER = "after"
ATTR_BEFORE = "before"
ATTR_NEXT_UPDATE = "next_update"
CONF_AFTER_OFFSET = "after_offset"
CONF_BEFORE_OFFSET = "before_offset"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_AFTER): vol.Any(cv.time, vol.All(vol.Lower, cv.sun_event)),
vol.Required(CONF_BEFORE): vol.Any(cv.time, vol.All(vol.Lower, cv.sun_event)),
vol.Required(CONF_NAME): cv.string,
vol.Optional(CONF_AFTER_OFFSET, default=timedelta(0)): cv.time_period,
vol.Optional(CONF_BEFORE_OFFSET, default=timedelta(0)): cv.time_period,
}
)
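# Illustrative YAML configuration for this platform (not part of the original
# source; it assumes the standard Home Assistant constants CONF_NAME,
# CONF_AFTER and CONF_BEFORE resolve to "name", "after" and "before"):
#
#   binary_sensor:
#     - platform: tod
#       name: Night
#       after: sunset
#       before: sunrise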
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the ToD sensors."""
if hass.config.time_zone is None:
_LOGGER.error("Timezone is not set in Home Assistant configuration")
return
after = config[CONF_AFTER]
after_offset = config[CONF_AFTER_OFFSET]
before = config[CONF_BEFORE]
before_offset = config[CONF_BEFORE_OFFSET]
name = config[CONF_NAME]
sensor = TodSensor(name, after, after_offset, before, before_offset)
async_add_entities([sensor])
def is_sun_event(sun_event):
"""Return true if event is sun event not time."""
return sun_event in (SUN_EVENT_SUNRISE, SUN_EVENT_SUNSET)
class TodSensor(BinarySensorEntity):
"""Time of the Day Sensor."""
def __init__(self, name, after, after_offset, before, before_offset):
"""Init the ToD Sensor..."""
self._name = name
self._time_before = self._time_after = self._next_update = None
self._after_offset = after_offset
self._before_offset = before_offset
self._before = before
self._after = after
@property
def should_poll(self):
"""Sensor does not need to be polled."""
return False
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def after(self):
"""Return the timestamp for the beginning of the period."""
return self._time_after
@property
def before(self):
"""Return the timestamp for the end of the period."""
return self._time_before
@property
def is_on(self):
"""Return True is sensor is on."""
if self.after < self.before:
return self.after <= self.current_datetime < self.before
return False
@property
def current_datetime(self):
"""Return local current datetime according to hass configuration."""
return dt_util.utcnow()
@property
def next_update(self):
"""Return the next update point in the UTC time."""
return self._next_update
@property
def extra_state_attributes(self):
"""Return the state attributes of the sensor."""
return {
ATTR_AFTER: self.after.astimezone(self.hass.config.time_zone).isoformat(),
ATTR_BEFORE: self.before.astimezone(self.hass.config.time_zone).isoformat(),
ATTR_NEXT_UPDATE: self.next_update.astimezone(
self.hass.config.time_zone
).isoformat(),
}
def _naive_time_to_utc_datetime(self, naive_time):
"""Convert naive time from config to utc_datetime with current day."""
# get the current local date from utc time
current_local_date = self.current_datetime.astimezone(
self.hass.config.time_zone
).date()
        # calculate the utc datetime corresponding to that local time
utc_datetime = self.hass.config.time_zone.localize(
datetime.combine(current_local_date, naive_time)
).astimezone(tz=pytz.UTC)
return utc_datetime
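    # Illustrative example (hypothetical values, not from the original source):
    # with Home Assistant configured for Europe/Berlin and naive_time set to
    # datetime.time(6, 0), a call on 2021-06-01 returns roughly
    # datetime(2021, 6, 1, 4, 0, tzinfo=pytz.UTC), i.e. the local 06:00 of the
    # current local date expressed in UTC.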
def _calculate_initial_boudary_time(self):
"""Calculate internal absolute time boundaries."""
nowutc = self.current_datetime
# If after value is a sun event instead of absolute time
if is_sun_event(self._after):
# Calculate the today's event utc time or
# if not available take next
after_event_date = get_astral_event_date(
self.hass, self._after, nowutc
) or get_astral_event_next(self.hass, self._after, nowutc)
else:
# Convert local time provided to UTC today
# datetime.combine(date, time, tzinfo) is not supported
# in python 3.5. The self._after is provided
# with hass configured TZ not system wide
after_event_date = self._naive_time_to_utc_datetime(self._after)
self._time_after = after_event_date
# If before value is a sun event instead of absolute time
if is_sun_event(self._before):
# Calculate the today's event utc time or if not available take
# next
before_event_date = get_astral_event_date(
self.hass, self._before, nowutc
) or get_astral_event_next(self.hass, self._before, nowutc)
# Before is earlier than after
if before_event_date < after_event_date:
# Take next day for before
before_event_date = get_astral_event_next(
self.hass, self._before, after_event_date
)
else:
# Convert local time provided to UTC today, see above
before_event_date = self._naive_time_to_utc_datetime(self._before)
# It is safe to add timedelta days=1 to UTC as there is no DST
if before_event_date < after_event_date + self._after_offset:
before_event_date += timedelta(days=1)
self._time_before = before_event_date
# We are calculating the _time_after value assuming that it will happen today
# But that is not always true, e.g. after 23:00, before 12:00 and now is 10:00
# If _time_before and _time_after are ahead of current_datetime:
# _time_before is set to 12:00 next day
# _time_after is set to 23:00 today
# current_datetime is set to 10:00 today
if (
self._time_after > self.current_datetime
and self._time_before > self.current_datetime + timedelta(days=1)
):
# remove one day from _time_before and _time_after
self._time_after -= timedelta(days=1)
self._time_before -= timedelta(days=1)
# Add offset to utc boundaries according to the configuration
self._time_after += self._after_offset
self._time_before += self._before_offset
def _turn_to_next_day(self):
"""Turn to to the next day."""
if is_sun_event(self._after):
self._time_after = get_astral_event_next(
self.hass, self._after, self._time_after - self._after_offset
)
self._time_after += self._after_offset
else:
# Offset is already there
self._time_after += timedelta(days=1)
if is_sun_event(self._before):
self._time_before = get_astral_event_next(
self.hass, self._before, self._time_before - self._before_offset
)
self._time_before += self._before_offset
else:
# Offset is already there
self._time_before += timedelta(days=1)
async def async_added_to_hass(self):
"""Call when entity about to be added to Home Assistant."""
self._calculate_initial_boudary_time()
self._calculate_next_update()
self._point_in_time_listener(dt_util.now())
def _calculate_next_update(self):
"""Datetime when the next update to the state."""
now = self.current_datetime
if now < self.after:
self._next_update = self.after
return
if now < self.before:
self._next_update = self.before
return
self._turn_to_next_day()
self._next_update = self.after
@callback
def _point_in_time_listener(self, now):
"""Run when the state of the sensor should be updated."""
self._calculate_next_update()
self.async_write_ha_state()
event.async_track_point_in_utc_time(
self.hass, self._point_in_time_listener, self.next_update
)
| apache-2.0 | 3,253,667,888,357,824,500 | 35.9875 | 88 | 0.618227 | false |
listen-lavender/dbskit | dbskit/util.py | 1 | 4457 | #!/usr/bin/env python
# coding=utf-8
def explain(desc):
d = {'$gt':' > ',
'$lt':' < ',
'$gte':' >= ',
'$lte':' <= ',
'$ne':' <> ',
'$in':' in ',
'$nin':' not in ',
'$or':' or ',
'$and':' and ',
'':' and ',
'$regex': ' like ',
'$mod':' mod ',
}
return d.get(desc)
def transfer(spec={}, grand=None, parent='', index=[], condition=[]):
"""
    Recursively translate a MongoDB-style query spec into a MySQL WHERE fragment.
"""
if isinstance(spec, list):
multi = []
for one in spec:
if isinstance(one, dict):
multi.append(transfer(one, grand=parent, parent='', index=index, condition=condition))
else:
index.append(grand)
condition.append({grand:one})
operator = explain(parent)
if multi:
return '(' + operator.join(multi) + ')'
else:
grand = 'id' if grand == '_id' else grand
if operator.strip() == 'mod':
return '(`' + grand + '`' + operator + ' %s =' + '%s)'
else:
return '(`' + grand + '` in (' + ','.join(['%s' for k in spec]) + '))'
elif isinstance(spec, dict):
multi = []
for k, v in spec.items():
if isinstance(v, dict):
multi.append(transfer(v, grand=parent, parent=k, index=index, condition=condition))
elif isinstance(v, list):
multi.append(transfer(v, grand=parent, parent=k, index=index, condition=condition))
else:
if k == '$options':
continue
operator = explain(k)
if operator is not None:
k = parent
operator = operator or '='
k = 'id' if k == '_id' else k
if v is None:
multi.append('(`' + k + '` is null)')
elif k == '' or k is None:
raise Exception("Empty string key or None key.")
# multi.append('("" = "")')
else:
index.append(k)
if ' like ' == operator:
if v.startswith('^'):
v = v[1:] + '%'
elif v.endswith('$'):
v = '%' + v[:-1]
else:
v = '%' + v + '%'
condition.append({k:v})
multi.append('(`' + k + '`' + operator + '%s' + ')')
return '(' + ' and '.join(multi) + ')' if multi else ''
else:
return ''
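# Illustrative usage sketch (added for clarity, not part of the original
# module); the spec value below is hypothetical.
def _transfer_example():
    index, condition = [], []
    where = transfer({'status': {'$ne': 0}}, grand=None, parent='',
                     index=index, condition=condition)
    # ``where`` is a parenthesised fragment such as "(((`status` <> %s)))",
    # while condition == [{'status': 0}] and index == ['status'] are filled
    # in for later parameter binding.
    return where, index, condition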
def rectify(cls, field, spec={}, grand=None, parent=''):
"""
    Recursively verify and coerce spec values against the model's field types.
"""
if isinstance(spec, list):
for index, one in enumerate(spec):
if isinstance(one, dict):
rectify(cls, field, one, grand=parent, parent='')
else:
if one is None:
continue
if grand in cls.__mappings__:
spec[index] = cls.__mappings__[grand].verify(one)
elif isinstance(spec, dict):
for k, v in spec.items():
if isinstance(v, dict):
rectify(cls, field, v, grand=parent, parent=k)
elif isinstance(v, list):
rectify(cls, field, v, grand=parent, parent=k)
else:
operator = explain(k)
if operator is not None:
f = parent
else:
f = k
if v is None:
continue
if f in cls.__mappings__:
spec[k] = cls.__mappings__[f].verify(spec[k])
else:
pass
if __name__ == '__main__':
spec = {'username':'[email protected]', 'password':'123456', 'status':{'$ne':0}}
index = []
condition = []
print transfer(spec, grand=None, parent='', index=index, condition=condition)
print condition
print index
spec = {'$or':[{'uid':{'$regex':'a$', '$options':'i'}}, {'a':''}]}
index = []
condition = []
print transfer(spec, grand=None, parent='', index=index, condition=condition)
print condition
print index
spec = {'age':{'$mod':[10, 0]}}
index = []
condition = []
print transfer(spec, grand=None, parent='', index=index, condition=condition)
print condition
print index
| mit | -908,718,480,701,097,600 | 32.044776 | 102 | 0.429185 | false |
rndusr/stig | stig/client/aiotransmission/api_freespace.py | 1 | 1084 | # This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details
# http://www.gnu.org/licenses/gpl-3.0.txt
from ..base import FreeSpaceAPIBase
from ...logging import make_logger # isort:skip
log = make_logger(__name__)
class FreeSpaceAPI(FreeSpaceAPIBase):
async def get_free_space(self, path):
"""Return free space in directory `path` in bytes"""
response = await self._rpc.free_space(path=path)
log.debug('Free space in %r: %r', path, response)
if path == response['path']:
return response['size-bytes']
else:
raise RuntimeError('Expected path %r, got %r' % (path, response['path']))
| gpl-3.0 | 4,059,348,513,064,097,000 | 40.692308 | 85 | 0.699262 | false |
openstack/vitrage | vitrage/tests/unit/datasources/test_transformer_base.py | 1 | 1151 | # Copyright 2017 - Nokia
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from vitrage.common.constants import VertexProperties as VProps
from vitrage.tests import base
# noinspection PyProtectedMember
class BaseTransformerTest(base.BaseTest):
def _validate_base_vertex_props(self,
vertex,
expected_name,
expected_datasource_name):
self.assertFalse(vertex[VProps.VITRAGE_IS_PLACEHOLDER])
self.assertEqual(expected_datasource_name, vertex[VProps.VITRAGE_TYPE])
self.assertEqual(expected_name, vertex[VProps.NAME])
| apache-2.0 | 11,761,442,777,935,972 | 40.107143 | 79 | 0.695917 | false |
ionrock/cherrypy-sqlalchemy | docs/conf.py | 1 | 8484 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# cp_sqlalchemy documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import cp_sqlalchemy
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'CherryPy-SQLAlchemy'
copyright = u'2015, Eric Larson'
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = cp_sqlalchemy.__version__
# The full version, including alpha/beta/rc tags.
release = cp_sqlalchemy.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'cp_sqlalchemydoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'cp_sqlalchemy.tex',
u'CherryPy-SQLAlchemy Documentation',
u'Eric Larson', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'cp_sqlalchemy',
u'CherryPy-SQLAlchemy Documentation',
[u'Eric Larson'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'cp_sqlalchemy',
u'CherryPy-SQLAlchemy Documentation',
u'Eric Larson',
'cp_sqlalchemy',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False | bsd-3-clause | -7,242,717,677,886,140,000 | 29.854545 | 76 | 0.706035 | false |
blix/pyrite | pyrite/commands/track.py | 1 | 1320 | #Copyright 2008 Govind Salinas <[email protected]>
#This program is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 2 of the License, or
#(at your option) any later version.
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#You should have received a copy of the GNU General Public License
#along with this program. If not, see <http://www.gnu.org/licenses/>.
import pyrite
options = [
('f', 'force', _('Add file even if it is ignored'), 0),
('v', 'verbose', _('show added files'), 0)
]
help_str = _("""
pyt track [-f | --force] [-v | --verbose] [files...]
Start tracking files. When run without [files], every file in the repository
working directory is tracked. Which files are tracked can be controlled with
pyt config ignore <pattern>; -f can be used to override the ignore patterns.
""")
def run(cmd, args, flags, io, settings, repo):
is_force = flags.has_key('force')
is_verbose = flags.has_key('verbose')
for line in repo.add_files(is_force, is_verbose, args):
io.info(line)
| gpl-2.0 | 5,571,226,551,426,328,000 | 34.675676 | 76 | 0.718182 | false |
pystockhub/book | ch14/03.py | 1 | 1207 | import pandas_datareader.data as web
import datetime
import matplotlib.pyplot as plt
from zipline.api import order_target, record, symbol
from zipline.algorithm import TradingAlgorithm
start = datetime.datetime(2010, 1, 1)
end = datetime.datetime(2016, 3, 29)
data = web.DataReader("AAPL", "yahoo", start, end)
#plt.plot(data.index, data['Adj Close'])
#plt.show()
data = data[['Adj Close']]
data.columns = ['AAPL']
data = data.tz_localize('UTC')
#print(data.head())
def initialize(context):
context.i = 0
context.sym = symbol('AAPL')
def handle_data(context, data):
context.i += 1
if context.i < 20:
return
ma5 = data.history(context.sym, 'price', 5, '1d').mean()
ma20 = data.history(context.sym, 'price', 20, '1d').mean()
if ma5 > ma20:
order_target(context.sym, 1)
else:
order_target(context.sym, -1)
record(AAPL=data.current(context.sym, "price"), ma5=ma5, ma20=ma20)
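# The handler above implements a simple 5/20-day moving-average crossover:
# once 20 bars of history are available it targets a one-share long position
# while ma5 > ma20 and a one-share short position otherwise. An illustrative
# way to inspect the backtest afterwards (assuming the run below succeeds):
#
#   print(result[['AAPL', 'ma5', 'ma20', 'portfolio_value']].tail())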
algo = TradingAlgorithm(initialize=initialize, handle_data=handle_data)
result = algo.run(data)
#plt.plot(result.index, result.ma5)
#plt.plot(result.index, result.ma20)
#plt.legend(loc='best')
#plt.show()
#plt.plot(result.index, result.portfolio_value)
#plt.show() | mit | 1,628,635,634,769,601,800 | 24.166667 | 71 | 0.683513 | false |
wchan/tensorflow | tensorflow/python/framework/tensor_util_test.py | 1 | 17969 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for tensor_util."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import state_ops
class TensorUtilTest(tf.test.TestCase):
def testFloat(self):
t = tensor_util.make_tensor_proto(10.0)
self.assertProtoEquals("""
dtype: DT_FLOAT
tensor_shape {}
float_val: 10.0
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.float32, a.dtype)
self.assertAllClose(np.array(10.0, dtype=np.float32), a)
def testFloatN(self):
t = tensor_util.make_tensor_proto([10.0, 20.0, 30.0])
self.assertProtoEquals("""
dtype: DT_FLOAT
tensor_shape { dim { size: 3 } }
tensor_content: "\000\000 A\000\000\240A\000\000\360A"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.float32, a.dtype)
self.assertAllClose(np.array([10.0, 20.0, 30.0], dtype=np.float32), a)
def testFloatTyped(self):
t = tensor_util.make_tensor_proto([10.0, 20.0, 30.0], dtype=tf.float32)
self.assertProtoEquals("""
dtype: DT_FLOAT
tensor_shape { dim { size: 3 } }
tensor_content: "\000\000 A\000\000\240A\000\000\360A"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.float32, a.dtype)
self.assertAllClose(np.array([10.0, 20.0, 30.0], dtype=np.float32), a)
def testFloatTypeCoerce(self):
t = tensor_util.make_tensor_proto([10, 20, 30], dtype=tf.float32)
self.assertProtoEquals("""
dtype: DT_FLOAT
tensor_shape { dim { size: 3 } }
tensor_content: "\000\000 A\000\000\240A\000\000\360A"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.float32, a.dtype)
self.assertAllClose(np.array([10.0, 20.0, 30.0], dtype=np.float32), a)
def testFloatTypeCoerceNdarray(self):
arr = np.asarray([10, 20, 30], dtype="int")
t = tensor_util.make_tensor_proto(arr, dtype=tf.float32)
self.assertProtoEquals("""
dtype: DT_FLOAT
tensor_shape { dim { size: 3 } }
tensor_content: "\000\000 A\000\000\240A\000\000\360A"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.float32, a.dtype)
self.assertAllClose(np.array([10.0, 20.0, 30.0], dtype=np.float32), a)
def testFloatSizes(self):
t = tensor_util.make_tensor_proto([10.0, 20.0, 30.0], shape=[1, 3])
self.assertProtoEquals("""
dtype: DT_FLOAT
tensor_shape { dim { size: 1 } dim { size: 3 } }
tensor_content: "\000\000 A\000\000\240A\000\000\360A"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.float32, a.dtype)
self.assertAllClose(np.array([[10.0, 20.0, 30.0]], dtype=np.float32), a)
def testFloatSizes2(self):
t = tensor_util.make_tensor_proto([10.0, 20.0, 30.0], shape=[3, 1])
self.assertProtoEquals("""
dtype: DT_FLOAT
tensor_shape { dim { size: 3 } dim { size: 1 } }
tensor_content: "\000\000 A\000\000\240A\000\000\360A"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.float32, a.dtype)
self.assertAllClose(np.array([[10.0], [20.0], [30.0]], dtype=np.float32),
a)
def testFloatSizesLessValues(self):
t = tensor_util.make_tensor_proto(10.0, shape=[1, 3])
self.assertProtoEquals("""
dtype: DT_FLOAT
tensor_shape { dim { size: 1 } dim { size: 3 } }
float_val: 10.0
""", t)
# No conversion to Ndarray for this one: not enough values.
def testFloatNpArrayFloat64(self):
t = tensor_util.make_tensor_proto(
np.array([[10.0, 20.0, 30.0]], dtype=np.float64))
self.assertProtoEquals("""
dtype: DT_DOUBLE
tensor_shape { dim { size: 1 } dim { size: 3 } }
tensor_content: "\000\000\000\000\000\000$@\000\000\000\000\000\0004@\000\000\000\000\000\000>@"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.float64, a.dtype)
self.assertAllClose(np.array([[10.0, 20.0, 30.0]], dtype=np.float64),
tensor_util.MakeNdarray(t))
def testFloatTypesWithImplicitRepeat(self):
for dtype, nptype in [
(tf.float32, np.float32), (tf.float64, np.float64)]:
t = tensor_util.make_tensor_proto([10.0], shape=[3, 4], dtype=dtype)
a = tensor_util.MakeNdarray(t)
self.assertAllClose(np.array([[10.0, 10.0, 10.0, 10.0],
[10.0, 10.0, 10.0, 10.0],
[10.0, 10.0, 10.0, 10.0]], dtype=nptype), a)
def testInt(self):
t = tensor_util.make_tensor_proto(10)
self.assertProtoEquals("""
dtype: DT_INT32
tensor_shape {}
int_val: 10
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.int32, a.dtype)
self.assertAllClose(np.array(10, dtype=np.int32), a)
def testIntNDefaultType(self):
t = tensor_util.make_tensor_proto([10, 20, 30, 40], shape=[2, 2])
self.assertProtoEquals("""
dtype: DT_INT32
tensor_shape { dim { size: 2 } dim { size: 2 } }
tensor_content: "\\n\000\000\000\024\000\000\000\036\000\000\000(\000\000\000"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.int32, a.dtype)
self.assertAllClose(np.array([[10, 20], [30, 40]], dtype=np.int32), a)
def testIntTypes(self):
for dtype, nptype in [
(tf.int32, np.int32),
(tf.uint8, np.uint8),
(tf.uint16, np.uint16),
(tf.int16, np.int16),
(tf.int8, np.int8)]:
# Test with array.
t = tensor_util.make_tensor_proto([10, 20, 30], dtype=dtype)
self.assertEquals(dtype, t.dtype)
self.assertProtoEquals("dim { size: 3 }", t.tensor_shape)
a = tensor_util.MakeNdarray(t)
self.assertEquals(nptype, a.dtype)
self.assertAllClose(np.array([10, 20, 30], dtype=nptype), a)
# Test with ndarray.
t = tensor_util.make_tensor_proto(np.array([10, 20, 30], dtype=nptype))
self.assertEquals(dtype, t.dtype)
self.assertProtoEquals("dim { size: 3 }", t.tensor_shape)
a = tensor_util.MakeNdarray(t)
self.assertEquals(nptype, a.dtype)
self.assertAllClose(np.array([10, 20, 30], dtype=nptype), a)
def testIntTypesWithImplicitRepeat(self):
for dtype, nptype in [
(tf.int64, np.int64),
(tf.int32, np.int32),
(tf.uint8, np.uint8),
(tf.uint16, np.uint16),
(tf.int16, np.int16),
(tf.int8, np.int8)]:
t = tensor_util.make_tensor_proto([10], shape=[3, 4], dtype=dtype)
a = tensor_util.MakeNdarray(t)
self.assertAllEqual(np.array([[10, 10, 10, 10],
[10, 10, 10, 10],
[10, 10, 10, 10]], dtype=nptype), a)
def testLong(self):
t = tensor_util.make_tensor_proto(10, dtype=tf.int64)
self.assertProtoEquals("""
dtype: DT_INT64
tensor_shape {}
int64_val: 10
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.int64, a.dtype)
self.assertAllClose(np.array(10, dtype=np.int64), a)
def testLongN(self):
t = tensor_util.make_tensor_proto([10, 20, 30], shape=[1, 3],
dtype=tf.int64)
self.assertProtoEquals("""
dtype: DT_INT64
tensor_shape { dim { size: 1 } dim { size: 3 } }
tensor_content: "\\n\000\000\000\000\000\000\000\024\000\000\000\000\000\000\000\036\000\000\000\000\000\000\000"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.int64, a.dtype)
self.assertAllClose(np.array([[10, 20, 30]], dtype=np.int64), a)
def testLongNpArray(self):
t = tensor_util.make_tensor_proto(np.array([10, 20, 30]))
self.assertProtoEquals("""
dtype: DT_INT64
tensor_shape { dim { size: 3 } }
tensor_content: "\\n\000\000\000\000\000\000\000\024\000\000\000\000\000\000\000\036\000\000\000\000\000\000\000"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.int64, a.dtype)
self.assertAllClose(np.array([10, 20, 30], dtype=np.int64), a)
def testQuantizedTypes(self):
for dtype in [tf.qint32, tf.quint8, tf.qint8]:
# Test with array.
t = tensor_util.make_tensor_proto([(10,), (20,), (30,)], dtype=dtype)
self.assertEquals(dtype, t.dtype)
self.assertProtoEquals("dim { size: 3 }", t.tensor_shape)
self.assertEquals(10, t.int_val[0])
self.assertEquals(20, t.int_val[1])
self.assertEquals(30, t.int_val[2])
def testString(self):
t = tensor_util.make_tensor_proto("foo")
self.assertProtoEquals("""
dtype: DT_STRING
tensor_shape {}
string_val: "foo"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.object, a.dtype)
self.assertEquals([b"foo"], a)
def testStringWithImplicitRepeat(self):
t = tensor_util.make_tensor_proto("f", shape=[3, 4])
a = tensor_util.MakeNdarray(t)
self.assertAllEqual(np.array([[b"f"] * 4] * 3, dtype=np.object), a)
def testStringN(self):
t = tensor_util.make_tensor_proto([b"foo", b"bar", b"baz"], shape=[1, 3])
self.assertProtoEquals("""
dtype: DT_STRING
tensor_shape { dim { size: 1 } dim { size: 3 } }
string_val: "foo"
string_val: "bar"
string_val: "baz"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.object, a.dtype)
self.assertAllEqual(np.array([[b"foo", b"bar", b"baz"]]), a)
def testStringNpArray(self):
t = tensor_util.make_tensor_proto(np.array([[b"a", b"ab"],
[b"abc", b"abcd"]]))
self.assertProtoEquals("""
dtype: DT_STRING
tensor_shape { dim { size: 2 } dim { size: 2 } }
string_val: "a"
string_val: "ab"
string_val: "abc"
string_val: "abcd"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.object, a.dtype)
self.assertAllEqual(np.array([[b"a", b"ab"], [b"abc", b"abcd"]]), a)
def testComplex64(self):
t = tensor_util.make_tensor_proto((1+2j), dtype=tf.complex64)
self.assertProtoEquals("""
dtype: DT_COMPLEX64
tensor_shape {}
scomplex_val: 1
scomplex_val: 2
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.complex64, a.dtype)
self.assertAllEqual(np.array(1 + 2j), a)
def testComplex128(self):
t = tensor_util.make_tensor_proto((1+2j), dtype=tf.complex128)
self.assertProtoEquals("""
dtype: DT_COMPLEX128
tensor_shape {}
dcomplex_val: 1
dcomplex_val: 2
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.complex128, a.dtype)
self.assertAllEqual(np.array(1 + 2j), a)
def testComplexWithImplicitRepeat(self):
for dtype, np_dtype in [(tf.complex64, np.complex64),
(tf.complex128, np.complex128)]:
t = tensor_util.make_tensor_proto((1+1j), shape=[3, 4],
dtype=dtype)
a = tensor_util.MakeNdarray(t)
self.assertAllClose(np.array([[(1+1j), (1+1j), (1+1j), (1+1j)],
[(1+1j), (1+1j), (1+1j), (1+1j)],
[(1+1j), (1+1j), (1+1j), (1+1j)]],
dtype=np_dtype), a)
def testComplex64N(self):
t = tensor_util.make_tensor_proto([(1+2j), (3+4j), (5+6j)], shape=[1, 3],
dtype=tf.complex64)
self.assertProtoEquals("""
dtype: DT_COMPLEX64
tensor_shape { dim { size: 1 } dim { size: 3 } }
scomplex_val: 1
scomplex_val: 2
scomplex_val: 3
scomplex_val: 4
scomplex_val: 5
scomplex_val: 6
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.complex64, a.dtype)
self.assertAllEqual(np.array([[(1+2j), (3+4j), (5+6j)]]), a)
def testComplex128N(self):
t = tensor_util.make_tensor_proto([(1+2j), (3+4j), (5+6j)], shape=[1, 3],
dtype=tf.complex128)
self.assertProtoEquals("""
dtype: DT_COMPLEX128
tensor_shape { dim { size: 1 } dim { size: 3 } }
dcomplex_val: 1
dcomplex_val: 2
dcomplex_val: 3
dcomplex_val: 4
dcomplex_val: 5
dcomplex_val: 6
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.complex128, a.dtype)
self.assertAllEqual(np.array([[(1+2j), (3+4j), (5+6j)]]), a)
def testComplex64NpArray(self):
t = tensor_util.make_tensor_proto(
np.array([[(1+2j), (3+4j)], [(5+6j), (7+8j)]]), dtype=tf.complex64)
# scomplex_val are real_0, imag_0, real_1, imag_1, ...
self.assertProtoEquals("""
dtype: DT_COMPLEX64
tensor_shape { dim { size: 2 } dim { size: 2 } }
scomplex_val: 1
scomplex_val: 2
scomplex_val: 3
scomplex_val: 4
scomplex_val: 5
scomplex_val: 6
scomplex_val: 7
scomplex_val: 8
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.complex64, a.dtype)
self.assertAllEqual(np.array([[(1+2j), (3+4j)], [(5+6j), (7+8j)]]), a)
def testComplex128NpArray(self):
t = tensor_util.make_tensor_proto(
np.array([[(1+2j), (3+4j)], [(5+6j), (7+8j)]]), dtype=tf.complex128)
# scomplex_val are real_0, imag_0, real_1, imag_1, ...
self.assertProtoEquals("""
dtype: DT_COMPLEX128
tensor_shape { dim { size: 2 } dim { size: 2 } }
dcomplex_val: 1
dcomplex_val: 2
dcomplex_val: 3
dcomplex_val: 4
dcomplex_val: 5
dcomplex_val: 6
dcomplex_val: 7
dcomplex_val: 8
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.complex128, a.dtype)
self.assertAllEqual(np.array([[(1+2j), (3+4j)], [(5+6j), (7+8j)]]), a)
def testUnsupportedDType(self):
with self.assertRaises(TypeError):
tensor_util.make_tensor_proto(np.array([1]), 0)
def testShapeTooLarge(self):
with self.assertRaises(ValueError):
tensor_util.make_tensor_proto(np.array([1, 2]), shape=[1])
def testLowRankSupported(self):
t = tensor_util.make_tensor_proto(np.array(7))
self.assertProtoEquals("""
dtype: DT_INT64
tensor_shape {}
int64_val: 7
""", t)
def testShapeEquals(self):
t = tensor_util.make_tensor_proto([10, 20, 30, 40], shape=[2, 2])
self.assertTrue(tensor_util.ShapeEquals(t, [2, 2]))
self.assertTrue(tensor_util.ShapeEquals(t, (2, 2)))
self.assertTrue(tensor_util.ShapeEquals(
t, tensor_shape.as_shape([2, 2]).as_proto()))
self.assertFalse(tensor_util.ShapeEquals(t, [5, 3]))
self.assertFalse(tensor_util.ShapeEquals(t, [1, 4]))
self.assertFalse(tensor_util.ShapeEquals(t, [4]))
class ConstantValueTest(tf.test.TestCase):
def testConstant(self):
np_val = np.random.rand(3, 4, 7).astype(np.float32)
tf_val = tf.constant(np_val)
self.assertAllClose(np_val, tf.contrib.util.constant_value(tf_val))
np_val = np.random.rand(3, 0, 7).astype(np.float32)
tf_val = tf.constant(np_val)
self.assertAllClose(np_val, tf.contrib.util.constant_value(tf_val))
def testUnknown(self):
tf_val = state_ops.variable_op(shape=[3, 4, 7], dtype=tf.float32)
self.assertIs(None, tf.contrib.util.constant_value(tf_val))
def testShape(self):
np_val = np.array([1, 2, 3], dtype=np.int32)
tf_val = tf.shape(tf.constant(0.0, shape=[1, 2, 3]))
c_val = tf.contrib.util.constant_value(tf_val)
self.assertAllEqual(np_val, c_val)
self.assertEqual(np.int32, c_val.dtype)
def testSize(self):
tf_val = tf.size(tf.constant(0.0, shape=[1, 2, 3]))
c_val = tf.contrib.util.constant_value(tf_val)
self.assertEqual(6, c_val)
def testSizeOfScalar(self):
tf_val = tf.size(tf.constant(0.0))
c_val = tf.contrib.util.constant_value(tf_val)
self.assertEqual(1, c_val)
self.assertEqual(np.int32, type(c_val))
def testRank(self):
tf_val = tf.rank(tf.constant(0.0, shape=[1, 2, 3]))
c_val = tf.contrib.util.constant_value(tf_val)
self.assertEqual(3, c_val)
def testCast(self):
np_val = np.random.rand(3, 4, 7).astype(np.float32)
tf_val = tf.cast(tf.constant(np_val), tf.float64)
c_val = tf.contrib.util.constant_value(tf_val)
self.assertAllClose(np_val.astype(np.float64), c_val)
np_val = np.random.rand(3, 0, 7).astype(np.float32)
tf_val = tf.cast(tf.constant(np_val), tf.float64)
c_val = tf.contrib.util.constant_value(tf_val)
self.assertAllClose(np_val.astype(np.float64), c_val)
def testConcat(self):
np_val = np.random.rand(3, 4, 7).astype(np.float32)
tf_val = tf.concat(
0, [np_val[0:1, :, :], np_val[1:2, :, :], np_val[2:3, :, :]])
c_val = tf.contrib.util.constant_value(tf_val)
self.assertAllClose(np_val, c_val)
tf_val = tf.concat(
tf.placeholder(tf.int32),
[np_val[0, :, :], np_val[1, :, :], np_val[2, :, :]])
c_val = tf.contrib.util.constant_value(tf_val)
self.assertIs(None, c_val)
tf_val = tf.concat(
1,
[np_val[0, :, :], tf.placeholder(tf.float32),
np_val[2, :, :]])
c_val = tf.contrib.util.constant_value(tf_val)
self.assertIs(None, c_val)
if __name__ == "__main__":
tf.test.main()
| apache-2.0 | -6,328,267,020,191,155,000 | 35.374494 | 119 | 0.599421 | false |
martinrusev/amonone | amon/apps/account/forms.py | 1 | 6546 | from django import forms
from django.contrib.auth import authenticate
from django.conf import settings
from django.contrib.auth import get_user_model
from amon.apps.notifications.models import notifications_model
from amon.apps.alerts.models import alerts_model
from amon.apps.account.models import user_preferences_model, forgotten_pass_tokens_model
from amon.apps.api.models import api_key_model
from timezone_field import TimeZoneFormField
from amon.apps.account.mailer import send_email_forgotten_password
User = get_user_model()
class LoginForm(forms.Form):
email = forms.EmailField(required=True, widget=forms.TextInput(attrs={'placeholder': 'Email'}))
password = forms.CharField(required=True, widget=forms.PasswordInput(render_value=False, attrs={'placeholder': 'Password'}))
remember_me = forms.BooleanField(widget=forms.CheckboxInput(), label='Remember Me', required=False)
def clean(self):
email = self.cleaned_data.get('email')
password = self.cleaned_data.get('password')
if email and password:
user = authenticate(email=email, password=password)
if user:
return self.cleaned_data
raise forms.ValidationError("Invalid login details")
def clean_remember_me(self):
remember_me = self.cleaned_data.get('remember_me')
if not remember_me:
settings.SESSION_EXPIRE_AT_BROWSER_CLOSE = True
else:
settings.SESSION_EXPIRE_AT_BROWSER_CLOSE = False
return remember_me
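# Illustrative view-side usage (a sketch, not part of this module; it assumes
# the standard django.contrib.auth login flow):
#
#   form = LoginForm(request.POST)
#   if form.is_valid():
#       user = authenticate(email=form.cleaned_data['email'],
#                           password=form.cleaned_data['password'])
#       login(request, user)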
class AdminUserForm(forms.Form):
email = forms.EmailField(required=True, widget=forms.TextInput(attrs={'placeholder': 'Email'}))
password = forms.CharField(required=True, widget=forms.PasswordInput(render_value=False, attrs={'placeholder': 'Password'}))
def clean(self):
email = self.cleaned_data.get('email')
password = self.cleaned_data.get('password')
if email and password:
user = User.objects.filter(email=email).count()
if user:
raise forms.ValidationError("User already exists")
return self.cleaned_data
def save(self):
email = self.cleaned_data.get('email')
password = self.cleaned_data.get('password')
user = User.objects.create_user(email, password)
user.is_admin = True
user.is_staff = True
user.is_superuser = True
user.save()
notifications_model.save(data={"email": email}, provider_id='email')
alerts_model.add_initial_data()
api_key_model.add_initial_data()
class ProfileForm(forms.Form):
def __init__(self, *args, **kwargs):
self.user = kwargs.pop('user', None)
user_preferences = user_preferences_model.get_preferences(user_id=self.user.id)
user_timezone = user_preferences.get('timezone', 'UTC')
super(ProfileForm, self).__init__(*args, **kwargs)
self.fields['timezone'].widget.attrs.update({'select2-dropdown': '', 'data-size': 360})
self.fields['timezone'].initial = user_timezone
self.fields['email'].initial = self.user.email
email = forms.EmailField(required=True, widget=forms.TextInput(attrs={'placeholder': 'Email'}))
timezone = TimeZoneFormField()
# Check email uniqueness
def clean_email(self):
email = self.cleaned_data.get('email')
if email:
if self.user.email != email:
unique = User.objects.filter(email__iexact=email).count()
if unique > 0:
                    raise forms.ValidationError(u'A user with this email address already exists.')
return email
def save(self):
data = {'timezone': str(self.cleaned_data['timezone'])}
user_preferences_model.save_preferences(user_id=self.user.id, data=data)
self.user.email = self.cleaned_data['email']
self.user.save()
class ChangePasswordForm(forms.Form):
def __init__(self, *args, **kwargs):
self.user = kwargs.pop('user', None)
super(ChangePasswordForm, self).__init__(*args, **kwargs)
current_password = forms.CharField(required=True, widget=(forms.PasswordInput(attrs={'placeholder': 'Password'})))
new_password = forms.CharField(required=True, widget=(forms.PasswordInput(attrs={'placeholder': 'Password'})))
def clean_current_password(self):
password = self.cleaned_data.get('current_password')
if self.user.check_password(password):
            return password
raise forms.ValidationError("Your current password is not correct")
def save(self):
password = self.cleaned_data.get('new_password')
self.user.set_password(password)
self.user.save()
return True
class ForgottenPasswordForm(forms.Form):
def __init__(self, *args, **kwargs):
super(ForgottenPasswordForm, self).__init__(*args, **kwargs)
email = forms.EmailField(required=True, widget=(forms.TextInput(attrs={'placeholder': 'Your Login Email'})))
def clean(self):
email = self.cleaned_data.get('email')
if email:
user = User.objects.filter(email=email).count()
if user == 0:
                raise forms.ValidationError("User does not exist")
return self.cleaned_data
def save(self):
email = self.cleaned_data.get('email')
token = forgotten_pass_tokens_model.set_token(email=email)
send_email_forgotten_password(token=token, recipients=[email])
return True
class ResetPasswordForm(forms.Form):
password = forms.CharField(
required=True,
label='Your new password',
widget=forms.PasswordInput(render_value=False, attrs={'placeholder': 'Password'})
)
repeat_password = forms.CharField(
required=True,
label='Confirm it',
widget=forms.PasswordInput(render_value=False, attrs={'placeholder': 'Repeat Password'})
)
def clean(self):
repeat_password = self.cleaned_data.get('repeat_password')
password = self.cleaned_data.get('password')
if repeat_password and password:
if repeat_password != password:
                raise forms.ValidationError("Passwords do not match")
return self.cleaned_data
def save(self, user=None):
password = self.cleaned_data.get('password')
user.set_password(password)
user.save() | mit | 1,838,512,091,177,338,400 | 29.882075 | 128 | 0.641002 | false |
hpcuantwerpen/easybuild-easyblocks | easybuild/easyblocks/t/torchvision.py | 1 | 2524 | ##
# Copyright 2021-2021 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing torchvision, implemented as an easyblock
@author: Alexander Grund (TU Dresden)
"""
from easybuild.easyblocks.generic.pythonpackage import PythonPackage
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.config import build_option
from easybuild.tools.modules import get_software_root, get_software_version
import easybuild.tools.environment as env
class EB_torchvision(PythonPackage):
    """Support for building/installing TorchVision."""
@staticmethod
def extra_options():
"""Change some defaults."""
extra_vars = PythonPackage.extra_options()
extra_vars['use_pip'][0] = True
extra_vars['download_dep_fail'][0] = True
extra_vars['sanity_pip_check'][0] = True
return extra_vars
def configure_step(self):
"""Set up torchvision config"""
if not get_software_root('PyTorch'):
raise EasyBuildError('PyTorch not found as a dependency')
# Note: Those can be overwritten by e.g. preinstallopts
env.setvar('BUILD_VERSION', self.version)
env.setvar('PYTORCH_VERSION', get_software_version('PyTorch'))
if get_software_root('CUDA'):
cuda_cc = self.cfg['cuda_compute_capabilities'] or build_option('cuda_compute_capabilities')
if cuda_cc:
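                # Build CUDA kernels only for the requested compute capabilities (semicolon-separated list)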
env.setvar('TORCH_CUDA_ARCH_LIST', ';'.join(cuda_cc))
super(EB_torchvision, self).configure_step()
| gpl-2.0 | 7,378,695,513,344,649,000 | 39.063492 | 104 | 0.709984 | false |
pFernbach/hpp-rbprm-corba | script/scenarios/sandbox/dynamic/sarpa_hrp2_interStatic.py | 1 | 6487 | from hpp.corbaserver.rbprm.rbprmbuilder import Builder
from hpp.corbaserver.rbprm.rbprmfullbody import FullBody
from hpp.gepetto import Viewer
from tools import *
import walkBauzil_hrp2_pathKino as tp
import time
import omniORB.any
packageName = "hrp2_14_description"
meshPackageName = "hrp2_14_description"
rootJointType = "freeflyer"
##
# Information to retrieve urdf and srdf files.
urdfName = "hrp2_14"
urdfSuffix = "_reduced"
srdfSuffix = ""
pId = tp.ps.numberPaths() -1
fullBody = FullBody ()
fullBody.loadFullBodyModel(urdfName, rootJointType, meshPackageName, packageName, urdfSuffix, srdfSuffix)
fullBody.setJointBounds ("base_joint_xyz", [-3,4.5,-2 ,2.5, 0.55, 0.6])
fullBody.client.basic.robot.setDimensionExtraConfigSpace(tp.extraDof)
ps = tp.ProblemSolver( fullBody )
ps.client.problem.setParameter("aMax",omniORB.any.to_any(tp.aMax))
ps.client.problem.setParameter("vMax",omniORB.any.to_any(tp.vMax))
r = tp.Viewer (ps,viewerClient=tp.r.client,displayArrows = True, displayCoM = True)
q_init =[0, 0, 0.648702, 1.0, 0.0 , 0.0, 0.0,0.0, 0.0, 0.0, 0.0,0.261799388, 0.174532925, 0.0, -0.523598776, 0.0, 0.0, 0.17,0.261799388, -0.174532925, 0.0, -0.523598776, 0.0, 0.0, 0.17,0.0, 0.0, -0.453785606, 0.872664626, -0.41887902, 0.0,0.0, 0.0, -0.453785606, 0.872664626, -0.41887902, 0.0,0,0,0,0,0,0]; r (q_init)
q_ref = q_init[::]
fullBody.setCurrentConfig (q_init)
fullBody.setReferenceConfig (q_ref)
#~ AFTER loading obstacles
rLegId = 'hrp2_rleg_rom'
lLegId = 'hrp2_lleg_rom'
tStart = time.time()
rLeg = 'RLEG_JOINT0'
rLegOffset = [0,0,-0.105]
rLegLimbOffset=[0,0,-0.035]#0.035
rLegNormal = [0,0,1]
rLegx = 0.09; rLegy = 0.05
#fullBody.addLimb(rLegId,rLeg,'',rLegOffset,rLegNormal, rLegx, rLegy, 50000, "forward", 0.1,"_6_DOF")
fullBody.addLimb(rLegId,rLeg,'',rLegOffset,rLegNormal, rLegx, rLegy, 50000, "dynamicWalk", 0.01,"_6_DOF",limbOffset=rLegLimbOffset)
fullBody.runLimbSampleAnalysis(rLegId, "ReferenceConfiguration", True)
#fullBody.saveLimbDatabase(rLegId, "./db/hrp2_rleg_db.db")
lLeg = 'LLEG_JOINT0'
lLegOffset = [0,0,-0.105]
lLegLimbOffset=[0,0,0.035]
lLegNormal = [0,0,1]
lLegx = 0.09; lLegy = 0.05
#fullBody.addLimb(lLegId,lLeg,'',lLegOffset,rLegNormal, lLegx, lLegy, 50000, "forward", 0.1,"_6_DOF")
fullBody.addLimb(lLegId,lLeg,'',lLegOffset,rLegNormal, lLegx, lLegy, 50000, "dynamicWalk", 0.01,"_6_DOF",limbOffset=lLegLimbOffset)
fullBody.runLimbSampleAnalysis(lLegId, "ReferenceConfiguration", True)
#fullBody.saveLimbDatabase(lLegId, "./db/hrp2_lleg_db.db")
## Add arms (not used for contact) :
rarmId = 'hrp2_rarm_rom'
rarm = 'RARM_JOINT0'
rHand = 'RARM_JOINT5'
fullBody.addNonContactingLimb(rarmId,rarm,rHand, 50000)
fullBody.runLimbSampleAnalysis(rarmId, "ReferenceConfiguration", True)
larmId = 'hrp2_larm_rom'
larm = 'LARM_JOINT0'
lHand = 'LARM_JOINT5'
fullBody.addNonContactingLimb(larmId,larm,lHand, 50000)
fullBody.runLimbSampleAnalysis(larmId, "ReferenceConfiguration", True)
tGenerate = time.time() - tStart
print "generate databases in : "+str(tGenerate)+" s"
"""
fullBody.addLimbDatabase("./db/hrp2_rleg_db.db",rLegId,"forward")
fullBody.addLimbDatabase("./db/hrp2_lleg_db.db",lLegId,"forward")
tLoad = time.time() - tStart
print "Load databases in : "+str(tLoad)+" s"
"""
q_0 = fullBody.getCurrentConfig();
#~ fullBody.createOctreeBoxes(r.client.gui, 1, rarmId, q_0,)
eps=0.0001
configSize = fullBody.getConfigSize() -fullBody.client.basic.robot.getDimensionExtraConfigSpace()
q_init = fullBody.getCurrentConfig(); q_init[0:7] = tp.ps.configAtParam(pId,eps)[0:7] # use this to get the correct orientation
q_goal = fullBody.getCurrentConfig(); q_goal[0:7] = tp.ps.configAtParam(pId,tp.ps.pathLength(pId)-0.0001)[0:7]
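# the extra config space appended by the kinodynamic planner stores the velocity/direction (indexECS .. indexECS+3) and acceleration (indexECS+3 .. indexECS+6) along the guide path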
dir_init = tp.ps.configAtParam(pId,eps)[tp.indexECS:tp.indexECS+3]
acc_init = tp.ps.configAtParam(pId,0)[tp.indexECS+3:tp.indexECS+6]
dir_goal = tp.ps.configAtParam(pId,tp.ps.pathLength(pId)-eps)[tp.indexECS:tp.indexECS+3]
acc_goal = [0,0,0]
robTreshold = 1
# copy extraconfig for start and init configurations
q_init[configSize:configSize+3] = dir_init[::]
q_init[configSize+3:configSize+6] = acc_init[::]
q_goal[configSize:configSize+3] = dir_goal[::]
q_goal[configSize+3:configSize+6] = [0,0,0]
# FIXME : test
q_init[2] = q_init[2]+0.1
q_goal[2] = q_goal[2]+0.1
# Randomly generating a contact configuration at q_init
fullBody.setStaticStability(True)
fullBody.setCurrentConfig (q_init)
r(q_init)
q_init = fullBody.generateContacts(q_init,dir_init,acc_init,robTreshold)
r(q_init)
# Randomly generating a contact configuration at q_end
fullBody.setCurrentConfig (q_goal)
q_goal = fullBody.generateContacts(q_goal, dir_goal,acc_goal,robTreshold)
r(q_goal)
# specifying the full body configurations as start and goal state of the problem
r.addLandmark('hrp2_14/BODY',0.3)
r(q_init)
fullBody.setStartState(q_init,[rLegId,lLegId])
fullBody.setEndState(q_goal,[rLegId,lLegId])
fullBody.setStaticStability(True) # only set it after the init/goal configuration are computed
from hpp.gepetto import PathPlayer
pp = PathPlayer (fullBody.client.basic, r)
import fullBodyPlayerHrp2
tStart = time.time()
configs = fullBody.interpolate(0.01,pathId=pId,robustnessTreshold = robTreshold, filterStates = False)
tInterpolate = time.time()-tStart
print "number of configs : ", len(configs)
print "generated in "+str(tInterpolate)+" s"
r(configs[len(configs)-2])
player = fullBodyPlayerHrp2.Player(fullBody,pp,tp,configs,draw=False,use_window=1,optim_effector=True,use_velocity=False,pathId = pId)
# remove the last config (= user-defined q_goal, not consistent with the previous state)
#r(configs[0])
#player.displayContactPlan()
#player.interpolate(2,len(configs)-1)
from planning.config import *
from generate_contact_sequence import *
cs = generateContactSequence(fullBody,configs[:5],r)
filename = OUTPUT_DIR + "/" + OUTPUT_SEQUENCE_FILE
cs.saveAsXML(filename, "ContactSequence")
print "save contact sequence : ",filename
"""
r(q_init)
pos=fullBody.getJointPosition('RLEG_JOINT0')
addSphere(r,r.color.blue,pos)
dir = fullBody.getCurrentConfig()[37:40]
fullBody.client.rbprm.rbprm.evaluateConfig(fullBody.getCurrentConfig(),dir)
vd[0:3] = fullBody.getCurrentConfig()[0:3]
addVector(r,fullBody,r.color.black,vd)
vl[0:3] = fullBody.getCurrentConfig()[0:3]
addVector(r,fullBody,r.color.blue,vl)
vlb[0:3] = fullBody.getCurrentConfig()[0:3]
addVector(r,fullBody,r.color.red,vlb)
"""
wid = r.client.gui.getWindowID("window_hpp_")
#r.client.gui.attachCameraToNode( 'hrp2_14/BODY_0',wid)
| lgpl-3.0 | 4,426,639,063,649,492,500 | 31.762626 | 318 | 0.743641 | false |
lssfau/walberla | tests/lbm/codegen/LbCodeGenerationExample.py | 1 | 2336 | import sympy as sp
import pystencils as ps
from lbmpy.creationfunctions import create_lb_collision_rule
from lbmpy.boundaries import NoSlip, UBB
from pystencils_walberla import CodeGeneration
from lbmpy_walberla import RefinementScaling, generate_boundary, generate_lattice_model
with CodeGeneration() as ctx:
omega, omega_free = sp.symbols("omega, omega_free")
force_field, vel_field, omega_out = ps.fields("force(3), velocity(3), omega_out: [3D]", layout='zyxf')
    # the collision rule of the LB method, where some advanced features are used
collision_rule = create_lb_collision_rule(
stencil='D3Q19', compressible=True,
method='mrt', relaxation_rates=[omega, omega, omega_free, omega_free, omega_free, omega_free],
entropic=True, # entropic method where second omega is chosen s.t. entropy condition
omega_output_field=omega_out, # scalar field where automatically chosen omega of entropic or
# Smagorinsky method is written to
force=force_field.center_vector, # read forces for each lattice cell from an external force field
# that is initialized and changed in C++ app
output={'velocity': vel_field}, # write macroscopic velocity to field in every time step
# useful for coupling multiple LB methods,
# e.g. hydrodynamic to advection/diffusion LBM
optimization={'cse_global': True}
)
# the refinement scaling object describes how certain parameters are scaled across grid scales
# there are two default scaling behaviors available for relaxation rates and forces:
scaling = RefinementScaling()
scaling.add_standard_relaxation_rate_scaling(omega)
scaling.add_force_scaling(force_field)
# generate lattice model and (optionally) boundary conditions
# for CPU simulations waLBerla's internal boundary handling can be used as well
generate_lattice_model(ctx, 'LbCodeGenerationExample_LatticeModel', collision_rule, refinement_scaling=scaling)
generate_boundary(ctx, 'LbCodeGenerationExample_UBB', UBB([0.05, 0, 0]), collision_rule.method)
generate_boundary(ctx, 'LbCodeGenerationExample_NoSlip', NoSlip(), collision_rule.method)
| gpl-3.0 | 1,046,695,366,246,672,600 | 62.135135 | 115 | 0.690068 | false |
Azure/azure-sdk-for-python | sdk/translation/azure-ai-translation-document/tests/testcase.py | 1 | 13689 | # coding=utf-8
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import os
import time
import datetime
import uuid
from devtools_testutils import (
AzureTestCase,
)
from azure_devtools.scenario_tests import (
RecordingProcessor,
ReplayableTest
)
from azure.storage.blob import generate_container_sas, ContainerClient
from azure.ai.translation.document import DocumentTranslationInput, TranslationTarget
class Document(object):
"""Represents a document to be uploaded to source/target container"""
def __init__(self, **kwargs):
self.name = kwargs.get("name", str(uuid.uuid4()))
self.suffix = kwargs.get("suffix", ".txt")
self.prefix = kwargs.get("prefix", "")
self.data = kwargs.get("data", b'This is written in english.')
@classmethod
def create_dummy_docs(cls, docs_count):
result = []
for i in range(docs_count):
result.append(cls())
return result
class OperationLocationReplacer(RecordingProcessor):
"""Replace the location/operation location uri in a request/response body."""
def __init__(self):
self._replacement = "https://redacted.cognitiveservices.azure.com/translator/"
def process_response(self, response):
try:
headers = response['headers']
if 'operation-location' in headers:
location_header = "operation-location"
if isinstance(headers[location_header], list):
suffix = headers[location_header][0].split("/translator/")[1]
response['headers'][location_header] = [self._replacement + suffix]
else:
suffix = headers[location_header].split("/translator/")[1]
response['headers'][location_header] = self._replacement + suffix
url = response["url"]
if url is not None:
suffix = url.split("/translator/")[1]
response['url'] = self._replacement + suffix
return response
except (KeyError, ValueError):
return response
class DocumentTranslationTest(AzureTestCase):
FILTER_HEADERS = ReplayableTest.FILTER_HEADERS + ['Ocp-Apim-Subscription-Key']
def __init__(self, method_name):
super(DocumentTranslationTest, self).__init__(method_name)
self.vcr.match_on = ["path", "method", "query"]
self.recording_processors.append(OperationLocationReplacer())
self.storage_name = os.getenv("TRANSLATION_DOCUMENT_STORAGE_NAME", "redacted")
self.storage_endpoint = "https://" + self.storage_name + ".blob.core.windows.net/"
self.storage_key = os.getenv("TRANSLATION_DOCUMENT_STORAGE_KEY")
self.scrubber.register_name_pair(
self.storage_endpoint, "https://redacted.blob.core.windows.net/"
)
self.scrubber.register_name_pair(
self.storage_name, "redacted"
)
self.scrubber.register_name_pair(
self.storage_key, "fakeZmFrZV9hY29jdW50X2tleQ=="
)
def get_oauth_endpoint(self):
return os.getenv("TRANSLATION_DOCUMENT_TEST_ENDPOINT")
def generate_oauth_token(self):
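        # A real AAD credential is only needed for live runs; recorded playback does not authenticate.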
if self.is_live:
from azure.identity import ClientSecretCredential
return ClientSecretCredential(
os.getenv("TRANSLATION_TENANT_ID"),
os.getenv("TRANSLATION_CLIENT_ID"),
os.getenv("TRANSLATION_CLIENT_SECRET"),
)
def upload_documents(self, data, container_client):
if isinstance(data, list):
for blob in data:
container_client.upload_blob(name=blob.prefix + blob.name + blob.suffix, data=blob.data)
else:
container_client.upload_blob(name=data.prefix + data.name + data.suffix, data=data.data)
def create_source_container(self, data):
# for offline tests
if not self.is_live:
return "dummy_string"
# for actual live tests
container_name = "src" + str(uuid.uuid4())
container_client = ContainerClient(self.storage_endpoint, container_name,
self.storage_key)
container_client.create_container()
self.upload_documents(data, container_client)
return self.generate_sas_url(container_name, "rl")
def create_target_container(self, data=None):
# for offline tests
if not self.is_live:
return "dummy_string"
# for actual live tests
container_name = "target" + str(uuid.uuid4())
container_client = ContainerClient(self.storage_endpoint, container_name,
self.storage_key)
container_client.create_container()
if data:
self.upload_documents(data, container_client)
return self.generate_sas_url(container_name, "rw")
def generate_sas_url(self, container_name, permission):
sas_token = self.generate_sas(
generate_container_sas,
account_name=self.storage_name,
container_name=container_name,
account_key=self.storage_key,
permission=permission,
expiry=datetime.datetime.utcnow() + datetime.timedelta(hours=2)
)
container_sas_url = self.storage_endpoint + container_name + "?" + sas_token
return container_sas_url
def wait(self, duration=30):
if self.is_live:
time.sleep(duration)
# model helpers
def _validate_doc_status(self, doc_details, target_language=None, **kwargs):
status = kwargs.pop("statuses", ["Succeeded"])
ids = kwargs.pop("ids", None)
# specific assertions
self.assertIn(doc_details.status, status)
if target_language:
self.assertEqual(doc_details.translated_to, target_language)
# generic assertions
self.assertIn(doc_details.id, ids) if ids else self.assertIsNotNone(doc_details.id)
self.assertIsNotNone(doc_details.id)
self.assertIsNotNone(doc_details.source_document_url)
self.assertIsNotNone(doc_details.translated_document_url)
self.assertIsNotNone(doc_details.translation_progress)
self.assertIsNotNone(doc_details.characters_charged)
self.assertIsNotNone(doc_details.created_on)
self.assertIsNotNone(doc_details.last_updated_on)
def _validate_translation_metadata(self, poller, **kwargs):
status = kwargs.pop("status", None)
total = kwargs.pop('total', None)
failed = kwargs.pop('failed', None)
succeeded = kwargs.pop('succeeded', None)
inprogress = kwargs.pop('inprogress', None)
notstarted = kwargs.pop('notstarted', None)
cancelled = kwargs.pop('cancelled', None)
# status
        p = poller.status()
        self.assertEqual(p, status) if status else self.assertIsNotNone(p)
# docs count
if poller.done():
self.assertEqual(poller.details.documents_total_count, total) if total else self.assertIsNotNone(poller.details.documents_total_count)
self.assertEqual(poller.details.documents_failed_count, failed) if failed else self.assertIsNotNone(poller.details.documents_failed_count)
self.assertEqual(poller.details.documents_succeeded_count, succeeded) if succeeded else self.assertIsNotNone(poller.details.documents_succeeded_count)
self.assertEqual(poller.details.documents_in_progress_count, inprogress) if inprogress else self.assertIsNotNone(poller.details.documents_in_progress_count)
self.assertEqual(poller.details.documents_not_yet_started_count, notstarted) if notstarted else self.assertIsNotNone(poller.details.documents_not_yet_started_count)
self.assertEqual(poller.details.documents_cancelled_count, cancelled) if cancelled else self.assertIsNotNone(poller.details.documents_cancelled_count)
# generic assertions
self.assertIsNotNone(poller.details.id)
self.assertIsNotNone(poller.details.created_on)
self.assertIsNotNone(poller.details.last_updated_on)
self.assertIsNotNone(poller.details.total_characters_charged)
def _validate_translations(self, job_details, **kwargs):
status = kwargs.pop("status", None)
total = kwargs.pop('total', None)
failed = kwargs.pop('failed', None)
succeeded = kwargs.pop('succeeded', None)
inprogress = kwargs.pop('inprogress', None)
notstarted = kwargs.pop('notstarted', None)
cancelled = kwargs.pop('cancelled', None)
# status
self.assertEqual(job_details.status, status) if status else self.assertIsNotNone(job_details.status)
# docs count
self.assertEqual(job_details.documents_total_count, total) if total else self.assertIsNotNone(
job_details.documents_total_count)
self.assertEqual(job_details.documents_failed_count, failed) if failed else self.assertIsNotNone(
job_details.documents_failed_count)
self.assertEqual(job_details.documents_succeeded_count,
succeeded) if succeeded else self.assertIsNotNone(job_details.documents_succeeded_count)
self.assertEqual(job_details.documents_in_progress_count,
inprogress) if inprogress else self.assertIsNotNone(
job_details.documents_in_progress_count)
self.assertEqual(job_details.documents_not_yet_started_count,
notstarted) if notstarted else self.assertIsNotNone(
job_details.documents_not_yet_started_count)
self.assertEqual(job_details.documents_cancelled_count,
cancelled) if cancelled else self.assertIsNotNone(job_details.documents_cancelled_count)
# generic assertions
self.assertIsNotNone(job_details.id)
self.assertIsNotNone(job_details.created_on)
self.assertIsNotNone(job_details.last_updated_on)
self.assertIsNotNone(job_details.total_characters_charged)
def _validate_format(self, format):
self.assertIsNotNone(format.file_format)
self.assertIsNotNone(format.file_extensions)
self.assertIsNotNone(format.content_types)
# client helpers
def _begin_and_validate_translation(self, client, translation_inputs, total_docs_count, language=None):
# submit job
poller = client.begin_translation(translation_inputs)
self.assertIsNotNone(poller.id)
# wait for result
result = poller.result()
# validate
self._validate_translation_metadata(poller=poller, status='Succeeded', total=total_docs_count, succeeded=total_docs_count)
for doc in result:
self._validate_doc_status(doc, language)
return poller.id
def _begin_multiple_translations(self, client, operations_count, **kwargs):
wait_for_operation = kwargs.pop('wait', True)
language_code = kwargs.pop('language_code', "es")
docs_per_operation = kwargs.pop('docs_per_operation', 2)
result_job_ids = []
for i in range(operations_count):
# prepare containers and test data
blob_data = Document.create_dummy_docs(docs_per_operation)
source_container_sas_url = self.create_source_container(data=blob_data)
target_container_sas_url = self.create_target_container()
# prepare translation inputs
translation_inputs = [
DocumentTranslationInput(
source_url=source_container_sas_url,
targets=[
TranslationTarget(
target_url=target_container_sas_url,
language_code=language_code
)
]
)
]
# submit multiple jobs
poller = client.begin_translation(translation_inputs)
self.assertIsNotNone(poller.id)
if wait_for_operation:
result = poller.result()
else:
poller.wait()
result_job_ids.append(poller.id)
return result_job_ids
def _begin_and_validate_translation_with_multiple_docs(self, client, docs_count, **kwargs):
        # get input params
wait_for_operation = kwargs.pop('wait', False)
language_code = kwargs.pop('language_code', "es")
# prepare containers and test data
blob_data = Document.create_dummy_docs(docs_count=docs_count)
source_container_sas_url = self.create_source_container(data=blob_data)
target_container_sas_url = self.create_target_container()
# prepare translation inputs
translation_inputs = [
DocumentTranslationInput(
source_url=source_container_sas_url,
targets=[
TranslationTarget(
target_url=target_container_sas_url,
language_code=language_code
)
]
)
]
# submit job
poller = client.begin_translation(translation_inputs)
self.assertIsNotNone(poller.id)
# wait for result
if wait_for_operation:
result = poller.result()
for doc in result:
self._validate_doc_status(doc, "es")
# validate
self._validate_translation_metadata(poller=poller)
return poller
| mit | 109,761,422,991,520,050 | 42.182965 | 176 | 0.628972 | false |
waidyanatha/sambro-eden | private/templates/default/my-remover/config.py | 2 | 36031 | # -*- coding: utf-8 -*-
try:
# Python 2.7
from collections import OrderedDict
except:
# Python 2.6
from gluon.contrib.simplejson.ordered_dict import OrderedDict
from gluon import current
from gluon.storage import Storage
T = current.T
settings = current.deployment_settings
"""
Template settings
All settings which are to configure a specific template are located here
Deployers should ideally not need to edit any other files outside of their template folder
"""
# Pre-Populate
# http://eden.sahanafoundation.org/wiki/DeveloperGuidelines/PrePopulate
# Configure/disable pre-population of the database.
# To pre-populate the database On 1st run should specify directory(s) in
# /private/templates/
# eg:
# ["default"] (1 is a shortcut for this)
# ["Standard"]
# ["IFRC_Train"]
# ["roles", "user"]
# Unless doing a manual DB migration, where prepopulate = 0
# In Production, prepopulate = 0 (to save 1x DAL hit every page)
#settings.base.prepopulate = 1
# Theme (folder to use for views/layout.html)
#settings.base.theme = "default"
# Enable Guided Tours
settings.base.guided_tour = True
# Authentication settings
# These settings should be changed _after_ the 1st (admin) user is
# registered in order to secure the deployment
# Should users be allowed to register themselves?
#settings.security.self_registration = False
# Do new users need to verify their email address?
#settings.auth.registration_requires_verification = True
# Do new users need to be approved by an administrator prior to being able to login?
#settings.auth.registration_requires_approval = True
# Allow a new user to be linked to a record (and a new record will be created if it doesn't already exist)
#settings.auth.registration_link_user_to = {"staff":T("Staff"),
# "volunteer":T("Volunteer"),
# "member":T("Member")}
# Always notify the approver of a new (verified) user, even if the user is automatically approved
#settings.auth.always_notify_approver = False
# The name of the teams that users are added to when they opt-in to receive alerts
#settings.auth.opt_in_team_list = ["Updates"]
# Uncomment this to set the opt in default to True
#settings.auth.opt_in_default = True
# Uncomment this to request the Mobile Phone when a user registers
#settings.auth.registration_requests_mobile_phone = True
# Uncomment this to have the Mobile Phone selection during registration be mandatory
#settings.auth.registration_mobile_phone_mandatory = True
# Uncomment this to request the Organisation when a user registers
#settings.auth.registration_requests_organisation = True
# Uncomment this to have the Organisation selection during registration be mandatory
#settings.auth.registration_organisation_required = True
# Uncomment this to have the Organisation input hidden unless the user enters a non-whitelisted domain
#settings.auth.registration_organisation_hidden = True
# Uncomment this to default the Organisation during registration
#settings.auth.registration_organisation_default = "My Organisation"
# Uncomment this to request the Organisation Group when a user registers
#settings.auth.registration_requests_organisation_group = True
# Uncomment this to have the Organisation Group selection during registration be mandatory
#settings.auth.registration_organisation_group_required = True
# Uncomment this to request the Site when a user registers
#settings.auth.registration_requests_site = True
# Uncomment to set the default role UUIDs assigned to newly-registered users
# This is a dictionary of lists, where the key is the realm that the list of roles applies to
# The key 0 implies not realm restricted
# The keys "organisation_id" and "site_id" can be used to indicate the user's "organisation_id" and "site_id"
#settings.auth.registration_roles = { 0: ["STAFF", "PROJECT_EDIT"]}
# Uncomment this to enable record approval
#settings.auth.record_approval = True
# Uncomment this and specify a list of tablenames for which record approval is required
#settings.auth.record_approval_required_for = ["project_project"]
# Uncomment this to request an image when users register
#settings.auth.registration_requests_image = True
# Uncomment this to direct newly-registered users to their volunteer page to be able to add extra details
# NB This requires Verification/Approval to be Off
# @ToDo: Extend to all optional Profile settings: Homepage, Twitter, Facebook, Mobile Phone, Image
#settings.auth.registration_volunteer = True
# Terms of Service to be able to Register on the system
# uses <template>/views/tos.html
#settings.auth.terms_of_service = True
# Uncomment this to allow users to Login using Gmail's SMTP
#settings.auth.gmail_domains = ["gmail.com"]
# Uncomment this to allow users to Login using OpenID
#settings.auth.openid = True
# Uncomment this to enable presence records on login based on HTML5 geolocations
#settings.auth.set_presence_on_login = True
# Uncomment this and specify a list of location levels to be ignored by presence records
#settings.auth.ignore_levels_for_presence = ["L0", "L1", "L2", "L3"]
# Uncomment this to enable the creation of new locations if a user logs in from an unknown location. Warning: This may lead to many useless location entries
#settings.auth.create_unknown_locations = True
# L10n settings
# Languages used in the deployment (used for Language Toolbar & GIS Locations)
# http://www.loc.gov/standards/iso639-2/php/code_list.php
#settings.L10n.languages = OrderedDict([
# ("ar", "العربية"),
# ("zh-cn", "中文 (简体)"),
# ("zh-tw", "中文 (繁體)"),
# ("en", "English"),
# ("fr", "Français"),
# ("de", "Deutsch"),
# ("el", "ελληνικά"),
# ("it", "Italiano"),
# ("ja", "日本語"),
# ("ko", "한국어"),
# ("pt", "Português"),
# ("pt-br", "Português (Brasil)"),
# ("ru", "русский"),
# ("es", "Español"),
# ("tl", "Tagalog"),
# ("ur", "اردو"),
# ("vi", "Tiếng Việt"),
#])
# Default language for Language Toolbar (& GIS Locations in future)
#settings.L10n.default_language = "en"
# Uncomment to Hide the language toolbar
#settings.L10n.display_toolbar = False
# Default timezone for users
#settings.L10n.utc_offset = "UTC +0000"
# Uncomment these to use US-style dates in English (localisations can still convert to local format)
#settings.L10n.date_format = T("%m-%d-%Y")
#settings.L10n.time_format = T("%H:%M:%S")
# Start week on Sunday
#settings.L10n.firstDOW = 0
# Number formats (defaults to ISO 31-0)
# Decimal separator for numbers (defaults to ,)
settings.L10n.decimal_separator = "."
# Thousands separator for numbers (defaults to space)
#settings.L10n.thousands_separator = ","
# Default Country Code for telephone numbers
#settings.L10n.default_country_code = 1
# Make last name in person/user records mandatory
#settings.L10n.mandatory_lastname = True
# Configure the list of Religions
#settings.L10n.get("religions", {"none": T("none"),
#"christian": T("Christian"),
#"muslim": T("Muslim"),
#"jewish": T("Jewish"),
#"buddhist": T("Buddhist"),
#"hindu": T("Hindu"),
#"bahai": T("Bahai"),
#"other": T("other")
#})
# Uncomment this to Translate CMS Series Names
#settings.L10n.translate_cms_series = True
# Uncomment this to Translate Location Names
#settings.L10n.translate_gis_location = True
# Finance settings
#settings.fin.currencies = {
# "EUR" : T("Euros"),
# "GBP" : T("Great British Pounds"),
# "USD" : T("United States Dollars"),
#}
#settings.fin.currency_default = "USD"
#settings.fin.currency_writable = False # False currently breaks things
# PDF settings
# Default page size for reports (defaults to A4)
#settings.base.paper_size = T("Letter")
# Location of Logo used in pdfs headers
#settings.ui.pdf_logo = "static/img/mylogo.png"
# GIS (Map) settings
# Size of the Embedded Map
# Change this if-required for your theme
# NB API can override this in specific modules
#settings.gis.map_height = 600
#settings.gis.map_width = 1000
# Restrict the Location Selector to just certain countries
# NB This can also be over-ridden for specific contexts later
# e.g. Activities filtered to those of parent Project
#settings.gis.countries = ["US"]
# Uncomment to pass Addresses imported from CSV to a Geocoder to try and automate Lat/Lon
#settings.gis.geocode_imported_addresses = "google"
# Hide the Map-based selection tool in the Location Selector
#settings.gis.map_selector = False
# Hide LatLon boxes in the Location Selector
#settings.gis.latlon_selector = False
# Use Building Names as a separate field in Street Addresses?
#settings.gis.building_name = False
# Display Resources recorded to Admin-Level Locations on the map
# @ToDo: Move into gis_config?
# Uncomment to fall back to country LatLon to show resources, if nothing better available
#settings.gis.display_L0 = True
# Currently unused
#settings.gis.display_L1 = False
# Set this if there will be multiple areas in which work is being done,
# and a menu to select among them is wanted.
#settings.gis.menu = "Maps"
# Maximum Marker Size
# (takes effect only on display)
#settings.gis.marker_max_height = 35
#settings.gis.marker_max_width = 30
# Duplicate Features so that they show wrapped across the Date Line?
# Points only for now
# lon<0 have a duplicate at lon+360
# lon>0 have a duplicate at lon-360
#settings.gis.duplicate_features = True
# Uncomment to use CMS to provide Metadata on Map Layers
#settings.gis.layer_metadata = True
# Uncomment to hide Layer Properties tool
#settings.gis.layer_properties = False
# Uncomment to hide the Base Layers folder in the LayerTree
#settings.gis.layer_tree_base = False
# Uncomment to hide the Overlays folder in the LayerTree
#settings.gis.layer_tree_overlays = False
# Uncomment to not expand the folders in the LayerTree by default
#settings.gis.layer_tree_expanded = False
# Uncomment to have custom folders in the LayerTree use Radio Buttons
#settings.gis.layer_tree_radio = True
# Uncomment to display the Map Legend as a floating DIV
#settings.gis.legend = "float"
# Mouse Position: 'normal', 'mgrs' or None
#settings.gis.mouse_position = "mgrs"
# Uncomment to hide the Overview map
#settings.gis.overview = False
# Uncomment to hide the permalink control
#settings.gis.permalink = False
# PoIs to export in KML/OSM feeds from Admin locations
#settings.gis.poi_resources = ["cr_shelter", "hms_hospital", "org_office"]
# Uncomment to hide the ScaleLine control
#settings.gis.scaleline = False
# Uncomment to modify the Simplify Tolerance
#settings.gis.simplify_tolerance = 0.001
# Uncomment to hide the Zoom control
#settings.gis.zoomcontrol = False
# Messaging Settings
# If you wish to use a parser.py in another folder than "default"
#settings.msg.parser = "mytemplatefolder"
# Use 'soft' deletes
#settings.security.archive_not_delete = False
# AAA Settings
# Security Policy
# http://eden.sahanafoundation.org/wiki/S3AAA#System-widePolicy
# 1: Simple (default): Global as Reader, Authenticated as Editor
# 2: Editor role required for Update/Delete, unless record owned by session
# 3: Apply Controller ACLs
# 4: Apply both Controller & Function ACLs
# 5: Apply Controller, Function & Table ACLs
# 6: Apply Controller, Function, Table ACLs and Entity Realm
# 7: Apply Controller, Function, Table ACLs and Entity Realm + Hierarchy
# 8: Apply Controller, Function, Table ACLs, Entity Realm + Hierarchy and Delegations
#
#settings.security.policy = 7 # Organisation-ACLs
# Ownership-rule for records without owner:
# True = not owned by any user (strict ownership, default)
# False = owned by any authenticated user
#settings.security.strict_ownership = False
# Lock-down access to Map Editing
#settings.security.map = True
# Allow non-MapAdmins to edit hierarchy locations? Defaults to True if not set.
# (Permissions can be set per-country within a gis_config)
#settings.gis.edit_Lx = False
# Allow non-MapAdmins to edit group locations? Defaults to False if not set.
#settings.gis.edit_GR = True
# Note that editing of locations used as regions for the Regions menu is always
# restricted to MapAdmins.
# Uncomment to disable that LatLons are within boundaries of their parent
#settings.gis.check_within_parent_boundaries = False
# Enable this for a UN-style deployment
#settings.ui.cluster = True
# Enable this to use the label 'Camp' instead of 'Shelter'
#settings.ui.camp = True
# Enable this to change the label for 'Attachments' tabs
#settings.ui.label_attachments = "Attachments"
# Enable this to change the label for 'Mobile Phone'
#settings.ui.label_mobile_phone = "Cell Phone"
# Enable this to change the label for 'Postcode'
#settings.ui.label_postcode = "ZIP Code"
# Enable Social Media share buttons
#settings.ui.social_buttons = True
# Enable this to show pivot table options form by default
#settings.ui.hide_report_options = False
# Uncomment to show created_by/modified_by using Names not Emails
#settings.ui.auth_user_represent = "name"
# Uncomment to restrict the export formats available
#settings.ui.export_formats = ["kml", "pdf", "rss", "xls", "xml"]
# Uncomment to include an Interim Save button on CRUD forms
#settings.ui.interim_save = True
# -----------------------------------------------------------------------------
# Persons
# Uncomment to hide fields in S3AddPersonWidget[2]
#settings.pr.request_dob = False
#settings.pr.request_gender = False
#settings.pr.select_existing = False
# -----------------------------------------------------------------------------
# Organisations
# Disable the use of Organisation Branches
#settings.org.branches = False
# Set the length of the auto-generated org/site code; the default is 10
#settings.org.site_code_len = 3
# Set the label for Sites
#settings.org.site_label = "Facility"
# Uncomment to show the date when a Site (Facilities-only for now) was last contacted
#settings.org.site_last_contacted = True
# Uncomment to use an Autocomplete for Site lookup fields
#settings.org.site_autocomplete = True
# Uncomment to have Site Autocompletes search within Address fields
#settings.org.site_address_autocomplete = True
# Uncomment to hide inv & req tabs from Sites
#settings.org.site_inv_req_tabs = False
# Uncomment to add summary fields for Organisations/Offices for # National/International staff
#settings.org.summary = True
# Enable certain fields just for specific Organisations
# Requires a call to settings.set_org_dependent_field(field)
# empty list => disabled for all (including Admin)
#settings.org.dependent_fields = \
# {#"<table name>.<field name>" : ["<Organisation Name>"],
# "pr_person_details.mother_name" : [],
# "pr_person_details.father_name" : [],
# "pr_person_details.company" : [],
# "pr_person_details.affiliations" : [],
# "vol_volunteer.active" : [],
# "vol_volunteer_cluster.vol_cluster_type_id" : [],
# "vol_volunteer_cluster.vol_cluster_id" : [],
# "vol_volunteer_cluster.vol_cluster_position_id" : [],
# }
# -----------------------------------------------------------------------------
# Human Resource Management
# Uncomment to change the label for 'Staff'
#settings.hrm.staff_label = "Contacts"
# Uncomment to allow Staff & Volunteers to be registered without an email address
#settings.hrm.email_required = False
# Uncomment to allow Staff & Volunteers to be registered without an Organisation
#settings.hrm.org_required = False
# Uncomment to allow HR records to be deletable rather than just marking them as obsolete
#settings.hrm.deletable = True
# Uncomment to filter certificates by (root) Organisation & hence not allow Certificates from other orgs to be added to a profile (except by Admin)
#settings.hrm.filter_certificates = True
# Uncomment to allow HRs to have multiple Job Titles
#settings.hrm.multiple_job_titles = True
# Uncomment to hide the Staff resource
#settings.hrm.show_staff = False
# Uncomment to allow hierarchical categories of Skills, which each need their own set of competency levels.
#settings.hrm.skill_types = True
# Uncomment to disable Staff experience
#settings.hrm.staff_experience = False
# Uncomment to disable Volunteer experience
#settings.hrm.vol_experience = False
# Uncomment to show the Organisation name in HR represents
#settings.hrm.show_organisation = True
# Uncomment to disable the use of Volunteer Awards
#settings.hrm.use_awards = False
# Uncomment to disable the use of HR Certificates
#settings.hrm.use_certificates = False
# Uncomment to disable the use of HR Credentials
#settings.hrm.use_credentials = False
# Uncomment to disable the use of HR Description
#settings.hrm.use_description = False
# Uncomment to enable the use of HR Education
#settings.hrm.use_education = True
# Uncomment to disable the use of HR ID
#settings.hrm.use_id = False
# Uncomment to disable the use of HR Skills
#settings.hrm.use_skills = False
# Uncomment to disable the use of HR Teams
#settings.hrm.teams = False
# Uncomment to disable the use of HR Trainings
#settings.hrm.use_trainings = False
# -----------------------------------------------------------------------------
# Inventory Management
#settings.inv.collapse_tabs = False
# Uncomment to customise the label for Facilities in Inventory Management
#settings.inv.facility_label = "Facility"
# Uncomment if you need a simpler (but less accountable) process for managing stock levels
#settings.inv.direct_stock_edits = True
# Uncomment to call Stock Adjustments, 'Stock Counts'
#settings.inv.stock_count = True
# Use the term 'Order' instead of 'Shipment'
#settings.inv.shipment_name = "order"
# Uncomment to not track pack values
#settings.inv.track_pack_values = False
#settings.inv.show_mode_of_transport = True
#settings.inv.send_show_org = False
#settings.inv.send_show_time_in = True
#settings.inv.send_form_name = "Tally Out Sheet"
#settings.inv.send_short_name = "TO"
#settings.inv.send_ref_field_name = "Tally Out Number"
#settings.inv.recv_form_name = "Acknowledgement Receipt for Donations Received Form"
#settings.inv.recv_shortname = "ARDR"
# Types common to both Send and Receive
#settings.inv.shipment_types = {
# 0: T("-"),
# 1: T("Other Warehouse"),
# 2: T("Donation"),
# 3: T("Foreign Donation"),
# 4: T("Local Purchases"),
# 5: T("Confiscated Goods from Bureau Of Customs")
# }
#settings.inv.send_types = {
# 21: T("Distribution")
# }
#settings.inv.send_type_default = 1
#settings.inv.recv_types = {
# 32: T("Donation"),
# 34: T("Purchase"),
# }
#settings.inv.item_status = {
# 0: current.messages["NONE"],
# 1: T("Dump"),
# 2: T("Sale"),
# 3: T("Reject"),
# 4: T("Surplus")
# }
# -----------------------------------------------------------------------------
# Requests Management
# Uncomment to disable Inline Forms in Requests module
#settings.req.inline_forms = False
# Label for Inventory Requests
#settings.req.type_inv_label = "Donations"
# Label for People Requests
#settings.req.type_hrm_label = "Volunteers"
# Label for Requester
#settings.req.requester_label = "Site Contact"
# Filter Requester as being from the Site
#settings.req.requester_from_site = True
#settings.req.date_writable = False
# Allow the status for requests to be set manually,
# rather than just automatically from commitments and shipments
#settings.req.status_writable = False
#settings.req.item_quantities_writable = True
#settings.req.skill_quantities_writable = True
#settings.req.show_quantity_transit = False
#settings.req.multiple_req_items = False
#settings.req.prompt_match = False
#settings.req.items_ask_purpose = False
#settings.req.use_commit = False
#settings.req.requester_optional = True
# Should Requests ask whether Security is required?
#settings.req.ask_security = True
# Should Requests ask whether Transportation is required?
#settings.req.ask_transport = True
#settings.req.use_req_number = False
#settings.req.generate_req_number = False
#settings.req.req_form_name = "Request Issue Form"
#settings.req.req_shortname = "RIS"
# Restrict the type of requests that can be made, valid values in the
# list are ["Stock", "People", "Other"]. If this is commented out then
# all types will be valid.
#settings.req.req_type = ["Stock"]
# Uncomment to enable Summary 'Site Needs' tab for Offices/Facilities
#settings.req.summary = True
# Uncomment to restrict adding new commits to Completed commits
#settings.req.req_restrict_on_complete = True
# Custom Crud Strings for specific req_req types
#settings.req.req_crud_strings = dict()
#ADD_ITEM_REQUEST = T("Make a Request for Donations")
# req_req Crud Strings for Item Request (type=1)
#settings.req.req_crud_strings[1] = Storage(
# title_create = ADD_ITEM_REQUEST,
# title_display = T("Request for Donations Details"),
# title_list = T("Requests for Donations"),
# title_update = T("Edit Request for Donations"),
# title_search = T("Search Requests for Donations"),
# subtitle_create = ADD_ITEM_REQUEST,
# label_list_button = T("List Requests for Donations"),
# label_create_button = ADD_ITEM_REQUEST,
# label_delete_button = T("Delete Request for Donations"),
# msg_record_created = T("Request for Donations Added"),
# msg_record_modified = T("Request for Donations Updated"),
# msg_record_deleted = T("Request for Donations Canceled"),
# msg_list_empty = T("No Requests for Donations"))
#ADD_PEOPLE_REQUEST = T("Make a Request for Volunteers")
# req_req Crud Strings for People Request (type=3)
#settings.req.req_crud_strings[3] = Storage(
# title_create = ADD_PEOPLE_REQUEST,
# title_display = T("Request for Volunteers Details"),
# title_list = T("Requests for Volunteers"),
# title_update = T("Edit Request for Volunteers"),
# title_search = T("Search Requests for Volunteers"),
# subtitle_create = ADD_PEOPLE_REQUEST,
# label_list_button = T("List Requests for Volunteers"),
# label_create_button = ADD_PEOPLE_REQUEST,
# label_delete_button = T("Delete Request for Volunteers"),
# msg_record_created = T("Request for Volunteers Added"),
# msg_record_modified = T("Request for Volunteers Updated"),
# msg_record_deleted = T("Request for Volunteers Canceled"),
# msg_list_empty = T("No Requests for Volunteers"))
# -----------------------------------------------------------------------------
# Supply
#settings.supply.use_alt_name = False
# Do not edit after deployment
#settings.supply.catalog_default = T("Default")
# -----------------------------------------------------------------------------
# Projects
# Uncomment this to use settings suitable for a global/regional organisation (e.g. DRR)
#settings.project.mode_3w = True
# Uncomment this to use DRR (Disaster Risk Reduction) extensions
#settings.project.mode_drr = True
# Uncomment this to use settings suitable for detailed Task management
#settings.project.mode_task = True
# Uncomment this to call project locations 'Communities'
#settings.project.community = True
# Uncomment this to use Activities for projects
#settings.project.activities = True
# Uncomment this to use Codes for projects
#settings.project.codes = True
# Uncomment this to use Milestones in project/task.
#settings.project.milestones = True
# Uncomment this to disable Sectors in projects
#settings.project.sectors = False
# Uncomment this to use Theme Percentages for projects
#settings.project.theme_percentages = True
# Uncomment this to use multiple Budgets per project
#settings.project.multiple_budgets = True
# Uncomment this to use multiple Organisations per project
#settings.project.multiple_organisations = True
# Uncomment this to customise
# Links to Filtered Components for Donors & Partners
#settings.project.organisation_roles = {
# 1: T("Lead Implementer"), # T("Host National Society")
# 2: T("Partner"), # T("Partner National Society")
# 3: T("Donor"),
# 4: T("Customer"), # T("Beneficiary")?
# 5: T("Super"), # T("Beneficiary")?
#}
#settings.project.organisation_lead_role = 1
# -----------------------------------------------------------------------------
# Incidents
# Uncomment this to use vehicles when responding to Incident Reports
#settings.irs.vehicle = True
# -----------------------------------------------------------------------------
# Save Search Widget
#settings.search.save_widget = False
# Maximum number of search results for an Autocomplete Widget
#settings.search.max_results = 200
# Comment/uncomment modules here to disable/enable them
# @ToDo: Have the system automatically enable migrate if a module is enabled
# Modules menu is defined in modules/eden/menu.py
settings.modules = OrderedDict([
# Core modules which shouldn't be disabled
("default", Storage(
name_nice = T("Home"),
restricted = False, # Use ACLs to control access to this module
access = None, # All Users (inc Anonymous) can see this module in the default menu & access the controller
module_type = None # This item is not shown in the menu
)),
("admin", Storage(
name_nice = T("Administration"),
#description = "Site Administration",
restricted = True,
access = "|1|", # Only Administrators can see this module in the default menu & access the controller
module_type = None # This item is handled separately for the menu
)),
("appadmin", Storage(
name_nice = T("Administration"),
#description = "Site Administration",
restricted = True,
module_type = None # No Menu
)),
("errors", Storage(
name_nice = T("Ticket Viewer"),
#description = "Needed for Breadcrumbs",
restricted = False,
module_type = None # No Menu
)),
("sync", Storage(
name_nice = T("Synchronization"),
#description = "Synchronization",
restricted = True,
access = "|1|", # Only Administrators can see this module in the default menu & access the controller
module_type = None # This item is handled separately for the menu
)),
("tour", Storage(
name_nice = T("Guided Tour Functionality"),
module_type = None,
)),
("translate", Storage(
name_nice = T("Translation Functionality"),
#description = "Selective translation of strings based on module.",
module_type = None,
)),
# Uncomment to enable internal support requests
#("support", Storage(
# name_nice = T("Support"),
# #description = "Support Requests",
# restricted = True,
# module_type = None # This item is handled separately for the menu
# )),
("gis", Storage(
name_nice = T("Map"),
#description = "Situation Awareness & Geospatial Analysis",
restricted = True,
module_type = 6, # 6th item in the menu
)),
("pr", Storage(
name_nice = T("Person Registry"),
#description = "Central point to record details on People",
restricted = True,
access = "|1|", # Only Administrators can see this module in the default menu (access to controller is possible to all still)
module_type = 10
)),
("org", Storage(
name_nice = T("Organizations"),
#description = 'Lists "who is doing what & where". Allows relief agencies to coordinate their activities',
restricted = True,
module_type = 1
)),
# All modules below here should be possible to disable safely
("hrm", Storage(
name_nice = T("Staff"),
#description = "Human Resources Management",
restricted = True,
module_type = 2,
)),
("vol", Storage(
name_nice = T("Volunteers"),
#description = "Human Resources Management",
restricted = True,
module_type = 2,
)),
("cms", Storage(
name_nice = T("Content Management"),
#description = "Content Management System",
restricted = True,
module_type = 10,
)),
("doc", Storage(
name_nice = T("Documents"),
#description = "A library of digital resources, such as photos, documents and reports",
restricted = True,
module_type = 10,
)),
("msg", Storage(
name_nice = T("Messaging"),
#description = "Sends & Receives Alerts via Email & SMS",
restricted = True,
# The user-visible functionality of this module isn't normally required. Rather it's main purpose is to be accessed from other modules.
module_type = None,
)),
("supply", Storage(
name_nice = T("Supply Chain Management"),
#description = "Used within Inventory Management, Request Management and Asset Management",
restricted = True,
module_type = None, # Not displayed
)),
("inv", Storage(
name_nice = T("Warehouses"),
#description = "Receiving and Sending Items",
restricted = True,
module_type = 4
)),
#("proc", Storage(
# name_nice = T("Procurement"),
# #description = "Ordering & Purchasing of Goods & Services",
# restricted = True,
# module_type = 10
# )),
("asset", Storage(
name_nice = T("Assets"),
#description = "Recording and Assigning Assets",
restricted = True,
module_type = 5,
)),
# Vehicle depends on Assets
("vehicle", Storage(
name_nice = T("Vehicles"),
#description = "Manage Vehicles",
restricted = True,
module_type = 10,
)),
("req", Storage(
name_nice = T("Requests"),
#description = "Manage requests for supplies, assets, staff or other resources. Matches against Inventories where supplies are requested.",
restricted = True,
module_type = 10,
)),
("project", Storage(
name_nice = T("Projects"),
#description = "Tracking of Projects, Activities and Tasks",
restricted = True,
module_type = 2
)),
("survey", Storage(
name_nice = T("Surveys"),
#description = "Create, enter, and manage surveys.",
restricted = True,
module_type = 5,
)),
("cr", Storage(
name_nice = T("Shelters"),
#description = "Tracks the location, capacity and breakdown of victims in Shelters",
restricted = True,
module_type = 10
)),
("hms", Storage(
name_nice = T("Hospitals"),
#description = "Helps to monitor status of hospitals",
restricted = True,
module_type = 10
)),
("irs", Storage(
name_nice = T("Incidents"),
#description = "Incident Reporting System",
restricted = True,
module_type = 10
)),
("dvi", Storage(
name_nice = T("Disaster Victim Identification"),
#description = "Disaster Victim Identification",
restricted = True,
module_type = 10,
#access = "|DVI|", # Only users with the DVI role can see this module in the default menu & access the controller
#audit_read = True, # Can enable Audit for just an individual module here
#audit_write = True
)),
("dvr", Storage(
name_nice = T("Disaster Victim Registry"),
#description = "Allow affected individuals & households to register to receive compensation and distributions",
restricted = True,
module_type = 10,
)),
("event", Storage(
name_nice = T("Events"),
#description = "Activate Events (e.g. from Scenario templates) for allocation of appropriate Resources (Human, Assets & Facilities).",
restricted = True,
module_type = 10,
)),
("transport", Storage(
name_nice = T("Transport"),
restricted = True,
module_type = 10,
)),
#("mpr", Storage(
# name_nice = T("Missing Person Registry"),
# #description = "Helps to report and search for missing persons",
# restricted = True,
# module_type = 10,
# )),
#("stats", Storage(
# name_nice = T("Statistics"),
# #description = "Manages statistics",
# restricted = True,
# module_type = None,
# )),
#("vulnerability", Storage(
# name_nice = T("Vulnerability"),
# #description = "Manages vulnerability indicators",
# restricted = True,
# module_type = 10,
# )),
#("scenario", Storage(
# name_nice = T("Scenarios"),
# #description = "Define Scenarios for allocation of appropriate Resources (Human, Assets & Facilities).",
# restricted = True,
# module_type = 10,
# )),
#("fire", Storage(
# name_nice = T("Fire Stations"),
# #description = "Fire Station Management",
# restricted = True,
# module_type = 1,
# )),
#("flood", Storage(
# name_nice = T("Flood Warnings"),
# #description = "Flood Gauges show water levels in various parts of the country",
# restricted = True,
# module_type = 10
# )),
#("member", Storage(
# name_nice = T("Members"),
# #description = "Membership Management System",
# restricted = True,
# module_type = 10,
# )),
#("patient", Storage(
# name_nice = T("Patient Tracking"),
# #description = "Tracking of Patients",
# restricted = True,
# module_type = 10
# )),
#("security", Storage(
# name_nice = T("Security"),
# #description = "Security Management System",
# restricted = True,
# module_type = 10,
# )),
# These are specialist modules
#("cap", Storage(
# name_nice = T("CAP"),
# #description = "Create & broadcast CAP alerts",
# restricted = True,
# module_type = 10,
#)),
# Requires RPy2 & PostgreSQL
#("climate", Storage(
# name_nice = T("Climate"),
# #description = "Climate data portal",
# restricted = True,
# module_type = 10,
#)),
#("delphi", Storage(
# name_nice = T("Delphi Decision Maker"),
# #description = "Supports the decision making of large groups of Crisis Management Experts by helping the groups create ranked list.",
# restricted = False,
# module_type = 10,
# )),
# @ToDo: Rewrite in a modern style
#("budget", Storage(
# name_nice = T("Budgeting Module"),
# #description = "Allows a Budget to be drawn up",
# restricted = True,
# module_type = 10
# )),
# @ToDo: Port these Assessments to the Survey module
#("building", Storage(
# name_nice = T("Building Assessments"),
# #description = "Building Safety Assessments",
# restricted = True,
# module_type = 10,
# )),
# Deprecated by Surveys module
# - depends on CR, IRS & Impact
#("assess", Storage(
# name_nice = T("Assessments"),
# #description = "Rapid Assessments & Flexible Impact Assessments",
# restricted = True,
# module_type = 10,
# )),
#("impact", Storage(
# name_nice = T("Impacts"),
# #description = "Used by Assess",
# restricted = True,
# module_type = None,
# )),
#("ocr", Storage(
# name_nice = T("Optical Character Recognition"),
# #description = "Optical Character Recognition for reading the scanned handwritten paper forms.",
# restricted = False,
# module_type = None,
# )),
])
| mit | -4,464,261,208,661,015,600 | 40.248853 | 155 | 0.66232 | false |
olav-st/screencloud | res/modules/ScreenCloud.py | 1 | 1650 | from PythonQt.QtCore import QSettings
from PythonQt.QtGui import QDesktopServices
import os, string, base64
try:
from md5 import md5
except ImportError:
from hashlib import md5 #md5 has been moved to hashlib in python 3
from random import randint
from collections import defaultdict
from time import strftime, localtime
def getScreenshotFormat():
settings = QSettings()
settings.beginGroup("main")
format = settings.value("format", "png")
settings.endGroup()
return format
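# Builds a screenshot filename: expands strftime patterns and the {rnd}, {rnd_h}
# and {rnd_s} random placeholders in the template, then appends the configured
# screenshot extension if requested and not already present.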
def formatFilename(nameFormat, includeFileExtension = True, custom_vars = dict()):
try:
name = strftime(nameFormat.encode('utf-8'), localtime()).decode('utf-8')
except TypeError:
name = strftime(nameFormat, localtime()) #fix for python 3
except ValueError:
name = nameFormat
pass
random_hash = md5(os.urandom(128)).hexdigest()
random_num = str(randint(0,9))
random_short = base64.urlsafe_b64encode(os.urandom(6)).decode('utf-8')
var_dict = defaultdict(str, rnd = random_num, rnd_h = random_hash, rnd_s = random_short)
var_dict.update(custom_vars)
try:
name = string.Formatter().vformat(name, (), var_dict)
except ValueError:
pass
extension = "." + getScreenshotFormat()
if(includeFileExtension and extension not in name):
name += extension
return name
def getPluginDir():
try:
return QDesktopServices.storageLocation(QDesktopServices.DataLocation) + "/plugins"
except AttributeError:
from PythonQt.QtCore import QStandardPaths
return QStandardPaths.writableLocation(QStandardPaths.DataLocation) + "/plugins"
def setUrl(url):
global clipboardUrl
clipboardUrl = url
def setError(err):
global uploadingError
uploadingError = err
| gpl-2.0 | -3,575,533,716,724,261,000 | 29.555556 | 89 | 0.761212 | false |
igor-toga/local-snat | neutron/tests/tempest/api/test_subnetpools.py | 1 | 17261 | # Copyright 2015 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib.common.utils import data_utils
from tempest import test
from neutron.tests.tempest.api import base
SUBNETPOOL_NAME = 'smoke-subnetpool'
SUBNET_NAME = 'smoke-subnet'
class SubnetPoolsTestBase(base.BaseAdminNetworkTest):
@classmethod
def resource_setup(cls):
super(SubnetPoolsTestBase, cls).resource_setup()
min_prefixlen = '29'
prefixes = [u'10.11.12.0/24']
cls._subnetpool_data = {'prefixes': prefixes,
'min_prefixlen': min_prefixlen}
@classmethod
def _create_subnetpool(cls, is_admin=False, **kwargs):
if 'name' not in kwargs:
name = data_utils.rand_name(SUBNETPOOL_NAME)
else:
name = kwargs.pop('name')
if 'prefixes' not in kwargs:
kwargs['prefixes'] = cls._subnetpool_data['prefixes']
if 'min_prefixlen' not in kwargs:
kwargs['min_prefixlen'] = cls._subnetpool_data['min_prefixlen']
return cls.create_subnetpool(name=name, is_admin=is_admin, **kwargs)
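# The subnetpool scenarios below run once for IPv4 (SubnetPoolsTest) and once for
# IPv6 (SubnetPoolsTestV6), which only overrides the prefix/prefixlen attributes.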
class SubnetPoolsTest(SubnetPoolsTestBase):
min_prefixlen = '28'
max_prefixlen = '31'
_ip_version = 4
subnet_cidr = u'10.11.12.0/31'
new_prefix = u'10.11.15.0/24'
larger_prefix = u'10.11.0.0/16'
"""
Tests the following operations in the Neutron API using the REST client for
Neutron:
create a subnetpool for a tenant
list tenant's subnetpools
show a tenant subnetpool details
subnetpool update
delete a subnetpool
All subnetpool tests are run once with ipv4 and once with ipv6.
v2.0 of the Neutron API is assumed.
"""
def _new_subnetpool_attributes(self):
new_name = data_utils.rand_name(SUBNETPOOL_NAME)
return {'name': new_name, 'min_prefixlen': self.min_prefixlen,
'max_prefixlen': self.max_prefixlen}
def _check_equality_updated_subnetpool(self, expected_values,
updated_pool):
self.assertEqual(expected_values['name'],
updated_pool['name'])
self.assertEqual(expected_values['min_prefixlen'],
updated_pool['min_prefixlen'])
self.assertEqual(expected_values['max_prefixlen'],
updated_pool['max_prefixlen'])
# expected_values may not contains all subnetpool values
if 'prefixes' in expected_values:
self.assertEqual(expected_values['prefixes'],
updated_pool['prefixes'])
@test.idempotent_id('6e1781ec-b45b-4042-aebe-f485c022996e')
def test_create_list_subnetpool(self):
created_subnetpool = self._create_subnetpool()
body = self.client.list_subnetpools()
subnetpools = body['subnetpools']
self.assertIn(created_subnetpool['id'],
[sp['id'] for sp in subnetpools],
"Created subnetpool id should be in the list")
self.assertIn(created_subnetpool['name'],
[sp['name'] for sp in subnetpools],
"Created subnetpool name should be in the list")
@test.idempotent_id('c72c1c0c-2193-4aca-ddd4-b1442640bbbb')
@test.requires_ext(extension="standard-attr-description",
service="network")
def test_create_update_subnetpool_description(self):
body = self._create_subnetpool(description='d1')
self.assertEqual('d1', body['description'])
sub_id = body['id']
body = filter(lambda x: x['id'] == sub_id,
self.client.list_subnetpools()['subnetpools'])[0]
self.assertEqual('d1', body['description'])
body = self.client.update_subnetpool(sub_id, description='d2')
self.assertEqual('d2', body['subnetpool']['description'])
body = filter(lambda x: x['id'] == sub_id,
self.client.list_subnetpools()['subnetpools'])[0]
self.assertEqual('d2', body['description'])
@test.idempotent_id('741d08c2-1e3f-42be-99c7-0ea93c5b728c')
def test_get_subnetpool(self):
created_subnetpool = self._create_subnetpool()
prefixlen = self._subnetpool_data['min_prefixlen']
body = self.client.show_subnetpool(created_subnetpool['id'])
subnetpool = body['subnetpool']
self.assertEqual(created_subnetpool['name'], subnetpool['name'])
self.assertEqual(created_subnetpool['id'], subnetpool['id'])
self.assertEqual(prefixlen, subnetpool['min_prefixlen'])
self.assertEqual(prefixlen, subnetpool['default_prefixlen'])
self.assertFalse(subnetpool['shared'])
@test.idempotent_id('764f1b93-1c4a-4513-9e7b-6c2fc5e9270c')
def test_tenant_update_subnetpool(self):
created_subnetpool = self._create_subnetpool()
pool_id = created_subnetpool['id']
subnetpool_data = self._new_subnetpool_attributes()
self.client.update_subnetpool(created_subnetpool['id'],
**subnetpool_data)
body = self.client.show_subnetpool(pool_id)
subnetpool = body['subnetpool']
self._check_equality_updated_subnetpool(subnetpool_data,
subnetpool)
self.assertFalse(subnetpool['shared'])
@test.idempotent_id('4b496082-c992-4319-90be-d4a7ce646290')
def test_update_subnetpool_prefixes_append(self):
# We can append new prefixes to subnetpool
create_subnetpool = self._create_subnetpool()
pool_id = create_subnetpool['id']
old_prefixes = self._subnetpool_data['prefixes']
new_prefixes = old_prefixes[:]
new_prefixes.append(self.new_prefix)
subnetpool_data = {'prefixes': new_prefixes}
self.client.update_subnetpool(pool_id, **subnetpool_data)
body = self.client.show_subnetpool(pool_id)
prefixes = body['subnetpool']['prefixes']
self.assertIn(self.new_prefix, prefixes)
self.assertIn(old_prefixes[0], prefixes)
@test.idempotent_id('2cae5d6a-9d32-42d8-8067-f13970ae13bb')
def test_update_subnetpool_prefixes_extend(self):
# We can extend current subnetpool prefixes
created_subnetpool = self._create_subnetpool()
pool_id = created_subnetpool['id']
old_prefixes = self._subnetpool_data['prefixes']
subnetpool_data = {'prefixes': [self.larger_prefix]}
self.client.update_subnetpool(pool_id, **subnetpool_data)
body = self.client.show_subnetpool(pool_id)
prefixes = body['subnetpool']['prefixes']
self.assertIn(self.larger_prefix, prefixes)
self.assertNotIn(old_prefixes[0], prefixes)
@test.idempotent_id('d70c6c35-913b-4f24-909f-14cd0d29b2d2')
def test_admin_create_shared_subnetpool(self):
created_subnetpool = self._create_subnetpool(is_admin=True,
shared=True)
pool_id = created_subnetpool['id']
# Shared subnetpool can be retrieved by tenant user.
body = self.client.show_subnetpool(pool_id)
subnetpool = body['subnetpool']
self.assertEqual(created_subnetpool['name'], subnetpool['name'])
self.assertTrue(subnetpool['shared'])
def _create_subnet_from_pool(self, subnet_values=None, pool_values=None):
if pool_values is None:
pool_values = {}
created_subnetpool = self._create_subnetpool(**pool_values)
pool_id = created_subnetpool['id']
subnet_name = data_utils.rand_name(SUBNETPOOL_NAME)
network = self.create_network()
subnet_kwargs = {'name': subnet_name,
'subnetpool_id': pool_id}
if subnet_values:
subnet_kwargs.update(subnet_values)
# not creating the subnet using the base.create_subnet because
# that function needs to be enhanced to support subnet_create when
# prefixlen and subnetpool_id is specified.
body = self.client.create_subnet(
network_id=network['id'],
ip_version=self._ip_version,
**subnet_kwargs)
subnet = body['subnet']
return pool_id, subnet
@test.idempotent_id('1362ed7d-3089-42eb-b3a5-d6cb8398ee77')
def test_create_subnet_from_pool_with_prefixlen(self):
subnet_values = {"prefixlen": self.max_prefixlen}
pool_id, subnet = self._create_subnet_from_pool(
subnet_values=subnet_values)
cidr = str(subnet['cidr'])
self.assertEqual(pool_id, subnet['subnetpool_id'])
self.assertTrue(cidr.endswith(str(self.max_prefixlen)))
@test.idempotent_id('86b86189-9789-4582-9c3b-7e2bfe5735ee')
def test_create_subnet_from_pool_with_subnet_cidr(self):
subnet_values = {"cidr": self.subnet_cidr}
pool_id, subnet = self._create_subnet_from_pool(
subnet_values=subnet_values)
cidr = str(subnet['cidr'])
self.assertEqual(pool_id, subnet['subnetpool_id'])
self.assertEqual(cidr, self.subnet_cidr)
@test.idempotent_id('83f76e3a-9c40-40c2-a015-b7c5242178d8')
def test_create_subnet_from_pool_with_default_prefixlen(self):
# If neither cidr nor prefixlen is specified,
# subnet will use subnetpool default_prefixlen for cidr.
pool_id, subnet = self._create_subnet_from_pool()
cidr = str(subnet['cidr'])
self.assertEqual(pool_id, subnet['subnetpool_id'])
prefixlen = self._subnetpool_data['min_prefixlen']
self.assertTrue(cidr.endswith(str(prefixlen)))
@test.idempotent_id('a64af292-ec52-4bde-b654-a6984acaf477')
def test_create_subnet_from_pool_with_quota(self):
pool_values = {'default_quota': 4}
subnet_values = {"prefixlen": self.max_prefixlen}
pool_id, subnet = self._create_subnet_from_pool(
subnet_values=subnet_values, pool_values=pool_values)
cidr = str(subnet['cidr'])
self.assertEqual(pool_id, subnet['subnetpool_id'])
self.assertTrue(cidr.endswith(str(self.max_prefixlen)))
@test.idempotent_id('49b44c64-1619-4b29-b527-ffc3c3115dc4')
@test.requires_ext(extension='address-scope', service='network')
def test_create_subnetpool_associate_address_scope(self):
address_scope = self.create_address_scope(
name=data_utils.rand_name('smoke-address-scope'),
ip_version=self._ip_version)
created_subnetpool = self._create_subnetpool(
address_scope_id=address_scope['id'])
body = self.client.show_subnetpool(created_subnetpool['id'])
self.assertEqual(address_scope['id'],
body['subnetpool']['address_scope_id'])
@test.idempotent_id('910b6393-db24-4f6f-87dc-b36892ad6c8c')
@test.requires_ext(extension='address-scope', service='network')
def test_update_subnetpool_associate_address_scope(self):
address_scope = self.create_address_scope(
name=data_utils.rand_name('smoke-address-scope'),
ip_version=self._ip_version)
created_subnetpool = self._create_subnetpool()
pool_id = created_subnetpool['id']
body = self.client.show_subnetpool(pool_id)
self.assertIsNone(body['subnetpool']['address_scope_id'])
self.client.update_subnetpool(pool_id,
address_scope_id=address_scope['id'])
body = self.client.show_subnetpool(pool_id)
self.assertEqual(address_scope['id'],
body['subnetpool']['address_scope_id'])
@test.idempotent_id('18302e80-46a3-4563-82ac-ccd1dd57f652')
@test.requires_ext(extension='address-scope', service='network')
def test_update_subnetpool_associate_another_address_scope(self):
address_scope = self.create_address_scope(
name=data_utils.rand_name('smoke-address-scope'),
ip_version=self._ip_version)
another_address_scope = self.create_address_scope(
name=data_utils.rand_name('smoke-address-scope'),
ip_version=self._ip_version)
created_subnetpool = self._create_subnetpool(
address_scope_id=address_scope['id'])
pool_id = created_subnetpool['id']
body = self.client.show_subnetpool(pool_id)
self.assertEqual(address_scope['id'],
body['subnetpool']['address_scope_id'])
self.client.update_subnetpool(
pool_id, address_scope_id=another_address_scope['id'])
body = self.client.show_subnetpool(pool_id)
self.assertEqual(another_address_scope['id'],
body['subnetpool']['address_scope_id'])
@test.idempotent_id('f8970048-e41b-42d6-934b-a1297b07706a')
@test.requires_ext(extension='address-scope', service='network')
def test_update_subnetpool_disassociate_address_scope(self):
address_scope = self.create_address_scope(
name=data_utils.rand_name('smoke-address-scope'),
ip_version=self._ip_version)
created_subnetpool = self._create_subnetpool(
address_scope_id=address_scope['id'])
pool_id = created_subnetpool['id']
body = self.client.show_subnetpool(pool_id)
self.assertEqual(address_scope['id'],
body['subnetpool']['address_scope_id'])
self.client.update_subnetpool(pool_id,
address_scope_id=None)
body = self.client.show_subnetpool(pool_id)
self.assertIsNone(body['subnetpool']['address_scope_id'])
class SubnetPoolsTestV6(SubnetPoolsTest):
min_prefixlen = '48'
max_prefixlen = '64'
_ip_version = 6
subnet_cidr = '2001:db8:3::/64'
new_prefix = u'2001:db8:5::/64'
larger_prefix = u'2001:db8::/32'
@classmethod
def resource_setup(cls):
super(SubnetPoolsTestV6, cls).resource_setup()
min_prefixlen = '64'
prefixes = [u'2001:db8:3::/48']
cls._subnetpool_data = {'min_prefixlen': min_prefixlen,
'prefixes': prefixes}
@test.idempotent_id('f62d73dc-cf6f-4879-b94b-dab53982bf3b')
def test_create_dual_stack_subnets_from_subnetpools(self):
pool_id_v6, subnet_v6 = self._create_subnet_from_pool()
pool_values_v4 = {'prefixes': ['192.168.0.0/16'],
'min_prefixlen': 21,
'max_prefixlen': 32}
create_v4_subnetpool = self._create_subnetpool(**pool_values_v4)
pool_id_v4 = create_v4_subnetpool['id']
subnet_v4 = self.client.create_subnet(
network_id=subnet_v6['network_id'], ip_version=4,
subnetpool_id=pool_id_v4)['subnet']
self.assertEqual(subnet_v4['network_id'], subnet_v6['network_id'])
class SubnetPoolsSearchCriteriaTest(base.BaseSearchCriteriaTest,
SubnetPoolsTestBase):
resource = 'subnetpool'
list_kwargs = {'shared': False}
@classmethod
def resource_setup(cls):
super(SubnetPoolsSearchCriteriaTest, cls).resource_setup()
for name in cls.resource_names:
cls._create_subnetpool(name=name)
@test.idempotent_id('6e3f842e-6bfb-49cb-82d3-0026be4e8e04')
def test_list_sorts_asc(self):
self._test_list_sorts_asc()
@test.idempotent_id('f336859b-b868-438c-a6fc-2c06374115f2')
def test_list_sorts_desc(self):
self._test_list_sorts_desc()
@test.idempotent_id('1291fae7-c196-4372-ad59-ce7988518f7b')
def test_list_pagination(self):
self._test_list_pagination()
@test.idempotent_id('ddb20d14-1952-49b4-a17e-231cc2239a52')
def test_list_pagination_with_marker(self):
self._test_list_pagination_with_marker()
@test.idempotent_id('b3bd9665-2769-4a43-b50c-31b1add12891')
def test_list_pagination_with_href_links(self):
self._test_list_pagination_with_href_links()
@test.idempotent_id('1ec1f325-43b0-406e-96ce-20539e38a61d')
def test_list_pagination_page_reverse_asc(self):
self._test_list_pagination_page_reverse_asc()
@test.idempotent_id('f43a293e-4aaa-48f4-aeaf-de63a676357c')
def test_list_pagination_page_reverse_desc(self):
self._test_list_pagination_page_reverse_desc()
@test.idempotent_id('73511385-839c-4829-8ac1-b5ad992126c4')
def test_list_pagination_page_reverse_with_href_links(self):
self._test_list_pagination_page_reverse_with_href_links()
@test.idempotent_id('82a13efc-c18f-4249-b8ec-cec7cf26fbd6')
def test_list_no_pagination_limit_0(self):
self._test_list_no_pagination_limit_0()
| apache-2.0 | -3,469,962,308,355,516,400 | 43.033163 | 79 | 0.636116 | false |
gimli-org/gimli | pygimli/physics/SIP/plotting.py | 1 | 2358 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Spectral induced polarization (SIP) plotting tools"""
import matplotlib.pyplot as plt
import pygimli as pg
def showAmplitudeSpectrum(*args, **kwargs):
pg.deprecated('drawAmplitudeSpectrum')
return drawAmplitudeSpectrum(*args, **kwargs)
def showPhaseSpectrum(*args, **kwargs):
pg.deprecated('drawPhaseSpectrum')
return drawPhaseSpectrum(*args, **kwargs)
def drawAmplitudeSpectrum(ax, freq, amp, ylabel=r'$\rho$ ($\Omega$m)',
grid=True, marker='+', ylog=True, **kwargs):
"""Show amplitude spectrum (resistivity as a function of f)."""
if 'label' not in kwargs:
kwargs['label'] = 'obs'
gci = ax.semilogx(freq, amp, marker=marker, **kwargs)
if ylog is None:
ylog = (min(amp) > 0)
if ylog:
ax.set_yscale('log')
#ax.set_ylim(min(amp) * .99, max(amp * 1.01))
ax.set_xlabel('f (Hz)')
ax.set_ylabel(ylabel)
ax.grid(grid)
ax.legend()
return gci
def drawPhaseSpectrum(ax, freq, phi, ylabel=r'$-\phi$ (mrad)',
grid=True, marker='+', ylog=False, **kwargs):
"""Show phase spectrum (-phi as a function of f)."""
if 'label' not in kwargs:
kwargs['label'] = 'obs'
gci = ax.semilogx(freq, phi, marker=marker, **kwargs)
if ylog:
ax.set_yscale('log')
ax.set_xlabel('f (Hz)')
ax.set_ylabel(ylabel)
ax.grid(grid)
ax.legend()
return gci
def showSpectrum(freq, amp, phi, nrows=2, ylog=None, axs=None, **kwargs):
"""Show amplitude and phase spectra in two subplots."""
if axs is None:
fig, axs = plt.subplots(nrows=nrows, sharex=(nrows == 2))
else:
fig = axs[0].figure
drawAmplitudeSpectrum(axs[0], freq, amp, ylog=ylog, **kwargs)
drawPhaseSpectrum(axs[1], freq, phi, ylog=ylog, **kwargs)
return fig, axs
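# Minimal usage sketch (hypothetical data, assuming numpy is imported as np):
#   f = np.logspace(-2, 3, 20)
#   fig, axs = showSpectrum(f, 100.0 + 0.0 * f, 10.0 / (1.0 + f))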
def plotSpectrum(ax, freq, vals, ylabel=r'$-\phi$ (mrad)',
grid=True, marker='+', ylog=True, **kwargs):
"""Plot some spectrum (redundant).
DEPRECATED
"""
pg.deprecated('drawSpectrum')
if 'label' not in kwargs:
kwargs['label'] = 'obs'
ax.loglog(freq, vals, marker=marker, **kwargs)
if ylog:
ax.set_yscale('log')
ax.set_xlabel('f (Hz)')
ax.set_ylabel(ylabel)
ax.grid(grid)
if __name__ == "__main__":
pass
| apache-2.0 | 1,425,055,818,539,367,400 | 27.756098 | 73 | 0.59754 | false |
davidsblom/FOAM-FSI | src/tests/runTests.py | 1 | 1246 | #!/usr/bin/env python
import os, subprocess, multiprocessing, time, sys, argparse
nbCores = int( os.environ['WM_NCOMPPROCS'] )
parser = argparse.ArgumentParser( description='Run the test suite' )
parser.add_argument('testsuite', help='which testsuite: testsuite-dealii, testsuite-rbf, testsuite-spacemapping, testsuite-fsi, or testsuite-sdc' )
args = parser.parse_args()
runs = []
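# Shard the Google Test suite across all available cores using the
# GTEST_TOTAL_SHARDS / GTEST_SHARD_INDEX environment variables; each shard
# redirects its output to its own tests_<i>.log file.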
for i in range( nbCores ):
run = subprocess.Popen( "GTEST_TOTAL_SHARDS=" + str(nbCores) + " GTEST_SHARD_INDEX=" + str(i) + " " + args.testsuite + " --gtest_throw_on_failure > tests_" + str(i) + ".log 2>&1", shell = True )
runs.append( run )
i = 0
returnCode = 0
for run in runs:
code = None
while code is None:
print i
i += 1
sys.stdout.flush()
code = run.poll()
if code > returnCode:
returnCode = code
if code is not None:
break
time.sleep( 1 )
run.wait()
print 'Run finished'
print 'All runs finished'
sys.stdout.flush()
for i in range( nbCores ):
subprocess.call("tail -n 20 tests_" + str(i) + ".log 2>&1", shell=True)
sys.stdout.flush()
if returnCode == 0:
print "Finished successfully"
else:
print "Tests failed"
exit( returnCode )
| gpl-2.0 | 5,173,613,984,465,485,000 | 27.318182 | 198 | 0.629213 | false |
BUILDS-/Derpnet | test/stresser.py | 1 | 1083 | # This file is part of Derpnet.
#
# Derpnet is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Derpnet is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Derpnet. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright (C) 2011 The Derpnet Team.
import socket
import time
import sys
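# Expected invocation (inferred from the sys.argv usage below):
#   python stresser.py <host> <port> <payload-multiplier>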
global tot
global worked
worked = 0
tot = 0
def openConn() :
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((sys.argv[1],int(sys.argv[2])))
s.send('test\n' * int(sys.argv[3]))
# time.sleep(2)
print "Got %d bytes of expected %d\n" % (len(s.recv(65536)), len('test\n' * int(sys.argv[3])))
for i in range(40) :
openConn()
| gpl-3.0 | 6,097,142,690,130,481,000 | 30.852941 | 96 | 0.717452 | false |
shanet/Cryptully | src/ncurses/cursesDialog.py | 1 | 2412 | import curses
class CursesDialog:
def __init__(self, screen, message, title="", isError=False, isFatal=False, isBlocking=False):
self.screen = screen
self.title = title
self.message = message
self.isError = isError
self.isFatal = isFatal
self.isBlocking = isBlocking
if curses.has_colors():
curses.init_pair(6, curses.COLOR_GREEN, curses.COLOR_BLACK)
curses.init_pair(7, curses.COLOR_RED, curses.COLOR_BLACK)
def show(self):
(height, width) = self.screen.getmaxyx()
if self.isFatal:
exitMessage = "Press enter to exit"
elif self.isError:
exitMessage = "Press enter to continue"
elif self.isBlocking:
exitMessage = "Press any key to continue"
else:
exitMessage = ""
# Determine the max width of the dialog window
dialogWidth = max(len(self.title), len(self.message), len(exitMessage)) + 2
if self.title:
dialogHeight = 7
elif self.isError or self.isBlocking:
dialogHeight = 5
else:
dialogHeight = 3
self.dialogWindow = self.screen.subwin(dialogHeight, dialogWidth, height/2 - int(dialogHeight/2), width/2 - int(dialogWidth/2))
self.dialogWindow.clear()
self.dialogWindow.border(0)
# Add the title if provided
if self.title:
self.dialogWindow.addstr(1, 1, self.title, curses.color_pair(7) if self.isError else curses.color_pair(6))
self.dialogWindow.hline(2, 1, 0, dialogWidth-2)
# Add the message
if self.message:
verticalPos = 3 if self.title else 1
self.dialogWindow.addstr(verticalPos, 1, self.message)
# Add the exit message if the dialog is an error dialog or is blocking
if self.isError or self.isBlocking:
if self.title:
verticalPos = 5
else:
verticalPos = 3
self.dialogWindow.addstr(verticalPos, 1, exitMessage)
# Disable the cursor
curses.curs_set(0)
self.dialogWindow.refresh()
if self.isBlocking:
self.dialogWindow.getch()
self.hide()
def hide(self):
curses.curs_set(2)
self.dialogWindow.clear()
self.dialogWindow.refresh()
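# Usage sketch (assuming an already-initialized curses screen object):
#   dialog = CursesDialog(screen, "Waiting for peer...", title="Status", isBlocking=True)
#   dialog.show()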
| lgpl-3.0 | 5,121,583,365,409,258,000 | 31.16 | 135 | 0.584163 | false |
duducosmos/pgs4a | buildlib/build.py | 1 | 8180 | #!/usr/bin/env python2.7
import sys
sys.path.insert(0, 'buildlib/jinja2.egg')
sys.path.insert(0, 'buildlib')
# import zlib
# zlib.Z_DEFAULT_COMPRESSION = 9
import tarfile
import os
import shutil
import subprocess
import time
import jinja2
import configure
import plat
# Are we doing a Ren'Py build?
RENPY = os.path.exists("private/renpy")
# If we have python 2.7, record the path to it.
if not RENPY and sys.version_info.major == 2 and sys.version_info.minor == 7:
PYTHON = sys.executable
else:
PYTHON = None
# Files and extensions we should not package.
BLACKLIST_FILES = [
"icon.ico",
"icon.icns",
"android-icon.png",
"android-presplash.png",
"launcherinfo.py",
".nomedia",
".android.json",
]
BLACKLIST_EXTENSIONS = [
"~",
".bak",
".rpy",
".swp",
]
BLACKLIST_DIRS = [
".hg",
".git",
".bzr",
".svn",
]
if PYTHON is not None:
BLACKLIST_EXTENSIONS += [".py", ".pyc"]
# Used by render.
environment = jinja2.Environment(loader=jinja2.FileSystemLoader('templates'))
def render(template, dest, **kwargs):
"""
Using jinja2, render `template` to the filename `dest`, supplying the keyword
arguments as template parameters.
"""
template = environment.get_template(template)
text = template.render(**kwargs)
f = file(dest, "wb")
f.write(text.encode("utf-8"))
f.close()
def compile_dir(dfn):
"""
Compile *.py in directory `dfn` to *.pyo
"""
# -OO = strip docstrings
subprocess.call([PYTHON,'-OO','-m','compileall','-f',dfn])
def make_tar(fn, source_dirs):
"""
    Make a gzip-compressed tar file `fn` from the contents of source_dirs.
"""
# zf = zipfile.ZipFile(fn, "w")
tf = tarfile.open(fn, "w:gz")
for sd in source_dirs:
if ".py" in BLACKLIST_EXTENSIONS:
compile_dir(sd)
sd = os.path.abspath(sd)
for dir, dirs, files in os.walk(sd): #@ReservedAssignment
for bd in BLACKLIST_DIRS:
if bd in dirs:
dirs.remove(bd)
for fn in dirs:
fn = os.path.join(dir, fn)
relfn = os.path.relpath(fn, sd)
tf.add(fn, relfn, recursive=False)
for fn in files:
fn = os.path.join(dir, fn)
relfn = os.path.relpath(fn, sd)
bl = False
for e in BLACKLIST_EXTENSIONS:
if relfn.endswith(e):
bl = True
if bl:
continue
if relfn in BLACKLIST_FILES:
continue
tf.add(fn, relfn)
# TODO: Fix me.
# tf.writestr(".nomedia", "")
tf.close()
def join_and_check(base, sub):
"""
    If base/sub exists, return the joined path. Otherwise, return None.
"""
rv = os.path.join(base, sub)
if os.path.exists(rv):
return rv
return None
def build_core(iface, directory, commands):
global BLACKLIST_DIRS
global BLACKLIST_FILES
config = configure.Configuration(directory)
if config.package is None:
iface.fail("Run configure before attempting to build the app.")
if not config.include_sqlite:
BLACKLIST_DIRS += ['sqlite3']
BLACKLIST_FILES += ['_sqlite3.so']
shelve_lib('libsqlite3.so')
if not config.include_pil:
BLACKLIST_DIRS += ['PIL']
BLACKLIST_FILES += ['_imaging.so','_imagingft.so','_imagingmath.so']
if RENPY:
manifest_extra = '<uses-feature android:glEsVersion="0x00020000" />'
default_icon = "templates/renpy-icon.png"
default_presplash = "templates/renpy-presplash.jpg"
public_dir = None
private_dir = None
assets_dir = directory
else:
manifest_extra = ""
default_icon = "templates/pygame-icon.png"
default_presplash = "templates/pygame-presplash.jpg"
if config.layout == "internal":
private_dir = directory
public_dir = None
assets_dir = None
elif config.layout == "external":
private_dir = None
public_dir = directory
assets_dir = None
elif config.layout == "split":
private_dir = join_and_check(directory, "internal")
public_dir = join_and_check(directory, "external")
assets_dir = join_and_check(directory, "assets")
versioned_name = config.name.replace(" ", "").replace("'", "") + "-" + config.version
# Annoying fixups.
config.name = config.name.replace("'", "\\'")
config.icon_name = config.icon_name.replace("'", "\\'")
# Figure out versions of the private and public data.
private_version = str(time.time())
if public_dir:
public_version = private_version
else:
public_version = None
# Render the various templates into control files.
render(
"AndroidManifest.tmpl.xml",
"AndroidManifest.xml",
config = config,
manifest_extra = manifest_extra,
)
render(
"strings.xml",
"res/values/strings.xml",
public_version = public_version,
private_version = private_version,
config = config)
try:
os.unlink("build.xml")
except:
pass
iface.info("Updating build files.")
# Update the project to a recent version.
subprocess.call([plat.android, "update", "project", "-p", '.', '-t', 'android-19', '-n', versioned_name])
iface.info("Creating assets directory.")
shutil.rmtree("assets")
if assets_dir is not None:
shutil.copytree(assets_dir, "assets")
else:
os.mkdir("assets")
# Copy in the Ren'Py common assets.
if os.path.exists("engine-assets/common"):
shutil.copytree("engine-assets/common", "assets/common")
# Ren'Py uses a lot of names that don't work as assets. Auto-rename
# them.
for dirpath, dirnames, filenames in os.walk("assets", topdown=False):
for fn in filenames + dirnames:
if fn[0] == ".":
continue
old = os.path.join(dirpath, fn)
new = os.path.join(dirpath, "x-" + fn)
os.rename(old, new)
iface.info("Packaging internal data.")
private_dirs = [ 'private' ]
if private_dir is not None:
private_dirs.append(private_dir)
if os.path.exists("engine-private"):
private_dirs.append("engine-private")
make_tar("assets/private.mp3", private_dirs)
if public_dir is not None:
iface.info("Packaging external data.")
make_tar("assets/public.mp3", [ public_dir ])
# Copy over the icon and presplash files.
shutil.copy(join_and_check(directory, "android-icon.png") or default_icon, "res/drawable/icon.png")
shutil.copy(join_and_check(directory, "android-presplash.jpg") or default_presplash, "res/drawable/presplash.jpg")
# Build.
iface.info("I'm using Ant to build the package.")
# Clean is required
try:
subprocess.check_call([plat.ant, "clean"] + commands)
iface.success("It looks like the build succeeded.")
except:
iface.fail("The build seems to have failed.")
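# shelve_lib()/unshelve_libs() temporarily move unwanted native libraries
# (e.g. libsqlite3.so) out of libs/ so Ant does not package them, and restore
# them once the build has finished.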
def shelve_lib(lfn):
for root, _dirs, files in os.walk('libs'):
for fn in files:
if fn == lfn:
shelf_dir = os.path.join('.shelf', root)
if not os.path.exists(shelf_dir):
os.makedirs(shelf_dir)
shutil.move(os.path.join(root,fn), shelf_dir)
def unshelve_libs():
if os.path.exists('.shelf'):
for root, _dirs, files in os.walk('.shelf'):
for fn in files:
lib_dir = root[len('.shelf/'):]
shutil.move(os.path.join(root,fn), lib_dir)
shutil.rmtree('.shelf')
def build(iface, directory, commands):
try:
build_core(iface, directory, commands)
finally:
unshelve_libs()
| lgpl-2.1 | 3,644,704,240,008,344,000 | 25.907895 | 118 | 0.566504 | false |
pikuli-project/pikuli | pikuli/BaseRegion.py | 1 | 9760 | # -*- coding: utf-8 -*-
"""
BaseRegion - a rectangular screen area defined by its top-left corner coordinates, width and height.
BaseRegion holds no information about the visual content on the screen.
Content can be located using the .find() or .findAll() methods implemented in the descendant class.
"""
import cv2
import numpy as np
import platform
from Location import Location
from logger import PikuliLogger
from common_exceptions import FailExit, FindFailed
current_platform = platform.system()
if current_platform == 'Darwin':
from display_mac import Display
elif current_platform == 'Windows':
from display_win import Display
else:
raise NotImplementedError
DELAY_BETWEEN_CV_ATTEMPT = 1.0 # delay between attempts of recognition
DEFAULT_FIND_TIMEOUT = 3.1
logger = PikuliLogger('pikuli.Region ').logger
class BaseRegion(object):
def __init__(self, *args, **kwargs):
"""
Option 1:
args[0]:
Region object
or Screen - whole screen
Option 2:
args[0:4] == [x, y, w, h]:
integers - x,y coordinates, width w, height h;
            A new rectangular area will be built.
            Area borders belong to the area.
kwargs can contain:
title - human-readable id (string)
id - id for use in code
            find_timeout - default value used by the find() method;
            if not passed to the constructor, DEFAULT_FIND_TIMEOUT is used.
"""
self.display = Display()
self.scaling_factor = self.display.get_monitor_info(1)[-1]
self.drag_location = None
self.relations = ['top-left', 'center']
(self.x, self.y, self.w, self.h) = (None, None, None, None)
self.screen_number = 1
self._last_match = None
# human-readable id
self.title = str(kwargs.get('title', 'New Region'))
# internal id
self._id = kwargs.get('id', 0)
try:
self.set_rect(*args, **kwargs)
except FailExit:
raise FailExit('Incorrect Region class constructor call:\n\targs = {args}\n\tkwargs = {kwargs}'.format(
args=args, kwargs=kwargs))
self._find_timeout = self._verify_timeout(
kwargs.get('find_timeout', DEFAULT_FIND_TIMEOUT),
err_msg='pikuli.{}'.format(type(self).__name__))
logger.debug('New Region with name "{name}" created (x:{x} y:{y} w:{w} h:{h} timeout:{t})'.format(
name=self.title, x=self.x, y=self.y, w=self.w, h=self.h, t=self._find_timeout))
def __str__(self):
return 'Region "%s" (%i, %i, %i, %i)' % (self.title, self.x, self.y, self.w, self.h)
@staticmethod
def _verify_timeout(timeout, allow_none=False,
err_msg='pikuli.verify_timeout_argument()'):
if not timeout and allow_none:
return None
try:
timeout = float(timeout)
if timeout < 0:
raise ValueError
except(ValueError, TypeError) as ex:
raise FailExit('{msg}: wrong timeout = "{t}" ({ex})'.format(
msg=err_msg, t=timeout, ex=str(ex)))
return timeout
def get_id(self):
return self._id
def set_id(self, _id):
self._id = _id
def set_x(self, x, relation='top-left'):
""" 'top-left' -- x - top-left corner coordinate;
'center' -- x - center coordinate
"""
if isinstance(x, int) and relation in self.relations:
if relation is None or relation == 'top-left':
self.x = x
elif relation == 'center':
self.x = x - int(self.w / 2)
else:
raise FailExit('Incorrect Region.set_x() method call:\n\tx = {x}, {type_x}\n\trelation = {r}'.format(
x=x, type_x=type(x), r=relation))
def set_y(self, y, relation='top-left'):
""" 'top-left' -- y - top-left corner coordinate;
'center' -- y - center coordinate
"""
if isinstance(y, int) and relation in self.relations:
if relation is None or relation == 'top-left':
self.y = y
elif relation == 'center':
self.y = y - int(self.h / 2)
else:
raise FailExit('Incorrect Region.set_y() method call:\n\ty = {y}, {type_y}\n\trelation = {r}'.format(
y=y, type_y=type(y), r=relation))
def set_w(self, w, relation='top-left'):
if isinstance(w, int) and w > 0 and relation in self.relations:
if relation == 'center':
self.x += int((self.w - w) / 2)
self.w = w
else:
raise FailExit('Incorrect Region.set_w() method call:\n\tw = {w}, {type_w}\n\trelation = {r}'.format(
w=w, type_w=type(w), r=relation))
def set_h(self, h, relation='top-left'):
if isinstance(h, int) and h > 0 and relation in self.relations:
if relation == 'center':
self.y += int((self.h - h) / 2)
self.h = h
else:
raise FailExit('Incorrect Region.set_h() method call:\n\th = {h}, {type_h}\n\trelation = {r}'.format(
h=h, type_h=type(h), r=relation))
def set_rect(self, *args, **kwargs):
try:
if len(args) == 4 and \
isinstance(args[0], int) and \
isinstance(args[1], int) and \
isinstance(args[2], int) and \
isinstance(args[3], int) and \
args[2] > 0 and args[3] > 0:
relation = kwargs.get('relation', 'top-left') or 'top-left'
self.w = args[2]
self.h = args[3]
if relation == 'top-left':
self.x = args[0]
self.y = args[1]
elif relation == 'center':
self.x = args[0] - int(self.w / 2)
self.y = args[1] - int(self.h / 2)
elif len(args) == 1:
self._set_from_region(args[0])
else:
raise FailExit()
except FailExit as e:
raise FailExit('Incorrect Region.set_rect() method call:'
'\n\targs = {args}\n\tkwargs = {kwargs}\n\terror message: {msg}'.format(
args=str(args), kwargs=str(kwargs), msg=str(e)))
def _set_from_region(self, reg):
try:
self.x = reg.x
self.y = reg.y
self.w = reg.w
self.h = reg.h
self._find_timeout = reg.get_find_timeout()
except Exception as ex:
raise FailExit(str(ex))
def get_top_left(self, x_offs=0, y_offs=0):
return Location(self.x + x_offs,
self.y + y_offs,
title='Top left corner of {}'.format(self.title))
def get_top_right(self, x_offs=0, y_offs=0):
return Location(self.x + x_offs + self.w,
self.y + y_offs,
title='Top right corner of {}'.format(self.title))
def get_bottom_left(self, x_offs=0, y_offs=0):
return Location(self.x + x_offs,
self.y + y_offs + self.h,
title='Bottom left corner of {}'.format(self.title))
def get_bottom_right(self, x_offs=0, y_offs=0):
return Location(self.x + x_offs + self.w,
self.y + y_offs + self.h,
title='Bottom right corner of {}'.format(self.title))
def get_center(self, x_offs=0, y_offs=0):
return Location((self.x + x_offs + int(self.w / 2)),
(self.y + y_offs + int(self.h / 2)),
title='Center of {}'.format(self.title))
@property
def center(self):
return self.get_center()
def click(self, x_offs=0, y_offs=0):
self.get_center(x_offs=x_offs, y_offs=y_offs).click()
@property
def search_area(self):
return self.display.take_screenshot(self.x, self.y, self.w, self.h, None)
def save_as_jpg(self, full_filename):
cv2.imwrite(full_filename, self.display.take_screenshot(self.x, self.y, self.w, self.h),
[cv2.IMWRITE_JPEG_QUALITY, 70])
def save_as_png(self, full_filename):
cv2.imwrite(full_filename, self.display.take_screenshot(self.x, self.y, self.w, self.h))
def _find(self, ps, field):
res = cv2.matchTemplate(field, ps.cv2_pattern, cv2.TM_CCORR_NORMED)
loc = np.where(res > ps.similarity) # 0.995
return map(lambda x, y, s: (int(x + self.x * self.scaling_factor),
int(y + self.y * self.scaling_factor),
float(s)),
loc[1], loc[0], res[loc[0], loc[1]])
def get_last_match(self):
if not self._last_match or self._last_match == []:
raise FindFailed('_last_match() is empty')
return self._last_match
def set_find_timeout(self, timeout):
if not timeout:
self._find_timeout = DEFAULT_FIND_TIMEOUT
else:
self._find_timeout = \
self._verify_timeout(
timeout, err_msg='Incorrect Region.set_find_timeout() method call')
def get_find_timeout(self):
return self._find_timeout
| mit | -2,893,188,896,494,906,000 | 37.506073 | 115 | 0.508711 | false |
bogdanbabych/morphosyntax | src/s010cognatematch/md060graphonoLevV09.py | 1 | 9494 | '''
Created on 25 Mar 2016
@author: bogdan
python3 required for operation -- due to Unicode issues
v09: returning different insertion costs for graphonological distance
'''
import sys, re, os
import copy
# from p010graphems.levenshtein import levenshtein
from collections import defaultdict
from collections import Counter
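# The distance is computed over per-character phonological feature vectors read
# from md060graphonoLev-phonetic-features.tsv; besides the classical Levenshtein
# distance, weighted variants with insertion costs of 1.0, 0.8, 0.6, 0.4 and 0.2
# are returned, both raw and normalised by the longer word's length.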
class clGraphonolev(object):
'''
	This class computes the Levenshtein distance over graphonological (feature-based) representations.
	The purpose is to plug the module into external programmes to compute modified variants of the Levenshtein edit distance.
'''
def __init__(self, Debug = False, DebugFile = 'md060graphonolev-debug.txt', DebugMode = 'a'):
'''
Constructor
'''
# self.DFeatures = {}
self.readFeat()
self.BDebug = False
if Debug == True:
self.BDebug = True
self.FDebug = open(DebugFile, DebugMode)
def readFeat(self):
'''
		reads a table of phonological features for each letter; only needed for feature-based Levenshtein distance calculations
'''
self.DGraphemes = defaultdict(list) # the main dictionary of the project: mapping: grapheme, language --> feature sets
FFeatures = open('md060graphonoLev-phonetic-features.tsv', 'rU')
for SLine in FFeatures:
if re.match('#', SLine):
continue
SLine = SLine.rstrip()
LLine = re.split('\t', SLine)
SGrapheme = LLine[0]
SLanguage = LLine[1]
LFeatures = LLine[2:]
LLanguages = re.split(';', SLanguage)
# main representation mapping: create entries for all respective languages
for lang in LLanguages:
self.DGraphemes[(lang, SGrapheme)] = LFeatures
# debugging, can be removed...
'''
FDebug.write('%(lang)s, %(SGrapheme)s, \n' % locals())
for el in LFeatures:
FDebug.write('\t%(el)s\n' % locals())
'''
def str2Features(self, SWord, SLangID):
LGraphFeat = [] # list of tuples: character + list - for each character in the word we get feature list
LWordChars = list(SWord)
for ch in LWordChars:
# FDebug.write('%(SLangID)s, %(ch)s\t' % locals())
try:
LFeatures = self.DGraphemes[(SLangID, ch)]
LGraphFeat.append((ch, LFeatures)) # data structure for LGraphFeat - list of graphemic features
# FDebug.write('features: %(LFeatures)s\n' % locals())
except:
# FDebug.write('no features found\n')
sys.stderr.write('no features found\n')
return LGraphFeat # return list of lists
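	# Similarity of two per-character feature lists, expressed as an F-measure over
	# their multiset overlap: identical sets give 1.0, disjoint sets 0.0; the
	# weighted matrices below use (1 - F-measure) as the substitution cost.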
def compareGraphFeat(self, LGraphFeatA, LGraphFeatB):
# works for pairs of characters (their feature lists).
# Prec, Rec, FMeasure = (0, 0, 0)
# IOverlap = 0
ILenA = len(LGraphFeatA)
ILenB = len(LGraphFeatB)
a_multiset = Counter(LGraphFeatA)
b_multiset = Counter(LGraphFeatB)
overlap = list((a_multiset & b_multiset).elements())
IOverlap = len(overlap)
# a_remainder = list((a_multiset - b_multiset).elements())
# b_remainder = list((b_multiset - a_multiset).elements())
# Precision of List A:
try:
Prec = IOverlap / ILenA
Rec = IOverlap / ILenB
FMeasure = (2 * Prec * Rec) / (Prec + Rec)
except:
Prec, Rec, FMeasure = (0, 0, 0)
return FMeasure
def computeLevenshtein(self, SW1, SW2, SLangID1, SLangID2):
'''
		converts the two character strings into lists of two-tuples: (character, phonological feature list)
'''
s1 = self.str2Features(SW1, SLangID1)
s2 = self.str2Features(SW2, SLangID2)
l1 = len(s1)
l2 = len(s2)
# lAve = (l1 + l2) / 2 # maximum for edit distance ?
lAve = max(l1, l2)
lAveFeats1 = 0 # number of features in each word
lAveFeats2 = 0
for (ch, el) in s1:
if self.BDebug == True:
SEl = str(el)
self.FDebug.write('%(ch)s\t%(SEl)s\n' % locals())
lAveFeats1 += len(el)
for (ch, el) in s2:
if self.BDebug == True:
SEl = str(el)
self.FDebug.write('%(ch)s\t%(SEl)s\n' % locals())
lAveFeats2 += len(el)
lAveFeats = (lAveFeats1 + lAveFeats2) / 2 # average number of features per two words
matrix = [list(range(l1 + 1))] * (l2 + 1)
matrixI2 = copy.deepcopy(matrix)
matrixI4 = copy.deepcopy(matrix)
matrixI6 = copy.deepcopy(matrix)
matrixI8 = copy.deepcopy(matrix)
# different insertion costs
matrix0 = copy.deepcopy(matrix)
for zz in range(l2 + 1):
matrix[zz] = list(range(zz,zz + l1 + 1))
matrixI2[zz] = copy.deepcopy(matrix[zz])
matrixI4[zz] = copy.deepcopy(matrix[zz])
matrixI6[zz] = copy.deepcopy(matrix[zz])
matrixI8[zz] = copy.deepcopy(matrix[zz])
matrix0[zz] = copy.deepcopy(matrix[zz])
for zz in range(0,l2):
for sz in range(0,l1):
# here: 1. compare sets of features; add the minimal substitution score here...
# calculate P, R, F-measure of the feature sets for each symbol, report F-measure:
# print(str(s1[sz]) + '\t' + str(s2[zz]))
(ch1, LFeat1) = s1[sz]
(ch2, LFeat2) = s2[zz]
# FMeasure = self.compareGraphFeat(s1[sz], s2[zz])
FMeasure = self.compareGraphFeat(LFeat1, LFeat2)
OneMinusFMeasure = 1 - FMeasure
# print('FMeasure ' + str(FMeasure))
# if F-Measure = 1 then feature vectors are identical; we need to subtract it from 1 (at the end):
# matrix[zz+1][sz+1] = min(matrix[zz+1][sz] + 1, matrix[zz][sz+1] + 1, matrix[zz][sz] + 1)
# Main work is here: # experimental question:
matrix[zz+1][sz+1] = min(matrix[zz+1][sz] + 1, matrix[zz][sz+1] + 1, matrix[zz][sz] + OneMinusFMeasure)
matrixI2[zz+1][sz+1] = min(matrixI2[zz+1][sz] + 0.2, matrixI2[zz][sz+1] + 0.2, matrixI2[zz][sz] + OneMinusFMeasure)
matrixI4[zz+1][sz+1] = min(matrixI4[zz+1][sz] + 0.4, matrixI4[zz][sz+1] + 0.4, matrixI4[zz][sz] + OneMinusFMeasure)
matrixI6[zz+1][sz+1] = min(matrixI6[zz+1][sz] + 0.6, matrixI6[zz][sz+1] + 0.6, matrixI6[zz][sz] + OneMinusFMeasure)
matrixI8[zz+1][sz+1] = min(matrixI8[zz+1][sz] + 0.8, matrixI8[zz][sz+1] + 0.8, matrixI8[zz][sz] + OneMinusFMeasure)
# matrix[zz+1][sz+1] = min(matrix[zz+1][sz] + 0.4, matrix[zz][sz+1] + 0.4, matrix[zz][sz] + OneMinusFMeasure)
# insertion cost adjustment -- revert to 1 or lowering to 0.4 ?
# now classical levenshtein distance
# if s1[sz] == s2[zz]:
if ch1 == ch2:
matrix0[zz+1][sz+1] = min(matrix0[zz+1][sz] + 1, matrix0[zz][sz+1] + 1, matrix0[zz][sz])
else:
matrix0[zz+1][sz+1] = min(matrix0[zz+1][sz] + 1, matrix0[zz][sz+1] + 1, matrix0[zz][sz] + 1)
# print("That's the Levenshtein-Matrix:")
# self.printMatrix(matrix)
Levenshtein0 = matrix0[l2][l1] # classical Levenshtein distance
Levenshtein1 = matrix[l2][l1]
LevenshteinI2 = matrixI2[l2][l1]
LevenshteinI4 = matrixI4[l2][l1]
LevenshteinI6 = matrixI6[l2][l1]
LevenshteinI8 = matrixI8[l2][l1]
# debug:
if self.BDebug == True:
self.printMatrix(matrix0)
self.printMatrix(matrix)
try:
Levenshtein0Norm = Levenshtein0 / lAve
except:
Levenshtein0Norm = 1
try:
# Levenshtein1Norm = Levenshtein1 / lAveFeats
Levenshtein1Norm = Levenshtein1 / lAve
LevenshteinI2Norm = LevenshteinI2 / lAve
LevenshteinI4Norm = LevenshteinI4 / lAve
LevenshteinI6Norm = LevenshteinI6 / lAve
LevenshteinI8Norm = LevenshteinI8 / lAve
except:
Levenshtein1Norm = 1
LevenshteinI2Norm = 1
LevenshteinI4Norm = 1
LevenshteinI6Norm = 1
LevenshteinI8Norm = 1
# sys.stderr.write('%(SW1)s, %(SW2)s, \n\t%(s1)s\n\t%(s2)s\n\t%(Levenshtein1).3f\n\t%(lAveFeats)\n\n' % locals())
try:
sys.stderr.write('%(SW1)s\n' % locals())
except:
sys.stderr.write('cannot write\n')
try:
sys.stderr.write('%(SW2)s\n' % locals())
except:
sys.stderr.write('cannot write\n')
try:
sys.stderr.write('%(s1)s\n' % locals())
except:
sys.stderr.write('cannot write s1\n')
try:
sys.stderr.write('%(s2)s\n' % locals())
except:
sys.stderr.write('cannot write s2\n')
# return (Levenshtein0, Levenshtein1, Levenshtein0Norm, Levenshtein1Norm)
return (Levenshtein0, Levenshtein1, Levenshtein0Norm, Levenshtein1Norm, LevenshteinI2, LevenshteinI2Norm, LevenshteinI4, LevenshteinI4Norm, LevenshteinI6, LevenshteinI6Norm, LevenshteinI8, LevenshteinI8Norm)
def printMatrix(self, m):
self.FDebug.write(' \n')
for line in m:
spTupel = ()
breite = len(line)
for column in line:
spTupel = spTupel + (column, )
self.FDebug.write(" %3.1f "*breite % spTupel)
self.FDebug.write('\n')
# using the class: initialising and computing Lev distances
if __name__ == '__main__':
FInput = open(sys.argv[1], 'rU')
SLangID1 = sys.argv[2]
SLangID2 = sys.argv[3]
SDebug = sys.argv[4]
if SDebug == 'Debug':
BDebug = True
else:
BDebug = False
OGraphonolev = clGraphonolev(BDebug)
# OGraphonolev.readFeat()
for SLine in FInput:
SLine = SLine.rstrip()
try:
(SW1, SW2) = re.split('\t', SLine, 1)
except:
SW1 = '' ; SW2 = ''
# FDebug.write('SW1 = %(SW1)s; SLangID1 = %(SLangID1)s\n' % locals())
# LGraphFeat1 = OGraphonolev.str2Features(SW1, SLangID1)
# FDebug.write('SW2 = %(SW2)s; SLangID2 = %(SLangID2)s\n' % locals())
# LGraphFeat2 = OGraphonolev.str2Features(SW2, SLangID2)
(Lev0, Lev1, Lev0Norm, Lev1Norm, LevenshteinI2, LevenshteinI2Norm, LevenshteinI4, LevenshteinI4Norm, LevenshteinI6, LevenshteinI6Norm, LevenshteinI8, LevenshteinI8Norm) = OGraphonolev.computeLevenshtein(SW1, SW2, SLangID1, SLangID2)
sys.stdout.write('%(SW1)s, %(SW2)s, %(Lev0)d, %(Lev1).4f, %(Lev0Norm).4f, %(Lev1Norm).4f, %(LevenshteinI2).4f, %(LevenshteinI2Norm).4f, %(LevenshteinI4).4f, %(LevenshteinI4Norm).4f, %(LevenshteinI6).4f, %(LevenshteinI6Norm).4f, %(LevenshteinI8).4f, %(LevenshteinI8Norm).4f\n' % locals())
| apache-2.0 | 137,749,909,859,321,440 | 33.527273 | 289 | 0.662313 | false |
adriank/ObjectPath | objectpath/core/parser.py | 1 | 10901 | #!/usr/bin/env python
# This file is part of ObjectPath released under MIT license.
# Copyright (C) 2010-2014 Adrian Kalbarczyk
# Code from http://effbot.org/zone/simple-top-down-parsing.htm was used in this file.
# Licence of the code is public domain.
# Relicenced to AGPL v3 by Adrian Kalbarczyk and:
# - specialized to work with ObjectPath,
# - optimized
import sys
if sys.version_info[0] >= 3:
from io import StringIO
else:
from cStringIO import StringIO
from objectpath.core import SELECTOR_OPS, NUM_TYPES
symbol_table = {}
token = nextToken = None
# TODO optimization ('-',1) -> -1
# TODO optimization operators should be numbers
TRUE = ["true", "t"]
FALSE = ["false", "f"]
NONE = ["none", "null", "n", "nil"]
class symbol_base(object):
id = None
value = None
fst = snd = third = None
def nud(self):
raise SyntaxError("Syntax error (%r)." % self.id)
def led(self):
raise SyntaxError("Unknown operator (%r)." % self.id)
def getTree(self):
if self.id == "(name)":
val = self.value.lower()
if val in TRUE:
return True
elif val in FALSE:
return False
elif val in NONE:
return None
return (self.id[1:-1], self.value)
elif self.id == "(number)":
return self.value
elif self.id == "(literal)":
fstLetter = self.value[0]
if fstLetter in ["'", "\""]:
return self.value[1:-1]
# elif fstLetter.isdigit():
# try:
# return int(self.value)
# except:
# return float(self.value)
else:
if self.value == "True":
return True
elif self.value == "False":
return False
elif self.value == "None":
return None
ret = [self.id]
ret_append = ret.append
L = (dict, tuple, list)
for i in filter(None, [self.fst, self.snd, self.third]):
if type(i) is str:
ret_append(i)
elif type(i) in L:
t = []
t_append = t.append
if self.id == "{":
ret = {}
for j in list(self.fst.items()):
ret[j[0].getTree()] = j[1].getTree()
return ret
for j in i:
try:
t_append(j.getTree())
except Exception:
t_append(j)
if self.id in ("[", ".", ".."):
ret.append(t)
else:
ret.extend(t)
# ret_append(t)
# return (self.id,ret[1:])
else:
if type(self.fst.value) in NUM_TYPES and self.snd is None:
if self.id == "-":
return -self.fst.value
if self.id == "+":
return self.fst.value
ret_append(i.getTree())
if self.id == "{":
return {}
# if self.id == "[" and self.fst == []:
# return []
if self.id == "(":
# this will produce ("fn","fnName",arg1,arg2,...argN)
# try:
return tuple(["fn", ret[1][1]] + ret[2:])
# except:
# pass
return tuple(ret)
def __repr__(self):
if self.id == "(name)" or self.id == "(literal)":
return "(%s:%s)" % (self.id[1:-1], self.value)
out = [self.id, self.fst, self.snd, self.third]
# out=list(map(str, filter(None, out)))
return "(" + " ".join(out) + ")"
def symbol(ID, bp=0):
try:
s = symbol_table[ID]
except KeyError:
class s(symbol_base):
pass
s.__name__ = "symbol-" + ID # for debugging
s.id = ID
s.value = None
s.lbp = bp
symbol_table[ID] = s
else:
s.lbp = max(bp, s.lbp)
return s
# helpers
def infix(ID, bp):
def led(self, left):
self.fst = left
self.snd = expression(bp)
return self
symbol(ID, bp).led = led
def infix_r(ID, bp):
def led(self, left):
self.fst = left
self.snd = expression(bp - 1)
return self
symbol(ID, bp).led = led
def prefix(ID, bp):
def nud(self):
self.fst = expression(bp)
return self
symbol(ID).nud = nud
def advance(ID=None):
global token
if ID and token.id != ID:
raise SyntaxError("Expected %r, got %s" % (ID, token.id))
token = nextToken()
def method(s):
# decorator
assert issubclass(s, symbol_base)
def bind(fn):
setattr(s, fn.__name__, fn)
return bind
infix_r("or", 30)
infix_r("and", 40)
prefix("not", 50)
infix("in", 60)
infix("not", 60) # not in
infix("is", 60)
infix("matches", 60)
infix("<", 60)
infix("<=", 60)
infix(">", 60)
infix(">=", 60)
# infix(" ", 60); infix("!=", 60); infix("==", 60)
# infix("&", 90)
# infix("<<", 100); infix(">>", 100)
infix("+", 110)
infix("-", 110)
infix("*", 120)
infix("/", 120)
infix("//", 120)
infix("%", 120)
prefix("-", 130)
prefix("+", 130)
#prefix("~", 130)
# infix_r("**", 140)
symbol(".", 150)
symbol("[", 150)
symbol("{", 150)
symbol("(", 150)
# additional behavior
symbol("(name)").nud = lambda self: self
symbol("(literal)").nud = lambda self: self
symbol("(number)").nud = lambda self: self
symbol("(end)")
symbol(")")
# REGEX
infix("|", 0)
infix("^", 0)
infix("?", 0)
infix("\\", 0)
symbol("@")
@method(symbol("@"))
def nud(self): # pylint: disable=E0102
self.id = "(current)"
return self
symbol("!")
@method(symbol("!"))
def nud(self): # pylint: disable=E0102
self.id = "(node)"
return self
# RegEx
@method(symbol("/"))
def nud(self): # pylint: disable=E0102
self.id = "re"
regex = []
if token.id != "/":
self_fst_append = regex.append
while 1:
if token.id == "/":
break
if token.id in ["(name)", "(number)"]:
self_fst_append(str(token.value))
else:
self_fst_append(token.id)
advance()
self.fst = "".join(regex).replace("\\", "\\\\")
advance("/")
return self
@method(symbol("("))
def nud(self): # pylint: disable=E0102,W0613
expr = expression()
advance(")")
return expr
symbol(",")
@method(symbol("."))
def led(self, left): # pylint: disable=E0102
attr = False
if token.id == ".":
self.id = ".."
advance()
if token.id == "@":
attr = True
advance()
if token.id == "(":
advance()
self.fst = left
self.snd = []
if token.id != ")":
self_snd_append = self.snd.append
while 1:
self_snd_append(expression())
if token.id != ",":
break
advance(",")
advance(")")
return self
if token.id not in ["(name)", "*", "(literal)", "("]:
raise SyntaxError("Expected an attribute name.")
self.fst = left
if attr:
token.value = "@" + token.value
self.snd = token
advance()
return self
# handling namespaces; e.g $.a.b.c or $ss.a.b.c
# default storage is the request namespace
symbol("$")
@method(symbol("$"))
def nud(self): # pylint: disable=E0102
global token # pylint: disable=W0602
self.id = "(root)"
if token.id == ".":
self.fst = "rs"
else:
self.fst = token.value
advance()
return self
symbol("]")
@method(symbol("["))
def led(self, left): # pylint: disable=E0102
self.fst = left
self.snd = expression()
advance("]")
return self
symbol(",")
# this is for built-in functions
@method(symbol("("))
def led(self, left): # pylint: disable=E0102
# self.id="fn"
self.fst = left
self.snd = []
if token.id != ")":
self_snd_append = self.snd.append
while 1:
self_snd_append(expression())
if token.id != ",":
break
advance(",")
advance(")")
return self
symbol(":")
symbol("=")
# constants
def constant(ID):
@method(symbol(ID))
def nud(self): # pylint: disable=W0612
self.id = "(literal)"
self.value = ID
return self
constant("None")
constant("True")
constant("False")
# multitoken operators
@method(symbol("not"))
def led(self, left): # pylint: disable=E0102
if token.id != "in":
raise SyntaxError("Invalid syntax")
advance()
self.id = "not in"
self.fst = left
self.snd = expression(60)
return self
@method(symbol("is"))
def led(self, left): # pylint: disable=E0102
if token.id == "not":
advance()
self.id = "is not"
self.fst = left
self.snd = expression(60)
return self
symbol("]")
@method(symbol("["))
def nud(self): # pylint: disable=E0102
self.fst = []
if token.id != "]":
while 1:
if token.id == "]":
break
self.fst.append(expression())
if token.id not in SELECTOR_OPS + [","]:
break
advance(",")
advance("]")
return self
symbol("}")
@method(symbol("{"))
def nud(self): # pylint: disable=E0102
self.fst = {}
if token.id != "}":
while 1:
if token.id == "}":
break
key = expression()
advance(":")
self.fst[key] = expression()
if token.id != ",":
break
advance(",")
advance("}")
return self
import tokenize as tokenizer
type_map = {
tokenizer.NUMBER: "(number)",
tokenizer.STRING: "(literal)",
tokenizer.OP: "(operator)",
tokenizer.NAME: "(name)",
tokenizer.ERRORTOKEN:
"(operator)" #'$' is recognized in python tokenizer as error token!
}
# python tokenizer
def tokenize_python(program):
if sys.version_info[0] < 3:
tokens = tokenizer.generate_tokens(StringIO(program).next)
else:
tokens = tokenizer.generate_tokens(StringIO(program).__next__)
for t in tokens:
# print type_map[t[0]], t[1]
try:
# change this to output python values in correct type
yield type_map[t[0]], t[1]
except KeyError:
if t[0] in [tokenizer.NL, tokenizer.COMMENT, tokenizer.NEWLINE]:
continue
if t[0] == tokenizer.ENDMARKER:
break
else:
raise SyntaxError("Syntax error")
yield "(end)", "(end)"
def tokenize(program):
if isinstance(program, list):
source = program
else:
source = tokenize_python(program)
for ID, value in source:
if ID == "(literal)":
symbol = symbol_table[ID]
s = symbol()
s.value = value
elif ID == "(number)":
symbol = symbol_table[ID]
s = symbol()
try:
s.value = int(value)
except Exception:
s.value = float(value)
elif value == " ":
continue
else:
# name or operator
symbol = symbol_table.get(value)
if symbol:
s = symbol()
elif ID == "(name)":
symbol = symbol_table[ID]
s = symbol()
s.value = value
else:
raise SyntaxError("Unknown operator '%s', '%s'" % (ID, value))
yield s
# parser engine
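# Pratt-style loop: nud() treats a token as a prefix/standalone value, led()
# attaches it as an infix/postfix operator, and parsing continues while the
# upcoming token binds more tightly than `rbp`.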
def expression(rbp=0):
global token
t = token
token = nextToken()
left = t.nud()
while rbp < token.lbp:
t = token
token = nextToken()
left = t.led(left)
return left
def parse(expr, D=False):
if sys.version_info[0] < 3 and type(expr) is unicode:
expr = expr.encode("utf8")
if type(expr) is not str:
return expr
expr = expr.strip()
global token, nextToken
if sys.version_info[0] >= 3:
nextToken = tokenize(expr).__next__
else:
nextToken = tokenize(expr).next
token = nextToken()
r = expression().getTree()
if D:
print("PARSE STAGE")
print(r)
return r
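# Minimal usage sketch (not part of the original module; assumes the operator
# symbols registered earlier in this file cover the expression used):
#   tree = parse("a[0] + 1", D=False)
#   # 'tree' is the nested structure produced by getTree() for the expression.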
| mit | 6,232,478,799,201,184,000 | 21.111562 | 85 | 0.563159 | false |
lafactura/datea-api | datea_api/apps/api/resources.py | 1 | 7475 | from api.base_resources import JSONDefaultMixin
from tastypie.resources import Resource
from tastypie.cache import SimpleCache
from api.cache import SimpleDictCache
from tastypie.throttle import CacheThrottle
from tastypie.utils import trailing_slash
from django.conf.urls import url
from datea_api.utils import remove_accents
from haystack.utils.geo import Point
from haystack.utils.geo import Distance
from haystack.query import SearchQuerySet
from haystack.inputs import AutoQuery, Exact
from django.core.paginator import Paginator, InvalidPage, EmptyPage
from django.http import Http404
from django.db import models
from campaign.models import Campaign
from campaign.resources import CampaignResource
from tag.models import Tag
from tag.resources import TagResource
from follow.models import Follow
from geoip import geolite2
from ipware.ip import get_real_ip
from api.status_codes import *
resources = {'tag': TagResource(), 'campaign': CampaignResource()}
class IPLocationResource(JSONDefaultMixin, Resource):
class Meta:
resource_name = 'ip_location'
allowed_methods = ['get']
cache = SimpleCache(timeout=100)
        throttle = CacheThrottle(throttle_at=300)
def prepend_urls(self):
return [
# dateo stats
url(r"^(?P<resource_name>%s)%s$" %
(self._meta.resource_name, trailing_slash()),
self.wrap_view('get_ip_location'), name="api_ip_location")
]
def get_ip_location(self, request, **kwargs):
# tests
self.method_check(request, allowed=['get'])
#self.is_authenticated(request)
self.throttle_check(request)
found = False
ip = get_real_ip(request)
if ip:
match = geolite2.lookup(ip)
if match:
response = {'ip_location' : {'latitude': match.location[0], 'longitude': match.location[1]},
'ip_country' : match.country}
status = OK
found = True
if not found:
response = {'error': 'not found'}
status = NOT_FOUND
self.log_throttled_access(request)
return self.create_response(request, response, status=status)
# An endpoint to search for campaigns and standalone
# tags together: combined dateo environments.
class EnvironmentsResource(JSONDefaultMixin, Resource):
class Meta:
resource_name = 'environments'
allowed_methods = ['get']
cache = SimpleDictCache(timeout=60)
throttle = CacheThrottle(throttle_at=300)
def prepend_urls(self):
return [
# dateo stats
url(r"^(?P<resource_name>%s)%s$" %
(self._meta.resource_name, trailing_slash()),
self.wrap_view('get_combined'), name="api_search_combined_env")
]
def get_combined(self, request, **kwargs):
# tests
self.method_check(request, allowed=['get'])
#self.is_authenticated(request)
self.throttle_check(request)
# pagination
limit = int(request.GET.get('limit', 20))
offset = int(request.GET.get('offset', 0))
page = (offset / limit) + 1
# Do the query
q_args = {'published': request.GET.get('published', True), 'is_standalone': True}
# add search query
if 'q' in request.GET and request.GET['q'] != '':
q_args['content'] = AutoQuery(remove_accents(request.GET['q']))
# check for more params
params = ['category_id', 'category', 'user', 'user_id',
'is_active', 'id', 'featured',
'created__year', 'created__month', 'created__day',
'main_tag_id', 'follow_key', 'is_standalone']
for p in params:
if p in request.GET:
q_args[self.rename_get_filters.get(p, p)] = Exact(request.GET.get(p))
# check for additional date filters (with datetime objects)
date_params = ['created__gt', 'created__lt']
for p in date_params:
if p in request.GET:
                q_args[p] = models.DateTimeField().to_python(request.GET.get(p))
# GET BY TAGS I FOLLOW
if 'followed_by_tags' in request.GET:
uid = int(request.GET['followed_by_tags'])
follow_keys = ['tag.'+str(f.object_id) for f in Follow.objects.filter(content_type__model='tag', user__id=uid)]
q_args['follow_key__in'] = follow_keys
# show published and unpublished actions
if q_args['published'] == 'all':
del q_args['published']
# INIT THE QUERY
sqs = SearchQuerySet().models(Campaign, Tag).load_all().filter(**q_args)
# SPATIAL QUERY ADDONS
# WITHIN QUERY
if all(k in request.GET and request.GET.get(k) != '' for k in ('bottom_left', 'top_right')):
bleft = [float(c) for c in request.GET.get('bottom_left').split(',')]
bottom_left = Point(bleft[0], bleft[1])
tright = [float(c) for c in request.GET.get('top_right').split(',')]
top_right = Point(tright[0], tright[1])
sqs = sqs.within('center', bottom_left, top_right)
# DWITHIN QUERY
if all(k in request.GET and request.GET.get(k) != '' for k in ('max_distance', 'center')):
dist = Distance( m = int(request.GET.get('max_distance')))
pos = [float(c) for c in request.GET.get('center').split(',')]
position = Point(pos[0], pos[1])
sqs = sqs.dwithin('center', position, dist)
# ORDER BY
order_by = request.GET.get('order_by', '-rank').split(',')
# in elastic search 'score' is '_score'
#order_by = [o if 'score' not in o else o.replace('score', '_score') for o in order_by]
if 'q' in request.GET:
if order_by == ['-rank'] and '-rank' not in request.GET:
#order_by = ['_score']
order_by = ['score', '-rank']
# if q is set, then order will be search relevance first
# if not, then do normal order by
if 'distance' in order_by and 'center' in request.GET and request.GET['center'] != '':
pos = [float(c) for c in request.GET.get('center').split(',')]
position = Point(pos[0], pos[1])
sqs = sqs.distance('center', position).order_by(*order_by)
elif len(order_by) > 0:
sqs = sqs.order_by(*order_by)
paginator = Paginator(sqs, limit)
try:
page = paginator.page(page)
except InvalidPage:
raise Http404("Sorry, no results on that page.")
objects = []
for result in page.object_list:
cache_key = result.model_name + '.' + str(result.obj_id)
data = self._meta.cache.get(cache_key)
if not data:
bundle = resources[result.model_name].build_bundle(obj=result.object, request=request)
bundle = resources[result.model_name].full_dehydrate(bundle)
data = self._meta.cache.set(cache_key, bundle)
objects.append(data)
object_list = {
'meta': {
'limit': limit,
'next': page.has_next(),
'previous': page.has_previous(),
'total_count': sqs.count(),
'offset': offset
},
'objects': objects,
}
self.log_throttled_access(request)
return self.create_response(request, object_list)
| agpl-3.0 | 835,086,416,194,976,900 | 35.286408 | 123 | 0.587291 | false |
COSMOGRAIL/COSMOULINE | pipe/4_norm_scripts/3a_alt_setmedcoefftoone.py | 1 | 1066 | #
# We set the medcoeff to 1.0 (if you don't want any normalization)
# We use these medcoeffs for the f77 MCS PSF construction, to get initial values, for instance.
#
execfile("../config.py")
from kirbybase import KirbyBase, KBError
#from calccoeff_fct import *
from variousfct import *
import star
print "We will set all medcoeffs to 1.0."
proquest(askquestions)
# As we will tweak the database, do a backup first
backupfile(imgdb, dbbudir, 'calccoeff')
# Select images to treat
db = KirbyBase()
images = db.select(imgdb, ['gogogo', 'treatme'], [True, True], returnType='dict')
# We prepare the database
if "nbrcoeffstars" not in db.getFieldNames(imgdb) :
print "I will add some fields to the database."
proquest(askquestions)
db.addFields(imgdb, ['nbrcoeffstars:int', 'maxcoeffstars:int', 'medcoeff:float', 'sigcoeff:float', 'spancoeff:float'])
for i, image in enumerate(images):
db.update(imgdb, ['recno'], [image['recno']], {'nbrcoeffstars': 0, 'maxcoeffstars': 0, 'medcoeff': 1.0, 'sigcoeff': 0.0, 'spancoeff': 0.0})
db.pack(imgdb)
print "Done."
| gpl-3.0 | -4,331,625,047,873,498,600 | 30.352941 | 140 | 0.719512 | false |
botswana-harvard/edc-map | edc_map/tests/test_sections.py | 1 | 1129 | # from model_mommy import mommy
# from django.test import TestCase
#
# from ..section import Section
# from django.contrib.auth.models import User
#
#
# class TestSectionPolygon(TestCase):
#
# def setUp(self):
# self.user = User.objects.create_user(
# username='tuser', email='[email protected]', password='top_secret@321')
#
# def test_section_polygon(self):
# """Test that a section instance gets created.
# """
# mommy.make_recipe(
# 'edc_map.section',
# location_identifier='123123123',
# user=self.user
# )
# self.assertEqual(Section.objects.all().count(), 1)
#
# def test_section_polygon2(self):
# """Assert that polygon properties return lists."""
# section = mommy.make_recipe(
# 'edc_map.section',
# location_identifier='123123123',
# user=self.user
# )
# section = Section.objects.get(id=section.id)
# self.assertTrue(type(section.section_polygon_list) is list)
# self.assertTrue(type(section.sub_section_polygon_list) is list)
| gpl-2.0 | -6,307,507,320,169,330,000 | 33.212121 | 84 | 0.59876 | false |
Hybrid-Cloud/conveyor | conveyor/conveyorheat/engine/resources/aws/ec2/internet_gateway.py | 1 | 4464 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from conveyor.conveyorheat.common import exception
from conveyor.conveyorheat.engine import properties
from conveyor.conveyorheat.engine import resource
from conveyor.conveyorheat.engine.resources.aws.ec2 import route_table
from conveyor.i18n import _
class InternetGateway(resource.Resource):
PROPERTIES = (
TAGS,
) = (
'Tags',
)
_TAG_KEYS = (
TAG_KEY, TAG_VALUE,
) = (
'Key', 'Value',
)
properties_schema = {
TAGS: properties.Schema(
properties.Schema.LIST,
schema=properties.Schema(
properties.Schema.MAP,
schema={
TAG_KEY: properties.Schema(
properties.Schema.STRING,
required=True
),
TAG_VALUE: properties.Schema(
properties.Schema.STRING,
required=True
),
},
implemented=False,
)
),
}
def handle_create(self):
self.resource_id_set(self.physical_resource_name())
def handle_delete(self):
pass
@staticmethod
def get_external_network_id(client):
ext_filter = {'router:external': True}
ext_nets = client.list_networks(**ext_filter)['networks']
if len(ext_nets) != 1:
# TODO(sbaker) if there is more than one external network
# add a heat configuration variable to set the ID of
# the default one
raise exception.Error(
_('Expected 1 external network, found %d') % len(ext_nets))
external_network_id = ext_nets[0]['id']
return external_network_id
class VPCGatewayAttachment(resource.Resource):
PROPERTIES = (
VPC_ID, INTERNET_GATEWAY_ID, VPN_GATEWAY_ID,
) = (
'VpcId', 'InternetGatewayId', 'VpnGatewayId',
)
properties_schema = {
VPC_ID: properties.Schema(
properties.Schema.STRING,
_('VPC ID for this gateway association.'),
required=True
),
INTERNET_GATEWAY_ID: properties.Schema(
properties.Schema.STRING,
_('ID of the InternetGateway.')
),
VPN_GATEWAY_ID: properties.Schema(
properties.Schema.STRING,
_('ID of the VPNGateway to attach to the VPC.'),
implemented=False
),
}
default_client_name = 'neutron'
def _vpc_route_tables(self):
for res in six.itervalues(self.stack):
if (res.has_interface('AWS::EC2::RouteTable') and
res.properties.get(route_table.RouteTable.VPC_ID) ==
self.properties.get(self.VPC_ID)):
yield res
def add_dependencies(self, deps):
super(VPCGatewayAttachment, self).add_dependencies(deps)
# Depend on any route table in this template with the same
# VpcId as this VpcId.
# All route tables must exist before gateway attachment
# as attachment happens to routers (not VPCs)
for route_tbl in self._vpc_route_tables():
deps += (self, route_tbl)
def handle_create(self):
client = self.client()
external_network_id = InternetGateway.get_external_network_id(client)
for router in self._vpc_route_tables():
client.add_gateway_router(router.resource_id, {
'network_id': external_network_id})
def handle_delete(self):
for router in self._vpc_route_tables():
with self.client_plugin().ignore_not_found:
self.client().remove_gateway_router(router.resource_id)
def resource_mapping():
return {
'AWS::EC2::InternetGateway': InternetGateway,
'AWS::EC2::VPCGatewayAttachment': VPCGatewayAttachment,
}
| apache-2.0 | -594,445,450,825,484,400 | 31.583942 | 78 | 0.59095 | false |
richrd/bx | modules/status.py | 1 | 3018 | # -*- coding: utf-8 -*-
from mod_base import *
class Status(Command):
"""Get status information of the host running the bot."""
def run(self, win, user, data, caller=None):
items = self.get_items()
strs = []
for item in items:
strs.append( item[0]+":"+str(item[1]) )
win.Send(", ".join(strs))
def get_items(self):
items = []
power = self.get_power_state()
bat = "!"
if power:
if power[0]:
bat = "+"
bat += str(power[1])
items.append( ("bat", bat) )
uptime = self.get_uptime()
if uptime: items.append( ("up", uptime) )
temp = self.get_temp()
if temp: items.append( ("temp", temp) )
        load = self.get_sys_load()
if load: items.append( ("load", load) )
link = self.get_wifi_quality()
if link: items.append( ("link", link) )
return items
def get_power_state(self):
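        # Parses the output of the `acpi` command line tool. A typical line it
        # expects looks like (an assumption about the local acpi build):
        #   Battery 0: Discharging, 87%, 01:23:45 remaining
        # Returns [battery_is_full, percent], or False if acpi is not installed.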
output = run_shell_cmd("acpi").lower()
if output.find("not found") == -1:
parts = output.split(",")
state = False
raw_state = parts[0][parts[0].find(":")+1:].strip()
if raw_state == "full":
state=True
percent = int(parts[1].replace("%","").strip())
return [state, percent]
else:
return False
def get_uptime(self):
# try:
from datetime import timedelta
f = open('/proc/uptime', 'r')
uptime_seconds = float(f.readline().split()[0])
uptime_string = str(timedelta(seconds = uptime_seconds))
f.close()
return uptime_string
# except:
# return False
def get_wifi_quality(self):
output = run_shell_cmd("iwconfig")
start = "Link Quality="
if output.find(start) != -1:
part = output[output.find(start)+len(start):]
part = part[:part.find(" ")]
return part
return False
    def get_sys_load(self):
uptime = run_shell_cmd("uptime")
if uptime:
load = " ".join(uptime.split(" ")[-3:]).replace(", "," ").replace(",",".")
return load
return False
def get_temp(self):
try:
# lm-sensors
line = run_shell_cmd("sensors | grep Core")
start = "+"
            end = "°C"
if line.find(start) != -1 and line.find(end) != -1:
line = line[line.find(start)+1:]
temp = float(line[:line.find(end)])
return temp
except:
pass
try:
# Raspberry Pi
line = run_shell_cmd("/opt/vc/bin/vcgencmd measure_temp")
temp = float(get_string_between("temp=","'",line))
return temp
except:
pass
return False
module = {
"class": Status,
"type": MOD_COMMAND,
"level": 0,
"aliases": ["sta"],
}
| apache-2.0 | 9,010,237,796,635,369,000 | 27.196262 | 86 | 0.476964 | false |
stregoika/aislib | scripts/nais2postgis.py | 1 | 28342 | #!/usr/bin/env python
__author__ = 'Kurt Schwehr'
__version__ = '$Revision: 2275 $'.split()[1]
__revision__ = __version__ # For pylint
__date__ = '$Date: 2006-07-10 16:22:35 -0400 (Mon, 10 Jul 2006) $'.split()[1]
__copyright__ = '2008'
__license__ = 'GPL v3'
__contact__ = 'kurt at ccom.unh.edu'
__doc__='''
Connect to N-AIS and pump the data into Postgres/Postgis. This is a
non-threaded rewrite of ais-port-forward and ais-net-to-postgis.
Which are just cranky.
@var __date__: Date of last svn commit
@undocumented: __doc__ myparser
@status: under development
@since: 05-May-2009
@requires: U{Python<http://python.org/>} >= 2.5
'''
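# Example invocation (a sketch; the hostname is a placeholder and the
# PostgreSQL connection flags come from aisutils.database.stdCmdlineOptions,
# which are not shown here):
#   ./nais2postgis.py -I nais-feed.example.org -i 31414 \
#       --lon-min=-71.0 --lon-max=-70.0 --lat-min=42.0 --lat-max=43.0 -l nais2postgis.log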
errors_file = file('errors-nais2postgis','w+')
sonita = file('bad.sonita','w+');
import traceback, exceptions
import sys
import time
import socket
import select
import exceptions # For KeyboardInterrupt pychecker complaint
import logging # Python's logger module for tracking progress
import aisutils.daemon
import aisutils.uscg
import aisutils.normalize
import ais.sqlhelp
import aisutils.database
import ais
from ais.ais_msg_1 import NavigationStatusDecodeLut
from ais.ais_msg_5 import shipandcargoDecodeLut
#ais_msgs_supported = ('B','C','H')
ais_msgs_supported = ('1','2','3','4','5','B','H') # ,'C', 'H')
''' Which AIS messages will be handled. The rest will be dropped. '''
# Files:
# Log file: nais2postgis.py.log
# Failed AIS messages: bad.ais
################################################################################
# #
# rebuild_track_line #
# #
################################################################################
def rebuild_track_line(cu,userid,name,start_time=None,point_limit=50):
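    """Rebuild the track_lines LINESTRING for one vessel from its most recent
    positions (up to point_limit), skipping the invalid 181/91 GPS marker.
    The old track row is deleted and re-inserted; with fewer than two valid
    points the track is removed instead."""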
print 'nais2postgis::rebuild_track_line - Init'
q = 'SELECT AsText(position) FROM position WHERE userid=%s ORDER BY cg_sec DESC LIMIT %s;'
qPrint = 'SELECT AsText(position) FROM position WHERE userid=%s ORDER BY cg_sec DESC LIMIT %s;' % (userid, point_limit)
print 'nais2postgis::rebuild_track_line - select: ',qPrint
cu.execute(q,(userid, point_limit))
linePoints=[]
for row in cu.fetchall():
x,y = row[0].split()
x = x.split('(')[1]
y = y.split(')')[0]
        if x=='181' and y=='91': # point outside the valid GPS range, discard only this point
continue
linePoints.append(row[0].split('(')[1].split(')')[0])
if len(linePoints)<2:
print 'nais2postgis::rebuild_track_line - No hay puntos suficientes; borrar track userid', userid
cu.execute('DELETE FROM track_lines WHERE userid = %s;',(userid,))
        return # stop here, do not build a track
lineWKT='LINESTRING('+','.join(linePoints)+')'
    # update the track: delete the old one, create the new one
cu.execute('DELETE FROM track_lines WHERE userid=%s;', (userid,) )
q = 'INSERT INTO track_lines (userid,name,track) VALUES (%s,%s,GeomFromText(%s,4326));'
qPrint = 'INSERT INTO track_lines (userid,name,track) VALUES (%s,%s,GeomFromText(%s,4326));' % (userid, name, lineWKT)
print 'nais2postgis::rebuild_track_line - insert: ',qPrint
cu.execute(q, (userid,name,lineWKT) )
################################################################################
# #
# rebuild_b_track_line #
# #
################################################################################
def rebuild_b_track_line(cu,userid,name,start_time=None,point_limit=50):
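    """Same as rebuild_track_line, but reads Class B positions from the
    positionb table."""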
print 'nais2postgis::rebuild_b_track_line - Init'
q = 'SELECT AsText(position) FROM positionb WHERE userid=%s ORDER BY cg_sec DESC LIMIT %s;'
qPrint = 'SELECT AsText(position) FROM positionb WHERE userid=%s ORDER BY cg_sec DESC LIMIT %s;' % (userid, point_limit)
print 'nais2postgis::rebuild_b_track_line - select: ',qPrint
cu.execute(q,(userid, point_limit))
linePoints=[]
for row in cu.fetchall():
x,y = row[0].split()
x = x.split('(')[1]
y = y.split(')')[0]
if x=='181' and y=='91':
continue
linePoints.append(row[0].split('(')[1].split(')')[0])
if len(linePoints)<2:
print 'nais2postgis::rebuild_b_track_line - No hay puntos suficientes; borrar track userid', userid
cu.execute('DELETE FROM track_lines WHERE userid = %s;',(userid,))
return
lineWKT='LINESTRING('+','.join(linePoints)+')'
cu.execute('DELETE FROM track_lines WHERE userid=%s;', (userid,) )
q = 'INSERT INTO track_lines (userid,name,track) VALUES (%s,%s,GeomFromText(%s,4326));'
qPrint = 'INSERT INTO track_lines (userid,name,track) VALUES (%s,%s,GeomFromText(%s,4326));' % (userid, name, lineWKT)
print 'nais2postgis::rebuild_b_track_line - insert: ',qPrint
cu.execute(q, (userid,name,lineWKT) )
return
################################################################################
# #
# handle_insert_update #
# #
################################################################################
def handle_insert_update(cx, uscg_msg, msg_dict, aismsg):
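    """Insert or update database rows for one decoded AIS message, dispatching
    on the message type (1/2/3, 4, 5, 18, 19, 24). Returns True when rows were
    written and a commit is needed; otherwise returns False or None."""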
print 'nais2postgis::handle_insert_update - Init'
    db_uncommitted_count = 0 # counts statements that have not been committed yet
msg_type = msg_dict['MessageID']
userid = int(msg_dict['UserID'])
cu = cx.cursor()
    # ********** Message types 1 - 2 - 3 (position reports)
if msg_type in (1,2,3):
x = msg_dict['longitude']
y = msg_dict['latitude']
        # Invalid GPS positions
        if x > 180 or y > 90:
            print 'nais2postgis::handle_insert_update - invalid GPS position x: %s y: %s' % (x, y)
            return # abandon the insert
        # Check that the position falls inside the configured bounding box (if any)
if options.lon_min is not None and options.lon_min > x: return
if options.lon_max is not None and options.lon_max < x: return
if options.lat_min is not None and options.lat_min > y: return
if options.lat_max is not None and options.lat_max < y: return
ins = aismsg.sqlInsert(msg_dict, dbType='postgres')
ins.add('cg_sec', uscg_msg.cg_sec)
ins.add('cg_timestamp', uscg_msg.sqlTimestampStr)
ins.add('cg_r', uscg_msg.station)
print 'nais2postgis::handle_insert_update - Insert: ',ins
try:
cu.execute(str(ins))
print 'nais2postgis::handle_insert_update - OK Added position'
except Exception,e:
            errors_file.write('nais2postgis::handle_insert_update - pos SQL INSERT ERROR for line: %s\t\n' % str(msg_dict))
errors_file.write(str(ins))
errors_file.write('\n')
errors_file.flush()
traceback.print_exc(file=errors_file)
traceback.print_exc()
sys.stderr.write('\n\nBAD DB INSERT\n\n')
            sonita.write(str(ins)+'\n')
sonita.write('mmmmm %s for ins: %s \n' % (str(e),ins))
sonita.write('burubu %s \n\n' % (str(ins)))
return False
        db_uncommitted_count += 1 # increment the counter of uncommitted inserts
navigationstatus = msg_dict['NavigationStatus']
        shipandcargo = 'unknown' # default; replaced below if static ship data is known
cg_r = uscg_msg.station
        # normalise the navigation status
if str(navigationstatus) in NavigationStatusDecodeLut:
navigationstatus = NavigationStatusDecodeLut[str(navigationstatus)]
        # Update the last-position record for this vessel
cu.execute('SELECT key FROM last_position WHERE userid=%s;', (userid,))
row = cu.fetchall()
if len(row)>0:
print ('nais2postgis::handle_insert_update - actualizar existe last_position key {}, userid {}'.format(row[0][0], userid))
cu.execute('DELETE FROM last_position WHERE userid = %s;', (userid,))
        # check whether static data for this vessel already exists in the shipdata table
        # so that the vessel name is kept consistent between both tables
cu.execute('SELECT name,shipandcargo FROM shipdata WHERE userid=%s LIMIT 1;',(userid,))
row = cu.fetchall()
if len(row)>0:
name = row[0][0].rstrip(' @')
shipandcargo = int(row[0][1])
if str(shipandcargo) in shipandcargoDecodeLut:
shipandcargo = shipandcargoDecodeLut[str(shipandcargo)]
if len(shipandcargo) > 29:
shipandcargo = shipandcargo[:29]
else:
shipandcargo = str(shipandcargo)
else:
name = str(userid)
q = 'INSERT INTO last_position (userid,name,cog,sog,position,cg_r,navigationstatus, shipandcargo) VALUES (%s,%s,%s,%s,GeomFromText(\'POINT('+str(msg_dict['longitude'])+' '+str(msg_dict['latitude']) +')\',4326),%s,%s,%s);'
if msg_dict['COG'] == 511:
msg_dict['COG'] = 0 # make unknowns point north
qPrint = 'INSERT INTO last_position (userid,name,cog,sog,position,cg_r,navigationstatus, shipandcargo) VALUES ({},{},{},{},GeomFromText(\'POINT('+str(msg_dict['longitude'])+' '+str(msg_dict['latitude']) +')\',4326),{},{},{});'.format(userid,name,msg_dict['COG'],msg_dict['SOG'],cg_r,navigationstatus,shipandcargo)
print 'nais2postgis::handle_insert_update - actualizar last_position insert: {}'.format(qPrint)
cu.execute(q,(userid,name,msg_dict['COG'],msg_dict['SOG'],cg_r,navigationstatus,shipandcargo))
# drop the old value
rebuild_track_line(cu,userid,name) # This will leave out the current point
return True # hay que commitear
    # ********** Message type 4 (base station report)
if msg_type == 4:
print 'nais2postgis::handle_insert_update - procesar mensaje 4, delete bsreport userid', userid
cu.execute('DELETE FROM bsreport WHERE userid = %s;',(userid,))
db_uncommitted_count += 1
ins = aismsg.sqlInsert(msg_dict, dbType='postgres')
ins.add('cg_sec', uscg_msg.cg_sec)
ins.add('cg_timestamp', uscg_msg.sqlTimestampStr)
ins.add('cg_r', uscg_msg.station)
print 'nais2postgis::handle_insert_update - Insert: ',ins
cu.execute(str(ins))
return True # need to commit db
    # ********** Message type 5 (static ship data and voyage related data)
if msg_type == 5:
cu.execute('DELETE FROM shipdata WHERE userid = %s;',(userid,))
ins = aismsg.sqlInsert(msg_dict, dbType='postgres')
ins.add('cg_sec', uscg_msg.cg_sec)
ins.add('cg_timestamp', uscg_msg.sqlTimestampStr)
ins.add('cg_r', uscg_msg.station)
print 'nais2postgis::handle_insert_update - Insert: ',ins
try:
cu.execute(str(ins))
except Exception,e:
#errors_file = file('errors-nais2postgis','w+')
print 'nais2postgis::handle_insert_update - Error insert BAD BAD'
            errors_file.write('SQL INSERT ERROR for line: %s\t\n' % str(msg_dict))
errors_file.write(str(ins))
errors_file.write('\n')
errors_file.flush()
traceback.print_exc(file=errors_file)
traceback.print_exc()
sys.stderr.write('\n\nBAD DB INSERT\n\n')
return False # no commit
return True # need to commit db
    # *********** Message type 18 (standard Class B position report)
if msg_type == 18:
x = msg_dict['longitude']
y = msg_dict['latitude']
        # Bail out if the position is invalid
if x > 180 or y > 90:
return # 181, 91 is the invalid gps value
        # Check that the position falls inside the configured bounding box
if options.lon_min is not None and options.lon_min > x: return
if options.lon_max is not None and options.lon_max < x: return
if options.lat_min is not None and options.lat_min > y: return
if options.lat_max is not None and options.lat_max < y: return
ins = aismsg.sqlInsert(msg_dict, dbType='postgres')
ins.add('cg_sec', uscg_msg.cg_sec)
ins.add('cg_timestamp', uscg_msg.sqlTimestampStr)
ins.add('cg_r', uscg_msg.station)
print 'nais2postgis::handle_insert_update - Insert: ',ins
cu.execute(str(ins))
#navigationstatus = msg_dict['NavigationStatus']
shipandcargo = 'unknown'
cg_r = uscg_msg.station
cu.execute('SELECT key FROM last_position WHERE userid=%s;', (userid,))
row = cu.fetchall()
if len(row)>0:
print ('nais2postgis::handle_insert_update - actualizar existe last_position eliminar antiguo userid {}'.format(userid))
cu.execute('DELETE FROM last_position WHERE userid = %s;', (userid,))
        # Check whether static data for this Class B station already exists
cu.execute('SELECT name FROM b_staticdata WHERE partnum=0 AND userid=%s LIMIT 1;',(userid,))
row = cu.fetchall()
if len(row)>0:
name = row[0][0].rstrip(' @')
else:
name = str(userid)
cu.execute('SELECT shipandcargo FROM b_staticdata WHERE partnum=1 AND userid=%s LIMIT 1;',(userid,))
row = cu.fetchall()
if len(row)>0:
shipandcargo = int(row[0][0])
if str(shipandcargo) in shipandcargoDecodeLut:
shipandcargo = shipandcargoDecodeLut[str(shipandcargo)]
if len(shipandcargo) > 29:
shipandcargo = shipandcargo[:29]
else:
shipandcargo = str(shipandcargo)
# FIX: add navigation status
q = 'INSERT INTO last_position (userid,name,cog,sog,position,cg_r,shipandcargo) VALUES (%s,%s,%s,%s,GeomFromText(\'POINT('+str(msg_dict['longitude'])+' '+str(msg_dict['latitude']) +')\',4326),%s,%s);'
if msg_dict['COG'] == 511:
msg_dict['COG'] = 0 # make unknowns point north
qPrint = 'INSERT INTO last_position (userid,name,cog,sog,position,cg_r,shipandcargo) VALUES ({},{},{},{},GeomFromText(\'POINT('+str(msg_dict['longitude'])+' '+str(msg_dict['latitude']) +')\',4326),{},{});'.format(userid,name,msg_dict['COG'],msg_dict['SOG'],cg_r,shipandcargo)
print 'nais2postgis::handle_insert_update - actualizar last_position insert: {}'.format(qPrint)
cu.execute(q,(userid,name,msg_dict['COG'],msg_dict['SOG'],cg_r,shipandcargo) )
rebuild_b_track_line(cu,userid,name)
return True # need to commit db
    # ********** Message type 19 (extended Class B position report)
if msg_type == 19:
cu.execute ('DELETE FROM b_pos_and_shipdata WHERE userid=%s AND partnum=%s;', (userid,msg_dict['partnum']))
ins = aismsg.sqlInsert(msg_dict, dbType='postgres')
ins.add('cg_sec', uscg_msg.cg_sec)
ins.add('cg_timestamp', uscg_msg.sqlTimestampStr)
ins.add('cg_r', uscg_msg.station)
print 'nais2postgis::handle_insert_update - Insert: ',ins
cu.execute(str(ins))
return True # need to commit db
    # ********** Message type 24 (Class B CS static data report)
if msg_type == 24: # Class B static data report. Either part A (0) or B (0)
# remove the old value, but only do it by parts
cu.execute ('DELETE FROM b_staticdata WHERE userid=%s AND partnum=%s;', (userid,msg_dict['partnum']))
ins = aismsg.sqlInsert(msg_dict, dbType='postgres')
ins.add('cg_sec', uscg_msg.cg_sec)
ins.add('cg_timestamp', uscg_msg.sqlTimestampStr)
ins.add('cg_r', uscg_msg.station)
print 'nais2postgis::handle_insert_update - Insert: ',ins
cu.execute(str(ins))
return True
    return False # No db commit needed - unsupported message type
################################################################################
# #
# Nas2Postgis #
# #
################################################################################
class Nais2Postgis:
def __init__(self,options):
self.v = options.verbose
self.options = options
self.timeout=options.timeout
self.nais_connected = False
self.loop_count = 0
self.nais_src = None
self.cx = aisutils.database.connect(options, dbType='postgres')
self.cu = self.cx.cursor()
self.norm_queue = aisutils.normalize.Normalize() # for multipart messages
self.bad = file('bad.ais','w')
        # Commit management; avoid committing too often
self.db_last_commit_time = 0
self.db_uncommitted_count = 0
print "nais2postgis::Nais2Postgis - Init"
def do_one_loop(self):
        # Return value: True on success; False on disconnection or other error
connection_attempts = 0
while not self.nais_connected:
self.loop_count += 1
connection_attempts += 1
if connection_attempts%100 == 1:
logging.warn('nais2postgis::Nais2Postgis - Conectando a fuente AIS')
sys.stderr.write('nais2postgis::Nais2Postgis - Conectando host %s (puerto %d)\n' %
(str(self.options.inHost), self.options.inPort))
try:
self.nais_src = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.nais_src.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.nais_src.connect((self.options.inHost, self.options.inPort))
except socket.error, inst:
if self.loop_count%50 == 0:
sys.stderr.write('nais2postgis::Nais2Postgis - %d : Fallo de conexion nais_src ... %s\tReintento\n' %
(self.loop_count,str(inst)))
time.sleep(.5)
else:
self.nais_connected=True
logging.warn('nais2postgis::Nais2Postgis - Conectado a AIS')
sys.stderr.write('nais2postgis::Nais2Postgis - Conectado...\n')
#time.sleep(.1)
readersready,outputready,exceptready = select.select([self.nais_src,],[],[],self.timeout)
        # No data was read
if len(readersready) == 0:
return
for sock in readersready:
msgs = sock.recv(10000)
if len(msgs)==0:
self.nais_connected=False
logging.warn('nais2postgis::Nais2Postgis - DESCONEXION de AIS\n')
sys.stderr.write('nais2postgis::Nais2Postgis - DESCONEXION de AIS\n')
if self.v:
sys.stderr.write('nais2postgis::Nais2Postgis - recibidos %d bytes; mensaje %s\n' % (len(msgs),msgs.strip()))
if not self.nais_connected:
return False
# FIX: does not handle partial messages coming through!
for msg in msgs.split('\n'):
msg = msg.strip()
if 'AIVDM'!= msg[1:6]: continue
try:
self.norm_queue.put(msg)
except Exception, e:
sys.stderr.write('nais2postgis::Nais2Postgis - Incorrecto mensaje AIVDM: %s\n' % (msg))
sys.stderr.write(' Exception:' + str(type(Exception))+'\n')
sys.stderr.write(' Exception args:'+ str(e)+'\n')
traceback.print_exc(file=sys.stderr)
continue
while self.norm_queue.qsize() > 0:
#print 'norm_queue loop',self.norm_queue.qsize()
msg = self.norm_queue.get()
try:
uscg_msg = aisutils.uscg.UscgNmea(msg)
except Exception, e:
logging.exception('nais2postgis::Nais2Postgis - uscg decode exception %s for msg: %s' % (str(e),msg))
self.bad.write('nais2postgis::Nais2Postgis - uscg decode exception %s for msg: %s' % (str(e),msg ) )
#self.bad.write(msg+'\n')
continue
print 'nais2postgis::Nais2Postgis - Tipo Mensaje',uscg_msg.msgTypeChar,' ;tipos soportados: ',ais_msgs_supported
if uscg_msg.msgTypeChar not in ais_msgs_supported:
print 'nais2postgis::Nais2Postgis - Mensaje no soportado'
#logging.warn('msg not supportd "%s"' % (msg[7],))
continue
else:
print 'nais2postgis::Nais2Postgis - Mensaje soportado'
print 'nais2postgis::Nais2Postgis - Mensaje: ',msg
try:
aismsg = ais.msgModByFirstChar[uscg_msg.msgTypeChar]
except Exception, e:
sys.stderr.write(' nais2postgis::Nais2Postgis - Eliminar mensaje tipo desconocido: %s\n\t%s\n' %
(uscg_msg.msgTypeChar,str(e),) )
self.bad.write(msg+'\n')
continue
bv = ais.binary.ais6tobitvec(uscg_msg.contents)
try:
msg_dict = aismsg.decode(bv)
except Exception, e:
sys.stderr.write(' nais2postgis::Nais2Postgis - Eliminar mensaje fallido: %s,%s\n' % (str(e),msg,) )
self.bad.write(msg+'\n')
continue
print 'nais2postgis::Nais2Postgis - Mensaje decodificado: ',msg_dict
try:
if handle_insert_update(self.cx, uscg_msg, msg_dict, aismsg):
self.db_uncommitted_count += 1
except Exception, e:
sys.stderr.write('*** nais2postgis::Nais2Postgis - handle_insert_update exception\n')
sys.stderr.write(' Exception:' + str(type(Exception))+'\n')
sys.stderr.write(' Exception args:'+ str(e)+'\n')
traceback.print_exc(file=sys.stderr)
self.bad.write(msg+'\n')
self.cx.commit() # reset the transaction
print 'nais2postgis::Nais2Postgis - Should commit?',self.db_last_commit_time, time.time() - self.db_last_commit_time, self.db_uncommitted_count
#print 'nais2postgis::Nais2Postgis - temporal forzar commit'
#self.db_last_commit_time = None
        # Decide whether a commit is needed
if (self.db_last_commit_time is None) or (time.time() - self.db_last_commit_time > 30. and self.db_uncommitted_count > 0):
print 'nais2postgis::Nais2Postgis - Committing:',self.db_last_commit_time,self.db_uncommitted_count
self.db_last_commit_time = time.time()
self.db_uncommitted_count = 0
try:
print 'nais2postgis::Nais2Postgis - Va a commitear'
self.cx.commit()
print ' ... Commit exitoso'
except Exception, e:
sys.stderr.write('*** nais2postgis::Nais2Postgis - handle_insert_update exception\n')
sys.stderr.write(' Exception:' + str(type(Exception))+'\n')
sys.stderr.write(' Exception args:'+ str(e)+'\n')
traceback.print_exc(file=sys.stderr)
self.bad.write(msg+'\n')
time.sleep(.1)
        self.cx.commit() # reset the transaction
################################################################################
# #
# main #
# #
################################################################################
if __name__=='__main__':
from optparse import OptionParser
print 'nais2postgis::main - Inicializar parseo mensajes AIS'
    dbType='postgres'; # force the database type
parser = OptionParser(usage="%prog [options]",version="%prog "+__version__ + " ("+__date__+")")
parser.add_option('-i','--in-port',dest='inPort',type='int', default=31414
                     ,help='Port to receive AIS data from [default: %default]')
parser.add_option('-I','--in-host',dest='inHost',type='string',default='localhost'
                     ,help='Host to receive AIS data from [default: %default]')
parser.add_option('--in-gethostname',dest='inHostname', action='store_true', default=False
                     ,help='Use the local hostname as the receive host [default: %default]')
parser.add_option('-t','--timeout',dest='timeout',type='float', default='5'
                     ,help='Number of seconds before timing out when no data is received [default: %default]')
#parser.add_option('-a','--add-station',action='append',dest='allowStations',default=None
    #    ,help='Limit forwarding to the given stations (e.g. r003679900) [default: all]')
# lon_min default=-71
parser.add_option('-x','--lon-min', dest='lon_min', type='float', default=None
                     ,help='Bounding box, minimum longitude [default: %default]')
parser.add_option('-X','--lon-max', dest='lon_max', type='float', default=None
                     ,help='Bounding box, maximum longitude [default: %default]')
    # lat_min default=42
parser.add_option('-y','--lat-min', dest='lat_min', type='float', default=None
                     ,help='Bounding box, minimum latitude [default: %default]')
parser.add_option('-Y','--lat-max', dest='lat_max', type='float', default=None
                     ,help='Bounding box, maximum latitude [default: %default]')
aisutils.daemon.stdCmdlineOptions(parser, skip_short=True)
aisutils.database.stdCmdlineOptions(parser, 'postgres')
parser.add_option('-v','--verbose',dest='verbose',default=False,action='store_true'
                     ,help='Enable verbose mode')
    # Log file: nais2postgis.py.log
default_log = sys.argv[0].split('/')[-1]+'.log'
parser.add_option('-l', '--log-file', dest='log_file', type='string', default=default_log
                     , help='Log file [default: %default]')
parser.add_option('-L','--log-level',dest='log_level',type='int', default='0'
                     ,help='Log level (by default, everything) [default: %default]')
(options,args) = parser.parse_args()
v = options.verbose
if v:
sys.stderr.write('nais2postgis::main - Modo verbose; fichero %s nivel %d\n' %
(options.log_file, options.log_level) )
sys.stderr.write('nais2postgis::main - Bounding box: X: %s to %s \t\t Y: %s to %s\n' %
(options.lon_min,options.lon_max,options.lat_min,options.lat_max))
if options.inHostname:
options.inHost=socket.gethostname()
if options.daemon_mode:
aisutils.daemon.start(options.pid_file)
logging.basicConfig(filename = options.log_file, level = options.log_level)
n2p = Nais2Postgis(options)
loop_count=0
while True:
loop_count += 1
if 0 == loop_count % 1000:
print 'nais2postgis::main - top level loop',loop_count
try:
n2p.do_one_loop()
except Exception, e:
sys.stderr.write('*** nais2postgis::main - do_one_loop exception\n')
sys.stderr.write(' Exception:' + str(type(Exception))+'\n')
sys.stderr.write(' Exception args:'+ str(e)+'\n')
traceback.print_exc(file=sys.stderr)
continue
time.sleep(0.01)
| gpl-3.0 | -1,894,013,927,633,651,700 | 43.562893 | 330 | 0.550526 | false |
cosurgi/trunk | examples/Lubrication/SimpleShear_lubrication.py | 1 | 5734 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# This script performs a simple shear experiment with the lubrication law.
# It shows the use of
# - Lubrication law
# - PDFEngine
# - VTK Lubrication recorder
from yade import pack,ymport,export,geom,bodiesHandling, plot
import math
import pylab
#from yade import qt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import datetime as DT
import os
import re
sp=pack.SpherePack();
m_savefile = "data.txt";
m_vtkDir = "vtk";
if not os.path.exists(m_vtkDir):
os.makedirs(m_vtkDir);
# Physics parameters
m_young = 1e9; #Pa
m_friction = atan(0.5); #rad
m_viscosity = 100; #Pa.s
m_sphRadius = 0.1; # m
m_epsilon = 1e-3;
# Simulation
m_shearRate = 10;
m_N = 500; # Number of spheres
m_targetVolfrac = 0.5;
m_stopOnStrain = 100;
# Saving
m_sampling = 100.; # Number of samples per unit of shear strain
m_vtkSampling = 10.; # Number of VTK/PDF samples per unit of shear strain
#define material for all bodies:
id_Mat=O.materials.append(FrictMat(young=m_young,poisson=0.3,density=1000,frictionAngle=m_friction))
Mat=O.materials[id_Mat]
# Simulation cell
sq = (4./3.*pi*m_N/m_targetVolfrac)**(1./3.);
width = sq*m_sphRadius;
length = sq*m_sphRadius;
height = sq*m_sphRadius;
O.periodic=True
O.cell.hSize=Matrix3( 1*width, 0, 0,
0 ,5*height, 0,
0, 0, 1*length);
# Sphere pack
No = sp.makeCloud((0,0,0),O.cell.size,m_sphRadius,0.05,m_N,periodic=True,seed=1)
spheres = [utils.sphere(s[0],s[1]) for s in sp];
O.bodies.append(spheres)
# Setup interaction law
law = Law2_ScGeom_ImplicitLubricationPhys( activateTangencialLubrication=True,
activateTwistLubrication=True,
activateRollLubrication=True,
resolution = 2,
theta = 1,
SolutionTol = 1.e-8,
MaxIter = 50);
# Setup engines
O.engines = [ForceResetter(),
InsertionSortCollider([Bo1_Sphere_Aabb(aabbEnlargeFactor=2., label="aabb")],verletDist=-0.2,allowBiggerThanPeriod=False),
InteractionLoop(
[Ig2_Sphere_Sphere_ScGeom6D(interactionDetectionFactor=2.,label="Ig2")],
[Ip2_FrictMat_FrictMat_LubricationPhys(eta=m_viscosity,eps=m_epsilon)],
[law]
),
NewtonIntegrator(damping=0.),
GlobalStiffnessTimeStepper(active=1,timeStepUpdateInterval=100,timestepSafetyCoefficient=0.8, defaultDt=1e-6,label="TimeStepper",viscEl=False),
PDFEngine(filename="PDF.txt", virtPeriod=1./(m_vtkSampling*m_shearRate), numDiscretizeAnglePhi=9,numDiscretizeAngleTheta=13),
VTKRecorder(fileName=m_vtkDir+'/',recorders=['spheres','velocity','lubrication'], virtPeriod=1./(m_vtkSampling*m_shearRate),label="VtkRecorder"),
PyRunner(command="UpPlot()",virtPeriod=min(1./(m_sampling*m_shearRate), 0.1), label="UpdatePlot"),
PyRunner(command="SavePlot()",realPeriod=600,label="SaveDataPlot"),
PyRunner(command="checkStartShear()", iterPeriod=10, label="beginCheck")
];
plot.plots={'time':('totalStress_yy',
'normalContactStress_yy',
'shearContactStress_yy',
'normalLubrifStress_yy',
'shearLubrifStress_yy',
'kineticStress_yy'),
'time2':('phi'),
'time3':('totalStress_xy',
'normalContactStress_xy',
'shearContactStress_xy',
'normalLubrifStress_xy',
'shearLubrifStress_xy',
'kineticStress_xy')};
plot.plot(subPlots=True);
O.dt = 1e-6;
# First, compress to the target volume fraction
O.cell.velGrad = Matrix3(0,0,0, 0,-10,0, 0,0,0);
def SavePlot():
global m_savefile;
plot.saveDataTxt(m_savefile);
def UpPlot():
global m_stopOnStrain;
[normalContactStress, shearContactStress, normalLubrifStress, shearLubrifStress, potentialStress] = Law2_ScGeom_ImplicitLubricationPhys.getTotalStresses();
kineticStress = getTotalDynamicStress();
totalStress = normalContactStress + shearContactStress + normalLubrifStress + shearLubrifStress + potentialStress + kineticStress;
phi = 1.-porosity();
if abs(O.cell.hSize[0,1]/O.cell.hSize[0,0]) > 1:
flipCell();
plot.addData(
totalStress = totalStress,
totalStress2 = getStress(),
kineticStress = kineticStress,
normalContactStress = normalContactStress,
shearContactStress = shearContactStress,
normalLubrifStress = normalLubrifStress,
shearLubrifStress = shearLubrifStress,
potentialStress = potentialStress,
phi = phi,
iter = O.iter,
strain = O.cell.trsf,
time = O.time,
time2 = O.time,
time3 = O.time,
velGrad = O.cell.velGrad);
if (m_stopOnStrain > 0) & (O.cell.trsf[0,1] > m_stopOnStrain):
SaveAndQuit();
def checkStartShear():
global m_shearRate;
phi = 1. - porosity();
start = m_targetVolfrac < phi;
if start:
print("Start shear.");
O.cell.velGrad = Matrix3(0,m_shearRate, 0, 0,0,0, 0,0,0);
O.cell.trsf = Matrix3(1,0,0, 0,1,0, 0,0,1);
beginCheck.dead = 1;
def SaveAndQuit():
print("Quit condition reach.");
SavePlot();
O.stopAtIter = O.iter+1;
| gpl-2.0 | 5,506,279,512,304,312,000 | 33.751515 | 163 | 0.595745 | false |
larsbergstrom/servo | python/servo/package_commands.py | 1 | 31921 | # Copyright 2013 The Servo Project Developers. See the COPYRIGHT
# file at the top-level directory of this distribution.
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.
from __future__ import absolute_import, print_function, unicode_literals
from datetime import datetime
import hashlib
import json
import os
import os.path as path
import platform
import shutil
import subprocess
import sys
import tempfile
import urllib
from mach.decorators import (
CommandArgument,
CommandProvider,
Command,
)
from mach.registrar import Registrar
# Note: mako cannot be imported at the top level because it breaks mach bootstrap
sys.path.append(path.join(path.dirname(__file__), "..", "..",
"components", "style", "properties", "Mako-0.9.1.zip"))
from servo.command_base import (
archive_deterministically,
BuildNotFound,
cd,
CommandBase,
is_macosx,
is_windows,
)
from servo.util import delete
PACKAGES = {
'android': [
'target/android/armv7-linux-androideabi/release/servoapp.apk',
'target/android/armv7-linux-androideabi/release/servoview.aar',
],
'linux': [
'target/release/servo-tech-demo.tar.gz',
],
'mac': [
'target/release/servo-tech-demo.dmg',
],
'macbrew': [
'target/release/brew/servo.tar.gz',
],
'magicleap': [
'target/magicleap/aarch64-linux-android/release/Servo.mpk',
],
'maven': [
'target/android/gradle/servoview/maven/org/mozilla/servoview/servoview-armv7/',
'target/android/gradle/servoview/maven/org/mozilla/servoview/servoview-x86/',
],
'windows-msvc': [
r'target\release\msi\Servo.exe',
r'target\release\msi\Servo.zip',
],
'uwp': [
r'support\hololens\AppPackages\ServoApp\ServoApp_1.0.0.0_Test.zip',
],
}
TemporaryDirectory = None
if sys.version_info >= (3, 2):
TemporaryDirectory = tempfile.TemporaryDirectory
else:
import contextlib
# Not quite as robust as tempfile.TemporaryDirectory,
# but good enough for most purposes
@contextlib.contextmanager
def TemporaryDirectory(**kwargs):
dir_name = tempfile.mkdtemp(**kwargs)
try:
yield dir_name
except Exception as e:
shutil.rmtree(dir_name)
raise e
def otool(s):
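    """Yield the dylib load paths reported by `otool -L` for a Mach-O file."""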
o = subprocess.Popen(['/usr/bin/otool', '-L', s], stdout=subprocess.PIPE)
for l in o.stdout:
if l[0] == '\t':
yield l.split(' ', 1)[0][1:]
def listfiles(directory):
return [f for f in os.listdir(directory)
if path.isfile(path.join(directory, f))]
def install_name_tool(old, new, binary):
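    """Rewrite one load command in `binary` so that `old` is replaced by
    `@executable_path/` + `new`."""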
try:
subprocess.check_call(['install_name_tool', '-change', old, '@executable_path/' + new, binary])
except subprocess.CalledProcessError as e:
print("install_name_tool exited with return value %d" % e.returncode)
def is_system_library(lib):
return lib.startswith("/System/Library") or lib.startswith("/usr/lib")
def change_non_system_libraries_path(libraries, relative_path, binary):
for lib in libraries:
if is_system_library(lib):
continue
new_path = path.join(relative_path, path.basename(lib))
install_name_tool(lib, new_path, binary)
def copy_dependencies(binary_path, lib_path):
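    """Copy every non-system dylib that the binary (and, transitively, those
    dylibs) links against into lib_path, rewriting the load commands with
    install_name_tool so they resolve relative to @executable_path."""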
relative_path = path.relpath(lib_path, path.dirname(binary_path)) + "/"
# Update binary libraries
binary_dependencies = set(otool(binary_path))
change_non_system_libraries_path(binary_dependencies, relative_path, binary_path)
# Update dependencies libraries
need_checked = binary_dependencies
checked = set()
while need_checked:
checking = set(need_checked)
need_checked = set()
for f in checking:
# No need to check these for their dylibs
if is_system_library(f):
continue
need_relinked = set(otool(f))
new_path = path.join(lib_path, path.basename(f))
if not path.exists(new_path):
shutil.copyfile(f, new_path)
change_non_system_libraries_path(need_relinked, relative_path, new_path)
need_checked.update(need_relinked)
checked.update(checking)
need_checked.difference_update(checked)
def copy_windows_dependencies(binary_path, destination):
for f in os.listdir(binary_path):
if os.path.isfile(path.join(binary_path, f)) and f.endswith(".dll"):
shutil.copy(path.join(binary_path, f), destination)
def change_prefs(resources_path, platform, vr=False):
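    """Overlay the "all", optional "vr", and platform-specific entries from
    package-prefs.json onto prefs.json in the packaged resources, then delete
    package-prefs.json."""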
print("Swapping prefs")
prefs_path = path.join(resources_path, "prefs.json")
package_prefs_path = path.join(resources_path, "package-prefs.json")
with open(prefs_path) as prefs, open(package_prefs_path) as package_prefs:
prefs = json.load(prefs)
pref_sets = []
package_prefs = json.load(package_prefs)
if "all" in package_prefs:
pref_sets += [package_prefs["all"]]
if vr and "vr" in package_prefs:
pref_sets += [package_prefs["vr"]]
if platform in package_prefs:
pref_sets += [package_prefs[platform]]
for pref_set in pref_sets:
for pref in pref_set:
if pref in prefs:
prefs[pref] = pref_set[pref]
with open(prefs_path, "w") as out:
json.dump(prefs, out, sort_keys=True, indent=2)
delete(package_prefs_path)
@CommandProvider
class PackageCommands(CommandBase):
@Command('package',
description='Package Servo',
category='package')
@CommandArgument('--release', '-r', action='store_true',
help='Package the release build')
@CommandArgument('--dev', '-d', action='store_true',
help='Package the dev build')
@CommandArgument('--android',
default=None,
action='store_true',
help='Package Android')
@CommandArgument('--magicleap',
default=None,
action='store_true',
help='Package Magic Leap')
@CommandArgument('--target', '-t',
default=None,
help='Package for given target platform')
@CommandArgument('--flavor', '-f',
default=None,
help='Package using the given Gradle flavor')
@CommandArgument('--maven',
default=None,
action='store_true',
help='Create a local Maven repository')
@CommandArgument('--uwp',
default=None,
action='append',
help='Create an APPX package')
def package(self, release=False, dev=False, android=None, magicleap=None, debug=False,
debugger=None, target=None, flavor=None, maven=False, uwp=None):
if android is None:
android = self.config["build"]["android"]
if target and android:
print("Please specify either --target or --android.")
sys.exit(1)
if not android:
android = self.handle_android_target(target)
else:
target = self.config["android"]["target"]
if target and magicleap:
print("Please specify either --target or --magicleap.")
sys.exit(1)
if magicleap:
target = "aarch64-linux-android"
env = self.build_env(target=target)
binary_path = self.get_binary_path(
release, dev, target=target, android=android, magicleap=magicleap,
simpleservo=uwp is not None
)
dir_to_root = self.get_top_dir()
target_dir = path.dirname(binary_path)
if uwp:
vs_info = self.vs_dirs()
build_uwp(uwp, dev, vs_info['msbuild'])
elif magicleap:
if platform.system() not in ["Darwin"]:
raise Exception("Magic Leap builds are only supported on macOS.")
if not env.get("MAGICLEAP_SDK"):
raise Exception("Magic Leap builds need the MAGICLEAP_SDK environment variable")
if not env.get("MLCERT"):
raise Exception("Magic Leap builds need the MLCERT environment variable")
# GStreamer configuration
env.setdefault("GSTREAMER_DIR", path.join(
self.get_target_dir(), "magicleap", target, "native", "gstreamer-1.16.0"
))
mabu = path.join(env.get("MAGICLEAP_SDK"), "mabu")
packages = [
"./support/magicleap/Servo.package",
]
if dev:
build_type = "lumin_debug"
else:
build_type = "lumin_release"
for package in packages:
argv = [
mabu,
"-o", target_dir,
"-t", build_type,
"-r",
"GSTREAMER_DIR=" + env["GSTREAMER_DIR"],
package
]
try:
subprocess.check_call(argv, env=env)
except subprocess.CalledProcessError as e:
print("Packaging Magic Leap exited with return value %d" % e.returncode)
return e.returncode
elif android:
android_target = self.config["android"]["target"]
if "aarch64" in android_target:
build_type = "Arm64"
elif "armv7" in android_target:
build_type = "Armv7"
elif "i686" in android_target:
build_type = "x86"
else:
build_type = "Arm"
if dev:
build_mode = "Debug"
else:
build_mode = "Release"
flavor_name = "Main"
if flavor is not None:
flavor_name = flavor.title()
vr = flavor == "googlevr" or flavor == "oculusvr"
dir_to_resources = path.join(self.get_top_dir(), 'target', 'android', 'resources')
if path.exists(dir_to_resources):
delete(dir_to_resources)
shutil.copytree(path.join(dir_to_root, 'resources'), dir_to_resources)
change_prefs(dir_to_resources, "android", vr=vr)
variant = ":assemble" + flavor_name + build_type + build_mode
apk_task_name = ":servoapp" + variant
aar_task_name = ":servoview" + variant
maven_task_name = ":servoview:uploadArchive"
argv = ["./gradlew", "--no-daemon", apk_task_name, aar_task_name]
if maven:
argv.append(maven_task_name)
try:
with cd(path.join("support", "android", "apk")):
subprocess.check_call(argv, env=env)
except subprocess.CalledProcessError as e:
print("Packaging Android exited with return value %d" % e.returncode)
return e.returncode
elif is_macosx():
print("Creating Servo.app")
dir_to_dmg = path.join(target_dir, 'dmg')
dir_to_app = path.join(dir_to_dmg, 'Servo.app')
dir_to_resources = path.join(dir_to_app, 'Contents', 'Resources')
if path.exists(dir_to_dmg):
print("Cleaning up from previous packaging")
delete(dir_to_dmg)
print("Copying files")
shutil.copytree(path.join(dir_to_root, 'resources'), dir_to_resources)
shutil.copy2(path.join(dir_to_root, 'Info.plist'), path.join(dir_to_app, 'Contents', 'Info.plist'))
content_dir = path.join(dir_to_app, 'Contents', 'MacOS')
os.makedirs(content_dir)
shutil.copy2(binary_path, content_dir)
change_prefs(dir_to_resources, "macosx")
print("Finding dylibs and relinking")
copy_dependencies(path.join(content_dir, 'servo'), content_dir)
print("Adding version to Credits.rtf")
version_command = [binary_path, '--version']
p = subprocess.Popen(version_command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True)
version, stderr = p.communicate()
if p.returncode != 0:
raise Exception("Error occurred when getting Servo version: " + stderr)
version = "Nightly version: " + version
import mako.template
template_path = path.join(dir_to_resources, 'Credits.rtf.mako')
credits_path = path.join(dir_to_resources, 'Credits.rtf')
with open(template_path) as template_file:
template = mako.template.Template(template_file.read())
with open(credits_path, "w") as credits_file:
credits_file.write(template.render(version=version))
delete(template_path)
print("Creating dmg")
os.symlink('/Applications', path.join(dir_to_dmg, 'Applications'))
dmg_path = path.join(target_dir, "servo-tech-demo.dmg")
if path.exists(dmg_path):
print("Deleting existing dmg")
os.remove(dmg_path)
try:
subprocess.check_call(['hdiutil', 'create',
'-volname', 'Servo',
'-megabytes', '900',
dmg_path,
'-srcfolder', dir_to_dmg])
except subprocess.CalledProcessError as e:
print("Packaging MacOS dmg exited with return value %d" % e.returncode)
return e.returncode
print("Cleaning up")
delete(dir_to_dmg)
print("Packaged Servo into " + dmg_path)
print("Creating brew package")
dir_to_brew = path.join(target_dir, 'brew_tmp')
dir_to_tar = path.join(target_dir, 'brew')
if not path.exists(dir_to_tar):
os.makedirs(dir_to_tar)
tar_path = path.join(dir_to_tar, "servo.tar.gz")
if path.exists(dir_to_brew):
print("Cleaning up from previous packaging")
delete(dir_to_brew)
if path.exists(tar_path):
print("Deleting existing package")
os.remove(tar_path)
shutil.copytree(path.join(dir_to_root, 'resources'), path.join(dir_to_brew, 'resources'))
os.makedirs(path.join(dir_to_brew, 'bin'))
shutil.copy2(binary_path, path.join(dir_to_brew, 'bin', 'servo'))
# Note that in the context of Homebrew, libexec is reserved for private use by the formula
# and therefore is not symlinked into HOMEBREW_PREFIX.
os.makedirs(path.join(dir_to_brew, 'libexec'))
copy_dependencies(path.join(dir_to_brew, 'bin', 'servo'), path.join(dir_to_brew, 'libexec'))
archive_deterministically(dir_to_brew, tar_path, prepend_path='servo/')
delete(dir_to_brew)
print("Packaged Servo into " + tar_path)
elif is_windows():
dir_to_msi = path.join(target_dir, 'msi')
if path.exists(dir_to_msi):
print("Cleaning up from previous packaging")
delete(dir_to_msi)
os.makedirs(dir_to_msi)
print("Copying files")
dir_to_temp = path.join(dir_to_msi, 'temp')
dir_to_resources = path.join(dir_to_temp, 'resources')
shutil.copytree(path.join(dir_to_root, 'resources'), dir_to_resources)
shutil.copy(binary_path, dir_to_temp)
copy_windows_dependencies(target_dir, dir_to_temp)
change_prefs(dir_to_resources, "windows")
# generate Servo.wxs
import mako.template
template_path = path.join(dir_to_root, "support", "windows", "Servo.wxs.mako")
template = mako.template.Template(open(template_path).read())
wxs_path = path.join(dir_to_msi, "Installer.wxs")
open(wxs_path, "w").write(template.render(
exe_path=target_dir,
dir_to_temp=dir_to_temp,
resources_path=dir_to_resources))
# run candle and light
print("Creating MSI")
try:
with cd(dir_to_msi):
subprocess.check_call(['candle', wxs_path])
except subprocess.CalledProcessError as e:
print("WiX candle exited with return value %d" % e.returncode)
return e.returncode
try:
wxsobj_path = "{}.wixobj".format(path.splitext(wxs_path)[0])
with cd(dir_to_msi):
subprocess.check_call(['light', wxsobj_path])
except subprocess.CalledProcessError as e:
print("WiX light exited with return value %d" % e.returncode)
return e.returncode
dir_to_installer = path.join(dir_to_msi, "Installer.msi")
print("Packaged Servo into " + dir_to_installer)
# Generate bundle with Servo installer.
print("Creating bundle")
shutil.copy(path.join(dir_to_root, 'support', 'windows', 'Servo.wxs'), dir_to_msi)
bundle_wxs_path = path.join(dir_to_msi, 'Servo.wxs')
try:
with cd(dir_to_msi):
subprocess.check_call(['candle', bundle_wxs_path, '-ext', 'WixBalExtension'])
except subprocess.CalledProcessError as e:
print("WiX candle exited with return value %d" % e.returncode)
return e.returncode
try:
wxsobj_path = "{}.wixobj".format(path.splitext(bundle_wxs_path)[0])
with cd(dir_to_msi):
subprocess.check_call(['light', wxsobj_path, '-ext', 'WixBalExtension'])
except subprocess.CalledProcessError as e:
print("WiX light exited with return value %d" % e.returncode)
return e.returncode
print("Packaged Servo into " + path.join(dir_to_msi, "Servo.exe"))
print("Creating ZIP")
zip_path = path.join(dir_to_msi, "Servo.zip")
archive_deterministically(dir_to_temp, zip_path, prepend_path='servo/')
print("Packaged Servo into " + zip_path)
print("Cleaning up")
delete(dir_to_temp)
delete(dir_to_installer)
else:
dir_to_temp = path.join(target_dir, 'packaging-temp')
if path.exists(dir_to_temp):
# TODO(aneeshusa): lock dir_to_temp to prevent simultaneous builds
print("Cleaning up from previous packaging")
delete(dir_to_temp)
print("Copying files")
dir_to_resources = path.join(dir_to_temp, 'resources')
shutil.copytree(path.join(dir_to_root, 'resources'), dir_to_resources)
shutil.copy(binary_path, dir_to_temp)
change_prefs(dir_to_resources, "linux")
print("Creating tarball")
tar_path = path.join(target_dir, 'servo-tech-demo.tar.gz')
archive_deterministically(dir_to_temp, tar_path, prepend_path='servo/')
print("Cleaning up")
delete(dir_to_temp)
print("Packaged Servo into " + tar_path)
@Command('install',
description='Install Servo (currently, Android and Windows only)',
category='package')
@CommandArgument('--release', '-r', action='store_true',
help='Install the release build')
@CommandArgument('--dev', '-d', action='store_true',
help='Install the dev build')
@CommandArgument('--android',
action='store_true',
help='Install on Android')
@CommandArgument('--magicleap',
default=None,
action='store_true',
help='Install on Magic Leap')
@CommandArgument('--emulator',
action='store_true',
help='For Android, install to the only emulated device')
@CommandArgument('--usb',
action='store_true',
help='For Android, install to the only USB device')
@CommandArgument('--target', '-t',
default=None,
help='Install the given target platform')
def install(self, release=False, dev=False, android=False, magicleap=False, emulator=False, usb=False, target=None):
if target and android:
print("Please specify either --target or --android.")
sys.exit(1)
if not android:
android = self.handle_android_target(target)
if target and magicleap:
print("Please specify either --target or --magicleap.")
sys.exit(1)
if magicleap:
target = "aarch64-linux-android"
env = self.build_env(target=target)
try:
binary_path = self.get_binary_path(release, dev, android=android, magicleap=magicleap)
except BuildNotFound:
print("Servo build not found. Building servo...")
result = Registrar.dispatch(
"build", context=self.context, release=release, dev=dev, android=android, magicleap=magicleap,
)
if result:
return result
try:
binary_path = self.get_binary_path(release, dev, android=android, magicleap=magicleap)
except BuildNotFound:
print("Rebuilding Servo did not solve the missing build problem.")
return 1
if magicleap:
if not env.get("MAGICLEAP_SDK"):
raise Exception("Magic Leap installs need the MAGICLEAP_SDK environment variable")
mldb = path.join(env.get("MAGICLEAP_SDK"), "tools", "mldb", "mldb")
pkg_path = path.join(path.dirname(binary_path), "Servo.mpk")
exec_command = [
mldb,
"install", "-u",
pkg_path,
]
elif android:
pkg_path = self.get_apk_path(release)
exec_command = [self.android_adb_path(env)]
if emulator and usb:
print("Cannot install to both emulator and USB at the same time.")
return 1
if emulator:
exec_command += ["-e"]
if usb:
exec_command += ["-d"]
exec_command += ["install", "-r", pkg_path]
elif is_windows():
pkg_path = path.join(path.dirname(binary_path), 'msi', 'Servo.msi')
exec_command = ["msiexec", "/i", pkg_path]
if not path.exists(pkg_path):
print("Servo package not found. Packaging servo...")
result = Registrar.dispatch(
"package", context=self.context, release=release, dev=dev, android=android, magicleap=magicleap,
)
if result != 0:
return result
print(" ".join(exec_command))
return subprocess.call(exec_command, env=env)
@Command('upload-nightly',
description='Upload Servo nightly to S3',
category='package')
@CommandArgument('platform',
choices=PACKAGES.keys(),
help='Package platform type to upload')
@CommandArgument('--secret-from-taskcluster',
action='store_true',
help='Retrieve the appropriate secrets from taskcluster.')
def upload_nightly(self, platform, secret_from_taskcluster):
import boto3
def get_taskcluster_secret(name):
url = (
os.environ.get("TASKCLUSTER_PROXY_URL", "http://taskcluster") +
"/secrets/v1/secret/project/servo/" +
name
)
return json.load(urllib.urlopen(url))["secret"]
def get_s3_secret():
aws_access_key = None
aws_secret_access_key = None
if secret_from_taskcluster:
secret = get_taskcluster_secret("s3-upload-credentials")
aws_access_key = secret["aws_access_key_id"]
aws_secret_access_key = secret["aws_secret_access_key"]
return (aws_access_key, aws_secret_access_key)
def nightly_filename(package, timestamp):
return '{}-{}'.format(
timestamp.isoformat() + 'Z', # The `Z` denotes UTC
path.basename(package)
)
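        # For example (hypothetical values), a package named
        # "servo-tech-demo.tar.gz" built at 2019-05-01 00:00 UTC becomes
        # "2019-05-01T00:00:00Z-servo-tech-demo.tar.gz".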
def upload_to_s3(platform, package, timestamp):
(aws_access_key, aws_secret_access_key) = get_s3_secret()
s3 = boto3.client(
's3',
aws_access_key_id=aws_access_key,
aws_secret_access_key=aws_secret_access_key
)
BUCKET = 'servo-builds'
nightly_dir = 'nightly/{}'.format(platform)
filename = nightly_filename(package, timestamp)
package_upload_key = '{}/{}'.format(nightly_dir, filename)
extension = path.basename(package).partition('.')[2]
latest_upload_key = '{}/servo-latest.{}'.format(nightly_dir, extension)
s3.upload_file(package, BUCKET, package_upload_key)
copy_source = {
'Bucket': BUCKET,
'Key': package_upload_key,
}
s3.copy(copy_source, BUCKET, latest_upload_key)
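        # Illustrative key layout (hypothetical date), assuming a linux
        # nightly uploaded on 2019-05-01:
        #   nightly/linux/2019-05-01T00:00:00Z-servo-tech-demo.tar.gz
        #   nightly/linux/servo-latest.tar.gz
        # partition('.')[2] keeps the compound extension ("tar.gz"), so the
        # "latest" alias always mirrors the packaged format.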
def update_maven(directory):
(aws_access_key, aws_secret_access_key) = get_s3_secret()
s3 = boto3.client(
's3',
aws_access_key_id=aws_access_key,
aws_secret_access_key=aws_secret_access_key
)
BUCKET = 'servo-builds'
nightly_dir = 'nightly/maven'
dest_key_base = directory.replace("target/android/gradle/servoview/maven", nightly_dir)
if dest_key_base[-1] == '/':
dest_key_base = dest_key_base[:-1]
# Given a directory with subdirectories like 0.0.1.20181005.caa4d190af...
for artifact_dir in os.listdir(directory):
base_dir = os.path.join(directory, artifact_dir)
if not os.path.isdir(base_dir):
continue
package_upload_base = "{}/{}".format(dest_key_base, artifact_dir)
# Upload all of the files inside the subdirectory.
for f in os.listdir(base_dir):
file_upload_key = "{}/{}".format(package_upload_base, f)
print("Uploading %s to %s" % (os.path.join(base_dir, f), file_upload_key))
s3.upload_file(os.path.join(base_dir, f), BUCKET, file_upload_key)
def update_brew(package, timestamp):
print("Updating brew formula")
package_url = 'https://download.servo.org/nightly/macbrew/{}'.format(
nightly_filename(package, timestamp)
)
with open(package) as p:
digest = hashlib.sha256(p.read()).hexdigest()
brew_version = timestamp.strftime('%Y.%m.%d')
with TemporaryDirectory(prefix='homebrew-servo') as tmp_dir:
def call_git(cmd, **kwargs):
subprocess.check_call(
['git', '-C', tmp_dir] + cmd,
**kwargs
)
call_git([
'clone',
'https://github.com/servo/homebrew-servo.git',
'.',
])
script_dir = path.dirname(path.realpath(__file__))
with open(path.join(script_dir, 'servo-binary-formula.rb.in')) as f:
formula = f.read()
formula = formula.replace('PACKAGEURL', package_url)
formula = formula.replace('SHA', digest)
formula = formula.replace('VERSION', brew_version)
with open(path.join(tmp_dir, 'Formula', 'servo-bin.rb'), 'w') as f:
f.write(formula)
call_git(['add', path.join('.', 'Formula', 'servo-bin.rb')])
call_git([
'-c', 'user.name=Tom Servo',
'-c', '[email protected]',
'commit',
'--message=Version Bump: {}'.format(brew_version),
])
if secret_from_taskcluster:
token = get_taskcluster_secret('github-homebrew-token')["token"]
else:
token = os.environ['GITHUB_HOMEBREW_TOKEN']
push_url = 'https://{}@github.com/servo/homebrew-servo.git'
# TODO(aneeshusa): Use subprocess.DEVNULL with Python 3.3+
with open(os.devnull, 'wb') as DEVNULL:
call_git([
'push',
'-qf',
push_url.format(token),
'master',
], stdout=DEVNULL, stderr=DEVNULL)
timestamp = datetime.utcnow().replace(microsecond=0)
for package in PACKAGES[platform]:
if path.isdir(package):
continue
if not path.isfile(package):
print("Could not find package for {} at {}".format(
platform,
package
), file=sys.stderr)
return 1
upload_to_s3(platform, package, timestamp)
if platform == 'maven':
for package in PACKAGES[platform]:
update_maven(package)
if platform == 'macbrew':
packages = PACKAGES[platform]
assert(len(packages) == 1)
update_brew(packages[0], timestamp)
return 0
def build_uwp(platforms, dev, msbuild_dir):
if any(map(lambda p: p not in ['x64', 'x86', 'arm64'], platforms)):
raise Exception("Unsupported appx platforms: " + str(platforms))
if dev and len(platforms) > 1:
raise Exception("Debug package with multiple architectures is unsupported")
if dev:
Configuration = "Debug"
else:
Configuration = "Release"
msbuild = path.join(msbuild_dir, "msbuild.exe")
build_file_template = path.join('support', 'hololens', 'package.msbuild')
with open(build_file_template) as f:
template_contents = f.read()
build_file = tempfile.NamedTemporaryFile(delete=False)
build_file.write(
template_contents
.replace("%%BUILD_PLATFORMS%%", ';'.join(platforms))
.replace("%%PACKAGE_PLATFORMS%%", '|'.join(platforms))
.replace("%%CONFIGURATION%%", Configuration)
.replace("%%SOLUTION%%", path.join(os.getcwd(), 'support', 'hololens', 'ServoApp.sln'))
)
build_file.close()
# Generate an appxbundle.
subprocess.check_call([msbuild, "/m", build_file.name])
os.unlink(build_file.name)
print("Creating ZIP")
out_dir = path.join(os.getcwd(), 'support', 'hololens', 'AppPackages', 'ServoApp')
name = 'ServoApp_1.0.0.0_%sTest' % ('Debug_' if dev else '')
artifacts_dir = path.join(out_dir, name)
zip_path = path.join(out_dir, name + ".zip")
archive_deterministically(artifacts_dir, zip_path, prepend_path='servo/')
print("Packaged Servo into " + zip_path)
| mpl-2.0 | -4,604,749,228,839,000,600 | 40.135309 | 120 | 0.550327 | false |
wkschwartz/django | django/utils/crypto.py | 1 | 3139 | """
Django's standard crypto functions and utilities.
"""
import hashlib
import hmac
import secrets
import warnings
from django.conf import settings
from django.utils.deprecation import RemovedInDjango40Warning
from django.utils.encoding import force_bytes
class InvalidAlgorithm(ValueError):
"""Algorithm is not supported by hashlib."""
pass
def salted_hmac(key_salt, value, secret=None, *, algorithm='sha1'):
"""
Return the HMAC of 'value', using a key generated from key_salt and a
secret (which defaults to settings.SECRET_KEY). Default algorithm is SHA1,
but any algorithm name supported by hashlib can be passed.
A different key_salt should be passed in for every application of HMAC.
"""
if secret is None:
secret = settings.SECRET_KEY
key_salt = force_bytes(key_salt)
secret = force_bytes(secret)
try:
hasher = getattr(hashlib, algorithm)
except AttributeError as e:
raise InvalidAlgorithm(
'%r is not an algorithm accepted by the hashlib module.'
% algorithm
) from e
# We need to generate a derived key from our base key. We can do this by
# passing the key_salt and our base key through a pseudo-random function.
key = hasher(key_salt + secret).digest()
# If len(key_salt + secret) > block size of the hash algorithm, the above
# line is redundant and could be replaced by key = key_salt + secret, since
# the hmac module does the same thing for keys longer than the block size.
# However, we need to ensure that we *always* do this.
return hmac.new(key, msg=force_bytes(value), digestmod=hasher)
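# Illustrative usage, not part of this module's API (names and values are
# hypothetical): derive a purpose-specific MAC for a token.
#
#     mac = salted_hmac('myapp.password_reset', 'user-42', algorithm='sha256')
#     digest = mac.hexdigest()
#
# Using a distinct key_salt for each application of HMAC keeps the derived
# keys independent, so a MAC minted for one purpose cannot be replayed for
# another.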
NOT_PROVIDED = object() # RemovedInDjango40Warning.
RANDOM_STRING_CHARS = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'
# RemovedInDjango40Warning: when the deprecation ends, replace with:
# def get_random_string(length, allowed_chars=RANDOM_STRING_CHARS):
def get_random_string(length=NOT_PROVIDED, allowed_chars=RANDOM_STRING_CHARS):
"""
Return a securely generated random string.
The bit length of the returned value can be calculated with the formula:
log_2(len(allowed_chars)^length)
For example, with default `allowed_chars` (26+26+10), this gives:
* length: 12, bit length =~ 71 bits
* length: 22, bit length =~ 131 bits
"""
if length is NOT_PROVIDED:
warnings.warn(
'Not providing a length argument is deprecated.',
RemovedInDjango40Warning,
)
length = 12
return ''.join(secrets.choice(allowed_chars) for i in range(length))
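# Illustrative usage (hypothetical lengths and alphabets): with the default
# 62-character alphabet, get_random_string(32) carries roughly
# log2(62 ** 32) =~ 190 bits of entropy; a numeric one-time code could use
# get_random_string(6, allowed_chars='0123456789').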
def constant_time_compare(val1, val2):
"""Return True if the two strings are equal, False otherwise."""
return secrets.compare_digest(force_bytes(val1), force_bytes(val2))
def pbkdf2(password, salt, iterations, dklen=0, digest=None):
"""Return the hash of password using pbkdf2."""
if digest is None:
digest = hashlib.sha256
dklen = dklen or None
password = force_bytes(password)
salt = force_bytes(salt)
return hashlib.pbkdf2_hmac(digest().name, password, salt, iterations, dklen)
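# Illustrative usage (hypothetical parameters): derive a 32-byte key from a
# password and a per-user salt.
#
#     key = pbkdf2('passphrase', b'per-user-salt', iterations=100000,
#                  dklen=32)
#
# The iteration count is the main cost knob; Django's password hashers manage
# it themselves, so calling this directly is only needed for custom key
# derivation.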
| bsd-3-clause | 7,145,878,690,771,512,000 | 35.08046 | 86 | 0.697674 | false |
Yelp/pootle | pootle/apps/pootle_store/views.py | 1 | 37944 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
from itertools import groupby
from django.conf import settings
from django.contrib.auth import get_user_model
from django.core.exceptions import ObjectDoesNotExist, PermissionDenied
from django.core.urlresolvers import reverse
from django.db.models import Max, Q
from django.http import Http404
from django.shortcuts import redirect
from django.template import loader, RequestContext
from django.utils.safestring import mark_safe
from django.utils.translation import to_locale, ugettext as _
from django.utils.translation.trans_real import parse_accept_lang_header
from django.utils import timezone
from django.views.decorators.cache import never_cache
from django.views.decorators.http import require_http_methods
from translate.lang import data
from pootle.core.dateparse import parse_datetime
from pootle.core.decorators import (get_path_obj, get_resource,
permission_required)
from pootle.core.exceptions import Http400
from pootle.core.http import JsonResponse, JsonResponseBadRequest
from pootle_app.models.directory import Directory
from pootle_app.models.permissions import check_permission, check_user_permission
from pootle_misc.checks import get_category_id, check_names
from pootle_misc.forms import make_search_form
from pootle_misc.util import ajax_required, to_int, get_date_interval
from pootle_statistics.models import (Submission, SubmissionFields,
SubmissionTypes)
from .decorators import get_unit_context
from .fields import to_python
from .forms import (unit_comment_form_factory, unit_form_factory,
highlight_whitespace)
from .models import Unit, SuggestionStates
from .templatetags.store_tags import (highlight_diffs, pluralize_source,
pluralize_target)
from .util import (UNTRANSLATED, FUZZY, TRANSLATED, STATES_MAP,
find_altsrcs)
#: Mapping of allowed sorting criteria.
#: Keys are supported query strings, values are the field + order that
#: will be used against the DB.
ALLOWED_SORTS = {
'units': {
'priority': 'priority',
'oldest': 'submitted_on',
'newest': '-submitted_on',
},
'suggestions': {
'oldest': 'suggestion__creation_time',
'newest': '-suggestion__creation_time',
},
'submissions': {
'oldest': 'submission__creation_time',
'newest': '-submission__creation_time',
},
}
#: List of fields from `ALLOWED_SORTS` that can be sorted by simply using
#: `order_by(field)`
SIMPLY_SORTED = ['units']
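# Illustrative lookup (hypothetical request): "?filter=user-suggestions&sort=oldest"
# resolves to ALLOWED_SORTS['suggestions']['oldest'], i.e. ordering on
# 'suggestion__creation_time'. Only the 'units' criteria can be applied with a
# plain order_by(); the others need the Max() annotation used further below.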
def get_alt_src_langs(request, user, translation_project):
language = translation_project.language
project = translation_project.project
source_language = project.source_language
langs = user.alt_src_langs.exclude(
id__in=(language.id, source_language.id)
).filter(translationproject__project=project)
if not user.alt_src_langs.count():
from pootle_language.models import Language
accept = request.META.get('HTTP_ACCEPT_LANGUAGE', '')
for accept_lang, unused in parse_accept_lang_header(accept):
if accept_lang == '*':
continue
simplified = data.simplify_to_common(accept_lang)
normalized = to_locale(data.normalize_code(simplified))
code = to_locale(accept_lang)
if (normalized in
('en', 'en_US', source_language.code, language.code) or
code in ('en', 'en_US', source_language.code, language.code)):
continue
langs = Language.objects.filter(
code__in=(normalized, code),
translationproject__project=project,
)
if langs.count():
break
return langs
def get_search_query(form, units_queryset):
words = form.cleaned_data['search'].split()
result = units_queryset.none()
if 'source' in form.cleaned_data['sfields']:
subresult = units_queryset
for word in words:
subresult = subresult.filter(source_f__icontains=word)
result = result | subresult
if 'target' in form.cleaned_data['sfields']:
subresult = units_queryset
for word in words:
subresult = subresult.filter(target_f__icontains=word)
result = result | subresult
if 'notes' in form.cleaned_data['sfields']:
translator_subresult = units_queryset
developer_subresult = units_queryset
for word in words:
translator_subresult = translator_subresult.filter(
translator_comment__icontains=word,
)
developer_subresult = developer_subresult.filter(
developer_comment__icontains=word,
)
result = result | translator_subresult | developer_subresult
if 'locations' in form.cleaned_data['sfields']:
subresult = units_queryset
for word in words:
subresult = subresult.filter(locations__icontains=word)
result = result | subresult
return result
def get_search_exact_query(form, units_queryset):
phrase = form.cleaned_data['search']
result = units_queryset.none()
if 'source' in form.cleaned_data['sfields']:
subresult = units_queryset.filter(source_f__contains=phrase)
result = result | subresult
if 'target' in form.cleaned_data['sfields']:
subresult = units_queryset.filter(target_f__contains=phrase)
result = result | subresult
if 'notes' in form.cleaned_data['sfields']:
translator_subresult = units_queryset
developer_subresult = units_queryset
translator_subresult = translator_subresult.filter(
translator_comment__contains=phrase,
)
developer_subresult = developer_subresult.filter(
developer_comment__contains=phrase,
)
result = result | translator_subresult | developer_subresult
if 'locations' in form.cleaned_data['sfields']:
subresult = units_queryset.filter(locations__contains=phrase)
result = result | subresult
return result
def get_search_step_query(form, units_queryset):
"""Narrows down units query to units matching search string."""
if 'exact' in form.cleaned_data['soptions']:
return get_search_exact_query(form, units_queryset)
return get_search_query(form, units_queryset)
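# Illustrative behaviour (hypothetical form data): with sfields=['source'] and
# search='hello world', the default mode chains one icontains filter per word,
# whereas soptions=['exact'] issues a single contains filter on the whole
# phrase 'hello world'.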
def get_step_query(request, units_queryset):
"""Narrows down unit query to units matching conditions in GET."""
if 'filter' in request.GET:
unit_filter = request.GET['filter']
username = request.GET.get('user', None)
modified_since = request.GET.get('modified-since', None)
month = request.GET.get('month', None)
sort_by_param = request.GET.get('sort', None)
sort_on = 'units'
user = request.profile
if username is not None:
User = get_user_model()
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
pass
if unit_filter:
match_queryset = units_queryset.none()
if unit_filter == 'all':
match_queryset = units_queryset
elif unit_filter == 'translated':
match_queryset = units_queryset.filter(state=TRANSLATED)
elif unit_filter == 'untranslated':
match_queryset = units_queryset.filter(state=UNTRANSLATED)
elif unit_filter == 'fuzzy':
match_queryset = units_queryset.filter(state=FUZZY)
elif unit_filter == 'incomplete':
match_queryset = units_queryset.filter(
Q(state=UNTRANSLATED) | Q(state=FUZZY),
)
elif unit_filter == 'suggestions':
match_queryset = units_queryset.filter(
suggestion__state=SuggestionStates.PENDING
).distinct()
elif unit_filter in ('my-suggestions', 'user-suggestions'):
match_queryset = units_queryset.filter(
suggestion__state=SuggestionStates.PENDING,
suggestion__user=user,
).distinct()
sort_on = 'suggestions'
elif unit_filter == 'user-suggestions-accepted':
match_queryset = units_queryset.filter(
suggestion__state=SuggestionStates.ACCEPTED,
suggestion__user=user,
).distinct()
elif unit_filter == 'user-suggestions-rejected':
match_queryset = units_queryset.filter(
suggestion__state=SuggestionStates.REJECTED,
suggestion__user=user,
).distinct()
elif unit_filter in ('my-submissions', 'user-submissions'):
match_queryset = units_queryset.filter(
submission__submitter=user,
submission__type__in=SubmissionTypes.EDIT_TYPES,
).distinct()
sort_on = 'submissions'
elif (unit_filter in ('my-submissions-overwritten',
'user-submissions-overwritten')):
match_queryset = units_queryset.filter(
submission__submitter=user,
submission__type__in=SubmissionTypes.EDIT_TYPES,
).exclude(submitted_by=user).distinct()
elif unit_filter == 'checks':
if 'checks' in request.GET:
checks = request.GET['checks'].split(',')
if checks:
match_queryset = units_queryset.filter(
qualitycheck__false_positive=False,
qualitycheck__name__in=checks,
).distinct()
elif 'category' in request.GET:
category_name = request.GET['category']
try:
category = get_category_id(category_name)
except KeyError:
raise Http404
match_queryset = units_queryset.filter(
qualitycheck__false_positive=False,
qualitycheck__category=category,
).distinct()
if modified_since is not None:
datetime_obj = parse_datetime(modified_since)
if datetime_obj is not None:
match_queryset = match_queryset.filter(
submitted_on__gt=datetime_obj,
).distinct()
if month is not None:
[start, end] = get_date_interval(month)
match_queryset = match_queryset.filter(
submitted_on__gte=start,
submitted_on__lte=end,
).distinct()
sort_by = ALLOWED_SORTS[sort_on].get(sort_by_param, None)
if sort_by is not None:
if sort_on in SIMPLY_SORTED:
if sort_by == 'priority':
# TODO: Replace the following extra() with Coalesce
# https://docs.djangoproject.com/en/1.8/ref/models/database-functions/#coalesce
# once we drop support for Django<1.8.x:
# .annotate(
# sort_by_field=Coalesce(
# Max("vfolders__priority"),
# Value(1)
# )
# ).order_by("-sort_by_field")
match_queryset = match_queryset.extra(select={'sort_by_field': """
SELECT COALESCE(MAX(virtualfolder_virtualfolder.priority), 1)
FROM virtualfolder_virtualfolder
INNER JOIN virtualfolder_virtualfolder_units
ON virtualfolder_virtualfolder.id = virtualfolder_virtualfolder_units.virtualfolder_id
WHERE virtualfolder_virtualfolder_units.unit_id = pootle_store_unit.id
"""}).extra(order_by=['-sort_by_field'])
else:
match_queryset = match_queryset.order_by(sort_by)
else:
# Omit leading `-` sign
if sort_by[0] == '-':
max_field = sort_by[1:]
sort_order = '-sort_by_field'
else:
max_field = sort_by
sort_order = 'sort_by_field'
# It's necessary to use `Max()` here because we can't
# use `distinct()` and `order_by()` at the same time
                    # (unless PostgreSQL is used and `distinct(field_name)`)
match_queryset = match_queryset \
.annotate(sort_by_field=Max(max_field)) \
.order_by(sort_order)
units_queryset = match_queryset
if 'search' in request.GET and 'sfields' in request.GET:
# Accept `sfields` to be a comma-separated string of fields (#46)
GET = request.GET.copy()
sfields = GET['sfields']
if isinstance(sfields, unicode) and u',' in sfields:
GET.setlist('sfields', sfields.split(u','))
# use the search form for validation only
search_form = make_search_form(GET)
if search_form.is_valid():
units_queryset = get_search_step_query(search_form, units_queryset)
return units_queryset
#
# Views used with XMLHttpRequest requests.
#
def _filter_ctx_units(units_qs, unit, how_many, gap=0):
"""Returns ``how_many``*2 units that are before and after ``index``."""
result = {'before': [], 'after': []}
if how_many and unit.index - gap > 0:
before = units_qs.filter(store=unit.store_id, index__lt=unit.index) \
.order_by('-index')[gap:how_many+gap]
result['before'] = _build_units_list(before, reverse=True)
result['before'].reverse()
# FIXME: can we avoid this query if length is known?
if how_many:
after = units_qs.filter(store=unit.store_id,
index__gt=unit.index)[gap:how_many+gap]
result['after'] = _build_units_list(after)
return result
def _prepare_unit(unit):
"""Constructs a dictionary with relevant `unit` data."""
return {
'id': unit.id,
'url': unit.get_translate_url(),
'isfuzzy': unit.isfuzzy(),
'source': [source[1] for source in pluralize_source(unit)],
'target': [target[1] for target in pluralize_target(unit)],
}
def _path_units_with_meta(path, units):
"""Constructs a dictionary which contains a list of `units`
corresponding to `path` as well as its metadata.
"""
meta = None
units_list = []
for unit in iter(units):
if meta is None:
# XXX: Watch out for the query count
store = unit.store
tp = store.translation_project
project = tp.project
meta = {
'source_lang': project.source_language.code,
'source_dir': project.source_language.direction,
'target_lang': tp.language.code,
'target_dir': tp.language.direction,
'project_code': project.code,
'project_style': project.checkstyle,
}
units_list.append(_prepare_unit(unit))
return {
path: {
'meta': meta,
'units': units_list,
},
}
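# Sketch of the structure returned above (hypothetical path and ids):
#
#     {'/ru/project/store.po': {
#         'meta': {'source_lang': 'en', 'target_lang': 'ru', ...},
#         'units': [{'id': 1, 'url': '...', 'isfuzzy': False,
#                    'source': [...], 'target': [...]}, ...]}}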
def _build_units_list(units, reverse=False):
"""Given a list/queryset of units, builds a list with the unit data
contained in a dictionary ready to be returned as JSON.
:return: A list with unit id, source, and target texts. In case of
having plural forms, a title for the plural form is also provided.
"""
return_units = []
for unit in iter(units):
return_units.append(_prepare_unit(unit))
return return_units
def _get_critical_checks_snippet(request, unit):
"""Retrieves the critical checks snippet.
:param request: an `HttpRequest` object
:param unit: a `Unit` instance for which critical checks need to be
rendered.
:return: rendered HTML snippet with the failing checks, or `None` if
there are no critical failing checks.
"""
if not unit.has_critical_checks():
return None
can_review = check_user_permission(request.profile, 'review',
unit.store.parent)
ctx = {
'canreview': can_review,
'unit': unit,
}
template = loader.get_template('editor/units/xhr_checks.html')
return template.render(RequestContext(request, ctx))
@ajax_required
def get_units(request):
"""Gets source and target texts and its metadata.
:return: A JSON-encoded string containing the source and target texts
grouped by the store they belong to.
The optional `count` GET parameter defines the chunk size to
consider. The user's preference will be used by default.
When the `initial` GET parameter is present, a sorted list of
the result set ids will be returned too.
"""
pootle_path = request.GET.get('path', None)
if pootle_path is None:
raise Http400(_('Arguments missing.'))
User = get_user_model()
request.profile = User.get(request.user)
limit = request.profile.get_unit_rows()
vfolder = None
if 'virtualfolder' in settings.INSTALLED_APPS:
from virtualfolder.helpers import extract_vfolder_from_path
vfolder, pootle_path = extract_vfolder_from_path(pootle_path)
units_qs = Unit.objects.get_for_path(pootle_path, request.profile)
if vfolder is not None:
units_qs = units_qs.filter(vfolders=vfolder)
units_qs = units_qs.select_related(
'store__translation_project__project',
'store__translation_project__language',
)
step_queryset = get_step_query(request, units_qs)
is_initial_request = request.GET.get('initial', False)
chunk_size = request.GET.get('count', limit)
uids_param = filter(None, request.GET.get('uids', '').split(u','))
uids = filter(None, map(to_int, uids_param))
units = []
unit_groups = []
uid_list = []
if is_initial_request:
sort_by_field = None
if len(step_queryset.query.order_by) == 1:
sort_by_field = step_queryset.query.order_by[0]
sort_on = None
for key, item in ALLOWED_SORTS.items():
if sort_by_field in item.values():
sort_on = key
break
if sort_by_field is None or sort_on == 'units':
# Since `extra()` has been used before, it's necessary to explicitly
            # request the `store__pootle_path` field. This is a subtlety in
# Django's ORM.
uid_list = [u['id'] for u in step_queryset.values('id',
'store__pootle_path')]
else:
# Not using `values_list()` here because it doesn't know about all
# existing relations when `extra()` has been used before in the
# queryset. This affects annotated names such as those ending in
# `__max`, where Django thinks we're trying to lookup a field on a
# relationship field. That's why `sort_by_field` alias for `__max`
# is used here. This alias must be queried in
# `values('sort_by_field', 'id')` with `id` otherwise
# Django looks for `sort_by_field` field in the initial table.
# https://code.djangoproject.com/ticket/19434
uid_list = [u['id'] for u in step_queryset.values('id',
'sort_by_field',
'store__pootle_path')]
if len(uids) == 1:
try:
uid = uids[0]
index = uid_list.index(uid)
begin = max(index - chunk_size, 0)
end = min(index + chunk_size + 1, len(uid_list))
uids = uid_list[begin:end]
except ValueError:
raise Http404 # `uid` not found in `uid_list`
else:
count = 2 * chunk_size
uids = uid_list[:count]
if not units and uids:
units = step_queryset.filter(id__in=uids)
units_by_path = groupby(units, lambda x: x.store.pootle_path)
for pootle_path, units in units_by_path:
unit_groups.append(_path_units_with_meta(pootle_path, units))
response = {
'unitGroups': unit_groups,
}
if uid_list:
response['uIds'] = uid_list
return JsonResponse(response)
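# Sketch of the JSON payload produced above (hypothetical ids and path):
#
#     {"unitGroups": [{"/ru/project/store.po": {"meta": {...},
#                                               "units": [{...}, ...]}}],
#      "uIds": [12, 13, 14]}
#
# The "uIds" list is only included on the initial request.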
@ajax_required
@get_unit_context('view')
def get_more_context(request, unit):
"""Retrieves more context units.
:return: An object in JSON notation that contains the source and target
texts for units that are in the context of unit ``uid``.
"""
store = request.store
json = {}
gap = int(request.GET.get('gap', 0))
qty = int(request.GET.get('qty', 1))
json["ctx"] = _filter_ctx_units(store.units, unit, qty, gap)
return JsonResponse(json)
@never_cache
@get_unit_context('view')
def timeline(request, unit):
"""Returns a JSON-encoded string including the changes to the unit
rendered in HTML.
"""
timeline = Submission.objects.filter(
unit=unit,
).filter(
Q(field__in=[
SubmissionFields.TARGET, SubmissionFields.STATE,
SubmissionFields.COMMENT, SubmissionFields.NONE
]) |
Q(type__in=SubmissionTypes.SUGGESTION_TYPES)
).exclude(
field=SubmissionFields.COMMENT,
creation_time=unit.commented_on
).order_by("id")
timeline = timeline.select_related("submitter__user",
"translation_project__language")
User = get_user_model()
entries_group = []
context = {}
# Group by submitter id and creation_time because
# different submissions can have same creation time
for key, values in \
groupby(timeline,
key=lambda x: "%d\001%s" % (x.submitter.id, x.creation_time)):
entry_group = {
'entries': [],
}
for item in values:
# Only add creation_time information for the whole entry group once
entry_group['datetime'] = item.creation_time
# Only add submitter information for the whole entry group once
entry_group.setdefault('submitter', item.submitter)
context.setdefault('language', item.translation_project.language)
entry = {
'field': item.field,
'field_name': SubmissionFields.NAMES_MAP.get(item.field, None),
'type': item.type,
}
if item.field == SubmissionFields.STATE:
entry['old_value'] = STATES_MAP[int(to_python(item.old_value))]
entry['new_value'] = STATES_MAP[int(to_python(item.new_value))]
elif item.suggestion:
entry.update({
'suggestion_text': item.suggestion.target,
'suggestion_description': mark_safe(item.get_suggestion_description()),
})
elif item.quality_check:
check_name = item.quality_check.name
entry.update({
'check_name': check_name,
'check_display_name': check_names[check_name],
'checks_url': u''.join([
reverse('pootle-checks-descriptions'), '#', check_name,
]),
})
else:
entry['new_value'] = to_python(item.new_value)
entry_group['entries'].append(entry)
entries_group.append(entry_group)
if (len(entries_group) > 0 and
entries_group[0]['datetime'] == unit.creation_time):
entries_group[0]['created'] = True
else:
created = {
'created': True,
'submitter': User.objects.get_system_user(),
}
if unit.creation_time:
created['datetime'] = unit.creation_time
entries_group[:0] = [created]
# Let's reverse the chronological order
entries_group.reverse()
context['entries_group'] = entries_group
# The client will want to confirm that the response is relevant for
# the unit on screen at the time of receiving this, so we add the uid.
json = {'uid': unit.id}
t = loader.get_template('editor/units/xhr_timeline.html')
c = RequestContext(request, context)
json['timeline'] = t.render(c).replace('\n', '')
return JsonResponse(json)
@ajax_required
@require_http_methods(['POST', 'DELETE'])
@get_unit_context('translate')
def comment(request, unit):
"""Dispatches the comment action according to the HTTP verb."""
if request.method == 'DELETE':
return delete_comment(request, unit)
elif request.method == 'POST':
return save_comment(request, unit)
def delete_comment(request, unit):
"""Deletes a comment by blanking its contents and records a new
submission.
"""
unit.commented_by = None
unit.commented_on = None
language = request.translation_project.language
comment_form_class = unit_comment_form_factory(language)
form = comment_form_class({}, instance=unit, request=request)
if form.is_valid():
form.save()
return JsonResponse({})
return JsonResponseBadRequest({'msg': _("Failed to remove comment.")})
def save_comment(request, unit):
"""Stores a new comment for the given ``unit``.
:return: If the form validates, the cleaned comment is returned.
An error message is returned otherwise.
"""
# Update current unit instance's attributes
unit.commented_by = request.profile
unit.commented_on = timezone.now().replace(microsecond=0)
language = request.translation_project.language
form = unit_comment_form_factory(language)(request.POST, instance=unit,
request=request)
if form.is_valid():
form.save()
user = request.user
directory = unit.store.parent
ctx = {
'unit': unit,
'language': language,
'cantranslate': check_user_permission(user, 'translate', directory),
'cansuggest': check_user_permission(user, 'suggest', directory),
}
t = loader.get_template('editor/units/xhr_comment.html')
c = RequestContext(request, ctx)
return JsonResponse({'comment': t.render(c)})
return JsonResponseBadRequest({'msg': _("Comment submission failed.")})
@never_cache
@ajax_required
@get_unit_context('view')
def get_edit_unit(request, unit):
"""Given a store path ``pootle_path`` and unit id ``uid``, gathers all the
necessary information to build the editing widget.
:return: A templatised editing widget is returned within the ``editor``
variable and paging information is also returned if the page
number has changed.
"""
json = {}
translation_project = request.translation_project
language = translation_project.language
if unit.hasplural():
snplurals = len(unit.source.strings)
else:
snplurals = None
form_class = unit_form_factory(language, snplurals, request)
form = form_class(instance=unit, request=request)
comment_form_class = unit_comment_form_factory(language)
comment_form = comment_form_class({}, instance=unit, request=request)
store = unit.store
directory = store.parent
user = request.profile
project = translation_project.project
alt_src_langs = get_alt_src_langs(request, user, translation_project)
altsrcs = find_altsrcs(unit, alt_src_langs, store=store, project=project)
source_language = translation_project.project.source_language
sources = {
unit.store.translation_project.language.code: unit.target_f.strings
for unit in altsrcs
}
sources[source_language.code] = unit.source_f.strings
priority = None
if 'virtualfolder' in settings.INSTALLED_APPS:
vfolder_pk = request.GET.get('vfolder', '')
if vfolder_pk:
from virtualfolder.models import VirtualFolder
try:
# If we are translating a virtual folder, then display its
# priority.
# Note that the passed virtual folder pk might be invalid.
priority = VirtualFolder.objects.get(pk=vfolder_pk).priority
except VirtualFolder.DoesNotExist:
pass
if priority is None:
# Retrieve the unit top priority, if any. This can happen if we are
# not in a virtual folder or if the passed virtual folder pk is
# invalid.
priority = unit.vfolders.aggregate(
priority=Max('priority')
)['priority']
template_vars = {
'unit': unit,
'form': form,
'comment_form': comment_form,
'priority': priority,
'store': store,
'directory': directory,
'profile': user,
'user': request.user,
'project': project,
'language': language,
'source_language': source_language,
'cantranslate': check_user_permission(user, "translate", directory),
'cansuggest': check_user_permission(user, "suggest", directory),
'canreview': check_user_permission(user, "review", directory),
'is_admin': check_user_permission(user, 'administrate', directory),
'altsrcs': altsrcs,
}
if translation_project.project.is_terminology or store.is_terminology:
t = loader.get_template('editor/units/term_edit.html')
else:
t = loader.get_template('editor/units/edit.html')
c = RequestContext(request, template_vars)
json.update({
'editor': t.render(c),
'tm_suggestions': unit.get_tm_suggestions(),
'is_obsolete': unit.isobsolete(),
'sources': sources,
})
return JsonResponse(json)
@get_unit_context('view')
def permalink_redirect(request, unit):
return redirect(request.build_absolute_uri(unit.get_translate_url()))
@ajax_required
@get_path_obj
@permission_required('view')
@get_resource
def get_qualitycheck_stats(request, *args, **kwargs):
failing_checks = request.resource_obj.get_checks()
return JsonResponse(failing_checks if failing_checks is not None else {})
@ajax_required
@get_path_obj
@permission_required('view')
@get_resource
def get_stats(request, *args, **kwargs):
stats = request.resource_obj.get_stats()
if (isinstance(request.resource_obj, Directory) and
'virtualfolder' in settings.INSTALLED_APPS):
stats['vfolders'] = {}
for vfolder_treeitem in request.resource_obj.vf_treeitems.iterator():
if request.user.is_superuser or vfolder_treeitem.is_visible:
stats['vfolders'][vfolder_treeitem.code] = \
vfolder_treeitem.get_stats(include_children=False)
return JsonResponse(stats)
@ajax_required
@get_unit_context('translate')
def submit(request, unit):
"""Processes translation submissions and stores them in the database.
:return: An object in JSON notation that contains the previous and last
units for the unit next to unit ``uid``.
"""
json = {}
translation_project = request.translation_project
language = translation_project.language
if unit.hasplural():
snplurals = len(unit.source.strings)
else:
snplurals = None
# Store current time so that it is the same for all submissions
current_time = timezone.now()
form_class = unit_form_factory(language, snplurals, request)
form = form_class(request.POST, instance=unit, request=request)
if form.is_valid():
if form.updated_fields:
for field, old_value, new_value in form.updated_fields:
sub = Submission(
creation_time=current_time,
translation_project=translation_project,
submitter=request.profile,
unit=unit,
store=unit.store,
field=field,
type=SubmissionTypes.NORMAL,
old_value=old_value,
new_value=new_value,
similarity=form.cleaned_data['similarity'],
mt_similarity=form.cleaned_data['mt_similarity'],
)
sub.save()
# Update current unit instance's attributes
# important to set these attributes after saving Submission
# because we need to access the unit's state before it was saved
if SubmissionFields.TARGET in (f[0] for f in form.updated_fields):
form.instance.submitted_by = request.profile
form.instance.submitted_on = current_time
form.instance.reviewed_by = None
form.instance.reviewed_on = None
form.instance._log_user = request.profile
form.save()
json['checks'] = _get_critical_checks_snippet(request, unit)
json['user_score'] = request.profile.public_score
return JsonResponse(json)
return JsonResponseBadRequest({'msg': _("Failed to process submission.")})
@ajax_required
@get_unit_context('suggest')
def suggest(request, unit):
"""Processes translation suggestions and stores them in the database.
:return: An object in JSON notation that contains the previous and last
units for the unit next to unit ``uid``.
"""
json = {}
translation_project = request.translation_project
language = translation_project.language
if unit.hasplural():
snplurals = len(unit.source.strings)
else:
snplurals = None
form_class = unit_form_factory(language, snplurals, request)
form = form_class(request.POST, instance=unit, request=request)
if form.is_valid():
if form.instance._target_updated:
# TODO: Review if this hackish method is still necessary
# HACKISH: django 1.2 stupidly modifies instance on model form
# validation, reload unit from db
unit = Unit.objects.get(id=unit.id)
unit.add_suggestion(
form.cleaned_data['target_f'],
user=request.profile,
similarity=form.cleaned_data['similarity'],
mt_similarity=form.cleaned_data['mt_similarity'],
)
json['user_score'] = request.profile.public_score
return JsonResponse(json)
return JsonResponseBadRequest({'msg': _("Failed to process suggestion.")})
@ajax_required
@require_http_methods(['POST', 'DELETE'])
def manage_suggestion(request, uid, sugg_id):
"""Dispatches the suggestion action according to the HTTP verb."""
if request.method == 'DELETE':
return reject_suggestion(request, uid, sugg_id)
elif request.method == 'POST':
return accept_suggestion(request, uid, sugg_id)
@get_unit_context()
def reject_suggestion(request, unit, suggid):
json = {
'udbid': unit.id,
'sugid': suggid,
}
try:
sugg = unit.suggestion_set.get(id=suggid)
except ObjectDoesNotExist:
raise Http404
# In order to be able to reject a suggestion, users have to either:
# 1. Have `review` rights, or
# 2. Be the author of the suggestion being rejected
if (not check_permission('review', request) and
(request.user.is_anonymous() or request.user != sugg.user)):
raise PermissionDenied(_('Insufficient rights to access review mode.'))
unit.reject_suggestion(sugg, request.translation_project,
request.profile)
json['user_score'] = request.profile.public_score
return JsonResponse(json)
@get_unit_context('review')
def accept_suggestion(request, unit, suggid):
json = {
'udbid': unit.id,
'sugid': suggid,
}
try:
suggestion = unit.suggestion_set.get(id=suggid)
except ObjectDoesNotExist:
raise Http404
unit.accept_suggestion(suggestion, request.translation_project,
request.profile)
json['user_score'] = request.profile.public_score
json['newtargets'] = [highlight_whitespace(target)
for target in unit.target.strings]
json['newdiffs'] = {}
for sugg in unit.get_suggestions():
json['newdiffs'][sugg.id] = \
[highlight_diffs(unit.target.strings[i], target)
for i, target in enumerate(sugg.target.strings)]
json['checks'] = _get_critical_checks_snippet(request, unit)
return JsonResponse(json)
@ajax_required
@get_unit_context('review')
def toggle_qualitycheck(request, unit, check_id):
try:
unit.toggle_qualitycheck(check_id, bool(request.POST.get('mute')),
request.profile)
except ObjectDoesNotExist:
raise Http404
return JsonResponse({})
| gpl-3.0 | -2,427,599,192,403,181,000 | 35.171592 | 114 | 0.594402 | false |
qspin/qtaste | demo/TestSuites/PlayBack/SelectionInTree/TestScript.py | 1 | 1336 | # coding=utf-8
##
# Playback/SelectionInTree test
# <p>
# Test the node selection in a tree.
##
from qtaste import *
# update in order to cope with the javaGUI extension declared in your testbed configuration.
javaguiMI = testAPI.getJavaGUI(INSTANCE_ID=testData.getValue("JAVAGUI_INSTANCE_NAME"))
subtitler = testAPI.getSubtitler()
subtitler.setSubtitle(testData.getValue("COMMENT"))
# select the tab with tree components
javaguiMI.selectTabTitled("TABBED_PANE", "TREE_LIST_PANEL")
# get test data
component = testData.getValue("COMPONENT_NAME")
value = testData.getValue("VALUE")
def reset():
"""
@step clear the node selection
@expected no node should be selected
"""
javaguiMI.clearNodeSelection(component)
def step1():
"""
    @step select the node identified by VALUE in the tree component
    @expected the node VALUE is reported as the selected node
"""
javaguiMI.selectNode(component, value, ".")
actualSelection = javaguiMI.getSelectedNode(component, ".")
if actualSelection is None :
testAPI.stopTest(Status.FAIL, "Unable to get the selected node. No node is selected.")
elif actualSelection != value:
testAPI.stopTest(Status.FAIL, "Expected to see value '" + value + "' selected in " + component + "' but got '" + actualSelection + "'")
doStep(reset)
doStep(step1)
| lgpl-3.0 | -4,365,989,084,712,037,000 | 28.688889 | 143 | 0.705838 | false |
mardiros/creds | setup.py | 1 | 1560 | import os
import sys
from setuptools import setup, find_packages
py_version = sys.version_info[:2]
if py_version < (3, 3):
    raise Exception("creds requires Python >= 3.3.")
here = os.path.abspath(os.path.dirname(__file__))
NAME = 'creds'
with open(os.path.join(here, 'README.rst')) as readme:
README = readme.read()
with open(os.path.join(here, 'CHANGES.rst')) as changes:
CHANGES = changes.read()
requires = [
'pyramid',
'gunicorn',
'aiohttp',
'pyramid_jinja2',
'asyncio_redis',
'pyramid-kvs',
'psycopg2',
'simplejson',
'pyramid_yards',
'pyramid_asyncio',
'cryptacular',
]
setup(name=NAME,
version='0.0',
description='A Credentials API',
long_description=README + '\n\n' + CHANGES,
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Framework :: Pyramid",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
],
author='Guillaume Gauvrit',
author_email='[email protected]',
url='',
keywords='web wsgi bfg pylons pyramid',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
      test_suite='{}.tests'.format(NAME),
install_requires=requires,
entry_points="""\
[console_scripts]
{name} = {name}.__main__:main
[paste.app_factory]
main = {name}:main
""".format(name=NAME),
)
| bsd-3-clause | 1,508,821,387,034,841,300 | 25 | 63 | 0.589103 | false |
ROCKNROLLKID/bleachbit | tests/TestAll.py | 1 | 2222 | #!/usr/bin/env python
# vim: ts=4:sw=4:expandtab
# BleachBit
# Copyright (C) 2008-2016 Andrew Ziem
# https://www.bleachbit.org
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Run all test suites
"""
import os
import sys
import unittest
import TestAction
import TestCLI
import TestCleaner
import TestCleanerML
import TestCommand
import TestCommon
import TestDiagnostic
import TestDeepScan
import TestFileUtilities
import TestGeneral
import TestMemory
import TestOptions
import TestRecognizeCleanerML
import TestSpecial
import TestUpdate
import TestWorker
suites = [TestAction.suite(),
TestCleanerML.suite(),
TestCleaner.suite(),
TestCLI.suite(),
TestCommand.suite(),
TestCommon.suite(),
TestDeepScan.suite(),
TestDiagnostic.suite(),
TestFileUtilities.suite(),
TestGeneral.suite(),
TestMemory.suite(),
TestOptions.suite(),
TestRecognizeCleanerML.suite(),
TestSpecial.suite(),
TestUpdate.suite(),
TestWorker.suite()
]
if 'posix' == os.name and sys.version_info >= (2,7,0):
import TestUnix
suites.append(TestUnix.suite())
if 'nt' == os.name:
import TestWinapp
import TestWindows
suites.append(TestWinapp.suite())
suites.append(TestWindows.suite())
def suite():
"""Combine all the suites into one large suite"""
suite_ = unittest.TestSuite()
map(suite_.addTest, suites)
return suite_
if __name__ == '__main__':
success = unittest.TextTestRunner(verbosity=2).run(suite()).wasSuccessful()
    sys.exit(not success)
| gpl-3.0 | 414,080,220,029,462,200 | 24.837209 | 79 | 0.693969 | false |
mjiang-27/django_learn | admin_advanced/app/admin.py | 1 | 2217 | from django.contrib import admin
# Register your models here.
from .models import Article, Person
'''
Class used to show other related fields in Article
'''
class ArticleAdmin(admin.ModelAdmin):
list_display = ('title', 'pub_date', 'mod_date', )
'''
used to split forms into several sets
'''
fieldsets = [
('Title', {'fields': ['title']}),
('Contents', {'classes': ['collapse', ], 'fields': ['content']}),
]
'''
used to filter entries
'''
list_filter = ['title', 'pub_date', ]
'''
used to search entries, add related fields into the tuple
'''
search_fields = ['title', 'mod_date', ]
'''
    # function to get search results, though it is unclear how to use it yet.
def get_search_results(self, req, queryset, search_item):
queryset, use_distinct = super(ArticleAdmin, self).get_search_results(req, queryset, search_item)
try:
search_item_as_str = str(search_item)
queryset |= self.objects.filter(pub_date=search_item_as_str)
except:
pass
return queryset, use_distinct
'''
'''
Operations with save and delete model
'''
def save_model(self, req, obj, form, change):
if change: # for modification
obj_original = self.model.objects.get(pk=obj.pk)
else: # for adding
obj_original = None
obj.user = req.user
obj.save()
def delete_model(self, req, obj):
'''
        Given a model instance, delete it from the database
'''
# handle something here
obj.delete()
'''
Class used to show non-database (computed) fields, e.g. full_name, in Person
'''
class PersonAdmin(admin.ModelAdmin):
list_display = ('full_name', )
'''
class MyModelAdmin(admin.ModelAdmin):
def get_queryset(self, request):
qs = super(MyModelAdmin, self).get_queryset(request)
if request.user.is_superuser:
return qs
else:
return qs.filter(author=request.user)
'''
# admin.site.register(Article) # Basic useage of admin
admin.site.register(Article, ArticleAdmin) # Used for show other related fields
admin.site.register(Person, PersonAdmin)
# admin.site.register(MyModelAdmin)
| gpl-3.0 | 7,187,291,948,035,659,000 | 27.063291 | 105 | 0.609833 | false |
danielhers/ucca | scripts/set_external_id_offline.py | 1 | 1091 | #!/usr/bin/env python3
import argparse
import os
import sys
from ucca.ioutil import get_passages_with_progress_bar, write_passage
desc = """Rename passages by a given mapping of IDs"""
def main(filename, input_filenames, outdir):
os.makedirs(outdir, exist_ok=True)
with open(filename, encoding="utf-8") as f:
pairs = [line.strip().split() for line in f]
old_to_new_id = {old_id: new_id for new_id, old_id in pairs}
for passage in get_passages_with_progress_bar(input_filenames, desc="Renaming"):
passage._ID = old_to_new_id[passage.ID]
write_passage(passage, outdir=outdir, verbose=False)
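# Illustrative mapping file (hypothetical IDs), one "<NEW ID> <OLD ID>" pair
# per whitespace-separated line:
#
#     120 wiki_0001
#     121 wiki_0002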
if __name__ == "__main__":
argument_parser = argparse.ArgumentParser(description=desc)
argument_parser.add_argument("filename", help="file with lines of the form <NEW ID> <OLD ID>")
argument_parser.add_argument("input_filenames", help="filename pattern or directory with input passages")
argument_parser.add_argument("-o", "--outdir", default=".", help="output directory")
main(**vars(argument_parser.parse_args()))
sys.exit(0)
| gpl-3.0 | -6,168,660,805,610,533,000 | 39.407407 | 109 | 0.690192 | false |
josenavas/QiiTa | qiita_pet/webserver.py | 1 | 12029 | # -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
# login code modified from https://gist.github.com/guillaumevincent/4771570
import tornado.auth
import tornado.escape
import tornado.web
import tornado.websocket
from os.path import dirname, join
from base64 import b64encode
from uuid import uuid4
from qiita_core.qiita_settings import qiita_config
from qiita_core.util import is_test_environment
from qiita_pet.handlers.base_handlers import (MainHandler, NoPageHandler)
from qiita_pet.handlers.auth_handlers import (
AuthCreateHandler, AuthLoginHandler, AuthLogoutHandler, AuthVerifyHandler)
from qiita_pet.handlers.user_handlers import (
ChangeForgotPasswordHandler, ForgotPasswordHandler, UserProfileHandler,
UserMessagesHander, UserJobs)
from qiita_pet.handlers.analysis_handlers import (
ListAnalysesHandler, AnalysisSummaryAJAX, SelectedSamplesHandler,
AnalysisDescriptionHandler, AnalysisGraphHandler, CreateAnalysisHandler,
AnalysisJobsHandler, ShareAnalysisAJAX)
from qiita_pet.handlers.study_handlers import (
StudyIndexHandler, StudyBaseInfoAJAX, SampleTemplateHandler,
SampleTemplateOverviewHandler, SampleTemplateSummaryHandler,
StudyEditHandler, ListStudiesHandler, SearchStudiesAJAX, EBISubmitHandler,
CreateStudyAJAX, ShareStudyAJAX, StudyApprovalList, ArtifactGraphAJAX,
VAMPSHandler, StudyTags, StudyGetTags,
ListCommandsHandler, ListOptionsHandler, PrepTemplateSummaryAJAX,
PrepTemplateAJAX, NewArtifactHandler, SampleAJAX, StudyDeleteAjax,
ArtifactAdminAJAX, NewPrepTemplateAjax, DataTypesMenuAJAX, StudyFilesAJAX,
ArtifactGetSamples, ArtifactGetInfo, WorkflowHandler,
WorkflowRunHandler, JobAJAX, AutocompleteHandler)
from qiita_pet.handlers.artifact_handlers import (
ArtifactSummaryAJAX, ArtifactAJAX, ArtifactSummaryHandler)
from qiita_pet.handlers.websocket_handlers import (
MessageHandler, SelectedSocketHandler, SelectSamplesHandler)
from qiita_pet.handlers.logger_handlers import LogEntryViewerHandler
from qiita_pet.handlers.upload import UploadFileHandler, StudyUploadFileHandler
from qiita_pet.handlers.stats import StatsHandler
from qiita_pet.handlers.download import (
DownloadHandler, DownloadStudyBIOMSHandler, DownloadRelease,
DownloadRawData, DownloadEBISampleAccessions, DownloadEBIPrepAccessions,
DownloadUpload)
from qiita_pet.handlers.prep_template import (
PrepTemplateHandler, PrepTemplateGraphHandler, PrepTemplateJobHandler)
from qiita_pet.handlers.ontology import OntologyHandler
from qiita_db.handlers.processing_job import (
JobHandler, HeartbeatHandler, ActiveStepHandler, CompleteHandler,
ProcessingJobAPItestHandler)
from qiita_db.handlers.artifact import (
ArtifactHandler, ArtifactAPItestHandler, ArtifactTypeHandler)
from qiita_db.handlers.prep_template import (
PrepTemplateDataHandler, PrepTemplateAPItestHandler,
PrepTemplateDBHandler)
from qiita_db.handlers.oauth2 import TokenAuthHandler
from qiita_db.handlers.reference import ReferenceHandler
from qiita_db.handlers.core import ResetAPItestHandler
from qiita_db.handlers.plugin import (
PluginHandler, CommandHandler, CommandListHandler, CommandActivateHandler,
ReloadPluginAPItestHandler)
from qiita_db.handlers.analysis import APIAnalysisMetadataHandler
from qiita_db.handlers.archive import APIArchiveObservations
from qiita_pet import uimodules
from qiita_db.util import get_mountpoint
from qiita_pet.handlers.rest import ENDPOINTS as REST_ENDPOINTS
from qiita_pet.handlers.qiita_redbiom import RedbiomPublicSearch
if qiita_config.portal == "QIITA":
from qiita_pet.handlers.portal import (
StudyPortalHandler, StudyPortalAJAXHandler)
DIRNAME = dirname(__file__)
STATIC_PATH = join(DIRNAME, "static")
TEMPLATE_PATH = join(DIRNAME, "templates") # base folder for webpages
_, RES_PATH = get_mountpoint('job')[0]
COOKIE_SECRET = b64encode(uuid4().bytes + uuid4().bytes)
DEBUG = qiita_config.test_environment
_vendor_js = join(STATIC_PATH, 'vendor', 'js')
class Application(tornado.web.Application):
def __init__(self):
handlers = [
(r"/", MainHandler),
(r"/auth/login/", AuthLoginHandler),
(r"/auth/logout/", AuthLogoutHandler),
(r"/auth/create/", AuthCreateHandler),
(r"/auth/verify/(.*)", AuthVerifyHandler),
(r"/auth/forgot/", ForgotPasswordHandler),
(r"/auth/reset/(.*)", ChangeForgotPasswordHandler),
(r"/profile/", UserProfileHandler),
(r"/user/messages/", UserMessagesHander),
(r"/user/jobs/", UserJobs),
(r"/static/(.*)", tornado.web.StaticFileHandler,
{"path": STATIC_PATH}),
# Analysis handlers
(r"/analysis/list/", ListAnalysesHandler),
(r"/analysis/dflt/sumary/", AnalysisSummaryAJAX),
(r"/analysis/create/", CreateAnalysisHandler),
(r"/analysis/selected/", SelectedSamplesHandler),
(r"/analysis/selected/socket/", SelectedSocketHandler),
(r"/analysis/description/(.*)/graph/", AnalysisGraphHandler),
(r"/analysis/description/(.*)/jobs/", AnalysisJobsHandler),
(r"/analysis/description/(.*)/", AnalysisDescriptionHandler),
(r"/analysis/sharing/", ShareAnalysisAJAX),
(r"/artifact/samples/", ArtifactGetSamples),
(r"/artifact/info/", ArtifactGetInfo),
(r"/consumer/", MessageHandler),
(r"/admin/error/", LogEntryViewerHandler),
(r"/admin/approval/", StudyApprovalList),
(r"/admin/artifact/", ArtifactAdminAJAX),
(r"/ebi_submission/(.*)", EBISubmitHandler),
# Study handlers
(r"/study/create/", StudyEditHandler),
(r"/study/edit/(.*)", StudyEditHandler),
(r"/study/list/", ListStudiesHandler),
(r"/study/process/commands/options/", ListOptionsHandler),
(r"/study/process/commands/", ListCommandsHandler),
(r"/study/process/workflow/run/", WorkflowRunHandler),
(r"/study/process/workflow/", WorkflowHandler),
(r"/study/process/job/", JobAJAX),
(r"/study/list/socket/", SelectSamplesHandler),
(r"/study/search/(.*)", SearchStudiesAJAX),
(r"/study/new_artifact/", NewArtifactHandler),
(r"/study/files/", StudyFilesAJAX),
(r"/study/sharing/", ShareStudyAJAX),
(r"/study/sharing/autocomplete/", AutocompleteHandler),
(r"/study/new_prep_template/", NewPrepTemplateAjax),
(r"/study/tags/(.*)", StudyTags),
(r"/study/get_tags/", StudyGetTags),
# Artifact handlers
(r"/artifact/graph/", ArtifactGraphAJAX),
(r"/artifact/(.*)/summary/", ArtifactSummaryAJAX),
(r"/artifact/html_summary/(.*)", ArtifactSummaryHandler,
{"path": qiita_config.base_data_dir}),
(r"/artifact/(.*)/", ArtifactAJAX),
# Prep template handlers
(r"/prep_template/", PrepTemplateHandler),
(r"/prep_template/(.*)/graph/", PrepTemplateGraphHandler),
(r"/prep_template/(.*)/jobs/", PrepTemplateJobHandler),
(r"/ontology/", OntologyHandler),
# ORDER FOR /study/description/ SUBPAGES HERE MATTERS.
# Same reasoning as below. /study/description/(.*) should be last.
(r"/study/description/sample_template/overview/",
SampleTemplateOverviewHandler),
(r"/study/description/sample_template/summary/",
SampleTemplateSummaryHandler),
(r"/study/description/sample_template/", SampleTemplateHandler),
(r"/study/description/sample_summary/", SampleAJAX),
(r"/study/description/prep_summary/", PrepTemplateSummaryAJAX),
(r"/study/description/prep_template/", PrepTemplateAJAX),
(r"/study/description/baseinfo/", StudyBaseInfoAJAX),
(r"/study/description/data_type_menu/", DataTypesMenuAJAX),
(r"/study/description/(.*)", StudyIndexHandler),
(r"/study/delete/", StudyDeleteAjax),
(r"/study/upload/(.*)", StudyUploadFileHandler),
(r"/upload/", UploadFileHandler),
(r"/check_study/", CreateStudyAJAX),
(r"/stats/", StatsHandler),
(r"/download/(.*)", DownloadHandler),
(r"/download_study_bioms/(.*)", DownloadStudyBIOMSHandler),
(r"/download_raw_data/(.*)", DownloadRawData),
(r"/download_ebi_accessions/samples/(.*)",
DownloadEBISampleAccessions),
(r"/download_ebi_accessions/experiments/(.*)",
DownloadEBIPrepAccessions),
(r"/download_upload/(.*)", DownloadUpload),
(r"/release/download/(.*)", DownloadRelease),
(r"/vamps/(.*)", VAMPSHandler),
(r"/redbiom/(.*)", RedbiomPublicSearch),
# Plugin handlers - the order matters here so do not change
# qiita_db/jobs/(.*) should go after any of the
# qiita_db/jobs/(.*)/XXXX because otherwise it will match the
# regular expression and the qiita_db/jobs/(.*)/XXXX will never
# be hit.
(r"/qiita_db/authenticate/", TokenAuthHandler),
(r"/qiita_db/jobs/(.*)/heartbeat/", HeartbeatHandler),
(r"/qiita_db/jobs/(.*)/step/", ActiveStepHandler),
(r"/qiita_db/jobs/(.*)/complete/", CompleteHandler),
(r"/qiita_db/jobs/(.*)", JobHandler),
(r"/qiita_db/artifacts/types/", ArtifactTypeHandler),
(r"/qiita_db/artifacts/(.*)/", ArtifactHandler),
(r"/qiita_db/prep_template/(.*)/data/", PrepTemplateDataHandler),
(r"/qiita_db/prep_template/(.*)/", PrepTemplateDBHandler),
(r"/qiita_db/references/(.*)/", ReferenceHandler),
(r"/qiita_db/plugins/(.*)/(.*)/commands/(.*)/activate/",
CommandActivateHandler),
(r"/qiita_db/plugins/(.*)/(.*)/commands/(.*)/", CommandHandler),
(r"/qiita_db/plugins/(.*)/(.*)/commands/", CommandListHandler),
(r"/qiita_db/plugins/(.*)/(.*)/", PluginHandler),
(r"/qiita_db/analysis/(.*)/metadata/", APIAnalysisMetadataHandler),
(r"/qiita_db/archive/observations/", APIArchiveObservations)
]
# rest endpoints
handlers.extend(REST_ENDPOINTS)
if qiita_config.portal == "QIITA":
# Add portals editing pages only on main portal
portals = [
(r"/admin/portals/studies/", StudyPortalHandler),
(r"/admin/portals/studiesAJAX/", StudyPortalAJAXHandler)
]
handlers.extend(portals)
if is_test_environment():
# We add the endpoints for testing plugins
test_handlers = [
(r"/apitest/processing_job/", ProcessingJobAPItestHandler),
(r"/apitest/reset/", ResetAPItestHandler),
(r"/apitest/prep_template/", PrepTemplateAPItestHandler),
(r"/apitest/artifact/", ArtifactAPItestHandler),
(r"/apitest/reload_plugins/", ReloadPluginAPItestHandler)
]
handlers.extend(test_handlers)
# 404 PAGE MUST BE LAST IN THIS LIST!
handlers.append((r".*", NoPageHandler))
settings = {
"template_path": TEMPLATE_PATH,
"debug": DEBUG,
"cookie_secret": qiita_config.cookie_secret,
"login_url": "%s/auth/login/" % qiita_config.portal_dir,
"ui_modules": uimodules,
}
tornado.web.Application.__init__(self, handlers, **settings)
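# Hedged usage sketch (not part of the original module): one way to serve the
# Application defined above with Tornado's HTTP server. The port number is an
# arbitrary assumption; a real deployment would read it from qiita_config.
if __name__ == "__main__":
    import tornado.httpserver
    import tornado.ioloop
    # Wrap the Application in a blocking single-process HTTP server
    server = tornado.httpserver.HTTPServer(Application())
    server.listen(8888)  # assumed port
    tornado.ioloop.IOLoop.current().start()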
| bsd-3-clause | -2,669,512,977,122,320,000 | 49.330544 | 79 | 0.646022 | false |
zachmueller/charts | python/sweet_charts.py | 1 | 2782 | from pandas_datareader import data as pdr
import datetime
import pandas as pd
import numpy as np
def download_data(ticker, start = datetime.datetime(1950, 1, 1),
end = datetime.datetime.today(),
source = 'yahoo', drop_extra = True):
# may need to use this for weekly data
# http://stackoverflow.com/a/20584971
df = pdr.DataReader(ticker, source, start, end)
df = df.rename(columns={'Adj Close':'adj_close'})
df.index.name = 'date'
if drop_extra:
df = df[['adj_close']]
return df
def get_returns(df, drop_extra = True):
df.loc[:,'prior'] = df['adj_close'].shift(1)
df = df.dropna()
change = (df['prior'] / df['adj_close']) - 1
df.loc[:,'returns'] = change
if drop_extra:
df = df[['returns']]
return df
def get_beta(a, b):
return np.cov(b, a)[0,1]/np.var(b)
def get_value(a, b, kind = 'beta'):
# need to add in more calculation types (e.g., Std Dev, Correl, etc.)
if kind=='beta':
return get_beta(a, b)
else:
return None
def get_chart_data(tickers, market = '^GSPC', kind = 'beta',
start = datetime.datetime(1950, 1, 1),
end = datetime.datetime.today(), rolling_weeks = 156,
source = 'yahoo', return_type = 'df'):
# download market data
mkt = download_data(market, start, end, source, drop_extra=True)
mkt = get_returns(mkt, drop_extra=True)
mkt.columns = ['market']
# download stock data for each ticker provided
stocks = []
min_date = end
for ticker in tickers:
df = download_data(ticker, start, end, source, drop_extra=True)
df = get_returns(df, drop_extra=True)
df.columns = [ticker]
stocks.append(df.copy())
# find min date across all stock data collected
temp_date = df.index.min().to_pydatetime()
min_date = min(min_date, temp_date)
# truncate market data based on min_date found
mkt = mkt.loc[mkt.index>=min_date]
df = pd.concat([mkt] + stocks, axis=1)
# prep dict for capturing calculations
out = {}
for ticker in tickers:
out[ticker] = []
# calc values
rolling_start = min_date + datetime.timedelta(weeks=rolling_weeks)
dates = list(df.ix[rolling_start:].index.to_pydatetime())
for date in dates:
prior_date = date - datetime.timedelta(weeks=rolling_weeks)
tmp = df.ix[prior_date:date]
for ticker in tickers:
val = get_value(tmp[ticker], tmp['market'])
out[ticker].append(val)
d = {'data':out, 'dates':dates}
if return_type=='dict':
return d
elif return_type=='df':
return pd.DataFrame(d['data'], index=d['dates'])
return d
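# Hedged usage sketch (not part of the original script): compute a ~3-year
# rolling beta for two tickers against the S&P 500. The tickers and start date
# are arbitrary assumptions, and the call needs network access through
# pandas_datareader's 'yahoo' source, which may no longer be available.
if __name__ == '__main__':
    betas = get_chart_data(['AAPL', 'MSFT'], market='^GSPC', kind='beta',
                           start=datetime.datetime(2010, 1, 1),
                           rolling_weeks=156, return_type='df')
    # One column of rolling beta values per ticker, indexed by date
    print(betas.tail())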
| apache-2.0 | -1,158,044,879,565,456,600 | 31.729412 | 73 | 0.586628 | false |
poodarchu/SogouPersona | outputResult.py | 1 | 1060 | # -*- coding=utf-8 -*-
import codecs
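# Descriptive note (added; inferred from the code below): ./data/test.csv is a
# tab-separated file whose first column is the user ID, and each
# ./data/output/N_predict.csv holds one predicted integer per line
# (0 = age, 1 = gender, 2 = education). The script zips them into
# "UID age gender education" rows in UID_age_gender_education.csv.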
if __name__ == '__main__':
UID = []
with codecs.open('./data/test.csv', 'r', 'utf-8') as fr:
for user in fr.readlines():
user = user.split('\t')
UID.append(user[0])
fr.close()
ages = []
with codecs.open('./data/output/0_predict.csv', 'r', 'utf-8') as fr:
for age in fr:
ages.append(int(age))
		fr.close()
genders = []
with codecs.open('./data/output/1_predict.csv', 'r', 'utf-8') as fr:
for gender in fr:
genders.append(int(gender))
		fr.close()
educations = []
with codecs.open('./data/output/2_predict.csv', 'r', 'utf-8') as fr:
for edu in fr:
educations.append(int(edu))
		fr.close()
with codecs.open('./data/output/UID_age_gender_education.csv', 'w', 'utf-8') as fw:
uid_age = zip(UID, ages, genders, educations)
for (uid, age, gender, education) in uid_age:
fw.write('%s %s %d %d\n' % (uid, age, gender, education))
fw.close()
| mit | -4,194,074,875,685,905,400 | 28.444444 | 87 | 0.521698 | false |
20017578/MasterSoftwareLibero | ProvePython/Trasparenza/XX_leggi_config.py | 1 | 1265 | # Reads the configuration file to produce the LOD
# import urllib
import sys
import csv
import string
try:
file_csv_config_main = open ('campi_config.csv')
except:
	print 'File not found, trying from the network.'
def_campi=[]
# READ THE FILE WITH THE FIELD-TYPE NAMES (note: the first 10 rows describe the node, not the field)
riga_csv=file_csv_config_main.readline()
while riga_csv:
riga_csv=riga_csv.rstrip()
def_campi.append(riga_csv)
riga_csv = file_csv_config_main.readline()
# OPEN THE FILE CONTAINING THE CONFIGURATION
campi = []
nomeFileDati = 'config_toponomastica.csv'
with open(nomeFileDati, 'rb') as csvfile:
reader = csv.reader(csvfile, delimiter = ',')
n_righe=0
for row in reader:
n_righe=n_righe+1
n_campi=len(row)
if n_righe>9:
i = 0
while i<n_campi:
campi.append(row[i])
i=i+1
print ('The CSV to be submitted must have the following characteristics:')
print ('NUMBER OF POSSIBLE FIELDS = ',n_campi)
i=0
while i<n_campi:
print
	print 'Field no.',i+1,' :'
j=0
while j<n_righe-10:
print (j*n_campi)+i,':',def_campi[j+10],' -> ',campi[(j*n_campi)+i]
j=j+1
i=i+1
| agpl-3.0 | -1,256,895,054,665,019,400 | 25.354167 | 112 | 0.612648 | false |
bhaveshAn/crisscross | crisscross/facades/compass.py | 1 | 1269 | '''
Compass
=======
The :class:`Compass` facade provides access to public methods for using the
compass of your device.
Simple Examples
---------------
To enable compass::
>>> from crisscross import compass
>>> compass.enable()
To disable compass::
>>> compass.disable()
To get the orientation::
>>> compass.orientation
(-23.721826553344727, -5.7114701271057129, -36.749668121337891)
'''
class Compass(object):
'''Compass facade.
.. versionadded:: 1.2.0
'''
@property
def orientation(self):
'''
Property that returns values of the current compass
(magnetic field) sensors, as a (x, y, z) tuple.
Returns (None, None, None) if no data is currently available.
'''
return self.get_orientation()
def enable(self):
'''
Activate the compass sensor.
'''
self._enable()
def disable(self):
'''
Disable the compass sensor.
'''
self._disable()
def get_orientation(self):
return self._get_orientation()
# private
def _enable(self):
raise NotImplementedError()
def _disable(self):
raise NotImplementedError()
def _get_orientation(self):
raise NotImplementedError()
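# Hedged sketch (not part of the original module): a platform backend is
# expected to subclass Compass and implement the private hooks. The class
# below is purely illustrative and returns dummy values.
#
# class DummyCompass(Compass):
#     def _enable(self):
#         pass
#     def _disable(self):
#         pass
#     def _get_orientation(self):
#         return (0.0, 0.0, 0.0)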
| mit | -8,531,520,257,634,616,000 | 17.940299 | 77 | 0.588652 | false |
jrief/django-shop | shop/models/address.py | 1 | 10316 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
"""
Holds all the information relevant to the client (addresses for instance)
"""
from six import with_metaclass
from django.conf import settings
from django.db import models
from django.template import Context
from django.template.loader import select_template
from django.utils.translation import ugettext_lazy as _
from shop import app_settings
from shop import deferred
class AddressManager(models.Manager):
def get_max_priority(self, customer):
aggr = self.get_queryset().filter(customer=customer).aggregate(models.Max('priority'))
priority = aggr['priority__max'] or 0
return priority
def get_fallback(self, customer):
"""
Return a fallback address, whenever the customer has not declared one.
"""
return self.get_queryset().filter(customer=customer).order_by('priority').last()
class BaseAddress(models.Model):
customer = deferred.ForeignKey('BaseCustomer')
priority = models.SmallIntegerField(
default=0,
db_index=True,
help_text=_("Priority for using this address"),
)
class Meta:
abstract = True
objects = AddressManager()
def as_text(self):
"""
Return the address as plain text to be used for printing, etc.
"""
template_names = [
'{}/{}-address.txt'.format(app_settings.APP_LABEL, self.address_type),
'{}/address.txt'.format(app_settings.APP_LABEL),
'shop/address.txt',
]
template = select_template(template_names)
context = Context({'address': self})
return template.render(context)
as_text.short_description = _("Address")
class BaseShippingAddress(with_metaclass(deferred.ForeignKeyBuilder, BaseAddress)):
address_type = 'shipping'
class Meta:
abstract = True
ShippingAddressModel = deferred.MaterializedModel(BaseShippingAddress)
class BaseBillingAddress(with_metaclass(deferred.ForeignKeyBuilder, BaseAddress)):
address_type = 'billing'
class Meta:
abstract = True
BillingAddressModel = deferred.MaterializedModel(BaseBillingAddress)
ISO_3166_CODES = (
('AF', _("Afghanistan")),
('AX', _("Aland Islands")),
('AL', _("Albania")),
('DZ', _("Algeria")),
('AS', _("American Samoa")),
('AD', _("Andorra")),
('AO', _("Angola")),
('AI', _("Anguilla")),
('AQ', _("Antarctica")),
('AG', _("Antigua And Barbuda")),
('AR', _("Argentina")),
('AM', _("Armenia")),
('AW', _("Aruba")),
('AU', _("Australia")),
('AT', _("Austria")),
('AZ', _("Azerbaijan")),
('BS', _("Bahamas")),
('BH', _("Bahrain")),
('BD', _("Bangladesh")),
('BB', _("Barbados")),
('BY', _("Belarus")),
('BE', _("Belgium")),
('BZ', _("Belize")),
('BJ', _("Benin")),
('BM', _("Bermuda")),
('BT', _("Bhutan")),
('BO', _("Bolivia, Plurinational State Of")),
('BQ', _("Bonaire, Saint Eustatius And Saba")),
('BA', _("Bosnia And Herzegovina")),
('BW', _("Botswana")),
('BV', _("Bouvet Island")),
('BR', _("Brazil")),
('IO', _("British Indian Ocean Territory")),
('BN', _("Brunei Darussalam")),
('BG', _("Bulgaria")),
('BF', _("Burkina Faso")),
('BI', _("Burundi")),
('KH', _("Cambodia")),
('CM', _("Cameroon")),
('CA', _("Canada")),
('CV', _("Cape Verde")),
('KY', _("Cayman Islands")),
('CF', _("Central African Republic")),
('TD', _("Chad")),
('CL', _("Chile")),
('CN', _("China")),
('CX', _("Christmas Island")),
('CC', _("Cocos (Keeling) Islands")),
('CO', _("Colombia")),
('KM', _("Comoros")),
('CG', _("Congo")),
('CD', _("Congo, The Democratic Republic Of The")),
('CK', _("Cook Islands")),
('CR', _("Costa Rica")),
('HR', _("Croatia")),
('CU', _("Cuba")),
('CW', _("Curacao")),
('CY', _("Cyprus")),
('CZ', _("Czech Republic")),
('DK', _("Denmark")),
('DJ', _("Djibouti")),
('DM', _("Dominica")),
('DO', _("Dominican Republic")),
('EC', _("Ecuador")),
('EG', _("Egypt")),
('SV', _("El Salvador")),
('GQ', _("Equatorial Guinea")),
('ER', _("Eritrea")),
('EE', _("Estonia")),
('ET', _("Ethiopia")),
('FK', _("Falkland Islands (Malvinas)")),
('FO', _("Faroe Islands")),
('FJ', _("Fiji")),
('FI', _("Finland")),
('FR', _("France")),
('GF', _("French Guiana")),
('PF', _("French Polynesia")),
('TF', _("French Southern Territories")),
('GA', _("Gabon")),
('GM', _("Gambia")),
('DE', _("Germany")),
('GH', _("Ghana")),
('GI', _("Gibraltar")),
('GR', _("Greece")),
('GL', _("Greenland")),
('GD', _("Grenada")),
('GP', _("Guadeloupe")),
('GU', _("Guam")),
('GT', _("Guatemala")),
('GG', _("Guernsey")),
('GN', _("Guinea")),
('GW', _("Guinea-Bissau")),
('GY', _("Guyana")),
('HT', _("Haiti")),
('HM', _("Heard Island and McDonald Islands")),
('VA', _("Holy See (Vatican City State)")),
('HN', _("Honduras")),
('HK', _("Hong Kong")),
('HU', _("Hungary")),
('IS', _("Iceland")),
('IN', _("India")),
('ID', _("Indonesia")),
('IR', _("Iran, Islamic Republic Of")),
('IQ', _("Iraq")),
('IE', _("Ireland")),
('IL', _("Israel")),
('IT', _("Italy")),
('CI', _("Ivory Coast")),
('JM', _("Jamaica")),
('JP', _("Japan")),
('JE', _("Jersey")),
('JO', _("Jordan")),
('KZ', _("Kazakhstan")),
('KE', _("Kenya")),
('KI', _("Kiribati")),
('KP', _("Korea, Democratic People's Republic Of")),
('KR', _("Korea, Republic Of")),
('KS', _("Kosovo")),
('KW', _("Kuwait")),
('KG', _("Kyrgyzstan")),
('LA', _("Lao People's Democratic Republic")),
('LV', _("Latvia")),
('LB', _("Lebanon")),
('LS', _("Lesotho")),
('LR', _("Liberia")),
('LY', _("Libyan Arab Jamahiriya")),
('LI', _("Liechtenstein")),
('LT', _("Lithuania")),
('LU', _("Luxembourg")),
('MO', _("Macao")),
('MK', _("Macedonia")),
('MG', _("Madagascar")),
('MW', _("Malawi")),
('MY', _("Malaysia")),
('MV', _("Maldives")),
('ML', _("Mali")),
('ML', _("Malta")),
('MH', _("Marshall Islands")),
('MQ', _("Martinique")),
('MR', _("Mauritania")),
('MU', _("Mauritius")),
('YT', _("Mayotte")),
('MX', _("Mexico")),
('FM', _("Micronesia")),
('MD', _("Moldova")),
('MC', _("Monaco")),
('MN', _("Mongolia")),
('ME', _("Montenegro")),
('MS', _("Montserrat")),
('MA', _("Morocco")),
('MZ', _("Mozambique")),
('MM', _("Myanmar")),
('NA', _("Namibia")),
('NR', _("Nauru")),
('NP', _("Nepal")),
('NL', _("Netherlands")),
('AN', _("Netherlands Antilles")),
('NC', _("New Caledonia")),
('NZ', _("New Zealand")),
('NI', _("Nicaragua")),
('NE', _("Niger")),
('NG', _("Nigeria")),
('NU', _("Niue")),
('NF', _("Norfolk Island")),
('MP', _("Northern Mariana Islands")),
('NO', _("Norway")),
('OM', _("Oman")),
('PK', _("Pakistan")),
('PW', _("Palau")),
('PS', _("Palestinian Territory, Occupied")),
('PA', _("Panama")),
('PG', _("Papua New Guinea")),
('PY', _("Paraguay")),
('PE', _("Peru")),
('PH', _("Philippines")),
('PN', _("Pitcairn")),
('PL', _("Poland")),
('PT', _("Portugal")),
('PR', _("Puerto Rico")),
('QA', _("Qatar")),
('RE', _("Reunion")),
('RO', _("Romania")),
('RU', _("Russian Federation")),
('RW', _("Rwanda")),
('BL', _("Saint Barthelemy")),
('SH', _("Saint Helena, Ascension & Tristan Da Cunha")),
('KN', _("Saint Kitts and Nevis")),
('LC', _("Saint Lucia")),
('MF', _("Saint Martin (French Part)")),
('PM', _("Saint Pierre and Miquelon")),
('VC', _("Saint Vincent And The Grenadines")),
('WS', _("Samoa")),
('SM', _("San Marino")),
('ST', _("Sao Tome And Principe")),
('SA', _("Saudi Arabia")),
('SN', _("Senegal")),
('RS', _("Serbia")),
('SC', _("Seychelles")),
('SL', _("Sierra Leone")),
('SG', _("Singapore")),
('SX', _("Sint Maarten (Dutch Part)")),
('SK', _("Slovakia")),
('SI', _("Slovenia")),
('SB', _("Solomon Islands")),
('SO', _("Somalia")),
('ZA', _("South Africa")),
('GS', _("South Georgia And The South Sandwich Islands")),
('ES', _("Spain")),
('LK', _("Sri Lanka")),
('SD', _("Sudan")),
('SR', _("Suriname")),
('SJ', _("Svalbard And Jan Mayen")),
('SZ', _("Swaziland")),
('SE', _("Sweden")),
('CH', _("Switzerland")),
('SY', _("Syrian Arab Republic")),
('TW', _("Taiwan")),
('TJ', _("Tajikistan")),
('TZ', _("Tanzania")),
('TH', _("Thailand")),
('TL', _("Timor-Leste")),
('TG', _("Togo")),
('TK', _("Tokelau")),
('TO', _("Tonga")),
('TT', _("Trinidad and Tobago")),
('TN', _("Tunisia")),
('TR', _("Turkey")),
('TM', _("Turkmenistan")),
('TC', _("Turks And Caicos Islands")),
('TV', _("Tuvalu")),
('UG', _("Uganda")),
('UA', _("Ukraine")),
('AE', _("United Arab Emirates")),
('GB', _("United Kingdom")),
('US', _("United States")),
('UM', _("United States Minor Outlying Islands")),
('UY', _("Uruguay")),
('UZ', _("Uzbekistan")),
('VU', _("Vanuatu")),
('VE', _("Venezuela, Bolivarian Republic Of")),
('VN', _("Viet Nam")),
('VG', _("Virgin Islands, British")),
('VI', _("Virgin Islands, U.S.")),
('WF', _("Wallis and Futuna")),
('EH', _("Western Sahara")),
('YE', _("Yemen")),
('ZM', _("Zambia")),
('ZW', _("Zimbabwe")),
)
class CountryField(models.CharField):
"""
This creates a simple input field to choose a country.
"""
def __init__(self, *args, **kwargs):
defaults = {
'max_length': 3,
'choices': ISO_3166_CODES,
}
defaults.update(kwargs)
super(CountryField, self).__init__(*args, **defaults)
def deconstruct(self):
name, path, args, kwargs = super(CountryField, self).deconstruct()
if kwargs['max_length'] == 3:
kwargs.pop('max_length')
if kwargs['choices'] == ISO_3166_CODES:
kwargs.pop('choices')
return name, path, args, kwargs
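# Hedged usage sketch (not part of the original module): CountryField behaves
# like a regular CharField with ISO 3166 choices. The model below is purely
# illustrative and does not exist in django-shop.
#
# class ExampleAddress(models.Model):
#     country = CountryField(verbose_name=_("Country"))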
| bsd-3-clause | 3,705,730,101,055,474,000 | 28.815029 | 94 | 0.477995 | false |
dkdeconti/DFCI-CCCB-GATK-Cloud-pipeline | archive/Preprocessing/map_TSV.py | 1 | 1467 | #!/usr/bin/python
import sys
from collections import defaultdict
def read_tsv_as_array(filename):
'''
Converts 2 column tsv to array.
First column is samplename.
Second column is location of file.
'''
tsv_array = []
with open(filename, 'rU') as handle:
for line in handle:
arow = line.strip('\n').split('\t')
tsv_array.append(arow)
return tsv_array
def map_array_to_dict(tsv_array):
'''
Converts array of paired samplename and file to dict.
Sample name is key.
'''
mapped_tsv = defaultdict(list)
for key, value in tsv_array:
mapped_tsv[key].append(value)
return mapped_tsv
def create_mapped_files(mapped_tsv):
'''
    Creates a ".list" file for each key (sample name) listing its files,
    and writes the sample-to-list-file mapping to stdout.
'''
for key, value in mapped_tsv.items():
write_list(key + ".list", value)
sys.stdout.write('\t'.join([key, key+".list"]) + '\n')
def write_list(filename, list_text):
'''
    Writes the given file paths to `filename`, one per line.
    The caller passes `filename` as the sample name plus the ".list" suffix.
'''
with open(filename, 'w') as handle:
for out_str in list_text:
handle.write(out_str + '\n')
def main(sa):
'''
Parses CLI input
'''
inputs_tsv_filename = sa[0]
mapped_tsv = map_array_to_dict(read_tsv_as_array(inputs_tsv_filename))
create_mapped_files(mapped_tsv)
if __name__ == "__main__":
main(sys.argv[1:])
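# Hedged example (not from the original script) of the expected input: a
# two-column TSV mapping sample names to file locations, e.g.
#
#   sampleA<TAB>/path/to/sampleA_lane1.bam
#   sampleA<TAB>/path/to/sampleA_lane2.bam
#   sampleB<TAB>/path/to/sampleB_lane1.bam
#
# Running `python map_TSV.py input.tsv` would then write sampleA.list and
# sampleB.list, and print "sampleA<TAB>sampleA.list" (etc.) to stdout.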
| bsd-2-clause | 774,471,899,134,565,600 | 22.66129 | 74 | 0.601227 | false |
naresh21/synergetics-edx-platform | openedx/core/djangoapps/micro_masters/views.py | 1 | 43485 | import json
import uuid
import hmac
import logging
import binascii
import urllib
import pytz
from collections import OrderedDict
from datetime import datetime
from hashlib import sha256
from decimal import Decimal, InvalidOperation
from django.db.models import Q
from django.utils.encoding import smart_str
from django.contrib.auth.decorators import login_required
from django.views.decorators.http import require_POST
from django.core.urlresolvers import reverse
from django.http import (
Http404, HttpResponseRedirect,
HttpResponseNotFound, HttpResponse,
HttpResponseBadRequest
)
from django.views.decorators.csrf import csrf_exempt
from django.conf import settings
from django.utils.translation import ugettext as _, ugettext_noop
from edxmako.shortcuts import render_to_response, render_to_string
from xmodule.modulestore.django import ModuleI18nService
from shoppingcart.processors.exceptions import *
from microsite_configuration import microsite
from courseware.courses import get_course_by_id
from openedx.core.djangoapps.content.course_overviews.models import CourseOverview
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
from .models import (
Program, ProgramEnrollment,
ProgramOrder, ProgramCoupon,
ProgramCouponRedemption, ProgramGeneratedCertificate,
ProgramCertificateSignatories
)
from shoppingcart.exceptions import (
MultipleCouponsNotAllowedException, InvalidCartItem,
ItemNotFoundInCartException, RedemptionCodeError
)
from student.models import LinkedInAddToProfileConfiguration
from certificates.api import (
get_certificate_header_context,
get_certificate_footer_context,
)
from leaderboard.models import LeaderBoard
log = logging.getLogger(__name__)
CC_PROCESSOR = settings.CC_PROCESSOR.get(settings.CC_PROCESSOR_NAME)
import shoppingcart
from django.conf import settings
from courseware.access import has_access
from course_modes.models import CourseMode
from student.models import CourseEnrollment
from commerce.utils import EcommerceService
from shoppingcart.utils import is_shopping_cart_enabled
from courseware.courses import (
get_course_with_access,
get_permission_for_course_about)
from courseware.views.views import get_cosmetic_display_price
# Start before payment_method
def processor_hash(value):
"""
Calculate the base64-encoded, SHA-256 hash used by CyberSource.
Args:
value (string): The value to encode.
Returns:
string
"""
secret_key = CC_PROCESSOR.get('SECRET_KEY', '')
hash_obj = hmac.new(secret_key.encode('utf-8'),
value.encode('utf-8'), sha256)
# last character is a '\n', which we don't want
return binascii.b2a_base64(hash_obj.digest())[:-1]
def sign(params):
"""
Sign the parameters dictionary so CyberSource can validate our identity.
The params dict should contain a key 'signed_field_names' that is a comma-separated
list of keys in the dictionary. The order of this list is important!
Args:
params (dict): Dictionary of parameters; must include a 'signed_field_names' key
Returns:
dict: The same parameters dict, with a 'signature' key calculated from the other values.
"""
fields = u",".join(params.keys())
params['signed_field_names'] = fields
signed_fields = params.get('signed_field_names', '').split(',')
values = u",".join([u"{0}={1}".format(i, params.get(i, ''))
for i in signed_fields])
params['signature'] = processor_hash(values)
params['signed_field_names'] = fields
return params
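# Hedged illustration (not part of the original module) of what sign() returns:
# given an OrderedDict of CyberSource fields, it adds 'signed_field_names' and a
# base64-encoded HMAC-SHA256 'signature' computed over the comma-joined
# "key=value" pairs. The field values below are made up for illustration.
#
# fields = OrderedDict([('access_key', 'abc'), ('profile_id', 'xyz'),
#                       ('amount', '10.00'), ('currency', 'usd')])
# signed = sign(fields)
# assert 'signature' in signed and 'signed_field_names' in signed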
def get_purchase_params(cart, callback_url=None):
"""
This method will build out a dictionary of parameters needed by CyberSource to complete the transaction
Args:
cart (Order): The order model representing items in the user's cart.
Keyword Args:
callback_url (unicode): The URL that CyberSource should POST to when the user
completes a purchase. If not provided, then CyberSource will use
the URL provided by the administrator of the account
(CyberSource config, not LMS config).
extra_data (list): Additional data to include as merchant-defined data fields.
Returns:
dict
"""
params = OrderedDict()
program_price = cart.discounted_price if cart.discount_applied else cart.program.price
amount = "{0:0.2f}".format(program_price)
params['amount'] = amount
params['currency'] = settings.PAID_COURSE_REGISTRATION_CURRENCY[0]
params['orderNumber'] = "OrderId: {0:d}".format(cart.id)
params['access_key'] = CC_PROCESSOR.get('ACCESS_KEY', '')
params['profile_id'] = CC_PROCESSOR.get('PROFILE_ID', '')
params['reference_number'] = cart.id
params['transaction_type'] = 'sale'
params['locale'] = 'en'
params['signed_date_time'] = datetime.utcnow(
).strftime('%Y-%m-%dT%H:%M:%SZ')
params['signed_field_names'] = 'access_key,profile_id,amount,currency,transaction_type,reference_number,signed_date_time,locale,transaction_uuid,signed_field_names,unsigned_field_names,orderNumber'
params['unsigned_field_names'] = ''
params['transaction_uuid'] = uuid.uuid4().hex
params['payment_method'] = 'card'
if callback_url is not None:
params['override_custom_receipt_page'] = callback_url.get('success')
params['override_custom_cancel_page'] = callback_url.get('cancel')
return sign(params)
# End before payment_method
# Start after payment method
def _record_purchase(params, order):
"""
Record the purchase and run purchased_callbacks
Args:
params (dict): The parameters we received from CyberSource.
order (Order): The order associated with this payment.
Returns:
None
"""
if settings.FEATURES.get("LOG_POSTPAY_CALLBACKS"):
log.info(
"Order %d purchased with params: %s", order.id, json.dumps(params)
)
# Mark the order as purchased and store the billing information
# order.purchase(
# first=params.get('req_bill_to_forename', ''),
# last=params.get('req_bill_to_surname', ''),
# street1=params.get('req_bill_to_address_line1', ''),
# street2=params.get('req_bill_to_address_line2', ''),
# city=params.get('req_bill_to_address_city', ''),
# state=params.get('req_bill_to_address_state', ''),
# country=params.get('req_bill_to_address_country', ''),
# postalcode=params.get('req_bill_to_address_postal_code', ''),
# processor_reply_dump=json.dumps(params)
# )
order.purchase(
first=params.get('req_ship_to_forename', ''),
last=params.get('req_ship_to_surname', ''),
street1=params.get('req_ship_to_address_line1', ''),
street2=params.get('req_ship_to_address_line1', ''),
city=params.get('req_bill_to_address_city', ''),
state=params.get('req_bill_to_address_state', ''),
country=params.get('req_ship_to_address_country', ''),
postalcode=params.get('req_ship_to_address_postal_code', ''),
processor_reply_dump=json.dumps(params)
)
def verify_signatures(params):
"""
Use the signature we receive in the POST back from CyberSource to verify
the identity of the sender (CyberSource) and that the contents of the message
have not been tampered with.
Args:
params (dictionary): The POST parameters we received from CyberSource.
Returns:
dict: Contains the parameters we will use elsewhere, converted to the
appropriate types
Raises:
CCProcessorSignatureException: The calculated signature does not match
the signature we received.
CCProcessorDataException: The parameters we received from CyberSource were not valid
(missing keys, wrong types)
"""
    # Commented out intentionally, to allow checking receipts of programs.
# if params.get('decision') == u'CANCEL':
# raise CCProcessorUserCancelled()
# if params.get('decision') == u'DECLINE':
# raise CCProcessorUserDeclined()
# signed_fields = params.get('signed_field_names', '').split(',')
# data = u",".join([u"{0}={1}".format(k, params.get(k, '')) for k in signed_fields])
# returned_sig = params.get('signature', '')
# if processor_hash(data) != returned_sig:
# raise CCProcessorSignatureException()
    # Validate that we have the parameters we expect and can convert them
# to the appropriate types.
# Usually validating the signature is sufficient to validate that these
# fields exist, but since we're relying on CyberSource to tell us
# which fields they included in the signature, we need to be careful.
valid_params = {}
required_params = [
('req_reference_number', int),
('req_currency', str),
('decision', str),
('auth_amount', Decimal),
]
# for key, key_type in required_params:
# if key not in params:
# raise CCProcessorDataException(
# _(
# u"The payment processor did not return a required parameter: {parameter}"
# ).format(parameter=key)
# )
# try:
# valid_params[key] = key_type(params[key])
# except (ValueError, TypeError, InvalidOperation):
# raise CCProcessorDataException(
# _(
# u"The payment processor returned a badly-typed value {value} for parameter {parameter}."
# ).format(value=params[key], parameter=key)
# )
# temporary fix
valid_params['req_reference_number'] = params.get('req_reference_number')
valid_params['req_currency'] = params.get('req_currency')
valid_params['decision'] = 'ACCEPT' or params.get('decision')
valid_params['auth_amount'] = params.get('req_amount')
return valid_params
def _payment_accepted(order_id, auth_amount, currency, decision):
"""
Check that CyberSource has accepted the payment.
Args:
order_num (int): The ID of the order associated with this payment.
auth_amount (Decimal): The amount the user paid using CyberSource.
currency (str): The currency code of the payment.
decision (str): "ACCEPT" if the payment was accepted.
Returns:
dictionary of the form:
{
'accepted': bool,
'amnt_charged': int,
'currency': string,
'order': Order
}
Raises:
CCProcessorDataException: The order does not exist.
CCProcessorWrongAmountException: The user did not pay the correct amount.
"""
try:
order = ProgramOrder.objects.get(id=order_id)
    except ProgramOrder.DoesNotExist:
raise CCProcessorDataException(
_("The payment processor accepted an order whose number is not in our system."))
if decision == 'ACCEPT':
return {
'accepted': True,
'amt_charged': auth_amount,
'currency': currency,
'order': order
}
else:
return {
'accepted': False,
'amt_charged': 0,
'currency': 'usd',
'order': order
}
def _record_payment_info(params, order):
"""
Record the purchase and run purchased_callbacks
Args:
params (dict): The parameters we received from CyberSource.
Returns:
None
"""
if settings.FEATURES.get("LOG_POSTPAY_CALLBACKS"):
log.info(
"Order %d processed (but not completed) with params: %s", order.id, json.dumps(
params)
)
order.processor_reply_dump = json.dumps(params)
order.save()
def _format_error_html(msg):
""" Format an HTML error message """
return u'<p class="error_msg">{msg}</p>'.format(msg=msg)
def _get_processor_exception_html(exception):
"""
Return HTML indicating that an error occurred.
Args:
exception (CCProcessorException): The exception that occurred.
Returns:
unicode: The rendered HTML.
"""
payment_support_email = microsite.get_value(
'payment_support_email', settings.PAYMENT_SUPPORT_EMAIL)
if isinstance(exception, CCProcessorDataException):
return _format_error_html(
_(
u"Sorry! Our payment processor sent us back a payment confirmation that had inconsistent data! "
u"We apologize that we cannot verify whether the charge went through and take further action on your order. "
u"The specific error message is: {msg} "
u"Your credit card may possibly have been charged. Contact us with payment-specific questions at {email}."
).format(
msg=u'<span class="exception_msg">{msg}</span>'.format(
msg=exception.message),
email=payment_support_email
)
)
elif isinstance(exception, CCProcessorWrongAmountException):
return _format_error_html(
_(
u"Sorry! Due to an error your purchase was charged for a different amount than the order total! "
u"The specific error message is: {msg}. "
u"Your credit card has probably been charged. Contact us with payment-specific questions at {email}."
).format(
msg=u'<span class="exception_msg">{msg}</span>'.format(
msg=exception.message),
email=payment_support_email
)
)
elif isinstance(exception, CCProcessorSignatureException):
return _format_error_html(
_(
u"Sorry! Our payment processor sent us back a corrupted message regarding your charge, so we are "
u"unable to validate that the message actually came from the payment processor. "
u"The specific error message is: {msg}. "
u"We apologize that we cannot verify whether the charge went through and take further action on your order. "
u"Your credit card may possibly have been charged. Contact us with payment-specific questions at {email}."
).format(
msg=u'<span class="exception_msg">{msg}</span>'.format(
msg=exception.message),
email=payment_support_email
)
)
elif isinstance(exception, CCProcessorUserCancelled):
return _format_error_html(
_(
u"Sorry! Our payment processor sent us back a message saying that you have cancelled this transaction. "
u"The items in your shopping cart will exist for future purchase. "
u"If you feel that this is in error, please contact us with payment-specific questions at {email}."
).format(
email=payment_support_email
)
)
elif isinstance(exception, CCProcessorUserDeclined):
return _format_error_html(
_(
u"We're sorry, but this payment was declined. The items in your shopping cart have been saved. "
u"If you have any questions about this transaction, please contact us at {email}."
).format(
email=payment_support_email
)
)
else:
return _format_error_html(
_(
u"Sorry! Your payment could not be processed because an unexpected exception occurred. "
u"Please contact us at {email} for assistance."
).format(email=payment_support_email)
)
def _get_processor_decline_html(params):
"""
Return HTML indicating that the user's payment was declined.
Args:
params (dict): Parameters we received from CyberSource.
Returns:
unicode: The rendered HTML.
"""
payment_support_email = microsite.get_value(
'payment_support_email', settings.PAYMENT_SUPPORT_EMAIL)
return _format_error_html(
_(
"Sorry! Our payment processor did not accept your payment. "
"The decision they returned was {decision}, "
"and the reason was {reason}. "
"You were not charged. Please try a different form of payment. "
"Contact us with payment-related questions at {email}."
).format(
decision='<span class="decision">{decision}</span>'.format(decision=params[
'decision']),
reason='<span class="reason">{reason_code}</span>'.format(
reason_code=params['reason_code']
),
email=payment_support_email
)
)
def process_postpay_callback(params):
"""
Handle a response from the payment processor.
Concrete implementations should:
1) Verify the parameters and determine if the payment was successful.
2) If successful, mark the order as purchased and call `purchased_callbacks` of the cart items.
3) If unsuccessful, try to figure out why and generate a helpful error message.
4) Return a dictionary of the form:
{'success': bool, 'order': Order, 'error_html': str}
Args:
params (dict): Dictionary of parameters received from the payment processor.
Keyword Args:
Can be used to provide additional information to concrete implementations.
Returns:
dict
"""
try:
valid_params = verify_signatures(params)
result = _payment_accepted(
valid_params['req_reference_number'],
valid_params['auth_amount'],
valid_params['req_currency'],
valid_params['decision']
)
if result['accepted']:
_record_purchase(params, result['order'])
return {
'success': True,
'order': result['order'],
'error_html': ''
}
else:
_record_payment_info(params, result['order'])
return {
'success': False,
'order': result['order'],
'error_html': _get_processor_decline_html(params)
}
except CCProcessorException as error:
log.exception('error processing CyberSource postpay callback')
# if we have the order and the id, log it
if hasattr(error, 'order'):
_record_payment_info(params, error.order)
else:
log.info(json.dumps(params))
return {
'success': False,
'order': None, # due to exception we may not have the order
'error_html': _get_processor_exception_html(error)
}
def _show_receipt_html(request, order):
"""Render the receipt page as HTML.
Arguments:
request (HttpRequest): The request for the receipt.
order (Order): The order model to display.
Returns:
HttpResponse
"""
order_item = order
program = order_item.program
shoppingcart_items = []
course_names_list = []
shoppingcart_items.append((order_item, program))
course_names_list.append(program.name)
appended_course_names = ", ".join(course_names_list)
any_refunds = order_item.status == "refunded"
receipt_template = 'micro_masters/receipt.html'
recipient_list = []
total_registration_codes = None
reg_code_info_list = []
recipient_list.append(order.user.email)
appended_recipient_emails = ", ".join(recipient_list)
context = {
'order': order,
'shoppingcart_items': shoppingcart_items,
'any_refunds': any_refunds,
'site_name': configuration_helpers.get_value('SITE_NAME', settings.SITE_NAME),
'appended_course_names': appended_course_names,
'appended_recipient_emails': appended_recipient_emails,
'currency_symbol': settings.PAID_COURSE_REGISTRATION_CURRENCY[1],
'currency': settings.PAID_COURSE_REGISTRATION_CURRENCY[0],
'total_registration_codes': total_registration_codes,
'reg_code_info_list': reg_code_info_list,
'order_purchase_date': order.purchase_time.strftime("%B %d, %Y"),
}
# receipt_template = order_items.single_item_receipt_template
context.update({'receipt_has_donation_item': True})
return render_to_response(receipt_template, context)
@login_required
def show_program_receipt(request, ordernum):
"""
Displays a receipt for a particular order.
404 if order is not yet purchased or request.user != order.user
"""
try:
order = ProgramOrder.objects.get(id=ordernum)
except ProgramOrder.DoesNotExist:
raise Http404('Order not found!')
if order.user != request.user or order.status not in ['purchased', 'refunded']:
raise Http404('Order not found!')
return _show_receipt_html(request, order)
@login_required
def program_enroll(request, program_id):
user = request.user
try:
program = Program.objects.get(pk=program_id)
except Exception, e:
raise Http404('Program not found!')
courses = []
for course in program.courses.select_related():
courses += [CourseOverview.get_from_id(course.course_key)]
if program.price <= 0:
ProgramEnrollment.enroll(user, program.id)
dashboard = reverse('dashboard') + '?active=program'
return HttpResponseRedirect(dashboard)
else:
return HttpResponseRedirect(reverse('openedx.core.djangoapps.micro_masters.views.program_buy', args=[program.id]))
@login_required
def program_unenroll(request):
user = request.user
program_id = request.POST.get('program_id', '')
try:
program = Program.objects.get(pk=program_id)
except Exception, e:
raise Http404('Program not found!')
courses = []
for course in program.courses.select_related():
courses += [CourseOverview.get_from_id(course.course_key)]
if program.price <= 0:
ProgramEnrollment.unenroll(user, program.id)
return HttpResponse()
else:
return HttpResponseRedirect(reverse('openedx.core.djangoapps.micro_masters.views.program_buy', args=[program.id]))
def program_about(request, program_id):
"""
get details for specific program or package
"""
user = request.user
try:
program = Program.objects.get(pk=program_id)
except Exception, e:
raise Http404
courses = []
for course in program.courses.select_related():
courses += [CourseOverview.get_from_id(course.course_key)]
user_is_enrolled = False
program_is_free_not_enroll = False
if user.is_authenticated():
user_is_enrolled = ProgramEnrollment.is_enrolled(user, program.id)
if program.price <= 0 and not user_is_enrolled:
program_is_free_not_enroll = True
context = {}
currency = settings.PAID_COURSE_REGISTRATION_CURRENCY
context['currency'] = currency
context['program'] = program
context['courses'] = courses
context['user_is_enrolled'] = user_is_enrolled
context['program_is_free_not_enroll'] = program_is_free_not_enroll
return render_to_response('micro_masters/program_about.html', context)
@csrf_exempt
@require_POST
def program_postpay_callback(request):
"""
Receives the POST-back from processor.
Mainly this calls the processor-specific code to check if the payment was accepted, and to record the order
if it was, and to generate an error page.
If successful this function should have the side effect of changing the "cart" into a full "order" in the DB.
The cart can then render a success page which links to receipt pages.
If unsuccessful the order will be left untouched and HTML messages giving more detailed error info will be
returned.
"""
params = request.POST.dict()
result = process_postpay_callback(params)
if result['success']:
order = result['order']
# See if this payment occurred as part of the verification flow process
# If so, send the user back into the flow so they have the option
# to continue with verification.
# Only orders where order_items.count() == 1 might be attempting to
# upgrade
attempting_upgrade = request.session.get('attempting_upgrade', False)
if attempting_upgrade:
request.session['attempting_upgrade'] = False
ProgramEnrollment.enroll(request.user, order.program.id)
# Otherwise, send the user to the receipt page
return HttpResponseRedirect(reverse('openedx.core.djangoapps.micro_masters.views.show_program_receipt', args=[result['order'].id]))
else:
request.session['attempting_upgrade'] = False
return render_to_response('shoppingcart/error.html', {'order': result['order'],
'error_html': result['error_html']})
def programs_order_history(user):
"""
    Returns the list of previously purchased program orders for a user. Only
    ProgramOrder records with status 'purchased' are returned.
"""
order_history_list = []
purchased_order_items = ProgramOrder.objects.filter(
user=user, status='purchased').order_by('-purchase_time')
for order_item in purchased_order_items:
# Avoid repeated entries for the same order id.
if order_item.id not in [item['number'] for item in order_history_list]:
order_history_list.append({
'number': order_item.id,
'title': order_item.program.name,
'price': float(order_item.program.price),
'receipt_url': reverse('openedx.core.djangoapps.micro_masters.views.show_program_receipt', kwargs={'ordernum': order_item.id}),
'order_date': ModuleI18nService().strftime(order_item.purchase_time, 'SHORT_DATE')
})
return order_history_list
def render_purchase_form_html(cart, callback_url=None, extra_data=None):
"""
Renders the HTML of the hidden POST form that must be used to initiate a purchase with CyberSource
Args:
cart (Order): The order model representing items in the user's cart.
Keyword Args:
callback_url (unicode): The URL that CyberSource should POST to when the user
completes a purchase. If not provided, then CyberSource will use
the URL provided by the administrator of the account
(CyberSource config, not LMS config).
extra_data (list): Additional data to include as merchant-defined data fields.
Returns:
unicode: The rendered HTML form.
"""
return render_to_string('micro_masters/cybersource_form.html', {
'action': CC_PROCESSOR.get('PURCHASE_ENDPOINT', ''),
'params': get_purchase_params(cart, callback_url),
})
@csrf_exempt
@login_required
def program_buy(request, program_id):
user = request.user
try:
program = Program.objects.get(pk=program_id)
except Exception, e:
raise Http404
user_is_enrolled = False
user_is_enrolled = ProgramEnrollment.is_enrolled(user, program.id)
if program.price <= 0 and not user_is_enrolled:
return HttpResponseRedirect(reverse('openedx.core.djangoapps.micro_masters.views.program_about', args=[program.id]))
courses = []
for course in program.courses.select_related():
courses += [get_course_by_id(course.course_key)]
cart = ProgramOrder.get_or_create_order(user, program)
# check coupon expiration_date
if cart.discount_applied:
try:
coupon_redemption = ProgramCouponRedemption.objects.get(user=user, order=cart)
if coupon_redemption.coupon.is_active:
if coupon_redemption.coupon.expiration_date:
if datetime.now(pytz.UTC).__gt__(coupon_redemption.coupon.expiration_date):
ProgramCouponRedemption.remove_coupon_redemption_from_cart(request.user, cart)
cart.discounted_price = 0
cart.save()
else:
ProgramCouponRedemption.remove_coupon_redemption_from_cart(request.user, cart)
cart.discounted_price = 0
cart.save()
except Exception, e:
ProgramCouponRedemption.remove_coupon_redemption_from_cart(request.user, cart)
cart.discounted_price = 0
cart.save()
callback_url = request.build_absolute_uri(
reverse("shoppingcart.views.postpay_callback")
)
protocol = 'https' if request.is_secure() else 'http'
callback_urls = {
'success': 'http://edlab.edx.drcsystems.com/programs/program_postpay_callback/',
'cancel': protocol + '://' + request.get_host() + request.path
}
form_html = render_purchase_form_html(cart, callback_url=callback_urls)
context = {
'order': cart,
'shoppingcart_items': courses,
'amount': cart.item_price,
'site_name': configuration_helpers.get_value('SITE_NAME', settings.SITE_NAME),
'form_html': form_html,
'currency_symbol': settings.PAID_COURSE_REGISTRATION_CURRENCY[1],
'currency': settings.PAID_COURSE_REGISTRATION_CURRENCY[0],
}
return render_to_response("micro_masters/shopping_cart.html", context)
def use_coupon_code(coupons, user, order):
"""
    This method applies a program coupon code to the user's order.
"""
cart = order
is_redemption_applied = False
for coupon in coupons:
try:
if ProgramCouponRedemption.add_coupon_redemption(coupon, cart):
is_redemption_applied = True
except MultipleCouponsNotAllowedException:
return HttpResponseBadRequest(_("Only one coupon redemption is allowed against an order"))
if not is_redemption_applied:
log.warning(u"Discount does not exist against code '%s'.", coupons[0].code)
return HttpResponseNotFound(_("Discount does not exist against code '{code}'.").format(code=coupons[0].code))
return HttpResponse(
json.dumps({'response': 'success', 'coupon_code_applied': True}),
content_type="application/json"
)
@login_required
def reset_code_redemption(request):
"""
This method reset the code redemption from user cart items.
"""
order_id = request.POST.get('order_id', '')
try:
order = ProgramOrder.objects.get(pk=order_id)
except Exception, e:
return HttpResponseNotFound(_("Order does not exist"))
order.discounted_price = 0
order.discount_applied = False
order.save()
ProgramCouponRedemption.remove_coupon_redemption_from_cart(request.user, order)
return HttpResponse('reset')
@login_required
def use_code(request):
"""
    Applies a coupon code to a program order.
    For a valid coupon code, this generates a discount against the order.
    Unknown, inactive, or expired codes result in an error response.
"""
code = request.POST["code"]
order_id = request.POST.get('order_id', '')
try:
order = ProgramOrder.objects.get(pk=order_id)
except Exception, e:
return HttpResponseNotFound(_("Order does not exist"))
coupons = ProgramCoupon.objects.filter(
Q(code=code),
Q(is_active=True),
Q(expiration_date__gt=datetime.now(pytz.UTC)) |
Q(expiration_date__isnull=True)
)
if not coupons:
return HttpResponseNotFound(_("Discount does not exist against code '{code}'.").format(code=code))
return use_coupon_code(coupons, request.user, order)
def prorgam_user_certificate(request, certificate_uuid):
platform_name = configuration_helpers.get_value("platform_name", settings.PLATFORM_NAME)
context = {}
try:
user_program_certificate = ProgramGeneratedCertificate.objects.get(
verify_uuid=certificate_uuid,
issued=True
)
user = user_program_certificate.user
program_certificate_signs = ProgramCertificateSignatories.objects.filter(
program=user_program_certificate.program
)
context['user_program_certificate'] = user_program_certificate
context['program_certificate_signs'] = program_certificate_signs
context['platform_name'] = platform_name
context['course_id'] = user_program_certificate.program.id
context['full_course_image_url'] = request.build_absolute_uri(user_program_certificate.program.banner_image.url)
# Needed
# Translators: 'All rights reserved' is a legal term used in copyrighting to protect published content
reserved = _("All rights reserved")
context['copyright_text'] = u'© {year} {platform_name}. {reserved}.'.format(
year=settings.COPYRIGHT_YEAR,
platform_name=platform_name,
reserved=reserved
)
# Needed
# Translators: A 'Privacy Policy' is a legal document/statement describing a website's use of personal information
context['company_privacy_urltext'] = _("Privacy Policy")
# Needed
# Translators: This line appears as a byline to a header image and describes the purpose of the page
context['logo_subtitle'] = _("Certificate Validation")
# Needed
# Translators: Accomplishments describe the awards/certifications obtained by students on this platform
context['accomplishment_copy_about'] = _('About {platform_name} Accomplishments').format(
platform_name=platform_name
)
# Needed
# Translators: This line appears on the page just before the generation date for the certificate
context['certificate_date_issued_title'] = _("Issued On:")
# Needed
# Translators: This text describes (at a high level) the mission and charter the edX platform and organization
context['company_about_description'] = _("{platform_name} offers interactive online classes and MOOCs.").format(
platform_name=platform_name)
# Needed
context['company_about_title'] = _("About {platform_name}").format(platform_name=platform_name)
# Needed
context['company_about_urltext'] = _("Learn more about {platform_name}").format(platform_name=platform_name)
# Needed banner docs
# Translators: This text appears near the top of the certficate and describes the guarantee provided by edX
context['document_banner'] = _("{platform_name} acknowledges the following student accomplishment").format(
platform_name=platform_name
)
# Needed
# Add certificate header/footer data to current context
context.update(get_certificate_header_context(is_secure=request.is_secure()))
context.update(get_certificate_footer_context())
# Needed
context['accomplishment_copy_course_name'] = user_program_certificate.program.name
# Needed
# Translators: This text represents the description of course
context['accomplishment_copy_course_description'] = _('a course of study offered by '
'{platform_name}.').format(
platform_name=platform_name)
user_fullname = user.profile.name
# Needed
context['accomplishment_user_id'] = user.id
# Needed
context['accomplishment_copy_name'] = user_fullname
# Needed
context['accomplishment_copy_username'] = user.username
# Needed banner text
# Translators: This line is displayed to a user who has completed a course and achieved a certification
context['accomplishment_banner_opening'] = _("{fullname}, you earned a certificate!").format(
fullname=user_fullname
)
# Needed banner text
# Translators: This line congratulates the user and instructs them to share their accomplishment on social networks
context['accomplishment_banner_congrats'] = _("Congratulations! This page summarizes what "
"you accomplished. Show it off to family, friends, and colleagues "
"in your social and professional networks.")
# Needed
# Translators: This line leads the reader to understand more about the certificate that a student has been awarded
context['accomplishment_copy_more_about'] = _("More about {fullname}'s accomplishment").format(
fullname=user_fullname
)
# Needed for social sharing
share_settings = configuration_helpers.get_value("SOCIAL_SHARING_SETTINGS", settings.SOCIAL_SHARING_SETTINGS)
context['facebook_share_enabled'] = share_settings.get('CERTIFICATE_FACEBOOK', False)
context['facebook_app_id'] = configuration_helpers.get_value("FACEBOOK_APP_ID", settings.FACEBOOK_APP_ID)
context['facebook_share_text'] = share_settings.get(
'CERTIFICATE_FACEBOOK_TEXT',
_("I completed the {course_title} course on {platform_name}.").format(
course_title=context['accomplishment_copy_course_name'],
platform_name=platform_name
)
)
context['twitter_share_enabled'] = share_settings.get('CERTIFICATE_TWITTER', False)
context['twitter_share_text'] = share_settings.get(
'CERTIFICATE_TWITTER_TEXT',
_("I completed a course at {platform_name}. Take a look at my certificate.").format(
platform_name=platform_name
)
)
# Need to change certificate url
share_url = request.build_absolute_uri(reverse('openedx.core.djangoapps.micro_masters.views.prorgam_user_certificate', kwargs={'certificate_uuid': certificate_uuid}))
context['share_url'] = share_url
twitter_url = ''
if context.get('twitter_share_enabled', False):
twitter_url = 'https://twitter.com/intent/tweet?text={twitter_share_text}&url={share_url}'.format(
twitter_share_text=smart_str(context['twitter_share_text']),
share_url=urllib.quote_plus(smart_str(share_url))
)
context['twitter_url'] = twitter_url
context['linked_in_url'] = None
# If enabled, show the LinkedIn "add to profile" button
# Clicking this button sends the user to LinkedIn where they
# can add the certificate information to their profile.
linkedin_config = LinkedInAddToProfileConfiguration.current()
linkedin_share_enabled = share_settings.get('CERTIFICATE_LINKEDIN', linkedin_config.enabled)
if linkedin_share_enabled:
context['linked_in_url'] = linkedin_config.add_to_profile_url(
course.id,
context['accomplishment_copy_course_name'],
user_certificate.mode,
smart_str(share_url)
)
# certificate_type = context.get('certificate_type')
# Override the defaults with any mode-specific static values
# Needed
context['certificate_id_number'] = certificate_uuid
# Needed
# Translators: The format of the date includes the full name of the month
context['certificate_date_issued'] = _('{month} {day}, {year}').format(
month=user_program_certificate.modified.strftime("%B"),
day=user_program_certificate.modified.day,
year=user_program_certificate.modified.year
)
# Needed
# Translators: This text is bound to the HTML 'title' element of the page and appears in the browser title bar
context['document_title'] = _("Certificate | {platform_name}").format(
platform_name=platform_name
)
# Needed
# Translators: This text fragment appears after the student's name (displayed in a large font) on the certificate
# screen. The text describes the accomplishment represented by the certificate information displayed to the user
context['accomplishment_copy_description_full'] = _("successfully completed, received a passing grade, and was "
"awarded this {platform_name} "
"Certificate of Completion in ").format(
platform_name=platform_name)
# Needed
# Translators: This text describes the purpose (and therefore, value) of a course certificate
context['certificate_info_description'] = _("{platform_name} acknowledges achievements through "
"certificates, which are awarded for course activities "
"that {platform_name} students complete.").format(
platform_name=platform_name,
tos_url=context.get('company_tos_url'))
return render_to_response("micro_masters/certificates/valid.html", context)
except Exception, e:
raise Http404
@login_required
def program_info(request, program_id):
user = request.user
context = {}
try:
user_program = ProgramEnrollment.objects.get(user=user, is_active=True, program__id=program_id)
except Exception, e:
raise Http404
course_grades = {}
courses = []
for course in user_program.program.courses.select_related():
try:
course_grade = LeaderBoard.objects.get(student=user, course_id=course.course_key)
course_grades.update({
course.course_key: {
'points': course_grade.points,
'pass': course_grade.has_passed,
}
})
if course_grade.points and course_grade.has_passed:
course_grades.get(course.course_key)['course_states'] = {
'completed': True,
'in_progress': False,
'not_started': False
}
elif course_grade.points:
course_grades.get(course.course_key)['course_states'] = {
'completed': False,
'in_progress': True,
'not_started': False
}
else:
course_grades.get(course.course_key)['course_states'] = {
'completed': False,
'in_progress': False,
'not_started': True
}
except Exception, e:
course_grades.update({
course.course_key: {
'points': 0,
'pass': False,
'course_states': {
'completed': False,
'in_progress': False,
'not_started': True
}
}
})
try:
courses += [CourseOverview.get_from_id(course.course_key)]
except Exception, e:
courses = courses
context['program_courses'] = courses
context['program'] = user_program.program
context['course_grades'] = course_grades
return render_to_response('micro_masters/program_info.html', context)
| agpl-3.0 | 8,344,092,858,772,803,000 | 37.93017 | 201 | 0.633552 | false |
tleonardi/bedparse | bedparse/bedparse.py | 1 | 15785 | #!/usr/bin/python3
import signal
import argparse
import sys
import csv
import re
from pkg_resources import get_distribution
from bedparse import bedline
from bedparse import gtf2bed
from bedparse import BEDexception
# This allows using the program in a pipe
# The program is killed when it receives a sigpipe
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
__version__ = get_distribution('bedparse').version
def introns(args):
with args.bedfile as tsvfile:
for line in tsvfile:
introns=bedline(line.split('\t')).introns()
if(introns): introns.print()
tsvfile.close()
def threeP(args):
with args.bedfile as tsvfile:
for line in tsvfile:
utr=bedline(line.split('\t')).utr(which=3)
if(utr): utr.print()
tsvfile.close()
def fiveP(args):
with args.bedfile as tsvfile:
for line in tsvfile:
utr=bedline(line.split('\t')).utr(which=5)
if(utr): utr.print()
tsvfile.close()
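# Hedged illustration (not part of the original module): bedline wraps a single
# BED record given as a list of string fields; the values below are made up and
# subject to bedline's own validation.
#
# tx = bedline(["chr1", "100", "1000", "tx1", "0", "+",
#               "200", "900", "0", "2", "300,400", "0,500"])
# tx.utr(which=5).print()   # prints the 5'UTR as a BED line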
def cds(args):
with args.bedfile as tsvfile:
for line in tsvfile:
utr=bedline(line.split('\t')).cds(ignoreCDSonly=args.ignoreCDSonly)
if(utr): utr.print()
tsvfile.close()
def prom(args):
with args.bedfile as tsvfile:
for line in tsvfile:
bedline(line.split('\t')).promoter(up=args.up, down=args.down, strand=(not args.unstranded)).print()
tsvfile.close()
def bed12tobed6(args):
    if args.whichExon != "all" and args.keepIntrons:
raise BEDexception("--keepIntrons is only allowed with --whichExon all")
with args.bedfile as tsvfile:
for line in tsvfile:
tx = bedline(line.split('\t'))
exon_list = tx.bed12tobed6(appendExN=args.appendExN, whichExon=args.whichExon)
for el in exon_list:
el.print()
if(args.keepIntrons):
nameSub=re.compile("_Exon([0-9]+)")
for el in tx.introns().bed12tobed6(appendExN=args.appendExN):
el.name=nameSub.sub(r"_Intron\1", el.name)
el.print()
tsvfile.close()
def filter(args):
col=args.column-1
inverse=args.inverse
filterset=set()
try:
annotation=open(args.annotation)
except:
raise BEDexception("Annotation file not valid")
annotationReader = csv.reader(annotation, delimiter="\t")
for line in annotationReader:
filterset.add(line[col])
annotation.close()
with args.bedfile as tsvfile:
for line in tsvfile:
if(line.split('\t')[3] in filterset and not inverse):
print(line.rstrip())
elif(line.split('\t')[3] not in filterset and inverse):
print(line.rstrip())
tsvfile.close()
def join(args):
col=args.column-1
annot=dict()
try:
annotation=open(args.annotation)
except:
raise BEDexception("Annotation file not valid")
annotationReader = csv.reader(annotation, delimiter=args.separator)
for line in annotationReader:
if(len(line)<=col):
raise BEDexception("Some lines don't contain the annotation column")
annot.setdefault(line[col], []).append(line[0:col]+line[col+1:])
annotation.close()
with args.bedfile as tsvfile:
for line in tsvfile:
line=line.split('\t')
if(args.noUnmatched==False or line[3] in annot.keys()):
record=bedline(line)
if(record):
nrec=len(annot.setdefault(record.name, []))
if(nrec==0):
if(args.empty==''):
record.print()
else:
record.print(end='')
print('',args.empty,sep="\t")
else:
for i in range(0,nrec):
record.print(end='')
print('',*annot[record.name][i], sep='\t')
tsvfile.close()
def convertChr(args):
with args.bedfile as tsvfile:
for line in tsvfile:
translatedLine=bedline(line.split('\t')).translateChr(assembly=args.assembly, target=args.target, suppress=args.suppressMissing, ignore=args.allowMissing, patches=args.patches)
if(translatedLine):
translatedLine.print()
tsvfile.close()
def validateFormat(args):
with args.bedfile as tsvfile:
for n,line in enumerate(tsvfile):
if args.fixSeparators:
line=re.sub(r'^\s+', '', line)
line=re.sub(r'\s+', '\t', line)
line=re.sub(r'\s+$', '', line)
try:
validatedLine=bedline(line.split('\t'))
except BEDexception as formatException:
raise BEDexception("\nThis doesn't appear to be a valid BED file. There was an error at line %s:\n\t\"%s\"" %(n+1, formatException))
else:
validatedLine.print()
tsvfile.close()
def main(args=None):
desc_threep="Report the 3'UTR of each coding transcript (i.e. transcripts with distinct values of thickStart and thickEnd). Transcripts without CDS are not reported."
desc_fivep="Report the 5'UTR of each coding transcript (i.e. transcripts with distinct values of thickStart and thickEnd). Transcripts without CDS are not reported."
desc_cds="Report the CDS of each coding transcript (i.e. transcripts with distinct values of thickStart and thickEnd). Transcripts without CDS are not reported."
desc_prom="Report the promoter of each transcript, defined as a fixed interval around its start."
desc_intron="Report BED12 lines corresponding to the introns of each transcript. Unspliced transcripts are not reported."
desc_filter="""Filters a BED file based on an annotation. BED entries with a name (i.e. col4) that appears in the specified column of the annotation are
printed to stdout. For efficiency reasons this command doesn't perform BED validation."""
desc_join="""Adds the content of an annotation file to a BED file as extra columns. The two files are joined by matching the BED Name field (column 4) with
a user-specified field of the annotation file."""
desc_gtf2bed="""Converts a GTF file to BED12 format. This tool supports the Ensembl GTF format, which uses features of type 'transcript' (field 3) to define transcripts.
In case the GTF file defines transcripts with a different feature type, it is possible to provide the feature name from the command line.
If the GTF file also annotates 'CDS' 'start_codon' or 'stop_codon' these are used to annotate the thickStart and thickEnd in the BED file."""
desc_bed12tobed6="Convert the BED12 format into BED6 by reporting a separate line for each block of the original record."
desc_convertChr="""Convert chromosome names between UCSC and Ensembl formats.
The conversion supports the hg38 assembly up to patch 11 and the mm10 assembly up to patch 4. By default patches
are not converted (because the UCSC genome browser does not support them), but can be enabled using the -p flag.
When the BED file contains a chromosome that is not recognised, by default the program stops and throws an error. Alternatively,
unrecognised chromosomes can be suppressed (-s) or artificially set to 'NA' (-a)."""
    desc_validateFormat="Checks whether the BED file provided adheres to the BED format specifications. Optionally, it can fix field separation errors."
if args is None:
args = sys.argv[1:]
parser = argparse.ArgumentParser(
description="""Perform various simple operations on BED files.""")
parser.add_argument('--version', '-v', action='version', version='v'+__version__)
subparsers = parser.add_subparsers(help='sub-command help', dest='sub-command')
subparsers.required = True
parser_3pUTR = subparsers.add_parser('3pUTR', help="Prints the 3' of coding genes.", description=desc_threep)
parser_3pUTR.add_argument("bedfile", type=argparse.FileType('r'), nargs='?', default=sys.stdin, help="Path to the BED file.")
parser_3pUTR.set_defaults(func=threeP)
parser_5pUTR = subparsers.add_parser('5pUTR', help="Prints the 5' of coding genes.", description=desc_fivep)
parser_5pUTR.add_argument("bedfile", type=argparse.FileType('r'), nargs='?', default=sys.stdin, help="Path to the BED file.")
parser_5pUTR.set_defaults(func=fiveP)
parser_cds = subparsers.add_parser('cds', help="Prints the CDS of coding genes.", description=desc_cds)
parser_cds.add_argument("--ignoreCDSonly",action="store_true", help="Ignore transcripts that only consist of CDS.")
parser_cds.add_argument("bedfile", type=argparse.FileType('r'), nargs='?', default=sys.stdin, help="Path to the BED file.")
parser_cds.set_defaults(func=cds)
parser_prom = subparsers.add_parser('promoter', help="Prints the promoters of transcripts.", description=desc_prom)
parser_prom.add_argument("--up",type=int, default=500, help="Get this many nt upstream of each feature.")
parser_prom.add_argument("--down",type=int, default=500, help="Get this many nt downstream of each feature.")
parser_prom.add_argument("--unstranded",action="store_true", help="Do not consider strands.")
parser_prom.add_argument("bedfile", type=argparse.FileType('r'), nargs='?', default=sys.stdin, help="Path to the BED file.")
parser_prom.set_defaults(func=prom)
parser_introns = subparsers.add_parser('introns', help="Prints BED records corresponding to the introns of each transcript in the original file.", description=desc_intron)
parser_introns.add_argument("bedfile", type=argparse.FileType('r'), nargs='?', default=sys.stdin, help="Path to the BED file.")
parser_introns.set_defaults(func=introns)
parser_filter = subparsers.add_parser('filter',
help="Filters a BED file based on an annotation.", description=desc_filter)
parser_filter.add_argument("--annotation", "-a", type=str, help="Path to the annotation file.", required=True)
parser_filter.add_argument("--column","-c",type=int, default=1, help="Column of the annotation file (1-based, default=1).")
parser_filter.add_argument("--inverse", "-v" ,action="store_true", help="Only report BED entries absent from the annotation file.")
parser_filter.set_defaults(func=filter)
parser_filter.add_argument("bedfile", type=argparse.FileType('r'), nargs='?', default=sys.stdin,
help="Path to the BED file.")
parser_join = subparsers.add_parser('join',
help="""Joins a BED file with an annotation file using
the BED name (col4) as the joining key.""", description=desc_join)
parser_join.add_argument("--annotation", "-a", type=str, help="Path to the annotation file.", required=True)
parser_join.add_argument("--column","-c",type=int, default=1, help="Column of the annotation file (1-based, default=1).")
parser_join.add_argument("--separator","-s",type=str, default='\t', help="Field separator for the annotation file (default tab)")
parser_join.add_argument("--empty","-e",type=str, default='.', help="String to append to empty records (default '.').")
parser_join.add_argument("--noUnmatched", "-n" ,action="store_true", help="Do not print unmatched lines.")
parser_join.set_defaults(func=join)
parser_join.add_argument("bedfile", type=argparse.FileType('r'), nargs='?', default=sys.stdin,
help="Path to the BED file.")
parser_gtf2bed = subparsers.add_parser('gtf2bed',
help="Converts a GTF file to BED12 format.", description=desc_gtf2bed)
parser_gtf2bed.add_argument("gtf", type=argparse.FileType('r'), nargs='?', default=sys.stdin, help="Path to the GTF file.")
parser_gtf2bed.add_argument("--extraFields",type=str, default='', help="Comma separated list of extra GTF fields to be added after col 12 (e.g. gene_id,gene_name).")
parser_gtf2bed.add_argument("--filterKey", type=str, default='transcript_biotype', help="GTF extra field on which to apply the filtering")
parser_gtf2bed.add_argument("--filterType",type=str, default='', help="Comma separated list of filterKey field values to retain.")
parser_gtf2bed.add_argument("--transcript_feature_name",type=str, default='transcript', help="Transcript feature name. Features with this string in field 3 of the GTF file will be considered transcripts. (default 'transcript')")
parser_gtf2bed.set_defaults(func=lambda args: gtf2bed(args.gtf, extra=args.extraFields.split(','), filterKey=args.filterKey, filterType=args.filterType.split(','), transcript_feature_name=args.transcript_feature_name))
parser_bed12tobed6 = subparsers.add_parser('bed12tobed6',
help="Converts a BED12 file to BED6 format", description=desc_bed12tobed6)
parser_bed12tobed6.add_argument("bedfile", type=argparse.FileType('r'), nargs='?', default=sys.stdin, help="Path to the GTF file.")
parser_bed12tobed6.add_argument("--appendExN", action="store_true", help="Appends the exon number to the transcript name.")
parser_bed12tobed6.add_argument("--whichExon",type=str, default='all', choices=["all", "first", "last"], help="Which exon to return. First and last respectively report the first or last exon relative to the TSS (i.e. taking strand into account).")
parser_bed12tobed6.add_argument("--keepIntrons", action="store_true", help="Add records for introns as well. Only allowed if --whichExon all")
parser_bed12tobed6.set_defaults(func=bed12tobed6)
parser_convertChr = subparsers.add_parser('convertChr', help="Convert chromosome names between UCSC and Ensembl formats", description=desc_convertChr)
parser_convertChr.add_argument("bedfile", type=argparse.FileType('r'), nargs='?', default=sys.stdin, help="Path to the BED file.")
parser_convertChr.add_argument("--assembly", type=str, help="Assembly of the BED file (either hg38 or mm10).", required=True)
    parser_convertChr.add_argument("--target", type=str, help="Desired chromosome name convention (ucsc or ens).", required=True)
    parser_convertChr.add_argument("--allowMissing", "-a" ,action="store_true", help="""When a chromosome name can't be matched between UCSC and Ensembl, set it to 'NA' (by default an error is thrown).""")
    parser_convertChr.add_argument("--suppressMissing", "-s" ,action="store_true", help="""When a chromosome name can't be matched between UCSC and Ensembl, do not report it in the output (by default throws an error).""")
parser_convertChr.add_argument("--patches", "-p" ,action="store_true", help="""Allows conversion of all patches up to p11 for hg38 and p4 for mm10. Without this option, if the BED file contains contigs added by a patch the conversion terminates with an error (unless the -a or -s flags are present).""")
parser_convertChr.set_defaults(func=convertChr)
parser_validateFormat = subparsers.add_parser('validateFormat', help="Check whether the BED file adheres to the BED format specifications", description=desc_validateFormat)
parser_validateFormat.add_argument("bedfile", type=argparse.FileType('r'), nargs='?', default=sys.stdin, help="Path to the BED file.")
    parser_validateFormat.add_argument("--fixSeparators", "-f" ,action="store_true", help="""If the fields are separated by multiple spaces (e.g. when copy-pasting BED files), replace them with tabs.""")
parser_validateFormat.set_defaults(func=validateFormat)
args = parser.parse_args()
args.func(args)
if __name__ == "__main__":
main()
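# Illustrative command-line usage (not part of the original module); the input
# and output file names below are assumptions chosen only for the examples.
#
#     bedparse 3pUTR transcripts.bed > utr3.bed
#     bedparse promoter --up 1000 --down 200 transcripts.bed > promoters.bed
#     bedparse gtf2bed annotation.gtf --extraFields gene_id,gene_name > annotation.bed
#     cat transcripts.bed | bedparse bed12tobed6 --appendExN > exons.bed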
| mit | 3,832,478,660,521,857,000 | 61.391304 | 307 | 0.669876 | false |
istresearch/scrapy-cluster | redis-monitor/plugins/expire_monitor.py | 1 | 2208 | from __future__ import absolute_import
from .stop_monitor import StopMonitor
class ExpireMonitor(StopMonitor):
'''
Monitors for expiring crawls
'''
regex = "timeout:*:*:*"
def setup(self, settings):
'''
Setup kafka
'''
StopMonitor.setup(self, settings)
def check_precondition(self, key, value):
'''
Override to check for timeout
'''
timeout = float(value)
curr_time = self.get_current_time()
if curr_time > timeout:
return True
return False
def handle(self, key, value):
'''
        Processes a valid action info request
@param key: The key that matched the request
@param value: The value associated with the key
'''
# very similar to stop
# break down key
elements = key.split(":")
spiderid = elements[1]
appid = elements[2]
crawlid = elements[3]
# log ack of expire
extras = self.get_log_dict('expire', appid,
spiderid, crawlid=crawlid)
self.logger.info("Expiring crawl found", extra=extras)
        # add crawl to blacklist so it doesn't propagate
redis_key = spiderid + ":blacklist"
value = '{appid}||{crawlid}'.format(appid=appid,
crawlid=crawlid)
# add this to the blacklist set
self.redis_conn.sadd(redis_key, value)
# everything stored in the queue is now expired
result = self._purge_crawl(spiderid, appid, crawlid)
# add result to our dict
master = {}
master['server_time'] = int(self.get_current_time())
master['crawlid'] = crawlid
master['spiderid'] = spiderid
master['appid'] = appid
master['total_expired'] = result
master['action'] = 'expired'
if self._send_to_kafka(master):
master['success'] = True
self.logger.info('Sent expired ack to kafka', extra=master)
else:
master['success'] = False
self.logger.error('Failed to send expired ack to kafka',
extra=master)
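# Illustrative note (not part of the original module): keys handled by this
# plugin follow the pattern "timeout:<spiderid>:<appid>:<crawlid>", and the
# stored value is a Unix timestamp. Once get_current_time() passes that
# timestamp, check_precondition() returns True and handle() purges the crawl.
# A minimal sketch of the precondition check in isolation (values assumed):
#
#     import time
#     value = str(time.time() - 1)        # crawl expired one second ago
#     assert time.time() > float(value)   # same comparison as check_precondition()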
| mit | -1,714,228,095,611,840,500 | 29.666667 | 71 | 0.552083 | false |
xiaoda99/keras | examples/trading/ifshort_mlp.py | 1 | 2386 | from __future__ import absolute_import
from __future__ import print_function
import numpy as np
np.random.seed(1337) # for reproducibility
#from keras.datasets import mnist
from keras.models_xd import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.optimizers import SGD, Adam, RMSprop
from keras.utils import np_utils
from keras.initializations import uniform
from keras.regularizers import l2
from keras.callbacks import EarlyStopping, ModelCheckpoint
#from pylearn2.datasets.if_monthly import IFMonthlyLong, IFMonthly2
#train = IFMonthly2(which_set='train', short_ts=[5, 10], use_long=False, target_type='ASV', gain_range=[0, 10], hist_len=3)
#test = IFMonthly2(which_set='test', short_ts=[5, 10], use_long=False, target_type='ASV', gain_range=[0, 10], hist_len=3)
#train = IFMonthlyLong(which_set='train', target_type='ASV', gain_range=[0, 10])
#test = IFMonthlyLong(which_set='test', target_type='ASV', gain_range=[0, 10])
#X_train = train.X
#y_train = train.y
#X_test = test.X
#y_test = test.y
def train_model(dataset, h0_dim, h1_dim, y_dim):
X_train, y_train, X_test, y_test = dataset
batch_size = 512
nb_epoch = 100
model = Sequential()
model.add(Dense(h0_dim, input_shape=(X_train.shape[1],),
init='uniform',
W_regularizer=l2(0.0005),
activation='relu'))
model.add(Dense(h1_dim,
init='uniform',
W_regularizer=l2(0.0005),
activation='relu'))
model.add(Dense(y_dim,
init='uniform',
W_regularizer=l2(0.0005)))
rms = RMSprop()
sgd = SGD(lr=0.01, decay=1e-4, momentum=0.6, nesterov=False)
model.compile(loss='mse', optimizer=sgd)
#model.get_config(verbose=1)
#yaml_string = model.to_yaml()
#with open('ifshort_mlp.yaml', 'w') as f:
# f.write(yaml_string)
early_stopping = EarlyStopping(monitor='val_loss', patience=10)
checkpointer = ModelCheckpoint(filepath="/tmp/ifshort_mlp_weights.hdf5", verbose=1, save_best_only=True)
model.fit(X_train, y_train,
batch_size=batch_size,
nb_epoch=nb_epoch,
show_accuracy=False,
verbose=2,
validation_data=(X_test, y_test),
callbacks=[early_stopping, checkpointer])
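# Illustrative usage (not part of the original script): a minimal sketch with
# synthetic data. The shapes and layer sizes are assumptions chosen only to
# exercise train_model(), not values from the original IF futures datasets.
#
#     X_train = np.random.rand(1024, 20)
#     y_train = np.random.rand(1024, 1)
#     X_test = np.random.rand(256, 20)
#     y_test = np.random.rand(256, 1)
#     train_model((X_train, y_train, X_test, y_test), h0_dim=64, h1_dim=32, y_dim=1)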
| mit | 947,155,060,810,931,500 | 38.766667 | 123 | 0.630763 | false |
onshape-public/onshape-clients | python/onshape_client/oas/models/bt_string_format_block_pattern1755.py | 1 | 7539 | # coding: utf-8
"""
Onshape REST API
The Onshape REST API consumed by all clients. # noqa: E501
The version of the OpenAPI document: 1.113
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
import sys # noqa: F401
import six # noqa: F401
import nulltype # noqa: F401
from onshape_client.oas.model_utils import ( # noqa: F401
ModelComposed,
ModelNormal,
ModelSimple,
date,
datetime,
file_type,
int,
none_type,
str,
validate_get_composed_info,
)
try:
from onshape_client.oas.models import bt_string_format_block_pattern1755_all_of
except ImportError:
bt_string_format_block_pattern1755_all_of = sys.modules[
"onshape_client.oas.models.bt_string_format_block_pattern1755_all_of"
]
try:
from onshape_client.oas.models import bt_string_format_condition683
except ImportError:
bt_string_format_condition683 = sys.modules[
"onshape_client.oas.models.bt_string_format_condition683"
]
class BTStringFormatBlockPattern1755(ModelComposed):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {}
validations = {}
additional_properties_type = None
@staticmethod
def openapi_types():
"""
This must be a class method so a model may have properties that are
of type self, this ensures that we don't create a cyclic import
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
"bt_type": (str,), # noqa: E501
"reg_exp_to_block": (str,), # noqa: E501
"error_message": (str,), # noqa: E501
"should_reset_value_when_confirmed": (bool,), # noqa: E501
}
@staticmethod
def discriminator():
return None
attribute_map = {
"bt_type": "btType", # noqa: E501
"reg_exp_to_block": "regExpToBlock", # noqa: E501
"error_message": "errorMessage", # noqa: E501
"should_reset_value_when_confirmed": "shouldResetValueWhenConfirmed", # noqa: E501
}
required_properties = set(
[
"_data_store",
"_check_type",
"_from_server",
"_path_to_item",
"_configuration",
"_composed_instances",
"_var_name_to_model_instances",
"_additional_properties_model_instances",
]
)
def __init__(
self,
_check_type=True,
_from_server=False,
_path_to_item=(),
_configuration=None,
**kwargs
): # noqa: E501
"""bt_string_format_block_pattern1755.BTStringFormatBlockPattern1755 - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_from_server (bool): True if the data is from the server
False if the data is from the client (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
bt_type (str): [optional] # noqa: E501
reg_exp_to_block (str): [optional] # noqa: E501
error_message (str): [optional] # noqa: E501
should_reset_value_when_confirmed (bool): [optional] # noqa: E501
"""
self._data_store = {}
self._check_type = _check_type
self._from_server = _from_server
self._path_to_item = _path_to_item
self._configuration = _configuration
constant_args = {
"_check_type": _check_type,
"_path_to_item": _path_to_item,
"_from_server": _from_server,
"_configuration": _configuration,
}
required_args = {}
# remove args whose value is Null because they are unset
required_arg_names = list(required_args.keys())
for required_arg_name in required_arg_names:
if required_args[required_arg_name] is nulltype.Null:
del required_args[required_arg_name]
model_args = {}
model_args.update(required_args)
model_args.update(kwargs)
composed_info = validate_get_composed_info(constant_args, model_args, self)
self._composed_instances = composed_info[0]
self._var_name_to_model_instances = composed_info[1]
self._additional_properties_model_instances = composed_info[2]
unused_args = composed_info[3]
for var_name, var_value in required_args.items():
setattr(self, var_name, var_value)
for var_name, var_value in six.iteritems(kwargs):
if (
var_name in unused_args
and self._configuration is not None
and self._configuration.discard_unknown_keys
and not self._additional_properties_model_instances
):
# discard variable.
continue
setattr(self, var_name, var_value)
@staticmethod
def _composed_schemas():
# we need this here to make our import statements work
# we must store _composed_schemas in here so the code is only run
# when we invoke this method. If we kept this at the class
        # level we would get an error because the class level
# code would be run when this module is imported, and these composed
# classes don't exist yet because their module has not finished
# loading
return {
"anyOf": [],
"allOf": [
bt_string_format_block_pattern1755_all_of.BTStringFormatBlockPattern1755AllOf,
bt_string_format_condition683.BTStringFormatCondition683,
],
"oneOf": [],
}
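# Illustrative usage (not part of the generated module): a hedged sketch of
# constructing the model. The regular expression and message are made-up
# example values, not anything defined by the Onshape API.
#
#     pattern = BTStringFormatBlockPattern1755(
#         reg_exp_to_block=r"[^A-Za-z0-9_]",
#         error_message="Only alphanumeric characters and underscores are allowed.",
#         should_reset_value_when_confirmed=False,
#     )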
| mit | -3,901,745,233,082,738,000 | 35.77561 | 105 | 0.593845 | false |
after1990s/little_utils | CodeGenerator/codegen.py | 1 | 1466 | # -*- coding: utf-8 -*-
#!/bin/python
def codegen(paratype, paraname):
string_code_raw = '''
private {0} m_{1};
public {0} {1}
{{
get
{{
return m_{1};
}}
set
{{
m_{1} = value;
if (PropertyChanged != null)
PropertyChanged.Invoke(this, new PropertyChangedEventArgs("{1}"));
}}
}}'''.format(paratype, paraname)
    print(string_code_raw)
def main():
    codegen('String', 'Host_0')
    codegen('String', 'Host_1')
    codegen('String', 'Host_2')
    codegen('String', 'Host_3')
    codegen('Int32', 'HostPort_0')
    codegen('Int32', 'HostPort_1')
    codegen('Int32', 'HostPort_2')
    codegen('Int32', 'HostPort_3')
    codegen('bool', 'VmCheck')
    codegen('Int32', 'VmCpu')
    codegen('Int32', 'VmMemory')
    codegen('Int32', 'VmResHeight')
    codegen('Int32', 'VmResWidth')
    codegen('Int32', 'VmDisk')
    codegen('String', 'NoticeTitle')
    codegen('String', 'NoticeContent')
    codegen('String', 'Notice')
    codegen('String', 'TargetFilePath')
    codegen('String', 'TimeMon')
    codegen('String', 'TimeTue')
    codegen('String', 'TimeWed')
    codegen('String', 'TimeThu')
    codegen('String', 'TimeFri')
    codegen('String', 'TimeSat')
    codegen('String', 'TimeSun')
    codegen('bool', 'TimeCheck')
if __name__ == '__main__':
    main()
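# Illustrative output (not part of the original script): codegen('String', 'Host_0')
# prints a C# property wired to INotifyPropertyChanged, roughly:
#
#     private String m_Host_0;
#     public String Host_0
#     {
#         get
#         {
#             return m_Host_0;
#         }
#         set
#         {
#             m_Host_0 = value;
#             if (PropertyChanged != null)
#                 PropertyChanged.Invoke(this, new PropertyChangedEventArgs("Host_0"));
#         }
#     }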
| apache-2.0 | -2,745,543,578,551,633,000 | 27.192308 | 86 | 0.519782 | false |
Lvadislav/russian-speech-site | speech/forms.py | 1 | 1179 | from django import forms
from speech.models import Feedback
from speech import config
ERROR_MESSAGES = {
    'required': 'Это поле обязательно к заполнению.',  # "This field is required."
    'max_length': 'Слишком длинное значение.',  # "The value is too long."
    'min_length': 'Слишком короткое значение.',  # "The value is too short."
    'invalid': 'Некорректное значение.',  # "Invalid value."
}
class FeedbackForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['name'].widget.attrs.update({'autofocus': 'autofocus'})
    name = forms.CharField(
        max_length=config.MAX_FEEDBACK_NAME_LENGTH,
        error_messages=ERROR_MESSAGES,
        help_text='Ваше имя'  # "Your name"
    )
    email = forms.EmailField(
        max_length=config.MAX_FEEDBACK_EMAIL_LENGTH,
        error_messages=ERROR_MESSAGES,
        help_text='Ваш email'  # "Your email"
    )
    text = forms.CharField(
        widget=forms.Textarea,
        max_length=config.MAX_FEEDBACK_TEXT_LENGTH,
        error_messages=ERROR_MESSAGES,
        help_text='Сообщение'  # "Message"
    )
class Meta:
model = Feedback
exclude = []
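# Illustrative usage (not part of the original module): a hedged sketch of
# validating the form, e.g. inside a view. The field values are made up.
#
#     form = FeedbackForm(data={
#         'name': 'Ivan',
#         'email': '[email protected]',
#         'text': 'Hello!',
#     })
#     if form.is_valid():
#         form.save()           # persists a Feedback instance
#     else:
#         print(form.errors)    # messages come from ERROR_MESSAGES above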
| mit | -6,923,204,756,651,097,000 | 22.688889 | 75 | 0.630394 | false |
ielia/prtg-py | prtg/cache.py | 1 | 4288 | # -*- coding: utf-8 -*-
"""
Python library for Paessler's PRTG (http://www.paessler.com/)
"""
import atexit
import logging
import os
import shelve
import tempfile
from prtg.exceptions import UnknownObjectType
from prtg.models import CONTENT_TYPE_ALL, PrtgObject
class Cache(object):
"""
Cache of prtg.models.PrtgObject instances, having the following:
* An id as an "objid" member.
* A content type as a "content_type" member.
Wrapper around 'shelve' (https://docs.python.org/2/library/shelve.html), a persistence library.
Upon initialisation, it looks for cached dictionaries 'devices', 'groups', 'sensors' and 'status' and, if not
present, it creates them.
"""
__FILE_PREFIX = 'prtg.'
__FILE_SUFFIX = '.cache'
__DIR = None
def __init__(self, directory=__DIR):
"""
Creates a temporary file to be used by shelve.
:param directory: Directory where the cache file is going to be written.
"""
self.cache_fd, self.cache_filename = tempfile.mkstemp(dir=directory, prefix=self.__FILE_PREFIX,
suffix=self.__FILE_SUFFIX)
os.close(self.cache_fd)
# TODO: Figure out how to do this gracefully and not leaving a potential (but insignificant) security hole.
os.remove(self.cache_filename)
self.cache = shelve.open(self.cache_filename)
atexit.register(self._stop)
def write_content(self, content, force=False):
"""
Stores the contents into the main cache by objid.
:param content: List of instances of prtg.models.PrtgObject to put in the cache.
:param force: Forces the insertion of the object in the cache.
"""
logging.debug('Writing Cache')
for obj in content:
if not isinstance(obj, PrtgObject):
raise UnknownObjectType
if not str(obj.objid) in self.cache:
# TODO: Compare new objects with cached objects.
logging.debug('Writing new object {} to cache'.format(str(obj.objid)))
self.cache[str(obj.objid)] = obj
elif force:
logging.debug('Updating object {} in cache'.format(str(obj.objid)))
obj.changed = True
self.cache[str(obj.objid)] = obj
else:
logging.debug('Object {} already cached'.format(str(obj.objid)))
def get_object(self, objectid):
"""
Gets the object by id.
:param objectid: Object id to retrieve.
:return: The requested object, that has to exist.
:raise KeyError: If no such id is in the cache.
"""
with shelve.open(self.cache_filename) as cache:
return cache[str(objectid)]
def get_content(self, content_type):
"""
Generator that retrieves objects by content type.
:param content_type: Content type to retrieve.
:yield: Objects contained in the cache with the specified content type.
"""
for objid, value in self.cache.items(): # items() is a generator, thus this usage.
try:
if content_type == CONTENT_TYPE_ALL or value.content_type == content_type:
yield value
except AttributeError:
logging.warning('Bad object returned from cache: {}'.format(value))
def get_changed_content(self, content_type):
"""
Generator that retrieves changed objects by content type.
:param content_type: Content type to retrieve.
:yield: Objects contained in the cache with the specified content type, that have been changed in the life of
the cache.
"""
for value in self.get_content(content_type):
if value.changed:
yield value
def _stop(self):
if self.cache is not None:
try:
self.cache.close()
except:
logging.error("Couldn't close cache file")
raise
if self.cache_filename:
try:
os.remove(self.cache_filename)
except:
logging.error("Couldn't delete cache file '{}'".format(self.cache_filename))
raise
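# Illustrative usage (not part of the original module): a hedged sketch of the
# expected call pattern. How the PrtgObject instances are obtained (e.g. from a
# PRTG API client) is assumed here and not shown.
#
#     cache = Cache()
#     cache.write_content(devices)            # devices: list of PrtgObject
#     obj = cache.get_object(1234)            # raises KeyError if not cached
#     for device in cache.get_content('devices'):
#         print(device.objid)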
| mit | -9,151,001,026,375,969,000 | 37.981818 | 117 | 0.595149 | false |
xyos/horarios | horarios/settings.py | 1 | 4461 | import os
# Django settings for horarios project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
PROJECT_PATH = os.path.realpath(os.path.dirname(__file__))
DEPLOY_SCRIPT = "echo 0> /dev/null"
DAO_FACTORY = 'factories.MixedFactory' # or factories.SiaFactory , factories.LocalFactory
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'database.sqlite', # Or path to database file if using sqlite3.
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Bogota'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = PROJECT_PATH + '/media/'
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
# STATIC_ROOT = PROJECT_PATH + '/static/'
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATIC_PATH = PROJECT_PATH + '/static/'
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
STATIC_PATH,
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '1%k#v0%-52jm5tf)5om_+lv23siy45ydt_qtthvaz%pri0uxp2'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'horarios.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'horarios.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
PROJECT_PATH + '/templates/',
)
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'horarios',
)
try:
from local_settings import *
except ImportError:
pass
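# Illustrative local_settings.py (not part of this file): the import above lets
# a deployment override values defined here. A hedged example of what such a
# file might contain; all values below are made up.
#
#     DEBUG = False
#     ALLOWED_HOSTS = ['horarios.example.com']
#     DATABASES = {
#         'default': {
#             'ENGINE': 'django.db.backends.postgresql_psycopg2',
#             'NAME': 'horarios',
#             'USER': 'horarios',
#             'PASSWORD': 'change-me',
#             'HOST': 'localhost',
#         }
#     }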
| mit | -6,325,953,530,759,111,000 | 33.851563 | 108 | 0.72876 | false |
hjanime/VisTrails | vistrails/packages/vtk/init.py | 1 | 31387 | ###############################################################################
##
## Copyright (C) 2014-2015, New York University.
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: [email protected]
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the New York University nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
from __future__ import division
import copy
import re
import os.path
import vtk
from distutils.version import LooseVersion
from vistrails.core.configuration import ConfigField
from vistrails.core.modules.basic_modules import Path, PathObject, \
identifier as basic_pkg
from vistrails.core.modules.config import ModuleSettings
from vistrails.core.modules.vistrails_module import ModuleError
from vistrails.core.modules.module_registry import get_module_registry
from vistrails.core.modules.output_modules import OutputModule, ImageFileMode, \
ImageFileModeConfig, IPythonMode, IPythonModeConfig
from vistrails.core.system import get_vistrails_default_pkg_prefix, systemType, current_dot_vistrails
from vistrails.core.upgradeworkflow import UpgradeWorkflowHandler,\
UpgradeModuleRemap, UpgradePackageRemap
from vistrails.core.vistrail.connection import Connection
from vistrails.core.vistrail.port import Port
from .pythonclass import BaseClassModule, gen_class_module
from .tf_widget import _modules as tf_modules
from .inspectors import _modules as inspector_modules
from .offscreen import _modules as offscreen_modules
from identifiers import identifier, version as package_version
from .vtk_wrapper import vtk_classes
from . import hasher
_modules = tf_modules + inspector_modules + offscreen_modules
registry = get_module_registry()
if registry.has_module('org.vistrails.vistrails.spreadsheet', 'SpreadsheetCell'):
# load these only if spreadsheet is enabled
from .vtkcell import _modules as cell_modules
from .vtkhandler import _modules as handler_modules
_modules += cell_modules + handler_modules
################# OUTPUT MODULES #############################################
def render_to_image(output_filename, vtk_format, renderer, w, h):
window = vtk.vtkRenderWindow()
window.OffScreenRenderingOn()
window.SetSize(w, h)
# FIXME think this may be fixed in VTK6 so we don't have this
# dependency...
widget = None
if systemType=='Darwin':
from PyQt4 import QtCore, QtGui
widget = QtGui.QWidget(None, QtCore.Qt.FramelessWindowHint)
widget.resize(w, h)
widget.show()
window.SetWindowInfo(str(int(widget.winId())))
window.AddRenderer(renderer)
window.Render()
win2image = vtk.vtkWindowToImageFilter()
win2image.SetInput(window)
win2image.Update()
writer = vtk_format()
if LooseVersion(vtk.vtkVersion().GetVTKVersion()) >= \
LooseVersion('6.0.0'):
writer.SetInputData(win2image.GetOutput())
else:
writer.SetInput(win2image.GetOutput())
writer.SetFileName(output_filename)
writer.Write()
window.Finalize()
    if widget is not None:
widget.close()
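# Illustrative usage (not part of the original module): a hedged sketch of
# calling render_to_image() directly with a bare renderer. The sphere source,
# file name and window size are arbitrary example values.
#
#     source = vtk.vtkSphereSource()
#     mapper = vtk.vtkPolyDataMapper()
#     mapper.SetInputConnection(source.GetOutputPort())
#     actor = vtk.vtkActor()
#     actor.SetMapper(mapper)
#     renderer = vtk.vtkRenderer()
#     renderer.AddActor(actor)
#     render_to_image('sphere.png', vtk.vtkPNGWriter, renderer, 640, 480)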
class vtkRendererToFile(ImageFileMode):
config_cls = ImageFileModeConfig
formats = ['png', 'jpg', 'tif', 'pnm']
@classmethod
def can_compute(cls):
return True
def compute_output(self, output_module, configuration):
format_map = {'png': vtk.vtkPNGWriter,
'jpg': vtk.vtkJPEGWriter,
'tif': vtk.vtkTIFFWriter,
'pnm': vtk.vtkPNMWriter}
r = output_module.get_input("value")[0].vtkInstance
w = configuration["width"]
h = configuration["height"]
img_format = self.get_format(configuration)
if img_format not in format_map:
raise ModuleError(output_module,
'Cannot output in format "%s"' % img_format)
fname = self.get_filename(configuration, suffix='.%s' % img_format)
render_to_image(fname, format_map[img_format], r, w, h)
class vtkRendererToIPythonModeConfig(IPythonModeConfig):
_fields = [ConfigField('width', 640, int),
ConfigField('height', 480, int)]
class vtkRendererToIPythonMode(IPythonMode):
config_cls = vtkRendererToIPythonModeConfig
def compute_output(self, output_module, configuration):
from IPython.core.display import display, Image
r = output_module.get_input('value')[0].vtkInstance
width = configuration['width']
height = configuration['height']
window = vtk.vtkRenderWindow()
window.OffScreenRenderingOn()
window.SetSize(width, height)
fname = output_module.interpreter.filePool.create_file(
prefix='ipython_', suffix='.png').name
render_to_image(fname, vtk.vtkPNGWriter, r, width, height)
display(Image(filename=fname, width=width, height=height))
class vtkRendererOutput(OutputModule):
_settings = ModuleSettings(configure_widget="vistrails.gui.modules."
"output_configuration:OutputModuleConfigurationWidget")
_input_ports = [('value', 'vtkRenderer', {'depth':1}),
('interactorStyle', 'vtkInteractorStyle'),
('picker', 'vtkAbstractPicker')]
_output_modes = [vtkRendererToFile, vtkRendererToIPythonMode]
if registry.has_module('org.vistrails.vistrails.spreadsheet',
'SpreadsheetCell'):
from .vtkcell import vtkRendererToSpreadsheet
_output_modes.append(vtkRendererToSpreadsheet)
_modules.append(vtkRendererOutput)
################# ADD VTK CLASSES ############################################
# keep track of created modules for use as subclasses
klasses = {}
def initialize():
# First check if spec for this VTK version exists
v = vtk.vtkVersion()
vtk_version = [v.GetVTKMajorVersion(),
v.GetVTKMinorVersion(),
v.GetVTKBuildVersion()]
# vtk-VTKVERSION-spec-PKGVERSION.xml
spec_name = os.path.join(current_dot_vistrails(),
'vtk-%s-spec-%s.xml' %
('_'.join([str(v) for v in vtk_version]),
package_version.replace('.', '_')))
# TODO: how to patch with diff/merge
if not os.path.exists(spec_name):
from .vtk_wrapper.parse import parse
parse(spec_name)
vtk_classes.initialize(spec_name)
_modules.insert(0, BaseClassModule)
_modules.extend([gen_class_module(spec, vtk_classes, klasses, signature=hasher.vtk_hasher)
for spec in vtk_classes.specs.module_specs])
################# UPGRADES ###################################################
_remap = None
_controller = None
_pipeline = None
def _get_controller():
global _controller
return _controller
def _get_pipeline():
global _pipeline
return _pipeline
module_name_remap = {'vtkPLOT3DReader': 'vtkMultiBlockPLOT3DReader'}
def base_name(name):
"""Returns name without overload index.
"""
i = name.find('_')
if i != -1:
return name[:i]
return name
def build_remap(module_name=None):
global _remap, _controller
reg = get_module_registry()
uscore_num = re.compile(r"(.+)_(\d+)$")
def create_function(module, *argv, **kwargs):
controller = _get_controller()
# create function using the current module version and identifier
# FIXME: This should really be handled by the upgrade code somehow
new_desc = reg.get_descriptor_by_name(module.package,
module.name,
module.namespace)
old_identifier = module.package
module.package = identifier
old_package_version = module.version
module.version = new_desc.package_version
new_function = controller.create_function(module, *argv, **kwargs)
module.package = old_identifier
module.version = old_package_version
return new_function
def get_port_specs(descriptor, port_type):
ports = {}
for desc in reversed(reg.get_module_hierarchy(descriptor)):
ports.update(reg.module_ports(port_type, desc))
return ports
def get_input_port_spec(module, port_name):
# Get current desc
# FIXME: This should really be handled by the upgrade code somehow
new_desc = reg.get_descriptor_by_name(module.package,
module.name,
module.namespace)
port_specs = get_port_specs(new_desc, 'input')
return port_name in port_specs and port_specs[port_name]
def get_output_port_spec(module, port_name):
# Get current desc
new_desc = reg.get_descriptor_by_name(module.package,
module.name,
module.namespace)
port_specs = get_port_specs(new_desc, 'output')
return port_name in port_specs and port_specs[port_name]
def build_function(old_function, new_function_name, new_module):
controller = _get_controller()
if len(old_function.parameters) > 0:
new_param_vals, aliases = \
zip(*[(p.strValue, p.alias)
for p in old_function.parameters])
else:
new_param_vals = []
aliases = []
new_function = create_function(new_module,
new_function_name,
new_param_vals,
aliases)
return new_function
def build_function_remap_method(desc, port_prefix, port_num):
f_map = {"vtkCellArray": {"InsertNextCell": 3}}
def remap(old_function, new_module):
for i in xrange(1, port_num):
port_name = "%s_%d" % (port_prefix, i)
port_spec = get_input_port_spec(new_module, port_name)
old_sigstring = \
reg.expand_port_spec_string(old_function.sigstring,
basic_pkg)
if port_spec.sigstring == old_sigstring:
new_function = build_function(old_function, port_name,
new_module)
new_module.add_function(new_function)
return []
port_idx = 1
if desc.name in f_map:
if port_prefix in f_map[desc.name]:
port_idx = f_map[desc.name][port_prefix]
port_name = "%s_%d" % (port_prefix, port_idx)
new_function = build_function(old_function, port_name, new_module)
new_module.add_function(new_function)
return []
return remap
def build_remap_method(desc, port_prefix, port_num, port_type):
# for connection, need to differentiate between src and dst
if port_type == 'input':
conn_lookup = Connection._get_destination
get_port_spec = get_input_port_spec
idx = 1
else:
conn_lookup = Connection._get_source
get_port_spec = get_output_port_spec
idx = 0
def remap(old_conn, new_module):
create_new_connection = UpgradeWorkflowHandler.create_new_connection
port = conn_lookup(old_conn)
pipeline = _get_pipeline()
modules = [pipeline.modules[old_conn.source.moduleId],
pipeline.modules[old_conn.destination.moduleId]]
modules[idx] = new_module
ports = [old_conn.source, old_conn.destination]
for i in xrange(1, port_num):
port_name = "%s_%d" % (port_prefix, i)
port_spec = get_port_spec(modules[idx], port_name)
if port_spec.sigstring == port.signature:
ports[idx] = port_name
new_conn = create_new_connection(_get_controller(),
modules[0],
ports[0],
modules[1],
ports[1])
return [('add', new_conn)]
# if get here, just try to use _1 version?
ports[idx] = "%s_%d" % (port_prefix, 1)
new_conn = create_new_connection(_get_controller(),
modules[0],
ports[0],
modules[1],
ports[1])
return [('add', new_conn)]
return remap
def process_ports(desc, remap, port_type):
if port_type == 'input':
remap_dict_key = 'dst_port_remap'
else:
remap_dict_key = 'src_port_remap'
ports = get_port_specs(desc, port_type)
port_nums = {}
for port_name, port_spec in ports.iteritems():
# FIXME just start at 1 and go until don't find port (no
# need to track max)?
search_res = uscore_num.search(port_name)
if search_res:
port_prefix = search_res.group(1)
port_num = int(search_res.group(2))
if port_prefix not in port_nums:
port_nums[port_prefix] = port_num
elif port_num > port_nums[port_prefix]:
port_nums[port_prefix] = port_num
for port_prefix, port_num in port_nums.iteritems():
m = build_remap_method(desc, port_prefix, port_num, port_type)
remap.add_remap(remap_dict_key, port_prefix, m)
if port_type == 'input':
m = build_function_remap_method(desc, port_prefix, port_num)
remap.add_remap('function_remap', port_prefix, m)
if port_type == 'output' and desc.name in klasses:
remap.add_remap('src_port_remap', 'self', 'Instance')
def change_func(name, value):
def remap(old_func, new_module):
controller = _get_controller()
new_function = create_function(new_module, name, [value])
return [('add', new_function, 'module', new_module.id)]
return remap
def change_SetXint(spec):
# Fix old SetX methods that takes an int representing the enum
def remap(old_func, new_module):
controller = _get_controller()
value = int(old_func.params[0].strValue)
value = spec.values[0][value]
new_function = create_function(new_module, spec.name, [value])
return [('add', new_function, 'module', new_module.id)]
return remap
def color_func(name):
def remap(old_func, new_module):
controller = _get_controller()
value = ','.join([p.strValue for p in old_func.params])
new_function = create_function(new_module, name, [value])
return [('add', new_function, 'module', new_module.id)]
return remap
def file_func(name):
def remap(old_func, new_module):
controller = _get_controller()
value = PathObject(old_func.params[0].strValue)
new_function = create_function(new_module, name, [value])
return [('add', new_function, 'module', new_module.id)]
return remap
def to_file_func(name):
# Add Path module as name->File converter
def remap(old_conn, new_module):
controller = _get_controller()
create_new_connection = UpgradeWorkflowHandler.create_new_connection
pipeline = _get_pipeline()
module = pipeline.modules[old_conn.source.moduleId]
x = (module.location.x + new_module.location.x)/2
y = (module.location.y + new_module.location.y)/2
path_module = controller.create_module(basic_pkg, 'Path',
'', x, y)
conn1 = create_new_connection(controller,
module,
old_conn.source,
path_module,
'name')
# Avoid descriptor lookup by explicitly creating Ports
input_port_id = controller.id_scope.getNewId(Port.vtType)
input_port = Port(id=input_port_id,
name='value',
type='source',
signature=(Path,),
moduleId=path_module.id,
moduleName=path_module.name)
output_port_id = controller.id_scope.getNewId(Port.vtType)
output_port = Port(id=output_port_id,
name=name,
type='destination',
signature=(Path,),
moduleId=new_module.id,
moduleName=new_module.name)
conn2 = create_new_connection(controller,
path_module,
input_port,
new_module,
output_port)
return [('add', path_module),
('add', conn1),
('add', conn2)]
return remap
def wrap_block_func():
def remap(old_conn, new_module):
controller = _get_controller()
create_new_connection = UpgradeWorkflowHandler.create_new_connection
pipeline = _get_pipeline()
module1 = pipeline.modules[old_conn.destination.moduleId]
dest_port = old_conn.destination
candidates = ['AddInputData_1', 'AddInputData',
'SetInputData_1', 'SetInputData',
'AddInput', 'SetInput']
if 'Connection' in old_conn.destination.name:
_desc = reg.get_descriptor_by_name(identifier,
module1.name)
ports = get_port_specs(_desc, 'input')
for c in candidates:
if c in ports:
dest_port = c
break
conn = create_new_connection(controller,
new_module,
'StructuredGrid',
module1,
dest_port)
return [('add', conn)]
return remap
def fix_vtkcell_func():
# Move VTKCell.self -> X.VTKCell to
# vtkRenderer.Instance -> X.vtkRenderer
def remap(old_conn, new_module):
controller = _get_controller()
create_new_connection = UpgradeWorkflowHandler.create_new_connection
pipeline = _get_pipeline()
# find vtkRenderer
vtkRenderer = None
for conn in pipeline.connections.itervalues():
src_module_id = conn.source.moduleId
dst_module_id = conn.destination.moduleId
if dst_module_id == old_conn.source.moduleId and \
pipeline.modules[src_module_id].name == 'vtkRenderer':
vtkRenderer = pipeline.modules[src_module_id]
if vtkRenderer:
conn = create_new_connection(controller,
vtkRenderer,
'Instance',
new_module,
'vtkRenderer')
return [('add', conn)]
return []
return remap
def process_module(desc):
# 0.9.3 upgrades
if not desc.name in klasses:
return
remap = UpgradeModuleRemap(None, '0.9.3', '0.9.3',
module_name=desc.name)
process_ports(desc, remap, 'input')
process_ports(desc, remap, 'output')
_remap.add_module_remap(remap)
for old, new in module_name_remap.iteritems():
if desc.name == new:
# Remap using old name
remap.new_module = old
_remap.add_module_remap(remap, old)
# 0.9.5 upgrades
remap = UpgradeModuleRemap('0.9.3', '0.9.5', '0.9.5',
module_name=desc.name)
remap.add_remap('src_port_remap', 'self', 'Instance')
_remap.add_module_remap(remap)
for old, new in module_name_remap.iteritems():
if desc.name == new:
# Remap using old name
remap.new_module = old
_remap.add_module_remap(remap, old)
# 1.0.0 upgrades
input_mappings = {}
function_mappings = {}
input_specs = [desc.module._get_input_spec(s)
for s in get_port_specs(desc, 'input')]
input_names = [s.name for s in input_specs]
for spec in input_specs:
if spec is None:
continue
elif spec.name == 'TextScaleMode':
function_mappings['ScaledTextOn'] = \
change_func('TextScaleMode', 'Prop')
elif spec.method_type == 'OnOff':
# Convert On/Off to single port
input_mappings[spec.name + 'On'] = spec.name
input_mappings[spec.name + 'Off'] = spec.name
function_mappings[spec.name + 'On'] = \
change_func(spec.name, True)
function_mappings[spec.name + 'Off'] = \
change_func(spec.name, False)
elif spec.method_type == 'nullary':
# Add True to execute empty functions
function_mappings[spec.name] = change_func(spec.name, True)
elif spec.method_type == 'SetXToY':
# Add one mapping for each default
for enum in spec.values[0]:
input_mappings[spec.method_name + enum] = spec.name
# Add enum value to function
function_mappings[spec.method_name + enum] = \
change_func(spec.name, enum)
# Convert SetX(int) methods
old_name = spec.method_name[:-2]
function_mappings[spec.method_name[:-2]] = change_SetXint(spec)
elif spec.port_type == 'basic:Color':
# Remove 'Widget' suffix on Color
input_mappings[spec.method_name + 'Widget'] = spec.name
# Remove 'Set prefix'
input_mappings[spec.method_name] = spec.name
# Change old type (float, float, float) -> (,)*3
function_mappings[spec.method_name] = color_func(spec.name)
elif spec.port_type == 'basic:File':
input_mappings[spec.method_name] = to_file_func(spec.name) # Set*FileName -> (->File->*File)
input_mappings['Set' + spec.name] = spec.name # Set*File -> *File
function_mappings[spec.method_name] = file_func(spec.name)
elif base_name(spec.name) == 'AddDataSetInput':
# SetInput* does not exist in VTK 6
if spec.name[15:] == '_1':
# Upgrade from version without overload
input_mappings['AddInput'] = spec.name
input_mappings['AddInput' + spec.name[15:]] = spec.name
elif base_name(spec.name) == 'InputData':
# SetInput* does not exist in VTK 6
if spec.name[9:] == '_1':
# Upgrade from version without overload
input_mappings['SetInput'] = spec.name
input_mappings['SetInput' + spec.name[9:]] = spec.name
elif base_name(spec.name) == 'AddInputData':
# AddInput* does not exist in VTK 6
if spec.name[12:] == '_1':
# Upgrade from version without overload
input_mappings['AddInput'] = spec.name
input_mappings['AddInput' + spec.name[12:]] = spec.name
elif base_name(spec.name) == 'SourceData':
# SetSource* does not exist in VTK 6
if spec.name[10:] == '_1':
# Upgrade from version without overload
input_mappings['SetSource'] = spec.name
input_mappings['SetSource' + spec.name[10:]] = spec.name
elif spec.method_name == 'Set' + base_name(spec.name):
if spec.name[-2:] == '_1':
# Upgrade from versions without overload
input_mappings[spec.name[:-2]] = spec.name
input_mappings['Set' + spec.name[:-2]] = spec.name
# Remove 'Set' prefixes
input_mappings['Set' + spec.name] = spec.name
elif spec.name == 'AddInput_1':
# FIXME what causes this?
# New version does not have AddInput
input_mappings['AddInput'] = 'AddInput_1'
elif spec.name == 'vtkRenderer':
# Classes having SetRendererWindow also used to have VTKCell
input_mappings['SetVTKCell'] = fix_vtkcell_func()
output_mappings = {}
for spec_name in get_port_specs(desc, 'output'):
spec = desc.module._get_output_spec(spec_name)
if spec is None:
continue
if spec.method_name == 'Get' + spec.name:
# Remove 'Get' prefixes
output_mappings[spec.method_name] = spec.name
if desc.name == 'vtkMultiBlockPLOT3DReader':
# Move GetOutput to custom FirstBlock
output_mappings['GetOutput'] = wrap_block_func() # what!?
# Move GetOutputPort0 to custom FirstBlock
# and change destination port to AddInputData_1 or similar
output_mappings['GetOutputPort0'] = wrap_block_func()
remap = UpgradeModuleRemap('0.9.5', '1.0.0', '1.0.0',
module_name=desc.name)
for k, v in input_mappings.iteritems():
remap.add_remap('dst_port_remap', k, v)
for k, v in output_mappings.iteritems():
remap.add_remap('src_port_remap', k, v)
for k, v in function_mappings.iteritems():
remap.add_remap('function_remap', k, v)
_remap.add_module_remap(remap)
for old, new in module_name_remap.iteritems():
if desc.name == new:
# Remap to new name
remap.new_module = new
_remap.add_module_remap(remap, old)
pkg = reg.get_package_by_name(identifier)
if module_name is not None:
desc = reg.get_descriptor_by_name(identifier, module_name)
process_module(desc)
else:
# FIXME do this by descriptor first, then build the hierarchies for each
# module after that...
for desc in pkg.descriptor_list:
process_module(desc)
def handle_module_upgrade_request(controller, module_id, pipeline):
global _remap, _controller, _pipeline
if _remap is None:
_remap = UpgradePackageRemap()
remap = UpgradeModuleRemap(None, '1.0.0', '1.0.0',
module_name='vtkInteractionHandler')
remap.add_remap('src_port_remap', 'self', 'Instance')
_remap.add_module_remap(remap)
remap = UpgradeModuleRemap(None, '1.0.0', '1.0.0',
module_name='VTKCell')
_remap.add_module_remap(remap)
remap = UpgradeModuleRemap(None, '1.0.0', '1.0.0',
module_name='VTKViewCell',
new_module='VTKCell')
_remap.add_module_remap(remap)
_controller = controller
_pipeline = pipeline
module_name = pipeline.modules[module_id].name
module_name = module_name_remap.get(module_name, module_name)
if not _remap.has_module_remaps(module_name):
build_remap(module_name)
try:
from vistrails.packages.spreadsheet.init import upgrade_cell_to_output
except ImportError:
# Manually upgrade to 1.0.1
if _remap.get_module_remaps(module_name):
module_remap = copy.copy(_remap)
module_remap.add_module_remap(
UpgradeModuleRemap('1.0.0', '1.0.1', '1.0.1',
module_name=module_name))
else:
module_remap = _remap
else:
module_remap = upgrade_cell_to_output(
_remap, module_id, pipeline,
'VTKCell', 'vtkRendererOutput',
'1.0.1', 'AddRenderer',
start_version='1.0.0')
if _remap.get_module_remaps(module_name):
remap = module_remap.get_module_upgrade(module_name, '1.0.0')
if remap is None:
# Manually upgrade to 1.0.1
module_remap.add_module_remap(
UpgradeModuleRemap('1.0.0', '1.0.1', '1.0.1',
module_name=module_name))
return UpgradeWorkflowHandler.remap_module(controller, module_id, pipeline,
module_remap)
| bsd-3-clause | 7,075,514,750,648,230,000 | 43.647226 | 109 | 0.5465 | false |
cxxgtxy/tensorflow | tensorflow/python/training/basic_session_run_hooks.py | 1 | 25214 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Some common SessionRunHook classes.
@@LoggingTensorHook
@@StopAtStepHook
@@CheckpointSaverHook
@@StepCounterHook
@@NanLossDuringTrainingError
@@NanTensorHook
@@SummarySaverHook
@@GlobalStepWaiterHook
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
import numpy as np
import six
from tensorflow.core.framework.summary_pb2 import Summary
from tensorflow.core.util.event_pb2 import SessionLog
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import training_util
from tensorflow.python.training.session_run_hook import SessionRunArgs
from tensorflow.python.training.summary_io import SummaryWriterCache
class SecondOrStepTimer(object):
"""Timer that triggers at most once every N seconds or once every N steps.
"""
def __init__(self, every_secs=None, every_steps=None):
self._every_secs = every_secs
self._every_steps = every_steps
self._last_triggered_step = None
self._last_triggered_time = None
if self._every_secs is None and self._every_steps is None:
raise ValueError("Either every_secs or every_steps should be provided.")
if (self._every_secs is not None) and (self._every_steps is not None):
raise ValueError("Can not provide both every_secs and every_steps.")
def should_trigger_for_step(self, step):
"""Return true if the timer should trigger for the specified step.
Args:
step: Training step to trigger on.
Returns:
True if the difference between the current time and the time of the last
trigger exceeds `every_secs`, or if the difference between the current
step and the last triggered step exceeds `every_steps`. False otherwise.
"""
if self._last_triggered_step is None:
return True
if self._last_triggered_step == step:
return False
if self._every_secs is not None:
if time.time() >= self._last_triggered_time + self._every_secs:
return True
if self._every_steps is not None:
if step >= self._last_triggered_step + self._every_steps:
return True
return False
def update_last_triggered_step(self, step):
"""Update the last triggered time and step number.
Args:
step: The current step.
Returns:
A pair `(elapsed_time, elapsed_steps)`, where `elapsed_time` is the number
of seconds between the current trigger and the last one (a float), and
`elapsed_steps` is the number of steps between the current trigger and
the last one. Both values will be set to `None` on the first trigger.
"""
current_time = time.time()
if self._last_triggered_time is None:
elapsed_secs = None
elapsed_steps = None
else:
elapsed_secs = current_time - self._last_triggered_time
elapsed_steps = step - self._last_triggered_step
self._last_triggered_time = current_time
self._last_triggered_step = step
return (elapsed_secs, elapsed_steps)
def last_triggered_step(self):
return self._last_triggered_step
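# Minimal usage sketch (not part of the original file): the hooks below drive the
# timer roughly like this, triggering at most once every `every_steps` steps.
#   timer = SecondOrStepTimer(every_steps=100)
#   if timer.should_trigger_for_step(global_step):
#     elapsed_secs, elapsed_steps = timer.update_last_triggered_step(global_step)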
class LoggingTensorHook(session_run_hook.SessionRunHook):
"""Prints the given tensors once every N local steps or once every N seconds.
The tensors will be printed to the log, with `INFO` severity.
"""
def __init__(self, tensors, every_n_iter=None, every_n_secs=None,
formatter=None):
"""Initializes a `LoggingTensorHook`.
Args:
tensors: `dict` that maps string-valued tags to tensors/tensor names,
or `iterable` of tensors/tensor names.
every_n_iter: `int`, print the values of `tensors` once every N local
steps taken on the current worker.
every_n_secs: `int` or `float`, print the values of `tensors` once every N
seconds. Exactly one of `every_n_iter` and `every_n_secs` should be
provided.
formatter: function, takes dict of `tag`->`Tensor` and returns a string.
If `None` uses default printing all tensors.
Raises:
ValueError: if `every_n_iter` is non-positive.
"""
if (every_n_iter is None) == (every_n_secs is None):
raise ValueError(
"exactly one of every_n_iter and every_n_secs must be provided.")
if every_n_iter is not None and every_n_iter <= 0:
raise ValueError("invalid every_n_iter=%s." % every_n_iter)
if not isinstance(tensors, dict):
self._tag_order = tensors
tensors = {item: item for item in tensors}
else:
self._tag_order = tensors.keys()
self._tensors = tensors
self._formatter = formatter
self._timer = SecondOrStepTimer(every_secs=every_n_secs,
every_steps=every_n_iter)
def begin(self):
self._iter_count = 0
# Convert names to tensors if given
self._current_tensors = {tag: _as_graph_element(tensor)
for (tag, tensor) in self._tensors.items()}
def before_run(self, run_context): # pylint: disable=unused-argument
self._should_trigger = self._timer.should_trigger_for_step(self._iter_count)
if self._should_trigger:
return SessionRunArgs(self._current_tensors)
else:
return None
def after_run(self, run_context, run_values):
_ = run_context
if self._should_trigger:
original = np.get_printoptions()
np.set_printoptions(suppress=True)
elapsed_secs, _ = self._timer.update_last_triggered_step(self._iter_count)
if self._formatter:
logging.info(self._formatter(run_values.results))
else:
stats = []
for tag in self._tag_order:
stats.append("%s = %s" % (tag, run_values.results[tag]))
if elapsed_secs is not None:
logging.info("%s (%.3f sec)", ", ".join(stats), elapsed_secs)
else:
logging.info("%s", ", ".join(stats))
np.set_printoptions(**original)
self._iter_count += 1
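# Illustrative sketch (loss_op/train_op are assumed names): log the loss tensor
# every 100 local steps from a MonitoredTrainingSession.
#   logging_hook = LoggingTensorHook({"loss": loss_op}, every_n_iter=100)
#   with tf.train.MonitoredTrainingSession(hooks=[logging_hook]) as sess:
#     while not sess.should_stop():
#       sess.run(train_op)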
class StopAtStepHook(session_run_hook.SessionRunHook):
"""Hook that requests stop at a specified step."""
def __init__(self, num_steps=None, last_step=None):
"""Initializes a `StopAtStepHook`.
This hook requests stop after either a number of steps have been
executed or a last step has been reached. Only one of the two options can be
specified.
    If `num_steps` is specified, it indicates the number of steps to execute
after `begin()` is called. If instead `last_step` is specified, it
indicates the last step we want to execute, as passed to the `after_run()`
call.
Args:
num_steps: Number of steps to execute.
last_step: Step after which to stop.
Raises:
ValueError: If one of the arguments is invalid.
"""
if num_steps is None and last_step is None:
raise ValueError("One of num_steps or last_step must be specified.")
if num_steps is not None and last_step is not None:
raise ValueError("Only one of num_steps or last_step can be specified.")
self._num_steps = num_steps
self._last_step = last_step
def begin(self):
self._global_step_tensor = training_util.get_global_step()
if self._global_step_tensor is None:
raise RuntimeError("Global step should be created to use StopAtStepHook.")
def after_create_session(self, session, coord):
if self._last_step is None:
global_step = session.run(self._global_step_tensor)
self._last_step = global_step + self._num_steps
def before_run(self, run_context): # pylint: disable=unused-argument
return SessionRunArgs(self._global_step_tensor)
def after_run(self, run_context, run_values):
global_step = run_values.results
if global_step >= self._last_step:
run_context.request_stop()
class CheckpointSaverListener(object):
"""Interface for listeners that take action before or after checkpoint save.
`CheckpointSaverListener` triggers only in steps when `CheckpointSaverHook` is
triggered, and provides callbacks at the following points:
- before using the session
- before each call to `Saver.save()`
- after each call to `Saver.save()`
- at the end of session
To use a listener, implement a class and pass the listener to a
`CheckpointSaverHook`, as in this example:
```python
  class ExampleCheckpointSaverListener(CheckpointSaverListener):
def begin(self):
# You can add ops to the graph here.
print('Starting the session.')
self.your_tensor = ...
def before_save(self, session, global_step_value):
print('About to write a checkpoint')
def after_save(self, session, global_step_value):
print('Done writing checkpoint.')
def end(self, session, global_step_value):
print('Done with the session.')
...
  listener = ExampleCheckpointSaverListener()
saver_hook = tf.train.CheckpointSaverHook(
checkpoint_dir, listeners=[listener])
with tf.train.MonitoredTrainingSession(chief_only_hooks=[saver_hook]):
...
```
A `CheckpointSaverListener` may simply take some action after every
checkpoint save. It is also possible for the listener to use its own schedule
to act less frequently, e.g. based on global_step_value. In this case,
implementors should implement the `end()` method to handle actions related to
the last checkpoint save. But the listener should not act twice if
`after_save()` already handled this last checkpoint save.
"""
def begin(self):
pass
def before_save(self, session, global_step_value):
pass
def after_save(self, session, global_step_value):
pass
def end(self, session, global_step_value):
pass
class CheckpointSaverHook(session_run_hook.SessionRunHook):
"""Saves checkpoints every N steps or seconds."""
def __init__(self,
checkpoint_dir,
save_secs=None,
save_steps=None,
saver=None,
checkpoint_basename="model.ckpt",
scaffold=None,
listeners=None):
"""Initializes a `CheckpointSaverHook`.
Args:
checkpoint_dir: `str`, base directory for the checkpoint files.
save_secs: `int`, save every N secs.
save_steps: `int`, save every N steps.
saver: `Saver` object, used for saving.
checkpoint_basename: `str`, base name for the checkpoint files.
scaffold: `Scaffold`, use to get saver object.
listeners: List of `CheckpointSaverListener` subclass instances.
Used for callbacks that run immediately before or after this hook saves
the checkpoint.
Raises:
ValueError: One of `save_steps` or `save_secs` should be set.
ValueError: Exactly one of saver or scaffold should be set.
"""
logging.info("Create CheckpointSaverHook.")
if saver is not None and scaffold is not None:
raise ValueError("You cannot provide both saver and scaffold.")
if saver is None and scaffold is None:
saver = saver_lib._get_saver_or_default() # pylint: disable=protected-access
self._saver = saver
self._checkpoint_dir = checkpoint_dir
self._save_path = os.path.join(checkpoint_dir, checkpoint_basename)
self._scaffold = scaffold
self._timer = SecondOrStepTimer(every_secs=save_secs,
every_steps=save_steps)
self._listeners = listeners or []
def begin(self):
self._summary_writer = SummaryWriterCache.get(self._checkpoint_dir)
self._global_step_tensor = training_util.get_global_step()
if self._global_step_tensor is None:
raise RuntimeError(
"Global step should be created to use CheckpointSaverHook.")
for l in self._listeners:
l.begin()
def before_run(self, run_context): # pylint: disable=unused-argument
if self._timer.last_triggered_step() is None:
      # We write the graph and saver_def on the first call to before_run.
      # We cannot do this in begin() because other hooks may still modify the
      # graph and add variables there; the graph is finalized after all begin calls.
training_util.write_graph(
ops.get_default_graph().as_graph_def(add_shapes=True),
self._checkpoint_dir,
"graph.pbtxt")
saver_def = self._get_saver().saver_def if self._get_saver() else None
graph = ops.get_default_graph()
meta_graph_def = meta_graph.create_meta_graph_def(
graph_def=graph.as_graph_def(add_shapes=True),
saver_def=saver_def)
self._summary_writer.add_graph(graph)
self._summary_writer.add_meta_graph(meta_graph_def)
return SessionRunArgs(self._global_step_tensor)
def after_run(self, run_context, run_values):
global_step = run_values.results
if self._timer.should_trigger_for_step(global_step):
self._timer.update_last_triggered_step(global_step)
self._save(global_step, run_context.session)
def end(self, session):
last_step = session.run(training_util.get_global_step())
if last_step != self._timer.last_triggered_step():
self._save(last_step, session)
for l in self._listeners:
l.end(session, last_step)
def _save(self, step, session):
"""Saves the latest checkpoint."""
logging.info("Saving checkpoints for %d into %s.", step, self._save_path)
for l in self._listeners:
l.before_save(session, step)
self._get_saver().save(session, self._save_path, global_step=step)
self._summary_writer.add_session_log(
SessionLog(
status=SessionLog.CHECKPOINT, checkpoint_path=self._save_path),
step)
for l in self._listeners:
l.after_save(session, step)
def _get_saver(self):
if self._saver is not None:
return self._saver
elif self._scaffold is not None:
return self._scaffold.saver
return None
class StepCounterHook(session_run_hook.SessionRunHook):
"""Hook that counts steps per second."""
def __init__(self,
every_n_steps=100,
every_n_secs=None,
output_dir=None,
summary_writer=None):
if (every_n_steps is None) == (every_n_secs is None):
raise ValueError(
"exactly one of every_n_steps and every_n_secs should be provided.")
self._timer = SecondOrStepTimer(every_steps=every_n_steps,
every_secs=every_n_secs)
self._summary_writer = summary_writer
self._output_dir = output_dir
def begin(self):
if self._summary_writer is None and self._output_dir:
self._summary_writer = SummaryWriterCache.get(self._output_dir)
self._global_step_tensor = training_util.get_global_step()
if self._global_step_tensor is None:
raise RuntimeError(
"Global step should be created to use StepCounterHook.")
self._summary_tag = self._global_step_tensor.op.name + "/sec"
def before_run(self, run_context): # pylint: disable=unused-argument
return SessionRunArgs(self._global_step_tensor)
def after_run(self, run_context, run_values):
_ = run_context
global_step = run_values.results
if self._timer.should_trigger_for_step(global_step):
elapsed_time, elapsed_steps = self._timer.update_last_triggered_step(
global_step)
if elapsed_time is not None:
steps_per_sec = elapsed_steps / elapsed_time
if self._summary_writer is not None:
summary = Summary(value=[Summary.Value(
tag=self._summary_tag, simple_value=steps_per_sec)])
self._summary_writer.add_summary(summary, global_step)
logging.info("%s: %g", self._summary_tag, steps_per_sec)
class NanLossDuringTrainingError(RuntimeError):
def __str__(self):
return "NaN loss during training."
class NanTensorHook(session_run_hook.SessionRunHook):
"""Monitors the loss tensor and stops training if loss is NaN.
Can either fail with exception or just stop training.
"""
def __init__(self, loss_tensor, fail_on_nan_loss=True):
"""Initializes a `NanTensorHook`.
Args:
loss_tensor: `Tensor`, the loss tensor.
fail_on_nan_loss: `bool`, whether to raise exception when loss is NaN.
"""
self._loss_tensor = loss_tensor
self._fail_on_nan_loss = fail_on_nan_loss
def before_run(self, run_context): # pylint: disable=unused-argument
return SessionRunArgs(self._loss_tensor)
def after_run(self, run_context, run_values):
if np.isnan(run_values.results):
failure_message = "Model diverged with loss = NaN."
if self._fail_on_nan_loss:
logging.error(failure_message)
raise NanLossDuringTrainingError
else:
logging.warning(failure_message)
# We don't raise an error but we request stop without an exception.
run_context.request_stop()
class SummarySaverHook(session_run_hook.SessionRunHook):
"""Saves summaries every N steps."""
def __init__(self,
save_steps=None,
save_secs=None,
output_dir=None,
summary_writer=None,
scaffold=None,
summary_op=None):
"""Initializes a `SummarySaverHook`.
Args:
save_steps: `int`, save summaries every N steps. Exactly one of
`save_secs` and `save_steps` should be set.
save_secs: `int`, save summaries every N seconds.
output_dir: `string`, the directory to save the summaries to. Only used
if no `summary_writer` is supplied.
summary_writer: `SummaryWriter`. If `None` and an `output_dir` was passed,
one will be created accordingly.
scaffold: `Scaffold` to get summary_op if it's not provided.
summary_op: `Tensor` of type `string` containing the serialized `Summary`
protocol buffer or a list of `Tensor`. They are most likely an output
by TF summary methods like `tf.summary.scalar` or
`tf.summary.merge_all`. It can be passed in as one tensor; if more
than one, they must be passed in as a list.
Raises:
ValueError: Exactly one of scaffold or summary_op should be set.
"""
if ((scaffold is None and summary_op is None) or
(scaffold is not None and summary_op is not None)):
raise ValueError(
"Exactly one of scaffold or summary_op must be provided.")
self._summary_op = summary_op
self._summary_writer = summary_writer
self._output_dir = output_dir
self._scaffold = scaffold
self._timer = SecondOrStepTimer(every_secs=save_secs,
every_steps=save_steps)
# TODO(mdan): Throw an error if output_dir and summary_writer are None.
def begin(self):
if self._summary_writer is None and self._output_dir:
self._summary_writer = SummaryWriterCache.get(self._output_dir)
self._next_step = None
self._global_step_tensor = training_util.get_global_step()
if self._global_step_tensor is None:
raise RuntimeError(
"Global step should be created to use SummarySaverHook.")
def before_run(self, run_context): # pylint: disable=unused-argument
self._request_summary = (
self._next_step is None or
self._timer.should_trigger_for_step(self._next_step))
requests = {"global_step": self._global_step_tensor}
if self._request_summary:
if self._get_summary_op() is not None:
requests["summary"] = self._get_summary_op()
return SessionRunArgs(requests)
def after_run(self, run_context, run_values):
_ = run_context
if not self._summary_writer:
return
global_step = run_values.results["global_step"]
if self._next_step is None:
self._summary_writer.add_session_log(
SessionLog(status=SessionLog.START), global_step)
if self._request_summary:
self._timer.update_last_triggered_step(global_step)
if "summary" in run_values.results:
for summary in run_values.results["summary"]:
self._summary_writer.add_summary(summary, global_step)
self._next_step = global_step + 1
def end(self, session=None):
if self._summary_writer:
self._summary_writer.flush()
def _get_summary_op(self):
"""Fetches the summary op either from self._summary_op or self._scaffold.
Returns:
Returns a list of summary `Tensor`.
"""
summary_op = None
if self._summary_op is not None:
summary_op = self._summary_op
elif self._scaffold.summary_op is not None:
summary_op = self._scaffold.summary_op
if summary_op is None:
return None
if not isinstance(summary_op, list):
return [summary_op]
return summary_op
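# Illustrative sketch (output_dir is an assumed path): write merged summaries
# every 100 steps.
#   summary_hook = SummarySaverHook(save_steps=100, output_dir="/tmp/train",
#                                   summary_op=tf.summary.merge_all())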
class GlobalStepWaiterHook(session_run_hook.SessionRunHook):
"""Delays execution until global step reaches `wait_until_step`.
  This hook delays execution until the global step reaches `wait_until_step`. It
is used to gradually start workers in distributed settings. One example usage
would be setting `wait_until_step=int(K*log(task_id+1))` assuming that
task_id=0 is the chief.
"""
def __init__(self, wait_until_step):
"""Initializes a `GlobalStepWaiterHook`.
Args:
wait_until_step: an `int` shows until which global step should we wait.
"""
self._wait_until_step = wait_until_step
def begin(self):
self._worker_is_started = False
self._global_step_tensor = training_util.get_global_step()
if self._global_step_tensor is None:
raise RuntimeError(
"Global step should be created to use _GlobalStepWaiterHook.")
def before_run(self, run_context):
if self._worker_is_started:
return None
if self._wait_until_step <= 0:
self._worker_is_started = True
return None
logging.info("Waiting for global step %d before starting training.",
self._wait_until_step)
last_logged_step = 0
while True:
current_step = run_context.session.run(self._global_step_tensor)
if current_step >= self._wait_until_step:
self._worker_is_started = True
return None
if current_step - last_logged_step > 1000:
logging.info("Waiting for global step %d before starting training. "
"Current step is %d.", self._wait_until_step, current_step)
last_logged_step = current_step
time.sleep(0.5)
class FinalOpsHook(session_run_hook.SessionRunHook):
"""A hook which evaluates `Tensors` at the end of a session."""
def __init__(self, final_ops, final_ops_feed_dict=None):
"""Initializes `FinalOpHook` with ops to run at the end of the session.
Args:
final_ops: A single `Tensor`, a list of `Tensors` or a dictionary of
names to `Tensors`.
final_ops_feed_dict: A feed dictionary to use when running
`final_ops_dict`.
"""
self._final_ops = final_ops
self._final_ops_feed_dict = final_ops_feed_dict
self._final_ops_values = None
@property
def final_ops_values(self):
return self._final_ops_values
def end(self, session):
if self._final_ops is not None:
self._final_ops_values = session.run(self._final_ops,
feed_dict=self._final_ops_feed_dict)
class FeedFnHook(session_run_hook.SessionRunHook):
"""Runs `feed_fn` and sets the `feed_dict` accordingly."""
def __init__(self, feed_fn):
"""Initializes a `FeedFnHook`.
Args:
feed_fn: function that takes no arguments and returns `dict` of `Tensor`
to feed.
"""
self.feed_fn = feed_fn
def before_run(self, run_context): # pylint: disable=unused-argument
return session_run_hook.SessionRunArgs(
fetches=None, feed_dict=self.feed_fn())
def _as_graph_element(obj):
"""Retrieves Graph element."""
graph = ops.get_default_graph()
if not isinstance(obj, six.string_types):
if not hasattr(obj, "graph") or obj.graph != graph:
raise ValueError("Passed %s should have graph attribute that is equal "
"to current graph %s." % (obj, graph))
return obj
if ":" in obj:
element = graph.as_graph_element(obj)
else:
element = graph.as_graph_element(obj + ":0")
# Check that there is no :1 (e.g. it's single output).
try:
graph.as_graph_element(obj + ":1")
except (KeyError, ValueError):
pass
else:
raise ValueError("Name %s is ambiguous, "
"as this `Operation` has multiple outputs "
"(at least 2)." % obj)
return element
| apache-2.0 | -4,970,458,662,585,084,000 | 34.866287 | 83 | 0.662965 | false |
ddutta/savanna | savanna/service/api.py | 1 | 7957 | # Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import eventlet
from oslo.config import cfg
from flask import request
from savanna.storage.models import NodeTemplate, NodeType, NodeProcess, \
NodeTemplateConfig, Cluster, ClusterNodeCount
from savanna.storage.storage import DB
from savanna.utils.api import abort_and_log
from savanna.service import cluster_ops
from savanna.openstack.common import log as logging
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.import_opt('allow_cluster_ops', 'savanna.config')
def _clean_nones(obj):
d_type = type(obj)
    if d_type is not dict and d_type is not list:
return obj
if d_type is dict:
remove = []
for key in obj:
value = _clean_nones(obj.get(key))
            if value is None or (hasattr(value, '__len__') and len(value) == 0):
remove.append(key)
for key in remove:
obj.pop(key)
elif d_type is list:
new_list = []
for elem in obj:
elem = _clean_nones(elem)
            if elem is not None and (not hasattr(elem, '__len__') or len(elem) != 0):
new_list.append(elem)
return new_list
return obj
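# Example of the intended behaviour (sketch): keys whose cleaned value is None or
# empty are dropped, everything else is kept.
#   _clean_nones({'a': None, 'b': '', 'c': 'x', 'count': 3}) -> {'c': 'x', 'count': 3}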
class Resource(object):
def __init__(self, _name, _info):
self._name = _name
self._info = _clean_nones(_info)
def __getattr__(self, k):
if k not in self.__dict__:
return self._info.get(k)
return self.__dict__[k]
def __repr__(self):
return '<%s %s>' % (self._name, self._info)
@property
def dict(self):
return self._info
@property
def wrapped_dict(self):
return {self._name: self._info}
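# Example (sketch) of how Resource wraps a cleaned info dict:
#   r = Resource('cluster', {'id': '42', 'name': 'demo', 'status': None})
#   r.name         -> 'demo'
#   r.wrapped_dict -> {'cluster': {'id': '42', 'name': 'demo'}}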
def _node_template(nt):
if not nt:
abort_and_log(404, 'NodeTemplate not found')
d = {
'id': nt.id,
'name': nt.name,
'node_type': {
'name': nt.node_type.name,
'processes': [p.name for p in nt.node_type.processes]},
'flavor_id': nt.flavor_id
}
for conf in nt.node_template_configs:
c_section = conf.node_process_property.node_process.name
c_name = conf.node_process_property.name
c_value = conf.value
if c_section not in d:
d[c_section] = dict()
d[c_section][c_name] = c_value
return Resource('node_template', d)
def _template_id_by_name(template):
return NodeTemplate.query.filter_by(name=template).first().id
def _type_id_by_name(_type):
return NodeType.query.filter_by(name=_type).first().id
def get_node_template(**args):
return _node_template(NodeTemplate.query.filter_by(**args).first())
def get_node_templates(**args):
return [_node_template(tmpl) for tmpl
in NodeTemplate.query.filter_by(**args).all()]
def create_node_template(values):
"""
Creates new node template from values dict
:param values: dict
:return: created node template resource
"""
values = values.pop('node_template')
name = values.pop('name')
node_type_id = _type_id_by_name(values.pop('node_type'))
# todo(slukjanov): take tenant_id from headers
tenant_id = "tenant-01"
flavor_id = values.pop('flavor_id')
nt = NodeTemplate(name, node_type_id, tenant_id, flavor_id)
DB.session.add(nt)
for process_name in values:
process = NodeProcess.query.filter_by(name=process_name).first()
conf = values.get(process_name)
for prop in process.node_process_properties:
val = conf.get(prop.name, None)
if not val and prop.required:
if not prop.default:
raise RuntimeError('Template \'%s\', value missed '
'for required param: %s %s'
% (name, process.name, prop.name))
val = prop.default
DB.session.add(NodeTemplateConfig(nt.id, prop.id, val))
DB.session.commit()
return get_node_template(id=nt.id)
def _cluster(cluster):
if not cluster:
abort_and_log(404, 'Cluster not found')
d = {
'id': cluster.id,
'name': cluster.name,
'base_image_id': cluster.base_image_id,
'status': cluster.status,
'service_urls': {},
'node_templates': {},
'nodes': [{'vm_id': n.vm_id,
'node_template': {
'id': n.node_template.id,
'name': n.node_template.name
}}
for n in cluster.nodes]
}
for ntc in cluster.node_counts:
d['node_templates'][ntc.node_template.name] = ntc.count
for service in cluster.service_urls:
d['service_urls'][service.name] = service.url
return Resource('cluster', d)
def get_cluster(**args):
return _cluster(Cluster.query.filter_by(**args).first())
def get_clusters(**args):
return [_cluster(cluster) for cluster in
Cluster.query.filter_by(**args).all()]
def create_cluster(values):
values = values.pop('cluster')
name = values.pop('name')
base_image_id = values.pop('base_image_id')
# todo(slukjanov): take tenant_id from headers
tenant_id = "tenant-01"
templates = values.pop('node_templates')
# todo(slukjanov): check that we can create objects in the specified tenant
cluster = Cluster(name, base_image_id, tenant_id)
DB.session.add(cluster)
for template in templates:
count = templates.get(template)
template_id = _template_id_by_name(template)
cnc = ClusterNodeCount(cluster.id, template_id, int(count))
DB.session.add(cnc)
DB.session.commit()
eventlet.spawn(_cluster_creation_job, request.headers, cluster.id)
return get_cluster(id=cluster.id)
def _cluster_creation_job(headers, cluster_id):
cluster = Cluster.query.filter_by(id=cluster_id).first()
LOG.debug("Starting cluster '%s' creation: %s", cluster_id,
_cluster(cluster).dict)
if CONF.allow_cluster_ops:
cluster_ops.launch_cluster(headers, cluster)
else:
LOG.info("Cluster ops are disabled, use --allow-cluster-ops flag")
# update cluster status
cluster = Cluster.query.filter_by(id=cluster.id).first()
cluster.status = 'Active'
DB.session.add(cluster)
DB.session.commit()
def terminate_cluster(**args):
# update cluster status
cluster = Cluster.query.filter_by(**args).first()
    cluster.status = 'Stopping'
DB.session.add(cluster)
DB.session.commit()
eventlet.spawn(_cluster_termination_job, request.headers, cluster.id)
def _cluster_termination_job(headers, cluster_id):
cluster = Cluster.query.filter_by(id=cluster_id).first()
LOG.debug("Stoping cluster '%s' creation: %s", cluster_id,
_cluster(cluster).dict)
if CONF.allow_cluster_ops:
cluster_ops.stop_cluster(headers, cluster)
else:
LOG.info("Cluster ops are disabled, use --allow-cluster-ops flag")
DB.session.delete(cluster)
DB.session.commit()
def terminate_node_template(**args):
template = NodeTemplate.query.filter_by(**args).first()
if template:
if len(template.nodes):
abort_and_log(500, "There are active nodes created using "
"template '%s' you trying to terminate"
% args)
else:
DB.session.delete(template)
DB.session.commit()
return True
else:
return False
| apache-2.0 | 9,215,404,232,533,538,000 | 28.913534 | 79 | 0.612919 | false |
kevkruemp/HRI_Plant_Monitor | motor_control.py | 1 | 1610 | # pypot dynamixel library
import pypot.dynamixel as pd
# DxlTimeoutError is assumed to be importable from pypot.dynamixel.io (true for
# recent pypot releases); adjust the import if your pypot version differs.
from pypot.dynamixel.io import DxlTimeoutError
# threading for motor control
import threading
import time
import numpy as np
# get ports
# USB2AX will be the first result
ports = pd.get_available_ports()
# connect to port
motors = pd.DxlIO(ports[0], 1000000)
# get list of motors
print 'Scanning for motors...'
motor_list = motors.scan()
print 'Found motors: ' + str(motor_list)
def set_speed(motor, speed):
motors.set_moving_speed({motor:speed})
# move wheel to limits
def move_to_limit(motor, speed):
# while (abs(motors.get_moving_speed({motor})[0])<1):
# motors.set_torque_limit({motor:100})
# time.sleep(0.2)
# motors.set_moving_speed({motor: speed})
# time.sleep(0.2)
# print motors.get_moving_speed({motor})[0]
print "Moving motor "+str(motor)+" speed "+str(speed)
while(1):
try:
# keep trying to move the motors
motors.set_torque_limit({motor:100})
time.sleep(0.2)
motors.set_moving_speed({motor: speed})
time.sleep(0.2)
load = motors.get_present_load({motor})[0]
# print motors.get_moving_speed({motor})[0]
# print load
# load = +-96 indicates stalling
if (abs(load+np.sign(speed)*96)<2):
raise KeyboardInterrupt
# catch either keyboard interrupts or motor errors
except KeyboardInterrupt, DxlTimeoutError:
# stop the motor
motors.set_moving_speed({motor: 0})
break
def get_load(motor):
return motors.get_present_load({motor})
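# Example usage (sketch; motor IDs and a safe speed depend on your hardware):
#   if motor_list:
#       move_to_limit(motor_list[0], 200)   # run the first motor until it stalls
#       print get_load(motor_list[0])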
| mit | -1,964,310,526,230,350,600 | 28.272727 | 58 | 0.612422 | false |
le9i0nx/ansible | lib/ansible/modules/network/avi/avi_systemconfiguration.py | 1 | 7082 | #!/usr/bin/python
#
# Created on Aug 25, 2016
# @author: Gaurav Rastogi ([email protected])
# Eric Anderson ([email protected])
# module_check: supported
# Avi Version: 17.1.1
#
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_systemconfiguration
author: Gaurav Rastogi ([email protected])
short_description: Module for setup of SystemConfiguration Avi RESTful Object
description:
- This module is used to configure SystemConfiguration object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.3"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent", "present"]
avi_api_update_method:
description:
- Default method for object update is HTTP PUT.
- Setting to patch will override that behavior to use HTTP PATCH.
version_added: "2.5"
default: put
choices: ["put", "patch"]
avi_api_patch_op:
description:
- Patch operation to use when using avi_api_update_method as patch.
version_added: "2.5"
choices: ["add", "replace", "delete"]
admin_auth_configuration:
description:
- Adminauthconfiguration settings for systemconfiguration.
default_license_tier:
description:
- Specifies the default license tier which would be used by new clouds.
- Enum options - ENTERPRISE_16, ENTERPRISE_18.
- Field introduced in 17.2.5.
- Default value when not specified in API or module is interpreted by Avi Controller as ENTERPRISE_18.
version_added: "2.5"
dns_configuration:
description:
- Dnsconfiguration settings for systemconfiguration.
dns_virtualservice_refs:
description:
- Dns virtualservices hosting fqdn records for applications across avi vantage.
- If no virtualservices are provided, avi vantage will provide dns services for configured applications.
- Switching back to avi vantage from dns virtualservices is not allowed.
- It is a reference to an object of type virtualservice.
docker_mode:
description:
- Boolean flag to set docker_mode.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
email_configuration:
description:
- Emailconfiguration settings for systemconfiguration.
global_tenant_config:
description:
- Tenantconfiguration settings for systemconfiguration.
linux_configuration:
description:
- Linuxconfiguration settings for systemconfiguration.
mgmt_ip_access_control:
description:
- Configure ip access control for controller to restrict open access.
ntp_configuration:
description:
- Ntpconfiguration settings for systemconfiguration.
portal_configuration:
description:
- Portalconfiguration settings for systemconfiguration.
proxy_configuration:
description:
- Proxyconfiguration settings for systemconfiguration.
snmp_configuration:
description:
- Snmpconfiguration settings for systemconfiguration.
ssh_ciphers:
description:
- Allowed ciphers list for ssh to the management interface on the controller and service engines.
- If this is not specified, all the default ciphers are allowed.
- Ssh -q cipher provides the list of default ciphers supported.
ssh_hmacs:
description:
- Allowed hmac list for ssh to the management interface on the controller and service engines.
- If this is not specified, all the default hmacs are allowed.
- Ssh -q mac provides the list of default hmacs supported.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Unique object identifier of the object.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Example to create SystemConfiguration object
avi_systemconfiguration:
controller: 10.10.25.42
username: admin
password: something
state: present
name: sample_systemconfiguration
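# A second, illustrative example (credentials and cipher names are placeholders):
- name: Patch SystemConfiguration to restrict SSH ciphers
  avi_systemconfiguration:
    controller: 10.10.25.42
    username: admin
    password: something
    state: present
    avi_api_update_method: patch
    avi_api_patch_op: replace
    ssh_ciphers:
      - aes128-ctr
      - aes256-ctr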
"""
RETURN = '''
obj:
description: SystemConfiguration (api/systemconfiguration) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.network.avi.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
avi_api_update_method=dict(default='put',
choices=['put', 'patch']),
avi_api_patch_op=dict(choices=['add', 'replace', 'delete']),
admin_auth_configuration=dict(type='dict',),
default_license_tier=dict(type='str',),
dns_configuration=dict(type='dict',),
dns_virtualservice_refs=dict(type='list',),
docker_mode=dict(type='bool',),
email_configuration=dict(type='dict',),
global_tenant_config=dict(type='dict',),
linux_configuration=dict(type='dict',),
mgmt_ip_access_control=dict(type='dict',),
ntp_configuration=dict(type='dict',),
portal_configuration=dict(type='dict',),
proxy_configuration=dict(type='dict',),
snmp_configuration=dict(type='dict',),
ssh_ciphers=dict(type='list',),
ssh_hmacs=dict(type='list',),
url=dict(type='str',),
uuid=dict(type='str',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'systemconfiguration',
set([]))
if __name__ == '__main__':
main()
| gpl-3.0 | -1,877,140,415,009,478,700 | 36.871658 | 116 | 0.65137 | false |
lethaljd/trical | trical.py | 1 | 2505 | """
A simple program to write an iCal file to create a calendar for the Low Volume Base
Training Plan for trainerroad.com
-Justin Deardorff 2015
"""
import re
import datetime
from datetime import timedelta
#defining iCal pieces for header, footer, and events
header = ["BEGIN:VCALENDAR\n",
"VERSION:2.0\n",
"X-WR-CALNAME: TrainerRoad.com LVBase\n",
"CALSCALE:GREGORIAN\n"]
footer = ["END:VCALENDAR"]
n1 = ["BEGIN:VEVENT\n",
"DTSTAMP:"] #after inserting this, curdtstamp is added
n5 = ["DTSTART;VALUE=DATE:"]
#after inserting this, add start date and line terminator
n2 = ["DTEND;VALUE=DATE:"]
#after inserting this, add date and line terminator
n3 = ["SUMMARY:"]
#after inserting this, add workout name and line terminator
n4 = ["END:VEVENT\n"]
#prompt user for plan start date
print "Please enter plan desired start date."
print "Tuesday start date recommended"
print "Enter date in the following format"
print "YYYYMMDD"
startdate = raw_input('>')
#validate input meets requirements
while len(startdate) != 8:
print "Incorrect date format!"
print "Enter date in the following format"
print "YYYYMMDD"
startdate = raw_input('>')
print "Enter input file name, include filename extension"
print "example.txt"
wrkfile = raw_input('>')
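# Expected input file format (one workout per line; illustrative example below):
#   <workout name>,<gap>
# where the next workout is scheduled <gap>+1 days after this one, e.g.:
#   Sweet Spot Base 1,1
#   Recovery Ride,2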
#open input file
infile = open(wrkfile, "r")
#open output file
outfile = open("trbasecal.ics", "w+")
#generate ical header info and write to output file
outfile.writelines(header)
#declare counter variable for workout
workoutnum = 0
for line in infile:
name, days = line.split(",",1) #splits infile into two variables called name and days
name = str(name)
days = int(days)+1
curdtstamp = datetime.datetime.now().strftime("%Y%m%d"+"T"+"%H%M%S"+"Z") #calcs current DTSTAMP
outfile.writelines(n1) #writes beginning of event block
outfile.write(curdtstamp + "\n")
outfile.writelines(n5)
outfile.write(startdate + "\n")
outfile.writelines(n2)
outfile.write(startdate + "\n")
outfile.writelines(n3)
outfile.write(name)
outfile.write("\n")
outfile.writelines(n4)
workoutnum+=1
    # calculate the next workout's start date
prevdate = datetime.datetime.strptime(startdate, "%Y%m%d")
startdate = prevdate + datetime.timedelta(days=days)
startdate = startdate.strftime("%Y%m%d")
#when loop completes, write iCal file end syntax
outfile.write("END:VCALENDAR")
#close files
outfile.close()
#success message
print "iCal file created. %i workouts added to calendar." %workoutnum
#exit
| gpl-2.0 | -15,695,206,011,885,322 | 23.558824 | 96 | 0.720958 | false |
spapas/auditing-sample | sample/migrations/0001_initial.py | 1 | 1110 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Book',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created_on', models.DateTimeField(auto_now_add=True)),
('modified_on', models.DateTimeField(auto_now=True)),
('name', models.CharField(max_length=128)),
('author', models.CharField(max_length=128)),
('created_by', models.ForeignKey(related_name=b'created_by', to=settings.AUTH_USER_MODEL)),
('modified_by', models.ForeignKey(related_name=b'modified_by', to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
bases=(models.Model,),
),
]
| unlicense | -3,547,652,464,458,262,000 | 34.806452 | 114 | 0.574775 | false |
marchaos/plugin.image.flickr | default.py | 1 | 41121 | #!/usr/bin/python
import flickrapi
import urllib
import xbmc, xbmcgui, xbmcplugin, xbmcaddon #@UnresolvedImport
import sys, os, time
from urllib2 import HTTPError, URLError
__plugin__ = 'flickr'
__author__ = 'ruuk'
__url__ = 'http://code.google.com/p/flickrxbmc/'
__date__ = '01-07-2013'
__settings__ = xbmcaddon.Addon(id='plugin.image.flickr')
__version__ = __settings__.getAddonInfo('version')
__language__ = __settings__.getLocalizedString
IMAGES_PATH = os.path.join(xbmc.translatePath(__settings__.getAddonInfo('path')),'resources', 'images')
CACHE_PATH = xbmc.translatePath('special://profile/addon_data/plugin.image.flickr/cache/')
import locale
loc = locale.getdefaultlocale()
ENCODING = loc[1] or 'utf-8'
ShareSocial = None
def ENCODE(string):
return string.encode(ENCODING,'replace')
def LOG(message):
print 'plugin.image.flickr: %s' % ENCODE(str(message))
def ERROR(message,caption=''):
LOG(message)
import traceback
traceback.print_exc()
err = str(sys.exc_info()[1])
xbmcgui.Dialog().ok(__language__(30520) + caption,err)
return err
if not os.path.exists(CACHE_PATH): os.makedirs(CACHE_PATH)
class NetworkTokenCache(flickrapi.tokencache.TokenCache):
def __init__(self, api_key, username=None):
flickrapi.tokencache.TokenCache.__init__(self,api_key, username)
self.path = __settings__.getSetting('network_token_path')
self.localBackup = flickrapi.tokencache.TokenCache(api_key,username)
def get_cached_token_path(self,filename=''):
if os.path.exists(self.path): return os.path.join(self.path, self.api_key, filename)
path = self.path.rstrip('/') + '/' + self.api_key
if filename: path += '/' + filename
return path
def get_cached_token_filename(self):
if self.username:
filename = 'auth-%s.token' % self.username
else:
filename = 'auth.token'
return self.get_cached_token_path(filename)
def set_cached_token(self, token):
self.localBackup.set_cached_token(token)
self.memory[self.username] = token
if not token: return
import xbmcvfs
path = self.get_cached_token_path()
if not xbmcvfs.exists(path):
xbmcvfs.mkdirs(path)
f = xbmcvfs.File(self.get_cached_token_filename(), "w")
f.write(str(token))
f.close()
def get_cached_token(self):
backup = self.localBackup.get_cached_token()
if self.username in self.memory: return self.memory[self.username]
import xbmcvfs
filename = self.get_cached_token_filename()
if xbmcvfs.exists(filename):
try:
f = xbmcvfs.File(filename)
token = f.read()
f.close()
return token.strip()
except:
pass
return backup
def forget(self):
self.localBackup.forget()
if self.username in self.memory:
del self.memory[self.username]
import xbmcvfs
filename = self.get_cached_token_filename()
if xbmcvfs.exists(filename):
xbmcvfs.delete(filename)
@staticmethod
def isValid():
import xbmcvfs
path = __settings__.getSetting('network_token_path')
return path and xbmcvfs.exists(path)
token = property(get_cached_token, set_cached_token, forget, "The cached token")
class flickrPLUS(flickrapi.FlickrAPI):
def __init__(self, api_key, secret=None, username=None, token=None, format='etree', store_token=True, cache=False):
flickrapi.FlickrAPI.__init__(self, api_key, secret, username, token, format, store_token, cache)
if NetworkTokenCache.isValid():
self.token_cache = NetworkTokenCache(api_key, username)
def walk_photos_by_page(self, method, **params):
rsp = method(**params)
photoset = rsp.getchildren()[0]
page = int(photoset.attrib.get('page','1'))
pages = int(photoset.attrib.get('pages','1'))
perpage = int(photoset.attrib.get('perpage','1'))
total = int(photoset.attrib.get('total','1'))
self.TOTAL = total
self.TOTAL_ON_LAST_PAGE = total % perpage
self.TOTAL_ON_PAGE = perpage
self.TOTAL_PAGES = pages
if page == pages: self.TOTAL_ON_PAGE = self.TOTAL_ON_LAST_PAGE
photos = rsp.findall('*/photo')
# Yield each photo
for photo in photos:
yield photo
def get_full_token(self, mini_token):
'''Gets the token given a certain frob. Used by ``get_token_part_two`` and
by the web authentication method.
'''
# get a token
rsp = self.auth_getFullToken(mini_token=mini_token, format='xmlnode')
token = rsp.auth[0].token[0].text
flickrapi.LOG.debug("get_token: new token '%s'" % token)
# store the auth info for next time
self.token_cache.token = token
return token
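# Usage sketch (illustrative; api_key/api_secret are your Flickr credentials and
# photos_search is a standard flickrapi call):
#   f = flickrPLUS(api_key, api_secret)
#   for photo in f.walk_photos_by_page(f.photos_search, tags='sunset', page=1,
#                                      per_page=50, extras='url_sq,url_m'):
#       print photo.get('id'), photo.get('url_m')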
def photoURL(farm,server,nsid,secret='',buddy=False,size='',ext='jpg'):
replace = (farm,server,nsid)
if secret: secret = '_' + secret
if buddy:
return 'http://farm%s.staticflickr.com/%s/buddyicons/%s.jpg' % replace #last %s not is to use same replace
elif not size:
return 'http://farm%s.staticflickr.com/%s/%s%s.jpg' % (replace + (secret,))
else:
return 'http://farm%s.staticflickr.com/%s/%s%s_%s.%s' % (replace + (secret,size,ext))
'''
s small square 75x75
q large square 150x150
t thumbnail, 100 on longest side
m small, 240 on longest side
n small, 320 on longest side
- medium, 500 on longest side
z medium 640, 640 on longest side
b large, 1024 on longest side*
o original image, either a jpg, gif or png, depending on source format
'''
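# Illustrative example (made-up farm/server/photo-id/secret values):
#   photoURL('5', '4567', '1234567890', secret='abcdef012', size='s')
#   -> 'http://farm5.staticflickr.com/4567/1234567890_abcdef012_s.jpg'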
class Maps:
def __init__(self):
self.map_source = ['google','yahoo','osm'][int(__settings__.getSetting('default_map_source'))]
if self.map_source == 'yahoo':
import elementtree.ElementTree as et #@UnresolvedImport
self.ET = et
self.zoom = { 'country':int(__settings__.getSetting('country_zoom')),
'region':int(__settings__.getSetting('region_zoom')),
'locality':int(__settings__.getSetting('locality_zoom')),
'neighborhood':int(__settings__.getSetting('neighborhood_zoom')),
'photo':int(__settings__.getSetting('photo_zoom'))}
self.default_map_type = ['hybrid','satellite','terrain','roadmap'][int(__settings__.getSetting('default_map_type'))]
def getMap(self,lat,lon,zoom,width=256,height=256,scale=1,marker=False):
#640x36
source = self.map_source
lat = str(lat)
lon = str(lon)
zoom = str(self.zoom[zoom])
#create map file name from lat,lon,zoom and time. Take that thumbnail cache!!! :)
fnamebase = (lat+lon+zoom+str(int(time.time()))).replace('.','')
ipath = os.path.join(CACHE_PATH,fnamebase+'.jpg')
mark = ''
if marker:
if source == 'osm':
mark = '&mlat0=' + lat + '&mlon0=' + lon + '&mico0=0'
elif source == 'yahoo':
mark = ''
else:
mark = '&markers=color:blue|' + lat + ',' + lon
if source == 'osm':
url = "http://ojw.dev.openstreetmap.org/StaticMap/?lat="+lat+"&lon="+lon+"&z="+zoom+"&w="+str(width)+"&h="+str(height)+"&show=1&fmt=jpg"
elif source == 'yahoo':
#zoom = str((int((21 - int(zoom)) * (12/21.0)) or 1) + 1)
zoom = self.translateZoomToYahoo(zoom)
xml = urllib.urlopen("http://local.yahooapis.com/MapsService/V1/mapImage?appid=BteTjhnV34E7M.r_gjDLCI33rmG0FL7TFPCMF7LHEleA_iKm6S_rEjpCmns-&latitude="+lat+"&longitude="+lon+"&image_height="+str(height)+"&image_width="+str(width)+"&zoom="+zoom).read()
url = self.ET.fromstring(xml).text.strip()
url = urllib.unquote_plus(url)
if 'error' in url: return ''
else:
url = "http://maps.google.com/maps/api/staticmap?center="+lat+","+lon+"&zoom="+zoom+"&size="+str(width)+"x"+str(height)+"&sensor=false&maptype="+self.default_map_type+"&scale="+str(scale)+"&format=jpg"
fname,ignore = urllib.urlretrieve(url + mark,ipath) #@UnusedVariable
return fname
def translateZoomToYahoo(self,zoom):
#Yahoo and your infernal static maps 12 level zoom!
#This matches as closely as possible the defaults for google and osm while allowing all 12 values
zoom = 16 - int(zoom)
if zoom < 1: zoom = 1
if zoom >12: zoom = 12
return str(zoom)
def doMap(self):
clearDirFiles(CACHE_PATH)
self.getMap(sys.argv[2],sys.argv[3],'photo',width=640,height=360,scale=2,marker=True)
xbmc.executebuiltin('SlideShow('+CACHE_PATH+')')
class FlickrSession:
API_KEY = '0a802e6334304794769996c84c57d187'
API_SECRET = '655ce70e86ac412e'
MOBILE_API_KEY = 'f9b69ca9510b3f55fdc15aa869614b39'
MOBILE_API_SECRET = 'fdba8bb77fc10921'
DISPLAY_VALUES = ['Square','Thumbnail','Small','Medium','Medium640','Large','Original']
SIZE_KEYS = { 'Square':'url_sq',
'Thumbnail':'url_t',
'Small':'url_s',
'Medium':'url_m',
'Medium640':'url_z',
'Large':'url_l',
'Original':'url_o'}
def __init__(self,username=None):
self.flickr = None
self._authenticated = False
self.mobile = True
self.username = username
self.user_id = None
self.loadSettings()
self.maps = None
self.justAuthorized = False
self.isSlideshow = False
self._isMobile = None
if __settings__.getSetting('enable_maps') == 'true': self.maps = Maps()
def authenticated(self): return self._authenticated
def loadSettings(self):
self.username = __settings__.getSetting('flickr_username')
self.defaultThumbSize = self.getDisplayValue(__settings__.getSetting('default_thumb_size'))
self.defaultDisplaySize = self.getDisplayValue(__settings__.getSetting('default_display_size'))
mpp = __settings__.getSetting('max_per_page')
mpp = [10,20,30,40,50,75,100,200,500][int(mpp)]
self.max_per_page = mpp
def getDisplayValue(self,index):
return self.DISPLAY_VALUES[int(index)]
def isMobile(self,set=None):
if set == None:
if self._isMobile != None: return self._isMobile
return __settings__.getSetting('mobile') == 'true'
if set:
__settings__.setSetting('mobile','true')
self.flickr.api_key = self.MOBILE_API_KEY
self.flickr.secret = self.MOBILE_API_SECRET
else:
__settings__.setSetting('mobile','false')
self.flickr.api_key = self.API_KEY
self.flickr.secret = self.API_SECRET
self._isMobile = set
def getKeys(self):
if self.isMobile():
return self.MOBILE_API_KEY,self.MOBILE_API_SECRET
else:
return self.API_KEY,self.API_SECRET
def doTokenDialog(self,frob,perms):
# if False:
# try:
# from webviewer import webviewer #@UnresolvedImport @UnusedImport
# yes = xbmcgui.Dialog().yesno('Authenticate','Press \'Yes\' to authenticate in any browser','Press \'No\' to use Web Viewer (If Installed)')
# if not yes:
# self.isMobile(False)
# self.doNormalTokenDialog(frob, perms)
# return
# except ImportError:
# LOG("Web Viewer Not Installed - Using Mobile Method")
# pass
# except:
# ERROR('')
# return
self.isMobile(True)
self.doMiniTokenDialog(frob, perms)
def doNormalTokenDialog(self,frob,perms):
url = self.flickr.auth_url('read',frob)
if PLUGIN: xbmcplugin.endOfDirectory(int(sys.argv[1]),succeeded=False)
self.justAuthorized = True
xbmcgui.Dialog().ok(__language__(30507),__language__(30508),__language__(30509))
from webviewer import webviewer #@UnresolvedImport
autoforms = [ {'action':'login.yahoo.com/config/login'},
{'url':'.+perms=.+','action':'services/auth','index':2},
{'url':'.+services/auth/$','action':'services/auth'}]
autoClose = { 'url':'.+services/auth/$',
'html':'(?s).+successfully authorized.+',
'heading':__language__(30505),
'message':__language__(30506)}
url,html = webviewer.getWebResult(url,autoForms=autoforms,autoClose=autoClose) #@UnusedVariable
LOG('AUTH RESPONSE URL: ' + url)
def extractTokenFromURL(self,url):
from cgi import parse_qs
import urlparse
try:
token = parse_qs(urlparse.urlparse(url.replace('#','?',1))[4])['token'][0].strip()
except:
LOG('Invalid Token')
return None
return token
def doMiniTokenDialog(self,frob,perms):
xbmcgui.Dialog().ok("AUTHENTICATE",'Go to flickr.2ndmind.com','get the code and click OK to continue')
mini_token = ''
message = 'Enter 9 digit code'
while not len(mini_token) == 9 or not mini_token.isdigit():
keyboard = xbmc.Keyboard('',message)
message = 'BAD CODE. Re-enter 9 digit code'
keyboard.doModal()
if not keyboard.isConfirmed(): return
mini_token = keyboard.getText().replace('-','')
if not mini_token: return
self.flickr.get_full_token(mini_token) #@UnusedVariable
def authenticate(self,force=False):
key,secret = self.getKeys()
self.flickr = flickrPLUS(key,secret)
if force:
self.flickr.token_cache.token = ''
else:
if __settings__.getSetting('authenticate') != 'true': return True
(token, frob) = self.flickr.get_token_part_one(perms='read',auth_callback=self.doTokenDialog)
if self.isMobile():
result = self.authenticateMobile(self.flickr.token_cache.token)
else:
result = self.authenticateWebViewer(token,frob)
if result: self._authenticated = True
return result
def authenticateWebViewer(self,token,frob):
try:
self.flickr.get_token_part_two((token, frob))
except:
if self.justAuthorized:
xbmcgui.Dialog().ok(__language__(30520),__language__(30521),str(sys.exc_info()[1]))
else:
xbmcgui.Dialog().ok(__language__(30522),__language__(30523),str(sys.exc_info()[1]))
LOG("Failed to get token. Probably did not authorize.")
LOG("AUTH DONE")
if self.justAuthorized: return False
return self.finishAuthenticate(token)
def authenticateMobile(self,token):
if not token:
LOG("Failed to get token (Mobile). Probably did not authorize.")
return False
return self.finishAuthenticate(token)
def finishAuthenticate(self,token):
self.flickr.token_cache.token = token
# if self.username:
# try:
# user = self.flickr.people_findByUsername(username=self.username)
# self.user_id = user.findall('*')[0].get('id')
# return True
# except:
# ERROR('Failed to authenticate with username in settings')
rsp = self.flickr.auth_checkToken(auth_token=token,format='xmlnode')
user = rsp.auth[0].user[0]
self.user_id = user.attrib.get('nsid')
self.username = user.attrib.get('username')
if self.username: __settings__.setSetting('flickr_username',self.username)
return True
def getCollectionsInfoList(self,userid=None,cid='0'):
if not userid: userid = self.user_id
col = self.flickr.collections_getTree(user_id=userid,collection_id=cid)
info_list = []
mode = None
colCount = len(col.find('collections').findall('collection'));
if colCount < 1: return (2,[])
if colCount > 1 or (colCount < 2 and col.find('collections').find('collection').attrib.get('id') != cid):
mode = 2
for c in col.find('collections').findall('collection'):
if cid != c.attrib.get('id'): info_list.append({'title':c.attrib.get('title',''),'id':c.attrib.get('id',''),'tn':c.attrib.get('iconlarge','')})
else:
mode = 103
tn_dict = self.getSetsThumbnailDict(userid=userid)
for c in col.find('collections').find('collection').findall('set'):
info_list.append({'title':c.attrib.get('title',''),'id':c.attrib.get('id',''),'tn':tn_dict.get(c.attrib.get('id',''),'')})
return (mode, info_list)
def getSetsInfoList(self,userid=None):
if not userid: userid = self.user_id
sets = self.flickr.photosets_getList(user_id=userid)
info_list = []
for s in sets.find('photosets').findall('photoset'):
tn = "http://farm"+s.attrib.get('farm','')+".static.flickr.com/"+s.attrib.get('server','')+"/"+s.attrib.get('primary','')+"_"+s.attrib.get('secret','')+"_q.jpg"
info_list.append({'title':s.find('title').text,'count':s.attrib.get('photos','0'),'id':s.attrib.get('id',''),'tn':tn})
return info_list
def getContactsInfoList(self,userid=None):
if userid: contacts = self.flickr.contacts_getPublicList(user_id=userid)
else: contacts = self.flickr.contacts_getList()
info_list = []
for c in contacts.find('contacts').findall('contact'):
if c.attrib.get('iconserver','') == '0':
tn = 'http://l.yimg.com/g/images/buddyicon.jpg'
else:
tn = "http://farm"+c.attrib.get('iconfarm','')+".static.flickr.com/"+c.attrib.get('iconserver','')+"/buddyicons/"+c.attrib.get('nsid','')+".jpg"
info_list.append({'username':c.attrib.get('username',''),'id':c.attrib.get('nsid',''),'tn':tn})
return info_list
def getGroupsInfoList(self,userid=None,search=None,page=1):
total = None
if search:
groups = self.flickr.groups_search(text=search,page=page,per_page=self.max_per_page)
info = groups.find('groups')
page = int(info.attrib.get('page','1'))
pages = int(info.attrib.get('pages','1'))
perpage = int(info.attrib.get('perpage','1'))
total = int(info.attrib.get('total','1'))
self.flickr.TOTAL = total
self.flickr.TOTAL_ON_LAST_PAGE = total % perpage
self.flickr.TOTAL_ON_PAGE = perpage
self.flickr.TOTAL_PAGES = pages
if page == pages: self.flickr.TOTAL_ON_PAGE = self.flickr.TOTAL_ON_LAST_PAGE
else:
if not userid: userid = self.user_id
groups = self.flickr.groups_pools_getGroups(user_id=userid)
info_list = []
for g in groups.find('groups').findall('group'):
tn = "http://farm"+g.attrib.get('iconfarm','')+".static.flickr.com/"+g.attrib.get('iconserver','')+"/buddyicons/"+g.attrib.get('nsid','')+".jpg"
info_list.append({'name':g.attrib.get('name','0'),'count':g.attrib.get('photos',g.attrib.get('pool_count','0')),'id':g.attrib.get('id',g.attrib.get('nsid','')),'tn':tn})
return info_list
def getGalleriesInfoList(self,userid=None):
if not userid: userid = self.user_id
galleries = self.flickr.galleries_getList(user_id=userid)
info_list = []
for g in galleries.find('galleries').findall('gallery'):
tn = "http://farm"+g.attrib.get('primary_photo_farm','')+".static.flickr.com/"+g.attrib.get('primary_photo_server','')+"/"+g.attrib.get('primary_photo_id','')+"_"+g.attrib.get('primary_photo_secret','')+"_s.jpg"
info_list.append({ 'title':g.find('title').text,
'id':g.attrib.get('id'),
'tn':tn})
return info_list
def getTagsList(self,userid=None):
if not userid: userid = self.user_id
tags = self.flickr.tags_getListUser(user_id=userid)
t_list = []
for t in tags.find('who').find('tags').findall('tag'):
t_list.append(t.text)
return t_list
def getPlacesInfoList(self,pid,woeid=None):
#12,8,7
places = self.flickr.places_placesForUser(place_type_id=pid,woe_id=woeid)
info_list=[]
for p in places.find('places').findall('place'):
info_list.append({ 'place':p.text.split(',')[0],
'woeid':p.attrib.get('woeid'),
'count':p.attrib.get('photo_count'),
'lat':p.attrib.get('latitude'),
'lon':p.attrib.get('longitude')})
return info_list
def getSetsThumbnailDict(self,userid=None):
if not userid: userid = self.user_id
sets = self.flickr.photosets_getList(user_id=userid)
tn_dict = {}
for s in sets.find('photosets').findall('photoset'):
tn_dict[s.attrib.get('id','0')] = "http://farm"+s.attrib.get('farm','')+".static.flickr.com/"+s.attrib.get('server','')+"/"+s.attrib.get('primary','')+"_"+s.attrib.get('secret','')+"_s.jpg"
return tn_dict
def getImageUrl(self,pid,label='Square'):
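        # Returns the source URL of the requested size label for a photo, or a dict of
        # every available label -> URL when label == 'all' (used to pick video streams).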
ps = self.flickr.photos_getSizes(photo_id=pid)
if label == 'all':
allsizes = {}
for s in ps.find('sizes').findall('size'):
allsizes[s.get('label')] = s.get('source')
#if not 'Original' in allsizes: allsizes['Original'] = ps.find('sizes')[0].findall('size')[-1].get('source')
return allsizes
for s in ps.find('sizes').findall('size'):
if s.get('label') == label:
return s.get('source')
def addPhotos(self,method,mode,url='BLANK',page='1',mapOption=True,with_username=False,**kwargs):
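        # Generic paginated lister: adds a 'previous' entry when needed, walks one page of the
        # given flickr API method, adds each photo via addPhoto(), then appends a 'next' entry
        # with progress counts.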
global ShareSocial
try:
import ShareSocial #analysis:ignore
except:
pass
page = int(page)
#Add Previous Header if necessary
if page > 1:
previous = '<- '+__language__(30511)
pg = (page==2) and '-1' or str(page-1) #if previous page is one, set to -1 to differentiate from initial showing
self.addDir(previous.replace('@REPLACE@',str(self.max_per_page)),url,mode,os.path.join(IMAGES_PATH,'previous.png'),page = pg,userid=kwargs.get('userid',''))
#info_list = []
extras = 'media, date_upload, date_taken, url_sq, url_t, url_s, url_m, url_l,url_o' + self.SIZE_KEYS[self.defaultThumbSize] + ',' + self.SIZE_KEYS[self.defaultDisplaySize]
if mapOption: extras += ',geo'
#Walk photos
ct=0
mpp = self.max_per_page
if self.isSlideshow: mpp = 500
for photo in self.flickr.walk_photos_by_page(method,page=page,per_page=mpp,extras=extras,**kwargs):
ok = self.addPhoto(photo, mapOption=mapOption,with_username=with_username)
if not ok: break
ct+=1
#Add Next Footer if necessary
#print "PAGES: " + str(page) + " " + str(self.flickr.TOTAL_PAGES) + " " + str(self.flickr.TOTAL_ON_LAST_PAGE)
if ct >= self.max_per_page or page < self.flickr.TOTAL_PAGES:
sofar = (max(0,page - 1) * self.max_per_page) + ct
nextp = '({0}/{1}) '.format(sofar,self.flickr.TOTAL)
replace = ''
if page + 1 == self.flickr.TOTAL_PAGES:
nextp += __language__(30513)
if self.flickr.TOTAL_ON_LAST_PAGE: replace = str(self.flickr.TOTAL_ON_LAST_PAGE)
else: replace = str(self.max_per_page)
else:
nextp += __language__(30512)
replace = str(self.max_per_page)
if page < self.flickr.TOTAL_PAGES: self.addDir(nextp.replace('@REPLACE@',replace)+' ->',url,mode,os.path.join(IMAGES_PATH,'next.png'),page=str(page+1),userid=kwargs.get('userid',''))
def addPhoto(self,photo,mapOption=False,with_username=False):
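        # Adds a single list item: derives a title (date taken, upload time or photo id as
        # fallbacks), resolves thumbnail/display URLs for the configured sizes, and attaches
        # map/share/save context-menu entries.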
pid = photo.get('id')
title = photo.get('title')
if not title:
title = photo.get('datetaken')
if not title:
try: title = time.strftime('%m-%d-%y %I:%M %p',time.localtime(int(photo.get('dateupload'))))
except: pass
if not title: title = pid
if with_username:
username = photo.get('username','') or ''
title = '[B]%s:[/B] %s' % (username,title)
ptype = photo.get('media') == 'video' and 'video' or 'image'
#ptype = 'image'
thumb = photo.get(self.SIZE_KEYS[self.defaultThumbSize])
display = photo.get(self.SIZE_KEYS[self.defaultDisplaySize])
if not (thumb and display):
display = photo.get(self.SIZE_KEYS[self.defaultDisplaySize],photo.get('url_o',''))
thumb = photo.get(self.SIZE_KEYS[self.defaultThumbSize],photo.get('url_s',''))
if not display:
rd = self.DISPLAY_VALUES[:]
rd.reverse()
for s in rd:
if photo.get(s):
display = photo.get(s)
break
sizes = {}
if ptype == 'video':
sizes = self.getImageUrl(pid,'all')
display = selectVideoURL(sizes)
#display = 'plugin://plugin.image.flickr/?play_video&' + pid
contextMenu = []
if mapOption:
lat=photo.get('latitude')
lon=photo.get('longitude')
if not lat+lon == '00':
contextMenu.append((__language__(30510),'XBMC.RunScript(special://home/addons/plugin.image.flickr/default.py,map,'+lat+','+lon+')'))
if ShareSocial:
run = self.getShareString(photo,sizes)
if run: contextMenu.append(('Share...',run))
saveURL = photo.get('url_o',display)
contextMenu.append((__language__(30517),'XBMC.RunScript(special://home/addons/plugin.image.flickr/default.py,save,'+urllib.quote_plus(saveURL)+','+title+')'))
#contextMenu.append(('Test...','XBMC.RunScript(special://home/addons/plugin.image.flickr/default.py,slideshow)'))
return self.addLink(title,display,thumb,tot=self.flickr.TOTAL_ON_PAGE,contextMenu=contextMenu,ltype=ptype)
def getShareString(self,photo,sizes):
plink = 'http://www.flickr.com/photos/%s/%s' % (photo.get('owner',self.user_id),photo.get('id'))
if photo.get('media') == 'photo':
share = ShareSocial.getShare('plugin.image.flickr','image')
else:
share = ShareSocial.getShare('plugin.image.flickr','video')
share.sourceName = 'flickr'
share.page = plink
share.latitude = photo.get('latitude')
share.longitude = photo.get('longitude')
if photo.get('media') == 'photo':
share.thumbnail = photo.get('url_t',photo.get('url_s',''))
share.media = photo.get('url_l',photo.get('url_o',photo.get('url_t','')))
share.title = 'flickr Photo: %s' % photo.get('title')
elif photo.get('media') == 'video':
share.thumbnail = photo.get('url_o',photo.get('url_l',photo.get('url_m','')))
embed = '<object type="application/x-shockwave-flash" width="%s" height="%s" data="%s" classid="clsid:D27CDB6E-AE6D-11cf-96B8-444553540000"> <param name="flashvars" value="flickr_show_info_box=false"></param> <param name="movie" value="%s"></param><param name="bgcolor" value="#000000"></param><param name="allowFullScreen" value="true"></param><embed type="application/x-shockwave-flash" src="%s" bgcolor="#000000" allowfullscreen="true" flashvars="flickr_show_info_box=false" height="%s" width="%s"></embed></object>'
url = sizes.get('Video Player','')
embed = embed % (640,480,url,url,url,480,640)
share.title = 'flickr Video: %s' % photo.get('title')
share.swf = url
share.media = sizes.get('Site MP4',sizes.get('Video Original',''))
share.embed = embed
else:
return None
return share.toPluginRunscriptString()
def userID(self):
if self.user_id: return self.user_id
username = __settings__.getSetting('flickr_username')
self.username = username
if not username: return None
self.user_id = self.getUserID(username)
return self.userID()
def getUserID(self,username):
if not username: return None
obj = self.flickr.people_findByUsername(username=username)
user = obj.find('user')
return user.attrib.get('nsid')
def CATEGORIES(self):
uid = self.userID()
if self.authenticated():
self.addDir(__language__(30300),'photostream',1,os.path.join(IMAGES_PATH,'photostream.png'))
self.addDir(__language__(30301),'collections',2,os.path.join(IMAGES_PATH,'collections.png'))
self.addDir(__language__(30302),'sets',3,os.path.join(IMAGES_PATH,'sets.png'))
self.addDir(__language__(30303),'galleries',4,os.path.join(IMAGES_PATH,'galleries.png'))
self.addDir(__language__(30304),'tags',5,os.path.join(IMAGES_PATH,'tags.png'))
self.addDir(__language__(30307),'places',8,os.path.join(IMAGES_PATH,'places.png'))
self.addDir(__language__(30305),'favorites',6,os.path.join(IMAGES_PATH,'favorites.png'))
self.addDir(__language__(30306),'contacts',7,os.path.join(IMAGES_PATH,'contacts.png'))
self.addDir(__language__(30311),'groups',12,os.path.join(IMAGES_PATH,'groups.png'))
self.addDir(__language__(30308),'@@search@@',9,os.path.join(IMAGES_PATH,'search_photostream.png'))
elif uid:
self.CONTACT(uid, self.username)
self.addDir(__language__(30309),'@@search@@',10,os.path.join(IMAGES_PATH,'search_flickr.png'))
self.addDir(__language__(30312),'@@search@@',13,os.path.join(IMAGES_PATH,'search_flickr.png'))
self.addDir(__language__(30310),'interesting',11,os.path.join(IMAGES_PATH,'interesting.png'))
def PHOTOSTREAM(self,page,mode=1,userid='me'):
#if not self.authenticated() and userid == 'me':
# userid = self.userID()
# if not userid: return
#
self.addPhotos(self.flickr.photos_search,mode,url=userid,page=page,user_id=userid)
def COLLECTION(self,cid,userid=None):
if cid == 'collections': cid = 0
mode,cols = self.getCollectionsInfoList(cid=cid,userid=userid)
total = len(cols)
for c in cols:
if not self.addDir(c['title'],c['id'],mode,c['tn'],tot=total,userid=userid): break
def SETS(self,mode=103,userid=None):
sets = self.getSetsInfoList(userid=userid)
total = len(sets)
for s in sets:
if not self.addDir(s['title']+' ('+s['count']+')',s['id'],mode,s['tn'],tot=total): break
def GALLERIES(self,userid=None):
galleries = self.getGalleriesInfoList(userid=userid)
for g in galleries:
if not self.addDir(g.get('title',''),g.get('id'),104,g.get('tn'),tot=len(galleries)): break
def TAGS(self,userid=''):
tags = self.getTagsList(userid=userid)
for t in tags:
if not self.addDir(t,t,105,'',tot=len(tags),userid=userid): break
def PLACES(self,pid,woeid=None,name='',zoom='2'):
places = self.getPlacesInfoList(pid,woeid=woeid)
#If there are no places in this place id level, show all the photos
if not places:
self.PLACE(woeid,1)
return
if woeid and len(places) > 1: self.addDir(__language__(30500).replace('@REPLACE@',name),woeid,1022,'')
idx=0
for p in places:
count = p.get('count','0')
tn = ''
if self.maps: tn = self.maps.getMap(p.get('lat','0'),p.get('lon','0'),zoom)
if not self.addDir(p.get('place','')+' ('+count+')',p.get('woeid'),1000 + pid,tn,tot=len(places)): break
idx+=1
def FAVORITES(self,page,userid=None):
self.addPhotos(self.flickr.favorites_getList,6,page=page,user_id=userid)
def CONTACTS(self,userid=None):
contacts = self.getContactsInfoList(userid=userid)
total = len(contacts) + 1
for c in contacts:
if not self.addDir(c['username'],c['id'],107,c['tn'],tot=total): break
if contacts:
self.addDir("[B][%s][/B]" % __language__(30518),'recent_photos',800,os.path.join(IMAGES_PATH,'photostream.png'),tot=total)
def CONTACTS_RECENT_PHOTOS(self,userid=None):
self.addPhotos(self.flickr.photos_getContactsPhotos,800,mapOption=True, with_username=True, count=50)
def GROUPS(self,userid=None):
groups = self.getGroupsInfoList(userid)
total = len(groups)
for g in groups:
if not self.addDir(g['name'] + ' (%s)' % g['count'],g['id'],112,g['tn'],tot=total): break
def getText(self,prompt=__language__(30501)):
keyboard = xbmc.Keyboard('',prompt)
keyboard.doModal()
if (keyboard.isConfirmed()):
return keyboard.getText()
return None
def SEARCH_GROUPS(self,tags,page=1):
if not tags or tags == '@@search@@':
tags = self.getText() or tags
groups = self.getGroupsInfoList(search=tags,page=page)
total = len(groups)
page = int(page)
#Add Previous Header if necessary
if page > 1:
previous = '<- '+__language__(30511)
pg = (page==2) and '-1' or str(page-1) #if previous page is one, set to -1 to differentiate from initial showing
self.addDir(previous.replace('@REPLACE@',str(self.max_per_page)),tags,13,os.path.join(IMAGES_PATH,'previous.png'),page = pg)
for g in groups:
if not self.addDir(g['name'] + ' (%s)' % g['count'],g['id'],112,g['tn'],tot=total): break
if total >= self.max_per_page:
nextp = '('+str(page*self.max_per_page)+'/'+str(self.flickr.TOTAL)+') '
replace = ''
if page + 1 == self.flickr.TOTAL_PAGES:
nextp += __language__(30513)
if self.flickr.TOTAL_ON_LAST_PAGE: replace = str(self.flickr.TOTAL_ON_LAST_PAGE)
else: replace = str(self.max_per_page)
else:
nextp += __language__(30512)
replace = str(self.max_per_page)
if page < self.flickr.TOTAL_PAGES: self.addDir(nextp.replace('@REPLACE@',replace)+' ->',tags,13,os.path.join(IMAGES_PATH,'next.png'),page=str(page+1))
def SEARCH_TAGS(self,tags,page,mode=9,userid=None):
if tags == '@@search@@' or tags == userid:
tags = self.getText() or tags
self.addPhotos(self.flickr.photos_search,mode,url=tags,page=page,tags=tags,user_id=userid)
def INTERESTING(self,page):
self.addPhotos(self.flickr.interestingness_getList,11,page=page)
def SET(self,psid,page):
self.addPhotos(self.flickr.photosets_getPhotos,103,url=psid,page=page,photoset_id=psid)
def GALLERY(self,gid,page):
self.addPhotos(self.flickr.galleries_getPhotos,103,url=gid,page=page,gallery_id=gid)
def TAG(self,tag,page,userid=None):
if not userid: userid = 'me'
self.addPhotos(self.flickr.photos_search,105,url=tag,page=page,tags=tag,user_id=userid)
def CONTACT(self,cid,name):
self.addDir(__language__(30514).replace('@NAMEREPLACE@',name).replace('@REPLACE@',__language__(30300)),cid,701,os.path.join(IMAGES_PATH,'photostream.png'))
self.addDir(__language__(30515).replace('@NAMEREPLACE@',name).replace('@REPLACE@',__language__(30301)),cid,702,os.path.join(IMAGES_PATH,'collections.png'))
self.addDir(__language__(30515).replace('@NAMEREPLACE@',name).replace('@REPLACE@',__language__(30302)),cid,703,os.path.join(IMAGES_PATH,'sets.png'))
self.addDir(__language__(30515).replace('@NAMEREPLACE@',name).replace('@REPLACE@',__language__(30303)),cid,704,os.path.join(IMAGES_PATH,'galleries.png'))
self.addDir(__language__(30515).replace('@NAMEREPLACE@',name).replace('@REPLACE@',__language__(30304)),cid,705,os.path.join(IMAGES_PATH,'tags.png'))
if self.authenticated(): self.addDir(__language__(30515).replace('@NAMEREPLACE@',name).replace('@REPLACE@',__language__(30305)),cid,706,os.path.join(IMAGES_PATH,'favorites.png'))
self.addDir(__language__(30515).replace('@NAMEREPLACE@',name).replace('@REPLACE@',__language__(30306)),cid,707,os.path.join(IMAGES_PATH,'contacts.png'))
self.addDir(__language__(30516).replace('@NAMEREPLACE@',name),cid,709,os.path.join(IMAGES_PATH,'search_photostream.png'))
def GROUP(self,groupid):
self.addPhotos(self.flickr.groups_pools_getPhotos,112,mapOption=True,group_id=groupid)
def PLACE(self,woeid,page):
self.addPhotos(self.flickr.photos_search,1022,url=woeid,page=page,woe_id=woeid,user_id='me',mapOption=True)
def addLink(self,name,url,iconimage,tot=0,contextMenu=None,ltype='image'):
#u=sys.argv[0]+"?url="+urllib.quote_plus(url)+"&name="+urllib.quote_plus(name)
liz=xbmcgui.ListItem(name, iconImage="DefaultImage.png", thumbnailImage=iconimage)
liz.setInfo( type=ltype, infoLabels={ "Title": name } )
liz.setProperty( "sharing","handled" )
if contextMenu: liz.addContextMenuItems(contextMenu)
return xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=url,listitem=liz,isFolder=False,totalItems=tot)
def addDir(self,name,url,mode,iconimage,page=1,tot=0,userid=''):
if userid: userid = "&userid="+urllib.quote_plus(userid)
u=sys.argv[0]+"?url="+urllib.quote_plus(url.encode('utf-8'))+"&mode="+str(mode)+"&page="+str(page)+userid+"&name="+urllib.quote_plus(name.encode('utf-8'))
liz=xbmcgui.ListItem(name, 'test',iconImage="DefaultFolder.png", thumbnailImage=iconimage)
liz.setInfo( type="image", infoLabels={"Title": name} )
return xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=u,listitem=liz,isFolder=True,totalItems=tot)
class ImageShower(xbmcgui.Window):
def showImage(self,image):
self.addControl(xbmcgui.ControlImage(0,0,self.getWidth(),self.getHeight(), image, aspectRatio=2))
def onAction(self,action):
if action == 10 or action == 9: self.close()
def clearDirFiles(filepath):
if not os.path.exists(filepath): return
for f in os.listdir(filepath):
f = os.path.join(filepath,f)
if os.path.isfile(f): os.remove(f)
## XBMC Plugin stuff starts here --------------------------------------------------------
def get_params():
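    # Parses the '?key=value&...' query string XBMC passes in sys.argv[2] into a dict;
    # values stay URL-encoded and are unquoted later in doPlugin().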
param=[]
paramstring=sys.argv[2]
if len(paramstring)>=2:
params=sys.argv[2]
cleanedparams=params.replace('?','')
if (params[len(params)-1]=='/'):
params=params[0:len(params)-2]
pairsofparams=cleanedparams.split('&')
param={}
for i in range(len(pairsofparams)):
splitparams={}
splitparams=pairsofparams[i].split('=')
if (len(splitparams))==2:
param[splitparams[0]]=splitparams[1]
else:
param={}
return param
### Do plugin stuff --------------------------------------------------------------------------
def doPlugin():
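    # Directory-building entry point: decodes the plugin URL parameters and dispatches on the
    # numeric 'mode' (set wherever addDir() is called, e.g. in CATEGORIES/CONTACT) to the
    # matching FlickrSession listing method.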
params=get_params()
url = urllib.unquote_plus(params.get("url",'')).decode('utf-8')
page = int(params.get("page",'1'))
userid = urllib.unquote_plus(params.get("userid",''))
name = urllib.unquote_plus(params.get("name",'')).decode('utf-8')
mode = int(params.get("mode",'0'))
#print "Mode: "+str(mode)
#print "URL: "+str(url)
#print "Name: "+str(name)
#print "Page: "+str(page)
update_dir = False
success = True
cache = True
try:
fsession = FlickrSession()
fsession.isSlideshow = params.get('plugin_slideshow_ss','false') == 'true'
if not fsession.authenticate():
mode = 9999
url = 'AUTHENTICATE'
if page>1 or page<0: update_dir=True
page = abs(page)
if mode==0 or url==None or len(url)<1:
LOG('Version: ' + __version__)
LOG('Encoding: ' + ENCODING)
registerAsShareTarget()
clearDirFiles(CACHE_PATH)
fsession.CATEGORIES()
elif mode==1:
fsession.PHOTOSTREAM(page)
elif mode==2:
fsession.COLLECTION(url,userid=userid)
elif mode==3:
fsession.SETS()
elif mode==4:
fsession.GALLERIES()
elif mode==5:
fsession.TAGS()
elif mode==6:
fsession.FAVORITES(page)
elif mode==7:
fsession.CONTACTS()
elif mode==8:
clearDirFiles(CACHE_PATH)
fsession.PLACES(12,zoom='country')
elif mode==9:
fsession.SEARCH_TAGS(url,page,mode=9,userid='me')
elif mode==10:
fsession.SEARCH_TAGS(url,page,mode=10)
elif mode==11:
fsession.INTERESTING(page)
elif mode==12:
fsession.GROUPS()
elif mode==13:
fsession.SEARCH_GROUPS(url,page)
elif mode==103:
fsession.SET(url,page)
elif mode==104:
fsession.GALLERY(url,page)
elif mode==105:
fsession.TAG(url,page,userid=userid)
elif mode==107:
fsession.CONTACT(url,name)
elif mode==112:
fsession.GROUP(url)
elif mode==701:
fsession.PHOTOSTREAM(page,mode=701,userid=url)
elif mode==702:
fsession.COLLECTION('collections',userid=url)
elif mode==703:
fsession.SETS(userid=url)
elif mode==704:
fsession.GALLERIES(userid=url)
elif mode==705:
fsession.TAGS(userid=url)
elif mode==706:
fsession.FAVORITES(page,userid=url)
elif mode==707:
fsession.CONTACTS(userid=url)
elif mode==709:
fsession.SEARCH_TAGS(url,page,mode=709,userid=url)
elif mode==800:
fsession.CONTACTS_RECENT_PHOTOS()
elif mode==1022:
fsession.PLACE(url,page)
elif mode==1007:
fsession.PLACES(22,woeid=url,name=name,zoom='neighborhood')
elif mode==1008:
fsession.PLACES(7,woeid=url,name=name,zoom='locality')
elif mode==1012:
fsession.PLACES(8,woeid=url,name=name,zoom='region')
except HTTPError,e:
if(e.reason[1] == 504):
xbmcgui.Dialog().ok(__language__(30502), __language__(30504))
success = False
else:
ERROR('UNHANDLED HTTP ERROR',' (HTTP)')
except URLError,e:
LOG(e.reason)
if(e.reason[0] == 110):
xbmcgui.Dialog().ok(__language__(30503), __language__(30504))
success = False
else:
ERROR('UNHANDLED URL ERROR',' (URL)')
except:
ERROR('UNHANDLED ERROR')
if mode != 9999: xbmcplugin.endOfDirectory(int(sys.argv[1]),succeeded=success,updateListing=update_dir,cacheToDisc=cache)
def selectVideoURL(sizes):
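    # Picks the stream URL for the configured video size, falling back through the smaller
    # sizes (smallest first) and returning '' when nothing matches.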
sizeIDX = int(__settings__.getSetting('video_display_size') or '1')
sizeNames = ('Mobile MP4','Site MP4','HD MP4','Video Original')
size = sizeNames[sizeIDX]
if size in sizes: return sizes[size]
for size in sizeNames[:sizeIDX]:
if size in sizes: return sizes[size]
return ''
def playVideo():
fsession = FlickrSession()
if not fsession.authenticate():
return None
vid = sys.argv[2].split('=')[-1]
LOG('Playing video with ID: ' + vid)
sizes = fsession.getImageUrl(vid, 'all')
url = selectVideoURL(sizes)
listitem = xbmcgui.ListItem(label='flickr Video', path=url)
listitem.setInfo(type='Video',infoLabels={"Title": 'flickr Video'})
xbmcplugin.setResolvedUrl(handle=int(sys.argv[1]), succeeded=True, listitem=listitem)
class SavePhoto:
def __init__(self):
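        # Downloads the image at the URL passed in sys.argv[2] to the configured save_path,
        # de-duplicating the file name and showing a progress dialog; prompts for a save path
        # and retries once on failure.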
url = urllib.unquote_plus(sys.argv[2])
savename = sys.argv[3]
if not savename.lower().endswith('.jpg'): savename += '.jpg' #Would be better if we determined image type but it should be .jpg 99.9% of the time
save_path = __settings__.getSetting('save_path')
saveFullPath = os.path.join(save_path,savename)
basePath = saveFullPath
ct=1
while os.path.exists(saveFullPath):
base = os.path.splitext(basePath)[0]
saveFullPath = base + '_%s.jpg' % ct
ct+=1
if ct > 99: break
self.pd = xbmcgui.DialogProgress()
self.pd.create(__language__(30415),__language__(30416))
try:
fail = False
if save_path:
try:
urllib.urlretrieve(url,saveFullPath,self.progressUpdate)
except:
fail = True
else:
fail = True
if fail:
xbmcgui.Dialog().ok(__language__(30417),__language__(30418))
__settings__.openSettings()
save_path = __settings__.getSetting('save_path')
try:
urllib.urlretrieve(url,saveFullPath,self.progressUpdate)
except:
import traceback
traceback.print_exc()
xbmcgui.Dialog().ok(__language__(30419),__language__(30420))
return
finally:
self.pd.close()
xbmcgui.Dialog().ok(__language__(30412),__language__(30413).replace('@REPLACE@',os.path.basename(saveFullPath)),__language__(30414).replace('@REPLACE@',save_path))
def progressUpdate(self,blocks,bsize,fsize):
#print 'cool',blocks,bsize,fsize
if fsize == -1 or fsize <= bsize:
self.pd.update(0)
#print 'test'
return
percent = int((float(blocks) / (fsize/bsize)) * 100)
#print percent
self.pd.update(percent)
def registerAsShareTarget():
try:
import ShareSocial #@UnresolvedImport
except:
LOG('Could not import ShareSocial')
return
target = ShareSocial.getShareTarget()
target.addonID = 'plugin.image.flickr'
target.name = 'flickr'
target.importPath = 'share'
target.provideTypes = ['feed']
ShareSocial.registerShareTarget(target)
LOG('Registered as share target with ShareSocial')
PLUGIN = False
if __name__ == '__main__':
#print sys.argv
if sys.argv[1] == 'map':
Maps().doMap()
elif sys.argv[1] == 'save':
SavePhoto()
elif sys.argv[1] == 'slideshow':
xbmc.executebuiltin('SlideShow(plugin://plugin.image.flickr?mode=1&url=slideshow&name=photostream)')
elif sys.argv[1] == 'reset_auth':
fsession = FlickrSession()
if fsession.authenticate(force=True):
xbmcgui.Dialog().ok(__language__(30507),__language__(30506))
else:
xbmcgui.Dialog().ok(__language__(30520),__language__(30521))
elif len(sys.argv) > 2 and sys.argv[2].startswith('?video_id'):
playVideo()
else:
PLUGIN = True
doPlugin()
| gpl-2.0 | 8,714,209,520,837,399,000 | 37.110287 | 523 | 0.678461 | false |
openlabs/sale-shipment-cost-cap | setup.py | 1 | 3690 | #!/usr/bin/env python
import re
import os
import sys
import time
import unittest
import ConfigParser
from setuptools import setup, Command
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
class SQLiteTest(Command):
"""
Run the tests on SQLite
"""
description = "Run tests on SQLite"
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
os.environ['TRYTOND_DATABASE_URI'] = "sqlite://"
os.environ['DB_NAME'] = ':memory:'
from tests import suite
test_result = unittest.TextTestRunner(verbosity=3).run(suite())
if test_result.wasSuccessful():
sys.exit(0)
sys.exit(-1)
class PostgresTest(Command):
"""
Run the tests on Postgres.
"""
description = "Run tests on Postgresql"
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
os.environ['TRYTOND_DATABASE_URI'] = "postgresql://"
os.environ['DB_NAME'] = 'test_' + str(int(time.time()))
from tests import suite
test_result = unittest.TextTestRunner(verbosity=3).run(suite())
if test_result.wasSuccessful():
sys.exit(0)
sys.exit(-1)
config = ConfigParser.ConfigParser()
config.readfp(open('tryton.cfg'))
info = dict(config.items('tryton'))
for key in ('depends', 'extras_depend', 'xml'):
if key in info:
info[key] = info[key].strip().splitlines()
major_version, minor_version, _ = info.get('version', '0.0.1').split('.', 2)
major_version = int(major_version)
minor_version = int(minor_version)
requires = []
MODULE2PREFIX = {}
MODULE = "sale_shipment_cost_cap"
PREFIX = "openlabs"
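# Pin every Tryton module dependency from tryton.cfg to the same major.minor series as this
# module; ir/res/webdav are built into trytond and therefore skipped.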
for dep in info.get('depends', []):
if not re.match(r'(ir|res|webdav)(\W|$)', dep):
requires.append(
'%s_%s >= %s.%s, < %s.%s' % (
MODULE2PREFIX.get(dep, 'trytond'), dep,
major_version, minor_version, major_version,
minor_version + 1
)
)
requires.append(
'trytond >= %s.%s, < %s.%s' % (
major_version, minor_version, major_version, minor_version + 1
)
)
setup(
name='%s_%s' % (PREFIX, MODULE),
version=info.get('version', '0.0.1'),
description="Cap the shipment cost to order shipping value",
author="Openlabs Technologies and Consulting (P) Ltd.",
author_email='[email protected]',
url='http://www.openlabs.co.in/',
package_dir={'trytond.modules.%s' % MODULE: '.'},
packages=[
'trytond.modules.%s' % MODULE,
'trytond.modules.%s.tests' % MODULE,
],
package_data={
'trytond.modules.%s' % MODULE: info.get('xml', []) +
info.get('translation', []) +
['tryton.cfg', 'locale/*.po', 'tests/*.rst', 'reports/*.odt'] +
['view/*.xml'],
},
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Plugins',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Framework :: Tryton',
'Topic :: Office/Business',
],
license='GPL-3',
install_requires=requires,
zip_safe=False,
entry_points="""
[trytond.modules]
%s = trytond.modules.%s
""" % (MODULE, MODULE),
test_suite='tests',
test_loader='trytond.test_loader:Loader',
cmdclass={
'test': SQLiteTest,
'test_on_postgres': PostgresTest,
}
)
| bsd-3-clause | 7,624,043,393,774,007,000 | 25.546763 | 76 | 0.582385 | false |
i32ropie/lol | plugins/ban.py | 1 | 3887 | # -*- coding: utf-8 -*-
from config import *
print(Color(
    '{autored}[{/red}{autoyellow}+{/yellow}{autored}]{/red} {autocyan} ban.py imported.{/cyan}'))
@bot.message_handler(commands=['ban'])
def command_ban(m):
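    # Admin-only /ban <user_id>: marks a known user as banned, or inserts a fresh user record
    # pre-flagged as banned when the id is not yet in the database.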
cid = m.chat.id
uid = m.from_user.id
date = m.date
if not is_recent(m):
return None
if is_admin(uid):
try:
banned_id = m.text.split(' ')[1]
except:
bot.send_chat_action(cid, 'typing')
bot.send_message(cid, responses['ban']['failure'][0])
return None
if isint(banned_id):
if is_user(banned_id):
if is_banned(banned_id):
bot.send_chat_action(cid, 'typing')
bot.send_message(
cid, responses['ban']['failure'][1] %
banned_id)
else:
db.users.update({"_id": banned_id},
{"$set": {"banned": True}})
bot.send_chat_action(cid, 'typing')
bot.send_message(
cid, responses['ban']['success'] %
banned_id)
else:
# db.users.insert({
# "_id": banned_id,
# "lang": "en",
# "banned": True,
# "notify": True,
# "server": "",
# "summoner": ""
# })
db.users.insert({
"_id": banned_id,
"lang": "en",
"banned": True,
"notify": True,
"server": "",
"summoner": "",
"active": True,
"register": date,
"returns": []
})
bot.send_chat_action(cid, 'typing')
bot.send_message(cid, responses['ban']['success'] % banned_id)
@bot.message_handler(commands=['unban'])
def command_unban(m):
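    # Admin-only /unban <user_id>: clears the banned flag; replies with an error if the id is
    # unknown or not currently banned.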
cid = m.chat.id
uid = m.from_user.id
if is_admin(uid):
try:
banned_id = m.text.split(' ')[1]
except:
bot.send_chat_action(cid, 'typing')
bot.send_message(cid, responses['unban']['failure'][0])
return None
if isint(banned_id):
if is_user(banned_id):
if is_banned(banned_id):
db.users.update({"_id": banned_id},
{"$set": {"banned": False}})
bot.send_chat_action(cid, 'typing')
bot.send_message(
cid, responses['unban']['success'] %
banned_id)
else:
bot.send_chat_action(cid, 'typing')
bot.send_message(
cid, responses['unban']['failure'][1] %
banned_id)
else:
bot.send_chat_action(cid, 'typing')
bot.send_message(
cid, responses['unban']['failure'][2] %
banned_id)
@bot.message_handler(commands=['mute'])
def command_mute(m):
cid = m.chat.id
uid = m.from_user.id
if is_admin(uid):
extra['muted'] = True
bot.send_chat_action(cid, 'typing')
bot.send_message(cid, "Mensajes a baneados desactivados")
with open("extra_data/extra.json", "w") as f:
json.dump(extra, f)
@bot.message_handler(commands=['unmute'])
def command_unmute(m):
cid = m.chat.id
uid = m.from_user.id
if is_admin(uid):
extra['muted'] = False
bot.send_chat_action(cid, 'typing')
bot.send_message(cid, "Mensajes a baneados activados")
with open("extra_data/extra.json", "w") as f:
json.dump(extra, f)
| gpl-2.0 | 3,995,733,429,601,955,000 | 33.096491 | 99 | 0.434268 | false |
Akay7/hospital | appointments/tests.py | 1 | 2998 | from django.test import TestCase
from .models import Appointment, Doctor, TimeManager
from .forms import AppointmentForm
class TestFormAppointment(TestCase):
def setUp(self):
self.doctor = Doctor.objects.create(name="Gordon Freeman")
self.doctor2 = Doctor.objects.create(name="Isaac Kleiner")
def test_add_new_appointment(self):
data = {'doctor': self.doctor.id, 'day': '2015-04-23', 'time': '10:00:00', 'patient': 'Head Crab'}
form = AppointmentForm(data)
self.assertEqual(form.is_valid(), True)
def test_cant_add_multiply_appointments_to_one_doctor(self):
data = {'doctor': self.doctor.id, 'day': '2015-04-23', 'time': '10:00:00', 'patient': 'Head Crab'}
form = AppointmentForm(data)
form.save()
form2 = AppointmentForm(data)
self.assertEqual(form2.is_valid(), False)
def test_can_add_at_same_time_appointments_to_different_doctors(self):
data = {'doctor': self.doctor.id, 'day': '2015-04-23', 'time': '10:00:00', 'patient': 'Head Crab'}
form = AppointmentForm(data)
form.save()
data2 = {'doctor': self.doctor2.id, 'day': '2015-04-23', 'time': '10:00:00', 'patient': 'Lamar'}
form2 = AppointmentForm(data2)
self.assertEqual(form2.is_valid(), True)
class TestGetFreeTime(TestCase):
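    # Checks that TimeManager.get_free_time and the /get_free_time endpoint omit already-booked
    # slots and offer no slots on weekends.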
def setUp(self):
self.doctor = Doctor.objects.create(name="Gordon Freeman")
self.doctor2 = Doctor.objects.create(name="Isaac Kleiner")
Appointment.objects.create(doctor=self.doctor, day="2015-10-14", time="10:00:00", patient="Lamar")
self.doctor.save()
def test_returning_all_free_time_for_selected_day(self):
response = self.client.post('/get_free_time', {"day": "2015-10-14", "doctor_id": self.doctor.id})
self.assertNotIn(b'"10:00:00": "10:00"', response.content)
self.assertIn(b'"11:00:00": "11:00"', response.content)
def test_timemanager_returning_only_free_time(self):
free_time = TimeManager.get_free_time(self.doctor.id, "2015-10-14")
self.assertIn(("11:00:00", "11:00"), free_time)
self.assertNotIn(("10:00:00", "10:00"), free_time)
def test_timemanager_returning_only_free_time_for_first_day(self):
Appointment.objects.create(doctor=self.doctor, day="2015-01-01", time="10:00:00", patient="Lamar")
free_time = TimeManager.get_free_time(self.doctor.id, "2015-1-1")
self.assertIn(("11:00:00", "11:00"), free_time)
self.assertNotIn(("10:00:00", "10:00"), free_time)
def test_dont_have_any_time_for_appointments_on_weekend(self):
free_time = TimeManager.get_free_time(self.doctor.id, "2015-4-25")
self.assertNotIn(("11:00:00", "11:00"), free_time)
self.assertNotIn(("10:00:00", "10:00"), free_time)
free_time = TimeManager.get_free_time(self.doctor.id, "2015-4-26")
self.assertNotIn(("11:00:00", "11:00"), free_time)
self.assertNotIn(("10:00:00", "10:00"), free_time)
| lgpl-3.0 | -4,296,485,343,337,901,000 | 44.424242 | 106 | 0.637425 | false |
cpennington/edx-platform | lms/djangoapps/discussion/django_comment_client/tests/test_utils.py | 1 | 79774 | # pylint: skip-file
# -*- coding: utf-8 -*-
import datetime
import json
import ddt
import mock
import six
from django.test import RequestFactory, TestCase
from django.urls import reverse
from edx_django_utils.cache import RequestCache
from mock import Mock, patch
from pytz import UTC
from six import text_type
import lms.djangoapps.discussion.django_comment_client.utils as utils
from course_modes.models import CourseMode
from course_modes.tests.factories import CourseModeFactory
from lms.djangoapps.courseware.tabs import get_course_tab_list
from lms.djangoapps.courseware.tests.factories import InstructorFactory
from lms.djangoapps.discussion.django_comment_client.constants import TYPE_ENTRY, TYPE_SUBCATEGORY
from lms.djangoapps.discussion.django_comment_client.tests.factories import RoleFactory
from lms.djangoapps.discussion.django_comment_client.tests.unicode import UnicodeTestMixin
from lms.djangoapps.discussion.django_comment_client.tests.utils import config_course_discussions, topic_name_to_id
from lms.djangoapps.teams.tests.factories import CourseTeamFactory
from openedx.core.djangoapps.course_groups import cohorts
from openedx.core.djangoapps.course_groups.cohorts import set_course_cohorted
from openedx.core.djangoapps.course_groups.tests.helpers import CohortFactory, config_course_cohorts
from openedx.core.djangoapps.django_comment_common.comment_client.utils import (
CommentClientMaintenanceError,
perform_request
)
from openedx.core.djangoapps.django_comment_common.models import (
CourseDiscussionSettings,
DiscussionsIdMapping,
ForumsConfig,
assign_role
)
from openedx.core.djangoapps.django_comment_common.utils import (
get_course_discussion_settings,
seed_permissions_roles,
set_course_discussion_settings
)
from openedx.core.djangoapps.util.testing import ContentGroupTestCase
from student.roles import CourseStaffRole
from student.tests.factories import AdminFactory, CourseEnrollmentFactory, UserFactory
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import TEST_DATA_MIXED_MODULESTORE, ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory, ToyCourseFactory
class DictionaryTestCase(TestCase):
def test_extract(self):
d = {'cats': 'meow', 'dogs': 'woof'}
k = ['cats', 'dogs', 'hamsters']
expected = {'cats': 'meow', 'dogs': 'woof', 'hamsters': None}
self.assertEqual(utils.extract(d, k), expected)
def test_strip_none(self):
d = {'cats': 'meow', 'dogs': 'woof', 'hamsters': None}
expected = {'cats': 'meow', 'dogs': 'woof'}
self.assertEqual(utils.strip_none(d), expected)
def test_strip_blank(self):
d = {'cats': 'meow', 'dogs': 'woof', 'hamsters': ' ', 'yetis': ''}
expected = {'cats': 'meow', 'dogs': 'woof'}
self.assertEqual(utils.strip_blank(d), expected)
class AccessUtilsTestCase(ModuleStoreTestCase):
"""
Base testcase class for access and roles for the
comment client service integration
"""
CREATE_USER = False
def setUp(self):
super(AccessUtilsTestCase, self).setUp()
self.course = CourseFactory.create()
self.course_id = self.course.id
self.student_role = RoleFactory(name='Student', course_id=self.course_id)
self.moderator_role = RoleFactory(name='Moderator', course_id=self.course_id)
self.community_ta_role = RoleFactory(name='Community TA', course_id=self.course_id)
self.student1 = UserFactory(username='student', email='[email protected]')
self.student1_enrollment = CourseEnrollmentFactory(user=self.student1)
self.student_role.users.add(self.student1)
self.student2 = UserFactory(username='student2', email='[email protected]')
self.student2_enrollment = CourseEnrollmentFactory(user=self.student2)
self.moderator = UserFactory(username='moderator', email='[email protected]', is_staff=True)
self.moderator_enrollment = CourseEnrollmentFactory(user=self.moderator)
self.moderator_role.users.add(self.moderator)
self.community_ta1 = UserFactory(username='community_ta1', email='[email protected]')
self.community_ta_role.users.add(self.community_ta1)
self.community_ta2 = UserFactory(username='community_ta2', email='[email protected]')
self.community_ta_role.users.add(self.community_ta2)
self.course_staff = UserFactory(username='course_staff', email='[email protected]')
CourseStaffRole(self.course_id).add_users(self.course_staff)
def test_get_role_ids(self):
ret = utils.get_role_ids(self.course_id)
expected = {u'Moderator': [3], u'Community TA': [4, 5]}
self.assertEqual(ret, expected)
def test_has_discussion_privileges(self):
self.assertFalse(utils.has_discussion_privileges(self.student1, self.course_id))
self.assertFalse(utils.has_discussion_privileges(self.student2, self.course_id))
self.assertFalse(utils.has_discussion_privileges(self.course_staff, self.course_id))
self.assertTrue(utils.has_discussion_privileges(self.moderator, self.course_id))
self.assertTrue(utils.has_discussion_privileges(self.community_ta1, self.course_id))
self.assertTrue(utils.has_discussion_privileges(self.community_ta2, self.course_id))
def test_has_forum_access(self):
ret = utils.has_forum_access('student', self.course_id, 'Student')
self.assertTrue(ret)
ret = utils.has_forum_access('not_a_student', self.course_id, 'Student')
self.assertFalse(ret)
ret = utils.has_forum_access('student', self.course_id, 'NotARole')
self.assertFalse(ret)
@ddt.ddt
class CoursewareContextTestCase(ModuleStoreTestCase):
"""
Base testcase class for courseware context for the
comment client service integration
"""
def setUp(self):
super(CoursewareContextTestCase, self).setUp()
self.course = CourseFactory.create(org="TestX", number="101", display_name="Test Course")
self.discussion1 = ItemFactory.create(
parent_location=self.course.location,
category="discussion",
discussion_id="discussion1",
discussion_category="Chapter",
discussion_target="Discussion 1"
)
self.discussion2 = ItemFactory.create(
parent_location=self.course.location,
category="discussion",
discussion_id="discussion2",
discussion_category="Chapter / Section / Subsection",
discussion_target="Discussion 2"
)
def test_empty(self):
utils.add_courseware_context([], self.course, self.user)
def test_missing_commentable_id(self):
orig = {"commentable_id": "non-inline"}
modified = dict(orig)
utils.add_courseware_context([modified], self.course, self.user)
self.assertEqual(modified, orig)
def test_basic(self):
threads = [
{"commentable_id": self.discussion1.discussion_id},
{"commentable_id": self.discussion2.discussion_id}
]
utils.add_courseware_context(threads, self.course, self.user)
def assertThreadCorrect(thread, discussion, expected_title): # pylint: disable=invalid-name
"""Asserts that the given thread has the expected set of properties"""
self.assertEqual(
set(thread.keys()),
set(["commentable_id", "courseware_url", "courseware_title"])
)
self.assertEqual(
thread.get("courseware_url"),
reverse(
"jump_to",
kwargs={
"course_id": text_type(self.course.id),
"location": text_type(discussion.location)
}
)
)
self.assertEqual(thread.get("courseware_title"), expected_title)
assertThreadCorrect(threads[0], self.discussion1, "Chapter / Discussion 1")
assertThreadCorrect(threads[1], self.discussion2, "Subsection / Discussion 2")
def test_empty_discussion_subcategory_title(self):
"""
Test that for empty subcategory inline discussion modules,
the divider " / " is not rendered on a post or inline discussion topic label.
"""
discussion = ItemFactory.create(
parent_location=self.course.location,
category="discussion",
discussion_id="discussion",
discussion_category="Chapter",
discussion_target="" # discussion-subcategory
)
thread = {"commentable_id": discussion.discussion_id}
utils.add_courseware_context([thread], self.course, self.user)
self.assertNotIn('/', thread.get("courseware_title"))
@ddt.data((ModuleStoreEnum.Type.mongo, 2), (ModuleStoreEnum.Type.split, 1))
@ddt.unpack
def test_get_accessible_discussion_xblocks(self, modulestore_type, expected_discussion_xblocks):
"""
        Tests that orphaned discussion xblocks (those without parents) are not returned for the split modulestore.
"""
course = CourseFactory.create(default_store=modulestore_type)
# Create a discussion xblock.
test_discussion = self.store.create_child(self.user.id, course.location, 'discussion', 'test_discussion')
# Assert that created discussion xblock is not an orphan.
self.assertNotIn(test_discussion.location, self.store.get_orphans(course.id))
# Assert that there is only one discussion xblock in the course at the moment.
self.assertEqual(len(utils.get_accessible_discussion_xblocks(course, self.user)), 1)
# The above call is request cached, so we need to clear it for this test.
RequestCache.clear_all_namespaces()
# Add an orphan discussion xblock to that course
orphan = course.id.make_usage_key('discussion', 'orphan_discussion')
self.store.create_item(self.user.id, orphan.course_key, orphan.block_type, block_id=orphan.block_id)
# Assert that the discussion xblock is an orphan.
self.assertIn(orphan, self.store.get_orphans(course.id))
self.assertEqual(len(utils.get_accessible_discussion_xblocks(course, self.user)), expected_discussion_xblocks)
class CachedDiscussionIdMapTestCase(ModuleStoreTestCase):
"""
Tests that using the cache of discussion id mappings has the same behavior as searching through the course.
"""
ENABLED_SIGNALS = ['course_published']
def setUp(self):
super(CachedDiscussionIdMapTestCase, self).setUp()
self.course = CourseFactory.create(org='TestX', number='101', display_name='Test Course')
self.discussion = ItemFactory.create(
parent_location=self.course.location,
category='discussion',
discussion_id='test_discussion_id',
discussion_category='Chapter',
discussion_target='Discussion 1'
)
self.discussion2 = ItemFactory.create(
parent_location=self.course.location,
category='discussion',
discussion_id='test_discussion_id_2',
discussion_category='Chapter 2',
discussion_target='Discussion 2'
)
self.private_discussion = ItemFactory.create(
parent_location=self.course.location,
category='discussion',
discussion_id='private_discussion_id',
discussion_category='Chapter 3',
discussion_target='Beta Testing',
visible_to_staff_only=True
)
RequestCache.clear_all_namespaces() # clear the cache before the last course publish
self.bad_discussion = ItemFactory.create(
parent_location=self.course.location,
category='discussion',
discussion_id='bad_discussion_id',
discussion_category=None,
discussion_target=None
)
def test_cache_returns_correct_key(self):
usage_key = utils.get_cached_discussion_key(self.course.id, 'test_discussion_id')
self.assertEqual(usage_key, self.discussion.location)
def test_cache_returns_none_if_id_is_not_present(self):
usage_key = utils.get_cached_discussion_key(self.course.id, 'bogus_id')
self.assertIsNone(usage_key)
def test_cache_raises_exception_if_discussion_id_map_not_cached(self):
DiscussionsIdMapping.objects.all().delete()
with self.assertRaises(utils.DiscussionIdMapIsNotCached):
utils.get_cached_discussion_key(self.course.id, 'test_discussion_id')
def test_cache_raises_exception_if_discussion_id_not_cached(self):
cache = DiscussionsIdMapping.objects.get(course_id=self.course.id)
cache.mapping = None
cache.save()
with self.assertRaises(utils.DiscussionIdMapIsNotCached):
utils.get_cached_discussion_key(self.course.id, 'test_discussion_id')
def test_xblock_does_not_have_required_keys(self):
self.assertTrue(utils.has_required_keys(self.discussion))
self.assertFalse(utils.has_required_keys(self.bad_discussion))
def verify_discussion_metadata(self):
"""Retrieves the metadata for self.discussion and self.discussion2 and verifies that it is correct"""
metadata = utils.get_cached_discussion_id_map(
self.course,
['test_discussion_id', 'test_discussion_id_2'],
self.user
)
discussion1 = metadata[self.discussion.discussion_id]
discussion2 = metadata[self.discussion2.discussion_id]
self.assertEqual(discussion1['location'], self.discussion.location)
self.assertEqual(discussion1['title'], 'Chapter / Discussion 1')
self.assertEqual(discussion2['location'], self.discussion2.location)
self.assertEqual(discussion2['title'], 'Chapter 2 / Discussion 2')
def test_get_discussion_id_map_from_cache(self):
self.verify_discussion_metadata()
def test_get_discussion_id_map_without_cache(self):
DiscussionsIdMapping.objects.all().delete()
self.verify_discussion_metadata()
def test_get_missing_discussion_id_map_from_cache(self):
metadata = utils.get_cached_discussion_id_map(self.course, ['bogus_id'], self.user)
self.assertEqual(metadata, {})
def test_get_discussion_id_map_from_cache_without_access(self):
user = UserFactory.create()
metadata = utils.get_cached_discussion_id_map(self.course, ['private_discussion_id'], self.user)
self.assertEqual(metadata['private_discussion_id']['title'], 'Chapter 3 / Beta Testing')
metadata = utils.get_cached_discussion_id_map(self.course, ['private_discussion_id'], user)
self.assertEqual(metadata, {})
def test_get_bad_discussion_id(self):
metadata = utils.get_cached_discussion_id_map(self.course, ['bad_discussion_id'], self.user)
self.assertEqual(metadata, {})
def test_discussion_id_accessible(self):
self.assertTrue(utils.discussion_category_id_access(self.course, self.user, 'test_discussion_id'))
def test_bad_discussion_id_not_accessible(self):
self.assertFalse(utils.discussion_category_id_access(self.course, self.user, 'bad_discussion_id'))
def test_missing_discussion_id_not_accessible(self):
self.assertFalse(utils.discussion_category_id_access(self.course, self.user, 'bogus_id'))
def test_discussion_id_not_accessible_without_access(self):
user = UserFactory.create()
self.assertTrue(utils.discussion_category_id_access(self.course, self.user, 'private_discussion_id'))
self.assertFalse(utils.discussion_category_id_access(self.course, user, 'private_discussion_id'))
class CategoryMapTestMixin(object):
"""
Provides functionality for classes that test
`get_discussion_category_map`.
"""
def assert_category_map_equals(self, expected, requesting_user=None):
"""
Call `get_discussion_category_map`, and verify that it returns
what is expected.
"""
self.assertEqual(
utils.get_discussion_category_map(self.course, requesting_user or self.user),
expected
)
class CategoryMapTestCase(CategoryMapTestMixin, ModuleStoreTestCase):
"""
Base testcase class for discussion categories for the
comment client service integration
"""
def setUp(self):
super(CategoryMapTestCase, self).setUp()
self.course = CourseFactory.create(
org="TestX", number="101", display_name="Test Course",
# This test needs to use a course that has already started --
# discussion topics only show up if the course has already started,
# and the default start date for courses is Jan 1, 2030.
start=datetime.datetime(2012, 2, 3, tzinfo=UTC)
)
# Courses get a default discussion topic on creation, so remove it
self.course.discussion_topics = {}
self.discussion_num = 0
self.instructor = InstructorFactory(course_key=self.course.id)
self.maxDiff = None # pylint: disable=invalid-name
self.later = datetime.datetime(2050, 1, 1, tzinfo=UTC)
def create_discussion(self, discussion_category, discussion_target, **kwargs):
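        # Helper: creates an inline discussion xblock under the test course, assigning a
        # sequential discussion id.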
self.discussion_num += 1
return ItemFactory.create(
parent_location=self.course.location,
category="discussion",
discussion_id="discussion{}".format(self.discussion_num),
discussion_category=discussion_category,
discussion_target=discussion_target,
**kwargs
)
def assert_category_map_equals(self, expected, divided_only_if_explicit=False, exclude_unstarted=True): # pylint: disable=arguments-differ
"""
Asserts the expected map with the map returned by get_discussion_category_map method.
"""
self.assertEqual(
utils.get_discussion_category_map(
self.course, self.instructor, divided_only_if_explicit, exclude_unstarted
),
expected
)
def test_empty(self):
self.assert_category_map_equals({"entries": {}, "subcategories": {}, "children": []})
def test_configured_topics(self):
self.course.discussion_topics = {
"Topic A": {"id": "Topic_A"},
"Topic B": {"id": "Topic_B"},
"Topic C": {"id": "Topic_C"}
}
def check_cohorted_topics(expected_ids):
self.assert_category_map_equals(
{
"entries": {
"Topic A": {"id": "Topic_A", "sort_key": "Topic A", "is_divided": "Topic_A" in expected_ids},
"Topic B": {"id": "Topic_B", "sort_key": "Topic B", "is_divided": "Topic_B" in expected_ids},
"Topic C": {"id": "Topic_C", "sort_key": "Topic C", "is_divided": "Topic_C" in expected_ids},
},
"subcategories": {},
"children": [("Topic A", TYPE_ENTRY), ("Topic B", TYPE_ENTRY), ("Topic C", TYPE_ENTRY)]
}
)
check_cohorted_topics([]) # default (empty) cohort config
set_discussion_division_settings(self.course.id, enable_cohorts=False)
check_cohorted_topics([])
set_discussion_division_settings(self.course.id, enable_cohorts=True)
check_cohorted_topics([])
set_discussion_division_settings(
self.course.id,
enable_cohorts=True,
divided_discussions=["Topic_B", "Topic_C"]
)
check_cohorted_topics(["Topic_B", "Topic_C"])
set_discussion_division_settings(
self.course.id,
enable_cohorts=True,
divided_discussions=["Topic_A", "Some_Other_Topic"]
)
check_cohorted_topics(["Topic_A"])
# unlikely case, but make sure it works.
set_discussion_division_settings(
self.course.id,
enable_cohorts=False,
divided_discussions=["Topic_A"]
)
check_cohorted_topics([])
def test_single_inline(self):
self.create_discussion("Chapter", "Discussion")
self.assert_category_map_equals(
{
"entries": {},
"subcategories": {
"Chapter": {
"entries": {
"Discussion": {
"id": "discussion1",
"sort_key": None,
"is_divided": False,
}
},
"subcategories": {},
"children": [("Discussion", TYPE_ENTRY)]
}
},
"children": [("Chapter", TYPE_SUBCATEGORY)]
}
)
def test_inline_with_always_divide_inline_discussion_flag(self):
self.create_discussion("Chapter", "Discussion")
set_discussion_division_settings(self.course.id, enable_cohorts=True, always_divide_inline_discussions=True)
self.assert_category_map_equals(
{
"entries": {},
"subcategories": {
"Chapter": {
"entries": {
"Discussion": {
"id": "discussion1",
"sort_key": None,
"is_divided": True,
}
},
"subcategories": {},
"children": [("Discussion", TYPE_ENTRY)]
}
},
"children": [("Chapter", TYPE_SUBCATEGORY)]
}
)
def test_inline_without_always_divide_inline_discussion_flag(self):
self.create_discussion("Chapter", "Discussion")
set_discussion_division_settings(self.course.id, enable_cohorts=True)
self.assert_category_map_equals(
{
"entries": {},
"subcategories": {
"Chapter": {
"entries": {
"Discussion": {
"id": "discussion1",
"sort_key": None,
"is_divided": False,
}
},
"subcategories": {},
"children": [("Discussion", TYPE_ENTRY)]
}
},
"children": [("Chapter", TYPE_SUBCATEGORY)]
},
divided_only_if_explicit=True
)
def test_get_unstarted_discussion_xblocks(self):
self.create_discussion("Chapter 1", "Discussion 1", start=self.later)
self.assert_category_map_equals(
{
"entries": {},
"subcategories": {
"Chapter 1": {
"entries": {
"Discussion 1": {
"id": "discussion1",
"sort_key": None,
"is_divided": False,
"start_date": self.later
}
},
"subcategories": {},
"children": [("Discussion 1", TYPE_ENTRY)],
"start_date": self.later,
"sort_key": "Chapter 1"
}
},
"children": [("Chapter 1", TYPE_SUBCATEGORY)]
},
divided_only_if_explicit=True,
exclude_unstarted=False
)
def test_tree(self):
self.create_discussion("Chapter 1", "Discussion 1")
self.create_discussion("Chapter 1", "Discussion 2")
self.create_discussion("Chapter 2", "Discussion")
self.create_discussion("Chapter 2 / Section 1 / Subsection 1", "Discussion")
self.create_discussion("Chapter 2 / Section 1 / Subsection 2", "Discussion")
self.create_discussion("Chapter 3 / Section 1", "Discussion")
def check_divided(is_divided):
self.assert_category_map_equals(
{
"entries": {},
"subcategories": {
"Chapter 1": {
"entries": {
"Discussion 1": {
"id": "discussion1",
"sort_key": None,
"is_divided": is_divided,
},
"Discussion 2": {
"id": "discussion2",
"sort_key": None,
"is_divided": is_divided,
}
},
"subcategories": {},
"children": [("Discussion 1", TYPE_ENTRY), ("Discussion 2", TYPE_ENTRY)]
},
"Chapter 2": {
"entries": {
"Discussion": {
"id": "discussion3",
"sort_key": None,
"is_divided": is_divided,
}
},
"subcategories": {
"Section 1": {
"entries": {},
"subcategories": {
"Subsection 1": {
"entries": {
"Discussion": {
"id": "discussion4",
"sort_key": None,
"is_divided": is_divided,
}
},
"subcategories": {},
"children": [("Discussion", TYPE_ENTRY)]
},
"Subsection 2": {
"entries": {
"Discussion": {
"id": "discussion5",
"sort_key": None,
"is_divided": is_divided,
}
},
"subcategories": {},
"children": [("Discussion", TYPE_ENTRY)]
}
},
"children": [("Subsection 1", TYPE_SUBCATEGORY), ("Subsection 2", TYPE_SUBCATEGORY)]
}
},
"children": [("Discussion", TYPE_ENTRY), ("Section 1", TYPE_SUBCATEGORY)]
},
"Chapter 3": {
"entries": {},
"subcategories": {
"Section 1": {
"entries": {
"Discussion": {
"id": "discussion6",
"sort_key": None,
"is_divided": is_divided,
}
},
"subcategories": {},
"children": [("Discussion", TYPE_ENTRY)]
}
},
"children": [("Section 1", TYPE_SUBCATEGORY)]
}
},
"children": [("Chapter 1", TYPE_SUBCATEGORY), ("Chapter 2", TYPE_SUBCATEGORY),
("Chapter 3", TYPE_SUBCATEGORY)]
}
)
# empty / default config
check_divided(False)
# explicitly disabled cohorting
set_discussion_division_settings(self.course.id, enable_cohorts=False)
check_divided(False)
        # explicitly enable courses divided by cohort, with inline discussions also divided.
set_discussion_division_settings(self.course.id, enable_cohorts=True, always_divide_inline_discussions=True)
check_divided(True)
def test_tree_with_duplicate_targets(self):
self.create_discussion("Chapter 1", "Discussion A")
self.create_discussion("Chapter 1", "Discussion B")
self.create_discussion("Chapter 1", "Discussion A") # duplicate
self.create_discussion("Chapter 1", "Discussion A") # another duplicate
self.create_discussion("Chapter 2 / Section 1 / Subsection 1", "Discussion")
self.create_discussion("Chapter 2 / Section 1 / Subsection 1", "Discussion") # duplicate
category_map = utils.get_discussion_category_map(self.course, self.user)
chapter1 = category_map["subcategories"]["Chapter 1"]
chapter1_discussions = set(["Discussion A", "Discussion B", "Discussion A (1)", "Discussion A (2)"])
chapter1_discussions_with_types = set([("Discussion A", TYPE_ENTRY), ("Discussion B", TYPE_ENTRY),
("Discussion A (1)", TYPE_ENTRY), ("Discussion A (2)", TYPE_ENTRY)])
self.assertEqual(set(chapter1["children"]), chapter1_discussions_with_types)
self.assertEqual(set(chapter1["entries"].keys()), chapter1_discussions)
chapter2 = category_map["subcategories"]["Chapter 2"]
subsection1 = chapter2["subcategories"]["Section 1"]["subcategories"]["Subsection 1"]
subsection1_discussions = set(["Discussion", "Discussion (1)"])
subsection1_discussions_with_types = set([("Discussion", TYPE_ENTRY), ("Discussion (1)", TYPE_ENTRY)])
self.assertEqual(set(subsection1["children"]), subsection1_discussions_with_types)
self.assertEqual(set(subsection1["entries"].keys()), subsection1_discussions)
def test_start_date_filter(self):
now = datetime.datetime.now()
self.create_discussion("Chapter 1", "Discussion 1", start=now)
self.create_discussion("Chapter 1", u"Discussion 2 обсуждение", start=self.later)
self.create_discussion("Chapter 2", "Discussion", start=now)
self.create_discussion("Chapter 2 / Section 1 / Subsection 1", "Discussion", start=self.later)
self.create_discussion("Chapter 2 / Section 1 / Subsection 2", "Discussion", start=self.later)
self.create_discussion("Chapter 3 / Section 1", "Discussion", start=self.later)
self.assertFalse(self.course.self_paced)
self.assert_category_map_equals(
{
"entries": {},
"subcategories": {
"Chapter 1": {
"entries": {
"Discussion 1": {
"id": "discussion1",
"sort_key": None,
"is_divided": False,
}
},
"subcategories": {},
"children": [("Discussion 1", TYPE_ENTRY)]
},
"Chapter 2": {
"entries": {
"Discussion": {
"id": "discussion3",
"sort_key": None,
"is_divided": False,
}
},
"subcategories": {},
"children": [("Discussion", TYPE_ENTRY)]
}
},
"children": [("Chapter 1", TYPE_SUBCATEGORY), ("Chapter 2", TYPE_SUBCATEGORY)]
}
)
def test_self_paced_start_date_filter(self):
self.course.self_paced = True
now = datetime.datetime.now()
self.create_discussion("Chapter 1", "Discussion 1", start=now)
self.create_discussion("Chapter 1", "Discussion 2", start=self.later)
self.create_discussion("Chapter 2", "Discussion", start=now)
self.create_discussion("Chapter 2 / Section 1 / Subsection 1", "Discussion", start=self.later)
self.create_discussion("Chapter 2 / Section 1 / Subsection 2", "Discussion", start=self.later)
self.create_discussion("Chapter 3 / Section 1", "Discussion", start=self.later)
self.assertTrue(self.course.self_paced)
self.assert_category_map_equals(
{
"entries": {},
"subcategories": {
"Chapter 1": {
"entries": {
"Discussion 1": {
"id": "discussion1",
"sort_key": None,
"is_divided": False,
},
"Discussion 2": {
"id": "discussion2",
"sort_key": None,
"is_divided": False,
}
},
"subcategories": {},
"children": [("Discussion 1", TYPE_ENTRY), ("Discussion 2", TYPE_ENTRY)]
},
"Chapter 2": {
"entries": {
"Discussion": {
"id": "discussion3",
"sort_key": None,
"is_divided": False,
}
},
"subcategories": {
"Section 1": {
"entries": {},
"subcategories": {
"Subsection 1": {
"entries": {
"Discussion": {
"id": "discussion4",
"sort_key": None,
"is_divided": False,
}
},
"subcategories": {},
"children": [("Discussion", TYPE_ENTRY)]
},
"Subsection 2": {
"entries": {
"Discussion": {
"id": "discussion5",
"sort_key": None,
"is_divided": False,
}
},
"subcategories": {},
"children": [("Discussion", TYPE_ENTRY)]
}
},
"children": [("Subsection 1", TYPE_SUBCATEGORY), ("Subsection 2", TYPE_SUBCATEGORY)]
}
},
"children": [("Discussion", TYPE_ENTRY), ("Section 1", TYPE_SUBCATEGORY)]
},
"Chapter 3": {
"entries": {},
"subcategories": {
"Section 1": {
"entries": {
"Discussion": {
"id": "discussion6",
"sort_key": None,
"is_divided": False,
}
},
"subcategories": {},
"children": [("Discussion", TYPE_ENTRY)]
}
},
"children": [("Section 1", TYPE_SUBCATEGORY)]
}
},
"children": [("Chapter 1", TYPE_SUBCATEGORY), ("Chapter 2", TYPE_SUBCATEGORY),
("Chapter 3", TYPE_SUBCATEGORY)]
}
)
def test_sort_inline_explicit(self):
self.create_discussion("Chapter", "Discussion 1", sort_key="D")
self.create_discussion("Chapter", "Discussion 2", sort_key="A")
self.create_discussion("Chapter", "Discussion 3", sort_key="E")
self.create_discussion("Chapter", "Discussion 4", sort_key="C")
self.create_discussion("Chapter", "Discussion 5", sort_key="B")
self.assert_category_map_equals(
{
"entries": {},
"subcategories": {
"Chapter": {
"entries": {
"Discussion 1": {
"id": "discussion1",
"sort_key": "D",
"is_divided": False,
},
"Discussion 2": {
"id": "discussion2",
"sort_key": "A",
"is_divided": False,
},
"Discussion 3": {
"id": "discussion3",
"sort_key": "E",
"is_divided": False,
},
"Discussion 4": {
"id": "discussion4",
"sort_key": "C",
"is_divided": False,
},
"Discussion 5": {
"id": "discussion5",
"sort_key": "B",
"is_divided": False,
}
},
"subcategories": {},
"children": [
("Discussion 2", TYPE_ENTRY),
("Discussion 5", TYPE_ENTRY),
("Discussion 4", TYPE_ENTRY),
("Discussion 1", TYPE_ENTRY),
("Discussion 3", TYPE_ENTRY)
]
}
},
"children": [("Chapter", TYPE_SUBCATEGORY)]
}
)
def test_sort_configured_topics_explicit(self):
self.course.discussion_topics = {
"Topic A": {"id": "Topic_A", "sort_key": "B"},
"Topic B": {"id": "Topic_B", "sort_key": "C"},
"Topic C": {"id": "Topic_C", "sort_key": "A"}
}
self.assert_category_map_equals(
{
"entries": {
"Topic A": {"id": "Topic_A", "sort_key": "B", "is_divided": False},
"Topic B": {"id": "Topic_B", "sort_key": "C", "is_divided": False},
"Topic C": {"id": "Topic_C", "sort_key": "A", "is_divided": False},
},
"subcategories": {},
"children": [("Topic C", TYPE_ENTRY), ("Topic A", TYPE_ENTRY), ("Topic B", TYPE_ENTRY)]
}
)
def test_sort_alpha(self):
self.course.discussion_sort_alpha = True
self.create_discussion("Chapter", "Discussion D")
self.create_discussion("Chapter", "Discussion A")
self.create_discussion("Chapter", "Discussion E")
self.create_discussion("Chapter", "Discussion C")
self.create_discussion("Chapter", "Discussion B")
self.assert_category_map_equals(
{
"entries": {},
"subcategories": {
"Chapter": {
"entries": {
"Discussion D": {
"id": "discussion1",
"sort_key": "Discussion D",
"is_divided": False,
},
"Discussion A": {
"id": "discussion2",
"sort_key": "Discussion A",
"is_divided": False,
},
"Discussion E": {
"id": "discussion3",
"sort_key": "Discussion E",
"is_divided": False,
},
"Discussion C": {
"id": "discussion4",
"sort_key": "Discussion C",
"is_divided": False,
},
"Discussion B": {
"id": "discussion5",
"sort_key": "Discussion B",
"is_divided": False,
}
},
"subcategories": {},
"children": [
("Discussion A", TYPE_ENTRY),
("Discussion B", TYPE_ENTRY),
("Discussion C", TYPE_ENTRY),
("Discussion D", TYPE_ENTRY),
("Discussion E", TYPE_ENTRY)
]
}
},
"children": [("Chapter", TYPE_SUBCATEGORY)]
}
)
def test_sort_intermediates(self):
self.create_discussion("Chapter B", "Discussion 2")
self.create_discussion("Chapter C", "Discussion")
self.create_discussion("Chapter A", "Discussion 1")
self.create_discussion("Chapter B", "Discussion 1")
self.create_discussion("Chapter A", "Discussion 2")
self.assert_category_map_equals(
{
"entries": {},
"subcategories": {
"Chapter A": {
"entries": {
"Discussion 1": {
"id": "discussion3",
"sort_key": None,
"is_divided": False,
},
"Discussion 2": {
"id": "discussion5",
"sort_key": None,
"is_divided": False,
}
},
"subcategories": {},
"children": [("Discussion 1", TYPE_ENTRY), ("Discussion 2", TYPE_ENTRY)]
},
"Chapter B": {
"entries": {
"Discussion 1": {
"id": "discussion4",
"sort_key": None,
"is_divided": False,
},
"Discussion 2": {
"id": "discussion1",
"sort_key": None,
"is_divided": False,
}
},
"subcategories": {},
"children": [("Discussion 1", TYPE_ENTRY), ("Discussion 2", TYPE_ENTRY)]
},
"Chapter C": {
"entries": {
"Discussion": {
"id": "discussion2",
"sort_key": None,
"is_divided": False,
}
},
"subcategories": {},
"children": [("Discussion", TYPE_ENTRY)]
}
},
"children": [("Chapter A", TYPE_SUBCATEGORY), ("Chapter B", TYPE_SUBCATEGORY),
("Chapter C", TYPE_SUBCATEGORY)]
}
)
def test_ids_empty(self):
self.assertEqual(utils.get_discussion_categories_ids(self.course, self.user), [])
def test_ids_configured_topics(self):
self.course.discussion_topics = {
"Topic A": {"id": "Topic_A"},
"Topic B": {"id": "Topic_B"},
"Topic C": {"id": "Topic_C"}
}
six.assertCountEqual(
self,
utils.get_discussion_categories_ids(self.course, self.user),
["Topic_A", "Topic_B", "Topic_C"]
)
def test_ids_inline(self):
self.create_discussion("Chapter 1", "Discussion 1")
self.create_discussion("Chapter 1", "Discussion 2")
self.create_discussion("Chapter 2", "Discussion")
self.create_discussion("Chapter 2 / Section 1 / Subsection 1", "Discussion")
self.create_discussion("Chapter 2 / Section 1 / Subsection 2", "Discussion")
self.create_discussion("Chapter 3 / Section 1", "Discussion")
six.assertCountEqual(
self,
utils.get_discussion_categories_ids(self.course, self.user),
["discussion1", "discussion2", "discussion3", "discussion4", "discussion5", "discussion6"]
)
def test_ids_mixed(self):
self.course.discussion_topics = {
"Topic A": {"id": "Topic_A"},
"Topic B": {"id": "Topic_B"},
"Topic C": {"id": "Topic_C"}
}
self.create_discussion("Chapter 1", "Discussion 1")
self.create_discussion("Chapter 2", "Discussion")
self.create_discussion("Chapter 2 / Section 1 / Subsection 1", "Discussion")
six.assertCountEqual(
self,
utils.get_discussion_categories_ids(self.course, self.user),
["Topic_A", "Topic_B", "Topic_C", "discussion1", "discussion2", "discussion3"]
)
class ContentGroupCategoryMapTestCase(CategoryMapTestMixin, ContentGroupTestCase):
"""
Tests `get_discussion_category_map` on discussion xblocks which are
only visible to some content groups.
"""
def test_staff_user(self):
"""
Verify that the staff user can access the alpha, beta, and
global discussion topics.
"""
self.assert_category_map_equals(
{
'subcategories': {
'Week 1': {
'subcategories': {},
'children': [
('Visible to Alpha', 'entry'),
('Visible to Beta', 'entry'),
('Visible to Everyone', 'entry')
],
'entries': {
'Visible to Alpha': {
'sort_key': None,
'is_divided': False,
'id': 'alpha_group_discussion'
},
'Visible to Beta': {
'sort_key': None,
'is_divided': False,
'id': 'beta_group_discussion'
},
'Visible to Everyone': {
'sort_key': None,
'is_divided': False,
'id': 'global_group_discussion'
}
}
}
},
'children': [('General', 'entry'), ('Week 1', 'subcategory')],
'entries': {
'General': {
'sort_key': 'General',
'is_divided': False,
'id': 'i4x-org-number-course-run'
}
}
},
requesting_user=self.staff_user
)
def test_alpha_user(self):
"""
Verify that the alpha user can access the alpha and global
discussion topics.
"""
self.assert_category_map_equals(
{
'subcategories': {
'Week 1': {
'subcategories': {},
'children': [
('Visible to Alpha', 'entry'),
('Visible to Everyone', 'entry')
],
'entries': {
'Visible to Alpha': {
'sort_key': None,
'is_divided': False,
'id': 'alpha_group_discussion'
},
'Visible to Everyone': {
'sort_key': None,
'is_divided': False,
'id': 'global_group_discussion'
}
}
}
},
'children': [('General', 'entry'), ('Week 1', 'subcategory')],
'entries': {
'General': {
'sort_key': 'General',
'is_divided': False,
'id': 'i4x-org-number-course-run'
}
}
},
requesting_user=self.alpha_user
)
def test_beta_user(self):
"""
Verify that the beta user can access the beta and global
discussion topics.
"""
children = [('Visible to Beta', 'entry'), ('Visible to Everyone', 'entry')]
if six.PY3:
children = [('Visible to Everyone', 'entry'), ('Visible to Beta', 'entry')]
expected = {
'subcategories': {
'Week 1': {
'subcategories': {},
'children': children,
'entries': {
'Visible to Beta': {
'sort_key': None,
'is_divided': False,
'id': 'beta_group_discussion'
},
'Visible to Everyone': {
'sort_key': None,
'is_divided': False,
'id': 'global_group_discussion'
}
}
}
},
'children': [('General', 'entry'), ('Week 1', 'subcategory')],
'entries': {
'General': {
'sort_key': 'General',
'is_divided': False,
'id': 'i4x-org-number-course-run'
}
}
}
self.assert_category_map_equals(
expected,
requesting_user=self.beta_user
)
def test_non_cohorted_user(self):
"""
Verify that the non-cohorted user can access the global
discussion topic.
"""
self.assert_category_map_equals(
{
'subcategories': {
'Week 1': {
'subcategories': {},
'children': [
('Visible to Everyone', 'entry')
],
'entries': {
'Visible to Everyone': {
'sort_key': None,
'is_divided': False,
'id': 'global_group_discussion'
}
}
}
},
'children': [('General', 'entry'), ('Week 1', 'subcategory')],
'entries': {
'General': {
'sort_key': 'General',
'is_divided': False,
'id': 'i4x-org-number-course-run'
}
}
},
requesting_user=self.non_cohorted_user
)
class JsonResponseTestCase(TestCase, UnicodeTestMixin):
def _test_unicode_data(self, text):
response = utils.JsonResponse(text)
reparsed = json.loads(response.content.decode('utf-8'))
self.assertEqual(reparsed, text)
class DiscussionTabTestCase(ModuleStoreTestCase):
""" Test visibility of the discussion tab. """
def setUp(self):
super(DiscussionTabTestCase, self).setUp()
self.course = CourseFactory.create()
self.enrolled_user = UserFactory.create()
self.staff_user = AdminFactory.create()
CourseEnrollmentFactory.create(user=self.enrolled_user, course_id=self.course.id)
self.unenrolled_user = UserFactory.create()
def discussion_tab_present(self, user):
""" Returns true if the user has access to the discussion tab. """
request = RequestFactory().request()
all_tabs = get_course_tab_list(user, self.course)
return any(tab.type == 'discussion' for tab in all_tabs)
def test_tab_access(self):
with self.settings(FEATURES={'ENABLE_DISCUSSION_SERVICE': True}):
self.assertTrue(self.discussion_tab_present(self.staff_user))
self.assertTrue(self.discussion_tab_present(self.enrolled_user))
self.assertFalse(self.discussion_tab_present(self.unenrolled_user))
@mock.patch('ccx.overrides.get_current_ccx')
def test_tab_settings(self, mock_get_ccx):
mock_get_ccx.return_value = True
with self.settings(FEATURES={'ENABLE_DISCUSSION_SERVICE': False}):
self.assertFalse(self.discussion_tab_present(self.enrolled_user))
with self.settings(FEATURES={'CUSTOM_COURSES_EDX': True}):
self.assertFalse(self.discussion_tab_present(self.enrolled_user))
class IsCommentableDividedTestCase(ModuleStoreTestCase):
"""
Test the is_commentable_divided function.
"""
MODULESTORE = TEST_DATA_MIXED_MODULESTORE
def setUp(self):
"""
Make sure that course is reloaded every time--clear out the modulestore.
"""
super(IsCommentableDividedTestCase, self).setUp()
self.toy_course_key = ToyCourseFactory.create().id
def test_is_commentable_divided(self):
course = modulestore().get_course(self.toy_course_key)
self.assertFalse(cohorts.is_course_cohorted(course.id))
def to_id(name):
"""Helper for topic_name_to_id that uses course."""
return topic_name_to_id(course, name)
# no topics
self.assertFalse(
utils.is_commentable_divided(course.id, to_id("General")),
"Course doesn't even have a 'General' topic"
)
# not cohorted
config_course_cohorts(course, is_cohorted=False)
config_course_discussions(course, discussion_topics=["General", "Feedback"])
self.assertFalse(
utils.is_commentable_divided(course.id, to_id("General")),
"Course isn't cohorted"
)
# cohorted, but top level topics aren't
config_course_cohorts(course, is_cohorted=True)
config_course_discussions(course, discussion_topics=["General", "Feedback"])
self.assertTrue(cohorts.is_course_cohorted(course.id))
self.assertFalse(
utils.is_commentable_divided(course.id, to_id("General")),
"Course is cohorted, but 'General' isn't."
)
        # cohorted, with "Feedback" explicitly divided while "General" still isn't
config_course_cohorts(
course,
is_cohorted=True
)
config_course_discussions(course, discussion_topics=["General", "Feedback"], divided_discussions=["Feedback"])
self.assertTrue(cohorts.is_course_cohorted(course.id))
self.assertFalse(
utils.is_commentable_divided(course.id, to_id("General")),
"Course is cohorted, but 'General' isn't."
)
self.assertTrue(
utils.is_commentable_divided(course.id, to_id("Feedback")),
"Feedback was listed as cohorted. Should be."
)
def test_is_commentable_divided_inline_discussion(self):
course = modulestore().get_course(self.toy_course_key)
self.assertFalse(cohorts.is_course_cohorted(course.id))
def to_id(name):
return topic_name_to_id(course, name)
config_course_cohorts(
course,
is_cohorted=True,
)
config_course_discussions(
course,
discussion_topics=["General", "Feedback"],
divided_discussions=["Feedback", "random_inline"]
)
self.assertFalse(
utils.is_commentable_divided(course.id, to_id("random")),
"By default, Non-top-level discussions are not cohorted in a cohorted courses."
)
# if always_divide_inline_discussions is set to False, non-top-level discussion are always
# not divided unless they are explicitly set in divided_discussions
config_course_cohorts(
course,
is_cohorted=True,
)
config_course_discussions(
course,
discussion_topics=["General", "Feedback"],
divided_discussions=["Feedback", "random_inline"],
always_divide_inline_discussions=False
)
self.assertFalse(
utils.is_commentable_divided(course.id, to_id("random")),
"Non-top-level discussion is not cohorted if always_divide_inline_discussions is False."
)
self.assertTrue(
utils.is_commentable_divided(course.id, to_id("random_inline")),
"If always_divide_inline_discussions set to False, Non-top-level discussion is "
"cohorted if explicitly set in cohorted_discussions."
)
self.assertTrue(
utils.is_commentable_divided(course.id, to_id("Feedback")),
"If always_divide_inline_discussions set to False, top-level discussion are not affected."
)
def test_is_commentable_divided_team(self):
course = modulestore().get_course(self.toy_course_key)
self.assertFalse(cohorts.is_course_cohorted(course.id))
config_course_cohorts(course, is_cohorted=True)
config_course_discussions(course, always_divide_inline_discussions=True)
team = CourseTeamFactory(course_id=course.id)
# Verify that team discussions are not cohorted, but other discussions are
# if "always cohort inline discussions" is set to true.
self.assertFalse(utils.is_commentable_divided(course.id, team.discussion_topic_id))
self.assertTrue(utils.is_commentable_divided(course.id, "random"))
def test_is_commentable_divided_cohorts(self):
course = modulestore().get_course(self.toy_course_key)
set_discussion_division_settings(
course.id,
enable_cohorts=True,
divided_discussions=[],
always_divide_inline_discussions=True,
division_scheme=CourseDiscussionSettings.NONE,
)
# Although Cohorts are enabled, discussion division is explicitly disabled.
self.assertFalse(utils.is_commentable_divided(course.id, "random"))
# Now set the discussion division scheme.
set_discussion_division_settings(
course.id,
enable_cohorts=True,
divided_discussions=[],
always_divide_inline_discussions=True,
division_scheme=CourseDiscussionSettings.COHORT,
)
self.assertTrue(utils.is_commentable_divided(course.id, "random"))
def test_is_commentable_divided_enrollment_track(self):
course = modulestore().get_course(self.toy_course_key)
set_discussion_division_settings(
course.id,
divided_discussions=[],
always_divide_inline_discussions=True,
division_scheme=CourseDiscussionSettings.ENROLLMENT_TRACK,
)
# Although division scheme is set to ENROLLMENT_TRACK, divided returns
# False because there is only a single enrollment mode.
self.assertFalse(utils.is_commentable_divided(course.id, "random"))
# Now create 2 explicit course modes.
CourseModeFactory.create(course_id=course.id, mode_slug=CourseMode.AUDIT)
CourseModeFactory.create(course_id=course.id, mode_slug=CourseMode.VERIFIED)
self.assertTrue(utils.is_commentable_divided(course.id, "random"))
class GroupIdForUserTestCase(ModuleStoreTestCase):
""" Test the get_group_id_for_user method. """
def setUp(self):
super(GroupIdForUserTestCase, self).setUp()
self.course = CourseFactory.create()
CourseModeFactory.create(course_id=self.course.id, mode_slug=CourseMode.AUDIT)
CourseModeFactory.create(course_id=self.course.id, mode_slug=CourseMode.VERIFIED)
self.test_user = UserFactory.create()
CourseEnrollmentFactory.create(
mode=CourseMode.VERIFIED, user=self.test_user, course_id=self.course.id
)
self.test_cohort = CohortFactory(
course_id=self.course.id,
name='Test Cohort',
users=[self.test_user]
)
def test_discussion_division_disabled(self):
course_discussion_settings = get_course_discussion_settings(self.course.id)
self.assertEqual(CourseDiscussionSettings.NONE, course_discussion_settings.division_scheme)
self.assertIsNone(utils.get_group_id_for_user(self.test_user, course_discussion_settings))
def test_discussion_division_by_cohort(self):
set_discussion_division_settings(
self.course.id, enable_cohorts=True, division_scheme=CourseDiscussionSettings.COHORT
)
course_discussion_settings = get_course_discussion_settings(self.course.id)
self.assertEqual(CourseDiscussionSettings.COHORT, course_discussion_settings.division_scheme)
self.assertEqual(
self.test_cohort.id,
utils.get_group_id_for_user(self.test_user, course_discussion_settings)
)
def test_discussion_division_by_enrollment_track(self):
set_discussion_division_settings(
self.course.id, division_scheme=CourseDiscussionSettings.ENROLLMENT_TRACK
)
course_discussion_settings = get_course_discussion_settings(self.course.id)
self.assertEqual(CourseDiscussionSettings.ENROLLMENT_TRACK, course_discussion_settings.division_scheme)
self.assertEqual(
-2, # Verified has group ID 2, and we negate that value to ensure unique IDs
utils.get_group_id_for_user(self.test_user, course_discussion_settings)
)
class CourseDiscussionDivisionEnabledTestCase(ModuleStoreTestCase):
""" Test the course_discussion_division_enabled and available_division_schemes methods. """
def setUp(self):
super(CourseDiscussionDivisionEnabledTestCase, self).setUp()
self.course = CourseFactory.create()
CourseModeFactory.create(course_id=self.course.id, mode_slug=CourseMode.AUDIT)
self.test_cohort = CohortFactory(
course_id=self.course.id,
name='Test Cohort',
users=[]
)
def test_discussion_division_disabled(self):
course_discussion_settings = get_course_discussion_settings(self.course.id)
self.assertFalse(utils.course_discussion_division_enabled(course_discussion_settings))
self.assertEqual([], utils.available_division_schemes(self.course.id))
def test_discussion_division_by_cohort(self):
set_discussion_division_settings(
self.course.id, enable_cohorts=False, division_scheme=CourseDiscussionSettings.COHORT
)
# Because cohorts are disabled, discussion division is not enabled.
self.assertFalse(utils.course_discussion_division_enabled(get_course_discussion_settings(self.course.id)))
self.assertEqual([], utils.available_division_schemes(self.course.id))
# Now enable cohorts, which will cause discussions to be divided.
set_discussion_division_settings(
self.course.id, enable_cohorts=True, division_scheme=CourseDiscussionSettings.COHORT
)
self.assertTrue(utils.course_discussion_division_enabled(get_course_discussion_settings(self.course.id)))
self.assertEqual([CourseDiscussionSettings.COHORT], utils.available_division_schemes(self.course.id))
def test_discussion_division_by_enrollment_track(self):
set_discussion_division_settings(
self.course.id, division_scheme=CourseDiscussionSettings.ENROLLMENT_TRACK
)
# Only a single enrollment track exists, so discussion division is not enabled.
self.assertFalse(utils.course_discussion_division_enabled(get_course_discussion_settings(self.course.id)))
self.assertEqual([], utils.available_division_schemes(self.course.id))
# Now create a second CourseMode, which will cause discussions to be divided.
CourseModeFactory.create(course_id=self.course.id, mode_slug=CourseMode.VERIFIED)
self.assertTrue(utils.course_discussion_division_enabled(get_course_discussion_settings(self.course.id)))
self.assertEqual([CourseDiscussionSettings.ENROLLMENT_TRACK], utils.available_division_schemes(self.course.id))
class GroupNameTestCase(ModuleStoreTestCase):
""" Test the get_group_name and get_group_names_by_id methods. """
def setUp(self):
super(GroupNameTestCase, self).setUp()
self.course = CourseFactory.create()
CourseModeFactory.create(course_id=self.course.id, mode_slug=CourseMode.AUDIT)
CourseModeFactory.create(course_id=self.course.id, mode_slug=CourseMode.VERIFIED)
self.test_cohort_1 = CohortFactory(
course_id=self.course.id,
name='Cohort 1',
users=[]
)
self.test_cohort_2 = CohortFactory(
course_id=self.course.id,
name='Cohort 2',
users=[]
)
def test_discussion_division_disabled(self):
course_discussion_settings = get_course_discussion_settings(self.course.id)
self.assertEqual({}, utils.get_group_names_by_id(course_discussion_settings))
self.assertIsNone(utils.get_group_name(-1000, course_discussion_settings))
def test_discussion_division_by_cohort(self):
set_discussion_division_settings(
self.course.id, enable_cohorts=True, division_scheme=CourseDiscussionSettings.COHORT
)
course_discussion_settings = get_course_discussion_settings(self.course.id)
self.assertEqual(
{
self.test_cohort_1.id: self.test_cohort_1.name,
self.test_cohort_2.id: self.test_cohort_2.name
},
utils.get_group_names_by_id(course_discussion_settings)
)
self.assertEqual(
self.test_cohort_2.name,
utils.get_group_name(self.test_cohort_2.id, course_discussion_settings)
)
# Test also with a group_id that doesn't exist.
self.assertIsNone(
utils.get_group_name(-1000, course_discussion_settings)
)
def test_discussion_division_by_enrollment_track(self):
set_discussion_division_settings(
self.course.id, division_scheme=CourseDiscussionSettings.ENROLLMENT_TRACK
)
course_discussion_settings = get_course_discussion_settings(self.course.id)
self.assertEqual(
{
-1: "audit course",
-2: "verified course"
},
utils.get_group_names_by_id(course_discussion_settings)
)
self.assertEqual(
"verified course",
utils.get_group_name(-2, course_discussion_settings)
)
# Test also with a group_id that doesn't exist.
self.assertIsNone(
utils.get_group_name(-1000, course_discussion_settings)
)
class PermissionsTestCase(ModuleStoreTestCase):
"""Test utils functionality related to forums "abilities" (permissions)"""
def test_get_ability(self):
content = {}
content['user_id'] = '1'
content['type'] = 'thread'
user = mock.Mock()
user.id = 1
with mock.patch(
'lms.djangoapps.discussion.django_comment_client.utils.check_permissions_by_view'
) as check_perm:
check_perm.return_value = True
self.assertEqual(utils.get_ability(None, content, user), {
'editable': True,
'can_reply': True,
'can_delete': True,
'can_openclose': True,
'can_vote': False,
'can_report': False
})
content['user_id'] = '2'
self.assertEqual(utils.get_ability(None, content, user), {
'editable': True,
'can_reply': True,
'can_delete': True,
'can_openclose': True,
'can_vote': True,
'can_report': True
})
def test_get_ability_with_global_staff(self):
"""
        Tests that global staff has the right to report another user's post,
        regardless of whether they are enrolled in the course.
"""
content = {'user_id': '1', 'type': 'thread'}
with mock.patch(
'lms.djangoapps.discussion.django_comment_client.utils.check_permissions_by_view'
) as check_perm:
# check_permissions_by_view returns false because user is not enrolled in the course.
check_perm.return_value = False
global_staff = UserFactory(username='global_staff', email='[email protected]', is_staff=True)
self.assertEqual(utils.get_ability(None, content, global_staff), {
'editable': False,
'can_reply': False,
'can_delete': False,
'can_openclose': False,
'can_vote': False,
'can_report': True
})
def test_is_content_authored_by(self):
content = {}
user = mock.Mock()
user.id = 1
# strict equality checking
content['user_id'] = 1
self.assertTrue(utils.is_content_authored_by(content, user))
# cast from string to int
content['user_id'] = '1'
self.assertTrue(utils.is_content_authored_by(content, user))
# strict equality checking, fails
content['user_id'] = 2
self.assertFalse(utils.is_content_authored_by(content, user))
# cast from string to int, fails
content['user_id'] = 'string'
self.assertFalse(utils.is_content_authored_by(content, user))
# content has no known author
del content['user_id']
self.assertFalse(utils.is_content_authored_by(content, user))
class GroupModeratorPermissionsTestCase(ModuleStoreTestCase):
"""Test utils functionality related to forums "abilities" (permissions) for group moderators"""
def _check_condition(user, condition, content):
"""
Mocks check_condition method because is_open and is_team_member_if_applicable must always be true
in order to interact with a thread or comment.
"""
        return condition in ('is_open', 'is_team_member_if_applicable')
def setUp(self):
super(GroupModeratorPermissionsTestCase, self).setUp()
# Create course, seed permissions roles, and create team
self.course = CourseFactory.create()
seed_permissions_roles(self.course.id)
verified_coursemode = CourseMode.VERIFIED
audit_coursemode = CourseMode.AUDIT
# Create four users: group_moderator (who is within the verified enrollment track and in the cohort),
# verified_user (who is in the verified enrollment track but not the cohort),
# cohorted_user (who is in the cohort but not the verified enrollment track),
# and plain_user (who is neither in the cohort nor the verified enrollment track)
self.group_moderator = UserFactory(username='group_moderator', email='[email protected]')
CourseEnrollmentFactory(
course_id=self.course.id,
user=self.group_moderator,
mode=verified_coursemode
)
self.verified_user = UserFactory(username='verified', email='[email protected]')
CourseEnrollmentFactory(
course_id=self.course.id,
user=self.verified_user,
mode=verified_coursemode
)
self.cohorted_user = UserFactory(username='cohort', email='[email protected]')
CourseEnrollmentFactory(
course_id=self.course.id,
user=self.cohorted_user,
mode=audit_coursemode
)
self.plain_user = UserFactory(username='plain', email='[email protected]')
CourseEnrollmentFactory(
course_id=self.course.id,
user=self.plain_user,
mode=audit_coursemode
)
CohortFactory(
course_id=self.course.id,
name='Test Cohort',
users=[self.group_moderator, self.cohorted_user]
)
# Give group moderator permissions to group_moderator
assign_role(self.course.id, self.group_moderator, 'Group Moderator')
@mock.patch(
'lms.djangoapps.discussion.django_comment_client.permissions._check_condition',
side_effect=_check_condition,
)
def test_not_divided(self, check_condition_function):
"""
Group moderator should not have moderator permissions if the discussions are not divided.
"""
content = {'user_id': self.plain_user.id, 'type': 'thread', 'username': self.plain_user.username}
self.assertEqual(utils.get_ability(self.course.id, content, self.group_moderator), {
'editable': False,
'can_reply': True,
'can_delete': False,
'can_openclose': False,
'can_vote': True,
'can_report': True
})
content = {'user_id': self.cohorted_user.id, 'type': 'thread'}
self.assertEqual(utils.get_ability(self.course.id, content, self.group_moderator), {
'editable': False,
'can_reply': True,
'can_delete': False,
'can_openclose': False,
'can_vote': True,
'can_report': True
})
content = {'user_id': self.verified_user.id, 'type': 'thread'}
self.assertEqual(utils.get_ability(self.course.id, content, self.group_moderator), {
'editable': False,
'can_reply': True,
'can_delete': False,
'can_openclose': False,
'can_vote': True,
'can_report': True
})
@mock.patch(
'lms.djangoapps.discussion.django_comment_client.permissions._check_condition',
side_effect=_check_condition,
)
def test_divided_within_group(self, check_condition_function):
"""
Group moderator should have moderator permissions within their group if the discussions are divided.
"""
set_discussion_division_settings(self.course.id, enable_cohorts=True,
division_scheme=CourseDiscussionSettings.COHORT)
content = {'user_id': self.cohorted_user.id, 'type': 'thread', 'username': self.cohorted_user.username}
self.assertEqual(utils.get_ability(self.course.id, content, self.group_moderator), {
'editable': True,
'can_reply': True,
'can_delete': True,
'can_openclose': True,
'can_vote': True,
'can_report': True
})
@mock.patch(
'lms.djangoapps.discussion.django_comment_client.permissions._check_condition',
side_effect=_check_condition,
)
def test_divided_outside_group(self, check_condition_function):
"""
Group moderator should not have moderator permissions outside of their group.
"""
content = {'user_id': self.plain_user.id, 'type': 'thread', 'username': self.plain_user.username}
set_discussion_division_settings(self.course.id, division_scheme=CourseDiscussionSettings.NONE)
self.assertEqual(utils.get_ability(self.course.id, content, self.group_moderator), {
'editable': False,
'can_reply': True,
'can_delete': False,
'can_openclose': False,
'can_vote': True,
'can_report': True
})
class ClientConfigurationTestCase(TestCase):
"""Simple test cases to ensure enabling/disabling the use of the comment service works as intended."""
def test_disabled(self):
"""Ensures that an exception is raised when forums are disabled."""
config = ForumsConfig.current()
config.enabled = False
config.save()
with self.assertRaises(CommentClientMaintenanceError):
perform_request('GET', 'http://www.google.com')
@patch('requests.request')
def test_enabled(self, mock_request):
"""Ensures that requests proceed normally when forums are enabled."""
config = ForumsConfig.current()
config.enabled = True
config.save()
response = Mock()
response.status_code = 200
response.json = lambda: {}
mock_request.return_value = response
result = perform_request('GET', 'http://www.google.com')
self.assertEqual(result, {})
def set_discussion_division_settings(
course_key, enable_cohorts=False, always_divide_inline_discussions=False,
divided_discussions=[], division_scheme=CourseDiscussionSettings.COHORT
):
"""
Convenience method for setting cohort enablement and discussion settings.
COHORT is the default division_scheme, as no other schemes were supported at
the time that the unit tests were originally written.
"""
set_course_discussion_settings(
course_key=course_key,
divided_discussions=divided_discussions,
division_scheme=division_scheme,
always_divide_inline_discussions=always_divide_inline_discussions,
)
set_course_cohorted(course_key, enable_cohorts)
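# Illustrative usage (assumes a valid CourseKey such as `self.course.id` in the tests above):
#   set_discussion_division_settings(
#       self.course.id,
#       enable_cohorts=True,
#       division_scheme=CourseDiscussionSettings.COHORT,
#   )
# enables cohorts for the course and sets the discussion division scheme to COHORT.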
| agpl-3.0 | 5,630,224,446,512,090,000 | 41.95315 | 143 | 0.526102 | false |
ericbulloch/authorize | authorize/tests/test_api.py | 1 | 2400 | from unittest import TestCase
from authorize import gen_xml as x, responses, cim, arb, aim
class TestAPIUsage(TestCase):
def test_aim_calls(self):
"""
Test that the API calls using AIM are made with the correct parameters.
"""
api = aim.Api(login=u"ciao", key=u"handsome", do_raise=True)
assert api.server.startswith("secure2")
api = aim.Api(login=u"ciao", key=u"handsome", is_test=True, do_raise=True)
assert api.server.startswith("test")
assert api.login == "ciao"
assert api.key == "handsome"
assert api.required_arguments[u'x_login'] == api.login
assert api.required_arguments[u'x_tran_key'] == api.key
request_body = []
def _fake_request(body):
request_body.append(body)
return u'1|1|1|This transaction has been approved.||||||40.00|CC|credit|||||||||||||||||||||||||||||||||||||||||||||||||||||||||true'
api.request = _fake_request
result = api.transaction(type=aim.CREDIT, amount=40, card_num=u"2222", exp_date=u"0709", trans_id=u"123456")
body = request_body[0]
assert body == """\
x_exp_date=0709&x_amount=40&x_card_num=2222&x_type=credit&x_trans_id=123456&x_login=ciao&x_tran_key=handsome&x_encap_char=&x_version=3.1&x_delim_char=%7C&x_relay_response=false&x_delim_data=true"""
result = api.transaction(amount=40, card_num=u"4111111111111111",
exp_date=u"0709", trans_id=u"123456",
items=[[1,2,3,4], [5,6,7,8]],
extra_fields={u"comment": u"on this"},
authentication_indicator=1,
cardholder_authentication_value=4)
body = request_body[1]
assert body == """\
x_cardholder_authentication_value=4&x_card_num=4111111111111111&x_line_item=%5B%3C%7C%3E1%3C%7C%3E%2C%3C%7C%3E+%3C%7C%3E2%3C%7C%3E%2C%3C%7C%3E+%3C%7C%3E3%3C%7C%3E%2C%3C%7C%3E+%3C%7C%3E4%3C%7C%3E%5D&x_line_item=%5B%3C%7C%3E5%3C%7C%3E%2C%3C%7C%3E+%3C%7C%3E6%3C%7C%3E%2C%3C%7C%3E+%3C%7C%3E7%3C%7C%3E%2C%3C%7C%3E+%3C%7C%3E8%3C%7C%3E%5D&x_amount=40&x_exp_date=0709&x_authentication_indicator=1&x_trans_id=123456&x_login=ciao&x_tran_key=handsome&x_encap_char=&x_version=3.1&x_delim_char=%7C&x_relay_response=false&x_delim_data=true&comment=on+this"""
| mit | 4,731,461,782,902,302,000 | 59 | 544 | 0.593333 | false |
cloudera/recordservice | bin/load-data.py | 1 | 13316 | #!/usr/bin/env python
# Copyright (c) 2012 Cloudera, Inc. All rights reserved.
#
# This script is used to load the proper datasets for the specified workloads. It loads
# all data via Hive except for parquet data which needs to be loaded via Impala.
# Most ddl commands are executed by Impala.
import collections
import os
import re
import sqlparse
import subprocess
import sys
import tempfile
import time
import getpass
from itertools import product
from optparse import OptionParser
from Queue import Queue
from tests.beeswax.impala_beeswax import *
from threading import Thread
parser = OptionParser()
parser.add_option("-e", "--exploration_strategy", dest="exploration_strategy",
default="core",
help="The exploration strategy for schema gen: 'core', "\
"'pairwise', or 'exhaustive'")
parser.add_option("--hive_warehouse_dir", dest="hive_warehouse_dir",
default="/test-warehouse",
help="The HDFS path to the base Hive test warehouse directory")
parser.add_option("-w", "--workloads", dest="workloads",
help="Comma-separated list of workloads to load data for. If 'all' is "\
"specified then data for all workloads is loaded.")
parser.add_option("-s", "--scale_factor", dest="scale_factor", default="",
help="An optional scale factor to generate the schema for")
parser.add_option("-f", "--force_reload", dest="force_reload", action="store_true",
default=False, help='Skips HDFS exists check and reloads all tables')
parser.add_option("--impalad", dest="impalad", default="localhost:21000",
help="Impala daemon to connect to")
parser.add_option("--hive_hs2_hostport", dest="hive_hs2_hostport",
default="localhost:10050",
help="HS2 host:Port to issue Hive queries against using beeline")
parser.add_option("--table_names", dest="table_names", default=None,
help="Only load the specified tables - specified as a comma-seperated "\
"list of base table names")
parser.add_option("--table_formats", dest="table_formats", default=None,
help="Override the test vectors and load using the specified table "\
"formats. Ex. --table_formats=seq/snap/block,text/none")
parser.add_option("--hdfs_namenode", dest="hdfs_namenode", default="localhost:20500",
help="HDFS name node for Avro schema URLs, default localhost:20500")
parser.add_option("--workload_dir", dest="workload_dir",
default=os.environ['IMPALA_WORKLOAD_DIR'],
help="Directory that contains Impala workloads")
parser.add_option("--dataset_dir", dest="dataset_dir",
default=os.environ['IMPALA_DATASET_DIR'],
help="Directory that contains Impala datasets")
parser.add_option("--use_kerberos", action="store_true", default=False,
help="Load data on a kerberized cluster.")
parser.add_option("--principal", default=None, dest="principal",
help="Kerberos service principal, required if --use_kerberos is set")
options, args = parser.parse_args()
DATA_LOAD_DIR = '/tmp/data-load-files'
WORKLOAD_DIR = options.workload_dir
DATASET_DIR = options.dataset_dir
TESTDATA_BIN_DIR = os.path.join(os.environ['IMPALA_HOME'], 'testdata/bin')
AVRO_SCHEMA_DIR = "avro_schemas"
GENERATE_SCHEMA_CMD = "generate-schema-statements.py --exploration_strategy=%s "\
"--workload=%s --scale_factor=%s --verbose"
# Load data using Hive's beeline because the Hive shell has regressed (CDH-17222).
# The Hive shell is stateful, meaning that certain series of actions lead to problems.
# Examples of problems due to the statefulness of the Hive shell:
# - Creating an HBase table changes the replication factor to 1 for subsequent LOADs.
# - INSERTs into an HBase table fail if they are the first stmt executed in a session.
# However, beeline itself also has bugs. For example, inserting a NULL literal into
# a string-typed column leads to an NPE. We work around these problems by using LOAD from
# a datafile instead of doing INSERTs.
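# For illustration only (hypothetical user and workload names), the beeline invocation
# assembled from HIVE_CMD and HIVE_ARGS below ends up looking roughly like:
#   $HIVE_HOME/bin/beeline -n jenkins \
#     -u "jdbc:hive2://localhost:10050/default;auth=none" --verbose=true \
#     -f load-tpch-core-hive-generated.sql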
HIVE_CMD = os.path.join(os.environ['HIVE_HOME'], 'bin/beeline')
hive_auth = "auth=none"
if options.use_kerberos:
if not options.principal:
print "--principal is required when --use_kerberos is specified"
exit(1)
hive_auth = "principal=" + options.principal
HIVE_ARGS = '-n %s -u "jdbc:hive2://%s/default;%s" --verbose=true'\
% (getpass.getuser(), options.hive_hs2_hostport, hive_auth)
HADOOP_CMD = os.path.join(os.environ['HADOOP_HOME'], 'bin/hadoop')
def available_workloads(workload_dir):
return [subdir for subdir in os.listdir(workload_dir)
if os.path.isdir(os.path.join(workload_dir, subdir))]
def validate_workloads(all_workloads, workloads):
for workload in workloads:
if workload not in all_workloads:
print 'Workload \'%s\' not found in workload directory' % workload
print 'Available workloads: ' + ', '.join(all_workloads)
sys.exit(1)
def exec_cmd(cmd, error_msg, exit_on_error=True):
ret_val = -1
try:
ret_val = subprocess.call(cmd, shell=True)
except Exception as e:
error_msg = "%s: %s" % (error_msg, str(e))
finally:
if ret_val != 0:
print error_msg
if exit_on_error: sys.exit(ret_val)
return ret_val
def exec_hive_query_from_file(file_name):
if not os.path.exists(file_name): return
hive_cmd = "%s %s -f %s" % (HIVE_CMD, HIVE_ARGS, file_name)
print 'Executing Hive Command: %s' % hive_cmd
exec_cmd(hive_cmd, 'Error executing file from Hive: ' + file_name)
def exec_hbase_query_from_file(file_name):
if not os.path.exists(file_name): return
hbase_cmd = "hbase shell %s" % file_name
print 'Executing HBase Command: %s' % hbase_cmd
exec_cmd(hbase_cmd, 'Error executing hbase create commands')
# KERBEROS TODO: fails when kerberized and impalad principal isn't "impala"
def exec_impala_query_from_file(file_name):
"""Execute each query in an Impala query file individually"""
is_success = True
impala_client = ImpalaBeeswaxClient(options.impalad, use_kerberos=options.use_kerberos)
try:
impala_client.connect()
with open(file_name, 'r+') as query_file:
queries = sqlparse.split(query_file.read())
for query in queries:
query = sqlparse.format(query.rstrip(';'), strip_comments=True)
print '(%s):\n%s\n' % (file_name, query.strip())
result = impala_client.execute(query)
except Exception as e:
print "Data Loading from Impala failed with error: %s" % str(e)
is_success = False
finally:
impala_client.close_connection()
return is_success
def exec_bash_script(file_name):
bash_cmd = "bash %s" % file_name
print 'Executing Bash Command: ' + bash_cmd
exec_cmd(bash_cmd, 'Error bash script: ' + file_name)
def generate_schema_statements(workload):
generate_cmd = GENERATE_SCHEMA_CMD % (options.exploration_strategy, workload,
options.scale_factor)
if options.table_names:
generate_cmd += " --table_names=%s" % options.table_names
if options.force_reload:
generate_cmd += " --force_reload"
if options.table_formats:
generate_cmd += " --table_formats=%s" % options.table_formats
if options.hive_warehouse_dir is not None:
generate_cmd += " --hive_warehouse_dir=%s" % options.hive_warehouse_dir
if options.hdfs_namenode is not None:
generate_cmd += " --hdfs_namenode=%s" % options.hdfs_namenode
print 'Executing Generate Schema Command: ' + generate_cmd
schema_cmd = os.path.join(TESTDATA_BIN_DIR, generate_cmd)
error_msg = 'Error generating schema statements for workload: ' + workload
exec_cmd(schema_cmd, error_msg)
def get_dataset_for_workload(workload):
dimension_file_name = os.path.join(WORKLOAD_DIR, workload,
'%s_dimensions.csv' % workload)
if not os.path.isfile(dimension_file_name):
print 'Dimension file not found: ' + dimension_file_name
sys.exit(1)
with open(dimension_file_name, 'rb') as input_file:
    match = re.search(r'dataset:\s*([\w\-\.]+)', input_file.read())
if match:
return match.group(1)
else:
print 'Dimension file does not contain dataset for workload \'%s\'' % (workload)
sys.exit(1)
def copy_avro_schemas_to_hdfs(schemas_dir):
"""Recursively copies all of schemas_dir to the test warehouse."""
if not os.path.exists(schemas_dir):
print 'Avro schema dir (%s) does not exist. Skipping copy to HDFS.' % schemas_dir
return
exec_hadoop_fs_cmd("-mkdir -p " + options.hive_warehouse_dir)
exec_hadoop_fs_cmd("-put -f %s %s/" % (schemas_dir, options.hive_warehouse_dir))
def exec_hadoop_fs_cmd(args, exit_on_error=True):
cmd = "%s fs %s" % (HADOOP_CMD, args)
print "Executing Hadoop command: " + cmd
exec_cmd(cmd, "Error executing Hadoop command, exiting",
exit_on_error=exit_on_error)
def exec_impala_query_from_file_parallel(query_files):
# Get the name of the query file that loads the base tables, if it exists.
# TODO: Find a better way to detect the file that loads the base tables.
create_base_table_file = next((q for q in query_files if 'text' in q), None)
if create_base_table_file:
is_success = exec_impala_query_from_file(create_base_table_file)
query_files.remove(create_base_table_file)
# If loading the base tables failed, exit with a non zero error code.
if not is_success: sys.exit(1)
if not query_files: return
threads = []
result_queue = Queue()
for query_file in query_files:
thread = Thread(target=lambda x: result_queue.put(exec_impala_query_from_file(x)),
args=[query_file])
thread.daemon = True
threads.append(thread)
thread.start()
# Keep looping until the number of results retrieved is the same as the number of
# threads spawned, or until a data loading query fails. result_queue.get() will
# block until a result is available in the queue.
num_fetched_results = 0
while num_fetched_results < len(threads):
success = result_queue.get()
num_fetched_results += 1
if not success: sys.exit(1)
# There is a small window where a thread may still be alive even if all the threads have
# finished putting their results in the queue.
for thread in threads: thread.join()
def invalidate_impala_metadata():
print "Invalidating Metadata"
impala_client = ImpalaBeeswaxClient(options.impalad, use_kerberos=options.use_kerberos)
impala_client.connect()
try:
impala_client.execute('invalidate metadata')
finally:
impala_client.close_connection()
if __name__ == "__main__":
all_workloads = available_workloads(WORKLOAD_DIR)
workloads = []
if options.workloads is None:
print "At least one workload name must be specified."
parser.print_help()
sys.exit(1)
elif options.workloads == 'all':
print 'Loading data for all workloads.'
workloads = all_workloads
else:
workloads = options.workloads.split(",")
validate_workloads(all_workloads, workloads)
print 'Starting data load for the following workloads: ' + ', '.join(workloads)
loading_time_map = collections.defaultdict(float)
for workload in workloads:
start_time = time.time()
dataset = get_dataset_for_workload(workload)
generate_schema_statements(workload)
assert os.path.isdir(os.path.join(DATA_LOAD_DIR, dataset)), ("Data loading files "
"do not exist for (%s)" % dataset)
os.chdir(os.path.join(DATA_LOAD_DIR, dataset))
copy_avro_schemas_to_hdfs(AVRO_SCHEMA_DIR)
dataset_dir_contents = os.listdir(os.getcwd())
load_file_substr = "%s-%s" % (workload, options.exploration_strategy)
# Data loading with Impala is done in parallel, each file format has a separate query
# file.
create_filename = '%s-impala-generated' % load_file_substr
load_filename = '%s-impala-load-generated' % load_file_substr
impala_create_files = [f for f in dataset_dir_contents if create_filename in f]
impala_load_files = [f for f in dataset_dir_contents if load_filename in f]
# Execute the data loading scripts.
# Creating tables in Impala has no dependencies, so we execute them first.
# HBase table inserts are done via hive, so the hbase tables need to be created before
# running the hive script. Some of the Impala inserts depend on hive tables,
# so they're done at the end. Finally, the Hbase Tables that have been filled with data
# need to be flushed.
exec_impala_query_from_file_parallel(impala_create_files)
exec_hbase_query_from_file('load-%s-hbase-generated.create' % load_file_substr)
exec_hive_query_from_file('load-%s-hive-generated.sql' % load_file_substr)
exec_hbase_query_from_file('post-load-%s-hbase-generated.sql' % load_file_substr)
if impala_load_files: invalidate_impala_metadata()
exec_impala_query_from_file_parallel(impala_load_files)
loading_time_map[workload] = time.time() - start_time
invalidate_impala_metadata()
total_time = 0.0
for workload, load_time in loading_time_map.iteritems():
total_time += load_time
print 'Data loading for workload \'%s\' completed in: %.2fs'\
% (workload, load_time)
print 'Total load time: %.2fs\n' % total_time
| apache-2.0 | -8,049,208,226,449,014,000 | 44.292517 | 91 | 0.685566 | false |
srfraser/services | src/releng_notification_identity/releng_notification_identity/api.py | 1 | 5815 | # -*- coding: utf-8 -*-
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import absolute_import
from flask import current_app
from typing import List
from werkzeug.exceptions import BadRequest, Conflict, NotFound
from .models import Identity, Preference
from sqlalchemy.exc import IntegrityError
from backend_common.auth import auth
AUTHENTICATION_SCOPE_PREFIX = 'project:releng:services/releng_notification_identity/permission/'
def _get_identity_preferences(identity_name: str) -> List[Preference]:
session = current_app.db.session
identity = session.query(Identity).filter(Identity.name == identity_name).first()
if identity:
preferences = session.query(Preference).filter(identity.id == Preference.identity).all()
if preferences:
return preferences
else:
raise NotFound('Identity with name {} has no configured notification preferences.'.format(identity_name))
else:
raise NotFound('Identity with name {} could not be found.'.format(identity_name))
@auth.require_scopes([AUTHENTICATION_SCOPE_PREFIX + 'put_identity'])
def put_identity(identity_name: str, body: dict) -> None:
try:
session = current_app.db.session
if session.query(Identity).filter(Identity.name == identity_name).count():
raise Conflict('Identity with the name {} already exists'.format(identity_name))
new_identity = Identity(name=identity_name)
session.add(new_identity)
session.flush()
preferences = [
Preference(**pref, identity=new_identity.id)
for pref in body['preferences']
]
session.add_all(preferences)
session.commit()
return None
except IntegrityError as ie:
raise BadRequest('Request preferences contain duplicate urgency level {}.'.format(ie.params.get('urgency')))
def modify_existing_preferences(new_preferences_lookup: dict, existing_preferences: list):
for record in existing_preferences:
if record.urgency not in new_preferences_lookup:
continue
new_preference = new_preferences_lookup[record.urgency]
record.channel = new_preference['channel']
record.target = new_preference['target']
yield record
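# Illustrative sketch of the merge behaviour (hypothetical data): given an existing
# preference record with urgency 'LOW' and a lookup entry
#   {'LOW': {'urgency': 'LOW', 'channel': 'EMAIL', 'target': '[email protected]'}},
# that record's channel and target are overwritten and the record is yielded;
# records whose urgency is absent from the lookup are skipped.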
@auth.require_scopes([AUTHENTICATION_SCOPE_PREFIX + 'post_identity'])
def post_identity(identity_name: str, body: dict) -> None:
session = current_app.db.session
preference_records = _get_identity_preferences(identity_name)
new_preference_lookup = {
new_preference['urgency']: new_preference
for new_preference in body['preferences']
}
for record in modify_existing_preferences(new_preference_lookup, preference_records):
session.merge(record)
new_preference_lookup.pop(record.urgency)
if new_preference_lookup:
identity = session.query(Identity).filter(Identity.name == identity_name).first()
for new_urgency, new_preference in new_preference_lookup.items():
new_pref = Preference(**new_preference, identity=identity.id)
session.add(new_pref)
session.commit()
return None
@auth.require_scopes([AUTHENTICATION_SCOPE_PREFIX + 'get_identity'])
def get_identity(identity_name: str) -> dict:
preferences = _get_identity_preferences(identity_name)
if preferences:
return {
'preferences': [
{**pref.to_dict(), 'name': identity_name}
for pref in preferences
],
}
else:
raise NotFound('No preferences found for identity {}.'.format(identity_name))
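# Assumed response shape for get_identity above (preference fields inferred from the
# channel/target/urgency keys used elsewhere in this module):
#   {'preferences': [{'name': '<identity>', 'channel': ..., 'target': ..., 'urgency': ...}]}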
@auth.require_scopes([AUTHENTICATION_SCOPE_PREFIX + 'get_identity_preference_by_urgency'])
def get_identity_preference_by_urgency(identity_name: str, urgency: str) -> dict:
preferences = _get_identity_preferences(identity_name)
preference_by_urgency_level = list(filter(lambda pref: pref.urgency == urgency, preferences))
if preference_by_urgency_level:
return {
'preferences': [
{
'name': identity_name,
**preference_by_urgency_level[0].to_dict(),
}
],
}
else:
raise NotFound('No {} preference found for identity {}.'.format(urgency, identity_name))
@auth.require_scopes([AUTHENTICATION_SCOPE_PREFIX + 'delete_identity_by_name'])
def delete_identity_by_name(identity_name: str) -> None:
session = current_app.db.session
identity = session.query(Identity).filter(Identity.name == identity_name).first()
if identity:
session.delete(identity)
session.commit()
return None
else:
raise NotFound('Identity with name {} not found.'.format(identity_name))
@auth.require_scopes([AUTHENTICATION_SCOPE_PREFIX + 'delete_identity_preferences_by_urgency'])
def delete_identity_preference_by_urgency(identity_name: str, urgency: str) -> None:
session = current_app.db.session
identity_key = session.query(Identity).filter(Identity.name == identity_name).value(Identity.id)
if identity_key:
notification_preference = session.query(Preference)\
.filter(Preference.identity == identity_key)\
.filter(Preference.urgency == urgency)\
.first()
if notification_preference:
session.delete(notification_preference)
session.commit()
return None
else:
raise NotFound('Identity {} has no preferences for urgency level {}.'.format(identity_name, urgency))
else:
raise NotFound('Identity with name {} not found.'.format(identity_name))
| mpl-2.0 | 5,147,447,050,764,588,000 | 35.118012 | 117 | 0.66638 | false |
joshsomma/rice_python_1 | format_time.py | 1 | 1071 | # Testing template for format function in "Stopwatch - The game"
###################################################
# Student should add code for the format function here
# Desired format: A:BC.D, where A is minutes, BC is seconds, and D is tenths of a second
def format(t):
    total = ""
    # t is the elapsed time in tenths of a second
    # minutes: 600 tenths of a second per minute
    a = t // 600
    # tens digit of the seconds
    b = ((t // 10) % 60) // 10
    # ones digit of the seconds
    c = ((t // 10) % 60) % 10
    # tenths of a second
    d = t % 10
    total = str(a) + ":" + str(b) + str(c) + "." + str(d)
    return total
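# Worked example (matches the test below): t = 667 tenths of a second
#   a = 667 // 600               = 1  (minutes)
#   b = ((667 // 10) % 60) // 10 = 0  (tens digit of the seconds)
#   c = ((667 // 10) % 60) % 10  = 6  (ones digit of the seconds)
#   d = 667 % 10                 = 7  (tenths of a second)
#   -> "1:06.7"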
###################################################
# Test code for the format function
# Note that the function should always return a string with
# six characters
print format(0)
print format(7)
print format(17)
print format(60)
print format(63)
print format(214)
print format(599)
print format(600)
print format(602)
print format(667)
print format(1325)
print format(4567)
print format(5999)
###################################################
# Output from test
#0:00.0
#0:00.7
#0:01.7
#0:06.0
#0:06.3
#0:21.4
#0:59.9
#1:00.0
#1:00.2
#1:06.7
#2:12.5
#7:36.7
#9:59.9
| apache-2.0 | -5,548,721,088,610,052,000 | 18.125 | 64 | 0.544351 | false |
RepoReapers/reaper | tests/unit_test/test_csharp.py | 1 | 2846 | import os
import unittest
from attributes.unit_test.discoverer import get_test_discoverer
from tests import get_lsloc, REPOS_PATH
class CSharpTestDiscovererTestCase(unittest.TestCase):
def setUp(self):
self.discoverer = get_test_discoverer('C#')
@unittest.skipIf(not os.path.exists(REPOS_PATH), 'setup.sh not run.')
def test_discover(self):
# Test: Project using NUnit
path = os.path.join(REPOS_PATH, 'choco')
proportion = self.discoverer.discover(path)
self.assertLess(0, proportion)
# Test: Project with no unit tests (when these tests were written)
path = os.path.join(REPOS_PATH, 'ShareX')
proportion = self.discoverer.discover(path)
self.assertEqual(0, proportion)
        # Test: Project in Ruby to simulate a project with no C# source code
path = os.path.join(REPOS_PATH, 'squib')
proportion = self.discoverer.discover(path)
self.assertEqual(0, proportion)
@unittest.skipIf(not os.path.exists(REPOS_PATH), 'setup.sh not run.')
def test_nunit(self):
# Test: Project using NUnit
path = os.path.join(REPOS_PATH, 'choco')
proportion = self.discoverer.__nunit__(
path, get_lsloc(path, self.discoverer.languages)
)
self.assertLess(0, proportion)
# Test: Project not using NUnit
path = os.path.join(REPOS_PATH, 'Epic.Numbers')
proportion = self.discoverer.__nunit__(
path, get_lsloc(path, self.discoverer.languages)
)
self.assertEqual(0, proportion)
@unittest.skipIf(not os.path.exists(REPOS_PATH), 'setup.sh not run.')
def test_vs_unit_testing(self):
# Test: Project using Visual Studio Unit Testing
path = os.path.join(REPOS_PATH, 'aws-sdk-net')
proportion = self.discoverer.__vs_unit_testing__(
path, get_lsloc(path, self.discoverer.languages)
)
self.assertLess(0, proportion)
# Test: Project not using Visual Studio Unit Testing
path = os.path.join(REPOS_PATH, 'choco')
proportion = self.discoverer.__vs_unit_testing__(
path, get_lsloc(path, self.discoverer.languages)
)
self.assertEqual(0, proportion)
@unittest.skipIf(not os.path.exists(REPOS_PATH), 'setup.sh not run.')
def test_xunit(self):
# Test: Project using XUnit
path = os.path.join(REPOS_PATH, 'WebSockets')
proportion = self.discoverer.__xunit__(
path, get_lsloc(path, self.discoverer.languages)
)
self.assertLess(0, proportion)
# Test: Project not using XUnit
path = os.path.join(REPOS_PATH, 'choco')
proportion = self.discoverer.__xunit__(
path, get_lsloc(path, self.discoverer.languages)
)
self.assertEqual(0, proportion)
| apache-2.0 | 5,021,286,121,398,584,000 | 36.946667 | 75 | 0.634575 | false |
Rookfighter/TextAdventure | ActionSystem.py | 1 | 4765 | from EventSystem import Event
import utils
class ActionSystem:
def __init__(self, player, rooms, tuiSystem, eventQueue):
self.__player = player
self.__rooms = rooms
self.__tuiSystem = tuiSystem
self.__eventQueue = eventQueue
# a mapping for input actions to functions
self.__actions = {
'use': self.__use,
'take': self.__take,
'goto': self.__goto,
'examine': self.__examine,
'inventory':self.__inventory,
'room': self.__room
}
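        # Illustrative dispatch (hypothetical object name): an input such as
        # "take lantern" is split by update() into the action "take" and the
        # parameter "lantern", then routed to self.__take("lantern") via this mapping.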
def __findObject(self, param):
currRoom = self.__rooms[self.__player.room]
obj = utils.findObjectByName(currRoom.objects, param)
if not obj is None:
return obj
return utils.findObjectByName(self.__player.inventory, param)
def __findDirection(self, param):
currRoom = self.__rooms[self.__player.room]
paramUp = param.upper()
for direction in currRoom.directions:
roomName = self.__rooms[direction['room']].name
if direction['visible'] and \
(paramUp == direction['name'].upper() or paramUp == roomName.upper()):
return direction
return None
def __createOnUseEvents(self, obj):
currRoom = self.__rooms[self.__player.room]
if not obj['name'] in currRoom.onUse:
self.__tuiSystem.printNoEffect()
else:
events = currRoom.onUse[obj['name']]
for event in events:
self.__eventQueue.append(Event(event['type'], event))
# remove on use events
del currRoom.onUse[obj['name']]
def __use(self, param):
"""
Callback for "use" command. Uses an item either from inventory or
from the current room.
"""
obj = self.__findObject(param)
if obj is None:
self.__tuiSystem.printInvalidObject(param)
return
if obj['useable']:
self.__createOnUseEvents(obj)
else:
self.__tuiSystem.printUnusableObject(obj['name'])
def __take(self, param):
"""
Callback for "take" command. Removes a object from the current room
and adds it to the inventory.
"""
obj = self.__findObject(param)
if obj is None:
self.__tuiSystem.printInvalidObject(param)
return
if obj['takeable']:
self.__rooms[self.__player.room].objects.remove(obj)
self.__player.inventory.append(obj)
obj['takeable'] = False
self.__tuiSystem.printObjectTaken(obj['name'])
else:
self.__tuiSystem.printObjectUntakeable(obj['name'])
def __createOnEnterEvents(self):
currRoom = self.__rooms[self.__player.room]
for event in currRoom.onEnter:
self.__eventQueue.append(Event(event['type'], event))
# remove on enter events
del currRoom.onEnter[:]
def __goto(self, param):
"""
Callback for "goto" command. Moves to the next room by either specifying
the direction or the next room name.
"""
direction = self.__findDirection(param)
if direction is None:
self.__tuiSystem.printInvalidDirection(param)
return
if direction['locked']:
self.__tuiSystem.printDoorLocked()
else:
self.__player.room = direction['room']
self.__createOnEnterEvents()
return
def __examine(self, param):
"""
Callback for "examine" command. Prints the examine field of an object.
"""
obj = self.__findObject(param)
if obj is None:
self.__tuiSystem.printInvalidObject(param)
else:
self.__tuiSystem.printExamine(obj)
def __inventory(self, param):
"""
Callback for "inventory" command. Prints the current inventory.
"""
self.__tuiSystem.printInventory()
def __room(self, param):
"""
Callback for "room" command. Prints the current room.
"""
self.__tuiSystem.printRoom(self.__player.room)
def getActions(self):
return self.__actions.keys()
def update(self, actStr):
self.__player.action = None
action = actStr
param = ''
# try to find a separating space
idx = actStr.find(' ')
if idx > 0:
action = actStr[:idx]
param = actStr[idx+1:]
# check if the given action is valid
if not action in self.__actions:
self.__tuiSystem.printInvalidAction(action)
return
# execute the action
self.__actions[action](param) | mit | 7,252,749,800,623,843,000 | 29.748387 | 87 | 0.557188 | false |
tomacorp/thermapythia | thermpy/saddle.py | 1 | 8354 | #!/Users/toma/python278i/bin/python
# Tom Anderson
# Thermal simulation prototype
# Sun Jul 13 22:30:26 PDT 2014
#
# Thermonous pertains to stimulation by heat.
# The literal ancient Greek is hot minded.
# If you need a name for it, "ephippion" is the ancient Greek word for saddle blanket
# and in Latin is "ephippia". "Ephippos" means on horseback.
#
#
# TODO:
# Make the spice netlist generation use a string buffer and a file.
# Create test harness for sweeps of problem size.
# Hook up PNG files.
# Hook up HDF5 files
# Create ASCII files for layers, materials, and mesh parameters
# Make problem 3D
# Make tests for 2D, put modules into separate files so that code is
# shared with 3D.
# Separate the 2D-specific code in Solver2D.py.
# Separate the 2D-specific code in Spice2D.py.
# Create test harnesses for each module
# Measure xyce memory usage with
# http://stackoverflow.com/questions/13607391/subprocess-memory-usage-in-python
# Xyce uses about 7-10 times the memory and takes about 3 times as long as the raw matrix.
# 826M
# 26 seconds to 108 seconds by adding Xyce.
import subprocess, os
import pstats
import cProfile
import numpy as np
import Layers
import Matls
import Mesh2D
import Solver2D
import Spice2D
import MatrixDiagnostic
import interactivePlot
# This can scale by using a PNG input instead of code
def defineScalableProblem(lyr, matls, x, y):
"""
    defineScalableProblem(Layer lyr, Matls matls, int x, int y)
    Create a sample test problem for thermal analysis that can scale
    to a wide variety of sizes.
    It places the heat source, thermal conductors and boundary conditions
    at positions given as fractions of the mesh size.
The conductivities in the problem are based on the material properties
in the matls object.
"""
mesh = Mesh2D.Mesh(x, y, lyr, matls)
# Heat source
hsx= 0.5
hsy= 0.5
hswidth= 0.25
hsheight= 0.25
heat= 10.0
srcl= round(mesh.width*(hsx-hswidth*0.5))
srcr= round(mesh.width*(hsx+hswidth*0.5))
srct= round(mesh.height*(hsy-hsheight*0.5))
srcb= round(mesh.height*(hsy+hsheight*0.5))
numHeatCells= (srcr - srcl)*(srcb-srct)
heatPerCell= heat/numHeatCells
print "Heat per cell = ", heatPerCell
mesh.field[srcl:srcr, srct:srcb, lyr.heat] = heatPerCell
mesh.field[srcl:srcr, srct:srcb, lyr.resis] = matls.copperCond
# Boundary conditions
mesh.field[0, 0:mesh.height, lyr.isodeg] = 25.0
mesh.field[mesh.width-1, 0:mesh.height, lyr.isodeg] = 25.0
mesh.field[0:mesh.width, 0, lyr.isodeg] = 25.0
mesh.field[0:mesh.width, mesh.height-1, lyr.isodeg] = 25.0
mesh.ifield[0, 0:mesh.height, lyr.isoflag] = 1
mesh.ifield[mesh.width-1, 0:mesh.height, lyr.isoflag] = 1
mesh.ifield[0:mesh.width, 0, lyr.isoflag] = 1
mesh.ifield[0:mesh.width, mesh.height-1, lyr.isoflag] = 1
# Thermal conductors
condwidth= 0.05
cond1l= round(mesh.width*hsx - mesh.width*condwidth*0.5)
cond1r= round(mesh.width*hsx + mesh.width*condwidth*0.5)
cond1t= round(mesh.height*hsy - mesh.height*condwidth*0.5)
cond1b= round(mesh.height*hsy + mesh.height*condwidth*0.5)
mesh.field[0:mesh.width, cond1t:cond1b, lyr.resis] = matls.copperCond
mesh.field[cond1l:cond1r, 0:mesh.height, lyr.resis] = matls.copperCond
return mesh
def defineTinyProblem(lyr, matls):
"""
    defineTinyProblem(Layer lyr, Matls matls)
Create a tiny test problem.
"""
mesh = Mesh2D.Mesh(3, 3, lyr, matls)
mesh.ifield[0:3, 0, lyr.isoflag] = 1
mesh.field[1, 1, lyr.heat] = 2.0
print "Mesh: " + str(mesh)
return mesh
def solveAmesos(solv, mesh, lyr):
solv.solveMatrixAmesos()
solv.loadSolutionIntoMesh(lyr, mesh)
solv.checkEnergyBalance(lyr, mesh)
def solveSpice(spice, mesh, lyr):
spice.finishSpiceNetlist()
proc= spice.runSpiceNetlist()
proc.wait()
spice.readSpiceRawFile(lyr, mesh)
def Main():
lyr = Layers.Layers()
matls = Matls.Matls()
spice= Spice2D.Spice()
showPlots= False
useTinyProblem= False
if useTinyProblem:
mesh = defineTinyProblem(lyr, matls)
else:
mesh = defineScalableProblem(lyr, matls, 5, 5)
mesh.mapMeshToSolutionMatrix(lyr)
solv = Solver2D.Solver(lyr, mesh)
solv.debug = True
solv.useSpice = False
solv.aztec = True
solv.amesos = False
solv.eigen = False
if (solv.useSpice == True):
solv.spiceSim= Spice2D.Spice()
solv.initDebug()
solv.loadMatrix(lyr, mesh, matls, spice)
if (solv.eigen == True):
print "Solving for eigenvalues"
solv.solveEigen()
print "Finished solving for eigenvalues"
if (solv.useSpice == True):
solveSpice(spice, mesh, lyr)
if (solv.aztec == True):
solv.solveMatrixAztecOO(400000)
solv.loadSolutionIntoMesh(lyr, mesh)
solv.checkEnergyBalance(lyr, mesh)
if (solv.amesos == True):
solveAmesos(solv, mesh, lyr)
if (solv.debug == True):
webpage = MatrixDiagnostic.MatrixDiagnosticWebpage(solv, lyr, mesh)
webpage.createWebPage()
if (showPlots == True):
plots= interactivePlot.interactivePlot(lyr, mesh)
plots.plotTemperature()
if (solv.useSpice == True):
plots.plotSpicedeg()
plots.plotLayerDifference(lyr.spicedeg, lyr.deg)
plots.show()
showProfile= True
if showProfile == True:
cProfile.run('Main()', 'restats')
p = pstats.Stats('restats')
p.sort_stats('cumulative').print_stats(30)
else:
Main()
# Times without printing much.
# Printing overhead is probably about 10% in this case.
# 10000 iterations
# 100X100 12sec
# 200x200 69sec
# 300x300 154sec
# 1000 iterations
# 200x200 14sec
# 300x300 34 sec
#
# Design notes:
# The Mesh class
# Has a rectangular Numpy field that represents the problem geometry.
# The Mesh elements are squares in a layered 2D field.
# The field has layers that are describe by the Layers object.
# The layers represent details about the geometry of the materials and boundary conditions.
# Has the size of the problem, such as length, width, and the number of elements.
# Is decorated with material properties from Matls.
# Is decorated with the solution to the problem.
# The Layer class
# Has enumerations that describe the layers in the Mesh
# The Map class
# Includes a Numpy grid that is the size of the Solver.
# Is used to access Solver information
# Because the solver information is not always available on the local node,
# the Map class has a local copy of the Solver input data. Some of this
# data is only needed for debugging and can be turned off to save space.
# The Solver class
#   Loads the problem matrix and calls the Trilinos solvers.
#
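# A minimal wiring sketch distilled from Main() above; the 100x100 mesh size and
# the iteration count are illustrative values only, not recommendations:
#
#   lyr   = Layers.Layers()
#   matls = Matls.Matls()
#   mesh  = defineScalableProblem(lyr, matls, 100, 100)
#   mesh.mapMeshToSolutionMatrix(lyr)
#   solv = Solver2D.Solver(lyr, mesh)
#   solv.loadMatrix(lyr, mesh, matls, Spice2D.Spice())
#   solv.solveMatrixAztecOO(400000)
#   solv.loadSolutionIntoMesh(lyr, mesh)
#   solv.checkEnergyBalance(lyr, mesh)
#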
# This is from http://trilinos.sandia.gov/packages/pytrilinos/UsersGuide.pdf pg 20
# self.x = Epetra.Vector(self.Map)
# self.A.FillComplete()
# MLList = {
# "max levels" : 3,
# "output" : 10,
# "smoother: type" : "symmetric Gauss-Seidel",
# "aggregation: type" : "Uncoupled"
# };
# # Then, we create the preconditioner and compute it,
# Prec = ML.MultiLevelPreconditioner(self.A, False)
# Prec.SetParameterList(MLList)
# Prec.ComputePreconditioner()
# # Finally, we set up the solver, and specifies to use Prec as preconditioner:
# solver = AztecOO.AztecOO(self.A, self.x, self.b)
# solver.SetPrecOperator(Prec)
# solver.SetAztecOption(AztecOO.AZ_solver, AztecOO.AZ_cg);
# solver.SetAztecOption(AztecOO.AZ_output, 16);
# solver.Iterate(1550, 1e-5)
# This segfaults:
# solver.SetAztecOption(AztecOO.AZ_precond, AztecOO.AZ_dom_decomp)
# This does not fail but the solution says that there is no preconditioner
# solver.SetAztecOption(AztecOO.AZ_subdomain_solve, AztecOO.AZ_ilu)
# Complains and fails
# solver.SetParameters({"precond": "dom_decomp",
# "subdomain_solve": "ilu",
# "overlap": 1,
# "graph_fill": 1})
# This complains and fails
# solver.SetAztecOption(AztecOO.AZ_solver, AztecOO.AZ_cg)
# This is incredibly fast but complains some:
# This appears to be the default and it works:
# solver.SetAztecOption(AztecOO.AZ_output, AztecOO.AZ_none)
# Solutions on infinite resistor grids:
# http://www.mathpages.com/home/kmath668/kmath668.htm
# Example slides, interesting python code:
# http://trilinos.org/oldsite/packages/pytrilinos/PyTrilinosTutorial.pdf
| bsd-3-clause | -276,685,447,021,229,220 | 31.007663 | 93 | 0.698109 | false |
noba3/KoTos | addons/plugin.video.movie25/resources/libs/live/ibrod.py | 1 | 5370 | import urllib,urllib2,re,cookielib,sys,os
import xbmc, xbmcgui, xbmcaddon, xbmcplugin
from resources.libs import main
#Mash Up - by Mash2k3 2012.
from t0mm0.common.addon import Addon
addon_id = 'plugin.video.movie25'
selfAddon = xbmcaddon.Addon(id=addon_id)
addon = Addon('plugin.video.movie25', sys.argv)
art = main.art
from resources.universal import watchhistory
wh = watchhistory.WatchHistory('plugin.video.movie25')
def USALIST(murl):
main.GA("Live","USA Live")
main.addPlayL('AETV','aetv',458,'https://raw.githubusercontent.com/mash2k3/MashupArtwork/master/misc/aetv.png','','','','','',secName='USA Live',secIcon=art+'/usalive.png')
main.addPlayL('ABC','abc',458,'https://raw.githubusercontent.com/mash2k3/MashupArtwork/master/misc/abc.png','','','','','',secName='USA Live',secIcon=art+'/usalive.png')
main.addPlayL('HBO','hbo',458,'https://raw.githubusercontent.com/mash2k3/MashupArtwork/master/misc/hbo.png','','','','','',secName='USA Live',secIcon=art+'/usalive.png')
main.addPlayL('NBA TV','nbatv',458,'https://raw.githubusercontent.com/mash2k3/MashupArtwork/master/misc/nbatv.png','','','','','',secName='USA Live',secIcon=art+'/usalive.png')
main.addPlayL('NBC','nbc',458,'https://raw.githubusercontent.com/mash2k3/MashupArtwork/master/misc/nbc.png','','','','','',secName='USA Live',secIcon=art+'/usalive.png')
main.addPlayL('Nickelodeon','nick',458,'https://raw.githubusercontent.com/mash2k3/MashupArtwork/master/misc/nick.png','','','','','',secName='USA Live',secIcon=art+'/usalive.png')
main.addPlayL('SPIKE','spike',458,'https://raw.githubusercontent.com/mash2k3/MashupArtwork/master/misc/spike.png','','','','','',secName='USA Live',secIcon=art+'/usalive.png')
main.addPlayL('SYFY','syfy',458,'https://raw.githubusercontent.com/mash2k3/MashupArtwork/master/misc/syfy.png','','','','','',secName='USA Live',secIcon=art+'/usalive.png')
main.addPlayL('TBS','tbs',458,'https://raw.githubusercontent.com/mash2k3/MashupArtwork/master/misc/tbs.png','','','','','',secName='USA Live',secIcon=art+'/usalive.png')
main.addPlayL('TNT','tnt',458,'https://raw.githubusercontent.com/mash2k3/MashupArtwork/master/misc/tnt.png','','','','','',secName='USA Live',secIcon=art+'/usalive.png')
main.addPlayL('USA','usa',458,'https://raw.githubusercontent.com/mash2k3/MashupArtwork/master/misc/usa.png','','','','','',secName='USA Live',secIcon=art+'/usalive.png')
main.addPlayL('ABC FAMILY','abcfam',458,'https://raw.githubusercontent.com/mash2k3/MashupArtwork/master/misc/abcfam.png','','','','','',secName='USA Live',secIcon=art+'/usalive.png')
main.addPlayL('AMC','amc',458,'https://raw.githubusercontent.com/mash2k3/MashupArtwork/master/misc/amc.png','','','','','',secName='USA Live',secIcon=art+'/usalive.png')
main.addPlayL('Bravo','bravo',458,'https://raw.githubusercontent.com/mash2k3/MashupArtwork/master/misc/bravo.png','','','','','',secName='USA Live',secIcon=art+'/usalive.png')
main.addPlayL('Cartoon Network','cn',458,'https://raw.githubusercontent.com/mash2k3/MashupArtwork/master/misc/cn.png','','','','','',secName='USA Live',secIcon=art+'/usalive.png')
main.addPlayL('CBS','cbs',458,'https://raw.githubusercontent.com/mash2k3/MashupArtwork/master/misc/cbs.png','','','','','',secName='USA Live',secIcon=art+'/usalive.png')
main.addPlayL('CW','cw',458,'https://raw.githubusercontent.com/mash2k3/MashupArtwork/master/misc/cw.png','','','','','',secName='USA Live',secIcon=art+'/usalive.png')
main.addPlayL('ESPN','espn',458,'https://raw.githubusercontent.com/mash2k3/MashupArtwork/master/misc/espn.png','','','','','',secName='USA Live',secIcon=art+'/usalive.png')
main.addPlayL('FOX','fox',458,'https://raw.githubusercontent.com/mash2k3/MashupArtwork/master/misc/fox.png','','','','','',secName='USA Live',secIcon=art+'/usalive.png')
main.addPlayL('FX','fx',458,'https://raw.githubusercontent.com/mash2k3/MashupArtwork/master/misc/fx.png','','','','','',secName='USA Live',secIcon=art+'/usalive.png')
main.addPlayL('Special Event 1','event1',458,art+'/usalive.png','','','','','',secName='USA Live',secIcon=art+'/usalive.png')
main.addPlayL('Special Event 2','event2',458,art+'/usalive.png','','','','','',secName='USA Live',secIcon=art+'/usalive.png')
def USALINK(mname,murl,thumb):
main.GA("USA Live","Watched")
ok=True
playlist = xbmc.PlayList(xbmc.PLAYLIST_VIDEO)
playlist.clear()
xbmc.executebuiltin("XBMC.Notification(Please Wait!,Playing Link,1000)")
stream_url ='rtmp://mob.golive.pw:1935/tumadre/ playpath='+murl+'.stream'
listitem = xbmcgui.ListItem(thumbnailImage=thumb)
infoL={'Title': mname, 'Genre': 'Live'}
from resources.universal import playbackengine
player = playbackengine.PlayWithoutQueueSupport(resolved_url=stream_url, addon_id=addon_id, video_type='movie', title=mname,season='', episode='', year='',img=thumb,infolabels=infoL, watchedCallbackwithParams='',imdb_id='')
#WatchHistory
if selfAddon.getSetting("whistory") == "true":
wh.add_item(mname+' '+'[COLOR green]USA Live[/COLOR]', sys.argv[0]+sys.argv[2], infolabels='', img=thumb, fanart='', is_folder=False)
return ok
| gpl-2.0 | 5,477,519,663,359,080,000 | 93.210526 | 231 | 0.669088 | false |
userzimmermann/robotframework-python3 | src/robot/output/stdoutlogsplitter.py | 1 | 2106 | # Copyright 2008-2014 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from robot.utils import format_time
from .loggerhelper import Message, LEVELS
class StdoutLogSplitter(object):
"""Splits messages logged through stdout (or stderr) into Message objects"""
    _split_from_levels = re.compile(r'^(?:\*'
                                    r'(%s|HTML)'          # Level
                                    r'(:\d+(?:\.\d+)?)?'  # Optional timestamp
                                    r'\*)' % '|'.join(LEVELS), re.MULTILINE)
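    # Illustrative example (format assumed from the regex above): the output
    #   "*INFO* first message*WARN:76153600.123* second message"
    # splits into an INFO message without a timestamp and a WARN message whose
    # timestamp is parsed from the milliseconds following the colon.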
def __init__(self, output):
self._messages = list(self._get_messages(output.strip()))
def _get_messages(self, output):
for level, timestamp, msg in self._split_output(output):
if timestamp:
timestamp = self._format_timestamp(timestamp[1:])
yield Message(msg.strip(), level, timestamp=timestamp)
def _split_output(self, output):
tokens = self._split_from_levels.split(output)
tokens = self._add_initial_level_and_time_if_needed(tokens)
for i in range(0, len(tokens), 3):
yield tokens[i:i+3]
def _add_initial_level_and_time_if_needed(self, tokens):
if self._output_started_with_level(tokens):
return tokens[1:]
return ['INFO', None] + tokens
def _output_started_with_level(self, tokens):
return tokens[0] == ''
def _format_timestamp(self, millis):
return format_time(float(millis)/1000, millissep='.')
def __iter__(self):
return iter(self._messages)
| apache-2.0 | 7,499,666,074,120,987,000 | 35.947368 | 80 | 0.622507 | false |
ibayer/fastFM-fork | fastFM/tests/test_als.py | 1 | 5323 | # Author: Immanuel Bayer
# License: BSD 3 clause
import numpy as np
import scipy.sparse as sp
from sklearn import metrics
from fastFM import als
from numpy.testing import assert_almost_equal
from fastFM.datasets import make_user_item_regression
from sklearn.metrics import mean_squared_error
from sklearn.utils.testing import assert_almost_equal
def get_test_problem(task='regression'):
X = sp.csc_matrix(np.array([[6, 1],
[2, 3],
[3, 0],
[6, 1],
[4, 5]]), dtype=np.float64)
y = np.array([298, 266, 29, 298, 848], dtype=np.float64)
V = np.array([[6, 0],
[5, 8]], dtype=np.float64)
w = np.array([9, 2], dtype=np.float64)
w0 = 2
if task == 'classification':
y_labels = np.ones_like(y)
y_labels[y < np.median(y)] = -1
y = y_labels
return w0, w, V, y, X
def get_small_data():
X = sp.csc_matrix(np.array([[1, 2],
[3, 4],
[5, 6]]), dtype=np.float64)
y = np.array([600, 2800, 10000], dtype=np.float64)
return X, y
def _test_fm_regression_only_w0():
X, y = get_small_data()
fm = als.FMRegression(n_iter=0, l2_reg_w=0, l2_reg_V=0, rank=0)
fm.ignore_w = True
fm.w0_ = 2
fm.fit(X, y, warm_start=True)
assert_almost_equal(fm.w0_, 2, 6)
fm = als.FMRegression(n_iter=1, l2_reg_w=0, l2_reg_V=0, rank=0)
fm.ignore_w = True
fm.w0_ = 2
fm.fit(X, y, warm_start=True)
assert_almost_equal(fm.w0_, 4466.6666666666661, 6)
def _test_raise_when_input_is_dense():
fm = als.FMRegression(n_iter=0, l2_reg_w=0, l2_reg_V=0, rank=0)
X = np.arange(3, 4, dtype=np.float64)
y = np.arange(3, dtype=np.float64)
fm.fit(X, y, warm_start=True)
def test_fm_linear_regression():
X, y = get_small_data()
fm = als.FMRegression(n_iter=1, l2_reg_w=1, l2_reg_V=1, rank=0)
fm.fit(X, y)
def test_fm_regression():
w0, w, V, y, X = get_test_problem()
fm = als.FMRegression(n_iter=1000, l2_reg_w=0, l2_reg_V=0, rank=2)
fm.fit(X, y)
y_pred = fm.predict(X)
assert_almost_equal(y_pred, y, 3)
# check different size
fm = als.FMRegression(n_iter=1000, l2_reg_w=0, l2_reg_V=0, rank=5)
X_big = sp.hstack([X,X])
fm.fit(X_big, y)
y_pred = fm.predict(X_big[:2,])
def test_fm_classification():
w0, w, V, y, X = get_test_problem(task='classification')
fm = als.FMClassification(n_iter=1000,
init_stdev=0.1, l2_reg_w=0, l2_reg_V=0, rank=2)
fm.fit(X, y)
y_pred = fm.predict(X)
print(y_pred)
assert metrics.accuracy_score(y, y_pred) > 0.95
# check different size
fm.fit(X[:2,], y[:2])
def test_als_warm_start():
X, y, coef = make_user_item_regression(label_stdev=0)
from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.33, random_state=42)
X_train = sp.csc_matrix(X_train)
X_test = sp.csc_matrix(X_test)
fm = als.FMRegression(n_iter=10, l2_reg_w=0, l2_reg_V=0, rank=2)
fm.fit(X_train, y_train)
y_pred = fm.predict(X_test)
error_10_iter = mean_squared_error(y_pred, y_test)
fm = als.FMRegression(n_iter=5, l2_reg_w=0, l2_reg_V=0, rank=2)
fm.fit(X_train, y_train)
print(fm.iter_count)
y_pred = fm.predict(X_test)
error_5_iter = mean_squared_error(y_pred, y_test)
fm.fit(sp.csc_matrix(X_train), y_train, n_more_iter=5)
print(fm.iter_count)
y_pred = fm.predict(X_test)
error_5_iter_plus_5 = mean_squared_error(y_pred, y_test)
print(error_5_iter, error_5_iter_plus_5, error_10_iter)
assert error_10_iter == error_5_iter_plus_5
def test_warm_start_path():
X, y, coef = make_user_item_regression(label_stdev=.4)
from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.33, random_state=42)
X_train = sp.csc_matrix(X_train)
X_test = sp.csc_matrix(X_test)
n_iter = 10
rank = 4
seed = 333
step_size = 1
l2_reg_w = 0
l2_reg_V = 0
fm = als.FMRegression(n_iter=0, l2_reg_w=l2_reg_w,
l2_reg_V=l2_reg_V, rank=rank, random_state=seed)
# initalize coefs
fm.fit(X_train, y_train)
rmse_train = []
rmse_test = []
for i in range(1, n_iter):
fm.fit(X_train, y_train, n_more_iter=step_size)
rmse_train.append(np.sqrt(mean_squared_error(fm.predict(X_train), y_train)))
rmse_test.append(np.sqrt(mean_squared_error(fm.predict(X_test), y_test)))
print('------- restart ----------')
values = np.arange(1, n_iter)
rmse_test_re = []
rmse_train_re = []
for i in values:
fm = als.FMRegression(n_iter=i, l2_reg_w=l2_reg_w,
l2_reg_V=l2_reg_V, rank=rank, random_state=seed)
fm.fit(X_train, y_train)
rmse_test_re.append(np.sqrt(mean_squared_error(fm.predict(X_test), y_test)))
rmse_train_re.append(np.sqrt(mean_squared_error(fm.predict(X_train), y_train)))
assert_almost_equal(rmse_train, rmse_train_re)
assert_almost_equal(rmse_test, rmse_test_re)
if __name__ == '__main__':
#test_fm_regression_only_w0()
test_fm_linear_regression()
| bsd-3-clause | 8,814,023,319,158,860,000 | 30.128655 | 87 | 0.588578 | false |
opencord/voltha | tests/utests/voltha/extensions/omci/test_mib_sync.py | 1 | 1024 | #
# Copyright 2017 the original author or authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from unittest import TestCase, main
from mock.mock_adapter_agent import MockAdapterAgent
class TestMibSync(TestCase):
"""
Test the MIB Synchronizer State Machine
"""
def setUp(self):
self.adapter_agent = MockAdapterAgent()
def tearDown(self):
if self.adapter_agent is not None:
self.adapter_agent.tearDown()
# TODO: Add tests
if __name__ == '__main__':
main()
| apache-2.0 | 5,292,024,352,594,160,000 | 26.675676 | 74 | 0.708008 | false |
janiskuehn/component-based-recognition | project/plot.py | 1 | 13515 | from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.ticker as tik
import os
from matplotlib import cm
from neural import NeuralState
def plot_weigth_matrix_bars(m: np.ndarray):
"""
Plot a weight matrix as 3d bar diagram
:param m: Weight matrix
:return: -
"""
# Create a figure for plotting the data as a 3D histogram.
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# Create an X-Y mesh of the same dimension as the 2D data
x_s, y_s = np.meshgrid(np.arange(m.shape[1]), np.arange(m.shape[0]))
x_s = x_s.flatten()
y_s = y_s.flatten()
z_data = m.flatten()
ax.bar(x_s, y_s, zs=z_data, zdir='y', alpha=0.8)
ax.set_xlabel('')
ax.set_ylabel('')
ax.set_zlabel('Weight')
plt.show()
def hinton(matrix: np.ndarray, file: str = "", max_weight=None):
"""
Draw Hinton diagram for visualizing a weight matrix.
:param matrix: Input 2D matrix.
:param file: File path for saving the plot.
:param max_weight: Manually set upper limit for values.
:return: Shows the Hinton diagram as new window or saves it to a file.
"""
ax = plt.gca()
if not max_weight:
max_weight = 2 ** np.ceil(np.log(np.abs(matrix).max()) / np.log(2))
ax.patch.set_facecolor('none')
ax.set_aspect('equal', 'box')
ax.xaxis.set_major_locator(plt.NullLocator())
ax.yaxis.set_major_locator(plt.NullLocator())
for (x, y), w in np.ndenumerate(matrix):
color = 'white' if w > 0 else 'black'
size = np.sqrt(np.abs(w) / max_weight)
rect = plt.Rectangle([x - size / 2, y - size / 2], size, size,
facecolor=color, edgecolor=color)
ax.add_patch(rect)
ax.autoscale_view()
ax.invert_yaxis()
if file == "":
plt.show()
else:
plt.savefig(file)
plt.close()
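# Minimal usage sketch for the Hinton diagram helper above (values assumed):
#
#   import numpy as np
#   hinton(np.random.randn(8, 8))                     # display interactively
#   hinton(np.random.randn(8, 8), file="hinton.png")  # or write to disk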
def height_plot(matrix: np.ndarray, file: str = ""):
"""
Draw temperature height map diagram.
:param matrix: Input 2D matrix.
:param file: File path for saving the plot.
:return: Shows the height map diagram as new window or saves it to a file.
"""
# Create heights in the grid
z = matrix
# Build a figure with 2 subplots, the first is 3D
fig = plt.figure()
ax2 = fig.add_subplot(111)
im = ax2.imshow(z, cmap="hot", interpolation='none')
ax2.invert_yaxis()
# add an explanatory colour bar
plt.colorbar(im, orientation='vertical')
if file == "":
plt.show()
else:
plt.savefig(file)
plt.close()
def combined_plot1(weights: list, times: list, dweights: list, stepsize: int,
neurons: np.ndarray, hopfield: np.ndarray, file: str = None, metadata: str = ""):
"""
    :param weights: list of weight matrices sampled over time
    :param times: list of time values matching the entries of weights
    :param dweights: list of weight-change (deviation) matrices between samples
    :param stepsize: plot only every stepsize-th entry of the input lists
    :param neurons: 2D array marking the active neurons
    :param hopfield: reference Hopfield weight matrix
    :param file: output file path; if not given, the plot is shown interactively
    :param metadata: title string placed above the figure
    :return: -
"""
l = len(weights)
w = weights[0::stepsize]
c_w = len(w)
dw = [sum(dweights[i:i+stepsize]) for i in range(0, l - 1, stepsize)]
c_dw = len(dw)
l_ax = max(4, c_w + 1)
# Build a figure with 2 subplots, the first is 3D
fig, axes = plt.subplots(ncols=l_ax, nrows=4)
size = 5
fig.set_size_inches(l_ax * size, 3 * size)
#
# Title
fig.suptitle(metadata, fontsize=14, fontweight='bold')
for i in range(2, l_ax - 2):
fig.delaxes(axes[0][i])
#
# Neuron Map
major_locator_n = tik.MultipleLocator(neurons.shape[0] // 2)
major_formatter_n = tik.FormatStrFormatter('%d')
minor_locator_n = tik.MultipleLocator(1)
ax = axes[0][-1]
z = neurons
im = ax.imshow(z, cmap="hot", interpolation='none')
ax.set_aspect('equal')
ax.set_title("Active Neurons")
ax.yaxis.set_major_locator(major_locator_n)
ax.yaxis.set_major_formatter(major_formatter_n)
ax.yaxis.set_minor_locator(minor_locator_n)
ax.xaxis.set_major_locator(major_locator_n)
ax.xaxis.set_major_formatter(major_formatter_n)
ax.xaxis.set_minor_locator(minor_locator_n)
ax = axes[0][-2]
ax.set_aspect(8)
fig.colorbar(im, orientation='vertical', cax=ax)
#
# Hopfield
major_locator_w = tik.MultipleLocator(hopfield.shape[0] // 2)
major_formatter_w = tik.FormatStrFormatter('%d')
minor_locator_w = tik.MultipleLocator(hopfield.shape[0] // 4)
ax = axes[0][0]
z = hopfield
im = ax.imshow(z, cmap="hot", interpolation='none')
ax.invert_yaxis()
ax.set_aspect('equal')
ax.set_title("Hopfield weights")
ax.yaxis.tick_right()
ax.yaxis.set_major_locator(major_locator_w)
ax.yaxis.set_major_formatter(major_formatter_w)
ax.yaxis.set_minor_locator(minor_locator_w)
ax.xaxis.set_major_locator(major_locator_w)
ax.xaxis.set_major_formatter(major_formatter_w)
ax.xaxis.set_minor_locator(minor_locator_w)
ax = axes[0][1]
ax.set_aspect(8)
fig.colorbar(im, orientation='vertical', cax=ax)
ax.yaxis.tick_left()
#
# Weights & Weights per neuron
weight_min = np.min(w)
weight_max = np.max(w)
for i in range(c_w):
ax = axes[1][i]
z = w[i]
im = ax.imshow(z, cmap="hot", interpolation='none', vmin=weight_min, vmax=weight_max)
ax.invert_yaxis()
ax.set_aspect('equal')
if i == 0:
ax.yaxis.set_major_locator(major_locator_w)
ax.yaxis.set_major_formatter(major_formatter_w)
ax.yaxis.set_minor_locator(minor_locator_w)
ax.xaxis.set_major_locator(major_locator_w)
ax.xaxis.set_major_formatter(major_formatter_w)
ax.xaxis.set_minor_locator(minor_locator_w)
ax.set_title("Weights: t = " + '% 4.2f' % times[i * stepsize])
else:
ax.set_axis_off()
ax.set_title("t = " + '% 4.2f' % times[i * stepsize])
ax = axes[3][i]
weight_per_neuron(ax, z, neurons.flatten())
if i != 0:
ax.set_axis_off()
else:
ax.spines['top'].set_color('none')
ax.spines['right'].set_color('none')
ax.set_title("Weight per neuron (colored: only active):")
ax = axes[1][-1]
ax.set_aspect(8)
fig.colorbar(im, orientation='vertical', cax=ax, extend='both')
fig.delaxes(axes[3][-1])
#
# dWeights
dweight_min = np.min(dw)
dweight_max = np.max(dw)
for i in range(c_dw):
ax = axes[2][i]
z = dw[i]
im = ax.imshow(z, cmap="hot", interpolation='none', vmin=dweight_min, vmax=dweight_max)
ax.invert_yaxis()
ax.set_aspect('equal')
if i == 0:
ax.yaxis.set_major_locator(major_locator_w)
ax.yaxis.set_major_formatter(major_formatter_w)
ax.yaxis.set_minor_locator(minor_locator_w)
ax.xaxis.set_major_locator(major_locator_w)
ax.xaxis.set_major_formatter(major_formatter_w)
ax.xaxis.set_minor_locator(minor_locator_w)
ax.set_title("Deviations:")
else:
ax.set_axis_off()
fig.delaxes(axes[2][-2])
ax = axes[2][-1]
ax.set_aspect(8)
fig.colorbar(im, orientation='vertical', cax=ax, extend='both')
#
# Finish
fig.tight_layout()
if not file:
plt.show()
else:
i = 0
while os.path.exists('{}_{:d}.png'.format(file, i)):
i += 1
file = '{}_{:d}.png'.format(file, i)
print("Saving results to: " + file)
plt.savefig(file, dpi=100)
plt.close()
def combined_learning_plot_patternwise(weights: list, times: list, dweights: list, neurons_t: list, neuralstates: list,
spp: int, rot: int, file: str = None):
c_pat = len(neuralstates)
l_ax = c_pat + 2
w = weights[0::spp]
t = times[0::spp]
n = neurons_t[0::spp]
metadata = ""
#
# Prepare plot
fig, axes = plt.subplots(ncols=l_ax, nrows=3)
size = 5
fig.set_size_inches(l_ax * size, 3 * size)
#
# Title
ax = axes[0][0]
ax.set_title(metadata, fontsize=14, fontweight='bold')
ax.set_axis_off()
#
# Plots
state_0: NeuralState = neuralstates[0]
weight_min = np.min(w)
weight_max = np.max(w)
major_locator_w = tik.MultipleLocator(state_0.N // 2)
major_formatter_w = tik.FormatStrFormatter('%d')
minor_locator_w = tik.MultipleLocator(state_0.N // 4)
for i in range(l_ax - 1):
#
# Neuron Map
if 0 < i < len(n) + 1:
ax = axes[0][i]
state: NeuralState = n[i-1]
z = state.as_matrix()
if i == 1:
neural_map(ax, z, True)
ax.set_title("Active Neurons")
else:
neural_map(ax, z, False)
#
# Weights
ax_w = axes[1][i]
z = w[i]
im_w = ax_w.imshow(z, cmap="hot", interpolation='none', vmin=weight_min, vmax=weight_max)
ax_w.invert_yaxis()
ax_w.set_aspect('equal')
if i == 0:
ax_w.yaxis.set_major_locator(major_locator_w)
ax_w.yaxis.set_major_formatter(major_formatter_w)
ax_w.yaxis.set_minor_locator(minor_locator_w)
ax_w.xaxis.set_major_locator(major_locator_w)
ax_w.xaxis.set_major_formatter(major_formatter_w)
ax_w.xaxis.set_minor_locator(minor_locator_w)
ax_w.set_title("Weights: t = " + '% 4.2f' % 0)
else:
ax_w.set_axis_off()
ax_w.set_title("t = " + '% 4.2f' % t[i])
#
# Weights per neuron
ax = axes[2][i]
if i == 0:
ax.spines['top'].set_color('none')
ax.spines['right'].set_color('none')
ax.set_title("Weight per neuron (colored: only active):")
wpn_n = np.zeros(state_0.N)
else:
ax.set_axis_off()
wpn_n = state.vec
weight_per_neuron(ax, z, wpn_n)
#
# Colorbar
if i == l_ax - 2:
ax = axes[1][-1]
ax.set_aspect(8)
fig.colorbar(im_w, orientation='vertical', cax=ax, extend='both')
#
# Empty axes
ax = axes[0][-1]
fig.delaxes(ax)
ax = axes[2][-1]
fig.delaxes(ax)
#
# Finish
fig.tight_layout()
if not file:
plt.show()
else:
i = 0
while os.path.exists('{}_{:d}.png'.format(file, i)):
i += 1
file = '{}_{:d}.png'.format(file, i)
print("Saving results to: " + file)
plt.savefig(file, dpi=100)
plt.close()
def weight_per_neuron(ax: plt.Axes, w: np.ndarray, neurons: np.ndarray):
width = 0.7
num = w.shape[0]
w_n, w_n_a, x_n_a = [], [], []
x_n = np.arange(1, num + 1)
for i in range(num):
w_n.append(np.sum(w[i]))
if neurons[i] == 1:
sm = 0
for j in range(num):
sm += w[i][j] if neurons[j] == 1 else 0
w_n_a.append(sm)
x_n_a.append(x_n[i])
w_max = np.max(w_n)
# customize layout
step = (num // 10)
steps = x_n[0::max(1, step)]
steps = np.array(steps) - 1
steps[0] = 1
if steps[-1] != x_n[-1]:
steps = np.append(steps, x_n[-1])
major_locator_n = tik.FixedLocator(steps)
major_locator_n.view_limits(1, num)
minor_locator_n = tik.MultipleLocator(1)
ax.xaxis.set_major_locator(major_locator_n)
ax.xaxis.set_minor_locator(minor_locator_n)
ax.set_xlim(0, num + 1)
ax.set_ylim(0, max(2, w_max))
# colormap for active neurons:
y = np.array(w_n_a) - 1
sp = cm.get_cmap("spring").reversed()
atu = cm.get_cmap("autumn").reversed()
colors = [atu(abs(y_i) / 1) if y_i < 0 else sp(y_i / max(1, w_max - 1)) for y_i in y]
# red dash line:
ax.plot((0, num + 1), (1, 1), 'red', linestyle='--')
# gray bars for inactive neurons
ax.bar(x_n, w_n, width, color='gray')
# colored active neurons
ax.bar(x_n_a, w_n_a, width, color=colors)
def neural_map(ax: plt.Axes, neurons: np.ndarray, axes: bool):
l = neurons.shape[0]
if axes:
major_locator_n = tik.MultipleLocator(l // 2)
major_formatter_n = tik.FormatStrFormatter('%d')
minor_locator_n = tik.MultipleLocator(1)
ax.yaxis.set_major_locator(major_locator_n)
ax.yaxis.set_major_formatter(major_formatter_n)
ax.yaxis.set_minor_locator(minor_locator_n)
ax.xaxis.set_major_locator(major_locator_n)
ax.xaxis.set_major_formatter(major_formatter_n)
ax.xaxis.set_minor_locator(minor_locator_n)
else:
ax.xaxis.set_major_locator(tik.NullLocator())
ax.xaxis.set_minor_locator(tik.NullLocator())
ax.yaxis.set_major_locator(tik.NullLocator())
ax.yaxis.set_minor_locator(tik.NullLocator())
ax.imshow(neurons, cmap="hot", interpolation='none')
ax.set_aspect('equal')
ma = l - 0.5
mi = -0.5
ax.set_xlim(mi, ma)
ax.set_ylim(mi, ma)
for i in range(1, l):
xy = i - 0.5
ax.plot((mi, ma), (xy, xy), 'red', linestyle='-')
ax.plot((xy, xy), (mi, ma), 'red', linestyle='-')
| gpl-3.0 | 6,391,659,414,283,574,000 | 26.525458 | 119 | 0.550351 | false |
yosi-dediashvili/SubiT | tests/SubProvidersTests/OpenSubtitlesSubProviderTest.py | 1 | 3995 | """
Test classes for OpenSubtitlesProvider.
    The classes derive all the tests from BaseSubProviderTest.
"""
import unittest
import BaseSubProviderTest
class Test_all_OpenSubtitlesProviderTest(
unittest.TestCase, BaseSubProviderTest.BaseSubProviderTest):
def setUp(self):
from SubProviders.OpenSubtitles.all_OpenSubtitlesProvider import \
OpenSubtitlesProvider
BaseSubProviderTest.BaseSubProviderTest.__init__(
self,
OpenSubtitlesProvider.OpenSubtitlesProvider())
class Test_eng_OpenSubtitlesProviderTest(
unittest.TestCase, BaseSubProviderTest.BaseSubProviderTest):
def setUp(self):
from SubProviders.OpenSubtitles.eng_OpenSubtitlesProvider import \
OpenSubtitlesProvider
BaseSubProviderTest.BaseSubProviderTest.__init__(
self,
OpenSubtitlesProvider.OpenSubtitlesProvider())
class Test_heb_OpenSubtitlesProviderTest(
unittest.TestCase, BaseSubProviderTest.BaseSubProviderTest):
def setUp(self):
from SubProviders.OpenSubtitles.heb_OpenSubtitlesProvider import \
OpenSubtitlesProvider
BaseSubProviderTest.BaseSubProviderTest.__init__(
self,
OpenSubtitlesProvider.OpenSubtitlesProvider())
class Test_nor_OpenSubtitlesProviderTest(
unittest.TestCase, BaseSubProviderTest.BaseSubProviderTest):
def setUp(self):
from SubProviders.OpenSubtitles.nor_OpenSubtitlesProvider import \
OpenSubtitlesProvider
BaseSubProviderTest.BaseSubProviderTest.__init__(
self,
OpenSubtitlesProvider.OpenSubtitlesProvider())
class Test_rus_OpenSubtitlesProviderTest(
unittest.TestCase, BaseSubProviderTest.BaseSubProviderTest):
def setUp(self):
from SubProviders.OpenSubtitles.rus_OpenSubtitlesProvider import \
OpenSubtitlesProvider
BaseSubProviderTest.BaseSubProviderTest.__init__(
self,
OpenSubtitlesProvider.OpenSubtitlesProvider())
class Test_spa_OpenSubtitlesProviderTest(
unittest.TestCase, BaseSubProviderTest.BaseSubProviderTest):
def setUp(self):
from SubProviders.OpenSubtitles.spa_OpenSubtitlesProvider import \
OpenSubtitlesProvider
BaseSubProviderTest.BaseSubProviderTest.__init__(
self,
OpenSubtitlesProvider.OpenSubtitlesProvider())
class Test_tur_OpenSubtitlesProviderTest(
unittest.TestCase, BaseSubProviderTest.BaseSubProviderTest):
def setUp(self):
from SubProviders.OpenSubtitles.tur_OpenSubtitlesProvider import \
OpenSubtitlesProvider
BaseSubProviderTest.BaseSubProviderTest.__init__(
self,
OpenSubtitlesProvider.OpenSubtitlesProvider())
class Test_slo_OpenSubtitlesProviderTest(
unittest.TestCase, BaseSubProviderTest.BaseSubProviderTest):
def setUp(self):
from SubProviders.OpenSubtitles.slo_OpenSubtitlesProvider import \
OpenSubtitlesProvider
BaseSubProviderTest.BaseSubProviderTest.__init__(
self,
OpenSubtitlesProvider.OpenSubtitlesProvider())
class Test_cze_OpenSubtitlesProviderTest(
unittest.TestCase, BaseSubProviderTest.BaseSubProviderTest):
def setUp(self):
from SubProviders.OpenSubtitles.cze_OpenSubtitlesProvider import \
OpenSubtitlesProvider
BaseSubProviderTest.BaseSubProviderTest.__init__(
self,
OpenSubtitlesProvider.OpenSubtitlesProvider())
class Test_bul_OpenSubtitlesProviderTest(
unittest.TestCase, BaseSubProviderTest.BaseSubProviderTest):
def setUp(self):
from SubProviders.OpenSubtitles.bul_OpenSubtitlesProvider import \
OpenSubtitlesProvider
BaseSubProviderTest.BaseSubProviderTest.__init__(
self,
OpenSubtitlesProvider.OpenSubtitlesProvider()) | gpl-3.0 | 4,231,381,265,595,466,000 | 38.373737 | 74 | 0.709387 | false |
maxcutler/Courant-News | courant/core/assets/management/commands/assets.py | 1 | 8722 | """Manage assets.
Usage:
./manage.py assets rebuild
Rebuild all known assets; this requires tracking to be enabled:
Only assets that have previously been built and tracked are
considered "known".
./manage.py assets rebuild --parse-templates
Try to find as many of the project's templates (hopefully all),
and check them for the use of assets. Rebuild all the assets
discovered in this way. If tracking is enabled, the tracking
database will be replaced by the newly found assets.
"""
import os
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from django import template
from courant.core.assets.conf import settings
from courant.core.assets.templatetags.assets import AssetsNode as AssetsNodeO
from django.templatetags.assets import AssetsNode as AssetsNodeMapped
from courant.core.assets.merge import merge
from courant.core.assets.tracker import get_tracker
try:
import jinja2
except ImportError:
jinja2 = None
else:
from django_assets.jinja.extension import AssetsExtension
# Prepare a Jinja2 environment we can later use for parsing.
# If not specified by the user, put in there at least our own
# extension, which we will need most definitely to achieve anything.
_jinja2_extensions = getattr(settings, 'ASSETS_JINJA2_EXTENSIONS')
if not _jinja2_extensions:
_jinja2_extensions = [AssetsExtension.identifier]
jinja2_env = jinja2.Environment(extensions=_jinja2_extensions)
def _shortpath(abspath):
"""Make an absolute path relative to the project's settings module,
which would usually be the project directory."""
b = os.path.dirname(
os.path.normpath(
os.sys.modules[settings.SETTINGS_MODULE].__file__))
p = os.path.normpath(abspath)
return p[len(os.path.commonprefix([b, p])):]
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--parse-templates', action='store_true',
help='Rebuild assets found by parsing project templates '
'instead of using the tracking database.'),
make_option('--verbosity', action='store', dest='verbosity',
default='1', type='choice', choices=['0', '1', '2'],
help='Verbosity; 0=minimal output, 1=normal output, 2=all output'),
)
help = 'Manage assets.'
args = 'subcommand'
requires_model_validation = True
def handle(self, *args, **options):
if len(args) == 0:
raise CommandError('You need to specify a subcommand')
elif len(args) > 1:
raise CommandError('Invalid number of subcommands passed: %s' %
", ".join(args))
else:
command = args[0]
options['verbosity'] = int(options['verbosity'])
if command == 'rebuild':
if options.get('parse_templates') or not get_tracker():
assets = self._parse_templates(options)
else:
assets = dict()
self._rebuild_assets(options, assets)
else:
raise CommandError('Unknown subcommand: %s' % command)
def _rebuild_assets(self, options, assets):
for output, data in assets.items():
if options.get('verbosity') >= 1:
print "Building asset: %s" % output
try:
merge(data['sources'], output, data['filter'])
except Exception, e:
print self.style.ERROR("Failed, error was: %s" % e)
def _parse_templates(self, options):
# build a list of template directories based on configured loaders
template_dirs = []
if 'django.template.loaders.filesystem.load_template_source' in settings.TEMPLATE_LOADERS:
template_dirs.extend(settings.TEMPLATE_DIRS)
if 'django.template.loaders.app_directories.load_template_source' in settings.TEMPLATE_LOADERS:
from django.template.loaders.app_directories import app_template_dirs
template_dirs.extend(app_template_dirs)
found_assets = {}
# find all template files
if options.get('verbosity') >= 1:
print "Searching templates..."
total_count = 0
for template_dir in template_dirs:
for directory, _ds, files in os.walk(template_dir):
for filename in files:
if filename.endswith('.html'):
total_count += 1
tmpl_path = os.path.join(directory, filename)
self._parse_template(options, tmpl_path, found_assets)
if options.get('verbosity') >= 1:
print "Parsed %d templates, found %d valid assets." % (
total_count, len(found_assets))
return found_assets
def _parse_template(self, options, tmpl_path, found_assets):
def try_django(contents):
# parse the template for asset nodes
try:
t = template.Template(contents)
except template.TemplateSyntaxError, e:
if options.get('verbosity') >= 2:
print self.style.ERROR('\tdjango parser failed, error was: %s'%e)
return False
else:
result = []
def _recurse_node(node):
# depending on whether the template tag is added to
# builtins, or loaded via {% load %}, it will be
# available in a different module
if isinstance(node, (AssetsNodeMapped, AssetsNodeO)):
# try to resolve this node's data; if we fail,
# then it depends on view data and we cannot
# manually rebuild it.
try:
output, files, filter = node.resolve()
except template.VariableDoesNotExist:
if options.get('verbosity') >= 2:
print self.style.ERROR('\tskipping asset %s, depends on runtime data.' % node.output)
else:
result.append((output, files, filter))
# see Django #7430
for subnode in hasattr(node, 'nodelist') \
and node.nodelist\
or []:
_recurse_node(subnode)
for node in t: # don't move into _recurse_node, ``Template`` has a .nodelist attribute
_recurse_node(node)
return result
def try_jinja(contents):
try:
t = jinja2_env.parse(contents.decode(settings.DEFAULT_CHARSET))
except jinja2.exceptions.TemplateSyntaxError, e:
if options.get('verbosity') >= 2:
print self.style.ERROR('\tjinja parser failed, error was: %s'%e)
return False
else:
result = []
def _recurse_node(node):
for node in node.iter_child_nodes():
if isinstance(node, jinja2.nodes.Call):
if isinstance(node.node, jinja2.nodes.ExtensionAttribute)\
and node.node.identifier == AssetsExtension.identifier:
filter, output, files = node.args
result.append((output.as_const(),
files.as_const(),
filter.as_const()))
for node in t.iter_child_nodes():
_recurse_node(node)
return result
if options.get('verbosity') >= 2:
print "Parsing template: %s" % _shortpath(tmpl_path)
file = open(tmpl_path, 'rb')
try:
contents = file.read()
finally:
file.close()
result = try_django(contents)
if result is False and jinja2:
result = try_jinja(contents)
if result:
for output, files, filter in result:
if not output in found_assets:
if options.get('verbosity') >= 2:
print self.style.NOTICE('\tfound asset: %s' % output)
found_assets[output] = {
'sources': files,
'filter': filter,
}
| bsd-3-clause | -7,455,010,411,454,960,000 | 41.178218 | 117 | 0.5446 | false |
jdgarrett/geogig | doc/manpages/source/conf.py | 1 | 11571 | # -*- coding: utf-8 -*-
#
# GeoGig documentation build configuration file, created by
# sphinx-quickstart on Tue Oct 28 10:01:09 2008.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed automatically).
#
# All configuration values have a default value; values that are commented out
# serve to show the default value.
import sys, os, string
# If your extensions are in another directory, add it here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.append(os.path.abspath('some/directory'))
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('geogig', 'geogig', 'Runs a geogig command', ['Boundless <http://boundlessgeo.com>'], '1'),
('init', 'geogig-init', 'Create and initialize a new geogig repository', ['Boundless <http://boundlessgeo.com>'], '1'),
('add', 'geogig-add', 'Stage changes to the index to prepare for commit', ['Boundless <http://boundlessgeo.com>'], '1'),
    ('branch', 'geogig-branch', 'Create, delete, or list branches', ['OpenGeo <http://opengeo.org>'], '1'),
    ('checkout', 'geogig-checkout', 'Checkout a branch', ['OpenGeo <http://opengeo.org>'], '1'),
('commit', 'geogig-commit', 'Commits staged changes to the repository', ['Boundless <http://boundlessgeo.com>'], '1'),
('config', 'geogig-config', 'Get and set repository or global options', ['Boundless <http://boundlessgeo.com>'], '1'),
('cherrypick', 'geogig-cherrypick', 'Apply the changes introduced by some existing commits', ['Boundless <http://boundlessgeo.com>'], '1'),
('diff', 'geogig-diff', 'Show changes between two tree-ish references.', ['Boundless <http://boundlessgeo.com>'], '1'),
('log', 'geogig-log', 'Show commit logs', ['Boundless <http://boundlessgeo.com>'], '1'),
    ('help', 'geogig-help', 'Get help for a command', ['OpenGeo <http://opengeo.org>'], '1'),
('indexing', 'geogig-index', 'Index command extension', ['Boundless <http://boundlessgeo.com>'], '1'),
('indexcreate', 'geogig-index-create', 'Create a new index on a feature tree', ['Boundless <http://boundlessgeo.com>'], '1'),
('indexupdate', 'geogig-index-update', 'Change the extra attributes tracked by an index', ['Boundless <http://boundlessgeo.com>'], '1'),
('indexrebuild', 'geogig-index-rebuild', 'Rebuild indexes for the whole history of a feature tree', ['Boundless <http://boundlessgeo.com>'], '1'),
('indexlist', 'geogig-index-list', 'List indexes in the repository', ['Boundless <http://boundlessgeo.com>'], '1'),
('status', 'geogig-status', 'Show the working tree and index status', ['Boundless <http://boundlessgeo.com>'], '1'),
('merge', 'geogig-merge', 'Merge two or more histories into one', ['Boundless <http://boundlessgeo.com>'], '1'),
('rebase', 'geogig-rebase', 'Forward-port local commits to the updated upstream head', ['Boundless <http://boundlessgeo.com>'], '1'),
('reset', 'geogig-reset', 'Reset current HEAD to the specified state', ['Boundless <http://boundlessgeo.com>'], '1'),
('remote', 'geogig-remote', 'Remote management command extension', ['Boundless <http://boundlessgeo.com>'], '1'),
('remoteadd', 'geogig-remote-add', 'Add a repository whose branches should be tracked', ['Boundless <http://boundlessgeo.com>'], '1'),
('remotelist', 'geogig-remote-list', 'List all repositories being tracked', ['Boundless <http://boundlessgeo.com>'], '1'),
('remoteremove', 'geogig-remote-remove', 'Remove a repository whose branches are being tracked', ['Boundless <http://boundlessgeo.com>'], '1'),
('revert', 'geogig-revert', 'Revert changes that were committed', ['Boundless <http://boundlessgeo.com>'], '1'),
('clone', 'geogig-clone', 'Clone a repository into a new directory', ['Boundless <http://boundlessgeo.com>'], '1'),
('fetch', 'geogig-fetch', 'Download objects and refs from another repository', ['Boundless <http://boundlessgeo.com>'], '1'),
('pull', 'geogig-pull', 'Fetch from and merge with another repository or a local branch', ['Boundless <http://boundlessgeo.com>'], '1'),
('push', 'geogig-push', 'Update remote refs along with associated objects', ['Boundless <http://boundlessgeo.com>'], '1'),
('pg', 'geogig-pg', 'PostGIS command extension', ['Boundless <http://boundlessgeo.com>'], '1'),
('pgimport', 'geogig-pg-import', 'Import features from a PostGIS database', ['Boundless <http://boundlessgeo.com>'], '1'),
('pgexport', 'geogig-pg-export', 'Export features to a PostGIS database', ['Boundless <http://boundlessgeo.com>'], '1'),
('pglist', 'geogig-pg-list', 'List tables in a PostGIS database', ['Boundless <http://boundlessgeo.com>'], '1'),
('pgdescribe', 'geogig-pg-describe', 'Describe properties of a table in a PostGIS database', ['Boundless <http://boundlessgeo.com>'], '1'),
('shp', 'geogig-shp', 'Shapefile command extension', ['Boundless <http://boundlessgeo.com>'], '1'),
('shpimport', 'geogig-shp-import', 'Import features from shapefiles', ['Boundless <http://boundlessgeo.com>'], '1'),
    ('shpexport', 'geogig-shp-export', 'Export features to shapefiles', ['Boundless <http://boundlessgeo.com>'], '1')
]
# General configuration
# ---------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.todo']
#todo_include_todos = True
# -- Options for HTML output ---------------------------------------------------
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['../../themes/']
html_theme = 'geogig_docs'
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'geogig'
# General substitutions.
project = u'GeoGig'
manual = u'Man Pages'
copyright = u'Boundless <http://boundlessgeo.com>'
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
#
# The short X.Y version.
version = '1.2'
# The full version, including alpha/beta/rc tags.
release = '1.2-SNAPSHOT'
# Users don't need to see the "SNAPSHOT" notation when it's there
if release.find('SNAPSHOT') != -1:
release = '1.2'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directories, that shouldn't be searched
# for source files.
exclude_trees = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# Options for HTML output
# -----------------------
html_theme = 'geogig_docs'
html_theme_path = ['../../themes']
if os.environ.get('HTML_THEME_PATH'):
html_theme_path.append(os.environ.get('HTML_THEME_PATH'))
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
#html_style = 'default.css'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = project + " " + release + " " + manual
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = favicon.ico
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['../../theme/_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_use_modindex = False
# If false, no index is generated.
html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, the reST sources are included in the HTML build as _sources/<name>.
#html_copy_source = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'GeoGigUserManual'
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
latex_documents = [
('index', 'GeoGigUserManual.tex', u'GeoGig User Manual',
u'GeoGig', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = '../../themes/geogig/static/GeoGig.png'
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
latex_elements = {
'fontpkg': '\\usepackage{palatino}',
'fncychap': '\\usepackage[Sonny]{fncychap}',
'preamble': #"""\\usepackage[parfill]{parskip}
"""
\\hypersetup{
colorlinks = true,
linkcolor = [rgb]{0,0.46,0.63},
anchorcolor = [rgb]{0,0.46,0.63},
citecolor = blue,
filecolor = [rgb]{0,0.46,0.63},
pagecolor = [rgb]{0,0.46,0.63},
urlcolor = [rgb]{0,0.46,0.63}
}
"""
}
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
| bsd-3-clause | 836,794,655,202,351,100 | 44.023346 | 150 | 0.682655 | false |
ve7cxz/PyAPRSd | aprs/packet.py | 1 | 8992 | #!/usr/bin/env python
import sys, re
# Packet class
class Packet(object):
def __init__(self):
# These data types are taken directly from the APRS spec at http://aprs.org/doc/APRS101.PDF
# This is not an exhaustive list. These are the most common ones, and were added during
# testing.
self._data_type_list = {'!' : 'Position without timestamp',
'_' : 'Weather Report (without position)',
'@' : 'Position with timestamp (with APRS messaging)',
'/' : 'Position with timestamp (no APRS messaging)',
'=' : 'Position without timestamp (with APRS messaging)',
'T' : 'Telemetry data',
';' : 'Object',
'<' : 'Station Capabilities',
'>' : 'Status',
'`' : 'Current Mic-E Data (not used in TM-D700)',
'?' : 'Query',
'\'' : 'Old Mic-E Data (but Current data for TM-D700)',
':' : 'Message',
'$' : 'Raw GPS data or Ultimeter 2000',
}
self._date_type_list = {'z' : 'D/H/M format, zulu time',
'/' : 'D/H/M format, local time',
'h' : 'H/M/S format, zulu time'
}
# Raw packet
self._packet = None
# Station the packet originated from
self._source = None
# Destination of the packet
self._destination = None
# Packet path
self._path = None
# Information field
self._information = None
# Data type identifier
self._data_type = None
# Latitude
self._latitude = None
# Longitude
self._longitude = None
# Symbol
self._symbol = None
# Comment
self._comment = None
# PHG (Power-Height-Gain)
self._phg = None
# Data extension
self._data_extension = None
# Altitude
self._altitude = None
# Date
self._date = None
# Date type
self._date_type = None
# Month
self._month = None
# Day
self._day = None
# Hour
self._hour = None
# Minute
self._minute = None
# Second
self._second = None
# Parsed, read-only values of the above, populated by parse()
self._parsed_source = None
self._parsed_destination = None
self._parsed_path = None
self._parsed_information = None
# Internal class variables
# X1J flag
self._x1j = False
# packet
@property
def packet(self):
return self._packet
@packet.setter
def packet(self, value):
self._packet = value
self._parse()
# source
@property
def source(self):
return self._source
@source.setter
def source(self, value):
self._source = value
self._build()
# destination
@property
def destination(self):
return self._destination
@destination.setter
def destination(self, value):
self._destination = value
self._build()
# Path
@property
def path(self):
return self._path
@path.setter
def path(self, value):
self._path = value
self._build()
# Information field
@property
def information(self):
return self._information
@information.setter
def information(self, value):
self._information = value
self._build()
# Data type (usually first character of the Information field - not always)
@property
def data_type(self):
return self._data_type
@data_type.setter
def data_type(self, value):
self._data_type = value
self._build()
# Latitude
@property
def latitude(self):
return self._latitude
@latitude.setter
def latitude(self, value):
self._latitude = value
self._build()
# Longitude
@property
def longitude(self):
return self._longitude
@longitude.setter
def longitude(self, value):
self._longitude = value
self._build()
# Symbol
@property
def symbol(self):
return self._symbol
@symbol.setter
def symbol(self, value):
self._symbol = value
self._build()
# Comment (at the end of the Information field in status packets)
@property
def comment(self):
return self._comment
@comment.setter
def comment(self, value):
self._comment = value
self._build()
# Data extension (PHG, course/speed, radio range, etc.)
@property
def data_extension(self):
return self._data_extension
@data_extension.setter
def data_extension(self, value):
self._data_extension = value
self._build()
# Altitude
@property
def altitude(self):
return self._altitude
@altitude.setter
def altitude(self, value):
self._altitude = value
self._build()
# Power-Height-Gain
@property
def phg(self):
return self._phg
@phg.setter
def phg(self, value):
self._phg = value
self._build()
# Raw date
@property
def date(self):
return self._date
@date.setter
def date(self, value):
self._date = value
self._build()
# Date type
@property
def date_type(self):
return self._date_type
@date_type.setter
def date_type(self, value):
self._date_type = value
self._build()
# Month
@property
def month(self):
return self._month
@month.setter
def month(self, value):
self._month = value
self._build()
# Day
@property
def day(self):
return self._day
@day.setter
def day(self, value):
self._day = value
self._build()
# Hour
@property
def hour(self):
return self._hour
@hour.setter
def hour(self, value):
self._hour = value
self._build()
# Minute
@property
def minute(self):
return self._minute
@minute.setter
def minute(self, value):
self._minute = value
self._build()
# Second
@property
def second(self):
return self._second
@second.setter
def second(self, value):
self._second = value
self._build()
# Read-only attributes
# Friendly name for the data type
@property
def data_type_name(self):
return self._data_type_list.get(self._data_type)
# Friendly name for the date type
@property
def date_type_name(self):
return self._date_type_list.get(self._date_type)
# reset packet
def _reset(self):
self._source = self._parsed_source
self._destination = self._parsed_destination
self._path = self._parsed_path
self._information = self._parsed_information
self._parse()
# parse information
def _parse_information(self):
# Get the data type
first_char = self._information[0]
# Look to see if it is a valid data type.
if first_char in self._data_type_list:
# Assign it to _data_type
self._data_type = first_char
else:
            # No valid data type found so far. However, the spec allows '!'
            # (and *only* '!') to appear anywhere in the first 40 characters
            # of the information field.
            if re.search(r"!", self._information[0:40]):
self._data_type = "!"
# Set the X1J flag to assist with parsing
self._x1j = True
else:
# Since we don't know the data type, we can't parse the information
# field any further
return
# Parse the information field
if self._data_type in [ '!', '=' ]:
# position reports - without timestamps (!, =)
            # Extract latitude, symbol table, longitude, symbol code and comment
            (self._latitude, symbol_table, self._longitude, symbol_code, comment) = re.search(r"^[\!\=]([\d\s\.]+[NS])(\S)([\d\s\.]+[EW])(\S)(.*)$", self._information).groups()
            self._comment = comment
            # Join the two symbol characters together
            self._symbol = symbol_table + symbol_code
elif self._data_type in [ '/', '@' ]:
# position reports - with timestamps (/, @)
            (self._date, self._date_type, self._latitude, symbol_table, self._longitude, symbol_code, comment) = re.search(r"^[\/\@](\d{6})([zh\/])([\d\s\.]+[NS])(\S)([\d\s\.]+[EW])(\S)(.*)$", self._information).groups()
            # Join the two symbol characters together and keep the trailing comment
            self._symbol = symbol_table + symbol_code
            self._comment = comment
            if self._date_type in [ "z", "/" ]:
                # D/H/M format, zulu or local time
                self._day = self._date[0:2]
                self._hour = self._date[2:4]
                self._minute = self._date[4:6]
            elif self._date_type == "h":
                # H/M/S format, zulu time
                self._hour = self._date[0:2]
                self._minute = self._date[2:4]
                self._second = self._date[4:6]
# parse
def _parse(self):
# Split packet into segments
print "Packet: " + self._packet
packet_segments = re.search(r"([\w\-]+)>([\w\-]+),([\w\-\*\,]+):(.*)$", self._packet)
# Assign segments to variables
(self._source, self._destination, self._path, self._information) = packet_segments.groups()
# Set the read-only parse time versions of the above
(self._parsed_source, self._parsed_destination, self._parsed_path, self._parsed_information) = packet_segments.groups()
self._parse_information()
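    # A worked example of the layout _parse() expects (hypothetical callsign
    # and position, for illustration only -- not part of the original code):
    #   p = Packet()
    #   p.packet = "N0CALL>APRS,WIDE1-1,WIDE2-1*:!4903.50N/07201.75W-Test station"
    #   p.source      -> "N0CALL"
    #   p.destination -> "APRS"
    #   p.path        -> "WIDE1-1,WIDE2-1*"
    #   p.data_type   -> "!"  (Position without timestamp)
    #   p.latitude    -> "4903.50N"
    #   p.longitude   -> "07201.75W"
    #   p.symbol      -> "/-"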
# build information
def _build_information(self):
pass
# build
def _build(self):
if self._source is not None and self._destination is not None and self._path is not None and self._information is not None:
packet = self._source + ">" + self._destination + "," + self._path + ":" + self._information
self._packet = packet
| bsd-3-clause | -956,683,427,237,687,600 | 23.77135 | 214 | 0.597976 | false |
peggyl/sodapy | sodapy/__init__.py | 1 | 9828 | from constants import MAX_LIMIT
from version import __version__, version_info
import requests
from cStringIO import StringIO
import csv
import json
__author__ = "Cristina Munoz <[email protected]>"
class Socrata(object):
def __init__(self, domain, app_token, username=None, password=None,
access_token=None, session_adapter=None):
'''
The required arguments are:
domain: the domain you wish you to access
app_token: your Socrata application token
Simple requests are possible without an app_token, though these
requests will be rate-limited.
For write/update/delete operations or private datasets, the Socrata API
currently supports basic HTTP authentication, which requires these
additional parameters.
username: your Socrata username
password: your Socrata password
The basic HTTP authentication comes with a deprecation warning, and the
current recommended authentication method is OAuth 2.0. To make
requests on behalf of the user using OAuth 2.0 authentication, follow
the recommended procedure and provide the final access_token to the
client.
More information about authentication can be found in the official
docs:
http://dev.socrata.com/docs/authentication.html
'''
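        # A minimal usage sketch (domain, token and credentials below are
        # placeholders, not real values):
        #   client = Socrata("data.example.gov", "FakeAppToken")
        #   client = Socrata("data.example.gov", "FakeAppToken",
        #                    username="[email protected]", password="secret")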
if not domain:
raise Exception("A domain is required.")
self.domain = domain
# set up the session with proper authentication crendentials
self.session = requests.Session()
if not app_token:
print ("Warning: requests made without an app_token will be"
" subject to strict throttling limits.")
else:
self.session.headers.update({"X-App-token": app_token})
self.authentication_validation(username, password, access_token)
# use either basic HTTP auth or OAuth2.0
if username and password:
self.session.auth = (username, password)
elif access_token:
self.session.headers.update({"Authorization": "OAuth {0}"
.format(access_token)})
if session_adapter:
self.session.mount(session_adapter["prefix"],
session_adapter["adapter"])
self.uri_prefix = session_adapter["prefix"]
else:
self.uri_prefix = "https"
def authentication_validation(self, username, password, access_token):
'''
Only accept one form of authentication.
'''
if bool(username) != bool(password):
raise Exception("Basic authentication requires a username AND"
" password.")
if (username and access_token) or (password and access_token):
raise Exception("Cannot use both Basic Authentication and"
" OAuth2.0. Please use only one authentication"
" method.")
def create(self, file_object):
raise NotImplementedError()
def get(self, resource, **kwargs):
'''
Read data from the requested resource. Optionally, specify a keyword
arg to filter results:
select : the set of columns to be returned, defaults to *
            where : filters the rows to be returned, defaults to no filter
order : specifies the order of results
group : column to group results on
limit : max number of results to return, defaults to 1000
offset : offset, used for paging. Defaults to 0
q : performs a full text search for a value
exclude_system_fields : defaults to true. If set to false, the
response will include system fields (:id, :created_at, and
:updated_at)
More information about the SoQL parameters can be found at the official
docs:
http://dev.socrata.com/docs/queries.html
More information about system fields can be found here:
http://dev.socrata.com/docs/system-fields.html
'''
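        # A sketch of typical calls (the resource id and column names are made
        # up for illustration):
        #   client.get("/resource/abcd-1234.json", limit=10)
        #   client.get("/resource/abcd-1234.json", where="magnitude > 3.0",
        #              order="magnitude DESC", limit=100)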
headers = _clear_empty_values({"Accept": kwargs.pop("format", None)})
params = {
"$select": kwargs.pop("select", None),
"$where": kwargs.pop("where", None),
"$order": kwargs.pop("order", None),
"$group": kwargs.pop("group", None),
"$limit": kwargs.pop("limit", None),
"$offset": kwargs.pop("offset", None),
"$q": kwargs.pop("q", None),
"$$exclude_system_fields": kwargs.pop("exclude_system_fields",
None)
}
params.update(kwargs)
params = _clear_empty_values(params)
if params.get("$limit") and params["$limit"] > MAX_LIMIT:
raise Exception("Max limit exceeded! {0} is greater than the"
" Socrata API limit of {1}. More information on"
" the official API docs:"
" http://dev.socrata.com/docs/paging.html"
.format(params["$limit"], MAX_LIMIT))
response = self._perform_request("get", resource, headers=headers,
params=params)
return response
def upsert(self, resource, payload):
'''
Insert, update or delete data to/from an existing dataset. Currently
supports json and csv file objects. See here for the upsert
documentation:
http://dev.socrata.com/publishers/upsert.html
'''
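        # A sketch of both supported payload types (resource id and fields are
        # made up for illustration):
        #   client.upsert("/resource/abcd-1234.json", [{"id": 1, "name": "x"}])
        #   with open("rows.csv") as csv_file:
        #       client.upsert("/resource/abcd-1234.json", csv_file)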
return self._perform_update("post", resource, payload)
def replace(self, resource, payload):
'''
Same logic as upsert, but overwrites existing data with the payload
using PUT instead of POST.
'''
return self._perform_update("put", resource, payload)
def _perform_update(self, method, resource, payload):
if isinstance(payload, list):
response = self._perform_request(method, resource,
data=json.dumps(payload))
elif isinstance(payload, file):
headers = {
"content-type": "text/csv",
}
response = self._perform_request(method, resource, data=payload,
headers=headers)
else:
raise Exception("Unrecognized payload {0}. Currently only lists"
" and files are supported.".format(type(payload)))
return response
def delete(self, resource, id=None):
'''
Delete the entire dataset, e.g.
client.delete("/resource/nimj-3ivp.json")
or a single row, e.g.
client.delete("/resource/nimj-3ivp.json", id=4)
'''
if id:
base, content_type = resource.rsplit(".", 1)
delete_uri = "{0}/{1}.{2}".format(base, id, content_type)
else:
delete_uri = resource.replace("resource", "api/views")
return self._perform_request("delete", delete_uri)
def _perform_request(self, request_type, resource, **kwargs):
'''
Utility method that performs all requests.
'''
request_type_methods = set(["get", "post", "put", "delete"])
if request_type not in request_type_methods:
raise Exception("Unknown request type. Supported request types are"
": {0}".format(", ".join(request_type_methods)))
uri = "{0}://{1}{2}".format(self.uri_prefix, self.domain, resource)
# set a timeout, just to be safe
kwargs["timeout"] = 10
response = getattr(self.session, request_type)(uri, **kwargs)
# handle errors
if response.status_code not in (200, 202):
_raise_for_status(response)
# deletes have no content body, simply return the whole response
if request_type == "delete":
return response
# for other request types, return most useful data
content_type = response.headers.get('content-type').strip().lower()
if content_type == "application/json; charset=utf-8":
return response.json()
elif content_type == "text/csv; charset=utf-8":
csv_stream = StringIO(response.text)
return [line for line in csv.reader(csv_stream)]
elif content_type == "application/rdf+xml; charset=utf-8":
return response.content
else:
raise Exception("Unknown response format: {0}"
.format(content_type))
def close(self):
self.session.close()
# helper methods
def _raise_for_status(response):
'''
Custom raise_for_status with more appropriate error message.
'''
http_error_msg = ""
if 400 <= response.status_code < 500:
http_error_msg = "{0} Client Error: {1}".format(response.status_code,
response.reason)
elif 500 <= response.status_code < 600:
http_error_msg = "{0} Server Error: {1}".format(response.status_code,
response.reason)
if http_error_msg:
try:
more_info = response.json().get("message")
except ValueError:
more_info = None
if more_info and more_info.lower() != response.reason.lower():
http_error_msg += ".\n\t{0}".format(more_info)
raise requests.exceptions.HTTPError(http_error_msg, response=response)
def _clear_empty_values(args):
result = {}
for param in args:
if args[param] is not None:
result[param] = args[param]
return result
| mit | -3,261,599,665,507,649,000 | 38.629032 | 79 | 0.573972 | false |
fluidinfo/fom | tests/_base.py | 1 | 1637 | # -*- coding: utf-8 -*-
"""
test_base
~~~~~~~~~
A way of faking out FluidDB in fom, for testing.
:copyright: (c) 2010 by AUTHOR.
:license: MIT, see LICENSE_FILE for more details.
"""
from collections import deque
from fom.db import FluidDB, _generate_endpoint_url, NO_CONTENT, FluidResponse
class FakeHttpLibResponse(dict):
def __init__(self, status, content_type, content=None):
# yeah, I know, blame httplib2 for this API
self.status_code = status
self.headers = {}
self.headers['content-type'] = content_type
self.text = content
class FakeHttpLibRequest(object):
def __init__(self, response):
self.response = response
def __call__(self, *args):
self.args = args
return self.response
class FakeFluidDB(FluidDB):
def __init__(self):
FluidDB.__init__(self, 'http://testing')
self.reqs = []
self.resps = deque()
self.default_response = FakeHttpLibResponse(200, 'text/plain', 'empty')
def add_resp(self, status, content_type, content):
hresp = FakeHttpLibResponse(status, content_type, content)
self.resps.append(hresp)
def __call__(self, method, path, payload=NO_CONTENT, urlargs=None,
content_type=None, is_value=False):
path = _generate_endpoint_url('', path, '')
req = (method, path, payload, urlargs, content_type)
self.reqs.append(req)
try:
resp = self.resps.popleft()
except IndexError:
resp = self.default_response
return FluidResponse(resp, resp.text, is_value)
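# A sketch of how this fake might be driven from a test (illustrative only;
# exact path handling depends on fom's _generate_endpoint_url):
#   db = FakeFluidDB()
#   db.add_resp(200, 'application/json', '{"value": 1}')
#   db('GET', ['objects', 'some-id'])
#   assert db.reqs[0][0] == 'GET'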
| mit | 5,095,253,780,232,331,000 | 26.745763 | 79 | 0.607819 | false |