repo_name (stringlengths 5-92) | path (stringlengths 4-232) | copies (stringclasses, 19 values) | size (stringlengths 4-7) | content (stringlengths 721-1.04M) | license (stringclasses, 15 values) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 15-997) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
mz314/django-sass-processor | sass_processor/management/commands/compilescss.py | 1 | 7090 | # -*- coding: utf-8 -*-
import os
import sass
from optparse import make_option
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django.template.loader import get_template # noqa Leave this in to preload template locations
from django.utils.importlib import import_module
from django.utils.encoding import force_bytes
from compressor.offline.django import DjangoParser
from compressor.exceptions import TemplateDoesNotExist, TemplateSyntaxError
from sass_processor.templatetags.sass_tags import SassSrcNode
from sass_processor.storage import find_file
class Command(BaseCommand):
help = "Compile SASS/SCSS into CSS outside of the request/response cycle"
option_list = BaseCommand.option_list + (make_option('--delete-files', action='store_true',
dest='delete_files', default=False, help='Delete generated `*.css` files instead of creating them.'),)
def __init__(self):
self.parser = DjangoParser(charset=settings.FILE_CHARSET)
self.template_exts = getattr(settings, 'SASS_TEMPLATE_EXTS', ['.html'])
self.output_style = getattr(settings, 'SASS_OUTPUT_STYLE', 'compact')
super(Command, self).__init__()
def handle(self, *args, **options):
self.verbosity = int(options['verbosity'])
self.delete_files = options['delete_files']
self.compiled_files = []
templates = self.find_templates()
for template_name in templates:
self.parse_template(template_name)
if self.verbosity > 0:
if self.delete_files:
self.stdout.write('Successfully deleted {0} previously generated `*.css` files.'.format(len(self.compiled_files)))
else:
self.stdout.write('Successfully compiled {0} referred SASS/SCSS files.'.format(len(self.compiled_files)))
def find_templates(self):
paths = set()
for loader in self.get_loaders():
try:
module = import_module(loader.__module__)
get_template_sources = getattr(module, 'get_template_sources', loader.get_template_sources)
paths.update(list(get_template_sources('')))
except (ImportError, AttributeError):
pass
if not paths:
raise CommandError("No template paths found. None of the configured template loaders provided template paths")
templates = set()
for path in paths:
for root, _, files in os.walk(path):
templates.update(os.path.join(root, name)
for name in files if not name.startswith('.') and
any(name.endswith(ext) for ext in self.template_exts))
if not templates:
raise CommandError("No templates found. Make sure your TEMPLATE_LOADERS and TEMPLATE_DIRS settings are correct.")
return templates
def get_loaders(self):
try:
from django.template.loader import (
find_template as finder_func)
except ImportError:
from django.template.loader import (find_template_source as finder_func)
try:
# Force Django to calculate template_source_loaders from
# TEMPLATE_LOADERS settings, by asking to find a dummy template
finder_func('test')
# Had to broaden this to a bare Exception, because otherwise the call
# crashed even when wrapped in a try/except. Catching Exception is broad,
# but it does what the try/except is meant to do: it keeps the
# command-line execution from crashing.
except Exception:
pass
loaders = []
# When template_source_loaders is first imported at the top it is None,
# because that is what Django initialises it to. Executing finder_func
# populates template_source_loaders, so its value has to be re-imported
# at this point: the copy imported earlier is still None, and importing
# it again picks up the proper Django default values.
from django.template.loader import template_source_loaders
for loader in template_source_loaders:
if hasattr(loader, 'loaders'):
loaders.extend(loader.loaders)
else:
loaders.append(loader)
return loaders
def parse_template(self, template_name):
try:
template = self.parser.parse(template_name)
except IOError: # unreadable file -> ignore
self.stdout.write("Unreadable template at: %s\n" % template_name)
return
except TemplateSyntaxError as e: # broken template -> ignore
self.stdout.write("Invalid template %s: %s\n" % (template_name, e))
return
except TemplateDoesNotExist: # non existent template -> ignore
self.stdout.write("Non-existent template at: %s\n" % template_name)
return
except UnicodeDecodeError:
self.stdout.write("UnicodeDecodeError while trying to read template %s\n" % template_name)
try:
nodes = list(self.walk_nodes(template))
except Exception as e:
# Could be an error in some base template
self.stdout.write("Error parsing template %s: %s\n" % (template_name, e))
else:
for node in nodes:
if self.delete_files:
self.delete_file(node)
else:
self.compile(node)
def compile(self, node):
sass_filename = find_file(node.path)
if not sass_filename or sass_filename in self.compiled_files:
return
content = sass.compile(include_paths=node.include_paths, filename=sass_filename, output_style=self.output_style)
basename, _ = os.path.splitext(sass_filename)
destpath = basename + '.css'
with open(destpath, 'wb') as fh:
fh.write(force_bytes(content))
self.compiled_files.append(sass_filename)
if self.verbosity > 1:
self.stdout.write("Compiled SASS/SCSS file: '{0}'\n".format(node.path))
def delete_file(self, node):
"""
Delete a *.css file, but only if it has been generated through a SASS/SCSS file.
"""
sass_filename = find_file(node.path)
if not sass_filename:
return
basename, _ = os.path.splitext(sass_filename)
destpath = basename + '.css'
if os.path.isfile(destpath):
os.remove(destpath)
self.compiled_files.append(sass_filename)
if self.verbosity > 1:
self.stdout.write("Deleted '{0}'\n".format(destpath))
def walk_nodes(self, node):
"""
Iterate over the nodes recursively yielding the templatetag 'sass_src'
"""
for node in self.parser.get_nodelist(node):
if isinstance(node, SassSrcNode):
if node.is_sass:
yield node
else:
for node in self.walk_nodes(node):
yield node
| mit | -9,080,522,471,985,560,000 | 44.448718 | 130 | 0.621157 | false |
Nithanaroy/random_scripts | CreateNeoDB.py | 1 | 1357 | from py2neo import Graph
graph = Graph("http://neo4j:1234@localhost:7474/db/data/")
# Insert data
insert_query = '''
UNWIND {pairs} as pair
MERGE (p1:Person {name:pair[0]})
MERGE (p2:Person {name:pair[1]})
MERGE (p1)-[:KNOWS]-(p2);
'''
data = [["Jim", "Mike"], ["Jim", "Billy"], ["Anna", "Jim"],
["Anna", "Mike"], ["Sally", "Anna"], ["Joe", "Sally"],
["Joe", "Bob"], ["Bob", "Sally"]]
graph.cypher.execute(insert_query, {"pairs": data})
# Friends of a friend
foaf_query = '''
MATCH (person:Person {name: {name}})-[:KNOWS*2]-(foaf)
WHERE NOT (person)-[:KNOWS]-(foaf)
RETURN foaf.name AS name
'''
results = graph.cypher.execute(foaf_query, {"name": "Joe"})
print 'FOF'
for record in results:
print(record)
# Common friends
common_friends_query = """
MATCH (user:Person)-[:KNOWS]-(friend)-[:KNOWS]-(foaf:Person)
WHERE user.name = {user} AND foaf.name = {foaf}
RETURN friend.name AS friend
"""
results = graph.cypher.execute(common_friends_query, {"user": "Joe", "foaf": "Sally"})
for record in results:
print(record)
# Connecting paths
connecting_paths_query = """
MATCH path = shortestPath((p1:Person)-[:KNOWS*..6]-(p2:Person))
WHERE p1.name = {name1} AND p2.name = {name2}
RETURN path
"""
results = graph.cypher.execute(connecting_paths_query, {"name1": "Joe", "name2": "Billy"})
for record in results:
print(record)
| mit | 8,349,724,321,448,204,000 | 24.12963 | 90 | 0.640383 | false |
charanpald/sandbox | sandbox/misc/ClusterExp.py | 1 | 1686 |
"""
Compare the clustering methods in scikits.learn to see which ones are fastest
and most accurate
"""
import time
import numpy
import sklearn.cluster as cluster
from apgl.data.Standardiser import Standardiser
import scipy.cluster.vq as vq
numExamples = 10000
numFeatures = 500
X = numpy.random.rand(numExamples, numFeatures)
X = Standardiser().standardiseArray(X)
k = 10
numRuns = 10
maxIter = 100
tol = 10**-4
intialCentroids = X[0:k, :]
#Quite fast
print("Running scikits learn k means")
clusterer = cluster.KMeans(k=k, n_init=numRuns, tol=tol, init=intialCentroids, max_iter=maxIter)
start = time.clock()
clusterer.fit(X)
totalTime = time.clock() - start
print(totalTime)
startArray = X[0:k, :]
#Really fast - good alternative but check cluster accuracy
print("Running mini batch k means")
clusterer = cluster.MiniBatchKMeans(k=k, max_iter=maxIter, tol=tol, init=intialCentroids)
start = time.clock()
clusterer.fit(X)
totalTime = time.clock() - start
print(totalTime)
clusters1 = clusterer.labels_
#Run k means clustering a number of times
print("Running vq k means")
start = time.clock()
centroids, distortion = vq.kmeans(X, intialCentroids, iter=numRuns, thresh=tol)
totalTime = time.clock() - start
print(totalTime)
#Run k means just once
print("Running vq k means2")
start = time.clock()
centroids, distortion = vq.kmeans2(X, intialCentroids, iter=maxIter, thresh=tol)
totalTime = time.clock() - start
print(totalTime)
clusters, distortion = vq.vq(X, centroids)
#Very slow
#clusterer = cluster.Ward(n_clusters=k)
#start = time.clock()
#clusterer.fit(X)
#totalTime = time.clock() - start
#print(totalTime)
#Conclusion: k means is fast even on 10000 examples | gpl-3.0 | -6,093,461,531,651,391,000 | 23.1 | 96 | 0.747924 | false |
aularon/meld | setup_win32.py | 1 | 3455 | #!/usr/bin/env python
import glob
import os
import site
import sys
from cx_Freeze import setup, Executable
import meld.build_helpers
import meld.conf
site_dir = site.getsitepackages()[1]
include_dll_path = os.path.join(site_dir, "gnome")
missing_dll = [
'libgtk-3-0.dll',
'libgdk-3-0.dll',
'libatk-1.0-0.dll',
'libintl-8.dll',
'libzzz.dll',
'libpyglib-gi-2.0-python27-0.dll',
'libwinpthread-1.dll',
'libcairo-gobject-2.dll',
'libgdk_pixbuf-2.0-0.dll',
'libpango-1.0-0.dll',
'libpangocairo-1.0-0.dll',
'libpangoft2-1.0-0.dll',
'libpangowin32-1.0-0.dll',
'libffi-6.dll',
'libfontconfig-1.dll',
'libfreetype-6.dll',
'libgio-2.0-0.dll',
'libglib-2.0-0.dll',
'libgmodule-2.0-0.dll',
'libgobject-2.0-0.dll',
'libgirepository-1.0-1.dll',
'libgtksourceview-3.0-1.dll',
'libjpeg-8.dll',
'libpng16-16.dll',
'libgnutls-26.dll',
'libxml2-2.dll',
'librsvg-2-2.dll',
'libharfbuzz-gobject-0.dll',
'libwebp-4.dll',
]
gtk_libs = [
'lib/gdk-pixbuf-2.0',
'lib/girepository-1.0',
'share/glib-2.0',
'share/icons',
]
include_files = [(os.path.join(include_dll_path, path), path) for path in
missing_dll + gtk_libs]
build_exe_options = {
"compressed": False,
"icon": "data/icons/meld.ico",
"includes": ["gi"],
"packages": ["gi", "weakref"],
"include_files": include_files,
}
bdist_msi_options = {
"upgrade_code": "{1d303789-b4e2-4d6e-9515-c301e155cd50}",
}
setup(
name="Meld",
version=meld.conf.__version__,
description='Visual diff and merge tool',
author='Kai Willadsen',
author_email='[email protected]',
url='http://meldmerge.org',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: X11 Applications :: GTK',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',
'Programming Language :: Python',
'Topic :: Desktop Environment :: Gnome',
'Topic :: Software Development',
'Topic :: Software Development :: Version Control',
],
options = {
"build_exe": build_exe_options,
"bdist_msi": bdist_msi_options,
},
executables = [
Executable(
"bin/meld",
base="Win32GUI",
targetName="Meld.exe",
shortcutName="Meld",
shortcutDir="ProgramMenuFolder",
),
],
packages=[
'meld',
'meld.ui',
'meld.util',
'meld.vc',
],
package_data={
'meld': ['README', 'COPYING', 'NEWS']
},
scripts=['bin/meld'],
data_files=[
('share/man/man1',
['meld.1']
),
('share/doc/meld-' + meld.conf.__version__,
['COPYING', 'NEWS']
),
('share/meld',
['data/meld.css']
),
('share/meld/icons',
glob.glob("data/icons/*.png") +
glob.glob("data/icons/COPYING*")
),
('share/meld/ui',
glob.glob("data/ui/*.ui") + glob.glob("data/ui/*.xml")
),
],
cmdclass={
"build_i18n": meld.build_helpers.build_i18n,
"build_help": meld.build_helpers.build_help,
"build_icons": meld.build_helpers.build_icons,
"build_data": meld.build_helpers.build_data,
}
)
| gpl-2.0 | 3,119,004,850,918,130,700 | 24.404412 | 85 | 0.557453 | false |
afrolov1/nova | nova/tests/image/test_glance.py | 1 | 50675 | # Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import filecmp
import os
import random
import tempfile
import time
import sys
import testtools
import mock
import mox
import glanceclient.exc
from oslo.config import cfg
from nova import context
from nova import exception
from nova.image import glance
from nova import test
from nova.tests.api.openstack import fakes
from nova.tests.glance import stubs as glance_stubs
from nova import utils
import nova.virt.libvirt.utils as lv_utils
CONF = cfg.CONF
class NullWriter(object):
"""Used to test ImageService.get which takes a writer object."""
def write(self, *arg, **kwargs):
pass
class TestGlanceSerializer(test.NoDBTestCase):
def test_serialize(self):
metadata = {'name': 'image1',
'is_public': True,
'foo': 'bar',
'properties': {
'prop1': 'propvalue1',
'mappings': [
{'virtual': 'aaa',
'device': 'bbb'},
{'virtual': 'xxx',
'device': 'yyy'}],
'block_device_mapping': [
{'virtual_device': 'fake',
'device_name': '/dev/fake'},
{'virtual_device': 'ephemeral0',
'device_name': '/dev/fake0'}]}}
converted_expected = {
'name': 'image1',
'is_public': True,
'foo': 'bar',
'properties': {
'prop1': 'propvalue1',
'mappings':
'[{"device": "bbb", "virtual": "aaa"}, '
'{"device": "yyy", "virtual": "xxx"}]',
'block_device_mapping':
'[{"virtual_device": "fake", "device_name": "/dev/fake"}, '
'{"virtual_device": "ephemeral0", '
'"device_name": "/dev/fake0"}]'}}
converted = glance._convert_to_string(metadata)
self.assertEqual(converted, converted_expected)
self.assertEqual(glance._convert_from_string(converted), metadata)
class TestGlanceImageService(test.NoDBTestCase):
"""Tests the Glance image service.
At a high level, the translations involved are:
1. Glance -> ImageService - This is needed so we can support
multiple ImageServices (Glance, Local, etc)
2. ImageService -> API - This is needed so we can support multiple
APIs (OpenStack, EC2)
"""
NOW_GLANCE_OLD_FORMAT = "2010-10-11T10:30:22"
NOW_GLANCE_FORMAT = "2010-10-11T10:30:22.000000"
class tzinfo(datetime.tzinfo):
@staticmethod
def utcoffset(*args, **kwargs):
return datetime.timedelta()
NOW_DATETIME = datetime.datetime(2010, 10, 11, 10, 30, 22, tzinfo=tzinfo())
def setUp(self):
super(TestGlanceImageService, self).setUp()
fakes.stub_out_compute_api_snapshot(self.stubs)
self.client = glance_stubs.StubGlanceClient()
self.service = self._create_image_service(self.client)
self.context = context.RequestContext('fake', 'fake', auth_token=True)
self.mox = mox.Mox()
self.files_to_clean = []
def tearDown(self):
super(TestGlanceImageService, self).tearDown()
self.mox.UnsetStubs()
for f in self.files_to_clean:
try:
os.unlink(f)
except os.error:
pass
def _get_tempfile(self):
(outfd, config_filename) = tempfile.mkstemp(prefix='nova_glance_tests')
self.files_to_clean.append(config_filename)
return (outfd, config_filename)
def _create_image_service(self, client):
def _fake_create_glance_client(context, host, port, use_ssl, version):
return client
self.stubs.Set(glance, '_create_glance_client',
_fake_create_glance_client)
client_wrapper = glance.GlanceClientWrapper(
'fake', 'fake_host', 9292)
return glance.GlanceImageService(client=client_wrapper)
@staticmethod
def _make_fixture(**kwargs):
fixture = {'name': None,
'properties': {},
'status': None,
'is_public': None}
fixture.update(kwargs)
return fixture
def _make_datetime_fixture(self):
return self._make_fixture(created_at=self.NOW_GLANCE_FORMAT,
updated_at=self.NOW_GLANCE_FORMAT,
deleted_at=self.NOW_GLANCE_FORMAT)
def test_show_makes_datetimes(self):
fixture = self._make_datetime_fixture()
image_id = self.service.create(self.context, fixture)['id']
image_meta = self.service.show(self.context, image_id)
self.assertEqual(image_meta['created_at'], self.NOW_DATETIME)
self.assertEqual(image_meta['updated_at'], self.NOW_DATETIME)
def test_detail_makes_datetimes(self):
fixture = self._make_datetime_fixture()
self.service.create(self.context, fixture)
image_meta = self.service.detail(self.context)[0]
self.assertEqual(image_meta['created_at'], self.NOW_DATETIME)
self.assertEqual(image_meta['updated_at'], self.NOW_DATETIME)
def test_page_size(self):
with mock.patch.object(glance.GlanceClientWrapper, 'call') as a_mock:
self.service.detail(self.context, page_size=5)
self.assertEqual(a_mock.called, True)
a_mock.assert_called_with(self.context, 1, 'list',
filters={'is_public': 'none'},
page_size=5)
def test_download_with_retries(self):
tries = [0]
class MyGlanceStubClient(glance_stubs.StubGlanceClient):
"""A client that fails the first time, then succeeds."""
def get(self, image_id):
if tries[0] == 0:
tries[0] = 1
raise glanceclient.exc.ServiceUnavailable('')
else:
return {}
client = MyGlanceStubClient()
service = self._create_image_service(client)
image_id = 1 # doesn't matter
writer = NullWriter()
# When retries are disabled, we should get an exception
self.flags(glance_num_retries=0)
self.assertRaises(exception.GlanceConnectionFailed,
service.download, self.context, image_id, data=writer)
# Now let's enable retries. No exception should happen now.
tries = [0]
self.flags(glance_num_retries=1)
service.download(self.context, image_id, data=writer)
def test_download_file_url(self):
self.flags(allowed_direct_url_schemes=['file'])
class MyGlanceStubClient(glance_stubs.StubGlanceClient):
"""A client that returns a file url."""
(outfd, s_tmpfname) = tempfile.mkstemp(prefix='directURLsrc')
outf = os.fdopen(outfd, 'w')
inf = open('/dev/urandom', 'r')
for i in range(10):
_data = inf.read(1024)
outf.write(_data)
outf.close()
def get(self, image_id):
return type('GlanceTestDirectUrlMeta', (object,),
{'direct_url': 'file://%s' + self.s_tmpfname})
client = MyGlanceStubClient()
(outfd, tmpfname) = tempfile.mkstemp(prefix='directURLdst')
os.close(outfd)
service = self._create_image_service(client)
image_id = 1 # doesn't matter
service.download(self.context, image_id, dst_path=tmpfname)
# compare the two files
rc = filecmp.cmp(tmpfname, client.s_tmpfname)
self.assertTrue(rc, "The file %s and %s should be the same" %
(tmpfname, client.s_tmpfname))
os.remove(client.s_tmpfname)
os.remove(tmpfname)
def test_download_module_filesystem_match(self):
mountpoint = '/'
fs_id = 'someid'
desc = {'id': fs_id, 'mountpoint': mountpoint}
class MyGlanceStubClient(glance_stubs.StubGlanceClient):
outer_test = self
def get(self, image_id):
return type('GlanceLocations', (object,),
{'locations': [
{'url': 'file:///' + os.devnull,
'metadata': desc}]})
def data(self, image_id):
self.outer_test.fail('This should not be called because the '
'transfer module should have intercepted '
'it.')
self.mox.StubOutWithMock(lv_utils, 'copy_image')
image_id = 1 # doesn't matter
client = MyGlanceStubClient()
self.flags(allowed_direct_url_schemes=['file'])
self.flags(group='image_file_url', filesystems=['gluster'])
service = self._create_image_service(client)
#NOTE(Jbresnah) The following options must be added after the module
# has added the specific groups.
self.flags(group='image_file_url:gluster', id=fs_id)
self.flags(group='image_file_url:gluster', mountpoint=mountpoint)
dest_file = os.devnull
lv_utils.copy_image(mox.IgnoreArg(), dest_file)
self.mox.ReplayAll()
service.download(self.context, image_id, dst_path=dest_file)
self.mox.VerifyAll()
def test_download_module_no_filesystem_match(self):
mountpoint = '/'
fs_id = 'someid'
desc = {'id': fs_id, 'mountpoint': mountpoint}
some_data = "sfxvdwjer"
class MyGlanceStubClient(glance_stubs.StubGlanceClient):
outer_test = self
def get(self, image_id):
return type('GlanceLocations', (object,),
{'locations': [
{'url': 'file:///' + os.devnull,
'metadata': desc}]})
def data(self, image_id):
return some_data
def _fake_copyfile(source, dest):
self.fail('This should not be called because a match should not '
'have been found.')
self.stubs.Set(lv_utils, 'copy_image', _fake_copyfile)
image_id = 1 # doesn't matter
client = MyGlanceStubClient()
self.flags(allowed_direct_url_schemes=['file'])
self.flags(group='image_file_url', filesystems=['gluster'])
service = self._create_image_service(client)
#NOTE(Jbresnah) The following options must be added after the module
# has added the specific groups.
self.flags(group='image_file_url:gluster', id='someotherid')
self.flags(group='image_file_url:gluster', mountpoint=mountpoint)
service.download(self.context, image_id,
dst_path=os.devnull,
data=None)
def test_download_module_mountpoints(self):
glance_mount = '/glance/mount/point'
_, data_filename = self._get_tempfile()
nova_mount = os.path.dirname(data_filename)
source_path = os.path.basename(data_filename)
file_url = 'file://%s' % os.path.join(glance_mount, source_path)
file_system_id = 'test_FS_ID'
file_system_desc = {'id': file_system_id, 'mountpoint': glance_mount}
class MyGlanceStubClient(glance_stubs.StubGlanceClient):
outer_test = self
def get(self, image_id):
return type('GlanceLocations', (object,),
{'locations': [{'url': file_url,
'metadata': file_system_desc}]})
def data(self, image_id):
self.outer_test.fail('This should not be called because the '
'transfer module should have intercepted '
'it.')
self.copy_called = False
def _fake_copyfile(source, dest):
self.assertEqual(source, data_filename)
self.copy_called = True
self.stubs.Set(lv_utils, 'copy_image', _fake_copyfile)
self.flags(allowed_direct_url_schemes=['file'])
self.flags(group='image_file_url', filesystems=['gluster'])
image_id = 1 # doesn't matter
client = MyGlanceStubClient()
service = self._create_image_service(client)
self.flags(group='image_file_url:gluster', id=file_system_id)
self.flags(group='image_file_url:gluster', mountpoint=nova_mount)
service.download(self.context, image_id, dst_path=os.devnull)
self.assertTrue(self.copy_called)
def test_download_module_file_bad_module(self):
_, data_filename = self._get_tempfile()
file_url = 'applesauce://%s' % data_filename
class MyGlanceStubClient(glance_stubs.StubGlanceClient):
data_called = False
def get(self, image_id):
return type('GlanceLocations', (object,),
{'locations': [{'url': file_url,
'metadata': {}}]})
def data(self, image_id):
self.data_called = True
return "someData"
self.flags(allowed_direct_url_schemes=['applesauce'])
self.mox.StubOutWithMock(lv_utils, 'copy_image')
self.flags(allowed_direct_url_schemes=['file'])
image_id = 1 # doesn't matter
client = MyGlanceStubClient()
service = self._create_image_service(client)
# by not calling copyfileobj in the file download module we verify
# that the requirements were not met for its use
self.mox.ReplayAll()
service.download(self.context, image_id, dst_path=os.devnull)
self.mox.VerifyAll()
self.assertTrue(client.data_called)
def test_client_forbidden_converts_to_imagenotauthed(self):
class MyGlanceStubClient(glance_stubs.StubGlanceClient):
"""A client that raises a Forbidden exception."""
def get(self, image_id):
raise glanceclient.exc.Forbidden(image_id)
client = MyGlanceStubClient()
service = self._create_image_service(client)
image_id = 1 # doesn't matter
self.assertRaises(exception.ImageNotAuthorized, service.download,
self.context, image_id, dst_path=os.devnull)
def test_client_httpforbidden_converts_to_imagenotauthed(self):
class MyGlanceStubClient(glance_stubs.StubGlanceClient):
"""A client that raises a HTTPForbidden exception."""
def get(self, image_id):
raise glanceclient.exc.HTTPForbidden(image_id)
client = MyGlanceStubClient()
service = self._create_image_service(client)
image_id = 1 # doesn't matter
self.assertRaises(exception.ImageNotAuthorized, service.download,
self.context, image_id, dst_path=os.devnull)
def test_client_notfound_converts_to_imagenotfound(self):
class MyGlanceStubClient(glance_stubs.StubGlanceClient):
"""A client that raises a NotFound exception."""
def get(self, image_id):
raise glanceclient.exc.NotFound(image_id)
client = MyGlanceStubClient()
service = self._create_image_service(client)
image_id = 1 # doesn't matter
self.assertRaises(exception.ImageNotFound, service.download,
self.context, image_id, dst_path=os.devnull)
def test_client_httpnotfound_converts_to_imagenotfound(self):
class MyGlanceStubClient(glance_stubs.StubGlanceClient):
"""A client that raises a HTTPNotFound exception."""
def get(self, image_id):
raise glanceclient.exc.HTTPNotFound(image_id)
client = MyGlanceStubClient()
service = self._create_image_service(client)
image_id = 1 # doesn't matter
self.assertRaises(exception.ImageNotFound, service.download,
self.context, image_id, dst_path=os.devnull)
def test_glance_client_image_id(self):
fixture = self._make_fixture(name='test image')
image_id = self.service.create(self.context, fixture)['id']
(service, same_id) = glance.get_remote_image_service(
self.context, image_id)
self.assertEqual(same_id, image_id)
def test_glance_client_image_ref(self):
fixture = self._make_fixture(name='test image')
image_id = self.service.create(self.context, fixture)['id']
image_url = 'http://something-less-likely/%s' % image_id
(service, same_id) = glance.get_remote_image_service(
self.context, image_url)
self.assertEqual(same_id, image_id)
self.assertEqual(service._client.host, 'something-less-likely')
def test_extracting_missing_attributes(self):
"""Verify behavior from glance objects that are missing attributes
This fakes the image class with missing attributes, which the client
can return when they are not set in the database.
"""
class MyFakeGlanceImage(glance_stubs.FakeImage):
def __init__(self, metadata):
IMAGE_ATTRIBUTES = ['size', 'owner', 'id', 'created_at',
'updated_at', 'status', 'min_disk',
'min_ram', 'is_public']
raw = dict.fromkeys(IMAGE_ATTRIBUTES)
raw.update(metadata)
self.__dict__['raw'] = raw
metadata = {
'id': 1,
'created_at': self.NOW_DATETIME,
'updated_at': self.NOW_DATETIME,
}
image = MyFakeGlanceImage(metadata)
observed = glance._extract_attributes(image)
expected = {
'id': 1,
'name': None,
'is_public': None,
'size': None,
'min_disk': None,
'min_ram': None,
'disk_format': None,
'container_format': None,
'checksum': None,
'created_at': self.NOW_DATETIME,
'updated_at': self.NOW_DATETIME,
'deleted_at': None,
'deleted': None,
'status': None,
'properties': {},
'owner': None,
}
self.assertEqual(expected, observed)
def _create_failing_glance_client(info):
class MyGlanceStubClient(glance_stubs.StubGlanceClient):
"""A client that fails the first time, then succeeds."""
def get(self, image_id):
info['num_calls'] += 1
if info['num_calls'] == 1:
raise glanceclient.exc.ServiceUnavailable('')
return {}
return MyGlanceStubClient()
class TestGetLocations(test.NoDBTestCase):
"""Tests the internal _get_locations function."""
class ImageSpecV2(object):
visibility = None
properties = None
locations = None
direct_url = None
@mock.patch('nova.image.glance._is_image_available')
def test_success_has_locations(self, avail_mock):
avail_mock.return_value = True
locations = [
mock.sentinel.loc1
]
image_meta = mock.MagicMock(locations=locations,
spec=TestGetLocations.ImageSpecV2)
client_mock = mock.MagicMock()
client_mock.call.return_value = image_meta
locs = glance._get_locations(client_mock, mock.sentinel.ctx,
mock.sentinel.image_id)
client_mock.call.assert_called_once_with(mock.sentinel.ctx,
2, 'get',
mock.sentinel.image_id)
self.assertEqual(locations, locs)
avail_mock.assert_called_once_with(mock.sentinel.ctx, image_meta)
@mock.patch('nova.image.glance._is_image_available')
def test_success_direct_uri_added_to_locations(self, avail_mock):
avail_mock.return_value = True
locations = [
mock.sentinel.loc1
]
image_meta = mock.MagicMock(locations=locations,
spec=TestGetLocations.ImageSpecV2,
direct_uri=mock.sentinel.duri)
client_mock = mock.MagicMock()
client_mock.call.return_value = image_meta
locs = glance._get_locations(client_mock, mock.sentinel.ctx,
mock.sentinel.image_id)
client_mock.call.assert_called_once_with(mock.sentinel.ctx,
2, 'get',
mock.sentinel.image_id)
expected = locations
expected.append({"url": mock.sentinel.duri, "metadata": {}})
self.assertEqual(expected, locs)
@mock.patch('nova.image.glance._reraise_translated_image_exception')
@mock.patch('nova.image.glance._is_image_available')
def test_get_locations_not_found(self, avail_mock, reraise_mock):
raised = exception.ImageNotFound(image_id=123)
reraise_mock.side_effect = raised
client_mock = mock.MagicMock()
client_mock.call.side_effect = glanceclient.exc.NotFound
self.assertRaises(exception.ImageNotFound, glance._get_locations,
client_mock, mock.sentinel.ctx,
mock.sentinel.image_id)
class TestIsImageAvailable(test.NoDBTestCase):
"""Tests the internal _is_image_available function."""
class ImageSpecV2(object):
visibility = None
properties = None
class ImageSpecV1(object):
is_public = None
properties = None
def test_auth_token_override(self):
ctx = mock.MagicMock(auth_token=True)
img = mock.MagicMock()
res = glance._is_image_available(ctx, img)
self.assertTrue(res)
self.assertFalse(img.called)
def test_admin_override(self):
ctx = mock.MagicMock(auth_token=False, is_admin=True)
img = mock.MagicMock()
res = glance._is_image_available(ctx, img)
self.assertTrue(res)
self.assertFalse(img.called)
def test_v2_visibility(self):
ctx = mock.MagicMock(auth_token=False, is_admin=False)
# We emulate warlock validation that throws an AttributeError
# if you try to call is_public on an image model returned by
# a call to V2 image.get(). Here, the ImageSpecV2 does not have
# an is_public attribute and MagicMock will throw an AttributeError.
img = mock.MagicMock(visibility='PUBLIC',
spec=TestIsImageAvailable.ImageSpecV2)
res = glance._is_image_available(ctx, img)
self.assertTrue(res)
def test_v1_is_public(self):
ctx = mock.MagicMock(auth_token=False, is_admin=False)
img = mock.MagicMock(is_public=True,
spec=TestIsImageAvailable.ImageSpecV1)
res = glance._is_image_available(ctx, img)
self.assertTrue(res)
def test_project_is_owner(self):
ctx = mock.MagicMock(auth_token=False, is_admin=False,
project_id='123')
props = {
'owner_id': '123'
}
img = mock.MagicMock(visibility='private', properties=props,
spec=TestIsImageAvailable.ImageSpecV2)
res = glance._is_image_available(ctx, img)
self.assertTrue(res)
ctx.reset_mock()
img = mock.MagicMock(is_public=False, properties=props,
spec=TestIsImageAvailable.ImageSpecV1)
res = glance._is_image_available(ctx, img)
self.assertTrue(res)
def test_project_context_matches_project_prop(self):
ctx = mock.MagicMock(auth_token=False, is_admin=False,
project_id='123')
props = {
'project_id': '123'
}
img = mock.MagicMock(visibility='private', properties=props,
spec=TestIsImageAvailable.ImageSpecV2)
res = glance._is_image_available(ctx, img)
self.assertTrue(res)
ctx.reset_mock()
img = mock.MagicMock(is_public=False, properties=props,
spec=TestIsImageAvailable.ImageSpecV1)
res = glance._is_image_available(ctx, img)
self.assertTrue(res)
def test_no_user_in_props(self):
ctx = mock.MagicMock(auth_token=False, is_admin=False,
project_id='123')
props = {
}
img = mock.MagicMock(visibility='private', properties=props,
spec=TestIsImageAvailable.ImageSpecV2)
res = glance._is_image_available(ctx, img)
self.assertFalse(res)
ctx.reset_mock()
img = mock.MagicMock(is_public=False, properties=props,
spec=TestIsImageAvailable.ImageSpecV1)
res = glance._is_image_available(ctx, img)
self.assertFalse(res)
def test_user_matches_context(self):
ctx = mock.MagicMock(auth_token=False, is_admin=False,
user_id='123')
props = {
'user_id': '123'
}
img = mock.MagicMock(visibility='private', properties=props,
spec=TestIsImageAvailable.ImageSpecV2)
res = glance._is_image_available(ctx, img)
self.assertTrue(res)
ctx.reset_mock()
img = mock.MagicMock(is_public=False, properties=props,
spec=TestIsImageAvailable.ImageSpecV1)
res = glance._is_image_available(ctx, img)
self.assertTrue(res)
class TestShow(test.NoDBTestCase):
"""Tests the show method of the GlanceImageService."""
@mock.patch('nova.image.glance._translate_from_glance')
@mock.patch('nova.image.glance._is_image_available')
def test_show_success(self, is_avail_mock, trans_from_mock):
is_avail_mock.return_value = True
trans_from_mock.return_value = mock.sentinel.trans_from
client = mock.MagicMock()
client.call.return_value = mock.sentinel.images_0
ctx = mock.sentinel.ctx
service = glance.GlanceImageService(client)
info = service.show(ctx, mock.sentinel.image_id)
client.call.assert_called_once_with(ctx, 1, 'get',
mock.sentinel.image_id)
is_avail_mock.assert_called_once_with(ctx, mock.sentinel.images_0)
trans_from_mock.assert_called_once_with(mock.sentinel.images_0)
self.assertEqual(mock.sentinel.trans_from, info)
@mock.patch('nova.image.glance._translate_from_glance')
@mock.patch('nova.image.glance._is_image_available')
def test_show_not_available(self, is_avail_mock, trans_from_mock):
is_avail_mock.return_value = False
client = mock.MagicMock()
client.call.return_value = mock.sentinel.images_0
ctx = mock.sentinel.ctx
service = glance.GlanceImageService(client)
with testtools.ExpectedException(exception.ImageNotFound):
service.show(ctx, mock.sentinel.image_id)
client.call.assert_called_once_with(ctx, 1, 'get',
mock.sentinel.image_id)
is_avail_mock.assert_called_once_with(ctx, mock.sentinel.images_0)
self.assertFalse(trans_from_mock.called)
@mock.patch('nova.image.glance._reraise_translated_image_exception')
@mock.patch('nova.image.glance._translate_from_glance')
@mock.patch('nova.image.glance._is_image_available')
def test_show_client_failure(self, is_avail_mock, trans_from_mock,
reraise_mock):
raised = exception.ImageNotAuthorized(image_id=123)
client = mock.MagicMock()
client.call.side_effect = glanceclient.exc.Forbidden
ctx = mock.sentinel.ctx
reraise_mock.side_effect = raised
service = glance.GlanceImageService(client)
with testtools.ExpectedException(exception.ImageNotAuthorized):
service.show(ctx, mock.sentinel.image_id)
client.call.assert_called_once_with(ctx, 1, 'get',
mock.sentinel.image_id)
self.assertFalse(is_avail_mock.called)
self.assertFalse(trans_from_mock.called)
reraise_mock.assert_called_once_with(mock.sentinel.image_id)
@mock.patch('nova.image.glance._is_image_available')
def test_show_queued_image_without_some_attrs(self, is_avail_mock):
is_avail_mock.return_value = True
client = mock.MagicMock()
# fake image cls without disk_format, container_format, name attributes
class fake_image_cls(object):
id = 'b31aa5dd-f07a-4748-8f15-398346887584'
deleted = False
protected = False
min_disk = 0
created_at = '2014-05-20T08:16:48'
size = 0
status = 'queued'
is_public = False
min_ram = 0
owner = '980ec4870033453ead65c0470a78b8a8'
updated_at = '2014-05-20T08:16:48'
glance_image = fake_image_cls()
client.call.return_value = glance_image
ctx = mock.sentinel.ctx
service = glance.GlanceImageService(client)
image_info = service.show(ctx, glance_image.id)
client.call.assert_called_once_with(ctx, 1, 'get',
glance_image.id)
NOVA_IMAGE_ATTRIBUTES = set(['size', 'disk_format', 'owner',
'container_format', 'status', 'id',
'name', 'created_at', 'updated_at',
'deleted', 'deleted_at', 'checksum',
'min_disk', 'min_ram', 'is_public',
'properties'])
self.assertEqual(NOVA_IMAGE_ATTRIBUTES, set(image_info.keys()))
class TestDetail(test.NoDBTestCase):
"""Tests the detail method of the GlanceImageService."""
@mock.patch('nova.image.glance._extract_query_params')
@mock.patch('nova.image.glance._translate_from_glance')
@mock.patch('nova.image.glance._is_image_available')
def test_detail_success_available(self, is_avail_mock, trans_from_mock,
ext_query_mock):
params = {}
is_avail_mock.return_value = True
ext_query_mock.return_value = params
trans_from_mock.return_value = mock.sentinel.trans_from
client = mock.MagicMock()
client.call.return_value = [mock.sentinel.images_0]
ctx = mock.sentinel.ctx
service = glance.GlanceImageService(client)
images = service.detail(ctx, **params)
client.call.assert_called_once_with(ctx, 1, 'list')
is_avail_mock.assert_called_once_with(ctx, mock.sentinel.images_0)
trans_from_mock.assert_called_once_with(mock.sentinel.images_0)
self.assertEqual([mock.sentinel.trans_from], images)
@mock.patch('nova.image.glance._extract_query_params')
@mock.patch('nova.image.glance._translate_from_glance')
@mock.patch('nova.image.glance._is_image_available')
def test_detail_success_unavailable(self, is_avail_mock, trans_from_mock,
ext_query_mock):
params = {}
is_avail_mock.return_value = False
ext_query_mock.return_value = params
trans_from_mock.return_value = mock.sentinel.trans_from
client = mock.MagicMock()
client.call.return_value = [mock.sentinel.images_0]
ctx = mock.sentinel.ctx
service = glance.GlanceImageService(client)
images = service.detail(ctx, **params)
client.call.assert_called_once_with(ctx, 1, 'list')
is_avail_mock.assert_called_once_with(ctx, mock.sentinel.images_0)
self.assertFalse(trans_from_mock.called)
self.assertEqual([], images)
@mock.patch('nova.image.glance._extract_query_params')
@mock.patch('nova.image.glance._translate_from_glance')
@mock.patch('nova.image.glance._is_image_available')
def test_detail_params_passed(self, is_avail_mock, _trans_from_mock,
ext_query_mock):
params = dict(limit=10)
ext_query_mock.return_value = params
client = mock.MagicMock()
client.call.return_value = [mock.sentinel.images_0]
ctx = mock.sentinel.ctx
service = glance.GlanceImageService(client)
service.detail(ctx, **params)
client.call.assert_called_once_with(ctx, 1, 'list', limit=10)
@mock.patch('nova.image.glance._reraise_translated_exception')
@mock.patch('nova.image.glance._extract_query_params')
@mock.patch('nova.image.glance._translate_from_glance')
@mock.patch('nova.image.glance._is_image_available')
def test_detail_client_failure(self, is_avail_mock, trans_from_mock,
ext_query_mock, reraise_mock):
params = {}
ext_query_mock.return_value = params
raised = exception.Forbidden()
client = mock.MagicMock()
client.call.side_effect = glanceclient.exc.Forbidden
ctx = mock.sentinel.ctx
reraise_mock.side_effect = raised
service = glance.GlanceImageService(client)
with testtools.ExpectedException(exception.Forbidden):
service.detail(ctx, **params)
client.call.assert_called_once_with(ctx, 1, 'list')
self.assertFalse(is_avail_mock.called)
self.assertFalse(trans_from_mock.called)
reraise_mock.assert_called_once_with()
class TestCreate(test.NoDBTestCase):
"""Tests the create method of the GlanceImageService."""
@mock.patch('nova.image.glance._translate_from_glance')
@mock.patch('nova.image.glance._translate_to_glance')
def test_create_success(self, trans_to_mock, trans_from_mock):
translated = {
'image_id': mock.sentinel.image_id
}
trans_to_mock.return_value = translated
trans_from_mock.return_value = mock.sentinel.trans_from
image_mock = mock.MagicMock(spec=dict)
client = mock.MagicMock()
client.call.return_value = mock.sentinel.image_meta
ctx = mock.sentinel.ctx
service = glance.GlanceImageService(client)
image_meta = service.create(ctx, image_mock)
trans_to_mock.assert_called_once_with(image_mock)
client.call.assert_called_once_with(ctx, 1, 'create',
image_id=mock.sentinel.image_id)
trans_from_mock.assert_called_once_with(mock.sentinel.image_meta)
self.assertEqual(mock.sentinel.trans_from, image_meta)
# Now verify that if we supply image data to the call,
# that the client is also called with the data kwarg
client.reset_mock()
image_meta = service.create(ctx, image_mock, data=mock.sentinel.data)
client.call.assert_called_once_with(ctx, 1, 'create',
image_id=mock.sentinel.image_id,
data=mock.sentinel.data)
@mock.patch('nova.image.glance._reraise_translated_exception')
@mock.patch('nova.image.glance._translate_from_glance')
@mock.patch('nova.image.glance._translate_to_glance')
def test_create_client_failure(self, trans_to_mock, trans_from_mock,
reraise_mock):
translated = {}
trans_to_mock.return_value = translated
image_mock = mock.MagicMock(spec=dict)
raised = exception.Invalid()
client = mock.MagicMock()
client.call.side_effect = glanceclient.exc.BadRequest
ctx = mock.sentinel.ctx
reraise_mock.side_effect = raised
service = glance.GlanceImageService(client)
self.assertRaises(exception.Invalid, service.create, ctx, image_mock)
trans_to_mock.assert_called_once_with(image_mock)
self.assertFalse(trans_from_mock.called)
class TestUpdate(test.NoDBTestCase):
"""Tests the update method of the GlanceImageService."""
@mock.patch('nova.image.glance._translate_from_glance')
@mock.patch('nova.image.glance._translate_to_glance')
def test_update_success(self, trans_to_mock, trans_from_mock):
translated = {
'id': mock.sentinel.image_id,
'name': mock.sentinel.name
}
trans_to_mock.return_value = translated
trans_from_mock.return_value = mock.sentinel.trans_from
image_mock = mock.MagicMock(spec=dict)
client = mock.MagicMock()
client.call.return_value = mock.sentinel.image_meta
ctx = mock.sentinel.ctx
service = glance.GlanceImageService(client)
image_meta = service.update(ctx, mock.sentinel.image_id, image_mock)
trans_to_mock.assert_called_once_with(image_mock)
# Verify that the 'id' element has been removed as a kwarg to
# the call to glanceclient's update (since the image ID is
# supplied as a positional arg), and that the
# purge_props default is True.
client.call.assert_called_once_with(ctx, 1, 'update',
mock.sentinel.image_id,
name=mock.sentinel.name,
purge_props=True)
trans_from_mock.assert_called_once_with(mock.sentinel.image_meta)
self.assertEqual(mock.sentinel.trans_from, image_meta)
# Now verify that if we supply image data to the call,
# that the client is also called with the data kwarg
client.reset_mock()
image_meta = service.update(ctx, mock.sentinel.image_id,
image_mock, data=mock.sentinel.data)
client.call.assert_called_once_with(ctx, 1, 'update',
mock.sentinel.image_id,
name=mock.sentinel.name,
purge_props=True,
data=mock.sentinel.data)
@mock.patch('nova.image.glance._reraise_translated_image_exception')
@mock.patch('nova.image.glance._translate_from_glance')
@mock.patch('nova.image.glance._translate_to_glance')
def test_update_client_failure(self, trans_to_mock, trans_from_mock,
reraise_mock):
translated = {
'name': mock.sentinel.name
}
trans_to_mock.return_value = translated
trans_from_mock.return_value = mock.sentinel.trans_from
image_mock = mock.MagicMock(spec=dict)
raised = exception.ImageNotAuthorized(image_id=123)
client = mock.MagicMock()
client.call.side_effect = glanceclient.exc.Forbidden
ctx = mock.sentinel.ctx
reraise_mock.side_effect = raised
service = glance.GlanceImageService(client)
self.assertRaises(exception.ImageNotAuthorized,
service.update, ctx, mock.sentinel.image_id,
image_mock)
client.call.assert_called_once_with(ctx, 1, 'update',
mock.sentinel.image_id,
purge_props=True,
name=mock.sentinel.name)
self.assertFalse(trans_from_mock.called)
reraise_mock.assert_called_once_with(mock.sentinel.image_id)
class TestDelete(test.NoDBTestCase):
"""Tests the delete method of the GlanceImageService."""
def test_delete_success(self):
client = mock.MagicMock()
client.call.return_value = True
ctx = mock.sentinel.ctx
service = glance.GlanceImageService(client)
service.delete(ctx, mock.sentinel.image_id)
client.call.assert_called_once_with(ctx, 1, 'delete',
mock.sentinel.image_id)
def test_delete_client_failure(self):
client = mock.MagicMock()
client.call.side_effect = glanceclient.exc.NotFound
ctx = mock.sentinel.ctx
service = glance.GlanceImageService(client)
self.assertRaises(exception.ImageNotFound, service.delete, ctx,
mock.sentinel.image_id)
class TestGlanceClientWrapper(test.NoDBTestCase):
def setUp(self):
super(TestGlanceClientWrapper, self).setUp()
# host1 has no scheme, which is http by default
self.flags(glance_api_servers=['host1:9292', 'https://host2:9293',
'http://host3:9294'])
# Make the test run fast
def _fake_sleep(secs):
pass
self.stubs.Set(time, 'sleep', _fake_sleep)
def test_headers_passed_glanceclient(self):
auth_token = 'auth_token'
ctxt = context.RequestContext('fake', 'fake', auth_token=auth_token)
fake_host = 'host4'
fake_port = 9295
fake_use_ssl = False
def _get_fake_glanceclient(version, endpoint, **params):
fake_client = glance_stubs.StubGlanceClient(version,
endpoint, **params)
self.assertIsNotNone(fake_client.auth_token)
self.assertIsNotNone(fake_client.identity_headers)
self.assertEqual(fake_client.identity_header['X-Auth_Token'],
auth_token)
self.assertEqual(fake_client.identity_header['X-User-Id'], 'fake')
self.assertIsNone(fake_client.identity_header['X-Roles'])
self.assertIsNone(fake_client.identity_header['X-Tenant-Id'])
self.assertIsNone(fake_client.identity_header['X-Service-Catalog'])
self.assertEqual(fake_client.
identity_header['X-Identity-Status'],
'Confirmed')
self.stubs.Set(glanceclient.Client, '__init__',
_get_fake_glanceclient)
glance._create_glance_client(ctxt, fake_host, fake_port, fake_use_ssl)
def test_static_client_without_retries(self):
self.flags(glance_num_retries=0)
ctxt = context.RequestContext('fake', 'fake')
fake_host = 'host4'
fake_port = 9295
fake_use_ssl = False
info = {'num_calls': 0}
def _fake_create_glance_client(context, host, port, use_ssl, version):
self.assertEqual(host, fake_host)
self.assertEqual(port, fake_port)
self.assertEqual(use_ssl, fake_use_ssl)
return _create_failing_glance_client(info)
self.stubs.Set(glance, '_create_glance_client',
_fake_create_glance_client)
client = glance.GlanceClientWrapper(context=ctxt,
host=fake_host, port=fake_port, use_ssl=fake_use_ssl)
self.assertRaises(exception.GlanceConnectionFailed,
client.call, ctxt, 1, 'get', 'meow')
self.assertEqual(info['num_calls'], 1)
def test_default_client_without_retries(self):
self.flags(glance_num_retries=0)
ctxt = context.RequestContext('fake', 'fake')
info = {'num_calls': 0,
'host': 'host1',
'port': 9292,
'use_ssl': False}
# Leave the list in a known-order
def _fake_shuffle(servers):
pass
def _fake_create_glance_client(context, host, port, use_ssl, version):
self.assertEqual(host, info['host'])
self.assertEqual(port, info['port'])
self.assertEqual(use_ssl, info['use_ssl'])
return _create_failing_glance_client(info)
self.stubs.Set(random, 'shuffle', _fake_shuffle)
self.stubs.Set(glance, '_create_glance_client',
_fake_create_glance_client)
client = glance.GlanceClientWrapper()
client2 = glance.GlanceClientWrapper()
self.assertRaises(exception.GlanceConnectionFailed,
client.call, ctxt, 1, 'get', 'meow')
self.assertEqual(info['num_calls'], 1)
info = {'num_calls': 0,
'host': 'host2',
'port': 9293,
'use_ssl': True}
def _fake_shuffle2(servers):
# fake shuffle in a known manner
servers.append(servers.pop(0))
self.stubs.Set(random, 'shuffle', _fake_shuffle2)
self.assertRaises(exception.GlanceConnectionFailed,
client2.call, ctxt, 1, 'get', 'meow')
self.assertEqual(info['num_calls'], 1)
def test_static_client_with_retries(self):
self.flags(glance_num_retries=1)
ctxt = context.RequestContext('fake', 'fake')
fake_host = 'host4'
fake_port = 9295
fake_use_ssl = False
info = {'num_calls': 0}
def _fake_create_glance_client(context, host, port, use_ssl, version):
self.assertEqual(host, fake_host)
self.assertEqual(port, fake_port)
self.assertEqual(use_ssl, fake_use_ssl)
return _create_failing_glance_client(info)
self.stubs.Set(glance, '_create_glance_client',
_fake_create_glance_client)
client = glance.GlanceClientWrapper(context=ctxt,
host=fake_host, port=fake_port, use_ssl=fake_use_ssl)
client.call(ctxt, 1, 'get', 'meow')
self.assertEqual(info['num_calls'], 2)
def test_default_client_with_retries(self):
self.flags(glance_num_retries=1)
ctxt = context.RequestContext('fake', 'fake')
info = {'num_calls': 0,
'host0': 'host1',
'port0': 9292,
'use_ssl0': False,
'host1': 'host2',
'port1': 9293,
'use_ssl1': True}
# Leave the list in a known-order
def _fake_shuffle(servers):
pass
def _fake_create_glance_client(context, host, port, use_ssl, version):
attempt = info['num_calls']
self.assertEqual(host, info['host%s' % attempt])
self.assertEqual(port, info['port%s' % attempt])
self.assertEqual(use_ssl, info['use_ssl%s' % attempt])
return _create_failing_glance_client(info)
self.stubs.Set(random, 'shuffle', _fake_shuffle)
self.stubs.Set(glance, '_create_glance_client',
_fake_create_glance_client)
client = glance.GlanceClientWrapper()
client2 = glance.GlanceClientWrapper()
client.call(ctxt, 1, 'get', 'meow')
self.assertEqual(info['num_calls'], 2)
def _fake_shuffle2(servers):
# fake shuffle in a known manner
servers.append(servers.pop(0))
self.stubs.Set(random, 'shuffle', _fake_shuffle2)
info = {'num_calls': 0,
'host0': 'host2',
'port0': 9293,
'use_ssl0': True,
'host1': 'host3',
'port1': 9294,
'use_ssl1': False}
client2.call(ctxt, 1, 'get', 'meow')
self.assertEqual(info['num_calls'], 2)
class TestGlanceUrl(test.NoDBTestCase):
def test_generate_glance_http_url(self):
generated_url = glance.generate_glance_url()
glance_host = CONF.glance_host
# ipv6 address, need to wrap it with '[]'
if utils.is_valid_ipv6(glance_host):
glance_host = '[%s]' % glance_host
http_url = "http://%s:%d" % (glance_host, CONF.glance_port)
self.assertEqual(generated_url, http_url)
def test_generate_glance_https_url(self):
self.flags(glance_protocol="https")
generated_url = glance.generate_glance_url()
glance_host = CONF.glance_host
# ipv6 address, need to wrap it with '[]'
if utils.is_valid_ipv6(glance_host):
glance_host = '[%s]' % glance_host
https_url = "https://%s:%d" % (glance_host, CONF.glance_port)
self.assertEqual(generated_url, https_url)
class TestGlanceApiServers(test.TestCase):
def test_get_ipv4_api_servers(self):
self.flags(glance_api_servers=['10.0.1.1:9292',
'https://10.0.0.1:9293',
'http://10.0.2.2:9294'])
glance_host = ['10.0.1.1', '10.0.0.1',
'10.0.2.2']
api_servers = glance.get_api_servers()
i = 0
for server in api_servers:
i += 1
self.assertIn(server[0], glance_host)
if i > 2:
break
# Python 2.6 cannot parse IPv6 addresses correctly
@testtools.skipIf(sys.version_info < (2, 7), "py27 or greater only")
def test_get_ipv6_api_servers(self):
self.flags(glance_api_servers=['[2001:2012:1:f101::1]:9292',
'https://[2010:2013:1:f122::1]:9293',
'http://[2001:2011:1:f111::1]:9294'])
glance_host = ['2001:2012:1:f101::1', '2010:2013:1:f122::1',
'2001:2011:1:f111::1']
api_servers = glance.get_api_servers()
i = 0
for server in api_servers:
i += 1
self.assertIn(server[0], glance_host)
if i > 2:
break
class TestUpdateGlanceImage(test.NoDBTestCase):
def test_start(self):
consumer = glance.UpdateGlanceImage(
'context', 'id', 'metadata', 'stream')
image_service = self.mox.CreateMock(glance.GlanceImageService)
self.mox.StubOutWithMock(glance, 'get_remote_image_service')
glance.get_remote_image_service(
'context', 'id').AndReturn((image_service, 'image_id'))
image_service.update(
'context', 'image_id', 'metadata', 'stream', purge_props=False)
self.mox.ReplayAll()
consumer.start()
| apache-2.0 | -4,568,962,508,668,061,700 | 38.71395 | 79 | 0.582082 | false |
zstackio/zstack-woodpecker | integrationtest/vm/mini/multiclusters/paths/multi_path196.py | 1 | 2569 | import zstackwoodpecker.test_state as ts_header
import os
TestAction = ts_header.TestAction
def path():
return dict(initial_formation="template5", checking_point=1, faild_point=100000, path_list=[
[TestAction.create_mini_vm, 'vm1', 'data_volume=true', 'cluster=cluster1'],
[TestAction.create_mini_vm, 'vm2', 'cluster=cluster2'],
[TestAction.create_vm_backup, 'vm2', 'vm2-backup1'],
[TestAction.start_vm, 'vm2'],
[TestAction.stop_vm, 'vm2'],
[TestAction.create_image_from_volume, 'vm2', 'vm2-image1'],
[TestAction.poweroff_only, 'cluster=cluster1'],
[TestAction.create_volume, 'volume2', 'cluster=cluster1', 'flag=scsi'],
[TestAction.attach_volume, 'vm1', 'volume2'],
[TestAction.create_volume, 'volume3', 'cluster=cluster2', 'flag=scsi'],
[TestAction.delete_volume, 'volume3'],
[TestAction.add_image, 'image2', 'root', 'http://172.20.1.28/mirror/diskimages/centos_vdbench.qcow2'],
[TestAction.delete_vm_backup, 'vm2-backup1'],
[TestAction.delete_image, 'vm2-image1'],
[TestAction.recover_image, 'vm2-image1'],
[TestAction.delete_image, 'vm2-image1'],
[TestAction.expunge_image, 'vm2-image1'],
[TestAction.start_vm, 'vm2'],
[TestAction.create_vm_backup, 'vm2', 'vm2-backup2'],
[TestAction.stop_vm, 'vm2'],
[TestAction.create_mini_vm, 'vm3', 'cluster=cluster1'],
[TestAction.poweroff_only, 'cluster=cluster1'],
[TestAction.resize_volume, 'vm1', 5*1024*1024],
[TestAction.detach_volume, 'volume2'],
[TestAction.create_volume, 'volume4', 'cluster=cluster2', 'flag=thin,scsi'],
[TestAction.use_vm_backup, 'vm2-backup2'],
[TestAction.destroy_vm, 'vm2'],
[TestAction.expunge_vm, 'vm2'],
[TestAction.delete_volume, 'volume2'],
[TestAction.expunge_volume, 'volume2'],
[TestAction.destroy_vm, 'vm3'],
[TestAction.recover_vm, 'vm3'],
[TestAction.create_mini_vm, 'vm4', 'cluster=cluster2'],
[TestAction.create_vm_backup, 'vm4', 'vm4-backup3'],
[TestAction.migrate_vm, 'vm4'],
[TestAction.poweroff_only, 'cluster=cluster1'],
[TestAction.attach_volume, 'vm4', 'volume4'],
[TestAction.create_volume_backup, 'volume4', 'volume4-backup4'],
[TestAction.stop_vm, 'vm4'],
[TestAction.use_volume_backup, 'volume4-backup4'],
[TestAction.start_vm, 'vm4'],
])
'''
The final status:
Running:['vm4']
Stopped:['vm1', 'vm3']
Enabled:['vm2-backup2', 'vm4-backup3', 'volume4-backup4', 'image2']
attached:['auto-volume1', 'volume4']
Detached:[]
Deleted:['volume3', 'vm2-backup1']
Expunged:['vm2', 'volume2', 'vm2-image1']
Ha:[]
Group:
vm_backup2:['vm4-backup3']---vm4@
vm_backup1:['vm2-backup2']---vm2@
''' | apache-2.0 | -8,610,853,918,201,521,000 | 37.939394 | 104 | 0.688206 | false |
lypnol/graph-theory | problem-02/submissions/coco-backtracking-improved.py | 1 | 2420 | from submission import Submission
def calculate_length(permutation, matrix):
n = len(permutation)
length = 0
for i in range(n-1):
length += matrix[permutation[i]][permutation[i+1]]
length += matrix[permutation[-1]][permutation[0]]
return length
def glouton(graphe, depart=None):
sommets = list(graphe.keys())
if depart is None:
depart = sommets.pop()
else:
sommets.remove(depart)
circuit = [depart]
position = depart
while sommets:
# selection du plus proche
min_l = float("inf")
closest_s = None
for s in sommets:
if graphe[position][s] < min_l:
closest_s = s
min_l = graphe[position][s]
sommets.remove(closest_s)
circuit.append(closest_s)
position = closest_s
return circuit, calculate_length(circuit, graphe)
def glouton_all_starts(graphe):
sommets = list(graphe.keys())
best_s = min([glouton(graphe, depart=s) for s in sommets], key=lambda x: x[1])
return best_s
def actual_length(path, matrix):
return sum((matrix[path[i]][path[i+1]] for i in range(len(path) - 1)))
def tsp_backtracking_closest_neighbours_rec(path, restant, max_length, matrix):
if not restant:
return path, calculate_length(path, matrix)
if actual_length(path, matrix) > max_length:
return (None, None)
best_length = max_length
best_path = None
for p in restant: #sorted(restant, key=lambda x: matrix[path[-1]][x]):
final_path, length = tsp_backtracking_closest_neighbours_rec(path + [p], restant - {p}, max_length, matrix)
if final_path is not None and length <= best_length:
max_length = length
best_length = length
best_path = final_path
if best_path is not None:
return best_path, best_length
else:
return (None, None)
def tsp_backtracking_closest_neighbours(matrix):
sommets = list(matrix.keys())
_, best_length = glouton_all_starts(matrix)
s = sommets.pop()
return tsp_backtracking_closest_neighbours_rec([s], set(sommets), best_length, matrix)
class CocoBacktrackingImproved(Submission):
def author(self):
return "coco-backtracking-improved"
def run(self, input):
matrix = input
path, length = tsp_backtracking_closest_neighbours(matrix)
return path + [path[0]]
| mit | -6,264,765,515,762,897,000 | 32.611111 | 115 | 0.62686 | false |
z3r0zh0u/pyutls | MyProcLib.py | 1 | 4286 | """
My Process Execution Library
"""
import os
import time
import Queue
import platform
import threading
import subprocess
NewLine = '\n'
if platform.system() == 'Windows':
NewLine = '\r\n'
def queue_output(out, queue):
"""Queue output"""
for line in iter(out.readline, b''):
queue.put(line)
out.close()
def retrieve_output(queue):
"""Retrieve output"""
output = ''
try:
data = queue.get_nowait()
while data != '':
output += data
data = queue.get_nowait()
except Queue.Empty:
pass
return output
class MyProc:
def __init__(self, proc_name, debug = False):
self.proc_name = proc_name
self.debug = debug
self.interactive = False
self.proc = None
self.out_queue = None
self.err_queue = None
self.__debug_print('[*] Process: ' + proc_name)
def run_proc(self, param = None, no_wait = False):
"""Run process only"""
cmd = [self.proc_name]
if param is not None:
cmd += param.split()
self.__debug_print('[*] Run: ' + str(cmd))
if no_wait:
subprocess.Popen(cmd)
else:
subprocess.call(cmd)
def run_proc_output(self, param = None):
"""Run process and return the output"""
cmd = [self.proc_name]
if param is not None:
cmd += param.split()
self.__debug_print('[*] Run: ' + str(cmd))
output = subprocess.check_output(cmd)
self.__debug_print('[*] Output:' + NewLine + output)
return output
def run_proc_interactive(self, param = None):
"""Interactive with process"""
self.interactive = True
cmd = [self.proc_name]
if param is not None:
cmd += param.split()
self.__debug_print('[*] Run: ' + str(cmd))
self.proc = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.out_queue = Queue.Queue()
self.err_queue = Queue.Queue()
out_thread = threading.Thread(target=queue_output, args=(self.proc.stdout, self.out_queue))
err_thread = threading.Thread(target=queue_output, args=(self.proc.stderr, self.err_queue))
out_thread.daemon = True
err_thread.daemon = True
out_thread.start()
err_thread.start()
time.sleep(0.1)
def send_input(self, input):
"""Send input to process"""
if self.interactive:
self.__debug_print('[*] Stdin: ' + input)
self.proc.stdin.write(input + NewLine)
time.sleep(0.1)
def get_output(self):
"""Get output"""
out_stdout = ''
out_stderr = ''
if self.interactive:
out_stdout = retrieve_output(self.out_queue)
out_stderr = retrieve_output(self.err_queue)
if len(out_stdout) > 0:
self.__debug_print('[*] Stdout: ' + NewLine + out_stdout)
self.__debug_print('-' * 40)
if len(out_stderr) > 0:
self.__debug_print('[*] Stderr: ' + NewLine + out_stderr)
self.__debug_print('-' * 40)
return out_stdout, out_stderr
def __debug_print(self, message):
"""Print debug info"""
if self.debug:
print message
def run_process():
"""Run process"""
proc_name = 'c:\\Windows\\System32\\cmd.exe'
proc = MyProc(proc_name, debug = True)
param = ' /c notepad test.txt'
proc.run_proc(param, no_wait = True)
param = ' /c ping 127.0.0.1'
output = proc.run_proc_output(param)
print output
proc.run_proc_interactive()
while True:
try:
input = raw_input("Input: ")
proc.send_input(input)
out_stdout, out_stderr = proc.get_output()
if out_stdout != '':
print out_stdout
if out_stderr != '':
print out_stderr
except Exception as e:
print '[!] Error: ' + str(e)
break
if __name__ == '__main__':
run_process() | gpl-2.0 | -2,528,289,493,108,418,600 | 20.984615 | 112 | 0.519365 | false |
GammaC0de/pyload | src/pyload/plugins/downloaders/ZippyshareCom.py | 1 | 4071 | # -*- coding: utf-8 -*-
import re
import urllib.parse
from bs4 import BeautifulSoup
from pyload.core.utils.misc import eval_js
from ..anticaptchas.ReCaptcha import ReCaptcha
from ..base.simple_downloader import SimpleDownloader
class ZippyshareCom(SimpleDownloader):
__name__ = "ZippyshareCom"
__type__ = "downloader"
__version__ = "0.98"
__status__ = "testing"
__pyload_version__ = "0.5"
__pattern__ = r"https?://(?P<HOST>www\d{0,3}\.zippyshare\.com)/(?:[vd]/|view\.jsp.*key=)(?P<KEY>[\w^_]+)"
__config__ = [
("enabled", "bool", "Activated", True),
("use_premium", "bool", "Use premium account if available", True),
("fallback", "bool", "Fallback to free download if premium fails", True),
("chk_filesize", "bool", "Check file size", True),
("max_wait", "int", "Reconnect if waiting time is greater than minutes", 10),
]
__description__ = """Zippyshare.com downloader plugin"""
__license__ = "GPLv3"
__authors__ = [
("Walter Purcaro", "[email protected]"),
("sebdelsol", "[email protected]"),
("GammaC0de", "nitzo2001[AT]yahoo[DOT]com"),
]
COOKIES = [("zippyshare.com", "ziplocale", "en")]
URL_REPLACEMENTS = [(__pattern__ + ".*", r"http://\g<HOST>/v/\g<KEY>/file.html")]
NAME_PATTERN = r'(?:<title>Zippyshare.com - |"/)(?P<N>[^/]+)(?:</title>|";)'
SIZE_PATTERN = r'>Size:.+?">(?P<S>[\d.,]+) (?P<U>[\w^_]+)'
OFFLINE_PATTERN = r"does not exist (anymore )?on this server<"
TEMP_OFFLINE_PATTERN = r"^unmatchable$"
LINK_PATTERN = r"document.location = '(.+?)'"
def setup(self):
self.chunk_limit = -1
self.multi_dl = True
self.resume_download = True
def handle_free(self, pyfile):
self.captcha = ReCaptcha(pyfile)
captcha_key = self.captcha.detect_key()
if captcha_key:
try:
self.link = re.search(self.LINK_PATTERN, self.data)
self.captcha.challenge()
except Exception as exc:
self.error(exc)
else:
self.link = self.fixurl(self.get_link())
if ".com/pd/" in self.link:
self.load(self.link)
self.link = self.link.replace(".com/pd/", ".com/d/")
if self.link and pyfile.name == "file.html":
pyfile.name = urllib.parse.unquote(self.link.split("/")[-1])
def get_link(self):
#: Get all the scripts inside the html body
soup = BeautifulSoup(self.data)
scripts = [
s.getText()
for s in soup.body.findAll("script", type="text/javascript")
if "('dlbutton').href =" in s.getText()
]
#: Emulate a document in JS
inits = [
"""
var document = {}
document.getElementById = function(x) {
if (!this.hasOwnProperty(x)) {
this[x] = {getAttribute : function(x) { return this[x] } }
}
return this[x]
}
"""
]
#: inits is meant to be populated with the initialization of all the DOM elements found in the scripts
eltRE = r'getElementById\([\'"](.+?)[\'"]\)(\.)?(getAttribute\([\'"])?(\w+)?([\'"]\))?'
for m in re.findall(eltRE, " ".join(scripts)):
JSid, JSattr = m[0], m[3]
values = [
f for f in (elt.get(JSattr, None) for elt in soup.findAll(id=JSid)) if f
]
if values:
inits.append(
'document.getElementById("{}")["{}"] = "{}"'.format(
JSid, JSattr, values[-1]
)
)
#: Add try/catch in JS to handle deliberate errors
scripts = ["\n".join(("try{", script, "} catch(err){}")) for script in scripts]
#: Get the file's url by evaluating all the scripts
scripts = inits + scripts + ["document.dlbutton.href"]
return eval_js("\n".join(scripts))
| agpl-3.0 | -8,509,155,195,058,406,000 | 33.5 | 110 | 0.519774 | false |
mbelmadani/motifgp | motifgp/hammingregex.py | 1 | 6109 | import re
import numpy
def sxor(s1,s2):
# convert strings to a list of character pair tuples
# go through each tuple, converting them to ASCII code (ord)
# perform exclusive or on the ASCII code
    # and return the list of per-position XOR values (one integer per character pair)
return [ord(a) ^ ord(b) for a,b in zip(s1,s2)]
def hamming_pre_string(regex, sequence):
"""
    To compute the hamming distance, we need to match the regex on the sequence and then replace the match with "1"
"""
match = re.search(regex, sequence)
if match:
match = match.group(0)
else:
#match = ""
#"0" * len(sequence)
return None
placeholder = "1" * len(match)
pre_string = list(sequence.replace(match, placeholder))
for i in range(len(pre_string)):
if pre_string[i] != '1':
pre_string[i] = '0'
return "".join(pre_string)
def compute_hamming(list_of_regex, template, sequence):
"""
    Build a weighted hamming score for a list of regexes against a template.
    Each regex is matched on the sequence and compared position by position
    with the template match; every regex contributes 1/len(list_of_regex) of
    the total, and the weighted hamming strings are summed.
    Returns the overall per-position performance of list_of_regex vs. template
    on sequence.
"""
hamming_template = hamming_pre_string(template, sequence)
regexs = None
if type(list_of_regex) == str:
regexs = list(list_of_regex)
else:
regexs = list_of_regex
output = None
for regex in regexs:
hamming_bs = hamming_pre_string(regex, sequence)
#print bs1+"$", "\n", bs2+"$"
#print "".join([str(x) for x in sxor(bs1, bs2)])
if hamming_bs == None:
xor_string = [float(x) for x in hamming_template]
#"".join([str(x) for x in sxor(hamming_template, str("0"*len(hamming_template)))]) # Invert template because no match was found. So match == everything but the template motif.
else:
#print hamming_bs, hamming_template
xor_string = sxor(hamming_bs, hamming_template)
xor_string = [x/float(len(regexs)) for x in xor_string]
"""
print ">"
print xor_string
print "< "
"""
if output:
output = [x + y for x,y in zip(output, xor_string)]
else:
output = xor_string
return output
def score_hamming(floatstring):
"""
    Converts the weighted hamming distance string to a numerical value
"""
return sum( floatstring ) / float(len(floatstring))
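# Minimal usage sketch (hypothetical motif/sequence, not part of the original
# module): a regex identical to the template matches the same positions, so
# every per-position difference is 0 and the normalised score is 0.0.
#   diffs = compute_hamming(["AC"], "AC", "TTACGT")   # [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
#   score_hamming(diffs)                              # 0.0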
class HammingBenchmark():
"""
Class to contain a benchmark of hamming distance against a synthetic dataset
"""
def __init__(self):
self.scores = {}
"""
self.max = -1
self.min = -1
self.mean = -1
self.std = -1
self.rmax = -1
self.rmin = -1
self.rmean = -1
self.rstd = -1
"""
def __repr__(self):
return "HammingBenchmark()"
def __str__(self):
output=""
for each in self.scores:
output += each+":\n"
benchmark_str = [
#self.scoxres, "\n",
"max:",self.scores[each]["max"],
"min:",self.scores[each]["min"],
"mean:",self.scores[each]["mean"],
"std:",self.scores[each]["std"],
]
output += ",".join([str(x) for x in benchmark_str]) + "\n"
#print benchmark_str
#output = ",".join(str(x) for x in benchmark_str)
return output
def compile(self, candidates, sequence_tuples):
"""
candidates; a batch of regular expression that are to be evaluated
sequence_tuples: a list of pairs of templates-sequences
"""
bins = {}
        for each in candidates: # Slice candidates one by one. This can be changed to have a real bin behavior
sequence_scores = []
candidate = [each] #TODO:CHANGEME; Quick hack to evaluate each candidate on its own versus the sequence set
bins[each] = {}
bins[each]["score"] = []
bins[each]["max"] = -1
bins[each]["min"] = -1
bins[each]["std"] = -1
bins[each]["mean"] = -1
for template, sequence in sequence_tuples:
hamming_str_score = compute_hamming(candidate, template, sequence)
candidates_score = tuple((sum(hamming_str_score), score_hamming(hamming_str_score) , hamming_str_score ))
bins[each]["score"].append(candidates_score)
self.scores = bins
self.update()
def update(self):
for each in self.scores.keys():
numeric_scores = [x[0] for x in self.scores[each]["score"]]
#if not numeric_scores:
# numeric_scores.append(0)
self.scores[each]["max"] = max(numeric_scores)
self.scores[each]["min"] = min(numeric_scores)
self.scores[each]["std"] = numpy.std(numeric_scores)
self.scores[each]["mean"] = numpy.mean(numeric_scores)
def flush_data_points(self, ks, xs, outpath, seed, CLEAR=True):
"""
Prints a data point y such that k[x] = y
k is an individual. x is the mapping value
seed will be used to color the datapoint
outpath is where to append the datapoint. CLEAR overwrites instead of appending.
"""
if CLEAR:
f = open(outpath, 'w')
else:
f = open(outpath, 'a')
for idx in range(len(ks)):
each = ks[idx]
x = xs[idx]
scores = self.scores[each]
y = scores["mean"],scores["std"],
output = [str(x) , str(y) , str(seed)]
output = "\t".join(output)
output += "\n"
print output
f.write(output)
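# Minimal usage sketch (hypothetical data, not part of the original module):
#   bench = HammingBenchmark()
#   bench.compile(["AC"], [("AC", "TTACGT")])
#   print bench   # per-candidate max/min/mean/std of the hamming scores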
| lgpl-3.0 | -8,711,762,516,253,431,000 | 32.201087 | 187 | 0.549026 | false |
daweim0/Just-some-image-features | lib/fcn/config.py | 1 | 5504 | # --------------------------------------------------------
# FCN
# Copyright (c) 2016
# Licensed under The MIT License [see LICENSE for details]
# Written by Yu Xiang
# --------------------------------------------------------
"""FCN config system.
This file specifies default config options for Fast R-CNN. You should not
change values in this file. Instead, you should write a config file (in yaml)
and use cfg_from_file(yaml_file) to load it and override the default options.
Most tools in $ROOT/tools take a --cfg option to specify an override file.
- See tools/{train,test}_net.py for example code that uses cfg_from_file()
- See experiments/cfgs/*.yml for example YAML config override files
"""
import os
import os.path as osp
import numpy as np
import math
# `pip install easydict` if you don't have it
from easydict import EasyDict as edict
import ast
__C = edict()
# Consumers can get config by:
# from fcn.config import cfg
cfg = __C
__C.FLIP_X = False
__C.INPUT = 'RGBD'
__C.NETWORK = 'VGG16'
__C.LOSS_FUNC = 'not_specified'
__C.PUPPER_DATASET = False
__C.NORMALIZE_IMAGES = False
# these are set once the program starts running, they should not be set in a cfg file
__C.MODE = ""
__C.IMDB_NAME = ""
__C.SET_VARIANT = ""
#
# Training options
#
__C.TRAIN = edict()
__C.TRAIN.SINGLE_FRAME = False
__C.TRAIN.TRAINABLE = True
__C.TRAIN.VERTEX_REG = False
__C.TRAIN.VERTEX_W = 10.0
__C.TRAIN.VISUALIZE = False
__C.TRAIN.GAN = False
# learning rate
__C.TRAIN.LEARNING_RATE = 0.001
__C.TRAIN.LEARNING_RATE_ADAM = 0.1
__C.TRAIN.MOMENTUM = 0.9
__C.TRAIN.GAMMA = 0.1
__C.TRAIN.STEPSIZE = 30000
# voxel grid size
__C.TRAIN.GRID_SIZE = 256
# Scales to compute real features
__C.TRAIN.SCALES_BASE = (0.25, 0.5, 1.0, 2.0, 3.0)
# parameters for data augmentation
__C.TRAIN.CHROMATIC = True
# Images to use per minibatch
__C.TRAIN.IMS_PER_BATCH = 2
__C.TRAIN.NUM_STEPS = 5
__C.TRAIN.NUM_UNITS = 64
__C.TRAIN.NUM_CLASSES = 10
# Use horizontally-flipped images during training?
__C.TRAIN.USE_FLIPPED = False
# Iterations between snapshots
__C.TRAIN.SNAPSHOT_ITERS = 10000
# solver.prototxt specifies the snapshot path prefix, this adds an optional
# infix to yield the path: <prefix>[_<infix>]_iters_XYZ.caffemodel
__C.TRAIN.SNAPSHOT_PREFIX = 'caffenet_fast_rcnn'
__C.TRAIN.SNAPSHOT_INFIX = ''
__C.TRAIN.DISPLAY = 20
__C.TRAIN.OPTICAL_FLOW = False
__C.TRAIN.DELETE_OLD_CHECKPOINTS = False
__C.TRAIN.VISUALIZE_DURING_TRAIN = False
__C.TRAIN.OPTIMIZER = 'MomentumOptimizer'
__C.TRAIN.USE_MASKS = False
__C.TRAIN.IMAGE_LIST_NAME = ""
__C.TRAIN.ADD_BACKGROUNDS = False
__C.TRAIN.ADD_NOISE = True
__C.NET_CONF = edict()
__C.NET_CONF.COMBINE_CONVOLUTION_SIZE = 1
__C.NET_CONF.CONCAT_OR_SUBTRACT = "concat"
__C.NET_CONF.N_CONVOLUTIONS = 1
__C.NET_CONF.MATCHING_STAGE_SCALE = 1.0
__C.NET_CONF.CONV1_SKIP_LINK = False
__C.NET_CONF.CONV2_SKIP_LINK = False
__C.NET_CONF.CONV3_SKIP_LINK = False
__C.NET_CONF.NEGATIVE_RADIUS = 200
#
# Testing options
#
__C.TEST = edict()
__C.TEST.SINGLE_FRAME = False
__C.TEST.VERTEX_REG = False
__C.TEST.VISUALIZE = False
__C.TEST.RANSAC = False
__C.TEST.GAN = False
__C.TEST.OPTICAL_FLOW = False
# Scales to compute real features
__C.TEST.SCALES_BASE = (0.25, 0.5, 1.0, 2.0, 3.0)
# voxel grid size
__C.TEST.GRID_SIZE = 256
# Pixel mean values (BGR order) as a (1, 1, 3) array
# These are the values originally used for training VGG16
__C.PIXEL_MEANS = np.array([[[102.9801, 115.9465, 122.7717]]])
# For reproducibility
__C.RNG_SEED = 3
# A small number that's used many times
__C.EPS = 1e-14
# Root directory of project
__C.ROOT_DIR = osp.abspath(osp.join(osp.dirname(__file__), '..', '..'))
# Place outputs under an experiments directory
__C.EXP_DIR = 'default'
# Default GPU device id
__C.GPU_ID = 0
def get_output_dir(imdb, net):
"""Return the directory where experimental artifacts are placed.
A canonical path is built using the name from an imdb and a network
(if not None).
"""
path = osp.abspath(osp.join(__C.ROOT_DIR, 'output', __C.EXP_DIR, imdb.name))
if net is None:
return path
else:
return osp.join(path, net)
def _merge_a_into_b(a, b):
"""Merge config dictionary a into config dictionary b, clobbering the
options in b whenever they are also specified in a.
"""
if type(a) is not edict and type(a) is not dict:
return
for k, v in a.iteritems():
# a must specify keys that are in b
if not b.has_key(k):
raise KeyError('{} is not a valid config key'.format(k))
# the types must match, too
if type(b[k]) is not type(v):
raise ValueError(('Type mismatch ({} vs. {}) '
'for config key: {}').format(type(b[k]),
type(v), k))
# recursively merge dicts
if type(v) is edict or type(v) is dict:
try:
_merge_a_into_b(a[k], b[k])
except:
print('Error under config key: {}'.format(k))
raise
else:
b[k] = v
def cfg_from_file(filename):
"""Load a config file and merge it into the default options."""
import yaml
with open(filename, 'r') as f:
yaml_cfg = edict(yaml.load(f))
_merge_a_into_b(yaml_cfg, __C)
def cfg_from_string(filename):
"""Load a config file and merge it into the default options."""
yaml_cfg = edict(ast.literal_eval(filename))
_merge_a_into_b(yaml_cfg, __C)
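# Usage sketch (hypothetical override values and file name, not part of the
# original module): both helpers merge user overrides into the defaults above.
#   cfg_from_string("{'TRAIN': {'LEARNING_RATE': 0.01}, 'GPU_ID': 1}")
#   cfg_from_file('experiments/cfgs/my_experiment.yml')   # same keys, YAML syntax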
| mit | -4,911,511,965,618,673,000 | 26.247525 | 85 | 0.632813 | false |
johnshiver/football_tools | football/core/models/weekly_stats.py | 1 | 1824 | from django.db import models
from django.conf import settings
from model_utils.models import TimeStampedModel
class WeeklyStats(TimeStampedModel):
player = models.ForeignKey('core.Player', related_name='player_stats')
season = models.ForeignKey('core.Season')
week = models.ForeignKey('core.Week', related_name='weekly_stats')
# rb stats
rushing_atts = models.SmallIntegerField(default=0)
rushing_yds = models.IntegerField(default=0)
rushing_tds = models.IntegerField(default=0)
# qb stats
passing_atts = models.SmallIntegerField(default=0)
passing_cmps = models.IntegerField(default=0)
passing_yds = models.IntegerField(default=0)
passing_tds = models.IntegerField(default=0)
passing_ints = models.SmallIntegerField(default=0)
# wr stats
receiving_rec = models.SmallIntegerField(default=0)
receiving_yds = models.IntegerField(default=0)
receiving_tds = models.IntegerField(default=0)
total_score = models.IntegerField(default=0)
def __str__(self):
return "{} for {}: score -> {}".format(self.week,
self.player,
self.total_score)
def calc_total_score(self):
total = 0
total += (settings.RUSHING_TD_POINTS * self.rushing_tds)
total += (settings.RUSHING_YD_POINTS * self.rushing_yds)
total += (settings.PASSING_YD_POINTS * self.passing_yds)
total += (settings.PASSING_TD_POINTS * self.passing_tds)
total += (settings.PASSING_INT_POINTS * self.passing_ints)
total += (settings.RECEIVING_YD_POINTS * self.receiving_yds)
total += (settings.RECEIVING_TD_POINTS * self.receiving_tds)
total += (settings.RECEIVING_REC_POINTS * self.receiving_rec)
return total
| mit | 6,390,556,806,062,743,000 | 37.808511 | 74 | 0.655702 | false |
ikvk/imap_tools | tests/test_utils.py | 1 | 7008 | import unittest
import datetime
from imap_tools import utils
from imap_tools.errors import ImapToolsError, UnexpectedCommandStatusError, MailboxCopyError
from imap_tools.consts import MailMessageFlags
class UtilsTest(unittest.TestCase):
def test_clean_uids(self):
# *clean_uids tested enough in test_query.py
pass
def test_clean_flags(self):
self.assertEqual(utils.clean_flags([MailMessageFlags.FLAGGED, MailMessageFlags.SEEN]), ['\\Flagged', '\\Seen'])
self.assertEqual(utils.clean_flags(['\\FLAGGED', '\\seen']), ['\\FLAGGED', '\\seen'])
self.assertEqual(utils.clean_flags(['TAG1']), ['TAG1',])
self.assertEqual(utils.clean_flags(['tag2']), ['tag2',])
for flag in MailMessageFlags.all:
self.assertEqual(utils.clean_flags(flag), ['\\' + flag.replace('\\', '', 1).capitalize()])
with self.assertRaises(ValueError):
utils.clean_flags([MailMessageFlags.FLAGGED, '\\CUSTOM_TAG_WITH_SLASH'])
def test_chunks(self):
self.assertEqual(list(utils.chunks('ABCDE', 2, '=')), [('A', 'B'), ('C', 'D'), ('E', '=')])
self.assertEqual(list(utils.chunks([1, 2, 3, 4, 5, 6], 3)), [(1, 2, 3), (4, 5, 6)])
self.assertEqual(list(utils.chunks([], 4)), [])
self.assertEqual(list(utils.chunks([1, 2], 0)), [])
self.assertEqual(list(utils.chunks(['0', '0'], 1)), [('0',), ('0',)])
def test_quote(self):
self.assertEqual(utils.quote('str привет'), '"str привет"')
self.assertEqual(utils.quote('str \\'), '"str \\\\"')
self.assertEqual(utils.quote(b'\xd1\x8f'), b'"\xd1\x8f"')
self.assertEqual(utils.quote(b'\\ \xd1\x8f \\'), b'"\\\\ \xd1\x8f \\\\"')
def test_pairs_to_dict(self):
self.assertEqual(utils.pairs_to_dict(['MESSAGES', '3', 'UIDNEXT', '4']), {'MESSAGES': '3', 'UIDNEXT': '4'})
with self.assertRaises(ValueError):
utils.pairs_to_dict(['1', '2', '3'])
def test_decode_value(self):
self.assertEqual(utils.decode_value('str привет 你好', 'not matter'), 'str привет 你好')
self.assertEqual(utils.decode_value(b'str \xd0\xb4\xd0\xb0 \xe4\xbd\xa0'), 'str да 你')
self.assertEqual(utils.decode_value(b'str \xd0\xb4\xd0\xb0 \xe4\xbd\xa0', 'utf8'), 'str да 你')
self.assertEqual(utils.decode_value(b'\xef\xf0\xe8\xe2\xe5\xf2', 'cp1251'), 'привет')
self.assertEqual(utils.decode_value(b'str \xd0\xb4\xd0\xb0 \xe4\xbd\xa0', 'wat?'), 'str да 你')
def test_check_command_status(self):
self.assertIsNone(utils.check_command_status(('EXP', 'command_result_data'), MailboxCopyError, expected='EXP'))
self.assertIsNone(utils.check_command_status(('OK', 'res'), UnexpectedCommandStatusError))
with self.assertRaises(TypeError):
utils.check_command_status(('NOT_OK', 'test'), ImapToolsError)
with self.assertRaises(MailboxCopyError):
utils.check_command_status(('BYE', ''), MailboxCopyError, expected='OK')
def test_parse_email_date(self):
for val, exp in (
('1 Jan 2000 00:00', datetime.datetime(2000, 1, 1, 0, 0)),
('1 Feb 2000 00:00', datetime.datetime(2000, 2, 1, 0, 0)),
('1 Mar 2000 00:00', datetime.datetime(2000, 3, 1, 0, 0)),
('1 Apr 2000 00:00', datetime.datetime(2000, 4, 1, 0, 0)),
('1 May 2000 00:00', datetime.datetime(2000, 5, 1, 0, 0)),
('1 Jun 2000 00:00', datetime.datetime(2000, 6, 1, 0, 0)),
('1 Jul 2000 00:00', datetime.datetime(2000, 7, 1, 0, 0)),
('1 Aug 2000 00:00', datetime.datetime(2000, 8, 1, 0, 0)),
('1 Sep 2000 00:00', datetime.datetime(2000, 9, 1, 0, 0)),
('1 Oct 2000 00:00', datetime.datetime(2000, 10, 1, 0, 0)),
('1 Nov 2000 00:00', datetime.datetime(2000, 11, 1, 0, 0)),
('1 Dec 2000 00:00', datetime.datetime(2000, 12, 1, 0, 0)),
('=) wat 7 Jun 2017 09:23!',
datetime.datetime(2017, 6, 7, 9, 23)),
('7 Jun 2017 09:23',
datetime.datetime(2017, 6, 7, 9, 23)),
('Wed, 7 Jun 2017 09:23',
datetime.datetime(2017, 6, 7, 9, 23)),
('Wed, 7 Jun 2017 09:23:14',
datetime.datetime(2017, 6, 7, 9, 23, 14)),
('Wed, 7 Jun 2017 09:23:14 +0000',
datetime.datetime(2017, 6, 7, 9, 23, 14, tzinfo=datetime.timezone.utc)),
('Wed, 7 Jun 2017 09:23:14 +0000 (UTC)',
datetime.datetime(2017, 6, 7, 9, 23, 14, tzinfo=datetime.timezone.utc)),
('Wed, 7 Jun 2017 09:23 +0000',
datetime.datetime(2017, 6, 7, 9, 23, tzinfo=datetime.timezone.utc)),
('Wed, 7 Jun 2017 09:23 +0000 (UTC)',
datetime.datetime(2017, 6, 7, 9, 23, tzinfo=datetime.timezone.utc)),
('7 Jun 2017 09:23 +0000',
datetime.datetime(2017, 6, 7, 9, 23, tzinfo=datetime.timezone.utc)),
('7 Jun 2017 09:23 +0000 (UTC)',
datetime.datetime(2017, 6, 7, 9, 23, tzinfo=datetime.timezone.utc)),
('7 Jun 2017 09:23 -2359',
datetime.datetime(2017, 6, 7, 9, 23, tzinfo=datetime.timezone(datetime.timedelta(-1, 60)))),
('7 Jun 2017 09:23 +0530 (UTC) asd',
datetime.datetime(2017, 6, 7, 9, 23, tzinfo=datetime.timezone(datetime.timedelta(0, 19800)))),
('7 Bad 2017 09:23', datetime.datetime(1900, 1, 1, 0, 0)),
):
self.assertEqual(utils.parse_email_date(val), exp)
def test_parse_email_addresses(self):
self.assertEqual(
utils.parse_email_addresses('=?UTF-8?B?0J7Qu9C1=?= <[email protected]>,\r\n "\'\\"z, z\\"\'" <[email protected]>\f'),
({'email': '[email protected]', 'name': 'Оле', 'full': 'Оле <[email protected]>'},
{'email': '[email protected]', 'name': '\'"z, z"\'', 'full': '\'"z, z"\' <[email protected]>'}))
self.assertEqual(
utils.parse_email_addresses(' <[email protected]>'),
({'email': '[email protected]', 'name': '', 'full': '[email protected]'},))
self.assertEqual(
utils.parse_email_addresses('ivan'),
            ({'email': '', 'name': '', 'full': 'ivan'},))  # *in this case ivan is treated as the email
self.assertEqual(
utils.parse_email_addresses('你好 <[email protected]>'),
({'email': '[email protected]', 'name': '你好', 'full': '你好 <[email protected]>'},))
self.assertEqual(
utils.parse_email_addresses(' "hi" <bad_mail.wow> '),
({'email': '', 'name': 'hi', 'full': 'hi <bad_mail.wow>'},))
self.assertEqual(
utils.parse_email_addresses('=?utf-8?Q?ATO.RU?= <[email protected]>'),
({'email': '[email protected]', 'name': 'ATO.RU', 'full': 'ATO.RU <[email protected]>'},))
| apache-2.0 | -6,395,569,723,169,207,000 | 56.173554 | 119 | 0.551171 | false |
our-city-app/oca-backend | src/rogerthat/migrations/delete_all_models_by_kind.py | 1 | 1605 | # -*- coding: utf-8 -*-
# Copyright 2020 Green Valley Belgium NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.7@@
from google.appengine.ext import db, ndb
from google.appengine.ext.ndb.query import QueryOptions
from rogerthat.bizz.job import run_job, MODE_BATCH
from rogerthat.consts import MIGRATION_QUEUE
def job(cls, namespace=None, batch_size=50):
if issubclass(cls, db.Model):
run_job(_qry_db, [cls, namespace], _worker_db, [], worker_queue=MIGRATION_QUEUE, mode=MODE_BATCH,
batch_size=batch_size)
elif issubclass(cls, ndb.Model):
run_job(_qry_ndb, [cls, namespace], _worker_ndb, [], worker_queue=MIGRATION_QUEUE, mode=MODE_BATCH,
batch_size=batch_size)
def _qry_db(cls, namespace=None):
return cls.all(keys_only=True,
namespace=namespace)
def _worker_db(keys):
db.delete(db.get(keys))
def _qry_ndb(cls, namespace=None):
return cls.query(default_options=QueryOptions(keys_only=True),
namespace=namespace)
def _worker_ndb(keys):
ndb.delete_multi(keys)
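# Usage sketch (hypothetical model class, not part of the original module):
# deletes every entity of the given kind in batches via the migration queue.
#   from mymodule.models import LegacyRecord   # an ndb.Model subclass
#   job(LegacyRecord, namespace='customer-1', batch_size=100)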
| apache-2.0 | 3,319,584,261,818,788,000 | 31.755102 | 107 | 0.697196 | false |
agrav/freesif | tests/test_sif2hdf5.py | 1 | 3789 | # -*- coding: utf-8 -*-
"""
Test sif2hdf5 function for different filetypes.
"""
import unittest
import freesif as fs
import os
import shutil
FILES = os.path.join(os.path.dirname(__file__), 'files')
class TestSIF2HDF5(unittest.TestCase):
"""Test *sif2hdf5* function for different filetypes. It is only checked that
the correct file is created, that it opens with the *open_hdf5* function and
    that the type and number of records on the file are correct. The actual data
on the files is not verified, this is done in the HydroData and StrucData
tests.
"""
@classmethod
def setUpClass(cls):
# file names
cls._in_files = dict(
siu_single=os.path.join(FILES, 'struc', 'single_super_elem', 'test01_2ndord_linstat_R1.SIU'),
siu_assembly=os.path.join(FILES, 'struc', 'assembly', 'R100.SIU'),
fem_single=os.path.join(FILES, 'struc', 'single_super_elem', 'test01_2ndord_linstat_T1.FEM'),
fem_assembly=os.path.join(FILES, 'struc', 'assembly', 'T100.FEM'),
sif_hydro=os.path.join(FILES, 'hydro', 'slowdrift_G1.SIF')
)
cls._out_files = dict(
siu_single=os.path.join(FILES, 'tmp', 'siu_single_R1.h5'),
siu_assembly=os.path.join(FILES, 'tmp', 'siu_assembly_R100.h5'),
fem_single=os.path.join(FILES, 'tmp', 'fem_single_T1.h5'),
fem_assembly=os.path.join(FILES, 'tmp', 'fem_assembly_T100.h5'),
sif_hydro=os.path.join(FILES, 'tmp', 'sif_hydro_G1.h5')
)
# create a clean /tmp directory
try:
shutil.rmtree(os.path.join(FILES, 'tmp'))
except FileNotFoundError:
# ok, so it was already removed
pass
finally:
# create empty directory
os.mkdir(os.path.join(FILES, 'tmp'))
@classmethod
def tearDownClass(cls):
# remove '\tmp' directory with H5 files
try:
shutil.rmtree(os.path.join(FILES, 'tmp'))
except FileNotFoundError:
# ok, so it was already removed
pass
def test_SIU_single(self):
fs.sif2hdf5(self._in_files['siu_single'], hdf5name=self._out_files['siu_single'])
self.assertTrue(os.path.isfile(self._out_files['siu_single']))
def test_SIU_assembly(self):
fs.sif2hdf5(self._in_files['siu_assembly'], hdf5name=self._out_files['siu_assembly'])
self.assertTrue(os.path.isfile(self._out_files['siu_assembly']))
def test_FEM_single(self):
fs.sif2hdf5(self._in_files['fem_single'], hdf5name=self._out_files['fem_single'])
self.assertTrue(os.path.isfile(self._out_files['fem_single']))
def test_FEM_assembly(self):
fs.sif2hdf5(self._in_files['fem_assembly'], hdf5name=self._out_files['fem_assembly'])
self.assertTrue(os.path.isfile(self._out_files['fem_assembly']))
def test_SIF_hydro(self):
fs.sif2hdf5(self._in_files['sif_hydro'], hdf5name=self._out_files['sif_hydro'])
self.assertTrue(os.path.isfile(self._out_files['sif_hydro']))
# TODO: check that type/number of records on h5 file is correct inside
# the following tests
def test_open_SIU_single(self):
f = fs.open_hdf5(self._out_files['siu_single'])
f.close()
def test_open_SIU_assembly(self):
f = fs.open_hdf5(self._out_files['siu_assembly'])
f.close()
def test_open_FEM_single(self):
f = fs.open_hdf5(self._out_files['fem_single'])
f.close()
def test_open_FEM_assembly(self):
f = fs.open_hdf5(self._out_files['fem_assembly'])
f.close()
def test_open_SIF_hydro(self):
f = fs.open_hdf5(self._out_files['sif_hydro'])
f.close()
if __name__ == '__main__':
unittest.main()
| mit | 246,719,434,470,003,000 | 35.432692 | 105 | 0.610715 | false |
Hornwitser/DiscordLogger | logger/config-example.py | 1 | 1032 | # Logger bot configuration.
{
    # These messages will disappear after the bot has been run
# Most of these settings can be changed from within the bot
# itself. See the help command.
'active_servers': set(),
'admin_commands': {'help', 'ignore_server', 'listen_on', 'leave', 'join'},
'admin_roles': set(),
'admins': set(),
# Discord user for the bot.
'bot_user': '[email protected]',
    'bot_password': 'Password for Discord user',
# MySQL database connection.
'db_host': 'localhost',
'db_user': 'logger',
'db_password': 'Password for database user',
'db_schema': 'discord',
'ignores': set(),
# Set of user ids that are masters of bot, and can do any command.
'masters': {'your-user-id-number'},
'noisy_deny': True,
'protected_servers': set(),
    # Character used to trigger commands in the bot. Setting it
# to '!', means commands start with an ! character (e.g, !help).
'trigger': '!',
'user_commands': {'help', 'leave', 'join'},
}
| mit | -6,773,766,223,123,624,000 | 30.272727 | 78 | 0.614341 | false |
JohnyEngine/CNC | opencamlib/scripts/drop-cutter/drop_cutter_one-triangle_2.py | 1 | 2299 | import ocl
import pyocl
import camvtk
import vtk
import math
def drawPoints(myscreen, clpoints, ccpoints):
c=camvtk.PointCloud( pointlist=clpoints, collist=ccpoints)
c.SetPoints()
myscreen.addActor(c )
if __name__ == "__main__":
print ocl.version()
myscreen = camvtk.VTKScreen()
# triangle
a=ocl.Point(1,0,0.4)
b=ocl.Point(0,1,0)
c=ocl.Point(0,0,-0.2)
t = ocl.Triangle(b,c,a)
# draw the triangle with VTK
myscreen.addActor(camvtk.Point(center=(a.x,a.y,a.z), color=(1,0,1)))
myscreen.addActor(camvtk.Point(center=(b.x,b.y,b.z), color=(1,0,1)))
myscreen.addActor(camvtk.Point(center=(c.x,c.y,c.z), color=(1,0,1)))
myscreen.addActor( camvtk.Line(p1=(a.x,a.y,a.z),p2=(c.x,c.y,c.z)) )
myscreen.addActor( camvtk.Line(p1=(c.x,c.y,c.z),p2=(b.x,b.y,b.z)) )
myscreen.addActor( camvtk.Line(p1=(a.x,a.y,a.z),p2=(b.x,b.y,b.z)) )
# cutter
radius1=1
length=5
angle = math.pi/4
#cutter = ocl.ConeCutter(0.37, angle)
cutter = ocl.BallCutter(0.532, length)
#cutter = ocl.CylCutter(0.3, length)
#cutter = ocl.BullCutter(0.5,0.123, length)
print cutter
# grid on which we run drop-cutter
minx=-0.5
dx=0.0051
maxx=1.5
miny=-0.7
dy=dx
maxy=1.5
z=-0.7
clpoints = pyocl.CLPointGrid(minx,dx,maxx,miny,dy,maxy,z)
print len(clpoints), "cl-points to evaluate"
n=0
for cl in clpoints:
#cutter.vertexDrop(cl,t)
#cutter.edgeDrop(cl,t)
#cutter.facetDrop(cl,t)
cutter.dropCutter(cl,t) # this calls all three above: vertex,facet,edge
n=n+1
if (n % int(len(clpoints)/10)) == 0:
print n/int(len(clpoints)/10), " ",
print "done."
print "rendering..."
print " len(clpoints)=", len(clpoints)
camvtk.drawCLPointCloud(myscreen, clpoints)
print "done."
# draw a sphere, just for fun
origo = camvtk.Sphere(center=(0,0,0) , radius=0.1, color=camvtk.blue)
origo.SetOpacity(0.2)
myscreen.addActor( origo )
myscreen.camera.SetPosition(0.5, 3, 2)
myscreen.camera.SetFocalPoint(0.5, 0.5, 0)
myscreen.camera.SetClippingRange(-20,20)
myscreen.render()
myscreen.iren.Start()
#raw_input("Press Enter to terminate")
| apache-2.0 | 785,468,906,210,808,400 | 27.7375 | 79 | 0.606351 | false |
bnoi/scikit-tracker | sktracker/io/tests/test_metadataio.py | 1 | 2350 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from nose.tools import assert_raises
from sktracker import data
from sktracker.io import get_metadata
from sktracker.io import validate_metadata
def test_get_metadata():
fname = data.sample_ome()
real_metadata = {'PhysicalSizeY': 0.065,
'SizeC': 2,
'SizeZ': 8,
'SizeT': 20,
'PhysicalSizeX': 0.065,
'SizeY': 20,
'SizeX': 50,
'PhysicalSizeZ': 0.8,
'DimensionOrder': ['T', 'Z', 'C', 'Y', 'X'],
'AcquisitionDate': '2014-02-24T15:29:53',
'Shape': (20, 8, 2, 20, 50)}
guessed_metadata = get_metadata(fname, json_discovery=True)
guessed_metadata.pop("FileName", None)
assert real_metadata == guessed_metadata
def test_invalidate_metadata():
bad_metadata = {'SizeC': 2, 'SizeZ': 8}
assert_raises(ValueError, validate_metadata, bad_metadata, ['DimensionOrder'])
def test_validate_metadata():
good_metadata = {'PhysicalSizeY': 0.065,
'SizeC': 2,
'SizeZ': 8,
'SizeT': 20,
'PhysicalSizeX': 0.065,
'SizeY': 20,
'SizeX': 50,
'PhysicalSizeZ': 0.8,
'DimensionOrder': ['T', 'Z', 'C', 'Y', 'X'],
'AcquisitionDate': '2014-02-24T15:29:53',
'Shape': (20, 8, 2, 20, 50),
'FileName': '../../data/sample.ome.tif'}
default_good = validate_metadata(good_metadata)
extra_good = validate_metadata(good_metadata,
keys=['PhysicalSizeZ',
'DimensionOrder',
'AcquisitionDate'])
assert default_good and extra_good
def test_get_from_metadata_json():
from sktracker.io.metadataio import _get_from_metadata_json
assert _get_from_metadata_json(data.metadata_json()) == {'PhysicalSizeZ': 0.8}
store_path = data.sample_h5_temp()
assert _get_from_metadata_json(store_path) == {}
| bsd-3-clause | 3,016,241,812,330,578,000 | 31.191781 | 82 | 0.513191 | false |
enigmampc/catalyst | catalyst/utils/input_validation.py | 1 | 25568 | # Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import tzinfo
from functools import partial, wraps
from operator import attrgetter
from numpy import dtype
import pandas as pd
from pytz import timezone
from six import iteritems, string_types, PY3
from toolz import valmap, complement, compose
import toolz.curried.operator as op
from catalyst.utils.functional import getattrs
from catalyst.utils.preprocess import call, preprocess
if PY3:
_qualified_name = attrgetter('__qualname__')
else:
def _qualified_name(obj):
"""
Return the fully-qualified name (ignoring inner classes) of a type.
"""
module = obj.__module__
if module in ('__builtin__', '__main__', 'builtins'):
return obj.__name__
return '.'.join([module, obj.__name__])
def verify_indices_all_unique(obj):
"""
Check that all axes of a pandas object are unique.
Parameters
----------
obj : pd.Series / pd.DataFrame / pd.Panel
The object to validate.
Returns
-------
obj : pd.Series / pd.DataFrame / pd.Panel
The validated object, unchanged.
Raises
------
ValueError
If any axis has duplicate entries.
"""
axis_names = [
('index',), # Series
('index', 'columns'), # DataFrame
('items', 'major_axis', 'minor_axis') # Panel
][obj.ndim - 1] # ndim = 1 should go to entry 0,
for axis_name, index in zip(axis_names, obj.axes):
if index.is_unique:
continue
raise ValueError(
"Duplicate entries in {type}.{axis}: {dupes}.".format(
type=type(obj).__name__,
axis=axis_name,
dupes=sorted(index[index.duplicated()]),
)
)
return obj
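# Illustrative sketch (not part of the original module): unique axes pass
# through unchanged, duplicated axis labels raise ValueError.
#   verify_indices_all_unique(pd.Series([1, 2], index=['a', 'b']))   # returned as-is
#   verify_indices_all_unique(pd.Series([1, 2], index=['a', 'a']))   # ValueError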
def optionally(preprocessor):
"""Modify a preprocessor to explicitly allow `None`.
Parameters
----------
preprocessor : callable[callable, str, any -> any]
A preprocessor to delegate to when `arg is not None`.
Returns
-------
optional_preprocessor : callable[callable, str, any -> any]
A preprocessor that delegates to `preprocessor` when `arg is not None`.
Examples
--------
>>> def preprocessor(func, argname, arg):
... if not isinstance(arg, int):
... raise TypeError('arg must be int')
... return arg
...
>>> @preprocess(a=optionally(preprocessor))
... def f(a):
... return a
...
>>> f(1) # call with int
1
>>> f('a') # call with not int
Traceback (most recent call last):
...
TypeError: arg must be int
>>> f(None) is None # call with explicit None
True
"""
@wraps(preprocessor)
def wrapper(func, argname, arg):
return arg if arg is None else preprocessor(func, argname, arg)
return wrapper
def ensure_upper_case(func, argname, arg):
if isinstance(arg, string_types):
return arg.upper()
else:
raise TypeError(
"{0}() expected argument '{1}' to"
" be a string, but got {2} instead.".format(
func.__name__,
argname,
arg,
),
)
def ensure_dtype(func, argname, arg):
"""
Argument preprocessor that converts the input into a numpy dtype.
Examples
--------
>>> import numpy as np
>>> from catalyst.utils.preprocess import preprocess
>>> @preprocess(dtype=ensure_dtype)
... def foo(dtype):
... return dtype
...
>>> foo(float)
dtype('float64')
"""
try:
return dtype(arg)
except TypeError:
raise TypeError(
"{func}() couldn't convert argument "
"{argname}={arg!r} to a numpy dtype.".format(
func=_qualified_name(func),
argname=argname,
arg=arg,
),
)
def ensure_timezone(func, argname, arg):
"""Argument preprocessor that converts the input into a tzinfo object.
Examples
--------
>>> from catalyst.utils.preprocess import preprocess
>>> @preprocess(tz=ensure_timezone)
... def foo(tz):
... return tz
>>> foo('utc')
<UTC>
"""
if isinstance(arg, tzinfo):
return arg
if isinstance(arg, string_types):
return timezone(arg)
raise TypeError(
"{func}() couldn't convert argument "
"{argname}={arg!r} to a timezone.".format(
func=_qualified_name(func),
argname=argname,
arg=arg,
),
)
def ensure_timestamp(func, argname, arg):
"""Argument preprocessor that converts the input into a pandas Timestamp
object.
Examples
--------
>>> from catalyst.utils.preprocess import preprocess
>>> @preprocess(ts=ensure_timestamp)
... def foo(ts):
... return ts
>>> foo('2014-01-01')
Timestamp('2014-01-01 00:00:00')
"""
try:
return pd.Timestamp(arg)
except ValueError as e:
raise TypeError(
"{func}() couldn't convert argument "
"{argname}={arg!r} to a pandas Timestamp.\n"
"Original error was: {t}: {e}".format(
func=_qualified_name(func),
argname=argname,
arg=arg,
t=_qualified_name(type(e)),
e=e,
),
)
def expect_dtypes(__funcname=_qualified_name, **named):
"""
Preprocessing decorator that verifies inputs have expected numpy dtypes.
Examples
--------
>>> from numpy import dtype, arange, int8, float64
>>> @expect_dtypes(x=dtype(int8))
... def foo(x, y):
... return x, y
...
>>> foo(arange(3, dtype=int8), 'foo')
(array([0, 1, 2], dtype=int8), 'foo')
>>> foo(arange(3, dtype=float64), 'foo') # doctest: +NORMALIZE_WHITESPACE
... # doctest: +ELLIPSIS
Traceback (most recent call last):
...
TypeError: ...foo() expected a value with dtype 'int8' for argument 'x',
but got 'float64' instead.
"""
for name, type_ in iteritems(named):
if not isinstance(type_, (dtype, tuple)):
raise TypeError(
"expect_dtypes() expected a numpy dtype or tuple of dtypes"
" for argument {name!r}, but got {dtype} instead.".format(
                    name=name, dtype=type_,
)
)
if isinstance(__funcname, str):
def get_funcname(_):
return __funcname
else:
get_funcname = __funcname
@preprocess(dtypes=call(lambda x: x if isinstance(x, tuple) else (x,)))
def _expect_dtype(dtypes):
"""
Factory for dtype-checking functions that work with the @preprocess
decorator.
"""
def error_message(func, argname, value):
# If the bad value has a dtype, but it's wrong, show the dtype
# name. Otherwise just show the value.
try:
value_to_show = value.dtype.name
except AttributeError:
value_to_show = value
return (
"{funcname}() expected a value with dtype {dtype_str} "
"for argument {argname!r}, but got {value!r} instead."
).format(
funcname=get_funcname(func),
dtype_str=' or '.join(repr(d.name) for d in dtypes),
argname=argname,
value=value_to_show,
)
def _actual_preprocessor(func, argname, argvalue):
if getattr(argvalue, 'dtype', object()) not in dtypes:
raise TypeError(error_message(func, argname, argvalue))
return argvalue
return _actual_preprocessor
return preprocess(**valmap(_expect_dtype, named))
def expect_kinds(**named):
"""
Preprocessing decorator that verifies inputs have expected dtype kinds.
Examples
--------
>>> from numpy import int64, int32, float32
>>> @expect_kinds(x='i')
... def foo(x):
... return x
...
>>> foo(int64(2))
2
>>> foo(int32(2))
2
>>> foo(float32(2)) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
Traceback (most recent call last):
...
TypeError: ...foo() expected a numpy object of kind 'i' for argument 'x',
but got 'f' instead.
"""
for name, kind in iteritems(named):
if not isinstance(kind, (str, tuple)):
raise TypeError(
"expect_dtype_kinds() expected a string or tuple of strings"
" for argument {name!r}, but got {kind} instead.".format(
name=name, kind=dtype,
)
)
@preprocess(kinds=call(lambda x: x if isinstance(x, tuple) else (x,)))
def _expect_kind(kinds):
"""
Factory for kind-checking functions that work the @preprocess
decorator.
"""
def error_message(func, argname, value):
# If the bad value has a dtype, but it's wrong, show the dtype
# kind. Otherwise just show the value.
try:
value_to_show = value.dtype.kind
except AttributeError:
value_to_show = value
return (
"{funcname}() expected a numpy object of kind {kinds} "
"for argument {argname!r}, but got {value!r} instead."
).format(
funcname=_qualified_name(func),
kinds=' or '.join(map(repr, kinds)),
argname=argname,
value=value_to_show,
)
def _actual_preprocessor(func, argname, argvalue):
if getattrs(argvalue, ('dtype', 'kind'), object()) not in kinds:
raise TypeError(error_message(func, argname, argvalue))
return argvalue
return _actual_preprocessor
return preprocess(**valmap(_expect_kind, named))
def expect_types(__funcname=_qualified_name, **named):
"""
Preprocessing decorator that verifies inputs have expected types.
Examples
--------
>>> @expect_types(x=int, y=str)
... def foo(x, y):
... return x, y
...
>>> foo(2, '3')
(2, '3')
>>> foo(2.0, '3') # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
Traceback (most recent call last):
...
TypeError: ...foo() expected a value of type int for argument 'x',
but got float instead.
Notes
-----
A special argument, __funcname, can be provided as a string to override the
function name shown in error messages. This is most often used on __init__
or __new__ methods to make errors refer to the class name instead of the
function name.
"""
for name, type_ in iteritems(named):
if not isinstance(type_, (type, tuple)):
raise TypeError(
"expect_types() expected a type or tuple of types for "
"argument '{name}', but got {type_} instead.".format(
name=name, type_=type_,
)
)
def _expect_type(type_):
# Slightly different messages for type and tuple of types.
_template = (
"%(funcname)s() expected a value of type {type_or_types} "
"for argument '%(argname)s', but got %(actual)s instead."
)
if isinstance(type_, tuple):
template = _template.format(
type_or_types=' or '.join(map(_qualified_name, type_))
)
else:
template = _template.format(type_or_types=_qualified_name(type_))
return make_check(
exc_type=TypeError,
template=template,
pred=lambda v: not isinstance(v, type_),
actual=compose(_qualified_name, type),
funcname=__funcname,
)
return preprocess(**valmap(_expect_type, named))
def make_check(exc_type, template, pred, actual, funcname):
"""
Factory for making preprocessing functions that check a predicate on the
input value.
Parameters
----------
exc_type : Exception
The exception type to raise if the predicate fails.
template : str
A template string to use to create error messages.
Should have %-style named template parameters for 'funcname',
'argname', and 'actual'.
pred : function[object -> bool]
A function to call on the argument being preprocessed. If the
predicate returns `True`, we raise an instance of `exc_type`.
actual : function[object -> object]
A function to call on bad values to produce the value to display in the
error message.
funcname : str or callable
Name to use in error messages, or function to call on decorated
functions to produce a name. Passing an explicit name is useful when
creating checks for __init__ or __new__ methods when you want the error
to refer to the class name instead of the method name.
"""
if isinstance(funcname, str):
def get_funcname(_):
return funcname
else:
get_funcname = funcname
def _check(func, argname, argvalue):
if pred(argvalue):
raise exc_type(
template % {
'funcname': get_funcname(func),
'argname': argname,
'actual': actual(argvalue),
},
)
return argvalue
return _check
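# Illustrative sketch (hypothetical check, not part of the original module):
# the expect_* helpers below are built on make_check in exactly this way.
#   check_positive = make_check(
#       exc_type=ValueError,
#       template="%(funcname)s() expected a positive value for argument "
#                "'%(argname)s', but got %(actual)s instead.",
#       pred=lambda v: v <= 0,
#       actual=repr,
#       funcname='my_func',
#   )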
def optional(type_):
"""
Helper for use with `expect_types` when an input can be `type_` or `None`.
Returns an object such that both `None` and instances of `type_` pass
checks of the form `isinstance(obj, optional(type_))`.
Parameters
----------
type_ : type
Type for which to produce an option.
Examples
--------
>>> isinstance({}, optional(dict))
True
>>> isinstance(None, optional(dict))
True
>>> isinstance(1, optional(dict))
False
"""
return (type_, type(None))
def expect_element(__funcname=_qualified_name, **named):
"""
Preprocessing decorator that verifies inputs are elements of some
expected collection.
Examples
--------
>>> @expect_element(x=('a', 'b'))
... def foo(x):
... return x.upper()
...
>>> foo('a')
'A'
>>> foo('b')
'B'
>>> foo('c') # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
Traceback (most recent call last):
...
ValueError: ...foo() expected a value in ('a', 'b') for argument 'x',
but got 'c' instead.
Notes
-----
A special argument, __funcname, can be provided as a string to override the
function name shown in error messages. This is most often used on __init__
or __new__ methods to make errors refer to the class name instead of the
function name.
This uses the `in` operator (__contains__) to make the containment check.
This allows us to use any custom container as long as the object supports
the container protocol.
"""
def _expect_element(collection):
if isinstance(collection, (set, frozenset)):
# Special case the error message for set and frozen set to make it
# less verbose.
collection_for_error_message = tuple(sorted(collection))
else:
collection_for_error_message = collection
template = (
"%(funcname)s() expected a value in {collection} "
"for argument '%(argname)s', but got %(actual)s instead."
).format(collection=collection_for_error_message)
return make_check(
ValueError,
template,
complement(op.contains(collection)),
repr,
funcname=__funcname,
)
return preprocess(**valmap(_expect_element, named))
def expect_bounded(__funcname=_qualified_name, **named):
"""
Preprocessing decorator verifying that inputs fall INCLUSIVELY between
bounds.
Bounds should be passed as a pair of ``(min_value, max_value)``.
``None`` may be passed as ``min_value`` or ``max_value`` to signify that
the input is only bounded above or below.
Examples
--------
>>> @expect_bounded(x=(1, 5))
... def foo(x):
... return x + 1
...
>>> foo(1)
2
>>> foo(5)
6
>>> foo(6) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
Traceback (most recent call last):
...
ValueError: ...foo() expected a value inclusively between 1 and 5 for
argument 'x', but got 6 instead.
>>> @expect_bounded(x=(2, None))
... def foo(x):
... return x
...
>>> foo(100000)
100000
>>> foo(1) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
Traceback (most recent call last):
...
ValueError: ...foo() expected a value greater than or equal to 2 for
argument 'x', but got 1 instead.
>>> @expect_bounded(x=(None, 5))
... def foo(x):
... return x
...
>>> foo(6) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
Traceback (most recent call last):
...
ValueError: ...foo() expected a value less than or equal to 5 for
argument 'x', but got 6 instead.
"""
def _make_bounded_check(bounds):
(lower, upper) = bounds
if lower is None:
def should_fail(value):
return value > upper
predicate_descr = "less than or equal to " + str(upper)
elif upper is None:
def should_fail(value):
return value < lower
predicate_descr = "greater than or equal to " + str(lower)
else:
def should_fail(value):
return not (lower <= value <= upper)
predicate_descr = "inclusively between %s and %s" % bounds
template = (
"%(funcname)s() expected a value {predicate}"
" for argument '%(argname)s', but got %(actual)s instead."
).format(predicate=predicate_descr)
return make_check(
exc_type=ValueError,
template=template,
pred=should_fail,
actual=repr,
funcname=__funcname,
)
return _expect_bounded(_make_bounded_check, __funcname=__funcname, **named)
def expect_strictly_bounded(__funcname=_qualified_name, **named):
"""
Preprocessing decorator verifying that inputs fall EXCLUSIVELY between
bounds.
Bounds should be passed as a pair of ``(min_value, max_value)``.
``None`` may be passed as ``min_value`` or ``max_value`` to signify that
the input is only bounded above or below.
Examples
--------
>>> @expect_strictly_bounded(x=(1, 5))
... def foo(x):
... return x + 1
...
>>> foo(2)
3
>>> foo(4)
5
>>> foo(5) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
Traceback (most recent call last):
...
ValueError: ...foo() expected a value exclusively between 1 and 5 for
argument 'x', but got 5 instead.
>>> @expect_strictly_bounded(x=(2, None))
... def foo(x):
... return x
...
>>> foo(100000)
100000
>>> foo(2) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
Traceback (most recent call last):
...
ValueError: ...foo() expected a value strictly greater than 2 for
argument 'x', but got 2 instead.
>>> @expect_strictly_bounded(x=(None, 5))
... def foo(x):
... return x
...
>>> foo(5) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
Traceback (most recent call last):
...
ValueError: ...foo() expected a value strictly less than 5 for
argument 'x', but got 5 instead.
"""
def _make_bounded_check(bounds):
(lower, upper) = bounds
if lower is None:
def should_fail(value):
return value >= upper
predicate_descr = "strictly less than " + str(upper)
elif upper is None:
def should_fail(value):
return value <= lower
predicate_descr = "strictly greater than " + str(lower)
else:
def should_fail(value):
return not (lower < value < upper)
predicate_descr = "exclusively between %s and %s" % bounds
template = (
"%(funcname)s() expected a value {predicate}"
" for argument '%(argname)s', but got %(actual)s instead."
).format(predicate=predicate_descr)
return make_check(
exc_type=ValueError,
template=template,
pred=should_fail,
actual=repr,
funcname=__funcname,
)
return _expect_bounded(_make_bounded_check, __funcname=__funcname, **named)
def _expect_bounded(make_bounded_check, __funcname, **named):
def valid_bounds(t):
return (
isinstance(t, tuple) and
len(t) == 2 and
t != (None, None)
)
for name, bounds in iteritems(named):
if not valid_bounds(bounds):
raise TypeError(
"expect_bounded() expected a tuple of bounds for"
" argument '{name}', but got {bounds} instead.".format(
name=name,
bounds=bounds,
)
)
return preprocess(**valmap(make_bounded_check, named))
def expect_dimensions(__funcname=_qualified_name, **dimensions):
"""
Preprocessing decorator that verifies inputs are numpy arrays with a
specific dimensionality.
Examples
--------
>>> from numpy import array
>>> @expect_dimensions(x=1, y=2)
... def foo(x, y):
... return x[0] + y[0, 0]
...
>>> foo(array([1, 1]), array([[1, 1], [2, 2]]))
2
>>> foo(array([1, 1]), array([1, 1])) # doctest: +NORMALIZE_WHITESPACE
... # doctest: +ELLIPSIS
Traceback (most recent call last):
...
ValueError: ...foo() expected a 2-D array for argument 'y',
but got a 1-D array instead.
"""
if isinstance(__funcname, str):
def get_funcname(_):
return __funcname
else:
get_funcname = __funcname
def _expect_dimension(expected_ndim):
def _check(func, argname, argvalue):
actual_ndim = argvalue.ndim
if actual_ndim != expected_ndim:
if actual_ndim == 0:
actual_repr = 'scalar'
else:
actual_repr = "%d-D array" % actual_ndim
raise ValueError(
"{func}() expected a {expected:d}-D array"
" for argument {argname!r}, but got a {actual}"
" instead.".format(
func=get_funcname(func),
expected=expected_ndim,
argname=argname,
actual=actual_repr,
)
)
return argvalue
return _check
return preprocess(**valmap(_expect_dimension, dimensions))
def coerce(from_, to, **to_kwargs):
"""
A preprocessing decorator that coerces inputs of a given type by passing
them to a callable.
Parameters
----------
from : type or tuple or types
Inputs types on which to call ``to``.
to : function
Coercion function to call on inputs.
**to_kwargs
Additional keywords to forward to every call to ``to``.
Examples
--------
>>> @preprocess(x=coerce(float, int), y=coerce(float, int))
... def floordiff(x, y):
... return x - y
...
>>> floordiff(3.2, 2.5)
1
>>> @preprocess(x=coerce(str, int, base=2), y=coerce(str, int, base=2))
... def add_binary_strings(x, y):
... return bin(x + y)[2:]
...
>>> add_binary_strings('101', '001')
'110'
"""
def preprocessor(func, argname, arg):
if isinstance(arg, from_):
return to(arg, **to_kwargs)
return arg
return preprocessor
def coerce_types(**kwargs):
"""
Preprocessing decorator that applies type coercions.
Parameters
----------
**kwargs : dict[str -> (type, callable)]
Keyword arguments mapping function parameter names to pairs of
(from_type, to_type).
Examples
--------
>>> @coerce_types(x=(float, int), y=(int, str))
... def func(x, y):
... return (x, y)
...
>>> func(1.0, 3)
(1, '3')
"""
def _coerce(types):
return coerce(*types)
return preprocess(**valmap(_coerce, kwargs))
class error_keywords(object):
def __init__(self, *args, **kwargs):
self.messages = kwargs
def __call__(self, func):
def assert_keywords_and_call(*args, **kwargs):
for field, message in iteritems(self.messages):
if field in kwargs:
raise TypeError(message)
return func(*args, **kwargs)
return assert_keywords_and_call
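# Illustrative sketch (hypothetical keyword, not part of the original module):
# reject an unsupported keyword argument with a custom error message.
#   @error_keywords(universe="The 'universe' argument is not supported.")
#   def initialize(context, **kwargs):
#       ...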
coerce_string = partial(coerce, string_types)
| apache-2.0 | -2,792,091,310,868,139,500 | 29.620359 | 79 | 0.556516 | false |
iABC2XYZ/abc | Scripts/RFQVane/VaneStructure2.py | 1 | 7067 | # -*- coding: utf-8 -*-
"""
Created on Fri Mar 03 11:11:22 2017
@author: A
"""
import matplotlib.pyplot as plt
import numpy as np
from scipy.optimize import fsolve
from scipy.special import iv
#from scipy.signal import find_peaks_cwt
plt.close('all')
zStart=0+0.67
zEnd=230.045+0.67
zStep=0.005  ######################################### step size must not be smaller than 0.005, otherwise the Bessel-function solve goes wrong
Freq=162.5
cLight=299792458
lambda_m=cLight/Freq/1.e6
cell_Beta_A_a_m_Z_L=np.loadtxt('pariout_python.txt')
cell=cell_Beta_A_a_m_Z_L[:,0]
Beta=cell_Beta_A_a_m_Z_L[:,3]
A=cell_Beta_A_a_m_Z_L[:,5]
a=cell_Beta_A_a_m_Z_L[:,7]
m=cell_Beta_A_a_m_Z_L[:,8]
Z=cell_Beta_A_a_m_Z_L[:,-3]
L=cell_Beta_A_a_m_Z_L[:,-4]
numCell=len(cell)
nREC=int((zEnd-zStart)/zStep)+1
xREC=np.zeros((nREC,2))
xREC_2=np.zeros((nREC,2))
zREC=np.zeros(nREC)
cellREC=np.zeros(nREC)
cellFlagREC=np.zeros(nREC)
RhoREC=np.zeros(nREC)
LREC=np.zeros(nREC)
Lcal=np.zeros(nREC)
iCellFlag=1
zRec=zStart
def RFQVane(x,a,k,z,m):  ######################################################## define the RFQ vane-tip profile function
A=(m**2-1)/(m**2*iv(0,k*a)+iv(0,m*k*a))
return x**2/a**2-(1-A*iv(0,k*x)*np.cos(k*z))/(1-A*iv(0,k*a))
def Rho(a,k,m):
A=(m**2-1)/(m**2*iv(0,k*a)+iv(0,m*k*a))
Rho=0.75*a/np.sqrt(1-A*iv(0,k*a))
return Rho
iREC=0;
while (zRec<zEnd):
print(zRec)
diff_RecCell=zRec-Z
    iCell=len(diff_RecCell[diff_RecCell>0]) -1  ############################### determine which cell the sampled point falls in
iCellFlag=(-1)**iCell
if (iCellFlag>0):
zCal=zRec-Z[iCell]
zCal_2=Z[iCell]-zRec
else:
zCal=Z[iCell+1]-zRec
zCal_2=zRec-Z[iCell-1]
# zCal=zRec-Z[iCell]
#k=np.pi/L[iCell]
betaK=np.interp(zRec,Z,Beta)
k=np.pi/betaK/lambda_m/100*2
    #k=np.pi/np.interp(zRec,Z,L)  ############################## comparison shows no difference between cell lengths computed from the L data and from beta
aInterP=np.interp(zRec,Z,a)
mInterP=np.interp(zRec,Z,m)
xRecTmp = fsolve(RFQVane,[-0.3],args=(aInterP,k,zCal,mInterP))
xRecTmp_2 = fsolve(RFQVane,[-0.3],args=(aInterP,k,zCal_2,mInterP))
RhoREC[iREC]=Rho(aInterP,k,mInterP)
xREC[iREC,:]=xRecTmp
xREC_2[iREC,:]=xRecTmp_2
zREC[iREC]=zRec
cellREC[iREC]=iCell
cellFlagREC[iREC]=iCellFlag
LREC[iREC]=np.interp(zRec,Z,L)
Lcal[iREC]=betaK*lambda_m/2*100
iREC+=1
zRec+=zStep
plt.figure('calculating result')
plt.plot(zREC,xREC[:,0],'b')
plt.hold
plt.savefig('result.png')
#plt.plot(zREC,xREC_2[:,0],'r')
######################################  Comparison with reference data  ####################################
z_HV_REF=np.loadtxt('RFQ H DATA.txt')
Z_REF=z_HV_REF[:,0]/10.
X_REF=z_HV_REF[:,1]/10
Rho_REF=z_HV_REF[:,2]/10
plt.figure('Comp')
plt.plot(zREC-0.67,xREC,'b')
plt.hold
#plt.plot(zREC,xREC_2[:,0],'g')
plt.hold
plt.plot(Z_REF,X_REF,'r')
xRECInterP=np.interp(Z_REF,zREC-0.67,xREC[:,0])
plt.figure('Diff')
plt.plot(Z_REF,X_REF-xRECInterP,'r')
plt.hold
#plt.savefig('comp.png')
#plt.plot(zREC,cellFlagREC,'g')
########################compare the Rho function##################################################
'''
plt.figure('Rho')
plt.plot(zREC,RhoREC,'b')
plt.hold
plt.plot(Z_REF,Rho_REF,'r')
plt.hold
plt.plot(Z_REF,Rho_REF-np.interp(Z_REF,zREC,RhoREC),'g')
plt.plot(zREC,np.interp(zREC,Z_REF,Rho_REF),'g')
'''
###########################compare cell lengths read from file vs. calculated################################
'''
plt.figure('L_COMP')
plt.plot(zREC,LREC,'r')
plt.hold
plt.plot(zREC,Lcal,'b')
plt.hold
plt.figure('L_Ratio')
plt.plot(zREC,((LREC-Lcal)/LREC))
'''
########################analyse the number of cells################################################
def Smooth(x):
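    # In-place smoothing: 5-point moving average in the interior, shorter
    # averages near the ends, end points left unchanged.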
x[0]=x[0]
x[1]=np.average(x[0:2])
x[2:-3]=(x[0:-5]+x[1:-4]+x[2:-3]+x[3:-2]+x[4:-1])/5.
x[-2]=np.average(x[-3:-1])
x[-1]=x[-1]
return x
def FindPeaks(x):
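    # Local maxima: points strictly higher than both neighbours; the returned
    # indices are relative to the x[1:-2] slice (i.e. shifted by one w.r.t. x).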
xLeft=x[1:-2]> x[0:-3]
xRight=x[1:-2]> x[2:-1]
xFlag=xLeft*xRight
indexX=np.where(xFlag==1)
return indexX
def FindValley(x):
xLeft=x[1:-2]< x[0:-3]
xRight=x[1:-2]< x[2:-1]
xFlag=xLeft*xRight
indexX=np.where(xFlag==1)
return indexX
indexPeak=((Z_REF>4.) * (Z_REF<221.5))######################define the peak-search range
ZREFPeak=Z_REF[indexPeak]
xREFPeak=X_REF[indexPeak]
xREFPeak=Smooth(xREFPeak)
xREFPeak=Smooth(xREFPeak)
xRECPeak=xRECInterP[indexPeak]
ZRECPeak=ZREFPeak
xRECPeak=Smooth(xRECPeak)
xRECPeak=Smooth(xRECPeak)
index_xRECPeakTuple=FindPeaks(xRECPeak)
index_xREFPeakTuple=FindPeaks(xREFPeak)
index_xRECPeak=index_xRECPeakTuple[0]
index_xREFPeak=index_xREFPeakTuple[0]
print(' xRECPeak:',len(index_xRECPeak),'\n','xREFPeak:',len(index_xREFPeak))
index_xREFValleyTuple=FindValley(xREFPeak)
index_xREFValley=index_xREFValleyTuple[0]
if len(index_xREFPeak)==len(index_xREFValley):
if ((Z_REF[index_xREFPeak[0]])<(Z_REF[index_xREFValley[0]])):
Lcell_HV=Z_REF[index_xREFValley]-Z_REF[index_xREFPeak]
P_cell_PV=Z_REF[index_xREFValley]
else:
Lcell_HV=Z_REF[index_xREFPeak]-Z_REF[index_xREFValley]
P_cell_PV=Z_REF[index_xREFPeak]
elif len(index_xREFPeak)<len(index_xREFValley):
Lcell_HV=Z_REF[index_xREFPeak]-Z_REF[index_xREFValley[:-1]]
P_cell_PV=Z_REF[index_xREFPeak]
else:
Lcell_HV=Z_REF[index_xREFValley]-Z_REF[index_xREFPeak[-1]]
P_cell_PV=Z_REF[index_xREFValley]
pariout=np.loadtxt('pariout_python.txt')
Cell_pariout=pariout[:,0]
Z_pariout=pariout[:,-3]
L_pariout=pariout[:,-4]
r0_pariout=pariout[:,9]
ncell_pariout=len(Z_pariout[(Z_pariout>4.)*(Z_pariout<221.5)])
'''
plt.figure('Length(HV_P-V)_comp_priout')
plt.plot(Z_REF[indexPeak],np.interp(Z_REF[indexPeak],P_cell_PV,Lcell_HV),'b')
plt.hold
plt.plot(Z_REF[indexPeak],np.interp(Z_REF[indexPeak],Z_pariout,L_pariout),'r')
print(' HV:',((len(index_xREFPeak))+len(index_xREFValley)),'\n','parioutcell:',ncell_pariout)
'''
'''
plt.figure('Peak')
plt.plot(ZRECPeak,xRECPeak,'b')
plt.hold
plt.plot(ZRECPeak,xREFPeak,'r')
plt.plot(ZRECPeak[index_xRECPeak],xRECPeak[index_xRECPeak],'bo')
plt.plot(ZRECPeak[index_xREFPeak],xREFPeak[index_xREFPeak],'r*')
plt.plot(ZRECPeak[index_xREFValley],xREFPeak[index_xREFValley],'r*')
'''
##############################compute the fixed vane-tip radius######################################
r0_cal_rho=r0_pariout[4:]
L_cal_rho=L_pariout[4:]
r0_sum=0
for i in range(0,len(L_cal_rho)):
r0_sum=r0_sum+r0_cal_rho[i]*L_cal_rho[i]
r0_rho=r0_sum/Z_pariout[-1]
rho_constant=0.75*r0_rho
print(' CST_RHO_constant=',rho_constant,'cm')
##############################################################################
plt.show()
| gpl-3.0 | -2,863,287,004,672,663,600 | 19.009146 | 93 | 0.554636 | false |
cryptoprojects/ultimateonlinecash | test/functional/importmulti.py | 1 | 21761 | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Copyright (c) 2017 The UltimateOnlineCash Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the importmulti RPC."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class ImportMultiTest (BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
def setup_network(self):
self.setup_nodes()
def run_test (self):
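        # Walk importmulti through address / scriptPubKey / pubkey / private-key /
        # P2SH combinations, including the expected failure cases, then verify
        # watch-only timestamp handling across a node restart.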
self.log.info("Mining blocks...")
self.nodes[0].generate(1)
self.nodes[1].generate(1)
timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']
node0_address1 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
#Check only one address
assert_equal(node0_address1['ismine'], True)
#Node 1 sync test
assert_equal(self.nodes[1].getblockcount(),1)
#Address Test - before import
address_info = self.nodes[1].validateaddress(node0_address1['address'])
assert_equal(address_info['iswatchonly'], False)
assert_equal(address_info['ismine'], False)
# RPC importmulti -----------------------------------------------
# UltimateOnlineCash Address
self.log.info("Should import an address")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": address['address']
},
"timestamp": "now",
}])
assert_equal(result[0]['success'], True)
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], True)
assert_equal(address_assert['ismine'], False)
assert_equal(address_assert['timestamp'], timestamp)
watchonly_address = address['address']
watchonly_timestamp = timestamp
self.log.info("Should not import an invalid address")
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": "not valid address",
},
"timestamp": "now",
}])
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -5)
assert_equal(result[0]['error']['message'], 'Invalid address')
# ScriptPubKey + internal
self.log.info("Should import a scriptPubKey with internal flag")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": address['scriptPubKey'],
"timestamp": "now",
"internal": True
}])
assert_equal(result[0]['success'], True)
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], True)
assert_equal(address_assert['ismine'], False)
assert_equal(address_assert['timestamp'], timestamp)
# ScriptPubKey + !internal
self.log.info("Should not import a scriptPubKey without internal flag")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": address['scriptPubKey'],
"timestamp": "now",
}])
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -8)
assert_equal(result[0]['error']['message'], 'Internal must be set for hex scriptPubKey')
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], False)
assert_equal(address_assert['ismine'], False)
assert_equal('timestamp' in address_assert, False)
# Address + Public key + !Internal
self.log.info("Should import an address with public key")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": address['address']
},
"timestamp": "now",
"pubkeys": [ address['pubkey'] ]
}])
assert_equal(result[0]['success'], True)
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], True)
assert_equal(address_assert['ismine'], False)
assert_equal(address_assert['timestamp'], timestamp)
# ScriptPubKey + Public key + internal
self.log.info("Should import a scriptPubKey with internal and with public key")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
request = [{
"scriptPubKey": address['scriptPubKey'],
"timestamp": "now",
"pubkeys": [ address['pubkey'] ],
"internal": True
}]
result = self.nodes[1].importmulti(request)
assert_equal(result[0]['success'], True)
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], True)
assert_equal(address_assert['ismine'], False)
assert_equal(address_assert['timestamp'], timestamp)
# ScriptPubKey + Public key + !internal
self.log.info("Should not import a scriptPubKey without internal and with public key")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
request = [{
"scriptPubKey": address['scriptPubKey'],
"timestamp": "now",
"pubkeys": [ address['pubkey'] ]
}]
result = self.nodes[1].importmulti(request)
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -8)
assert_equal(result[0]['error']['message'], 'Internal must be set for hex scriptPubKey')
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], False)
assert_equal(address_assert['ismine'], False)
assert_equal('timestamp' in address_assert, False)
# Address + Private key + !watchonly
self.log.info("Should import an address with private key")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": address['address']
},
"timestamp": "now",
"keys": [ self.nodes[0].dumpprivkey(address['address']) ]
}])
assert_equal(result[0]['success'], True)
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], False)
assert_equal(address_assert['ismine'], True)
assert_equal(address_assert['timestamp'], timestamp)
self.log.info("Should not import an address with private key if is already imported")
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": address['address']
},
"timestamp": "now",
"keys": [ self.nodes[0].dumpprivkey(address['address']) ]
}])
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -4)
assert_equal(result[0]['error']['message'], 'The wallet already contains the private key for this address or script')
# Address + Private key + watchonly
self.log.info("Should not import an address with private key and with watchonly")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": address['address']
},
"timestamp": "now",
"keys": [ self.nodes[0].dumpprivkey(address['address']) ],
"watchonly": True
}])
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -8)
assert_equal(result[0]['error']['message'], 'Incompatibility found between watchonly and keys')
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], False)
assert_equal(address_assert['ismine'], False)
assert_equal('timestamp' in address_assert, False)
# ScriptPubKey + Private key + internal
self.log.info("Should import a scriptPubKey with internal and with private key")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": address['scriptPubKey'],
"timestamp": "now",
"keys": [ self.nodes[0].dumpprivkey(address['address']) ],
"internal": True
}])
assert_equal(result[0]['success'], True)
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], False)
assert_equal(address_assert['ismine'], True)
assert_equal(address_assert['timestamp'], timestamp)
# ScriptPubKey + Private key + !internal
self.log.info("Should not import a scriptPubKey without internal and with private key")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": address['scriptPubKey'],
"timestamp": "now",
"keys": [ self.nodes[0].dumpprivkey(address['address']) ]
}])
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -8)
assert_equal(result[0]['error']['message'], 'Internal must be set for hex scriptPubKey')
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], False)
assert_equal(address_assert['ismine'], False)
assert_equal('timestamp' in address_assert, False)
# P2SH address
sig_address_1 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
sig_address_2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
sig_address_3 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
multi_sig_script = self.nodes[0].createmultisig(2, [sig_address_1['address'], sig_address_2['address'], sig_address_3['pubkey']])
self.nodes[1].generate(100)
transactionid = self.nodes[1].sendtoaddress(multi_sig_script['address'], 10.00)
self.nodes[1].generate(1)
timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']
self.log.info("Should import a p2sh")
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": multi_sig_script['address']
},
"timestamp": "now",
}])
assert_equal(result[0]['success'], True)
address_assert = self.nodes[1].validateaddress(multi_sig_script['address'])
assert_equal(address_assert['isscript'], True)
assert_equal(address_assert['iswatchonly'], True)
assert_equal(address_assert['timestamp'], timestamp)
p2shunspent = self.nodes[1].listunspent(0,999999, [multi_sig_script['address']])[0]
assert_equal(p2shunspent['spendable'], False)
assert_equal(p2shunspent['solvable'], False)
# P2SH + Redeem script
sig_address_1 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
sig_address_2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
sig_address_3 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
multi_sig_script = self.nodes[0].createmultisig(2, [sig_address_1['address'], sig_address_2['address'], sig_address_3['pubkey']])
self.nodes[1].generate(100)
transactionid = self.nodes[1].sendtoaddress(multi_sig_script['address'], 10.00)
self.nodes[1].generate(1)
timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']
self.log.info("Should import a p2sh with respective redeem script")
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": multi_sig_script['address']
},
"timestamp": "now",
"redeemscript": multi_sig_script['redeemScript']
}])
assert_equal(result[0]['success'], True)
address_assert = self.nodes[1].validateaddress(multi_sig_script['address'])
assert_equal(address_assert['timestamp'], timestamp)
p2shunspent = self.nodes[1].listunspent(0,999999, [multi_sig_script['address']])[0]
assert_equal(p2shunspent['spendable'], False)
assert_equal(p2shunspent['solvable'], True)
# P2SH + Redeem script + Private Keys + !Watchonly
sig_address_1 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
sig_address_2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
sig_address_3 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
multi_sig_script = self.nodes[0].createmultisig(2, [sig_address_1['address'], sig_address_2['address'], sig_address_3['pubkey']])
self.nodes[1].generate(100)
transactionid = self.nodes[1].sendtoaddress(multi_sig_script['address'], 10.00)
self.nodes[1].generate(1)
timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']
self.log.info("Should import a p2sh with respective redeem script and private keys")
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": multi_sig_script['address']
},
"timestamp": "now",
"redeemscript": multi_sig_script['redeemScript'],
"keys": [ self.nodes[0].dumpprivkey(sig_address_1['address']), self.nodes[0].dumpprivkey(sig_address_2['address'])]
}])
assert_equal(result[0]['success'], True)
address_assert = self.nodes[1].validateaddress(multi_sig_script['address'])
assert_equal(address_assert['timestamp'], timestamp)
p2shunspent = self.nodes[1].listunspent(0,999999, [multi_sig_script['address']])[0]
assert_equal(p2shunspent['spendable'], False)
assert_equal(p2shunspent['solvable'], True)
# P2SH + Redeem script + Private Keys + Watchonly
sig_address_1 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
sig_address_2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
sig_address_3 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
multi_sig_script = self.nodes[0].createmultisig(2, [sig_address_1['address'], sig_address_2['address'], sig_address_3['pubkey']])
self.nodes[1].generate(100)
transactionid = self.nodes[1].sendtoaddress(multi_sig_script['address'], 10.00)
self.nodes[1].generate(1)
timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']
self.log.info("Should import a p2sh with respective redeem script and private keys")
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": multi_sig_script['address']
},
"timestamp": "now",
"redeemscript": multi_sig_script['redeemScript'],
"keys": [ self.nodes[0].dumpprivkey(sig_address_1['address']), self.nodes[0].dumpprivkey(sig_address_2['address'])],
"watchonly": True
}])
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -8)
assert_equal(result[0]['error']['message'], 'Incompatibility found between watchonly and keys')
# Address + Public key + !Internal + Wrong pubkey
self.log.info("Should not import an address with a wrong public key")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
address2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": address['address']
},
"timestamp": "now",
"pubkeys": [ address2['pubkey'] ]
}])
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -5)
assert_equal(result[0]['error']['message'], 'Consistency check failed')
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], False)
assert_equal(address_assert['ismine'], False)
assert_equal('timestamp' in address_assert, False)
# ScriptPubKey + Public key + internal + Wrong pubkey
self.log.info("Should not import a scriptPubKey with internal and with a wrong public key")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
address2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
request = [{
"scriptPubKey": address['scriptPubKey'],
"timestamp": "now",
"pubkeys": [ address2['pubkey'] ],
"internal": True
}]
result = self.nodes[1].importmulti(request)
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -5)
assert_equal(result[0]['error']['message'], 'Consistency check failed')
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], False)
assert_equal(address_assert['ismine'], False)
assert_equal('timestamp' in address_assert, False)
# Address + Private key + !watchonly + Wrong private key
self.log.info("Should not import an address with a wrong private key")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
address2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": address['address']
},
"timestamp": "now",
"keys": [ self.nodes[0].dumpprivkey(address2['address']) ]
}])
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -5)
assert_equal(result[0]['error']['message'], 'Consistency check failed')
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], False)
assert_equal(address_assert['ismine'], False)
assert_equal('timestamp' in address_assert, False)
# ScriptPubKey + Private key + internal + Wrong private key
self.log.info("Should not import a scriptPubKey with internal and with a wrong private key")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
address2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": address['scriptPubKey'],
"timestamp": "now",
"keys": [ self.nodes[0].dumpprivkey(address2['address']) ],
"internal": True
}])
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -5)
assert_equal(result[0]['error']['message'], 'Consistency check failed')
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], False)
assert_equal(address_assert['ismine'], False)
assert_equal('timestamp' in address_assert, False)
# Importing existing watch only address with new timestamp should replace saved timestamp.
assert_greater_than(timestamp, watchonly_timestamp)
self.log.info("Should replace previously saved watch only timestamp.")
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": watchonly_address,
},
"timestamp": "now",
}])
assert_equal(result[0]['success'], True)
address_assert = self.nodes[1].validateaddress(watchonly_address)
assert_equal(address_assert['iswatchonly'], True)
assert_equal(address_assert['ismine'], False)
assert_equal(address_assert['timestamp'], timestamp)
watchonly_timestamp = timestamp
# restart nodes to check for proper serialization/deserialization of watch only address
self.stop_nodes()
self.start_nodes()
address_assert = self.nodes[1].validateaddress(watchonly_address)
assert_equal(address_assert['iswatchonly'], True)
assert_equal(address_assert['ismine'], False)
assert_equal(address_assert['timestamp'], watchonly_timestamp)
# Bad or missing timestamps
self.log.info("Should throw on invalid or missing timestamp values")
assert_raises_rpc_error(-3, 'Missing required timestamp field for key',
self.nodes[1].importmulti, [{
"scriptPubKey": address['scriptPubKey'],
}])
assert_raises_rpc_error(-3, 'Expected number or "now" timestamp value for key. got type string',
self.nodes[1].importmulti, [{
"scriptPubKey": address['scriptPubKey'],
"timestamp": "",
}])
if __name__ == '__main__':
ImportMultiTest ().main ()
| mit | 4,061,598,657,407,303,700 | 47.250554 | 137 | 0.61601 | false |
sepeth/relationships | relationships/relationship.py | 1 | 4426 |
import redis
from keys import key_list as default_key_list
class Relationship(object):
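    # Follow/block graph backed by Redis sets: every relation is stored in both
    # directions under keys of the form 'user:<id>:<relation>' (following /
    # followers, blocked / blocked_by, as configured in key_list), so listing,
    # counting and membership checks map directly onto SMEMBERS, SCARD and
    # SISMEMBER.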
def __init__(self, redis_connection=None, key_list=None, actor=None):
if key_list:
default_key_list.update(key_list)
self.key_list = default_key_list
if redis_connection:
self.redis_connection = redis_connection
else:
self.redis_connection = redis.StrictRedis(
host='localhost',
port=6379,
db=0
)
self.actor = actor
def __call__(self, *args, **kwargs):
self.actor = args[0]
return self
def _action_call(self, command, from_id, to_id, operation_key):
command_values = ':'.join(('user', str(from_id), operation_key)), to_id
return getattr(self.redis_connection, command)(*command_values)
def _list_call(self, operation_key):
return self.redis_connection.smembers(
'user:{}:{}'.format(self._get_actor(), operation_key)
)
def _count_call(self, operation_key):
return self.redis_connection.scard(
'user:{}:{}'.format(
self._get_actor(),
operation_key
)
)
def _get_actor(self):
        if self.actor is not None:
return self.actor
raise ValueError("actor is not defined")
def block(self, to_id):
self._action_call('sadd', self._get_actor(), to_id, self.key_list["blocked"])
self._action_call('sadd', to_id, self._get_actor(), self.key_list["blocked_by"])
def unblock(self, to_id):
self._action_call('srem', self._get_actor(), to_id, self.key_list["blocked"])
self._action_call('srem', to_id, self._get_actor(), self.key_list["blocked_by"])
def follow(self, to_id):
self._action_call('sadd', self._get_actor(), to_id, self.key_list["following"])
self._action_call('sadd', to_id, self._get_actor(), self.key_list["followers"])
def unfollow(self, to_id):
self._action_call('srem', self._get_actor(), to_id, self.key_list["following"])
self._action_call('srem', to_id, self._get_actor(), self.key_list["followers"])
def friends(self):
return self.redis_connection.sinter(
"user:{}:{}".format(self._get_actor(), self.key_list["following"]),
"user:{}:{}".format(self._get_actor(), self.key_list["followers"]),
)
def followers(self):
return self._list_call(self.key_list["followers"])
def following(self):
return self._list_call(self.key_list["following"])
def blocks(self):
return self._list_call(self.key_list["blocked"])
def blocked(self):
return self._list_call(self.key_list["blocked_by"])
def follower_count(self):
return self._count_call(self.key_list["followers"])
def following_count(self):
return self._count_call(self.key_list["following"])
def block_count(self):
return self._count_call(self.key_list["blocked"])
def blocked_count(self):
return self._count_call(self.key_list["blocked_by"])
def is_follower(self, follower_id):
return self._action_call('sismember', self._get_actor(), follower_id, self.key_list["followers"])
def is_following(self, following_id):
return self._action_call('sismember', self._get_actor(), following_id, self.key_list["following"])
def is_blocked(self, blocked_id):
return self._action_call('sismember', self._get_actor(), blocked_id, self.key_list["blocked"])
def is_blocked_by(self, blocked_by_id):
return self._action_call('sismember', self._get_actor(), blocked_by_id,self.key_list["blocked_by"])
def get_network(self, output):
user_id = self._get_actor()
try:
import pydot
except ImportError:
raise ImportError("You need pydot library to get network functionality.")
graph = pydot.Dot('network_of_user_{}'.format(user_id), graph_type='digraph')
target_node = pydot.Node(user_id)
for _id in self(user_id).following():
user_node = pydot.Node(_id)
graph.add_edge(pydot.Edge(target_node, user_node))
for _id in self(user_id).followers():
user_node = pydot.Node(_id)
graph.add_edge(pydot.Edge(user_node, target_node))
graph.write_png(output)
| mit | 7,119,113,668,999,396,000 | 30.614286 | 108 | 0.592634 | false |
martinburchell/crossword_collective | crossword.py | 1 | 11875 | import os.path
import urllib
import smtplib
import string
import StringIO
from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from lxml import etree
from lxml.html.soupparser import fromstring
from lxml.cssselect import CSSSelector
from PIL import Image
from parser import MyHTMLParser
from line import Line
class Crossword(object):
XHTML_NAMESPACE = "http://www.w3.org/1999/xhtml"
def __init__(self, home_page, cross_type, data_dir, prefix, serial_number, density, border, border_color, smtp_server = None, from_email_address = None, to_email_address = None):
self.home_page = home_page
self.cross_type = cross_type
dir = os.path.join(data_dir,str(serial_number))
self.dir = dir
self.mkdir(dir)
self.prefix = prefix
self.serial_number = serial_number
self.density = density
self.border = border
self.border_color = border_color
self.basename = self.prefix + "_" + self.serial_number
self.smtp_server = smtp_server
self.from_email_address = from_email_address
self.to_email_address = to_email_address
def mkdir(self, dir):
if not os.path.isdir(dir):
os.mkdir(dir)
def download_pdf(self):
url = self.home_page + self.cross_type + "/" + self.serial_number
content = urllib.urlopen(url).read()
root = fromstring(content)
selector = CSSSelector('p#stand-first a')
pdf_url = False
for element in selector(root):
href = element.get("href")
if href != None and href[-4:] == ".pdf":
pdf_url = href
if pdf_url:
pdf_stream = urllib.urlopen(pdf_url)
pdf_basename = pdf_url[pdf_url.rfind("/") + 1:]
valid_chars = "-_.%s%s" % (string.ascii_letters, string.digits)
pdf_basename = ''.join(c for c in pdf_basename if c in valid_chars)
self.basename = pdf_basename[:-4]
self.pdf_filename = os.path.join(self.dir, pdf_basename)
self.mkdir(self.dir)
pdf_file = open(self.pdf_filename, "w")
while True:
buffer = pdf_stream.read(1024)
if buffer == "":
break
pdf_file.write(buffer)
pdf_file.close()
pdf_stream.close()
return True
return False
def tag_matches(self, element, tag):
return element.tag == tag or element.tag == "{%s}%s" % (self.XHTML_NAMESPACE, tag)
def convert_to_png(self):
        # Render the first page of the PDF to a trimmed 24-bit RGB PNG via ImageMagick's convert
png_basename = self.basename + ".png"
self.png_filename = os.path.join(self.dir, png_basename)
command = "convert -alpha off -density %s %s[0] -trim +repage -format png32 -depth 3 -define png:color-type=2 %s" % (self.density, self.pdf_filename, self.png_filename)
ok = os.system(command)
if ok == 0:
image = Image.open(self.png_filename)
self.image_width = image.size[0]
self.image_height = image.size[1]
return (ok == 0)
def find_grid(self):
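        # Heuristic grid detection: scan every pixel row for long runs of
        # non-white pixels, take the most frequent run width as the grid width,
        # and count the non-adjacent rows of that width to derive the number of
        # grid lines and hence the square size.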
image = Image.open(self.png_filename)
pixels = image.load()
threshold = 300
x_lines = []
for y in range (0, self.image_height):
x_count = 0
for x in range (0, self.image_width):
if (pixels[x, y] == (255,255,255) or x + 1 == self.image_width):
if x_count > threshold:
x_lines.append(Line(x - x_count, y, x, y))
x_count = 0
else:
x_count += 1
freq = {}
for line in x_lines:
width = line.end_x - line.start_x
n = freq.get(width, 0)
freq[width] = n + 1
max_count = 0
mode_width = None
for k, v in freq.iteritems():
if v > max_count:
max_count = v
mode_width = k
first_y = None
last_y = None
num_grid_lines = 0
previous_y = None
for line in x_lines:
if line.end_x - line.start_x == mode_width:
# only count non-adjacent lines
if previous_y == None or line.start_y - previous_y > 1:
num_grid_lines += 1
previous_y = line.start_y
if first_y == None:
first_y = line
last_y = line
self.grid_x = first_y.start_x
self.grid_y = first_y.start_y
self.grid_width = mode_width
self.grid_height = mode_width
if num_grid_lines < 2:
print "Not enough grid lines"
return False
self.grid_size = num_grid_lines - 1
self.square_size = mode_width / self.grid_size
return True
def reformat(self):
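        # If the clues sit below a near-full-width grid (little horizontal
        # margin), move them to the right of the grid; in either case add a
        # coloured border and update the stored image and grid geometry.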
image_in = Image.open(self.png_filename)
if self.image_width - self.grid_width < 50:
# move the clues right of the grid
width_out = self.image_width * 2 + self.border * 3
grid_height = self.grid_y + self.grid_height
clues_height = self.image_height - self.grid_height
if clues_height > self.grid_height:
height_out = clues_height
else:
height_out = self.grid_height + self.border * 2
image_out = Image.new(image_in.mode,
(width_out, height_out),
self.border_color)
grid_box = (0, 0, self.image_width, grid_height)
grid = image_in.crop(grid_box)
image_out.paste(grid, (self.border, self.border))
clues = image_in.crop((0, grid_height + 1,
self.image_width, self.image_height))
image_out.paste(clues, (self.image_width + self.border * 2 + 1,
self.border))
else:
width_out = self.image_width + self.border * 2
height_out = self.image_height + self.border * 2
image_out = Image.new(image_in.mode,
(width_out, height_out),
self.border_color)
image_out.paste(image_in, (self.border, self.border))
self.image_width = width_out
self.image_height = height_out
self.grid_x += self.border
self.grid_y += self.border
image_out.save(self.png_filename);
return True
def create_pdf_html(self):
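        # Emit an empty <img> placeholder sized to the PNG plus an HTML table of
        # the grid; each square is classified black or white by a pixel-majority
        # vote over its area.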
html_basename = self.basename + "_pdf.html"
self.html_filename = os.path.join(self.dir, html_basename)
html_file = open(self.html_filename, "w")
image = Image.open(self.png_filename).convert("1")
pixels = image.load()
html_file.write("<div id=\"v6vf\" style=\"text-align: left;\">\n")
html_file.write("\t<img src=\"\" width=\"%d\" height=\"%d\">\n" % (self.image_width, self.image_height))
html_file.write("\t<div>\n")
html_file.write("\t\t<table>\n")
html_file.write("\t\t\t<tbody>\n")
# make the array one square bigger to cope with the edge pixels
squares = [[0 for i in range(self.grid_size + 1)] for j in range(self.grid_size + 1)]
for y in range (0, self.grid_height):
square_y = y / self.square_size
for x in range (0, self.grid_width):
square_x = x / self.square_size
n = squares[square_x][square_y]
if pixels[x + self.grid_x, y + self.grid_y] == 0:
# black
n = n - 1
else:
# white
n = n + 1
squares[square_x][square_y] = n
for square_y in range (0, self.grid_size):
html_file.write("\t\t\t\t<tr>\n")
for square_x in range (0, self.grid_size):
if squares[square_x][square_y] > 0:
cell_class = "white"
else:
cell_class = "black"
html_file.write("\t\t\t\t\t<td class=\"%s\"><br></td>\n" % cell_class)
html_file.write("\t\t\t\t</tr>\n")
html_file.write("\t\t\t</tbody>\n")
html_file.write("\t\t</table>\n")
html_file.write("\t</div>\n")
html_file.write("</div>\n")
html_file.close()
return True
def create_pdf_css(self):
css_basename = self.basename + "_pdf.css"
self.css_filename = os.path.join(self.dir, css_basename)
css_file = open(self.css_filename, "w")
css_file.write("img\n")
css_file.write("{\n")
css_file.write("\tposition: absolute;\n")
css_file.write("\tleft: 0;\n")
css_file.write("\ttop: 0;\n")
css_file.write("\tz-index: -1;\n")
css_file.write("}\n\n")
css_file.write("table\n")
css_file.write("{\n")
css_file.write("\tposition: absolute;\n")
css_file.write("\tleft: %dpx;\n" % self.grid_x)
css_file.write("\ttop: %dpx;\n" % self.grid_y)
css_file.write("\twidth: %dpx;\n" % self.grid_width)
css_file.write("\theight: %dpx;\n" % self.grid_height)
css_file.write("\tborder: thin solid black;\n")
css_file.write("}\n\n")
css_file.write("td\n")
css_file.write("{\n")
css_file.write("\twidth:%dpx;\n" % (self.square_size -4))
css_file.write("\theight:%dpx;\n" % (self.square_size -4))
css_file.write("\ttext-align: center;\n")
css_file.write("\tvertical-align: middle;\n")
css_file.write("}\n\n")
css_file.write(".black\n")
css_file.write("{\n")
css_file.write("\tbackground-color:#000;\n")
css_file.write("}\n")
css_file.close()
return True
def send_email(self):
message = MIMEMultipart()
message["Subject"] = "%s Crossword Number %s " % (self.cross_type.capitalize(), self.serial_number)
message["From"] = self.from_email_address
message["To"] = self.to_email_address
message.preamble = message["Subject"]
f = open(self.html_filename)
text = MIMEText(f.read(), "html")
f.close()
text.add_header("Content-Disposition", "attachment", filename=self.basename + ".html")
message.attach(text)
f = open(self.css_filename)
text = MIMEText(f.read(), "css")
f.close()
text.add_header("Content-Disposition", "attachment", filename=self.basename + ".css")
message.attach(text)
server = smtplib.SMTP(self.smtp_server)
# server.set_debuglevel(1)
server.sendmail(self.from_email_address, self.to_email_address, message.as_string())
        server.quit()
return True
def create(self):
ok = self.download_pdf()
if not ok:
print "Failed to download PDF"
return False
ok = self.convert_to_png()
if not ok:
print "Failed to convert PDF to PNG"
return False
ok = self.find_grid()
if not ok:
print "Failed to find grid"
return False
ok = self.reformat()
if not ok:
print "Failed to reformat"
return False
ok = self.create_pdf_html()
if not ok:
print "Failed to create HTML"
return False
ok = self.create_pdf_css()
if not ok:
print "Failed to create CSS"
return False
if not self.smtp_server is None:
ok = self.send_email()
if not ok:
print "Failed to send email"
return False
return True
| gpl-3.0 | -6,237,270,378,994,270,000 | 29.924479 | 182 | 0.527663 | false |
antgonza/qp-shotgun | qp_shogun/filter/tests/test_filter.py | 1 | 8158 | # -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from unittest import main
from os import close, remove, makedirs
from os.path import exists, isdir, join
from shutil import rmtree, copyfile
from tempfile import mkstemp, mkdtemp
from json import dumps
from functools import partial
import os
from qiita_client.testing import PluginTestCase
from qp_shogun import plugin
from qp_shogun.filter.filter import (
generate_filter_commands, filter)
from qp_shogun.filter.utils import (
get_dbs, get_dbs_list, generate_filter_dflt_params)
from qp_shogun.utils import (_format_params, _per_sample_ainfo)
BOWTIE2_PARAMS = {
'x': 'Bowtie2 database to filter',
'p': 'Number of threads'}
class QC_FilterTests(PluginTestCase):
maxDiff = None
def setUp(self):
plugin("https://localhost:21174", 'register', 'ignored')
db_path = os.environ["QC_FILTER_DB_DP"]
self.params = {
'Bowtie2 database to filter': join(db_path,
'phix/phix'),
'Number of threads': '5'
}
self._clean_up_files = []
def tearDown(self):
for fp in self._clean_up_files:
if exists(fp):
if isdir(fp):
rmtree(fp)
else:
remove(fp)
def test_get_dbs(self):
db_path = os.environ["QC_FILTER_DB_DP"]
obs = get_dbs(db_path)
exp = {'phix': join(db_path, 'phix', 'phix')}
self.assertEqual(obs, exp)
def test_get_dbs_list(self):
db_path = os.environ["QC_FILTER_DB_DP"]
obs = get_dbs_list(db_path)
exp = join(join('"'+db_path, 'phix', 'phix')+'"')
self.assertEqual(obs, exp)
def test_generate_filter_dflt_params(self):
db_path = os.environ["QC_FILTER_DB_DP"]
obs = generate_filter_dflt_params()
exp = {'phix': {'Bowtie2 database to filter': join(db_path, 'phix',
'phix'),
'Number of threads': 15}}
self.assertEqual(obs, exp)
def test_format_filter_params(self):
db_path = os.environ["QC_FILTER_DB_DP"]
obs = _format_params(self.params, BOWTIE2_PARAMS)
exp = ('-p 5 -x %sphix/phix') % db_path
self.assertEqual(obs, exp)
def test_generate_filter_analysis_commands_forward_reverse(self):
fd, fp = mkstemp()
close(fd)
with open(fp, 'w') as f:
f.write(MAPPING_FILE)
self._clean_up_files.append(fp)
db_path = os.environ["QC_FILTER_DB_DP"]
exp_cmd = [
('bowtie2 -p 5 -x %sphix/phix --very-sensitive '
'-1 fastq/s1.fastq.gz -2 fastq/s1.R2.fastq.gz | '
'samtools view -f 12 -F 256 -b -o temp/s1.unsorted.bam; '
'samtools sort -T temp/s1 -@ 5 -n '
'-o temp/s1.bam temp/s1.unsorted.bam; '
'bedtools bamtofastq -i temp/s1.bam -fq '
'temp/s1.R1.fastq -fq2 '
'temp/s1.R2.fastq; '
'pigz -p 5 -c temp/s1.R1.fastq > '
'output/s1.R1.fastq.gz; '
'pigz -p 5 -c temp/s1.R2.fastq > '
'output/s1.R2.fastq.gz;') % db_path
]
exp_sample = [
('s1', 'SKB8.640193', 'fastq/s1.fastq.gz', 'fastq/s1.R2.fastq.gz')
]
obs_cmd, obs_sample = generate_filter_commands(
['fastq/s1.fastq.gz'],
['fastq/s1.R2.fastq.gz'],
fp, 'output', 'temp', self.params)
self.assertEqual(obs_cmd, exp_cmd)
self.assertEqual(obs_sample, exp_sample)
def test_filter(self):
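        # End-to-end check: copy two paired-end FASTQ files, register a prep
        # template and a per_sample_FASTQ artifact through the Qiita test API,
        # run the filter job and verify the four expected output filepaths.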
# generating filepaths
in_dir = mkdtemp()
self._clean_up_files.append(in_dir)
fp1_1 = join(in_dir, 'kd_test_1_R1.fastq.gz')
fp1_2 = join(in_dir, 'kd_test_1_R2.fastq.gz')
fp2_1 = join(in_dir, 'kd_test_2_R1.fastq.gz')
fp2_2 = join(in_dir, 'kd_test_2_R2.fastq.gz')
copyfile('support_files/kd_test_1_R1.fastq.gz', fp1_1)
copyfile('support_files/kd_test_1_R2.fastq.gz', fp1_2)
copyfile('support_files/kd_test_1_R1.fastq.gz', fp2_1)
copyfile('support_files/kd_test_1_R2.fastq.gz', fp2_2)
# inserting new prep template
prep_info_dict = {
'SKB7.640196': {'run_prefix': 'kd_test_1'},
'SKB8.640193': {'run_prefix': 'kd_test_2'}
}
data = {'prep_info': dumps(prep_info_dict),
# magic #1 = testing study
'study': 1,
'data_type': 'Metagenomic'}
pid = self.qclient.post('/apitest/prep_template/', data=data)['prep']
# inserting artifacts
data = {
'filepaths': dumps([
(fp1_1, 'raw_forward_seqs'),
(fp1_2, 'raw_reverse_seqs'),
(fp2_1, 'raw_forward_seqs'),
(fp2_2, 'raw_reverse_seqs')]),
'type': "per_sample_FASTQ",
'name': "Test QC_Trim artifact",
'prep': pid}
aid = self.qclient.post('/apitest/artifact/', data=data)['artifact']
self.params['input'] = aid
data = {'user': '[email protected]',
'command': dumps(['qp-shogun', '072020', 'QC_Filter']),
'status': 'running',
'parameters': dumps(self.params)}
jid = self.qclient.post('/apitest/processing_job/', data=data)['job']
out_dir = mkdtemp()
self._clean_up_files.append(out_dir)
success, ainfo, msg = filter(self.qclient, jid, self.params, out_dir)
self.assertEqual("", msg)
self.assertTrue(success)
# we are expecting 3 artifacts in total
self.assertEqual(1, len(ainfo))
obs_fps = []
for a in ainfo:
self.assertEqual("per_sample_FASTQ", a.artifact_type)
obs_fps.append(a.files)
od = partial(join, out_dir)
exp_fps = [
[(od('kd_test_1.R1.fastq.gz'), 'raw_forward_seqs'),
(od('kd_test_1.R2.fastq.gz'), 'raw_reverse_seqs'),
(od('kd_test_2.R1.fastq.gz'), 'raw_forward_seqs'),
(od('kd_test_2.R2.fastq.gz'), 'raw_reverse_seqs')]]
self.assertEqual(exp_fps, obs_fps)
def test_per_sample_ainfo_error(self):
in_dir = mkdtemp()
self._clean_up_files.append(in_dir)
makedirs(join(in_dir, 'sampleA'))
makedirs(join(in_dir, 'sampleB'))
# Paired-end
with self.assertRaises(ValueError):
_per_sample_ainfo(in_dir, (('sampleA', None, None, None),
('sampleB', None, None, None)), [],
'filtering', 'QC_Filter Files', True)
MAPPING_FILE = (
"#SampleID\tplatform\tbarcode\texperiment_design_description\t"
"library_construction_protocol\tcenter_name\tprimer\trun_prefix\t"
"instrument_model\tDescription\n"
"SKB7.640196\tILLUMINA\tA\tA\tA\tANL\tA\ts3\tIllumina MiSeq\tdesc1\n"
"SKB8.640193\tILLUMINA\tA\tA\tA\tANL\tA\ts1\tIllumina MiSeq\tdesc2\n"
"SKD8.640184\tILLUMINA\tA\tA\tA\tANL\tA\ts2\tIllumina MiSeq\tdesc3\n"
)
MAPPING_FILE_2 = (
"#SampleID\tplatform\tbarcode\texperiment_design_description\t"
"library_construction_protocol\tcenter_name\tprimer\t"
"run_prefix\tinstrument_model\tDescription\n"
"SKB7.640196\tILLUMINA\tA\tA\tA\tANL\tA\ts3\tIllumina MiSeq\tdesc1\n"
"SKB8.640193\tILLUMINA\tA\tA\tA\tANL\tA\ts1\tIllumina MiSeq\tdesc2\n"
"SKD8.640184\tILLUMINA\tA\tA\tA\tANL\tA\ts1\tIllumina MiSeq\tdesc3\n"
)
if __name__ == '__main__':
main()
| bsd-3-clause | -1,719,745,592,090,536,200 | 34.914027 | 79 | 0.532851 | false |
martinloland/rov | pc/func.py | 1 | 3401 | '''
func.py
- General classes
- Handling events initiated by user input
'''
# Imports used by the helpers below; the UI globals referenced at runtime
# (surface, ui, act, sens, uiTest, WHITE) are assumed to be provided by the
# main program that loads this module.
import os
import sys
import math
import socket
import datetime
import pygame
class SENS:
def __init__(self):
self.press = 1.0
self.temp = 0
self.volt = 0
self.curr = 0
self.roll = 0
self.yaw = 0
self.pitch = 0
self.ax = 0
self.ay = 0
self.az = 0
self.compass = 0
self.depth = 0
class ACT:
def __init__(self):
self.led = 0
self.pan = 90
self.tilt = 90
self.lf = 0
self.rf = 0
self.lb = 0
self.cb = 0
self.rb = 0
self.pwr = 100
class SOCKET:
def __init__(self, ip, port):
self.socket = socket.socket()
self.socket.bind((ip, port))
self.socket.listen(5)
self.conn, self.addr = self.socket.accept()
def close(self):
None
def closeProgram():
if not uiTest:
actFile.close()
pygame.quit()
sys.exit()
def snapshot():
filename = str(datetime.datetime.now().date()) + '_' + str(datetime.datetime.now().time()) + '.jpg'
filename = filename.replace(':','.')
print filename
path = os.path.join('snapshots', filename)
pygame.image.save(pygame.transform.rotate(ui.video.img, 180), path)
#Flash
surface.fill(WHITE)
pygame.display.flip()
def motor(buttons):
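	# Map pressed buttons to thruster set-points: mIncrease/mDecrease step the
	# power level, forward/back/left/right drive lb/rb, up/down drive lf/rf/cb
	# with a pitch-compensation term (sin of the current pitch), and all
	# thrusters are zeroed when no movement button is held.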
moving = False
pwrIncrement = 6
max = 190
min = 0
thresholdU = max-pwrIncrement
thresholdL = pwrIncrement
dev = math.sin(math.radians(sens.pitch))
#Power
if any("mDecrease" in s for s in buttons) and act.pwr >= thresholdL:
act.pwr -= pwrIncrement
if any("mIncrease" in s for s in buttons) and act.pwr <= thresholdU:
act.pwr += pwrIncrement
# Turning
if any("mForward" in s for s in buttons): #forward
moving = True
act.lb = act.pwr
act.rb = act.pwr
if any("mBack" in s for s in buttons): #backward
moving = True
act.lb = -act.pwr
act.rb = -act.pwr
if any("mLeft" in s for s in buttons):
moving = True
act.lb = -act.pwr
act.rb = act.pwr
if any("mRight" in s for s in buttons):
moving = True
act.lb = act.pwr
act.rb = -act.pwr
#up/down
if any("mUp" in s for s in buttons):
moving = True
act.lf = act.rf = sorted([-max, int(act.pwr*(1-dev)), max])[1]
act.cb = sorted([-max, int(act.pwr*(1+dev)), max])[1]
if any("mDown" in s for s in buttons):
moving = True
act.lf = act.rf = sorted([-max, int(-act.pwr*(1+dev)), max])[1]
act.cb = sorted([-max, int(-act.pwr*(1-dev)), max])[1]
if not moving:
act.lf = act.rf = act.lb = act.cb = act.rb = 0
def toggle_fullscreen():
global fullscreen
[SCREEN_WIDTH, SCREEN_HEIGHT] = [1296,730]
if fullscreen == False:
pygame.display.set_mode((SCREEN_WIDTH,SCREEN_HEIGHT),pygame.FULLSCREEN)
fullscreen = True
else:
pygame.display.set_mode((SCREEN_WIDTH,SCREEN_HEIGHT))
fullscreen = False
def gimbal(buttons):
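	# Pan/tilt the camera gimbal in fixed increments around the 90-degree
	# neutral point, clamped to +/-panMax and +/-tiltMax; resetGimbal recentres it.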
increment = 6
panMax = 30
tiltMax = 55
zeroPoint = 90
threshP = [-panMax+increment, panMax-increment]
threshT = [-tiltMax+increment, tiltMax-increment]
if any("gRight" in s for s in buttons) and act.pan-zeroPoint > threshP[0]:
act.pan -= increment
if any("gLeft" in s for s in buttons) and act.pan-zeroPoint < threshP[1]:
act.pan += increment
if any("gDown" in s for s in buttons) and act.tilt-zeroPoint > threshT[0]:
act.tilt -= increment
if any("gUp" in s for s in buttons) and act.tilt-zeroPoint < threshT[1]:
act.tilt += increment
if any("resetGimbal" in s for s in buttons):
act.pan = act.tilt = 90 | mit | -3,157,018,102,161,540,600 | 23.780303 | 100 | 0.626286 | false |
mrcrgl/stockstore | stockstore/wsgi.py | 1 | 1434 | """
WSGI config for stockstore2 project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "stockstore2.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "stockstore2.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| mit | 5,247,661,201,262,824,000 | 43.8125 | 79 | 0.794979 | false |
prateek-1708/pg-aws-python | src/ecs-deploy.py | 1 | 4415 | #!/usr/bin/python3
import boto3
import argparse
import pprint
import sys
##############################################################################
def debug(args):
print('Cluster Name: {}'.format(args.cluster))
print('Service Name: {}'.format(args.service))
print('Image Version: {}'.format(args.image_version))
##############################################################################
def die(message='I am dying...'):
print("Error: {}".format(message))
sys.exit(1)
##############################################################################
def debug_and_die(args):
    debug(args)
    die()
##############################################################################
def get_client(client_type):
try:
return boto3.client(client_type)
except:
die('Cannot call boto3.client...')
##############################################################################
def search_and_return_arn_from_haystack(haystack, key, needle):
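    # Pick an ARN from haystack[key] whose string contains `needle`; pop()
    # raises IndexError when nothing matches.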
arns = haystack[key]
match = [arn for arn in arns if needle in arn]
arn = match.pop()
return arn
##############################################################################
def read_arguments():
parser = argparse.ArgumentParser("Deploy Docker image to ecs cluster")
parser.add_argument(
"-c",
"--cluster",
required=True,
dest="cluster",
help="Cluster name where this docker image needs to be deployed"
)
parser.add_argument(
"-s",
"--service",
required=True,
dest="service",
help="Service name where this docker image needs to be deployed"
)
parser.add_argument(
"-i",
"--image-version",
required=True,
dest="image_version",
help="Version of the image to be deployed"
)
args = parser.parse_args()
if not args.cluster:
parser.error("Cluster name is required in order for this to work")
if not args.service:
parser.error("Service name is required in order for this to work")
if not args.image_version:
parser.error("Image version is required in order to do the deploy")
return parser.parse_args()
##############################################################################
def main():
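    # Deploy flow: resolve the cluster and service ARNs, read the service's
    # current task definition, swap the image tag for the requested version,
    # register the new task-definition revision and point the service at it.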
args = read_arguments()
cluster_name_to_search = args.cluster
service_name_to_search = args.service
debug(args)
    # create the ECS client used for the cluster/service/task-definition calls
ecs_client = get_client('ecs')
# Getting the cluster
clusters = ecs_client.list_clusters()
cluster_arn = search_and_return_arn_from_haystack(clusters, 'clusterArns', cluster_name_to_search)
# Getting the services
services = ecs_client.list_services(cluster=cluster_arn)
service_arn = search_and_return_arn_from_haystack(services, 'serviceArns', service_name_to_search)
# describing the service
service_details = ecs_client.describe_services(cluster=cluster_arn, services=[service_arn])
task_definition_arn = ((service_details['services']).pop())['taskDefinition']
task_def_details = ecs_client.describe_task_definition(taskDefinition=task_definition_arn)
task_definition = task_def_details['taskDefinition']
print(task_definition)
family = task_definition['family']
print(family)
volumes = task_definition['volumes']
container_definition = task_definition['containerDefinitions'][0]
print(container_definition)
image = container_definition['image']
print(image)
split_array = image.split("/")
image_name_and_tag = split_array[1].split(":")
new_image_name_and_tag = image_name_and_tag[0] + ":" + args.image_version
repo_and_image_name_with_tag = split_array[0] + "/" + new_image_name_and_tag
container_definition['image'] = repo_and_image_name_with_tag
response = ecs_client.register_task_definition(
family=family,
containerDefinitions=[container_definition],
volumes=volumes
)
pprint.pprint(response)
pprint.pprint(response['taskDefinition']['taskDefinitionArn'])
deployed = ecs_client.update_service(
cluster=cluster_arn,
service=service_arn,
taskDefinition=response['taskDefinition']['taskDefinitionArn']
)
pprint.pprint(deployed)
##############################################################################
if __name__ == '__main__':
main() | mit | 2,897,100,967,628,455,400 | 28.637584 | 102 | 0.560589 | false |
UdK-VPT/Open_eQuarter | mole3/qgisinteraction/plugin_interaction.py | 1 | 13083 | from qgis.PyQt import QtCore
from qgis.core import QgsProject, QgsCoordinateReferenceSystem, QgsMapLayer, QgsRasterLayer, QgsVectorLayer
from qgis.core import QgsField, QgsFeature, QgsDistanceArea, QgsPoint
from qgis import utils
from os import path
import sys
from mole3.qgisinteraction.layer_interaction import find_layer_by_name, add_attributes_if_not_exists, delete_layer_files
from mole3.qgisinteraction import legend
from mole3.project import config
def get_plugin_ifexists(plugin_name):
"""
Check if a plugin with the given name exists.
:param plugin_name: Name of the plugin to check existence of.
:type plugin_name: str
:return plugin: Return the plugin if it was found or None otherwise
:rtype: plugin instance
"""
from mole3 import oeq_global
try:
plugin = utils.plugins[plugin_name]
return plugin
except KeyError:
oeq_global.OeQ_push_warning(title="Mandatory Plugins: ", message="Please install Plugin '" + plugin_name + "' ")
return None
class PstInteraction(object):
def __init__(self, iface, plugin_name='pointsamplingtool'):
if isinstance(plugin_name, str):
try:
self.plugin_folder = path.dirname(sys.modules[plugin_name].__file__)
# if the pst is not part of the path, add it to the path, so the modules can be imported
if self.plugin_folder not in sys.path:
sys.path.insert(0, self.plugin_folder)
except KeyError:
print((KeyError, plugin_name))
from doPointSamplingTool import Dialog
self.pst_dialog = Dialog(iface)
self.path_to_output_layer = ''
def set_input_layer(self, layer_name):
layernode = legend.nodeByName(layer_name,'layer')
if len(layernode) == 0:
return None
in_layer = self.pst_dialog.inSample
#print in_layer
index = in_layer.findText(layer_name)
in_layer.setCurrentIndex(index)
#if layer_name is not None and not layer_name.isspace():
# layer_registry = QgsProject.instance()
# layer_available = layer_registry.mapLayersByName(layer_name)
# if layer_available:
# drop down menu, listing all available layers
def select_and_rename_files_for_sampling(self,sample_fields):
"""
Select all available layers for the point sampling and rename multiple occurrences of the same name.
Prepend an index, to separate the layers and append the information, which color value is displayed.
        :return: Mapping of original layer names to the shortened export field names (without the RGBa suffix)
        :rtype: dict
"""
import mole3.extensions as extensions
sample_list = self.pst_dialog.inData
table = self.pst_dialog.fieldsTable
number_of_samples = len(sample_list)
RGBa_appendices = ['R', 'G', 'B', 'a']
RGBa_index = 0
last_name = ''
prefix = 0
replacement_map = {}
for i in range(number_of_samples):
# select all fields via the inData-view,
# so the point sampling tool can manage its model accordingly/appropriately
sample_list.setItemSelected(sample_list.item(i), True)
# Get the source-name (as displayed in the field-table) and check if it was used already
# (the name has to be split, since it is displayed in the form 'layer_name : Band x' to get the layer_name)
table_index = table.rowCount()-1
table_text = table.item(table_index, 0).text().split(' : ')
layer_name = table_text[0]
band_name = table_text[1]
layer = find_layer_by_name(layer_name)
ext=extensions.by_layername(layer_name, 'Import')
#if ext:
# print "Test: "+ext[0].layer_name
# Check if the layer was already used
if last_name != layer_name:
last_name = layer_name
prefix += 1
RGBa_index = 0
if (layer.name() == config.building_outline_layer_name and
(band_name.startswith('AREA') or band_name.startswith('PERIMETER') or band_name.startswith(config.building_id_key))):
continue
elif (layer.type() == QgsMapLayer.RasterLayer and
layer.rasterType() == QgsRasterLayer.Multiband and
layer.bandCount() == 4
):
# Truncate the name to a maximum of 6 characters, since QGIS limits the length of a feature's name to 10
# prepend prefix (with leading zero), truncated name and RGBa-appendix
try:
rgba = RGBa_appendices[RGBa_index]
RGBa_index += 1
except IndexError as IError:
RGBa_index = 0
print((self.__module__, 'IndexError when appending the RGBa-Appendix: {}'.format(IError)))
if ext:
export_name = ext[0].field_id + '_' + rgba
else:
export_name = '{:02d}{}_{}'.format(prefix, layer_name[0:6], rgba)
replacement_map[layer_name] = export_name[:-2]
# Change the text in the table, so the pst can manage its model accordingly/appropriately
table.item(table_index, 1).setText(export_name)
continue
elif ext:
                # NEW: the target field name is not yet clear here
if ext[0].field_rename is not None:
if band_name.startswith(tuple(ext[0].field_rename.keys())):
if ext[0].field_rename[band_name]:
table.item(table_index, 1).setText(ext[0].field_rename[band_name])
continue
elif band_name.startswith(tuple(ext[0].par_in)):
continue
sample_list.setItemSelected(sample_list.item(i), False)
return replacement_map
def start_sampling(self, path_to_layer, layer_name):
if not path_to_layer or path_to_layer.isspace() or not layer_name or layer_name.isspace():
return ''
else:
delete_layer_files(layer_name)
full_path = path.join(path_to_layer, layer_name + '.shp')
self.set_input_layer(config.building_coordinate_layer_name)
self.pst_dialog.sampling(full_path)
return full_path
class OlInteraction(object):
def __init__(self, plugin_name = 'openlayers_plugin'):
"""
Make the plugin accessible by looking it up in the plugin-dictionary
:param plugin_name: Name of the open-layers-plugin (as stored in utils.plugins)
:type plugin_name: str
:return:
:rtype:
"""
self.plugin = None
try:
plugin = utils.plugins[plugin_name]
except KeyError as ke:
print("The open layers plugin has not been found under the given name " + plugin_name)
return None
if plugin is not None:
self.plugin = plugin
def open_osm_layer(self, layer_type_id):
"""
Interact with the Open-Street-Map plugin and open an open street map according to open_layer_type_id
:param open_layer_type_id: ID of the open-layer type
:type open_layer_type_id: int
:return:
:rtype:
"""
open_layer = self.plugin._olLayerTypeRegistry.getById(layer_type_id)
number_of_layers = len(QgsProject.instance().mapLayers())
self.plugin.addLayer(open_layer)
return (number_of_layers+1) == len(QgsProject.instance().mapLayers())
def set_map_crs(self, crs_string):
"""
Use the openlayer-plugin to set the project crs to the given crs and to do a re-projection to keep the currently viewed extent focused
:param crs: The new crs to set the project to
:type crs: str
:return:
:rtype:
"""
# if the given crs is valid
if not crs_string.isspace() and QgsCoordinateReferenceSystem().createFromUserInput(crs_string):
self.plugin.setMapCrs(QgsCoordinateReferenceSystem(crs_string, QgsCoordinateReferenceSystem.EpsgCrsId))
class RealCentroidInteraction(object):
def __init__(self, plugin_name='realcentroid'):
"""
Make the plugin accessible by looking it up in the plugin-dictionary
:param plugin_name: Name of the realcentroids-plugin (as stored in utils.plugins)
:type plugin_name: str
:return:
:rtype:
"""
self.plugin = None
try:
plugin = utils.plugins[plugin_name]
self.plugin = plugin
self.plugin.__init__(utils.iface)
except KeyError as KError:
print((KError, 'The realcentroid plugin has not been found by the given name "{}"'.format(plugin_name)))
def create_centroids(self, polygon_name, path_to_output_shape):
from mole3 import oeq_global
self.plugin.dlg.showEvent(QtCore.QEvent.Show)
polygon_combobox = self.plugin.dlg.layerBox
for i in range(polygon_combobox.count()):
if polygon_combobox.itemText(i) == polygon_name:
polygon_combobox.setCurrentIndex(i)
break
else:
print(('Layer {} not found in combobox.'.format(polygon_name)))
return None
self.plugin.dlg.shapefileName = path_to_output_shape
self.plugin.dlg.encoding = sys.getfilesystemencoding()
self.plugin.dlg.addBox.setCheckState(QtCore.Qt.Checked)
self.plugin.generate()
file_info = QtCore.QFileInfo(path_to_output_shape)
if file_info.exists():
layer_name = file_info.completeBaseName()
            output_layer = QgsVectorLayer(path_to_output_shape, layer_name, "ogr")
oeq_global.OeQ_wait(0.5)
return output_layer
else:
return None
def calculate_accuracy(self, polygon_layer, point_layer):
"""
Calculate the distance of each centroid on a point-layer to their surrounding polygons
:param polygon_layer: A layer containing polygons
:type polygon_layer: QgsVectorLayer
:param point_layer: A layer containing the (supposed to be) centroids of that polygon
:type point_layer: QgsVectorLayer
:return:
:rtype:
"""
point_provider = point_layer.dataProvider()
add_attributes_if_not_exists(point_layer, [QgsField('DIST', QtCore.QVariant.Double)])
distance_area = QgsDistanceArea()
poly_iterator = polygon_layer.dataProvider().getFeatures()
point_iterator = point_provider.getFeatures()
poly_feature = QgsFeature()
point_feature = QgsFeature()
field_index = point_provider.fieldNameIndex('DIST')
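        # walk polygon and centroid features pairwise, storing each centroid's minimum distance to a polygon edge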
while (poly_iterator.nextFeature(poly_feature) and
point_iterator.nextFeature(point_feature)):
            geom = poly_feature.geometry()
if geom is not None:
try:
poly_point = geom.asPolygon()[0]
centroid = geom.asPoint()
except IndexError:
continue
distances = {}
for i, point in enumerate(poly_point):
end = poly_point[(i+1) % len(poly_point)]
try:
intersect = self.intersect_point_to_line(centroid, point, end)
if intersect != centroid:
dist = distance_area.measureLine(centroid, intersect)
distances[intersect] = dist
                except ZeroDivisionError:
continue
values = {field_index: min(distances.values())}
point_provider.changeAttributeValues({point_feature.id(): values})
def intersect_point_to_line(self, point, line_start, line_end):
"""
Finds the point i on a line which, given a point p describes a line ip, orthogonal to a given line
(as found on http://gis.stackexchange.com/questions/59169/how-to-draw-perpendicular-lines-in-qgis)
:param point: The point p
:type point: QgsPoint
:param line_start: The lines start
:type line_start: QgsPoint
:param line_end: The lines end
:type line_end: QgsPoint
:return: The point i, which is the end of the orthogonal line
:rtype: QgsPoint
"""
magnitude = line_start.sqrDist(line_end)
        # scalar projection of the point onto the segment (0 at line_start, 1 at line_end)
        u = ((point.x() - line_start.x()) * (line_end.x() - line_start.x()) + (point.y() - line_start.y()) * (line_end.y() - line_start.y())) / magnitude
# intersection point on the line
ix = line_start.x() + u * (line_end.x() - line_start.x())
iy = line_start.y() + u * (line_end.y() - line_start.y())
        return QgsPoint(ix, iy)
| gpl-2.0 | 1,665,867,565,011,549,700 | 39.630435 | 153 | 0.595047 | false |
fugwenna/bunkbot | src/roulette/roulette_cog.py | 1 | 1297 | from random import randint
from discord.ext.commands import command, Context, Cog
from ..bunkbot import BunkBot
from ..channel.channel_service import ChannelService
from ..core.bunk_exception import BunkException
from ..core.bunk_user import BunkUser
from ..core.registry import CHANNEL_SERVICE, USER_SERVICE
from ..user.user_service import UserService
DESCRIPTION = """Basic one in six chance for a russian roulette"""
class Roulette(Cog):
    def __init__(self, channels: ChannelService, users: UserService):
self.channels: ChannelService = channels
self.users: UserService = users
@command(pass_context=True, cls=None, help=DESCRIPTION)
async def roulette(self, ctx: Context) -> None:
try:
await ctx.trigger_typing()
message: str = "Click..."
user: BunkUser = self.users.get_by_id(ctx.message.author.id)
bullet_location: int = randint(0, 5)
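            # a second independent roll matching the bullet chamber gives a 1-in-6 chance of firing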
if randint(0, 5) == bullet_location:
message = "{0} :gun: BANG!!!!!!!!!".format(user.mention)
await ctx.send(message)
except Exception as e:
await self.channels.log_error(e, "roll")
def setup(bot) -> None:
bot.add_cog(Roulette(CHANNEL_SERVICE, USER_SERVICE))
| mit | -3,034,951,817,091,481,600 | 32.131579 | 72 | 0.642251 | false |
billgertz/FAB-UI | fabui/python/force_reset.py | 1 | 1051 | #!/usr/bin/python
#Force Totumduino Reset
import RPi.GPIO as GPIO
import time,sys
import serial
import ConfigParser
import logging
config = ConfigParser.ConfigParser()
config.read('/var/www/fabui/python/config.ini')
trace_file=config.get('macro', 'trace_file')
response_file=config.get('macro', 'response_file')
logging.basicConfig(filename=trace_file,level=logging.INFO,format='%(message)s')
open(trace_file, 'w').close() #reset trace file
def trace(string):
logging.info(string)
return
trace("Start reset controller...")
GPIO.cleanup()
GPIO.setmode(GPIO.BOARD)
GPIO.setwarnings(False)
def reset():
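    # pulse the controller's reset line on BOARD pin 11: drive it high, hold it low for ~120 ms, then release it high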
pin = 11
GPIO.setup(pin, GPIO.OUT)
GPIO.output(pin, GPIO.HIGH)
time.sleep(0.12)
GPIO.output(pin, GPIO.LOW)
time.sleep(0.12)
GPIO.output(pin, GPIO.HIGH)
reset()
serial_port = config.get('serial', 'port')
serial_baud = config.get('serial', 'baud')
serial = serial.Serial(serial_port, serial_baud, timeout=0.5)
serial.flushInput()
serial.flush()
serial.close()
GPIO.cleanup()
trace("Controller ready")
sys.exit()
| gpl-2.0 | 3,917,042,093,612,731,400 | 17.438596 | 80 | 0.721218 | false |
kkozarev/mwacme | src/catalog/hek_find_halo_cmes_v2.py | 1 | 4200 | # -*- coding: utf-8 -*-
import json
import os
from sunpy.net import hek
from pprint import pprint
from datetime import datetime,timedelta
from time import strftime
import numpy as np
import matplotlib.pyplot as plt
client = hek.HEKClient()
#SEARCH FOR FAST AND WIDE CMEs IN THE HEK!
#'FAST' means linear speed higher than [minlinvel] km/s
minlinvel=500.
#'WIDE' means angular width larger than [minangwidth] degrees
minangwidth=60.
#Use the C2 start time
tstart = '08/01/2011 07:23:56'
tst=datetime.strptime(tstart,"%m/%d/%Y %H:%M:%S")
#The current time
tend = strftime("%m/%d/%Y %H:%M:%S") #'06/05/2015 12:40:29'
tet=datetime.strptime(tend,"%m/%d/%Y %H:%M:%S")
event_type = 'CE'
frm_name='CACTus (Computer Aided CME Tracking)'
#help(hek.attrs.fl)
#The extent for the size of field around the Sun center to search, in arcseconds
extent=5000
result = client.query(hek.attrs.Time(tst,tet), hek.attrs.EventType(event_type),
hek.attrs.FRM.Name == frm_name,
hek.attrs.SpatialRegion(x1=-1.*extent,x2=extent,y1=-1.*extent,y2=extent),
hek.attrs.CE.RadialLinVel >= minlinvel,
hek.attrs.CE.AngularWidth >= minangwidth)
#Create the x-axis values in a numpy array
timearray=[]
[timearray.append(elem["event_starttime"]) for elem in result]
time0=datetime.strptime(timearray[0],"%Y-%m-%dT%H:%M:%S")-timedelta(days=30)
time1=datetime.strptime(timearray[-1],"%Y-%m-%dT%H:%M:%S")+timedelta(days=30)
timearray=np.array(timearray)
#Get the CME speeds
linvels=[]
[linvels.append(elem["cme_radiallinvel"]) for elem in result]
maxlinvel=np.amax(linvels)*1.
#Get the CME angular widths
angwidths=[]
[angwidths.append(elem["cme_angularwidth"]) for elem in result]
maxangwidth=np.amax(angwidths)*1.
#Set the equally-spaced groups of CME angular widths
nawgroups=4
angwidth_colors=['k','b','g','r']
degree_sign = u'\N{DEGREE SIGN}'
angwidth_groups=np.arange(1,nawgroups+1)*(maxangwidth-minangwidth)/4.+minangwidth
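# angwidth_groups holds the upper bound of each equally spaced angular-width bin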
#Create the subsets of CME information based on angular widths
aw_linvel_subsets = [[] for i in range(nawgroups)]
aw_time_subsets = [[] for i in range(nawgroups)]
aw_aw_subsets = [[] for i in range(nawgroups)]
aw_markersizes_subsets = [[] for i in range(nawgroups)]
for ii,aw in enumerate(angwidths):
if (aw >= angwidth_groups[0] and aw < angwidth_groups[1]):
aw_linvel_subsets[1].append(linvels[ii])
aw_time_subsets[1].append(timearray[ii])
aw_aw_subsets[1].append(aw)
aw_markersizes_subsets[1].append(50+450*(aw-minangwidth)/(maxangwidth-minangwidth))
elif (aw >= angwidth_groups[1] and aw < angwidth_groups[2]):
aw_linvel_subsets[2].append(linvels[ii])
aw_time_subsets[2].append(timearray[ii])
aw_aw_subsets[2].append(aw)
aw_markersizes_subsets[2].append(50+450*(aw-minangwidth)/(maxangwidth-minangwidth))
elif (aw >= angwidth_groups[2] and aw <= angwidth_groups[3]):
aw_linvel_subsets[3].append(linvels[ii])
aw_time_subsets[3].append(timearray[ii])
aw_aw_subsets[3].append(aw)
aw_markersizes_subsets[3].append(50+450*(aw-minangwidth)/(maxangwidth-minangwidth))
else:
aw_linvel_subsets[0].append(linvels[ii])
aw_time_subsets[0].append(timearray[ii])
aw_aw_subsets[0].append(aw)
aw_markersizes_subsets[0].append(50+450*(aw-minangwidth)/(maxangwidth-minangwidth))
#Set the plot for CME SPEEDS with Marker sizes encoding CME angular widths
plt.title(str(len(result))+' Fast and Wide CMEs from HEK/CACTus in the AIA Era')
plt.axis([time0,time1,minlinvel/1.05,maxlinvel*1.05])
plt.xlabel('Date',fontsize=16)
plt.ylabel('CME Radial Linear Speed [km/s]',fontsize=16)
#Plot the subsets of CMES based on their angular widths
for ii in range(nawgroups):
if ii == 0:
staw=minangwidth
enaw=angwidth_groups[ii]
else:
staw=angwidth_groups[ii-1]
enaw=angwidth_groups[ii]
plt.scatter(aw_time_subsets[ii],aw_linvel_subsets[ii],c=angwidth_colors[ii],alpha=0.5,\
s=aw_markersizes_subsets[ii],label=str(staw)+degree_sign+' < CME Ang. Width < '+str(enaw)+degree_sign)
plt.legend(bbox_to_anchor=(1.05, 1.05))
#Show the plot
plt.show()
| gpl-2.0 | -4,243,700,712,274,347,500 | 38.622642 | 118 | 0.689762 | false |
flavors/countries | setup.py | 1 | 1907 | import os
import re
from setuptools import find_packages, setup
def get_long_description():
for filename in ('README.rst',):
with open(filename, 'r') as f:
yield f.read()
def get_version(package):
with open(os.path.join(package, '__init__.py')) as f:
pattern = r'^__version__ = [\'"]([^\'"]*)[\'"]'
return re.search(pattern, f.read(), re.MULTILINE).group(1)
setup(
name='django-countries-flavor',
version=get_version('countries'),
license='MIT',
description='A Django application that provides a data collection '
'for internationalization and localization purposes.',
long_description='\n\n'.join(get_long_description()),
author='mongkok',
author_email='[email protected]',
maintainer='mongkok',
url='https://github.com/flavors/django-countries/',
packages=find_packages(exclude=['tests*']),
install_requires=[
'Django>=1.9',
'psycopg2>=2.6.2',
'requests>=1.1.0',
],
classifiers=[
'Development Status :: 1 - Planning',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Framework :: Django',
],
zip_safe=False,
tests_require=[
'Django>=1.9',
'factory-boy>=2.8.1',
'psycopg2>=2.6.2',
'requests>=1.1.0',
],
package_data={
'countries': [
'fixtures/**/*.json',
'locale/*/LC_MESSAGES/django.po',
'locale/*/LC_MESSAGES/django.mo',
],
},
)
| mit | 4,646,107,103,152,200,000 | 28.796875 | 71 | 0.567908 | false |
russellb/nova | nova/notifier/list_notifier.py | 1 | 2207 | # Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import flags
from nova import log as logging
from nova.openstack.common import cfg
from nova import utils
from nova.exception import ClassNotFound
list_notifier_drivers_opt = cfg.MultiStrOpt('list_notifier_drivers',
default=['nova.notifier.no_op_notifier'],
help='List of drivers to send notifications')
FLAGS = flags.FLAGS
FLAGS.register_opt(list_notifier_drivers_opt)
LOG = logging.getLogger(__name__)
drivers = None
class ImportFailureNotifier(object):
"""Noisily re-raises some exception over-and-over when notify is called."""
def __init__(self, exception):
self.exception = exception
def notify(self, message):
raise self.exception
def _get_drivers():
"""Instantiates and returns drivers based on the flag values."""
global drivers
if not drivers:
drivers = []
for notification_driver in FLAGS.list_notifier_drivers:
try:
drivers.append(utils.import_object(notification_driver))
except ClassNotFound as e:
drivers.append(ImportFailureNotifier(e))
return drivers
def notify(message):
"""Passes notification to multiple notifiers in a list."""
for driver in _get_drivers():
try:
driver.notify(message)
except Exception as e:
LOG.exception(_("Problem '%(e)s' attempting to send to "
"notification driver %(driver)s." % locals()))
def _reset_drivers():
"""Used by unit tests to reset the drivers."""
global drivers
drivers = None
| apache-2.0 | 7,981,357,401,778,414,000 | 30.084507 | 79 | 0.676031 | false |
lorien/grab | tests/grab_redirect.py | 1 | 4599 | # coding: utf-8
from six.moves.urllib.parse import quote, unquote
from grab.error import GrabTooManyRedirectsError
from tests.util import BaseGrabTestCase, build_grab
def build_location_callback(url, counter):
meta = {
'counter': counter,
'url': url,
}
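    # serve 'counter' 301 redirects pointing back to 'url', then a final 200 "done" response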
def callback():
if meta['counter']:
status = 301
headers = [('Location', meta['url'])]
body = b''
else:
status = 200
headers = []
body = b'done'
meta['counter'] -= 1
return {
'type': 'response',
'status': status,
'body': body,
'headers': headers,
}
return callback
def build_refresh_callback(url, counter):
meta = {
'counter': counter,
'url': url,
}
def callback():
if meta['counter']:
status = 200
body = (
b'<html><head><meta '
b'http-equiv="refresh" content="5"></head>'
)
else:
status = 200
body = b'done'
meta['counter'] -= 1
return {
'type': 'response',
'status': status,
'body': body
}
return callback
class GrabRedirectTestCase(BaseGrabTestCase):
def setUp(self):
self.server.reset()
def test_follow_refresh_off(self):
# By default meta-redirect is off
meta_url = self.server.get_url('/foo')
self.server.response_once['get.data'] =\
'<meta http-equiv="refresh" content="5; url=%s">' % meta_url
grab = build_grab()
grab.go(self.server.get_url())
self.assertEqual(self.server.request['path'], '/')
self.assertEqual(grab.doc.url, self.server.get_url())
def test_follow_refresh_on(self):
meta_url = self.server.get_url('/foo')
# Now test meta-auto-redirect
self.server.response_once['get.data'] =\
'<meta http-equiv="refresh" content="5; url=%s">' % meta_url
grab = build_grab()
grab.setup(follow_refresh=True)
grab.go(self.server.get_url())
self.assertEqual(self.server.request['path'], '/foo')
self.assertEqual(grab.doc.url, meta_url)
def test_spaces_in_refresh_url(self):
meta_url = self.server.get_url('/foo')
# Test spaces in meta tag
self.server.response_once['get.data'] =\
"<meta http-equiv='refresh' content='0;url= %s'>" % meta_url
grab = build_grab()
grab.setup(follow_refresh=True)
grab.go(self.server.get_url())
self.assertEqual(self.server.request['path'], '/foo')
self.assertEqual(grab.doc.url, meta_url)
def test_refresh_redirect_limit(self):
self.server.response['get.callback'] =\
build_refresh_callback(self.server.get_url(), 10)
grab = build_grab()
grab.setup(redirect_limit=10, follow_refresh=True)
grab.go(self.server.get_url())
self.assertTrue(b'done' in grab.doc.body)
self.server.response['get.callback'] =\
build_refresh_callback(self.server.get_url(), 10)
grab.setup(redirect_limit=5, follow_refresh=True)
self.assertRaises(GrabTooManyRedirectsError,
lambda: grab.go(self.server.get_url()))
def test_redirect_limit(self):
self.server.response['get.callback'] = (
build_location_callback(self.server.get_url(), 10)
)
grab = build_grab()
grab.setup(redirect_limit=5)
self.assertRaises(GrabTooManyRedirectsError,
lambda: grab.go(self.server.get_url()))
self.server.response['get.callback'] =\
build_location_callback(self.server.get_url(), 10)
grab.setup(redirect_limit=20)
grab.go(self.server.get_url())
self.assertTrue(b'done' in grab.doc.body)
# Test fails, Maybe test server incorrectly processed UTF-8 :(
#def test_redirect_utf_location(self):
# self.server.response_once['code'] = 301
# self.server.response_once['headers'] = [
# ('Location', (self.server.get_url() + u'фыва').encode('utf-8')),
# ]
# self.server.response_once['data'] = 'content-1'
# self.server.response['data'] = 'content-2'
# grab = build_grab(debug=True, follow_location=True)
# grab.go(self.server.get_url())
# print('~~~', grab.doc.url)
# self.assertTrue(
# quote(u'/фыва'.encode('utf-8'), safe='/') in unquote(grab.doc.url)
# )
| mit | 2,294,489,589,820,108,000 | 31.560284 | 79 | 0.557613 | false |
quarkslab/irma | common/src/plugins/manager.py | 1 | 4896 | #
# Copyright (c) 2013-2018 Quarkslab.
# This file is part of IRMA project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License in the top-level directory
# of this distribution and at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# No part of the project, including this file, may be copied,
# modified, propagated, or distributed except according to the
# terms contained in the LICENSE file.
import os
import sys
import pkgutil
import logging
from irma.common.utils.oopatterns import Singleton
##############################################################################
# Plugin imports
##############################################################################
from irma.common.plugins.exceptions import PluginError, PluginCrashed, \
PluginLoadError, PluginFormatError, DependencyMissing, \
ModuleDependencyMissing, BinaryDependencyMissing, FileDependencyMissing, \
FolderDependencyMissing
from irma.common.plugins.dependencies import Dependency, ModuleDependency, \
BinaryDependency, FileDependency, FolderDependency, PlatformDependency
class PluginManager(Singleton):
__plugins_cls = {}
##########################################################################
# plugin management
##########################################################################
def get_all_plugins(self):
return list(self.__plugins_cls.values())
def discover(self, path=os.path.dirname(__file__), prefix=None):
dirname = os.path.basename(path)
if prefix is None:
prefix = dirname
for importer, name, ispkg in pkgutil.walk_packages([path]):
try:
pkg_name = '%s.%s' % (prefix, name)
if pkg_name not in sys.modules:
__import__(pkg_name)
if ispkg:
self.discover(os.path.join(path, name), pkg_name)
except PluginFormatError as error:
logging.warn(' *** [{name}] Plugin error: {error}'
''.format(name=name, error=error))
except PluginLoadError as error:
logging.warn(' *** [{name}] Plugin failed to load: {error}'
''.format(name=name, error=error))
except PluginCrashed as error:
logging.warn(' *** [{name}] Plugin crashed: {error}'
''.format(name=name, error=error))
except ImportError as error:
logging.exception(error)
##########################################################################
# plugin registering
##########################################################################
@classmethod
def register_plugin(cls, plugin):
logging.debug('Found plugin {name}. Trying to register it.'
''.format(name=plugin.plugin_name))
# check for dependencies
for dependency in plugin.plugin_dependencies:
try:
dependency.check()
except DependencyMissing as error:
# get plugin info
plugin_name = plugin.plugin_name
# get dependency info
dependency = error.dependency
dependency_name = dependency.dependency_name
dependency_type = dependency.__class__.__name__
dependency_help = dependency.help
# warn user and stop loading
warning = '{name} miss dependencies: {deps} ({type}).'
if dependency_help is not None:
warning += ' {help}'
raise PluginLoadError(warning.format(type=dependency_type,
name=plugin_name,
deps=dependency_name,
help=dependency_help))
        # if required, run additional verifications on the plugin
if hasattr(plugin, 'verify'):
try:
plugin.verify()
except Exception as error:
raise PluginLoadError(error)
# add plugin to internal list
if plugin.plugin_canonical_name in cls.__plugins_cls:
logging.debug('Plugin {name} already registered'
''.format(name=plugin.plugin_name))
else:
cls.__plugins_cls[plugin.plugin_canonical_name] = plugin
# mark plugin as active
if plugin.plugin_active is None:
plugin.plugin_active = True
logging.debug('Plugin {name} registered, active set as {state}'
''.format(name=plugin.plugin_name,
state=plugin.plugin_active))
| apache-2.0 | 8,253,696,942,616,350,000 | 42.327434 | 78 | 0.520221 | false |
google/llvm-propeller | lldb/test/API/functionalities/data-formatter/boolreference/TestFormattersBoolRefPtr.py | 2 | 2712 | """
Test lldb data formatter subsystem.
"""
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class DataFormatterBoolRefPtr(TestBase):
mydir = TestBase.compute_mydir(__file__)
@skipUnlessDarwin
def test_boolrefptr_with_run_command(self):
"""Test the formatters we use for BOOL& and BOOL* in Objective-C."""
self.build()
self.boolrefptr_data_formatter_commands()
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
# Find the line number to break at.
self.line = line_number('main.mm', '// Set break point at this line.')
def boolrefptr_data_formatter_commands(self):
"""Test the formatters we use for BOOL& and BOOL* in Objective-C."""
self.runCmd("file " + self.getBuildArtifact("a.out"), CURRENT_EXECUTABLE_SET)
lldbutil.run_break_set_by_file_and_line(
self, "main.mm", self.line, num_expected_locations=1, loc_exact=True)
self.runCmd("run", RUN_SUCCEEDED)
# The stop reason of the thread should be breakpoint.
self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT,
substrs=['stopped',
'stop reason = breakpoint'])
# This is the function to remove the custom formats in order to have a
# clean slate for the next test case.
def cleanup():
self.runCmd('type format clear', check=False)
self.runCmd('type summary clear', check=False)
self.runCmd('type synth clear', check=False)
# Execute the cleanup function during test case tear down.
self.addTearDownHook(cleanup)
isArm = 'arm' in self.getArchitecture()
# Now check that we use the right summary for BOOL&
self.expect('frame variable yes_ref',
substrs=['YES'])
self.expect('frame variable no_ref',
substrs=['NO'])
if not(isArm):
self.expect('frame variable unset_ref', substrs=['12'])
# Now check that we use the right summary for BOOL*
self.expect('frame variable yes_ptr',
substrs=['YES'])
self.expect('frame variable no_ptr',
substrs=['NO'])
if not(isArm):
self.expect('frame variable unset_ptr', substrs=['12'])
# Now check that we use the right summary for BOOL
self.expect('frame variable yes',
substrs=['YES'])
self.expect('frame variable no',
substrs=['NO'])
if not(isArm):
self.expect('frame variable unset', substrs=['12'])
| apache-2.0 | -4,510,948,938,182,836,700 | 34.220779 | 85 | 0.59587 | false |
yrchen/CommonRepo | commonrepo/groups_api/permissions.py | 1 | 1342 | # -*- coding: utf-8 -*-
#
# Copyright 2016 edX PDR Lab, National Central University, Taiwan.
#
# http://edxpdrlab.ncu.cc/
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Created By: [email protected]
# Maintained By: [email protected]
#
from __future__ import absolute_import, unicode_literals
from rest_framework import permissions
__author__ = '[email protected] (Xaver Y.R. Chen)'
class IsOwnerOrReadOnly(permissions.BasePermission):
"""
Custom permission to only allow owners of an object to edit it.
"""
def has_object_permission(self, request, view, obj):
# Read permissions are allowed to any request
if request.method in permissions.SAFE_METHODS:
return True
# Write permissions are only allowed to the owner of the snippet
return obj.creator == request.user
| apache-2.0 | 5,770,680,299,890,904,000 | 30.209302 | 74 | 0.716841 | false |
menify/sandbox | tags/aql_beta_1_16032008/setup.py | 1 | 3780 |
import logging
import utils
import options
_Warning = logging.Warning
#//===========================================================================//
_site_setup = []
_user_setup = {}
_tools_setup = {}
_tools_post_setup = {}
def ResetSetup( site_setup = _site_setup,
tools_setup = _tools_setup,
tools_post_setup = _tools_post_setup ):
del site_setup[:]
tools_setup.clear()
tools_post_setup.clear()
#//===========================================================================//
def AddSiteSetup( setup_function, _site_setup = _site_setup, toList = utils.toList ):
_site_setup += toList( setup_function )
def SiteSetup( options, os_env ):
global _site_setup
for f in _site_setup:
f( options = options, os_env = os_env )
UserSetup( options, os_env )
#//===========================================================================//
def AddUserSetup( setup_id, setup_function, _user_setup = _user_setup ):
AddToolSetup( setup_id, setup_function, _user_setup )
def UserSetup( options, os_env, _user_setup = _user_setup ):
for s in options.setup.GetList():
for f in _user_setup.get( s, [] ):
f( options = options, os_env = os_env )
#//===========================================================================//
def AddToolSetup( tool_name, setup_function, tools_setup = _tools_setup, toList = utils.toList ):
current_setup_functions = tools_setup.setdefault( tool_name, [] )
tools_setup[ tool_name ] = current_setup_functions + toList( setup_function )
#//===========================================================================//
def AddToolPostSetup( tool_name, setup_function, tools_post_setup = _tools_post_setup ):
AddToolSetup( tool_name, setup_function, tools_post_setup )
#//===========================================================================//
def _tool_setup( tool_name, env, tools_setup = _tools_setup ):
options = env.get( 'AQL_OPTIONS' )
if options is None:
return
options.SetEnv( env )
os_env = env['ENV']
for f in tools_setup.get( tool_name, [] ):
f( env = env, options = options, os_env = os_env )
#//===========================================================================//
def _tool_post_setup( tool_name, env, tools_post_setup = _tools_post_setup ):
_tool_setup( tool_name, env, tools_post_setup )
#//===========================================================================//
def _tool_exists( self, env ):
if self._aql_is_exist is None:
_tool_setup( self.name, env )
self._aql_is_exist = self._aql_exists( env )
return self._aql_is_exist
#//===========================================================================//
def _tool_generate( self, env ):
if self._aql_is_exist is None:
if not _tool_exists( self, env ):
_Warning( "Tool: '%s' has not been found, but it has been added." % (self.name) )
self._aql_generate( env )
_tool_post_setup( self.name, env )
#//===========================================================================//
def _init_tool( self, name, toolpath = [], **kw ):
_SCons_Tool_Tool_init( self, name, toolpath, **kw )
self._aql_is_exist = None
self._aql_generate = self.generate
self._aql_exists = self.exists
self.exists = lambda env, self = self: _tool_exists( self, env )
self.generate = lambda env, self = self: _tool_generate( self, env )
#//===========================================================================//
import SCons.Tool
_SCons_Tool_Tool_init = SCons.Tool.Tool.__init__
SCons.Tool.Tool.__init__ = _init_tool
| mit | 3,797,604,539,554,396,000 | 31.033898 | 101 | 0.462169 | false |
SEL-Columbia/commcare-hq | corehq/apps/users/util.py | 1 | 4131 | import re
from django.conf import settings
from django.contrib.auth.models import User
from couchdbkit.resource import ResourceNotFound
from corehq import toggles, privileges
from dimagi.utils.couch.database import get_db
from django.core.cache import cache
from django_prbac.exceptions import PermissionDenied
from django_prbac.utils import ensure_request_has_privilege
WEIRD_USER_IDS = ['commtrack-system', 'demo_user']
def cc_user_domain(domain):
sitewide_domain = settings.HQ_ACCOUNT_ROOT
return ("%s.%s" % (domain, sitewide_domain)).lower()
def format_username(username, domain):
return "%s@%s" % (username.lower(), cc_user_domain(domain))
def normalize_username(username, domain=None):
from django.core.validators import validate_email
username = re.sub(r'\s+', '.', username).lower()
if domain:
username = format_username(username, domain)
validate_email(username)
else:
# if no domain, make sure that the username is a valid "local part" of an email address
validate_email("%[email protected]" % username)
return username
def raw_username(username):
"""
Strips the @domain.commcarehq.org from the username if it's there
"""
sitewide_domain = settings.HQ_ACCOUNT_ROOT
username = username.lower()
try:
u, d = username.split("@")
except Exception:
return username
if d.endswith('.' + sitewide_domain):
return u
else:
return username
def user_id_to_username(user_id):
from corehq.apps.users.models import CouchUser
if not user_id:
return user_id
elif user_id == "demo_user":
return "demo_user"
try:
login = CouchUser.get_db().get(user_id)
except ResourceNotFound:
return None
return raw_username(login['username']) if "username" in login else None
def cached_user_id_to_username(user_id):
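    # memoize the username lookup in the Django cache (default timeout), keyed by user id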
if not user_id:
return None
key = 'user_id_username_cache_{id}'.format(id=user_id)
ret = cache.get(key)
if ret:
return ret
else:
ret = user_id_to_username(user_id)
cache.set(key, ret)
return ret
def django_user_from_couch_id(id):
"""
From a couch id of a profile object, get the django user
"""
# get the couch doc
couch_rep = get_db().get(id)
django_id = couch_rep["django_user"]["id"]
return User.objects.get(id=django_id)
def doc_value_wrapper(doc_cls, value_cls):
"""
Wrap both the doc and the value
Code copied from couchdbkit.schema.base.QueryMixin.__view
"""
#from corehq.apps.users.models import CouchUser
def wrapper(row):
data = row.get('value')
docid = row.get('id')
doc = row.get('doc')
data['_id'] = docid
if 'rev' in data:
data['_rev'] = data.pop('rev')
value_cls._allow_dynamic_properties = True
doc_cls._allow_dynamic_properties = True
value_inst = value_cls.wrap(data)
doc_inst = doc_cls.wrap(doc)
return doc_inst, value_inst
return wrapper
def user_data_from_registration_form(xform):
"""
Helper function for create_or_update_from_xform
"""
user_data = {}
if "user_data" in xform.form and "data" in xform.form["user_data"]:
items = xform.form["user_data"]["data"]
if not isinstance(items, list):
items = [items]
for item in items:
user_data[item["@key"]] = item["#text"]
return user_data
def can_add_extra_mobile_workers(request):
from corehq.apps.users.models import CommCareUser
from corehq.apps.accounting.models import BillingAccount
num_web_users = CommCareUser.total_by_domain(request.domain)
user_limit = request.plan.user_limit
if user_limit == -1 or num_web_users < user_limit:
return True
try:
ensure_request_has_privilege(request, privileges.ALLOW_EXCESS_USERS)
except PermissionDenied:
account = BillingAccount.get_account_by_domain(request.domain)
if account is None or account.date_confirmed_extra_charges is None:
return False
return True
| bsd-3-clause | 3,465,629,854,168,599 | 28.719424 | 95 | 0.648995 | false |
richhorace/docker-getpocket-elastic | scripts/reprocess-pocket-raw-data.py | 1 | 2316 | #/usr/bin/env python3
import argparse
import glob
import json
import logging
import datetime
import os
from local import DATA_DIR, LOG_DIR, REPROCESS_DIR
def parse_files(fnames):
for fname in fnames:
stat = os.stat(fname)
f_date = str(datetime.datetime.utcfromtimestamp(stat.st_birthtime).isoformat())
data = read_file(fname)
parse_data(data, fname,f_date )
def read_file(fname):
with open(fname, 'r', encoding='utf-8') as f:
return json.load(f)
def parse_data(data, fname, f_date):
LOG_PATH = '{}/getpocket-reprocessed.log'.format(LOG_DIR)
logging.basicConfig(level=logging.INFO,
format='{"retrieved": "' + f_date +'", "level": "%(levelname)s", %(message)s}',
filename=LOG_PATH,
filemode='a+')
total = 0
resolved_id_missing = 0
for v in data['list'].values():
fn = {"filename": fname }
v.update(fn)
# Remove unnecessary data
if v.get('image'):
del v['image']
if v.get('images'):
del v['images']
if v.get('videos'):
del v['videos']
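        # items Pocket never resolved (resolved_id == 0) are logged as errors and skipped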
if v.get('resolved_id', 0) == 0:
resolved_id_missing += 1
logging.error('"pocket_data": {}'.format(json.dumps(v)))
# logging.error('"pocket_data": {}, "filename": {}'.format(json.dumps(v)))
continue
if v.get('authors'):
try:
author_data = v['authors'].values()
v['authors'] = [(a['name']) for a in author_data]
except BaseException:
print(v['authors'])
if v.get('tags'):
try:
tag_data = v['tags'].keys()
v['tags'] = [a for a in tag_data]
except BaseException:
print(v['tags'])
fn = {"filename": fname }
v.update(fn)
logging.info('"pocket_data": {}'.format(json.dumps(v)))
total += 1
print("Total ({}): {}".format(fname, total))
print("Missing Resolved Id ({}): {}".format(fname, resolved_id_missing))
def main():
# Get local JSON file names
file_names = glob.glob('{}/*.json'.format(REPROCESS_DIR))
# Parse all JSON files
parse_files(file_names)
main()
| mit | 6,101,022,340,132,048,000 | 26.903614 | 103 | 0.525475 | false |
WmHHooper/aima-python | submissions/Martinez/vacuum2.py | 1 | 1449 | import agents as ag
def HW2Agent() -> object:
def program(percept):
bump, status = percept
if status == 'Dirty':
action = 'Suck'
else:
lastBump, lastStatus, = program.oldPercepts[-1]
lastAction = program.oldActions[-1]
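            # the checks below run in order, so the last matching rule decides the action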
if bump == 'None':
action = 'Left'
if bump != 'None':
action = 'Right'
if bump != 'None' and lastAction == 'Left':
action = 'Right'
if bump != 'None' and lastAction == 'Right':
action = 'Down'
if bump != 'None' and lastAction == 'Down':
action = 'Up'
if bump == 'None' and lastAction == 'Down':
action = 'Down'
if bump == 'None' and lastAction == 'Right':
action = 'Right'
if bump == 'None' and lastAction == 'Left':
action = 'Right'
if bump != 'None' and lastAction == 'Left':
action = 'Up'
            # note: 'action' is always assigned above, since the two leading bump checks cover every case
program.oldPercepts.append(percept)
program.oldActions.append(action)
return action
# assign static variables here
program.oldPercepts = [('None', 'Clean')]
program.oldActions = ['NoOp']
agt = ag.Agent(program)
# assign class attributes here:
# agt.direction = ag.Direction('left')
return agt | mit | -7,472,466,343,766,180,000 | 26.884615 | 62 | 0.507246 | false |
Novartis/railroadtracks | src/test/test_recipe.py | 1 | 26005 | # Copyright 2014-2015 Novartis Institutes for Biomedical Research
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest, tempfile, csv, shutil, logging
from railroadtracks import core, hortator, rnaseq, easy, environment
from railroadtracks.model.simulate import (PHAGEGFF,
PHAGEGTF,
PHAGEFASTA)
import railroadtracks.model.simulate
from railroadtracks.test.test_model import (_build_UpToAlign,
_build_StepIndex,
_build_StepAlign,
_build_StepQuantify)
# Test the writing of recipes
from railroadtracks import easy
class RecipeTestCase(unittest.TestCase):
def setUp(self):
# -- recipe-init-begin
# -- initialization boiler plate code
wd = tempfile.mkdtemp()
project = easy.Project(rnaseq, wd=wd)
# declare the 3rd-party command-line tools we will use
env = easy.Environment(rnaseq)
# -- recipe-init-end
# -- recipe-data-begin
# Phage genome shipped with the package for testing purposes
PHAGEFASTA = railroadtracks.model.simulate.PHAGEFASTA
PHAGEGFF = railroadtracks.model.simulate.PHAGEGFF
# create random data for 6 samples (just testing here)
nsamples = 6
samplereads = list()
with open(PHAGEFASTA) as fasta_fh:
reference = next(railroadtracks.model.simulate.readfasta_iter(fasta_fh))
for sample_i in range(nsamples):
read1_fh = tempfile.NamedTemporaryFile(prefix='read1', suffix='.fq')
read2_fh = tempfile.NamedTemporaryFile(prefix='read2', suffix='.fq')
read1_fh, read2_fh = railroadtracks.model.simulate.randomPEreads(read1_fh,
read2_fh,
reference)
samplereads.append((read1_fh, read2_fh))
sampleinfo_fh = tempfile.NamedTemporaryFile(suffix='.csv', mode='w+')
csv_w = csv.writer(sampleinfo_fh)
csv_w.writerow(['sample_id', 'group'])
for i in range(6):
csv_w.writerow([str(i), ('A','B')[i%2]])
sampleinfo_fh.flush()
referenceannotation = rnaseq.GFFFile(PHAGEGFF)
# -- recipe-data-end
self._wd = wd
self.project = project
self.reference_fn = PHAGEFASTA
self.env = env
self.nsamples = nsamples
self.samplereads = samplereads
self.sampleinfo_fh = sampleinfo_fh
self.referenceannotation = referenceannotation
self._PHAGEFASTA = PHAGEFASTA
self._PHAGEGFF = PHAGEGFF
def tearDown(self):
samplereads = self.samplereads
# -- recipe-teardown-begin
for read1_fh, read2_fh in self.samplereads:
read1_fh.close()
read2_fh.close()
        # delete the temporary working directory
shutil.rmtree(self.project.wd)
# -- recipe-teardown-end
def test_File(self):
#FIXME: rather test it in the model ?
reference = core.File(self.reference_fn)
@unittest.skipIf(not (environment.Executable.ispresent('bowtie2-build') and \
environment.Executable.ispresent('htseq-count') and \
environment.Executable.ispresent('R') and \
environment.R('R').packageversion_or_none('edgeR') is not None),
'bowtie2 and/or htseq-count is not in the PATH')
def test_RecipeSimpleIncremental(self):
project = self.project
env = self.env
nsamples = self.nsamples
samplereads = self.samplereads
sampleinfo_fh = self.sampleinfo_fh
reference_fn = self.reference_fn
referenceannotation = self.referenceannotation
PHAGEFASTA = self._PHAGEFASTA
PHAGEGFF = self._PHAGEGFF
# steps used
bowtie2index = env.activities.INDEX.bowtie2build
bowtie2align = env.activities.ALIGN.bowtie2
htseqcount = env.activities.QUANTIFY.htseqcount
merge = env.activities.UTILITY.columnmerger
edger = env.activities.DIFFEXP.edger
from railroadtracks import easy
# sequence of tasks to run
torun = list()
# index for alignment
Assets = bowtie2index.Assets
assets = Assets(Assets.Source(rnaseq.FASTAFile(reference_fn)),
Assets.Target.createundefined())
task_index = project.add_task(bowtie2index,
assets)
# the step is not done
self.assertEqual(hortator._TASK_TODO, task_index.info[1])
torun.append(task_index)
# run the tasks
for task in torun:
# run only if not done
if task.info[1] != hortator._TASK_DONE:
task.execute()
task.status = hortator._TASK_DONE
self.assertEqual(1, project.persistent_graph.nconcrete_steps)
# now that the tasks have run let's open the same project
project_same = easy.Project(project.model, wd=project.wd)
# index for alignment
Assets = bowtie2index.Assets
assets = Assets(Assets.Source(rnaseq.FASTAFile(reference_fn)),
Assets.Target.createundefined())
task_index_same = project_same.add_task(bowtie2index,
assets)
self.assertNotEqual(task_index, task_index_same)
self.assertNotEqual(task_index.call.assets, task_index_same.call.assets)
self.assertListEqual(list(task_index.call.assets.source.reference),
list(task_index_same.call.assets.source.reference))
self.assertListEqual(list(task_index.call.assets.target.indexfilepattern),
list(task_index_same.call.assets.target.indexfilepattern))
self.assertEqual(hortator._TASK_DONE, task_index_same.info[1])
self.assertEqual(1, project.persistent_graph.nconcrete_steps)
def _recipesimpleincremental(self, runtasks):
project = self.project
env = self.env
nsamples = self.nsamples
samplereads = self.samplereads
sampleinfo_fh = self.sampleinfo_fh
reference_fn = self.reference_fn
referenceannotation = self.referenceannotation
PHAGEFASTA = self._PHAGEFASTA
PHAGEGFF = self._PHAGEGFF
# steps used
bowtie2index = env.activities.INDEX.bowtie2build
bowtie2align = env.activities.ALIGN.bowtie2
htseqcount = env.activities.QUANTIFY.htseqcount
merge = env.activities.UTILITY.columnmerger
edger = env.activities.DIFFEXP.edger
for iteration in range(5):
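            # each iteration builds one more stage of the pipeline and re-runs it, checking that finished steps are reused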
nextiteration = False
# sequence of tasks to run
torun = list()
# index for alignment
Assets = bowtie2index.Assets
assets = Assets(Assets.Source(rnaseq.FASTAFile(reference_fn)),
Assets.Target.createundefined())
task_index = project.add_task(bowtie2index, assets)
torun.append(task_index)
if iteration < 1:
nextiteration = True
runtasks(torun)
self.assertEqual(1, project.persistent_graph.nconcrete_steps)
continue
# process all samples
sample_counts = list()
for sample_i, (read1_fh, read2_fh) in enumerate(samplereads):
# align
Assets = bowtie2align.Assets
assets = Assets(Assets.Source(task_index.call.assets.target.indexfilepattern,
rnaseq.FASTQPossiblyGzipCompressed(read1_fh.name),
rnaseq.FASTQPossiblyGzipCompressed(read2_fh.name)),
Assets.Target.createundefined())
task_align = project.add_task(bowtie2align, assets)
torun.append(task_align)
if iteration < 2:
nextiteration = True
runtasks(torun)
self.assertEqual(1+(sample_i+1), project.persistent_graph.nconcrete_steps)
continue
# quantify
# (non-default parameters to fit our demo GFF)
params = rnaseq.HTSeqCount._noexons_parameters
Assets = htseqcount.Assets
assets = Assets(Assets.Source(task_align.call.assets.target.alignment,
rnaseq.GFFFile(referenceannotation)),
Assets.Target.createundefined())
task_quantify = project.add_task(htseqcount,
assets,
parameters=params)
torun.append(task_quantify)
if iteration < 3:
nextiteration = True
runtasks(torun)
self.assertEqual(1+len(samplereads)+(sample_i+1),
project.persistent_graph.nconcrete_steps)
continue
# keep a pointer to the counts, as we will use it in the merge step
sample_counts.append(task_quantify.call.assets)
if nextiteration:
continue
# merge the sample data into a table (so differential expression can be computed)
Assets = merge.Assets
counts = tuple(x.target.counts for x in sample_counts)
assets = Assets(Assets.Source(rnaseq.CSVFileSequence(counts)),
merge.Assets.Target.createundefined())
task_merge = project.add_task(merge,
assets,
parameters=("0", "1"))
torun.append(task_merge)
if iteration < 4:
nextiteration = True
runtasks(torun)
self.assertEqual(1+2*len(samplereads)+1,
project.persistent_graph.nconcrete_steps)
continue
# differential expression with edgeR
Assets = edger.Assets
assets = Assets(Assets.Source(task_merge.call.assets.target.counts,
rnaseq.CSVFile(sampleinfo_fh.name)),
Assets.Target.createundefined())
task_de = project.add_task(edger,
assets)
if iteration < 5:
nextiteration = True
runtasks(torun)
                self.assertEqual(1+2*len(samplereads)+2, # 1 index + 2 steps per sample (align + quantify) + 1 merge + 1 differential expression
project.persistent_graph.nconcrete_steps)
continue
@unittest.skipIf(not (environment.Executable.ispresent('bowtie2-build') and \
environment.Executable.ispresent('htseq-count') and \
environment.Executable.ispresent('R') and \
environment.R('R').packageversion_or_none('edgeR') is not None),
'bowtie2 and/or htseq-count is not in the PATH')
def test_RecipeSimpleIncrementalComplete(self):
def runtasks(torun):
# run the tasks
for task in torun:
# run only if not done
if task.info[1] != hortator._TASK_DONE:
task.execute()
self._recipesimpleincremental(runtasks)
@unittest.skipIf(not (environment.Executable.ispresent('bowtie2-build') and \
environment.Executable.ispresent('htseq-count') and \
environment.Executable.ispresent('R') and \
environment.R('R').packageversion_or_none('edgeR') is not None),
'bowtie2, htseq-count, R (with package "edgeR") must be in the PATH')
def test_RecipeSimpleIncrementalCompleteNoRun(self):
def runtasks(torun):
# do nothing
pass
self._recipesimpleincremental(runtasks)
@unittest.skipIf(not (environment.Executable.ispresent('bowtie2-build') and \
environment.Executable.ispresent('htseq-count') and \
environment.Executable.ispresent('R') and \
environment.R('R').packageversion_or_none('edgeR') is not None),
'bowtie2, htseq-count, R (with package "edgeR") must be in the PATH')
def test_RecipeSimple(self):
project = self.project
env = self.env
nsamples = self.nsamples
samplereads = self.samplereads
sampleinfo_fh = self.sampleinfo_fh
reference_fn = self.reference_fn
referenceannotation = self.referenceannotation
PHAGEFASTA = self._PHAGEFASTA
PHAGEGFF = self._PHAGEGFF
# -- recipesimple-test-begin
# steps used
bowtie2index = env.activities.INDEX.bowtie2build
bowtie2align = env.activities.ALIGN.bowtie2
htseqcount = env.activities.QUANTIFY.htseqcount
merge = env.activities.UTILITY.columnmerger
edger = env.activities.DIFFEXP.edger
from railroadtracks import easy
# sequence of tasks to run
torun = list()
# index for alignment
Assets = bowtie2index.Assets
assets = Assets(Assets.Source(rnaseq.FASTAFile(reference_fn)),
Assets.Target.createundefined())
task_index = project.add_task(bowtie2index, assets)
torun.append(task_index)
# process all samples
sample_counts = list()
for read1_fh, read2_fh in samplereads:
# align
Assets = bowtie2align.Assets
assets = Assets(Assets.Source(task_index.call.assets.target.indexfilepattern,
rnaseq.FASTQPossiblyGzipCompressed(read1_fh.name),
rnaseq.FASTQPossiblyGzipCompressed(read2_fh.name)),
Assets.Target.createundefined())
task_align = project.add_task(bowtie2align, assets)
torun.append(task_align)
# quantify
# (non-default parameters to fit our demo GFF)
params = rnaseq.HTSeqCount._noexons_parameters
Assets = htseqcount.Assets
assets = Assets(Assets.Source(task_align.call.assets.target.alignment,
rnaseq.GFFFile(referenceannotation)),
Assets.Target.createundefined())
task_quantify = project.add_task(htseqcount,
assets,
parameters=params)
torun.append(task_quantify)
# keep a pointer to the counts,
# as we will use them in the merge step
sample_counts.append(task_quantify.call.assets)
# merge the sample data into a table
# (so differential expression can be computed)
Assets = merge.Assets
counts = tuple(x.target.counts for x in sample_counts)
assets = Assets(Assets.Source(rnaseq.CSVFileSequence(counts)),
merge.Assets.Target.createundefined())
task_merge = project.add_task(merge,
assets,
parameters=("0","1"))
torun.append(task_merge)
# differential expression with edgeR
Assets = edger.Assets
assets = Assets(Assets.Source(task_merge.call.assets.target.counts,
rnaseq.CSVFile(sampleinfo_fh.name)),
Assets.Target.createundefined())
task_de = project.add_task(edger,
assets)
# run the tasks
for task in torun:
# run only if not done
if task.info[1] != hortator._TASK_DONE:
task.execute()
# get results
final_storedentities = project.get_targetsofactivity(rnaseq.ACTIVITY.DIFFEXP)
# get the step that created the results files
final_steps = list()
for stored_entity in final_storedentities:
final_steps.append(project.persistent_graph.get_parenttask_of_storedentity(stored_entity))
# -- recipesimple-test-end
self.assertEqual(1, len(final_storedentities))
self.assertEqual(core.File.__name__, final_storedentities[0].clsname)
self.assertEqual('railroadtracks.model.diffexp.EdgeR', final_steps[0].clsname)
# FIXME: not yet implemented
# now that we have all steps, we "only" have to run them
#steps = todo.stepcrawler()
#for s in steps:
# print('%s' % (s.unifiedname))
# s.run()
@unittest.skipIf(not (environment.Executable.ispresent('bowtie2-build') and \
environment.Executable.ispresent('bowtie-build') and \
environment.Executable.ispresent('STAR') and \
environment.Executable.ispresent('R') and \
environment.R('R').packageversion_or_none('edgeR') is not None and \
environment.R('R').packageversion_or_none('DESeq') is not None and \
environment.R('R').packageversion_or_none('DESeq2') is not None and \
environment.R('R').packageversion_or_none('limma') is not None),
'bowtie2, bowtie, STAR, TopHat2, and R (with packages "edgeR", "DESeq", "DESeq2", "limma") must be in the PATH')
def test_RecipeLoop(self):
project = self.project
env = self.env
nsamples = self.nsamples
samplereads = self.samplereads
sampleinfo_fh = self.sampleinfo_fh
reference_fn = self.reference_fn
referenceannotation = self.referenceannotation
PHAGEFASTA = self._PHAGEFASTA
PHAGEGFF = self._PHAGEGFF
# -- recipeloop-test-begin
from railroadtracks import easy
torun = list()
# bowtie
bowtie1index = env.activities.INDEX.bowtiebuild
bowtie1align = env.activities.ALIGN.bowtie
Assets = bowtie1index.Assets
fa_file = rnaseq.FASTAFile(reference_fn)
task_index_bowtie1 = project.add_task(bowtie1index,
Assets(Assets.Source(fa_file),
None))
torun.append(task_index_bowtie1)
# bowtie2
bowtie2index = env.activities.INDEX.bowtie2build
bowtie2align = env.activities.ALIGN.bowtie2
Assets = bowtie2index.Assets
fa_file = rnaseq.FASTAFile(reference_fn)
task_index_bowtie2 = project.add_task(bowtie2index,
Assets(Assets.Source(fa_file),
None))
torun.append(task_index_bowtie2)
# STAR
starindex = env.activities.INDEX.starindex
staralign = env.activities.ALIGN.staralign
Assets = starindex.Assets
fa_file = rnaseq.FASTAFile(reference_fn)
task_index_star = project.add_task(starindex,
Assets(Assets.Source(fa_file),
None))
torun.append(task_index_star)
# TopHat2
# (index from bowtie2 used)
#tophat2 = env.activities.ALIGN.tophat2
# featureCount
featurecount = env.activities.QUANTIFY.featurecount
# Merge columns (obtained from counting)
merge = env.activities.UTILITY.columnmerger
# EdgeR, DESeq, DESeq2, and LIMMA voom
edger = env.activities.DIFFEXP.edger
deseq = env.activities.DIFFEXP.deseq
deseq2 = env.activities.DIFFEXP.deseq2
voom = env.activities.DIFFEXP.limmavoom
# Now explore the different alignment presets in bowtie2, and vanilla star
from itertools import cycle
from collections import namedtuple
Options = namedtuple('Options', 'aligner assets_index parameters')
# Try various presets for bowtie2
bowtie2_parameters = (('--very-fast', ), ('--fast', ),
('--sensitive', ), ('--very-sensitive', ))
options = [Options(*x) for x in zip(cycle((bowtie2align,)),
cycle((task_index_bowtie2.call.assets.target,)),
bowtie2_parameters)]
# add bowtie
options.append(Options(bowtie1align, task_index_bowtie1.call.assets.target, tuple()))
# add STAR (vanilla, no specific options beside the size of index k-mers)
options.append(Options(staralign,
task_index_star.call.assets.target,
('--genomeChrBinNbits', '12')))
# add TopHat2
#options.append(Options(tophat2, task_index_bowtie2.call.assets.target, tuple()))
# loop over the options
for option in options:
sample_counts = list()
# loop over the samples
for sample_i in range(nsamples):
read1_fh, read2_fh = samplereads[sample_i]
# align
Assets = option.aligner.Assets
assets = Assets(Assets.Source(option.assets_index.indexfilepattern,
rnaseq.FASTQPossiblyGzipCompressed(read1_fh.name),
rnaseq.FASTQPossiblyGzipCompressed(read2_fh.name)),
Assets.Target.createundefined())
task_align = project.add_task(option.aligner,
assets,
parameters=option.parameters)
torun.append(task_align)
# quantify
# (non-default parameters to fit our demo GFF)
Assets = featurecount.Assets
assets = Assets(Assets.Source(task_align.call.assets.target.alignment,
rnaseq.GFFFile(referenceannotation)),
Assets.Target.createundefined())
task_quantify = project.add_task(featurecount,
assets,
parameters = ('--gtf-featuretype', 'CDS',
'--gtf-attrtype', 'ID'))
torun.append(task_quantify)
# keep a pointer to the counts, as we will use it in the merge step
sample_counts.append(task_quantify.call.assets)
# merge the sample data into a table (so differential expression can be computed)
Assets = merge.Assets
source = Assets.Source(rnaseq.CSVFileSequence(tuple(x.target.counts\
for x in sample_counts)))
assets_merge = Assets(source,
Assets.Target.createundefined())
task_merge = project.add_task(merge,
assets_merge,
parameters=("0","1"))
torun.append(task_merge)
# differential expression with edgeR, deseq2, and voom
# (deseq is too whimsical for tests)
for diffexp, params in ((edger, ()),
(deseq, ('--dispersion-fittype=local', )),
(deseq2, ()),
(voom, ())):
Assets = diffexp.Assets
assets = Assets(Assets.Source(task_merge.call.assets.target.counts,
core.File(sampleinfo_fh.name)),
Assets.Target.createundefined())
task_de = project.add_task(diffexp,assets)
torun.append(task_de)
# run the tasks
# (this is an integration test rather than a unit test - the
# 3rd-party tools are often brittle and we want to keep the noise level down)
env_log_level = environment.logger.level
environment.logger.level = logging.ERROR
try:
for task in torun:
if task.info[1] != hortator._TASK_DONE:
try:
task.execute()
status = easy.hortator._TASK_DONE
except:
status = easy.hortator._TASK_FAILED
project.persistent_graph.step_concrete_state(hortator.DbID(task.task_id, False),
easy.hortator._TASK_STATUS_LIST[status])
finally:
environment.logger.level = env_log_level
# -- recipeloop-test-end
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 5,608,266,947,258,619,000 | 44.304878 | 133 | 0.55051 | false |
smurn/twistit | twistit/test_testing.py | 1 | 1640 | # Copyright (C) 2015 Stefan C. Mueller
import unittest
from twisted.internet import defer
import twistit
class TestExtract(unittest.TestCase):
def test_success(self):
d = defer.succeed(42)
self.assertEqual(42, twistit.extract(d))
def test_fail(self):
d = defer.fail(ValueError())
self.assertRaises(ValueError, twistit.extract, d)
def test_not_called(self):
d = defer.Deferred()
self.assertRaises(twistit.NotCalledError, twistit.extract, d)
class TestExtractFailure(unittest.TestCase):
def test_success(self):
d = defer.succeed(42)
self.assertRaises(ValueError, twistit.extract_failure, d)
def test_fail(self):
d = defer.fail(ValueError())
f = twistit.extract_failure(d)
self.assertTrue(f.check(ValueError))
def test_not_called(self):
d = defer.Deferred()
self.assertRaises(twistit.NotCalledError, twistit.extract_failure, d)
class TestHasValue(unittest.TestCase):
def test_success(self):
d = defer.succeed(None)
self.assertTrue(twistit.has_result(d))
def test_fail(self):
d = defer.fail(ValueError())
self.assertTrue(twistit.has_result(d))
d.addErrback(lambda _:None) # avoid stderr output during test.
def test_notcalled(self):
d = defer.Deferred()
self.assertFalse(twistit.has_result(d))
def test_paused(self):
d = defer.succeed(None)
d.addCallback(lambda _:defer.Deferred())
self.assertFalse(twistit.has_result(d)) | mit | -6,057,147,474,698,230,000 | 28.836364 | 77 | 0.622561 | false |
coinbase/coinbase-python | coinbase/wallet/auth.py | 1 | 1718 | # coding: utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import hashlib
import hmac
from requests.utils import to_native_string
import time
from requests.auth import AuthBase
class HMACAuth(AuthBase):
def __init__(self, api_key, api_secret, api_version):
self.api_key = api_key
self.api_secret = api_secret
self.api_version = api_version
def __call__(self, request):
timestamp = str(int(time.time()))
message = timestamp + request.method + request.path_url + (request.body or '')
secret = self.api_secret
if not isinstance(message, bytes):
message = message.encode()
if not isinstance(secret, bytes):
secret = secret.encode()
signature = hmac.new(secret, message, hashlib.sha256).hexdigest()
request.headers.update({
to_native_string('CB-VERSION'): self.api_version,
to_native_string('CB-ACCESS-KEY'): self.api_key,
to_native_string('CB-ACCESS-SIGN'): signature,
to_native_string('CB-ACCESS-TIMESTAMP'): timestamp,
})
return request
class OAuth2Auth(AuthBase):
def __init__(self, access_token_getter, api_version):
self.access_token_getter = access_token_getter
self.api_version = api_version
def __call__(self, request):
access_token = self.access_token_getter()
request.headers.update({
to_native_string('CB-VERSION'): self.api_version,
to_native_string('Authorization'): to_native_string('Bearer {}'.format(access_token)),
})
return request
| apache-2.0 | -6,887,145,672,861,208,000 | 32.038462 | 98 | 0.633295 | false |
aerospike/aerospike-client-python | examples/client/remove_bin.py | 1 | 4429 | # -*- coding: utf-8 -*-
##########################################################################
# Copyright 2013-2021 Aerospike, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##########################################################################
from __future__ import print_function
import aerospike
import sys
from optparse import OptionParser
##########################################################################
# Options Parsing
##########################################################################
usage = "usage: %prog [options] key bin_names"
optparser = OptionParser(usage=usage, add_help_option=False)
optparser.add_option(
"--help", dest="help", action="store_true",
help="Displays this message.")
optparser.add_option(
"-U", "--username", dest="username", type="string", metavar="<USERNAME>",
help="Username to connect to database.")
optparser.add_option(
"-P", "--password", dest="password", type="string", metavar="<PASSWORD>",
help="Password to connect to database.")
optparser.add_option(
"-h", "--host", dest="host", type="string", default="127.0.0.1", metavar="<ADDRESS>",
help="Address of Aerospike server.")
optparser.add_option(
"-p", "--port", dest="port", type="int", default=3000, metavar="<PORT>",
help="Port of the Aerospike server.")
optparser.add_option(
"-n", "--namespace", dest="namespace", type="string", default="test", metavar="<NS>",
help="Port of the Aerospike server.")
optparser.add_option(
"-s", "--set", dest="set", type="string", default="demo", metavar="<SET>",
help="Port of the Aerospike server.")
(options, args) = optparser.parse_args()
if options.help:
optparser.print_help()
print()
sys.exit(1)
if len(args) < 2:
optparser.print_help()
print()
sys.exit(1)
##########################################################################
# Client Configuration
##########################################################################
config = {
'hosts': [(options.host, options.port)]
}
##########################################################################
# Application
##########################################################################
exitCode = 0
try:
# ----------------------------------------------------------------------------
# Connect to Cluster
# ----------------------------------------------------------------------------
client = aerospike.client(config).connect(
options.username, options.password)
# ----------------------------------------------------------------------------
# Perform Operation
# ----------------------------------------------------------------------------
try:
namespace = options.namespace if options.namespace and options.namespace != 'None' else None
set = options.set if options.set and options.set != 'None' else None
pk = args.pop(0)
bin_names = args
status = client.remove_bin((namespace, set, pk), bin_names)
print("Status of bin removal is: %d" % (status))
print("OK, bins removed from the record at", (namespace, set, pk))
except Exception as exception:
if exception.code == 602:
print("error: Record not found")
else:
print("error: {0}".format(
(exception.code, exception.msg, file, exception.line)), file=sys.stderr)
rc = 1
# ----------------------------------------------------------------------------
# Close Connection to Cluster
# ----------------------------------------------------------------------------
client.close()
except Exception as eargs:
print("error: {0}".format(eargs), file=sys.stderr)
exitCode = 3
##########################################################################
# Exit
##########################################################################
sys.exit(exitCode)
| apache-2.0 | 4,883,970,964,990,964,000 | 32.300752 | 100 | 0.470761 | false |
nitheeshkl/miniPOS | mpos/dlg_add.py | 1 | 6520 | #! /usr/bin/python
# encoding: -*- utf-8 -*-
# dialogs.py
# This file is part of miniPOS.
# miniPOS is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# miniPOS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with miniPOS. If not, see <http://www.gnu.org/licenses/>.
import wx
import config
import string
import mpos_utility
class AddDlg(wx.Dialog):
def __init__(self, parent, *args, **kwargs):
super(AddDlg, self).__init__(parent, *args, **kwargs)
font1 = wx.Font(20, wx.SWISS, wx.NORMAL, wx.NORMAL)
font2 = wx.Font(12, wx.SWISS, wx.NORMAL, wx.NORMAL)
self.Size = (500, 260)
# Get necessary configuration information
#------------------------------------------------------------------
self.config = config.Configuration()
self.cSettings = self.config.cCurrency()
self.c_symbol = self.cSettings[0]
self.c_dec = self.cSettings[1]
sizer = wx.BoxSizer(wx.VERTICAL)
#------------------------------------------------------------------
hbox1 = wx.BoxSizer(wx.HORIZONTAL)
self.add_label = wx.StaticText(self, -1, '-')
self.add_label.SetFont(font1)
hbox1.Add(self.add_label, 0)
sizer.Add(hbox1, 0, wx.ALIGN_CENTER_HORIZONTAL|wx.TOP|wx.BOTTOM, 5)
#--#
sizer.Add(wx.StaticLine(self, -1), 0, wx.EXPAND)
#--#
hbox2 = wx.BoxSizer(wx.HORIZONTAL)
self.item_label = wx.StaticText(self, -1, '-',
size=(130, -1))
self.item_label.SetFont(font2)
self.item_input = wx.TextCtrl(self, -1, style=wx.TE_CENTER)
hbox2.Add(self.item_label, 0, wx.TOP|wx.RIGHT, 5)
hbox2.Add(self.item_input, 1)
sizer.Add(hbox2, 0, wx.EXPAND|wx.ALL, 10)
#--#
hbox6 = wx.BoxSizer(wx.HORIZONTAL)
self.barcode_label = wx.StaticText(self, -1, 'Barcode',
size=(130, -1))
self.barcode_label.SetFont(font2)
self.barcode_input = wx.TextCtrl(self, -1, style=wx.TE_CENTER)
hbox6.Add(self.barcode_label, 0, wx.TOP|wx.RIGHT, 5)
hbox6.Add(self.barcode_input, 1)
sizer.Add(hbox6, 0, wx.EXPAND|wx.ALL, 10)
#--#
hbox3 = wx.BoxSizer(wx.HORIZONTAL)
self.price_label = wx.StaticText(self, -1,
'-' +' ('+self.c_symbol+')',
size=(130, -1))
self.price_label.SetFont(font2)
self.price_input = wx.TextCtrl(self, -1, style=wx.TE_CENTER)
hbox3.Add(self.price_label, 0, wx.TOP|wx.RIGHT, 5)
hbox3.Add(self.price_input, 1)
sizer.Add(hbox3, 0, wx.EXPAND|wx.LEFT|wx.RIGHT|wx.BOTTOM, 10)
#--#
hbox4 = wx.BoxSizer(wx.HORIZONTAL)
self.bulk = wx.CheckBox(self, -1, 'Bulk Item')
hbox4.Add(self.bulk, 0)
sizer.Add(hbox4, 0, wx.EXPAND|wx.LEFT|wx.RIGHT, 10)
#--#
hbox5 = self.CreateButtonSizer(wx.OK|wx.CANCEL)
sizer.Add(hbox5, 1, wx.ALIGN_CENTER_HORIZONTAL|wx.ALL, 10)
sizer.Add((-1, 10))
self.SetSizer(sizer)
self.item_input.SetFocus()
# Set Language ----------------------------------------------------
self.Language()
# Set Event Bindings
#------------------------------------------------------------------
self.price_input.Bind(wx.EVT_KEY_UP, self.ValidatePrice)
self.price_input.Bind(wx.EVT_KILL_FOCUS, self.PrClean)
#######################################################################
def ValidatePrice(self, evt):
'Checks that the user has entered a valid price'
try:
num = self.price_input.GetValue()
if num[-1] not in string.digits+'.':
wx.Bell()
self.price_input.SetValue(num[:-1])
self.price_input.SetInsertionPointEnd()
except IndexError, e:
pass
#----------------------------------------------------------------------
def AddGetVals(self):
'Gets Values from the dlg input TextCtrls'
item = self.item_input.GetValue().strip()
barcode = self.barcode_input.GetValue().strip()
price = self.price_input.GetValue()
if self.bulk.GetValue():
bulk = 1
else:
bulk = 0
if item and self.numCheck(price):
return [item, barcode, price, bulk]
else:
self.m1 = 'You must enter the item name and price'
self.t1 = 'Add Product Fail!'
wx.MessageBox(self.m1, self.t1)
#----------------------------------------------------------------------
def PrClean(self, evt):
'Turns whatever the user entered into a valid price'
try:
ins = '%.'+self.c_dec+'f'
num = float(self.price_input.GetValue())
rslt = ins % num
self.price_input.SetValue(rslt)
except ValueError, e:
self.price_input.SetValue('')
#----------------------------------------------------------------------
def Language(self):
'Sets the config lang to the object.'
# Create the word id list
id_list = (5, 6, 7, 8, 9, 81)
words = mpos_utility.lang(id_list)
# Set Objects
self.add_label.SetLabel(words[0])
self.item_label.SetLabel(words[1])
self.price_label.SetLabel(words[2] + ' ('+unicode(self.c_symbol, 'utf8')+')')
self.t1 = words[3]
self.m1 = words[4]
self.bulk.SetLabel(words[5])
#----------------------------------------------------------------------
def numCheck(self, number):
'Checks that the final number is valid.'
try:
x = float(number)
return True
except ValueError:
return False
| gpl-3.0 | -6,303,529,669,219,090,000 | 36.257143 | 87 | 0.496472 | false |
nigelb/gdata-utils | gdata_utils/fs/__init__.py | 1 | 5204 | # Helper utils for gdata.
#
# Copyright (C) 2012 NigelB
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from StringIO import StringIO
import os
import gdata.media, gdata.client, gdata.docs.data
from gdata_utils.fs.constants import *
from simpleui.utils import UserConfig
class GoogleDocs:
def __init__(self, client, cache_dir):
self.client = client
self.config = UserConfig(dir_name=cache_dir, config_file="cache", def_config_callback=lambda x:{})
self.cache_dir = cache_dir
if not self.config.read_config():
self.config.initialize_dir(None)
def getFolders(self):
folders = self.get_list('/feeds/default/private/full/-/folder')
if not folders.entry:
return None
return [Folder(self, x) for x in folders.entry]
def getFolder(self, descriptor, etag=None):
return Folder(self, self.client.GetResourceById(descriptor[id], etag=etag))
def __cached_entry(self, id):
return os.path.join(self.cache_dir, id)
def get_list(self, url):
feed = self.client.GetResources(uri=url)
if not feed.entry:
return None
if feed.GetNextLink():
feed.entry += self.get_list(feed.GetNextLink().href).entry
return feed
def get_cache_descriptor(self, id):
        if self.config.has_key(id): return self.config[id]
return None
def open_cached_file(self, id, **kwargs):
return open(self.__cached_entry(id), **kwargs)
def download(self, id, extra_params=None):
item_etag = None
if self.config.has_key(id):
item_etag = self.config[id][etag]
entry = self.client.GetResourceById(id, etag=item_etag)
self.client.DownloadResource(entry, self.__cached_entry(id), extra_params=extra_params)
self.config[id] = create_descriptor(entry)
self.config.write_config()
def create(self, title, folder_entry, mime_type="text/plain"):
ms = gdata.data.MediaSource(file_handle=StringIO(" "), content_type=mime_type, content_length=1)
entry = gdata.docs.data.Resource(type='file', title=title)
return self.client.CreateResource(entry, media=ms, collection=folder_entry)
def write(self, entry, stream, length, mime_type="text/plain"):
ms = gdata.data.MediaSource(file_handle=stream, content_type=mime_type, content_length=length)
self.client.UpdateResource(entry, media=ms)
def create_descriptor(entry):
return{
title: entry.title.text.encode('UTF-8'),
etag: entry.etag,
id: entry.resource_id.text,
mime: entry.content.type,
}
class GD:
def title(self):
return self.entry.title.text.encode('UTF-8')
def getID(self):
return self.entry.resource_id.text
def createDescriptor(self):
return create_descriptor(self.entry)
def content_type(self):
return self.entry.content.type
class Folder(GD):
def __init__(self, fs, entry):
self.fs = fs
self.entry = entry
def list(self):
feed = self.fs.get_list("%s/%s" % (self.entry.GetSelfLink().href, "contents"))
toRet = []
if feed is None: return toRet
for item in feed.entry:
for category in item.category:
if category.term == folder_type:
toRet.append(Folder(self.fs, item))
elif category.term == file_type:
toRet.append(File(self.fs, item))
return toRet
def __repr__(self):
return self.title()
def create_file(self, name, mime_type="text/plain"):
return File(self.fs, self.fs.create(name, folder_entry=self.entry, mime_type=mime_type))
def get_file(self, name):
for itm in self.list():
if itm.__class__ == File and itm.title() == name:
try:
itm.download()
except gdata.client.NotModified, ne:
pass
return itm
return None
class File(GD):
def __init__(self, fs, entry):
self.fs = fs
self.entry = entry
def getID(self):
return self.entry.resource_id.text
def open(self, **kwargs):
""" Opens the cached contents of this file. **kwargs is passed to the open function."""
return self.fs.open_cached_file(self.getID(), **kwargs)
def write(self, stream, length, mime_type="text/plain"):
self.fs.write(self.entry, stream, length, mime_type=mime_type)
def download(self, extra_params = None):
self.fs.download(self.getID(), extra_params=extra_params)
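# Hedged usage sketch (added for illustration only). Building an authenticated
# gdata docs client is outside this module, so `client` is assumed to be a ready
# gdata.docs client instance; the cache path and file name are invented.
def _example_workflow(client):  # pragma: no cover
    docs = GoogleDocs(client, '/tmp/gdata_cache')
    for folder in (docs.getFolders() or []):
        cached = folder.get_file('notes.txt')
        if cached is not None:
            handle = cached.open(mode='rb')
            try:
                return handle.read()
            finally:
                handle.close()
    return None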
| gpl-3.0 | -8,418,994,547,592,379,000 | 33.236842 | 106 | 0.63259 | false |
saullocastro/pyNastran | pyNastran/utils/__init__.py | 1 | 9338 | # -*- coding: utf-8 -*-
from __future__ import print_function
from types import MethodType
import os
import io
import sys
from codecs import open as codec_open
from itertools import count
from six import PY2, string_types, iteritems, StringIO
import numpy as np
if PY2:
integer_types = (int, long, np.int32, np.int64)
integer_float_types = (int, long, np.int32, np.int64, float)
else:
integer_types = (int, np.int32, np.int64)
integer_float_types = (int, np.int32, np.int64, float)
def ipython_info():
"""determines if iPython/Jupyter notebook is running"""
ip = False
if 'ipykernel' in sys.modules:
ip = 'notebook'
elif 'Ipython' in sys.modules:
ip = 'terminal'
return ip
def is_file_obj(filename):
"""does this object behave like a file object?"""
#if not (hasattr(out_filename, 'read') and hasattr(out_filename, 'write')) or
# isinstance(out_filename, file) or isinstance(out_filename, StringIO):
    if PY2:
        return ((hasattr(filename, 'read') and hasattr(filename, 'write'))
                or isinstance(filename, file)  # `file` is a builtin only on Python 2
                or isinstance(filename, StringIO))
    return ((hasattr(filename, 'read') and hasattr(filename, 'write'))
            or isinstance(filename, StringIO))
def b(string):
"""reimplementation of six.b(...) to work in Python 2"""
return string.encode('latin-1')
def merge_dicts(dict_list, strict=True):
"""merges two or more dictionaries"""
assert isinstance(dict_list, list), type(dict_list)
dict_out = {}
for adict in dict_list:
assert isinstance(adict, dict), adict
for key, value in iteritems(adict):
if key not in dict_out:
dict_out[key] = value
elif strict:
raise RuntimeError('key=%r exists in multiple dictionaries' % key)
else:
print('key=%r is dropped?' % key)
return dict_out
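# Hedged worked example (added): merge_dicts([{'a': 1}, {'b': 2}]) -> {'a': 1, 'b': 2};
# for a duplicated key, strict=True raises a RuntimeError while strict=False keeps
# the first value seen, e.g. merge_dicts([{'a': 1}, {'a': 2}], strict=False) -> {'a': 1}.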
def is_binary_file(filename):
"""
Return true if the given filename is binary.
Parameters
----------
filename : str
the filename to test
Returns
-------
binary_flag : bool
True if filename is a binary file (contains null byte)
and False otherwise.
:raises: IOError if the file cannot be opened.
Based on the idea (.. seealso:: http://bytes.com/topic/python/answers/21222-determine-file-type-binary-text)
that file is binary if it contains null.
.. warning:: this may not work for unicode."""
assert isinstance(filename, string_types), '%r is not a valid filename' % filename
assert os.path.exists(filename), '%r does not exist\n%s' % (filename, print_bad_path(filename))
with io.open(filename, mode='rb') as fil:
for chunk in iter(lambda: fil.read(1024), bytes()):
if b'\0' in chunk: # found null byte
return True
return False
def print_bad_path(path):
"""
Prints information about the existence (access possibility) of the parts
of the given path. Useful for debugging when the path to a given file
is wrong.
Parameters
----------
path : str
path to check
Returns
-------
msg : str
string with informations whether access to parts of the path
is possible
"""
#raw_path = path
if len(path) > 255:
path = os.path.abspath(_filename(path))
npath = os.path.dirname(path)
res = [path]
while path != npath:
path, npath = npath, os.path.dirname(npath)
res.append(path)
msg = {True: 'passed', False: 'failed'}
return '\n'.join(['%s: %s' % (msg[os.path.exists(i)], i[4:]) for i in res])
else:
path = os.path.abspath(path)
npath = os.path.dirname(path)
res = [path]
while path != npath:
path, npath = npath, os.path.dirname(npath)
res.append(path)
msg = {True: 'passed', False: 'failed'}
return '\n'.join(['%s: %s' % (msg[os.path.exists(i)], i) for i in res])
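# Hedged example (added for illustration): for a missing file inside an existing
# directory, the report marks each path prefix, deepest first, e.g.
#   failed: /work/model/missing.bdf
#   passed: /work/model
#   passed: /work
#   passed: /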
def _filename(filename):
"""
Prepends some magic data to a filename in order to have long filenames.
.. warning:: This might be Windows specific.
"""
if len(filename) > 255:
return '\\\\?\\' + filename
return filename
def __object_attr(obj, mode, keys_to_skip, attr_type):
"""list object attributes of a given type"""
#print('keys_to_skip=%s' % keys_to_skip)
keys_to_skip = [] if keys_to_skip is None else keys_to_skip
test = {
'public': lambda k: (not k.startswith('_') and k not in keys_to_skip),
'private': lambda k: (k.startswith('_') and not k.startswith('__') and k not in keys_to_skip),
'both': lambda k: (not k.startswith('__') and k not in keys_to_skip),
'all': lambda k: (k not in keys_to_skip),
}
if not mode in test:
print('Wrong mode! Accepted modes: public, private, both, all.')
return None
check = test[mode]
out = []
for k in dir(obj):
if k in keys_to_skip:
continue
if check(k) and attr_type(getattr(obj, k)):
out.append(k)
out.sort()
return out
#return sorted([k for k in dir(obj) if (check(k) and
# attr_type(getattr(obj, k)))])
def object_methods(obj, mode='public', keys_to_skip=None):
"""
List the names of methods of a class as strings. Returns public methods
as default.
Parameters
----------
obj : instance
the object for checking
mode : str
defines what kind of methods will be listed
* "public" - names that do not begin with underscore
* "private" - names that begin with single underscore
* "both" - private and public
* "all" - all methods that are defined for the object
keys_to_skip : List[str]; default=None -> []
names to not consider to avoid deprecation warnings
Returns
-------
method : List[str]
sorted list of the names of methods of a given type
or None if the mode is wrong
"""
return __object_attr(obj, mode, keys_to_skip, lambda x: isinstance(x, MethodType))
def object_attributes(obj, mode='public', keys_to_skip=None):
"""
List the names of attributes of a class as strings. Returns public
attributes as default.
Parameters
----------
obj : instance
the object for checking
mode : str
defines what kind of attributes will be listed
* 'public' - names that do not begin with underscore
* 'private' - names that begin with single underscore
* 'both' - private and public
* 'all' - all attributes that are defined for the object
keys_to_skip : List[str]; default=None -> []
names to not consider to avoid deprecation warnings
Returns
-------
attribute_names : List[str]
sorted list of the names of attributes of a given type or None
if the mode is wrong
"""
return __object_attr(obj, mode, keys_to_skip, lambda x: not isinstance(x, MethodType))
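# Hedged usage sketch (added for illustration; DummyCard is invented here and is
# not a pyNastran class).
def _example_introspection():  # pragma: no cover
    class DummyCard(object):
        def __init__(self):
            self.eid = 1
            self._comment = ''
        def Area(self):
            return 0.0
    card = DummyCard()
    attrs = object_attributes(card, mode='public')   # -> ['eid']
    methods = object_methods(card, mode='public')    # -> ['Area']
    return attrs, methods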
#def write_object_attributes(name, obj, nspaces=0, nbase=0, is_class=True, debug=False):
#"""
#Writes a series of nested objects
#"""
#spaces = (nbase + nspaces) * ' '
#msg = spaces
#xml = spaces
#if is_class:
#equals = '='
#else:
#equals = ':'
#if debug:
#print('attr=%s equals=%r' % (name, equals))
## name
#if isinstance(obj, dict):
#if nspaces == 0:
#msg += '%s %s ' % (name, equals)
#else:
#if isinstance(name, tuple):
#msg += '%s %s ' % (str(name), equals)
#else:
#msg += "'%s' %s " % (name, equals)
#elif isinstance(name, string_types):
#if is_class:
#key = '%s' % name
#else:
#key = "'%s'" % name
## elif isinstance(name, unicode):
## if is_class:
## key = u'%s' % name
## else:
## key = "u'%s'" % name
#elif isinstance(name, (int, float, tuple)) or name is None:
#key = '%s' % str(name)
#else:
#raise RuntimeError('key=%s is not a string. Type=%s' % (name, type(name)))
#if debug:
#print('name=%s type=%s' % (name, type(obj)))
## write the object
#if isinstance(obj, (int, float)) or obj is None:
#xml += '<name=%s value=%s type=%s>' % (name, obj, type(obj))
#msg += '%s %s %s,\n' % (key, equals, write_value(obj, nspaces, nbase, is_class))
#elif is_string(obj):
#msg += '%s %s %s,\n' % (key, equals, write_value(obj, nspaces, nbase, is_class))
#elif isinstance(obj, dict):
#msg += write_dict(obj, nspaces, nbase, is_class) + ',\n'
#elif isinstance(obj, (tuple, list)):
#msg += '%s %s %s,\n' % (key, equals, write_value(obj, nspaces, nbase, is_class))
#elif isinstance(obj, np.ndarray):
#starter = '%s%s %s' % (nspaces, key, equals)
#msg += '%s %s %s,\n' % (key, equals, write_array(obj, nspaces + 6 + len(starter)))
#else: # generic class
#objectType = obj.__class__.__name__
##raise RuntimeError('objectType=%s is not supported' % objectType)
#msg += '%s %s ' % (key, equals)
#msg += write_class(name, obj, nspaces, nbase) + ',\n' # comma for class
#if nspaces == 0:
#msg = msg[:-2]
#if debug:
#print('%r' % msg)
#return msg
| lgpl-3.0 | -8,305,409,958,395,564,000 | 31.996466 | 112 | 0.57507 | false |
freezas/hy | tests/compilers/test_ast.py | 1 | 18450 | # Copyright 2017 the authors.
# This file is part of Hy, which is free software licensed under the Expat
# license. See the LICENSE.
from __future__ import unicode_literals
from hy import HyString
from hy.models import HyObject
from hy.compiler import hy_compile
from hy.importer import import_buffer_to_hst
from hy.errors import HyCompileError, HyTypeError
from hy.lex.exceptions import LexException
from hy._compat import PY3
import ast
def _ast_spotcheck(arg, root, secondary):
if "." in arg:
local, full = arg.split(".", 1)
return _ast_spotcheck(full,
getattr(root, local),
getattr(secondary, local))
assert getattr(root, arg) == getattr(secondary, arg)
def can_compile(expr):
return hy_compile(import_buffer_to_hst(expr), "__main__")
def cant_compile(expr):
try:
hy_compile(import_buffer_to_hst(expr), "__main__")
assert False
except HyTypeError as e:
# Anything that can't be compiled should raise a user friendly
# error, otherwise it's a compiler bug.
assert isinstance(e.expression, HyObject)
assert e.message
return e
except HyCompileError as e:
# Anything that can't be compiled should raise a user friendly
# error, otherwise it's a compiler bug.
assert isinstance(e.exception, HyTypeError)
assert e.traceback
return e
def test_ast_bad_type():
"Make sure AST breakage can happen"
class C:
pass
try:
hy_compile(C(), "__main__")
assert True is False
except HyCompileError:
pass
def test_ast_bad_if():
"Make sure AST can't compile invalid if*"
cant_compile("(if*)")
cant_compile("(if* foobar)")
cant_compile("(if* 1 2 3 4 5)")
def test_ast_valid_if():
"Make sure AST can compile valid if*"
can_compile("(if* foo bar)")
def test_ast_valid_unary_op():
"Make sure AST can compile valid unary operator"
can_compile("(not 2)")
can_compile("(~ 1)")
def test_ast_invalid_unary_op():
"Make sure AST can't compile invalid unary operator"
cant_compile("(not 2 3 4)")
cant_compile("(not)")
cant_compile("(not 2 3 4)")
cant_compile("(~ 2 2 3 4)")
cant_compile("(~)")
def test_ast_bad_while():
"Make sure AST can't compile invalid while"
cant_compile("(while)")
cant_compile("(while (True))")
def test_ast_good_do():
"Make sure AST can compile valid do"
can_compile("(do)")
can_compile("(do 1)")
def test_ast_good_raise():
"Make sure AST can compile valid raise"
can_compile("(raise)")
can_compile("(raise Exception)")
can_compile("(raise e)")
if PY3:
def test_ast_raise_from():
can_compile("(raise Exception :from NameError)")
def test_ast_bad_raise():
"Make sure AST can't compile invalid raise"
cant_compile("(raise Exception Exception)")
def test_ast_good_try():
"Make sure AST can compile valid try"
can_compile("(try 1 (except) (else 1))")
can_compile("(try 1 (finally 1))")
can_compile("(try 1 (except) (finally 1))")
can_compile("(try 1 (except [x]) (except [y]) (finally 1))")
can_compile("(try 1 (except) (else 1) (finally 1))")
can_compile("(try 1 (except [x]) (except [y]) (else 1) (finally 1))")
def test_ast_bad_try():
"Make sure AST can't compile invalid try"
cant_compile("(try)")
cant_compile("(try 1)")
cant_compile("(try 1 bla)")
cant_compile("(try 1 bla bla)")
cant_compile("(try (do bla bla))")
cant_compile("(try (do) (else 1) (else 2))")
cant_compile("(try 1 (else 1))")
cant_compile("(try 1 (else 1) (except))")
cant_compile("(try 1 (finally 1) (except))")
cant_compile("(try 1 (except) (finally 1) (else 1))")
def test_ast_good_except():
"Make sure AST can compile valid except"
can_compile("(try 1 (except))")
can_compile("(try 1 (except []))")
can_compile("(try 1 (except [Foobar]))")
can_compile("(try 1 (except [[]]))")
can_compile("(try 1 (except [x FooBar]))")
can_compile("(try 1 (except [x [FooBar BarFoo]]))")
can_compile("(try 1 (except [x [FooBar BarFoo]]))")
def test_ast_bad_except():
"Make sure AST can't compile invalid except"
cant_compile("(except 1)")
cant_compile("(try 1 (except 1))")
cant_compile("(try 1 (except [1 3]))")
cant_compile("(try 1 (except [x [FooBar] BarBar]))")
def test_ast_good_assert():
"""Make sure AST can compile valid asserts. Asserts may or may not
include a label."""
can_compile("(assert 1)")
can_compile("(assert 1 \"Assert label\")")
can_compile("(assert 1 (+ \"spam \" \"eggs\"))")
can_compile("(assert 1 12345)")
can_compile("(assert 1 None)")
can_compile("(assert 1 (+ 2 \"incoming eggsception\"))")
def test_ast_bad_assert():
"Make sure AST can't compile invalid assert"
cant_compile("(assert)")
cant_compile("(assert 1 2 3)")
cant_compile("(assert 1 [1 2] 3)")
def test_ast_good_global():
"Make sure AST can compile valid global"
can_compile("(global a)")
can_compile("(global foo bar)")
def test_ast_bad_global():
"Make sure AST can't compile invalid global"
cant_compile("(global)")
cant_compile("(global (foo))")
if PY3:
def test_ast_good_nonlocal():
"Make sure AST can compile valid nonlocal"
can_compile("(nonlocal a)")
can_compile("(nonlocal foo bar)")
def test_ast_bad_nonlocal():
"Make sure AST can't compile invalid nonlocal"
cant_compile("(nonlocal)")
cant_compile("(nonlocal (foo))")
def test_ast_good_defclass():
"Make sure AST can compile valid defclass"
can_compile("(defclass a)")
can_compile("(defclass a [])")
def test_ast_bad_defclass():
"Make sure AST can't compile invalid defclass"
cant_compile("(defclass)")
cant_compile("(defclass a None)")
cant_compile("(defclass a None None)")
def test_ast_good_lambda():
"Make sure AST can compile valid lambda"
can_compile("(fn [])")
can_compile("(fn [] 1)")
def test_ast_bad_lambda():
"Make sure AST can't compile invalid lambda"
cant_compile("(fn)")
def test_ast_good_yield():
"Make sure AST can compile valid yield"
can_compile("(yield 1)")
def test_ast_bad_yield():
"Make sure AST can't compile invalid yield"
cant_compile("(yield 1 2)")
def test_ast_good_import_from():
"Make sure AST can compile valid selective import"
can_compile("(import [x [y]])")
def test_ast_require():
"Make sure AST respects (require) syntax"
can_compile("(require tests.resources.tlib)")
can_compile("(require [tests.resources.tlib [qplah parald]])")
can_compile("(require [tests.resources.tlib [*]])")
can_compile("(require [tests.resources.tlib :as foobar])")
can_compile("(require [tests.resources.tlib [qplah :as quiz]])")
can_compile("(require [tests.resources.tlib [qplah :as quiz parald]])")
cant_compile("(require [tests.resources.tlib])")
cant_compile("(require [tests.resources.tlib [* qplah]])")
cant_compile("(require [tests.resources.tlib [qplah *]])")
cant_compile("(require [tests.resources.tlib [* *]])")
def test_ast_no_pointless_imports():
def contains_import_from(code):
return any([isinstance(node, ast.ImportFrom)
for node in can_compile(code).body])
# `reduce` is a builtin in Python 2, but not Python 3.
# The version of `map` that returns an iterator is a builtin in
# Python 3, but not Python 2.
if PY3:
assert contains_import_from("reduce")
assert not contains_import_from("map")
else:
assert not contains_import_from("reduce")
assert contains_import_from("map")
def test_ast_good_get():
"Make sure AST can compile valid get"
can_compile("(get x y)")
def test_ast_bad_get():
"Make sure AST can't compile invalid get"
cant_compile("(get)")
cant_compile("(get 1)")
def test_ast_good_cut():
"Make sure AST can compile valid cut"
can_compile("(cut x)")
can_compile("(cut x y)")
can_compile("(cut x y z)")
can_compile("(cut x y z t)")
def test_ast_bad_cut():
"Make sure AST can't compile invalid cut"
cant_compile("(cut)")
cant_compile("(cut 1 2 3 4 5)")
def test_ast_good_take():
"Make sure AST can compile valid 'take'"
can_compile("(take 1 [2 3])")
def test_ast_good_drop():
"Make sure AST can compile valid 'drop'"
can_compile("(drop 1 [2 3])")
def test_ast_good_assoc():
"Make sure AST can compile valid assoc"
can_compile("(assoc x y z)")
def test_ast_bad_assoc():
"Make sure AST can't compile invalid assoc"
cant_compile("(assoc)")
cant_compile("(assoc 1)")
cant_compile("(assoc 1 2)")
cant_compile("(assoc 1 2 3 4)")
def test_ast_bad_with():
"Make sure AST can't compile invalid with"
cant_compile("(with*)")
cant_compile("(with* [])")
cant_compile("(with* [] (pass))")
def test_ast_valid_while():
"Make sure AST can't compile invalid while"
can_compile("(while foo bar)")
def test_ast_valid_for():
"Make sure AST can compile valid for"
can_compile("(for [a 2] (print a))")
def test_ast_invalid_for():
"Make sure AST can't compile invalid for"
cant_compile("(for* [a 1] (else 1 2))")
def test_ast_expression_basics():
""" Ensure basic AST expression conversion works. """
code = can_compile("(foo bar)").body[0]
tree = ast.Expr(value=ast.Call(
func=ast.Name(
id="foo",
ctx=ast.Load(),
),
args=[
ast.Name(id="bar", ctx=ast.Load())
],
keywords=[],
starargs=None,
kwargs=None,
))
_ast_spotcheck("value.func.id", code, tree)
def test_ast_anon_fns_basics():
""" Ensure anon fns work. """
code = can_compile("(fn (x) (* x x))").body[0].value
assert type(code) == ast.Lambda
code = can_compile("(fn (x) (print \"multiform\") (* x x))").body[0]
assert type(code) == ast.FunctionDef
can_compile("(fn (x))")
cant_compile("(fn)")
def test_ast_non_decoratable():
""" Ensure decorating garbage breaks """
cant_compile("(with-decorator (foo) (* x x))")
def test_ast_lambda_lists():
"""Ensure the compiler chokes on invalid lambda-lists"""
cant_compile('(fn [&key {"a" b} &key {"foo" bar}] [a foo])')
cant_compile('(fn [&optional a &key {"foo" bar}] [a foo])')
cant_compile('(fn [&optional [a b c]] a)')
cant_compile('(fn [&optional [1 2]] (list 1 2))')
def test_ast_print():
code = can_compile("(print \"foo\")").body[0]
assert type(code.value) == ast.Call
def test_ast_tuple():
""" Ensure tuples work. """
code = can_compile("(, 1 2 3)").body[0].value
assert type(code) == ast.Tuple
def test_argument_destructuring():
""" Ensure argument destructuring compilers. """
can_compile("(fn [[a b]] (print a b))")
cant_compile("(fn [[]] 0)")
def test_lambda_list_keywords_rest():
""" Ensure we can compile functions with lambda list keywords."""
can_compile("(fn (x &rest xs) (print xs))")
cant_compile("(fn (x &rest xs &rest ys) (print xs))")
can_compile("(fn (&optional a &rest xs) (print xs))")
def test_lambda_list_keywords_key():
""" Ensure we can compile functions with &key."""
can_compile("(fn (x &key {foo True}) (list x foo))")
cant_compile("(fn (x &key {bar \"baz\"} &key {foo 42}) (list x bar foo))")
cant_compile("(fn (x &key {1 2 3 4}) (list x))")
def test_lambda_list_keywords_kwargs():
""" Ensure we can compile functions with &kwargs."""
can_compile("(fn (x &kwargs kw) (list x kw))")
cant_compile("(fn (x &kwargs xs &kwargs ys) (list x xs ys))")
can_compile("(fn (&optional x &kwargs kw) (list x kw))")
def test_lambda_list_keywords_kwonly():
"""Ensure we can compile functions with &kwonly if we're on Python
3, or fail with an informative message on Python 2."""
kwonly_demo = "(fn [&kwonly a [b 2]] (print 1) (print a b))"
if PY3:
code = can_compile(kwonly_demo)
for i, kwonlyarg_name in enumerate(('a', 'b')):
assert kwonlyarg_name == code.body[0].args.kwonlyargs[i].arg
assert code.body[0].args.kw_defaults[0] is None
assert code.body[0].args.kw_defaults[1].n == 2
else:
exception = cant_compile(kwonly_demo)
assert isinstance(exception, HyTypeError)
message, = exception.args
assert message == ("keyword-only arguments are only "
"available under Python 3")
def test_lambda_list_keywords_mixed():
""" Ensure we can mix them up."""
can_compile("(fn (x &rest xs &kwargs kw) (list x xs kw))")
cant_compile("(fn (x &rest xs &fasfkey {bar \"baz\"}))")
if PY3:
can_compile("(fn [x &rest xs &kwargs kwxs &kwonly kwoxs]"
" (list x xs kwxs kwoxs))")
def test_missing_keyword_argument_value():
"""Ensure the compiler chokes on missing keyword argument values."""
try:
can_compile("((fn [x] x) :x)")
except HyTypeError as e:
assert(e.message == "Keyword argument :x needs a value.")
else:
assert(False)
def test_ast_unicode_strings():
"""Ensure we handle unicode strings correctly"""
def _compile_string(s):
hy_s = HyString(s)
hy_s.start_line = hy_s.end_line = 0
hy_s.start_column = hy_s.end_column = 0
code = hy_compile(hy_s, "__main__")
# code == ast.Module(body=[ast.Expr(value=ast.Str(s=xxx))])
return code.body[0].value.s
assert _compile_string("test") == "test"
assert _compile_string("\u03b1\u03b2") == "\u03b1\u03b2"
assert _compile_string("\xc3\xa9") == "\xc3\xa9"
def test_ast_unicode_vs_bytes():
def f(x): return can_compile(x).body[0].value.s
assert f('"hello"') == u"hello"
assert type(f('"hello"')) is (str if PY3 else unicode) # noqa
assert f('b"hello"') == (eval('b"hello"') if PY3 else "hello")
assert type(f('b"hello"')) == (bytes if PY3 else str)
assert f('b"\\xa0"') == (bytes([160]) if PY3 else chr(160))
def test_compile_error():
"""Ensure we get compile error in tricky cases"""
try:
can_compile("(fn [] (in [1 2 3]))")
except HyTypeError as e:
assert(e.message == "`in' needs 2 arguments, got 1")
else:
assert(False)
def test_for_compile_error():
"""Ensure we get compile error in tricky 'for' cases"""
try:
can_compile("(fn [] (for)")
except LexException as e:
assert(e.message == "Premature end of input")
else:
assert(False)
try:
can_compile("(fn [] (for)))")
except LexException as e:
assert(e.message == "Ran into a RPAREN where it wasn't expected.")
else:
assert(False)
try:
can_compile("(fn [] (for [x] x))")
except HyTypeError as e:
assert(e.message == "`for' requires an even number of args.")
else:
assert(False)
try:
can_compile("(fn [] (for [x xx]))")
except HyTypeError as e:
assert(e.message == "`for' requires a body to evaluate")
else:
assert(False)
try:
can_compile("(fn [] (for [x xx] (else 1)))")
except HyTypeError as e:
assert(e.message == "`for' requires a body to evaluate")
else:
assert(False)
def test_attribute_access():
"""Ensure attribute access compiles correctly"""
can_compile("(. foo bar baz)")
can_compile("(. foo [bar] baz)")
can_compile("(. foo bar [baz] [0] quux [frob])")
can_compile("(. foo bar [(+ 1 2 3 4)] quux [frob])")
cant_compile("(. foo bar :baz [0] quux [frob])")
cant_compile("(. foo bar baz (0) quux [frob])")
cant_compile("(. foo bar baz [0] quux {frob})")
def test_attribute_empty():
"""Ensure using dot notation with a non-expression is an error"""
cant_compile(".")
cant_compile("foo.")
cant_compile(".foo")
cant_compile('"bar".foo')
cant_compile('[2].foo')
def test_cons_correct():
"""Ensure cons gets compiled correctly"""
can_compile("(cons a b)")
def test_invalid_list_comprehension():
"""Ensure that invalid list comprehensions do not break the compiler"""
cant_compile("(genexpr x [])")
cant_compile("(genexpr [x [1 2 3 4]] x)")
cant_compile("(list-comp None [])")
cant_compile("(list-comp [x [1 2 3]] x)")
def test_bad_setv():
"""Ensure setv handles error cases"""
cant_compile("(setv if* 1)")
cant_compile("(setv (a b) [1 2])")
def test_defn():
"""Ensure that defn works correctly in various corner cases"""
cant_compile("(defn if* [] 1)")
cant_compile("(defn \"hy\" [] 1)")
cant_compile("(defn :hy [] 1)")
can_compile("(defn &hy [] 1)")
def test_setv_builtins():
"""Ensure that assigning to a builtin fails, unless in a class"""
cant_compile("(setv None 42)")
cant_compile("(defn get [&rest args] 42)")
can_compile("(defclass A [] (defn get [self] 42))")
can_compile("""
(defclass A []
(defn get [self] 42)
(defclass B []
(defn get [self] 42))
(defn if* [self] 0))
""")
def test_lots_of_comment_lines():
# https://github.com/hylang/hy/issues/1313
can_compile(1000 * ";\n")
def test_exec_star():
code = can_compile('(exec* "print(5)")').body[0]
assert type(code) == (ast.Expr if PY3 else ast.Exec)
if not PY3:
assert code.body.s == "print(5)"
assert code.globals is None
assert code.locals is None
code = can_compile('(exec* "print(a)" {"a" 3})').body[0]
assert type(code) == (ast.Expr if PY3 else ast.Exec)
if not PY3:
assert code.body.s == "print(a)"
assert code.globals.keys[0].s == "a"
assert code.locals is None
code = can_compile('(exec* "print(a + b)" {"a" "x"} {"b" "y"})').body[0]
assert type(code) == (ast.Expr if PY3 else ast.Exec)
if not PY3:
assert code.body.s == "print(a + b)"
assert code.globals.keys[0].s == "a"
assert code.locals.keys[0].s == "b"
def test_compiler_macro_tag_try():
"""Check that try forms within defmacro/deftag are compiled correctly"""
# https://github.com/hylang/hy/issues/1350
can_compile("(defmacro foo [] (try None (except [] None)) `())")
can_compile("(deftag foo [] (try None (except [] None)) `())")
| mit | 6,325,422,572,845,489,000 | 28.472843 | 78 | 0.600542 | false |
SWLBot/electronic-blackboard | board.py | 1 | 2049 | import tornado.ioloop
import tornado.web
import tornado.httpserver
from tornado.options import define, options, parse_command_line
import os.path
from broadcast_api import load_schedule
import argparse
import config.settings
define('port',default=4000,help='run the server on the given port',type=int)
#define('log_file_prefix',default='board.log',help='log file name',type=str)
class BaseHandler(tornado.web.RequestHandler):
def get_current_user(self):
return self.get_secure_cookie("user")
class MainHandler(BaseHandler):
def get(self):
self.set_cookie("_xsrf",self.xsrf_token)
self.render("board.html")
class Get_DB_Data(BaseHandler):
def get(self):
display_content = load_schedule()
if display_content['result'] == 'fail':
pass
elif display_content['display_type'] == 'image':
self.render('show-image.html',img_info=display_content)
elif display_content['display_type'] == 'text':
from tornado.template import Loader
loader = Loader('template')
print(loader.load('show-text.html').generate(text_info=display_content))
self.render('show-text.html', text_info=display_content)
elif display_content['display_type'] == 'news':
self.render('show-news.html', news_info=display_content)
def main():
base_dir = os.path.dirname(__file__)
settings = {
"cookie_secret": config.settings.board['cookie_secret'],
"template_path":os.path.join(base_dir,"template"),
"static_path":os.path.join(base_dir,"static"),
"thumbnail_path":os.path.join(base_dir,"thumbnail"),
"debug":True,
}
application = tornado.web.Application([
tornado.web.url(r"/",MainHandler,name="main"),
tornado.web.url(r"/db_schedule",Get_DB_Data),
],**settings)
http_server = tornado.httpserver.HTTPServer(application)
http_server.listen(options.port)
tornado.ioloop.IOLoop.instance().start()
if __name__ == "__main__":
main()
| apache-2.0 | -8,772,363,605,874,281,000 | 33.728814 | 84 | 0.65349 | false |
denys-duchier/django-wiki-py3 | wiki/core/plugins/registry.py | 1 | 1646 | # -*- coding: utf-8 -*-
from django.utils.importlib import import_module
_cache = {}
_settings_forms = []
_markdown_extensions = []
_article_tabs = []
_sidebar = []
def register(PluginClass):
"""
Register a plugin class. This function will call back your plugin's
constructor.
"""
if PluginClass in list(_cache.keys()):
raise Exception("Plugin class already registered")
plugin = PluginClass()
_cache[PluginClass] = plugin
settings_form = getattr(PluginClass, 'settings_form', None)
if settings_form:
if isinstance(settings_form, str):
klassname = settings_form.split(".")[-1]
modulename = ".".join(settings_form.split(".")[:-1])
form_module = import_module(modulename)
settings_form = getattr(form_module, klassname)
_settings_forms.append(settings_form)
if getattr(PluginClass, 'article_tab', None):
_article_tabs.append(plugin)
if getattr(PluginClass, 'sidebar', None):
_sidebar.append(plugin)
_markdown_extensions.extend(getattr(PluginClass, 'markdown_extensions', []))
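# Hedged illustration (added): a minimal class of the shape register() accepts.
# The attribute names mirror the getattr() look-ups above; the values are
# invented placeholders rather than a real django-wiki plugin, and the class is
# deliberately not registered here -- a caller would invoke register(_ExamplePlugin).
class _ExamplePlugin(object):
    article_tab = ('Example', 'wiki-example-icon')
    sidebar = {'headline': 'Example'}
    markdown_extensions = []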
def get_plugins():
"""Get loaded plugins - do not call before all plugins are loaded."""
return _cache
def get_markdown_extensions():
"""Get all markdown extension classes from plugins"""
return _markdown_extensions
def get_article_tabs():
"""Get all article tab dictionaries from plugins"""
return _article_tabs
def get_sidebar():
"""Returns plugin classes that should connect to the sidebar"""
return _sidebar
def get_settings_forms():
return _settings_forms | gpl-3.0 | 4,763,319,060,811,272,000 | 28.945455 | 88 | 0.647631 | false |
licon02/pnn | pruebas/prueba04_b.py | 1 | 2843 | #!/usr/bin/python
#LSTM network test
#plots the input signals and the network response
#sinusoidal input
from __future__ import division
import numpy as np
from pybrain.datasets import SequentialDataSet
from itertools import cycle
from pybrain.supervised import RPropMinusTrainer
from sys import stdout
from pybrain.tools.shortcuts import buildNetwork
from pybrain.structure.modules import LSTMLayer
import matplotlib.pyplot as plt
plt.close('all') #close any previously open figures
#construct target signal:
T = 1 #signal period
Nyq = 20 #minimum of 2 by the Nyquist theorem
Ts = T/Nyq #sampling period
f = 1/T #signal frequency
fs = 1/Ts #sampling frequency
A = 10 #amplitude
Tiempo = 5 #total sampling time
#NN input signal:
t0 = np.arange(0,Tiempo,Ts) #generates a vector from n to N, in increments of i (n,N,i)
#value at time instant t0
#np.sin(Wn*t0) Wn=2*pi*f t0=time instant
data = A*np.cos(2*np.pi*f*t0) #input signal to the network
print 'number of training samples %i'%len(data)
net = buildNetwork(1, 15, 1,hiddenclass=LSTMLayer, outputbias=False, recurrent=True)
ds = SequentialDataSet(1, 1)
for sample, next_sample in zip(data, cycle(data[1:])):
ds.addSample(sample, next_sample)
trainer = RPropMinusTrainer(net, dataset=ds)
train_errors = [] # save errors for plotting later
EPOCHS_PER_CYCLE = 5
CYCLES = 100
EPOCHS = EPOCHS_PER_CYCLE * CYCLES
for i in xrange(CYCLES):
trainer.trainEpochs(EPOCHS_PER_CYCLE)
train_errors.append(trainer.testOnData())
epoch = (i+1) * EPOCHS_PER_CYCLE
#print("\r epoch {}/{}".format(epoch, EPOCHS))
stdout.flush()
print "final error =", train_errors[-1]
y0 = []#sample
y1 = []#network output
y2 = []#target
for sample, target in ds.getSequenceIterator(0):
y0.append(sample)
y1.append(net.activate(sample))
y2.append(target)
#print(" sample = %4.1f" % sample)
#print("predicted next sample = %4.1f" % net.activate(sample))
#print(" actual next sample = %4.1f" % target)
fsize=8
t0 = np.arange(0,len(data),1)
fig1 = plt.figure(1)
plt.plot(t0, y1, 'ro',label='original')
plt.plot(t0, y2, 'k',label='red')
plt.xlabel('Time',fontsize=fsize)
plt.ylabel('Amplitude',fontsize=fsize)
plt.grid()
plt.title('Target range = [0,%0.1f]'%len(data),fontsize=fsize)
plt.xlim(1.2*np.min(t0),1.2*np.max(t0))
plt.ylim(1.2*np.min(y1),1.2*np.max(y1))
fig1name = './prueba04_b_fig1.png'
print 'Saving Fig. 1 to:', fig1name
fig1.savefig(fig1name, bbox_inches='tight')
fig2 = plt.figure(2)
plt.plot(range(0, EPOCHS, EPOCHS_PER_CYCLE), train_errors)
plt.xlabel('epoch')
plt.ylabel('error')
fig2name = './prueba04_b_fig2.png'
print 'Saving Fig. 2 to:', fig2name
fig2.savefig(fig2name, bbox_inches='tight')
plt.show()
| gpl-3.0 | 2,559,812,335,112,786,400 | 29.569892 | 90 | 0.687654 | false |
google/prettytensor | prettytensor/train.py | 1 | 1183 | # Copyright 2015 Google Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Imports some utilities for training models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import, wildcard-import
from prettytensor.input_helpers import batch
from prettytensor.input_helpers import feed_numpy
from prettytensor.local_trainer import create_checkpointing_runner
from prettytensor.local_trainer import create_follower_runner
from prettytensor.local_trainer import Runner
from prettytensor.recurrent_networks import RecurrentRunner
from prettytensor.replay_queue import ReplayableQueue
| apache-2.0 | -369,588,201,578,420,300 | 50.434783 | 74 | 0.803888 | false |
google/verible | bazel/build-version.py | 1 | 1343 | #!/usr/bin/env python3
# Copyright 2020-2021 The Verible Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Invoke bazel with --workspace_status_command=bazel/build-version.py to get this invoked and populate bazel-out/volatile-status.txt
"""
import os
from subprocess import Popen, PIPE
def run(*cmd):
process = Popen(cmd, stdout=PIPE)
output, _ = process.communicate()
return output.strip().decode()
def main():
try:
date = run("git", "log", "-n1", "--date=short", "--format=%cd")
except:
date = ""
try:
version = run("git", "describe")
except:
version = ""
if not date:
date = os.environ["GIT_DATE"]
if not version:
version = os.environ["GIT_VERSION"]
print("GIT_DATE", '"{}"'.format(date))
print("GIT_DESCRIBE", '"{}"'.format(version))
if __name__ == "__main__":
main()
| apache-2.0 | 656,127,483,831,813,100 | 24.826923 | 133 | 0.679821 | false |
dimka665/vk | tests/conftest.py | 1 | 1967 | import requests
from pytest import fixture
from vk.session import APIBase
@fixture(scope='session')
def v():
"""
Actual vk API version
"""
return '5.80'
class Attributable(object):
def set_attrs(self, attributes):
for attr_name, attr_value in attributes.items():
setattr(self, attr_name, attr_value)
class RequestData(Attributable):
def __init__(self, data):
self.set_attrs(data)
def __repr__(self):
return '<RequestData {}>'.format(self.__dict__)
class Request(Attributable):
def __init__(self, method, url, **kwargs):
self.method = method
self.url = url
self.data = RequestData(kwargs.pop('data', {}))
self.set_attrs(kwargs)
class Response(object):
def __init__(self, text='', status_code=200, url=None):
self.text = text
self.status_code = status_code
self.url = url
def raise_for_status(self):
if self.status_code != 200:
raise ValueError(self.status_code)
@fixture
def response_class():
return Response
class MockedSessionBase(requests.Session):
def __init__(self):
super(MockedSessionBase, self).__init__()
self.history = []
self.last_request = None
def request(self, method, url, **kwargs):
self.last_request = Request(method, url, **kwargs)
response = self.mocked_request(method, url, **kwargs)
if not response:
raise NotImplementedError
return response
@fixture
def session_class():
return MockedSessionBase
@fixture
def mock_requests_session(monkeypatch):
class MockedSession(MockedSessionBase):
def mocked_request(self, verb, url, **kwargs):
if verb == 'POST':
if url.startswith(APIBase.API_URL):
# method = url[len(vk.Session.API_URL):]
return Response()
monkeypatch.setattr('requests.Session', MockedSession)
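# Hedged sketch (added for illustration): a test leaning on the fixture above might
# look like the following; the exact method path is an assumption.
# def test_api_post_is_mocked(mock_requests_session):
#     session = requests.Session()
#     response = session.post(APIBase.API_URL + 'users.get', data={'v': '5.80'})
#     assert response.status_code == 200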
| mit | 8,648,724,391,978,122,000 | 21.101124 | 61 | 0.608541 | false |
WadeHsiao/B | 3rd/yuv/libyuv/setup_links.py | 1 | 17186 | #!/usr/bin/env python
# Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Setup links to a Chromium checkout for WebRTC.
WebRTC standalone shares a lot of dependencies and build tools with Chromium.
To do this, many of the paths of a Chromium checkout is emulated by creating
symlinks to files and directories. This script handles the setup of symlinks to
achieve this.
It also handles cleanup of the legacy Subversion-based approach that was used
before Chrome switched over their master repo from Subversion to Git.
"""
import ctypes
import errno
import logging
import optparse
import os
import shelve
import shutil
import subprocess
import sys
import textwrap
DIRECTORIES = [
'build',
'buildtools',
'mojo', # TODO(kjellander): Remove, see webrtc:5629.
'native_client',
'net',
'testing',
'third_party/binutils',
'third_party/drmemory',
'third_party/instrumented_libraries',
'third_party/libjpeg',
'third_party/libjpeg_turbo',
'third_party/llvm-build',
'third_party/lss',
'third_party/yasm',
'third_party/WebKit', # TODO(kjellander): Remove, see webrtc:5629.
'tools/clang',
'tools/gn',
'tools/gyp',
'tools/memory',
'tools/python',
'tools/swarming_client',
'tools/valgrind',
'tools/vim',
'tools/win',
]
from sync_chromium import get_target_os_list
target_os = get_target_os_list()
if 'android' in target_os:
DIRECTORIES += [
'base',
'third_party/accessibility_test_framework',
'third_party/android_platform',
'third_party/android_tools',
'third_party/apache_velocity',
'third_party/appurify-python',
'third_party/ashmem',
'third_party/bouncycastle',
'third_party/catapult',
'third_party/closure_compiler',
'third_party/guava',
'third_party/hamcrest',
'third_party/icu',
'third_party/icu4j',
'third_party/ijar',
'third_party/intellij',
'third_party/jsr-305',
'third_party/junit',
'third_party/libxml',
'third_party/mockito',
'third_party/modp_b64',
'third_party/ow2_asm',
'third_party/protobuf',
'third_party/requests',
'third_party/robolectric',
'third_party/sqlite4java',
'tools/android',
'tools/grit',
]
if 'ios' in target_os:
DIRECTORIES.append('third_party/class-dump')
FILES = {
'tools/isolate_driver.py': None,
'third_party/BUILD.gn': None,
}
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
CHROMIUM_CHECKOUT = os.path.join('chromium', 'src')
LINKS_DB = 'links'
# Version management to make future upgrades/downgrades easier to support.
SCHEMA_VERSION = 1
def query_yes_no(question, default=False):
"""Ask a yes/no question via raw_input() and return their answer.
Modified from http://stackoverflow.com/a/3041990.
"""
prompt = " [%s/%%s]: "
prompt = prompt % ('Y' if default is True else 'y')
prompt = prompt % ('N' if default is False else 'n')
if default is None:
default = 'INVALID'
while True:
sys.stdout.write(question + prompt)
choice = raw_input().lower()
if choice == '' and default != 'INVALID':
return default
if 'yes'.startswith(choice):
return True
elif 'no'.startswith(choice):
return False
print "Please respond with 'yes' or 'no' (or 'y' or 'n')."
# Actions
class Action(object):
def __init__(self, dangerous):
self.dangerous = dangerous
def announce(self, planning):
"""Log a description of this action.
Args:
planning - True iff we're in the planning stage, False if we're in the
doit stage.
"""
pass
def doit(self, links_db):
"""Execute the action, recording what we did to links_db, if necessary."""
pass
class Remove(Action):
def __init__(self, path, dangerous):
super(Remove, self).__init__(dangerous)
self._priority = 0
self._path = path
def announce(self, planning):
log = logging.warn
filesystem_type = 'file'
if not self.dangerous:
log = logging.info
filesystem_type = 'link'
if planning:
log('Planning to remove %s: %s', filesystem_type, self._path)
else:
log('Removing %s: %s', filesystem_type, self._path)
def doit(self, _):
os.remove(self._path)
class Rmtree(Action):
def __init__(self, path):
super(Rmtree, self).__init__(dangerous=True)
self._priority = 0
self._path = path
def announce(self, planning):
if planning:
logging.warn('Planning to remove directory: %s', self._path)
else:
logging.warn('Removing directory: %s', self._path)
def doit(self, _):
if sys.platform.startswith('win'):
# shutil.rmtree() doesn't work on Windows if any of the directories are
# read-only, which svn repositories are.
subprocess.check_call(['rd', '/q', '/s', self._path], shell=True)
else:
shutil.rmtree(self._path)
class Makedirs(Action):
def __init__(self, path):
super(Makedirs, self).__init__(dangerous=False)
self._priority = 1
self._path = path
def doit(self, _):
try:
os.makedirs(self._path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
class Symlink(Action):
def __init__(self, source_path, link_path):
super(Symlink, self).__init__(dangerous=False)
self._priority = 2
self._source_path = source_path
self._link_path = link_path
def announce(self, planning):
if planning:
logging.info(
'Planning to create link from %s to %s', self._link_path,
self._source_path)
else:
logging.debug(
'Linking from %s to %s', self._link_path, self._source_path)
def doit(self, links_db):
# Files not in the root directory need relative path calculation.
# On Windows, use absolute paths instead since NTFS doesn't seem to support
# relative paths for symlinks.
if sys.platform.startswith('win'):
source_path = os.path.abspath(self._source_path)
else:
if os.path.dirname(self._link_path) != self._link_path:
source_path = os.path.relpath(self._source_path,
os.path.dirname(self._link_path))
os.symlink(source_path, os.path.abspath(self._link_path))
links_db[self._source_path] = self._link_path
class LinkError(IOError):
"""Failed to create a link."""
pass
# Handles symlink creation on the different platforms.
if sys.platform.startswith('win'):
def symlink(source_path, link_path):
flag = 1 if os.path.isdir(source_path) else 0
if not ctypes.windll.kernel32.CreateSymbolicLinkW(
unicode(link_path), unicode(source_path), flag):
raise OSError('Failed to create symlink to %s. Notice that only NTFS '
'version 5.0 and up has all the needed APIs for '
'creating symlinks.' % source_path)
os.symlink = symlink
class WebRTCLinkSetup(object):
def __init__(self, links_db, force=False, dry_run=False, prompt=False):
self._force = force
self._dry_run = dry_run
self._prompt = prompt
self._links_db = links_db
def CreateLinks(self, on_bot):
logging.debug('CreateLinks')
# First, make a plan of action
actions = []
for source_path, link_path in FILES.iteritems():
actions += self._ActionForPath(
source_path, link_path, check_fn=os.path.isfile, check_msg='files')
for source_dir in DIRECTORIES:
actions += self._ActionForPath(
source_dir, None, check_fn=os.path.isdir,
check_msg='directories')
if not on_bot and self._force:
# When making the manual switch from legacy SVN checkouts to the new
# Git-based Chromium DEPS, the .gclient_entries file that contains cached
# URLs for all DEPS entries must be removed to avoid future sync problems.
entries_file = os.path.join(os.path.dirname(ROOT_DIR), '.gclient_entries')
if os.path.exists(entries_file):
actions.append(Remove(entries_file, dangerous=True))
actions.sort()
if self._dry_run:
for action in actions:
action.announce(planning=True)
logging.info('Not doing anything because dry-run was specified.')
sys.exit(0)
if any(a.dangerous for a in actions):
logging.warn('Dangerous actions:')
for action in (a for a in actions if a.dangerous):
action.announce(planning=True)
print
if not self._force:
logging.error(textwrap.dedent("""\
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
A C T I O N   R E Q U I R E D
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
Because chromium/src is transitioning to Git (from SVN), we needed to
change the way that the WebRTC standalone checkout works. Instead of
individually syncing subdirectories of Chromium in SVN, we're now
syncing Chromium (and all of its DEPS, as defined by its own DEPS file),
into the `chromium/src` directory.
As such, all Chromium directories which are currently pulled by DEPS are
now replaced with a symlink into the full Chromium checkout.
To avoid disrupting developers, we've chosen to not delete your
directories forcibly, in case you have some work in progress in one of
them :).
ACTION REQUIRED:
Before running `gclient sync|runhooks` again, you must run:
%s%s --force
Which will replace all directories which now must be symlinks, after
prompting with a summary of the work-to-be-done.
"""), 'python ' if sys.platform.startswith('win') else '', sys.argv[0])
sys.exit(1)
elif self._prompt:
if not query_yes_no('Would you like to perform the above plan?'):
sys.exit(1)
for action in actions:
action.announce(planning=False)
action.doit(self._links_db)
if not on_bot and self._force:
logging.info('Completed!\n\nNow run `gclient sync|runhooks` again to '
'let the remaining hooks (that probably were interrupted) '
'execute.')
def CleanupLinks(self):
logging.debug('CleanupLinks')
for source, link_path in self._links_db.iteritems():
if source == 'SCHEMA_VERSION':
continue
if os.path.islink(link_path) or sys.platform.startswith('win'):
# os.path.islink() always returns false on Windows
# See http://bugs.python.org/issue13143.
logging.debug('Removing link to %s at %s', source, link_path)
if not self._dry_run:
if os.path.exists(link_path):
if sys.platform.startswith('win') and os.path.isdir(link_path):
subprocess.check_call(['rmdir', '/q', '/s', link_path],
shell=True)
else:
os.remove(link_path)
del self._links_db[source]
@staticmethod
def _ActionForPath(source_path, link_path=None, check_fn=None,
check_msg=None):
"""Create zero or more Actions to link to a file or directory.
This will be a symlink on POSIX platforms. On Windows this requires
that NTFS is version 5.0 or higher (Vista or newer).
Args:
source_path: Path relative to the Chromium checkout root.
For readability, the path may contain slashes, which will
automatically be converted to the right path delimiter on Windows.
link_path: The location for the link to create. If omitted it will be the
same path as source_path.
check_fn: A function returning true if the type of filesystem object is
correct for the attempted call. Otherwise an error message with
check_msg will be printed.
check_msg: String used to inform the user of an invalid attempt to create
a file.
Returns:
A list of Action objects.
"""
def fix_separators(path):
if sys.platform.startswith('win'):
return path.replace(os.altsep, os.sep)
else:
return path
assert check_fn
assert check_msg
link_path = link_path or source_path
link_path = fix_separators(link_path)
source_path = fix_separators(source_path)
source_path = os.path.join(CHROMIUM_CHECKOUT, source_path)
    if os.path.exists(source_path) and not check_fn(source_path):
raise LinkError('_LinkChromiumPath can only be used to link to %s: '
'Tried to link to: %s' % (check_msg, source_path))
if not os.path.exists(source_path):
logging.debug('Silently ignoring missing source: %s. This is to avoid '
'errors on platform-specific dependencies.', source_path)
return []
actions = []
if os.path.exists(link_path) or os.path.islink(link_path):
if os.path.islink(link_path):
actions.append(Remove(link_path, dangerous=False))
elif os.path.isfile(link_path):
actions.append(Remove(link_path, dangerous=True))
elif os.path.isdir(link_path):
actions.append(Rmtree(link_path))
else:
raise LinkError('Don\'t know how to plan: %s' % link_path)
# Create parent directories to the target link if needed.
target_parent_dirs = os.path.dirname(link_path)
if (target_parent_dirs and
target_parent_dirs != link_path and
not os.path.exists(target_parent_dirs)):
actions.append(Makedirs(target_parent_dirs))
actions.append(Symlink(source_path, link_path))
return actions
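# Illustration (comment added, not part of the original script): for a single
# entry, the _ActionForPath plan above yields e.g.
# [Rmtree(link_path), Symlink(source_path, link_path)] when the link location
# is currently a real directory, with a Makedirs(parent) added before the
# Symlink when the link's parent directory is missing; the _priority fields on
# the Action subclasses (Rmtree=0, Makedirs=1, Symlink=2) presumably drive the
# actions.sort() ordering in CreateLinks.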
def _initialize_database(filename):
links_database = shelve.open(filename)
# Wipe the database if this version of the script ends up looking at a
# newer (future) version of the links db, just to be sure.
version = links_database.get('SCHEMA_VERSION')
if version and version != SCHEMA_VERSION:
logging.info('Found database with schema version %s while this script only '
'supports %s. Wiping previous database contents.', version,
SCHEMA_VERSION)
links_database.clear()
links_database['SCHEMA_VERSION'] = SCHEMA_VERSION
return links_database
def main():
on_bot = os.environ.get('CHROME_HEADLESS') == '1'
parser = optparse.OptionParser()
parser.add_option('-d', '--dry-run', action='store_true', default=False,
help='Print what would be done, but don\'t perform any '
'operations. This will automatically set logging to '
'verbose.')
parser.add_option('-c', '--clean-only', action='store_true', default=False,
help='Only clean previously created links, don\'t create '
'new ones. This will automatically set logging to '
'verbose.')
parser.add_option('-f', '--force', action='store_true', default=on_bot,
help='Force link creation. CAUTION: This deletes existing '
'folders and files in the locations where links are '
'about to be created.')
parser.add_option('-n', '--no-prompt', action='store_false', dest='prompt',
default=(not on_bot),
help='Prompt if we\'re planning to do a dangerous action')
parser.add_option('-v', '--verbose', action='store_const',
const=logging.DEBUG, default=logging.INFO,
help='Print verbose output for debugging.')
options, _ = parser.parse_args()
if options.dry_run or options.force or options.clean_only:
options.verbose = logging.DEBUG
logging.basicConfig(format='%(message)s', level=options.verbose)
# Work from the root directory of the checkout.
script_dir = os.path.dirname(os.path.abspath(__file__))
os.chdir(script_dir)
if sys.platform.startswith('win'):
def is_admin():
try:
return os.getuid() == 0
except AttributeError:
return ctypes.windll.shell32.IsUserAnAdmin() != 0
if not is_admin():
logging.error('On Windows, you now need to have administrator '
'privileges for the shell running %s (or '
'`gclient sync|runhooks`).\nPlease start another command '
'prompt as Administrator and try again.', sys.argv[0])
return 1
if not os.path.exists(CHROMIUM_CHECKOUT):
logging.error('Cannot find a Chromium checkout at %s. Did you run "gclient '
'sync" before running this script?', CHROMIUM_CHECKOUT)
return 2
links_database = _initialize_database(LINKS_DB)
try:
symlink_creator = WebRTCLinkSetup(links_database, options.force,
options.dry_run, options.prompt)
symlink_creator.CleanupLinks()
if not options.clean_only:
symlink_creator.CreateLinks(on_bot)
except LinkError as e:
print >> sys.stderr, e.message
return 3
finally:
links_database.close()
return 0
if __name__ == '__main__':
sys.exit(main())
| lgpl-3.0 | -4,101,265,656,489,156,600 | 32.897436 | 80 | 0.636216 | false |
ProjectBabbler/ebird-api | src/ebird/api/statistics.py | 1 | 2558 | """Functions for fetching basic statistics about observers and observations."""
from ebird.api.utils import call
from ebird.api.validation import (
clean_area,
clean_date,
clean_max_observers,
clean_rank,
clean_region,
)
TOP_100_URL = "https://ebird.org/ws2.0/product/top100/%s/%s"
TOTALS_URL = "https://ebird.org/ws2.0/product/stats/%s/%s"
def get_top_100(token, region, date, rank="spp", max_results=100):
"""
Get the observers who have seen the most species or submitted the
greatest number of checklists on a given date.
    This maps to the end point in the eBird API 2.0,
https://documenter.getpostman.com/view/664302/S1ENwy59?version=latest#2d8d3f94-c4b0-42bd-9c8e-71edfa6347ba
:param token: the token needed to access the API.
:param region: the code for the region, eg. US-NV.
:param date: the date, since Jan 1st 1800.
:param rank: order results by species seen (spp) or checklists submitted (cl).
:param max_results: the maximum number of entries to return from
1 to 100. The default value is 100.
:return: the list of observers.
:raises ValueError: if any of the arguments fail the validation checks.
:raises URLError if there is an error with the connection to the
eBird site.
:raises HTTPError if the eBird API returns an error.
"""
url = TOP_100_URL % (clean_region(region), date.strftime("%Y/%m/%d"))
params = {
"maxObservers": clean_max_observers(max_results),
"rankedBy": clean_rank(rank),
}
headers = {
"X-eBirdApiToken": token,
}
return call(url, params, headers)
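# Illustrative usage only (the token, region and date are made-up values, not
# part of this module):
#
#   from datetime import date
#   top100 = get_top_100("my-api-token", "US-NV", date(2019, 5, 4), rank="spp")
#
# Any object with a strftime() method (e.g. datetime.date) works for `date`,
# since it is formatted as YYYY/MM/DD when building the URL.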
def get_totals(token, area, date):
"""
Get the number of contributors, checklists submitted and species seen
on a given date.
    This maps to the end point in the eBird API 2.0,
https://documenter.getpostman.com/view/664302/S1ENwy59?version=latest#4416a7cc-623b-4340-ab01-80c599ede73e
:param token: the token needed to access the API.
    :param area: the code for a country, subnational1 or subnational2 region,
        or location
:param date: the date, since Jan 1st 1800.
:return: the totals for the given date
:raises ValueError: if any of the arguments fail the validation checks.
:raises URLError if there is an error with the connection to the
eBird site.
:raises HTTPError if the eBird API returns an error.
"""
url = TOTALS_URL % (clean_area(area), clean_date(date))
headers = {
"X-eBirdApiToken": token,
}
return call(url, {}, headers)
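# Illustrative usage only (values are made up):
#
#   from datetime import date
#   totals = get_totals("my-api-token", "US-NV", date(2019, 5, 4))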
| mit | -4,960,243,422,118,333,000 | 27.422222 | 110 | 0.681392 | false |
diggcoin/diggcoin | contrib/linearize/linearize-hashes.py | 1 | 3037 | #!/usr/bin/python
#
# linearize-hashes.py: List blocks in a linear, no-fork version of the chain.
#
# Copyright (c) 2013-2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from __future__ import print_function
import json
import struct
import re
import base64
import httplib
import sys
settings = {}
class BitcoinRPC:
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def execute(self, obj):
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print("JSON-RPC: no response", file=sys.stderr)
return None
body = resp.read()
resp_obj = json.loads(body)
return resp_obj
@staticmethod
def build_request(idx, method, params):
obj = { 'version' : '1.1',
'method' : method,
'id' : idx }
if params is None:
obj['params'] = []
else:
obj['params'] = params
return obj
@staticmethod
def response_is_error(resp_obj):
return 'error' in resp_obj and resp_obj['error'] is not None
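# Illustrative CONFIG-FILE contents (made-up values; only the key names and
# defaults come from the parsing code in main() below):
#
#   host=127.0.0.1
#   port=9886
#   rpcuser=someuser
#   rpcpassword=somepassword
#   min_height=0
#   max_height=313000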
def get_block_hashes(settings, max_blocks_per_call=10000):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpassword'])
height = settings['min_height']
while height < settings['max_height']+1:
num_blocks = min(settings['max_height']+1-height, max_blocks_per_call)
batch = []
for x in range(num_blocks):
batch.append(rpc.build_request(x, 'getblockhash', [height + x]))
reply = rpc.execute(batch)
for x,resp_obj in enumerate(reply):
if rpc.response_is_error(resp_obj):
print('JSON-RPC: error at height', height+x, ': ', resp_obj['error'], file=sys.stderr)
exit(1)
assert(resp_obj['id'] == x) # assume replies are in-sequence
print(resp_obj['result'])
height += num_blocks
if __name__ == '__main__':
if len(sys.argv) != 2:
print("Usage: linearize-hashes.py CONFIG-FILE")
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 9886
if 'min_height' not in settings:
settings['min_height'] = 0
if 'max_height' not in settings:
settings['max_height'] = 313000
if 'rpcuser' not in settings or 'rpcpassword' not in settings:
print("Missing username and/or password in cfg file", file=stderr)
sys.exit(1)
settings['port'] = int(settings['port'])
settings['min_height'] = int(settings['min_height'])
settings['max_height'] = int(settings['max_height'])
get_block_hashes(settings)
| mit | -6,657,036,970,384,412,000 | 25.876106 | 90 | 0.663813 | false |
openstack/surveil | surveil/api/handlers/status/live_host_handler.py | 1 | 2622 | # Copyright 2014 - Savoir-Faire Linux inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from surveil.api.datamodel.status import live_host
from surveil.api.handlers.status import mongodb_query
from surveil.api.handlers.status import status_handler
class HostHandler(status_handler.StatusHandler):
"""Fulfills a request on the live hosts."""
def get(self, host_name):
"""Return a host."""
mongo_s = self.request.mongo_connection.alignak_live.hosts.find_one(
{"host_name": host_name}
)
return live_host.LiveHost(**_host_dict_from_mongo_item(mongo_s))
def get_all(self, live_query=None):
"""Return all live hosts."""
host_mappings = {
"last_check": "last_chk",
"description": "display_name",
"plugin_output": "output",
"acknowledged": "problem_has_been_acknowledged"
}
if live_query:
lq = mongodb_query.translate_live_query(live_query.as_dict(),
host_mappings)
else:
lq = {}
query, kwargs = mongodb_query.build_mongodb_query(lq)
mongo_dicts = (self.request.mongo_connection.
alignak_live.hosts.find(*query, **kwargs))
host_dicts = [
_host_dict_from_mongo_item(s) for s in mongo_dicts
]
hosts = []
for host_dict in host_dicts:
host = live_host.LiveHost(**host_dict)
hosts.append(host)
return hosts
def _host_dict_from_mongo_item(mongo_item):
"""Create a dict from a mongodb item."""
mappings = [
('last_chk', 'last_check', int),
('last_state_change', 'last_state_change', int),
('output', 'plugin_output', str),
('problem_has_been_acknowledged', 'acknowledged', bool),
('state', 'state', str),
('display_name', 'description', str),
]
for field in mappings:
value = mongo_item.pop(field[0], None)
if value is not None:
mongo_item[field[1]] = field[2](value)
return mongo_item
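# Illustrative example of the mapping above (comment added, not part of the
# original module): a mongo document such as
#   {'last_chk': 1429, 'output': 'OK', 'state': 'UP',
#    'problem_has_been_acknowledged': 0, 'display_name': 'web01'}
# becomes
#   {'last_check': 1429, 'plugin_output': 'OK', 'state': 'UP',
#    'acknowledged': False, 'description': 'web01'}
# and any keys outside the mapping list are left untouched.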
| apache-2.0 | -4,852,115,580,305,140,000 | 31.37037 | 76 | 0.606789 | false |
scott-w/view-helpers | test_project/test_project/authtest/tests.py | 1 | 3128 | """Test the auth mixins.
"""
from django.conf import settings
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.test.client import Client
class LoginRequiredTestCase(TestCase):
"""Test the LoginRequiredMixin.
"""
def setUp(self):
"""Set up the test client.
"""
self.client = Client()
def test_plain(self):
"""Test the LoginRequiredMixin without overriding the redirect
paremeter.
"""
url = reverse('authtest-login')
response = self.client.get(url)
redirect_url = '{}?next={}'.format(
settings.LOGIN_URL, url)
self.assertRedirects(response, redirect_url)
def test_redirect_set(self):
"""Test the LoginRequiredMixin by overriding the redirect_field_name
"""
url = reverse('authtest-redirect')
response = self.client.get(url)
redirect_url = '{}?go_to={}'.format(
settings.LOGIN_URL, url)
self.assertRedirects(response, redirect_url)
class AlreadyLoggedInTestCase(TestCase):
"""Test the already_logged_in function.
"""
def setUp(self):
User.objects.create_user('test_user', '[email protected]', 'password')
self.client = Client()
self.client.login(username='test_user', password='password')
def test_already_logged_in(self):
"""We send logged in users to the value of next.
"""
response = self.client.get(
reverse('logged-in'),
{'next': reverse('arbitrary-view')})
self.assertRedirects(response, '/arbitrary_url/')
def test_already_logged_in_settings_redirect(self):
"""We send logged in users who don't pass in next to the default
LOGIN_REDIRECT_URL.
"""
response = self.client.get(reverse('logged-in'))
self.assertRedirects(response, settings.LOGIN_REDIRECT_URL)
def test_not_logged_in(self):
"""We send non-logged in users to the login page.
"""
self.client.logout()
response = self.client.get(
reverse('logged-in'),
{'next': reverse('arbitrary-view')})
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['next'], '/arbitrary_url/')
class LoggedInViewTestCase(TestCase):
"""Test the logged_in_view function.
"""
def setUp(self):
User.objects.create_user('test_user', '[email protected]', 'password')
self.client = Client()
self.client.login(username='test_user', password='password')
def test_not_logged_in(self):
"""Users not logged in will get the not logged in config.
"""
self.client.logout()
response = self.client.get(reverse('logged-in-view'))
self.assertEqual(response.content, 'not logged in class')
def test_logged_in(self):
"""Users logged in will get the logged in config.
"""
response = self.client.get(reverse('logged-in-view'))
self.assertEqual(response.content, 'logged in class')
| bsd-3-clause | -8,050,070,320,160,211,000 | 31.247423 | 77 | 0.623082 | false |
Fewbytes/cosmo-plugin-openstack-sg-provisioner | openstack_sg_provisioner/tests/test_openstack_sg_provisioner.py | 1 | 1906 | #!/usr/bin/env python
# vim: ts=4 sw=4 et
import logging
import random
import string
import unittest
import openstack_sg_provisioner.tasks as tasks
RANDOM_LEN = 3 # cosmo_test_nova_XXX_something
PORT = 65000
CIDR = '1.2.3.0/24'
class OpenstackSGProvisionerTestCase(unittest.TestCase):
def setUp(self):
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
self.logger = logging.getLogger("test_openstack_sg_provisioner")
self.logger.level = logging.DEBUG
self.logger.info("setUp called")
self.nova_client = tasks._init_client({})
self.name_prefix = 'cosmo_test_nova_{0}_'.format(''.join(
[random.choice(string.ascii_uppercase + string.digits) for i in range(RANDOM_LEN)]
))
def tearDown(self):
# CLI all tests cleanup:
for sg in self.nova_client.security_groups.list():
if sg.name.startswith(self.name_prefix):
self.logger.error("Cleaning up security group {0} (id {1})".format(sg.name, sg.id))
self.nova_client.security_groups.delete(sg.id)
def test_all(self):
name = self.name_prefix + 'sg1'
sg_data = {
'name': name,
'description': 'description for ' + name,
'rules': [
{'port': PORT, 'cidr': CIDR},
]
}
tasks.provision(name, {}, sg_data)
sg = tasks._get_sg_by_name(self.nova_client, name)
self.assertIsNotNone(sg)
# print(dir(sg.rules), sg.rules)
self.assertEquals(sg.rules[0]['from_port'], PORT)
self.assertEquals(sg.rules[0]['to_port'], PORT)
self.assertEquals(sg.rules[0]['ip_range']['cidr'], CIDR)
tasks.terminate({}, sg_data)
sg = tasks._get_sg_by_name(self.nova_client, name)
self.assertIsNone(sg)
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -5,024,366,868,894,776,000 | 30.766667 | 99 | 0.593389 | false |
aptivate/sarpaminfohub | django/sarpaminfohub/infohub/tests/drug_searcher_tests.py | 1 | 5964 | # -*- coding: iso-8859-15 -*-
from sarpaminfohub.infohub.tests.sarpam_test_case import SarpamTestCase
from sarpaminfohub.infohub.test_backend import TestBackend
from sarpaminfohub.infohub.drug_searcher import DrugSearcher
class DrugSearcherTest(SarpamTestCase):
def setUp(self):
test_backend = TestBackend()
self.drug_searcher = DrugSearcher(test_backend)
def test_prices_converted_to_usd(self):
self.set_up_exchange_rate_for_nad()
fob_price_in_nad = 58.64
landed_price_in_nad = 67.44
exchange_rate = 0.12314
issue_unit = 500
fob_price_in_usd = (fob_price_in_nad * exchange_rate) / issue_unit
landed_price_in_usd = (landed_price_in_nad * exchange_rate) / issue_unit
rows = self.drug_searcher.get_formulations_that_match("amitriptyline")
row = rows[0]
self.assertEquals(fob_price_in_usd, row['fob_price'])
self.assertEquals(landed_price_in_usd, row['landed_price'])
def test_no_error_when_issue_unit_none(self):
rows = self.drug_searcher.get_formulations_that_match("issue unit none")
row = rows[0]
self.assertEquals(None, row['fob_price'])
self.assertEquals(None, row['landed_price'])
def test_prices_for_amoxycillin_is_converted_to_usd(self):
self.set_up_exchange_rate_for_nad()
self.set_up_exchange_rate_for_usd()
fob_price_in_nad = 58.64
landed_price_in_nad = 67.44
exchange_rate = 0.12314
issue_unit = 500
fob_price_in_usd = (fob_price_in_nad * exchange_rate) / issue_unit
landed_price_in_usd = (landed_price_in_nad * exchange_rate) / issue_unit
rows = self.drug_searcher.get_prices_for_formulation_with_id("amitriptyline")
row = rows[0]
self.assertEquals(fob_price_in_usd, row['fob_price'])
self.assertEquals(landed_price_in_usd, row['landed_price'])
def test_gets_formulation_name_from_backend_given_id(self):
name = self.drug_searcher.get_formulation_name_with_id(1)
self.assertEquals("amitriptyline 25mg tablet", name)
def get_formulations_that_match_amox(self):
self.set_up_exchange_rate_for_eur()
self.set_up_exchange_rate_for_nad()
self.set_up_exchange_rate_for_usd()
return self.drug_searcher.get_formulations_that_match("amox")
def test_matching_formulations_grouped_by_formulation_name(self):
formulations = self.get_formulations_that_match_amox()
self.assertEquals(3, len(formulations))
amoxycillin125 = formulations[0]
amoxycillin500 = formulations[1]
tamoxifen = formulations[2]
self.assertEquals(amoxycillin125['formulation'],
"amoxycillin 125mg/5ml suspension")
self.assertEquals(amoxycillin500['formulation'],
"amoxycillin 500mg tablet/capsule")
self.assertEquals(tamoxifen['formulation'],
"tamoxifen 20mg tablet")
def test_matching_formulations_include_median_fob_price(self):
formulations = self.get_formulations_that_match_amox()
# fob prices are:
# Angola None
# DRC 0.004
# Namibia 0.005
# Botswana 0.009
amoxycillin125 = formulations[0]
fob_price_for_namibia = 4.36
nad_exchange_rate = 0.12314
issue_unit = 100
expected_median = (fob_price_for_namibia * nad_exchange_rate) / issue_unit
self.assertAlmostEquals(expected_median, amoxycillin125['fob_price'])
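        # Arithmetic check (comment added for clarity): 4.36 NAD * 0.12314
        # exchange rate / 100 units is roughly 0.0054 USD per unit, i.e. the
        # Namibian 0.005 figure listed above, the median of the non-null prices.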
def test_matching_formulations_include_median_landed_price(self):
formulations = self.get_formulations_that_match_amox()
amoxycillin125 = formulations[0]
landed_price_for_namibia = 4.93
nad_exchange_rate = 0.12314
issue_unit = 100
expected_median = (landed_price_for_namibia * nad_exchange_rate) / issue_unit
self.assertAlmostEquals(expected_median, amoxycillin125['landed_price'])
def test_amitrilon_25_returned_as_product_based_on_amitryptyline(self):
registrations = self.get_amitrilon_25_registrations()
self.assertEquals("AMITRILON-25", registrations[0]['product']['name'])
self.assertEquals("AMITRILON-25", registrations[1]['product']['name'])
def test_afrifarmacia_and_aspen_returned_as_suppliers_of_amitryptyline(self):
registrations = self.get_amitrilon_25_registrations()
afrifarmacia = {'id': 1, 'name':u"Afrifármacia, Lda",
'url':"/suppliers/1/test"}
aspen_pharmacare = {'id': 2, 'name':"Aspen Pharmacare Ltd, S.A",
'url':"/suppliers/2/test"}
self.assertEquals(afrifarmacia, registrations[0]['supplier'])
self.assertEquals(aspen_pharmacare, registrations[1]['supplier'])
def test_stallion_laboratories_returned_as_manufacturer_of_amitryptyline(self):
registrations = self.get_amitrilon_25_registrations()
stallion = {'name':"STALLION LABORATORIES LTD-INDIA"}
self.assertEquals(stallion, registrations[0]['manufacturer'])
self.assertEquals(stallion, registrations[1]['manufacturer'])
def get_amitrilon_25_registrations(self):
registrations = self.drug_searcher.get_product_registrations_based_on_formulation_with_id(1)
return registrations
def test_amitrilon_25_returned_as_product_supplied_by_afrifarmacia(self):
products = self.drug_searcher.get_products_from_supplier_with_id(1)
amitrilon25 = {}
amitrilon25['product'] = "AMITRILON-25"
amitrilon25['formulation_name'] = "amitriptyline 25mg tablet"
amitrilon25['formulation_url'] = "/formulation/1/test"
expected_products = [amitrilon25]
self.assertEquals(expected_products, products)
| gpl-3.0 | 7,975,553,864,656,356,000 | 37.980392 | 100 | 0.642354 | false |
patrick91/pycon | backend/integrations/tests/test_tasks.py | 1 | 1179 | from unittest.mock import MagicMock, patch
from django.test import override_settings
from integrations.tasks import notify_new_submission, switchable_task
def test_notify_new_submission():
with patch("integrations.slack.send_message") as m1:
notify_new_submission(
"test_title",
"test_elevator_pitch",
"test_submission_type",
"test_admin_url",
"test_topic",
42,
)
blocks = m1.call_args[0][0]
attachments = m1.call_args[0][1]
assert blocks[0]["text"]["text"] == "New _test_submission_type_ Submission"
assert (
attachments[0]["blocks"][0]["text"]["text"]
== "*<test_admin_url|Test_title>*\n*"
"Elevator Pitch*\ntest_elevator_pitch"
)
assert attachments[0]["blocks"][0]["fields"][2]["text"] == "42"
assert attachments[0]["blocks"][0]["fields"][3]["text"] == "test_topic"
@override_settings(USE_SCHEDULER=True)
def test_switchable_task():
def dummy_task():
pass
dummy_task.delay = MagicMock()
switchable_dummy_task = switchable_task(dummy_task)
switchable_dummy_task()
assert dummy_task.delay.called
| mit | 500,003,179,471,033,660 | 27.756098 | 79 | 0.615776 | false |
jpopelka/osbs-client | setup.py | 1 | 1520 | #!/usr/bin/python
"""
Copyright (c) 2015 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
import re
import sys
from setuptools import setup, find_packages
data_files = {
"share/osbs": [
"inputs/image_stream.json",
"inputs/prod.json",
"inputs/prod_inner.json",
"inputs/simple.json",
"inputs/simple_inner.json",
],
}
def _get_requirements(path):
try:
with open(path) as f:
packages = f.read().splitlines()
except (IOError, OSError) as ex:
raise RuntimeError("Can't open file with requirements: %s", repr(ex))
return [p.strip() for p in packages if not re.match(r"^\s*#", p)]
def _install_requirements():
requirements = _get_requirements('requirements.txt')
if sys.version_info[0] >= 3:
requirements += _get_requirements('requirements-py3.txt')
return requirements
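# Illustrative note (not part of the original file): given a requirements.txt
# whose lines are "# comment", "requests" and "six>=1.9", _get_requirements()
# returns ['requests', 'six>=1.9'] -- only lines starting with '#' (optionally
# preceded by whitespace) are dropped.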
setup(
name="osbs-client",
description='Python module and command line client for OpenShift Build Service',
version="0.16",
author='Red Hat, Inc.',
author_email='[email protected]',
url='https://github.com/projectatomic/osbs-client',
license="BSD",
packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
entry_points={
'console_scripts': ['osbs=osbs.cli.main:main'],
},
install_requires=_install_requirements(),
data_files=data_files.items(),
)
| bsd-3-clause | 2,019,618,046,527,079,000 | 27.679245 | 84 | 0.644737 | false |
dethi/duhpy | duhpy.py | 1 | 3987 | #!/usr/bin/env python
from __future__ import print_function
from __future__ import unicode_literals
import sys
import time
import argparse
from os import getenv
from os.path import expanduser
from threading import Thread
try:
from Queue import Queue
except ImportError:
from queue import Queue
import dropbox
PY2 = (sys.version_info[0] == 2)
if PY2:
input = raw_input
API_KEY = getenv('DUHPY_API_KEY', 'YOUR_API_KEY')
APP_SECRET = getenv('DUHPY_APP_SECRET', 'YOUR_APP_SECRET')
CONFIG_PATH = expanduser('~/.duhpy')
RED = '\033[91m'
NO = '\033[0m'
class APICrawler(object):
def __init__(self, client, nb_threads=10):
self.client = client
self.values = Queue()
self.q = Queue()
for i in range(nb_threads):
worker = Thread(target=self.worker)
worker.daemon = True
worker.start()
def run(self, path='/'):
self.q.put(path)
self.q.join()
total_size = 0
self.values.put('--END--')
for i in iter(self.values.get, '--END--'):
total_size += i
return total_size
def worker(self):
while True:
path = self.q.get()
#print(path)
try:
json = self.client.metadata(path)
if not is_dir(json):
self.values.put(json['bytes'])
dir_size = 0
for item in json['contents']:
if is_dir(item):
self.q.put(item['path'])
else:
dir_size += item['bytes']
self.values.put(dir_size)
except dropbox.rest.ErrorResponse as e:
if e.status == 429:
#print(RED, '*** Dropbox API rate limit reached ***', NO)
time.sleep(1.5)
self.q.put(path)
self.q.task_done()
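# Illustrative usage (the folder path is made up): once an OAuth2 token is
# available,
#
#   client = dropbox.client.DropboxClient(token)
#   total = APICrawler(client).run('/Photos')
#   print(sizeof_fmt(total))
#
# which mirrors what main() below does for each PATH argument.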
def request_token():
if API_KEY == 'YOUR_API_KEY' or APP_SECRET == 'YOUR_APP_SECRET':
print('Please, see the documentation https://github.com/dethi/duhpy')
sys.exit(1)
flow = dropbox.client.DropboxOAuth2FlowNoRedirect(API_KEY, APP_SECRET)
authorize_url = flow.start()
print('1. Go to: ', authorize_url)
print('2. Click "Allow".')
print('3. Copy the authorization code.')
code = input('Enter the authorization code here: ').strip()
try:
access_token, user_id = flow.finish(code)
except:
print('[ERROR] Invalid code')
access_token = None
return access_token
def is_dir(metadata):
if metadata is None:
return False
return metadata["is_dir"]
def sizeof_fmt(num, suffix='B'):
for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:
if abs(num) < 1024.0:
return '{:.1f} {}{}'.format(num, unit, suffix)
num /= 1024.0
return '{:.1f} {}{}'.format(num, 'Yi', suffix)
def main():
parser = argparse.ArgumentParser(
prog='duhpy',
description='`du -h` command for Dropbox (online).')
parser.add_argument('path', metavar='PATH', type=str, nargs='+')
parser.add_argument('--version', action='version', version='%(prog)s 1.0')
args = parser.parse_args()
try:
with open(CONFIG_PATH, 'r') as f:
token = f.read()
except IOError:
token = None
while (token is None):
token = request_token()
with open(CONFIG_PATH, 'w') as f:
f.write(token)
client = dropbox.client.DropboxClient(token)
crawler = APICrawler(client)
path_len = min(max(max(map(len, args.path)), 13), 64)
print('{0:^{2}} | {1:^13}'.format('PATH', 'SIZE', path_len))
print('{0:-<{1}}+{0:-<14}'.format('-', path_len + 1))
for path in args.path:
result = crawler.run(path)
print('{0:<{2}.{2}} | {1:>13}'.format(path, sizeof_fmt(result),
path_len))
print()
if __name__ == '__main__':
main()
| mit | 1,625,170,860,030,826,800 | 26.308219 | 78 | 0.536494 | false |
thp44/delphin_6_automation | pytest/test_weather_models.py | 1 | 1932 | __author__ = "Christian Kongsgaard"
__license__ = 'MIT'
# -------------------------------------------------------------------------------------------------------------------- #
# IMPORTS
# Modules
import pytest
import numpy as np
# RiBuild Modules
from delphin_6_automation.delphin_setup import weather_modeling
from delphin_6_automation.file_parsing import weather_parser
# -------------------------------------------------------------------------------------------------------------------- #
# TEST WEATHER MODELS
@pytest.mark.skip('Catch ration model is not implemented correctly')
def test_rain_model_1(test_folder):
rain = weather_parser.ccd_to_list(test_folder + '/weather/vertical_rain.ccd')
wind_speed = weather_parser.ccd_to_list(test_folder + '/weather/wind_speed.ccd')
wind_direction = weather_parser.ccd_to_list(test_folder + '/weather/wind_direction.ccd')
wall_location = {'height': 5.0, 'width': 5.0}
wdr = weather_modeling.driving_rain(rain, wind_direction, wind_speed, wall_location, 90, 0)
assert rain == wdr
def test_rain_model_2(test_folder):
rain = weather_parser.ccd_to_list(test_folder + '/weather/vertical_rain.ccd')
wind_speed = weather_parser.ccd_to_list(test_folder + '/weather/wind_speed.ccd')
wind_direction = weather_parser.ccd_to_list(test_folder + '/weather/wind_direction.ccd')
wall_location = {'height': 5.0, 'width': 5.0}
wdr = weather_modeling.driving_rain(rain, wind_direction, wind_speed, wall_location, 90, 0, 1)
assert rain == wdr
def test_solar_radiation(test_folder):
    # TODO - Create some assertions
diff_rad = np.array(weather_parser.ccd_to_list(test_folder + '/weather/diffuse_radiation.ccd'))
dir_rad = np.array(weather_parser.ccd_to_list(test_folder + '/weather/direct_radiation.ccd'))
radiation = diff_rad + dir_rad
short_wave = weather_modeling.short_wave_radiation(radiation, -2.083, 57.167, 0, 230)
| mit | -7,866,846,956,237,236,000 | 39.25 | 120 | 0.621118 | false |
grave-w-grave/zulip | confirmation/models.py | 2 | 5962 | # -*- coding: utf-8 -*-
# Copyright: (c) 2008, Jarek Zgoda <[email protected]>
__revision__ = '$Id: models.py 28 2009-10-22 15:03:02Z jarek.zgoda $'
import re
from django.db import models
from django.core.urlresolvers import reverse
from django.core.mail import send_mail
from django.conf import settings
from django.template import loader, Context
from django.contrib.sites.models import Site
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes.fields import GenericForeignKey
from django.utils.translation import ugettext_lazy as _
from django.utils.timezone import now
from confirmation.util import get_status_field
from zerver.lib.utils import generate_random_token
from zerver.models import PreregistrationUser
from typing import Optional, Union, Any, Text, Dict
try:
import mailer
send_mail = mailer.send_mail
except ImportError:
# no mailer app present, stick with default
pass
B16_RE = re.compile('^[a-f0-9]{40}$')
def check_key_is_valid(creation_key):
# type: (Text) -> bool
if not RealmCreationKey.objects.filter(creation_key=creation_key).exists():
return False
days_sofar = (now() - RealmCreationKey.objects.get(creation_key=creation_key).date_created).days
# Realm creation link expires after settings.REALM_CREATION_LINK_VALIDITY_DAYS
if days_sofar <= settings.REALM_CREATION_LINK_VALIDITY_DAYS:
return True
return False
def generate_key():
# type: () -> Text
return generate_random_token(40)
def generate_activation_url(key, host=None):
# type: (Text, Optional[str]) -> Text
if host is None:
host = settings.EXTERNAL_HOST
return u'%s%s%s' % (settings.EXTERNAL_URI_SCHEME,
host,
reverse('confirmation.views.confirm',
kwargs={'confirmation_key': key}))
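# Note (illustrative, not from the original module): the activation link is
# EXTERNAL_URI_SCHEME + host + whatever path the 'confirmation.views.confirm'
# URL pattern reverses to for the generated 40-character hex key, so callers
# normally just use ConfirmationManager.get_link_for_object() below instead of
# building URLs by hand.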
def generate_realm_creation_url():
# type: () -> Text
key = generate_key()
RealmCreationKey.objects.create(creation_key=key, date_created=now())
return u'%s%s%s' % (settings.EXTERNAL_URI_SCHEME,
settings.EXTERNAL_HOST,
reverse('zerver.views.create_realm',
kwargs={'creation_key': key}))
class ConfirmationManager(models.Manager):
def confirm(self, confirmation_key):
# type: (str) -> Union[bool, PreregistrationUser]
if B16_RE.search(confirmation_key):
try:
confirmation = self.get(confirmation_key=confirmation_key)
except self.model.DoesNotExist:
return False
obj = confirmation.content_object
status_field = get_status_field(obj._meta.app_label, obj._meta.model_name)
setattr(obj, status_field, getattr(settings, 'STATUS_ACTIVE', 1))
obj.save()
return obj
return False
def get_link_for_object(self, obj, host=None):
# type: (Union[ContentType, int], Optional[str]) -> Text
key = generate_key()
self.create(content_object=obj, date_sent=now(), confirmation_key=key)
return generate_activation_url(key, host=host)
def send_confirmation(self, obj, email_address, additional_context=None,
subject_template_path=None, body_template_path=None,
host=None):
# type: (ContentType, Text, Optional[Dict[str, Any]], Optional[str], Optional[str], Optional[str]) -> Confirmation
confirmation_key = generate_key()
current_site = Site.objects.get_current()
activate_url = generate_activation_url(confirmation_key, host=host)
context = Context({
'activate_url': activate_url,
'current_site': current_site,
'confirmation_key': confirmation_key,
'target': obj,
'days': getattr(settings, 'EMAIL_CONFIRMATION_DAYS', 10),
})
if additional_context is not None:
context.update(additional_context)
if obj.realm is not None and obj.realm.is_zephyr_mirror_realm:
template_name = "mituser"
else:
template_name = obj._meta.model_name
templates = [
'confirmation/%s_confirmation_email_subject.txt' % (template_name,),
'confirmation/confirmation_email_subject.txt',
]
if subject_template_path:
template = loader.get_template(subject_template_path)
else:
template = loader.select_template(templates)
subject = template.render(context).strip().replace(u'\n', u' ') # no newlines, please
templates = [
'confirmation/%s_confirmation_email_body.txt' % (template_name,),
'confirmation/confirmation_email_body.txt',
]
if body_template_path:
template = loader.get_template(body_template_path)
else:
template = loader.select_template(templates)
body = template.render(context)
send_mail(subject, body, settings.DEFAULT_FROM_EMAIL, [email_address])
return self.create(content_object=obj, date_sent=now(), confirmation_key=confirmation_key)
class Confirmation(models.Model):
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey('content_type', 'object_id')
date_sent = models.DateTimeField(_('sent'))
confirmation_key = models.CharField(_('activation key'), max_length=40)
objects = ConfirmationManager()
class Meta(object):
verbose_name = _('confirmation email')
verbose_name_plural = _('confirmation emails')
def __unicode__(self):
# type: () -> Text
return _('confirmation email for %s') % (self.content_object,)
class RealmCreationKey(models.Model):
creation_key = models.CharField(_('activation key'), max_length=40)
date_created = models.DateTimeField(_('created'), default=now)
| apache-2.0 | 4,702,718,434,550,939,000 | 38.746667 | 122 | 0.643911 | false |
julienawilson/data-structures | src/test_linked_list.py | 1 | 4803 | """Tests for linked_list.py."""
import pytest
@pytest.fixture
def sample_linked_list():
"""Create testing linked list."""
from linked_list import LinkedList
one_llist = LinkedList([1])
empty_llist = LinkedList()
new_llist = LinkedList([1, 2, 3, 4, 5])
return (one_llist, empty_llist, new_llist)
def test_node_init():
"""Test node class init."""
from linked_list import Node
new_node = Node(0, None)
assert new_node.contents == 0 and new_node.next_node is None
def test_linkedlist_init_empty_size():
"""Test for empty LinkedList init."""
one_llist, empty_llist, new_llist = sample_linked_list()
assert empty_llist.length == 0
def test_linkedlist_init_empty_head():
"""Test head in empty LinkedList init."""
one_llist, empty_llist, new_llist = sample_linked_list()
assert empty_llist.head_node is None
def test_linkedlist_init_one_size():
"""Test for LinkedList init single item."""
one_llist, empty_llist, new_llist = sample_linked_list()
assert one_llist.length == 1
def test_linkedlist_init_one_head():
"""Test head in LinkedList init single item."""
one_llist, empty_llist, new_llist = sample_linked_list()
assert one_llist.head_node.contents == 1
def test_linkedlist_init_list_size():
"""Test for LinkedList init with list."""
one_llist, empty_llist, new_llist = sample_linked_list()
assert new_llist.length == 5
def test_linkedlist_init_list_head():
"""Test head in LinkedList init with list."""
one_llist, empty_llist, new_llist = sample_linked_list()
assert new_llist.head_node.contents == 5
def test_linkedlist_push_size():
"""Test for LinkedList size after push."""
one_llist, empty_llist, new_llist = sample_linked_list()
new_llist.push("new")
assert new_llist.length == 6
def test_linkedlist_push_val():
"""Test for LinkedList head value after push."""
one_llist, empty_llist, new_llist = sample_linked_list()
new_llist.push("new")
assert new_llist.head_node.contents == "new"
def test_linkedlist_pop_one():
"""Test for Linked List pop on list with one item."""
one_llist, empty_llist, new_llist = sample_linked_list()
assert one_llist.pop() == 1
def test_linkedlist_pop_list():
"""Test for Linked List pop on multi-item list."""
one_llist, empty_llist, new_llist = sample_linked_list()
assert new_llist.pop() == 5
def test_linkedlist_search_list():
"""Test for LinkedList search list."""
one_llist, empty_llist, new_llist = sample_linked_list()
assert new_llist.search(2).contents == 2
def test_linkedlist_search_empty():
"""Test for LinkedList search empty list."""
one_llist, empty_llist, new_llist = sample_linked_list()
assert empty_llist.search(2) is None
def test_linkedlist_remove_from_empty():
"""Test for LinkedList remove empty list."""
one_llist, empty_llist, new_llist = sample_linked_list()
with pytest.raises(ValueError):
empty_llist.remove(empty_llist.search(2))
def test_linkedlist_search_list_false():
"""Test for LinkedList search list when search value is not in list."""
one_llist, empty_llist, new_llist = sample_linked_list()
assert new_llist.search(100) is None
def test_linkedlist_remove():
"""Test LinkedList remove() on a list."""
one_llist, empty_llist, new_llist = sample_linked_list()
new_llist.remove(new_llist.search(3))
assert new_llist.search(3) is None
assert new_llist.search(4).next_node.contents == 2
def test_linkedlist_remove_lastnode():
"""Test LinkedList remove() on a list."""
one_llist, empty_llist, new_llist = sample_linked_list()
new_llist.remove(new_llist.search(5))
assert new_llist.search(5) is None
def test_linkedlist_remove_head():
"""Test LinkedList remove() the head on a list."""
one_llist, empty_llist, new_llist = sample_linked_list()
new_llist.remove(new_llist.search(5))
assert new_llist.head_node.contents == 4
def test_linkedlist_remove_none():
"""Test LinkedList remove() on a list list."""
one_llist, empty_llist, new_llist = sample_linked_list()
with pytest.raises(ValueError):
new_llist.remove(new_llist.search(100))
def test_linkedlist_display():
"""Test for LinkedList display."""
one_llist, empty_llist, new_llist = sample_linked_list()
assert new_llist.display() == (5, 4, 3, 2, 1)
def test_linkedlist_display_one():
"""Test for LinkedList display on single item list."""
one_llist, empty_llist, new_llist = sample_linked_list()
assert one_llist.display() == (1,)
def test_linkedlist_display_empty():
"""Test for LinkedList display on empty list."""
one_llist, empty_llist, new_llist = sample_linked_list()
assert empty_llist.display() is None
| mit | -5,856,760,384,943,909,000 | 30.188312 | 75 | 0.673746 | false |
bsamseth/project-euler | 094/94.py | 1 | 1451 | """
It is easily proved that no equilateral triangle exists with integral length
sides and integral area. However, the almost equilateral triangle 5-5-6 has an
area of 12 square units.
We shall define an almost equilateral triangle to be a triangle for which two
sides are equal and the third differs by no more than one unit.
Find the sum of the perimeters of all almost equilateral triangles with
integral side lengths and area and whose perimeters do not exceed one billion
(1,000,000,000).
Solution comment: Instant time.
Area is A = ((a ± 1) * h) / 2, and from Pythagoras
a^2 = ((a ± 1)/2)^2 + h^2
=> ((3a ± 1)/2)^2 - 3h^2 = 1
=> x^2 - 3y^2 = 1 -> Bell equation.
Wolfram Alpha then helpfully supplies integer solutions, with
x_n = 1/2 ((2 - √3)**n + (2 + √3)**n))
Only n >= 2 are valid for us. Then get side length from
a = (2x ± 1) / 3,
making sure to pick the sign that makes the division work.
"""
from math import sqrt
upper = int(1e9)
s = 0
sqrt3 = sqrt(3)
n = 2
while True:
x = int(round(0.5 * ((2 - sqrt3)**n + (2 + sqrt3)**n)))
a = 2*x-1
if a >= upper + 1:
break
if a % 3 == 0:
a = a//3
s += 3*a - 1
print(a, a, a-1)
else:
a = (a + 2) // 3
s += 3*a + 1
print(a, a, a+1)
n += 1
print(s)
| mit | -7,009,995,585,974,352,000 | 29.702128 | 79 | 0.544006 | false |
flopp/airports_map | airports/db.py | 1 | 3270 | import os
import random
import typing
from airports.airport import Airport, AirportType
from airports.airportstable import AirportsTable
from airports.download import download
from airports.runwaystable import RunwaysTable
from airports.wikipediahelper import get_wikipedia_articles
class DB:
def __init__(self) -> None:
self._airports: typing.Dict[str, Airport] = {}
self._large: typing.List[str] = []
self._medium: typing.List[str] = []
self._small: typing.List[str] = []
self._other: typing.List[str] = []
def load(self, cache_dir: str, reset_cache: bool) -> None:
airports_csv = os.path.join(cache_dir, "airports.csv")
runways_csv = os.path.join(cache_dir, "runways.csv")
wikipedia_json = os.path.join(cache_dir, "wikipedia_json")
if reset_cache:
for file_name in [airports_csv, runways_csv, wikipedia_json]:
if os.path.exists(file_name):
os.remove(file_name)
airports = AirportsTable(download("https://ourairports.com/data/airports.csv", airports_csv))
runways = RunwaysTable(download("https://ourairports.com/data/runways.csv", runways_csv))
articles = get_wikipedia_articles(wikipedia_json)
airports.add_wikipedia(articles)
airports.compute_bounds(runways.to_dict())
airports.check()
for airport in airports.good_airports():
self._airports[airport.icao_code()] = airport
if airport.airport_type() == AirportType.LARGE_AIRPORT:
self._large.append(airport.icao_code())
elif airport.airport_type() == AirportType.MEDIUM_AIRPORT:
self._medium.append(airport.icao_code())
elif airport.airport_type() == AirportType.SMALL_AIRPORT:
self._small.append(airport.icao_code())
else:
self._other.append(airport.icao_code())
def get_all_icaos(self) -> typing.List[str]:
return list(self._airports.keys())
def get(self, icao: str) -> typing.Optional[Airport]:
icao = icao.strip().upper()
if icao in self._airports:
return self._airports[icao]
return None
def get_random(self) -> Airport:
if random.choice([True, False]):
return self._airports[random.choice(self._large)]
if random.choice([True, False]):
return self._airports[random.choice(self._medium)]
if random.choice([True, False]):
return self._airports[random.choice(self._small)]
return self._airports[random.choice(list(self._airports.keys()))]
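    # Note (added for illustration): the nested coin flips above mean
    # get_random() returns a large airport with probability 1/2, a medium one
    # with 1/4, a small one with 1/8, and falls through to a uniform pick over
    # all good airports (any type) with the remaining 1/8.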
def get_random_list(self, count: int) -> typing.List[Airport]:
return random.sample(list(self._airports.values()), count)
def search(self, needle: str) -> typing.Optional[Airport]:
needle = needle.strip().upper()
for airport in self._airports.values():
if airport.matches_code(needle):
return airport
for airport in self._airports.values():
if airport.matches_name(needle):
return airport
for airport in self._airports.values():
if airport.matches_location(needle):
return airport
return None
| mit | -1,287,605,393,672,882,000 | 41.467532 | 101 | 0.62263 | false |
opencb/cellbase | cellbase-client/src/main/python/tests/test_cbrestclients.py | 1 | 7814 | import unittest
import pycellbase.cbrestclients as cbfts
from pycellbase.cbconfig import ConfigClient
from requests import Session
class GeneClientTest(unittest.TestCase):
"""Tests the GeneClient class"""
def setUp(self):
"""Initializes the gene client"""
self._gc = cbfts.GeneClient(Session(), ConfigClient())
def test_get_biotypes(self):
"""Checks retrieval of gene biotypes"""
res = self._gc.get_biotypes()
assert len(res[0]['result']) == 29
assert 'protein_coding' in res[0]['result']
def test_get_list(self):
"""Checks retrieval of gene list"""
res = self._gc.get_list(include="id", limit=10000)
assert len(res[0]['result']) == 10000
assert res[0]['result'][0]['id'] == 'ENSG00000223972'
def test_get_protein(self):
"""Checks retrieval of protein"""
res = self._gc.get_protein('BRCA1')
assert len(res[0]['result']) == 1
assert res[0]['result'][0]['name'][0] == 'BRCA1_HUMAN'
def test_get_transcript(self):
"""Checks retrieval of protein"""
res = self._gc.get_transcript('BRCA1')
assert len(res[0]['result']) == 27
assert res[0]['result'][0]['name'] == 'BRCA1-001'
def test_get_tfbs(self):
"""Checks retrieval of protein"""
res = self._gc.get_tfbs('BRCA1')
assert len(res[0]['result']) == 175
assert res[0]['result'][0]['tfName'] == 'E2F4'
def test_get_snp(self):
"""Checks retrieval of snp"""
res = self._gc.get_snp('LDLR')
assert len(res[0]['result']) == 4108
assert res[0]['result'][0]['id'] == 'rs191244119'
def test_get_info(self):
"""Checks retrieval of gene info"""
res = self._gc.get_info('BRCA1')
assert len(res[0]['result']) == 1
assert res[0]['id'] == 'BRCA1'
def test_search(self):
"""Checks retrieval of gene info given a set of filters"""
res = self._gc.search(name='BRCA1')
assert len(res[0]['result']) == 1
assert res[0]['result'][0]['id'] == 'ENSG00000012048'
res = self._gc.search(name='BRCA1', include='chromosome')
assert len(res[0]['result']) == 1
assert res[0]['result'][0]['chromosome'] == '17'
class ProteinClientTest(unittest.TestCase):
"""Tests the ProteinClient class"""
def setUp(self):
"""Initializes the protein client"""
self._pc = cbfts.ProteinClient(Session(), ConfigClient())
def test_get_substitution_scores(self):
"""Checks retrieval of protein substitution scores"""
res = self._pc.get_substitution_scores('BRCA1_HUMAN')
assert len(res[0]['result']) == 1
assert (res[0]['result'][0]['1']['W'] ==
{'pe': 0, 'ps': 0.995, 'ss': 0, 'se': 1})
def test_get_sequence(self):
"""Checks retrieval of protein sequence"""
res = self._pc.get_sequence('Q9UL59')
assert len(res[0]['result']) == 1
assert len(res[0]['result'][0]) == 606
class TrancriptClientTest(unittest.TestCase):
"""Tests the TrancriptClient class"""
def setUp(self):
"""Initializes the transcript client"""
self._tc = cbfts.TranscriptClient(Session(), ConfigClient())
def test_get_function_prediction(self):
"""Checks retrieval of function predictions"""
res = self._tc.get_function_prediction('ENST00000536068')
assert len(res[0]['result']) == 1
assert (res[0]['result'][0]['10']['E'] ==
{'pe': 1, 'se': 1, 'ps': 0.497, 'ss': 0})
def test_get_gene(self):
"""Checks retrieval of genes which codify the transcript"""
res = self._tc.get_gene('ENST00000536068')
assert len(res[0]['result']) == 1
assert res[0]['result'][0]['name'] == 'ZNF214'
def test_get_protein(self):
"""Checks retrieval of codified proteins"""
res = self._tc.get_protein('ENST00000536068')
assert len(res[0]['result']) == 1
assert res[0]['result'][0]['name'][0] == 'ZN214_HUMAN'
def test_get_sequence(self):
"""Checks retrieval of the transcript sequence"""
res = self._tc.get_sequence('ENST00000536068')
assert len(res[0]['result']) == 1
assert len(res[0]['result'][0]) == 2562
class VariationClientTest(unittest.TestCase):
"""Tests the VariationClient class"""
def setUp(self):
"""Initializes the variation client"""
self._vc = cbfts.VariationClient(Session(), ConfigClient())
def test_get_consequence_types(self):
"""Checks retrieval of consequence types list"""
res = self._vc.get_consequence_types()
assert 'coding_sequence_variant' in res[0]['result']
def test_get_consequence_type(self):
"""Checks retrieval of consequence types for a variation"""
res = self._vc.get_consequence_type('rs6025')
assert len(res[0]['result']) == 1
assert res[0]['result'][0] == 'missense_variant'
class GenomicRegionTest(unittest.TestCase):
"""Tests the GenomicRegion class"""
def setUp(self):
"""Initializes the variation client"""
self._gr = cbfts.RegionClient(Session(), ConfigClient())
def test_get_clinical(self):
"""Checks retrieval of clinical data"""
res = self._gr.get_clinical('3:100000-900000')
assert len(res[0]['result']) == 469
assert res[0]['result'][0]['mutationCDS'] == 'c.4G>A'
def test_get_conservation(self):
"""Checks retrieval of conservation data"""
res = self._gr.get_conservation('3:100000-900000')
assert len(res[0]['result']) == 3
assert res[0]['result'][0]['source'] == 'gerp'
def test_get_gene(self):
"""Checks retrieval of genes"""
res = self._gr.get_gene('3:100000-900000')
assert len(res[0]['result']) == 8
assert res[0]['result'][0]['name'] == 'AC090044.1'
def test_get_regulatory(self):
"""Checks retrieval of regulatory elements"""
res = self._gr.get_regulatory('3:100000-900000')
assert len(res[0]['result']) == 677
assert res[0]['result'][0]['name'] == 'H3K27me3'
def test_get_sequence(self):
"""Checks retrieval of sequence"""
res = self._gr.get_sequence('3:100-200')
assert len(res[0]['result']) == 1
assert len(res[0]['result'][0]['sequence']) == 101
def test_get_tfbs(self):
"""Checks retrieval of transcription factor binding sites (TFBSs)"""
res = self._gr.get_tfbs('3:100000-900000')
assert len(res[0]['result']) == 239
assert res[0]['result'][0]['name'] == 'CTCF'
def test_get_transcript(self):
"""Checks retrieval of transcripts"""
res = self._gr.get_transcript('3:1000-100000')
assert len(res[0]['result']) == 2
assert res[0]['result'][0]['id'] == 'ENST00000440867'
def test_get_variation(self):
"""Checks retrieval of variations"""
res = self._gr.get_variation('3:10000-100000')
assert res[0]['result'][0]['id'] == 'rs192023809'
class VariantTest(unittest.TestCase):
"""Tests the Variant class"""
def setUp(self):
"""Initializes the variation client"""
self._vc = cbfts.VariantClient(Session(), ConfigClient())
def test_get_annotation(self):
"""Checks retrieval of annotation data"""
res = self._vc.get_annotation('19:45411941:T:C')
assert len(res[0]['result']) == 1
assert res[0]['result'][0]['id'] == 'rs429358'
def test_get_cadd(self):
"""Checks retrieval of cadd"""
res = self._vc.get_cadd('19:45411941:T:C')
assert len(res[0]['result']) == 2
assert res[0]['result'][0]['score'] == -1.1800003051757812
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -8,429,973,344,240,424,000 | 36.033175 | 76 | 0.584848 | false |
arcan1s/git-etc | sources/ctrlconf/aboutwin.py | 1 | 5647 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'aboutwin.ui'
#
# Created: Mon Feb 18 04:26:37 2013
# by: PyQt4 UI code generator 4.9.6
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_AboutWindow(object):
def setupUi(self, AboutWindow):
AboutWindow.setObjectName(_fromUtf8("AboutWindow"))
AboutWindow.resize(418, 298)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(AboutWindow.sizePolicy().hasHeightForWidth())
AboutWindow.setSizePolicy(sizePolicy)
self.centralwidget = QtGui.QWidget(AboutWindow)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.centralwidget.sizePolicy().hasHeightForWidth())
self.centralwidget.setSizePolicy(sizePolicy)
self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
self.gridLayout = QtGui.QGridLayout(self.centralwidget)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.button_close = QtGui.QPushButton(self.centralwidget)
self.button_close.setMinimumSize(QtCore.QSize(100, 20))
self.button_close.setMaximumSize(QtCore.QSize(100, 25))
self.button_close.setDefault(True)
self.button_close.setObjectName(_fromUtf8("button_close"))
self.gridLayout.addWidget(self.button_close, 1, 1, 1, 1)
spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.gridLayout.addItem(spacerItem, 1, 0, 1, 1)
self.text_about = QtGui.QTextBrowser(self.centralwidget)
self.text_about.setMinimumSize(QtCore.QSize(410, 260))
self.text_about.setObjectName(_fromUtf8("text_about"))
self.gridLayout.addWidget(self.text_about, 0, 0, 1, 3)
AboutWindow.setCentralWidget(self.centralwidget)
self.retranslateUi(AboutWindow)
QtCore.QMetaObject.connectSlotsByName(AboutWindow)
AboutWindow.setTabOrder(self.text_about, self.button_close)
def retranslateUi(self, AboutWindow):
AboutWindow.setWindowTitle(_translate("AboutWindow", "About", None))
self.button_close.setText(_translate("AboutWindow", "Закрыть", None))
self.text_about.setHtml(_translate("AboutWindow", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Droid Sans\'; font-size:10pt; font-weight:400; font-style:normal;\">\n"
"<p align=\"center\" style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">git2etc 2.0.0</p>\n"
"<p align=\"center\" style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">Лицензия: GPL</p>\n"
"<p align=\"justify\" style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><br /></p>\n"
"<p align=\"justify\" style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">GUI интерфейс к демону git-etc, написанный на python2.7/PyQt4. Позволяет посмотреть список коммитов и изменения в файлах, записанные в коммитах. Также данное приложение позволяет откатить к определенному коммиту все файлы (git reset --hard) или отдельно взятые (git diff && git apply). Дополнительно предусмотрена возможность слияния старых и новых конфигурационных файлов (используются две ветки репозитория - master и experimental).</p>\n"
"<p align=\"justify\" style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><br /></p>\n"
"<p align=\"justify\" style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">Автор: Евгений Алексеев aka arcanis</p>\n"
"<p align=\"justify\" style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">e-mail: [email protected]</p>\n"
"<p align=\"justify\" style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">Jabber: [email protected]</p>\n"
"<p align=\"justify\" style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">ICQ: 407-398-235</p></body></html>", None))
| gpl-3.0 | -5,526,175,977,805,946,000 | 67.217949 | 589 | 0.713588 | false |
suutari/shoop | shuup_tests/front/test_carousel.py | 1 | 7375 | # -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shuup Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from datetime import datetime, timedelta
import pytest
from django.utils import translation
from filer.models import Image
from shuup.front.apps.carousel.models import Carousel, LinkTargetType, Slide
from shuup.front.apps.carousel.plugins import BannerBoxPlugin, CarouselPlugin
from shuup.testing.factories import get_default_category, get_default_product
from shuup_tests.front.fixtures import get_jinja_context
from shuup_tests.simple_cms.utils import create_page
@pytest.mark.django_db
def test_carousel_plugin_form():
test_carousel = Carousel.objects.create(name="test")
plugin = CarouselPlugin(config={})
form_class = plugin.get_editor_form_class()
form = form_class(data={"carousel": test_carousel.pk}, plugin=plugin)
assert form.is_valid()
assert form.get_config() == {"carousel": test_carousel.pk}
@pytest.mark.django_db
def test_carousel_plugin_form_get_context():
context = get_jinja_context()
test_carousel = Carousel.objects.create(name="test")
plugin = CarouselPlugin(config={"carousel": test_carousel.pk})
assert plugin.get_context_data(context).get("carousel") == test_carousel
@pytest.mark.django_db
def test_banner_box_plugin():
context = get_jinja_context()
test_carousel = Carousel.objects.create(name="test")
plugin = BannerBoxPlugin(config={"carousel": test_carousel.pk, "title": "Test"})
data = plugin.get_context_data(context)
assert data.get("carousel") == test_carousel
assert data.get("title") == "Test"
@pytest.mark.django_db
def test_image_translations():
test_carousel = Carousel.objects.create(name="test")
test_image_1 = Image.objects.create(original_filename="slide1.jpg")
test_image_2 = Image.objects.create(original_filename="slide2.jpg")
with translation.override("en"):
test_slide = Slide.objects.create(carousel=test_carousel, name="test", image=test_image_1)
assert len(test_carousel.slides.all()) == 1
assert test_slide.get_translated_field("image").original_filename == "slide1.jpg"
test_slide.set_current_language("fi")
assert test_slide.get_translated_field("image").original_filename == "slide1.jpg"
test_slide.image = test_image_2
test_slide.save()
assert test_slide.get_translated_field("image").original_filename == "slide2.jpg"
test_slide.set_current_language("en")
assert test_slide.get_translated_field("image").original_filename == "slide1.jpg"
test_slide.set_current_language("jp")
assert test_slide.get_translated_field("image").original_filename == "slide1.jpg"
@pytest.mark.django_db
def test_slide_links():
test_carousel = Carousel.objects.create(name="test")
test_image_1 = Image.objects.create(original_filename="slide1.jpg")
with translation.override("en"):
test_slide = Slide.objects.create(carousel=test_carousel, name="test", image=test_image_1)
# Test external link
assert len(test_carousel.slides.all()) == 1
test_link = "http://example.com"
test_slide.external_link = test_link
test_slide.save()
assert test_slide.get_translated_field("external_link") == test_link
assert test_slide.get_link_url() == test_link
# Test Product url and link priorities
test_product = get_default_product()
test_slide.product_link = test_product
test_slide.save()
assert test_slide.get_link_url() == test_link
test_slide.external_link = None
test_slide.save()
assert test_slide.get_link_url().startswith("/p/") # Close enough...
# Test Category url and link priorities
test_category = get_default_category()
test_slide.category_link = test_category
test_slide.save()
assert test_slide.get_link_url().startswith("/p/") # Close enough...
test_slide.product_link = None
test_slide.save()
assert test_slide.get_link_url().startswith("/c/") # Close enough...
# Test CMS page url and link priorities
attrs = {"url": "test"}
test_page = create_page(**attrs)
test_slide.cms_page_link = test_page
test_slide.save()
assert test_slide.get_link_url().startswith("/c/") # Close enough...
test_slide.category_link = None
test_slide.save()
assert test_slide.get_link_url().startswith("/test/")
# Check that external link overrides everything
test_slide.external_link = test_link
test_slide.save()
assert test_slide.get_link_url() == test_link
@pytest.mark.django_db
def test_visible_manager():
test_dt = datetime(2016, 3, 18, 20, 34, 1, 922791)
test_carousel = Carousel.objects.create(name="test")
test_image = Image.objects.create(original_filename="slide.jpg")
test_slide = Slide.objects.create(carousel=test_carousel, name="test", image=test_image)
assert not list(test_carousel.slides.visible(dt=test_dt))
# Available since last week
test_slide.available_from = test_dt - timedelta(days=7)
test_slide.save()
assert len(test_carousel.slides.visible(dt=test_dt)) == 1
# Available until tomorrow
test_slide.available_to = test_dt + timedelta(days=1)
test_slide.save()
assert len(test_carousel.slides.visible(dt=test_dt)) == 1
# Expired yesterday
test_slide.available_to = test_dt - timedelta(days=1)
test_slide.save()
assert not list(test_carousel.slides.visible(dt=test_dt))
# Not available until next week
test_slide.available_from = test_dt + timedelta(days=7)
test_slide.available_to = test_dt + timedelta(days=8)
test_slide.save()
assert not list(test_carousel.slides.visible(dt=test_dt))
@pytest.mark.django_db
def test_is_visible():
test_dt = datetime(2016, 3, 18, 20, 34, 1, 922791)
test_carousel = Carousel.objects.create(name="test")
test_image = Image.objects.create(original_filename="slide.jpg")
test_slide = Slide.objects.create(carousel=test_carousel, name="test", image=test_image)
assert not test_slide.is_visible(dt=test_dt)
# Available since last week
test_slide.available_from = test_dt - timedelta(days=7)
test_slide.save()
assert test_slide.is_visible(dt=test_dt)
# Available until tomorrow
test_slide.available_to = test_dt + timedelta(days=1)
test_slide.save()
assert test_slide.is_visible(dt=test_dt)
# Expired yesterday
test_slide.available_to = test_dt - timedelta(days=1)
test_slide.save()
assert not test_slide.is_visible(dt=test_dt)
# Not available until next week
test_slide.available_from = test_dt + timedelta(days=7)
test_slide.available_to = test_dt + timedelta(days=8)
test_slide.save()
assert not test_slide.is_visible(dt=test_dt)
@pytest.mark.django_db
@pytest.mark.parametrize("target_type,expected_target", [
(LinkTargetType.CURRENT, "_self"),
(LinkTargetType.NEW, "_blank"),
])
def test_get_link_target(target_type, expected_target):
test_carousel = Carousel.objects.create(name="test")
test_image = Image.objects.create(original_filename="slide.jpg")
test_slide = Slide.objects.create(carousel=test_carousel, name="test", image=test_image, target=target_type)
assert test_slide.get_link_target() == expected_target
| agpl-3.0 | -3,138,957,484,464,057,300 | 37.411458 | 112 | 0.701966 | false |
DXCanas/content-curation | contentcuration/contentcuration/node_metadata/annotations.py | 1 | 8076 | from django.contrib.postgres.aggregates.general import ArrayAgg
from django.contrib.postgres.aggregates.general import BoolOr
from django.db.models import BooleanField
from django.db.models import CharField
from django.db.models import IntegerField
from django.db.models.aggregates import Count
from django.db.models.aggregates import Max
from django.db.models.expressions import Case
from django.db.models.expressions import F
from django.db.models.expressions import Value
from django.db.models.expressions import When
from django.db.models.functions import Coalesce
from le_utils.constants import content_kinds
from le_utils.constants import roles
from contentcuration.db.models.expressions import BooleanComparison
from contentcuration.db.models.expressions import WhenQ
from contentcuration.node_metadata.cte import AssessmentCountCTE
from contentcuration.node_metadata.cte import ResourceSizeCTE
from contentcuration.node_metadata.cte import TreeMetadataCTE
class MetadataAnnotation(object):
cte = None
cte_columns = ()
def get_annotation(self, cte):
"""
:type cte: With|None
"""
raise NotImplementedError("Metadata annotation needs to implement this method")
def build_kind_condition(self, kind_id, value, comparison="="):
return [BooleanComparison(kind_id, comparison, Value(value))]
def build_topic_condition(self, kind_id, comparison="="):
return self.build_kind_condition(kind_id, content_kinds.TOPIC, comparison)
class AncestorAnnotation(MetadataAnnotation):
cte = TreeMetadataCTE
cte_columns = ("lft", "rght", "pk")
def __init__(self, *args, **kwargs):
self.include_self = kwargs.pop("include_self", False)
super(AncestorAnnotation, self).__init__(*args, **kwargs)
def build_ancestor_condition(self, cte):
"""
@see MPTTModel.get_ancestors()
"""
left_op = "<="
right_op = ">="
if not self.include_self:
left_op = "<"
right_op = ">"
return [
BooleanComparison(cte.col.lft, left_op, F("lft")),
BooleanComparison(cte.col.rght, right_op, F("rght")),
]
class AncestorArrayAgg(AncestorAnnotation):
def get_annotation(self, cte):
ancestor_condition = self.build_ancestor_condition(cte)
return ArrayAgg(
Case(
When(condition=WhenQ(*ancestor_condition), then=cte.col.pk),
default=Value(None),
),
output_field=CharField(),
)
class DescendantCount(MetadataAnnotation):
def get_annotation(self, cte=None):
"""
@see MPTTModel.get_descendant_count()
"""
return Max(
Case(
# when selected node is topic, use algorithm to get descendant count
When(
condition=WhenQ(*self.build_topic_condition(F("kind_id"))),
then=(F("rght") - F("lft") - Value(1)) / Value(2),
),
# integer division floors the result in postgres
default=Value(1),
)
)
class DescendantAnnotation(MetadataAnnotation):
cte = TreeMetadataCTE
cte_columns = ("lft", "rght")
def __init__(self, *args, **kwargs):
self.include_self = kwargs.pop("include_self", False)
super(DescendantAnnotation, self).__init__(*args, **kwargs)
def build_descendant_condition(self, cte):
"""
@see MPTTModel.get_descendants()
"""
left_op = ">="
right_op = "<="
if not self.include_self:
left_op = ">"
right_op = "<"
return [
BooleanComparison(cte.col.lft, left_op, F("lft")),
BooleanComparison(cte.col.lft, right_op, F("rght")),
]
class AssessmentCount(DescendantAnnotation):
cte = AssessmentCountCTE
cte_columns = ("content_id", "assessment_count")
def get_annotation(self, cte):
return Coalesce(cte.col.assessment_count, Value(0), output_field=IntegerField())
class ResourceCount(DescendantAnnotation):
cte_columns = ("content_id", "kind_id") + DescendantAnnotation.cte_columns
def get_annotation(self, cte):
resource_condition = self.build_topic_condition(F("kind_id"), "!=")
topic_condition = self.build_topic_condition(cte.col.kind_id, "!=")
topic_condition += self.build_descendant_condition(cte)
return Count(
Case(
# when selected node is not a topic, then count = 1
When(condition=WhenQ(*resource_condition), then=F("content_id")),
# when it is a topic, then count descendants
When(condition=WhenQ(*topic_condition), then=cte.col.content_id),
default=Value(None),
),
distinct=True,
)
class CoachCount(DescendantAnnotation):
cte_columns = ("content_id", "role_visibility") + DescendantAnnotation.cte_columns
def get_annotation(self, cte):
topic_condition = self.build_topic_condition(F("kind_id"))
topic_condition += self.build_descendant_condition(cte)
topic_condition += self.build_coach_condition(cte.col.role_visibility)
resource_condition = self.build_topic_condition(F("kind_id"), "!=")
resource_condition += self.build_coach_condition(F("role_visibility"))
return Count(
Case(
# when selected node is a coach topic, then count descendent content_id's
When(condition=WhenQ(*topic_condition), then=cte.col.content_id),
# when selected node is not a topic, count its content_id
When(condition=WhenQ(*resource_condition), then=F("content_id")),
default=Value(None),
),
distinct=True,
)
def build_coach_condition(self, role_visibility):
return [BooleanComparison(role_visibility, "=", Value(roles.COACH))]
class HasChanged(DescendantAnnotation):
cte_columns = ("changed",) + DescendantAnnotation.cte_columns
def get_annotation(self, cte):
resource_condition = self.build_topic_condition(F("kind_id"), "!=")
whens = [
# when selected node is not a topic, just use its changed status
When(condition=WhenQ(*resource_condition), then=F("changed")),
]
if self.include_self:
# when selected node is a topic and it's changed and including self, then return that
whens.append(When(condition=WhenQ(*[F("changed")]), then=F("changed")))
return Coalesce(
Case(
*whens,
# fallback to aggregating descendant changed status when a unchanged topic
default=BoolOr(cte.col.changed)
),
Value(False),
output_field=BooleanField(),
)
class SortOrderMax(DescendantAnnotation):
cte_columns = ("parent_id", "sort_order") + DescendantAnnotation.cte_columns
def get_annotation(self, cte):
resource_condition = self.build_topic_condition(F("kind_id"), "!=")
topic_condition = self.build_child_condition(cte)
return Coalesce(
Max(
Case(
# when selected node is not a topic, use its sort_order
When(condition=WhenQ(*resource_condition), then=F("sort_order")),
# when selected node is a topic, then find max of children
When(condition=WhenQ(*topic_condition), then=cte.col.sort_order),
default=Value(None),
)
),
Value(1),
output_field=IntegerField(),
)
def build_child_condition(self, cte):
return [BooleanComparison(cte.col.parent_id, "=", F("id"))]
class ResourceSize(DescendantAnnotation):
cte = ResourceSizeCTE
cte_columns = ("resource_size",)
def get_annotation(self, cte):
return Max(cte.col.resource_size, output_field=IntegerField())
| mit | -4,523,287,446,646,386,000 | 33.810345 | 97 | 0.616518 | false |
lipis/github-stats | main/api/v1/repo.py | 1 | 1840 | # coding: utf-8
from __future__ import absolute_import
from google.appengine.ext import ndb
import flask_restful
import flask
from api import helpers
import auth
import model
import util
from main import api_v1
@api_v1.resource('/repo/', endpoint='api.repo.list')
class RepoListAPI(flask_restful.Resource):
def get(self):
repo_dbs, repo_cursor = model.Repo.get_dbs()
return helpers.make_response(repo_dbs, model.Repo.FIELDS, repo_cursor)
@api_v1.resource('/repo/<string:repo_key>/', endpoint='api.repo')
class RepoAPI(flask_restful.Resource):
def get(self, repo_key):
repo_db = ndb.Key(urlsafe=repo_key).get()
if not repo_db:
helpers.make_not_found_exception('Repo %s not found' % repo_key)
return helpers.make_response(repo_db, model.Repo.FIELDS)
###############################################################################
# Admin
###############################################################################
@api_v1.resource('/admin/repo/', endpoint='api.admin.repo.list')
class AdminRepoListAPI(flask_restful.Resource):
@auth.admin_required
def get(self):
repo_keys = util.param('repo_keys', list)
if repo_keys:
repo_db_keys = [ndb.Key(urlsafe=k) for k in repo_keys]
repo_dbs = ndb.get_multi(repo_db_keys)
      return helpers.make_response(repo_dbs, model.Repo.FIELDS)
repo_dbs, repo_cursor = model.Repo.get_dbs()
return helpers.make_response(repo_dbs, model.Repo.FIELDS, repo_cursor)
@api_v1.resource('/admin/repo/<string:repo_key>/', endpoint='api.admin.repo')
class AdminRepoAPI(flask_restful.Resource):
@auth.admin_required
def get(self, repo_key):
repo_db = ndb.Key(urlsafe=repo_key).get()
if not repo_db:
helpers.make_not_found_exception('Repo %s not found' % repo_key)
return helpers.make_response(repo_db, model.Repo.FIELDS)
| mit | 9,207,477,670,009,611,000 | 31.280702 | 79 | 0.646196 | false |
twilio/twilio-python | twilio/rest/accounts/__init__.py | 1 | 1547 | # coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base.domain import Domain
from twilio.rest.accounts.v1 import V1
class Accounts(Domain):
def __init__(self, twilio):
"""
Initialize the Accounts Domain
:returns: Domain for Accounts
:rtype: twilio.rest.accounts.Accounts
"""
super(Accounts, self).__init__(twilio)
self.base_url = 'https://accounts.twilio.com'
# Versions
self._v1 = None
@property
def v1(self):
"""
:returns: Version v1 of accounts
:rtype: twilio.rest.accounts.v1.V1
"""
if self._v1 is None:
self._v1 = V1(self)
return self._v1
@property
def auth_token_promotion(self):
"""
:rtype: twilio.rest.accounts.v1.auth_token_promotion.AuthTokenPromotionList
"""
return self.v1.auth_token_promotion
@property
def credentials(self):
"""
:rtype: twilio.rest.accounts.v1.credential.CredentialList
"""
return self.v1.credentials
@property
def secondary_auth_token(self):
"""
:rtype: twilio.rest.accounts.v1.secondary_auth_token.SecondaryAuthTokenList
"""
return self.v1.secondary_auth_token
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Accounts>'
| mit | 7,574,503,216,198,596,000 | 22.089552 | 83 | 0.5585 | false |
flavoi/diventi | diventi/products/migrations/0060_auto_20200901_1950.py | 1 | 3075 | # Generated by Django 2.2.13 on 2020-09-01 17:50
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('products', '0059_auto_20200830_1804'),
]
operations = [
migrations.AlterField(
model_name='chapter',
name='title',
field=models.CharField(max_length=80, verbose_name='title'),
),
migrations.AlterField(
model_name='chapter',
name='title_en',
field=models.CharField(max_length=80, null=True, verbose_name='title'),
),
migrations.AlterField(
model_name='chapter',
name='title_it',
field=models.CharField(max_length=80, null=True, verbose_name='title'),
),
migrations.AlterField(
model_name='imagepreview',
name='title',
field=models.CharField(max_length=80, verbose_name='title'),
),
migrations.AlterField(
model_name='imagepreview',
name='title_en',
field=models.CharField(max_length=80, null=True, verbose_name='title'),
),
migrations.AlterField(
model_name='imagepreview',
name='title_it',
field=models.CharField(max_length=80, null=True, verbose_name='title'),
),
migrations.AlterField(
model_name='productcategory',
name='title',
field=models.CharField(max_length=80, verbose_name='title'),
),
migrations.AlterField(
model_name='productcategory',
name='title_en',
field=models.CharField(max_length=80, null=True, verbose_name='title'),
),
migrations.AlterField(
model_name='productcategory',
name='title_it',
field=models.CharField(max_length=80, null=True, verbose_name='title'),
),
migrations.AlterField(
model_name='productdetail',
name='title',
field=models.CharField(max_length=80, verbose_name='title'),
),
migrations.AlterField(
model_name='productdetail',
name='title_en',
field=models.CharField(max_length=80, null=True, verbose_name='title'),
),
migrations.AlterField(
model_name='productdetail',
name='title_it',
field=models.CharField(max_length=80, null=True, verbose_name='title'),
),
migrations.AlterField(
model_name='productformat',
name='title',
field=models.CharField(max_length=80, verbose_name='title'),
),
migrations.AlterField(
model_name='productformat',
name='title_en',
field=models.CharField(max_length=80, null=True, verbose_name='title'),
),
migrations.AlterField(
model_name='productformat',
name='title_it',
field=models.CharField(max_length=80, null=True, verbose_name='title'),
),
]
| apache-2.0 | 258,824,874,770,298,750 | 33.943182 | 83 | 0.555122 | false |
erdc/proteus | proteus/tests/LS_with_edgeBased_EV/MCorr/thelper_cons_ls.py | 1 | 3450 | from __future__ import division
from past.utils import old_div
from proteus import Domain
from proteus import Context
ct=Context.Options([
# General parameters #
("T",0.1,"Final time"),
("nDTout",1,"Number of time steps to archive"),
("refinement",0,"Level of refinement"),
("unstructured",False,"Use unstructured mesh. Set to false for periodic BCs"),
("SSPOrder",2,"SSP method of order 1, 2 or 3") ,
("cfl",0.5,"Target cfl"),
# PARAMETERS FOR NCLS #
("level_set_function" ,1,"0: distance function, 1: saturated distance function"),
("STABILIZATION_TYPE_ncls",1,"0: SUPG, 1: EV, 2: smoothness based indicator"),
("SATURATED_LEVEL_SET",True,"Saturate the distance function or not?"),
("ENTROPY_TYPE_ncls",2,"1: parabolic, 2: logarithmic"),
("COUPEZ",True,"Flag to turn on/off the penalization term in Coupez approach"),
("DO_REDISTANCING",True,"Solve Eikonal type equation after transport?"),
("cE_ncls",1.0,"Entropy viscosity constant"),
# PARAMETERS FOR VOF #
("STABILIZATION_TYPE_vof",1,"0: SUPG, 1: TG, 2: edge based EV, 3: edge based smoothness ind."),
("ENTROPY_TYPE_vof",1,"0: quadratic, 1: logarithmic"),
("FCT",True,"Use Flux Corrected Transport"),
("cE_vof",0.1,"Entropy viscosity constant"),
("cK",1.0,"Artificial compression constant")
],mutable=True)
# OTHER NUMERICAL PARAMETERS FOR NCLS #
epsCoupez=3
redist_tolerance=0.1
epsFactRedistance=0.33 #For signed dist function
lambda_coupez = 1.0 #Strength of redistancing and coupez force
epsFactHeaviside=epsFactDirac=1.5
# number of space dimensions #
nd=2
# MASS CORRECTION #
applyCorrection=True
correctionType = 'cg'
# General parameters #
parallel = False
linearSmoother = None
checkMass=False
runCFL = ct.cfl
# Finite element spaces #
pDegree_ncls=1
pDegree_vof=pDegree_ncls #volume of fluid should match ls for now
useHex=False
useBernstein=False
# Quadrature order #
quad_order = 2*pDegree_ncls+1
# parallel partitioning info #
from proteus import MeshTools
partitioningType = MeshTools.MeshParallelPartitioningTypes.node
# Create mesh #
nn=nnx=(2**ct.refinement)*10+1
nny=nnx
nnz=1
he=old_div(1.0,(nnx-1.0))
box=Domain.RectangularDomain(L=(1.0,1.0),
x=(0.0,0.0),
name="box");
box.writePoly("box")
if ct.unstructured:
domain=Domain.PlanarStraightLineGraphDomain(fileprefix="box")
domain.boundaryTags = box.boundaryTags
bt = domain.boundaryTags
triangleOptions="pAq30Dena%8.8f" % (0.5*he**2,)
else:
domain = box
domain.MeshOptions.nnx = nnx
domain.MeshOptions.nny = nny
domain.MeshOptions.nnz = nnz
domain.MeshOptions.nn = nn
domain.MeshOptions.triangleFlag=0
# REDISTANCING #
redist_Newton=True
onlyVOF=False
# SMOOTHING FACTORS # (eps)
epsFactHeaviside=epsFactDirac=epsFact_vof=1.5
epsFactRedistance=0.33
epsFactDiffusion=10.0
# SHOCK CAPTURING PARAMETERS #
shockCapturingFactor_vof=0.2
shockCapturingFactor_ncls=0.2
shockCapturingFactor_rd=0.9
lag_shockCapturing_vof=True
lag_shockCapturing_ls=True
lag_shockCapturing_rd=False
# use absolute tolerances on al models
atolRedistance = 1.0e-5
atolConservation = 1.0e-9
atolVolumeOfFluid= 1.0e-9
atolLevelSet = 1.0e-9
#controls
linearSolverConvergenceTest = 'r-true' # 'rits' does a set number of iterations, 'r-true' uses the true residual, PETSc default is the preconditioned residual
#redist solver
fmmFlag=0
soname="cons_ls_level_"+repr(ct.refinement)
| mit | -2,499,542,141,089,966,600 | 29.263158 | 146 | 0.71913 | false |
frePPLe/frePPLe | freppledb/common/report.py | 1 | 134827 | #
# Copyright (C) 2007-2019 by frePPLe bv
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero
# General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with this program. If not, see <http://www.gnu.org/licenses/>.
#
r"""
This module implements a generic view to present lists and tables.
It provides the following functionality:
- Pagination of the results.
- Ability to filter on fields, using different operators.
- Ability to sort on a field.
- Export the results as a CSV file, ready for use in a spreadsheet.
- Import CSV formatted data files.
 - Show data grouped by time buckets.
The time buckets and time boundaries can easily be updated.
"""
import codecs
import csv
from datetime import date, datetime, timedelta, time
from decimal import Decimal
import functools
import logging
import math
import operator
import json
import re
from time import timezone, daylight
from io import StringIO, BytesIO
import urllib
from openpyxl import load_workbook, Workbook
from openpyxl.utils import get_column_letter
from openpyxl.cell import WriteOnlyCell
from openpyxl.styles import NamedStyle, PatternFill
from dateutil.parser import parse
from openpyxl.comments import Comment as CellComment
from django.db.models import Model
from django.db.utils import DEFAULT_DB_ALIAS, load_backend
from django.contrib.auth.models import Group
from django.contrib.auth import get_permission_codename
from django.conf import settings
from django.views.decorators.csrf import csrf_protect
from django.contrib import messages
from django.contrib.admin.views.decorators import staff_member_required
from django.contrib.admin.utils import unquote, quote
from django.core.exceptions import ValidationError
from django.core.management.color import no_style
from django.db import connections, transaction, models
from django.db.models.fields import CharField, AutoField
from django.db.models.fields.related import RelatedField
from django.forms.models import modelform_factory
from django.http import HttpResponse, StreamingHttpResponse, HttpResponseNotFound
from django.http import Http404, HttpResponseNotAllowed, HttpResponseForbidden
from django.shortcuts import render
from django.utils import translation
from django.utils.decorators import method_decorator
from django.utils.encoding import smart_str, force_str
from django.utils.html import escape
from django.utils.translation import gettext as _
from django.utils.formats import get_format
from django.utils.text import capfirst, get_text_list, format_lazy
from django.contrib.admin.models import LogEntry, CHANGE, ADDITION, DELETION
from django.contrib.contenttypes.models import ContentType
from django.views.generic.base import View
from freppledb.boot import getAttributeFields
from freppledb.common.models import (
User,
Comment,
Parameter,
BucketDetail,
Bucket,
HierarchyModel,
)
from freppledb.common.dataload import parseExcelWorksheet, parseCSVdata
logger = logging.getLogger(__name__)
# A list of models with some special, administrative purpose.
# They should be excluded from bulk import, export and erasing actions.
EXCLUDE_FROM_BULK_OPERATIONS = (Group, User, Comment)
separatorpattern = re.compile(r"[\s\-_]+")
def create_connection(alias=DEFAULT_DB_ALIAS):
connections.ensure_defaults(alias)
connections.prepare_test_settings(alias)
db = connections.databases[alias]
backend = load_backend(db["ENGINE"])
return backend.DatabaseWrapper(db, alias)
def matchesModelName(name, model):
"""
    Returns true if the first argument is a valid name for the model passed as the second argument.
    The string must match either:
    - the model name
    - the verbose name
    - the plural verbose name
    The comparison is case insensitive and also ignores whitespace, dashes and underscores.
    The comparison tries to find a match using the currently active language, as well as in English.
"""
checkstring = re.sub(separatorpattern, "", name.lower())
# Try with the localized model names
if checkstring == re.sub(separatorpattern, "", model._meta.model_name.lower()):
return True
elif checkstring == re.sub(separatorpattern, "", model._meta.verbose_name.lower()):
return True
elif checkstring == re.sub(
separatorpattern, "", model._meta.verbose_name_plural.lower()
):
return True
else:
# Try with English model names
with translation.override("en"):
if checkstring == re.sub(
separatorpattern, "", model._meta.model_name.lower()
):
return True
elif checkstring == re.sub(
separatorpattern, "", model._meta.verbose_name.lower()
):
return True
elif checkstring == re.sub(
separatorpattern, "", model._meta.verbose_name_plural.lower()
):
return True
else:
return False
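# Illustration with a hypothetical model: if SalesOrder has verbose_name
# "sales order" and verbose_name_plural "sales orders", then
#   matchesModelName("SalesOrder", SalesOrder)
#   matchesModelName("sales-orders", SalesOrder)
#   matchesModelName("Sales_Order", SalesOrder)
# all return True, since case, whitespace, dashes and underscores are ignored.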
def getHorizon(request, future_only=False):
# Pick up the current date
try:
current = parse(
Parameter.objects.using(request.database).get(name="currentdate").value
)
except Exception:
current = datetime.now()
current = current.replace(microsecond=0)
horizontype = request.GET.get("horizontype", request.user.horizontype)
horizonunit = request.GET.get("horizonunit", request.user.horizonunit)
try:
horizonlength = int(request.GET.get("horizonlength"))
except Exception:
horizonlength = request.user.horizonlength
if horizontype:
# First type: Horizon relative to the current date
start = current.replace(hour=0, minute=0, second=0, microsecond=0)
if horizonunit == "day":
end = start + timedelta(days=horizonlength or 60)
end = end.replace(hour=0, minute=0, second=0)
elif horizonunit == "week":
end = start.replace(hour=0, minute=0, second=0) + timedelta(
weeks=horizonlength or 8, days=7 - start.weekday()
)
else:
y = start.year
m = start.month + (horizonlength or 2) + (start.day > 1 and 1 or 0)
while m > 12:
y += 1
m -= 12
end = datetime(y, m, 1)
else:
# Second type: Absolute start and end dates given
try:
horizonstart = datetime.strptime(
request.GET.get("horizonstart"), "%Y-%m-%d"
)
except Exception:
horizonstart = request.user.horizonstart
try:
horizonend = datetime.strptime(request.GET.get("horizonend"), "%Y-%m-%d")
except Exception:
horizonend = request.user.horizonend
start = horizonstart
if not start or (future_only and start < current):
start = current.replace(hour=0, minute=0, second=0, microsecond=0)
end = horizonend
if end:
if end < start:
if future_only and end < current:
# Special case to assure a minimum number of future buckets
if horizonunit == "day":
end = start + timedelta(days=horizonlength or 60)
elif horizonunit == "week":
end = start + timedelta(weeks=horizonlength or 8)
else:
end = start + timedelta(weeks=horizonlength or 8)
else:
# Swap start and end to assure the start is before the end
tmp = start
start = end
end = tmp
else:
if horizonunit == "day":
end = start + timedelta(days=horizonlength or 60)
elif horizonunit == "week":
end = start + timedelta(weeks=horizonlength or 8)
else:
end = start + timedelta(weeks=horizonlength or 8)
return (current, start, end)
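# Worked example of the relative horizon type (illustrative values): with
# horizontype set, horizonunit "week" and horizonlength 8, start is today at
# 00:00:00 and end is the first day of the following week shifted 8 weeks
# ahead, so the horizon always covers complete weeks.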
class GridField(object):
"""
Base field for columns in grid views.
"""
def __init__(self, name, **kwargs):
self.name = name
for key, value in kwargs.items():
setattr(self, key, value)
if "key" in kwargs:
self.editable = False
if "title" not in kwargs and not self.title:
self.title = self.name and _(self.name) or ""
if not self.name:
self.sortable = False
self.search = False
if "field_name" not in kwargs:
self.field_name = self.name
def __str__(self):
o = [
'"name":"%s","index":"%s","editable":%s,"label":"%s","align":"%s","title":false,"field_name":"%s"'
% (
self.name or "",
self.name or "",
self.editable and "true" or "false",
force_str(self.title).title().replace("'", "\\'"),
self.align,
self.field_name,
)
]
if self.key:
o.append(',"key":true')
if not self.sortable:
o.append(',"sortable":false')
if not self.search:
o.append(',"search":false')
if self.formatter:
o.append(',"formatter":"%s"' % self.formatter)
if self.unformat:
o.append(',"unformat":"%s"' % self.unformat)
if self.searchrules:
o.append(',"searchrules":{%s}' % self.searchrules)
if self.hidden:
o.append(',"alwayshidden":true, "hidden":true')
if self.searchoptions:
o.append(',"searchoptions":%s' % self.searchoptions)
if self.extra:
if callable(self.extra):
o.append(",%s" % force_str(self.extra()))
else:
o.append(",%s" % force_str(self.extra))
return "".join(o)
name = None
field_name = None
formatter = None
width = 100
editable = True
sortable = True
search = True
key = False
unformat = None
title = None
extra = None
align = "center"
searchrules = None
hidden = False # NEVER display this field
initially_hidden = False # Hide the field by default, but allow the user to add it
searchoptions = '{"searchhidden": true}'
class GridFieldDateTime(GridField):
formatter = "date"
extra = '"formatoptions":{"srcformat":"Y-m-d H:i:s","newformat":"Y-m-d H:i:s"}'
searchoptions = (
'{"sopt":["cn","eq","ne","lt","le","gt","ge","win"],"searchhidden": true}'
)
width = 140
class GridFieldTime(GridField):
formatter = "time"
extra = '"formatoptions":{"srcformat":"H:i:s","newformat":"H:i:s"}'
width = 80
class GridFieldDate(GridField):
formatter = "date"
extra = '"formatoptions":{"srcformat":"Y-m-d","newformat":"Y-m-d"}'
searchoptions = (
'{"sopt":["cn","eq","ne","lt","le","gt","ge","win"],"searchhidden":true}'
)
width = 140
class GridFieldInteger(GridField):
formatter = "integer"
extra = '"formatoptions":{"defaultValue": ""}'
searchoptions = (
'{sopt:["eq","ne","in","ni","lt","le","gt","ge"],"searchhidden":true}'
)
width = 70
searchrules = '"integer":true'
class GridFieldNumber(GridField):
formatter = "number"
extra = '"formatoptions":{"defaultValue":"","decimalPlaces":"auto"}'
searchoptions = (
'{sopt:["eq","ne","in","ni","lt","le","gt","ge"],"searchhidden":true}'
)
width = 70
searchrules = '"number":true'
class GridFieldBool(GridField):
extra = '"formatoptions":{"disabled":false}, "edittype":"checkbox", "editoptions":{"value":"True:False"}'
width = 60
class GridFieldLastModified(GridField):
formatter = "date"
extra = '"formatoptions":{"srcformat":"Y-m-d H:i:s","newformat":"Y-m-d H:i:s"}'
searchoptions = '{"sopt":["cn","em","nm","in","ni","eq","bw","ew","bn","nc","en","win"],"searchhidden":true}'
title = _("last modified")
editable = False
width = 140
class GridFieldJSON(GridField):
width = 200
align = "left"
searchoptions = '{"sopt":["cn","nc"],"searchhidden":true}'
class GridFieldLocalDateTime(GridFieldDateTime):
pass
class GridFieldText(GridField):
width = 200
align = "left"
searchoptions = '{"sopt":["cn","nc","eq","ne","lt","le","gt","ge","bw","bn","in","ni","ew","en"],"searchhidden":true}'
class GridFieldChoice(GridField):
width = 100
align = "center"
def __init__(self, name, **kwargs):
super().__init__(name, **kwargs)
e = ['"formatter":"select", "edittype":"select", "editoptions":{"value":"']
first = True
self.choices = kwargs.get("choices", [])
for i in self.choices:
if first:
first = False
e.append("%s:" % i[0])
else:
e.append(";%s:" % i[0])
e.append(i[1])
e.append('"}')
self.extra = format_lazy("{}" * len(e), *e)
def validateValues(self, data):
result = []
for f in data.split(","):
for c in self.choices:
if f.lower() in (c[0].lower(), force_str(c[1]).lower()):
result.append(c[0])
return ",".join(result)
class GridFieldBoolNullable(GridFieldChoice):
width = 60
def __init__(self, name, **kwargs):
kwargs["choices"] = (("", ""), ("False", _("No")), ("True", _("Yes")))
super().__init__(name, **kwargs)
def getCurrency():
try:
cur = Parameter.getValue("currency").split(",")
if len(cur) < 2:
return ("", " %s" % escape(cur[0]))
else:
return ("%s " % escape(cur[0]), " %s" % escape(cur[1]))
except Exception:
return ("", " $")
class GridFieldCurrency(GridField):
formatter = "currency"
searchoptions = (
'{sopt:["eq","ne","in","ni","lt","le","gt","ge"],"searchhidden":true}'
)
def extra(self):
cur = getCurrency()
return '"formatoptions":%s' % json.dumps(
{"prefix": cur[0], "suffix": cur[1], "defaultvalue": ""}
)
width = 80
class GridFieldDuration(GridField):
formatter = "duration"
width = 80
searchoptions = (
'{sopt:["eq","ne","in","ni","lt","le","gt","ge"],"searchhidden":true}'
)
class EncodedCSVReader:
"""
A CSV reader which will iterate over lines in the CSV data buffer.
The reader will scan the BOM header in the data to detect the right encoding.
"""
def __init__(self, datafile, **kwds):
# Read the file into memory
# TODO Huge file uploads can overwhelm your system!
data = datafile.read()
# Detect the encoding of the data by scanning the BOM.
# Skip the BOM header if it is found.
if data.startswith(codecs.BOM_UTF32_BE):
self.reader = StringIO(data.decode("utf_32_be"))
self.reader.read(1)
elif data.startswith(codecs.BOM_UTF32_LE):
self.reader = StringIO(data.decode("utf_32_le"))
self.reader.read(1)
elif data.startswith(codecs.BOM_UTF16_BE):
self.reader = StringIO(data.decode("utf_16_be"))
self.reader.read(1)
elif data.startswith(codecs.BOM_UTF16_LE):
self.reader = StringIO(data.decode("utf_16_le"))
self.reader.read(1)
elif data.startswith(codecs.BOM_UTF8):
self.reader = StringIO(data.decode("utf_8"))
self.reader.read(1)
else:
# No BOM header found. We assume the data is encoded in the default CSV character set.
self.reader = StringIO(data.decode(settings.CSV_CHARSET))
self.csvreader = csv.reader(self.reader, **kwds)
def __next__(self):
return next(self.csvreader)
def __iter__(self):
return self
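# Typical usage sketch (the uploaded file object and its key are assumptions):
#   reader = EncodedCSVReader(request.FILES["csv_file"], delimiter=",")
#   for row in reader:
#       ...  # row is a list of strings, decoded with the BOM already skipped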
class GridReport(View):
"""
The base class for all jqgrid views.
The parameter values defined here are used as defaults for all reports, but
can be overwritten.
"""
# Points to template to be used
template = "admin/base_site_grid.html"
# The title of the report. Used for the window title
title = ""
# A optional text shown after the title in the content.
# It is however not added in the page title or the breadcrumb name
post_title = ""
# Link to the documentation
help_url = None
# The resultset that returns a list of entities that are to be
# included in the report.
# This query is used to return the number of records.
# It is also used to generate the actual results, in case no method
# "query" is provided on the class.
basequeryset = None
# Specifies which column is used for an initial ordering
default_sort = (0, "asc")
# A model class from which we can inherit information.
model = None
# Allow editing in this report or not
editable = True
# Allow filtering of the results or not
filterable = True
# Include time bucket support in the report
hasTimeBuckets = False
# Allow to exclude time buckets in the past
showOnlyFutureTimeBuckets = False
# Default logic: if there is an argument to the report, we always show table + graph
# New logic: if there is an argument, we can still choose whether or not to use table and/or graph
# Not very clean, but doing otherwise is backward incompatible and needs changing quite some templates :-(
new_arg_logic = False
# Allow this report to automatically restore the previous filter
# (unless a filter is already applied explicitly in the URL of course)
autofilter = True
# Specify a minimum level for the time buckets available in the report.
# Higher values (ie more granular) buckets can then not be selected.
maxBucketLevel = None
minBucketLevel = None
# Show a select box in front to allow selection of records
multiselect = True
# Control the height of the grid. By default the full browser window is used.
height = None
# Number of columns frozen in the report
frozenColumns = 0
# A list with required user permissions to view the report
permissions = ()
# Defines the difference between height of the grid and its boundaries
heightmargin = 75
# Define a list of actions
actions = None
_attributes_added = False
@classmethod
def getKey(cls, request, *args, **kwargs):
return "%s.%s" % (cls.__module__, cls.__name__)
@classmethod
def _localize(cls, value, decimal_separator):
"""
Localize numbers.
Dates are always represented as YYYY-MM-DD hh:mm:ss since this is
a format that is understood uniformly across different regions in the
world.
"""
if callable(value):
value = value()
if isinstance(value, numericTypes):
return (
decimal_separator == "," and str(value).replace(".", ",") or str(value)
)
elif isinstance(value, timedelta):
return _parseSeconds(value)
elif isinstance(value, (list, tuple)):
return "|".join([str(cls._localize(i, decimal_separator)) for i in value])
else:
return value
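    # For instance, with decimal_separator "," the number 1234.5 is rendered as
    # "1234,5", a timedelta is rendered through _parseSeconds, and a list such
    # as [1, 2.5] becomes "1|2,5".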
@staticmethod
def getBOM(encoding):
try:
# Get the official name of the encoding (since encodings can have many alias names)
name = codecs.lookup(encoding).name
except Exception:
return "" # Unknown encoding, without BOM header
if name == "utf-32-be":
return codecs.BOM_UTF32_BE
elif name == "utf-32-le":
return codecs.BOM_UTF32_LE
elif name == "utf-16-be":
return codecs.BOM_UTF16_BE
elif name == "utf-16-le":
return codecs.BOM_UTF16_LE
elif name == "utf-8":
return codecs.BOM_UTF8
else:
return ""
@classmethod
def getAppLabel(cls):
"""
Return the name of the Django application which defines this report.
"""
if hasattr(cls, "app_label"):
return cls.app_label
s = cls.__module__.split(".")
for i in range(len(s), 0, -1):
x = ".".join(s[0:i])
if x in settings.INSTALLED_APPS:
cls.app_label = s[i - 1]
return cls.app_label
raise Exception("Can't identify app of reportclass %s" % cls)
# Extra variables added to the report template
@classmethod
def extra_context(cls, request, *args, **kwargs):
return {}
@staticmethod
def _getJSONValue(data, field=None, request=None):
if isinstance(data, str) or isinstance(data, (list, tuple)):
return json.dumps(data)
elif isinstance(data, timedelta):
return data.total_seconds()
elif data is None:
return '""'
elif (
isinstance(data, datetime)
and isinstance(field, (GridFieldLastModified, GridFieldLocalDateTime))
and request
):
if not hasattr(request, "tzoffset"):
request.tzoffset = GridReport.getTimezoneOffset(request)
return '"%s"' % (data + request.tzoffset)
else:
return '"%s"' % data
@classmethod
def _getCSVValue(cls, data, field=None, request=None, decimal_separator=""):
if data is None:
return ""
else:
if (
isinstance(data, datetime)
and isinstance(field, (GridFieldLastModified, GridFieldLocalDateTime))
and request
):
if not hasattr(request, "tzoffset"):
request.tzoffset = GridReport.getTimezoneOffset(request)
data += request.tzoffset
return force_str(
cls._localize(data, decimal_separator),
encoding=settings.CSV_CHARSET,
errors="ignore",
)
@classmethod
def getBuckets(cls, request, *args, **kwargs):
"""
        This function gets passed the name of a bucketization.
        It determines and stores on the request:
        - the start date of the report horizon
        - the end date of the report horizon
        - the list of buckets.
        The function takes into consideration some special flags:
- showOnlyFutureTimeBuckets: filter to allow only future time buckets to be shown
- maxBucketLevel: respect the lowest supported level in the time bucket hierarchy
- minBucketLevel: respect the highest supported level in the time bucket hierarchy
"""
# Select the bucket size
if not cls.maxBucketLevel:
maxlvl = 999
elif callable(cls.maxBucketLevel):
maxlvl = cls.maxBucketLevel(request)
else:
maxlvl = cls.maxBucketLevel
if not cls.minBucketLevel:
minlvl = -999
elif callable(cls.minBucketLevel):
minlvl = cls.minBucketLevel(request)
else:
minlvl = cls.minBucketLevel
arg_buckets = request.GET.get("buckets", None)
try:
bucket = (
Bucket.objects.using(request.database)
.get(
name=arg_buckets or request.user.horizonbuckets,
level__lte=maxlvl,
level__gte=minlvl,
)
.name
)
except Exception:
try:
bucket = (
Bucket.objects.using(request.database)
.filter(level__lte=maxlvl, level__gte=minlvl)
.order_by("-level")[0]
.name
)
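    # Worked example of the nested-set arithmetic above: a topic with lft=1 and
    # rght=8 encloses exactly (8 - 1 - 1) / 2 = 3 descendant nodes.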
except Exception:
bucket = None
if not arg_buckets and not request.user.horizonbuckets and bucket:
request.user.horizonbuckets = bucket
request.user.save()
# Get the report horizon
current, start, end = getHorizon(
request, future_only=cls.showOnlyFutureTimeBuckets
)
# Filter based on the start and end date
request.current_date = str(current)
request.report_startdate = start
request.report_enddate = end
request.report_bucket = str(bucket)
if bucket:
res = BucketDetail.objects.using(request.database).filter(bucket=bucket)
if start:
res = res.filter(enddate__gt=start)
if end:
res = res.filter(startdate__lt=end)
request.report_bucketlist = res.values("name", "startdate", "enddate")
else:
request.report_bucketlist = []
@staticmethod
def getTimezoneOffset(request):
"""
Return the difference between the end user's UTC offset and the server's UTC offset
"""
return timedelta(
seconds=timezone - int(request.COOKIES.get("tzoffset", 0)) - daylight * 3600
)
@classmethod
def has_permission(cls, user):
for perm in cls.permissions:
if not user.has_perm("auth.%s" % perm[0]):
return False
if cls.model:
return user.has_perm(
"%s.view_%s" % (cls.model._meta.app_label, cls.model._meta.model_name)
)
return True
@method_decorator(staff_member_required)
@method_decorator(csrf_protect)
def dispatch(self, request, *args, **kwargs):
# Verify the user is authorized to view the report
if not self.has_permission(request.user):
return HttpResponseForbidden("<h1>%s</h1>" % _("Permission denied"))
# Unescape special characters in the arguments.
        # All arguments are encoded with the escaping function used in the Django admin.
args_unquoted = [unquote(i) for i in args]
# Add attributes if not done already
if hasattr(self.__class__, "initialize"):
self.__class__.initialize(request)
if not self._attributes_added and self.model:
self.__class__._attributes_added = True
for f in getAttributeFields(self.model):
self.__class__.rows += (f,)
# Set row and cross attributes on the request
if hasattr(self, "rows"):
if callable(self.rows):
request.rows = self.rows(request, *args, **kwargs)
else:
request.rows = self.rows
if hasattr(self, "crosses"):
if callable(self.crosses):
request.crosses = self.crosses(request, *args, **kwargs)
else:
request.crosses = self.crosses
# Dispatch to the correct method
if request.method == "GET":
return self.get(request, *args_unquoted, **kwargs)
elif request.method == "POST":
return self.post(request, *args_unquoted, **kwargs)
else:
return HttpResponseNotAllowed(["get", "post"])
@classmethod
def _validate_rows(cls, request, prefs):
if not prefs:
return [
(
i,
request.rows[i].hidden or request.rows[i].initially_hidden,
request.rows[i].width,
)
for i in range(len(request.rows))
]
else:
# Validate the preferences to 1) map from name to index, 2) assure all rows
# are included, 3) ignore non-existing fields
defaultrows = {request.rows[i].name: i for i in range(len(request.rows))}
rows = []
for r in prefs:
try:
idx = int(r[0])
if idx < len(request.rows):
defaultrows.pop(request.rows[idx].name, None)
rows.append(r)
except (ValueError, IndexError):
if r[0] in defaultrows:
rows.append((defaultrows[r[0]], r[1], r[2]))
defaultrows.pop(r[0], None)
for r, idx in defaultrows.items():
rows.append(
(
idx,
request.rows[idx].hidden or request.rows[idx].initially_hidden,
request.rows[idx].width,
)
)
return rows
@classmethod
def _render_colmodel(
cls, request, is_popup=False, prefs=None, mode="graph", *args, **kwargs
):
if not prefs:
frozencolumns = cls.frozenColumns
rows = [
(i, request.rows[i].initially_hidden, request.rows[i].width)
for i in range(len(request.rows))
]
else:
frozencolumns = prefs.get("frozen", cls.frozenColumns)
rows = cls._validate_rows(request, prefs.get("rows"))
result = []
if is_popup:
result.append(
'{"name":"select","label":gettext("Select"),"width":75,"align":"center","formatter":"selectbutton","sortable":false,"search":false}'
)
count = -1
for (index, hidden, width) in rows:
count += 1
try:
result.append(
'{%s,"width":%s,"counter":%d%s%s%s}'
% (
request.rows[index],
width,
index,
count < frozencolumns and ',"frozen":true' or "",
is_popup and ',"popup":true' or "",
hidden
and not request.rows[index].hidden
and ',"hidden":true'
or "",
)
)
except IndexError:
logger.warning(
"Invalid preference value for %s: %s"
% (cls.getKey(request, *args, **kwargs), prefs)
)
return ",\n".join(result)
@classmethod
def _generate_spreadsheet_data(
cls, request, scenario_list, output, *args, **kwargs
):
# Create a workbook
wb = Workbook(write_only=True)
if callable(cls.title):
title = cls.title(request, *args, **kwargs)
else:
title = cls.model._meta.verbose_name_plural if cls.model else cls.title
# Excel can't have longer worksheet names
ws = wb.create_sheet(title=force_str(title)[:31])
# Create a named style for the header row
headerstyle = NamedStyle(name="headerstyle")
headerstyle.fill = PatternFill(fill_type="solid", fgColor="70c4f4")
wb.add_named_style(headerstyle)
readlonlyheaderstyle = NamedStyle(name="readlonlyheaderstyle")
readlonlyheaderstyle.fill = PatternFill(fill_type="solid", fgColor="d0ebfb")
wb.add_named_style(readlonlyheaderstyle)
# Choose fields to export and write the title row
if not hasattr(request, "prefs"):
request.prefs = request.user.getPreference(
cls.getKey(request, *args, **kwargs), database=request.database
)
if request.prefs and request.prefs.get("rows", None):
# Customized settings
fields = [
request.rows[f[0]]
for f in cls._validate_rows(request, request.prefs["rows"])
if not f[1] and not request.rows[f[0]].hidden
]
else:
# Default settings
fields = [
i
for i in request.rows
if i.field_name and not i.hidden and not i.initially_hidden
]
# Write a formatted header row
header = []
comment = None
for f in fields:
cell = WriteOnlyCell(ws, value=force_str(f.title).title())
if f.editable or f.key:
cell.style = "headerstyle"
fname = getattr(f, "field_name", f.name)
if not f.key and f.formatter == "detail" and fname.endswith("__name"):
cell.comment = CellComment(
force_str(
_("Values in this field must exist in the %s table")
% force_str(_(fname[:-6]))
),
"Author",
)
elif isinstance(f, GridFieldChoice):
cell.comment = CellComment(
force_str(
_("Accepted values are: %s")
% ", ".join([c[0] for c in f.choices])
),
"Author",
)
else:
cell.style = "readlonlyheaderstyle"
if not comment:
comment = CellComment(
force_str(_("Read only")), "Author", height=20, width=80
)
cell.comment = comment
header.append(cell)
if len(scenario_list) > 1:
cell = WriteOnlyCell(ws, value=force_str(_("scenario")).title())
cell.style = "readlonlyheaderstyle"
header.insert(0, cell)
ws.append(header)
# Add an auto-filter to the table
ws.auto_filter.ref = "A1:%s1048576" % get_column_letter(len(header))
original_database = request.database
try:
for scenario in scenario_list:
request.database = scenario
# Loop over all records
for row in cls.data_query(request, *args, fields=fields, **kwargs):
if hasattr(row, "__getitem__"):
r = [
_getCellValue(row[f.field_name], field=f, request=request)
for f in fields
]
else:
r = [
_getCellValue(
getattr(row, f.field_name), field=f, request=request
)
for f in fields
]
if len(scenario_list) > 1:
r.insert(0, scenario)
ws.append(r)
finally:
request.database = original_database
# Write the spreadsheet
wb.save(output)
@classmethod
def _generate_csv_data(cls, request, scenario_list, *args, **kwargs):
sf = StringIO()
decimal_separator = get_format("DECIMAL_SEPARATOR", request.LANGUAGE_CODE, True)
if decimal_separator == ",":
writer = csv.writer(sf, quoting=csv.QUOTE_NONNUMERIC, delimiter=";")
else:
writer = csv.writer(sf, quoting=csv.QUOTE_NONNUMERIC, delimiter=",")
if translation.get_language() != request.LANGUAGE_CODE:
translation.activate(request.LANGUAGE_CODE)
# Write a Unicode Byte Order Mark header, aka BOM (Excel needs it to open UTF-8 file properly)
yield cls.getBOM(settings.CSV_CHARSET)
# Choose fields to export
if not hasattr(request, "prefs"):
request.prefs = request.user.getPreference(
cls.getKey(request, *args, **kwargs), database=request.database
)
if request.prefs and request.prefs.get("rows", None):
# Customized settings
custrows = cls._validate_rows(request, request.prefs["rows"])
r = [
force_str(
request.rows[f[0]].title,
encoding=settings.CSV_CHARSET,
errors="ignore",
).title()
for f in custrows
if not f[1] and not request.rows[f[0]].hidden
]
if len(scenario_list) > 1:
r.insert(0, _("scenario"))
writer.writerow(r)
fields = [
request.rows[f[0]]
for f in custrows
if not f[1] and not request.rows[f[0]].hidden
]
else:
# Default settings
r = [
force_str(
f.title, encoding=settings.CSV_CHARSET, errors="ignore"
).title()
for f in request.rows
if f.title and not f.hidden and not f.initially_hidden
]
if len(scenario_list) > 1:
r.insert(0, _("scenario"))
writer.writerow(r)
fields = [
i
for i in request.rows
if i.field_name and not i.hidden and not i.initially_hidden
]
# Write a header row
yield sf.getvalue()
# Write the report content
original_database = request.database
try:
for scenario in scenario_list:
request.database = scenario
for row in cls.data_query(request, *args, fields=fields, **kwargs):
# Clear the return string buffer
sf.seek(0)
sf.truncate(0)
# Build the return value, encoding all output
if hasattr(row, "__getitem__"):
r = [
cls._getCSVValue(
row[f.field_name],
field=f,
request=request,
decimal_separator=decimal_separator,
)
for f in fields
]
else:
r = [
cls._getCSVValue(
getattr(row, f.field_name),
field=f,
request=request,
decimal_separator=decimal_separator,
)
for f in fields
]
if len(scenario_list) > 1:
r.insert(0, scenario)
writer.writerow(r)
# Return string
yield sf.getvalue()
finally:
request.database = original_database
@classmethod
def getSortName(cls, request):
"""
Build a jqgrid sort configuration pair sidx and sord:
For instance:
("fieldname1 asc, fieldname2", "desc")
"""
if request.GET.get("sidx", ""):
# 1) Sorting order specified on the request
return (request.GET["sidx"], request.GET.get("sord", "asc"))
elif request.prefs:
# 2) Sorting order from the preferences
sortname = (
request.prefs.get("sidx", None),
request.prefs.get("sord", "asc"),
)
if sortname[0] and sortname[1]:
return sortname
# 3) Default sort order
if not cls.default_sort:
return ("", "")
elif len(cls.default_sort) >= 6:
return (
"%s %s, %s %s, %s"
% (
request.rows[cls.default_sort[0]].name,
cls.default_sort[1],
request.rows[cls.default_sort[2]].name,
cls.default_sort[3],
request.rows[cls.default_sort[4]].name,
),
cls.default_sort[5],
)
elif len(cls.default_sort) >= 4:
return (
"%s %s, %s"
% (
request.rows[cls.default_sort[0]].name,
cls.default_sort[1],
request.rows[cls.default_sort[2]].name,
),
cls.default_sort[3],
)
elif len(cls.default_sort) >= 2:
return (request.rows[cls.default_sort[0]].name, cls.default_sort[1])
@classmethod
def _apply_sort(cls, request, query):
"""
Applies a sort to the query.
"""
sortname = None
if request.GET.get("sidx", ""):
# 1) Sorting order specified on the request
sortname = "%s %s" % (request.GET["sidx"], request.GET.get("sord", "asc"))
elif request.prefs:
# 2) Sorting order from the preferences
sortname = "%s %s" % (
request.prefs.get("sidx", ""),
request.GET.get("sord", "asc"),
)
if not sortname or sortname == " asc":
# 3) Default sort order
if not cls.default_sort:
return query
            elif len(cls.default_sort) >= 6:
return query.order_by(
request.rows[cls.default_sort[0]].field_name
if cls.default_sort[1] == "asc"
else ("-%s" % request.rows[cls.default_sort[0]].field_name),
request.rows[cls.default_sort[2]].field_name
if cls.default_sort[3] == "asc"
else ("-%s" % request.rows[cls.default_sort[2]].field_name),
request.rows[cls.default_sort[4]].field_name
if cls.default_sort[5] == "asc"
else ("-%s" % request.rows[cls.default_sort[4]].field_name),
)
elif len(cls.default_sort) >= 4:
return query.order_by(
request.rows[cls.default_sort[0]].field_name
if cls.default_sort[1] == "asc"
else ("-%s" % request.rows[cls.default_sort[0]].field_name),
request.rows[cls.default_sort[2]].field_name
if cls.default_sort[3] == "asc"
else ("-%s" % request.rows[cls.default_sort[2]].field_name),
)
elif len(cls.default_sort) >= 2:
return query.order_by(
request.rows[cls.default_sort[0]].field_name
if cls.default_sort[1] == "asc"
else ("-%s" % request.rows[cls.default_sort[0]].field_name)
)
else:
return query
else:
# Validate the field does exist.
# We only validate the first level field, and not the fields
# on related models.
sortargs = []
for s in sortname.split(","):
stripped = s.strip()
if not stripped:
continue
sortfield, direction = stripped.split(" ", 1)
try:
query.order_by(sortfield).query.__str__()
if direction.strip() != "desc":
sortargs.append(sortfield)
else:
sortargs.append("-%s" % sortfield)
except Exception:
for r in request.rows:
if r.name == sortfield:
try:
query.order_by(r.field_name).query.__str__()
if direction.strip() != "desc":
sortargs.append(r.field_name)
else:
sortargs.append("-%s" % r.field_name)
except Exception:
# Can't sort on this field
pass
break
if sortargs:
return query.order_by(*sortargs)
else:
return query
@classmethod
def _apply_sort_index(cls, request):
"""
Build an SQL fragment to sort on: Eg "1 asc, 2 desc"
"""
sortname = None
if request.GET.get("sidx", ""):
# 1) Sorting order specified on the request
sortname = "%s %s" % (request.GET["sidx"], request.GET.get("sord", "asc"))
elif request.prefs:
# 2) Sorting order from the preferences
sortname = "%s %s" % (
request.prefs.get("sidx", ""),
request.prefs.get("sord", "asc"),
)
if not sortname or sortname == " asc":
# 3) Default sort order
if not cls.default_sort:
return "1 asc"
            elif len(cls.default_sort) >= 6:
return "%s %s, %s %s, %s %s" % (
cls.default_sort[0] + 1,
cls.default_sort[1],
cls.default_sort[2] + 1,
cls.default_sort[3],
cls.default_sort[4] + 1,
cls.default_sort[5],
)
elif len(cls.default_sort) >= 4:
return "%s %s, %s %s" % (
cls.default_sort[0] + 1,
cls.default_sort[1],
cls.default_sort[2] + 1,
cls.default_sort[3],
)
elif len(cls.default_sort) >= 2:
return "%s %s" % (cls.default_sort[0] + 1, cls.default_sort[1])
else:
return "1 asc"
else:
# Validate the field does exist.
# We only validate the first level field, and not the fields
# on related models.
sortargs = []
for s in sortname.split(","):
sortfield, dir = s.strip().split(" ", 1)
idx = 1
has_one = False
for i in request.rows:
if i.name == sortfield:
sortargs.append(
"%s %s" % (idx, "desc" if dir == "desc" else "asc")
)
if idx == 1:
has_one = True
idx += 1
if sortargs:
if not has_one:
sortargs.append("1 asc")
return ", ".join(sortargs)
else:
return "1 asc"
@classmethod
def defaultSortString(cls, request):
if not cls.default_sort:
return " asc"
elif len(cls.default_sort) >= 6:
return "%s %s, %s %s, %s %s" % (
request.rows[cls.default_sort[0]].name,
cls.default_sort[1],
request.rows[cls.default_sort[2]].name,
cls.default_sort[3],
request.rows[cls.default_sort[4]].name,
cls.default_sort[5],
)
        elif len(cls.default_sort) >= 4:
            return "%s %s, %s %s" % (
                request.rows[cls.default_sort[0]].name,
                cls.default_sort[1],
                request.rows[cls.default_sort[2]].name,
                cls.default_sort[3],
            )
elif len(cls.default_sort) >= 2:
return "%s %s" % (
request.rows[cls.default_sort[0]].name,
cls.default_sort[1],
)
else:
return " asc"
@classmethod
def get_sort(cls, request):
try:
if "sidx" in request.GET:
# Special case when views have grouping.
# The group-by column is then added automatically.
column = request.GET["sidx"]
comma = column.find(",")
if comma > 0:
column = column[comma + 2 :]
sort = 1
ok = False
for r in request.rows:
if r.name == column:
ok = True
break
sort += 1
if not ok:
sort = cls.default_sort[0]
else:
sort = cls.default_sort[0]
except Exception:
sort = cls.default_sort[0]
if request.GET.get("sord", None) == "desc" or cls.default_sort[1] == "desc":
return "%s desc" % sort
else:
return "%s asc" % sort
@classmethod
def data_query(cls, request, *args, fields=None, page=None, **kwargs):
if not fields:
raise Exception("No fields gives")
if not hasattr(request, "query"):
if callable(cls.basequeryset):
request.query = cls.filter_items(
request, cls.basequeryset(request, *args, **kwargs), False
).using(request.database)
else:
request.query = cls.filter_items(request, cls.basequeryset).using(
request.database
)
query = cls._apply_sort(request, request.query)
if page:
cnt = (page - 1) * request.pagesize + 1
if hasattr(cls, "query"):
return cls.query(request, query[cnt - 1 : cnt + request.pagesize])
else:
return query[cnt - 1 : cnt + request.pagesize].values(*fields)
else:
if hasattr(cls, "query"):
return cls.query(request, query)
else:
fields = [i.field_name for i in request.rows if i.field_name]
return query.values(*fields)
@classmethod
def count_query(cls, request, *args, **kwargs):
if not hasattr(request, "query"):
if callable(cls.basequeryset):
request.query = cls.filter_items(
request, cls.basequeryset(request, *args, **kwargs), False
).using(request.database)
else:
request.query = cls.filter_items(request, cls.basequeryset).using(
request.database
)
with connections[request.database].cursor() as cursor:
tmp = request.query.query.get_compiler(request.database).as_sql(
with_col_aliases=False
)
cursor.execute("select count(*) from (" + tmp[0] + ") t_subquery", tmp[1])
return cursor.fetchone()[0]
@classmethod
def _generate_json_data(cls, request, *args, **kwargs):
request.prefs = request.user.getPreference(
cls.getKey(request, *args, **kwargs), database=request.database
)
recs = cls.count_query(request, *args, **kwargs)
page = "page" in request.GET and int(request.GET["page"]) or 1
total_pages = math.ceil(float(recs) / request.pagesize)
if page > total_pages:
page = total_pages
if page < 1:
page = 1
yield '{"total":%d,\n' % total_pages
yield '"page":%d,\n' % page
yield '"records":%d,\n' % recs
if hasattr(cls, "extraJSON"):
# Hook to insert extra fields to the json
tmp = cls.extraJSON(request)
if tmp:
yield tmp
yield '"rows":[\n'
# GridReport
first = True
fields = [i.field_name for i in request.rows if i.field_name]
for i in cls.data_query(request, *args, fields=fields, page=page, **kwargs):
if first:
r = ["{"]
first = False
else:
r = [",\n{"]
first2 = True
for f in request.rows:
if not f.name:
continue
s = cls._getJSONValue(i[f.field_name], field=f, request=request)
if first2:
r.append('"%s":%s' % (f.name, s))
first2 = False
elif i[f.field_name] is not None:
r.append(', "%s":%s' % (f.name, s))
r.append("}")
yield "".join(r)
yield "\n]}\n"
@classmethod
def post(cls, request, *args, **kwargs):
if len(request.FILES) > 0:
# Note: the detection of the type of uploaded file depends on the
# browser setting the right mime type of the file.
csvcount = 0
xlscount = 0
for filename, file in request.FILES.items():
if (
file.content_type
== "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"
):
xlscount += 1
elif filename.endswith(".xls"):
return HttpResponseNotFound(
"""
Files in the old .XLS excel format can't be read.<br>
Please convert them to the new .XLSX format.
"""
)
else:
csvcount += 1
if csvcount == 0:
# Uploading a spreadsheet file
return StreamingHttpResponse(
content_type="text/plain; charset=%s" % settings.DEFAULT_CHARSET,
streaming_content=cls.parseSpreadsheetUpload(request),
)
elif xlscount == 0:
# Uploading a CSV file
return StreamingHttpResponse(
content_type="text/plain; charset=%s" % settings.DEFAULT_CHARSET,
streaming_content=cls.parseCSVupload(request),
)
else: # mixed files
return HttpResponseNotFound("Files must have the same type.")
else:
# Saving after inline edits
return cls.parseJSONupload(request)
@classmethod
def _validate_crosses(cls, request, prefs):
cross_idx = []
for i in prefs:
try:
num = int(i)
if num < len(request.crosses) and request.crosses[num][1].get(
"visible", True
):
cross_idx.append(num)
except ValueError:
for j in range(len(request.crosses)):
if request.crosses[j][0] == i and request.crosses[j][1].get(
"visible", True
):
cross_idx.append(j)
return cross_idx
@classmethod
def getScenarios(cls, request, *args, **kwargs):
scenario_permissions = []
if len(request.user.scenarios) > 1:
original_database = request.database
for scenario in request.user.scenarios:
try:
# request.database needs to be changed for has_perm to work properly
request.database = scenario.name
user = User.objects.using(scenario.name).get(
username=request.user.username
)
if cls.has_permission(user):
scenario_permissions.append(
[
scenario.name,
scenario.description
if scenario.description
else scenario.name,
1 if request.database == original_database else 0,
]
)
except Exception:
pass
# reverting to original request database as permissions are checked
request.database = original_database
return scenario_permissions
@classmethod
def get(cls, request, *args, **kwargs):
# Pick up the list of time buckets
if cls.hasTimeBuckets:
cls.getBuckets(request, args, kwargs)
bucketnames = Bucket.objects.using(request.database)
if cls.maxBucketLevel:
if callable(cls.maxBucketLevel):
maxlvl = cls.maxBucketLevel(request)
bucketnames = bucketnames.filter(level__lte=maxlvl)
else:
bucketnames = bucketnames.filter(level__lte=cls.maxBucketLevel)
if cls.minBucketLevel:
if callable(cls.minBucketLevel):
minlvl = cls.minBucketLevel(request)
bucketnames = bucketnames.filter(level__gte=minlvl)
else:
bucketnames = bucketnames.filter(level__gte=cls.minBucketLevel)
bucketnames = bucketnames.order_by("-level").values_list("name", flat=True)
else:
bucketnames = None
fmt = request.GET.get("format", None)
reportkey = cls.getKey(request, *args, **kwargs)
request.prefs = request.user.getPreference(reportkey, database=request.database)
if request.prefs:
kwargs["preferences"] = request.prefs
# scenario_permissions is used to display multiple scenarios in the export dialog
if len(request.user.scenarios) > 1:
scenario_permissions = cls.getScenarios(request, *args, **kwargs)
else:
scenario_permissions = []
if not fmt:
# Return HTML page
if not hasattr(request, "crosses"):
cross_idx = None
cross_list = None
elif request.prefs and "crosses" in request.prefs:
cross_idx = str(
cls._validate_crosses(request, request.prefs["crosses"])
)
cross_list = cls._render_cross(request)
else:
cross_idx = str(
[
i
for i in range(len(request.crosses))
if request.crosses[i][1].get("visible", True)
and not request.crosses[i][1].get("initially_hidden", False)
]
)
cross_list = cls._render_cross(request)
if args and not cls.new_arg_logic:
mode = "table"
else:
mode = request.GET.get("mode", None)
if mode:
# Store the mode passed in the URL on the session to remember for the next report
request.session["mode"] = mode
else:
# Pick up the mode from the session
mode = request.session.get("mode", "graph")
is_popup = "_popup" in request.GET
sidx, sord = cls.getSortName(request)
autofilter = "noautofilter" not in request.GET and cls.autofilter
filters = cls.getQueryString(request)
if not filters and request.prefs and autofilter:
# Inherit the filter settings from the preferences
filters = request.prefs.get("filter", None)
if request.prefs and autofilter:
page = request.prefs.get("page", 1)
else:
page = 1
context = {
"reportclass": cls,
"title": _("%(title)s for %(entity)s")
% {"title": force_str(cls.title), "entity": force_str(args[0])}
if args and args[0]
else cls.title,
"post_title": cls.post_title,
"preferences": request.prefs,
"reportkey": reportkey,
"colmodel": cls._render_colmodel(
request, is_popup, request.prefs, mode, *args, **kwargs
),
"cross_idx": cross_idx,
"cross_list": cross_list,
"object_id": args and quote(args[0]) or None,
"page": page,
"sord": sord,
"sidx": sidx,
"default_sort": cls.defaultSortString(request),
"is_popup": is_popup,
"filters": json.loads(filters) if filters else None,
"args": args,
"bucketnames": bucketnames,
"model": cls.model,
"scenario_permissions": scenario_permissions,
"hasaddperm": cls.editable
and cls.model
and request.user.has_perm(
"%s.%s"
% (
cls.model._meta.app_label,
get_permission_codename("add", cls.model._meta),
)
),
"hasdeleteperm": cls.editable
and cls.model
and request.user.has_perm(
"%s.%s"
% (
cls.model._meta.app_label,
get_permission_codename("delete", cls.model._meta),
)
),
"haschangeperm": cls.editable
and cls.model
and request.user.has_perm(
"%s.%s"
% (
cls.model._meta.app_label,
get_permission_codename("change", cls.model._meta),
)
),
"active_tab": "plan",
"mode": mode,
"actions": cls.actions,
}
for k, v in cls.extra_context(request, *args, **kwargs).items():
context[k] = v
return render(request, cls.template, context)
elif fmt == "json":
# Return JSON data to fill the grid.
response = StreamingHttpResponse(
content_type="application/json; charset=%s" % settings.DEFAULT_CHARSET,
streaming_content=cls._generate_json_data(request, *args, **kwargs),
)
response["Cache-Control"] = "no-cache, no-store"
return response
elif fmt in ("spreadsheetlist", "spreadsheettable", "spreadsheet"):
scenarios = request.GET.get("scenarios", None)
scenario_list = scenarios.split(",") if scenarios else [request.database]
# Make sure scenarios are in the scenario_permissions list
if scenarios:
accepted_scenarios = [t[0] for t in scenario_permissions]
scenario_list = [x for x in scenario_list if x in accepted_scenarios]
# Return an excel spreadsheet
output = BytesIO()
cls._generate_spreadsheet_data(
request, scenario_list, output, *args, **kwargs
)
response = HttpResponse(
content_type="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
content=output.getvalue(),
)
# Filename parameter is encoded as specified in rfc5987
if callable(cls.title):
title = cls.title(request, *args, **kwargs)
else:
title = cls.model._meta.verbose_name_plural if cls.model else cls.title
response["Content-Disposition"] = (
"attachment; filename*=utf-8''%s.xlsx"
% urllib.parse.quote(force_str(title))
)
response["Cache-Control"] = "no-cache, no-store"
return response
elif fmt in ("csvlist", "csvtable", "csv"):
scenarios = request.GET.get("scenarios", None)
scenario_list = scenarios.split(",") if scenarios else [request.database]
# Make sure scenarios are in the scenario_permissions list
if scenarios:
accepted_scenarios = [t[0] for t in scenario_permissions]
scenario_list = [x for x in scenario_list if x in accepted_scenarios]
# Return CSV data to export the data
response = StreamingHttpResponse(
content_type="text/csv; charset=%s" % settings.CSV_CHARSET,
streaming_content=cls._generate_csv_data(
request, scenario_list, *args, **kwargs
),
)
# Filename parameter is encoded as specified in rfc5987
if callable(cls.title):
title = cls.title(request, *args, **kwargs)
else:
title = cls.model._meta.verbose_name_plural if cls.model else cls.title
response["Content-Disposition"] = (
"attachment; filename*=utf-8''%s.csv"
% urllib.parse.quote(force_str(title))
)
response["Cache-Control"] = "no-cache, no-store"
return response
else:
raise Http404("Unknown format type")
@classmethod
def parseJSONupload(cls, request):
# Check permissions
if not cls.model or not cls.editable:
return HttpResponseForbidden(_("Permission denied"))
permname = get_permission_codename("change", cls.model._meta)
if not request.user.has_perm("%s.%s" % (cls.model._meta.app_label, permname)):
return HttpResponseForbidden(_("Permission denied"))
# Loop over the data records
resp = HttpResponse()
ok = True
with transaction.atomic(using=request.database, savepoint=False):
content_type_id = ContentType.objects.get_for_model(cls.model).pk
for rec in json.JSONDecoder().decode(
request.read().decode(request.encoding or settings.DEFAULT_CHARSET)
):
if "delete" in rec:
# Deleting records
for key in rec["delete"]:
sid = transaction.savepoint(using=request.database)
try:
obj = cls.model.objects.using(request.database).get(pk=key)
obj.delete()
LogEntry(
user_id=request.user.id,
content_type_id=content_type_id,
object_id=force_str(key),
object_repr=force_str(key)[:200],
action_flag=DELETION,
).save(using=request.database)
transaction.savepoint_commit(sid)
except cls.model.DoesNotExist:
transaction.savepoint_rollback(sid)
ok = False
resp.write(escape(_("Can't find %s" % key)))
resp.write("<br>")
except Exception as e:
transaction.savepoint_rollback(sid)
ok = False
resp.write(escape(e))
resp.write("<br>")
elif "copy" in rec:
# Copying records
for key in rec["copy"]:
sid = transaction.savepoint(using=request.database)
try:
obj = cls.model.objects.using(request.database).get(pk=key)
if isinstance(cls.model._meta.pk, CharField):
# The primary key is a string
obj.pk = "Copy of %s" % key
elif isinstance(cls.model._meta.pk, AutoField):
# The primary key is an auto-generated number
obj.pk = None
else:
raise Exception(
_("Can't copy %s") % cls.model._meta.app_label
)
obj.save(using=request.database, force_insert=True)
LogEntry(
user_id=request.user.pk,
content_type_id=content_type_id,
object_id=obj.pk,
object_repr=force_str(obj),
action_flag=ADDITION,
change_message=_("Copied from %s.") % key,
).save(using=request.database)
transaction.savepoint_commit(sid)
except cls.model.DoesNotExist:
transaction.savepoint_rollback(sid)
ok = False
resp.write(escape(_("Can't find %s" % key)))
resp.write("<br>")
except Exception as e:
transaction.savepoint_rollback(sid)
ok = False
resp.write(escape(e))
resp.write("<br>")
else:
# Editing records
pk = rec["id"]
sid = transaction.savepoint(using=request.database)
try:
obj = cls.model.objects.using(request.database).get(
pk=rec["id"]
)
del rec["id"]
for i in rec:
if (
rec[i] == "\xa0"
): # Workaround for Jqgrid issue: date field can't be set to blank
rec[i] = None
if hasattr(cls.model, "getModelForm"):
UploadForm = cls.model.getModelForm(
tuple(rec.keys()), database=request.database
)
else:
UploadForm = modelform_factory(
cls.model,
fields=tuple(rec.keys()),
formfield_callback=lambda f: (
isinstance(f, RelatedField)
and f.formfield(using=request.database)
)
or f.formfield(),
)
form = UploadForm(rec, instance=obj)
if form.has_changed():
obj = form.save(commit=False)
obj.save(using=request.database)
LogEntry(
user_id=request.user.pk,
content_type_id=content_type_id,
object_id=obj.pk,
object_repr=force_str(obj),
action_flag=CHANGE,
change_message=_("Changed %s.")
% get_text_list(form.changed_data, _("and")),
).save(using=request.database)
transaction.savepoint_commit(sid)
except cls.model.DoesNotExist:
transaction.savepoint_rollback(sid)
ok = False
resp.write(escape(_("Can't find %s" % pk)))
resp.write("<br>")
except (ValidationError, ValueError):
transaction.savepoint_rollback(sid)
ok = False
for error in form.non_field_errors():
resp.write(escape("%s: %s" % (pk, error)))
resp.write("<br>")
for field in form:
for error in field.errors:
resp.write(
escape(
"%s %s: %s: %s"
% (obj.pk, field.name, rec[field.name], error)
)
)
resp.write("<br>")
except Exception as e:
transaction.savepoint_rollback(sid)
ok = False
resp.write(escape(e))
resp.write("<br>")
if ok:
resp.write("OK")
resp.status_code = ok and 200 or 500
return resp
@staticmethod
def dependent_models(m, found):
""" An auxilary method that constructs a set of all dependent models"""
for f in m._meta.get_fields():
if (
f.is_relation
and f.auto_created
and f.related_model != m
and f.related_model not in found
):
for sub in f.related_model.__subclasses__():
# if sub not in found:
found.update([sub])
found.update([f.related_model])
GridReport.dependent_models(f.related_model, found)
@staticmethod
def sort_models(models):
# Inject additional dependencies that are not reflected in database constraints
for m in models:
for e in getattr(m[1], "extra_dependencies", []):
for m2 in models:
if m2[1] == e:
m2[3].update([m[1]])
# Sort the list of models, based on dependencies between models
models.sort(key=lambda m: (m[1].__name__, m[0].upper()))
cnt = len(models)
ok = False
while not ok:
ok = True
for i in range(cnt):
j = i + 1
while j < cnt and ok:
if models[i][1] != models[j][1] and models[i][1] in models[j][3]:
i_base = models[i][1].__base__
if i_base == Model or i_base._meta.abstract:
i_base = None
j_base = models[j][1].__base__
if j_base == Model or j_base._meta.abstract:
j_base = None
if i_base == j_base and i_base and j_base:
j += 1
continue
if i_base == models[j][1] or j_base == models[i][1]:
j += 1
continue
models.append(models.pop(i))
while j < cnt:
if models[i][1] == models[j][1]:
models.append(models.pop(j))
j += 1
ok = False
break
elif (
models[i][1] == models[j][1]
and models[i][0].upper() > models[j][0].upper()
):
models[i], models[j] = models[j], models[i]
ok = False
j += 1
return models
@classmethod
def erase(cls, request):
# Build a list of dependencies
deps = set([cls.model])
# Special case for MO/PO/DO/DLVR that cannot be truncated
if cls.model.__name__ not in (
"PurchaseOrder",
"ManufacturingOrder",
"DistributionOrder",
"DeliveryOrder",
):
GridReport.dependent_models(cls.model, deps)
# Check the delete permissions for all related objects
for m in deps:
permname = get_permission_codename("delete", m._meta)
if not request.user.has_perm("%s.%s" % (m._meta.app_label, permname)):
return format_lazy(
"{}:{}", m._meta.verbose_name, _("Permission denied")
)
# Delete the data records
cursor = connections[request.database].cursor()
with transaction.atomic(using=request.database):
sql_list = []
containsOperationPlan = any(m.__name__ == "OperationPlan" for m in deps)
for m in deps:
if "getDeleteStatements" in dir(m) and not containsOperationPlan:
sql_list.extend(m.getDeleteStatements())
else:
sql_list = connections[request.database].ops.sql_flush(
no_style(), [m._meta.db_table for m in deps], []
)
for sql in sql_list:
cursor.execute(sql)
# Erase comments and history
content_ids = [ContentType.objects.get_for_model(m) for m in deps]
LogEntry.objects.filter(content_type__in=content_ids).delete()
Comment.objects.filter(content_type__in=content_ids).delete()
# Prepare message
for m in deps:
messages.add_message(
request,
messages.INFO,
_("Erasing data from %(model)s")
% {"model": force_str(m._meta.verbose_name)},
)
# Finished successfully
return None
@classmethod
def parseCSVupload(cls, request):
"""
        This method reads the uploaded CSV data (in memory) and creates or updates
        the database records.
        The data must have the following format:
        - the first row contains a header, listing all field names
        - lines starting with the character # are treated as comments and skipped
        - empty rows are skipped
"""
# Check permissions
if not cls.model:
yield "<div>%s</div>" % _("Invalid upload request")
return
permname = get_permission_codename("add", cls.model._meta)
if not cls.editable or not request.user.has_perm(
"%s.%s" % (cls.model._meta.app_label, permname)
):
yield "<div>%s</div>" % _("Permission denied")
return
# Choose the right delimiter and language
delimiter = (
get_format("DECIMAL_SEPARATOR", request.LANGUAGE_CODE, True) == ","
and ";"
or ","
)
if translation.get_language() != request.LANGUAGE_CODE:
translation.activate(request.LANGUAGE_CODE)
# Handle the complete upload as a single database transaction
try:
with transaction.atomic(using=request.database):
# Erase all records and related tables
if "erase" in request.POST:
returnvalue = cls.erase(request)
if returnvalue:
yield format_lazy("<div>{}</div>", returnvalue)
return
yield (
'<div class="table-responsive">'
'<table class="table table-condensed" style="white-space: nowrap"><tbody>'
)
for filename, file in request.FILES.items():
numerrors = 0
numwarnings = 0
firsterror = True
yield '<tr style="text-align: center"><th colspan="5">%s</th></tr>' % filename
data = EncodedCSVReader(file, delimiter=delimiter)
for error in parseCSVdata(
cls.model,
data,
user=request.user,
database=request.database,
ping=True,
):
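                        # Each yielded error is assumed to be a tuple of the form
                        # (log level, row number, field name, field value, message).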
if error[0] == logging.DEBUG:
                            # Yield some result so we can detect disconnected clients and interrupt the upload
yield " "
continue
if firsterror and error[0] in (logging.ERROR, logging.WARNING):
yield '<tr><th class="sr-only">%s</th><th>%s</th><th>%s</th><th>%s</th><th>%s%s%s</th></tr>' % (
capfirst(_("worksheet")),
capfirst(_("row")),
capfirst(_("field")),
capfirst(_("value")),
capfirst(_("error")),
" / ",
capfirst(_("warning")),
)
firsterror = False
if error[0] == logging.ERROR:
yield '<tr><td class="sr-only">%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s: %s</td></tr>' % (
cls.model._meta.verbose_name,
error[1] if error[1] else "",
error[2] if error[2] else "",
error[3] if error[3] else "",
capfirst(_("error")),
error[4],
)
numerrors += 1
                        elif error[0] == logging.WARNING:
yield '<tr><td class="sr-only">%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s: %s</td></tr>' % (
cls.model._meta.verbose_name,
error[1] if error[1] else "",
error[2] if error[2] else "",
error[3] if error[3] else "",
capfirst(_("warning")),
error[4],
)
numwarnings += 1
else:
yield '<tr class=%s><td class="sr-only">%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td></tr>' % (
"danger" if numerrors > 0 else "success",
cls.model._meta.verbose_name,
error[1] if error[1] else "",
error[2] if error[2] else "",
error[3] if error[3] else "",
error[4],
)
yield "</tbody></table></div>"
except GeneratorExit:
logging.warning("Connection Aborted")
except NameError:
pass
@classmethod
def parseSpreadsheetUpload(cls, request):
"""
This method reads a spreadsheet file (in memory) and creates or updates
the database records.
        The data must have the following format:
        - every worksheet in the workbook is processed
        - the first row of each worksheet contains a header, listing all field names
        - lines starting with the character # are treated as comments and skipped
        - empty rows are skipped
"""
# Check permissions
if not cls.model:
yield "<div>%s</div>" % _("Invalid upload request")
return
permname = get_permission_codename("add", cls.model._meta)
if not cls.editable or not request.user.has_perm(
"%s.%s" % (cls.model._meta.app_label, permname)
):
yield "<div>%s</div>" % _("Permission denied")
return
# Choose the right language
if translation.get_language() != request.LANGUAGE_CODE:
translation.activate(request.LANGUAGE_CODE)
# Handle the complete upload as a single database transaction
try:
with transaction.atomic(using=request.database):
# Erase all records and related tables
if "erase" in request.POST:
returnvalue = cls.erase(request)
if returnvalue:
yield '<br><samp style="padding-left: 15px;">%s</samp><br>' % returnvalue
                        return
# Header in output
yield (
'<div class="table-responsive">'
'<table class="table table-condensed" style="white-space: nowrap"><tbody>'
)
for filename, file in request.FILES.items():
numerrors = 0
numwarnings = 0
firsterror = True
yield '<tr style="text-align: center"><th colspan="5">%s</th></tr>' % filename
# Loop through the data records
wb = load_workbook(filename=file, read_only=True, data_only=True)
numsheets = len(wb.sheetnames)
for ws_name in wb.sheetnames:
rowprefix = "" if numsheets == 1 else "%s " % ws_name
ws = wb[ws_name]
for error in parseExcelWorksheet(
cls.model,
ws,
user=request.user,
database=request.database,
ping=True,
):
if error[0] == logging.DEBUG:
                                # Yield some result so we can detect disconnected clients and interrupt the upload
yield " "
continue
if firsterror and error[0] in (
logging.ERROR,
logging.WARNING,
):
yield '<tr><th class="sr-only">%s</th><th>%s</th><th>%s</th><th>%s</th><th>%s%s%s</th></tr>' % (
capfirst(_("worksheet")),
capfirst(_("row")),
capfirst(_("field")),
capfirst(_("value")),
capfirst(_("error")),
" / ",
capfirst(_("warning")),
)
firsterror = False
if error[0] == logging.ERROR:
yield '<tr><td class="sr-only">%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s: %s</td></tr>' % (
cls.model._meta.verbose_name,
error[1] if error[1] else "",
"%s%s" % (rowprefix, error[2]) if error[2] else "",
error[3] if error[3] else "",
capfirst(_("error")),
error[4],
)
numerrors += 1
                            elif error[0] == logging.WARNING:
yield '<tr><td class="sr-only">%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s: %s</td></tr>' % (
cls.model._meta.verbose_name,
error[1] if error[1] else "",
"%s%s" % (rowprefix, error[2]) if error[2] else "",
error[3] if error[3] else "",
capfirst(_("warning")),
error[4],
)
numwarnings += 1
else:
yield '<tr class=%s><td class="sr-only">%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td></tr>' % (
"danger" if numerrors > 0 else "success",
cls.model._meta.verbose_name,
error[1] if error[1] else "",
"%s%s" % (rowprefix, error[2]) if error[2] else "",
error[3] if error[3] else "",
error[4],
)
yield "</tbody></table></div>"
except GeneratorExit:
logger.warning("Connection Aborted")
except NameError:
pass
@classmethod
def _getRowByName(cls, request, name):
if not hasattr(cls, "_rowsByName"):
cls._rowsByName = {}
for i in request.rows:
cls._rowsByName[i.name] = i
if i.field_name != i.name:
cls._rowsByName[i.field_name] = i
return cls._rowsByName[name]
@staticmethod
def _filter_ne(query, reportrow, data):
if isinstance(
reportrow, (GridFieldCurrency, GridFieldInteger, GridFieldNumber)
):
return ~models.Q(
**{"%s__exact" % reportrow.field_name: smart_str(data).strip()}
)
else:
return ~models.Q(
**{"%s__iexact" % reportrow.field_name: smart_str(data).strip()}
)
@staticmethod
def _filter_bn(query, reportrow, data):
return ~models.Q(
**{"%s__istartswith" % reportrow.field_name: smart_str(data).strip()}
)
@staticmethod
def _filter_en(query, reportrow, data):
return ~models.Q(
**{"%s__iendswith" % reportrow.field_name: smart_str(data).strip()}
)
@staticmethod
def _filter_nc(query, reportrow, data):
if isinstance(
reportrow, (GridFieldCurrency, GridFieldInteger, GridFieldNumber)
):
return ~models.Q(
**{"%s__contains" % reportrow.field_name: smart_str(data).strip()}
)
else:
return ~models.Q(
**{"%s__icontains" % reportrow.field_name: smart_str(data).strip()}
)
@staticmethod
def _filter_ni(query, reportrow, data):
return ~models.Q(
**{"%s__in" % reportrow.field_name: smart_str(data).strip().split(",")}
)
@staticmethod
def _filter_in(query, reportrow, data):
return models.Q(
**{"%s__in" % reportrow.field_name: smart_str(data).strip().split(",")}
)
@staticmethod
def _filter_eq(query, reportrow, data):
if isinstance(
reportrow, (GridFieldCurrency, GridFieldInteger, GridFieldNumber)
):
return models.Q(
**{"%s__exact" % reportrow.field_name: smart_str(data).strip()}
)
else:
return models.Q(
**{"%s__iexact" % reportrow.field_name: smart_str(data).strip()}
)
@staticmethod
def _filter_bw(query, reportrow, data):
return models.Q(
**{"%s__istartswith" % reportrow.field_name: smart_str(data).strip()}
)
@staticmethod
def _filter_gt(query, reportrow, data):
return models.Q(**{"%s__gt" % reportrow.field_name: smart_str(data).strip()})
@staticmethod
def _filter_gte(query, reportrow, data):
return models.Q(**{"%s__gte" % reportrow.field_name: smart_str(data).strip()})
@staticmethod
def _filter_lt(query, reportrow, data):
return models.Q(**{"%s__lt" % reportrow.field_name: smart_str(data).strip()})
@staticmethod
def _filter_lte(query, reportrow, data):
return models.Q(**{"%s__lte" % reportrow.field_name: smart_str(data).strip()})
@staticmethod
def _filter_ew(query, reportrow, data):
return models.Q(
**{"%s__iendswith" % reportrow.field_name: smart_str(data).strip()}
)
@staticmethod
def _filter_cn(query, reportrow, data):
return models.Q(
**{"%s__icontains" % reportrow.field_name: smart_str(data).strip()}
)
@staticmethod
def _filter_win(query, reportrow, data):
limit = date.today() + timedelta(int(float(smart_str(data))))
return models.Q(**{"%s__lte" % reportrow.field_name: limit})
_filter_map_jqgrid_django = {
        # jqgrid op: filter function that builds the corresponding django Q object
"ne": _filter_ne.__func__,
"bn": _filter_bn.__func__,
"en": _filter_en.__func__,
"nc": _filter_nc.__func__,
"ni": _filter_ni.__func__,
"in": _filter_in.__func__,
"eq": _filter_eq.__func__,
"bw": _filter_bw.__func__,
"gt": _filter_gt.__func__,
"ge": _filter_gte.__func__,
"lt": _filter_lt.__func__,
"le": _filter_lte.__func__,
"ew": _filter_ew.__func__,
"cn": _filter_cn.__func__,
"win": _filter_win.__func__,
}
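    # For example, a jqgrid rule {"field": "name", "op": "cn", "data": "wid"} is
    # assumed to map through _filter_cn to models.Q(name__icontains="wid"),
    # provided the matching report row has field_name "name".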
_filter_map_django_jqgrid = {
# django lookup: jqgrid op
"in": "in",
"exact": "eq",
"startswith": "bw",
"iexact": "eq",
"istartswith": "bw",
"gt": "gt",
"gte": "ge",
"lt": "lt",
"lte": "le",
"endswith": "ew",
"contains": "cn",
"iendswith": "ew",
"icontains": "cn"
        # 'win' exists in jqgrid, but not in django
}
@classmethod
def getQueryString(cls, request):
        # Django-style filters (passed as URL parameters) are converted to a jqgrid filter expression
filtered = False
filters = ['{"groupOp":"AND","rules":[']
first = True
for i, j in request.GET.items():
for r in request.rows:
if r.field_name and (
(i == r.field_name) or i.startswith(r.field_name + "__")
):
operator = (i == r.field_name) and "exact" or i[i.rfind("__") + 2 :]
try:
if first:
first = False
else:
filters.append(",")
filters.append(
'{"field":"%s","op":"%s","data":"%s"}'
% (
r.field_name,
cls._filter_map_django_jqgrid[operator],
unquote(j).replace('"', '\\"'),
)
)
filtered = True
except Exception:
pass # Ignore invalid operators
if not filtered:
return None
filters.append("]}")
return "".join(filters)
@classmethod
def _get_q_filter(cls, request, filterdata):
q_filters = []
for rule in filterdata["rules"]:
try:
op, field, data = rule["op"], rule["field"], rule["data"]
reportrow = cls._getRowByName(request, field)
if data == "":
# No filter value specified, which makes the filter invalid
continue
else:
q_filters.append(
cls._filter_map_jqgrid_django[op](
q_filters,
reportrow,
reportrow.validateValues(data)
if isinstance(reportrow, GridFieldChoice)
else data,
)
)
except Exception:
pass # Silently ignore invalid filters
if "groups" in filterdata:
for group in filterdata["groups"]:
try:
z = cls._get_q_filter(request, group)
if z:
q_filters.append(z)
except Exception:
pass # Silently ignore invalid groups
if len(q_filters) == 0:
return None
elif filterdata["groupOp"].upper() == "OR":
return functools.reduce(operator.ior, q_filters)
else:
return functools.reduce(operator.iand, q_filters)
@classmethod
def filter_items(cls, request, items, plus_django_style=True):
# Jqgrid-style advanced filtering
filters = None
_filters = request.GET.get("filters")
if _filters:
# Validate complex search JSON data
try:
filters = json.loads(_filters)
except ValueError:
filters = None
# Single field searching, which is currently not used
if request.GET.get("_search") == "true" and not filters:
field = request.GET.get("searchField")
op = request.GET.get("searchOper")
data = request.GET.get("searchString")
if all([field, op, data]):
filters = {
"groupOp": "AND",
"rules": [{"op": op, "field": field, "data": data}],
}
if filters:
z = cls._get_q_filter(request, filters)
if z:
return items.filter(z)
else:
return items
# Django-style filtering, using URL parameters
if plus_django_style:
for i, j in request.GET.items():
for r in request.rows:
if r.name and (
i == r.field_name or i.startswith(r.field_name + "__")
):
try:
items = items.filter(
**{
i: r.validateValues(unquote(j))
if isinstance(r, GridFieldChoice)
else unquote(j)
}
)
except Exception:
pass # silently ignore invalid filters
return items
class GridPivot(GridReport):
# Cross definitions.
# Possible attributes for a cross field are:
# - title:
# Name of the cross that is displayed to the user.
# It defaults to the name of the field.
# - editable:
# True when the field is editable in the page.
# The default value is false.
crosses = ()
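    # A minimal, hypothetical example of a cross definition:
    #   crosses = (
    #       ("demand", {"title": _("demand")}),
    #       ("supply", {"title": _("supply"), "editable": True}),
    #   )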
template = "admin/base_site_gridpivot.html"
hasTimeBuckets = True
editable = False
multiselect = False
@classmethod
def _render_cross(cls, request):
result = []
for i in request.crosses:
m = {"key": i[0]}
for key, value in i[1].items():
if callable(value):
if key == "title":
m["name"] = capfirst(force_str(value(request)))
else:
m[key] = force_str(value(request), strings_only=True)
else:
if key == "title":
m["name"] = capfirst(force_str(value))
else:
m[key] = force_str(value, strings_only=True)
if "editable" not in m:
m["editable"] = False
result.append(json.dumps(m))
return ",\n".join(result)
@classmethod
def _render_colmodel(
cls, request, is_popup=False, prefs=None, mode="graph", *args, **kwargs
):
if prefs and "rows" in prefs:
            # Validate the preferences to 1) map from name to index, 2) ensure all rows
# are included, 3) ignore non-existing fields
prefrows = prefs["rows"]
defaultrows = {request.rows[i].name: i for i in range(len(request.rows))}
rows = []
for r in prefrows:
try:
idx = int(r[0])
defaultrows.pop(request.rows[idx].name, None)
rows.append(r)
except (ValueError, IndexError):
if r[0] in defaultrows:
rows.append((defaultrows[r[0]], r[1], r[2]))
defaultrows.pop(r[0], None)
for r, idx in defaultrows.items():
rows.append(
(
idx,
request.rows[idx].hidden or request.rows[idx].initially_hidden,
request.rows[idx].width,
)
)
else:
# Default configuration
rows = [
(
i,
request.rows[i].initially_hidden or request.rows[i].hidden,
request.rows[i].width,
)
for i in range(len(request.rows))
]
result = []
if is_popup:
result.append(
'{"name":"select","label":gettext("Select"),"width":75,"align":"center","sortable":false,"search":false,"fixed":true}'
)
count = -1
for (index, hidden, width) in rows:
try:
result.append(
'{%s,"width":%s,"counter":%d,"frozen":true%s,"hidden":%s,"fixed":true}'
% (
request.rows[index],
width,
index,
is_popup and ',"popup":true' or "",
hidden and "true" or "false",
)
)
count += 1
except IndexError:
pass
return ",\n".join(result)
@classmethod
def count_query(cls, request, *args, **kwargs):
if not hasattr(request, "basequery"):
if callable(cls.basequeryset):
request.basequery = cls.basequeryset(request, *args, **kwargs)
else:
request.basequery = cls.basequeryset
if args and args[0] and not cls.new_arg_logic:
request.basequery = request.basequery.filter(pk__exact=args[0])
return (
cls.filter_items(request, request.basequery, False)
.using(request.database)
.count()
)
@classmethod
def data_query(cls, request, *args, page=None, fields=None, **kwargs):
if not fields:
raise Exception("No fields for pivot report")
if not hasattr(request, "basequery"):
if callable(cls.basequeryset):
request.basequery = cls.basequeryset(request, *args, **kwargs)
else:
request.basequery = cls.basequeryset
if args and args[0] and not cls.new_arg_logic:
request.basequery = request.basequery.filter(pk__exact=args[0])
if page:
cnt = (page - 1) * request.pagesize + 1
return cls.query(
request,
cls._apply_sort(
request, cls.filter_items(request, request.basequery, False)
).using(request.database)[cnt - 1 : cnt + request.pagesize],
sortsql=cls._apply_sort_index(request),
)
else:
return cls.query(
request,
cls._apply_sort(
request, cls.filter_items(request, request.basequery, False)
).using(request.database),
sortsql=cls._apply_sort_index(request),
)
@classmethod
def _generate_json_data(cls, request, *args, **kwargs):
# Prepare the query
request.prefs = request.user.getPreference(
cls.getKey(request, *args, **kwargs), database=request.database
)
recs = cls.count_query(request, *args, **kwargs)
page = "page" in request.GET and int(request.GET["page"]) or 1
total_pages = math.ceil(float(recs) / request.pagesize)
if page > total_pages:
page = total_pages
if page < 1:
page = 1
# Generate header of the output
yield '{"total":%d,\n' % total_pages
yield '"page":%d,\n' % page
yield '"records":%d,\n' % recs
yield '"rows":[\n'
# Generate output
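        # Each streamed grid row is assumed to take the shape (hypothetical names):
        #   {"item":"Widget A", "2024.W01":[10,5], "2024.W02":[12,4]}
        # i.e. the row attributes followed by one array of cross values per bucket.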
currentkey = None
r = []
fields = [i.field_name for i in request.rows if i.field_name]
for i in cls.data_query(request, *args, page=page, fields=fields, **kwargs):
# We use the first field in the output to recognize new rows.
if currentkey != i[request.rows[0].name]:
# New line
if currentkey:
yield "".join(r)
r = ["},\n{"]
else:
r = ["{"]
currentkey = i[request.rows[0].name]
first2 = True
for f in request.rows:
try:
s = cls._getJSONValue(i[f.name], field=f, request=request)
if first2:
r.append('"%s":%s' % (f.name, s))
first2 = False
elif i[f.name] is not None:
r.append(', "%s":%s' % (f.name, s))
except Exception:
pass
r.append(', "%s":[' % i["bucket"])
first2 = True
for f in request.crosses:
if i[f[0]] is None:
if first2:
r.append("null")
first2 = False
else:
r.append(",null")
else:
if first2:
r.append("%s" % i[f[0]])
first2 = False
else:
r.append(",%s" % i[f[0]])
r.append("]")
r.append("}")
r.append("\n]}\n")
yield "".join(r)
@classmethod
def _generate_csv_data(cls, request, scenario_list, *args, **kwargs):
sf = StringIO()
decimal_separator = get_format("DECIMAL_SEPARATOR", request.LANGUAGE_CODE, True)
if decimal_separator == ",":
writer = csv.writer(sf, quoting=csv.QUOTE_NONNUMERIC, delimiter=";")
else:
writer = csv.writer(sf, quoting=csv.QUOTE_NONNUMERIC, delimiter=",")
if translation.get_language() != request.LANGUAGE_CODE:
translation.activate(request.LANGUAGE_CODE)
listformat = request.GET.get("format", "csvlist") == "csvlist"
# Write a Unicode Byte Order Mark header, aka BOM (Excel needs it to open UTF-8 file properly)
yield cls.getBOM(settings.CSV_CHARSET)
# Pick up the preferences
if not hasattr(request, "prefs"):
request.prefs = request.user.getPreference(
cls.getKey(request, *args, **kwargs), database=request.database
)
if request.prefs and "rows" in request.prefs:
myrows = [
request.rows[f[0]]
for f in cls._validate_rows(request, request.prefs["rows"])
if not f[1]
]
else:
myrows = [
f
for f in request.rows
if f.name and not f.hidden and not f.initially_hidden
]
if request.prefs and "crosses" in request.prefs:
mycrosses = [
request.crosses[f]
for f in cls._validate_crosses(request, request.prefs["crosses"])
]
else:
mycrosses = [f for f in request.crosses if f[1].get("visible", True)]
# Write a header row
fields = [
force_str(f.title, encoding=settings.CSV_CHARSET, errors="ignore").title()
for f in myrows
if f.name
]
if listformat:
fields.extend(
[
capfirst(
force_str(
_("bucket"), encoding=settings.CSV_CHARSET, errors="ignore"
)
)
]
)
fields.extend(
[
capfirst(
force_str(
_(
(
f[1]["title"](request)
if callable(f[1]["title"])
else f[1]["title"]
)
if "title" in f[1]
else f[0]
),
encoding=settings.CSV_CHARSET,
errors="ignore",
)
)
for f in mycrosses
]
)
else:
fields.extend(
[
capfirst(
force_str(
_("data field"),
encoding=settings.CSV_CHARSET,
errors="ignore",
)
)
]
)
fields.extend(
[
force_str(b["name"], encoding=settings.CSV_CHARSET, errors="ignore")
for b in request.report_bucketlist
]
)
if len(scenario_list) > 1:
fields.insert(0, _("scenario"))
writer.writerow(fields)
yield sf.getvalue()
# Write the report content
orginal_database = request.database
try:
for scenario in scenario_list:
request.database = scenario
query = cls.data_query(request, *args, fields=fields, **kwargs)
if listformat:
for row in query:
# Clear the return string buffer
sf.seek(0)
sf.truncate(0)
# Data for rows
if hasattr(row, "__getitem__"):
fields = [
cls._getCSVValue(
row[f.name],
field=f,
request=request,
decimal_separator=decimal_separator,
)
for f in myrows
if f.name
]
fields.extend(
[
force_str(
row["bucket"],
encoding=settings.CSV_CHARSET,
errors="ignore",
)
]
)
fields.extend(
[
force_str(
cls._localize(row[f[0]], decimal_separator),
encoding=settings.CSV_CHARSET,
errors="ignore",
)
if row[f[0]] is not None
else ""
for f in mycrosses
]
)
else:
fields = [
cls._getCSVValue(
getattr(row, f.name),
field=f,
request=request,
decimal_separator=decimal_separator,
)
for f in myrows
if f.name
]
fields.extend(
[
force_str(
getattr(row, "bucket"),
encoding=settings.CSV_CHARSET,
errors="ignore",
)
]
)
fields.extend(
[
force_str(
cls._localize(
getattr(row, f[0]), decimal_separator
),
encoding=settings.CSV_CHARSET,
errors="ignore",
)
if getattr(row, f[0]) is not None
else ""
for f in mycrosses
]
)
# Return string
if len(scenario_list) > 1:
fields.insert(0, scenario)
writer.writerow(fields)
yield sf.getvalue()
else:
currentkey = None
row_of_buckets = None
for row in query:
# We use the first field in the output to recognize new rows.
if not currentkey:
currentkey = row[request.rows[0].name]
row_of_buckets = [row]
elif currentkey == row[request.rows[0].name]:
row_of_buckets.append(row)
else:
# Write an entity
for cross in mycrosses:
# Clear the return string buffer
sf.seek(0)
sf.truncate(0)
fields = [
cls._getCSVValue(
row_of_buckets[0][s.name],
field=s,
request=request,
decimal_separator=decimal_separator,
)
for s in myrows
if s.name
]
fields.extend(
[
force_str(
capfirst(
_(
(
cross[1]["title"](request)
if callable(cross[1]["title"])
else cross[1]["title"]
)
if "title" in cross[1]
else cross[0]
)
),
encoding=settings.CSV_CHARSET,
errors="ignore",
)
]
)
fields.extend(
[
force_str(
cls._localize(
bucket[cross[0]], decimal_separator
),
encoding=settings.CSV_CHARSET,
errors="ignore",
)
if bucket[cross[0]] is not None
else ""
for bucket in row_of_buckets
]
)
# Return string
if len(scenario_list) > 1:
fields.insert(0, scenario)
writer.writerow(fields)
yield sf.getvalue()
currentkey = row[request.rows[0].name]
row_of_buckets = [row]
# Write the last entity
if row_of_buckets:
for cross in mycrosses:
# Clear the return string buffer
sf.seek(0)
sf.truncate(0)
fields = [
cls._getCSVValue(
row_of_buckets[0][s.name],
field=s,
request=request,
decimal_separator=decimal_separator,
)
for s in myrows
if s.name
]
fields.extend(
[
force_str(
capfirst(
_(
(
cross[1]["title"](request)
if callable(cross[1]["title"])
else cross[1]["title"]
)
if "title" in cross[1]
else cross[0]
)
),
encoding=settings.CSV_CHARSET,
errors="ignore",
)
]
)
fields.extend(
[
force_str(
cls._localize(
bucket[cross[0]], decimal_separator
),
encoding=settings.CSV_CHARSET,
errors="ignore",
)
for bucket in row_of_buckets
]
)
# Return string
if len(scenario_list) > 1:
fields.insert(0, scenario)
writer.writerow(fields)
yield sf.getvalue()
finally:
request.database = orginal_database
@classmethod
def _generate_spreadsheet_data(
cls, request, scenario_list, output, *args, **kwargs
):
# Create a workbook
wb = Workbook(write_only=True)
if callable(cls.title):
title = cls.title(request, *args, **kwargs)
else:
title = cls.model._meta.verbose_name_plural if cls.model else cls.title
        # Excel limits worksheet names to 31 characters
ws = wb.create_sheet(title=force_str(title)[:31])
# Create a named style for the header row
headerstyle = NamedStyle(name="headerstyle")
headerstyle.fill = PatternFill(fill_type="solid", fgColor="70c4f4")
wb.add_named_style(headerstyle)
readlonlyheaderstyle = NamedStyle(name="readlonlyheaderstyle")
readlonlyheaderstyle.fill = PatternFill(fill_type="solid", fgColor="d0ebfb")
wb.add_named_style(readlonlyheaderstyle)
# Pick up the preferences
listformat = request.GET.get("format", "spreadsheetlist") == "spreadsheetlist"
if not hasattr(request, "prefs"):
request.prefs = request.user.getPreference(
cls.getKey(request, *args, **kwargs), database=request.database
)
if request.prefs and "rows" in request.prefs:
myrows = [
request.rows[f[0]]
for f in cls._validate_rows(request, request.prefs["rows"])
if not f[1]
]
else:
myrows = [
f
for f in request.rows
if f.name and not f.initially_hidden and not f.hidden
]
if request.prefs and "crosses" in request.prefs:
mycrosses = [
request.crosses[f]
for f in cls._validate_crosses(request, request.prefs["crosses"])
]
else:
mycrosses = [f for f in request.crosses if f[1].get("visible", True)]
# Write a header row
fields = []
comment = None
for f in myrows:
if f.name:
cell = WriteOnlyCell(ws, value=force_str(f.title).title())
if f.editable or f.key:
cell.style = "headerstyle"
fname = getattr(f, "field_name", f.name)
if (
not f.key
and f.formatter == "detail"
and fname.endswith("__name")
):
cell.comment = CellComment(
force_str(
_("Values in this field must exist in the %s table")
% force_str(_(fname[:-6]))
),
"Author",
)
elif isinstance(f, GridFieldChoice):
cell.comment = CellComment(
force_str(
_("Accepted values are: %s")
% ", ".join([c[0] for c in f.choices])
),
"Author",
)
else:
cell.style = "readlonlyheaderstyle"
if not comment:
comment = CellComment(
force_str(_("Read only")), "Author", height=20, width=80
)
cell.comment = comment
fields.append(cell)
if listformat:
cell = WriteOnlyCell(ws, value=capfirst(force_str(_("bucket"))))
if f.editable or f.key:
cell.style = "headerstyle"
fname = getattr(f, "field_name", f.name)
if not f.key and f.formatter == "detail" and fname.endswith("__name"):
cell.comment = CellComment(
force_str(
_("Values in this field must exist in the %s table")
% force_str(_(fname[:-6]))
),
"Author",
)
elif isinstance(f, GridFieldChoice):
cell.comment = CellComment(
force_str(
_("Accepted values are: %s")
% ", ".join([c[0] for c in f.choices])
),
"Author",
)
else:
cell.style = "readlonlyheaderstyle"
if not comment:
comment = CellComment(
force_str(_("Read only")), "Author", height=20, width=80
)
cell.comment = comment
fields.append(cell)
for f in mycrosses:
cell = WriteOnlyCell(
ws,
value=capfirst(
force_str(
_(
(
f[1]["title"](request)
if callable(f[1]["title"])
else f[1]["title"]
)
if "title" in f[1]
else f[0]
)
)
),
)
if f[1].get("editable", False):
cell.style = "headerstyle"
else:
cell.style = "readlonlyheaderstyle"
if not comment:
comment = CellComment(
force_str(_("Read only")), "Author", height=20, width=80
)
cell.comment = comment
fields.append(cell)
else:
cell = WriteOnlyCell(ws, value=capfirst(_("data field")))
cell.style = "readlonlyheaderstyle"
fields.append(cell)
for b in request.report_bucketlist:
cell = WriteOnlyCell(ws, value=str(b["name"]))
cell.style = "readlonlyheaderstyle"
fields.append(cell)
if len(scenario_list) > 1:
cell = WriteOnlyCell(ws, value=capfirst(_("scenario")))
cell.style = "readlonlyheaderstyle"
fields.insert(0, cell)
ws.append(fields)
# Add an auto-filter to the table
ws.auto_filter.ref = "A1:%s1048576" % get_column_letter(len(fields))
# Write the report content
original_database = request.database
try:
for scenario in scenario_list:
request.database = scenario
query = cls.data_query(request, *args, fields=fields, **kwargs)
if listformat:
for row in query:
# Append a row
if hasattr(row, "__getitem__"):
fields = [
_getCellValue(row[f.name], field=f, request=request)
for f in myrows
if f.name
]
fields.extend([_getCellValue(row["bucket"])])
fields.extend([_getCellValue(row[f[0]]) for f in mycrosses])
else:
fields = [
_getCellValue(
getattr(row, f.name), field=f, request=request
)
for f in myrows
if f.name
]
fields.extend([_getCellValue(getattr(row, "bucket"))])
fields.extend(
[_getCellValue(getattr(row, f[0])) for f in mycrosses]
)
if len(scenario_list) > 1:
fields.insert(0, scenario)
ws.append(fields)
else:
currentkey = None
row_of_buckets = None
for row in query:
# We use the first field in the output to recognize new rows.
if not currentkey:
currentkey = row[request.rows[0].name]
row_of_buckets = [row]
elif currentkey == row[request.rows[0].name]:
row_of_buckets.append(row)
else:
# Write a row
for cross in mycrosses:
if not cross[1].get("visible", True):
continue
fields = [
_getCellValue(
row_of_buckets[0][s.name],
field=s,
request=request,
)
for s in myrows
if s.name
]
fields.extend(
[
_getCellValue(
(
capfirst(
cross[1]["title"](request)
if callable(cross[1]["title"])
else cross[1]["title"]
)
)
if "title" in cross[1]
else capfirst(cross[0])
)
]
)
fields.extend(
[
_getCellValue(bucket[cross[0]])
for bucket in row_of_buckets
]
)
if len(scenario_list) > 1:
fields.insert(0, scenario)
ws.append(fields)
currentkey = row[request.rows[0].name]
row_of_buckets = [row]
# Write the last row
if row_of_buckets:
for cross in mycrosses:
if cross[1].get("visible", False):
continue
fields = [
_getCellValue(
row_of_buckets[0][s.name], field=s, request=request
)
for s in myrows
if s.name
]
fields.extend(
[
_getCellValue(
(
capfirst(
cross[1]["title"](request)
if callable(cross[1]["title"])
else cross[1]["title"]
)
)
if "title" in cross[1]
else capfirst(cross[0])
)
]
)
fields.extend(
[
_getCellValue(bucket[cross[0]])
for bucket in row_of_buckets
]
)
if len(scenario_list) > 1:
fields.insert(0, scenario)
ws.append(fields)
finally:
request.database = original_database
# Write the spreadsheet
wb.save(output)
numericTypes = (Decimal, float, int)
def _buildMaskedNames(model, exportConfig):
"""
    Build a map of anonymized names for a model and store it in the exportConfig dictionary.
"""
modelname = model._meta.model_name
if modelname in exportConfig:
return
exportConfig[modelname] = {}
if issubclass(model, HierarchyModel):
keys = (
model.objects.only("pk").order_by("lvl", "pk").values_list("pk", flat=True)
)
else:
keys = model.objects.only("pk").order_by("pk").values_list("pk", flat=True)
idx = 1
for key in keys:
exportConfig[modelname][key] = "%s %07d" % (modelname, idx)
idx += 1
def _parseSeconds(data):
"""
    Formats a timedelta as HH:MM:SS, appending the fractional seconds when present.
"""
total_seconds = data.total_seconds()
hours = math.floor(total_seconds / 3600)
minutes = math.floor((total_seconds - hours * 3600) / 60)
seconds = math.floor(total_seconds - hours * 3600 - minutes * 60)
remainder = total_seconds - 3600 * hours - 60 * minutes - seconds
return "%02d:%02d:%02d%s" % (
hours,
minutes,
seconds,
(".%s" % str(round(remainder, 8))[2:]) if remainder > 0 else "",
)
def _getCellValue(data, field=None, exportConfig=None, request=None):
if data is None:
return ""
elif isinstance(data, datetime):
if (
field
and request
and isinstance(field, (GridFieldLastModified, GridFieldLocalDateTime))
):
if not hasattr(request, "tzoffset"):
request.tzoffset = GridReport.getTimezoneOffset(request)
return data + request.tzoffset
else:
return data
elif isinstance(data, numericTypes) or isinstance(data, date):
return data
elif isinstance(data, timedelta):
return _parseSeconds(data)
elif isinstance(data, time):
return data.isoformat()
elif not exportConfig or not exportConfig.get("anonymous", False):
return str(data)
else:
if field.primary_key and not isinstance(field, AutoField):
model = field.model
elif isinstance(field, RelatedField):
model = field.related_model
else:
return str(data)
if model._meta.app_label == "common":
return str(data)
modelname = model._meta.model_name
if modelname not in exportConfig:
_buildMaskedNames(model, exportConfig)
# Return the mapped value
return exportConfig[modelname].get(data, "unknown")
| agpl-3.0 | -1,733,423,649,352,162,600 | 39.234855 | 148 | 0.454501 | false |
ph1l/halo_radio | HaloRadio/UserSongStatsListMaker.py | 1 | 3143 | #
#
# Copyright (C) 2004 Philip J Freeman
#
# This file is part of halo_radio
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import HaloRadio.TopListMaker as TopListMaker
import HaloRadio.Exception as HaloException
import re
class UserSongStatsListMaker(TopListMaker.TopListMaker):
"""
- UserSongStatsListMaker -
"""
def __init__( self ):
self.list = [ ]
self.tablename = 'user_song_stats'
return
def GetRandomSong ( self, userid ):
self.list = [ ]
query = """SELECT songid FROM %s WHERE userid='%d' and requests > kills ORDER BY rand() DESC LIMIT 1""" % ( self.tablename, userid )
result = self.do_my_query( query )
for row in result:
(id, ) = row
self.list.append(id)
return
def GetBySong ( self, songid ):
self.list = [ ]
query = """SELECT id FROM %s WHERE songid="%s";"""%(self.tablename, songid )
result = self.do_my_query( query )
for row in result:
(id, ) = row
self.list.append(id)
return
def GetTopRank ( self, userid, num ):
self.list = [ ]
query = """SELECT songid, requests, kills FROM %s WHERE userid='%d' AND requests> kills > 0 ORDER BY requests DESC LIMIT %d""" % ( self.tablename, userid, num )
result = self.do_my_query( query )
for row in result:
(id, requests, kills) = row
self.list.append((id, requests, kills))
return
def GetBottomRank ( self, userid, num ):
self.list = [ ]
query = """SELECT songid, kills, requests FROM %s WHERE userid=%d ORDER BY kills - requests DESC LIMIT %d""" % ( self.tablename, userid, num )
result = self.do_my_query( query )
for row in result:
(id, kills, requests) = row
self.list.append((id, kills, requests))
return
def GetRandomSongForUsers ( self, useridlist ):
import HaloRadio.Song as Song
wheres = []
for userid in useridlist:
wheres.append(" userid = \"%s\" "%userid)
query = """SELECT SUM(requests) as requests, SUM(kills) as kills, (rand()*100) as rank, songid FROM %s WHERE %s GROUP BY songid HAVING requests > kills ORDER BY rank DESC LIMIT 1;""" % (self.tablename, " OR ".join(wheres) )
try:
((requests, kills, rank, songid),) = self.do_my_query( query )
except ValueError:
raise HaloException.SongNotExistant
try:
song = Song.Song(songid)
except HaloException.SongNotFound, snf:
song = self.GetRandomSongForUsers(useridlist)
return song
| gpl-2.0 | 6,443,673,913,120,863,000 | 35.126437 | 239 | 0.646834 | false |
zstackio/zstack-woodpecker | integrationtest/vm/multihosts/invalid_path/snapshot/paths/invalid_path4.py | 1 | 1360 | import zstackwoodpecker.test_state as ts_header
import os
TestAction = ts_header.TestAction
def path():
return dict(initial_formation="template5", checking_point=8, faild_point=13, path_list=[
[TestAction.create_vm, 'vm1', ],
[TestAction.create_volume, 'volume1', 'flag=scsi'],
[TestAction.attach_volume, 'vm1', 'volume1'],
[TestAction.create_volume, 'volume2', 'flag=scsi'],
[TestAction.attach_volume, 'vm1', 'volume2'],
[TestAction.create_volume, 'volume3', 'flag=scsi'],
[TestAction.attach_volume, 'vm1', 'volume3'],
[TestAction.create_vm_snapshot, 'vm1', 'vm1-snapshot1'],
[TestAction.create_vm_backup, 'vm1', 'vm1-backup1'],
[TestAction.create_vm, 'vm2', ],
[TestAction.detach_volume, 'volume1'],
[TestAction.stop_vm, 'vm1'],
[TestAction.use_vm_snapshot, 'vm1-snapshot1'],
])
'''
The final status:
Running:['vm2']
Stopped:['vm1']
Enadbled:['vm1-snapshot1', 'volume1-snapshot1', 'volume2-snapshot1', 'volume3-snapshot1', 'vm1-backup1', 'volume1-backup1', 'volume2-backup1', 'volume3-backup1']
attached:['volume2', 'volume3']
Detached:['volume1']
Deleted:[]
Expunged:[]
Ha:[]
Group:
vm_snap1:['vm1-snapshot1', 'volume1-snapshot1', 'volume2-snapshot1', 'volume3-snapshot1']---vm1@volume1_volume2_volume3
vm_backup1:['vm1-backup1', 'volume1-backup1', 'volume2-backup1', 'volume3-backup1']---vm1@volume1_volume2_volume3
''' | apache-2.0 | 2,703,161,563,352,621,600 | 34.815789 | 161 | 0.7 | false |
liqd/adhocracy3.mercator | src/adhocracy_meinberlin/adhocracy_meinberlin/sheets/test_kiezkassen.py | 2 | 1796 | import colander
from pyramid import testing
from pytest import mark
from pytest import fixture
from pytest import raises
@mark.usefixtures('integration')
def test_includeme_register_proposal_sheet(registry):
from .kiezkassen import IProposal
context = testing.DummyResource(__provides__=IProposal)
assert registry.content.get_sheet(context, IProposal)
class TestProposalSheet:
@fixture
def meta(self):
from .kiezkassen import proposal_meta
return proposal_meta
@fixture
def context(self):
from adhocracy_core.interfaces import IItem
return testing.DummyResource(__provides__=IItem)
def test_create_valid(self, meta, context):
from zope.interface.verify import verifyObject
from adhocracy_core.interfaces import IResourceSheet
from .kiezkassen import IProposal
from .kiezkassen import ProposalSchema
inst = meta.sheet_class(meta, context, None)
assert IResourceSheet.providedBy(inst)
assert verifyObject(IResourceSheet, inst)
assert inst.meta.isheet == IProposal
assert inst.meta.schema_class == ProposalSchema
def test_get_empty(self, meta, context):
from decimal import Decimal
inst = meta.sheet_class(meta, context, None)
wanted = {'budget': Decimal(0),
'creator_participate': False,
'location_text': '',
}
assert inst.get() == wanted
class TestProposalSchema:
@fixture
def inst(self):
from .kiezkassen import ProposalSchema
return ProposalSchema()
def test_create(self, inst):
assert inst['budget'].validator.max == 50000
assert inst['budget'].required
assert inst['location_text'].validator.max == 100
| agpl-3.0 | 4,342,760,450,687,123,000 | 29.965517 | 60 | 0.671492 | false |
philscher/gkc | Benchmarks/IntegralCode/SolveOK.py | 1 | 8023 | from pylab import *
import scipy
#import math
import mpmath as mp
import traceback
import random
import numpy
import Dispersion_ConstTheta
#import fpectl
#fpectl.turnon_sigfpe()
import scipy.linalg as la
import scipy.sparse.linalg as sla
import SlepcDet
import gkcStyle
import iCode
class Species:
def __init__(self, m=0., q=1., T=1., n=0., eta=0., name="Unamed"):
self.m = m
self.q = q
self.T = T
self.n = n
self.eta = eta
self.name = name
############################## Settings for Integral Mode ######################################
Nx = 65
# Gao case Ln, Ls, Lx, Ly, theta, lambda_D2 = 1., 40., 12., 32., 0.1, 1.
# My setup
species = [ Species(m=0.,q=-1.,T=1.,n=1., eta=0.,name= "Adiab"), Species(1.,1.,1.,1., 5., "Ion") ]#, Species(1./1836.,-1.,1.,1., 4., "Electron")]
#species = [ Species(name= "Adiab"), Species(m=1.,q=1.,T=1.,n=1.,eta=5., name="Ion"), Species(m=0.0025,q=-1.,T=1.,n=1., eta=0., name="Electron") ]
Ln, Ls, Lx, Ly, lambda_D2, ky_list = 1., 1./0.2, 64., 64., 0., [0.5]
## Gao Setup
species = [ Species(name= "Adiab"), Species(m=1836.,q=1.,T=1.,n=1.,eta=0., name="Ion"), Species(m=1.,q=-1.,T=1.,n=1., eta=3., name="Electron") ]
Ln, Ls, Lx, Ly, lambda_D2, ky_list = 1., 0.025, 60., 64., 0., 2.*pi/64. * arange(1, 8)
#Ln, Ls, Lx, Ly, lambda_D2, ky_list = 1., 0.025, 60., 64., 0., [0.3]
######################## Setup Grid ######################
kx_list = 2*pi/Lx * linspace(-Nx/2., Nx/2., Nx)
X = linspace(-Lx/2, Lx/2, Nx)
dx, dk = Lx/Nx, 2.*pi/Lx
dx, dk = dx * dk *dk , 1.
fig = figure(figsize=(30,10))
global w_min, D_min
w_min = 0.+0.j
D_min = 1e99 + 1.j*1.e99
sol = []
def solveDispersion(ky):
A = zeros((Nx,Nx), dtype=complex)
def setupA(w):
A[:,:] = 0.
iCode.setupMatrixPy(species, w, ky, X, kx_list, Ls, Ln, Nx, A, dk*dx, lambda_D2)
return A
def solveEquation(w):
global D_min, w_min
A = setupA(complex(w))
#print A
#val = SlepcDet.getMinAbsEigenvalue(A)
val = SlepcDet.getMinAbsEigenvaluLA(A)
#val = det(A)
#(sign, logdet) = np.linalg.slogdet(A)
#val = sign * logdet
if abs(val) < abs(D_min) : w_min = complex(w)
print ky, " w : %.3f+%.3f j" % (real(complex(w)), imag(complex(w))) , " Determinant : %.2e " % abs(val)
if val != val: return 0. + 0.j
return val
try :
w0= -0.01 + 0.02j
w_damp = complex(mp.findroot(solveEquation, (w0, w0-0.005j, w0+0.005), solver='muller', tol=1.e-8, ftol=1.e-8, maxsteps=5000))
#w_damp = PyPPL.getZero(solveEquation, init=(w0, w0+0.01j, w0+0.02), solver='muller', tol=1.e-9, ftol=1.e-6, maxsteps=5000)
except:
traceback.print_exc(file=sys.stdout)
try:
#for n in range(Nx):
n = 0
global w_min
print "-----------------> ", w_min
# solution found for w0, get solution vector
werr = solveEquation(w_min)
A = setupA(w_min)
#print A
#S = solve(A, append(1.+0.j,zeros(Nx-1, dtype=complex)))
#S = solve(A, append(1.+0.j, append(zeros(Nx-2, dtype=complex), 1.+0.j)))
#b = append(0., append(1.+0., zeros(Nx-2, dtype=complex)))
#b = zeros(Nx, dtype=complex)
#b = ones(Nx, dtype=complex)
#b[:] = 0. ;
#b[0] = 1.
#S, err = solve(A, b), 0.
#S, err = sla.lgmres(A,b, tol=1.e-9)
# We found our eigenvalue w_min, now we use the
# inverse iteration to find the closest eigenvector
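        # Inverse iteration, sketched: for an eigenvalue estimate mu, repeatedly
        # solve (A - mu*I) b_new = b and renormalise; b converges to the eigenvector
        # whose eigenvalue lies closest to mu. Here mu = w_min, and the solve() call
        # below applies (A - w_min*I)^-1 without forming the inverse explicitly.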
I = (1.+0.j) * eye(Nx)
b = (1.+1.j) * ones(Nx, dtype=complex)
for n in range(4096):
b_prev = b
b = solve(A - w_min * I, b)
# RESCALE
b = b / sum(abs(b))
if (abs(sum( sqrt(sum(b**2)/sum(b_prev**2)) * b_prev - b )) < 1.e-10) : break
#print("Eigv Error : %.2e Abs : %.2e " % (abs(sum( sqrt(sum(b**2)/sum(b_prev**2)) * b_prev - b )), sum(abs(b))) )
#print "Sol : " , b
clf()
gkcStyle.newFigure(ratio='2.33:1', basesize=12)
subplot(131)
fig.suptitle("$\omega_0$ = %.4f %.4fi $\pm$ %.2e %.2e i" % (real(w_min), imag(w_min), real(werr), imag(werr)))
###################### Plot Fourier Modes ##########3
b = -real(b) + 1.j * imag(b)
plot(kx_list, real(b), 'r.-', label="real")
plot(kx_list, imag(b), '.-', label="imag", color=gkcStyle.color_indigo)
xlim((min(kx_list), max(kx_list)))
xlabel("$k_x$")
ylabel("$\phi(k_x)$")
legend(ncol=2).draw_frame(0)
################### Plot real modes ########3
subplot(132)
# Remove High frequency modes
#b[:3] = 0.;
#b[-4:] = 0.;
# We have to transform to FFTW format
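        # numpy's ifft expects the spectrum ordered as [k=0, positive k, negative k],
        # whereas b is indexed by kx_list running monotonically from -Nx/2 to +Nx/2;
        # the line below therefore rotates the array: the k=0 sample (index Nx/2),
        # then the positive-k half, then the negative-k half.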
F = append(append(b[Nx/2], b[Nx/2+1:]), b[:Nx/2])
print "F--------------->", F
plot(X,real(np.fft.ifft(F)), 'r.-', label='real')
plot(X,imag(np.fft.ifft(F)), '.-', label='imag', color=gkcStyle.color_indigo)
xlim((min(X), max(X)))
xlabel("$x$")
ylabel("$\phi(x)$")
legend(ncol=2).draw_frame(0)
################ Plot Contour
subplot(133)
y = linspace(0., Ly, 128)
KxKy = zeros((Nx, 65), dtype=complex)
nky = ky * Ly / (2.*pi)
KxKy[:,nky] = np.fft.ifft(F)
XY = np.fft.irfft(KxKy, axis=1)
xlabel("$x$")
ylabel("$y$")
contourf(X, y, XY.T, 20, vmin=-abs(XY).max(), vmax=abs(XY).max())
colorbar()
savefig("Plot2_" + str(ky) + ".png", bbox_inches='tight')
# append and normalize
sol.append(np.fft.ifft(b/abs(b).max()))
except:
#species = [ Species(name= "Adiab"), Species(m=1.,q=1.,T=1.,n=1.,eta=5., name="Ion"), Species(m=0.0025,q=-1.,T=1.,n=1., eta=0., name="Electron") ]
traceback.print_exc(file=sys.stdout)
return w_min, abs(solveEquation(w_min))
w_list1 = []
def plotMode():
for ky in ky_list:
wn, err = solveDispersion(ky)
w_list1.append (wn)
def plotContours():
ky = 0.5
R = linspace(-1.5, 0.5, 16)
I = linspace(0., 10., 16)
V = zeros((len(R),len(I)), dtype=complex)
for r in range(len(R)):
for i in range(len(I)):
A = zeros((Nx,Nx), dtype=complex)
iCode.setupMatrixPy(species, R[r]+1.j*I[i], ky, X, kx_list, Ls, Ln, Nx, A, dk*dx, lambda_D2)
val = det(A)
#(sign, logdet) = np.linalg.slogdet(A)
#val = sign * logdet
V[r,i] = val
print "x, y", R[r], I[i] , " r : ", val
subplot(131)
contourf(R,I,real(V), 100)
colorbar()
subplot(132)
contourf(R,I,imag(V), 100)
colorbar()
subplot(133)
contourf(R,I,abs(V), 100)
colorbar()
#pcolor(R,I,imag(V))
savefig("Contour.png")
#print "(Integral) Solution is w : ",w0
#print "(local) Solution is w : ",w_Local
#plotContours()
plotMode()
################################## Plot Figures ############################
### Plot
clf()
ky_list = array(ky_list)
plot(ky_list, real(w_list1), 'o-', label='real')
plot(ky_list, imag(w_list1), 'o-', label='imag')
legend(ncol=2, loc='best').draw_frame(0)
xlim((min(ky_list), max(ky_list)))
savefig("Results.png")
"""
# Make 3D Plot kx, ky, z
from mpl_toolkits.mplot3d import Axes3D
import matplotlib
import numpy as np
from matplotlib import cm
from matplotlib import pyplot as plt
clf()
Z = array(sol)
ax = fig.add_subplot(121, projection='3d')
_X,_ky = np.meshgrid(X,ky_list)
ax.plot_surface(_X, _ky, real(Z), rstride=1, cstride=1, cmap=cm.jet)
ax = fig.add_subplot(122, projection='3d')
ax.plot_surface(_X, _ky, imag(Z), rstride=1, cstride=1, cmap=cm.jet)
#ax.set_zlim3d(0, 1)
ax.set_xlabel(r'$\phi_\mathrm{real}$')
ax.set_ylabel(r'$\phi_\mathrm{im}$')
ax.w_yaxis.set_scale("log")
savefig("Results_3D.png")
"""
| gpl-3.0 | 5,590,745,320,015,303,000 | 27.756272 | 150 | 0.510408 | false |
TomAugspurger/DSADD | setup.py | 1 | 1281 | from setuptools import setup, find_packages
# To use a consistent encoding
from os import path
here = path.abspath(path.dirname(__file__))
setup(
name='dsadd',
version='0.0.2',
description='A python package for defensive data analysis.',
long_description='A python package for defensive data analysis.',
url='https://github.com/tomaugspurger/dsadd',
# Author details
author='Tom Augspurger',
author_email='[email protected]',
# Choose your license
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
],
keywords='data analysis',
packages=find_packages(exclude=['tests']),
# install_requires=['numpy', 'pandas'],
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
extras_require={
'dev': [''],
'test': ['coverage', 'pytest'],
},
)
| mit | -496,449,566,151,237,250 | 26.255319 | 70 | 0.622951 | false |
schuhumi/timetravel | timetravel-gui.py | 1 | 3226 | #!/usr/bin/env python3
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
from gi.repository import Gtk, Gio
import cairo
import math
class MyWindow(Gtk.Window):
def __init__(self):
Gtk.Window.__init__(self, title="Timetravel")
#self.set_border_width(10)
self.set_default_size(800, 600)
hb = Gtk.HeaderBar()
hb.set_show_close_button(True)
hb.props.title = "Timetravel"
self.set_titlebar(hb)
button = Gtk.Button()
icon = Gio.ThemedIcon(name="emblem-system-symbolic")
image = Gtk.Image.new_from_gicon(icon, Gtk.IconSize.BUTTON)
button.add(image)
hb.pack_end(button)
box = Gtk.Box(spacing=6)
button = Gtk.Button(label="Snapshot")
box.add(button)
button = Gtk.Button()
icon = Gio.ThemedIcon(name="zoom-in-symbolic")
image = Gtk.Image.new_from_gicon(icon, Gtk.IconSize.BUTTON)
button.add(image)
box.add(button)
hb.pack_start(box)
self.darea = Gtk.DrawingArea()
self.darea.connect("draw", self.on_draw)
#self.darea.set_events(Gdk.EventMask.BUTTON_PRESS_MASK)
self.add(self.darea)
def on_draw(self, wid, cr):
cr.translate(700,250)
#cr.scale(800,600)
cr.set_source_rgb(0.6, 0.6, 0.6)
cr.set_line_width(1)
cr.set_dash([10.0, 6.0])
cr.move_to(0, -250)
cr.line_to(0, 210)
cr.stroke()
cr.set_dash([])
nowTxt = "now"
cr.set_source_rgb(0,0,0)
cr.set_font_size(15)
xbearing, ybearing, width, height, xadvance, yadvance = cr.text_extents(nowTxt)
cr.move_to(-width/2, 210+height+10)
cr.show_text(nowTxt)
cr.stroke()
cr.set_source_rgb(0.2, 0.2, 0.7)
cr.set_line_width(3)
cr.move_to(-200, 0)
cr.line_to(0, 0)
cr.stroke()
drawSnapshot(cr, "current", 0, 0, 0.2, 0.7, 0.2)
drawSnapshot(cr, "snap-2015-07-16", -200, 0, 0.2, 0.2, 0.7)
def drawSnapshot (cr, name, x, y, r, g, b):
cr.set_source_rgb(r,g,b)
cr.arc(x, y, 8, 0, 2*math.pi)
cr.fill()
cr.set_source_rgb(0,0,0)
cr.set_font_size(15)
xbearing, ybearing, width, height, xadvance, yadvance = cr.text_extents(name)
cr.move_to(x-width/2, y+height+10)
cr.show_text(name)
cr.stroke()
def drawNowAxis (cr):
pass
win = MyWindow()
win.connect("delete-event", Gtk.main_quit)
win.show_all()
Gtk.main()
| gpl-3.0 | 7,090,185,540,407,805,000 | 28.87037 | 87 | 0.599814 | false |
Adista-ste/metabaseparser | script2012r2.py | 1 | 1237 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from lxml import etree
import argparse, sys, os, re
arguments = argparse.ArgumentParser()
arguments.add_argument("-s","--https",help="Traite également les domaines HTTPS", action='store_true')
arguments.add_argument("-f","--fichier",help="Définit le fichierxml utilise")
args = arguments.parse_args()
if not args.fichier:
print "Erreur : Pas de fichier de MetaBase indiqué"
arguments.print_help()
sys.exit(1)
elif not os.path.exists(args.fichier):
print "Erreur : Le fichier MetaBase indiqué n'existe pas"
arguments.print_help()
sys.exit(2)
tree = etree.parse(args.fichier)
#ns={'xmlns': 'urn:microsoft-catalog:XML_Metabase_V64_0'}
liste=[]
#for i in tree.iter(tag="{%s}IIsWebServer" % ns['xmlns']):
for sites in tree.iter(tag="site"):
for binding in sites.iter('binding'):
bind = binding.attrib.get('bindingInformation')
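        # IIS bindingInformation has the form "<ip-or-*>:<port>:<hostname>", e.g.
        # "*:80:www.example.com" (value shown is illustrative); the substitution below
        # drops the wildcard/port part so that, roughly, only the host name remains.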
ndd = re.sub(r'\**:[0-9]+:', r' ',bind)
if ndd:
#print ndd
liste+=ndd.split()
#print bind['bindingInformation']
# if site:
# if args.https:
# inter=re.sub(r':443:', r' ', site)
# inter=re.sub(r':80:', r' ', site)
# liste+=inter.split()
#
liste.sort()
final=list(set(liste))
final.sort()
#
for j in final:
print "%s" % j
| agpl-3.0 | -6,761,721,896,002,896,000 | 23.176471 | 102 | 0.6691 | false |
guyrt/court-reminder | server/admin_app.py | 1 | 1277 | """
Flask Admin App
"""
import os
from flask import Flask, flash, render_template, request
from flask_admin import Admin
from flask_basicauth import BasicAuth
from azure.storage.blob import BlockBlobService, ContentSettings
from storage.models import Database
from storage.secrets import blob_key, blob_accountname, blob_container
from server.views import AinView
from server import config
db = Database()
blob_service = BlockBlobService(account_name=blob_accountname, account_key=blob_key)
app = Flask(__name__)
app.config.from_object(config.Config)
basic_auth = BasicAuth(app)
admin = Admin(app, name='ASAP', template_mode='bootstrap3')
admin.add_view(AinView(None, name='Ain', endpoint='ain'))
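# The /audio view plays back a recorded call for a given AIN: it looks up the blob
# path stored on the record (CallUploadUrl), downloads the .wav from Azure Blob
# Storage into this app's static/ directory, and renders a small player template
# pointing at that local file.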
@app.route('/audio')
def audio():
ain = request.args.get('id')
azure_path = db.get_ain(ain).get('CallUploadUrl')
if azure_path:
file_root = os.path.join(app.root_path, 'static')
if not os.path.exists(file_root):
os.makedirs(file_root)
filename = ain + '.wav'
path = os.path.join(file_root, filename)
blob_service.get_blob_to_path(container_name=blob_container,
blob_name=azure_path, file_path=path)
else:
filename = None
return render_template('audio.html', filename=filename)
| mit | -6,595,723,578,014,805,000 | 28.697674 | 84 | 0.703994 | false |
jantaylor/road-home-time-tracker | timetracker/models.py | 1 | 3525 | from django.db import models
from django.contrib import admin
from django.db.models import signals
class Volunteer(models.Model):
first_name = models.CharField(max_length=50)
last_name = models.CharField(max_length=50)
email = models.EmailField(max_length=254, unique=True)
is_group = models.BooleanField(default=False, verbose_name="Group")
organization = models.CharField(null=True, max_length=50, blank=True)
group_count = models.IntegerField(null=True, blank=True)
times_clocked_out = models.IntegerField(default=0, editable=False)
date_added = models.DateTimeField(auto_now_add=True, verbose_name="Date Added")
date_modified = models.DateTimeField(auto_now=True, verbose_name="Date Modified")
active = models.BooleanField(default=True)
def __str__(self):
return "{}".format(self.last_name) + ", {}".format(self.first_name)
def delete(self, *args, **kwargs):
self.active = False
self.save()
class Site(models.Model):
name = models.CharField(max_length=50, unique=True)
date_added = models.DateTimeField(auto_now_add=True, verbose_name="Date Added")
date_modified = models.DateTimeField(auto_now=True, verbose_name="Date Modified")
active = models.BooleanField(default=True)
def __str__(self):
return "{}".format(self.name)
def delete(self, *args, **kwargs):
self.active = False
self.save()
class Activity(models.Model):
class Meta:
verbose_name_plural = "activities"
name = models.CharField(max_length=100, unique=True)
date_added = models.DateTimeField(auto_now_add=True, verbose_name="Date Added")
date_modified = models.DateTimeField(auto_now=True, verbose_name="Date Modified")
active = models.BooleanField(default=True)
def __str__(self):
return "{}".format(self.name)
def delete(self, *args, **kwargs):
self.active = False
self.save()
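# post_save receiver for TimeEntry (connected at the bottom of this module): once an
# entry has an end time it computes the hours worked (scaled by group_count for group
# volunteers), stores them back on the entry, and increments the volunteer's
# times_clocked_out counter.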
def print_out(sender, instance, created, **kwargs):
if instance.end is not None:
volunteer = instance.volunteer
if volunteer.is_group:
hours = ((instance.end - instance.start).total_seconds() / 3600) * volunteer.group_count
else:
hours = (instance.end - instance.start).total_seconds() / 3600
if instance.hours != hours:
instance.hours = hours
instance.save()
volunteer = instance.volunteer
volunteer.times_clocked_out += 1
volunteer.save()
class TimeEntry(models.Model):
class Meta:
verbose_name_plural = "Time Entries"
volunteer = models.ForeignKey(Volunteer, on_delete=models.CASCADE)
start = models.DateTimeField(verbose_name="Start Time")
end = models.DateTimeField(null=True, blank=True, verbose_name="End Time")
site = models.ForeignKey(Site, on_delete=models.CASCADE)
activity = models.ForeignKey(Activity, on_delete=models.CASCADE)
date_added = models.DateTimeField(auto_now_add=True, verbose_name="Date Added")
date_modified = models.DateTimeField(auto_now=True, verbose_name="Date Modified")
active = models.BooleanField(default=True)
hours = models.DecimalField(null=True, blank=True, decimal_places=2, max_digits=12)
def __str__(self):
return "{}".format(self.id) + " {}".format(self.volunteer) + " {}".format(self.start.strftime('%m/%d/%Y'))
def delete(self, *args, **kwargs):
self.active = False
self.save()
signals.post_save.connect(print_out, sender=TimeEntry)
| mit | -9,162,268,426,743,387,000 | 36.5 | 114 | 0.670355 | false |
etherealpost/etherealpost.com | etherealpost/__init__.py | 1 | 3480 | from urllib.parse import urlparse
from etherealpost.db.filters import JinjaFilters
from pymongo import MongoClient
from pyramid.config import Configurator
from pyramid.httpexceptions import HTTPMovedPermanently
# http://stackoverflow.com/a/15705778
def add_auto_route(config, name, pattern, **kw):
config.add_route(name, pattern, **kw)
if not pattern.endswith('/'):
config.add_route(name + '_auto', pattern + '/')
def redirector(request):
return HTTPMovedPermanently(
request.route_url(name, _query=request.GET,
**request.matchdict))
config.add_view(redirector, route_name=name + '_auto')
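# add_auto_route registers the canonical route and, when the pattern lacks a trailing
# slash, a twin "<name>_auto" route ending in "/" whose only view issues a permanent
# redirect back to the canonical URL, preserving query string and match parameters.
# Usage sketch (URL values are illustrative):
#   add_auto_route(config, 'item', '{region:(us|eu)}/{realm_slug}/item/{item:\d+}')
#   # "/us/some-realm/item/123/" then redirects permanently to "/us/some-realm/item/123"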
import os
from pyramid.response import FileResponse
def favicon_view(request):
here = os.path.dirname(__file__)
icon = os.path.join(here, 'assets', 'favicon.ico')
return FileResponse(icon, request=request)
def robots_view(request):
here = os.path.dirname(__file__)
icon = os.path.join(here, 'assets', 'robots.txt')
return FileResponse(icon, request=request)
def sitemap_view(request):
here = os.path.dirname(__file__)
icon = os.path.join(here, 'assets', 'sitemap.xml')
return FileResponse(icon, request=request)
def main(global_config, **settings):
config = Configurator(settings=settings)
# Includes
config.include('pyramid_jinja2')
config.commit()
config.include('pyramid_scss')
# Database
db_url = urlparse(settings['mongo_uri'])
config.registry.db = MongoClient(host=db_url.hostname,
port=db_url.port)
def add_db(request):
db = config.registry.db[db_url.path[1:]]
if db_url.username and db_url.password:
db.authenticate(db_url.username, db_url.password)
return db
config.add_request_method(add_db, 'db', reify=True)
# Jinja Filters
jinja2_env = config.get_jinja2_environment()
# and Stripe and Paypal
jinja2_env.globals['stripe_pk'] = settings['stripe_pk']
jinja2_env.globals['paypal_id'] = settings['paypal_id']
jinja2_env.globals['btc_address'] = settings['btc_address']
jf = JinjaFilters(db=config.registry.db)
jf.set_filters(jinja2_env)
# Routing
add_auto_route(config, 'home', '/')
add_auto_route(config, 'stripe_charge', '/charge')
add_auto_route(config, 'about', '/about')
add_auto_route(config, 'region_switch', '{region:(us|eu)}')
add_auto_route(config, 'realm', '{region:(us|eu)}/{realm_slug}')
add_auto_route(config, 'item',
'{region:(us|eu)}/{realm_slug}/item/{item:\d+}')
add_auto_route(config, 'seller',
'{region:(us|eu)}/{realm_slug}/seller/{seller}')
# Static Files
config.add_route('favicon', '/favicon.ico')
config.add_route('robots', '/robots.txt')
config.add_route('sitemap', '/sitemap.xml')
config.add_view(view='etherealpost.favicon_view', route_name='favicon')
config.add_view(view='etherealpost.robots_view', route_name='robots')
config.add_view(view='etherealpost.sitemap_view', route_name='sitemap')
config.add_route('css', '/assets/css/{css_path:.*}.css')
config.add_view(route_name='css', view='pyramid_scss.controller.get_scss',
renderer='scss', request_method='GET')
config.add_static_view(name='assets', path='etherealpost:assets')
# Scan things
config.scan('.views')
# Here we gooooo!
return config.make_wsgi_app()
| mit | 1,368,858,778,148,236,000 | 34.876289 | 78 | 0.64569 | false |
Squishymedia/feedingdb | src/feeddb/feed/migrations/0087_auto__add_field_setup_technique.py | 1 | 41850 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Setup.technique'
db.add_column(u'feed_setup', 'technique',
self.gf('django.db.models.fields.IntegerField')(default=0),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Setup.technique'
db.delete_column(u'feed_setup', 'technique')
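    # ``models`` below is South's frozen snapshot of every model this migration may
    # touch; it is accessed through the ``orm`` argument above rather than the live
    # model classes.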
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'feed.ageunit': {
'Meta': {'ordering': "['label']", 'object_name': 'AgeUnit'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'ageunit_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.anatomicallocation': {
'Meta': {'object_name': 'AnatomicalLocation'},
'category': ('django.db.models.fields.IntegerField', [], {}),
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'anatomicallocation_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'ontology_term': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'null': 'True', 'to': u"orm['feed.MuscleOwl']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.animalapprovaltype': {
'Meta': {'object_name': 'AnimalApprovalType'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'animalapprovaltype_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
'description': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.anteriorposterioraxis': {
'Meta': {'object_name': 'AnteriorPosteriorAxis'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'anteriorposterioraxis_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.behavior': {
'Meta': {'ordering': "['label']", 'object_name': 'Behavior'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'behavior_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.behaviorowl': {
'Meta': {'object_name': 'BehaviorOwl'},
'bfo_part_of_some': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'has_parts'", 'symmetrical': 'False', 'to': u"orm['feed.BehaviorOwl']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'obo_definition': ('django.db.models.fields.TextField', [], {}),
'rdfs_comment': ('django.db.models.fields.TextField', [], {}),
'rdfs_is_class': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'rdfs_subClassOf_ancestors': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'has_subClass_descendants'", 'symmetrical': 'False', 'to': u"orm['feed.BehaviorOwl']"}),
'uri': ('django.db.models.fields.CharField', [], {'max_length': '1500'})
},
u'feed.channel': {
'Meta': {'object_name': 'Channel'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'channel_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'rate': ('django.db.models.fields.IntegerField', [], {}),
'setup': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Setup']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.channellineup': {
'Meta': {'ordering': "['position']", 'object_name': 'ChannelLineup'},
'channel': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Channel']", 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'channellineup_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'position': ('django.db.models.fields.IntegerField', [], {}),
'session': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Session']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.depthaxis': {
'Meta': {'object_name': 'DepthAxis'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'depthaxis_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.developmentstage': {
'Meta': {'ordering': "['label']", 'object_name': 'DevelopmentStage'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'developmentstage_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.dorsalventralaxis': {
'Meta': {'object_name': 'DorsalVentralAxis'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'dorsalventralaxis_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.electrodetype': {
'Meta': {'ordering': "['label']", 'object_name': 'ElectrodeType'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'electrodetype_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.emgchannel': {
'Meta': {'object_name': 'EmgChannel', '_ormbases': [u'feed.Channel']},
u'channel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Channel']", 'unique': 'True', 'primary_key': 'True'}),
'emg_amplification': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'emg_filtering': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Emgfiltering']"}),
'sensor': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.EmgSensor']"}),
'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Unit']"})
},
u'feed.emgfiltering': {
'Meta': {'object_name': 'Emgfiltering'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'emgfiltering_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.emgsensor': {
'Meta': {'ordering': "['id']", 'object_name': 'EmgSensor', '_ormbases': [u'feed.Sensor']},
'axisdepth': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.DepthAxis']", 'null': 'True', 'blank': 'True'}),
'electrode_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.ElectrodeType']", 'null': 'True', 'blank': 'True'}),
'location_controlled': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.AnatomicalLocation']"}),
'muscle': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.MuscleOwl']", 'null': 'True'}),
u'sensor_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Sensor']", 'unique': 'True', 'primary_key': 'True'})
},
u'feed.emgsetup': {
'Meta': {'object_name': 'EmgSetup', '_ormbases': [u'feed.Setup']},
'preamplifier': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'setup_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Setup']", 'unique': 'True', 'primary_key': 'True'})
},
u'feed.eventchannel': {
'Meta': {'object_name': 'EventChannel', '_ormbases': [u'feed.Channel']},
u'channel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Channel']", 'unique': 'True', 'primary_key': 'True'}),
'unit': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
u'feed.eventsetup': {
'Meta': {'object_name': 'EventSetup', '_ormbases': [u'feed.Setup']},
u'setup_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Setup']", 'unique': 'True', 'primary_key': 'True'})
},
u'feed.experiment': {
'Meta': {'object_name': 'Experiment'},
'accession': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'bookkeeping': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'experiment_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'end': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'impl_notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'start': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'study': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Study']"}),
'subj_age': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '19', 'decimal_places': '5', 'blank': 'True'}),
'subj_ageunit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.AgeUnit']", 'null': 'True', 'blank': 'True'}),
'subj_devstage': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.DevelopmentStage']"}),
'subj_tooth': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'subj_weight': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '19', 'decimal_places': '5', 'blank': 'True'}),
'subject': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Subject']"}),
'subject_notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.feeduserprofile': {
'Meta': {'object_name': 'FeedUserProfile'},
'institutional_affiliation': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True', 'primary_key': 'True'})
},
u'feed.forcechannel': {
'Meta': {'object_name': 'ForceChannel', '_ormbases': [u'feed.Channel']},
u'channel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Channel']", 'unique': 'True', 'primary_key': 'True'}),
'sensor': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.ForceSensor']"}),
'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Unit']", 'null': 'True'})
},
u'feed.forcesensor': {
'Meta': {'object_name': 'ForceSensor', '_ormbases': [u'feed.Sensor']},
u'sensor_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Sensor']", 'unique': 'True', 'primary_key': 'True'})
},
u'feed.forcesetup': {
'Meta': {'object_name': 'ForceSetup', '_ormbases': [u'feed.Setup']},
u'setup_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Setup']", 'unique': 'True', 'primary_key': 'True'})
},
u'feed.illustration': {
'Meta': {'object_name': 'Illustration'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'illustration_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
'experiment': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Experiment']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'picture': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'setup': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Setup']", 'null': 'True', 'blank': 'True'}),
'subject': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Subject']", 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.kinematicschannel': {
'Meta': {'object_name': 'KinematicsChannel', '_ormbases': [u'feed.Channel']},
u'channel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Channel']", 'unique': 'True', 'primary_key': 'True'}),
'sensor': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.KinematicsSensor']"}),
'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Unit']", 'null': 'True'})
},
u'feed.kinematicssensor': {
'Meta': {'object_name': 'KinematicsSensor', '_ormbases': [u'feed.Sensor']},
u'sensor_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Sensor']", 'unique': 'True', 'primary_key': 'True'})
},
u'feed.kinematicssetup': {
'Meta': {'object_name': 'KinematicsSetup', '_ormbases': [u'feed.Setup']},
u'setup_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Setup']", 'unique': 'True', 'primary_key': 'True'})
},
u'feed.mediallateralaxis': {
'Meta': {'object_name': 'MedialLateralAxis'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'mediallateralaxis_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.muscleowl': {
'Meta': {'object_name': 'MuscleOwl'},
'bfo_part_of_some': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'has_parts'", 'symmetrical': 'False', 'to': u"orm['feed.MuscleOwl']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'obo_definition': ('django.db.models.fields.TextField', [], {}),
'rdfs_comment': ('django.db.models.fields.TextField', [], {}),
'rdfs_is_class': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'rdfs_subClassOf_ancestors': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'has_subClass_descendants'", 'symmetrical': 'False', 'to': u"orm['feed.MuscleOwl']"}),
'uri': ('django.db.models.fields.CharField', [], {'max_length': '1500'})
},
u'feed.pressurechannel': {
'Meta': {'object_name': 'PressureChannel', '_ormbases': [u'feed.Channel']},
u'channel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Channel']", 'unique': 'True', 'primary_key': 'True'}),
'sensor': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.PressureSensor']"}),
'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Unit']", 'null': 'True'})
},
u'feed.pressuresensor': {
'Meta': {'object_name': 'PressureSensor', '_ormbases': [u'feed.Sensor']},
u'sensor_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Sensor']", 'unique': 'True', 'primary_key': 'True'})
},
u'feed.pressuresetup': {
'Meta': {'object_name': 'PressureSetup', '_ormbases': [u'feed.Setup']},
u'setup_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Setup']", 'unique': 'True', 'primary_key': 'True'})
},
u'feed.proximaldistalaxis': {
'Meta': {'object_name': 'ProximalDistalAxis'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'proximaldistalaxis_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.restraint': {
'Meta': {'ordering': "['label']", 'object_name': 'Restraint'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'restraint_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.sensor': {
'Meta': {'object_name': 'Sensor'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'sensor_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'loc_ap': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.AnteriorPosteriorAxis']", 'null': 'True', 'blank': 'True'}),
'loc_dv': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.DorsalVentralAxis']", 'null': 'True', 'blank': 'True'}),
'loc_ml': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.MedialLateralAxis']", 'null': 'True', 'blank': 'True'}),
'loc_pd': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.ProximalDistalAxis']", 'null': 'True', 'blank': 'True'}),
'loc_side': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Side']"}),
'location_freetext': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'setup': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Setup']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.session': {
'Meta': {'ordering': "['position']", 'object_name': 'Session'},
'accession': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'bookkeeping': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'channels': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['feed.Channel']", 'through': u"orm['feed.ChannelLineup']", 'symmetrical': 'False'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'session_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
'end': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'experiment': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Experiment']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'position': ('django.db.models.fields.IntegerField', [], {}),
'start': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'study': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Study']"}),
'subj_anesthesia_sedation': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'subj_notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'subj_restraint': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Restraint']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.setup': {
'Meta': {'object_name': 'Setup'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'setup_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
'experiment': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Experiment']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'sampling_rate': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'technique': ('django.db.models.fields.IntegerField', [], {}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.side': {
'Meta': {'ordering': "['label']", 'object_name': 'Side'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'side_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.sonochannel': {
'Meta': {'object_name': 'SonoChannel', '_ormbases': [u'feed.Channel']},
u'channel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Channel']", 'unique': 'True', 'primary_key': 'True'}),
'crystal1': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'crystals1_related'", 'to': u"orm['feed.SonoSensor']"}),
'crystal2': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'crystals2_related'", 'to': u"orm['feed.SonoSensor']"}),
'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Unit']"})
},
u'feed.sonosensor': {
'Meta': {'object_name': 'SonoSensor', '_ormbases': [u'feed.Sensor']},
'axisdepth': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.DepthAxis']", 'null': 'True', 'blank': 'True'}),
'location_controlled': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.AnatomicalLocation']"}),
'muscle': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.MuscleOwl']", 'null': 'True'}),
u'sensor_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Sensor']", 'unique': 'True', 'primary_key': 'True'})
},
u'feed.sonosetup': {
'Meta': {'object_name': 'SonoSetup', '_ormbases': [u'feed.Setup']},
u'setup_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Setup']", 'unique': 'True', 'primary_key': 'True'}),
'sonomicrometer': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
u'feed.strainchannel': {
'Meta': {'object_name': 'StrainChannel', '_ormbases': [u'feed.Channel']},
u'channel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Channel']", 'unique': 'True', 'primary_key': 'True'}),
'sensor': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.StrainSensor']"}),
'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Unit']", 'null': 'True'})
},
u'feed.strainsensor': {
'Meta': {'object_name': 'StrainSensor', '_ormbases': [u'feed.Sensor']},
u'sensor_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Sensor']", 'unique': 'True', 'primary_key': 'True'})
},
u'feed.strainsetup': {
'Meta': {'object_name': 'StrainSetup', '_ormbases': [u'feed.Setup']},
u'setup_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Setup']", 'unique': 'True', 'primary_key': 'True'})
},
u'feed.study': {
'Meta': {'ordering': "['title']", 'object_name': 'Study'},
'approval': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'approval_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.AnimalApprovalType']", 'null': 'True'}),
'bookkeeping': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'study_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
'description': ('django.db.models.fields.TextField', [], {}),
'end': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'funding': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'funding_agency': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lab': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'resources': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'start': ('django.db.models.fields.DateField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.studyprivate': {
'Meta': {'object_name': 'StudyPrivate'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'studyprivate_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'pi': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'study': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Study']", 'unique': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.subject': {
'Meta': {'object_name': 'Subject'},
'breed': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'subject_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'sex': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'source': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'study': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Study']"}),
'taxon': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Taxon']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.taxon': {
'Meta': {'ordering': "['genus']", 'object_name': 'Taxon'},
'common_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'taxon_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
'genus': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'species': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.trial': {
'Meta': {'object_name': 'Trial'},
'accession': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'behavior_notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'behavior_primary': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Behavior']", 'null': 'True'}),
'behavior_secondary': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'behaviorowl_primary': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'primary_in_trials'", 'null': 'True', 'to': u"orm['feed.BehaviorOwl']"}),
'behaviorowl_secondary': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'secondary_in_trials'", 'null': 'True', 'to': u"orm['feed.BehaviorOwl']"}),
'bookkeeping': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'trial_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
'data_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'end': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'experiment': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Experiment']"}),
'food_property': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'food_size': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'food_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'position': ('django.db.models.fields.IntegerField', [], {}),
'session': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Session']"}),
'start': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'study': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Study']"}),
'subj_notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'subj_treatment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {}),
'waveform_picture': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
},
u'feed.unit': {
'Meta': {'ordering': "['technique', 'label']", 'object_name': 'Unit'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'unit_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'technique': ('django.db.models.fields.IntegerField', [], {}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
}
}
complete_apps = ['feed'] | gpl-3.0 | -7,785,624,208,790,066,000 | 82.869739 | 203 | 0.548053 | false |
SeungGiJeong/SK_FastIR | dump/mbr.py | 1 | 6740 | from construct import *
from distorm3 import Decode, Decode16Bits
import hexdump
import logging
import os
class Mbr:
def __init__(self, path):
self.mbrHexa = ""
self.mbrStruct = ""
self.bootloaderCode = ""
self.offset = 0
self.partition = {"name": []}
        self.signature = ""
        self.path = path
        self.logger = logging.getLogger(__name__)
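        # The MBR is parsed as: 446 bytes of bootstrap code, four 16-byte
        # partition entries (status, CHS start, type, CHS end, LBA offset,
        # size in sectors) and the mandatory 0x55AA boot signature.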
self.mbr = Struct("mbr",
HexDumpAdapter(Bytes("bootloaderCode", 446)),
Array(4,
Struct("partitions",
Enum(Byte("state"),
INACTIVE=0x00,
ACTIVE=0x80,
),
BitStruct("beginning",
Octet("head"),
Bits("sect", 6),
Bits("cyl", 10),
),
Enum(UBInt8("type"),
Nothing=0x00,
FAT12_CHS=0x01,
XENIX_ROOT=0x02,
XENIX_USR=0x03,
FAT16_16_32MB_CHS=0x04,
Extended_DOS=0x05,
FAT16_32MB_CHS=0x06,
NTFS=0x07,
FAT32_CHS=0x0b,
FAT32_LBA=0x0c,
FAT16_32MB_2GB_LBA=0x0e,
Microsoft_Extended_LBA=0x0f,
Hidden_FAT12_CHS=0x11,
Hidden_FAT16_16_32MB_CHS=0x14,
Hidden_FAT16_32MB_2GB_CHS=0x16,
AST_SmartSleep_Partition=0x18,
Hidden_FAT32_CHS=0x1b,
Hidden_FAT32_LBA=0x1c,
Hidden_FAT16_32MB_2GB_LBA=0x1e,
PQservice=0x27,
Plan_9_partition=0x39,
PartitionMagic_recovery_partition=0x3c,
Microsoft_MBR_Dynamic_Disk=0x42,
GoBack_partition=0x44,
Novell=0x51,
CP_M=0x52,
Unix_System_V=0x63,
PC_ARMOUR_protected_partition=0x64,
Solaris_x86_or_Linux_Swap=0x82,
LINUX_NATIVE=0x83,
Hibernation=0x84,
Linux_Extended=0x85,
NTFS_Volume_Set=0x86,
BSD_OS=0x9f,
FreeBSD=0xa5,
OpenBSD=0xa6,
Mac_OSX=0xa8,
NetBSD=0xa9,
Mac_OSX_Boot=0xab,
MacOS_X_HFS=0xaf,
BSDI=0xb7,
BSDI_Swap=0xb8,
Boot_Wizard_hidden=0xbb,
Solaris_8_boot_partition=0xbe,
CP_M_86=0xd8,
Dell_PowerEdge_Server_utilities_FAT_FS=0xde,
DG_UX_virtual_disk_manager_partition=0xdf,
BeOS_BFS=0xeb,
EFI_GPT_Disk=0xee,
EFI_System_Partition=0xef,
VMWare_File_System=0xfb,
VMWare_Swap=0xfc,
_default_=Pass,
),
BitStruct("ending",
Octet("head"),
Bits("sect", 6),
Bits("cyl", 10),
),
ULInt32("sector_offset"), # offset from MBR in sectors
ULInt32("size"), # in sectors
)
),
Const(Bytes("signature", 2), "\x55\xAA"),
)
def save_mbr(self, image):
file_image = open(image, "rb")
file_mbr = open(self.path + os.path.sep + "mbr_raw", "wb")
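        # The MBR occupies the first 512-byte sector of the disk image.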
try:
file_mbr.write(file_image.read(512))
        except Exception as err:
            self.logger.error("Failed to extract the MBR: %s", err)
file_image.close()
file_mbr.close()
return file_mbr.name
def extract_hexa(self, file_mbr):
# file = open(fileMbr,"rb")
hex_str = ""
for line in file_mbr.split('\n'):
hex_str += line[10:58]
hex_str = hex_str.replace(' ', '')
self.mbrHexa = hex_str
def mbr_parsing(self, image):
file_mbr = self.save_mbr(image)
self.extract_hexa(hexdump.hexdump(open(file_mbr, 'rb').read(512), "return"))
try:
cap1 = self.mbrHexa.decode("hex")
self.mbrStruct = self.mbr.parse(cap1)
return self.mbrStruct
        except Exception as inst:
            self.logger.error("Failed to parse the MBR: %s", inst)
def boot_loader_disassembly(self):
l = Decode(0x000, self.mbrStruct.bootloaderCode, Decode16Bits)
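        # distorm3 yields (offset, size, instruction, hex bytes) tuples;
        # Decode16Bits is used because MBR boot code runs in 16-bit real mode.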
assembly_code = ""
for (offset, size, instruction, hexdump) in l:
assembly_code = assembly_code + "%.8x: %-32s %s" % (offset, hexdump, instruction) + "\n"
h_file = open(self.path + os.path.sep + "bootLoaderAssemblyCode.txt", "w")
h_file.write(assembly_code)
h_file.close()
| gpl-3.0 | 5,784,401,525,415,355,000 | 49.676692 | 100 | 0.327448 | false |
caderache2014/django-rest-tutorial | tutorial/tutorial/settings.py | 1 | 2075 | """
Django settings for tutorial project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'xnb^kzgkv4vd!(u@ry_=eo2xo_)@_c12bsvk63hv=c2%%4!zf#'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'rest_framework',
'snippets',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'tutorial.urls'
WSGI_APPLICATION = 'tutorial.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'tmp.db',
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/' | mit | -1,510,887,145,338,488,600 | 22.590909 | 71 | 0.697831 | false |
beiko-lab/gengis | bin/Lib/site-packages/scipy/optimize/tests/test_regression.py | 1 | 1237 | """Regression tests for optimize.
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import TestCase, run_module_suite, assert_almost_equal, \
assert_raises
import scipy.optimize
class TestRegression(TestCase):
def test_newton_x0_is_0(self):
"""Ticket #1074"""
tgt = 1
res = scipy.optimize.newton(lambda x: x - 1, 0)
assert_almost_equal(res, tgt)
def test_newton_integers(self):
"""Ticket #1214"""
root = scipy.optimize.newton(lambda x: x**2 - 1, x0=2,
fprime=lambda x: 2*x)
assert_almost_equal(root, 1.0)
def test_lmdif_errmsg(self):
# this shouldn't cause a crash on Python 3
class SomeError(Exception):
pass
counter = [0]
def func(x):
counter[0] += 1
if counter[0] < 3:
return x**2 - np.array([9, 10, 11])
else:
raise SomeError()
assert_raises(SomeError,
scipy.optimize.leastsq,
func, [1, 2, 3])
if __name__ == "__main__":
run_module_suite()
| gpl-3.0 | 797,545,178,663,493,000 | 25.488889 | 76 | 0.515764 | false |
saga-project/bliss | setup.py | 1 | 5145 | # -*- coding: utf-8 -*-
"""
Bliss setup script.
"""
import os
import sys
import shutil
import fileinput
from distutils.core import setup
from distutils.command.install_data import install_data
from distutils.command.sdist import sdist
from bliss import version
scripts = [] # ["bin/bliss-run"]
import sys
if sys.hexversion < 0x02040000:
raise RuntimeError, "Bliss requires Python 2.4 or higher"
class our_install_data(install_data):
def finalize_options(self):
self.set_undefined_options('install',
('install_lib', 'install_dir'),
)
install_data.finalize_options(self)
def run(self):
install_data.run(self)
# ensure there's a bliss/VERSION file
fn = os.path.join(self.install_dir, 'bliss', 'VERSION')
open(fn, 'w').write(version)
self.outfiles.append(fn)
class our_sdist(sdist):
def make_release_tree(self, base_dir, files):
sdist.make_release_tree(self, base_dir, files)
        # ensure there's a bliss/VERSION file
fn = os.path.join(base_dir, 'bliss', 'VERSION')
open(fn, 'w').write(version)
setup_args = {
'name': "bliss",
'version': version,
'description': "A native Python implementation of the OGF SAGA standard (GFD.90).",
'long_description': "SAGA-Python (a.k.a bliss) is a pragmatic and light-weight implementation of the OGF GFD.90 SAGA standard. SAGA-Python is written 100% in Python and focuses on usability and ease of deployment.",
'author': "Ole Christian Weidner, et al.",
'author_email': "[email protected]",
'maintainer': "Ole Christian Weidner",
'maintainer_email': "[email protected]",
'url': "http://saga-project.github.com/bliss/",
'license': "MIT",
'classifiers': [
'Development Status :: 5 - Production/Stable',
'Environment :: No Input/Output (Daemon)',
'Intended Audience :: Developers',
'Programming Language :: Python',
'License :: OSI Approved :: MIT License',
'Topic :: System :: Distributed Computing',
'Topic :: Scientific/Engineering :: Interface Engine/Protocol Translator',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX',
'Operating System :: POSIX :: AIX',
'Operating System :: POSIX :: BSD',
'Operating System :: POSIX :: BSD :: BSD/OS',
'Operating System :: POSIX :: BSD :: FreeBSD',
'Operating System :: POSIX :: BSD :: NetBSD',
'Operating System :: POSIX :: BSD :: OpenBSD',
'Operating System :: POSIX :: GNU Hurd',
'Operating System :: POSIX :: HP-UX',
'Operating System :: POSIX :: IRIX',
'Operating System :: POSIX :: Linux',
'Operating System :: POSIX :: Other',
'Operating System :: POSIX :: SCO',
'Operating System :: POSIX :: SunOS/Solaris',
'Operating System :: Unix'
],
'packages': [
"bliss",
"bliss.saga",
"bliss.saga.job",
"bliss.saga.resource",
"bliss.saga.filesystem",
#"bliss.sagacompat",
#"bliss.sagacompat.sd",
#"bliss.sagacompat.job",
#"bliss.sagacompat.filesystem",
"bliss.utils",
"bliss.runtime",
"bliss.interface",
"bliss.plugins",
"bliss.plugins.local",
"bliss.plugins.sge",
"bliss.plugins.pbs",
"bliss.plugins.sftp",
"bliss.plugins.ssh"
],
'scripts': scripts,
# mention data_files, even if empty, so install_data is called and
# VERSION gets copied
'data_files': [("bliss", [])],
'cmdclass': {
'install_data': our_install_data,
'sdist': our_sdist
}
}
# set zip_safe to false to force Windows installs to always unpack eggs
# into directories, which seems to work better --
# see http://buildbot.net/trac/ticket/907
if sys.platform == "win32":
setup_args['zip_safe'] = False
try:
# If setuptools is installed, then we'll add setuptools-specific arguments
# to the setup args.
import setuptools #@UnusedImport
except ImportError:
pass
else:
setup_args['install_requires'] = [
'paramiko-on-pypi', 'pexpect'
]
if os.getenv('BLISS_NO_INSTALL_REQS'):
setup_args['install_requires'] = None
##
## PROCESS SETUP OPTIONS FOR DIFFERENT BACKENDS
##
# process AIR_AMQP_HOSTNAME and AIR_AMQP_PORT
#air_amqp_hostname = os.getenv('AIR_AMQP_HOST')
#air_amqp_port = os.getenv('AIR_AMQP_PORT')
#
#if not air_amqp_hostname:
# air_amqp_hostname = "localhost"
#
#print "setting default amqp hostname to '%s' in air/scripts/config.py" % air_amqp_hostname
#
#if not air_amqp_port:
# air_amqp_port = "5672"
#
#print "setting default amqp port to '%s' in air/scripts/config.py" % air_amqp_port
#
#
#shutil.copyfile("./air/scripts/config.py.in", "./air/scripts/config.py")
#s = open("./air/scripts/config.py.in").read()
#s = s.replace('###REPLACE_WITH_AMQP_HOSTNAME###', str(air_amqp_hostname))
#s = s.replace('###REPLACE_WITH_AMQP_PORT###', str(air_amqp_port))
#f = open("./air/scripts/config.py", 'w')
#f.write(s)
#f.close()
setup(**setup_args)
| mit | 3,389,903,422,821,095,400 | 30.181818 | 219 | 0.620214 | false |
EmmaIshta/QUANTAXIS | QUANTAXIS/QAData/proto/stock_min_pb2.py | 1 | 5509 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: stock_min.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='stock_min.proto',
package='',
syntax='proto3',
serialized_pb=_b('\n\x0fstock_min.proto\"\xb9\x01\n\tstock_min\x12\x0c\n\x04\x63ode\x18\x01 \x01(\t\x12\x0c\n\x04open\x18\x02 \x01(\x02\x12\x0c\n\x04high\x18\x03 \x01(\x02\x12\x0b\n\x03low\x18\x04 \x01(\x02\x12\r\n\x05\x63lose\x18\x05 \x01(\x02\x12\x0e\n\x06volume\x18\x06 \x01(\x02\x12\x0c\n\x04\x64\x61te\x18\x07 \x01(\t\x12\x0e\n\x06\x61mount\x18\x08 \x01(\x02\x12\x12\n\ndate_stamp\x18\t \x01(\t\x12\x10\n\x08\x64\x61tetime\x18\n \x01(\t\x12\x12\n\ntime_stamp\x18\x0b \x01(\tb\x06proto3')
)
_STOCK_MIN = _descriptor.Descriptor(
name='stock_min',
full_name='stock_min',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='code', full_name='stock_min.code', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='open', full_name='stock_min.open', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='high', full_name='stock_min.high', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='low', full_name='stock_min.low', index=3,
number=4, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='close', full_name='stock_min.close', index=4,
number=5, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='volume', full_name='stock_min.volume', index=5,
number=6, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='date', full_name='stock_min.date', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='amount', full_name='stock_min.amount', index=7,
number=8, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='date_stamp', full_name='stock_min.date_stamp', index=8,
number=9, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='datetime', full_name='stock_min.datetime', index=9,
number=10, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='time_stamp', full_name='stock_min.time_stamp', index=10,
number=11, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=20,
serialized_end=205,
)
DESCRIPTOR.message_types_by_name['stock_min'] = _STOCK_MIN
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
stock_min = _reflection.GeneratedProtocolMessageType('stock_min', (_message.Message,), dict(
DESCRIPTOR = _STOCK_MIN,
__module__ = 'stock_min_pb2'
# @@protoc_insertion_point(class_scope:stock_min)
))
_sym_db.RegisterMessage(stock_min)
# @@protoc_insertion_point(module_scope)
| mit | -5,573,104,706,046,568,000 | 38.633094 | 494 | 0.679797 | false |
chrisrink10/mumpy | mumpy/interpreter.py | 1 | 5105 | """MUMPy Interpreter
The functions in this module represent various functions that may need
to be carried out from the command line (including starting the REPL
and compiling and executing a routine file).
Licensed under a BSD license. See LICENSE for more information.
Author: Christopher Rink"""
try:
# Used by Python's input() to provide readline functionality
# Does not work on Windows, so we'll just pass
import readline
except ImportError:
pass
import argparse
import mumpy
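# Example invocations (assuming the package is exposed as a `mumpy` console
# script; the command name and routine filenames below are illustrative):
#   mumpy                              # start the REPL
#   mumpy -c routine1.m routine2.m     # compile routines
#   mumpy -f routine.m -t TAG -a 1 2   # execute TAG in routine.m with args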
def main():
"""The main command line entry point for MUMPy."""
parser = argparse.ArgumentParser(
description="MUMPS interpreter. "
"Summoning this script without any arguments will open the "
"included MUMPS REPL capability."
)
parser.add_argument("-d", "--debug",
help="Enable debug output in REPL mode",
required=False,
action='store_true'
)
parser.add_argument("-c", "--compile",
help="A list of MUMPS scripts to compile.",
required=False,
nargs='*'
)
parser.add_argument("-f", "--file",
help="A MUMPS routine to execute.",
required=False,
nargs=1
)
parser.add_argument("-t", "--tag",
help="The tag to execute in the specified routine",
required=False,
nargs=1
)
parser.add_argument("-dev", "--device",
help="The I/O device this process should start with",
required=False,
nargs=1
)
parser.add_argument("-a", "--args",
help="The arguments to pass to the specified tag",
required=False,
nargs="*"
)
parser.add_argument("-r", "--recompile",
help="Recompile any routines before interpreting.",
required=False,
action='store_true'
)
args = parser.parse_args()
# Process routine compilations first
if args.compile:
compile_routine(args.compile,
args.debug)
# Then interpret any files
if args.file:
interpret(args.file[0],
tag=None if args.tag is None else args.tag[0],
device=None if args.device is None else args.device[0],
args=args.args,
recompile=args.recompile,
debug=args.debug)
# If the user wants to neither compile any routines or interpret any files,
# start the REPL
if not args.compile and not args.file:
start_repl(args.debug)
def start_repl(debug=False):
"""Start the interpreter loop."""
env = mumpy.MUMPSEnvironment()
p = mumpy.MUMPSParser(env, debug=debug)
# Catch the Keyboard Interrupt to let us exit gracefully
try:
# Accept user input
while True:
current_line = input("mumpy > ")
# Allow empty lines from the REPL
if current_line.strip() == "":
continue
# Catch any Syntax errors from the user input
try:
p.parse_repl(current_line)
except mumpy.MUMPSSyntaxError as e:
print(e)
# If output was emitted, we need to add an extra newline
if p.output:
print("")
except KeyboardInterrupt:
print("")
pass
def compile_routine(files, debug=False):
"""Compile a list of routines."""
# Compile the routines to an intermediate format
intf = []
for file in files:
print("Compiling {file}...".format(file=file))
try:
intf.append(mumpy.MUMPSFile(rou=file, debug=debug, recompile=True))
print("Success!")
except mumpy.MUMPSCompileError as e:
print(e)
print("Failed to compile {rou}!".format(rou=file))
def interpret(file, tag=None, args=None, device=None,
recompile=False, debug=False):
"""Interpret a routine file.."""
# Prepare the file
try:
f = mumpy.MUMPSFile(file, recompile=recompile, debug=debug)
except mumpy.MUMPSCompileError as e:
print(e)
return
    # If we recompiled and made it this far, then there were no errors
if recompile:
print("{} recompiled successfully!".format(file))
# Prepare the environment and parser
env = mumpy.MUMPSEnvironment()
p = mumpy.MUMPSParser(env, debug=debug)
# If the user specifies another default device, use that
if device is not None:
env.open(device)
env.use(device)
# Parse the file
try:
p.parse_file(f, tag=tag, args=args)
except mumpy.MUMPSSyntaxError as e:
print(e)
| bsd-3-clause | -7,555,515,970,046,993,000 | 32.149351 | 80 | 0.543389 | false |
Kozea/pygal | pygal/graph/xy.py | 1 | 4016 | # -*- coding: utf-8 -*-
# This file is part of pygal
#
# A python svg graph plotting library
# Copyright © 2012-2016 Kozea
#
# This library is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with pygal. If not, see <http://www.gnu.org/licenses/>.
"""
XY Line graph: Plot a set of couple data points (x, y) connected by
straight segments.
"""
from functools import reduce
from pygal.graph.dual import Dual
from pygal.graph.line import Line
from pygal.util import cached_property, compose, ident
class XY(Line, Dual):
"""XY Line graph class"""
_x_adapters = []
@cached_property
def xvals(self):
"""All x values"""
return [
val[0] for serie in self.all_series for val in serie.values
if val[0] is not None
]
@cached_property
def yvals(self):
"""All y values"""
return [
val[1] for serie in self.series for val in serie.values
if val[1] is not None
]
@cached_property
def _min(self):
"""Getter for the minimum series value"""
return (
self.range[0] if (self.range and self.range[0] is not None) else
(min(self.yvals) if self.yvals else None)
)
@cached_property
def _max(self):
"""Getter for the maximum series value"""
return (
self.range[1] if (self.range and self.range[1] is not None) else
(max(self.yvals) if self.yvals else None)
)
def _compute(self):
"""Compute x/y min and max and x/y scale and set labels"""
if self.xvals:
if self.xrange:
x_adapter = reduce(compose, self._x_adapters) if getattr(
self, '_x_adapters', None
) else ident
xmin = x_adapter(self.xrange[0])
xmax = x_adapter(self.xrange[1])
else:
xmin = min(self.xvals)
xmax = max(self.xvals)
xrng = (xmax - xmin)
else:
xrng = None
if self.yvals:
ymin = self._min
ymax = self._max
if self.include_x_axis:
ymin = min(ymin or 0, 0)
ymax = max(ymax or 0, 0)
yrng = (ymax - ymin)
else:
yrng = None
for serie in self.all_series:
serie.points = serie.values
if self.interpolate:
vals = list(
zip(
*sorted(
filter(lambda t: None not in t, serie.points),
key=lambda x: x[0]
)
)
)
serie.interpolated = self._interpolate(vals[0], vals[1])
if self.interpolate:
self.xvals = [
val[0] for serie in self.all_series
for val in serie.interpolated
]
self.yvals = [
val[1] for serie in self.series for val in serie.interpolated
]
if self.xvals:
xmin = min(self.xvals)
xmax = max(self.xvals)
xrng = (xmax - xmin)
else:
xrng = None
# these values can also be 0 (zero), so testing explicitly for None
if xrng is not None:
self._box.xmin, self._box.xmax = xmin, xmax
if yrng is not None:
self._box.ymin, self._box.ymax = ymin, ymax
| lgpl-3.0 | -429,799,592,630,257,700 | 29.648855 | 79 | 0.537983 | false |
matichorvat/pydmrs | dmrs_preprocess/label.py | 1 | 4339 |
def create_label(dmrs_xml, carg_clean=False):
"""
Create an identifying label attribute for each node and link,
consisting of its arguments and properties.
    :param dmrs_xml: Input DMRS XML
    :param carg_clean: If True, strip the surrounding quotes from CARG values
    :return: Modified DMRS XML
"""
for entity in dmrs_xml:
if entity.tag == 'node':
node_attribs = collect_node_attribs(entity)
# Remove quotes around CARG
if node_attribs.get('carg') is not None and carg_clean:
clean_carg = node_attribs['carg'][1:-1]
entity.attrib['carg'] = clean_carg
node_attribs['carg'] = clean_carg
if node_attribs.get('gpred') is not None:
label = label_gpred(node_attribs)
elif node_attribs.get('pos') == 'n':
label = label_noun(node_attribs)
elif node_attribs.get('pos') == 'v':
label = label_verb(node_attribs)
else:
label = label_default(node_attribs)
# Attach the label to node XML
entity.attrib['label'] = label
elif entity.tag == 'link':
# Get ARG and POST of a link
arg = entity.findall('rargname')[0].text if entity.findall('rargname') else None
post = entity.findall('post')[0].text if entity.findall('post') else None
# Create a label and attach it to the link XML
entity.attrib['label'] = '_'.join([x for x in [arg, post] if x is not None])
return dmrs_xml
noun_like_gpreds = {'person', 'manner', 'reason', 'place_n', 'time_n', 'minute', 'mofy',
'numbered_hour', 'dofm', 'dofw', 'holiday', 'season', 'year_range',
'yofc', 'thing', 'measure', 'meas_np', 'named', 'named_n'}
def label_gpred(node_attribs):
if node_attribs.get('gpred') == 'pron':
label_list = [
node_attribs.get('gpred'),
node_attribs.get('pers'),
node_attribs.get('num'),
node_attribs.get('gend')
]
elif node_attribs.get('gpred') in noun_like_gpreds:
label_list = [
node_attribs.get('carg'),
node_attribs.get('gpred'),
simplify_gpred_num(node_attribs.get('num'))
]
else:
label_list = [
node_attribs.get('carg'),
node_attribs.get('gpred')
]
return '_'.join([unicode(x) for x in label_list if x is not None])
def label_noun(node_attribs):
label_list = [
node_attribs.get('lemma'),
node_attribs.get('pos'),
node_attribs.get('sense'),
node_attribs.get('pers') if node_attribs.get('pers') is not None else '3',
node_attribs.get('num') if node_attribs.get('num') is not None else 'sg'
]
return '_' + '_'.join([unicode(x) for x in label_list if x is not None])
def label_verb(node_attribs):
label_list = [
node_attribs.get('lemma'),
node_attribs.get('pos'),
node_attribs.get('sense'),
node_attribs.get('tense'),
node_attribs.get('sf'),
'perf' if node_attribs.get('perf') != '-' else None,
'prog' if node_attribs.get('prog') != '-' else None
]
return '_' + '_'.join([unicode(x) for x in label_list if x is not None])
def label_default(node_attribs):
label_list = [
node_attribs.get('lemma'),
node_attribs.get('pos'),
node_attribs.get('sense')
]
return '_' + '_'.join([unicode(x) for x in label_list if x is not None])
def collect_node_attribs(node):
"""
Collect node attributes in a dictionary
:param node: XML node
:return: Dictionary of node attributes
"""
node_attribs = dict()
for node_info in node:
node_attribs.update(node_info.attrib)
if node_info.tag == 'gpred':
node_attribs[node_info.tag] = node_info.text
if node.attrib.get('carg') is not None:
node_attribs['carg'] = node.attrib['carg']
if node_attribs.get('tense') is not None and node_attribs.get('tense').lower() == 'untensed':
del node_attribs['tense']
if node_attribs.get('sf') == 'prop' or node_attribs.get('sf') == 'prop-or-ques':
del node_attribs['sf']
return node_attribs
def simplify_gpred_num(gpred_num):
return gpred_num if gpred_num == 'pl' else 'sg'
| mit | -1,478,493,958,019,896,300 | 29.342657 | 97 | 0.558424 | false |
jonaustin/advisoryscan | django/django/middleware/cache.py | 1 | 4057 | from django.conf import settings
from django.core.cache import cache
from django.utils.cache import get_cache_key, learn_cache_key, patch_response_headers
class CacheMiddleware(object):
"""
Cache middleware. If this is enabled, each Django-powered page will be
cached for CACHE_MIDDLEWARE_SECONDS seconds. Cache is based on URLs.
Only parameter-less GET or HEAD-requests with status code 200 are cached.
If CACHE_MIDDLEWARE_ANONYMOUS_ONLY is set to True, only anonymous requests
(i.e., those not made by a logged-in user) will be cached. This is a
simple and effective way of avoiding the caching of the Django admin (and
any other user-specific content).
This middleware expects that a HEAD request is answered with a response
exactly like the corresponding GET request.
When a hit occurs, a shallow copy of the original response object is
returned from process_request.
Pages will be cached based on the contents of the request headers
listed in the response's "Vary" header. This means that pages shouldn't
change their "Vary" header.
This middleware also sets ETag, Last-Modified, Expires and Cache-Control
headers on the response object.
"""
def __init__(self, cache_timeout=None, key_prefix=None, cache_anonymous_only=None):
self.cache_timeout = cache_timeout
if cache_timeout is None:
self.cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS
self.key_prefix = key_prefix
if key_prefix is None:
self.key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
if cache_anonymous_only is None:
self.cache_anonymous_only = getattr(settings, 'CACHE_MIDDLEWARE_ANONYMOUS_ONLY', False)
else:
self.cache_anonymous_only = cache_anonymous_only
def process_request(self, request):
"Checks whether the page is already cached and returns the cached version if available."
if self.cache_anonymous_only:
assert hasattr(request, 'user'), "The Django cache middleware with CACHE_MIDDLEWARE_ANONYMOUS_ONLY=True requires authentication middleware to be installed. Edit your MIDDLEWARE_CLASSES setting to insert 'django.contrib.auth.middleware.AuthenticationMiddleware' before the CacheMiddleware."
if not request.method in ('GET', 'HEAD') or request.GET:
request._cache_update_cache = False
return None # Don't bother checking the cache.
if self.cache_anonymous_only and request.user.is_authenticated():
request._cache_update_cache = False
return None # Don't cache requests from authenticated users.
cache_key = get_cache_key(request, self.key_prefix)
if cache_key is None:
request._cache_update_cache = True
return None # No cache information available, need to rebuild.
response = cache.get(cache_key, None)
if response is None:
request._cache_update_cache = True
return None # No cache information available, need to rebuild.
request._cache_update_cache = False
return response
def process_response(self, request, response):
"Sets the cache, if needed."
if not hasattr(request, '_cache_update_cache') or not request._cache_update_cache:
# We don't need to update the cache, just return.
return response
if request.method != 'GET':
# This is a stronger requirement than above. It is needed
# because of interactions between this middleware and the
# HTTPMiddleware, which throws the body of a HEAD-request
# away before this middleware gets a chance to cache it.
return response
if not response.status_code == 200:
return response
patch_response_headers(response, self.cache_timeout)
cache_key = learn_cache_key(request, response, self.cache_timeout, self.key_prefix)
cache.set(cache_key, response, self.cache_timeout)
return response
| mit | 901,742,829,478,083,500 | 47.297619 | 301 | 0.685975 | false |
dgwartney-io/import-io-api-python | tests/unit/importio2/test_api_call.py | 1 | 1773 | #
# Copyright 2016 Import.io
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
import logging
from unittest import TestCase
from importio2 import ApiCall
import json
logger = logging.getLogger(__name__)
class TestApiCall(TestCase):
def setUp(self):
self.api = ApiCall()
    def test_constructor(self):
        api = ApiCall()
        self.assertIsNotNone(api)
# def test_http_delete(self):
# self.api.api_host = 'httpbin.org'
# self.api.path = '/delete'
# request = self.api.api_request()
# self.assertEqual(request.status_code, 200)
#
# d = json.loads(request.text)
# self.assertIsNotNone(d)
def test_http_get(self):
self.api.api_host = 'httpbin.org'
self.api.scheme = 'http'
self.api.path = "get"
self.api.headers = {"Accept": "application/json"}
self.api.api_request()
self.assertEqual(self.api.api_result.status_code, 200)
result = json.loads(self.api.api_result.text)
self.assertIsNotNone(result)
self.assertEqual('http://httpbin.org/get', result['url'])
def test_http_patch(self):
# self.assertFalse(True)
pass
def test_http_post(self):
# self.assertFalse(True)
pass
| apache-2.0 | 5,221,499,926,267,727,000 | 27.142857 | 74 | 0.658206 | false |